query: string, lengths 9 to 3.4k
document: string, lengths 9 to 87.4k
metadata: dict
negatives: sequence, lengths 4 to 101
negative_scores: sequence, lengths 4 to 101
document_score: string, lengths 3 to 10
document_rank: string, 102 classes
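The column layout above can be inspected directly with the Hugging Face datasets library. The snippet below is a minimal sketch, assuming the dataset is published on the Hub; the repository ID "org/dataset-name" is a placeholder rather than the actual name.

from datasets import load_dataset

# Placeholder repository ID -- substitute the actual dataset name.
ds = load_dataset("org/dataset-name", split="train")

# Each row carries a query, its positive document, a list of mined
# negatives, the per-negative similarity scores, and the score/rank
# of the positive document itself.
row = ds[0]
print(row["query"][:80])
print(row["document"][:80])
print(len(row["negatives"]), len(row["negative_scores"]))
print(row["document_score"], row["document_rank"])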
Report usage metrics for all active Crypto Express adapters of CPCs. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_crypto(cmd_ctx, cpc, **options):
    cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def metrics_adapter(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_adapter(cmd_ctx, cpc, adapter, options))", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))", "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def metrics_env(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))", "def help(self, args):\n print('No commands available for this consumer')", "async def stats(self, ctx):\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)", "def stats(caesar, input):\n commands = {}\n users = {}\n channels = {}\n\n ignore = set(['f_note', 'startup', 'message', 'noteuri'])\n for (name, user), count in caesar.stats.items(): \n if name in ignore: continue\n if not user: continue\n\n if not user.startswith('#'): \n try: users[user] += count\n except KeyError: users[user] = count\n else: \n try: commands[name] += count\n except 
KeyError: commands[name] = count\n\n try: channels[user] += count\n except KeyError: channels[user] = count\n\n comrank = sorted([(b, a) for (a, b) in commands.iteritems()], reverse=True)\n userank = sorted([(b, a) for (a, b) in users.iteritems()], reverse=True)\n charank = sorted([(b, a) for (a, b) in channels.iteritems()], reverse=True)\n\n # most heavily used commands\n creply = 'most used commands: '\n for count, command in comrank[:10]: \n creply += '%s (%s), ' % (command, count)\n caesar.say(creply.rstrip(', '))\n\n # most heavy users\n reply = 'power users: '\n for count, user in userank[:10]: \n reply += '%s (%s), ' % (user, count)\n caesar.say(reply.rstrip(', '))\n\n # most heavy channels\n chreply = 'power channels: '\n for count, channel in charank[:3]: \n chreply += '%s (%s), ' % (channel, count)\n caesar.say(chreply.rstrip(', '))", "def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -r, --recursive\n --dbs-conf <conf_file>\n --show-prod\n --show-caf\n --only-subscribed\n --only-custodial\n \"\"\"", "def metrics_networkport(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_networkport(cmd_ctx, cpc, adapter, options))", "def main():\n\n args = parse_args()\n metric_sender = MetricSender(verbose=args.verbose, debug=args.debug)\n\n discovery_key_disk = 'disc.disk'\n interval = 3\n pcp_disk_dev_metrics = ['disk.dev.total', 'disk.dev.avactive']\n item_prototype_macro_disk = '#OSO_DISK'\n item_prototype_key_tps = 'disc.disk.tps'\n item_prototype_key_putil = 'disc.disk.putil'\n\n disk_metrics = pminfo.get_sampled_data(pcp_disk_dev_metrics, interval, 2)\n\n pcp_metrics_divided = {}\n for metric in pcp_disk_dev_metrics:\n pcp_metrics_divided[metric] = {k: v for k, v in disk_metrics.items() if metric in k}\n\n # do TPS checks; use disk.dev.total\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[0]],\n pcp_disk_dev_metrics[0] + '.')\n\n # Add dynamic items\n metric_sender.add_dynamic_metric(discovery_key_disk, item_prototype_macro_disk, filtered_disk_totals.keys())\n\n # calculate the TPS and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n disk_tps = (totals[1] - totals[0]) / interval\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_tps, disk): disk_tps})\n\n # do % Util checks; use disk.dev.avactive\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[1]],\n pcp_disk_dev_metrics[1] + '.')\n\n # calculate the % Util and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n total_active = (float)(totals[1] - totals[0]) / 1000.0\n putil = 100 * total_active / interval\n\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_putil, disk): putil})\n\n metric_sender.send_metrics()", "def usage(self, host):", "def show(self):\n prev_queries = 0\n prev_cpu_sys = 0\n prev_cpu_user = 0\n \n lines = {\n \"Uptime (seconds)\": \"--\",\n \"Number of queries\": \"--\",\n \"Query per second\": \"--\",\n \"ACL drops\": \"--\",\n \"Dynamic drops\": \"--\",\n \"Rule drops\": \"--\",\n \"CPU Usage (%s)\": \"--\",\n \"Cache hitrate\": \"--\"\n }\n\n while True:\n try:\n # get stats from dnsdist\n stats = Statistics(console=self.console)\n global_stats = stats[\"global\"]\n \n qps = int(global_stats[\"queries\"]) - prev_queries\n prev_queries = int(global_stats[\"queries\"])\n cpu = 
(int(global_stats[\"cpu-sys-msec\"])+int(global_stats[\"cpu-user-msec\"]) - prev_cpu_sys - prev_cpu_user) / 10\n prev_cpu_sys = int(global_stats[\"cpu-sys-msec\"])\n prev_cpu_user = int(global_stats[\"cpu-user-msec\"])\n \n lines[\"Uptime (seconds)\"] = global_stats[\"uptime\"]\n lines[\"Number of queries\"] = global_stats[\"queries\"]\n lines[\"Query per second\"] = qps\n lines[\"CPU Usage (%s)\"] = cpu\n lines[\"ACL drops\"] = global_stats[\"acl-drops\"]\n lines[\"Rule drops\"] = global_stats[\"rule-drop\"]\n lines[\"Cache hitrate\"] = global_stats[\"cache-hits\"]\n lines[\"Dynamic drops\"] = global_stats[\"dyn-blocked\"]\n\n # reprint the lines \n sys.stdout.write(\"\\033[1mDashboard for dnsdist\\033[0m\\n\")\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Global:\\n\")\n for k,v in lines.items():\n sys.stdout.write(\"\\t%s: %s\\n\" % (k,v))\n sys.stdout.write(\"Backends:\\n\")\n for s in stats[\"backends\"]:\n if not len(s[\"name\"]):\n s[\"name\"] = \"--\"\n if not len(s[\"pools\"]):\n s[\"pools\"] = \"--\"\n sys.stdout.write(\"\\t#%s / %s / %s / %s\\n\" % (s[\"#\"],s[\"address\"],s[\"name\"],s[\"pools\"]) )\n sys.stdout.write(\"\\t\\tNumber of queries: %s\\n\" % s[\"queries\"])\n sys.stdout.write(\"\\t\\tQuery per second: %s\\n\" % s[\"qps\"])\n sys.stdout.write(\"\\t\\tNumber of drops: %s\\n\" % s[\"drops\"])\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Ctrl+C to exit\\n\")\n \n time.sleep(1)\n \n \n # move up cursor and delete whole line\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for k,v in lines.items():\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for s in stats[\"backends\"]:\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n \n del stats\n except KeyboardInterrupt:\n break", "def do_stats(self, args):\n total_cpu = free_cpu = in_use_cpu = 0\n\n summary = self._qm.get_all_host_summary()\n for host_id, host_info in summary.viewitems():\n host_cpu = int(host_info['total cores'])\n total_cpu += host_cpu\n locked = host_info.get('locked by')\n if locked:\n # If host is locked then all CPUs are in use.\n in_use_cpu += host_cpu\n else:\n free_host_cpu = int(host_info['free cores'])\n in_use_cpu += (host_cpu - free_host_cpu)\n free_cpu += free_host_cpu\n\n print('total CPU: ', total_cpu)\n print('used/locked CPU: ', in_use_cpu)\n print('free CPU: ', free_cpu)\n capacity = float(in_use_cpu) / float(total_cpu)\n print('capacity used: %.1f%%' % (capacity * 100,))\n capacity = float(free_cpu) / float(total_cpu)\n print('capacity remaining: %.1f%%' % (capacity * 100,))", "def usage():\n print(\"[1] Getting help from a cipher \")\n print(\" ---> ./cryptogra.py caesar -h \")\n print(\"\")", "def usage():\n \n print '-b <bench> the bench to show.'\n print '-c <config> the config to show (GPU, 8888, 565, etc).'\n print '-d <dir> a directory containing bench_r<revision>_<scalar> files.'\n print '-e <file> file containing expected bench values/ranges.'\n print ' Will raise exception if actual bench values are out of range.'\n print ' See bench_expectations.txt for data format and examples.'\n print '-f <revision>[:<revision>] the revisions to use for fitting.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-i <time> the time to ignore 
(w, c, g, etc).'\n print ' The flag is ignored when -t is set; otherwise we plot all the'\n print ' times except the one specified here.'\n print '-l <title> title to use for the output graph'\n print '-m <representation> representation of bench value.'\n print ' See _ListAlgorithm class in bench_util.py.'\n print '-o <path> path to which to write output; writes to stdout if not specified'\n print '-r <revision>[:<revision>] the revisions to show.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-s <setting>[=<value>] a setting to show (alpha, scalar, etc).'\n print '-t <time> the time to show (w, c, g, etc).'\n print '-x <int> the desired width of the svg.'\n print '-y <int> the desired height of the svg.'\n print '--default-setting <setting>[=<value>] setting for those without.'", "def _cmd_metrics(args):\n if (\n len(args.cnarrays) > 1\n and args.segments\n and len(args.segments) > 1\n and len(args.cnarrays) != len(args.segments)\n ):\n raise ValueError(\n \"Number of coverage/segment filenames given must be \"\n \"equal, if more than 1 segment file is given.\"\n )\n\n cnarrs = map(read_cna, args.cnarrays)\n if args.segments:\n args.segments = map(read_cna, args.segments)\n table = metrics.do_metrics(cnarrs, args.segments, args.drop_low_coverage)\n write_dataframe(args.output, table)", "def print_help(self):\r\n\t\ttext = \"\\tName: ml_scikit_OPTICS\"\r\n\t\ttext += \"\\n\\t\\tThis machine learning plugin uses scikit-learn's OPTICS algorithm.\\n\"\r\n\t\ttext += \"\\n\\t\\tOptional Parameters:\"\r\n\t\ttext += \"\\n\\t\\t\\tOPTICS_skip_normalization: Do NOT perform normalization (scaling) of data, skip this step.\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_eps: Specify eps parameter (default is 1.0).\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_min_samples: Specify min_samples parameter (default is 5).\"\r\n#\r\n# OPTICS (with memory complexity n) is an alternative to DBSCAN (with memory complexity n^2)\r\n# which has time complexity n^2 in general with the default max_eps = np.inf. 
\r\n# We will set max_eps = eps to reduce the run-time.\r\n#\r\n\t\treturn text", "def HelpCommand(self, unused_args, unused_sub_opts=None, unused_headers=None,\n unused_debug=None):\n self.OutputUsageAndExit()", "def usage(err=''):\r\n m = '%s\\n' %err\r\n m += 'Default usage is to list Cases closed for the 30 days\\n'\r\n m += '\\n Example:\\n'\r\n m += ' closedcases -n 90 \\n' \r\n m += ' \\n'\r\n# m += ' closedcases -n 60 -s blast5 \\n'\r\n return m", "def qc_metrics(self, files_in, qc_files):\n self.cmd(\"{samtools} index {bam_in}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in=files_in[0],\n ),\n shell=True)\n self.cmd(\"{samtools} idxstats {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[0],\n ),\n shell=True,\n log_output=True)\n self.cmd(\"{samtools} flagstat {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[1],\n ),\n shell=True,\n log_output=True)\n \n self.checkpoint(qc_files[0])\n self.checkpoint(qc_files[1])\n self.checkpoint(qc_files[2])", "def ShowAllVouchers(cmd_args=[], cmd_options={}):\n iv_hash_table = kern.globals.ivht_bucket\n num_buckets = sizeof(kern.globals.ivht_bucket) / sizeof(kern.globals.ivht_bucket[0])\n print GetIPCVoucherSummary.header\n for i in range(num_buckets):\n for v in IterateQueue(iv_hash_table[i], 'ipc_voucher_t', 'iv_hash_link'):\n print GetIPCVoucherSummary(v)", "def help_help(self):\n print(\"List commands or print details about a command\")", "def treatCmdOpts(argv):\n baseName = os.path.basename(__file__)\n amc.cBaseName = colored(baseName, 'yellow')\n\n helpTxt = amc.cBaseName + ' analyses observation statistics file for selected GNSSs'\n\n # create the parser for command line arguments\n parser = argparse.ArgumentParser(description=helpTxt)\n\n parser.add_argument('--obsstat', help='observation statistics file', type=str, required=True)\n\n parser.add_argument('--freqs', help='select frequencies to use (out of {freqs:s}, default {freq:s})'.format(freqs='|'.join(gfzc.lst_freqs), freq=colored(gfzc.lst_freqs[0], 'green')), default=gfzc.lst_freqs[0], type=str, required=False, action=gco.freqtype_action, nargs='+')\n\n parser.add_argument('--cutoff', help='cutoff angle in degrees (default {mask:s})'.format(mask=colored('0', 'green')), default=0, type=int, required=False, action=gco.cutoff_action)\n\n parser.add_argument('--dbcvs', help='Add information to CVS database (default {cvsdb:s})'.format(cvsdb=colored(gco.CVSDB_OBSTLE, 'green')), required=False, type=str, default=gco.CVSDB_OBSTLE)\n\n parser.add_argument('--plot', help='displays interactive plots (default False)', action='store_true', required=False, default=False)\n\n parser.add_argument('--logging', help='specify logging level console/file (two of {choices:s}, default {choice:s})'.format(choices='|'.join(gco.lst_logging_choices), choice=colored(' '.join(gco.lst_logging_choices[3:5]), 'green')), nargs=2, required=False, default=gco.lst_logging_choices[3:5], action=gco.logging_action)\n\n # drop argv[0]\n args = parser.parse_args(argv[1:])\n\n # return arguments\n return args.obsstat, args.freqs, args.cutoff, args.dbcvs, args.plot, args.logging", "def _get_metrics_options(metrics):\n metrics_options = []\n if metrics is None:\n metrics = []\n for static_metric in metrics:\n metrics_options += [\n \"-m\",\n static_metric.metric.mp_metric_name,\n str(static_metric.value),\n ]\n return metrics_options", "def command_help(self, *args, 
**kwargs):\n print(\"Commands available:\\n\")\n for name in dir(self):\n if not name.startswith(\"command_\"):\n continue\n name_clean = name[len(\"command_\"):]\n print(\"%s:\\n - %s\\n\" % (name_clean, getattr(self, name).__doc__.strip()))", "def display_memcache_info(request):\n # pylint: disable-msg=E1101\n return utility.respond(request, 'admin/memcache_info',\n {'memcache_info': memcache.get_stats()})", "def help_opt(self):\n print(OPTIONS)", "def usage():\n\n # Local constants\n\n # Local variables\n\n #****** start usage() ******#\n print()\n print(\" Usage: python TCGCardTracker.py <arguement below> <optional-argument-1>\")\n print(\"\\tadd (Optional): Add a card to your collection. Requires TCGPlayer URL.\")\n print(\"\\tdelete (Optional): Delete a card from your collection. Requires TCGPlayer URL.\")\n print(\"\\tupdate (Optional): Updates pricing data for every card in your collection.\")\n print(\"\\ttop25 (Optional): Outputs the 25 most valuable cards from your collection.\")\n print(\"\\texport (Optional): Exports a list of TCGPlayer URLs to a text file.\")\n print(\"\\texport_collection (Optional): Exports your collection to a .csv including most recent price data.\")\n print(\"\\timport (Optional): Imports a text file of TCGPlayer URLs to bulk import cards into your collection. Requires text file.\")\n print(\"\\tworth (Optional): Ouputs how much your collection is worth using latest price data.\")\n print(\"\\tgraph (Optional): Outputs historical pricing data for a given card. Requires TCGPlayer URL.\")\n print(\"\\tgraph (Optional): Outputs historical pricing data for a given card. Requires TCGPlayer URL.\")\n print(\"\\tticker (Optional): Displays a ticker grid of the change in value over a given time. If run without the days back parameter it will default to 7 days.\")\n sys.exit()", "def _display_cuda_devices():\n\n cuda_query_output = subprocess.run(\"nvidia-smi --query-gpu=gpu_uuid,gpu_name,compute_mode --format=csv\", shell=True, capture_output=True, text=True)\n # Check if command worked\n if cuda_query_output.returncode == 0:\n # Split by line jump and comma\n cuda_devices_list = [entry for entry in cuda_query_output.stdout.splitlines()]\n logger.debug(f\"CUDA devices available: {*cuda_devices_list,}\")\n # We only support \"Default\" and not \"Exclusive_Process\" for the compute mode\n if \"Default\" not in cuda_query_output.stdout:\n logger.warning(f\"GPU in 'Exclusive_Process' mode (or Prohibited), one context is allowed per device. This may prevent some openmmtools features from working. 
GPU must be in 'Default' compute mode\")\n # Handel the case where the command had some error\n else:\n logger.debug(f\"nvidia-smi command failed: {cuda_query_output.stderr}, this is expected if there is no GPU available\")", "def about( cls, ):\n url = r\"http://www.opencircuits.com/Python_Smart_Terminal\"\n __, mem_msg = cls.show_process_memory( )\n msg = ( f\"{cls.controller.app_name} version:{cls.controller.version} \\nmode: {cls.parameters.mode}\"\n f\"\\n by Russ Hensel\"\n f\"\\nMemory in use {mem_msg} \\nCheck <Help> or \\n{url} \\nfor more info.\" )\n messagebox.showinfo( \"About\", msg )", "def cmd_help(self, commands=None, usage=False):\n if commands:\n usage = True\n commands = {self.approx.decmd(c.lower()) for c in commands}\n rejects = commands - self.approx.keys()\n for reject in rejects:\n self.put_pretty(\"No command named %r\" % reject)\n continue\n commands -= rejects\n if self.debug:\n assert not any(self.approx.encmd(r) in self.mod_commands for\n r in rejects)\n assert all(self.approx.encmd(c) in self.mod_commands for\n c in commands)\n if not commands:\n return\n requested = zip(commands, (self.approx[c] for c in commands))\n else:\n requested = self.approx.items()\n help = znc.CTable()\n help.AddColumn(\"Command\")\n help.AddColumn(\"Usage\" if usage else \"Description\")\n from itertools import zip_longest\n #\n for command, parser in requested:\n if usage:\n upre = \"usage: %s\" % command\n rest = (parser.format_usage()\n .replace(upre, \"\", 1)\n .replace(\"[-h] \", \"\", 1))\n desc = [l.strip() for l in rest.split(\"\\n\") if l.strip()]\n else:\n desc = [parser.description]\n for line, comm in zip_longest(desc, (command,), fillvalue=\"\"):\n help.AddRow()\n help.SetCell(\"Command\", comm)\n help.SetCell(\"Usage\" if usage else \"Description\", line)\n #\n s_line = znc.String()\n strung = []\n while help.GetLine(len(strung), s_line):\n strung.append(s_line.s)\n also = \" (<command> [-h] for details)\"\n strung[1] = strung[1].replace(len(also) * \" \", also, 1)\n self.put_pretty(\"\\n\".join(strung))", "def print_help_info(self, global_options):\r\n usage = ['',\"Type '%s help <subcommand>' for help on a specific subcommand.\" % self.prog_name,'']\r\n usage.append('Available subcommands:')\r\n commands = self.get_commands(global_options).keys()\r\n commands.sort()\r\n for cmd in commands:\r\n usage.append(' %s' % cmd)\r\n return '\\n'.join(usage)", "def measure(self,command_exe, command_args, measure_out):\n pass", "def do_overview(self):\n summaries = []\n for name, cmd in self.base.commands.iteritems():\n summaries.append(' %-14s %s\\n' % (name, cmd.get_summary()))\n summaries.sort()\n sys.stdout.write('Usage: %s COMMAND ARGUMENTS...\\n\\n' \\\n 'Available commands:\\n' % (self.base.scriptname, ))\n for line in summaries:\n sys.stdout.write(line)", "def print_usage_command(self):\n print self.get_usage_command()", "def print_usage_command(self):\n print self.get_usage_command()", "def getHelp(self):\r\n help_str =\\\r\n \"\"\"##########################################################################################\r\n#\r\n# Required:\r\n#\r\n# --query_NAST multi-fasta file containing query sequences in alignment format\r\n#\r\n# Common opts:\r\n#\r\n# --db_NAST db in NAST format\r\n# --db_FASTA db in fasta format (megablast formatted)\r\n#\r\n#\r\n# -n number of top matching database sequences to compare to (default 15)\r\n# -R min divergence ratio default: 1.007\r\n# -P min percent identity among matching sequences (default: 90)\r\n#\r\n# ## parameters to tune 
ChimeraParentSelector:\r\n#\r\n# Scoring parameters:\r\n# -M match score (default: +5)\r\n# -N mismatch penalty (default: -4)\r\n# -Q min query coverage by matching database sequence (default: 70)\r\n# -T maximum traverses of the multiple alignment (default: 1)\r\n\r\n#\r\n# ## parameters to tune ChimeraPhyloChecker:\r\n#\r\n#\r\n# --windowSize default 50\r\n# --windowStep default 5\r\n# --minBS minimum bootstrap support for calling chimera (default: 90)\r\n# -S percent of SNPs to sample on each side of breakpoint for computing bootstrap support (default: 10)\r\n# --num_parents_test number of potential parents to test for chimeras (default: 3)\r\n# --MAX_CHIMERA_PARENT_PER_ID Chimera/Parent alignments with perID above this are considered non-chimeras (default 100; turned off)\r\n#\r\n# ## misc opts\r\n#\r\n# --printFinalAlignments shows alignment between query sequence and pair of candidate chimera parents\r\n# --printCSalignments print ChimeraSlayer alignments in ChimeraSlayer output\r\n# --exec_dir chdir to here before running\r\n#\r\n#########################################################################################\r\n \"\"\"\r\n return help_str", "def cmd_help(args):", "def metrics_flash(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_flash(cmd_ctx, cpc, options))", "def print_help():\n print \"\"\"\nMeasure Operating System Performance (mosp)\n-------------------------------------------\n\nUse this program to measure and report on operating system\nperformance.\n\nThis code measures operating system performance,\nincluding CPU, memory, disk and network, and\noutputs stats to screen and optionally to file\ntoo for use in performance analysis\n\nUses the psutil library\n\nInstall psutil (Ubuntu) if you don't already have it:\n sudo apt-get install python-dev\n sudo pip install psutil\n\nUsage:\n python mosp.py [options]\n\nExample usage:\n python mosp.py -W -i 2\n\nOptions:\n -h --help Display this help and exit\n -m --max-run-time Maximum time to run for before exiting\n (default is infinite)\n -i --interval Interval between requests in seconds\n (default is 1)\n -w --output-file Specify an output filename\n -W Output results to default filename\n default format is:\n mosp-HOSTNAME-YYYYMMDD-HHMMSS.csv\n -b --output-path Specify path to output file directory\n -j --no-header-row Suppress writing header row into CSV\n -v --version Output version information and exit\n\n \"\"\"\n return()", "def info():\n f = Figlet(font='standard')\n click.echo(f.renderText('covtool'))\n click.secho(\n \"covtool: a simple CLI for fetching covid data\", fg='cyan')\n click.echo(\n \"Data Sources: https://www.worldometers.info/coronavirus\\nJohn Hopkins [https://github.com/CSSEGISandData/COVID-19] \")\n click.secho(\"Author: Amayo II <[email protected]>\", fg='magenta')", "def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -t, --transaction\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -f, --from-file <filename>\n --not-error-tolerant\n \"\"\"", "def show_help(argv=None):\n if argv:\n if \"list_datasets\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"list_datasets\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm ls\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"search_datasets\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"search_datasets\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n 
sdm_util.print_message(\"usage : sdm search <keyword>\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"show_mounts\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"show_mounts\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm ps\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"mount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"mount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm mount <dataset_name> [<mount_path>]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"mmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"mmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm mmount <dataset_name> [<dataset_name> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"unmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"unmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm unmount <mount_id> [<cleanup_flag>]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"munmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"munmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm munmount <mount_id> [<mount_id> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"clean\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"clean\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm clean\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n else:\n sdm_util.print_message(\"Unrecognized command\")\n return 1\n else:\n sdm_util.print_message(\"command : sdm <COMMAND> [<COMMAND_SPECIFIC_ARGS> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(\"Available Commands\")\n\n tbl = PrettyTable()\n tbl.field_names = [\"COMMAND\", \"DESCRIPTION\"]\n for cmd in COMMANDS:\n command, _, desc = cmd\n command_str = \" | \".join(command)\n tbl.add_row([command_str, desc])\n\n sdm_util.print_message(tbl)\n sdm_util.print_message(\"\")\n return 0", "def ShowIPCVoucherAttributeControl(cmd_args=[], cmd_options={}):\n if not cmd_args:\n raise ArgumentError(\"Please provide correct arguments.\")\n ivac = kern.GetValueFromAddress(cmd_args[0], 'ipc_voucher_attr_control_t')\n print GetIPCVoucherAttrControlSummary.header\n print GetIPCVoucherAttrControlSummary(ivac)\n if config['verbosity'] > vHUMAN:\n cur_entry_index = 0\n last_entry_index = unsigned(ivac.ivac_table_size)\n print \"index \" + GetIPCVoucherAttributeEntrySummary.header\n while cur_entry_index < last_entry_index:\n print \"{: <5d} \".format(cur_entry_index) + GetIPCVoucherAttributeEntrySummary(addressof(ivac.ivac_table[cur_entry_index]))\n cur_entry_index += 1", "def explainerdashboard_cli(ctx):", "def help(update, context):\n msg = \"\"\n msg += \"\\n/covid 7-Day-Incident per Million\"\n msg += \"\\n/daylio What did I do a year ago today?\"\n msg += \"\\n/f1last Results of the last race\"\n msg += \"\\n/f1stand Driver standings\"\n msg += \"\\n/f1next Time and place of the next race\"\n msg += \"\\n/fuel prices and consump. 
(args: Xeur Ykm)\"\n msg += \"\\n/ip Outside ip address\"\n msg += \"\\n/rate Exchange rates (args: Xeur/Yhuf)\"\n msg += \"\\n/rss check rss feeds for new content\"\n msg += \"\\n/sun Time of sunrise and sunset\"\n msg += \"\\n/xkcd Sends last comic image and alt\"\n msg.rstrip()\n update.message.reply_text(msg)", "def command_help(args):\n\tprint_usage()\n\treturn 0", "def help(cls, extra_args=None):\n if (_is_text_interface()):\n return _create_text_help_str(cls, cls._TEXT_USAGE)\n else:\n return cls._GRAPHICAL_USAGE", "def list_usage(progname, description, command_keys, command_helps, command_aliases):\n dvars = {'prog': progname}\n dvars.update(vars())\n result = []\n result.append(description % dvars)\n for key in command_keys:\n if key in command_aliases:\n alias = ' (%s)' % command_aliases[key]\n else:\n alias = ''\n if key is not None:\n result.append((\"%s%s\" % (key, alias)).ljust(10) + ' \\t' + command_helps[key])\n else:\n result.append('')\n return '\\n'.join(result)", "async def view_stats(self, ctx):\n app_info = await self.bot.application_info()\n total_ram = (psutil.virtual_memory().total >> 30) + 1\n embed = discord.Embed(\n title=\"Bot Stats\",\n description=f\"Running on a dedicated server with {total_ram}GB RAM \\n provided by RandomGhost#0666.\",\n )\n\n embed.add_field(name=\"**__General Info__**\", inline=False, value=\"\\u200b\")\n embed.add_field(name=\"Latency\", value=f\"{self.bot.latency*1000:.03f}ms\")\n embed.add_field(name=\"Guild Count\", value=f\"{len(self.bot.guilds):,}\")\n embed.add_field(name=\"User Count\", value=f\"{len(self.bot.users):,}\")\n\n embed.add_field(name=\"**__Technical Info__**\", inline=False, value=\"\\u200b\")\n embed.add_field(name=\"System CPU Usage\", value=f\"{psutil.cpu_percent():.02f}%\")\n embed.add_field(name=\"System RAM Usage\", value=f\"{psutil.virtual_memory().used/1048576:.02f} MB\")\n embed.add_field(name=\"System Uptime\", value=f\"{timedelta(seconds=int(time.time() - psutil.boot_time()))}\")\n embed.add_field(name=\"Bot CPU Usage\", value=f\"{process.cpu_percent():.02f}%\")\n embed.add_field(name=\"Bot RAM Usage\", value=f\"{process.memory_info().rss / 1048576:.02f} MB\")\n embed.add_field(name=\"Bot Uptime\", value=f\"{timedelta(seconds=int(time.time() - process.create_time()))}\")\n\n embed.add_field(name=\"**__Links__**\", inline=False, value=\"\\u200b\")\n embed.add_field(name=\"Support Server\", value=\"[https://discord.swaglyrics.dev](https://discord.swaglyrics.dev)\")\n embed.add_field(name=\"Invite\", value=\"[https://invite.swaglyrics.dev](https://invite.swaglyrics.dev)\")\n embed.add_field(\n name=\"Source\",\n value=\"[https://swaglyrics.dev/SwagLyrics-Discord-Bot]\" \"(https://swaglyrics.dev/SwagLyrics-discord-bot)\",\n )\n\n embed.set_footer(\n text=f\"Made by {app_info.owner} • {self.bot.get_user(512708394994368548)}\",\n icon_url=[\n app_info.owner.avatar_url_as(size=128),\n self.bot.get_user(512708394994368548).avatar_url_as(size=128),\n ][getrandbits(1)],\n ) # randomize clash or flabbet avatar\n\n await ctx.send(embed=embed)", "def diagnostics(self,\n *opts, # type: DiagnosticsOptions\n **kwargs # type: Dict[str, Any]\n ) -> DiagnosticsResult:\n\n return super().diagnostics(*opts, **kwargs)", "def list_metrics(self):\n pass", "def printHelp():\n print(\"amqWorkApiMass.py -n <msgcnt> -b <body> -m <headers> -s <path/to/bodyandheaders>\")", "def __show_all_metrics(self):\n for obj in self.metrics_list:\n self.__print_metrics_info(obj.get_name())\n print()", "def reports_cli():", "def 
metric_options(self):\n return Optimizer.list_method_options(self.metric_creator.method_dict)", "def main(argv=None):\n\n if argv is None:\n argv = sys.argv\n\n # setup command line parser\n parser = E.OptionParser(version=\"%prog version: $Id$\",\n usage=globals()[\"__doc__\"])\n\n parser.add_option(\"-t\", \"--test\", dest=\"test\", type=\"string\",\n help=\"supply help\")\n\n parser.add_option(\"--method\", dest=\"method\", type=\"choice\",\n choices=(\"metrics\", \"summary\", \"module_summary\"),\n help=\"method to summarise clustering\")\n\n parser.add_option(\"--ref-gtf-files\", dest=\"ref_gtf\", type=\"string\",\n help=\"comma separated list of reference gtf files\")\n\n # add common options (-h/--help, ...) and parse command line\n (options, args) = E.Start(parser, argv=argv)\n\n if options.method == \"metrics\":\n infile = argv[-1]\n E.info(\"loading input file: %s\" % infile)\n assert infile\n\n df = pd.read_table(infile,\n sep=\"\\t\",\n header=None,\n index_col=0)\n\n df = df.ix[:, :50]\n cluster_combs = (x for x in itertools.combinations(df.columns,\n 2))\n genes = df.index\n results_dict = {}\n all_clusts = {}\n\n E.info(\"setting up cluster containers\")\n for i in df.columns:\n clusters = set(df[i].values.tolist())\n cluster_dict = {}\n for clust in clusters:\n cluster_dict[clust] = []\n for gene in genes:\n cluster_dict[df[i][gene]].append(gene)\n\n for col in clusters:\n col_set = set()\n clust_col = cluster_dict[col]\n gene_members = itertools.combinations(clust_col,\n 2)\n col_set.update(gene_members)\n cluster_dict[col] = col_set\n all_clusts[i] = cluster_dict\n E.info(\"generating all pair-wise cluster comparisons\")\n E.info(\"calculating adjusted mutual information\")\n for k in cluster_combs:\n clusters1 = all_clusts[k[0]]\n clusters2 = all_clusts[k[1]]\n metric_dict = {}\n metric_dict['AMI'] = TS.adjustedMutualInformation(clusters1,\n clusters2)\n results_dict[k] = metric_dict\n\n res_frame = pd.DataFrame(results_dict).T\n res_frame = res_frame.reset_index()\n res_frame.drop(['level_0'], inplace=True, axis=1)\n res_frame.drop(['level_1'], inplace=True, axis=1)\n\n # flatten rand indices and add to output dataframe\n rand_arrays = TS.randIndexes(df)\n flat_adj_rand = TS.unravel_arrays(rand_arrays[0])\n flat_rand = TS.unravel_arrays(rand_arrays[1])\n res_frame['Rand_Index'] = flat_rand\n res_frame['Adjusted_Rand_Index'] = flat_adj_rand\n E.info(\"aggregating results\")\n\n res_frame.to_csv(options.stdout,\n sep=\"\\t\",\n index_label='idx')\n\n elif options.method == \"summary\":\n infiles = argv[-1]\n list_of_files = infiles.split(\",\")\n\n file_dict = {}\n for fle in list_of_files:\n fname = fle.split(\"/\")[-1]\n condition = fname.split(\"-\")[0]\n ref = fname.split(\"-\")[1]\n df_ = pd.read_table(fle,\n sep=\"\\t\",\n header=0,\n index_col=0)\n df_.columns = ['gene_id', 'cluster']\n clust_dict = {}\n for idx in df_.index:\n cluster = df_.loc[idx]['cluster']\n gene = df_.loc[idx]['gene_id']\n try:\n clust_dict[cluster] += 1\n except KeyError:\n clust_dict[cluster] = 1\n med_size = np.median(clust_dict.values())\n file_dict[fname] = {'condition': condition,\n 'reference': ref,\n 'median_cluster_size': med_size}\n\n outframe = pd.DataFrame(file_dict).T\n outframe.to_csv(options.stdout,\n sep=\"\\t\",\n index_label='idx')\n\n elif options.method == \"module_summary\":\n # get lncRNA/gene lengths from reference gtfs\n ref_gtfs = options.ref_gtf.split(\",\")\n length_dict = {}\n for ref in ref_gtfs:\n oref = IOTools.openFile(ref, \"rb\")\n git = 
GTF.transcript_iterator(GTF.iterator(oref))\n for gene in git:\n for trans in gene:\n length = trans.end - trans.start\n try:\n length_dict[trans.gene_id] += length\n except KeyError:\n length_dict[trans.gene_id] = length\n oref.close()\n\n infiles = argv[-1]\n list_of_files = infiles.split(\",\")\n\n fdfs = []\n for fle in list_of_files:\n cond = fle.split(\"/\")[-1].split(\"-\")[0]\n refer = fle.split(\"/\")[-1].split(\"-\")[1]\n _df = pd.read_table(fle, sep=\"\\t\",\n header=0, index_col=0)\n _df.columns = ['gene_id', 'cluster']\n clusters = set(_df['cluster'])\n c_dict = {}\n # summarize over each cluster\n for clust in clusters:\n lengths = []\n c_df = _df[_df['cluster'] == clust]\n for lid in c_df['gene_id']:\n lengths.append(length_dict[lid])\n c_dict[clust] = {'cluster_size': len(c_df['gene_id']),\n 'mean_length': np.mean(lengths),\n 'index': (cond, refer),\n 'module': clust}\n cdf = pd.DataFrame(c_dict).T\n # use a multindex for hierarchical indexing\n midx = pd.MultiIndex.from_tuples(cdf['index'])\n cdf.index = midx\n cdf.drop(['index'], inplace=True, axis=1)\n fdfs.append(cdf)\n\n # generate a single output df\n s_df = fdfs[0]\n fdfs.pop(0)\n for df in fdfs:\n s_df = s_df.append(df)\n\n s_df.to_csv(options.stdout,\n index_label=(\"condition\", \"reference\"),\n sep=\"\\t\")\n\n # write footer and output benchmark information.\n E.Stop()", "async def eventstats(self, ctx):\n if ctx.invoked_subcommand is None:\n await ctx.send_help(ctx.command)", "def usage():", "def usage():", "def DoHelp(options, args):\n __pychecker__ = 'unusednames=options'\n if len(args) == 1 and args[0] in COMMAND_USAGE_TEXT:\n print(COMMAND_USAGE_TEXT[args[0]])\n else:\n raise gclient_utils.Error(\"unknown subcommand '%s'; see 'gclient help'\" %\n args[0])", "def help_command(server, output, conf):\n server.tell(output.name, 'Available commands:')\n for key in COMMANDS.keys():\n cmd_func = COMMANDS[key]\n if cmd_func.__doc__:\n server.tell(output.name, '%s: %s' % (key[1:], cmd_func.__doc__))\n else:\n server.tell(output.name, key[1:])\n return", "def help(self, *args):\n for _, v in self.useage.items():\n print v.__doc__", "def help(update, context):\n update.message.reply_text(\"\"\"usage \n /bus <bus name> or /bus <bus name> <stop name>\n /addstop <stop name> <stop code>\n /delstop <stop name>\n /showstops\n /help\n \"\"\")\n\n # log info\n logger.info(\"help used username:{0}\".format(update.message.from_user.username))", "def usage(progname):\n \n sys.stderr.write(\"Usage: \" +progname + \" [-cmnv] [-z score] \"\n \" <outdir>\\n\")\n sys.stderr.write(' -c class level not fold level evaluation\\n')\n sys.stderr.write(' -m read multiquery file on stdin\\n')\n sys.stderr.write(' -n negate scores (so that most -ve is best)\\n')\n sys.stderr.write(' -v verbose messages to stderr\\n')\n sys.stderr.write(' -z score : assign identifiers not present in the output a score of score\\n')\n sys.exit(1)", "def help(self, msg=None):\n\n # Print the message if given.\n if not msg == None:\n print str(msg) + \"\\n\"\n\n # Display the list of commands, in the alphabetical order.\n print \"Use one of the following commands:\"\n for action in sorted(self.actions.keys()):\n info = self.actions[action]\n joined_oblig = ' '.join(info['required'])\n if len(info['additional']) > 0:\n add = [\"<%s>\" % x for x in info['additional']]\n joined_add = '[' + ' '.join(add) + ']'\n else:\n joined_add = ''\n print \"\\t* %s %s %s\" % (action, joined_oblig, joined_add)", "def show_command_multiple(self, command, arglist, vdc=None, 
parser=None, optdict={}):\n self.logger.debug(\"run multiple show commands {} {}\".format(command, str(arglist)))\n output = \"\"\n if isinstance(arglist, str):\n arglist = [arglist]\n for vdcname in vdc:\n self.switchto_vdc(vdcname)\n if len(vdc) > 1:\n output = output + \"\\nvdc {}: \\n\".format(self.get_current_vdc())\n for a in arglist:\n self.logger.debug(\"run show commands {} {} in vdc {}\".format(command, a, vdcname))\n if parser is not None:\n scratch = parser(self._send_xml_cli_show(\"{} {}\".format(command, a)), **optdict)\n if scratch is None:\n output = output + \"Command '{} {}' returned no output\\n\".format(command, a)\n else:\n output = output + scratch\n else:\n output = output + self._send_xml_cli_show(\"{} {}\".format(command, a))\n self.logger.debug(\"multiple show commands output {}\".format(output))\n return output", "def usage(self):\n\n # header\n self.usage_header()\n\n print _(\"\"\"Screen: %(screen)s\nDescription: %(description)s\n\nUsage: %(app_name)s %(screen)s [options]\"\"\") % {\n 'app_name': constants.App.NAME,\n 'screen': self.name,\n 'description': self.description,\n }\n # any additional info in between (see other classes for reference)\n self._usage_options_example()\n\n #footer\n self.usage_footer()", "def main():\n test_cases = ast.literal_eval(sys.argv[1])\n results = str(my_info()) + '\\t\\t'\n for test_case in test_cases:\n mode = test_case[0]\n id_1 = int(test_case[1])\n id_2 = int(test_case[2])\n if mode == 'jc':\n results += str(Jaccard_Coefficient(id_1, id_2)) + '\\t\\t'\n elif mode == 'cc':\n results += str(Correlation_Coefficient(id_1, id_2)) + '\\t\\t'\n else:\n exit('bad command')\n print results + '\\n'", "def _help(self):\n self.onecmd('help')", "def get_config_metrics():\n\n metrics = {'disk_usage': 'YES',\n 'cpu_percent': 'YES',\n 'memory_info': 'YES',\n 'cpu_stats': 'YES'}\n\n return metrics", "def test_cw_metrics(self):\n\n instances = set()\n result = self.cw_client.list_metrics(Namespace=\"CWAgent\", MetricName=\"cpu_usage_system\")\n for i in result[\"Metrics\"]:\n instances.add(i[\"Dimensions\"][0][\"Value\"])\n\n for key, value in self.cdk_output_map.items():\n if \"Instance\" in key:\n self.assertTrue(value in instances)", "def main(cls):\n parser = cls.make_argument_parser()\n args = parser.parse_args()\n args.device = make_hoomd_device(args)\n benchmark = cls(**vars(args))\n performance = benchmark.execute()\n\n if args.device.communicator.rank == 0:\n print(f'{numpy.mean(performance)}')", "def describe_cost_management_exports(self):\n return [{\"name\": self.export_name, \"container\": self.container, \"directory\": self.directory}]", "async def help(self, context):\n prefix = config.BOT_PREFIX\n user=context.message.author\n if not isinstance(prefix, str):\n prefix = prefix[0]\n embed = discord.Embed(title=\"Help\", description=\"List of available commands:\", color=0x00FF00)\n for i in self.bot.cogs:\n cog = self.bot.get_cog(i.lower())\n commands = cog.get_commands()\n command_list = [command.name for command in commands if not command.hidden or context.message.author.id in config.OWNERS]\n command_description = [command.help for command in commands if not command.hidden or context.message.author.id in config.OWNERS]\n help_text = '\\n'.join(f'{prefix}{n} - {h}' for n, h in zip(command_list, command_description))\n embed = discord.Embed(title=f\"Commands in {i.capitalize()} Cog\", description=f'```{help_text}```', color=0x00FF00)\n await user.send(embed=embed)\n if not isinstance(context.message.channel, 
discord.channel.DMChannel):\n await context.send(f\"DM sent to {user.mention}\")\n await context.message.delete()", "def server_stats():\n out = subprocess.check_output(cmd_preamble + [\"admin\", \"stats\"])\n return out.decode()", "def Usage():\n print \"\"\"\n To plot the result using the iter number of the x axis:\n\n plot_sdcard.py -i /tmp/data.txt\n\n To plot the result using time for the x axis:\n\n plot_sdcard.py -t /tmp/data.txt\n\n To plot the result from the profiler:\n\n profile_sdcard.sh\n plot_sdcard.py -p\n\n \"\"\"\n sys.exit(2)", "def show_all(self):\n cmodules.showModuleData(\n Options.Author,\n Options.Name,\n Options.Call,\n Options.Category,\n Options.Type,\n Options.Version,\n Options.Description,\n Options.License,\n Options.Datecreation,\n Options.Lastmodified\n )\n self.show_commands()\n self.show_opt()", "def command_help(self, bot, update):\n\n messages = [\n 'Available commands:',\n '/who - Who is Myles?',\n '/where - Where is Myles?',\n '/tweet - What was the last tweet Myles sent?',\n '/photo - What was the last Instagram photo Myles took?',\n '/web - Where can I find Myles on the interwebs?',\n ]\n\n self.send_messages(bot, update, messages)", "def help(bot, sender, sendmsg, label, args):\n\n clist = commands.commands\n csort = sorted(clist.values(), key=lambda c: c.__name__.lower())\n\n if len(args) > 0:\n page = int(args[0]) - 1\n else:\n page = 0\n\n pages = len(clist) // 10 + 1\n\n sendmsg(\"-- Help (Page {} of {}) --\".format(page + 1, pages))\n for i in range(10):\n if i >= len(csort):\n break\n\n command = csort[i + (page * 10)]\n sendmsg(\"{}: {}\".format(command.__name__, command.__doc__))", "def test_usage(self):\n # Make sure the usage message is shown when no arguments\n # are given and when the -h or --help option is given.\n for options in [], ['-h'], ['--help']:\n exit_code, output = run_cli(*options)\n assert \"Usage:\" in output", "def procs_calculate_axyzc(molecules, n_cores=-1, show_progress=True, scr=None, cmd=XTB_CMD):\n results = None\n return results", "def metrics(self, account_id):\n from pureport_client.commands.accounts.metrics import Command\n return Command(self.client, account_id)", "def collect_stats(xcnode, cmds):\n output = ''\n\n if not xcnode.client:\n print 'ssh session does not exist for {}'.format(xcnode.host)\n return output\n\n for cmd in cmds:\n stdin, stdout, stderr = xcnode.client.exec_command(cmd)\n out = stdout.read()\n outerr = stderr.read()\n xcnode.fd.write('{} run @ {}\\n'.format(cmd, datetime.now()))\n xcnode.fd.write('stdout:\\n============:\\n{}\\n'.format(out))\n if outerr:\n xcnode.fd.write('stderr\\n===========:\\n{}\\n'.format(outerr))\n output += out + '\\n'\n output += outerr + '\\n'\n xcnode.fd.flush()\n\n return output", "def help(self, *args):\n\n\t\tif self.available_cmds:\n\t\t\tdir_text = \"Enter commands in the format 'cmd [args]'. 
Available commands: \\n\"\n\t\t\tfor cmd in self.available_cmds.keys():\n\t\t\t\tdir_text += \" -\" + cmd + \"\\n\"\n\t\telse:\n\t\t\tdir_text = \"No commands available.\"\n\n\t\tif self.available_apps:\n\t\t\tapp_txt = \"Available applications to run: \\n\"\n\t\t\tfor app in self.available_apps.keys():\n\t\t\t\tapp_txt += \" -\" + app + \"\\n\"\n\t\telse:\n\t\t\tapp_txt = \"No applications available.\"\n\n\t\tprint(dir_text + \"\\n\" + app_txt + \"\\n\")", "def show_usage():\n\n usage_screen = \"\\nUsage:\\n\" \\\n f\" {basename(argv[0])} <mock_1> [<mock_2> ...]\\n\" \\\n \"\\nOptions:\\n\" \\\n \" mock-departments Send HTTP requests to create some mock departments in the backend.\\n\" \\\n \" mock-employees Send HTTP requests to create some mock employees in the backend.\\n\" \\\n \" help Show this help page.\\n\" \\\n \"\" \\\n \" verbose Enables detailed request logging for the remaining options.\\n\"\n print(usage_screen)", "def main():\n parser = argparse.ArgumentParser(\n description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n add_common_snmp_args(parser)\n parser.add_argument(\n \"-w\",\n \"--warning\",\n type=int,\n default=70,\n help=\"Warning memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-c\",\n \"--critical\",\n type=int,\n default=90,\n help=\"Critical memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-f\",\n \"--family\",\n required=True,\n help=\"Switch family. Supported families: 1910, 1920, 1920S\",\n )\n\n config = vars(parser.parse_args())\n check_snmp_args(config)\n check_thresholds(config)\n\n dataset = {}\n\n if config[\"family\"] == \"1920S\":\n cpu = ObjectType(\n ObjectIdentity(\n \"HP-SWITCHING-MIB\", \"agentSwitchCpuProcessTotalUtilization\", 0\n )\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n cpu = ObjectType(\n ObjectIdentity(\"HH3C-ENTITY-EXT-MIB\", \"hh3cEntityExtCpuUsage\", 8)\n )\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n try:\n raw_data = get_snmp_data(config, cpu)\n except ValueError as err:\n unknown_exit(SERVICE, err)\n add_vars_to_dataset(dataset, raw_data)\n\n if config[\"family\"] == \"1920S\":\n dataset[\"cpu_usage\"] = get_hp_cpu_usage(\n dataset[\"agentSwitchCpuProcessTotalUtilization\"]\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n dataset[\"cpu_usage\"] = int(dataset[\"hh3cEntityExtCpuUsage\"])\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n state, message = generate_output(config, dataset)\n report(state, message)", "def showUsage():\n None", "def cbstats_test(self):\n cluster_len = RestConnection(self.master).get_cluster_size()\n if self.command == \"kvstore\":\n self.verify_cluster_stats()\n if self.command != \"key\":\n if \"tapagg\" in self.command and cluster_len == 1:\n self.log.info(\"This command only works with cluster with 2 nodes or more\")\n raise Exception(\"This command does not work with one node cluster\")\n else:\n # tapagg needs replica items to print out results\n if \"tapagg\" in self.command:\n for bucket in self.buckets:\n self.shell.execute_cbworkloadgen(self.couchbase_usrname, \\\n self.couchbase_password, self.num_items, \\\n self.set_get_ratio, bucket.name, \\\n self.item_size, self.command_options)\n self.sleep(5)\n for bucket in self.buckets:\n if \"allocator\" in self.command:\n output, error = self.shell.execute_mcstat(bucket,\"\",\n keyname=self.command, vbid=\"\", enable_ipv6=self.enable_ipv6)\n else:\n output, error = self.shell.execute_cbstats(bucket, 
self.command)\n self.verify_results(output, error)\n if self.command in [\"allocator\", \"kvtimings\", \"timings\"]:\n self.log.warning(\"We will not verify exact values for this stat\")\n else:\n self._verify_direct_client_stats(bucket, self.command, output)\n else:\n mc_conn = MemcachedClientHelper.direct_client(self.master, self.buckets[0].name, self.timeout)\n bucket_info = RestConnection(self.master).get_bucket(self.buckets[0])\n keys_map = {}\n for i in range(1, self.num_items + 1):\n vb_id = i - len(bucket_info.vbuckets) * int(i // len(bucket_info.vbuckets))\n try:\n mc_conn.set(\"test_docs-%s\" % i, 0, 0, json.dumps('{ \"test\" : \"test\"}').encode(\"ascii\", \"ignore\"), vb_id)\n except Exception:\n continue\n keys_map[\"test_docs-%s\" % i] = vb_id\n count = 0\n for key, vb_id in keys_map.items():\n output, error = self.shell.execute_cbstats(self.buckets[0], self.command, key, vb_id)\n self.verify_results(output, error)\n count += 1\n if self.master.ip.endswith(\"amazonaws.com\") and count == 10:\n self.log.info(\"check only 10 keys in aws \")\n break", "def _usage_options_example(self):\n pass", "def Usage(shorthelp=0, writeto_stdout=0, detailed_error=None,\n exitcode=None, show_cmd=None, show_global_flags=False):\n printer('%s: Incorrect usage; details below.' % show_cmd)\n printer('Correct usage is as follows:')\n printer('')\n for line in (' ' + cmd.__doc__.rstrip()).splitlines():\n printer(line)\n # Print out str(FLAGS) for just the UICmd-specific flags.\n tmp_flags = flags.FlagValues()\n unused_cmd = type(cmd)(show_cmd, tmp_flags)\n prefix = _UICMD_MODULE_NAME + ':\\n'\n flag_str = tmp_flags.ModuleHelp(_UICMD_MODULE_NAME)\n flag_str = flag_str.lstrip()\n if flag_str.startswith(prefix):\n flag_str = flag_str[len(prefix):]\n if flag_str:\n printer('')\n printer('flags:')\n for line in flag_str.splitlines():\n printer(line)\n if detailed_error is not None:\n printer('')\n printer('The incorrect usage is as follows:')\n printer('')\n for line in unicode(detailed_error).splitlines():\n printer(' ' + line)", "def help():\n print(UI.HELP)", "def main():\n\n parser = argparse.ArgumentParser()\n parser.add_argument('-H', '--host', required=True)\n parser.add_argument('-p', '--port', default=443)\n parser.add_argument('-u', '--url', default='/')\n parser.add_argument('-c', '--cert', required=True)\n parser.add_argument('-k', '--key', required=True)\n parser.add_argument('-P', '--perfdata', action='append')\n args = parser.parse_args()\n\n csv = get_csv(args)\n rows = get_rows(csv)\n\n frontend_errors = get_frontend_errors(rows)\n backend_errors = get_backend_errors(rows)\n member_errors = get_member_errors(rows)\n perfdata = get_perfdata(args.perfdata, rows)\n\n code = NAGIOS_OK\n if member_errors:\n code = NAGIOS_WARNING\n if frontend_errors or backend_errors:\n code = NAGIOS_CRITICAL\n\n status = ['OK', 'WARNING', 'CRITICAL'][code]\n print '{} frontend errors {}; backend errors {}; member errors {} | {}'.\\\n format(status,\n ', '.join(frontend_errors) if frontend_errors else 'none',\n ', '.join(backend_errors) if backend_errors else 'none',\n ', '.join(member_errors) if member_errors else 'none',\n ' '.join(perfdata))\n\n sys.exit(code)", "def print_current_mem_usage():\n mem = get_current_mem_usage()\n output = \"# Mem usage = {} MiB #\".format(mem)\n print(\"\\n\" + \"-\" * len(output))\n print(output)\n print(\"-\" * len(output) + \"\\n\")", "def help_text(command):\n\n courses_list = ('ENPM611', 'ENPM613', 'ENPM631', 'ENPM687',\\\n 'ENPM691', 'ENPM693', 'ENPM694', 
'ENPM696',\\\n 'ENPM809J','ENPM809R', 'ENPM809W')\n\n response = 'I have course descriptions for: '\n for course_name in courses_list:\n response = response + course_name + ' '\n\n response = response + '\\nTo get the course description, execute command: about ENPM<course_number>'\n\n return response", "def help_analyze(self):\n print(ANALYZE)", "def cli_help(self):\n self._generate_cli_version()\n self._generate_cli_help()\n sys.exit(0)", "def usage():\n usage_string = \"oxclient.py\\n\" + \\\n \"\\t[-n, --number] -- NNNN [0-9999] (optional: default is \" + str(CONFIG['number']) + \")\\n\" + \\\n \"\\t[-H, --host] -- Something like freedom.dynalis.org(optional: default is \" + str(CONFIG['host']) + \")\\n\" + \\\n \"\\t[-p, --port] -- Something like 50007(optional: default is \" + str(CONFIG['port']) + \")\\n\" + \\\n \"\\t[-l, --logfile] -- log file(optional: default is \" + str(CONFIG['logfile']) + \")\\n\" + \\\n \"\\t[-s, --stdout] -- stdout on.\\n\" + \\\n \"\\t[-d, --debug] -- debug on.\\n\" + \\\n \"\\t[-h, --help] -- show usage.\\n\\n\" + \\\n \"oxclient.py --number 9999 --host freedom.dynalis.org --port 50007 --logfile /tmp/oxclient.log --stdout\\n\"\n print(usage_string)" ]
[ "0.61710244", "0.5943986", "0.5742064", "0.5595102", "0.55200773", "0.54445076", "0.5411969", "0.53798133", "0.5354008", "0.535235", "0.5349474", "0.5332392", "0.53124905", "0.5226011", "0.51669437", "0.51390135", "0.5104674", "0.5103828", "0.50836706", "0.5078611", "0.5078313", "0.50721085", "0.50588006", "0.5033912", "0.5027998", "0.5023201", "0.5021214", "0.5019734", "0.49973506", "0.49908763", "0.4974831", "0.49402538", "0.49365178", "0.49252337", "0.49246788", "0.49160758", "0.48973888", "0.48973888", "0.4896006", "0.48905233", "0.48851627", "0.4881996", "0.48818433", "0.48680407", "0.48656747", "0.48558584", "0.48517093", "0.48455235", "0.48436087", "0.48398516", "0.48343647", "0.48338714", "0.4831536", "0.48242217", "0.48151648", "0.48119023", "0.48111948", "0.48095688", "0.48089173", "0.4808279", "0.48071194", "0.48071194", "0.47975737", "0.4791053", "0.47898155", "0.4789028", "0.47857523", "0.47775057", "0.47757116", "0.47662723", "0.47538093", "0.47477615", "0.4745148", "0.4733583", "0.47327498", "0.47315255", "0.47306946", "0.4729147", "0.47269544", "0.4724885", "0.4721301", "0.47162217", "0.47108045", "0.47099078", "0.47099045", "0.4706227", "0.47041956", "0.47040513", "0.47029847", "0.47008055", "0.46983072", "0.46934146", "0.4692047", "0.4691446", "0.46899718", "0.46879852", "0.46860752", "0.46851397", "0.46750435", "0.46682778" ]
0.6303562
0
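As the "objective" metadata above indicates, each row is meant for triplet-style contrastive training over (query, document, negatives). The helper below is a minimal sketch of expanding one row into training triplets; the score threshold and the function name are illustrative assumptions, not something the dataset prescribes.

def iter_triplets(row, max_negative_score=0.5):
    """Yield (anchor, positive, negative) triplets from one dataset row.

    Negatives scoring above max_negative_score are skipped on the
    assumption that they could be near-duplicates of the positive;
    the threshold is an illustrative choice.
    """
    anchor = row["query"]
    positive = row["document"]
    for negative, score in zip(row["negatives"], row["negative_scores"]):
        if float(score) <= max_negative_score:
            yield anchor, positive, negative

Applied to the first row shown above, the anchor is the Crypto Express metrics help text, the positive is the matching zhmc command handler, and the listed command-help and stats snippets serve as negatives.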
Report usage metrics for all active Flash Express adapters of CPCs. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_flash(cmd_ctx, cpc, **options):
    cmd_ctx.execute_cmd(lambda: cmd_metrics_flash(cmd_ctx, cpc, options))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def metrics_adapter(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_adapter(cmd_ctx, cpc, adapter, options))", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))", "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def usage(self, host):", "def metrics_env(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))", "def metrics_networkport(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_networkport(cmd_ctx, cpc, adapter, options))", "def help(self, args):\n print('No commands available for this consumer')", "def show(self):\n prev_queries = 0\n prev_cpu_sys = 0\n prev_cpu_user = 0\n \n lines = {\n \"Uptime (seconds)\": \"--\",\n \"Number of queries\": \"--\",\n \"Query per second\": \"--\",\n 
\"ACL drops\": \"--\",\n \"Dynamic drops\": \"--\",\n \"Rule drops\": \"--\",\n \"CPU Usage (%s)\": \"--\",\n \"Cache hitrate\": \"--\"\n }\n\n while True:\n try:\n # get stats from dnsdist\n stats = Statistics(console=self.console)\n global_stats = stats[\"global\"]\n \n qps = int(global_stats[\"queries\"]) - prev_queries\n prev_queries = int(global_stats[\"queries\"])\n cpu = (int(global_stats[\"cpu-sys-msec\"])+int(global_stats[\"cpu-user-msec\"]) - prev_cpu_sys - prev_cpu_user) / 10\n prev_cpu_sys = int(global_stats[\"cpu-sys-msec\"])\n prev_cpu_user = int(global_stats[\"cpu-user-msec\"])\n \n lines[\"Uptime (seconds)\"] = global_stats[\"uptime\"]\n lines[\"Number of queries\"] = global_stats[\"queries\"]\n lines[\"Query per second\"] = qps\n lines[\"CPU Usage (%s)\"] = cpu\n lines[\"ACL drops\"] = global_stats[\"acl-drops\"]\n lines[\"Rule drops\"] = global_stats[\"rule-drop\"]\n lines[\"Cache hitrate\"] = global_stats[\"cache-hits\"]\n lines[\"Dynamic drops\"] = global_stats[\"dyn-blocked\"]\n\n # reprint the lines \n sys.stdout.write(\"\\033[1mDashboard for dnsdist\\033[0m\\n\")\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Global:\\n\")\n for k,v in lines.items():\n sys.stdout.write(\"\\t%s: %s\\n\" % (k,v))\n sys.stdout.write(\"Backends:\\n\")\n for s in stats[\"backends\"]:\n if not len(s[\"name\"]):\n s[\"name\"] = \"--\"\n if not len(s[\"pools\"]):\n s[\"pools\"] = \"--\"\n sys.stdout.write(\"\\t#%s / %s / %s / %s\\n\" % (s[\"#\"],s[\"address\"],s[\"name\"],s[\"pools\"]) )\n sys.stdout.write(\"\\t\\tNumber of queries: %s\\n\" % s[\"queries\"])\n sys.stdout.write(\"\\t\\tQuery per second: %s\\n\" % s[\"qps\"])\n sys.stdout.write(\"\\t\\tNumber of drops: %s\\n\" % s[\"drops\"])\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Ctrl+C to exit\\n\")\n \n time.sleep(1)\n \n \n # move up cursor and delete whole line\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for k,v in lines.items():\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for s in stats[\"backends\"]:\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n \n del stats\n except KeyboardInterrupt:\n break", "def stats(caesar, input):\n commands = {}\n users = {}\n channels = {}\n\n ignore = set(['f_note', 'startup', 'message', 'noteuri'])\n for (name, user), count in caesar.stats.items(): \n if name in ignore: continue\n if not user: continue\n\n if not user.startswith('#'): \n try: users[user] += count\n except KeyError: users[user] = count\n else: \n try: commands[name] += count\n except KeyError: commands[name] = count\n\n try: channels[user] += count\n except KeyError: channels[user] = count\n\n comrank = sorted([(b, a) for (a, b) in commands.iteritems()], reverse=True)\n userank = sorted([(b, a) for (a, b) in users.iteritems()], reverse=True)\n charank = sorted([(b, a) for (a, b) in channels.iteritems()], reverse=True)\n\n # most heavily used commands\n creply = 'most used commands: '\n for count, command in comrank[:10]: \n creply += '%s (%s), ' % (command, count)\n caesar.say(creply.rstrip(', '))\n\n # most heavy users\n reply = 'power users: '\n for count, user in userank[:10]: \n reply += '%s (%s), ' % (user, count)\n caesar.say(reply.rstrip(', '))\n\n # most heavy channels\n chreply = 'power channels: '\n 
for count, channel in charank[:3]: \n chreply += '%s (%s), ' % (channel, count)\n caesar.say(chreply.rstrip(', '))", "def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -r, --recursive\n --dbs-conf <conf_file>\n --show-prod\n --show-caf\n --only-subscribed\n --only-custodial\n \"\"\"", "def main():\n\n args = parse_args()\n metric_sender = MetricSender(verbose=args.verbose, debug=args.debug)\n\n discovery_key_disk = 'disc.disk'\n interval = 3\n pcp_disk_dev_metrics = ['disk.dev.total', 'disk.dev.avactive']\n item_prototype_macro_disk = '#OSO_DISK'\n item_prototype_key_tps = 'disc.disk.tps'\n item_prototype_key_putil = 'disc.disk.putil'\n\n disk_metrics = pminfo.get_sampled_data(pcp_disk_dev_metrics, interval, 2)\n\n pcp_metrics_divided = {}\n for metric in pcp_disk_dev_metrics:\n pcp_metrics_divided[metric] = {k: v for k, v in disk_metrics.items() if metric in k}\n\n # do TPS checks; use disk.dev.total\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[0]],\n pcp_disk_dev_metrics[0] + '.')\n\n # Add dynamic items\n metric_sender.add_dynamic_metric(discovery_key_disk, item_prototype_macro_disk, filtered_disk_totals.keys())\n\n # calculate the TPS and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n disk_tps = (totals[1] - totals[0]) / interval\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_tps, disk): disk_tps})\n\n # do % Util checks; use disk.dev.avactive\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[1]],\n pcp_disk_dev_metrics[1] + '.')\n\n # calculate the % Util and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n total_active = (float)(totals[1] - totals[0]) / 1000.0\n putil = 100 * total_active / interval\n\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_putil, disk): putil})\n\n metric_sender.send_metrics()", "async def stats(self, ctx):\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)", "def print_help(self):\r\n\t\ttext = \"\\tName: ml_scikit_OPTICS\"\r\n\t\ttext += \"\\n\\t\\tThis machine learning plugin uses scikit-learn's OPTICS algorithm.\\n\"\r\n\t\ttext += \"\\n\\t\\tOptional Parameters:\"\r\n\t\ttext += \"\\n\\t\\t\\tOPTICS_skip_normalization: Do NOT perform normalization (scaling) of data, skip this step.\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_eps: Specify eps parameter (default is 1.0).\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_min_samples: Specify min_samples parameter (default is 5).\"\r\n#\r\n# OPTICS (with memory complexity n) is an alternative to DBSCAN (with memory complexity n^2)\r\n# which has time complexity n^2 in general with the default max_eps = np.inf. 
\r\n# We will set max_eps = eps to reduce the run-time.\r\n#\r\n\t\treturn text", "def display_memcache_info(request):\n # pylint: disable-msg=E1101\n return utility.respond(request, 'admin/memcache_info',\n {'memcache_info': memcache.get_stats()})", "def show_help(argv=None):\n if argv:\n if \"list_datasets\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"list_datasets\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm ls\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"search_datasets\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"search_datasets\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm search <keyword>\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"show_mounts\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"show_mounts\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm ps\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"mount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"mount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm mount <dataset_name> [<mount_path>]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"mmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"mmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm mmount <dataset_name> [<dataset_name> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"unmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"unmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm unmount <mount_id> [<cleanup_flag>]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"munmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"munmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm munmount <mount_id> [<mount_id> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"clean\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"clean\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm clean\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n else:\n sdm_util.print_message(\"Unrecognized command\")\n return 1\n else:\n sdm_util.print_message(\"command : sdm <COMMAND> [<COMMAND_SPECIFIC_ARGS> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(\"Available Commands\")\n\n tbl = PrettyTable()\n tbl.field_names = [\"COMMAND\", \"DESCRIPTION\"]\n for cmd in COMMANDS:\n command, _, desc = cmd\n command_str = \" | \".join(command)\n tbl.add_row([command_str, desc])\n\n sdm_util.print_message(tbl)\n sdm_util.print_message(\"\")\n return 0", "def usage():\n \n print '-b <bench> the bench to show.'\n print '-c <config> the config to show (GPU, 8888, 565, etc).'\n print '-d <dir> a directory containing bench_r<revision>_<scalar> files.'\n print '-e <file> file containing expected bench values/ranges.'\n print ' Will raise exception if actual bench values are out of range.'\n print ' See bench_expectations.txt for data format and examples.'\n print '-f <revision>[:<revision>] the revisions to use for fitting.'\n print ' Negative <revision> 
is taken as offset from most recent revision.'\n print '-i <time> the time to ignore (w, c, g, etc).'\n print ' The flag is ignored when -t is set; otherwise we plot all the'\n print ' times except the one specified here.'\n print '-l <title> title to use for the output graph'\n print '-m <representation> representation of bench value.'\n print ' See _ListAlgorithm class in bench_util.py.'\n print '-o <path> path to which to write output; writes to stdout if not specified'\n print '-r <revision>[:<revision>] the revisions to show.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-s <setting>[=<value>] a setting to show (alpha, scalar, etc).'\n print '-t <time> the time to show (w, c, g, etc).'\n print '-x <int> the desired width of the svg.'\n print '-y <int> the desired height of the svg.'\n print '--default-setting <setting>[=<value>] setting for those without.'", "def treatCmdOpts(argv):\n baseName = os.path.basename(__file__)\n amc.cBaseName = colored(baseName, 'yellow')\n\n helpTxt = amc.cBaseName + ' analyses observation statistics file for selected GNSSs'\n\n # create the parser for command line arguments\n parser = argparse.ArgumentParser(description=helpTxt)\n\n parser.add_argument('--obsstat', help='observation statistics file', type=str, required=True)\n\n parser.add_argument('--freqs', help='select frequencies to use (out of {freqs:s}, default {freq:s})'.format(freqs='|'.join(gfzc.lst_freqs), freq=colored(gfzc.lst_freqs[0], 'green')), default=gfzc.lst_freqs[0], type=str, required=False, action=gco.freqtype_action, nargs='+')\n\n parser.add_argument('--cutoff', help='cutoff angle in degrees (default {mask:s})'.format(mask=colored('0', 'green')), default=0, type=int, required=False, action=gco.cutoff_action)\n\n parser.add_argument('--dbcvs', help='Add information to CVS database (default {cvsdb:s})'.format(cvsdb=colored(gco.CVSDB_OBSTLE, 'green')), required=False, type=str, default=gco.CVSDB_OBSTLE)\n\n parser.add_argument('--plot', help='displays interactive plots (default False)', action='store_true', required=False, default=False)\n\n parser.add_argument('--logging', help='specify logging level console/file (two of {choices:s}, default {choice:s})'.format(choices='|'.join(gco.lst_logging_choices), choice=colored(' '.join(gco.lst_logging_choices[3:5]), 'green')), nargs=2, required=False, default=gco.lst_logging_choices[3:5], action=gco.logging_action)\n\n # drop argv[0]\n args = parser.parse_args(argv[1:])\n\n # return arguments\n return args.obsstat, args.freqs, args.cutoff, args.dbcvs, args.plot, args.logging", "def _display_cuda_devices():\n\n cuda_query_output = subprocess.run(\"nvidia-smi --query-gpu=gpu_uuid,gpu_name,compute_mode --format=csv\", shell=True, capture_output=True, text=True)\n # Check if command worked\n if cuda_query_output.returncode == 0:\n # Split by line jump and comma\n cuda_devices_list = [entry for entry in cuda_query_output.stdout.splitlines()]\n logger.debug(f\"CUDA devices available: {*cuda_devices_list,}\")\n # We only support \"Default\" and not \"Exclusive_Process\" for the compute mode\n if \"Default\" not in cuda_query_output.stdout:\n logger.warning(f\"GPU in 'Exclusive_Process' mode (or Prohibited), one context is allowed per device. This may prevent some openmmtools features from working. 
GPU must be in 'Default' compute mode\")\n # Handel the case where the command had some error\n else:\n logger.debug(f\"nvidia-smi command failed: {cuda_query_output.stderr}, this is expected if there is no GPU available\")", "def _cmd_metrics(args):\n if (\n len(args.cnarrays) > 1\n and args.segments\n and len(args.segments) > 1\n and len(args.cnarrays) != len(args.segments)\n ):\n raise ValueError(\n \"Number of coverage/segment filenames given must be \"\n \"equal, if more than 1 segment file is given.\"\n )\n\n cnarrs = map(read_cna, args.cnarrays)\n if args.segments:\n args.segments = map(read_cna, args.segments)\n table = metrics.do_metrics(cnarrs, args.segments, args.drop_low_coverage)\n write_dataframe(args.output, table)", "def print_help():\n print \"\"\"\nMeasure Operating System Performance (mosp)\n-------------------------------------------\n\nUse this program to measure and report on operating system\nperformance.\n\nThis code measures operating system performance,\nincluding CPU, memory, disk and network, and\noutputs stats to screen and optionally to file\ntoo for use in performance analysis\n\nUses the psutil library\n\nInstall psutil (Ubuntu) if you don't already have it:\n sudo apt-get install python-dev\n sudo pip install psutil\n\nUsage:\n python mosp.py [options]\n\nExample usage:\n python mosp.py -W -i 2\n\nOptions:\n -h --help Display this help and exit\n -m --max-run-time Maximum time to run for before exiting\n (default is infinite)\n -i --interval Interval between requests in seconds\n (default is 1)\n -w --output-file Specify an output filename\n -W Output results to default filename\n default format is:\n mosp-HOSTNAME-YYYYMMDD-HHMMSS.csv\n -b --output-path Specify path to output file directory\n -j --no-header-row Suppress writing header row into CSV\n -v --version Output version information and exit\n\n \"\"\"\n return()", "def HelpCommand(self, unused_args, unused_sub_opts=None, unused_headers=None,\n unused_debug=None):\n self.OutputUsageAndExit()", "def do_stats(self, args):\n total_cpu = free_cpu = in_use_cpu = 0\n\n summary = self._qm.get_all_host_summary()\n for host_id, host_info in summary.viewitems():\n host_cpu = int(host_info['total cores'])\n total_cpu += host_cpu\n locked = host_info.get('locked by')\n if locked:\n # If host is locked then all CPUs are in use.\n in_use_cpu += host_cpu\n else:\n free_host_cpu = int(host_info['free cores'])\n in_use_cpu += (host_cpu - free_host_cpu)\n free_cpu += free_host_cpu\n\n print('total CPU: ', total_cpu)\n print('used/locked CPU: ', in_use_cpu)\n print('free CPU: ', free_cpu)\n capacity = float(in_use_cpu) / float(total_cpu)\n print('capacity used: %.1f%%' % (capacity * 100,))\n capacity = float(free_cpu) / float(total_cpu)\n print('capacity remaining: %.1f%%' % (capacity * 100,))", "def diagnostics(self,\n *opts, # type: DiagnosticsOptions\n **kwargs # type: Dict[str, Any]\n ) -> DiagnosticsResult:\n\n return super().diagnostics(*opts, **kwargs)", "def printHelp():\n print(\"amqWorkApiMass.py -n <msgcnt> -b <body> -m <headers> -s <path/to/bodyandheaders>\")", "def help_help(self):\n print(\"List commands or print details about a command\")", "async def view_stats(self, ctx):\n app_info = await self.bot.application_info()\n total_ram = (psutil.virtual_memory().total >> 30) + 1\n embed = discord.Embed(\n title=\"Bot Stats\",\n description=f\"Running on a dedicated server with {total_ram}GB RAM \\n provided by RandomGhost#0666.\",\n )\n\n embed.add_field(name=\"**__General Info__**\", inline=False, 
value=\"\\u200b\")\n embed.add_field(name=\"Latency\", value=f\"{self.bot.latency*1000:.03f}ms\")\n embed.add_field(name=\"Guild Count\", value=f\"{len(self.bot.guilds):,}\")\n embed.add_field(name=\"User Count\", value=f\"{len(self.bot.users):,}\")\n\n embed.add_field(name=\"**__Technical Info__**\", inline=False, value=\"\\u200b\")\n embed.add_field(name=\"System CPU Usage\", value=f\"{psutil.cpu_percent():.02f}%\")\n embed.add_field(name=\"System RAM Usage\", value=f\"{psutil.virtual_memory().used/1048576:.02f} MB\")\n embed.add_field(name=\"System Uptime\", value=f\"{timedelta(seconds=int(time.time() - psutil.boot_time()))}\")\n embed.add_field(name=\"Bot CPU Usage\", value=f\"{process.cpu_percent():.02f}%\")\n embed.add_field(name=\"Bot RAM Usage\", value=f\"{process.memory_info().rss / 1048576:.02f} MB\")\n embed.add_field(name=\"Bot Uptime\", value=f\"{timedelta(seconds=int(time.time() - process.create_time()))}\")\n\n embed.add_field(name=\"**__Links__**\", inline=False, value=\"\\u200b\")\n embed.add_field(name=\"Support Server\", value=\"[https://discord.swaglyrics.dev](https://discord.swaglyrics.dev)\")\n embed.add_field(name=\"Invite\", value=\"[https://invite.swaglyrics.dev](https://invite.swaglyrics.dev)\")\n embed.add_field(\n name=\"Source\",\n value=\"[https://swaglyrics.dev/SwagLyrics-Discord-Bot]\" \"(https://swaglyrics.dev/SwagLyrics-discord-bot)\",\n )\n\n embed.set_footer(\n text=f\"Made by {app_info.owner} • {self.bot.get_user(512708394994368548)}\",\n icon_url=[\n app_info.owner.avatar_url_as(size=128),\n self.bot.get_user(512708394994368548).avatar_url_as(size=128),\n ][getrandbits(1)],\n ) # randomize clash or flabbet avatar\n\n await ctx.send(embed=embed)", "def usage():\n print(\"[1] Getting help from a cipher \")\n print(\" ---> ./cryptogra.py caesar -h \")\n print(\"\")", "def _get_metrics_options(metrics):\n metrics_options = []\n if metrics is None:\n metrics = []\n for static_metric in metrics:\n metrics_options += [\n \"-m\",\n static_metric.metric.mp_metric_name,\n str(static_metric.value),\n ]\n return metrics_options", "def help_opt(self):\n print(OPTIONS)", "def main(cls):\n parser = cls.make_argument_parser()\n args = parser.parse_args()\n args.device = make_hoomd_device(args)\n benchmark = cls(**vars(args))\n performance = benchmark.execute()\n\n if args.device.communicator.rank == 0:\n print(f'{numpy.mean(performance)}')", "def help(update, context):\n msg = \"\"\n msg += \"\\n/covid 7-Day-Incident per Million\"\n msg += \"\\n/daylio What did I do a year ago today?\"\n msg += \"\\n/f1last Results of the last race\"\n msg += \"\\n/f1stand Driver standings\"\n msg += \"\\n/f1next Time and place of the next race\"\n msg += \"\\n/fuel prices and consump. 
(args: Xeur Ykm)\"\n msg += \"\\n/ip Outside ip address\"\n msg += \"\\n/rate Exchange rates (args: Xeur/Yhuf)\"\n msg += \"\\n/rss check rss feeds for new content\"\n msg += \"\\n/sun Time of sunrise and sunset\"\n msg += \"\\n/xkcd Sends last comic image and alt\"\n msg.rstrip()\n update.message.reply_text(msg)", "def about( cls, ):\n url = r\"http://www.opencircuits.com/Python_Smart_Terminal\"\n __, mem_msg = cls.show_process_memory( )\n msg = ( f\"{cls.controller.app_name} version:{cls.controller.version} \\nmode: {cls.parameters.mode}\"\n f\"\\n by Russ Hensel\"\n f\"\\nMemory in use {mem_msg} \\nCheck <Help> or \\n{url} \\nfor more info.\" )\n messagebox.showinfo( \"About\", msg )", "def usage(err=''):\r\n m = '%s\\n' %err\r\n m += 'Default usage is to list Cases closed for the 30 days\\n'\r\n m += '\\n Example:\\n'\r\n m += ' closedcases -n 90 \\n' \r\n m += ' \\n'\r\n# m += ' closedcases -n 60 -s blast5 \\n'\r\n return m", "def show_usage():\n\n usage_screen = \"\\nUsage:\\n\" \\\n f\" {basename(argv[0])} <mock_1> [<mock_2> ...]\\n\" \\\n \"\\nOptions:\\n\" \\\n \" mock-departments Send HTTP requests to create some mock departments in the backend.\\n\" \\\n \" mock-employees Send HTTP requests to create some mock employees in the backend.\\n\" \\\n \" help Show this help page.\\n\" \\\n \"\" \\\n \" verbose Enables detailed request logging for the remaining options.\\n\"\n print(usage_screen)", "def usage(self):\n\n # header\n self.usage_header()\n\n print _(\"\"\"Screen: %(screen)s\nDescription: %(description)s\n\nUsage: %(app_name)s %(screen)s [options]\"\"\") % {\n 'app_name': constants.App.NAME,\n 'screen': self.name,\n 'description': self.description,\n }\n # any additional info in between (see other classes for reference)\n self._usage_options_example()\n\n #footer\n self.usage_footer()", "def usage(app_name):\n global version\n print '\\npython {0} -a MediaLive_ARN -n Dashboard_Name [Optional parameters]\\n'.format(app_name)\n print 'Version:', version\n print '\\nThis script creates a CloudWatch Dashboard for a MediaLive/MediaPackage workflow.'\n print \"It uses the MediaLive Channel Arn as input and determines the MediaPackage instances from the \"\n print \"MediaLive channel configuration. It then creates the CloudWatch Dashboard that contains info on the\"\n print \"MediaLive channel, the two MediaPackage channels, and all of the MediaPackage endpoints.\"\n print \"\\nRequired parameters:\"\n print \"-a, --arn: MediaLive Channel ARN\"\n print \"-n, --name: Name for the CloudWatch Dashboard. \"\n print \"\"\n print \"Optional parameters\"\n print \"-l, --list: Filename of a file that contains a list of MediaLive Channel ARNs, 1 ARN per line. \"\n print \" All MediaLive channels and their corresponding MediaPackage channels will be included in \"\n print \" the CloudWatch Dashboard.\"\n print \" Note: This parameter is ignored if a channel ARN is provided via the '-a/--arn' option\"\n print \" Note: All ARNs in the list must be for channels in the same region. 
All ARNs not in the same\"\n print \" region as the first ARN in the list will be ignored.\"\n print '-h, --help: Print this help and exit.'\n print \"\"\n print 'Examples:'\n print \"\"\n print 'Using MediaLive ARN arn:aws:medialive:us-west-2:0123456789:channel:123456 and create a CloudWatch ' \\\n 'Dashboard called \"My TV Dashboard\"'\n print 'python {0} -a arn:aws:medialive:us-west-2:0123456789:channel:123456 ' \\\n '-n \"My TV Dashboard\" '.format(app_name)\n print \"\"\n print 'Using the MediaLive Channel ARN list defined in the text file \"My EML arns.txt\" create a CloudWatch' \\\n 'Dashboard called \"Primary Bouquet\".'\n print 'python {0} -l \"My EML arns.txt\" -n \"Primary Bouquet\"\\n'.format(app_name)", "def measure(self,command_exe, command_args, measure_out):\n pass", "def usage():\n\n # Local constants\n\n # Local variables\n\n #****** start usage() ******#\n print()\n print(\" Usage: python TCGCardTracker.py <arguement below> <optional-argument-1>\")\n print(\"\\tadd (Optional): Add a card to your collection. Requires TCGPlayer URL.\")\n print(\"\\tdelete (Optional): Delete a card from your collection. Requires TCGPlayer URL.\")\n print(\"\\tupdate (Optional): Updates pricing data for every card in your collection.\")\n print(\"\\ttop25 (Optional): Outputs the 25 most valuable cards from your collection.\")\n print(\"\\texport (Optional): Exports a list of TCGPlayer URLs to a text file.\")\n print(\"\\texport_collection (Optional): Exports your collection to a .csv including most recent price data.\")\n print(\"\\timport (Optional): Imports a text file of TCGPlayer URLs to bulk import cards into your collection. Requires text file.\")\n print(\"\\tworth (Optional): Ouputs how much your collection is worth using latest price data.\")\n print(\"\\tgraph (Optional): Outputs historical pricing data for a given card. Requires TCGPlayer URL.\")\n print(\"\\tgraph (Optional): Outputs historical pricing data for a given card. Requires TCGPlayer URL.\")\n print(\"\\tticker (Optional): Displays a ticker grid of the change in value over a given time. 
If run without the days back parameter it will default to 7 days.\")\n sys.exit()", "def reports_cli():", "def usage(progname):\n \n sys.stderr.write(\"Usage: \" +progname + \" [-cmnv] [-z score] \"\n \" <outdir>\\n\")\n sys.stderr.write(' -c class level not fold level evaluation\\n')\n sys.stderr.write(' -m read multiquery file on stdin\\n')\n sys.stderr.write(' -n negate scores (so that most -ve is best)\\n')\n sys.stderr.write(' -v verbose messages to stderr\\n')\n sys.stderr.write(' -z score : assign identifiers not present in the output a score of score\\n')\n sys.exit(1)", "def help(update, context):\n update.message.reply_text(\"\"\"usage \n /bus <bus name> or /bus <bus name> <stop name>\n /addstop <stop name> <stop code>\n /delstop <stop name>\n /showstops\n /help\n \"\"\")\n\n # log info\n logger.info(\"help used username:{0}\".format(update.message.from_user.username))", "def qc_metrics(self, files_in, qc_files):\n self.cmd(\"{samtools} index {bam_in}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in=files_in[0],\n ),\n shell=True)\n self.cmd(\"{samtools} idxstats {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[0],\n ),\n shell=True,\n log_output=True)\n self.cmd(\"{samtools} flagstat {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[1],\n ),\n shell=True,\n log_output=True)\n \n self.checkpoint(qc_files[0])\n self.checkpoint(qc_files[1])\n self.checkpoint(qc_files[2])", "def cmd_help(args):", "def command_help(self, *args, **kwargs):\n print(\"Commands available:\\n\")\n for name in dir(self):\n if not name.startswith(\"command_\"):\n continue\n name_clean = name[len(\"command_\"):]\n print(\"%s:\\n - %s\\n\" % (name_clean, getattr(self, name).__doc__.strip()))", "def usage():", "def usage():", "def showUsage():\n None", "def help_analyze(self):\n print(ANALYZE)", "def __show_all_metrics(self):\n for obj in self.metrics_list:\n self.__print_metrics_info(obj.get_name())\n print()", "def print_help_info(self, global_options):\r\n usage = ['',\"Type '%s help <subcommand>' for help on a specific subcommand.\" % self.prog_name,'']\r\n usage.append('Available subcommands:')\r\n commands = self.get_commands(global_options).keys()\r\n commands.sort()\r\n for cmd in commands:\r\n usage.append(' %s' % cmd)\r\n return '\\n'.join(usage)", "def Usage():\n print \"\"\"\n To plot the result using the iter number of the x axis:\n\n plot_sdcard.py -i /tmp/data.txt\n\n To plot the result using time for the x axis:\n\n plot_sdcard.py -t /tmp/data.txt\n\n To plot the result from the profiler:\n\n profile_sdcard.sh\n plot_sdcard.py -p\n\n \"\"\"\n sys.exit(2)", "def command_help(args):\n\tprint_usage()\n\treturn 0", "def show_command_multiple(self, command, arglist, vdc=None, parser=None, optdict={}):\n self.logger.debug(\"run multiple show commands {} {}\".format(command, str(arglist)))\n output = \"\"\n if isinstance(arglist, str):\n arglist = [arglist]\n for vdcname in vdc:\n self.switchto_vdc(vdcname)\n if len(vdc) > 1:\n output = output + \"\\nvdc {}: \\n\".format(self.get_current_vdc())\n for a in arglist:\n self.logger.debug(\"run show commands {} {} in vdc {}\".format(command, a, vdcname))\n if parser is not None:\n scratch = parser(self._send_xml_cli_show(\"{} {}\".format(command, a)), **optdict)\n if scratch is None:\n output = output + \"Command '{} {}' returned no output\\n\".format(command, a)\n else:\n output = output + scratch\n else:\n 
output = output + self._send_xml_cli_show(\"{} {}\".format(command, a))\n self.logger.debug(\"multiple show commands output {}\".format(output))\n return output", "def getHelp(self):\r\n help_str =\\\r\n \"\"\"##########################################################################################\r\n#\r\n# Required:\r\n#\r\n# --query_NAST multi-fasta file containing query sequences in alignment format\r\n#\r\n# Common opts:\r\n#\r\n# --db_NAST db in NAST format\r\n# --db_FASTA db in fasta format (megablast formatted)\r\n#\r\n#\r\n# -n number of top matching database sequences to compare to (default 15)\r\n# -R min divergence ratio default: 1.007\r\n# -P min percent identity among matching sequences (default: 90)\r\n#\r\n# ## parameters to tune ChimeraParentSelector:\r\n#\r\n# Scoring parameters:\r\n# -M match score (default: +5)\r\n# -N mismatch penalty (default: -4)\r\n# -Q min query coverage by matching database sequence (default: 70)\r\n# -T maximum traverses of the multiple alignment (default: 1)\r\n\r\n#\r\n# ## parameters to tune ChimeraPhyloChecker:\r\n#\r\n#\r\n# --windowSize default 50\r\n# --windowStep default 5\r\n# --minBS minimum bootstrap support for calling chimera (default: 90)\r\n# -S percent of SNPs to sample on each side of breakpoint for computing bootstrap support (default: 10)\r\n# --num_parents_test number of potential parents to test for chimeras (default: 3)\r\n# --MAX_CHIMERA_PARENT_PER_ID Chimera/Parent alignments with perID above this are considered non-chimeras (default 100; turned off)\r\n#\r\n# ## misc opts\r\n#\r\n# --printFinalAlignments shows alignment between query sequence and pair of candidate chimera parents\r\n# --printCSalignments print ChimeraSlayer alignments in ChimeraSlayer output\r\n# --exec_dir chdir to here before running\r\n#\r\n#########################################################################################\r\n \"\"\"\r\n return help_str", "def help():", "def print_usage_command(self):\n print self.get_usage_command()", "def print_usage_command(self):\n print self.get_usage_command()", "def Usage(shorthelp=0, writeto_stdout=0, detailed_error=None,\n exitcode=None, show_cmd=None, show_global_flags=False):\n printer('%s: Incorrect usage; details below.' 
% show_cmd)\n printer('Correct usage is as follows:')\n printer('')\n for line in (' ' + cmd.__doc__.rstrip()).splitlines():\n printer(line)\n # Print out str(FLAGS) for just the UICmd-specific flags.\n tmp_flags = flags.FlagValues()\n unused_cmd = type(cmd)(show_cmd, tmp_flags)\n prefix = _UICMD_MODULE_NAME + ':\\n'\n flag_str = tmp_flags.ModuleHelp(_UICMD_MODULE_NAME)\n flag_str = flag_str.lstrip()\n if flag_str.startswith(prefix):\n flag_str = flag_str[len(prefix):]\n if flag_str:\n printer('')\n printer('flags:')\n for line in flag_str.splitlines():\n printer(line)\n if detailed_error is not None:\n printer('')\n printer('The incorrect usage is as follows:')\n printer('')\n for line in unicode(detailed_error).splitlines():\n printer(' ' + line)", "def cmd_help(self, commands=None, usage=False):\n if commands:\n usage = True\n commands = {self.approx.decmd(c.lower()) for c in commands}\n rejects = commands - self.approx.keys()\n for reject in rejects:\n self.put_pretty(\"No command named %r\" % reject)\n continue\n commands -= rejects\n if self.debug:\n assert not any(self.approx.encmd(r) in self.mod_commands for\n r in rejects)\n assert all(self.approx.encmd(c) in self.mod_commands for\n c in commands)\n if not commands:\n return\n requested = zip(commands, (self.approx[c] for c in commands))\n else:\n requested = self.approx.items()\n help = znc.CTable()\n help.AddColumn(\"Command\")\n help.AddColumn(\"Usage\" if usage else \"Description\")\n from itertools import zip_longest\n #\n for command, parser in requested:\n if usage:\n upre = \"usage: %s\" % command\n rest = (parser.format_usage()\n .replace(upre, \"\", 1)\n .replace(\"[-h] \", \"\", 1))\n desc = [l.strip() for l in rest.split(\"\\n\") if l.strip()]\n else:\n desc = [parser.description]\n for line, comm in zip_longest(desc, (command,), fillvalue=\"\"):\n help.AddRow()\n help.SetCell(\"Command\", comm)\n help.SetCell(\"Usage\" if usage else \"Description\", line)\n #\n s_line = znc.String()\n strung = []\n while help.GetLine(len(strung), s_line):\n strung.append(s_line.s)\n also = \" (<command> [-h] for details)\"\n strung[1] = strung[1].replace(len(also) * \" \", also, 1)\n self.put_pretty(\"\\n\".join(strung))", "def collect_stats(xcnode, cmds):\n output = ''\n\n if not xcnode.client:\n print 'ssh session does not exist for {}'.format(xcnode.host)\n return output\n\n for cmd in cmds:\n stdin, stdout, stderr = xcnode.client.exec_command(cmd)\n out = stdout.read()\n outerr = stderr.read()\n xcnode.fd.write('{} run @ {}\\n'.format(cmd, datetime.now()))\n xcnode.fd.write('stdout:\\n============:\\n{}\\n'.format(out))\n if outerr:\n xcnode.fd.write('stderr\\n===========:\\n{}\\n'.format(outerr))\n output += out + '\\n'\n output += outerr + '\\n'\n xcnode.fd.flush()\n\n return output", "def help(self):\r\n self._short_help(None, None, None, None)", "def help(cls, extra_args=None):\n if (_is_text_interface()):\n return _create_text_help_str(cls, cls._TEXT_USAGE)\n else:\n return cls._GRAPHICAL_USAGE", "def list_metrics(self):\n pass", "def help():\n print(UI.HELP)", "def _help(self):\n self.onecmd('help')", "def command_help(self, bot, update):\n\n messages = [\n 'Available commands:',\n '/who - Who is Myles?',\n '/where - Where is Myles?',\n '/tweet - What was the last tweet Myles sent?',\n '/photo - What was the last Instagram photo Myles took?',\n '/web - Where can I find Myles on the interwebs?',\n ]\n\n self.send_messages(bot, update, messages)", "def DoHelp(options, args):\n __pychecker__ = 'unusednames=options'\n if 
len(args) == 1 and args[0] in COMMAND_USAGE_TEXT:\n print(COMMAND_USAGE_TEXT[args[0]])\n else:\n raise gclient_utils.Error(\"unknown subcommand '%s'; see 'gclient help'\" %\n args[0])", "def help(self, msg=None):\n\n # Print the message if given.\n if not msg == None:\n print str(msg) + \"\\n\"\n\n # Display the list of commands, in the alphabetical order.\n print \"Use one of the following commands:\"\n for action in sorted(self.actions.keys()):\n info = self.actions[action]\n joined_oblig = ' '.join(info['required'])\n if len(info['additional']) > 0:\n add = [\"<%s>\" % x for x in info['additional']]\n joined_add = '[' + ' '.join(add) + ']'\n else:\n joined_add = ''\n print \"\\t* %s %s %s\" % (action, joined_oblig, joined_add)", "def display_help(self):\n pass", "def HMC_Help():\n os.system(\"cls\")\n while True:\n print((\"\\n\\n\",\"Help\".center(50)))\n print_list = [\"ManagedSystem\",\"LogicalPartition\",\"VirtualIOServer\",\"Cluster\",\"Performance Capaity Monitoring\",\"Return to Main Menu\"]\n choice = int(print_obj.print_on_screen(print_list))\n directory = os.path.dirname(os.path.dirname(__file__))\n if choice == 1:\n path = directory+\"/help/ManagedSystem\"\n files = [f for f in os.listdir(path)if os.path.isfile(os.path.join(path,f))]\n for f in files :\n print((open(path+\"/%s\"%(f)).read()))\n elif choice == 2:\n path = directory+\"/help/LogicalPartition\"\n files = [f for f in os.listdir(path)if os.path.isfile(os.path.join(path,f))]\n for f in files :\n print((open(path+\"/%s\"%(f)).read()))\n elif choice == 3:\n path = directory+\"/help/VirtualIOServer\"\n files = [f for f in os.listdir(path)if os.path.isfile(os.path.join(path,f))]\n for f in files :\n print((open(path+\"/%s\"%(f)).read()))\n elif choice == 4:\n print((open(directory+\"/help/Cluster.txt\").read()))\n elif choice == 5:\n print((open(directory+\"/help/PerformanceCapacityMonitoring.txt\").read()))\n elif choice == 6:\n os.system(\"cls\")\n return\n else:\n print(\"\\nTry using Valid option\")\n back_to_menu()", "def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -t, --transaction\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -f, --from-file <filename>\n --not-error-tolerant\n \"\"\"", "def demo():\n logging.basicConfig(level=logging.INFO, format=\"%(message)s\")\n fcm = FMQLCacher(\"Caches\")\n fcm.setVista(\"CGVISTA\", \"http://vista.caregraf.org/fmqlEP\")\n \"\"\"\n for i, scheme in enumerate(fcm.describeSchemaTypes()):\n if \"count\" in scheme:\n print \"%d: %s (%s)\" % (i, scheme[\"number\"], scheme[\"count\"])\n else:\n print \"%d: %s\" % (i, scheme[\"number\"])\n \"\"\"\n for entry in fcm.describeFileEntries(\"9_6\", cstop=\"1000\"):\n print entry[\"uri\"][\"label\"]", "def list_usage(progname, description, command_keys, command_helps, command_aliases):\n dvars = {'prog': progname}\n dvars.update(vars())\n result = []\n result.append(description % dvars)\n for key in command_keys:\n if key in command_aliases:\n alias = ' (%s)' % command_aliases[key]\n else:\n alias = ''\n if key is not None:\n result.append((\"%s%s\" % (key, alias)).ljust(10) + ' \\t' + command_helps[key])\n else:\n result.append('')\n return '\\n'.join(result)", "def help_command(server, output, conf):\n server.tell(output.name, 'Available commands:')\n for key in COMMANDS.keys():\n cmd_func = COMMANDS[key]\n if cmd_func.__doc__:\n server.tell(output.name, '%s: %s' % (key[1:], cmd_func.__doc__))\n else:\n server.tell(output.name, key[1:])\n return", "def help():\n 
\n pass", "def printhelp_01():\n print('Convert simba3d report output from npz to .mat, or .json')\n print('simba3d-convertion --ext_out [.mat, .json, .txt, or .pdb] <list of files>')", "def help(self, *args):\n for _, v in self.useage.items():\n print v.__doc__", "def explainerdashboard_cli(ctx):", "def main():\n parser = argparse.ArgumentParser(\n description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n add_common_snmp_args(parser)\n parser.add_argument(\n \"-w\",\n \"--warning\",\n type=int,\n default=70,\n help=\"Warning memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-c\",\n \"--critical\",\n type=int,\n default=90,\n help=\"Critical memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-f\",\n \"--family\",\n required=True,\n help=\"Switch family. Supported families: 1910, 1920, 1920S\",\n )\n\n config = vars(parser.parse_args())\n check_snmp_args(config)\n check_thresholds(config)\n\n dataset = {}\n\n if config[\"family\"] == \"1920S\":\n cpu = ObjectType(\n ObjectIdentity(\n \"HP-SWITCHING-MIB\", \"agentSwitchCpuProcessTotalUtilization\", 0\n )\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n cpu = ObjectType(\n ObjectIdentity(\"HH3C-ENTITY-EXT-MIB\", \"hh3cEntityExtCpuUsage\", 8)\n )\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n try:\n raw_data = get_snmp_data(config, cpu)\n except ValueError as err:\n unknown_exit(SERVICE, err)\n add_vars_to_dataset(dataset, raw_data)\n\n if config[\"family\"] == \"1920S\":\n dataset[\"cpu_usage\"] = get_hp_cpu_usage(\n dataset[\"agentSwitchCpuProcessTotalUtilization\"]\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n dataset[\"cpu_usage\"] = int(dataset[\"hh3cEntityExtCpuUsage\"])\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n state, message = generate_output(config, dataset)\n report(state, message)", "def collect_metrics(application):\n\n try:\n subprocess.check_call(['juju', 'collect-metrics', application])\n except subprocess.CalledProcessError as e:\n raise Exception(\"Unable to collect metrics: {}\".format(e))", "def do_config():\n\n tracking = get_tracking()\n for unit in (\"ppm\", \"sec\"):\n\ttunit = unit\n\tif unit == \"sec\":\n\t tunit = \"seconds\"\n\tprint \"multigraph chrony_%s\" % unit\n\tprint \"graph_title NTP (Chrony) Statistics (%s)\" % unit\n\tprint \"graph_vlabel %s\" % unit\n\tprint \"graph_args --base 1000\"\n\tprint \"graph_category time\"\n\tprint \"graph_info NTP (Chrony) tracking statistics (the ones measured in %s)\" % tunit\n\tfor key in tracking[tunit]:\n\t item = tracking[tunit][key]\n\t print \"\"\"%s.label %s\n%s.draw LINE2\n%s.info %s\"\"\" % (key, item[\"label\"], key, key, item[\"label\"])\n\tprint\n return 0", "async def help(self, context):\n prefix = config.BOT_PREFIX\n user=context.message.author\n if not isinstance(prefix, str):\n prefix = prefix[0]\n embed = discord.Embed(title=\"Help\", description=\"List of available commands:\", color=0x00FF00)\n for i in self.bot.cogs:\n cog = self.bot.get_cog(i.lower())\n commands = cog.get_commands()\n command_list = [command.name for command in commands if not command.hidden or context.message.author.id in config.OWNERS]\n command_description = [command.help for command in commands if not command.hidden or context.message.author.id in config.OWNERS]\n help_text = '\\n'.join(f'{prefix}{n} - {h}' for n, h in zip(command_list, command_description))\n embed = discord.Embed(title=f\"Commands in {i.capitalize()} Cog\", 
description=f'```{help_text}```', color=0x00FF00)\n await user.send(embed=embed)\n if not isinstance(context.message.channel, discord.channel.DMChannel):\n await context.send(f\"DM sent to {user.mention}\")\n await context.message.delete()", "def help(self):\n pass", "def help(self):\n pass", "def help(self, *args):\n\n\t\tif self.available_cmds:\n\t\t\tdir_text = \"Enter commands in the format 'cmd [args]'. Available commands: \\n\"\n\t\t\tfor cmd in self.available_cmds.keys():\n\t\t\t\tdir_text += \" -\" + cmd + \"\\n\"\n\t\telse:\n\t\t\tdir_text = \"No commands available.\"\n\n\t\tif self.available_apps:\n\t\t\tapp_txt = \"Available applications to run: \\n\"\n\t\t\tfor app in self.available_apps.keys():\n\t\t\t\tapp_txt += \" -\" + app + \"\\n\"\n\t\telse:\n\t\t\tapp_txt = \"No applications available.\"\n\n\t\tprint(dir_text + \"\\n\" + app_txt + \"\\n\")", "def usage(self):\n self._usage1()\n print 'folder COOL_channel COOL_tag ROOT_file'\n self._usage2()", "def do_overview(self):\n summaries = []\n for name, cmd in self.base.commands.iteritems():\n summaries.append(' %-14s %s\\n' % (name, cmd.get_summary()))\n summaries.sort()\n sys.stdout.write('Usage: %s COMMAND ARGUMENTS...\\n\\n' \\\n 'Available commands:\\n' % (self.base.scriptname, ))\n for line in summaries:\n sys.stdout.write(line)", "async def help(self, ctx):\n self.log_command_call(\"help\", ctx.message)\n await ctx.send(HELP_TEXT)\n embed_output = create_embed(description=MORE_INFO_TEXT)\n await ctx.send(embed=embed_output)", "def show_all(self):\n cmodules.showModuleData(\n Options.Author,\n Options.Name,\n Options.Call,\n Options.Category,\n Options.Type,\n Options.Version,\n Options.Description,\n Options.License,\n Options.Datecreation,\n Options.Lastmodified\n )\n self.show_commands()\n self.show_opt()", "def printOptions(opts,subject_ids,session_ids,task_list, run_list, acq, rec):\n uname = os.popen('uname -s -n -r').read()\n print \"\\n\"\n print \"* Pipeline started at \"+time.strftime(\"%c\")+\"on \"+uname\n print \"* Command line is : \\n \"+str(sys.argv)+\"\\n\"\n print \"* The source directory is : \"+opts.sourceDir\n print \"* The target directory is : \"+opts.targetDir+\"\\n\"\n print \"* Data-set Subject ID(s) is/are : \"+str(', '.join(subject_ids))+\"\\n\"\n # print \"* PET conditions : \"+ ','.join(opts.condiList)+\"\\n\"\n print \"* Sessions : \", session_ids, \"\\n\"\n print \"* Tasks : \" , task_list , \"\\n\"\n print \"* Runs : \" , run_list , \"\\n\"\n print \"* Acquisition : \" , acq , \"\\n\"\n print \"* Reconstruction : \" , rec , \"\\n\"", "def test_usage(self):\n # Make sure the usage message is shown when no arguments\n # are given and when the -h or --help option is given.\n for options in [], ['-h'], ['--help']:\n exit_code, output = run_cli(*options)\n assert \"Usage:\" in output", "def test_cw_metrics(self):\n\n instances = set()\n result = self.cw_client.list_metrics(Namespace=\"CWAgent\", MetricName=\"cpu_usage_system\")\n for i in result[\"Metrics\"]:\n instances.add(i[\"Dimensions\"][0][\"Value\"])\n\n for key, value in self.cdk_output_map.items():\n if \"Instance\" in key:\n self.assertTrue(value in instances)", "def usage(msg):\n ap.print_usage()\n print \"-\"*40\n print msg\n exit(1)", "def main():\n logging.info(\"Testing iOS application performance metrics: application size, launch duration and RAM memory usage!\")\n\n try:\n args = parse_args()\n\n TEST_RESULTS = run_tests(args)\n test_summary = create_test_summary(args, TEST_RESULTS)\n write_results_to_file(TEST_RESULTS, 
RESULTS_FILE, test_summary, SUMMARY_FILE)\n report_tests(args, test_summary)\n\n except Exception as e:\n logging.error(\"Testing performance of application failed with error '{ERROR}'\".format(ERROR=e))", "def usage(progname):\n sys.stderr.write(\"usage: \" + progname + \n \" [-s id_list] [-i] [-t]\\n\"\n \" [-p preiviously_built_list]\\n\"\n \" [-g potential_upgrades_list]\\n\"\n \" [-u subsets_tap_stderr_file]\\n\"\n \" mods_file nochange_vht_file invidual_tap_stderr_file \"\n \"pairwise_tap_stderr_file budget\\n\")\n sys.stderr.write(\" -i : round all values to integer\\n\")\n sys.stderr.write(\" -s id_list : subsets of ids in id_list only\\n\")\n sys.stderr.write(\" -t timeperiod: add time period, intersrate to dzn\\n\")\n sys.stderr.write(\" -u substs_tap_stderr_file: data for triples also\\n\")\n sys.stderr.write(\" -p preiouvlys_built_list: list of previously built upgrades\\n\")\n sys.stderr.write(\" -g potential_upgrades_list: list of potenti upgrades to consider\\n\")\n sys.exit(1)", "def server_stats():\n out = subprocess.check_output(cmd_preamble + [\"admin\", \"stats\"])\n return out.decode()", "def metrics(self, account_id):\n from pureport_client.commands.accounts.metrics import Command\n return Command(self.client, account_id)" ]
[ "0.617125", "0.58856094", "0.5707514", "0.5675522", "0.5634532", "0.54365146", "0.53614813", "0.53592706", "0.53404266", "0.53303754", "0.5302817", "0.5274004", "0.5224067", "0.52236396", "0.52125716", "0.51809424", "0.51480365", "0.5107816", "0.5101575", "0.50771254", "0.5061744", "0.5061137", "0.50485176", "0.50409716", "0.50384724", "0.5008888", "0.5007734", "0.50066376", "0.5005284", "0.50015926", "0.499838", "0.49978504", "0.4989914", "0.4989818", "0.49843383", "0.49674562", "0.4965389", "0.49399567", "0.49359903", "0.49309155", "0.4929803", "0.4921723", "0.49206382", "0.4919536", "0.4913752", "0.49016917", "0.48883396", "0.48841438", "0.48841438", "0.48840162", "0.48822334", "0.48796535", "0.48767158", "0.48743945", "0.48639706", "0.485461", "0.48411664", "0.48367518", "0.4835057", "0.4835057", "0.48262364", "0.48239475", "0.48231998", "0.48202813", "0.4815986", "0.48142466", "0.48021272", "0.47954303", "0.4795103", "0.4792847", "0.47842428", "0.47830272", "0.4768331", "0.47660503", "0.47648734", "0.47641653", "0.47636577", "0.476009", "0.47538322", "0.47532102", "0.47497034", "0.4743432", "0.47368562", "0.47356483", "0.47355902", "0.47286695", "0.47286695", "0.47213343", "0.47153986", "0.47118193", "0.47013694", "0.46989965", "0.46906054", "0.4689399", "0.46890444", "0.46876895", "0.4685884", "0.4681822", "0.46805468", "0.46766183" ]
0.5578356
5
Report usage metrics for all active RoCE adapters of CPCs. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_roce(cmd_ctx, cpc, **options):
    cmd_ctx.execute_cmd(lambda: cmd_metrics_roce(cmd_ctx, cpc, options))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))", "def metrics_adapter(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_adapter(cmd_ctx, cpc, adapter, options))", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def stats(caesar, input):\n commands = {}\n users = {}\n channels = {}\n\n ignore = set(['f_note', 'startup', 'message', 'noteuri'])\n for (name, user), count in caesar.stats.items(): \n if name in ignore: continue\n if not user: continue\n\n if not user.startswith('#'): \n try: users[user] += count\n except KeyError: users[user] = count\n else: \n try: commands[name] += count\n except KeyError: commands[name] = count\n\n try: channels[user] += count\n except KeyError: channels[user] = count\n\n comrank = sorted([(b, a) for (a, b) in commands.iteritems()], reverse=True)\n userank = sorted([(b, a) for (a, b) in users.iteritems()], reverse=True)\n charank = sorted([(b, a) for (a, b) in channels.iteritems()], reverse=True)\n\n # most heavily used commands\n creply = 'most used 
commands: '\n for count, command in comrank[:10]: \n creply += '%s (%s), ' % (command, count)\n caesar.say(creply.rstrip(', '))\n\n # most heavy users\n reply = 'power users: '\n for count, user in userank[:10]: \n reply += '%s (%s), ' % (user, count)\n caesar.say(reply.rstrip(', '))\n\n # most heavy channels\n chreply = 'power channels: '\n for count, channel in charank[:3]: \n chreply += '%s (%s), ' % (channel, count)\n caesar.say(chreply.rstrip(', '))", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))", "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def help(self, args):\n print('No commands available for this consumer')", "def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -r, --recursive\n --dbs-conf <conf_file>\n --show-prod\n --show-caf\n --only-subscribed\n --only-custodial\n \"\"\"", "def _cmd_metrics(args):\n if (\n len(args.cnarrays) > 1\n and args.segments\n and len(args.segments) > 1\n and len(args.cnarrays) != len(args.segments)\n ):\n raise ValueError(\n \"Number of coverage/segment filenames given must be \"\n \"equal, if more than 1 segment file is given.\"\n )\n\n cnarrs = map(read_cna, args.cnarrays)\n if args.segments:\n args.segments = map(read_cna, args.segments)\n table = metrics.do_metrics(cnarrs, args.segments, args.drop_low_coverage)\n write_dataframe(args.output, table)", "async def stats(self, ctx):\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)", "def do_stats(self, args):\n total_cpu = free_cpu = in_use_cpu = 0\n\n summary = self._qm.get_all_host_summary()\n for host_id, host_info in summary.viewitems():\n host_cpu = int(host_info['total cores'])\n total_cpu += host_cpu\n locked = host_info.get('locked by')\n if locked:\n # If host is locked then all CPUs are in use.\n in_use_cpu += host_cpu\n else:\n free_host_cpu = int(host_info['free cores'])\n in_use_cpu += (host_cpu - free_host_cpu)\n free_cpu += free_host_cpu\n\n print('total CPU: ', total_cpu)\n print('used/locked CPU: ', in_use_cpu)\n print('free CPU: ', free_cpu)\n capacity = float(in_use_cpu) / float(total_cpu)\n print('capacity used: %.1f%%' % (capacity * 100,))\n capacity = float(free_cpu) / float(total_cpu)\n print('capacity remaining: %.1f%%' % (capacity * 100,))", "def usage():\n \n print '-b <bench> the bench to show.'\n print '-c <config> the config to show (GPU, 8888, 565, etc).'\n print '-d <dir> a directory containing bench_r<revision>_<scalar> files.'\n print '-e <file> file containing expected bench values/ranges.'\n print ' Will raise exception if actual bench values are out of range.'\n print ' See bench_expectations.txt for data format and examples.'\n print '-f <revision>[:<revision>] the revisions to use for fitting.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-i <time> the time to ignore (w, c, g, etc).'\n print ' The flag is ignored when -t is set; otherwise we plot all the'\n print ' times except the one specified here.'\n print '-l <title> title to use for the output graph'\n print '-m <representation> representation of bench value.'\n print ' See _ListAlgorithm class in bench_util.py.'\n print '-o <path> path to which to write output; writes to stdout if not specified'\n print '-r <revision>[:<revision>] the revisions to show.'\n print ' Negative <revision> is taken as 
offset from most recent revision.'\n print '-s <setting>[=<value>] a setting to show (alpha, scalar, etc).'\n print '-t <time> the time to show (w, c, g, etc).'\n print '-x <int> the desired width of the svg.'\n print '-y <int> the desired height of the svg.'\n print '--default-setting <setting>[=<value>] setting for those without.'", "def usage(self, host):", "def metrics_env(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))", "def usage(err=''):\r\n m = '%s\\n' %err\r\n m += 'Default usage is to list Cases closed for the 30 days\\n'\r\n m += '\\n Example:\\n'\r\n m += ' closedcases -n 90 \\n' \r\n m += ' \\n'\r\n# m += ' closedcases -n 60 -s blast5 \\n'\r\n return m", "def main():\n\n args = parse_args()\n metric_sender = MetricSender(verbose=args.verbose, debug=args.debug)\n\n discovery_key_disk = 'disc.disk'\n interval = 3\n pcp_disk_dev_metrics = ['disk.dev.total', 'disk.dev.avactive']\n item_prototype_macro_disk = '#OSO_DISK'\n item_prototype_key_tps = 'disc.disk.tps'\n item_prototype_key_putil = 'disc.disk.putil'\n\n disk_metrics = pminfo.get_sampled_data(pcp_disk_dev_metrics, interval, 2)\n\n pcp_metrics_divided = {}\n for metric in pcp_disk_dev_metrics:\n pcp_metrics_divided[metric] = {k: v for k, v in disk_metrics.items() if metric in k}\n\n # do TPS checks; use disk.dev.total\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[0]],\n pcp_disk_dev_metrics[0] + '.')\n\n # Add dynamic items\n metric_sender.add_dynamic_metric(discovery_key_disk, item_prototype_macro_disk, filtered_disk_totals.keys())\n\n # calculate the TPS and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n disk_tps = (totals[1] - totals[0]) / interval\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_tps, disk): disk_tps})\n\n # do % Util checks; use disk.dev.avactive\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[1]],\n pcp_disk_dev_metrics[1] + '.')\n\n # calculate the % Util and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n total_active = (float)(totals[1] - totals[0]) / 1000.0\n putil = 100 * total_active / interval\n\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_putil, disk): putil})\n\n metric_sender.send_metrics()", "def metrics_networkport(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_networkport(cmd_ctx, cpc, adapter, options))", "def print_help(self):\r\n\t\ttext = \"\\tName: ml_scikit_OPTICS\"\r\n\t\ttext += \"\\n\\t\\tThis machine learning plugin uses scikit-learn's OPTICS algorithm.\\n\"\r\n\t\ttext += \"\\n\\t\\tOptional Parameters:\"\r\n\t\ttext += \"\\n\\t\\t\\tOPTICS_skip_normalization: Do NOT perform normalization (scaling) of data, skip this step.\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_eps: Specify eps parameter (default is 1.0).\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_min_samples: Specify min_samples parameter (default is 5).\"\r\n#\r\n# OPTICS (with memory complexity n) is an alternative to DBSCAN (with memory complexity n^2)\r\n# which has time complexity n^2 in general with the default max_eps = np.inf. 
\r\n# We will set max_eps = eps to reduce the run-time.\r\n#\r\n\t\treturn text", "def show(self):\n prev_queries = 0\n prev_cpu_sys = 0\n prev_cpu_user = 0\n \n lines = {\n \"Uptime (seconds)\": \"--\",\n \"Number of queries\": \"--\",\n \"Query per second\": \"--\",\n \"ACL drops\": \"--\",\n \"Dynamic drops\": \"--\",\n \"Rule drops\": \"--\",\n \"CPU Usage (%s)\": \"--\",\n \"Cache hitrate\": \"--\"\n }\n\n while True:\n try:\n # get stats from dnsdist\n stats = Statistics(console=self.console)\n global_stats = stats[\"global\"]\n \n qps = int(global_stats[\"queries\"]) - prev_queries\n prev_queries = int(global_stats[\"queries\"])\n cpu = (int(global_stats[\"cpu-sys-msec\"])+int(global_stats[\"cpu-user-msec\"]) - prev_cpu_sys - prev_cpu_user) / 10\n prev_cpu_sys = int(global_stats[\"cpu-sys-msec\"])\n prev_cpu_user = int(global_stats[\"cpu-user-msec\"])\n \n lines[\"Uptime (seconds)\"] = global_stats[\"uptime\"]\n lines[\"Number of queries\"] = global_stats[\"queries\"]\n lines[\"Query per second\"] = qps\n lines[\"CPU Usage (%s)\"] = cpu\n lines[\"ACL drops\"] = global_stats[\"acl-drops\"]\n lines[\"Rule drops\"] = global_stats[\"rule-drop\"]\n lines[\"Cache hitrate\"] = global_stats[\"cache-hits\"]\n lines[\"Dynamic drops\"] = global_stats[\"dyn-blocked\"]\n\n # reprint the lines \n sys.stdout.write(\"\\033[1mDashboard for dnsdist\\033[0m\\n\")\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Global:\\n\")\n for k,v in lines.items():\n sys.stdout.write(\"\\t%s: %s\\n\" % (k,v))\n sys.stdout.write(\"Backends:\\n\")\n for s in stats[\"backends\"]:\n if not len(s[\"name\"]):\n s[\"name\"] = \"--\"\n if not len(s[\"pools\"]):\n s[\"pools\"] = \"--\"\n sys.stdout.write(\"\\t#%s / %s / %s / %s\\n\" % (s[\"#\"],s[\"address\"],s[\"name\"],s[\"pools\"]) )\n sys.stdout.write(\"\\t\\tNumber of queries: %s\\n\" % s[\"queries\"])\n sys.stdout.write(\"\\t\\tQuery per second: %s\\n\" % s[\"qps\"])\n sys.stdout.write(\"\\t\\tNumber of drops: %s\\n\" % s[\"drops\"])\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Ctrl+C to exit\\n\")\n \n time.sleep(1)\n \n \n # move up cursor and delete whole line\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for k,v in lines.items():\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for s in stats[\"backends\"]:\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n \n del stats\n except KeyboardInterrupt:\n break", "def treatCmdOpts(argv):\n baseName = os.path.basename(__file__)\n amc.cBaseName = colored(baseName, 'yellow')\n\n helpTxt = amc.cBaseName + ' analyses observation statistics file for selected GNSSs'\n\n # create the parser for command line arguments\n parser = argparse.ArgumentParser(description=helpTxt)\n\n parser.add_argument('--obsstat', help='observation statistics file', type=str, required=True)\n\n parser.add_argument('--freqs', help='select frequencies to use (out of {freqs:s}, default {freq:s})'.format(freqs='|'.join(gfzc.lst_freqs), freq=colored(gfzc.lst_freqs[0], 'green')), default=gfzc.lst_freqs[0], type=str, required=False, action=gco.freqtype_action, nargs='+')\n\n parser.add_argument('--cutoff', help='cutoff angle in degrees (default {mask:s})'.format(mask=colored('0', 'green')), default=0, type=int, required=False, 
action=gco.cutoff_action)\n\n parser.add_argument('--dbcvs', help='Add information to CVS database (default {cvsdb:s})'.format(cvsdb=colored(gco.CVSDB_OBSTLE, 'green')), required=False, type=str, default=gco.CVSDB_OBSTLE)\n\n parser.add_argument('--plot', help='displays interactive plots (default False)', action='store_true', required=False, default=False)\n\n parser.add_argument('--logging', help='specify logging level console/file (two of {choices:s}, default {choice:s})'.format(choices='|'.join(gco.lst_logging_choices), choice=colored(' '.join(gco.lst_logging_choices[3:5]), 'green')), nargs=2, required=False, default=gco.lst_logging_choices[3:5], action=gco.logging_action)\n\n # drop argv[0]\n args = parser.parse_args(argv[1:])\n\n # return arguments\n return args.obsstat, args.freqs, args.cutoff, args.dbcvs, args.plot, args.logging", "def qc_metrics(self, files_in, qc_files):\n self.cmd(\"{samtools} index {bam_in}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in=files_in[0],\n ),\n shell=True)\n self.cmd(\"{samtools} idxstats {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[0],\n ),\n shell=True,\n log_output=True)\n self.cmd(\"{samtools} flagstat {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[1],\n ),\n shell=True,\n log_output=True)\n \n self.checkpoint(qc_files[0])\n self.checkpoint(qc_files[1])\n self.checkpoint(qc_files[2])", "def getHelp(self):\r\n help_str =\\\r\n \"\"\"##########################################################################################\r\n#\r\n# Required:\r\n#\r\n# --query_NAST multi-fasta file containing query sequences in alignment format\r\n#\r\n# Common opts:\r\n#\r\n# --db_NAST db in NAST format\r\n# --db_FASTA db in fasta format (megablast formatted)\r\n#\r\n#\r\n# -n number of top matching database sequences to compare to (default 15)\r\n# -R min divergence ratio default: 1.007\r\n# -P min percent identity among matching sequences (default: 90)\r\n#\r\n# ## parameters to tune ChimeraParentSelector:\r\n#\r\n# Scoring parameters:\r\n# -M match score (default: +5)\r\n# -N mismatch penalty (default: -4)\r\n# -Q min query coverage by matching database sequence (default: 70)\r\n# -T maximum traverses of the multiple alignment (default: 1)\r\n\r\n#\r\n# ## parameters to tune ChimeraPhyloChecker:\r\n#\r\n#\r\n# --windowSize default 50\r\n# --windowStep default 5\r\n# --minBS minimum bootstrap support for calling chimera (default: 90)\r\n# -S percent of SNPs to sample on each side of breakpoint for computing bootstrap support (default: 10)\r\n# --num_parents_test number of potential parents to test for chimeras (default: 3)\r\n# --MAX_CHIMERA_PARENT_PER_ID Chimera/Parent alignments with perID above this are considered non-chimeras (default 100; turned off)\r\n#\r\n# ## misc opts\r\n#\r\n# --printFinalAlignments shows alignment between query sequence and pair of candidate chimera parents\r\n# --printCSalignments print ChimeraSlayer alignments in ChimeraSlayer output\r\n# --exec_dir chdir to here before running\r\n#\r\n#########################################################################################\r\n \"\"\"\r\n return help_str", "def usage():\n print(\"[1] Getting help from a cipher \")\n print(\" ---> ./cryptogra.py caesar -h \")\n print(\"\")", "def DoHelp(options, args):\n __pychecker__ = 'unusednames=options'\n if len(args) == 1 and args[0] in COMMAND_USAGE_TEXT:\n 
print(COMMAND_USAGE_TEXT[args[0]])\n else:\n raise gclient_utils.Error(\"unknown subcommand '%s'; see 'gclient help'\" %\n args[0])", "def do_overview(self):\n summaries = []\n for name, cmd in self.base.commands.iteritems():\n summaries.append(' %-14s %s\\n' % (name, cmd.get_summary()))\n summaries.sort()\n sys.stdout.write('Usage: %s COMMAND ARGUMENTS...\\n\\n' \\\n 'Available commands:\\n' % (self.base.scriptname, ))\n for line in summaries:\n sys.stdout.write(line)", "def print_usage_command(self):\n print self.get_usage_command()", "def print_usage_command(self):\n print self.get_usage_command()", "def procs_calculate_axyzc(molecules, n_cores=-1, show_progress=True, scr=None, cmd=XTB_CMD):\n results = None\n return results", "def HelpCommand(self, unused_args, unused_sub_opts=None, unused_headers=None,\n unused_debug=None):\n self.OutputUsageAndExit()", "def main():\n test_cases = ast.literal_eval(sys.argv[1])\n results = str(my_info()) + '\\t\\t'\n for test_case in test_cases:\n mode = test_case[0]\n id_1 = int(test_case[1])\n id_2 = int(test_case[2])\n if mode == 'jc':\n results += str(Jaccard_Coefficient(id_1, id_2)) + '\\t\\t'\n elif mode == 'cc':\n results += str(Correlation_Coefficient(id_1, id_2)) + '\\t\\t'\n else:\n exit('bad command')\n print results + '\\n'", "def command_help(self, *args, **kwargs):\n print(\"Commands available:\\n\")\n for name in dir(self):\n if not name.startswith(\"command_\"):\n continue\n name_clean = name[len(\"command_\"):]\n print(\"%s:\\n - %s\\n\" % (name_clean, getattr(self, name).__doc__.strip()))", "def usage():\n\n # Local constants\n\n # Local variables\n\n #****** start usage() ******#\n print()\n print(\" Usage: python TCGCardTracker.py <arguement below> <optional-argument-1>\")\n print(\"\\tadd (Optional): Add a card to your collection. Requires TCGPlayer URL.\")\n print(\"\\tdelete (Optional): Delete a card from your collection. Requires TCGPlayer URL.\")\n print(\"\\tupdate (Optional): Updates pricing data for every card in your collection.\")\n print(\"\\ttop25 (Optional): Outputs the 25 most valuable cards from your collection.\")\n print(\"\\texport (Optional): Exports a list of TCGPlayer URLs to a text file.\")\n print(\"\\texport_collection (Optional): Exports your collection to a .csv including most recent price data.\")\n print(\"\\timport (Optional): Imports a text file of TCGPlayer URLs to bulk import cards into your collection. Requires text file.\")\n print(\"\\tworth (Optional): Ouputs how much your collection is worth using latest price data.\")\n print(\"\\tgraph (Optional): Outputs historical pricing data for a given card. Requires TCGPlayer URL.\")\n print(\"\\tgraph (Optional): Outputs historical pricing data for a given card. Requires TCGPlayer URL.\")\n print(\"\\tticker (Optional): Displays a ticker grid of the change in value over a given time. 
If run without the days back parameter it will default to 7 days.\")\n sys.exit()", "def cmd_help(self, commands=None, usage=False):\n if commands:\n usage = True\n commands = {self.approx.decmd(c.lower()) for c in commands}\n rejects = commands - self.approx.keys()\n for reject in rejects:\n self.put_pretty(\"No command named %r\" % reject)\n continue\n commands -= rejects\n if self.debug:\n assert not any(self.approx.encmd(r) in self.mod_commands for\n r in rejects)\n assert all(self.approx.encmd(c) in self.mod_commands for\n c in commands)\n if not commands:\n return\n requested = zip(commands, (self.approx[c] for c in commands))\n else:\n requested = self.approx.items()\n help = znc.CTable()\n help.AddColumn(\"Command\")\n help.AddColumn(\"Usage\" if usage else \"Description\")\n from itertools import zip_longest\n #\n for command, parser in requested:\n if usage:\n upre = \"usage: %s\" % command\n rest = (parser.format_usage()\n .replace(upre, \"\", 1)\n .replace(\"[-h] \", \"\", 1))\n desc = [l.strip() for l in rest.split(\"\\n\") if l.strip()]\n else:\n desc = [parser.description]\n for line, comm in zip_longest(desc, (command,), fillvalue=\"\"):\n help.AddRow()\n help.SetCell(\"Command\", comm)\n help.SetCell(\"Usage\" if usage else \"Description\", line)\n #\n s_line = znc.String()\n strung = []\n while help.GetLine(len(strung), s_line):\n strung.append(s_line.s)\n also = \" (<command> [-h] for details)\"\n strung[1] = strung[1].replace(len(also) * \" \", also, 1)\n self.put_pretty(\"\\n\".join(strung))", "def help_help(self):\n print(\"List commands or print details about a command\")", "def cbstats_test(self):\n cluster_len = RestConnection(self.master).get_cluster_size()\n if self.command == \"kvstore\":\n self.verify_cluster_stats()\n if self.command != \"key\":\n if \"tapagg\" in self.command and cluster_len == 1:\n self.log.info(\"This command only works with cluster with 2 nodes or more\")\n raise Exception(\"This command does not work with one node cluster\")\n else:\n # tapagg needs replica items to print out results\n if \"tapagg\" in self.command:\n for bucket in self.buckets:\n self.shell.execute_cbworkloadgen(self.couchbase_usrname, \\\n self.couchbase_password, self.num_items, \\\n self.set_get_ratio, bucket.name, \\\n self.item_size, self.command_options)\n self.sleep(5)\n for bucket in self.buckets:\n if \"allocator\" in self.command:\n output, error = self.shell.execute_mcstat(bucket,\"\",\n keyname=self.command, vbid=\"\", enable_ipv6=self.enable_ipv6)\n else:\n output, error = self.shell.execute_cbstats(bucket, self.command)\n self.verify_results(output, error)\n if self.command in [\"allocator\", \"kvtimings\", \"timings\"]:\n self.log.warning(\"We will not verify exact values for this stat\")\n else:\n self._verify_direct_client_stats(bucket, self.command, output)\n else:\n mc_conn = MemcachedClientHelper.direct_client(self.master, self.buckets[0].name, self.timeout)\n bucket_info = RestConnection(self.master).get_bucket(self.buckets[0])\n keys_map = {}\n for i in range(1, self.num_items + 1):\n vb_id = i - len(bucket_info.vbuckets) * int(i // len(bucket_info.vbuckets))\n try:\n mc_conn.set(\"test_docs-%s\" % i, 0, 0, json.dumps('{ \"test\" : \"test\"}').encode(\"ascii\", \"ignore\"), vb_id)\n except Exception:\n continue\n keys_map[\"test_docs-%s\" % i] = vb_id\n count = 0\n for key, vb_id in keys_map.items():\n output, error = self.shell.execute_cbstats(self.buckets[0], self.command, key, vb_id)\n self.verify_results(output, error)\n count += 1\n if 
self.master.ip.endswith(\"amazonaws.com\") and count == 10:\n self.log.info(\"check only 10 keys in aws \")\n break", "def help_opt(self):\n print(OPTIONS)", "def print_help_info(self, global_options):\r\n usage = ['',\"Type '%s help <subcommand>' for help on a specific subcommand.\" % self.prog_name,'']\r\n usage.append('Available subcommands:')\r\n commands = self.get_commands(global_options).keys()\r\n commands.sort()\r\n for cmd in commands:\r\n usage.append(' %s' % cmd)\r\n return '\\n'.join(usage)", "def usage(self):\n self._usage1()\n print 'folder COOL_channel COOL_tag ROOT_file'\n self._usage2()", "def main(argv=None):\n\n if argv is None:\n argv = sys.argv\n\n # setup command line parser\n parser = E.OptionParser(version=\"%prog version: $Id$\",\n usage=globals()[\"__doc__\"])\n\n parser.add_option(\"-t\", \"--test\", dest=\"test\", type=\"string\",\n help=\"supply help\")\n\n parser.add_option(\"--method\", dest=\"method\", type=\"choice\",\n choices=(\"metrics\", \"summary\", \"module_summary\"),\n help=\"method to summarise clustering\")\n\n parser.add_option(\"--ref-gtf-files\", dest=\"ref_gtf\", type=\"string\",\n help=\"comma separated list of reference gtf files\")\n\n # add common options (-h/--help, ...) and parse command line\n (options, args) = E.Start(parser, argv=argv)\n\n if options.method == \"metrics\":\n infile = argv[-1]\n E.info(\"loading input file: %s\" % infile)\n assert infile\n\n df = pd.read_table(infile,\n sep=\"\\t\",\n header=None,\n index_col=0)\n\n df = df.ix[:, :50]\n cluster_combs = (x for x in itertools.combinations(df.columns,\n 2))\n genes = df.index\n results_dict = {}\n all_clusts = {}\n\n E.info(\"setting up cluster containers\")\n for i in df.columns:\n clusters = set(df[i].values.tolist())\n cluster_dict = {}\n for clust in clusters:\n cluster_dict[clust] = []\n for gene in genes:\n cluster_dict[df[i][gene]].append(gene)\n\n for col in clusters:\n col_set = set()\n clust_col = cluster_dict[col]\n gene_members = itertools.combinations(clust_col,\n 2)\n col_set.update(gene_members)\n cluster_dict[col] = col_set\n all_clusts[i] = cluster_dict\n E.info(\"generating all pair-wise cluster comparisons\")\n E.info(\"calculating adjusted mutual information\")\n for k in cluster_combs:\n clusters1 = all_clusts[k[0]]\n clusters2 = all_clusts[k[1]]\n metric_dict = {}\n metric_dict['AMI'] = TS.adjustedMutualInformation(clusters1,\n clusters2)\n results_dict[k] = metric_dict\n\n res_frame = pd.DataFrame(results_dict).T\n res_frame = res_frame.reset_index()\n res_frame.drop(['level_0'], inplace=True, axis=1)\n res_frame.drop(['level_1'], inplace=True, axis=1)\n\n # flatten rand indices and add to output dataframe\n rand_arrays = TS.randIndexes(df)\n flat_adj_rand = TS.unravel_arrays(rand_arrays[0])\n flat_rand = TS.unravel_arrays(rand_arrays[1])\n res_frame['Rand_Index'] = flat_rand\n res_frame['Adjusted_Rand_Index'] = flat_adj_rand\n E.info(\"aggregating results\")\n\n res_frame.to_csv(options.stdout,\n sep=\"\\t\",\n index_label='idx')\n\n elif options.method == \"summary\":\n infiles = argv[-1]\n list_of_files = infiles.split(\",\")\n\n file_dict = {}\n for fle in list_of_files:\n fname = fle.split(\"/\")[-1]\n condition = fname.split(\"-\")[0]\n ref = fname.split(\"-\")[1]\n df_ = pd.read_table(fle,\n sep=\"\\t\",\n header=0,\n index_col=0)\n df_.columns = ['gene_id', 'cluster']\n clust_dict = {}\n for idx in df_.index:\n cluster = df_.loc[idx]['cluster']\n gene = df_.loc[idx]['gene_id']\n try:\n clust_dict[cluster] += 1\n except KeyError:\n 
clust_dict[cluster] = 1\n med_size = np.median(clust_dict.values())\n file_dict[fname] = {'condition': condition,\n 'reference': ref,\n 'median_cluster_size': med_size}\n\n outframe = pd.DataFrame(file_dict).T\n outframe.to_csv(options.stdout,\n sep=\"\\t\",\n index_label='idx')\n\n elif options.method == \"module_summary\":\n # get lncRNA/gene lengths from reference gtfs\n ref_gtfs = options.ref_gtf.split(\",\")\n length_dict = {}\n for ref in ref_gtfs:\n oref = IOTools.openFile(ref, \"rb\")\n git = GTF.transcript_iterator(GTF.iterator(oref))\n for gene in git:\n for trans in gene:\n length = trans.end - trans.start\n try:\n length_dict[trans.gene_id] += length\n except KeyError:\n length_dict[trans.gene_id] = length\n oref.close()\n\n infiles = argv[-1]\n list_of_files = infiles.split(\",\")\n\n fdfs = []\n for fle in list_of_files:\n cond = fle.split(\"/\")[-1].split(\"-\")[0]\n refer = fle.split(\"/\")[-1].split(\"-\")[1]\n _df = pd.read_table(fle, sep=\"\\t\",\n header=0, index_col=0)\n _df.columns = ['gene_id', 'cluster']\n clusters = set(_df['cluster'])\n c_dict = {}\n # summarize over each cluster\n for clust in clusters:\n lengths = []\n c_df = _df[_df['cluster'] == clust]\n for lid in c_df['gene_id']:\n lengths.append(length_dict[lid])\n c_dict[clust] = {'cluster_size': len(c_df['gene_id']),\n 'mean_length': np.mean(lengths),\n 'index': (cond, refer),\n 'module': clust}\n cdf = pd.DataFrame(c_dict).T\n # use a multindex for hierarchical indexing\n midx = pd.MultiIndex.from_tuples(cdf['index'])\n cdf.index = midx\n cdf.drop(['index'], inplace=True, axis=1)\n fdfs.append(cdf)\n\n # generate a single output df\n s_df = fdfs[0]\n fdfs.pop(0)\n for df in fdfs:\n s_df = s_df.append(df)\n\n s_df.to_csv(options.stdout,\n index_label=(\"condition\", \"reference\"),\n sep=\"\\t\")\n\n # write footer and output benchmark information.\n E.Stop()", "def help(self):\n msg = \"`%s' performs the computational aspects of genotyping-by-sequencing.\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Usage: %s [OPTIONS] ...\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Options:\\n\"\n msg += \" -h, --help\\tdisplay the help and exit\\n\"\n msg += \" -V, --version\\toutput version information and exit\\n\"\n msg += \" -v, --verbose\\tverbosity level (0/default=1/2/3)\\n\"\n msg += \" --proj1\\tname of the project used for steps 1 to 4\\n\"\n msg += \"\\t\\tmention a reference genome only if all samples belong to\\n\"\n msg += \"\\t\\t the same species, and will be mapped to the same ref genome\\n\"\n msg += \" --proj2\\tname of the project used for steps 4 to 8\\n\"\n msg += \"\\t\\tcan be the same as --proj1, or can be different\\n\"\n msg +=\"\\t\\t notably when samples come from different species\\n\"\n msg += \"\\t\\t or if one wants to align reads to different ref genomes\\n\"\n msg += \" --schdlr\\tname of the cluster scheduler (default=SGE)\\n\"\n msg += \" --queue\\tname of the cluster queue (default=normal.q)\\n\"\n msg += \" --resou\\tcluster resources (e.g. 
'test' for 'qsub -l test')\\n\"\n msg += \" --rmvb\\tremove bash scripts for jobs launched in parallel\\n\"\n msg += \" --step\\tstep to perform (1/2/3/.../9)\\n\"\n msg += \"\\t\\t1: raw read quality per lane (with FastQC v >= 0.11.2)\\n\"\n msg += \"\\t\\t2: demultiplexing per lane (with demultiplex.py v >= 1.14.0)\\n\"\n msg += \"\\t\\t3: cleaning per sample (with CutAdapt v >= 1.8)\\n\"\n msg += \"\\t\\t4: alignment per sample (with BWA MEM v >= 0.7.12, Samtools v >= 1.3, Picard and R v >= 3)\\n\"\n msg += \"\\t\\t5: local realignment per sample (with GATK v >= 3.5)\\n\"\n msg += \"\\t\\t6: local realignment per genotype (with GATK v >= 3.5)\\n\"\n msg += \"\\t\\t7: variant and genotype calling per genotype (with GATK HaplotypeCaller v >= 3.5)\\n\"\n msg += \"\\t\\t8: variant and genotype calling jointly across genotypes (with GATK GenotypeGVCFs v >= 3.5)\\n\"\n msg += \"\\t\\t9: variant and genotype filtering (with GATK v >= 3.5)\\n\"\n msg += \" --samples\\tpath to the 'samples' file\\n\"\n msg += \"\\t\\tcompulsory for all steps, but can differ between steps\\n\"\n msg += \"\\t\\t e.g. if samples come from different species or are aligned\\n\"\n msg += \"\\t\\t on different ref genomes, different samples file should\\n\"\n msg += \"\\t\\t be used for steps 4-9, representing different subsets of\\n\"\n msg += \"\\t\\t the file used for steps 1-3\\n\"\n msg += \"\\t\\tthe file should be encoded in ASCII\\n\"\n msg += \"\\t\\tthe first row should be a header with column names\\n\"\n msg += \"\\t\\teach 'sample' (see details below) should have one and only one row\\n\"\n msg += \"\\t\\tany two columns should be separated with one tabulation\\n\"\n msg += \"\\t\\tcolumns can be in any order\\n\"\n msg += \"\\t\\trows starting by '#' are skipped\\n\"\n msg += \"\\t\\t12 columns are compulsory (but there can be more):\\n\"\n msg += \"\\t\\t genotype (see details below, e.g. 'Col-0', but use neither underscore '_' nor space ' ' nor dot '.', use dash '-' instead)\\n\"\n msg += \"\\t\\t ref_genome (identifier of the reference genome used for alignment, e.g. 'Atha_v2', but use neither space ' ' nor dot '.'; the full species name, e.g. 'Arabidopsis thaliana', will be present in the file given to --dict)\\n\"\n msg += \"\\t\\t library (e.g. can be the same as 'genotype')\\n\"\n msg += \"\\t\\t barcode (e.g. 'ATGG')\\n\"\n msg += \"\\t\\t seq_center (e.g. 'Broad Institute', 'GenoToul', etc)\\n\"\n msg += \"\\t\\t seq_platform (e.g. 'ILLUMINA', see SAM format specification)\\n\"\n msg += \"\\t\\t seq_platform_model (e.g. 'HiSeq 2000')\\n\"\n msg += \"\\t\\t flowcell (e.g. 'C5YMDACXX')\\n\"\n msg += \"\\t\\t lane (e.g. '3', can be '31' if a first demultiplexing was done per index)\\n\"\n msg += \"\\t\\t date (e.g. '2015-01-15', see SAM format specification)\\n\"\n msg += \"\\t\\t fastq_file_R1 (filename, one per lane, gzip-compressed)\\n\"\n msg += \"\\t\\t fastq_file_R2 (filename, one per lane, gzip-compressed)\\n\"\n msg += \" --fcln\\tidentifier of a flowcell and lane number\\n\"\n msg += \"\\t\\tformat as <flowcell>_<lane-number>, e.g. 
'C5YMDACXX_1'\\n\"\n msg += \"\\t\\tif set, only the samples from this lane will be analyzed\\n\"\n msg += \" --pird\\tpath to the input reads directory\\n\"\n msg += \"\\t\\tcompulsory for steps 1 and 2\\n\"\n msg += \"\\t\\twill be added to the columns 'fastq_file_R*' from the sample file\\n\"\n msg += \"\\t\\tif not set, input read files should be in current directory\\n\"\n msg += \" --enz\\tname of the restriction enzyme\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=ApeKI\\n\"\n msg += \" --dmxmet\\tmethod used to demultiplex\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=4c (see the help of demultiplex.py to know more)\\n\"\n msg += \" --subst\\tnumber of substitutions allowed during demultiplexing\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=2\\n\"\n msg += \" --ensubst\\tenforce the nb of substitutions allowed\\n\"\n msg += \"\\t\\tcompulsory for step 2\\n\"\n msg += \"\\t\\tdefault=lenient/strict\\n\"\n msg += \" --adp\\tpath to the file containing the adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tsame format as FastQC: name<tab>sequence\\n\"\n msg += \"\\t\\tname: at least 'adpR1' (also 'adpR2' if paired-end)\\n\"\n msg += \"\\t\\tsequence: from 5' (left) to 3' (right)\\n\"\n msg += \" --errtol\\terror tolerance to find adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=0.2\\n\"\n msg += \" --minovl\\tminimum overlap length between reads and adapters\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=3 (in bases)\\n\"\n msg += \" --minrl\\tminimum length to keep a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=35 (in bases)\\n\"\n msg += \" --minq\\tminimum quality to trim a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=20 (used for both reads if paired-end)\\n\"\n msg += \" --maxNp\\tmaximum percentage of N to keep a read\\n\"\n msg += \"\\t\\tcompulsory for step 3\\n\"\n msg += \"\\t\\tdefault=0.2\\n\"\n msg += \" --ref\\tpath to the prefix of files for the reference genome\\n\"\n msg += \"\\t\\tcompulsory for steps 4, 5, 6, 7, 8, 9\\n\"\n msg += \"\\t\\tshould correspond to the 'ref_genome' column in --samples\\n\"\n msg += \"\\t\\te.g. '/data/Atha_v2' for '/data/Atha_v2.fa', '/data/Atha_v2.bwt', etc\\n\"\n msg += \"\\t\\tthese files are produced via 'bwa index ...'\\n\"\n msg += \" --dict\\tpath to the 'dict' file (SAM header with @SQ tags)\\n\"\n msg += \"\\t\\tcompulsory for step 4\\n\"\n msg += \"\\t\\tsee 'CreateSequenceDictionary' in the Picard software\\n\"\n msg += \" --jgid\\tcohort identifier to use for joint genotyping\\n\"\n msg += \"\\t\\tcompulsory for steps 8, 9\\n\"\n msg += \"\\t\\tuseful to launch several, different cohorts in parallel\\n\"\n msg += \" --rat\\trestrict alleles to be of a particular allelicity\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tdefault=ALL/BIALLELIC/MULTIALLELIC\\n\"\n msg += \"\\t\\tsee '--restrictAllelesTo' in GATK's SelectVariant\\n\"\n msg += \" --mdp\\tminimum value for DP (read depth; e.g. 10)\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee GATK's VariantFiltration\\n\"\n msg += \" --mgq\\tminimum value for GQ (genotype quality; e.g. 
20)\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee GATK's VariantFiltration\\n\"\n msg += \" --mnfg\\tmaximum number of filtered genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxFilteredGenotypes' in GATK's SelectVariants\\n\"\n msg += \" --mffg\\tmaximum fraction of filtered genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxFractionFilteredGenotypes' in GATK's SelectVariants\\n\"\n msg += \" --mnnc\\tmaximum number of not-called genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \" --mfnc\\tmaximum fraction of not-called genotypes to keep a variant\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tsee '--maxNOCALLfraction' in GATK's SelectVariants\\n\"\n msg += \" --fam\\tpath to the file containing pedigree information\\n\"\n msg += \"\\t\\tused in step 9\\n\"\n msg += \"\\t\\tdiscard variants with Mendelian violations (see Semler et al, 2012)\\n\"\n msg += \"\\t\\tshould be in the 'fam' format specified by PLINK\\n\"\n msg += \"\\t\\tvalidation strictness (GATK '-pedValidationType') is set at 'SILENT'\\n\"\n msg += \"\\t\\t allowing some samples to be absent from the pedigree\\n\"\n msg += \" --mvq\\tminimum GQ for each trio member to accept a variant as a Mendelian violation\\n\"\n msg += \"\\t\\tused in step 9 if '--fam' is specified\\n\"\n msg += \"\\t\\tdefault=0\\n\"\n msg += \" --xlssf\\tpath to the file with genotypes to exclude\\n\"\n msg += \"\\t\\tused in step 9 (can be especially useful if '--fam' is specified)\\n\"\n msg += \" --tmpd\\tpath to a temporary directory on child nodes (default=.)\\n\"\n msg += \"\\t\\te.g. it can be /tmp or /scratch\\n\"\n msg += \"\\t\\tused in step 4 for 'samtools sort'\\n\"\n msg += \"\\t\\tused in step 7 for 'GATK HaplotypeCaller'\\n\"\n msg += \" --jvmXms\\tinitial memory allocated to the Java Virtual Machine\\n\"\n msg += \"\\t\\tdefault=512m (can also be specified as 1024k, 1g, etc)\\n\"\n msg += \"\\t\\tused in steps 4, 5, 6, 7 and 8 for Picard and GATK\\n\"\n msg += \" --jvmXmx\\tmaximum memory allocated to the Java Virtual Machine\\n\"\n msg += \"\\t\\tdefault=4g\\n\"\n msg += \"\\t\\tused in steps 4, 5, 6, 7 and 8 for Picard and GATK\\n\"\n msg += \" --queue2\\tname of the second cluster queue (default=bigmem.q)\\n\"\n msg += \"\\t\\tused in step 4 for Picard to collect insert sizes\\n\"\n msg += \" --knowni\\tpath to a VCF file with known indels (for local realignment)\\n\"\n msg += \" --known\\tpath to a VCF file with known variants (e.g. from dbSNP)\\n\"\n msg += \" --force\\tforce to re-run step(s)\\n\"\n msg += \"\\t\\tthis removes without warning the step directory if it exists\\n\"\n msg += \"\\n\"\n msg += \"Examples:\\n\"\n msg += \" %s --step 1 --samples samples.txt\\n\" % os.path.basename(sys.argv[0])\n msg += \"\\n\"\n msg += \"Details:\\n\"\n msg += \"This program aims at genotyping a set of 'genotypes' using data from\\n\"\n msg += \"a restriction-assisted DNA sequencing (RAD-seq) experiment, also known\\n\"\n msg += \"as a genotyping-by-sequencing (GBS) experiment.\\n\"\n msg += \"Here, by 'genotype', we mean the entity which is the focus of the\\n\"\n msg += \"study. 
For instance, it can be a plant variety (or a human being), or\\n\"\n msg += \"the specific clone of a given plant variety (or a specific tumor of a\\n\"\n msg += \"given human being), etc.\\n\"\n msg += \"Importantly, note that the content of the 'genotype' column will\\n\"\n msg += \"be used to set the 'SM' (sample) tag of the 'RG' (read group) header\\n\"\n msg += \"record type of the SAM format (see http://www.htslib.org/). However,\\n\"\n msg += \"internal to this program, the term 'sample' corresponds to the unique\\n\"\n msg += \"quadruplet (genotype,flowcell,lane,barcode) for steps 1 and 2, and to\\n\"\n msg += \"the unique triplet (genotype,flowcell,lane) for the others.\\n\"\n msg += \"Jobs are executed in parallel (--schdlr). Their return status is\\n\"\n msg += \"recorded in a SQLite database which is removed at the end. If a job\\n\"\n msg += \"fails, the whole script stops with an error.\\n\"\n msg += \"\\n\"\n msg += \"Dependencies:\\n\"\n msg += \"Python >= 2.7; Biopython; pyutilstimflutre >= 0.5\\n\"\n msg += \"\\n\"\n msg += \"Report bugs to <[email protected]>.\"\n print(msg); sys.stdout.flush()", "def usage():", "def usage():", "def cmd_help(args):", "def test_usage(self):\n # Make sure the usage message is shown when no arguments\n # are given and when the -h or --help option is given.\n for options in [], ['-h'], ['--help']:\n exit_code, output = run_cli(*options)\n assert \"Usage:\" in output", "def usage(self):\n\n # header\n self.usage_header()\n\n print _(\"\"\"Screen: %(screen)s\nDescription: %(description)s\n\nUsage: %(app_name)s %(screen)s [options]\"\"\") % {\n 'app_name': constants.App.NAME,\n 'screen': self.name,\n 'description': self.description,\n }\n # any additional info in between (see other classes for reference)\n self._usage_options_example()\n\n #footer\n self.usage_footer()", "def reports_cli():", "def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -t, --transaction\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -f, --from-file <filename>\n --not-error-tolerant\n \"\"\"", "def explainerdashboard_cli(ctx):", "def command_help(args):\n\tprint_usage()\n\treturn 0", "def help(cls, extra_args=None):\n if (_is_text_interface()):\n return _create_text_help_str(cls, cls._TEXT_USAGE)\n else:\n return cls._GRAPHICAL_USAGE", "def main(cls):\n parser = cls.make_argument_parser()\n args = parser.parse_args()\n args.device = make_hoomd_device(args)\n benchmark = cls(**vars(args))\n performance = benchmark.execute()\n\n if args.device.communicator.rank == 0:\n print(f'{numpy.mean(performance)}')", "def usage(progname):\n \n sys.stderr.write(\"Usage: \" +progname + \" [-cmnv] [-z score] \"\n \" <outdir>\\n\")\n sys.stderr.write(' -c class level not fold level evaluation\\n')\n sys.stderr.write(' -m read multiquery file on stdin\\n')\n sys.stderr.write(' -n negate scores (so that most -ve is best)\\n')\n sys.stderr.write(' -v verbose messages to stderr\\n')\n sys.stderr.write(' -z score : assign identifiers not present in the output a score of score\\n')\n sys.exit(1)", "def list_metrics(self):\n pass", "def measure(self,command_exe, command_args, measure_out):\n pass", "def help(update, context):\n update.message.reply_text(\"\"\"usage \n /bus <bus name> or /bus <bus name> <stop name>\n /addstop <stop name> <stop code>\n /delstop <stop name>\n /showstops\n /help\n \"\"\")\n\n # log info\n logger.info(\"help used username:{0}\".format(update.message.from_user.username))", "def Usage():\n print 
\"\"\"\n To plot the result using the iter number of the x axis:\n\n plot_sdcard.py -i /tmp/data.txt\n\n To plot the result using time for the x axis:\n\n plot_sdcard.py -t /tmp/data.txt\n\n To plot the result from the profiler:\n\n profile_sdcard.sh\n plot_sdcard.py -p\n\n \"\"\"\n sys.exit(2)", "def help(self, *args):\n for _, v in self.useage.items():\n print v.__doc__", "def _get_metrics_options(metrics):\n metrics_options = []\n if metrics is None:\n metrics = []\n for static_metric in metrics:\n metrics_options += [\n \"-m\",\n static_metric.metric.mp_metric_name,\n str(static_metric.value),\n ]\n return metrics_options", "def _usage_options_example(self):\n pass", "def ShowAllVouchers(cmd_args=[], cmd_options={}):\n iv_hash_table = kern.globals.ivht_bucket\n num_buckets = sizeof(kern.globals.ivht_bucket) / sizeof(kern.globals.ivht_bucket[0])\n print GetIPCVoucherSummary.header\n for i in range(num_buckets):\n for v in IterateQueue(iv_hash_table[i], 'ipc_voucher_t', 'iv_hash_link'):\n print GetIPCVoucherSummary(v)", "def _display_cuda_devices():\n\n cuda_query_output = subprocess.run(\"nvidia-smi --query-gpu=gpu_uuid,gpu_name,compute_mode --format=csv\", shell=True, capture_output=True, text=True)\n # Check if command worked\n if cuda_query_output.returncode == 0:\n # Split by line jump and comma\n cuda_devices_list = [entry for entry in cuda_query_output.stdout.splitlines()]\n logger.debug(f\"CUDA devices available: {*cuda_devices_list,}\")\n # We only support \"Default\" and not \"Exclusive_Process\" for the compute mode\n if \"Default\" not in cuda_query_output.stdout:\n logger.warning(f\"GPU in 'Exclusive_Process' mode (or Prohibited), one context is allowed per device. This may prevent some openmmtools features from working. GPU must be in 'Default' compute mode\")\n # Handel the case where the command had some error\n else:\n logger.debug(f\"nvidia-smi command failed: {cuda_query_output.stderr}, this is expected if there is no GPU available\")", "def __show_all_metrics(self):\n for obj in self.metrics_list:\n self.__print_metrics_info(obj.get_name())\n print()", "def usage(app_name):\n global version\n print '\\npython {0} -a MediaLive_ARN -n Dashboard_Name [Optional parameters]\\n'.format(app_name)\n print 'Version:', version\n print '\\nThis script creates a CloudWatch Dashboard for a MediaLive/MediaPackage workflow.'\n print \"It uses the MediaLive Channel Arn as input and determines the MediaPackage instances from the \"\n print \"MediaLive channel configuration. It then creates the CloudWatch Dashboard that contains info on the\"\n print \"MediaLive channel, the two MediaPackage channels, and all of the MediaPackage endpoints.\"\n print \"\\nRequired parameters:\"\n print \"-a, --arn: MediaLive Channel ARN\"\n print \"-n, --name: Name for the CloudWatch Dashboard. \"\n print \"\"\n print \"Optional parameters\"\n print \"-l, --list: Filename of a file that contains a list of MediaLive Channel ARNs, 1 ARN per line. \"\n print \" All MediaLive channels and their corresponding MediaPackage channels will be included in \"\n print \" the CloudWatch Dashboard.\"\n print \" Note: This parameter is ignored if a channel ARN is provided via the '-a/--arn' option\"\n print \" Note: All ARNs in the list must be for channels in the same region. 
All ARNs not in the same\"\n print \" region as the first ARN in the list will be ignored.\"\n print '-h, --help: Print this help and exit.'\n print \"\"\n print 'Examples:'\n print \"\"\n print 'Using MediaLive ARN arn:aws:medialive:us-west-2:0123456789:channel:123456 and create a CloudWatch ' \\\n 'Dashboard called \"My TV Dashboard\"'\n print 'python {0} -a arn:aws:medialive:us-west-2:0123456789:channel:123456 ' \\\n '-n \"My TV Dashboard\" '.format(app_name)\n print \"\"\n print 'Using the MediaLive Channel ARN list defined in the text file \"My EML arns.txt\" create a CloudWatch' \\\n 'Dashboard called \"Primary Bouquet\".'\n print 'python {0} -l \"My EML arns.txt\" -n \"Primary Bouquet\"\\n'.format(app_name)", "def help_analyze(self):\n print(ANALYZE)", "def show_usage():\n\n usage_screen = \"\\nUsage:\\n\" \\\n f\" {basename(argv[0])} <mock_1> [<mock_2> ...]\\n\" \\\n \"\\nOptions:\\n\" \\\n \" mock-departments Send HTTP requests to create some mock departments in the backend.\\n\" \\\n \" mock-employees Send HTTP requests to create some mock employees in the backend.\\n\" \\\n \" help Show this help page.\\n\" \\\n \"\" \\\n \" verbose Enables detailed request logging for the remaining options.\\n\"\n print(usage_screen)", "def printOptions(opts,subject_ids,session_ids,task_list, run_list, acq, rec):\n uname = os.popen('uname -s -n -r').read()\n print \"\\n\"\n print \"* Pipeline started at \"+time.strftime(\"%c\")+\"on \"+uname\n print \"* Command line is : \\n \"+str(sys.argv)+\"\\n\"\n print \"* The source directory is : \"+opts.sourceDir\n print \"* The target directory is : \"+opts.targetDir+\"\\n\"\n print \"* Data-set Subject ID(s) is/are : \"+str(', '.join(subject_ids))+\"\\n\"\n # print \"* PET conditions : \"+ ','.join(opts.condiList)+\"\\n\"\n print \"* Sessions : \", session_ids, \"\\n\"\n print \"* Tasks : \" , task_list , \"\\n\"\n print \"* Runs : \" , run_list , \"\\n\"\n print \"* Acquisition : \" , acq , \"\\n\"\n print \"* Reconstruction : \" , rec , \"\\n\"", "def main():\n parser = argparse.ArgumentParser(\n description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n add_common_snmp_args(parser)\n parser.add_argument(\n \"-w\",\n \"--warning\",\n type=int,\n default=70,\n help=\"Warning memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-c\",\n \"--critical\",\n type=int,\n default=90,\n help=\"Critical memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-f\",\n \"--family\",\n required=True,\n help=\"Switch family. 
Supported families: 1910, 1920, 1920S\",\n )\n\n config = vars(parser.parse_args())\n check_snmp_args(config)\n check_thresholds(config)\n\n dataset = {}\n\n if config[\"family\"] == \"1920S\":\n cpu = ObjectType(\n ObjectIdentity(\n \"HP-SWITCHING-MIB\", \"agentSwitchCpuProcessTotalUtilization\", 0\n )\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n cpu = ObjectType(\n ObjectIdentity(\"HH3C-ENTITY-EXT-MIB\", \"hh3cEntityExtCpuUsage\", 8)\n )\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n try:\n raw_data = get_snmp_data(config, cpu)\n except ValueError as err:\n unknown_exit(SERVICE, err)\n add_vars_to_dataset(dataset, raw_data)\n\n if config[\"family\"] == \"1920S\":\n dataset[\"cpu_usage\"] = get_hp_cpu_usage(\n dataset[\"agentSwitchCpuProcessTotalUtilization\"]\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n dataset[\"cpu_usage\"] = int(dataset[\"hh3cEntityExtCpuUsage\"])\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n state, message = generate_output(config, dataset)\n report(state, message)", "def collect_stats(xcnode, cmds):\n output = ''\n\n if not xcnode.client:\n print 'ssh session does not exist for {}'.format(xcnode.host)\n return output\n\n for cmd in cmds:\n stdin, stdout, stderr = xcnode.client.exec_command(cmd)\n out = stdout.read()\n outerr = stderr.read()\n xcnode.fd.write('{} run @ {}\\n'.format(cmd, datetime.now()))\n xcnode.fd.write('stdout:\\n============:\\n{}\\n'.format(out))\n if outerr:\n xcnode.fd.write('stderr\\n===========:\\n{}\\n'.format(outerr))\n output += out + '\\n'\n output += outerr + '\\n'\n xcnode.fd.flush()\n\n return output", "def main():\n parser = argparse.ArgumentParser(description='investigate code health and random statistics')\n sub_parsers = parser.add_subparsers(dest='command_name', title='Commands', help='', metavar='<command>')\n\n sub = sub_parsers.add_parser('line-count', help='list line counts')\n sub.add_argument('files', nargs='+', help='files or folders to look in')\n sub.add_argument('--each', type=int, default=1)\n sub.add_argument('--show', action='store_true')\n sub.add_argument('--include-empty', dest='discard_empty', action='store_false')\n sub.set_defaults(func=handle_line_count)\n\n sub = sub_parsers.add_parser('include-list', help='list headers from files')\n cc.add_argument(sub)\n sub.add_argument('files', nargs='+')\n sub.add_argument('--print', dest='print_files', action='store_true')\n sub.add_argument('--print-stats', dest='print_stats', action='store_true')\n sub.add_argument('--print-max', dest='print_max', action='store_true')\n sub.add_argument('--no-list', dest='print_list', action='store_false')\n sub.add_argument('--count', default=2, type=int, help=\"only print includes that are more or equal to <count>\")\n sub.add_argument('--limit', nargs='+', help=\"limit search to theese files and folders\")\n sub.set_defaults(func=handle_list)\n\n sub = sub_parsers.add_parser('include-gv', help='generate a graphviz of the includes')\n cc.add_argument(sub)\n sub.add_argument('files', nargs='+')\n sub.add_argument('--limit', nargs='+', help=\"limit search to theese files and folders\")\n sub.add_argument('--group', action='store_true', help=\"group output\")\n sub.add_argument('--cluster', action='store_true', help=\"group output into clusters\")\n sub.set_defaults(func=handle_gv)\n\n sub = sub_parsers.add_parser('list-indents', help='list the files with the maximum indents')\n sub.add_argument('files', nargs='+')\n 
sub.add_argument('--each', type=int, default=1, help='group counts')\n sub.add_argument('--show', action='store_true', help='include files in list')\n sub.add_argument('--hist', action='store_true', help='show simple histogram')\n sub.add_argument('--include-empty', dest='discard_empty', action='store_false')\n sub.set_defaults(func=handle_list_indents)\n\n sub = sub_parsers.add_parser('missing-pragma-once', help='find headers with missing include guards')\n sub.add_argument('files', nargs='+')\n sub.set_defaults(func=handle_missing_include_guards)\n\n sub = sub_parsers.add_parser('missing-in-cmake', help='find files that existis on disk but missing in cmake')\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_missing_in_cmake)\n\n sub = sub_parsers.add_parser('list-no-project-folders', help='find projects that have not set the solution folder')\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_list_no_project_folder)\n\n sub = sub_parsers.add_parser('check-files', help=\"find files that doesn't match the name style\")\n sub.add_argument('files', nargs='+')\n cc.add_argument(sub)\n sub.set_defaults(func=handle_check_files)\n\n args = parser.parse_args()\n if args.command_name is not None:\n args.func(args)\n else:\n parser.print_help()", "def usage():\n with open(USAGE, 'r') as f:\n for line in f:\n print(line)", "def main_CL():\r\n version=1.0\r\n st = time.time()\r\n parser = OptionParser(usage=usage(), version='%s'%version)\r\n parser.add_option(\"-n\", \"--days\", dest=\"days\", default=\"30\", help=\"Days ago, defaults to 30 days\")\r\n parser.add_option(\"-s\", \"--stream\", dest=\"stream\", default=\"all\", help=\"Code Stream, defaults to all\")\r\n parser.add_option(\"-u\", \"--usage\", dest=\"usage\", default=\"\", help=\"Show usage information\")\r\n parser.add_option(\"-d\", \"--debug\", dest='debug', action=\"count\", help=\"The debug level, use multiple to get more.\")\r\n (options, args) = parser.parse_args()\r\n\r\n if options.debug > 1:\r\n print ' days %s' %(options.days)\r\n print ' args: %s' %args\r\n else:\r\n options.debug = 0\r\n \r\n if options.usage:\r\n print usage()\r\n else:\r\n obj=ListCRs()\r\n obj.setUp()\r\n since = options.days \r\n \r\n #stream = str(stream).strip() \r\n obj.listCRsCL(since, options, st) \r\n \r\n print '\\nTook a total of %3.2f secs -^' %(time.time()-st)", "def cli_help(self):\n self._generate_cli_version()\n self._generate_cli_help()\n sys.exit(0)", "def diagnostics(self,\n *opts, # type: DiagnosticsOptions\n **kwargs # type: Dict[str, Any]\n ) -> DiagnosticsResult:\n\n return super().diagnostics(*opts, **kwargs)", "def metric_options(self):\n return Optimizer.list_method_options(self.metric_creator.method_dict)", "def print_help():\n print \"\"\"\nMeasure Operating System Performance (mosp)\n-------------------------------------------\n\nUse this program to measure and report on operating system\nperformance.\n\nThis code measures operating system performance,\nincluding CPU, memory, disk and network, and\noutputs stats to screen and optionally to file\ntoo for use in performance analysis\n\nUses the psutil library\n\nInstall psutil (Ubuntu) if you don't already have it:\n sudo apt-get install python-dev\n sudo pip install psutil\n\nUsage:\n python mosp.py [options]\n\nExample usage:\n python mosp.py -W -i 2\n\nOptions:\n -h --help Display this help and exit\n -m --max-run-time Maximum time to run for before exiting\n (default is infinite)\n -i 
--interval Interval between requests in seconds\n (default is 1)\n -w --output-file Specify an output filename\n -W Output results to default filename\n default format is:\n mosp-HOSTNAME-YYYYMMDD-HHMMSS.csv\n -b --output-path Specify path to output file directory\n -j --no-header-row Suppress writing header row into CSV\n -v --version Output version information and exit\n\n \"\"\"\n return()", "def print_usage():\n usage_msg = \"\"\"\n%s.py -H <host or group> -P <path> -M <mode>\n\nUsage:\n -h, --help\n Print detailed help screen\n -H, --hostname=STRING\n Host name or group of hosts\n -V, --version\n Print version information\n -P, --path=STRING\n Path to rancid var directory. Usually the dir contains a logs dirs and hostgroup dirs\n Example : /usr/local/rancid/var\n -M, --mod=STRING\n Plugin mod. Must be one of the following : ping, hash, config, cards, filter, qos\n *ping:\n Check if all host in the hostgroup are up from the rancid point of view.\n It uses the .up file to determine the lists of host to look for\n *hash:\n Check if the firmware hash is different from the ref one (or from the previous one)\n *config:\n Check if the configuration has changed for the host / group (notify diff)\n *cards:\n Specific to 8600 models. Check the hardware cards plugged to the host (notify diff).\n *filter:\n Specific to ES-470. Check the filters (notify diff)\n *qos:\n Specific to ES-470. Check the qos values (notify diff)\n -u, --url=URL\n URL to submit passive results to Shinken Receiver with HTTP\n Need a host and service to send result.\n -a, --passive-host=STRING\n Required if not in plugin mod to send data to Shinken ws_arbiter\n -b, --passive-service=STRING\n Required if not in plugin mod to send data to Shinken ws_arbiter\n\"\"\" % PLUGIN_NAME\n print usage_msg", "def test_cw_metrics(self):\n\n instances = set()\n result = self.cw_client.list_metrics(Namespace=\"CWAgent\", MetricName=\"cpu_usage_system\")\n for i in result[\"Metrics\"]:\n instances.add(i[\"Dimensions\"][0][\"Value\"])\n\n for key, value in self.cdk_output_map.items():\n if \"Instance\" in key:\n self.assertTrue(value in instances)", "def help(update, context):\n msg = \"\"\n msg += \"\\n/covid 7-Day-Incident per Million\"\n msg += \"\\n/daylio What did I do a year ago today?\"\n msg += \"\\n/f1last Results of the last race\"\n msg += \"\\n/f1stand Driver standings\"\n msg += \"\\n/f1next Time and place of the next race\"\n msg += \"\\n/fuel prices and consump. 
(args: Xeur Ykm)\"\n msg += \"\\n/ip Outside ip address\"\n msg += \"\\n/rate Exchange rates (args: Xeur/Yhuf)\"\n msg += \"\\n/rss check rss feeds for new content\"\n msg += \"\\n/sun Time of sunrise and sunset\"\n msg += \"\\n/xkcd Sends last comic image and alt\"\n msg.rstrip()\n update.message.reply_text(msg)", "def printHelp():\n print(\"amqWorkApiMass.py -n <msgcnt> -b <body> -m <headers> -s <path/to/bodyandheaders>\")", "async def view_stats(self, ctx):\n app_info = await self.bot.application_info()\n total_ram = (psutil.virtual_memory().total >> 30) + 1\n embed = discord.Embed(\n title=\"Bot Stats\",\n description=f\"Running on a dedicated server with {total_ram}GB RAM \\n provided by RandomGhost#0666.\",\n )\n\n embed.add_field(name=\"**__General Info__**\", inline=False, value=\"\\u200b\")\n embed.add_field(name=\"Latency\", value=f\"{self.bot.latency*1000:.03f}ms\")\n embed.add_field(name=\"Guild Count\", value=f\"{len(self.bot.guilds):,}\")\n embed.add_field(name=\"User Count\", value=f\"{len(self.bot.users):,}\")\n\n embed.add_field(name=\"**__Technical Info__**\", inline=False, value=\"\\u200b\")\n embed.add_field(name=\"System CPU Usage\", value=f\"{psutil.cpu_percent():.02f}%\")\n embed.add_field(name=\"System RAM Usage\", value=f\"{psutil.virtual_memory().used/1048576:.02f} MB\")\n embed.add_field(name=\"System Uptime\", value=f\"{timedelta(seconds=int(time.time() - psutil.boot_time()))}\")\n embed.add_field(name=\"Bot CPU Usage\", value=f\"{process.cpu_percent():.02f}%\")\n embed.add_field(name=\"Bot RAM Usage\", value=f\"{process.memory_info().rss / 1048576:.02f} MB\")\n embed.add_field(name=\"Bot Uptime\", value=f\"{timedelta(seconds=int(time.time() - process.create_time()))}\")\n\n embed.add_field(name=\"**__Links__**\", inline=False, value=\"\\u200b\")\n embed.add_field(name=\"Support Server\", value=\"[https://discord.swaglyrics.dev](https://discord.swaglyrics.dev)\")\n embed.add_field(name=\"Invite\", value=\"[https://invite.swaglyrics.dev](https://invite.swaglyrics.dev)\")\n embed.add_field(\n name=\"Source\",\n value=\"[https://swaglyrics.dev/SwagLyrics-Discord-Bot]\" \"(https://swaglyrics.dev/SwagLyrics-discord-bot)\",\n )\n\n embed.set_footer(\n text=f\"Made by {app_info.owner} • {self.bot.get_user(512708394994368548)}\",\n icon_url=[\n app_info.owner.avatar_url_as(size=128),\n self.bot.get_user(512708394994368548).avatar_url_as(size=128),\n ][getrandbits(1)],\n ) # randomize clash or flabbet avatar\n\n await ctx.send(embed=embed)", "def do_stats(cs, args):\n stats_info = cs.containers.stats(args.container)\n utils.print_dict(stats_info)", "def get_all_metrics(self):\n up_time = self.uptime()\n down_time = self.downtime()\n customer_sla = self.sla()\n objective = self.slo()\n indicator = self.sli()\n avail_percentage = self.availability()\n mt_bf = self.mtbf(up_time)\n mt_tr = self.mttr(down_time)\n list_results = [up_time,down_time,customer_sla,objective,indicator,avail_percentage,mt_bf,mt_tr]\n return list_results", "def display_memcache_info(request):\n # pylint: disable-msg=E1101\n return utility.respond(request, 'admin/memcache_info',\n {'memcache_info': memcache.get_stats()})", "def list_known_cup_metrics():\r\n return [metric.__name__ for metric in cup_metrics]", "def main( argv = None ):\n\n if argv == None: argv = sys.argv\n\n # setup command line parser\n parser = E.OptionParser( version = \"%prog version: $Id$\", \n usage = globals()[\"__doc__\"] )\n\n parser.add_option(\"--category\", dest=\"category\", type=\"choice\",\n choices = (\"B\", \"C\"), 
help=\"supply help\" )\n\n ## add common options (-h/--help, ...) and parse command line \n (options, args) = E.Start( parser, argv = argv )\n\n data = getData(options.stdin)\n if options.category == \"B\":\n options.stdout.write(\"Category B pathway\\tKO\\tGenes\\tDescriptions\\n\")\n for pathway, descriptions in b2ko(data).iteritems():\n options.stdout.write(\"\\t\".join([pathway, \"; \".join(descriptions[0]), \"; \".join(descriptions[1]), \"; \".join(descriptions[2])]) + \"\\n\")\n\n elif options.category == \"C\":\n options.stdout.write(\"Category C pathway\\tKO\\tGenes\\tDescriptions\\n\")\n for pathway, descriptions in c2ko(data).iteritems():\n options.stdout.write(\"\\t\".join([pathway, \"; \".join(descriptions[0]), \"; \".join(descriptions[1]), \"; \".join(descriptions[2])]) + \"\\n\")\n else:\n raise ValueError(\"must specify the category of pathway\")\n\n\n ## write footer and output benchmark information.\n E.Stop()", "def list_usage(progname, description, command_keys, command_helps, command_aliases):\n dvars = {'prog': progname}\n dvars.update(vars())\n result = []\n result.append(description % dvars)\n for key in command_keys:\n if key in command_aliases:\n alias = ' (%s)' % command_aliases[key]\n else:\n alias = ''\n if key is not None:\n result.append((\"%s%s\" % (key, alias)).ljust(10) + ' \\t' + command_helps[key])\n else:\n result.append('')\n return '\\n'.join(result)", "def about( cls, ):\n url = r\"http://www.opencircuits.com/Python_Smart_Terminal\"\n __, mem_msg = cls.show_process_memory( )\n msg = ( f\"{cls.controller.app_name} version:{cls.controller.version} \\nmode: {cls.parameters.mode}\"\n f\"\\n by Russ Hensel\"\n f\"\\nMemory in use {mem_msg} \\nCheck <Help> or \\n{url} \\nfor more info.\" )\n messagebox.showinfo( \"About\", msg )", "def showUsage():\n None", "def collect_cluster_info(output_dir, k8s_cli):\n collect_helper(output_dir, cmd=\"{} cluster-info\".format(k8s_cli),\n file_name=\"cluster_info\", resource_name=\"cluster-info\")", "def parsing_arguments(args=None):\n description = ''\n parser = argparse.ArgumentParser(\n prog='hatchet plot-cn',\n description=description,\n formatter_class=argparse.RawTextHelpFormatter,\n )\n parser.add_argument('INPUT', help='One or more space-separated files in CN_BBC format')\n parser.add_argument(\n '-n',\n '--patientnames',\n required=False,\n default=config.plot_cn.patientnames,\n type=str,\n help='One or more space-separated patient names (default: inferred from filenames)',\n )\n parser.add_argument(\n '-u',\n '--minu',\n required=False,\n default=config.plot_cn.minu,\n type=float,\n help='Minimum proportion of a CNA to be considered subclonal (default: 0.2)\"',\n )\n parser.add_argument(\n '-x',\n '--rundir',\n required=False,\n default=config.plot_cn.rundir,\n type=str,\n help='Running directory (default: current directory)',\n )\n parser.add_argument(\n '-b',\n '--baseCN',\n required=False,\n default=config.plot_cn.basecn,\n type=int,\n help='Base copy number (default: inferred from tumor ploidy)',\n )\n parser.add_argument(\n '-sC',\n '--figsizeclones',\n required=False,\n default=config.plot_cn.figsizeclones,\n type=str,\n help='Size of clone plots in the form \"(X-SIZE, Y-SIZE)\"',\n )\n parser.add_argument(\n '-sP',\n '--figsizecn',\n required=False,\n default=config.plot_cn.figsizecn,\n type=str,\n help='Size of CN plots in the form \"(X-SIZE, Y-SIZE)\"',\n )\n parser.add_argument(\n '-sG',\n '--figsizegrid',\n required=False,\n default=config.plot_cn.figsizegrid,\n type=str,\n help='Size of grid plots 
in the form \"(X-SIZE, Y-SIZE)\"',\n )\n parser.add_argument(\n '-rC',\n '--resolutionclones',\n required=False,\n default=config.plot_cn.resolutionclones,\n type=int,\n help='Number of bins to merge together for plotting clone profiles (default: 100)\"',\n )\n parser.add_argument(\n '-rP',\n '--resolutioncn',\n required=False,\n default=config.plot_cn.resolutioncn,\n type=int,\n help='Number of bins to merge together for plotting proportions (default: 500)\"',\n )\n parser.add_argument(\n '-rG',\n '--resolutiongrid',\n required=False,\n default=config.plot_cn.resolutiongrid,\n type=int,\n help='Number of bins to merge together in grids (default: 100)\"',\n )\n parser.add_argument(\n '-e',\n '--threshold',\n required=False,\n default=config.plot_cn.threshold,\n type=float,\n help='Threshold used to classify a tumor into either diploid or tetraploid (default: 3.0)\"',\n )\n parser.add_argument(\n '--ymax',\n required=False,\n default=config.plot_cn.ymax,\n type=int,\n help='Maximum values in y-axis (default: automatically inferred)\"',\n )\n parser.add_argument(\n '--ymin',\n required=False,\n default=config.plot_cn.ymin,\n type=int,\n help='Minimum values in y-axis (default: automatically inferred)\"',\n )\n parser.add_argument(\n '--clonepalette',\n required=False,\n default=config.plot_cn.clonepalette,\n type=str,\n help='Palette for coloring the clones among Set1, Set2, Set3, Paired (default: Set1)\"',\n )\n parser.add_argument(\n '--linkage',\n required=False,\n default=config.plot_cn.linkage,\n type=str,\n help=(\n 'Linkage method used for clustering (default: single, available (single, complete, average, weighted, '\n 'centroid, median, ward) from SciPy)\"'\n ),\n )\n parser.add_argument('-V', '--version', action='version', version=f'%(prog)s {__version__}')\n args = parser.parse_args(args)\n\n if len(args.INPUT.split()) == 0:\n raise ValueError(error('Please specify at least one sample as input!'))\n if args.patientnames is None:\n patientnames = {fil: os.path.basename(fil) for fil in args.INPUT.split()}\n else:\n patientnames = {f: n for f, n in zip(args.INPUT.split(), args.patientnames.split())}\n if len(args.INPUT.split()) != len(set(patientnames.values())):\n raise ValueError(error('Multiple patients have the same name but they should unique!'))\n if args.figsizeclones is not None:\n figsizeclones = to_tuple(args.figsizeclones, error_message='Wrong format of figsizeclones!')\n if args.figsizecn is not None:\n figsizecn = to_tuple(args.figsizecn, error_message='Wrong format of figsizecn!')\n if args.figsizegrid is not None:\n figsizegrid = to_tuple(args.figsizegrid, error_message='Wrong format of figsizegrid!')\n\n if not os.path.isdir(args.rundir):\n raise ValueError(error('Running directory does not exist!'))\n if not 0.0 <= args.minu <= 1.0:\n raise ValueError(error('The minimum proportion for subclonal CNAs must be in [0, 1]!'))\n if args.baseCN is not None and args.baseCN < 2:\n raise ValueError(error('Base CN must be greater or equal than 2!'))\n if args.resolutionclones is not None and args.resolutionclones < 1:\n raise ValueError(error('Resolution must be greater than 1!'))\n if args.resolutioncn is not None and args.resolutioncn < 1:\n raise ValueError(error('Resolution must be greater than 1!'))\n if args.resolutiongrid is not None and args.resolutiongrid < 1:\n raise ValueError(error('Resolution must be greater than 1!'))\n if args.threshold < 0:\n raise ValueError(error('Threshold must be positive!'))\n if args.linkage not in {\n 'single',\n 'complete',\n 'average',\n 
'weighted',\n 'centroid',\n 'median',\n 'ward',\n }:\n raise ValueError(error('Unknown linkage method!'))\n\n if args.clonepalette == 'Set1':\n pal = plt.cm.Set1\n elif args.clonepalette == 'Set2':\n pal = plt.cm.Set2\n elif args.clonepalette == 'Set3':\n pal = plt.cm.Set3\n elif args.clonepalette == 'Paired':\n pal = plt.cm.Paired\n else:\n raise ValueError(error('Unknown clone palette!'))\n\n return {\n 'input': args.INPUT.split(),\n 'names': patientnames,\n 'rundir': args.rundir,\n 'minu': args.minu,\n 'base': args.baseCN,\n 'clonefigsize': figsizeclones,\n 'propsfigsize': figsizecn,\n 'clusterfigsize': figsizegrid,\n 'profileres': args.resolutionclones,\n 'cnres': args.resolutioncn,\n 'clusterres': args.resolutiongrid,\n 'threshold': args.threshold,\n 'linkage': args.linkage,\n 'ymax': args.ymax,\n 'ymin': args.ymin,\n 'clonepalette': pal,\n }", "def show_all(self):\n cmodules.showModuleData(\n Options.Author,\n Options.Name,\n Options.Call,\n Options.Category,\n Options.Type,\n Options.Version,\n Options.Description,\n Options.License,\n Options.Datecreation,\n Options.Lastmodified\n )\n self.show_commands()\n self.show_opt()", "def collect_metrics(application):\n\n try:\n subprocess.check_call(['juju', 'collect-metrics', application])\n except subprocess.CalledProcessError as e:\n raise Exception(\"Unable to collect metrics: {}\".format(e))", "def do_config():\n\n tracking = get_tracking()\n for unit in (\"ppm\", \"sec\"):\n\ttunit = unit\n\tif unit == \"sec\":\n\t tunit = \"seconds\"\n\tprint \"multigraph chrony_%s\" % unit\n\tprint \"graph_title NTP (Chrony) Statistics (%s)\" % unit\n\tprint \"graph_vlabel %s\" % unit\n\tprint \"graph_args --base 1000\"\n\tprint \"graph_category time\"\n\tprint \"graph_info NTP (Chrony) tracking statistics (the ones measured in %s)\" % tunit\n\tfor key in tracking[tunit]:\n\t item = tracking[tunit][key]\n\t print \"\"\"%s.label %s\n%s.draw LINE2\n%s.info %s\"\"\" % (key, item[\"label\"], key, key, item[\"label\"])\n\tprint\n return 0", "def server_stats():\n out = subprocess.check_output(cmd_preamble + [\"admin\", \"stats\"])\n return out.decode()", "def display_help(self):\n pass", "def help(self):\n pass", "def help(self):\n pass", "def help():\n print(UI.HELP)", "def metrics(self, account_id):\n from pureport_client.commands.accounts.metrics import Command\n return Command(self.client, account_id)" ]
[ "0.6161586", "0.5805755", "0.5797474", "0.5729157", "0.563271", "0.55953175", "0.5529902", "0.55097723", "0.54637194", "0.54599464", "0.54326856", "0.5381607", "0.5343133", "0.5302566", "0.52546537", "0.52469516", "0.52173275", "0.5206837", "0.5200306", "0.51897854", "0.51727396", "0.51640743", "0.5158241", "0.51516634", "0.5144523", "0.5138758", "0.513784", "0.513784", "0.5126805", "0.5117838", "0.51126784", "0.5084329", "0.507002", "0.5039946", "0.50384605", "0.5037361", "0.50331336", "0.50312936", "0.5007026", "0.50038606", "0.5002192", "0.49883378", "0.49883378", "0.4986805", "0.49821955", "0.49809027", "0.49675718", "0.49643016", "0.4961833", "0.49612725", "0.49585804", "0.49431637", "0.4935399", "0.49195337", "0.4908134", "0.4907686", "0.4902067", "0.489451", "0.4884208", "0.4878332", "0.48761544", "0.48756397", "0.48698032", "0.486975", "0.48683614", "0.4856896", "0.4854707", "0.48489055", "0.48389983", "0.48241976", "0.4821812", "0.48212132", "0.48209876", "0.48206854", "0.48198757", "0.48195964", "0.4819038", "0.48087952", "0.48070392", "0.47979268", "0.47978592", "0.47960466", "0.47859916", "0.47835273", "0.4782812", "0.47728583", "0.47706094", "0.4769582", "0.47669828", "0.47597373", "0.47585702", "0.47567037", "0.47558102", "0.47542176", "0.4752028", "0.47457978", "0.4745686", "0.4745686", "0.4744196", "0.4743613" ]
0.54437673
10
Report usage metrics for the ports of network adapters of CPCs in DPM mode. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_networkport(cmd_ctx, cpc, adapter, **options): cmd_ctx.execute_cmd( lambda: cmd_metrics_networkport(cmd_ctx, cpc, adapter, options))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def metrics_adapter(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_adapter(cmd_ctx, cpc, adapter, options))", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))", "def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))", "def metrics_env(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))", "def show(self):\n prev_queries = 0\n prev_cpu_sys = 0\n prev_cpu_user = 0\n \n lines = {\n \"Uptime (seconds)\": \"--\",\n \"Number of queries\": \"--\",\n \"Query per second\": \"--\",\n \"ACL drops\": \"--\",\n \"Dynamic drops\": \"--\",\n \"Rule drops\": \"--\",\n \"CPU Usage (%s)\": \"--\",\n \"Cache hitrate\": \"--\"\n }\n\n while True:\n try:\n # get stats from dnsdist\n stats = Statistics(console=self.console)\n global_stats = 
stats[\"global\"]\n \n qps = int(global_stats[\"queries\"]) - prev_queries\n prev_queries = int(global_stats[\"queries\"])\n cpu = (int(global_stats[\"cpu-sys-msec\"])+int(global_stats[\"cpu-user-msec\"]) - prev_cpu_sys - prev_cpu_user) / 10\n prev_cpu_sys = int(global_stats[\"cpu-sys-msec\"])\n prev_cpu_user = int(global_stats[\"cpu-user-msec\"])\n \n lines[\"Uptime (seconds)\"] = global_stats[\"uptime\"]\n lines[\"Number of queries\"] = global_stats[\"queries\"]\n lines[\"Query per second\"] = qps\n lines[\"CPU Usage (%s)\"] = cpu\n lines[\"ACL drops\"] = global_stats[\"acl-drops\"]\n lines[\"Rule drops\"] = global_stats[\"rule-drop\"]\n lines[\"Cache hitrate\"] = global_stats[\"cache-hits\"]\n lines[\"Dynamic drops\"] = global_stats[\"dyn-blocked\"]\n\n # reprint the lines \n sys.stdout.write(\"\\033[1mDashboard for dnsdist\\033[0m\\n\")\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Global:\\n\")\n for k,v in lines.items():\n sys.stdout.write(\"\\t%s: %s\\n\" % (k,v))\n sys.stdout.write(\"Backends:\\n\")\n for s in stats[\"backends\"]:\n if not len(s[\"name\"]):\n s[\"name\"] = \"--\"\n if not len(s[\"pools\"]):\n s[\"pools\"] = \"--\"\n sys.stdout.write(\"\\t#%s / %s / %s / %s\\n\" % (s[\"#\"],s[\"address\"],s[\"name\"],s[\"pools\"]) )\n sys.stdout.write(\"\\t\\tNumber of queries: %s\\n\" % s[\"queries\"])\n sys.stdout.write(\"\\t\\tQuery per second: %s\\n\" % s[\"qps\"])\n sys.stdout.write(\"\\t\\tNumber of drops: %s\\n\" % s[\"drops\"])\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Ctrl+C to exit\\n\")\n \n time.sleep(1)\n \n \n # move up cursor and delete whole line\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for k,v in lines.items():\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for s in stats[\"backends\"]:\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n \n del stats\n except KeyboardInterrupt:\n break", "def usage(self, host):", "def ShowPort(cmd_args=None, cmd_options={}):\n show_kmsgs = True\n if \"-K\" in cmd_options:\n show_kmsgs = False\n if not cmd_args:\n print \"Please specify the address of the port whose details you want to print\"\n print ShowPort.__doc__\n return\n port = kern.GetValueFromAddress(cmd_args[0], 'struct ipc_port *')\n print PrintPortSummary.header\n PrintPortSummary(port, show_kmsgs)", "def main():\n\n args = parse_args()\n metric_sender = MetricSender(verbose=args.verbose, debug=args.debug)\n\n discovery_key_disk = 'disc.disk'\n interval = 3\n pcp_disk_dev_metrics = ['disk.dev.total', 'disk.dev.avactive']\n item_prototype_macro_disk = '#OSO_DISK'\n item_prototype_key_tps = 'disc.disk.tps'\n item_prototype_key_putil = 'disc.disk.putil'\n\n disk_metrics = pminfo.get_sampled_data(pcp_disk_dev_metrics, interval, 2)\n\n pcp_metrics_divided = {}\n for metric in pcp_disk_dev_metrics:\n pcp_metrics_divided[metric] = {k: v for k, v in disk_metrics.items() if metric in k}\n\n # do TPS checks; use disk.dev.total\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[0]],\n pcp_disk_dev_metrics[0] + '.')\n\n # Add dynamic items\n metric_sender.add_dynamic_metric(discovery_key_disk, item_prototype_macro_disk, filtered_disk_totals.keys())\n\n # calculate the TPS and add them to the ZaggSender\n for disk, totals 
in filtered_disk_totals.iteritems():\n disk_tps = (totals[1] - totals[0]) / interval\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_tps, disk): disk_tps})\n\n # do % Util checks; use disk.dev.avactive\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[1]],\n pcp_disk_dev_metrics[1] + '.')\n\n # calculate the % Util and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n total_active = (float)(totals[1] - totals[0]) / 1000.0\n putil = 100 * total_active / interval\n\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_putil, disk): putil})\n\n metric_sender.send_metrics()", "def main(cls):\n parser = cls.make_argument_parser()\n args = parser.parse_args()\n args.device = make_hoomd_device(args)\n benchmark = cls(**vars(args))\n performance = benchmark.execute()\n\n if args.device.communicator.rank == 0:\n print(f'{numpy.mean(performance)}')", "def usage(err=''):\r\n m = '%s\\n' %err\r\n m += 'Default usage is to list Cases closed for the 30 days\\n'\r\n m += '\\n Example:\\n'\r\n m += ' closedcases -n 90 \\n' \r\n m += ' \\n'\r\n# m += ' closedcases -n 60 -s blast5 \\n'\r\n return m", "def usage():\n \n print '-b <bench> the bench to show.'\n print '-c <config> the config to show (GPU, 8888, 565, etc).'\n print '-d <dir> a directory containing bench_r<revision>_<scalar> files.'\n print '-e <file> file containing expected bench values/ranges.'\n print ' Will raise exception if actual bench values are out of range.'\n print ' See bench_expectations.txt for data format and examples.'\n print '-f <revision>[:<revision>] the revisions to use for fitting.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-i <time> the time to ignore (w, c, g, etc).'\n print ' The flag is ignored when -t is set; otherwise we plot all the'\n print ' times except the one specified here.'\n print '-l <title> title to use for the output graph'\n print '-m <representation> representation of bench value.'\n print ' See _ListAlgorithm class in bench_util.py.'\n print '-o <path> path to which to write output; writes to stdout if not specified'\n print '-r <revision>[:<revision>] the revisions to show.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-s <setting>[=<value>] a setting to show (alpha, scalar, etc).'\n print '-t <time> the time to show (w, c, g, etc).'\n print '-x <int> the desired width of the svg.'\n print '-y <int> the desired height of the svg.'\n print '--default-setting <setting>[=<value>] setting for those without.'", "def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -r, --recursive\n --dbs-conf <conf_file>\n --show-prod\n --show-caf\n --only-subscribed\n --only-custodial\n \"\"\"", "def port_desc_stats_reply_handler(self, ev):\n msg = ev.msg\n dpid = msg.datapath.id\n ofproto = msg.datapath.ofproto\n\n config_dict = {ofproto.OFPPC_PORT_DOWN: \"Down\",\n ofproto.OFPPC_NO_RECV: \"No Recv\",\n ofproto.OFPPC_NO_FWD: \"No Farward\",\n ofproto.OFPPC_NO_PACKET_IN: \"No Packet-in\"}\n\n state_dict = {ofproto.OFPPS_LINK_DOWN: \"Down\",\n ofproto.OFPPS_BLOCKED: \"Blocked\",\n ofproto.OFPPS_LIVE: \"Live\"}\n\n ports = []\n for p in ev.msg.body:\n ports.append('port_no=%d hw_addr=%s name=%s config=0x%08x '\n 'state=0x%08x curr=0x%08x advertised=0x%08x '\n 'supported=0x%08x peer=0x%08x curr_speed=%d '\n 'max_speed=%d' %\n (p.port_no, 
p.hw_addr,\n p.name, p.config,\n p.state, p.curr, p.advertised,\n p.supported, p.peer, p.curr_speed,\n p.max_speed))\n\n if p.config in config_dict:\n config = config_dict[p.config]\n else:\n config = \"up\"\n\n if p.state in state_dict:\n state = state_dict[p.state]\n else:\n state = \"up\"\n port_feature = (config, state, p.curr_speed)\n self.port_features[dpid][p.port_no] = port_feature", "def show_help(argv=None):\n if argv:\n if \"list_datasets\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"list_datasets\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm ls\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"search_datasets\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"search_datasets\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm search <keyword>\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"show_mounts\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"show_mounts\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm ps\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"mount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"mount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm mount <dataset_name> [<mount_path>]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"mmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"mmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm mmount <dataset_name> [<dataset_name> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"unmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"unmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm unmount <mount_id> [<cleanup_flag>]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"munmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"munmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm munmount <mount_id> [<mount_id> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"clean\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"clean\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm clean\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n else:\n sdm_util.print_message(\"Unrecognized command\")\n return 1\n else:\n sdm_util.print_message(\"command : sdm <COMMAND> [<COMMAND_SPECIFIC_ARGS> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(\"Available Commands\")\n\n tbl = PrettyTable()\n tbl.field_names = [\"COMMAND\", \"DESCRIPTION\"]\n for cmd in COMMANDS:\n command, _, desc = cmd\n command_str = \" | \".join(command)\n tbl.add_row([command_str, desc])\n\n sdm_util.print_message(tbl)\n sdm_util.print_message(\"\")\n return 0", "def handleCmdLine(self):\n description = \"Nagios monitoring script to check for open ports\\n\"\n usage = (\"%prog <options>\\n\")\n parser = OptionParser(usage=usage, description=description)\n\n parser.add_option(\"-c\", \"--config\",\n type=\"string\",\n help=\"path to open ports configuration file\")\n parser.add_option(\"-l\", \"--list\",\n 
type=\"string\",\n help=\"supply list of allowed ports seperated by comma.\")\n\n (self.options, args) = parser.parse_args()", "def CountAllPorts(cmd_args=None, cmd_options={}):\n p_set = set()\n p_intransit = set()\n p_bytask = {}\n\n find_psets = False\n if \"-P\" in cmd_options:\n find_psets = True\n\n ## optionally include port sets\n ## DO recurse on busy ports\n ## DO log progress\n IterateAllPorts(None, CountPortsCallback, (p_set, p_intransit, p_bytask), find_psets, True, True)\n sys.stderr.write(\"{:120s}\\r\".format(' '))\n\n print \"Total ports found: {:d}\".format(len(p_set))\n print \"In Transit: {:d}\".format(len(p_intransit))\n print \"By Task:\"\n for pname in sorted(p_bytask.keys()):\n count = p_bytask[pname]\n print \"\\t{: <20s}: table={: <5d}, transit={: <5d}, other={: <5d}\".format(pname, count['table'], count['transit'], count['other'])\n return", "def _get_metrics_options(metrics):\n metrics_options = []\n if metrics is None:\n metrics = []\n for static_metric in metrics:\n metrics_options += [\n \"-m\",\n static_metric.metric.mp_metric_name,\n str(static_metric.value),\n ]\n return metrics_options", "def test_shortopt(self):\n pp = ParlaiParser(False, False)\n pp.add_argument(\"-m\", \"--model\")\n pp.add_argument(\"-mtw\", \"--multitask-weights\")\n opt = pp.parse_args([\"-m\", \"memnn\"])\n print(opt)", "def usage(print_code_name=True):\n\tprint(\"*********************************************************************\")\n\tprint(\"* Scanner and Flooder Tool *\")\n\tprint(\"*********************************************************************\")\n\tprint()\n\tprint(\"ex, scan usage: scanner.py -s <target_host> <start_port> <end_port>\")\n\tprint(\"-h, -help\t- print out the description of usage\")\n\tprint(\"-s\t - scan a target host and a range of ports\\n\"\n\t\t\t\" Requires three args, <host> and <port start> and <port end>\")\n\tprint(\"-l - list the sets of ports found open for all hosts scanned\")\n\tprint(\"-pf - flood a target host with an ICMP PING flood.\\n\" \n\t\t\t\" Requires three args, <host> and <port start> and <port end>\")\n\tprint(\"-syn - flood a target host with an SYN ACK flood.\\n\"\n\t\t \" Requires two arguments: <host>, <ports> in format of 'p1,p2,p3,...,pn'. 
Has optional third argument, <amount> \")\n\tprint(\"-udp - DDOS a target host with UPD Packets.\\n\"\n\t\t \" Requires 3 arguments: <host>, <port>, <amount> (default =1)\")\n\tprint(\"-a - save hosts and open ports to a .txt file\")\n\tprint(\"-r - read in hosts and open ports from a .txt file\")\n\tprint()\n\tprint()\n\tprint(\"Examples: \")\n\tprint(\"-l\")\n\tprint(\"-s 192.168.0.1 0 500 # host, port range (space delimited)\")\n\tprint(\"-pf 192.168.0.1 100 # host, num of pings (optional, defaults to 1)\")\n\tprint(\"-syn 192.168.0.1 80,8080 100 # host, ports (comma delimited), and amount (optional)\")\n\tprint(\"-udp 192.168.0.1 80 100 # host, port, amount (optional, defaults to 1)\")", "def show_meraki_mx_ports(self, job_req):\n logger.info(\"Job Received : %s\", job_req)\n api_uri = f\"/v1/networks/{self.meraki_net}/appliance/ports\"\n data = get_meraki_api_data(api_uri)\n # Parse the JSON\n message = \"Here is the detail: \\n\"\n port_counter = 0\n check_icon = chr(0x2705)\n for mx_port in data:\n message += f\"* **{mx_port['number']}** | Port Mode: **{mx_port['type']}** | Vlan ID: **{mx_port['vlan']}** \\n\"\n port_counter += 1\n message += f\"{check_icon} Total: **{port_counter}** \\n\" \n return message", "def metrics(self, account_id):\n from pureport_client.commands.accounts.metrics import Command\n return Command(self.client, account_id)", "def print_help():\n print \"\"\"\nMeasure Operating System Performance (mosp)\n-------------------------------------------\n\nUse this program to measure and report on operating system\nperformance.\n\nThis code measures operating system performance,\nincluding CPU, memory, disk and network, and\noutputs stats to screen and optionally to file\ntoo for use in performance analysis\n\nUses the psutil library\n\nInstall psutil (Ubuntu) if you don't already have it:\n sudo apt-get install python-dev\n sudo pip install psutil\n\nUsage:\n python mosp.py [options]\n\nExample usage:\n python mosp.py -W -i 2\n\nOptions:\n -h --help Display this help and exit\n -m --max-run-time Maximum time to run for before exiting\n (default is infinite)\n -i --interval Interval between requests in seconds\n (default is 1)\n -w --output-file Specify an output filename\n -W Output results to default filename\n default format is:\n mosp-HOSTNAME-YYYYMMDD-HHMMSS.csv\n -b --output-path Specify path to output file directory\n -j --no-header-row Suppress writing header row into CSV\n -v --version Output version information and exit\n\n \"\"\"\n return()", "def Usage(shorthelp=0, writeto_stdout=0, detailed_error=None,\n exitcode=None, show_cmd=None, show_global_flags=False):\n printer('%s: Incorrect usage; details below.' 
% show_cmd)\n printer('Correct usage is as follows:')\n printer('')\n for line in (' ' + cmd.__doc__.rstrip()).splitlines():\n printer(line)\n # Print out str(FLAGS) for just the UICmd-specific flags.\n tmp_flags = flags.FlagValues()\n unused_cmd = type(cmd)(show_cmd, tmp_flags)\n prefix = _UICMD_MODULE_NAME + ':\\n'\n flag_str = tmp_flags.ModuleHelp(_UICMD_MODULE_NAME)\n flag_str = flag_str.lstrip()\n if flag_str.startswith(prefix):\n flag_str = flag_str[len(prefix):]\n if flag_str:\n printer('')\n printer('flags:')\n for line in flag_str.splitlines():\n printer(line)\n if detailed_error is not None:\n printer('')\n printer('The incorrect usage is as follows:')\n printer('')\n for line in unicode(detailed_error).splitlines():\n printer(' ' + line)", "def build_command(args, parser):\n cmd = \"ipmitool -I lanplus\"\n if not args.host:\n print \"\\nERROR: hostname is required.\\n\"\n parser.print_help()\n sys.exit(1)\n else:\n cmd += ' -H ' + args.host\n if args.port:\n cmd += ' -p ' + args.port\n if not args.user:\n print \"\\nERROR: username is required.\\n\"\n parser.print_help()\n sys.exit(1)\n else:\n cmd += ' -U ' + args.user\n if args.passwd:\n cmd += ' -P ' + args.passwd\n cmd += ' dcmi power reading'\n if args.interval:\n global INTERVAL\n INTERVAL = args.interval\n if args.nread:\n global NREAD\n NREAD = args.nread\n else:\n global INFINITY\n INFINITY = True\n if args.store:\n global STORE\n STORE = True\n return cmd", "def printOptions(opts,subject_ids,session_ids,task_list, run_list, acq, rec):\n uname = os.popen('uname -s -n -r').read()\n print \"\\n\"\n print \"* Pipeline started at \"+time.strftime(\"%c\")+\"on \"+uname\n print \"* Command line is : \\n \"+str(sys.argv)+\"\\n\"\n print \"* The source directory is : \"+opts.sourceDir\n print \"* The target directory is : \"+opts.targetDir+\"\\n\"\n print \"* Data-set Subject ID(s) is/are : \"+str(', '.join(subject_ids))+\"\\n\"\n # print \"* PET conditions : \"+ ','.join(opts.condiList)+\"\\n\"\n print \"* Sessions : \", session_ids, \"\\n\"\n print \"* Tasks : \" , task_list , \"\\n\"\n print \"* Runs : \" , run_list , \"\\n\"\n print \"* Acquisition : \" , acq , \"\\n\"\n print \"* Reconstruction : \" , rec , \"\\n\"", "def print_help(self):\r\n\t\ttext = \"\\tName: ml_scikit_OPTICS\"\r\n\t\ttext += \"\\n\\t\\tThis machine learning plugin uses scikit-learn's OPTICS algorithm.\\n\"\r\n\t\ttext += \"\\n\\t\\tOptional Parameters:\"\r\n\t\ttext += \"\\n\\t\\t\\tOPTICS_skip_normalization: Do NOT perform normalization (scaling) of data, skip this step.\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_eps: Specify eps parameter (default is 1.0).\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_min_samples: Specify min_samples parameter (default is 5).\"\r\n#\r\n# OPTICS (with memory complexity n) is an alternative to DBSCAN (with memory complexity n^2)\r\n# which has time complexity n^2 in general with the default max_eps = np.inf. 
\r\n# We will set max_eps = eps to reduce the run-time.\r\n#\r\n\t\treturn text", "def display_memcache_info(request):\n # pylint: disable-msg=E1101\n return utility.respond(request, 'admin/memcache_info',\n {'memcache_info': memcache.get_stats()})", "def stat_cuda(msg: str) -> None:\n print(f'-- {msg:<35} allocated: %dM, max allocated: %dM, cached: %dM, max cached: %dM' % (\n torch.cuda.memory_allocated() / 1024 / 1024,\n torch.cuda.max_memory_allocated() / 1024 / 1024,\n torch.cuda.memory_cached() / 1024 / 1024,\n torch.cuda.max_memory_cached() / 1024 / 1024\n ))", "def _cmd_metrics(args):\n if (\n len(args.cnarrays) > 1\n and args.segments\n and len(args.segments) > 1\n and len(args.cnarrays) != len(args.segments)\n ):\n raise ValueError(\n \"Number of coverage/segment filenames given must be \"\n \"equal, if more than 1 segment file is given.\"\n )\n\n cnarrs = map(read_cna, args.cnarrays)\n if args.segments:\n args.segments = map(read_cna, args.segments)\n table = metrics.do_metrics(cnarrs, args.segments, args.drop_low_coverage)\n write_dataframe(args.output, table)", "def cli_options():\n\n parser = argparse.ArgumentParser(\n description='c[apirca]grep',\n formatter_class=argparse.RawTextHelpFormatter\n )\n\n parser.add_argument('-d', '--def', dest='defs',\n help='Network Definitions directory location. \\n',\n default='./def')\n\n # -i and -t can be used together, but not with any other option.\n ip_group = parser.add_argument_group()\n # take 1 or more IPs\n ip_group.add_argument('-i', '--ip', dest='ip', nargs='+', type=is_valid_ip,\n help='Return list of definitions containing the '\n 'IP(s).\\nMultiple IPs permitted.')\n\n ip_group.add_argument('-t', '--token', dest='token',\n help=('See if an IP is contained within the given '\n 'token.\\nMust be used in conjunction with '\n '-i/--ip [addr].'))\n\n exclusive_group = parser.add_mutually_exclusive_group()\n # the rest of the arguments are mutually exclusive with each other,\n # and -i / -t\n exclusive_group.add_argument('-c', '--cmp', dest='cmp', nargs=2,\n metavar=('OBJ', 'OBJ'),\n help=('Compare the two given network '\n 'definition tokens'))\n\n exclusive_group.add_argument('-g', '--gmp', dest='gmp', nargs=2,\n type=is_valid_ip, metavar=('IP', 'IP'),\n help=('Diff the network objects to'\n ' which the given IP(s) belong'))\n\n exclusive_group.add_argument('-o', '--obj', dest='obj', nargs='+',\n help=('Return list of IP(s) contained within '\n 'the given token(s)'))\n\n exclusive_group.add_argument('-s', '--svc', dest='svc', nargs='+',\n help=('Return list of port(s) contained '\n 'within given token(s)'))\n\n exclusive_group.add_argument('-p', '--port', dest='port', nargs=2,\n metavar=('PORT', 'PROTO'),\n help=('Returns a list of tokens containing '\n 'the given port and protocol'))\n\n return parser", "def help_usage(self):\n\t\thelptext = \"\"\"\nUSAGE\n==========\n1.) connect to server:\n\tWhen starting p22p, you dont automatically connect to a server.\n\tTo do this, use the 'connect'-command.\n\tWithout additional arguements, p22p will connect to {default}.\n\tIf you want to connect to a other server, use the following syntax:\n\t\tconnect PROTO://SERVER:PORT\n\twhere PROTO is either 'ws' or 'wss'. 'wss' is a SSL/TLS connection, ws a insecure connection.\n\tNote that the communication between to clients is always CBC-encrypted (additionaly to other encryption methods.)\n\tThe CBC-password will never be sent to the server.\n\tThe Server only receives a hash of the password.\n\n2.) 
join or create a Group\n\tp22p is using Group as Network-Namespaces.\n\tEach Groupmember has a unique CID. However, the CID is only unique in the Group and only unique during that clients connection.\n\tTo create a new Group, use the 'create'-command:\n\t\tcreate NAME PASSWORD [KEYFILE]\n\tThe server only receives a hash of the PASSWORD.\n\tNote that groupnames starting with a \"#\" are reserved (You cant create them except if you have the key).\n\tIf you want to create a reserved group, pass the path to the keyfile.\n\tWhen creating a Group, you will automatically join that Group.\n\t\n\tTo join a Group, use the 'join'-command:\n\t\tjoin NAME PSWD\n\tThe Server only reveives a hash of the Password.\n\n3.) relay a Port\n\tTo relay a port from your Device to a target device, use the 'relay'-command:\n\t\trelay PEER [LOCAL] REMOTE\n\tIf LOCAL is 0 or ommited, a free port is choosen.\n\tThis Command will create a socket listening to Port LOCAL on your DEVICE.\n\tOnce a connection is made to that Port, P22P will send a message to PEER, telling him to create a connection to Port REMOTE.\n\tAll data sent trough this connection will be encrypted with the Group's Password.\n\tThe Server only knows the hash of the password, meaning only Groupmembers know how to decrypt the Message.\n\tThe Server knows who should receive this message and sends it to only that Client.\n\n4.) Leaving a Group\n\tOnce you are finished, you can leave the Group.\n\tThis will close all connections to peers and free your CID.\n\tAll Groupmembers will receive a message that you left the Group.\n\tto leave a Group, use thr 'leave'-command.\n\n5.) Disconnecting\n\tIf you want to disconnect from the Server, use the 'disconnect'-command.\n\tThis will close all connections and also auto-leaves the Group (see 4.)\n\n6.) Exiting\n\tTo close this script, use the 'exit'-command.\n\tIf required, the 'disconnect'-command is invoked.\n\n7.) Additional commands\n\tTo get a list of all aviable commands, use the 'help'-command.\n\tTo get a description about a command, use the gollowing syntax:\n\t\thelp COMMAND\n\tHere are some useful commands:\n\t\tping PEER: pings a peer (not the Server.)\n\t\tlist: shows a list of all connections and relayed ports. 
also shows some information.\n\t\tcid: shows your current CID.\n\"\"\".format(default=DEFAULT_SERVER)\n\t\tself.stdout.write(helptext)", "def show_cdp(self):\n txt = \"\"\n for inf in self.interfaces:\n if self.interfaces[inf]['connect'] != ['none', 'none']:\n txt += \"%s interface %s connect to %s on interface %s\\n\"%(self.hostname, inf, self.interfaces[inf]['connect'][0], self.interfaces[inf]['connect'][1]) \n return txt", "def metrics_nic(cmd_ctx, cpc, partition, nic, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_nic(cmd_ctx, cpc, partition, nic, options))", "def usage(msgarg):\n if msgarg:\n sys.stderr.write(\"error: %s\\n\" % msgarg)\n print(\"\"\"\\\n usage: %s [options]\n\n options:\n -d increase debug msg verbosity level\n -c N emit N classes (def: 500) per instances\n -I N emit N instances\n\n \"\"\" % os.path.basename(sys.argv[0]))\n sys.exit(1)", "def help_opt(self):\n print(OPTIONS)", "def HelpCommand(self, unused_args, unused_sub_opts=None, unused_headers=None,\n unused_debug=None):\n self.OutputUsageAndExit()", "def usage(exit_code):\n\n sys.stderr.write(\"\"\"\n List the processes that are listening to a port.\n Defaults to ZeroMQ port of 5570.\n\n Use by:\n listeningPort [--help] [--short | --pid | --proc] [--kill] \\\n <port0> [<port1> ...]\n e.g.:\n listeningPort 5570 # The ZeroMQ default port\n listeningPort 5570 5571 5572 # Multiple ports may be checked\n listeningPort --short 5570\n listeningPort $(seq 5570 5580) # Ports 5570 through 5580 inclusive.\n\n For the case of a free port, output similar to:\n Port 5571 : Nobody listening\n\n --help = this message\n\n Only one of the following can be supplied:\n --short = Output consists of only three space separated fields:\n <port> <pid of listener> <process name of listener>\n Ports with nobody listening gets ignored for output.\n --pid = Output consists only of a pid\n --proc = Output consists only of process names\n --kill = Any ports with a listener will be killed with \"kill -9 <pid>\"\n\n Return codes:\n 255 == Invalid command line.\n 0 == Nobody listening to <port>\n > 0 == The number of ports someone is listening to.\n For a series of port, this value is the number\n of ports with a listener.\n For a single port, this will be 1 is someone\n is listening.\n \\n\n ***NOTICE***: This routine does NOT work on OSX!\n Replace this with:\n lsof -i<port> | awk '{ print $2; }' | head -2\n PID\n 18101\n This prints only the pid of the process using this port.\n Now use \"ps\" to find the process:\n ps ax | grep 18191 | grep -v grep\n 10191 s001 S+ 0:00.00 /usr/bin/python /usr/local/bin/logCollector\n \"\"\")\n sys.exit(exit_code)", "def cmd_port(args):", "def set_options():\n help_f = \"\"\"%s\n \nUsage:\n %s -h <host> -p <port>\n \nOptions:\n -i, --help \n -h, --host=<host>\n -p, --port=<port>\n \n\"\"\" % (sys.argv[0], sys.argv[0])\n arguments = docopt(help_f)\n return arguments", "def main():\n known_args, unknown_args = parse_known_args()\n if not unknown_args:\n # return an error message if no command is provided\n sys.exit(\"Please provide a command to benchmark: $ humann_benchmark COMMAND\")\n try:\n process = subprocess.Popen(\" \".join(unknown_args),shell=True)\n except (EnvironmentError, subprocess.CalledProcessError):\n sys.exit(\"Unable to execute command: \" + \" \".join(unknown_args))\n pid=str(process.pid)\n start=time.time()\n max_memory=0\n while process.poll() is None:\n time.sleep(1)\n # while the process is running check on the memory use\n # get the pids of the main process and all children 
(and their children)\n pids=get_pids(pid)\n stdout=subprocess.check_output([\"ps\",\"--pid\",\",\".join(pids),\"-o\",\"pid,rss,command\"]).decode(\"utf-8\")\n print(\"\\n\"+stdout+\"\\n\")\n # remove the header from the process output\n status=[i.split() for i in filter(lambda x: x, stdout.split(\"\\n\")[1:])]\n # memory is the sum of all rss\n memory=sum(int(i[1]) for i in status)\n if memory > max_memory:\n max_memory=memory\n \n end=time.time()\n print(\"Time: {:.0f} minutes\".format((end-start)/60))\n print(\"Max Memory (RSS): {:.1f} GB\".format(max_memory*1.0/1024**2))", "def treatCmdOpts(argv):\n baseName = os.path.basename(__file__)\n amc.cBaseName = colored(baseName, 'yellow')\n\n helpTxt = amc.cBaseName + ' analyses observation statistics file for selected GNSSs'\n\n # create the parser for command line arguments\n parser = argparse.ArgumentParser(description=helpTxt)\n\n parser.add_argument('--obsstat', help='observation statistics file', type=str, required=True)\n\n parser.add_argument('--freqs', help='select frequencies to use (out of {freqs:s}, default {freq:s})'.format(freqs='|'.join(gfzc.lst_freqs), freq=colored(gfzc.lst_freqs[0], 'green')), default=gfzc.lst_freqs[0], type=str, required=False, action=gco.freqtype_action, nargs='+')\n\n parser.add_argument('--cutoff', help='cutoff angle in degrees (default {mask:s})'.format(mask=colored('0', 'green')), default=0, type=int, required=False, action=gco.cutoff_action)\n\n parser.add_argument('--dbcvs', help='Add information to CVS database (default {cvsdb:s})'.format(cvsdb=colored(gco.CVSDB_OBSTLE, 'green')), required=False, type=str, default=gco.CVSDB_OBSTLE)\n\n parser.add_argument('--plot', help='displays interactive plots (default False)', action='store_true', required=False, default=False)\n\n parser.add_argument('--logging', help='specify logging level console/file (two of {choices:s}, default {choice:s})'.format(choices='|'.join(gco.lst_logging_choices), choice=colored(' '.join(gco.lst_logging_choices[3:5]), 'green')), nargs=2, required=False, default=gco.lst_logging_choices[3:5], action=gco.logging_action)\n\n # drop argv[0]\n args = parser.parse_args(argv[1:])\n\n # return arguments\n return args.obsstat, args.freqs, args.cutoff, args.dbcvs, args.plot, args.logging", "def main():\n options = docopt(__doc__)\n\n # In case the user asked for verbose logging, increase\n # the log level to debug.\n if options[\"--verbose\"] > 0:\n logging.basicConfig(level=logging.DEBUG)\n LOGGER.setLevel(logging.DEBUG)\n\n LOGGER.debug(\n \"Received options: %s\",\n options,\n )\n\n billing_account_id = _get_billing_account_id()\n member_accounts = _get_member_accounts(\n billing_account_id=billing_account_id,\n options=options,\n )\n _flush_out(accounts=member_accounts, options=options)\n\n return 0", "def parse_options():\n description = \"\"\"DDoS_Wall is designed to mitigate common types of DDoS attacks. It offers system\n monitoring and will enable TCP cookies if the system is under attack, this helps\n mitigate SYN flood attacks. It also provides protection against HTTP based attacks which it\n will automatically detect and the offending IP addresses will be blocked. 
ddos_wall must be run\n with root privileges\"\"\"\n parser = optparse.OptionParser(description=description)\n parser.add_option('-c', '--cpu_orange', default=0, help='orange threshold for CPU utilisation', metavar='<ARG>')\n parser.add_option('-C', '--cpu_red', default=0, help='red threshold for CPU utilisation', metavar='<ARG>')\n parser.add_option('-m', '--memory_orange', default=0, help='orange threshold for RAM usage', metavar='<ARG>')\n parser.add_option('-M', '--memory_red', default=0, help='red threshold for RAM usage', metavar='<ARG>')\n parser.add_option('-n', '--network_orange', default=0, help='orange threshold for Network usage', metavar='<ARG>')\n parser.add_option('-N', '--network_red', default=0, help='red threshold for Network usage', metavar='<ARG>')\n parser.add_option('-p', '--port', default=1234, help='port that proxy listens on', metavar='<ARG>')\n parser.add_option('-a', '--ip_address', help='MANDATORY - ip address of server', metavar='<ARG>')\n parser.add_option('-I', '--interface', default='eth0', help='the interface forwarding traffic', metavar='<ARG>')\n parser.add_option('-t', '--time', default=10, help='the number of minutes that threshold is calculated over',\n metavar='<ARG>')\n parser.add_option('-i', '--interval', default=10, help='the interval between polling the server', metavar='<ARG>')\n parser.add_option('-s', '--setup', action='store_true', default=False,\n help='setup DDoS_Wall')\n parser.add_option('-r', '--reset', action='store_true', default=False, help='resets DDoS_Wall')\n\n opts, args = parser.parse_args()\n\n # IP address must be supplied\n if opts.ip_address is None:\n print(\"Please supply an IP Address for the server e.g --ip_address 10.10.10.10\")\n exit(-1)\n\n options = dict()\n options['port'] = opts.port # port that proxy listens on\n options['ip_address'] = opts.ip_address # IP address of server\n options['interface'] = opts.interface # the network interface\n options['cpu_orange_threshold'] = float(opts.cpu_orange)\n options['cpu_red_threshold'] = float(opts.cpu_red)\n options['ram_orange_threshold'] = float(opts.memory_orange)\n options['ram_red_threshold'] = float(opts.memory_red)\n options['network_orange_threshold'] = float(opts.network_orange)\n options['network_red_threshold'] = float(opts.network_red)\n options['time_period'] = opts.time # how long in minutes the running average for the monitoring should be\n options['interval'] = opts.interval # length of tim in seconds between polling resource\n options['setup'] = opts.setup # If setup needs running\n options['reset'] = opts.reset # Reset DDoS_Wall\n\n return options", "async def stats(self, ctx):\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)", "def metrics_flash(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_flash(cmd_ctx, cpc, options))", "def ShowMQueue(cmd_args=None, cmd_options={}):\n if not cmd_args:\n print \"Please specify the address of the ipc_mqueue whose details you want to print\"\n print ShowMQueue.__doc__\n return\n space = 0\n if \"-S\" in cmd_options:\n space = kern.GetValueFromAddress(cmd_options[\"-S\"], 'struct ipc_space *')\n mqueue = kern.GetValueFromAddress(cmd_args[0], 'struct ipc_mqueue *')\n wq_type = mqueue.data.pset.setq.wqset_q.waitq_type\n if int(wq_type) == 3:\n psetoff = getfieldoffset('struct ipc_pset', 'ips_messages')\n pset = unsigned(ArgumentStringToInt(cmd_args[0])) - unsigned(psetoff)\n print PrintPortSetSummary.header\n PrintPortSetSummary(kern.GetValueFromAddress(pset, 'struct ipc_pset *'), 
space)\n elif int(wq_type) == 2:\n portoff = getfieldoffset('struct ipc_port', 'ip_messages')\n port = unsigned(ArgumentStringToInt(cmd_args[0])) - unsigned(portoff)\n print PrintPortSummary.header\n PrintPortSummary(kern.GetValueFromAddress(port, 'struct ipc_port *'))\n else:\n print \"Invalid mqueue? (waitq type {:d} is invalid)\".format(int(wq_type))", "def desc_stats_reply_handler(self, ev):\n body = ev.msg.body\n datapath = ev.msg.datapath\n dpid = datapath.id\n self.logger.info('event=DescStats Switch dpid=%s is mfr_desc=\"%s\" '\n 'hw_desc=\"%s\" sw_desc=\"%s\" serial_num=\"%s\" dp_desc=\"%s\"',\n dpid, body.mfr_desc, body.hw_desc, body.sw_desc,\n body.serial_num, body.dp_desc)", "def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -t, --transaction\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -f, --from-file <filename>\n --not-error-tolerant\n \"\"\"", "def main():\n arguments = docopt(__doc__)\n tail(arguments['--host'], int(arguments['--port']),\n arguments['--source'], arguments['--source-host'],\n arguments['--type'])", "def gather_info_and_display():\n # Obtain total rss displayed in memory.stat for each group,\n # container and service.\n try:\n output_mem = pipe_command(GREP_CMD, AWK_CMD, cwd=MEMPATH)\n LOG.debug(\n 'command: %s\\n%s',\n \"grep -rs total_rss '/sys/fs/cgroup/memory/' \"\n \"| awk '$2>0{print$0}' \",\n output_mem)\n except subprocess.CalledProcessError as error:\n LOG.error('Could not get total_rss memory, error=%s', error)\n return 1\n\n mem_info = get_meminfo()\n pt_groups = gather_groups_memory(output_mem)\n pt_cont = gather_containers_memory(output_mem)\n pt_serv = sys_service_memory()\n\n # Dump the tables out\n print('\\nPer groups memory usage:')\n\n # Get string to be printed and create list of elements separated by \\n\n list_of_table_lines = pt_groups.get_string().split('\\n')\n\n # Use the first line (+---+-- ...) 
as horizontal rule to insert later\n horizontal_line = list_of_table_lines[0]\n\n # Print the table, except last two lines ( \"Total\" row + final separator).\n print(\"\\n\".join(list_of_table_lines[:-2]))\n # Print separator, and finally the \"Total\" row.\n print(horizontal_line)\n print(\"\\n\".join(list_of_table_lines[-2:]))\n\n pt_namespc = prettytable.PrettyTable(\n ['Namespace',\n 'Resident Set Size (MiB)',\n ], caching=False)\n pt_namespc.align = 'l'\n pt_namespc.align['Resident Set Size (MiB)'] = 'r'\n\n print('\\nPer namespace memory usage:')\n for n_s in MEMORY['namespaces']:\n pt_namespc.add_row(\n [n_s,\n MEMORY['namespaces'][n_s],\n ])\n print(pt_namespc)\n\n print('\\nPer container memory usage:')\n print(pt_cont)\n\n print('\\nPer service memory usage:')\n print(pt_serv)\n\n base_mebib = 0.0\n k8s_system = 0.0\n k8s_addon = 0.0\n platform_memory_percent = 0.0\n\n # Calculate base memory usage (i.e., normal memory, exclude K8S and VMs)\n # e.g., docker, system.slice, user.slice\n for group in MEMORY['cgroups']:\n if group in BASE_GROUPS:\n base_mebib += float(MEMORY['cgroups'][group])\n\n # K8S platform system usage (essential) and addons usage (non-essential)\n for n_s in MEMORY['namespaces']:\n if n_s in K8S_NAMESPACE_SYSTEM:\n k8s_system += MEMORY['namespaces'][n_s]\n elif n_s in K8S_NAMESPACE_ADDON:\n k8s_addon += MEMORY['namespaces'][n_s]\n\n # Calculate platform memory usage\n platform_mebib = base_mebib + k8s_system\n\n anon_mebib = float(mem_to_mebibytes(\n mem_info['Active(anon)'] + mem_info['Inactive(anon)'])) * KBYTE\n avail_mebib = float(mem_to_mebibytes(\n mem_info['MemAvailable'])) * KBYTE\n total_mebib = float(anon_mebib + avail_mebib)\n\n anon_percent = py2_round(100 * anon_mebib / total_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n reserved_mebib = get_platform_reserved_memory()\n # Calculate platform memory in terms of percent reserved\n if reserved_mebib > 0.0:\n platform_memory_percent = py2_round(\n 100 * platform_mebib / reserved_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n pt_platf = prettytable.PrettyTable(\n ['Reserved',\n 'Platform',\n 'Base',\n 'K8s Platform system',\n 'k8s-addon'\n ], caching=False)\n pt_platf.align = 'l'\n\n pt_platf.add_row(\n [reserved_mebib,\n '{} ({}%)'.format(platform_mebib, platform_memory_percent),\n base_mebib,\n k8s_system,\n k8s_addon\n ])\n print('\\nPlatform memory usage in MiB:')\n print(pt_platf)\n\n pt_4k = prettytable.PrettyTable(\n ['Anon',\n 'Cgroup-rss',\n 'Available',\n 'Total'\n ], caching=False)\n pt_4k.align = 'l'\n\n pt_4k.add_row(\n ['{} ({}%)'.format(anon_mebib, anon_percent),\n MEMORY['cgroups']['total_rss'],\n avail_mebib,\n total_mebib\n ])\n\n print('\\n4K memory usage in MiB:')\n print(pt_4k)\n\n return 0", "def do_config():\n\n tracking = get_tracking()\n for unit in (\"ppm\", \"sec\"):\n\ttunit = unit\n\tif unit == \"sec\":\n\t tunit = \"seconds\"\n\tprint \"multigraph chrony_%s\" % unit\n\tprint \"graph_title NTP (Chrony) Statistics (%s)\" % unit\n\tprint \"graph_vlabel %s\" % unit\n\tprint \"graph_args --base 1000\"\n\tprint \"graph_category time\"\n\tprint \"graph_info NTP (Chrony) tracking statistics (the ones measured in %s)\" % tunit\n\tfor key in tracking[tunit]:\n\t item = tracking[tunit][key]\n\t print \"\"\"%s.label %s\n%s.draw LINE2\n%s.info %s\"\"\" % (key, item[\"label\"], key, key, item[\"label\"])\n\tprint\n return 0", "def dicom_cli():", "def print_usage():\n usage_msg = \"\"\"\n%s.py -H <host or group> -P <path> -M <mode>\n\nUsage:\n -h, --help\n Print detailed help 
screen\n -H, --hostname=STRING\n Host name or group of hosts\n -V, --version\n Print version information\n -P, --path=STRING\n Path to rancid var directory. Usually the dir contains a logs dirs and hostgroup dirs\n Example : /usr/local/rancid/var\n -M, --mod=STRING\n Plugin mod. Must be one of the following : ping, hash, config, cards, filter, qos\n *ping:\n Check if all host in the hostgroup are up from the rancid point of view.\n It uses the .up file to determine the lists of host to look for\n *hash:\n Check if the firmware hash is different from the ref one (or from the previous one)\n *config:\n Check if the configuration has changed for the host / group (notify diff)\n *cards:\n Specific to 8600 models. Check the hardware cards plugged to the host (notify diff).\n *filter:\n Specific to ES-470. Check the filters (notify diff)\n *qos:\n Specific to ES-470. Check the qos values (notify diff)\n -u, --url=URL\n URL to submit passive results to Shinken Receiver with HTTP\n Need a host and service to send result.\n -a, --passive-host=STRING\n Required if not in plugin mod to send data to Shinken ws_arbiter\n -b, --passive-service=STRING\n Required if not in plugin mod to send data to Shinken ws_arbiter\n\"\"\" % PLUGIN_NAME\n print usage_msg", "def main():\n import getopt\n\n try:\n options, remainder = getopt.getopt(\n sys.argv[1:], '',\n ['help', # Print usage msg, exit\n 'short', # Output is shortened\n 'pid', # Output only pid of listenig process\n 'proc', # Output only process name of listening port\n 'kill', # Kill the process give its port\n ]\n )\n except getopt.GetoptError as err:\n sys.stderr.write(str(err) + '\\n')\n usage(1)\n\n shortened = False\n pid_only = False\n proc_only = False\n kill = False\n for opt, arg in options:\n if opt in ['--help']:\n usage(0)\n elif opt in ['--short']:\n shortened = True\n elif opt in ['--pid']:\n pid_only = True\n elif opt in ['--proc']:\n proc_only = True\n elif opt in ['--kill']:\n kill = True\n else:\n # Should never happen. 
getopt() will catch this.\n sys.stderr.write('Unhandled option:\"%s\"\\n' % opt)\n usage(1)\n\n try:\n if len(remainder):\n for aport in remainder:\n int(aport) # Insist on a valid integer.\n else:\n remainder = []\n remainder.append(PORT)\n except ValueError as err:\n sys.stderr.write('port number must be all numeric:%s\\n' %\n str(remainder))\n return 255\n ret_code = 0\n for aport in remainder:\n status = listening(aport, shortened, pid_only, proc_only, kill)\n if status == 255:\n return 255 # Illegal option\n ret_code += status\n\n return ret_code", "def commandline_options():\n parser = argparse.ArgumentParser(\n description='ocn_diags_generator: CESM wrapper python program for Ocean Diagnostics packages.')\n\n parser.add_argument('--backtrace', action='store_true',\n help='show exception backtraces as extra debugging '\n 'output')\n\n parser.add_argument('--debug', action='store_true',\n help='extra debugging output')\n\n #parser.add_argument('--config', nargs=1, required=True, help='path to config file')\n\n options = parser.parse_args()\n return options", "def main():\n parser = argparse.ArgumentParser(\n description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n add_common_snmp_args(parser)\n parser.add_argument(\n \"-w\",\n \"--warning\",\n type=int,\n default=70,\n help=\"Warning memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-c\",\n \"--critical\",\n type=int,\n default=90,\n help=\"Critical memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-f\",\n \"--family\",\n required=True,\n help=\"Switch family. Supported families: 1910, 1920, 1920S\",\n )\n\n config = vars(parser.parse_args())\n check_snmp_args(config)\n check_thresholds(config)\n\n dataset = {}\n\n if config[\"family\"] == \"1920S\":\n cpu = ObjectType(\n ObjectIdentity(\n \"HP-SWITCHING-MIB\", \"agentSwitchCpuProcessTotalUtilization\", 0\n )\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n cpu = ObjectType(\n ObjectIdentity(\"HH3C-ENTITY-EXT-MIB\", \"hh3cEntityExtCpuUsage\", 8)\n )\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n try:\n raw_data = get_snmp_data(config, cpu)\n except ValueError as err:\n unknown_exit(SERVICE, err)\n add_vars_to_dataset(dataset, raw_data)\n\n if config[\"family\"] == \"1920S\":\n dataset[\"cpu_usage\"] = get_hp_cpu_usage(\n dataset[\"agentSwitchCpuProcessTotalUtilization\"]\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n dataset[\"cpu_usage\"] = int(dataset[\"hh3cEntityExtCpuUsage\"])\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n state, message = generate_output(config, dataset)\n report(state, message)", "def describe_cost_management_exports(self):\n return [{\"name\": self.export_name, \"container\": self.container, \"directory\": self.directory}]", "def main():\n\n ip_filename = arguments.ip_file.strip()\n\n # Set project directory to 'logs' unless an optional directory was given\n if arguments.project_dir:\n project = arguments.project_dir\n else:\n project = 'logs'\n\n if arguments.device_class:\n device_cls = arguments.device_class.strip()\n else:\n # Default device class for Netmiko\n device_cls = 'cisco_ios'\n\n ips = []\n ips = load_txt_file(ip_filename)\n\n total_devices = len(ips)\n # Track devices which fail login or pings\n missing_devices = []\n # Track devices which were successfully accessed\n devices_verified = 0\n\n # Create Directory for show output based on the Project Name\n path = os.path.join(\"./\", project.strip())\n 
# print path\n if not os.path.exists(path):\n os.makedirs(path)\n print(f\"Created directory: {path}\")\n\n # Create logfile for the discovery run in same directory as the resulting show commands\n # logfilename = project + \"-logfile.log\"\n # logfilename = os.path.join(path, logfilename)\n\n if total_devices > 1:\n heading = f\"##### Executing show commands for discovery project {project} for {str(total_devices)} devices! #####\"\n else:\n heading = f\"##### Executing show commands for discovery project {project} for {str(total_devices)} device! #####\"\n\n print(\"#\" * len(heading))\n print(heading)\n print(\"#\" * len(heading))\n\n print(f\"Device IP(s) in project {project}:\")\n for i in ips:\n print(f\"\\t{i}\")\n print(\"--------------------------\")\n print(f\"Total devices: {str(len(ips))}\")\n print(\"#\" * len(heading))\n print(\"\\n\")\n\n ## Default Credentials\n # Default list of credentials in format username, user password, enable password\n credentials = ['cisco, cisco, cisco']\n\n ## Load Credentials if -c or --creds option was used\n if arguments.creds:\n # Override default credentials as a new credential file with one or more sets of credentials was provided\n cred_filename = arguments.creds\n credentials = load_txt_file(cred_filename)\n\n ##### SHOW COMMANDS\n commands = []\n\n ## Load custom show commands if -c or --show option was used\n if arguments.show:\n # Override default list of show commands as a new file with one or more show commands was provided\n show_filename = arguments.show\n custom_showcmds = load_txt_file(show_filename)\n\n # first command to send is an end to get back to the main prompt\n commands = custom_showcmds\n\n else:\n # DEFAULT SHOW COMMANDS\n commands = [\"show version\",\n ]\n\n # if not arguments.pingonly:\n # print(\"Sending \" + str(len(commands)) + \" show commands:\")\n # for x in range(0, len(commands)):\n # print(\"\\t\" + commands[x])\n\n # For each IP in the ip address file, attempt to ping, attempt to log in, attempt to enter enable mode and\n # execute and save show command output\n for mgmt_ip in ips:\n\n login_success = False\n enable_success = False\n output = ''\n hostname = \"dev_\" + mgmt_ip\n\n # If Ping is successful attempt to log in and if that is successful attempt to enter enable mode and\n # execute list of show commands\n device_pings = ping_device(mgmt_ip)\n\n if device_pings:\n print(f\"Device {mgmt_ip} Responds to Pings!\\n\")\n\n # If the -i or --icmppingonly option was provided when the script was called, then only execute the ping code.\n if arguments.icmppingonly:\n # Keep a count of the devices that are pingable\n devices_verified += 1\n # Skip everything else as the icmp ping only option was given\n continue\n\n if len(credentials) > 1:\n print(\"**** Attempting multiple credentials to access device....\")\n\n try_telnet = False\n # Credential Loop\n for line in credentials:\n\n lineitem = line.split(',')\n uname = lineitem[0].strip()\n upwd = lineitem[1].strip()\n epwd = lineitem[2].strip()\n\n if not try_telnet:\n\n print(f\"\\t**** Attempting user credentials for {uname} with SSH.\")\n\n try:\n dev_conn = ConnectHandler(device_type=device_cls, ip=mgmt_ip, username=uname, password=upwd,\n secret=epwd)\n login_success = True\n\n\n except NetMikoAuthenticationException:\n print(f\"\\tNetMikoAuthenticationException: Device failed SSH Authentication with username {uname}\")\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'Failed Authentication')\n login_success = False\n # continue\n\n 
except (EOFError, SSHException, NetMikoTimeoutException):\n print('\\tSSH is not enabled for this device.')\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'Failed SSH')\n login_success = False\n try_telnet = True\n # continue\n\n except Exception as e:\n print(\"\\tGeneral Exception: ERROR!:\" + str(sys.exc_info()[0]) + \"==>\" + str(sys.exc_info()[1]))\n print(str(e))\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'General Exception')\n login_success = False\n # continue\n\n if login_success:\n print(\"\\t**** SSH Login Succeeded! Will not attempt login with any other credentials.\")\n # Break out of credential loop\n break\n else:\n print(\"\\t**** SSH Login Failed!\")\n # continue\n\n # Try Telnet\n if try_telnet:\n print(\"\\t**** Attempting user credentials for \" + uname + \" with Telnet.\")\n\n try:\n dev_conn = ConnectHandler(device_type='cisco_ios_telnet', ip=mgmt_ip, username=uname,\n password=upwd,\n secret=epwd)\n login_success = True\n\n except NetMikoAuthenticationException:\n print(f\"\\tNetMikoAuthenticationException: Device failed SSH Authentication with username {uname}\")\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'Failed Authentication')\n login_success = False\n continue\n\n except Exception as e:\n print(\"\\tGeneral Exception: ERROR!:\" + str(sys.exc_info()[0]) + \"==>\" + str(sys.exc_info()[1]))\n print(str(e))\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'General Exception')\n login_success = False\n continue\n\n if login_success:\n print(\"\\t**** Telnet Login Succeeded! Will not attempt login with any other credentials.\")\n # Break out of credential loop\n break\n else:\n print(\"\\t**** Telnet Login Failed!\")\n continue\n\n if login_success:\n # Check to see if login has resulted in enable mode (i.e. 
priv level 15)\n is_enabled = dev_conn.check_enable_mode()\n\n if not is_enabled:\n try:\n dev_conn.enable()\n enable_success = True\n except Exception as e:\n print(str(e))\n print(\"\\tCannot enter enter enable mode on device!\")\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'failed enable')\n enable_success = False\n continue\n else:\n print(\"\\tDevice already in enabled mode!\")\n enable_success = True\n\n if enable_success:\n\n for cmd in commands:\n output += dev_conn.send_command(cmd, strip_prompt=False, strip_command=False)\n dev_conn.exit_config_mode()\n dev_conn.disconnect()\n\n # output contains a stream of text vs individual lines\n # split into individual lies for further parsing\n # output_lines = re.split(r'[\\n\\r]+', output)\n\n # show_info = get_show_info(output_lines)\n #\n # if show_info['hostname']:\n # hostname = show_info.pop('hostname')\n\n # print(\"Information for device: \" + hostname)\n # for k, v in show_info.items():\n # print(\"\\t\" + k +\"\\t\\t-\\t\" + v)\n\n # Save output to file\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\n log_filename = hostname + \"-\" + timestr + \".txt\"\n log_filename = os.path.join(path, log_filename)\n\n log_file = open(log_filename, 'w')\n log_file.write(\"!#Output file for device \" + hostname + \"\\n\")\n log_file.write(\"!#Commands executed on \" + timestr + \"\\n\\r\")\n log_file.write(\"!\\n\")\n log_file.write(output)\n log_file.close()\n devices_verified += 1\n print(\"\\nOutput results saved in: \" + log_filename + \"\\n\\n\")\n\n\n else:\n # Device does not PING\n print(\"Device is unreachable\")\n missing_devices.append(mgmt_ip)\n\n # Totals Verification\n if arguments.icmppingonly:\n info = (\"Total number of devices in IP list:\\t\\t\" + str(total_devices) + \"\\n\",\n \"Total number of devices which responded to pings:\\t\" + str(devices_verified) + \"\\n\")\n else:\n info = (\"Total number of devices in IP list:\\t\\t\" + str(total_devices) + \"\\n\",\n \"Total number of show command output files:\\t\" + str(devices_verified) + \"\\n\")\n\n\n # Print Note on totals\n for note in info:\n print(note)", "def main():\n cli = DhcpClientCLI()\n\n parser = argparse.ArgumentParser(\n description='Management CLI for Mobility DHCP Client',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n\n # Add sub commands\n subparsers = parser.add_subparsers(title='subcommands', dest='cmd')\n\n # List\n subparser = subparsers.add_parser(\n 'list_dhcp_records',\n help='Lists all records from Redis',\n )\n subparser.set_defaults(func=cli.list_all_record)\n\n # Add\n subparser = subparsers.add_parser(\n 'add_rec',\n help='Add ip allocation record',\n )\n subparser.add_argument(\n 'mac', help='Mac address, e.g. \"8a:00:00:00:0b:11\"',\n type=str,\n )\n subparser.add_argument(\n 'ip', help='IP address, e.g. \"1.1.1.1\"',\n type=ip_address,\n )\n\n subparser.add_argument(\n 'state',\n help='DHCP protocol state 1 to 7, e.g. \"1\"',\n type=int,\n )\n subparser.add_argument(\n 'subnet',\n help='IP address subnet, e.g. \"1.1.1.0/24\"',\n type=ipaddress.ip_network,\n )\n\n subparser.add_argument('dhcp', help='DHCP IP address, e.g. \"1.1.1.100\"')\n subparser.add_argument('lease', help='Lease time in seconds, e.g. \"100\"')\n subparser.set_defaults(func=cli.add_record)\n\n # del\n subparser = subparsers.add_parser(\n 'del_rec',\n help='Add ip allocation record',\n )\n subparser.add_argument('mac', help='Mac address, e.g. 
\"8a:00:00:00:0b:11\"')\n subparser.set_defaults(func=cli.del_record)\n\n # set default gw\n subparser = subparsers.add_parser(\n 'set_default_gw',\n help='Set default GW',\n )\n subparser.add_argument('ip', help='IP address, e.g. \"1.1.1.1\"')\n\n subparser.set_defaults(func=cli.set_deafult_gw)\n\n # set gw mac\n subparser = subparsers.add_parser(\n 'set_gw_mac',\n help='Set GW Mac address',\n )\n subparser.add_argument('mac', help='Mac address, e.g. \"8a:00:00:00:0b:11\"')\n\n subparser.set_defaults(func=cli.set_deafult_gw)\n\n # Parse the args\n args = parser.parse_args()\n if not args.cmd:\n parser.print_usage()\n sys.exit(1)\n\n # Execute the sub-command function\n args.func(args)", "def test_get_host_configuration_metrics1(self):\n pass", "def _GenAppcommandsUsage(cmd, printer):\n # pylint: disable=too-many-arguments,unused-argument\n def Usage(shorthelp=0, writeto_stdout=0, detailed_error=None,\n exitcode=None, show_cmd=None, show_global_flags=False):\n \"\"\"A replacement for app.usage.\"\"\"\n printer('%s: Incorrect usage; details below.' % show_cmd)\n printer('Correct usage is as follows:')\n printer('')\n for line in (' ' + cmd.__doc__.rstrip()).splitlines():\n printer(line)\n # Print out str(FLAGS) for just the UICmd-specific flags.\n tmp_flags = flags.FlagValues()\n unused_cmd = type(cmd)(show_cmd, tmp_flags)\n prefix = _UICMD_MODULE_NAME + ':\\n'\n flag_str = tmp_flags.ModuleHelp(_UICMD_MODULE_NAME)\n flag_str = flag_str.lstrip()\n if flag_str.startswith(prefix):\n flag_str = flag_str[len(prefix):]\n if flag_str:\n printer('')\n printer('flags:')\n for line in flag_str.splitlines():\n printer(line)\n if detailed_error is not None:\n printer('')\n printer('The incorrect usage is as follows:')\n printer('')\n for line in unicode(detailed_error).splitlines():\n printer(' ' + line)\n\n return Usage", "def do_stats(self, args):\n total_cpu = free_cpu = in_use_cpu = 0\n\n summary = self._qm.get_all_host_summary()\n for host_id, host_info in summary.viewitems():\n host_cpu = int(host_info['total cores'])\n total_cpu += host_cpu\n locked = host_info.get('locked by')\n if locked:\n # If host is locked then all CPUs are in use.\n in_use_cpu += host_cpu\n else:\n free_host_cpu = int(host_info['free cores'])\n in_use_cpu += (host_cpu - free_host_cpu)\n free_cpu += free_host_cpu\n\n print('total CPU: ', total_cpu)\n print('used/locked CPU: ', in_use_cpu)\n print('free CPU: ', free_cpu)\n capacity = float(in_use_cpu) / float(total_cpu)\n print('capacity used: %.1f%%' % (capacity * 100,))\n capacity = float(free_cpu) / float(total_cpu)\n print('capacity remaining: %.1f%%' % (capacity * 100,))", "def Usage():\n print \"\"\"\n To plot the result using the iter number of the x axis:\n\n plot_sdcard.py -i /tmp/data.txt\n\n To plot the result using time for the x axis:\n\n plot_sdcard.py -t /tmp/data.txt\n\n To plot the result from the profiler:\n\n profile_sdcard.sh\n plot_sdcard.py -p\n\n \"\"\"\n sys.exit(2)", "def explainerdashboard_cli(ctx):", "def main( argv = None ):\n\n if argv == None: argv = sys.argv\n\n # setup command line parser\n parser = E.OptionParser( version = \"%prog version: $Id$\", \n usage = globals()[\"__doc__\"] )\n\n parser.add_option(\"--category\", dest=\"category\", type=\"choice\",\n choices = (\"B\", \"C\"), help=\"supply help\" )\n\n ## add common options (-h/--help, ...) 
and parse command line \n (options, args) = E.Start( parser, argv = argv )\n\n data = getData(options.stdin)\n if options.category == \"B\":\n options.stdout.write(\"Category B pathway\\tKO\\tGenes\\tDescriptions\\n\")\n for pathway, descriptions in b2ko(data).iteritems():\n options.stdout.write(\"\\t\".join([pathway, \"; \".join(descriptions[0]), \"; \".join(descriptions[1]), \"; \".join(descriptions[2])]) + \"\\n\")\n\n elif options.category == \"C\":\n options.stdout.write(\"Category C pathway\\tKO\\tGenes\\tDescriptions\\n\")\n for pathway, descriptions in c2ko(data).iteritems():\n options.stdout.write(\"\\t\".join([pathway, \"; \".join(descriptions[0]), \"; \".join(descriptions[1]), \"; \".join(descriptions[2])]) + \"\\n\")\n else:\n raise ValueError(\"must specify the category of pathway\")\n\n\n ## write footer and output benchmark information.\n E.Stop()", "def config_pbc_md(self):\n\n self._config_md()\n self.title = \"PBC MD Simulation\"\n self.cntrl[\"cut\"] = 8.0\n self.cntrl[\"igb\"] = 0\n self.cntrl[\"iwrap\"] = 1\n self.cntrl[\"ntp\"] = 1\n self.cntrl[\"barostat\"] = 2", "def command_help(args):\n\tprint_usage()\n\treturn 0", "def add_opts(self, optparser):\n optlist = [\n optparse.make_option(\n '-d', '--jnc-output',\n dest='directory',\n help='Generate output to DIRECTORY.'),\n optparse.make_option(\n '--jnc-package',\n dest='package',\n help='Root package name of generated sources'),\n optparse.make_option(\n '--jnc-help',\n dest='jnc_help',\n action='store_true',\n help='Print help on usage of the JNC plugin and exit'),\n optparse.make_option(\n '--jnc-serial',\n dest='serial',\n action='store_true',\n help='Turn off usage of multiple threads.'),\n optparse.make_option(\n '--jnc-verbose',\n dest='verbose',\n action='store_true',\n help='Verbose mode: Print detailed debug messages.'),\n optparse.make_option(\n '--jnc-debug',\n dest='debug',\n action='store_true',\n help='Print debug messages. 
Redundant if verbose mode is on.'),\n optparse.make_option(\n '--jnc-no-classes',\n dest='no_classes',\n action='store_true',\n help='Do not generate classes.'),\n optparse.make_option(\n '--jnc-no-schema',\n dest='no_schema',\n action='store_true',\n help='Do not generate schema.'),\n optparse.make_option(\n '--jnc-no-pkginfo',\n dest='no_pkginfo',\n action='store_true',\n help='Do not generate package-info files.'),\n optparse.make_option(\n '--jnc-ignore-errors',\n dest='ignore',\n action='store_true',\n help='Ignore errors from validation.'),\n optparse.make_option(\n '--jnc-import-on-demand',\n dest='import_on_demand',\n action='store_true',\n help='Use non explicit imports where possible.'),\n optparse.make_option(\n '--jnc-classpath-schema-loading',\n dest='classpath_schema_loading',\n help='Load schema files using classpath rather than location.')\n ]\n g = optparser.add_option_group('JNC output specific options')\n g.add_options(optlist)", "def print_usage_command(self):\n print self.get_usage_command()", "def print_usage_command(self):\n print self.get_usage_command()", "def help_help(self):\n print(\"List commands or print details about a command\")", "def help(self, args):\n print('No commands available for this consumer')", "async def view_stats(self, ctx):\n app_info = await self.bot.application_info()\n total_ram = (psutil.virtual_memory().total >> 30) + 1\n embed = discord.Embed(\n title=\"Bot Stats\",\n description=f\"Running on a dedicated server with {total_ram}GB RAM \\n provided by RandomGhost#0666.\",\n )\n\n embed.add_field(name=\"**__General Info__**\", inline=False, value=\"\\u200b\")\n embed.add_field(name=\"Latency\", value=f\"{self.bot.latency*1000:.03f}ms\")\n embed.add_field(name=\"Guild Count\", value=f\"{len(self.bot.guilds):,}\")\n embed.add_field(name=\"User Count\", value=f\"{len(self.bot.users):,}\")\n\n embed.add_field(name=\"**__Technical Info__**\", inline=False, value=\"\\u200b\")\n embed.add_field(name=\"System CPU Usage\", value=f\"{psutil.cpu_percent():.02f}%\")\n embed.add_field(name=\"System RAM Usage\", value=f\"{psutil.virtual_memory().used/1048576:.02f} MB\")\n embed.add_field(name=\"System Uptime\", value=f\"{timedelta(seconds=int(time.time() - psutil.boot_time()))}\")\n embed.add_field(name=\"Bot CPU Usage\", value=f\"{process.cpu_percent():.02f}%\")\n embed.add_field(name=\"Bot RAM Usage\", value=f\"{process.memory_info().rss / 1048576:.02f} MB\")\n embed.add_field(name=\"Bot Uptime\", value=f\"{timedelta(seconds=int(time.time() - process.create_time()))}\")\n\n embed.add_field(name=\"**__Links__**\", inline=False, value=\"\\u200b\")\n embed.add_field(name=\"Support Server\", value=\"[https://discord.swaglyrics.dev](https://discord.swaglyrics.dev)\")\n embed.add_field(name=\"Invite\", value=\"[https://invite.swaglyrics.dev](https://invite.swaglyrics.dev)\")\n embed.add_field(\n name=\"Source\",\n value=\"[https://swaglyrics.dev/SwagLyrics-Discord-Bot]\" \"(https://swaglyrics.dev/SwagLyrics-discord-bot)\",\n )\n\n embed.set_footer(\n text=f\"Made by {app_info.owner} • {self.bot.get_user(512708394994368548)}\",\n icon_url=[\n app_info.owner.avatar_url_as(size=128),\n self.bot.get_user(512708394994368548).avatar_url_as(size=128),\n ][getrandbits(1)],\n ) # randomize clash or flabbet avatar\n\n await ctx.send(embed=embed)", "def diagnostics(self,\n *opts, # type: DiagnosticsOptions\n **kwargs # type: Dict[str, Any]\n ) -> DiagnosticsResult:\n\n return super().diagnostics(*opts, **kwargs)", "def parsing_arguments(args=None):\n description = 
''\n parser = argparse.ArgumentParser(\n prog='hatchet plot-cn',\n description=description,\n formatter_class=argparse.RawTextHelpFormatter,\n )\n parser.add_argument('INPUT', help='One or more space-separated files in CN_BBC format')\n parser.add_argument(\n '-n',\n '--patientnames',\n required=False,\n default=config.plot_cn.patientnames,\n type=str,\n help='One or more space-separated patient names (default: inferred from filenames)',\n )\n parser.add_argument(\n '-u',\n '--minu',\n required=False,\n default=config.plot_cn.minu,\n type=float,\n help='Minimum proportion of a CNA to be considered subclonal (default: 0.2)\"',\n )\n parser.add_argument(\n '-x',\n '--rundir',\n required=False,\n default=config.plot_cn.rundir,\n type=str,\n help='Running directory (default: current directory)',\n )\n parser.add_argument(\n '-b',\n '--baseCN',\n required=False,\n default=config.plot_cn.basecn,\n type=int,\n help='Base copy number (default: inferred from tumor ploidy)',\n )\n parser.add_argument(\n '-sC',\n '--figsizeclones',\n required=False,\n default=config.plot_cn.figsizeclones,\n type=str,\n help='Size of clone plots in the form \"(X-SIZE, Y-SIZE)\"',\n )\n parser.add_argument(\n '-sP',\n '--figsizecn',\n required=False,\n default=config.plot_cn.figsizecn,\n type=str,\n help='Size of CN plots in the form \"(X-SIZE, Y-SIZE)\"',\n )\n parser.add_argument(\n '-sG',\n '--figsizegrid',\n required=False,\n default=config.plot_cn.figsizegrid,\n type=str,\n help='Size of grid plots in the form \"(X-SIZE, Y-SIZE)\"',\n )\n parser.add_argument(\n '-rC',\n '--resolutionclones',\n required=False,\n default=config.plot_cn.resolutionclones,\n type=int,\n help='Number of bins to merge together for plotting clone profiles (default: 100)\"',\n )\n parser.add_argument(\n '-rP',\n '--resolutioncn',\n required=False,\n default=config.plot_cn.resolutioncn,\n type=int,\n help='Number of bins to merge together for plotting proportions (default: 500)\"',\n )\n parser.add_argument(\n '-rG',\n '--resolutiongrid',\n required=False,\n default=config.plot_cn.resolutiongrid,\n type=int,\n help='Number of bins to merge together in grids (default: 100)\"',\n )\n parser.add_argument(\n '-e',\n '--threshold',\n required=False,\n default=config.plot_cn.threshold,\n type=float,\n help='Threshold used to classify a tumor into either diploid or tetraploid (default: 3.0)\"',\n )\n parser.add_argument(\n '--ymax',\n required=False,\n default=config.plot_cn.ymax,\n type=int,\n help='Maximum values in y-axis (default: automatically inferred)\"',\n )\n parser.add_argument(\n '--ymin',\n required=False,\n default=config.plot_cn.ymin,\n type=int,\n help='Minimum values in y-axis (default: automatically inferred)\"',\n )\n parser.add_argument(\n '--clonepalette',\n required=False,\n default=config.plot_cn.clonepalette,\n type=str,\n help='Palette for coloring the clones among Set1, Set2, Set3, Paired (default: Set1)\"',\n )\n parser.add_argument(\n '--linkage',\n required=False,\n default=config.plot_cn.linkage,\n type=str,\n help=(\n 'Linkage method used for clustering (default: single, available (single, complete, average, weighted, '\n 'centroid, median, ward) from SciPy)\"'\n ),\n )\n parser.add_argument('-V', '--version', action='version', version=f'%(prog)s {__version__}')\n args = parser.parse_args(args)\n\n if len(args.INPUT.split()) == 0:\n raise ValueError(error('Please specify at least one sample as input!'))\n if args.patientnames is None:\n patientnames = {fil: os.path.basename(fil) for fil in args.INPUT.split()}\n 
else:\n patientnames = {f: n for f, n in zip(args.INPUT.split(), args.patientnames.split())}\n if len(args.INPUT.split()) != len(set(patientnames.values())):\n raise ValueError(error('Multiple patients have the same name but they should unique!'))\n if args.figsizeclones is not None:\n figsizeclones = to_tuple(args.figsizeclones, error_message='Wrong format of figsizeclones!')\n if args.figsizecn is not None:\n figsizecn = to_tuple(args.figsizecn, error_message='Wrong format of figsizecn!')\n if args.figsizegrid is not None:\n figsizegrid = to_tuple(args.figsizegrid, error_message='Wrong format of figsizegrid!')\n\n if not os.path.isdir(args.rundir):\n raise ValueError(error('Running directory does not exist!'))\n if not 0.0 <= args.minu <= 1.0:\n raise ValueError(error('The minimum proportion for subclonal CNAs must be in [0, 1]!'))\n if args.baseCN is not None and args.baseCN < 2:\n raise ValueError(error('Base CN must be greater or equal than 2!'))\n if args.resolutionclones is not None and args.resolutionclones < 1:\n raise ValueError(error('Resolution must be greater than 1!'))\n if args.resolutioncn is not None and args.resolutioncn < 1:\n raise ValueError(error('Resolution must be greater than 1!'))\n if args.resolutiongrid is not None and args.resolutiongrid < 1:\n raise ValueError(error('Resolution must be greater than 1!'))\n if args.threshold < 0:\n raise ValueError(error('Threshold must be positive!'))\n if args.linkage not in {\n 'single',\n 'complete',\n 'average',\n 'weighted',\n 'centroid',\n 'median',\n 'ward',\n }:\n raise ValueError(error('Unknown linkage method!'))\n\n if args.clonepalette == 'Set1':\n pal = plt.cm.Set1\n elif args.clonepalette == 'Set2':\n pal = plt.cm.Set2\n elif args.clonepalette == 'Set3':\n pal = plt.cm.Set3\n elif args.clonepalette == 'Paired':\n pal = plt.cm.Paired\n else:\n raise ValueError(error('Unknown clone palette!'))\n\n return {\n 'input': args.INPUT.split(),\n 'names': patientnames,\n 'rundir': args.rundir,\n 'minu': args.minu,\n 'base': args.baseCN,\n 'clonefigsize': figsizeclones,\n 'propsfigsize': figsizecn,\n 'clusterfigsize': figsizegrid,\n 'profileres': args.resolutionclones,\n 'cnres': args.resolutioncn,\n 'clusterres': args.resolutiongrid,\n 'threshold': args.threshold,\n 'linkage': args.linkage,\n 'ymax': args.ymax,\n 'ymin': args.ymin,\n 'clonepalette': pal,\n }", "def main():\r\n\r\n try:\r\n argv = flags.parse_args(sys.argv)\r\n logging.setup(\"traffic\")\r\n except cfg.ConfigFilesNotFoundError:\r\n cfgfile = FLAGS.config_file[-1] if FLAGS.config_file else None\r\n if cfgfile and not os.access(cfgfile, os.R_OK):\r\n st = os.stat(cfgfile)\r\n print _(\"Could not read %s. 
Re-running with sudo\") % cfgfile\r\n try:\r\n os.execvp('sudo', ['sudo', '-u', '#%s' % st.st_uid] + sys.argv)\r\n except Exception:\r\n print _('sudo failed, continuing as if nothing happened')\r\n\r\n print _('Please re-run traffic-manage as root.')\r\n sys.exit(2)\r\n\r\n script_name = argv.pop(0)\r\n if len(argv) < 1:\r\n print (_(\"\\nOpenStack Traffic version: %(version)s (%(vcs)s)\\n\") %\r\n {'version': version.version_string(),\r\n 'vcs': version.version_string_with_vcs()})\r\n print script_name + \" category action [<args>]\"\r\n print _(\"Available categories:\")\r\n for k, _v in CATEGORIES:\r\n print \"\\t%s\" % k\r\n sys.exit(2)\r\n category = argv.pop(0)\r\n if category == \"bash-completion\":\r\n if len(argv) < 1:\r\n print \" \".join([k for (k, v) in CATEGORIES])\r\n else:\r\n query_category = argv.pop(0)\r\n matches = lazy_match(query_category, CATEGORIES)\r\n # instantiate the command group object\r\n category, fn = matches[0]\r\n command_object = fn()\r\n actions = methods_of(command_object)\r\n print \" \".join([k for (k, v) in actions])\r\n sys.exit(0)\r\n matches = lazy_match(category, CATEGORIES)\r\n # instantiate the command group object\r\n category, fn = matches[0]\r\n command_object = fn()\r\n actions = methods_of(command_object)\r\n if len(argv) < 1:\r\n if hasattr(command_object, '__call__'):\r\n action = ''\r\n fn = command_object.__call__\r\n else:\r\n print script_name + \" category action [<args>]\"\r\n print _(\"Available actions for %s category:\") % category\r\n for k, _v in actions:\r\n print \"\\t%s\" % k\r\n sys.exit(2)\r\n else:\r\n action = argv.pop(0)\r\n matches = lazy_match(action, actions)\r\n action, fn = matches[0]\r\n\r\n # For not decorated methods\r\n options = getattr(fn, 'options', [])\r\n\r\n usage = \"%%prog %s %s <args> [options]\" % (category, action)\r\n parser = optparse.OptionParser(usage=usage)\r\n for ar, kw in options:\r\n parser.add_option(*ar, **kw)\r\n (opts, fn_args) = parser.parse_args(argv)\r\n fn_kwargs = vars(opts)\r\n\r\n for k, v in fn_kwargs.items():\r\n if v is None:\r\n del fn_kwargs[k]\r\n elif isinstance(v, basestring):\r\n fn_kwargs[k] = v.decode('utf-8')\r\n else:\r\n fn_kwargs[k] = v\r\n\r\n fn_args = [arg.decode('utf-8') for arg in fn_args]\r\n\r\n # call the action with the remaining arguments\r\n try:\r\n fn(*fn_args, **fn_kwargs)\r\n rpc.cleanup()\r\n sys.exit(0)\r\n except TypeError:\r\n print _(\"Possible wrong number of arguments supplied\")\r\n print fn.__doc__\r\n parser.print_help()\r\n raise\r\n except Exception:\r\n print _(\"Command failed, please check log for more info\")\r\n raise", "def usage():\n\n if len(sys.argv) != 3:\n print \"Usage: ./dnsclient.py <DNS name/IP> <query type>\"\n exit(0)", "def parse_cmd_line_options():\n parser = argparse.ArgumentParser(prog='runner.py', description=\"Measure and view your internet speeds\")\n subparsers = parser.add_subparsers(help='help for subcommand', dest=\"command\")\n\n # create the parser for the \"run\" command\n run_parser = subparsers.add_parser('run', description=\"Measure internet speed periodically by setting frequency and duration.\",\n epilog=\"Both frequency and duration should be formatted as follows \\\n ----------- interger [sec|min|hour|day|] ex) 5 min\")\n run_parser.add_argument(\"-f\", \"--frequency\", nargs=2, required=True,\n help='How often should we run.')\n run_parser.add_argument(\"-d\", \"--duration\", nargs=2, default=[24, \"hour\"],\n help=\"How long should we run. 
(default=%(default)s)\")\n run_parser.add_argument(\"-resultfile\", default=\"speedresults.json\",\n help=\"Location where results shouls be saved (default=%(default)s)\")\n run_parser.add_argument(\"-configfile\")\n run_parser.add_argument(\"-pidfile\")\n\n # create the parser for the \"draw\" command\n draw_parser = subparsers.add_parser('draw', help='help for command_2')\n draw_parser.add_argument(\"-resultfile\", default=\"speedresults.json\", help=\"Choose results file to draw. (default=%(default)s)\")\n draw_parser.add_argument(\"-type\", default=\"pyplot\", choices=[\"pyplot\", \"plotly\"],\n help=\"The type of graph to display (default=%(default)s)\")\n draw_parser.add_argument(\"-filter\", nargs=2, help='Filter data on specific key value pairs')\n draw_parser.add_argument(\"-options\", default=\"download\", choices=[\"download\", \"upload\"],\n help='Graph upload or download speeds. (default=%(default)s)')\n return parser.parse_args()", "def printHelp():\n print(\"amqWorkApiMass.py -n <msgcnt> -b <body> -m <headers> -s <path/to/bodyandheaders>\")", "def main() -> None:\n # get argparse object and parse args\n parser = get_parser()\n args = parser.parse_args()\n\n # define the log format used for stdout depending on the requested loglevel\n if args.loglevel == \"DEBUG\":\n console_logformat = \"%(asctime)s %(levelname)s mobile_modem_exporter.%(funcName)s():%(lineno)i: %(message)s\"\n else:\n console_logformat = (\n \"%(asctime)s %(levelname)s mobile_modem_exporter %(message)s\"\n )\n\n # configure the log format used for console\n logging.basicConfig(\n level=getattr(logging, str(args.loglevel)),\n format=console_logformat,\n datefmt=\"%Y-%m-%d %H:%M:%S %z\",\n )\n\n logger.debug(\"Initialising serial ports and Prometheus objects...\")\n registry = CollectorRegistry()\n # mobile_modem_up\n up = Gauge(\n \"mobile_modem_up\",\n \"This metric is always 1 if the mobile_modem scrape worked, 0 if there was a problem getting info from one or more modems.\",\n registry=registry,\n )\n\n # mobile_modem_build_info\n build_info = Info(\n \"mobile_modem_build\",\n \"Information about the mobile_modem_exporter itself.\",\n registry=registry,\n )\n build_info.info(\n {\"version\": __version__, \"pipeserial_version\": PipeSerial.__version__}\n )\n\n # mobile_modem_info\n modem_info = Info(\n \"mobile_modem\",\n \"Information about the mobile modem being monitored, including device path, manufacturer, model, revision and serial number.\",\n [\"device\"],\n registry=registry,\n )\n\n # mobile_modem_atcsq_rssi\n modem_rssi = Gauge(\n \"mobile_modem_atcsq_rssi\",\n \"RSSI for the mobile modem as returned by AT+CSQ\",\n [\"device\"],\n registry=registry,\n )\n\n # mobile_modem_ber\n modem_ber = Gauge(\n \"mobile_modem_atcsq_ber\",\n \"BER for the mobile modem as returned by AT+CSQ\",\n [\"device\"],\n registry=registry,\n )\n\n # initialise pipeserial objects\n devices = []\n logger.info(\"Initialising serial ports...\")\n for device in args.SERIALDEVICE:\n logger.debug(f\"Opening serial port {device} and getting modem info...\")\n pipe = PipeSerial(serialport=device)\n pipe.open()\n devices.append(pipe)\n\n # get serial device info\n output = pipe.run(\"ATI\", [\"OK\"])\n manufacturer, model, revision = parse_ati(output)\n\n # get serial device serial number\n output = pipe.run(\"AT+GSN\", [\"OK\"])\n serial = parse_atgsn(output)\n\n # set mobile_modem_info for this device\n modem_info.labels(device=device).info(\n {\n \"manufacturer\": manufacturer,\n \"model\": model,\n \"revision\": revision,\n 
\"serial\": serial,\n }\n )\n\n # init done, start loop\n logger.info(\n f\"Entering main loop, writing metrics for modems {args.SERIALDEVICE} to {args.PROMPATH}, sleeping {args.sleep} seconds between runs...\"\n )\n while True:\n # start out optimistic!\n up.set(1)\n for device in devices:\n logger.debug(f\"Getting CSQ from device: {device.ser.name}\")\n output = device.run(\"AT+CSQ\", [\"OK\"])\n try:\n rssi, ber = parse_atcsq(output)\n except Exception:\n logger.exception(\"Got an exception while parsing AT+CSQ output\")\n # set up to 0 for this scrape\n up.set(0)\n continue\n logger.debug(f\"parsed AT+CSQ output to rssi {rssi} and BER {ber}\")\n modem_rssi.labels(device=device.ser.name).set(rssi)\n modem_ber.labels(device=device.ser.name).set(ber)\n\n # output metrics to textfile exporter path\n write_to_textfile(args.PROMPATH, registry)\n logger.debug(f\"Sleeping {args.sleep} seconds before next run...\")\n time.sleep(args.sleep)", "def measure(self,command_exe, command_args, measure_out):\n pass", "def usage():", "def usage():", "def reports_cli():", "def _memtop_setup_parser(parser):\n parser.add_argument('file', nargs=1, help='Python script to check for memory usage.')\n parser.add_argument('-o', default=None, action='store', dest='outfile',\n help='Name of output file. By default, output goes to stdout.')\n parser.add_argument('-l', '--limit', action='store', type=int, default=20, dest='limit',\n help='Limit the number of lines in the output.')", "def _usage_options_example(self):\n pass", "def show_mem(cmd, cnt, args):\n if cpu is None:\n log(\"Load program first\") \n return\n elif len(cpu.memory) == 0:\n log(\"Load program first\") \n return \n chunk = 0\n chunk_count = len(cpu.memory)\n while chunk < chunk_count: \n chunk_start = cpu.memory[chunk][MEMADDR]\n chunk_end = chunk_start + cpu.memory[chunk][MEMSIZE] \n log(\"{:d} {:#x}..{:#x}\".format(chunk, chunk_start, chunk_end)) \n chunk += 1\n if machine == \"ARM\":\n if len(cpu.high_memory) != 0:\n log(\"High memory\")\n for addr in sorted(cpu.high_memory):\n log(\"{:#x}\".format(addr))", "def do_list(self, cmd):\n\t\tif self.client.client is None:\n\t\t\tself.stdout.write(\"Error: Not connected!\\n\")\n\t\t\treturn\n\t\tif not self.ingroup:\n\t\t\tself.stdout.write(\"Error: Not in a group!\\n\")\n\t\t\treturn\n\t\tstats = self.client.list()\n\t\tself.stdout.write(\n\t\t\t\" Type |From Pid |From Port| To Pid | To Port | Recv | Send \\n\"\n\t\t\t)\n\t\tself.stdout.write(\"---------+\"*6+\"---------\\n\")\n\t\tfor l in stats:\n\t\t\ttext = (\n\t\t\t\t(\"{:>9}|\"*7)[:-1]\n\t\t\t\t).format(*l).replace(\"None\", \"----\")\n\t\t\tself.stdout.write(text+\"\\n\")\n\t\tself.stdout.write(\"\\n\")", "def server_stats():\n out = subprocess.check_output(cmd_preamble + [\"admin\", \"stats\"])\n return out.decode()", "def portstats64show(obj, content):\n global _portstats_to_api\n\n i, x, chassis_obj = 0, len('portstats64show'), obj.r_chassis_obj()\n while len(content) > i:\n\n # Get the port object\n buf = gen_util.remove_duplicate_char(content[i].replace('\\t', ' '), ' ')\n if len(buf) == 0:\n i += 1\n continue\n if len(buf) < x or buf[0:x] != 'portstats64show':\n break\n index = int(buf.split(' ')[1])\n port_obj = brcddb_port.port_obj_for_index(chassis_obj, int(buf.split(' ')[1]))\n if port_obj is None:\n brcdapi_log.exception('Could not find port matching: ' + buf, echo=False) # Just so it gets in the log\n raise Exception('Could not find port matching: ' + buf)\n port_stats_d = port_obj.r_get(brcdapi_util.stats_uri)\n if port_stats_d is 
None:\n port_stats_d = dict()\n port_obj.s_new_key(brcdapi_util.stats_uri, port_stats_d)\n\n # Parse the port statistics\n i += 1\n while len(content) > i and len(content[i]) > 0:\n buf = gen_util.remove_duplicate_char(content[i].replace('\\t', ' '), ' ')\n cl = buf.split(' ')\n key = _portstats_to_api.get(cl[0])\n if key is not None:\n if 'top_int :' in buf:\n i += 1\n lv = int(gen_util.remove_duplicate_char(content[i].replace('\\t', ' ').strip().split(' ')[0], ' '))\n v = int('{:x}'.format(int(cl[1])) + '{:08x}'.format(lv), 16)\n else:\n v = int(cl[1])\n port_stats_d.update({key: v})\n i += 1\n\n return i", "def process_meter_message(self, d):\n dpid = int(d.get(\"dpid\", 0))\n dp = self.dpset.get(dpid)\n if not dp:\n return \"Datapath does not exist!\"\n\n ofproto = dp.ofproto\n parser = dp.ofproto_parser\n\n command = {\n 'add': ofproto.OFPMC_ADD,\n 'mod': ofproto.OFPMC_MODIFY,\n 'del': ofproto.OFPMC_DELETE,\n }\n cmd = command.get(d[\"operation\"], ofproto.OFPMC_ADD)\n\n meter_id = d[\"meter_id\"]\n\n flags = 0\n bands = []\n if \"flags\" in d: # Ryu's format\n print(d['flags'])\n for f in d['flags']:\n flags += 0x01 if f == 'KBPS' else 0\n flags += 0x02 if f == 'PKTPS' else 0\n flags += 0x04 if f == 'BURST' else 0\n flags += 0x08 if f == 'STATS' else 0\n\n for band in d[\"bands\"]:\n if band['type'] == 'DROP':\n bands += [parser.OFPMeterBandDrop(rate=band['rate'],\n burst_size=band['burst_size'])]\n elif band['type'] == 'DSCP_REMARK':\n bands += [parser.OFPMeterBandDscpRemark(rate=band['rate'],\n burst_size=band['burst_size'], prec_level=band['prec_level'])]\n\n else: # FlowManager's format\n flags += 0x01 if d['OFPMF_KBPS'] else 0\n flags += 0x02 if d['OFPMF_PKTPS'] else 0\n flags += 0x04 if d['OFPMF_BURST'] else 0\n flags += 0x08 if d['OFPMF_STATS'] else 0\n\n # Flags must have KBPS or PKTPS\n flags = flags if (flags & 0x03) else (flags | 0x01)\n\n for band in d[\"bands\"]:\n #mtype = type_convert.get(band[0])\n if band[0] == 'DROP':\n bands += [parser.OFPMeterBandDrop(rate=band[1],\n burst_size=band[2])]\n elif band[0] == 'DSCP_REMARK':\n bands += [parser.OFPMeterBandDscpRemark(rate=band[1],\n burst_size=band[2], prec_level=band[3])]\n\n # TODO: catch some errors\n meter_mod = parser.OFPMeterMod(dp, cmd, flags, meter_id, bands)\n try:\n dp.send_msg(meter_mod)\n except KeyError as e:\n return e.__repr__()\n except Exception as e:\n return e.__repr__()\n\n return \"Message sent successfully.\"", "def test_get_host_configuration_metrics(self):\n pass", "def get_kong_node_usage_metrics(opts):\n\n url = \"{0}/status\".format(opts['base_url'])\n\n r = requests.get(url)\n try:\n r.raise_for_status()\n except requests.exceptions.RequestException as e:\n logging.debug(\"http response body - %s\", r.text)\n logging.error(\"An exception occurred: (%s)\", e)\n sys.exit(2)\n\n print r.text\n\n return True", "def main(docopt_args):\n\n # Notice, no checking for -h, or --help is written here.\n logger = logging.getLogger()\n logger.debug('Docopt Dictionary: %s', pp.pformat(args))\n # docopt will automagically check for it and use your usage string.\n\n if docopt_args['init']:\n get_switchports_d(docopt_args['<initcsv>'],\n docopt_args['--CONFDIR'],\n docopt_args['--CONFILE']\n )\n elif docopt_args['mark']:\n mark_switchports_final(docopt_args['<finalcsv>'],\n docopt_args['--CONFDIR'],\n docopt_args['--CONFILE'])\n elif docopt_args['move']:\n move_interfaces( docopt_args['--RUNDIR'],\n docopt_args['--RUNSHEET'],\n docopt_args['--CONFDIR'],\n docopt_args['--CONFILE'],\n docopt_args['<source>'],\n 
docopt_args['<destination>'])\n elif docopt_args['update']:\n update_switchports( docopt_args['<updatecsv>'],\n docopt_args['--CONFDIR'],\n docopt_args['--CONFILE'],\n docopt_args['--UPDATEDIR'],\n docopt_args['--UPDATEFILE'])\n elif docopt_args['final']:\n finalize( docopt_args['--RUNDIR'],\n docopt_args['--RUNSHEET'],\n docopt_args['--CONFDIR'],\n docopt_args['--CONFILE'],\n docopt_args['<source>'],\n docopt_args['<destination>'])\n\n # load_switchports()", "def _display_cuda_devices():\n\n cuda_query_output = subprocess.run(\"nvidia-smi --query-gpu=gpu_uuid,gpu_name,compute_mode --format=csv\", shell=True, capture_output=True, text=True)\n # Check if command worked\n if cuda_query_output.returncode == 0:\n # Split by line jump and comma\n cuda_devices_list = [entry for entry in cuda_query_output.stdout.splitlines()]\n logger.debug(f\"CUDA devices available: {*cuda_devices_list,}\")\n # We only support \"Default\" and not \"Exclusive_Process\" for the compute mode\n if \"Default\" not in cuda_query_output.stdout:\n logger.warning(f\"GPU in 'Exclusive_Process' mode (or Prohibited), one context is allowed per device. This may prevent some openmmtools features from working. GPU must be in 'Default' compute mode\")\n # Handel the case where the command had some error\n else:\n logger.debug(f\"nvidia-smi command failed: {cuda_query_output.stderr}, this is expected if there is no GPU available\")", "def phast_cmmd(self):\n temp = '{prog} -R {rho} -C {ecov} -E {elen} -N {chrom} -i MAF {maf} {model} > {wig}\\n'.format(**self.dict)\n return temp.format(fnum=self.fnum)", "def handle_program_options():\n parser = argparse.ArgumentParser(description=\"Gather numeric information \\\n about the processed sequence data in an \\\n MG-RAST project.\")\n parser.add_argument('project_id',\n help=\"The project identifier (MG-RAST ID)\")\n parser.add_argument('-a', '--auth_key',\n help=\"An MG-RAST API authorization key. This is \\\n necessary to access projects marked as private.\")\n parser.add_argument('-g', '--group_by', action='append',\n help=\"A string that matches some part of the \\\n 'Metagenome Name' field. All matching project \\\n metagenomes will be grouped by this identifier \\\n and their stats will be summed. This option can \\\n be specified multiple times to create multiple \\\n groups. All non-matching metagenomes will \\\n appear separately in the table. NOTE: \\\n Strings will be matched longest first. This \\\n allows for matching names that might be a \\\n substring of another match. For example: -g S \\\n -g NS. The name field will first be matched \\\n against the longest string (NS) first and then \\\n each smaller string in order.\")\n parser.add_argument('-o', '--output_filename', default='meta_stats.txt',\n help=\"The name of the file the project summary \\\n information will be written to.\")\n\n# parser.add_argument('-v', '--verbose', action='store_true')\n\n return parser.parse_args()" ]
[ "0.6124976", "0.59950477", "0.5756093", "0.5687688", "0.55233353", "0.5309227", "0.52750784", "0.5252932", "0.5208929", "0.5170246", "0.5106128", "0.50764817", "0.50519806", "0.50233275", "0.5003854", "0.4984393", "0.4930946", "0.49051604", "0.49042228", "0.49029616", "0.48779762", "0.48247278", "0.48189783", "0.4812931", "0.47986087", "0.47940293", "0.47751278", "0.477329", "0.4772547", "0.47691017", "0.47558922", "0.47550327", "0.4749837", "0.4744861", "0.4738837", "0.4735084", "0.47337285", "0.47245824", "0.47226498", "0.47204867", "0.47172242", "0.46990496", "0.46926317", "0.4684202", "0.4681644", "0.4681492", "0.4678017", "0.4674168", "0.4671851", "0.46612823", "0.46600017", "0.46588728", "0.46527466", "0.46512514", "0.4612259", "0.4610711", "0.46040756", "0.46030843", "0.4602735", "0.45915303", "0.4584523", "0.45812523", "0.4578998", "0.45739037", "0.45734364", "0.45677122", "0.4565681", "0.45537847", "0.45498973", "0.45497376", "0.45489854", "0.45477465", "0.45477465", "0.45433357", "0.45419917", "0.4540211", "0.45370603", "0.45355526", "0.4531167", "0.45256028", "0.4524551", "0.45241055", "0.45209587", "0.4517887", "0.4515977", "0.4515977", "0.4515809", "0.45142233", "0.45127538", "0.4507055", "0.4503768", "0.45036703", "0.44986698", "0.44867718", "0.4486672", "0.4486612", "0.4485056", "0.4483058", "0.44799778", "0.44766065" ]
0.70290184
0
Report usage metrics for the NICs of partitions of CPCs in DPM mode. In addition to the command-specific options shown in this help text, the general options (see 'zhmc help') can also be specified right after the 'zhmc' command name.
def metrics_nic(cmd_ctx, cpc, partition, nic, **options): cmd_ctx.execute_cmd( lambda: cmd_metrics_nic(cmd_ctx, cpc, partition, nic, options))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def metrics_cpc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_cpc(cmd_ctx, cpc, options))", "def metrics_networkport(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_networkport(cmd_ctx, cpc, adapter, options))", "def metrics_channel(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_channel(cmd_ctx, cpc, options))", "def do_hostinfo(self, args):\n host = opts = None\n if args:\n args = args.split()\n host = args.pop()\n\n if not host:\n print('Usage: hostinfo [-cdmu] host_name_or_ip')\n print(' uptime and load stats returned if no options specified')\n return\n\n try:\n ip = socket.gethostbyname(host)\n except socket.gaierror:\n print('cannot resolve', host, file=sys.stderr)\n return\n\n opts = []\n while args:\n arg = args.pop(0)\n if arg.startswith('--'):\n if arg == '--cpu':\n opts.append('c')\n elif arg == '--disk':\n opts.append('d')\n elif arg == '--memory':\n opts.append('m')\n elif arg == '--uptime':\n opts.append('u')\n else:\n print('unrecognized option:', arg, file=sys.stderr)\n return\n else:\n if arg[0] == '-':\n for ch in arg[1:]:\n if ch in ('cdmu') and ch not in opts:\n opts.append(ch)\n else:\n print('unrecognized option:', ch, file=sys.stderr)\n return\n\n stats = self._qm.get_host_stats(ip)\n\n if not opts:\n # Get uptime and load averages.\n up = stats['uptime']\n load = stats['cpu_load']\n print('Up for %s days, %s hours, %s minutes, '\n 'load averages: %s, %s, %s'\n % (up['days'], up['hours'], up['minutes'], load['one'],\n load['five'], load['fifteen']))\n return\n\n all_stats = []\n for opt in opts:\n if opt == 'd':\n # Get disk usage.\n disks = stats['disk_usage']\n st = ['Disk Usage:']\n for mount, disk_info in disks.viewitems():\n st.append(' Usage for: %s' % mount)\n for k, v in disk_info.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'c':\n # Get CPU load.\n load_stats = stats['cpu_load']\n st = ['CPU Load Average:']\n st.append(' last one minute: %s' % load_stats['one'])\n st.append(' last five minutes: %s' % load_stats['five'])\n st.append(' last fifteen minutes: %s' % load_stats['fifteen'])\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'm':\n # Get Memory Usage.\n memory_usage = stats['memory_usage']\n st = ['Memory usage:']\n for k, v in memory_usage.viewitems():\n st.append(' %s: %s' % (k, v))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n elif opt == 'u':\n # Get uptime.\n up = stats['uptime']\n st = ['Uptime:']\n st.append(' Up for %s days, %s hours and %s minutes'\n % (up['days'], up['hours'], up['minutes']))\n all_stats.append('\\n'.join(st))\n all_stats.append('')\n\n print('\\n'.join(all_stats))", "def metrics_proc(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_proc(cmd_ctx, cpc, options))", "def main():\n\n args = parse_args()\n metric_sender = MetricSender(verbose=args.verbose, debug=args.debug)\n\n discovery_key_disk = 'disc.disk'\n interval = 3\n pcp_disk_dev_metrics = ['disk.dev.total', 'disk.dev.avactive']\n item_prototype_macro_disk = '#OSO_DISK'\n item_prototype_key_tps = 'disc.disk.tps'\n item_prototype_key_putil = 'disc.disk.putil'\n\n disk_metrics = pminfo.get_sampled_data(pcp_disk_dev_metrics, interval, 2)\n\n pcp_metrics_divided = {}\n for metric in pcp_disk_dev_metrics:\n pcp_metrics_divided[metric] = {k: v for k, v in disk_metrics.items() if metric in k}\n\n # do TPS checks; use disk.dev.total\n filtered_disk_totals = 
clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[0]],\n pcp_disk_dev_metrics[0] + '.')\n\n # Add dynamic items\n metric_sender.add_dynamic_metric(discovery_key_disk, item_prototype_macro_disk, filtered_disk_totals.keys())\n\n # calculate the TPS and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n disk_tps = (totals[1] - totals[0]) / interval\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_tps, disk): disk_tps})\n\n # do % Util checks; use disk.dev.avactive\n filtered_disk_totals = clean_up_metric_dict(pcp_metrics_divided[pcp_disk_dev_metrics[1]],\n pcp_disk_dev_metrics[1] + '.')\n\n # calculate the % Util and add them to the ZaggSender\n for disk, totals in filtered_disk_totals.iteritems():\n total_active = (float)(totals[1] - totals[0]) / 1000.0\n putil = 100 * total_active / interval\n\n metric_sender.add_metric({'%s[%s]' % (item_prototype_key_putil, disk): putil})\n\n metric_sender.send_metrics()", "def metrics_partition(cmd_ctx, cpc, partition, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_partition(cmd_ctx, cpc, partition, options))", "def metrics_adapter(cmd_ctx, cpc, adapter, **options):\n cmd_ctx.execute_cmd(\n lambda: cmd_metrics_adapter(cmd_ctx, cpc, adapter, options))", "def metrics_crypto(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_crypto(cmd_ctx, cpc, options))", "def print_help(self):\r\n\t\ttext = \"\\tName: ml_scikit_OPTICS\"\r\n\t\ttext += \"\\n\\t\\tThis machine learning plugin uses scikit-learn's OPTICS algorithm.\\n\"\r\n\t\ttext += \"\\n\\t\\tOptional Parameters:\"\r\n\t\ttext += \"\\n\\t\\t\\tOPTICS_skip_normalization: Do NOT perform normalization (scaling) of data, skip this step.\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_eps: Specify eps parameter (default is 1.0).\"\r\n\t\ttext += \"\\n\\t\\t\\OPTICS_min_samples: Specify min_samples parameter (default is 5).\"\r\n#\r\n# OPTICS (with memory complexity n) is an alternative to DBSCAN (with memory complexity n^2)\r\n# which has time complexity n^2 in general with the default max_eps = np.inf. 
\r\n# We will set max_eps = eps to reduce the run-time.\r\n#\r\n\t\treturn text", "def stat_cuda(msg: str) -> None:\n print(f'-- {msg:<35} allocated: %dM, max allocated: %dM, cached: %dM, max cached: %dM' % (\n torch.cuda.memory_allocated() / 1024 / 1024,\n torch.cuda.max_memory_allocated() / 1024 / 1024,\n torch.cuda.memory_cached() / 1024 / 1024,\n torch.cuda.max_memory_cached() / 1024 / 1024\n ))", "def _cmd_metrics(args):\n if (\n len(args.cnarrays) > 1\n and args.segments\n and len(args.segments) > 1\n and len(args.cnarrays) != len(args.segments)\n ):\n raise ValueError(\n \"Number of coverage/segment filenames given must be \"\n \"equal, if more than 1 segment file is given.\"\n )\n\n cnarrs = map(read_cna, args.cnarrays)\n if args.segments:\n args.segments = map(read_cna, args.segments)\n table = metrics.do_metrics(cnarrs, args.segments, args.drop_low_coverage)\n write_dataframe(args.output, table)", "def metrics_env(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_env(cmd_ctx, cpc, options))", "def show_help(argv=None):\n if argv:\n if \"list_datasets\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"list_datasets\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm ls\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"search_datasets\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"search_datasets\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm search <keyword>\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"show_mounts\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"show_mounts\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm ps\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"mount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"mount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm mount <dataset_name> [<mount_path>]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"mmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"mmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm mmount <dataset_name> [<dataset_name> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"unmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"unmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm unmount <mount_id> [<cleanup_flag>]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"munmount\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"munmount\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm munmount <mount_id> [<mount_id> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n elif \"clean\" in argv:\n karr, _, desc = COMMANDS_TABLE[\"clean\"]\n sdm_util.print_message(\"command : %s\" % (\" | \".join(karr)))\n sdm_util.print_message(\"usage : sdm clean\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(desc)\n return 0\n else:\n sdm_util.print_message(\"Unrecognized command\")\n return 1\n else:\n sdm_util.print_message(\"command : sdm <COMMAND> [<COMMAND_SPECIFIC_ARGS> ...]\")\n sdm_util.print_message(\"\")\n sdm_util.print_message(\"Available 
Commands\")\n\n tbl = PrettyTable()\n tbl.field_names = [\"COMMAND\", \"DESCRIPTION\"]\n for cmd in COMMANDS:\n command, _, desc = cmd\n command_str = \" | \".join(command)\n tbl.add_row([command_str, desc])\n\n sdm_util.print_message(tbl)\n sdm_util.print_message(\"\")\n return 0", "def show(self):\n prev_queries = 0\n prev_cpu_sys = 0\n prev_cpu_user = 0\n \n lines = {\n \"Uptime (seconds)\": \"--\",\n \"Number of queries\": \"--\",\n \"Query per second\": \"--\",\n \"ACL drops\": \"--\",\n \"Dynamic drops\": \"--\",\n \"Rule drops\": \"--\",\n \"CPU Usage (%s)\": \"--\",\n \"Cache hitrate\": \"--\"\n }\n\n while True:\n try:\n # get stats from dnsdist\n stats = Statistics(console=self.console)\n global_stats = stats[\"global\"]\n \n qps = int(global_stats[\"queries\"]) - prev_queries\n prev_queries = int(global_stats[\"queries\"])\n cpu = (int(global_stats[\"cpu-sys-msec\"])+int(global_stats[\"cpu-user-msec\"]) - prev_cpu_sys - prev_cpu_user) / 10\n prev_cpu_sys = int(global_stats[\"cpu-sys-msec\"])\n prev_cpu_user = int(global_stats[\"cpu-user-msec\"])\n \n lines[\"Uptime (seconds)\"] = global_stats[\"uptime\"]\n lines[\"Number of queries\"] = global_stats[\"queries\"]\n lines[\"Query per second\"] = qps\n lines[\"CPU Usage (%s)\"] = cpu\n lines[\"ACL drops\"] = global_stats[\"acl-drops\"]\n lines[\"Rule drops\"] = global_stats[\"rule-drop\"]\n lines[\"Cache hitrate\"] = global_stats[\"cache-hits\"]\n lines[\"Dynamic drops\"] = global_stats[\"dyn-blocked\"]\n\n # reprint the lines \n sys.stdout.write(\"\\033[1mDashboard for dnsdist\\033[0m\\n\")\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Global:\\n\")\n for k,v in lines.items():\n sys.stdout.write(\"\\t%s: %s\\n\" % (k,v))\n sys.stdout.write(\"Backends:\\n\")\n for s in stats[\"backends\"]:\n if not len(s[\"name\"]):\n s[\"name\"] = \"--\"\n if not len(s[\"pools\"]):\n s[\"pools\"] = \"--\"\n sys.stdout.write(\"\\t#%s / %s / %s / %s\\n\" % (s[\"#\"],s[\"address\"],s[\"name\"],s[\"pools\"]) )\n sys.stdout.write(\"\\t\\tNumber of queries: %s\\n\" % s[\"queries\"])\n sys.stdout.write(\"\\t\\tQuery per second: %s\\n\" % s[\"qps\"])\n sys.stdout.write(\"\\t\\tNumber of drops: %s\\n\" % s[\"drops\"])\n sys.stdout.write(\"\\n\")\n sys.stdout.write(\"Ctrl+C to exit\\n\")\n \n time.sleep(1)\n \n \n # move up cursor and delete whole line\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for k,v in lines.items():\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n for s in stats[\"backends\"]:\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\") \n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n sys.stdout.write(\"\\x1b[1A\\x1b[2K\")\n \n del stats\n except KeyboardInterrupt:\n break", "def print_help():\n print \"\"\"\nMeasure Operating System Performance (mosp)\n-------------------------------------------\n\nUse this program to measure and report on operating system\nperformance.\n\nThis code measures operating system performance,\nincluding CPU, memory, disk and network, and\noutputs stats to screen and optionally to file\ntoo for use in performance analysis\n\nUses the psutil library\n\nInstall psutil (Ubuntu) if you don't already have it:\n sudo apt-get install python-dev\n sudo pip install psutil\n\nUsage:\n python mosp.py [options]\n\nExample usage:\n python mosp.py -W -i 2\n\nOptions:\n -h --help Display this help and 
exit\n -m --max-run-time Maximum time to run for before exiting\n (default is infinite)\n -i --interval Interval between requests in seconds\n (default is 1)\n -w --output-file Specify an output filename\n -W Output results to default filename\n default format is:\n mosp-HOSTNAME-YYYYMMDD-HHMMSS.csv\n -b --output-path Specify path to output file directory\n -j --no-header-row Suppress writing header row into CSV\n -v --version Output version information and exit\n\n \"\"\"\n return()", "def main(cls):\n parser = cls.make_argument_parser()\n args = parser.parse_args()\n args.device = make_hoomd_device(args)\n benchmark = cls(**vars(args))\n performance = benchmark.execute()\n\n if args.device.communicator.rank == 0:\n print(f'{numpy.mean(performance)}')", "async def stats(self, ctx):\n if ctx.invoked_subcommand is None:\n await send_cmd_help(ctx)", "def dicom_cli():", "def _display_cuda_devices():\n\n cuda_query_output = subprocess.run(\"nvidia-smi --query-gpu=gpu_uuid,gpu_name,compute_mode --format=csv\", shell=True, capture_output=True, text=True)\n # Check if command worked\n if cuda_query_output.returncode == 0:\n # Split by line jump and comma\n cuda_devices_list = [entry for entry in cuda_query_output.stdout.splitlines()]\n logger.debug(f\"CUDA devices available: {*cuda_devices_list,}\")\n # We only support \"Default\" and not \"Exclusive_Process\" for the compute mode\n if \"Default\" not in cuda_query_output.stdout:\n logger.warning(f\"GPU in 'Exclusive_Process' mode (or Prohibited), one context is allowed per device. This may prevent some openmmtools features from working. GPU must be in 'Default' compute mode\")\n # Handel the case where the command had some error\n else:\n logger.debug(f\"nvidia-smi command failed: {cuda_query_output.stderr}, this is expected if there is no GPU available\")", "def metrics_flash(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_metrics_flash(cmd_ctx, cpc, options))", "def main():\n parser = argparse.ArgumentParser(\n description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter\n )\n add_common_snmp_args(parser)\n parser.add_argument(\n \"-w\",\n \"--warning\",\n type=int,\n default=70,\n help=\"Warning memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-c\",\n \"--critical\",\n type=int,\n default=90,\n help=\"Critical memory usage percentage (0-100)\",\n )\n parser.add_argument(\n \"-f\",\n \"--family\",\n required=True,\n help=\"Switch family. 
Supported families: 1910, 1920, 1920S\",\n )\n\n config = vars(parser.parse_args())\n check_snmp_args(config)\n check_thresholds(config)\n\n dataset = {}\n\n if config[\"family\"] == \"1920S\":\n cpu = ObjectType(\n ObjectIdentity(\n \"HP-SWITCHING-MIB\", \"agentSwitchCpuProcessTotalUtilization\", 0\n )\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n cpu = ObjectType(\n ObjectIdentity(\"HH3C-ENTITY-EXT-MIB\", \"hh3cEntityExtCpuUsage\", 8)\n )\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n try:\n raw_data = get_snmp_data(config, cpu)\n except ValueError as err:\n unknown_exit(SERVICE, err)\n add_vars_to_dataset(dataset, raw_data)\n\n if config[\"family\"] == \"1920S\":\n dataset[\"cpu_usage\"] = get_hp_cpu_usage(\n dataset[\"agentSwitchCpuProcessTotalUtilization\"]\n )\n elif config[\"family\"] in [\"1910\", \"1920\"]:\n dataset[\"cpu_usage\"] = int(dataset[\"hh3cEntityExtCpuUsage\"])\n else:\n unknown_exit(SERVICE, f\"Switch family {config['family']} NOT known\")\n\n state, message = generate_output(config, dataset)\n report(state, message)", "def collect_cluster_info(output_dir, k8s_cli):\n collect_helper(output_dir, cmd=\"{} cluster-info\".format(k8s_cli),\n file_name=\"cluster_info\", resource_name=\"cluster-info\")", "def display_memcache_info(request):\n # pylint: disable-msg=E1101\n return utility.respond(request, 'admin/memcache_info',\n {'memcache_info': memcache.get_stats()})", "def do_stats(self, args):\n total_cpu = free_cpu = in_use_cpu = 0\n\n summary = self._qm.get_all_host_summary()\n for host_id, host_info in summary.viewitems():\n host_cpu = int(host_info['total cores'])\n total_cpu += host_cpu\n locked = host_info.get('locked by')\n if locked:\n # If host is locked then all CPUs are in use.\n in_use_cpu += host_cpu\n else:\n free_host_cpu = int(host_info['free cores'])\n in_use_cpu += (host_cpu - free_host_cpu)\n free_cpu += free_host_cpu\n\n print('total CPU: ', total_cpu)\n print('used/locked CPU: ', in_use_cpu)\n print('free CPU: ', free_cpu)\n capacity = float(in_use_cpu) / float(total_cpu)\n print('capacity used: %.1f%%' % (capacity * 100,))\n capacity = float(free_cpu) / float(total_cpu)\n print('capacity remaining: %.1f%%' % (capacity * 100,))", "def Usage(shorthelp=0, writeto_stdout=0, detailed_error=None,\n exitcode=None, show_cmd=None, show_global_flags=False):\n printer('%s: Incorrect usage; details below.' 
% show_cmd)\n printer('Correct usage is as follows:')\n printer('')\n for line in (' ' + cmd.__doc__.rstrip()).splitlines():\n printer(line)\n # Print out str(FLAGS) for just the UICmd-specific flags.\n tmp_flags = flags.FlagValues()\n unused_cmd = type(cmd)(show_cmd, tmp_flags)\n prefix = _UICMD_MODULE_NAME + ':\\n'\n flag_str = tmp_flags.ModuleHelp(_UICMD_MODULE_NAME)\n flag_str = flag_str.lstrip()\n if flag_str.startswith(prefix):\n flag_str = flag_str[len(prefix):]\n if flag_str:\n printer('')\n printer('flags:')\n for line in flag_str.splitlines():\n printer(line)\n if detailed_error is not None:\n printer('')\n printer('The incorrect usage is as follows:')\n printer('')\n for line in unicode(detailed_error).splitlines():\n printer(' ' + line)", "def printOptions(opts,subject_ids,session_ids,task_list, run_list, acq, rec):\n uname = os.popen('uname -s -n -r').read()\n print \"\\n\"\n print \"* Pipeline started at \"+time.strftime(\"%c\")+\"on \"+uname\n print \"* Command line is : \\n \"+str(sys.argv)+\"\\n\"\n print \"* The source directory is : \"+opts.sourceDir\n print \"* The target directory is : \"+opts.targetDir+\"\\n\"\n print \"* Data-set Subject ID(s) is/are : \"+str(', '.join(subject_ids))+\"\\n\"\n # print \"* PET conditions : \"+ ','.join(opts.condiList)+\"\\n\"\n print \"* Sessions : \", session_ids, \"\\n\"\n print \"* Tasks : \" , task_list , \"\\n\"\n print \"* Runs : \" , run_list , \"\\n\"\n print \"* Acquisition : \" , acq , \"\\n\"\n print \"* Reconstruction : \" , rec , \"\\n\"", "def phast_cmmd(self):\n temp = '{prog} -R {rho} -C {ecov} -E {elen} -N {chrom} -i MAF {maf} {model} > {wig}\\n'.format(**self.dict)\n return temp.format(fnum=self.fnum)", "def gather_info_and_display():\n # Obtain total rss displayed in memory.stat for each group,\n # container and service.\n try:\n output_mem = pipe_command(GREP_CMD, AWK_CMD, cwd=MEMPATH)\n LOG.debug(\n 'command: %s\\n%s',\n \"grep -rs total_rss '/sys/fs/cgroup/memory/' \"\n \"| awk '$2>0{print$0}' \",\n output_mem)\n except subprocess.CalledProcessError as error:\n LOG.error('Could not get total_rss memory, error=%s', error)\n return 1\n\n mem_info = get_meminfo()\n pt_groups = gather_groups_memory(output_mem)\n pt_cont = gather_containers_memory(output_mem)\n pt_serv = sys_service_memory()\n\n # Dump the tables out\n print('\\nPer groups memory usage:')\n\n # Get string to be printed and create list of elements separated by \\n\n list_of_table_lines = pt_groups.get_string().split('\\n')\n\n # Use the first line (+---+-- ...) 
as horizontal rule to insert later\n horizontal_line = list_of_table_lines[0]\n\n # Print the table, except last two lines ( \"Total\" row + final separator).\n print(\"\\n\".join(list_of_table_lines[:-2]))\n # Print separator, and finally the \"Total\" row.\n print(horizontal_line)\n print(\"\\n\".join(list_of_table_lines[-2:]))\n\n pt_namespc = prettytable.PrettyTable(\n ['Namespace',\n 'Resident Set Size (MiB)',\n ], caching=False)\n pt_namespc.align = 'l'\n pt_namespc.align['Resident Set Size (MiB)'] = 'r'\n\n print('\\nPer namespace memory usage:')\n for n_s in MEMORY['namespaces']:\n pt_namespc.add_row(\n [n_s,\n MEMORY['namespaces'][n_s],\n ])\n print(pt_namespc)\n\n print('\\nPer container memory usage:')\n print(pt_cont)\n\n print('\\nPer service memory usage:')\n print(pt_serv)\n\n base_mebib = 0.0\n k8s_system = 0.0\n k8s_addon = 0.0\n platform_memory_percent = 0.0\n\n # Calculate base memory usage (i.e., normal memory, exclude K8S and VMs)\n # e.g., docker, system.slice, user.slice\n for group in MEMORY['cgroups']:\n if group in BASE_GROUPS:\n base_mebib += float(MEMORY['cgroups'][group])\n\n # K8S platform system usage (essential) and addons usage (non-essential)\n for n_s in MEMORY['namespaces']:\n if n_s in K8S_NAMESPACE_SYSTEM:\n k8s_system += MEMORY['namespaces'][n_s]\n elif n_s in K8S_NAMESPACE_ADDON:\n k8s_addon += MEMORY['namespaces'][n_s]\n\n # Calculate platform memory usage\n platform_mebib = base_mebib + k8s_system\n\n anon_mebib = float(mem_to_mebibytes(\n mem_info['Active(anon)'] + mem_info['Inactive(anon)'])) * KBYTE\n avail_mebib = float(mem_to_mebibytes(\n mem_info['MemAvailable'])) * KBYTE\n total_mebib = float(anon_mebib + avail_mebib)\n\n anon_percent = py2_round(100 * anon_mebib / total_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n reserved_mebib = get_platform_reserved_memory()\n # Calculate platform memory in terms of percent reserved\n if reserved_mebib > 0.0:\n platform_memory_percent = py2_round(\n 100 * platform_mebib / reserved_mebib, DECIMAL_DIGITS) # pylint: disable=W1619\n\n pt_platf = prettytable.PrettyTable(\n ['Reserved',\n 'Platform',\n 'Base',\n 'K8s Platform system',\n 'k8s-addon'\n ], caching=False)\n pt_platf.align = 'l'\n\n pt_platf.add_row(\n [reserved_mebib,\n '{} ({}%)'.format(platform_mebib, platform_memory_percent),\n base_mebib,\n k8s_system,\n k8s_addon\n ])\n print('\\nPlatform memory usage in MiB:')\n print(pt_platf)\n\n pt_4k = prettytable.PrettyTable(\n ['Anon',\n 'Cgroup-rss',\n 'Available',\n 'Total'\n ], caching=False)\n pt_4k.align = 'l'\n\n pt_4k.add_row(\n ['{} ({}%)'.format(anon_mebib, anon_percent),\n MEMORY['cgroups']['total_rss'],\n avail_mebib,\n total_mebib\n ])\n\n print('\\n4K memory usage in MiB:')\n print(pt_4k)\n\n return 0", "def show_mem(cmd, cnt, args):\n if cpu is None:\n log(\"Load program first\") \n return\n elif len(cpu.memory) == 0:\n log(\"Load program first\") \n return \n chunk = 0\n chunk_count = len(cpu.memory)\n while chunk < chunk_count: \n chunk_start = cpu.memory[chunk][MEMADDR]\n chunk_end = chunk_start + cpu.memory[chunk][MEMSIZE] \n log(\"{:d} {:#x}..{:#x}\".format(chunk, chunk_start, chunk_end)) \n chunk += 1\n if machine == \"ARM\":\n if len(cpu.high_memory) != 0:\n log(\"High memory\")\n for addr in sorted(cpu.high_memory):\n log(\"{:#x}\".format(addr))", "def _cmd_segmetrics(args):\n if not 0.0 < args.alpha <= 1.0:\n raise RuntimeError(\"alpha must be between 0 and 1.\")\n\n if not any((args.location_stats, args.spread_stats, args.interval_stats)):\n logging.info(\"No stats 
specified\")\n return\n\n # Calculate all metrics\n cnarr = read_cna(args.cnarray)\n segarr = read_cna(args.segments)\n segarr = do_segmetrics(\n cnarr,\n segarr,\n args.location_stats,\n args.spread_stats,\n args.interval_stats,\n args.alpha,\n args.bootstrap,\n args.smooth_bootstrap,\n skip_low=args.drop_low_coverage,\n )\n tabio.write(segarr, args.output or segarr.sample_id + \".segmetrics.cns\")", "def ShowIPCSummary(cmd_args=None):\n print GetTaskIPCSummary.header\n ipc_table_size = 0\n for t in kern.tasks:\n (summary, table_size) = GetTaskIPCSummary(t)\n ipc_table_size += table_size\n print summary\n for t in kern.terminated_tasks:\n (summary, table_size) = GetTaskIPCSummary(t)\n ipc_table_size += table_size\n print \"Total Table size: {:d}\".format(ipc_table_size)\n return", "def qsub_cmmd(self):\n temp = 'qsub -l mem=1G,time=:5: -cwd -j y -o {log} {job}'.format(**self.dict)\n return temp.format(fnum=self.fnum)", "def main(argv=None):\n\n if argv is None:\n argv = sys.argv\n\n # setup command line parser\n parser = E.OptionParser(version=\"%prog version: $Id$\",\n usage=globals()[\"__doc__\"])\n\n parser.add_option(\"-t\", \"--test\", dest=\"test\", type=\"string\",\n help=\"supply help\")\n\n parser.add_option(\"--method\", dest=\"method\", type=\"choice\",\n choices=(\"metrics\", \"summary\", \"module_summary\"),\n help=\"method to summarise clustering\")\n\n parser.add_option(\"--ref-gtf-files\", dest=\"ref_gtf\", type=\"string\",\n help=\"comma separated list of reference gtf files\")\n\n # add common options (-h/--help, ...) and parse command line\n (options, args) = E.Start(parser, argv=argv)\n\n if options.method == \"metrics\":\n infile = argv[-1]\n E.info(\"loading input file: %s\" % infile)\n assert infile\n\n df = pd.read_table(infile,\n sep=\"\\t\",\n header=None,\n index_col=0)\n\n df = df.ix[:, :50]\n cluster_combs = (x for x in itertools.combinations(df.columns,\n 2))\n genes = df.index\n results_dict = {}\n all_clusts = {}\n\n E.info(\"setting up cluster containers\")\n for i in df.columns:\n clusters = set(df[i].values.tolist())\n cluster_dict = {}\n for clust in clusters:\n cluster_dict[clust] = []\n for gene in genes:\n cluster_dict[df[i][gene]].append(gene)\n\n for col in clusters:\n col_set = set()\n clust_col = cluster_dict[col]\n gene_members = itertools.combinations(clust_col,\n 2)\n col_set.update(gene_members)\n cluster_dict[col] = col_set\n all_clusts[i] = cluster_dict\n E.info(\"generating all pair-wise cluster comparisons\")\n E.info(\"calculating adjusted mutual information\")\n for k in cluster_combs:\n clusters1 = all_clusts[k[0]]\n clusters2 = all_clusts[k[1]]\n metric_dict = {}\n metric_dict['AMI'] = TS.adjustedMutualInformation(clusters1,\n clusters2)\n results_dict[k] = metric_dict\n\n res_frame = pd.DataFrame(results_dict).T\n res_frame = res_frame.reset_index()\n res_frame.drop(['level_0'], inplace=True, axis=1)\n res_frame.drop(['level_1'], inplace=True, axis=1)\n\n # flatten rand indices and add to output dataframe\n rand_arrays = TS.randIndexes(df)\n flat_adj_rand = TS.unravel_arrays(rand_arrays[0])\n flat_rand = TS.unravel_arrays(rand_arrays[1])\n res_frame['Rand_Index'] = flat_rand\n res_frame['Adjusted_Rand_Index'] = flat_adj_rand\n E.info(\"aggregating results\")\n\n res_frame.to_csv(options.stdout,\n sep=\"\\t\",\n index_label='idx')\n\n elif options.method == \"summary\":\n infiles = argv[-1]\n list_of_files = infiles.split(\",\")\n\n file_dict = {}\n for fle in list_of_files:\n fname = fle.split(\"/\")[-1]\n condition = 
fname.split(\"-\")[0]\n ref = fname.split(\"-\")[1]\n df_ = pd.read_table(fle,\n sep=\"\\t\",\n header=0,\n index_col=0)\n df_.columns = ['gene_id', 'cluster']\n clust_dict = {}\n for idx in df_.index:\n cluster = df_.loc[idx]['cluster']\n gene = df_.loc[idx]['gene_id']\n try:\n clust_dict[cluster] += 1\n except KeyError:\n clust_dict[cluster] = 1\n med_size = np.median(clust_dict.values())\n file_dict[fname] = {'condition': condition,\n 'reference': ref,\n 'median_cluster_size': med_size}\n\n outframe = pd.DataFrame(file_dict).T\n outframe.to_csv(options.stdout,\n sep=\"\\t\",\n index_label='idx')\n\n elif options.method == \"module_summary\":\n # get lncRNA/gene lengths from reference gtfs\n ref_gtfs = options.ref_gtf.split(\",\")\n length_dict = {}\n for ref in ref_gtfs:\n oref = IOTools.openFile(ref, \"rb\")\n git = GTF.transcript_iterator(GTF.iterator(oref))\n for gene in git:\n for trans in gene:\n length = trans.end - trans.start\n try:\n length_dict[trans.gene_id] += length\n except KeyError:\n length_dict[trans.gene_id] = length\n oref.close()\n\n infiles = argv[-1]\n list_of_files = infiles.split(\",\")\n\n fdfs = []\n for fle in list_of_files:\n cond = fle.split(\"/\")[-1].split(\"-\")[0]\n refer = fle.split(\"/\")[-1].split(\"-\")[1]\n _df = pd.read_table(fle, sep=\"\\t\",\n header=0, index_col=0)\n _df.columns = ['gene_id', 'cluster']\n clusters = set(_df['cluster'])\n c_dict = {}\n # summarize over each cluster\n for clust in clusters:\n lengths = []\n c_df = _df[_df['cluster'] == clust]\n for lid in c_df['gene_id']:\n lengths.append(length_dict[lid])\n c_dict[clust] = {'cluster_size': len(c_df['gene_id']),\n 'mean_length': np.mean(lengths),\n 'index': (cond, refer),\n 'module': clust}\n cdf = pd.DataFrame(c_dict).T\n # use a multindex for hierarchical indexing\n midx = pd.MultiIndex.from_tuples(cdf['index'])\n cdf.index = midx\n cdf.drop(['index'], inplace=True, axis=1)\n fdfs.append(cdf)\n\n # generate a single output df\n s_df = fdfs[0]\n fdfs.pop(0)\n for df in fdfs:\n s_df = s_df.append(df)\n\n s_df.to_csv(options.stdout,\n index_label=(\"condition\", \"reference\"),\n sep=\"\\t\")\n\n # write footer and output benchmark information.\n E.Stop()", "def main():\n cli = DhcpClientCLI()\n\n parser = argparse.ArgumentParser(\n description='Management CLI for Mobility DHCP Client',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n )\n\n # Add sub commands\n subparsers = parser.add_subparsers(title='subcommands', dest='cmd')\n\n # List\n subparser = subparsers.add_parser(\n 'list_dhcp_records',\n help='Lists all records from Redis',\n )\n subparser.set_defaults(func=cli.list_all_record)\n\n # Add\n subparser = subparsers.add_parser(\n 'add_rec',\n help='Add ip allocation record',\n )\n subparser.add_argument(\n 'mac', help='Mac address, e.g. \"8a:00:00:00:0b:11\"',\n type=str,\n )\n subparser.add_argument(\n 'ip', help='IP address, e.g. \"1.1.1.1\"',\n type=ip_address,\n )\n\n subparser.add_argument(\n 'state',\n help='DHCP protocol state 1 to 7, e.g. \"1\"',\n type=int,\n )\n subparser.add_argument(\n 'subnet',\n help='IP address subnet, e.g. \"1.1.1.0/24\"',\n type=ipaddress.ip_network,\n )\n\n subparser.add_argument('dhcp', help='DHCP IP address, e.g. \"1.1.1.100\"')\n subparser.add_argument('lease', help='Lease time in seconds, e.g. \"100\"')\n subparser.set_defaults(func=cli.add_record)\n\n # del\n subparser = subparsers.add_parser(\n 'del_rec',\n help='Add ip allocation record',\n )\n subparser.add_argument('mac', help='Mac address, e.g. 
\"8a:00:00:00:0b:11\"')\n subparser.set_defaults(func=cli.del_record)\n\n # set default gw\n subparser = subparsers.add_parser(\n 'set_default_gw',\n help='Set default GW',\n )\n subparser.add_argument('ip', help='IP address, e.g. \"1.1.1.1\"')\n\n subparser.set_defaults(func=cli.set_deafult_gw)\n\n # set gw mac\n subparser = subparsers.add_parser(\n 'set_gw_mac',\n help='Set GW Mac address',\n )\n subparser.add_argument('mac', help='Mac address, e.g. \"8a:00:00:00:0b:11\"')\n\n subparser.set_defaults(func=cli.set_deafult_gw)\n\n # Parse the args\n args = parser.parse_args()\n if not args.cmd:\n parser.print_usage()\n sys.exit(1)\n\n # Execute the sub-command function\n args.func(args)", "def printHelp():\n print(\"amqWorkApiMass.py -n <msgcnt> -b <body> -m <headers> -s <path/to/bodyandheaders>\")", "def capacitygroup_show(cmd_ctx, cpc, capacitygroup):\n cmd_ctx.execute_cmd(\n lambda: cmd_capacitygroup_show(cmd_ctx, cpc, capacitygroup))", "def help(self, args):\n print('No commands available for this consumer')", "def usage(msgarg):\n if msgarg:\n sys.stderr.write(\"error: %s\\n\" % msgarg)\n print(\"\"\"\\\n usage: %s [options]\n\n options:\n -d increase debug msg verbosity level\n -c N emit N classes (def: 500) per instances\n -I N emit N instances\n\n \"\"\" % os.path.basename(sys.argv[0]))\n sys.exit(1)", "def main():\n\n ip_filename = arguments.ip_file.strip()\n\n # Set project directory to 'logs' unless an optional directory was given\n if arguments.project_dir:\n project = arguments.project_dir\n else:\n project = 'logs'\n\n if arguments.device_class:\n device_cls = arguments.device_class.strip()\n else:\n # Default device class for Netmiko\n device_cls = 'cisco_ios'\n\n ips = []\n ips = load_txt_file(ip_filename)\n\n total_devices = len(ips)\n # Track devices which fail login or pings\n missing_devices = []\n # Track devices which were successfully accessed\n devices_verified = 0\n\n # Create Directory for show output based on the Project Name\n path = os.path.join(\"./\", project.strip())\n # print path\n if not os.path.exists(path):\n os.makedirs(path)\n print(f\"Created directory: {path}\")\n\n # Create logfile for the discovery run in same directory as the resulting show commands\n # logfilename = project + \"-logfile.log\"\n # logfilename = os.path.join(path, logfilename)\n\n if total_devices > 1:\n heading = f\"##### Executing show commands for discovery project {project} for {str(total_devices)} devices! #####\"\n else:\n heading = f\"##### Executing show commands for discovery project {project} for {str(total_devices)} device! 
#####\"\n\n print(\"#\" * len(heading))\n print(heading)\n print(\"#\" * len(heading))\n\n print(f\"Device IP(s) in project {project}:\")\n for i in ips:\n print(f\"\\t{i}\")\n print(\"--------------------------\")\n print(f\"Total devices: {str(len(ips))}\")\n print(\"#\" * len(heading))\n print(\"\\n\")\n\n ## Default Credentials\n # Default list of credentials in format username, user password, enable password\n credentials = ['cisco, cisco, cisco']\n\n ## Load Credentials if -c or --creds option was used\n if arguments.creds:\n # Override default credentials as a new credential file with one or more sets of credentials was provided\n cred_filename = arguments.creds\n credentials = load_txt_file(cred_filename)\n\n ##### SHOW COMMANDS\n commands = []\n\n ## Load custom show commands if -c or --show option was used\n if arguments.show:\n # Override default list of show commands as a new file with one or more show commands was provided\n show_filename = arguments.show\n custom_showcmds = load_txt_file(show_filename)\n\n # first command to send is an end to get back to the main prompt\n commands = custom_showcmds\n\n else:\n # DEFAULT SHOW COMMANDS\n commands = [\"show version\",\n ]\n\n # if not arguments.pingonly:\n # print(\"Sending \" + str(len(commands)) + \" show commands:\")\n # for x in range(0, len(commands)):\n # print(\"\\t\" + commands[x])\n\n # For each IP in the ip address file, attempt to ping, attempt to log in, attempt to enter enable mode and\n # execute and save show command output\n for mgmt_ip in ips:\n\n login_success = False\n enable_success = False\n output = ''\n hostname = \"dev_\" + mgmt_ip\n\n # If Ping is successful attempt to log in and if that is successful attempt to enter enable mode and\n # execute list of show commands\n device_pings = ping_device(mgmt_ip)\n\n if device_pings:\n print(f\"Device {mgmt_ip} Responds to Pings!\\n\")\n\n # If the -i or --icmppingonly option was provided when the script was called, then only execute the ping code.\n if arguments.icmppingonly:\n # Keep a count of the devices that are pingable\n devices_verified += 1\n # Skip everything else as the icmp ping only option was given\n continue\n\n if len(credentials) > 1:\n print(\"**** Attempting multiple credentials to access device....\")\n\n try_telnet = False\n # Credential Loop\n for line in credentials:\n\n lineitem = line.split(',')\n uname = lineitem[0].strip()\n upwd = lineitem[1].strip()\n epwd = lineitem[2].strip()\n\n if not try_telnet:\n\n print(f\"\\t**** Attempting user credentials for {uname} with SSH.\")\n\n try:\n dev_conn = ConnectHandler(device_type=device_cls, ip=mgmt_ip, username=uname, password=upwd,\n secret=epwd)\n login_success = True\n\n\n except NetMikoAuthenticationException:\n print(f\"\\tNetMikoAuthenticationException: Device failed SSH Authentication with username {uname}\")\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'Failed Authentication')\n login_success = False\n # continue\n\n except (EOFError, SSHException, NetMikoTimeoutException):\n print('\\tSSH is not enabled for this device.')\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'Failed SSH')\n login_success = False\n try_telnet = True\n # continue\n\n except Exception as e:\n print(\"\\tGeneral Exception: ERROR!:\" + str(sys.exc_info()[0]) + \"==>\" + str(sys.exc_info()[1]))\n print(str(e))\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'General Exception')\n login_success = False\n # continue\n\n if login_success:\n print(\"\\t**** SSH 
Login Succeeded! Will not attempt login with any other credentials.\")\n # Break out of credential loop\n break\n else:\n print(\"\\t**** SSH Login Failed!\")\n # continue\n\n # Try Telnet\n if try_telnet:\n print(\"\\t**** Attempting user credentials for \" + uname + \" with Telnet.\")\n\n try:\n dev_conn = ConnectHandler(device_type='cisco_ios_telnet', ip=mgmt_ip, username=uname,\n password=upwd,\n secret=epwd)\n login_success = True\n\n except NetMikoAuthenticationException:\n print(f\"\\tNetMikoAuthenticationException: Device failed SSH Authentication with username {uname}\")\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'Failed Authentication')\n login_success = False\n continue\n\n except Exception as e:\n print(\"\\tGeneral Exception: ERROR!:\" + str(sys.exc_info()[0]) + \"==>\" + str(sys.exc_info()[1]))\n print(str(e))\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'General Exception')\n login_success = False\n continue\n\n if login_success:\n print(\"\\t**** Telnet Login Succeeded! Will not attempt login with any other credentials.\")\n # Break out of credential loop\n break\n else:\n print(\"\\t**** Telnet Login Failed!\")\n continue\n\n if login_success:\n # Check to see if login has resulted in enable mode (i.e. priv level 15)\n is_enabled = dev_conn.check_enable_mode()\n\n if not is_enabled:\n try:\n dev_conn.enable()\n enable_success = True\n except Exception as e:\n print(str(e))\n print(\"\\tCannot enter enter enable mode on device!\")\n missing_devices = missing_device_log(missing_devices, mgmt_ip, 'failed enable')\n enable_success = False\n continue\n else:\n print(\"\\tDevice already in enabled mode!\")\n enable_success = True\n\n if enable_success:\n\n for cmd in commands:\n output += dev_conn.send_command(cmd, strip_prompt=False, strip_command=False)\n dev_conn.exit_config_mode()\n dev_conn.disconnect()\n\n # output contains a stream of text vs individual lines\n # split into individual lies for further parsing\n # output_lines = re.split(r'[\\n\\r]+', output)\n\n # show_info = get_show_info(output_lines)\n #\n # if show_info['hostname']:\n # hostname = show_info.pop('hostname')\n\n # print(\"Information for device: \" + hostname)\n # for k, v in show_info.items():\n # print(\"\\t\" + k +\"\\t\\t-\\t\" + v)\n\n # Save output to file\n timestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\n log_filename = hostname + \"-\" + timestr + \".txt\"\n log_filename = os.path.join(path, log_filename)\n\n log_file = open(log_filename, 'w')\n log_file.write(\"!#Output file for device \" + hostname + \"\\n\")\n log_file.write(\"!#Commands executed on \" + timestr + \"\\n\\r\")\n log_file.write(\"!\\n\")\n log_file.write(output)\n log_file.close()\n devices_verified += 1\n print(\"\\nOutput results saved in: \" + log_filename + \"\\n\\n\")\n\n\n else:\n # Device does not PING\n print(\"Device is unreachable\")\n missing_devices.append(mgmt_ip)\n\n # Totals Verification\n if arguments.icmppingonly:\n info = (\"Total number of devices in IP list:\\t\\t\" + str(total_devices) + \"\\n\",\n \"Total number of devices which responded to pings:\\t\" + str(devices_verified) + \"\\n\")\n else:\n info = (\"Total number of devices in IP list:\\t\\t\" + str(total_devices) + \"\\n\",\n \"Total number of show command output files:\\t\" + str(devices_verified) + \"\\n\")\n\n\n # Print Note on totals\n for note in info:\n print(note)", "def _get_metrics_options(metrics):\n metrics_options = []\n if metrics is None:\n metrics = []\n for static_metric in metrics:\n 
metrics_options += [\n \"-m\",\n static_metric.metric.mp_metric_name,\n str(static_metric.value),\n ]\n return metrics_options", "def usage(self, host):", "def get_memory_info(ssh):\r\n cmd04='wmic memorychip get capacity'\r\n retry_number1=3\r\n try:\r\n while True:\r\n if retry_number1 == 0:\r\n logger.writeLog(\"get memory sum size fail\",level='error')\r\n break\r\n stdin,stdout,stderr=ssh.exec_command(cmd04)\r\n data04=stdout.read().decode().strip('Capacity')\r\n print(data04)\r\n if data04 == \"\":\r\n retry_number1 -= 1\r\n logger.writeLog(\"get memory sum size data null\",level='error')\r\n continue\r\n else:\r\n result_list=data04.split()\r\n print(result_list)\r\n memory_size=float(int(result_list[0])+int(result_list[1]))/1024/1024/1024\r\n print(\"mem total Gb: \",memory_size)\r\n logger.writeLog(\"get memory sum size success\",level='info')\r\n # return memory_size\r\n break\r\n except:\r\n logger.writeLog(\"get memory size error\",level='error')\r\n return None\r\n\r\n#6.内存剩余量/Gb\r\n# def get_memory_surplus(ssh):\r\n \"\"\"get memory surplus\"\"\"\r\n cmd05='wmic OS get FreePhysicalMemory'\r\n retry_number2=3\r\n try:\r\n while True:\r\n if retry_number2 == 0:\r\n logger.writeLog(\"get memory surplus fail\",level='error')\r\n break\r\n stdin,stdout,stderr=ssh.exec_command(cmd05)\r\n data05=int(stdout.read().decode().split()[1])\r\n print(data05)\r\n if data05 == \"\":\r\n logger.writeLog(\"get memory surplus data null\",level='error')\r\n retry_number2 -= 1\r\n continue\r\n else:\r\n memory_surplus=round(float(data05)/1024/1024,4)\r\n print(\"mem free Gb: \",memory_surplus)\r\n logger.writeLog(\"get memory surplus data success\",level='info')\r\n # return memory_surplus\r\n break\r\n except:\r\n logger.writeLog(\"get memory surplus error\",level='error')\r\n return None\r\n\r\n#7.内存使用率\r\n# def get_memory_ratio(ssh):\r\n \"\"\"get memory ratio\"\"\"\r\n # memory_size=get_memory_size(ssh)\r\n # memory_surplus=get_memory_surplus(ssh)\r\n if memory_size == \"\" or memory_surplus == \"\":\r\n logger.writeLog(\"memory_szie is null or memory_surplus is null\",level='error')\r\n return None\r\n else:\r\n try:\r\n data06=round(float((memory_size-memory_surplus))/memory_size,4)\r\n print(\"mem use ratio: \",data06)\r\n logger.writeLog(\"get memory ratio success\",level='info')\r\n return (memory_size,memory_surplus,data06)\r\n except:\r\n logger.writeLog(\"get memory ratio error\",level='error')\r\n return None", "def cli(ctx, host, device_id, api_key, inching, wait):\n if ctx.invoked_subcommand == \"discover\":\n return\n\n if host is None and device_id is None:\n logger.error(\"No host name or device_id given, see usage below:\")\n click.echo(ctx.get_help())\n sys.exit(1)\n\n ctx.obj = {\n \"host\": host,\n \"device_id\": device_id,\n \"api_key\": api_key,\n \"inching\": inching,\n \"wait\": wait,\n }", "def HelpCommand(self, unused_args, unused_sub_opts=None, unused_headers=None,\n unused_debug=None):\n self.OutputUsageAndExit()", "def measure(self,command_exe, command_args, measure_out):\n pass", "def main():\n\n parser = argparse.ArgumentParser(description=main.__doc__,\n formatter_class=argparse.ArgumentDefaultsHelpFormatter,\n epilog=\"Homepage: https://github.com/kbat/mc-tools\")\n parser.add_argument('com', type=str, help='plot requests file name', nargs='?', default=\"/tmp/foo.c\")\n parser.add_argument('comout', type=str, help='COMOUT file name', nargs='?', default=\"zoom\")\n\n args = parser.parse_args()\n\n cmd = {} # dictionary of commands\n\n bas = False\n plane = 
False\n \n with open(args.com) as f:\n for line in f.readlines():\n words = line.strip().split()\n if len(words) is 0:\n continue\n\n for i,w in enumerate(words):\n if re.search(\"^bas\", w):\n cmd['bas'] = list(map(float, words[i+1:i+7]))\n if plane is False: bas = True # basis was before plane cuts\n elif re.search(\"^or\", w):\n cmd['or'] = list(map(float, words[i+1:i+4]))\n elif re.search(\"^ex\", w):\n try: # both x and y scales are given\n cmd['ex'] = list(map(float, words[i+1:i+3]))\n continue\n except ValueError: # just 1 scale is given\n cmd['ex'] = list(map(float, words[i+1:i+2]))\n elif re.search(\"^lab\", w):\n cmd['label'] = list(map(int, map(float, words[i+1:i+3]))) #+ [words[i+3]]\n elif re.search(\"^p[xyz]\", w):\n cmd[w] = [float(words[i+1])]\n if bas is False: plane = True # plane cuts were before basis\n elif re.search(\"^legend\", w):\n cmd[w] = [words[i+1]]\n elif w == \"scale\":\n print(w)\n if int(words[i+1]): # no need to put 'scale 0'\n cmd[w] = [words[i+1]]\n elif w in (\"mesh\"):\n if int(words[i+1])==1: # no need to put 'mesh 1'\n cmd[w] = [words[i+1]]\n\n print(bas, plane)\n\n if plane: # bas was first\n keys = ('bas', 'or', 'ex', 'px', 'py', 'pz', 'label', 'mesh', 'legend', 'scale')\n elif bas:\n keys = ('or', 'ex', 'px', 'py', 'pz', 'bas', 'label', 'mesh', 'legend', 'scale')\n else:\n keys = {'or', 'ex', 'label', 'mesh', 'legend', 'scale'}\n \n with open(args.comout, 'w') as f:\n for key in keys:\n if key in cmd:\n # newline required by mcplot:\n if key in ('mesh', 'legend', 'scale', 'label'):\n f.write(\"\\n\")\n f.write(\"%s %s \" % (key,\" \".join(str(e) for e in cmd[key]),))\n f.write(\"\\n\")", "def options():\n print \"\"\"Options summary:\n -h, --help\n -u, --usage\n -v, --verbose <verb_level>\n -e, --endpoint <endpoint>\n -i, --interface-type <iface_type>\n -r, --recursive\n --dbs-conf <conf_file>\n --show-prod\n --show-caf\n --only-subscribed\n --only-custodial\n \"\"\"", "def fusion_generate_mmmc_script(x: hammer_vlsi.HammerTool) -> str:\n mmmc_output = [] # type: List[str]\n\n def append_mmmc(cmd: str) -> None:\n x.verbose_tcl_append(cmd, mmmc_output)\n\n # Create an Innovus constraint mode.\n constraint_mode = \"my_constraint_mode\"\n sdc_files = [] # type: List[str]\n\n # Generate constraints\n clock_constraints_fragment = os.path.join(x.run_dir, \"clock_constraints_fragment.sdc\")\n with open(clock_constraints_fragment, \"w\") as f:\n f.write(x.sdc_clock_constraints)\n sdc_files.append(clock_constraints_fragment)\n\n # Generate port constraints.\n pin_constraints_fragment = os.path.join(x.run_dir, \"pin_constraints_fragment.sdc\")\n with open(pin_constraints_fragment, \"w\") as f:\n f.write(x.sdc_pin_constraints)\n sdc_files.append(pin_constraints_fragment)\n\n # Add the post-synthesis SDC, if present.\n post_synth_sdc = x.post_synth_sdc\n if post_synth_sdc is not None:\n sdc_files.append(post_synth_sdc)\n\n # TODO: add floorplanning SDC\n if len(sdc_files) > 0:\n sdc_files_arg = \"-sdc_files [list {sdc_files}]\".format(\n sdc_files=\" \".join(sdc_files)\n )\n else:\n blank_sdc = os.path.join(x.run_dir, \"blank.sdc\")\n x.run_executable([\"touch\", blank_sdc])\n sdc_files_arg = \"-sdc_files {{ {} }}\".format(blank_sdc)\n append_mmmc(\"create_constraint_mode -name {name} {sdc_files_arg}\".format(\n name=constraint_mode,\n sdc_files_arg=sdc_files_arg\n ))\n\n corners = x.get_mmmc_corners() # type: List[MMMCCorner]\n # In parallel, create the delay corners\n if corners:\n setup_corner = corners[0] # type: MMMCCorner\n hold_corner = corners[0] # 
type: MMMCCorner\n pwr_corner = corners[0] # type: MMMCCorner\n # TODO(colins): handle more than one corner and do something with extra corners\n for corner in corners:\n if corner.type is MMMCCornerType.Setup:\n setup_corner = corner\n if corner.type is MMMCCornerType.Hold:\n hold_corner = corner\n if corner.type is MMMCCornerType.Extra:\n pwr_corner = corner\n\n # First, create Innovus library sets\n append_mmmc(\"create_library_set -name {name} -timing [list {list}]\".format(\n name=\"{n}.setup_set\".format(n=setup_corner.name),\n list=x.get_timing_libs(setup_corner)\n ))\n append_mmmc(\"create_library_set -name {name} -timing [list {list}]\".format(\n name=\"{n}.hold_set\".format(n=hold_corner.name),\n list=x.get_timing_libs(hold_corner)\n ))\n append_mmmc(\"create_library_set -name {name} -timing [list {list}]\".format(\n name=\"{n}.pwr_set\".format(n=pwr_corner.name),\n list=x.get_timing_libs(pwr_corner)\n ))\n # Skip opconds for now\n # Next, create Innovus timing conditions\n append_mmmc(\"create_timing_condition -name {name} -library_sets [list {list}]\".format(\n name=\"{n}.setup_cond\".format(n=setup_corner.name),\n list=\"{n}.setup_set\".format(n=setup_corner.name)\n ))\n append_mmmc(\"create_timing_condition -name {name} -library_sets [list {list}]\".format(\n name=\"{n}.hold_cond\".format(n=hold_corner.name),\n list=\"{n}.hold_set\".format(n=hold_corner.name)\n ))\n append_mmmc(\"create_timing_condition -name {name} -library_sets [list {list}]\".format(\n name=\"{n}.pwr_cond\".format(n=pwr_corner.name),\n list=\"{n}.pwr_set\".format(n=pwr_corner.name)\n ))\n # Next, create Innovus rc corners from qrc tech files\n append_mmmc(\"create_rc_corner -name {name} -temperature {tempInCelsius} {qrc}\".format(\n name=\"{n}.setup_rc\".format(n=setup_corner.name),\n tempInCelsius=str(setup_corner.temp.value),\n qrc=\"-qrc_tech {}\".format(x.get_mmmc_qrc(setup_corner)) if x.get_mmmc_qrc(setup_corner) != '' else ''\n ))\n append_mmmc(\"create_rc_corner -name {name} -temperature {tempInCelsius} {qrc}\".format(\n name=\"{n}.hold_rc\".format(n=hold_corner.name),\n tempInCelsius=str(hold_corner.temp.value),\n qrc=\"-qrc_tech {}\".format(x.get_mmmc_qrc(hold_corner)) if x.get_mmmc_qrc(hold_corner) != '' else ''\n ))\n append_mmmc(\"create_rc_corner -name {name} -temperature {tempInCelsius} {qrc}\".format(\n name=\"{n}.pwr_rc\".format(n=pwr_corner.name),\n tempInCelsius=str(pwr_corner.temp.value),\n qrc=\"-qrc_tech {}\".format(x.get_mmmc_qrc(pwr_corner)) if x.get_mmmc_qrc(pwr_corner) != '' else ''\n ))\n # Next, create an Innovus delay corner.\n append_mmmc(\n \"create_delay_corner -name {name}_delay -timing_condition {name}_cond -rc_corner {name}_rc\".format(\n name=\"{n}.setup\".format(n=setup_corner.name)\n ))\n append_mmmc(\n \"create_delay_corner -name {name}_delay -timing_condition {name}_cond -rc_corner {name}_rc\".format(\n name=\"{n}.hold\".format(n=hold_corner.name)\n ))\n append_mmmc(\n \"create_delay_corner -name {name}_delay -timing_condition {name}_cond -rc_corner {name}_rc\".format(\n name=\"{n}.pwr\".format(n=pwr_corner.name)\n ))\n # Next, create the analysis views\n append_mmmc(\"create_analysis_view -name {name}_view -delay_corner {name}_delay -constraint_mode {constraint}\".format(\n name=\"{n}.setup\".format(n=setup_corner.name), constraint=constraint_mode))\n append_mmmc(\"create_analysis_view -name {name}_view -delay_corner {name}_delay -constraint_mode {constraint}\".format(\n name=\"{n}.hold\".format(n=hold_corner.name), constraint=constraint_mode))\n 
append_mmmc(\"create_analysis_view -name {name}_view -delay_corner {name}_delay -constraint_mode {constraint}\".format(\n name=\"{n}.pwr\".format(n=pwr_corner.name), constraint=constraint_mode))\n # Finally, apply the analysis view.\n append_mmmc(\"set_analysis_view -setup {{ {setup_view} }} -hold {{ {hold_view} }} -leakage {{ {pwr_view} }} -dynamic {{ {pwr_view} }}\".format(\n setup_view=\"{n}.setup_view\".format(n=setup_corner.name),\n hold_view=\"{n}.hold_view\".format(n=hold_corner.name),\n pwr_view=\"{n}.pwr_view\".format(n=pwr_corner.name)\n ))\n else:\n # First, create an Innovus library set.\n library_set_name = \"my_lib_set\"\n append_mmmc(\"create_library_set -name {name} -timing [list {list}]\".format(\n name=library_set_name,\n list=x.get_timing_libs()\n ))\n # Next, create an Innovus timing condition.\n timing_condition_name = \"my_timing_condition\"\n append_mmmc(\"create_timing_condition -name {name} -library_sets [list {list}]\".format(\n name=timing_condition_name,\n list=library_set_name\n ))\n # extra junk: -opcond ...\n rc_corner_name = \"rc_cond\"\n append_mmmc(\"create_rc_corner -name {name} -temperature {tempInCelsius} {qrc}\".format(\n name=rc_corner_name,\n tempInCelsius=120, # TODO: this should come from tech config\n qrc=\"-qrc_tech {}\".format(x.get_qrc_tech()) if x.get_qrc_tech() != '' else ''\n ))\n # Next, create an Innovus delay corner.\n delay_corner_name = \"my_delay_corner\"\n append_mmmc(\n \"create_delay_corner -name {name} -timing_condition {timing_cond} -rc_corner {rc}\".format(\n name=delay_corner_name,\n timing_cond=timing_condition_name,\n rc=rc_corner_name\n ))\n # extra junk: -rc_corner my_rc_corner_maybe_worst\n # Next, create an Innovus analysis view.\n analysis_view_name = \"my_view\"\n append_mmmc(\"create_analysis_view -name {name} -delay_corner {corner} -constraint_mode {constraint}\".format(\n name=analysis_view_name, corner=delay_corner_name, constraint=constraint_mode))\n # Finally, apply the analysis view.\n # TODO: introduce different views of setup/hold and true multi-corner\n append_mmmc(\"set_analysis_view -setup {{ {setup_view} }} -hold {{ {hold_view} }}\".format(\n setup_view=analysis_view_name,\n hold_view=analysis_view_name\n ))\n\n return \"\\n\".join(mmmc_output)", "def get_cluster_usage_info(cluster_id, kind, namespace_id=None, pods_list=None):\n if pods_list is None:\n pods_list = []\n else:\n logger.info('pod list not none')\n if pods_list == 'no_pod_resource':\n return {'cpu': 0,\n 'memory': 0}\n else:\n logger.info('resources no 0')\n # node usage stats if needed\n if kind == 'nodes':\n cpu_usage_info = client.CustomObjectsApi().list_cluster_custom_object('metrics.k8s.io', 'v1beta1', kind)\n cpu_usage_in_cores = sum([int(''.join(filter(\n str.isdigit, str(cpu_usage_item['usage']['cpu'].encode(\n 'utf-8'))))) for cpu_usage_item in cpu_usage_info['items']])\n cpu_usage_in_percentage = round(cpu_usage_in_cores / 10000000, 0)\n memory_usage = sum([unit_conversion(int(''.join(filter(\n str.isdigit, str(memory_usage_item['usage']['memory'].encode(\n 'utf-8'))))), ''.join(filter(str.isalpha, str(memory_usage_item['usage']['memory'].encode('utf-8')))))\n for memory_usage_item in cpu_usage_info['items']])\n # pods usage stats\n elif kind == 'pods':\n if namespace_id:\n cpu_usage_info = client.CustomObjectsApi().list_namespaced_custom_object('metrics.k8s.io', 'v1beta1',\n namespace_id, kind)\n else:\n cpu_usage_info = client.CustomObjectsApi().list_cluster_custom_object('metrics.k8s.io', 'v1beta1', kind)\n if len(pods_list) != 0:\n 
cpu_usage_in_cores = round(unit_conversion(sum([int(''.join(filter(\n str.isdigit, str(cpu_usage_item['containers'][0]['usage']['cpu'].encode(\n 'utf-8'))))) for cpu_usage_item in cpu_usage_info['items'] if cpu_usage_item['metadata']['name']\n in pods_list]), 'n'), 2)\n memory_usage = round(sum([unit_conversion(int(''.join(filter(\n str.isdigit, str(memory_usage_item['containers'][0]['usage']['memory'].encode(\n 'utf-8'))))),\n ''.join(\n filter(str.isalpha, str(memory_usage_item['containers'][0]['usage']['memory'].encode('utf-8')))))\n for memory_usage_item in cpu_usage_info['items'] if memory_usage_item['metadata']['name']\n in pods_list]), 2)\n else:\n cpu_usage_in_cores = round(unit_conversion(sum([int(''.join(filter(\n str.isdigit, str(cpu_usage_item['containers'][0]['usage']['cpu'].encode(\n 'utf-8'))))) for cpu_usage_item in cpu_usage_info['items']]), 'n'), 2)\n memory_usage = round(sum([unit_conversion(int(''.join(filter(\n str.isdigit, str(memory_usage_item['containers'][0]['usage']['memory'].encode(\n 'utf-8'))))),\n ''.join(filter(str.isalpha, str(memory_usage_item['containers'][0]['usage']['memory'].encode('utf-8')))))\n for memory_usage_item in cpu_usage_info['items']]), 2)\n return {'cpu': cpu_usage_in_cores,\n 'memory': memory_usage}", "def bdev_nvme_get_mdns_discovery_info(client):\n return client.call('bdev_nvme_get_mdns_discovery_info')", "def config_pbc_md(self):\n\n self._config_md()\n self.title = \"PBC MD Simulation\"\n self.cntrl[\"cut\"] = 8.0\n self.cntrl[\"igb\"] = 0\n self.cntrl[\"iwrap\"] = 1\n self.cntrl[\"ntp\"] = 1\n self.cntrl[\"barostat\"] = 2", "def Usage():\n print \"\"\"\n To plot the result using the iter number of the x axis:\n\n plot_sdcard.py -i /tmp/data.txt\n\n To plot the result using time for the x axis:\n\n plot_sdcard.py -t /tmp/data.txt\n\n To plot the result from the profiler:\n\n profile_sdcard.sh\n plot_sdcard.py -p\n\n \"\"\"\n sys.exit(2)", "def usage():\n \n print '-b <bench> the bench to show.'\n print '-c <config> the config to show (GPU, 8888, 565, etc).'\n print '-d <dir> a directory containing bench_r<revision>_<scalar> files.'\n print '-e <file> file containing expected bench values/ranges.'\n print ' Will raise exception if actual bench values are out of range.'\n print ' See bench_expectations.txt for data format and examples.'\n print '-f <revision>[:<revision>] the revisions to use for fitting.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-i <time> the time to ignore (w, c, g, etc).'\n print ' The flag is ignored when -t is set; otherwise we plot all the'\n print ' times except the one specified here.'\n print '-l <title> title to use for the output graph'\n print '-m <representation> representation of bench value.'\n print ' See _ListAlgorithm class in bench_util.py.'\n print '-o <path> path to which to write output; writes to stdout if not specified'\n print '-r <revision>[:<revision>] the revisions to show.'\n print ' Negative <revision> is taken as offset from most recent revision.'\n print '-s <setting>[=<value>] a setting to show (alpha, scalar, etc).'\n print '-t <time> the time to show (w, c, g, etc).'\n print '-x <int> the desired width of the svg.'\n print '-y <int> the desired height of the svg.'\n print '--default-setting <setting>[=<value>] setting for those without.'", "def usage(err=''):\r\n m = '%s\\n' %err\r\n m += 'Default usage is to list Cases closed for the 30 days\\n'\r\n m += '\\n Example:\\n'\r\n m += ' closedcases -n 90 \\n' \r\n m += ' \\n'\r\n# m += ' 
closedcases -n 60 -s blast5 \\n'\r\n return m", "def main(argv=None):\n parser = ArgParser(\n description=\"Calculate percentiled data over a given coordinate by \"\n \"collapsing that coordinate. Typically used to convert realization \"\n \"data into percentiled data, but may calculate over any \"\n \"dimension coordinate. Alternatively, calling this CLI with a dataset\"\n \" containing probabilities will convert those to percentiles using \"\n \"the ensemble copula coupling plugin. If no particular percentiles \"\n \"are given at which to calculate values and no 'number of percentiles'\"\n \" to calculate are specified, the following defaults will be used: \"\n \"[0, 5, 10, 20, 25, 30, 40, 50, 60, 70, 75, 80, 90, 95, 100]\")\n parser.add_argument(\"input_filepath\", metavar=\"INPUT_FILE\",\n help=\"A path to an input NetCDF file to be processed\")\n parser.add_argument(\"output_filepath\", metavar=\"OUTPUT_FILE\",\n help=\"The output path for the processed NetCDF\")\n parser.add_argument(\"--coordinates\", metavar=\"COORDINATES_TO_COLLAPSE\",\n nargs=\"+\",\n help=\"Coordinate or coordinates over which to collapse\"\n \" data and calculate percentiles; e.g. \"\n \"'realization' or 'latitude longitude'. This argument \"\n \"must be provided when collapsing a coordinate or \"\n \"coordinates to create percentiles, but is redundant \"\n \"when converting probabilities to percentiles and may \"\n \"be omitted. This coordinate(s) will be removed \"\n \"and replaced by a percentile coordinate.\")\n parser.add_argument('--ecc_bounds_warning', default=False,\n action='store_true',\n help='If True, where calculated percentiles are '\n 'outside the ECC bounds range, raise a warning '\n 'rather than an exception.')\n group = parser.add_mutually_exclusive_group(required=False)\n group.add_argument(\"--percentiles\", metavar=\"PERCENTILES\",\n nargs=\"+\", default=None, type=float,\n help=\"Optional definition of percentiles at which to \"\n \"calculate data, e.g. 
--percentiles 0 33.3 66.6 100\")\n group.add_argument('--no-of-percentiles', default=None, type=int,\n metavar='NUMBER_OF_PERCENTILES',\n help=\"Optional definition of the number of percentiles \"\n \"to be generated, these distributed regularly with the \"\n \"aim of dividing into blocks of equal probability.\")\n\n args = parser.parse_args(args=argv)\n\n # Load Cube\n cube = load_cube(args.input_filepath)\n\n # Process Cube\n result = process(cube, args.coordinates, args.ecc_bounds_warning,\n args.percentiles, args.no_of_percentiles)\n\n # Save Cube\n save_netcdf(result, args.output_filepath)", "def cmd_help(self, commands=None, usage=False):\n if commands:\n usage = True\n commands = {self.approx.decmd(c.lower()) for c in commands}\n rejects = commands - self.approx.keys()\n for reject in rejects:\n self.put_pretty(\"No command named %r\" % reject)\n continue\n commands -= rejects\n if self.debug:\n assert not any(self.approx.encmd(r) in self.mod_commands for\n r in rejects)\n assert all(self.approx.encmd(c) in self.mod_commands for\n c in commands)\n if not commands:\n return\n requested = zip(commands, (self.approx[c] for c in commands))\n else:\n requested = self.approx.items()\n help = znc.CTable()\n help.AddColumn(\"Command\")\n help.AddColumn(\"Usage\" if usage else \"Description\")\n from itertools import zip_longest\n #\n for command, parser in requested:\n if usage:\n upre = \"usage: %s\" % command\n rest = (parser.format_usage()\n .replace(upre, \"\", 1)\n .replace(\"[-h] \", \"\", 1))\n desc = [l.strip() for l in rest.split(\"\\n\") if l.strip()]\n else:\n desc = [parser.description]\n for line, comm in zip_longest(desc, (command,), fillvalue=\"\"):\n help.AddRow()\n help.SetCell(\"Command\", comm)\n help.SetCell(\"Usage\" if usage else \"Description\", line)\n #\n s_line = znc.String()\n strung = []\n while help.GetLine(len(strung), s_line):\n strung.append(s_line.s)\n also = \" (<command> [-h] for details)\"\n strung[1] = strung[1].replace(len(also) * \" \", also, 1)\n self.put_pretty(\"\\n\".join(strung))", "def DoHelp(options, args):\n __pychecker__ = 'unusednames=options'\n if len(args) == 1 and args[0] in COMMAND_USAGE_TEXT:\n print(COMMAND_USAGE_TEXT[args[0]])\n else:\n raise gclient_utils.Error(\"unknown subcommand '%s'; see 'gclient help'\" %\n args[0])", "def main():\n known_args, unknown_args = parse_known_args()\n if not unknown_args:\n # return an error message if no command is provided\n sys.exit(\"Please provide a command to benchmark: $ humann_benchmark COMMAND\")\n try:\n process = subprocess.Popen(\" \".join(unknown_args),shell=True)\n except (EnvironmentError, subprocess.CalledProcessError):\n sys.exit(\"Unable to execute command: \" + \" \".join(unknown_args))\n pid=str(process.pid)\n start=time.time()\n max_memory=0\n while process.poll() is None:\n time.sleep(1)\n # while the process is running check on the memory use\n # get the pids of the main process and all children (and their children)\n pids=get_pids(pid)\n stdout=subprocess.check_output([\"ps\",\"--pid\",\",\".join(pids),\"-o\",\"pid,rss,command\"]).decode(\"utf-8\")\n print(\"\\n\"+stdout+\"\\n\")\n # remove the header from the process output\n status=[i.split() for i in filter(lambda x: x, stdout.split(\"\\n\")[1:])]\n # memory is the sum of all rss\n memory=sum(int(i[1]) for i in status)\n if memory > max_memory:\n max_memory=memory\n \n end=time.time()\n print(\"Time: {:.0f} minutes\".format((end-start)/60))\n print(\"Max Memory (RSS): {:.1f} GB\".format(max_memory*1.0/1024**2))", "def 
ShowMQueue(cmd_args=None, cmd_options={}):\n if not cmd_args:\n print \"Please specify the address of the ipc_mqueue whose details you want to print\"\n print ShowMQueue.__doc__\n return\n space = 0\n if \"-S\" in cmd_options:\n space = kern.GetValueFromAddress(cmd_options[\"-S\"], 'struct ipc_space *')\n mqueue = kern.GetValueFromAddress(cmd_args[0], 'struct ipc_mqueue *')\n wq_type = mqueue.data.pset.setq.wqset_q.waitq_type\n if int(wq_type) == 3:\n psetoff = getfieldoffset('struct ipc_pset', 'ips_messages')\n pset = unsigned(ArgumentStringToInt(cmd_args[0])) - unsigned(psetoff)\n print PrintPortSetSummary.header\n PrintPortSetSummary(kern.GetValueFromAddress(pset, 'struct ipc_pset *'), space)\n elif int(wq_type) == 2:\n portoff = getfieldoffset('struct ipc_port', 'ip_messages')\n port = unsigned(ArgumentStringToInt(cmd_args[0])) - unsigned(portoff)\n print PrintPortSummary.header\n PrintPortSummary(kern.GetValueFromAddress(port, 'struct ipc_port *'))\n else:\n print \"Invalid mqueue? (waitq type {:d} is invalid)\".format(int(wq_type))", "def treatCmdOpts(argv):\n baseName = os.path.basename(__file__)\n amc.cBaseName = colored(baseName, 'yellow')\n\n helpTxt = amc.cBaseName + ' analyses observation statistics file for selected GNSSs'\n\n # create the parser for command line arguments\n parser = argparse.ArgumentParser(description=helpTxt)\n\n parser.add_argument('--obsstat', help='observation statistics file', type=str, required=True)\n\n parser.add_argument('--freqs', help='select frequencies to use (out of {freqs:s}, default {freq:s})'.format(freqs='|'.join(gfzc.lst_freqs), freq=colored(gfzc.lst_freqs[0], 'green')), default=gfzc.lst_freqs[0], type=str, required=False, action=gco.freqtype_action, nargs='+')\n\n parser.add_argument('--cutoff', help='cutoff angle in degrees (default {mask:s})'.format(mask=colored('0', 'green')), default=0, type=int, required=False, action=gco.cutoff_action)\n\n parser.add_argument('--dbcvs', help='Add information to CVS database (default {cvsdb:s})'.format(cvsdb=colored(gco.CVSDB_OBSTLE, 'green')), required=False, type=str, default=gco.CVSDB_OBSTLE)\n\n parser.add_argument('--plot', help='displays interactive plots (default False)', action='store_true', required=False, default=False)\n\n parser.add_argument('--logging', help='specify logging level console/file (two of {choices:s}, default {choice:s})'.format(choices='|'.join(gco.lst_logging_choices), choice=colored(' '.join(gco.lst_logging_choices[3:5]), 'green')), nargs=2, required=False, default=gco.lst_logging_choices[3:5], action=gco.logging_action)\n\n # drop argv[0]\n args = parser.parse_args(argv[1:])\n\n # return arguments\n return args.obsstat, args.freqs, args.cutoff, args.dbcvs, args.plot, args.logging", "def ShowAllIPC(cmd_args=None):\n for t in kern.tasks:\n print GetTaskSummary.header + \" \" + GetProcSummary.header\n pval = Cast(t.bsd_info, 'proc *')\n print GetTaskSummary(t) + \" \" + GetProcSummary(pval)\n print PrintIPCInformation.header\n PrintIPCInformation(t.itk_space, False, False) + \"\\n\\n\"", "def disk():\n run(env.disk_usage_command % env)", "def capacitygroup_list(cmd_ctx, cpc, **options):\n cmd_ctx.execute_cmd(lambda: cmd_capacitygroup_list(cmd_ctx, cpc, options))", "def help(update, context):\n msg = \"\"\n msg += \"\\n/covid 7-Day-Incident per Million\"\n msg += \"\\n/daylio What did I do a year ago today?\"\n msg += \"\\n/f1last Results of the last race\"\n msg += \"\\n/f1stand Driver standings\"\n msg += \"\\n/f1next Time and place of the next race\"\n msg += \"\\n/fuel 
prices and consump. (args: Xeur Ykm)\"\n msg += \"\\n/ip Outside ip address\"\n msg += \"\\n/rate Exchange rates (args: Xeur/Yhuf)\"\n msg += \"\\n/rss check rss feeds for new content\"\n msg += \"\\n/sun Time of sunrise and sunset\"\n msg += \"\\n/xkcd Sends last comic image and alt\"\n msg.rstrip()\n update.message.reply_text(msg)", "def _GenAppcommandsUsage(cmd, printer):\n # pylint: disable=too-many-arguments,unused-argument\n def Usage(shorthelp=0, writeto_stdout=0, detailed_error=None,\n exitcode=None, show_cmd=None, show_global_flags=False):\n \"\"\"A replacement for app.usage.\"\"\"\n printer('%s: Incorrect usage; details below.' % show_cmd)\n printer('Correct usage is as follows:')\n printer('')\n for line in (' ' + cmd.__doc__.rstrip()).splitlines():\n printer(line)\n # Print out str(FLAGS) for just the UICmd-specific flags.\n tmp_flags = flags.FlagValues()\n unused_cmd = type(cmd)(show_cmd, tmp_flags)\n prefix = _UICMD_MODULE_NAME + ':\\n'\n flag_str = tmp_flags.ModuleHelp(_UICMD_MODULE_NAME)\n flag_str = flag_str.lstrip()\n if flag_str.startswith(prefix):\n flag_str = flag_str[len(prefix):]\n if flag_str:\n printer('')\n printer('flags:')\n for line in flag_str.splitlines():\n printer(line)\n if detailed_error is not None:\n printer('')\n printer('The incorrect usage is as follows:')\n printer('')\n for line in unicode(detailed_error).splitlines():\n printer(' ' + line)\n\n return Usage", "async def help(self, context):\n prefix = config.BOT_PREFIX\n user=context.message.author\n if not isinstance(prefix, str):\n prefix = prefix[0]\n embed = discord.Embed(title=\"Help\", description=\"List of available commands:\", color=0x00FF00)\n for i in self.bot.cogs:\n cog = self.bot.get_cog(i.lower())\n commands = cog.get_commands()\n command_list = [command.name for command in commands if not command.hidden or context.message.author.id in config.OWNERS]\n command_description = [command.help for command in commands if not command.hidden or context.message.author.id in config.OWNERS]\n help_text = '\\n'.join(f'{prefix}{n} - {h}' for n, h in zip(command_list, command_description))\n embed = discord.Embed(title=f\"Commands in {i.capitalize()} Cog\", description=f'```{help_text}```', color=0x00FF00)\n await user.send(embed=embed)\n if not isinstance(context.message.channel, discord.channel.DMChannel):\n await context.send(f\"DM sent to {user.mention}\")\n await context.message.delete()", "async def run_mpc(self) -> Dict[str, Dict[Metric, int]]:\n pass", "def build_command(args, parser):\n cmd = \"ipmitool -I lanplus\"\n if not args.host:\n print \"\\nERROR: hostname is required.\\n\"\n parser.print_help()\n sys.exit(1)\n else:\n cmd += ' -H ' + args.host\n if args.port:\n cmd += ' -p ' + args.port\n if not args.user:\n print \"\\nERROR: username is required.\\n\"\n parser.print_help()\n sys.exit(1)\n else:\n cmd += ' -U ' + args.user\n if args.passwd:\n cmd += ' -P ' + args.passwd\n cmd += ' dcmi power reading'\n if args.interval:\n global INTERVAL\n INTERVAL = args.interval\n if args.nread:\n global NREAD\n NREAD = args.nread\n else:\n global INFINITY\n INFINITY = True\n if args.store:\n global STORE\n STORE = True\n return cmd", "def HMC_Help():\n os.system(\"cls\")\n while True:\n print((\"\\n\\n\",\"Help\".center(50)))\n print_list = [\"ManagedSystem\",\"LogicalPartition\",\"VirtualIOServer\",\"Cluster\",\"Performance Capaity Monitoring\",\"Return to Main Menu\"]\n choice = int(print_obj.print_on_screen(print_list))\n directory = os.path.dirname(os.path.dirname(__file__))\n if choice == 
1:\n path = directory+\"/help/ManagedSystem\"\n files = [f for f in os.listdir(path)if os.path.isfile(os.path.join(path,f))]\n for f in files :\n print((open(path+\"/%s\"%(f)).read()))\n elif choice == 2:\n path = directory+\"/help/LogicalPartition\"\n files = [f for f in os.listdir(path)if os.path.isfile(os.path.join(path,f))]\n for f in files :\n print((open(path+\"/%s\"%(f)).read()))\n elif choice == 3:\n path = directory+\"/help/VirtualIOServer\"\n files = [f for f in os.listdir(path)if os.path.isfile(os.path.join(path,f))]\n for f in files :\n print((open(path+\"/%s\"%(f)).read()))\n elif choice == 4:\n print((open(directory+\"/help/Cluster.txt\").read()))\n elif choice == 5:\n print((open(directory+\"/help/PerformanceCapacityMonitoring.txt\").read()))\n elif choice == 6:\n os.system(\"cls\")\n return\n else:\n print(\"\\nTry using Valid option\")\n back_to_menu()", "def ShowIPCVoucherAttributeControl(cmd_args=[], cmd_options={}):\n if not cmd_args:\n raise ArgumentError(\"Please provide correct arguments.\")\n ivac = kern.GetValueFromAddress(cmd_args[0], 'ipc_voucher_attr_control_t')\n print GetIPCVoucherAttrControlSummary.header\n print GetIPCVoucherAttrControlSummary(ivac)\n if config['verbosity'] > vHUMAN:\n cur_entry_index = 0\n last_entry_index = unsigned(ivac.ivac_table_size)\n print \"index \" + GetIPCVoucherAttributeEntrySummary.header\n while cur_entry_index < last_entry_index:\n print \"{: <5d} \".format(cur_entry_index) + GetIPCVoucherAttributeEntrySummary(addressof(ivac.ivac_table[cur_entry_index]))\n cur_entry_index += 1", "def test_cw_metrics(self):\n\n instances = set()\n result = self.cw_client.list_metrics(Namespace=\"CWAgent\", MetricName=\"cpu_usage_system\")\n for i in result[\"Metrics\"]:\n instances.add(i[\"Dimensions\"][0][\"Value\"])\n\n for key, value in self.cdk_output_map.items():\n if \"Instance\" in key:\n self.assertTrue(value in instances)", "def main( argv = None ):\n\n if argv == None: argv = sys.argv\n\n # setup command line parser\n parser = E.OptionParser( version = \"%prog version: $Id$\", \n usage = globals()[\"__doc__\"] )\n\n parser.add_option(\"--category\", dest=\"category\", type=\"choice\",\n choices = (\"B\", \"C\"), help=\"supply help\" )\n\n ## add common options (-h/--help, ...) and parse command line \n (options, args) = E.Start( parser, argv = argv )\n\n data = getData(options.stdin)\n if options.category == \"B\":\n options.stdout.write(\"Category B pathway\\tKO\\tGenes\\tDescriptions\\n\")\n for pathway, descriptions in b2ko(data).iteritems():\n options.stdout.write(\"\\t\".join([pathway, \"; \".join(descriptions[0]), \"; \".join(descriptions[1]), \"; \".join(descriptions[2])]) + \"\\n\")\n\n elif options.category == \"C\":\n options.stdout.write(\"Category C pathway\\tKO\\tGenes\\tDescriptions\\n\")\n for pathway, descriptions in c2ko(data).iteritems():\n options.stdout.write(\"\\t\".join([pathway, \"; \".join(descriptions[0]), \"; \".join(descriptions[1]), \"; \".join(descriptions[2])]) + \"\\n\")\n else:\n raise ValueError(\"must specify the category of pathway\")\n\n\n ## write footer and output benchmark information.\n E.Stop()", "def help_opt(self):\n print(OPTIONS)", "def runMCMC(df, cents, show=False):\n if type(cents) is not list:\n cents = [cents]\n numCents = len(cents)\n p = None\n \n # Tau = the precision of the normal distribution (of the above peaks)\n taus = 1. 
/ pm.Uniform('stds', 0, 100, size=numCents)**2 # tau = 1/sigma**2\n centers = pm.Normal('centers', cents, [0.0025 for i in cents],\n size=numCents)\n \n if numCents == 2: # Assignment probability\n p = pm.Uniform('p', 0, 1)\n assignment = pm.Categorical('asisgnment', [p, 1-p],\n size=len(df.intervals))\n @pm.deterministic\n def center_i(assignment=assignment, centers=centers):\n return centers[assignment]\n @pm.deterministic\n def tau_i(assignment=assignment, taus=taus):\n return taus[assignment]\n observations = pm.Normal('obs', center_i, tau_i, value=df.intervals,\n observed=True)\n # Create the model 2 peaks\n mcmc = pm.MCMC([p, assignment, observations, taus, centers])\n \n else:\n observations = pm.Normal('obs', value=df.intervals, observed=True)\n mcmc = pm.MCMC([observations, taus, centers]) # Create model, 1 peak\n \n # Run the model\n mcmc.sample(50000)\n center_trace = mcmc.trace(\"centers\")[:]\n try:\n clusts = [center_trace[:,i] for i in range(numCents)]\n except:\n clusts = [center_trace]\n \n if show:\n for i in range(numCents):\n plt.hist(center_trace[:,i], bins=50, histtype='stepfilled',\n color=['blue', 'red'][i], alpha=0.7)\n plt.show()\n \n print('Evolved clusters at:')\n print([np.mean(c) for c in clusts])\n return clusts", "def main(self, names, options) :\n names = self.sanitizeNames(options, names)\n suffix = (options[\"groups\"] and \"Group\") or \"User\" \n printernames = options[\"printer\"].split(\",\")\n \n if not options[\"list\"] :\n percent = Percent(self)\n percent.display(\"%s...\" % _(\"Extracting datas\"))\n printers = self.storage.getMatchingPrinters(options[\"printer\"])\n entries = getattr(self.storage, \"getMatching%ss\" % suffix)(\",\".join(names))\n if not options[\"list\"] :\n percent.setSize(len(printers) * len(entries))\n \n if options[\"list\"] :\n for printer in printers :\n for entry in entries :\n pqentry = getattr(self.storage, \"get%sPQuota\" % suffix)(entry, printer)\n if pqentry.Exists :\n print \"%s@%s\" % (entry.Name, printer.Name)\n print \" %s\" % (_(\"Page counter : %s\") % pqentry.PageCounter)\n print \" %s\" % (_(\"Lifetime page counter : %s\") % pqentry.LifePageCounter)\n print \" %s\" % (_(\"Soft limit : %s\") % pqentry.SoftLimit)\n print \" %s\" % (_(\"Hard limit : %s\") % pqentry.HardLimit)\n print \" %s\" % (_(\"Date limit : %s\") % pqentry.DateLimit)\n print \" %s (Not supported yet)\" % (_(\"Maximum job size : %s\") % ((pqentry.MaxJobSize and (_(\"%s pages\") % pqentry.MaxJobSize)) or _(\"Unlimited\")))\n if hasattr(pqentry, \"WarnCount\") :\n print \" %s\" % (_(\"Warning banners printed : %s\") % pqentry.WarnCount)\n print\n elif options[\"delete\"] : \n percent.display(\"\\n%s...\" % _(\"Deletion\"))\n getattr(self.storage, \"deleteMany%sPQuotas\" % suffix)(printers, entries)\n percent.display(\"\\n\")\n else :\n skipexisting = options[\"skipexisting\"]\n used = options[\"used\"]\n if used :\n used = used.strip()\n try :\n int(used)\n except ValueError :\n raise CPSCommandLineError, _(\"Invalid used value %s.\") % used\n \n increase = options[\"increase\"]\n if increase :\n try :\n increase = int(increase.strip())\n except ValueError :\n raise CPSCommandLineError, _(\"Invalid increase value %s.\") % increase\n \n noquota = options[\"noquota\"]\n reset = options[\"reset\"] \n hardreset = options[\"hardreset\"]\n softlimit = hardlimit = None\n if not noquota :\n if options[\"softlimit\"] :\n try :\n softlimit = int(options[\"softlimit\"].strip())\n if softlimit < 0 :\n raise ValueError\n except ValueError : \n raise 
CPSCommandLineError, _(\"Invalid softlimit value %s.\") % options[\"softlimit\"]\n if options[\"hardlimit\"] :\n try :\n hardlimit = int(options[\"hardlimit\"].strip())\n if hardlimit < 0 :\n raise ValueError\n except ValueError : \n raise CPSCommandLineError, _(\"Invalid hardlimit value %s.\") % options[\"hardlimit\"]\n if (softlimit is not None) and (hardlimit is not None) and (hardlimit < softlimit) : \n # error, exchange them\n self.printInfo(_(\"Hard limit %i is less than soft limit %i, values will be exchanged.\") % (hardlimit, softlimit))\n (softlimit, hardlimit) = (hardlimit, softlimit)\n if hardlimit is None : \n hardlimit = softlimit\n if hardlimit is not None :\n self.printInfo(_(\"Undefined hard limit set to soft limit (%s).\") % str(hardlimit))\n if softlimit is None : \n softlimit = hardlimit\n if softlimit is not None :\n self.printInfo(_(\"Undefined soft limit set to hard limit (%s).\") % str(softlimit))\n \n self.storage.beginTransaction() \n try :\n if options[\"add\"] :\n percent.display(\"\\n%s...\\n\" % _(\"Creation\"))\n if not entries : \n self.printInfo(_(\"No entry matches %s. Please use pkusers to create them first.\") % (\" \".join(names)), \"warn\")\n \n factory = globals()[\"Storage%sPQuota\" % suffix]\n for printer in printers :\n pname = printer.Name\n for entry in entries :\n ename = entry.Name\n pqkey = \"%s@%s\" % (ename, pname)\n pqentry = factory(self.storage, entry, printer)\n self.modifyPQEntry(pqkey, pqentry, noquota, \\\n softlimit, hardlimit, \\\n increase, reset, \\\n hardreset, suffix, used)\n oldpqentry = getattr(self.storage, \"add%sPQuota\" % suffix)(pqentry)\n if oldpqentry is not None : \n if skipexisting :\n self.logdebug(\"%s print quota entry %s@%s already exists, skipping.\" % (suffix, ename, pname))\n else : \n self.logdebug(\"%s print quota entry %s@%s already exists, will be modified.\" % (suffix, ename, pname))\n self.modifyPQEntry(pqkey, oldpqentry, noquota, \\\n softlimit, hardlimit, \\\n increase, reset, \\\n hardreset, suffix, used)\n oldpqentry.save() \n percent.oneMore()\n else : \n percent.display(\"\\n%s...\\n\" % _(\"Modification\"))\n for printer in printers :\n for entry in entries :\n pqkey = \"%s@%s\" % (entry.Name, printer.Name)\n pqentry = getattr(self.storage, \"get%sPQuota\" % suffix)(entry, printer)\n if pqentry.Exists : \n self.modifyPQEntry(pqkey, pqentry, noquota, \\\n softlimit, hardlimit, \\\n increase, reset, \\\n hardreset, suffix, used)\n pqentry.save() \n percent.oneMore()\n except : \n self.storage.rollbackTransaction()\n raise\n else : \n self.storage.commitTransaction()\n \n if not options[\"list\"] :\n percent.done()", "def help_command(server, output, conf):\n server.tell(output.name, 'Available commands:')\n for key in COMMANDS.keys():\n cmd_func = COMMANDS[key]\n if cmd_func.__doc__:\n server.tell(output.name, '%s: %s' % (key[1:], cmd_func.__doc__))\n else:\n server.tell(output.name, key[1:])\n return", "def mem_info(self):\n\t\t\tavailable, total = cuda.mem_get_info() #Note: pycuda._driver.LogicError: cuMemGetInfo failed: context is destroyed\n\t\t\tprint(\"Available: %.2f GB\\nTotal: %.2f GB\"%(available/1e9, total/1e9))", "def print_usage():\n usage_msg = \"\"\"\n%s.py -H <host or group> -P <path> -M <mode>\n\nUsage:\n -h, --help\n Print detailed help screen\n -H, --hostname=STRING\n Host name or group of hosts\n -V, --version\n Print version information\n -P, --path=STRING\n Path to rancid var directory. 
Usually the dir contains a logs dirs and hostgroup dirs\n Example : /usr/local/rancid/var\n -M, --mod=STRING\n Plugin mod. Must be one of the following : ping, hash, config, cards, filter, qos\n *ping:\n Check if all host in the hostgroup are up from the rancid point of view.\n It uses the .up file to determine the lists of host to look for\n *hash:\n Check if the firmware hash is different from the ref one (or from the previous one)\n *config:\n Check if the configuration has changed for the host / group (notify diff)\n *cards:\n Specific to 8600 models. Check the hardware cards plugged to the host (notify diff).\n *filter:\n Specific to ES-470. Check the filters (notify diff)\n *qos:\n Specific to ES-470. Check the qos values (notify diff)\n -u, --url=URL\n URL to submit passive results to Shinken Receiver with HTTP\n Need a host and service to send result.\n -a, --passive-host=STRING\n Required if not in plugin mod to send data to Shinken ws_arbiter\n -b, --passive-service=STRING\n Required if not in plugin mod to send data to Shinken ws_arbiter\n\"\"\" % PLUGIN_NAME\n print usage_msg", "def stats(caesar, input):\n commands = {}\n users = {}\n channels = {}\n\n ignore = set(['f_note', 'startup', 'message', 'noteuri'])\n for (name, user), count in caesar.stats.items(): \n if name in ignore: continue\n if not user: continue\n\n if not user.startswith('#'): \n try: users[user] += count\n except KeyError: users[user] = count\n else: \n try: commands[name] += count\n except KeyError: commands[name] = count\n\n try: channels[user] += count\n except KeyError: channels[user] = count\n\n comrank = sorted([(b, a) for (a, b) in commands.iteritems()], reverse=True)\n userank = sorted([(b, a) for (a, b) in users.iteritems()], reverse=True)\n charank = sorted([(b, a) for (a, b) in channels.iteritems()], reverse=True)\n\n # most heavily used commands\n creply = 'most used commands: '\n for count, command in comrank[:10]: \n creply += '%s (%s), ' % (command, count)\n caesar.say(creply.rstrip(', '))\n\n # most heavy users\n reply = 'power users: '\n for count, user in userank[:10]: \n reply += '%s (%s), ' % (user, count)\n caesar.say(reply.rstrip(', '))\n\n # most heavy channels\n chreply = 'power channels: '\n for count, channel in charank[:3]: \n chreply += '%s (%s), ' % (channel, count)\n caesar.say(chreply.rstrip(', '))", "def metrics(self, account_id):\n from pureport_client.commands.accounts.metrics import Command\n return Command(self.client, account_id)", "def procs_calculate_axyzc(molecules, n_cores=-1, show_progress=True, scr=None, cmd=XTB_CMD):\n results = None\n return results", "def cmd_help(args):", "def help(bot, sender, sendmsg, label, args):\n\n clist = commands.commands\n csort = sorted(clist.values(), key=lambda c: c.__name__.lower())\n\n if len(args) > 0:\n page = int(args[0]) - 1\n else:\n page = 0\n\n pages = len(clist) // 10 + 1\n\n sendmsg(\"-- Help (Page {} of {}) --\".format(page + 1, pages))\n for i in range(10):\n if i >= len(csort):\n break\n\n command = csort[i + (page * 10)]\n sendmsg(\"{}: {}\".format(command.__name__, command.__doc__))", "def process_meter_message(self, d):\n dpid = int(d.get(\"dpid\", 0))\n dp = self.dpset.get(dpid)\n if not dp:\n return \"Datapath does not exist!\"\n\n ofproto = dp.ofproto\n parser = dp.ofproto_parser\n\n command = {\n 'add': ofproto.OFPMC_ADD,\n 'mod': ofproto.OFPMC_MODIFY,\n 'del': ofproto.OFPMC_DELETE,\n }\n cmd = command.get(d[\"operation\"], ofproto.OFPMC_ADD)\n\n meter_id = d[\"meter_id\"]\n\n flags = 0\n bands = []\n if 
\"flags\" in d: # Ryu's format\n print(d['flags'])\n for f in d['flags']:\n flags += 0x01 if f == 'KBPS' else 0\n flags += 0x02 if f == 'PKTPS' else 0\n flags += 0x04 if f == 'BURST' else 0\n flags += 0x08 if f == 'STATS' else 0\n\n for band in d[\"bands\"]:\n if band['type'] == 'DROP':\n bands += [parser.OFPMeterBandDrop(rate=band['rate'],\n burst_size=band['burst_size'])]\n elif band['type'] == 'DSCP_REMARK':\n bands += [parser.OFPMeterBandDscpRemark(rate=band['rate'],\n burst_size=band['burst_size'], prec_level=band['prec_level'])]\n\n else: # FlowManager's format\n flags += 0x01 if d['OFPMF_KBPS'] else 0\n flags += 0x02 if d['OFPMF_PKTPS'] else 0\n flags += 0x04 if d['OFPMF_BURST'] else 0\n flags += 0x08 if d['OFPMF_STATS'] else 0\n\n # Flags must have KBPS or PKTPS\n flags = flags if (flags & 0x03) else (flags | 0x01)\n\n for band in d[\"bands\"]:\n #mtype = type_convert.get(band[0])\n if band[0] == 'DROP':\n bands += [parser.OFPMeterBandDrop(rate=band[1],\n burst_size=band[2])]\n elif band[0] == 'DSCP_REMARK':\n bands += [parser.OFPMeterBandDscpRemark(rate=band[1],\n burst_size=band[2], prec_level=band[3])]\n\n # TODO: catch some errors\n meter_mod = parser.OFPMeterMod(dp, cmd, flags, meter_id, bands)\n try:\n dp.send_msg(meter_mod)\n except KeyError as e:\n return e.__repr__()\n except Exception as e:\n return e.__repr__()\n\n return \"Message sent successfully.\"", "def getMemDetail(self):\n mem = {}\n if self.type in ['E', 'T', 'S', 'K', 'A', 'AX', 'W']:\n m = \"The percentage of CP memory utilization:\\s*([\\d\\.]+)%\\s+DP memory utilization:\\s*([\\d\\.]+)%\"\n rt = re.search(m, self.dut.cli(\"show memory detail\"))\n if rt:\n mem = {\"cp\": float(rt.groups()[0]), \"dp\": float(rt.groups()[1])}\n return mem", "def do_stats(cs, args):\n stats_info = cs.containers.stats(args.container)\n utils.print_dict(stats_info)", "def info(self):\n import string\n results = self.info_list()\n labels = \"%-8s %-9s %-4s %-8s %-8s %-4s\" % \\\n ('MACHINE','CPU','GHZ','MB TOTAL',\n 'MB FREE','LOAD')\n print labels\n for i in range(len(self.workers)):\n name = string.split(self.workers[i].host,'.')[0]\n res = results[i]\n s = \"%-8s %2dx%-6s %4.1f %8.1f %8.1f %4.2f\" % \\\n (name[-8:], res['cpu_count'],res['cpu_type'][-6:], \\\n res['cpu_speed'],res['mem_total'],res['mem_free'],\\\n res['load_1'])\n print s", "def test_shortopt(self):\n pp = ParlaiParser(False, False)\n pp.add_argument(\"-m\", \"--model\")\n pp.add_argument(\"-mtw\", \"--multitask-weights\")\n opt = pp.parse_args([\"-m\", \"memnn\"])\n print(opt)", "def gmcp_setup_data(self):\n yield \"Core.Supports.Debug\", 20\n yield \"Core.Supports.Set\", [ \"MG.char 1\", \"MG.room 1\", \"comm.channel 1\" ]", "def mdadmConfEntry(self):\n if self.memberDevices is None or not self.mdadmFormatUUID:\n raise errors.DeviceError(\"array is not fully defined\", self.name)\n\n # containers and the sets within must only have a UUID= parameter\n if self.type == \"mdcontainer\" or self.type == \"mdbiosraidarray\":\n fmt = \"ARRAY %s UUID=%s\\n\"\n return fmt % (self.path, self.mdadmFormatUUID)\n\n fmt = \"ARRAY %s level=%s num-devices=%d UUID=%s\\n\"\n return fmt % (self.path, self.level, self.memberDevices, self.mdadmFormatUUID)", "def cm_analysis(cm=None, y_true=None, y_pred=None, labels=None, ymap=None, figsize=(10,10), filename=None):\n if cm is None:\n if ymap is not None:\n y_pred = [ymap[yi] for yi in y_pred]\n y_true = [ymap[yi] for yi in y_true]\n labels = [ymap[yi] for yi in labels]\n cm = confusion_matrix(y_true, y_pred, labels=labels)\n\n 
cm_sum = np.sum(cm, axis=1, keepdims=True)\n cm_perc = cm / cm_sum.astype(float) * 100\n annot = np.empty_like(cm).astype(str)\n nrows, ncols = cm.shape\n for i in range(nrows):\n for j in range(ncols):\n c = cm[i, j]\n p = cm_perc[i, j]\n if i == j:\n s = cm_sum[i]\n annot[i, j] = '%.1f%%\\n%d/%d' % (p, c, s)\n elif c == 0:\n annot[i, j] = ''\n else:\n annot[i, j] = '%.1f%%\\n%d' % (p, c)\n cm = pd.DataFrame(cm, index=labels, columns=labels)\n cm.index.name = 'Actual'\n cm.columns.name = 'Predicted'\n fig, ax = plt.subplots(figsize=figsize)\n sns.heatmap(cm, annot=annot, fmt='', ax=ax)\n if filename:\n plt.savefig(filename)", "def testHClusters(cntsDf, members, cols=None, min_count=5):\n\n if cols is None:\n cols = cntsDf.columns\n\n tot = cntsDf.sum()\n Ncells = tot.sum()\n uCDR3 = list(cntsDf.index)\n\n results = []\n\n for cid, m in members.items():\n notM = [i for i in range(cntsDf.shape[0]) if not i in m]\n obs = np.concatenate((np.sum(cntsDf[cols].values[m, :], axis=0, keepdims=True),\n np.sum(cntsDf[cols].values[notM, :], axis=0, keepdims=True)), axis=0)\n if np.sum(obs, axis=1)[0] > min_count:\n \"\"\"Inner product of the marginal totals along both axes, divided by total cells\"\"\"\n expect = np.dot(np.sum(obs, keepdims=True, axis=1),\n np.sum(obs, keepdims=True, axis=0)) / Ncells\n with warnings.catch_warnings():\n warnings.simplefilter('ignore')\n chi2 = (obs - expect)**2 / expect\n\n sum_chi2 = np.sum(chi2)\n\n degf = len(cols) - 1\n pvalue = 1 - stats.chi2.cdf(sum_chi2, degf)\n results.append({'cid':cid,\n 'chi2':sum_chi2,\n 'pvalue':pvalue,\n 'observed':tuple(obs[0, :]),\n 'observed_prop':(obs / np.sum(obs, axis=0))[0, :],\n 'expected':tuple(expect[0, :]),\n 'expected_prop':(expect / np.sum(obs, axis=0))[0, :],\n 'members':tuple(m),\n 'labels':cols})\n else:\n results.append({'cid':cid,\n 'chi2':np.nan,\n 'pvalue':np.nan,\n 'observed':tuple(obs[0, :]),\n 'observed_prop': (obs / np.sum(obs, axis=0))[0, :],\n 'expected':(np.nan, )*len(cols),\n 'expected_prop': (np.nan, )*len(cols),\n 'members':tuple(m),\n 'labels':cols})\n resDf = pd.DataFrame(results)\n\n if 'adjustwithin' in sys.modules:\n resDf.loc[:, 'FWER-pvalue'] = adjustnonnan(resDf['pvalue'], method='holm')\n resDf.loc[:, 'FDR-qvalue'] = adjustnonnan(resDf['pvalue'], method='fdr_bh')\n return resDf.set_index('cid')", "def main():\n options = docopt(__doc__)\n\n # In case the user asked for verbose logging, increase\n # the log level to debug.\n if options[\"--verbose\"] > 0:\n logging.basicConfig(level=logging.DEBUG)\n LOGGER.setLevel(logging.DEBUG)\n\n LOGGER.debug(\n \"Received options: %s\",\n options,\n )\n\n billing_account_id = _get_billing_account_id()\n member_accounts = _get_member_accounts(\n billing_account_id=billing_account_id,\n options=options,\n )\n _flush_out(accounts=member_accounts, options=options)\n\n return 0", "def show_command_multiple(self, command, arglist, vdc=None, parser=None, optdict={}):\n self.logger.debug(\"run multiple show commands {} {}\".format(command, str(arglist)))\n output = \"\"\n if isinstance(arglist, str):\n arglist = [arglist]\n for vdcname in vdc:\n self.switchto_vdc(vdcname)\n if len(vdc) > 1:\n output = output + \"\\nvdc {}: \\n\".format(self.get_current_vdc())\n for a in arglist:\n self.logger.debug(\"run show commands {} {} in vdc {}\".format(command, a, vdcname))\n if parser is not None:\n scratch = parser(self._send_xml_cli_show(\"{} {}\".format(command, a)), **optdict)\n if scratch is None:\n output = output + \"Command '{} {}' returned no 
output\\n\".format(command, a)\n else:\n output = output + scratch\n else:\n output = output + self._send_xml_cli_show(\"{} {}\".format(command, a))\n self.logger.debug(\"multiple show commands output {}\".format(output))\n return output", "def collect_metrics():\n p = os.path.join(os.sep, \"mnt\", \"glusterfs\")\n mount_stats = os.statvfs(p)\n # block size * total blocks\n total_space = mount_stats.f_blocks * mount_stats.f_bsize\n free_space = mount_stats.f_bfree * mount_stats.f_bsize\n # capsize only operates on i64 values\n used_space = total_space - free_space\n gb_used = used_space / 1024 / 1024 / 1024\n\n # log!(format!(\"Collecting metric gb-used {}\", gb_used), Info)\n add_metric(\"gb-used\", \"{}\".format(gb_used))", "def help_help(self):\n print(\"List commands or print details about a command\")", "def command_help(args):\n\tprint_usage()\n\treturn 0", "def qc_metrics(self, files_in, qc_files):\n self.cmd(\"{samtools} index {bam_in}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in=files_in[0],\n ),\n shell=True)\n self.cmd(\"{samtools} idxstats {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[0],\n ),\n shell=True,\n log_output=True)\n self.cmd(\"{samtools} flagstat {bam_in} | tee {qc_file}\"\n .format(\n samtools=self.cmds[\"samtools\"],\n bam_in = files_in[0],\n qc_file = qc_files[1],\n ),\n shell=True,\n log_output=True)\n \n self.checkpoint(qc_files[0])\n self.checkpoint(qc_files[1])\n self.checkpoint(qc_files[2])", "def print_metric_groups(cmd_ctx, client, metric_groups, resource_filter):\n\n if not isinstance(metric_groups, (list, tuple)):\n metric_groups = [metric_groups]\n\n properties = {\n 'anticipated-frequency-seconds': MIN_ANTICIPATED_FREQUENCY,\n 'metric-groups': metric_groups,\n }\n mc = client.metrics_contexts.create(properties)\n mg_values = wait_for_metrics(mc, metric_groups)\n filtered_object_values = list() # of MetricObjectValues\n\n if not mg_values:\n\n mg_name = metric_groups[0] # just pick any\n res_class = zhmcclient._metrics._resource_class_from_group(mg_name)\n mg_def = zhmcclient.MetricGroupDefinition(\n name=mg_name, resource_class=res_class, metric_definitions=[])\n\n else:\n\n mg_def = mc.metric_group_definitions[mg_values.name]\n\n filter_cpc = None\n filter_partition = None\n filter_lpar = None\n filter_adapter = None\n filter_nic = None\n for r_class, r_name in resource_filter:\n if r_class == 'cpc' and r_name:\n filter_cpc = client.cpcs.find(name=r_name)\n elif r_class == 'partition' and r_name:\n assert filter_cpc\n filter_partition = filter_cpc.partitions.find(name=r_name)\n elif r_class == 'logical-partition' and r_name:\n assert filter_cpc\n filter_lpar = filter_cpc.lpars.find(name=r_name)\n elif r_class == 'adapter' and r_name:\n assert filter_cpc\n filter_adapter = filter_cpc.adapters.find(name=r_name)\n elif r_class == 'nic' and r_name:\n assert filter_partition\n filter_nic = filter_partition.nics.find(name=r_name)\n\n resource_class = mg_def.resource_class\n\n for ov in mg_values.object_values:\n included = False\n if resource_class == 'cpc':\n if not filter_cpc:\n included = True\n elif ov.resource_uri == filter_cpc.uri:\n included = True\n elif resource_class == 'partition':\n if not filter_cpc:\n included = True\n elif ov.resource.manager.cpc.uri == filter_cpc.uri:\n if not filter_partition:\n included = True\n elif ov.resource_uri == filter_partition.uri:\n included = True\n elif resource_class == 'logical-partition':\n if not filter_cpc:\n included = 
True\n elif ov.resource.manager.cpc.uri == filter_cpc.uri:\n if not filter_lpar:\n included = True\n elif ov.resource_uri == filter_lpar.uri:\n included = True\n elif resource_class == 'adapter':\n if not filter_cpc:\n included = True\n elif ov.resource.manager.cpc.uri == filter_cpc.uri:\n if not filter_adapter:\n included = True\n elif ov.resource_uri == filter_adapter.uri:\n included = True\n elif resource_class == 'nic':\n if not filter_cpc:\n included = True\n elif ov.resource.manager.partition.manager.cpc.uri == \\\n filter_cpc.uri:\n if not filter_partition:\n included = True\n elif ov.resource.manager.partition.uri == \\\n filter_partition.uri:\n if not filter_nic:\n included = True\n elif ov.resource_uri == filter_nic.uri:\n included = True\n else:\n raise ValueError(\n \"Invalid resource class: {}\".format(resource_class))\n\n if included:\n filtered_object_values.append(ov)\n\n resource_classes = [f[0] for f in resource_filter]\n\n cmd_ctx.spinner.stop()\n print_object_values(filtered_object_values, mg_def, resource_classes,\n cmd_ctx.output_format, cmd_ctx.transpose)\n\n mc.delete()" ]
[ "0.6029862", "0.59694433", "0.593867", "0.5872437", "0.57943094", "0.56738853", "0.5640134", "0.5486397", "0.5422634", "0.53095937", "0.5250618", "0.5230403", "0.510633", "0.5087089", "0.50808835", "0.5045866", "0.49836123", "0.4982057", "0.49766484", "0.4954867", "0.49396986", "0.49386987", "0.49295557", "0.49147475", "0.49089444", "0.49000442", "0.4887093", "0.48772013", "0.48531625", "0.483787", "0.48343217", "0.48121184", "0.48026255", "0.4751185", "0.47390375", "0.47351623", "0.47349462", "0.47295198", "0.4729289", "0.4728753", "0.47154427", "0.4713606", "0.4706581", "0.46934152", "0.4690462", "0.468788", "0.46848005", "0.46822557", "0.46820882", "0.4676374", "0.46703193", "0.4668666", "0.46592528", "0.46585327", "0.4653381", "0.46364546", "0.46189743", "0.46168122", "0.4602729", "0.4594839", "0.4591365", "0.45850497", "0.45848668", "0.4577522", "0.45763347", "0.4569936", "0.4559621", "0.45540643", "0.45513278", "0.4550957", "0.45502695", "0.45483318", "0.45437664", "0.45420456", "0.45378047", "0.45366415", "0.45352352", "0.45347136", "0.45214626", "0.45186377", "0.45142433", "0.4512374", "0.45081368", "0.45076117", "0.45057982", "0.4504116", "0.4502744", "0.44987705", "0.44927007", "0.44910753", "0.44846767", "0.4484044", "0.44783044", "0.4477386", "0.4475575", "0.44750977", "0.44736955", "0.4468936", "0.4466711", "0.44614774" ]
0.6058172
0
A decorator that uses cProfile to profile a function
def profile(fnc):
    def inner(*args, **kwargs):
        pr = cProfile.Profile()
        pr.enable()
        retval = fnc(*args, **kwargs)
        pr.disable()
        s = io.StringIO()
        sortby = 'cumulative'
        ps = pstats.Stats(pr, stream=s).sort_stats(sortby)
        ps.print_stats()
        logging.info(s.getvalue())
        return retval
    return inner
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def profile(func):\n def wrapper(*args, **kwargs):\n profile_filename = func.__name__ + '.prof'\n profiler = cProfile.Profile()\n result = profiler.runcall(func, *args, **kwargs)\n profiler.dump_stats(profile_filename)\n return result\n return wrapper", "def profile(func):\n\n def wrapper(*args, **kwargs):\n profile_filename = func.__name__ + \".prof\"\n profiler = cProfile.Profile()\n result = profiler.runcall(func, *args, **kwargs)\n profiler.dump_stats(profile_filename)\n return result\n\n return wrapper", "def cprofiler(fun, *args, **kwargs):\n print(f\"Profiling {fun.__name__}\")\n with cProfile.Profile() as pr:\n fun(*args, **kwargs)\n pr.print_stats()", "def profileit(func):\n def wrapper(*args, **kwargs):\n func_name = func.__name__ + \".pfl\"\n prof = cProfile.Profile()\n retval = prof.runcall(func, *args, **kwargs)\n prof.dump_stats(func_name)\n return retval\n\n return wrapper", "def profile(fnc):\n\n def inner(*args, **kwargs):\n pr = cProfile.Profile()\n pr.enable()\n\n # wrapped function starts\n retval = fnc(*args, **kwargs) # fnc is whatever function has the @profile tag\n # wrapped function ends\n\n pr.disable()\n s = io.StringIO()\n sortby = pstats.SortKey.CALLS\n ps = pstats.Stats(pr, stream=s).strip_dirs().sort_stats(sortby)\n ps.print_stats()\n print(s.getvalue())\n return retval\n\n return inner", "def do_cprofile(sort_key='time'):\n def decorator(func):\n\n @wraps(func)\n def profiled_func(*args, **kwargs):\n profile = cProfile.Profile()\n try:\n profile.enable()\n result = func(*args, **kwargs)\n profile.disable()\n return result\n finally:\n profile.print_stats(sort_key)\n return profiled_func\n\n return decorator", "def profiled(func):\n @functools.wraps(func)\n def inner(*args, **kwargs):\n inner.ncalls += 1\n return func(*args, **kwargs)\n\n inner.ncalls = 0\n return inner", "def profile(fnc):\r\n \r\n def inner(*args, **kwargs):\r\n \r\n pr = cProfile.Profile()\r\n pr.enable()\r\n retval = fnc(*args, **kwargs)\r\n pr.disable()\r\n s = io.StringIO()\r\n sortby = 'time'\r\n ps = pstats.Stats(pr, stream=s).sort_stats(sortby)\r\n ps.print_stats()\r\n print(s.getvalue())\r\n return retval\r\n\r\n return inner", "def profile(fnc):\r\n\r\n def inner(*args, **kwargs):\r\n pr = cProfile.Profile()\r\n pr.enable()\r\n retval = fnc(*args, **kwargs)\r\n pr.disable()\r\n s = io.StringIO()\r\n sortby = 'cumulative'\r\n ps = pstats.Stats(pr, stream=s).sort_stats(sortby)\r\n ps.print_stats()\r\n print(s.getvalue())\r\n return retval\r\n\r\n return inner", "def do_cprofile(filename):\n\n def wrapper(func):\n @functools.wraps(func)\n def profiled_func(*args, **kwargs):\n # Flag for do profiling or not.\n # DO_PROF = os.getenv('PROFILING')\n DO_PROF = True\n if DO_PROF:\n profile = cProfile.Profile()\n profile.enable()\n result = func(*args, **kwargs)\n profile.disable()\n # Sort stat by internal time.\n sortby = 'tottime'\n ps = pstats.Stats(profile).sort_stats(sortby)\n ps.dump_stats(filename)\n else:\n result = func(*args, **kwargs)\n return result\n\n return profiled_func\n\n return wrapper", "def profile(func, *args, **kwargs):\n\n import cProfile as profile\n\n filename = 'Reynir.profile'\n\n pr = profile.Profile()\n result = pr.runcall(func, *args, **kwargs)\n pr.dump_stats(filename)\n\n return result", "def profile_function(func):\n do_profiling = os.getenv(\"GRASS_TGIS_PROFILE\")\n\n if do_profiling == \"True\" or do_profiling == \"1\":\n import cProfile, pstats\n try:\n import StringIO as io\n except ImportError:\n import io\n pr = cProfile.Profile()\n pr.enable()\n 
func()\n pr.disable()\n s = io.StringIO()\n sortby = 'cumulative'\n ps = pstats.Stats(pr, stream=s).sort_stats(sortby)\n ps.print_stats()\n print(s.getvalue())\n else:\n func()", "def profiled(path):\n def _decorator(func):\n def _newfunc(*args, **kwargs):\n profiler = profile.Profile()\n ret = profiler.runcall(func, *args, **kwargs)\n profiler.dump_stats(path)\n return ret\n # Be well-behaved\n _newfunc.__name__ = func.__name__\n _newfunc.__doc__ = func.__doc__\n _newfunc.__dict__.update(func.__dict__)\n return _newfunc\n return _decorator", "def profiler(func): # type: ignore\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs): # type: ignore\n if wrapper.exits == wrapper.calls:\n wrapper.exits = 0\n wrapper.calls = 0\n wrapper.begin = datetime.datetime.now()\n wrapper.calls += 1\n resulted_func = func(*args, **kwargs)\n wrapper.exits += 1\n wrapper.last_time_taken = (datetime.datetime.now() - wrapper.begin).total_seconds()\n return resulted_func\n\n wrapper.calls = 0\n wrapper.exits = 0\n return wrapper", "def profile(_func=None,\n profile_id=None,\n sort_by=u'cumulative'):\n\n profilers.register(profile_id)\n\n def arg_wrapper(func):\n\n \"\"\" This is the real decorator and profiles the decorated function. \"\"\"\n\n @functools.wraps(func)\n def wrapper(*args,\n **kwargs):\n\n \"\"\" Simple profiler for the function.\n\n :param args: Args for the function.\n :param kwargs: Kwargs for the function.\n :return: The result of the function.\n \"\"\"\n\n _profiler = profilers.start_if_active(profile_id)\n\n # Run the function\n result = func(*args,\n **kwargs)\n\n profilers.stop_if_active(func=func,\n profile_id=profile_id,\n profiler=_profiler,\n sort_by=sort_by)\n\n # Return the function result\n return result\n\n # Return the decorated function\n return wrapper\n\n # _func's type depends on the usage of the decorator. 
It's a function\n # if it's used as `@decorator` but ``None`` if used as `@decorator()`.\n return arg_wrapper if _func is None else arg_wrapper(_func)", "def profile(f):\n def inner(*args, **kwargs):\n p = Profiler()\n result = p.runcall(f, *args, **kwargs)\n p.print_stats()\n return result\n return inner", "def benchmark(func):\n import time\n @wraps(func)\n def wrapper(*args, **kwargs):\n t = time.clock()\n res = func(*args, **kwargs)\n print(func.__name__, time.clock()-t)\n return res\n return wrapper", "def benchmark(func):\n start = time.time()\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n rc = func(*args, **kwargs)\n print('Running time: {}'.format(time.time() - start))\n return rc\n return wrapper", "def profile(x):\n return x", "def count_time(func):\n\n def decorated_func(*args, **kwargs):\n start = time.perf_counter()\n result = func(*args, **kwargs)\n end = time.perf_counter()\n logging.info(f\"performance time: {end - start}\")\n return result\n\n return decorated_func", "def timeit(func_to_decorate):\n def wrapper(*args, **kwargs):\n start = time.time()\n result = func_to_decorate(*args, **kwargs)\n elapsed = ((time.time() - start) /60 )\n log.debug(\"[TIMING]: %s - %s minutos\" % (func_to_decorate.__name__, elapsed))\n print(\"[TIMING]: %s - %s minutos\" % (func_to_decorate.__name__, round(elapsed, 2)))\n print(\"********************************* fin ********************************\")\n return result\n\n wrapper.__doc__ = func_to_decorate.__doc__\n wrapper.__name__ = func_to_decorate.__name__\n return wrapper", "def timeit(func):\n\n @functools.wraps(func)\n def wrapper(*arg, **kw):\n start_time = time.time()\n ret = func(*arg, **kw)\n end_time = time.time()\n print \"{name} costs {seconds}s.\".format(name=func.__name__, seconds=end_time-start_time)\n return ret\n return wrapper", "def trace_time(func):\n\n @functools.wraps(func)\n def wrapper(*args, **kw):\n begin = time.time()\n logging.debug(\"begin at %s: %s()\" % (__format_time(begin), func.__name__))\n result = func(*args, **kw)\n end = time.time()\n logging.debug(\n \"end at %s, cost %.2fs: %s() -- return type: %s\"\n % (__format_time(end), end - begin, func.__name__, type(result).__name__))\n return result\n\n return wrapper", "def go (fun, *args, **kwargs):\n if 'profile_filename' in kwargs:\n profile_filename = kwargs['profile_filename']\n del kwargs['profile_filename']\n else:\n profile_filename = '/tmp/coro_profile.bin'\n\n if 'profile_bench' in kwargs:\n profile_bench = kwargs['profile_bench']\n del kwargs['profile_bench']\n else:\n profile_bench = coro.rusage_bench\n\n p = coro.new_profiler (profile_bench)\n p.start()\n try:\n return fun (*args, **kwargs)\n finally:\n total_ticks = p.stop()\n user_ticks = _dump (p, profile_filename)", "def decorator(func):\n @functools.wraps(func)\n def inner(*args, **kwargs):\n start_time = time.time()\n func_result = func(*args, **kwargs)\n end_time = time.time()\n print(f'Time of execution of function \"{func.__name__}\": {end_time - start_time}')\n return func_result\n return inner", "def execution_time(func):\n import time\n\n @functools.wraps(func)\n def decorated(*args, **kwargs):\n start = time.time()\n output = func(*args, **kwargs)\n end = time.time()\n print(\"Took {} secondes.\".format(end - start))\n return output\n\n return decorated", "def timed(func):\n\n @functools.wraps(func)\n def wrap(*args, **kwargs):\n start = time.perf_counter()\n func(*args, **kwargs)\n stop = time.perf_counter()\n print(f'{func.__name__} executed in {stop - start}s')\n\n 
return wrap", "def measure(func):\n @functools.wraps(func)\n def _time_it(*args, **kwargs):\n start = int(round(time() * 1000000000))\n try:\n return func(*args, **kwargs)\n finally:\n end_ = int(round(time() * 1000000000)) - start\n print(f\"Total execution time: {end_ if end_ > 0 else 0} ns\")\n\n return _time_it", "def time_it_decorator(func):\n\n def wrapper():\n start = time.time()\n result = func()\n total = time.time() - start\n print(f\"Function: {func.__name__} took {int(total) * 1000}ms\")\n return result\n\n return wrapper", "def timing(func):\n @wraps(func)\n def wrap(*args, **kw):\n start = time()\n result = func(*args, **kw)\n end = time()\n print(f\"{func.__name__} took: {end-start}:2.4f sec\\n\")\n return result\n return wrap", "def profile_function(fun: Callable,\n args: tuple or list = (),\n kwargs: dict or None = None,\n backends=None,\n trace=True,\n subtract_trace_time=True,\n retime=True,\n warmup=1,\n call_count=1) -> Profile:\n kwargs = kwargs if isinstance(kwargs, dict) else {}\n for _ in range(warmup):\n fun(*args, **kwargs)\n with profile(backends=backends, trace=trace, subtract_trace_time=subtract_trace_time) as prof:\n fun(*args, **kwargs)\n if retime:\n with prof.retime():\n fun(*args, **kwargs)\n if call_count > 1:\n with prof._accumulate_average(call_count):\n for _ in range(call_count - 1):\n fun(*args, **kwargs)\n return prof", "def timeit(func):\n\n @wraps(func)\n def timed_function(*args, **kwargs):\n start = time.time()\n output = func(*args, **kwargs)\n end = time.time()\n print '%s execution time: %f secs' % (func.__name__, end - start)\n return output\n\n return timed_function", "def timeit_decorator(func: Callable) -> Callable:\n @wraps(func)\n def wrap(*args, **kwargs) -> Any:\n start_time = time()\n result = func(*args, **kwargs)\n delta = timedelta(seconds=round(time() - start_time))\n info = logging.getLogger(logger_name).info if logger_name is not None else print\n info(f'Elapsed time = {delta}')\n\n return result\n\n return wrap", "def _profile_func(self, module: Callable, x: torch.Tensor, i: int) -> torch.Tensor:\n start_time = time.monotonic()\n y = module(x)\n time_took = time.monotonic() - start_time\n self.profile_result[i][\"time\"][self.n_running] = time_took\n\n return y", "def with_timings(function):\n\n @functools.wraps(function)\n def decorator(*args, **kwargs):\n start_time = time.time()\n ret = function(*args, **kwargs)\n duration_secs = time.time() - start_time\n print(\n f\"== Index Call == {style(function.__name__, bold=True)}: \"\n f\"{duration_secs*1000}\",\n file=sys.stderr,\n flush=True,\n )\n return ret\n\n return decorator", "def benchmark(func):\n\n def decoredFunc(*args, **keyArgs):\n t1 = time.time()\n r = func(*args, **keyArgs)\n t2 = time.time()\n print(f'Function={func.__name__}, Time={t2 - t1}')\n return r\n\n return decoredFunc", "def time_counter(func):\n\n import time\n\n @functools.wraps(func)\n def wrapper(*args, **kwargs):\n t = time.time()\n result = func(*args, **kwargs)\n print(func.__name__, time.time() - t)\n return result\n return wrapper", "def measure(func):\n if func not in measured_funcs:\n measured_funcs.add(func)\n if not hasattr(func, 'total_runtime'):\n func.total_runtime = 0.0\n if not hasattr(func, 'total_calls'):\n func.total_calls = 0\n\n def wrapper(*args, **kwargs):\n before_call = datetime.datetime.now()\n res = func(*args, **kwargs)\n elapsed = datetime.datetime.now() - before_call\n func.total_runtime += elapsed.total_seconds()\n func.total_calls += 1\n return res\n\n return wrapper", 
"def setprofile(function): # real signature unknown; restored from __doc__\n pass", "def timeit(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n start = time.time()\n\n ret = func(*args, **kwargs)\n\n elapsed = time.time() - start\n print('elapsed time: {} seconds'.format(elapsed))\n log.debug('Elapsed time for {}: {} seconds'.format(func.__name__, elapsed))\n\n return ret\n\n return wrapper", "def decorator_timeit(func):\r\n\r\n def wrapper(*args, **kwargs):\r\n start = time.time()\r\n results = func(*args, **kwargs)\r\n end = round((time.time() - start) / 60, 2)\r\n print(f'TIMEIT DECORATOR: {func.__name__} took: {end} min.')\r\n return results\r\n\r\n return wrapper", "def timeit(func):\n\n def measure_time(*args, **kw):\n start_time = time.perf_counter()\n result = func(*args, **kw)\n time_ms = (time.perf_counter() - start_time) * 1000\n if time_ms < 0.1:\n print(\"Processing time of %s(): %.1f μs.\"\n % (func.__qualname__, time_ms*1000))\n else:\n print(\"Processing time of %s(): %.3f ms.\"\n % (func.__qualname__, time_ms))\n return result\n\n return measure_time", "def benchmark(func):\n def wrapper(*args, **kwargs):\n t = time.process_time()\n res = func(*args, **kwargs)\n t_sec = round((time.process_time()-t) % 60,1)\n t_min = int((time.process_time()-t)/ 60)\n ls.logger.info(f'Application function {func.__name__} execution time {t_min} [min] {t_sec} [sec]')\n return res\n return wrapper", "def time_it(foo): # TeamRome\n\n def wrapper(*args, **kwargs):\n time_0 = perf_counter()\n log_verbose(\"%s()\" % foo.__name__)\n result = foo(*args, **kwargs)\n log_info('\\t%s() - OK; TimeIt: %.6f sec.' % (foo.__name__, perf_counter() - time_0))\n return result\n\n return wrapper", "def time_fn(fn):\r\n\r\n @wraps(fn) # to save __name__, type(), ...\r\n def measure_time(*args, **kwargs):\r\n t1 = time.time()\r\n result = fn(*args, **kwargs)\r\n t2 = time.time()\r\n print(f'@time_fn: {fn.__name__:20} took {t2 - t1} seconds')\r\n return result\r\n\r\n return measure_time", "def mem_profile(func: Callable) -> Callable:\n\n @wraps(func)\n def with_memory(*args, **kwargs):\n log = logging.getLogger(__name__)\n mem_usage, result = memory_usage(\n (func, args, kwargs),\n interval=0.1,\n timeout=1,\n max_usage=True,\n retval=True,\n include_children=True,\n )\n # memory_profiler < 0.56.0 returns list instead of float\n mem_usage = mem_usage[0] if isinstance(mem_usage, (list, tuple)) else mem_usage\n log.info(\n \"Running %r consumed %2.2fMiB memory at peak time\",\n _func_full_name(func),\n mem_usage,\n )\n return result\n\n return with_memory", "def decor(func):\n def wrap():\n print(\"@@@ STATISTICS REPORT START @@@\\n\")\n func()\n print(\"@@@ STATISTICS REPORT FINISH @@@\\n\")\n return wrap", "def time_func(func):\n def return_fn(*args, **kwargs):\n global FUNCTION_LOGS\n stopwatch = Stopwatch()\n result = func(*args, **kwargs)\n split = stopwatch.mark()[1]\n FUNCTION_LOGS.append((func.__name__, args, kwargs, split))\n return result\n\n return return_fn", "def time_function_call(fn):\n def wrapper(*args, **kwargs):\n t1 = time.time()\n r = fn(*args, **kwargs)\n diff = time.time() - t1\n logger.debug(\"%s, duration=%6fs\", fn.__name__, diff)\n return r\n return wrapper", "def profile_thunk(i, node, th):\n global run_cthunk\n if hasattr(th, 'cthunk'):\n t0 = time.time()\n failure = run_cthunk(th.cthunk)\n dt = time.time() - t0\n if failure:\n raise RuntimeError(\n ('A C Op raised an exception. ProfileMode cannot'\n ' tell you what it was though. 
Use a standard mode'\n ' such as FAST_RUN to correct the problem.'))\n else:\n t0 = time.time()\n th()\n dt = time.time() - t0\n\n # Some Op are so fast that the time.time() resolution is\n # insufficient to measure it. So we add an epsilon.\n self.apply_time[node] += max(dt, 1e-14)", "def profile_thunk(i, node, th):\r\n global run_cthunk\r\n if hasattr(th, 'cthunk'):\r\n t0 = time.time()\r\n failure = run_cthunk(th.cthunk)\r\n dt = time.time() - t0\r\n if failure:\r\n raise RuntimeError(\r\n ('A C Op raised an exception. ProfileMode cannot'\r\n ' tell you what it was though. Use a standard mode'\r\n ' such as FAST_RUN to correct the problem.'))\r\n else:\r\n t0 = time.time()\r\n th()\r\n dt = time.time() - t0\r\n\r\n # Some Op are so fast that the time.time() resolution is\r\n # insufficient to measure it. So we add an epsilon.\r\n self.apply_time[node] += max(dt, 1e-14)", "def timeit(func):\n def inner(*args, **kwargs):\n time_start = time.time()\n ret = func(*args, **kwargs)\n time_end = time.time()\n print('**** With total running time of {:.2f}s'.format(\n time_end - time_start\n ))\n return ret\n return inner", "def timer(func):\n @wraps(func)\n def wrap_timer(*args, **kwargs):\n t0 = perf_counter()\n returned = func(*args, **kwargs)\n t1 = perf_counter()\n print(f\"[Time: {t1-t0:.6f} s]\")\n return returned\n return wrap_timer", "def call_counter_and_time(func):\n @wraps(func)\n def helper(*args, **kwds):\n helper.calls += 1\n startt = timeit.default_timer()\n result = func(*args, **kwds)\n print(timeit.default_timer()- startt)\n return result\n helper.calls = 0\n return helper", "def timing(f):\n\n def wrap(*args, **kwargs):\n print('Timing....')\n time1 = time.time()\n ret = f(*args, **kwargs)\n time2 = time.time()\n print(f.__name__, 'function took %0.3f ms' % ((time2 - time1) * 1000.0))\n print('....end Timimg')\n return ret\n\n return wrap", "def arg_wrapper(func):\n\n @functools.wraps(func)\n def wrapper(*args,\n **kwargs):\n\n \"\"\" Simple profiler for the function.\n\n :param args: Args for the function.\n :param kwargs: Kwargs for the function.\n :return: The result of the function.\n \"\"\"\n\n _profiler = profilers.start_if_active(profile_id)\n\n # Run the function\n result = func(*args,\n **kwargs)\n\n profilers.stop_if_active(func=func,\n profile_id=profile_id,\n profiler=_profiler,\n sort_by=sort_by)\n\n # Return the function result\n return result\n\n # Return the decorated function\n return wrapper", "def profile(script, argv, timer, pickle_protocol, dump_filename, mono):\n filename, code, globals_ = script\n sys.argv[:] = [filename] + list(argv)\n __profile__(filename, code, globals_,\n timer=timer, pickle_protocol=pickle_protocol,\n dump_filename=dump_filename, mono=mono)", "def measure_time(func):\n def timer(*args, **kwargs):\n start = timeit.default_timer()\n ret = func(*args, **kwargs)\n end = timeit.default_timer()\n print(\"Time[{}] : {}\".format(func.__name__, end-start))\n return ret\n return timer", "def waste_time(func):\n\n @functools.wraps(func)\n def wrapper_waste_time(*args, **kwargs):\n start_time = time.perf_counter()\n value = func(*args, **kwargs)\n end_time = time.perf_counter()\n print(f\"Finished {func.__name__!r} in {end_time - start_time} secs\")\n return value\n return wrapper_waste_time", "def took(func):\n\n @wraps(func)\n def func_wrapper(*args, **kwargs):\n start = time.time()\n\n result = func(*args, **kwargs)\n\n spent = time.time() - start\n\n # Log things which take more than 0.5 seconds\n if spent > 0.5:\n msg = '[xml_file.flush()] 
%s took %.2f seconds to run.'\n function_name = func.__name__\n args = (function_name, spent)\n om.out.debug(msg % args)\n\n return result\n\n return func_wrapper", "def timeit(f):\r\n @functools.wraps(f)\r\n def wrapper(*args, **kwargs):\r\n t0 = time.time()\r\n result = f(*args, **kwargs)\r\n print('Executed {0!r} in {1:4f} s'.format(f.__name__, time.time() - t0))\r\n return result\r\n return wrapper", "def count_time(func):\n\n def wrapper(*args, **kwargs):\n start_time = time()\n res = func(*args, **kwargs)\n over_time = time()\n total_time = over_time - start_time\n logging.info('Func: %s, Run Time: %.6f' % (func.__name__, total_time))\n return res\n\n return wrapper", "def timed(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n tstart = datetime.now()\n result = func(*args, **kwargs)\n elapsed = (datetime.now() - tstart).microseconds / 1e6\n print(\"Elapsed time: %.3f seconds.\" % elapsed)\n return result\n return wrapper", "def timing(f):\n def wrapper(*args, **kwargs):\n start = time.time()\n result = f(*args, **kwargs)\n end = time.time()\n print('function:%r took: %2.2f sec' % (f.__name__, end - start))\n return result\n return wrapper", "def print_timing(func):\n def wrapper(*arg):\n t1 = time.time()\n res = func(*arg)\n t2 = time.time()\n print '%s took %0.3f ms' % (func.func_name, (t2-t1)*1000.0)\n return res\n return wrapper", "def _profile_function(function, profiles, game):\n return [function(game, prof) for prof in profiles] # pragma: no cover", "def excute_time(func):\n\n @wraps(func)\n def excuting(*args, **kwargs):\n start = time.time()\n func(*args, **kwargs)\n print(\"Calling {}: {}\".format(func.__name__, format(time.time() - start, '.5f')))\n\n return excuting", "def profile(key_name, custom_emit=None):\n def decorator(func):\n @wraps(func)\n def wrapped(*args, **kwargs):\n with ProfiledBlock(key_name, custom_emit):\n return func(*args, **kwargs)\n return wrapped\n return decorator", "def timeit(func):\n def timed(*args, **kwargs):\n ts = time.time()\n result = func(*args, **kwargs)\n te = time.time()\n print(\"%r (%r, %r) %2.2f sec\" % (\n func.__qualname__, args, kwargs, te-ts))\n return result\n\n return timed", "def func_decorator(fun):\r\n count = 0\r\n\r\n def wrapper(*args, **kwargs):\r\n try:\r\n nonlocal count\r\n count += 1\r\n start = time.time()\r\n with contextlib.redirect_stdout(io.StringIO()) as f: fun(*args)\r\n duration = time.time() - start\r\n print(f'{fun.__name__}' + f' call {count}' + ' executed in ' + f'{duration}' + ' sec')\r\n print('Name: ' + f' {fun.__name__}')\r\n print('Type: ' + f' {type(fun)}')\r\n sig = signature(fun)\r\n print('Sign: ' + f' {sig}')\r\n print('Args: ' + ' positional ' + f'{args}' '\\n\\t key=worded ' + f'{kwargs}')\r\n doc = fun.__doc__\r\n doc = doc.splitlines()[1:-1]\r\n doc = '\\n\\t'.join(map(str, doc))\r\n print('Doc:' + f'{doc}')\r\n source = inspect.getsource(fun)\r\n source = source.splitlines()\r\n source = '\\n\\t\\t'.join(map(str, source))\r\n print('Source: ' + f'{source}')\r\n output = f.getvalue().splitlines()\r\n output = '\\n\\t\\t'.join(map(str, output))\r\n print('Output: ' + f'{output}')\r\n\r\n except:\r\n logging.exception(f'timestamp: {datetime.now()}')\r\n pass\r\n\r\n return wrapper", "def calculate_time(func):\n def wrapper():\n \"\"\"\n Wrapper within calculate_time decorator that executes the function\n and calculates the time spent executing a function\n\n Parameters\n ----------\n None\n\n Returns\n -------\n Nothing\n\n Examples\n --------\n >>> calculate_time(time.sleep(2))\n 2\n 
\"\"\"\n start_time = time.time()\n func()\n end_time = time.time()\n run = end_time - start_time\n print(f'Total time {run}')\n return wrapper", "def wrapper():\n start_time = time.time()\n func()\n end_time = time.time()\n run = end_time - start_time\n print(f'Total time {run}')", "def timed(fn):\n @wraps(fn)\n def inner(*args, **kwargs):\n \"\"\"\n Inner function to calculate the time.\n \"\"\"\n start = perf_counter()\n result = fn(*args, **kwargs)\n end = perf_counter()\n time_elapsed = (end - start)\n return time_elapsed, result\n return inner", "def timeit(method):\n\n def timed(*args, **kw):\n print(\"@timeit : starting %r \" % method.__name__)\n t_start_sec = time.time()\n # function to measure\n result = method(*args, **kw)\n t_end_sec = time.time()\n\n print(\"%r %g (sec)\" % (method.__name__, (t_end_sec - t_start_sec)))\n\n return result\n\n return timed", "def timed(func):\n def _func(*args, **kwargs):\n print(f\"Starting {func.__name__}...\")\n start_time = time.time()\n out = func(*args, **kwargs)\n print(f\"{func.__name__} took {time.time() - start_time} seconds\")\n return out\n return _func", "def time_it(func):\n def wrapped(*args, **kwargs):\n time_start = time.time()\n result = func(*args, **kwargs)\n time_end = time.time()\n\n print ('%s called with (%s, %s) took %.3f sec' %\n (func.__name__, args, kwargs, time_end - time_start))\n\n return result\n\n return wrapped", "def profile_function(\n game, function, profiles, num_resamples, *, percentiles=None, processes=None\n):\n profiles = profiles.reshape((-1, game.num_strats))\n return game_function(\n game,\n functools.partial(_profile_function, function, profiles),\n num_resamples,\n profiles.shape[0],\n percentiles=percentiles,\n processes=processes,\n )", "def timeit(func):\n def wrapped(*args, **kwargs):\n ts = time.time()\n result = func(*args, **kwargs)\n te = time.time()\n print('{method} ({args}, {kw}) took {time} sec'.format(\n method=func.__name__, args=args, kw=kwargs, time=te - ts))\n return result\n return wrapped", "def function_timer(orig_func):\n import time\n\n @wraps(orig_func)\n def wrapper(*args, **kwargs):\n t1 = time.time()\n result = orig_func(*args, **kwargs)\n t2 = time.time()\n print('{} ran in: {} sec'.format(orig_func.__name__, t2))\n return result\n\n return wrapper", "def count_time(func):\n def wrapper():\n start_time = time.time()\n res = func()\n end_time = time.time()\n print(\"The progress cost: {:4}\".format(end_time-start_time))\n return res\n return wrapper", "def trace(func: Callable) -> Callable:\n\n def _(*args: Any, **kwargs: Any) -> Any:\n start = datetime.now()\n\n ret = func(*args, **kwargs)\n end = datetime.now()\n\n time_elapsed = (end - start).total_seconds() * 1000 # in ms\n logger.info(f\"Completed in {time_elapsed:,.0f} ms\")\n\n return ret\n\n return _", "def time_it(function):\n from time import time\n\n def wrapper(*args, **kwargs):\n before = time()\n result = function(*args, **kwargs)\n after = time()\n print(\"Execution of {} took {:.8f} seconds\".format(\n function.__name__, (after - before)))\n\n return result\n\n return wrapper", "def _performance_log(func):\n\n def wrapper(*arg):\n \"\"\" wrapper \"\"\"\n\n start = datetime.datetime.now()\n\n # Code execution\n res = func(*arg)\n\n if _log_performance:\n usage = resource.getrusage(resource.RUSAGE_SELF)\n memory_process = (usage[2])/1000\n\n delta = datetime.datetime.now() - start\n delta_milliseconds = int(delta.total_seconds() * 1000)\n\n _logger.info(\"PERFORMANCE - {0} - milliseconds |{1:>8,}| - memory MB 
|{2:>8,}|\"\n .format(func.__name__,\n delta_milliseconds,\n memory_process))\n\n return res\n\n return wrapper", "def getprofile(): # real signature unknown; restored from __doc__\n pass", "def timer(fun):\n @wraps(fun)\n def wrapper(args):\n \"\"\"Wraps function execution time.\"\"\"\n if args[\"--time\"]:\n import time\n start_time = time.time()\n result = fun(args)\n LOGGER.info(\"Total time:\", time.time() - start_time)\n return result\n\n return fun(args)\n\n return wrapper", "def metrics_collector(func):\n @functools.wraps(func)\n def wrapper_timer(*args, **kwargs):\n start_time = perf_counter()\n value = func(*args, **kwargs)\n end_time = perf_counter()\n run_time = (end_time - start_time) * 1000 # convert to milliseconds\n run_time = int(run_time) # we'll lose some accuracy here, but I think it's negligible\n if 'metrics' not in g:\n g.metrics = []\n g.metrics.append({\n func.__name__: run_time\n })\n return value\n return wrapper_timer", "def test_func(self):\n def func():\n return 0\n self.assertEqual(type(decorators.timeit(func)), types.FunctionType)", "def clocked( fun, output = sys.stderr ):\n @functools.wraps( fun )\n def call( *args, **kword ):\n \"\"\" \n Call the function\n \"\"\"\n # create and output message\n msg = fun.func_name\n start = time.time()\n result = fun( *args, **kword )\n end = time.time() \n msg += \" (%.4f s)\" % ( end - start)\n print >> output, msg \n return result\n return call", "def perf_logging(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n urlpath = request.path\n remote_addr = request.remote_addr\n try:\n start_time = time.time()\n ret = func(*args, **kwargs)\n end_time = time.time()\n proc_time = round(end_time - start_time, 3)\n _LOGGER.info('%s|%s|%s s.', urlpath, remote_addr, proc_time)\n except Exception as e:\n end_time = time.time()\n proc_time = round(end_time - start_time, 3)\n _LOGGER.error('%s error in %s s.', urlpath, proc_time)\n raise e\n return ret\n return wrapper", "def st_time(show_func_name=True):\n\n def wrapper(func):\n def st_func(*args, **keyArgs):\n t1 = time.time()\n r = func(*args, **keyArgs)\n t2 = time.time()\n if show_func_name:\n print(\"Function=%s, Time elapsed = %ds\" % (func.__name__, t2 - t1))\n else:\n print(\"Time elapsed = %ds\" % (t2 - t1))\n return r\n\n return st_func\n\n return wrapper", "def wrapper(*args,\n **kwargs):\n\n _profiler = profilers.start_if_active(profile_id)\n\n # Run the function\n result = func(*args,\n **kwargs)\n\n profilers.stop_if_active(func=func,\n profile_id=profile_id,\n profiler=_profiler,\n sort_by=sort_by)\n\n # Return the function result\n return result", "def time_function(func):\n def wrapper(*args, **kwargs):\n start = dt.datetime.utcnow()\n result = func(*args, **kwargs)\n end = dt.datetime.utcnow()\n logging.info(\"Function %s took: %s\", func.__name__, (end - start))\n return result\n return wrapper", "def important(func):\n\n def decorated(*args, **kwargs):\n \"\"\"Decorated method.\"\"\"\n runLog.important(func(*args, **kwargs))\n\n return decorated", "def measure_time(f_name):\n\n def wrapper(f):\n f_inner_name = f_name\n if f_inner_name is None:\n f_inner_name = f.__name__\n\n def wrapped_f(*args, **kwargs):\n global indentation\n global tab_stop\n\n start = time.time()\n print(\"%s%11s start\" % (\" \" * indentation, f_inner_name))\n indentation += tab_stop\n try:\n return_data = f(*args, **kwargs)\n except TypeError:\n return_data = f(*args)\n finally:\n end = time.time()\n indentation -= tab_stop\n\n print(\n \"%s%11s: %0.3f sec\"\n % (\" \" * 
indentation, f_inner_name, (end - start))\n )\n\n return return_data\n\n return wrapped_f\n\n return wrapper", "def time_me(function, argument, type):\n start = time.perf_counter()\n function(argument, type)\n end = time.perf_counter()\n return end - start", "def timer(func):\n @functools.wraps(func)\n def wrapper_timer(*args, **kwargs):\n logger = logging.getLogger(__name__)\n start_time = time.perf_counter()\n record_processed = func(*args, **kwargs)\n end_time = time.perf_counter()\n run_time = end_time - start_time\n logger.info(f\"Function {func.__name__!r} processed {record_processed} records in \"\n f\"{run_time:.4f} secs\")\n return record_processed\n return wrapper_timer", "def timer(func):\n def inner(*args, **kwargs):\n before = time.time()\n func(*args, **kwargs)\n after = time.time()\n print(f\"{func.__name__} took {after-before:.2f} sec\")\n return inner", "def test_func_2(self):\n def func():\n return 0\n self.assertEqual(type(decorators.timeit_2(func)), types.FunctionType)", "def timed(function):\n def timed_function(*args, **kwargs):\n t0 = time.time()\n result = function(*args, **kwargs)\n print(\"[{}] - Elapsed time : {} s\"\n .format(function.__name__, sec_to_time(time.time() - t0)))\n return result\n return timed_function", "def print_time(fn):\n @functools.wraps(fn)\n def fn_exec_time(*args, **kwargs):\n start = time.perf_counter()\n value = fn(*args, **kwargs)\n end = time.perf_counter()\n elapsed = end - start\n print(\"{0} took={1:.4f}s\".format(fn.__name__, elapsed))\n return value\n\n return fn_exec_time" ]
[ "0.84530425", "0.8446885", "0.82060313", "0.81897914", "0.81137836", "0.805328", "0.7986637", "0.7973362", "0.77897024", "0.77074057", "0.77040017", "0.7657467", "0.7563646", "0.7490806", "0.74640775", "0.7423344", "0.7059046", "0.7050842", "0.6984666", "0.6982411", "0.69302946", "0.68770736", "0.68481696", "0.6828433", "0.6822135", "0.6789972", "0.6775959", "0.67646044", "0.6763036", "0.674075", "0.6726426", "0.671826", "0.6714121", "0.671377", "0.6691388", "0.66835225", "0.6666333", "0.6650723", "0.66360366", "0.6635997", "0.66209555", "0.6606354", "0.65724254", "0.6565348", "0.65577424", "0.6546747", "0.646354", "0.64413434", "0.64409333", "0.64318097", "0.6417499", "0.6404018", "0.6395567", "0.63875914", "0.63610184", "0.6350462", "0.63421106", "0.63415337", "0.6340345", "0.63273585", "0.63265336", "0.63217664", "0.6316484", "0.63069594", "0.6303855", "0.6291594", "0.6282309", "0.62729645", "0.6272451", "0.6268575", "0.62662935", "0.6263111", "0.62596023", "0.6258907", "0.62485653", "0.6247641", "0.6245751", "0.6233876", "0.62186074", "0.6204913", "0.62026596", "0.6202202", "0.6196396", "0.61948395", "0.6174425", "0.6169372", "0.6165394", "0.61643267", "0.6142669", "0.613412", "0.6131824", "0.6089832", "0.6089052", "0.60819876", "0.6077272", "0.60763663", "0.6063314", "0.60629594", "0.6058696", "0.6053542" ]
0.783562
8
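The negative entries above gather a number of timing and profiling decorators. As a point of reference, a minimal sketch of such a decorator using only the standard library is shown below; the names timed and slow_sum are illustrative and do not come from any of the entries.

import functools
import time

def timed(func):
    # Wrap func, measure wall-clock time with time.perf_counter, and report it.
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        start = time.perf_counter()
        result = func(*args, **kwargs)
        elapsed_ms = (time.perf_counter() - start) * 1000
        print(f"{func.__name__} took {elapsed_ms:.2f} ms")
        return result
    return wrapper

@timed
def slow_sum(n):
    # Toy workload used only to exercise the decorator.
    return sum(range(n))

slow_sum(1_000_000)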
Number of days left.
def ssl_valid_time_remaining(domainname):
    expires = ssl_expiry_date(domainname)
    return expires - datetime.datetime.utcnow().date()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_number_days(self):\r\n return 1", "def elapsed_days(self) -> int:\n return (datetime.today() - self.release_datetime).days", "def remaining_days_in_current_period(self):\n try:\n return self.count_days_from_now(self.current_period_ends_at)\n except AttributeError:\n return 0", "def remaining_retention_days(self) -> int:\n return pulumi.get(self, \"remaining_retention_days\")", "def remaining_days_in_cycle(self) -> int:\n if not self.expiration:\n return 0\n delta = self.expiration - _today()\n return int(delta.days)", "def size(self):\n\t\treturn (self.dates[1] - self.dates[0]).days", "def timesLeft(self)->int:\n return self.maxTimes - self.timesUsed", "def joined_days(self):\n return (timezone.now() - self.user.date_joined).days", "def secondsLeft(self)->int:\n t = datetime.utcnow()\n if self._scenario == LM_HardDate.Scenario.ValidSince:\n return 0 if t >= self.timeBegin else int((self.timeBegin - t).total_seconds())\n else:\n return 0 if t >= self.timeEnd else int((self.timeEnd - t).total_seconds())", "def remaining_days(self):\n if self.trialing or self.trial_ended:\n return self.remaining_trial_days\n else:\n return self.remaining_days_in_current_period", "def compute_real_days(self):\n if (self.end_date > date.today()):\n return SchoolDB.models.get_num_days_in_period(\n self.start_date, date.today())\n else:\n return SchoolDB.models.get_num_days_in_period(\n self.start_date, self.end_date)", "def secondsLeft(self)->int:\n x = self.expirePeriodInSeconds - self.secondsPassed\n return x if x >=0 else 0", "def calculate_time_left(self):\n time_left = self.attributes[AT.TIME_CREATED] \\\n + self.attributes[AT.TIME_TO_EXPIRE] \\\n - get_ticks()\n if time_left < 0:\n time_left = 0\n return time_left", "def get_number_days(self):\r\n raise NotImplementedError", "def secondsLeft(self)->int:\n return 0 if self.secondsPassed >= self.secondsTotal else self.secondsTotal - self.secondsPassed", "def remaining_trial_days(self):\n try:\n return self.count_days_from_now(self.trial_ended_at)\n except AttributeError:\n return 0", "def days(self) -> Optional[int]:\n return pulumi.get(self, \"days\")", "def Daysleftverification():\n pass", "def get_skipped_days(self) -> int:\n return self._skipped_days.get()", "def time_left(self):\r\n return 10 - (int(time.time()) - self.start_time)", "def secondsLeft(self)->int:\n return 0 if self.secondsPassed >= self.duration else self.duration - self.secondsLeft", "def days(self):\n ends_at = created_at = datetime.datetime.now().replace(tzinfo=utc)\n if self.created_at:\n created_at = self.created_at\n if self.ends_at:\n ends_at = self.ends_at\n return (ends_at - created_at).days", "def days(self):\n return self._days", "def decreases_remaining(self):\n return 2 - self.decreases_today", "def days_registered(self):\n days_registered = (datetime.utcnow() - self.date_joined).days\n if not days_registered:\n return 1\n return days_registered", "def numOfDays():\n\n print(\"Podaj rok, miesiac oraz dzien pierwszej daty: \")\n inputs = [input() for i in range(3)]\n\n print(\"Podaj rok, miesiac oraz dzien drugiej daty: \")\n inputs1 = [input() for i in range(3)]\n\n d0 = date(inputs[0], inputs[1], inputs[2])\n d1 = date(inputs1[0], inputs1[1], inputs1[2])\n delta = abs(d1 - d0)\n \n print(delta.days)\n return abs(delta.days)", "def workdaysleft(self):\n\n one_day = datetime.timedelta(hours=HOURS_PER_DAY)\n fulldays = self.timeleft//one_day\n remains = self.timeleft-fulldays*one_day\n return {\"days\": fulldays, \"remaining\": remains}", "def compute_days(start: 
date, end: date) -> int:\n delta = end - start\n return delta.days + 1", "def getTimeLeftSec(self):\n if self.sess is None: return 0\n since = self.sess.data.get('validSince')\n if not since: return 0\n\n sofar = time.time() - since\n if sofar < 0: return 0\n out = self.sess.data.get('validLifetime', 0) - sofar\n if out < 0: out = 0\n return out", "def days(self):\n return int(self.hours / 24)", "def calc_remained_days(name: str, full_date: str, current: str):\n expiry_date = get_expiry_date(name, full_date)\n intervals = datetime.strptime(expiry_date, DATE_FORMAT) - datetime.strptime(current, DATE_FORMAT)\n days = intervals.days + 1\n if days <= 0:\n raise ValueError(f'remained days {expiry_date} - {current}, {days} out of range. ')\n return days", "def time_left(self):\n return self.timeout - self.current_milli_time()", "def get_period_length(self) -> int:\n return (dataset.max_date - dataset.min_date).days + 1", "def _get_number_of_days(self, date_from, date_to, employee_id):\n\t\tfrom_dt = fields.Datetime.from_string (date_from)\n\t\tto_dt = fields.Datetime.from_string (date_to)\n\t\tif employee_id:\n\t\t\temployee = self.env['hr.employee'].browse (employee_id)\n\n\t\t\t# Testing 16/11/19\n\t\t\tshift = employee.resource_calendar_ids\n\t\t\treturn employee.get_work_days_count (from_dt, to_dt, shift)\n\n\t\ttime_delta = to_dt - from_dt\n\t\treturn math.ceil (time_delta.days + float (time_delta.seconds) / 86400)", "def calculate_time_percentage_left(self):\n time_left = self.calculate_time_left()\n return time_left / self.attributes[AT.TIME_TO_EXPIRE]", "def soft_delete_retention_days(self) -> int:\n return pulumi.get(self, \"soft_delete_retention_days\")", "def age(self):\n then = self.ship_date\n if self.status == 'delivered':\n now = self.event_time.date()\n else:\n now = datetime.datetime.now().date()\n delta = now - then\n return delta.days", "def get_remaining_count(self):\n return self.total_count - self.count", "def compute_total_days(start, end):\n # Use the datetime module to subtract the dates (+1 if inclusive)\n return (end - start).days + 1", "def GetTimeLeft(self, *args, **kwargs):\n pass", "def days_since_last_checkin(self):\n # TODO use local timezone\n checkin_date = (self.last_checkin - datetime.timedelta(hours=5)).date()\n today = datetime.date.today()\n return (today - checkin_date).days", "def ssl_days_left(self) -> t.Optional[int]:\n value = None\n if isinstance(self.HTTP, Http):\n cert: t.Optional[cert_human.Cert] = self.HTTP.get_cert()\n if isinstance(cert, cert_human.Cert):\n value = tools.dt_days_ago(cert.not_valid_after, from_now=False)\n return value", "def num_tickets_left(self):\r\n return self._contributions_left[1]", "def period_length_in_days(self, period_tensor):\n return (self + period_tensor).ordinal() - self._ordinals", "def time_left(self):\n if (not os.path.exists(self.filename)):\n return 0\n\n if (\"grid_proxy\" in self.type) or (\"cert_pair\" in self.type):\n time_list = condorExe.iexe_cmd(\"openssl x509 -in %s -noout -enddate\" % self.filename)\n if \"notAfter=\" in time_list[0]:\n time_str = time_list[0].split(\"=\")[1].strip()\n timeleft = calendar.timegm(time.strptime(time_str, \"%b %d %H:%M:%S %Y %Z\"))-int(time.time())\n return timeleft\n else:\n return -1", "def budget_left(self):\n return max(0, self.budget_total - self.budget_spent_with_commission)", "def retention_days(self) -> Optional[int]:\n return pulumi.get(self, \"retention_days\")", "def days_since_start():\n initial = initial_time('time_stamp.txt')\n actual = 
time.localtime(time.time())\n if initial[0] == actual[0]:\n return actual[7] - initial[7]\n else:\n if calendar.isleap(initial[0]):\n return (366 - initial[7]) + actual[7]\n else:\n return (365 - initial[7]) + actual[7]", "def hindu_day_count(cls, date):\n return date - cls.EPOCH", "def day(self):\n return 0", "def day(self):\n return 0", "def retention_days(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"retention_days\")", "def find_duration(discharge, enroll_date, discharge_date):\n #pass\n today = datetime.datetime.today()\n if discharge : #True\n return (discharge_date - enroll_date).days\n else:\n return (today - enroll_date).days", "def retention_duration_in_days(self) -> Optional[pulumi.Input[float]]:\n return pulumi.get(self, \"retention_duration_in_days\")", "def staleness_days(self) -> Optional[int]:\n return pulumi.get(self, \"staleness_days\")", "def DAYS(end_date, start_date):\n return (_make_datetime(end_date) - _make_datetime(start_date)).days", "def get_num_of_days(start, end):\n\n start = clean_date(start)\n end = clean_date(end)\n # print(date(start[0], start[1], start[2]))\n\n start_date = date(start[0], start[1], start[2])\n end_date = date(end[0], end[1], end[2])\n\n delta = end_date - start_date # as timedelta\n \n return delta.days", "def time_left(self) -> float:\n return self._alarm_silence - time.monotonic()", "def _get_number_of_days(self, date_from, date_to):\n\n DATETIME_FORMAT = \"%Y-%m-%d %H:%M:%S\"\n from_dt = datetime.strptime(date_from, DATETIME_FORMAT)\n to_dt = datetime.strptime(date_to, DATETIME_FORMAT)\n timedelta = to_dt - from_dt\n diff_days = timedelta.days + float(timedelta.seconds) / 86400\n return diff_days", "def calculate_days(time):\n return int(time / 86400)", "def getDays(self):\r\n return self._repo", "def get_runing_days(start_date_list,expired_date_list):\n min_d = min(start_date_list)\n max_d = max(expired_date_list)\n num_days = (max_d-min_d).days +1\n return num_days", "def retention_duration_in_days(self) -> Optional[float]:\n return pulumi.get(self, \"retention_duration_in_days\")", "def getDailyPlayTimeLeft(self):\n d, _ = self.__stats.playLimits\n return d[0] - self._getDailyPlayHours()", "def last_seen_days(self):\n return self.last_seen.days", "def min_days(self):\n # This can be refined, as there are no more than 1 consecutive months\n # with 28 days\n days_fractional = self.number * 365.24 + self.days\n return int(np.floor(days_fractional))", "def get_n_days_ago(self, startdate, n):\n return startdate - datetime.timedelta(days=n)", "def count_left_players(definition):\n return int(parse_player_definition(definition)[1]['left_players'])", "def length(self):\n if self.running:\n return ZERO_TIME\n else:\n return self.end - self.start", "def secondsPassed(self)->int:\n return 0 if not self.used else int((datetime.utcnow() - self.firstAccessDate).total_seconds())", "def length(self):\n\t\treturn datetime.now() - self.toggles[0]", "def get_fine_due(self):\n fine = 0\n ndays = (dt.datetime.now() - self._checkout_date).days\n ndays_over = ndays - self.loantime\n if ndays_over > 0:\n fine += (ndays_over * self.finerate)\n return fine", "def get_length(self) -> int:\n return (self.pivot_departure_fix.timestamp - self.pivot_arrival_fix.timestamp).total_seconds()", "def calcDays(dateToCheck):\n today = datetime.date.today()\n # guard against *somehow* receiving an incorrect data type\n if type(dateToCheck) is not datetime.date:\n origBirthday = datetime.date.fromisoformat(str(dateToCheck))\n else:\n origBirthday = 
dateToCheck\n # determine the next birthday for this date of birth\n nextBirthday = datetime.date(today.year, origBirthday.month, origBirthday.day)\n # calculate days to next birthday\n if today<nextBirthday:\n daysLeft = (nextBirthday - today).days\n return daysLeft\n elif today == nextBirthday:\n daysLeft = 0\n return daysLeft\n else:\n newDate = datetime.date(nextBirthday.year + 1, nextBirthday.month, nextBirthday.day)\n daysLeft = (newDate - today).days\n return daysLeft", "def _interval_left(self, current_id, previous_id, current_time,\r\n previous_time):\r\n\r\n _left = 0.0\r\n\r\n # Create the next set of values to insert to the RTK Program database.\r\n if current_id == previous_id: # Same assembly.\r\n # Failures occurred at same time.\r\n if current_time == previous_time:\r\n _left = float(current_time)\r\n else:\r\n _left = float(previous_time)\r\n\r\n return _left", "def days_between(self, other):\n new_self = self.copy()\n new_other = other.copy()\n count=0\n if self.is_before(other):\n while(True):\n if new_self == new_other:\n break\n count-=1\n new_self.advance_one()\n elif self.is_after(other):\n while(True):\n if new_self==new_other:\n break\n count+=1\n new_other.advance_one()\n\n return count", "def days(self) -> Optional[pulumi.Input[int]]:\n warnings.warn(\"\"\"Deprecated in favor of duration\"\"\", DeprecationWarning)\n pulumi.log.warn(\"\"\"days is deprecated: Deprecated in favor of duration\"\"\")\n\n return pulumi.get(self, \"days\")", "def transaction_log_retention_days(self) -> int:\n return pulumi.get(self, \"transaction_log_retention_days\")", "def datediff_today(date):\n today = datetime.date.today()\n datediff = (today - date).days\n return datediff", "def data_refresh_window_days(self) -> pulumi.Output[int]:\n return pulumi.get(self, \"data_refresh_window_days\")", "def max_days(self):\n # This can be refined, as there are at max 2 consecutive months with 31\n # days\n days_fractional = self.number * 365.24 + self.days\n return int(np.ceil(days_fractional))", "def interval_days(self) -> pulumi.Input[int]:\n return pulumi.get(self, \"interval_days\")", "def freeTimeLeft(self):\n assert self.notify.debugStateCall(self, 'loginFSM', 'gameFSM')\n # -1 == never expires (paid/exempt)\n # 0 == expired\n if self.freeTimeExpiresAt == -1 or \\\n self.freeTimeExpiresAt == 0:\n return 0\n\n # freeTimeExpiresAt is an epoch time\n secsLeft = self.freeTimeExpiresAt - time.time()\n # if free time just expired, secsLeft <=0\n # make sure we don't return a negative number\n return max(0, secsLeft)", "def get_interactive_days(self):\n answer = input(\"Press return to get entries of past day or input number of days to go back in time: \")\n if answer == '':\n days = 1\n else:\n try:\n days = int(answer)\n except:\n print(\"You didn't enter a number, assuming 1 day.\")\n days = 1\n return days", "def get_diff_dates(self):\n if self.projected_start_date and self.projected_finish_date:\n diff = self.projected_finish_date - self.projected_start_date\n return diff.days\n return 0", "def n_diff(self):\n return 1 + int(self.differential)", "def diff(self, d2):\n copyD1 = self.copy()\n copyD2 = d2.copy()\n\n count = 0\n\n if copyD1.equals(copyD2):\n return count\n\n elif copyD1.isBefore(copyD2):\n while copyD1.isBefore(copyD2):\n count += 1\n copyD1.tomorrow()\n return -count\n\n else:\n while copyD1.isAfter(copyD2):\n count += 1\n copyD1.yesterday()\n return count", "def day(self):\n return self._days", "def time_left(self):\n t=self.transport\n return (t.stoptime or 
t.get_length())-t.get_time()", "def days(input=None):\n return get(input).days", "def message_retention_in_days(self) -> Optional[pulumi.Input[int]]:\n return pulumi.get(self, \"message_retention_in_days\")", "def remaining(self):\n return self.value - time.time()", "def calculate_seconds_in_days(days):\n return int(days * 86400)", "def DAYS(\n end_date: func_xltypes.XlDateTime,\n start_date: func_xltypes.XlDateTime\n) -> func_xltypes.XlNumber:\n\n days = end_date - start_date\n return days", "def getAge(self):\n if self.birthday == None:\n raise ValueError\n return (datetime.date.today() - self.birthday).days", "def getAge(self):\n if self.birthday == None:\n raise ValueError\n return (datetime.date.today() - self.birthday).days", "def getAge(self):\n if self.birthday == None:\n raise ValueError\n return (datetime.date.today() - self.birthday).days", "def getAge(self):\n if self.birthday == None:\n raise ValueError\n return (datetime.date.today() - self.birthday).days", "def num_deltas(self):\n if hasattr(self, '_m_num_deltas'):\n return self._m_num_deltas if hasattr(self, '_m_num_deltas') else None\n\n self._m_num_deltas = self.num_deltas_raw.value\n return self._m_num_deltas if hasattr(self, '_m_num_deltas') else None", "def num_deltas(self):\n if hasattr(self, '_m_num_deltas'):\n return self._m_num_deltas if hasattr(self, '_m_num_deltas') else None\n\n self._m_num_deltas = self.num_deltas_raw.value\n return self._m_num_deltas if hasattr(self, '_m_num_deltas') else None", "def seconds_remaining(self):\n pass" ]
[ "0.7505091", "0.7279096", "0.7158447", "0.71100044", "0.7087657", "0.70665115", "0.7026382", "0.7017739", "0.6946899", "0.69389665", "0.6917387", "0.6910316", "0.6885656", "0.68470883", "0.6816302", "0.6804258", "0.6734145", "0.670508", "0.6679895", "0.6673183", "0.66385496", "0.6632437", "0.6630138", "0.6627241", "0.6601242", "0.6578529", "0.65769064", "0.6542838", "0.65134037", "0.65049636", "0.6468212", "0.64428157", "0.6391804", "0.63092405", "0.6284646", "0.6276296", "0.6273565", "0.62552303", "0.6223043", "0.6173723", "0.6170872", "0.61246926", "0.6091606", "0.60845786", "0.60814595", "0.608007", "0.60594773", "0.6046566", "0.60460484", "0.6044072", "0.6044072", "0.6037503", "0.60359627", "0.6033369", "0.6029008", "0.60280716", "0.60081273", "0.6002484", "0.59995186", "0.599606", "0.5984292", "0.59804225", "0.59501547", "0.59297097", "0.59151894", "0.5912748", "0.5907268", "0.5907254", "0.58983535", "0.58937824", "0.58828115", "0.58827156", "0.5882268", "0.5876205", "0.5857997", "0.5847526", "0.58462167", "0.584359", "0.58327353", "0.5830309", "0.58195436", "0.57776564", "0.57701254", "0.5767374", "0.57605207", "0.5752887", "0.57495576", "0.5745408", "0.57349104", "0.5709449", "0.57072526", "0.5705364", "0.5693289", "0.56721085", "0.566323", "0.566323", "0.566323", "0.566323", "0.5657395", "0.5657395", "0.5654791" ]
0.0
-1
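The document in the record above returns the time remaining before an SSL certificate expires, but it depends on an ssl_expiry_date helper that is not part of the entry. A minimal sketch of how such a helper could look, built on the standard ssl and socket modules, is given below; the port, timeout, and certificate date format are assumptions for illustration only.

import datetime
import socket
import ssl

def ssl_expiry_date(domainname, port=443):
    # Connect to the host, fetch the peer certificate, and parse its 'notAfter' field.
    context = ssl.create_default_context()
    with socket.create_connection((domainname, port), timeout=10) as sock:
        with context.wrap_socket(sock, server_hostname=domainname) as ssock:
            cert = ssock.getpeercert()
    # 'notAfter' typically looks like 'Jun  1 12:00:00 2025 GMT'.
    expires = datetime.datetime.strptime(cert["notAfter"], "%b %d %H:%M:%S %Y %Z")
    return expires.date()

# With this helper in place, the record's function can be used directly, e.g.:
# ssl_valid_time_remaining("example.org").days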
Returns the truncated SHA-512 hash of the message.
def get_hash(data, n):
    import hashlib

    message_hash = hashlib.sha512(data).digest()
    e = int.from_bytes(message_hash, 'big')

    # FIPS 180 says that when a hash needs to be truncated, the rightmost bits
    # should be discarded.
    z = e >> (e.bit_length() - n.bit_length())

    assert z.bit_length() <= n.bit_length()
    return z
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _sha512(message):\n return hashlib.sha512(message).hexdigest()", "def _sha512(data):\r\n return hashlib.sha512(data).hexdigest()", "def sha512(s: str) -> str:\n return hashlib.sha512(s.encode()).hexdigest()", "def _hash(data):\n return hashlib.sha512(data).hexdigest()", "def get_sha512_from_stream(src: io.IOBase) -> str:\n if not isinstance(src, io.IOBase) or not src.readable():\n raise Exception(\"src is not stream or unreadable\")\n m = hashlib.sha512()\n return calc_hash(src, m)", "def _truncated_digest(blob, digest_size=24):\n\n # b64 encoding results in 4/3 size expansion of data and padded if\n # not multiple of 3, which doesn't make sense for this use\n assert digest_size % 3 == 0, \"digest size must be multiple of 3\"\n\n digest = hashlib.sha512(blob).digest()\n tdigest_b64us = base64.urlsafe_b64encode(digest[:digest_size])\n return tdigest_b64us", "def get_sha512(src: str) -> str:\n if not isinstance(src, str) or src == \"\":\n raise Exception(\"Invalid src str\")\n i = io.BytesIO(bytearray(src, encoding='utf-8'))\n return get_sha512_from_stream(i)", "def digest(self, message):\n\n hasher = hashlib.md5()\n hasher.update(message)\n digest = hasher.digest()[0:self.HASHLEN]\n\n return binascii.hexlify(digest)", "def sha3_512(data=None):\n return SpongeHash(1024, 512, data, \"SHA3-512\", KeccakSponge, PAD_SHA3)", "def str_sha(raw_sha):\n return hexlify(raw_sha)[:12]", "def get_short_fingerprint(length=6):\n assert 6 <= length <= 32\n #\n return get_fingerprint(md5=True)[-length:]", "def trim_hash(info_hash):\n if len(info_hash) == 40:\n return info_hash.decode(\"hex\")\n if len(info_hash) != 20:\n raise TrackerRequestException(\"Infohash not equal to 20 digits\", info_hash)\n return info_hash", "def hack_hash(message, charset, length):\n digests = []\n i = 0\n bits_for_charset = int(math.ceil(math.log2(len(charset))))\n bytes_for_charset = int(math.ceil(bits_for_charset / 8))\n while len(digests) * 64 < length * bytes_for_charset:\n digests.append(hashlib.sha512(\n bytes(str(i) + message, 'utf-8')).digest())\n i += 1\n digest = bytes(b'').join(digests)\n out = []\n for i in range(length):\n b = digest[i * bytes_for_charset:(i + 1) * bytes_for_charset]\n n = int.from_bytes(b, byteorder='little', signed=False)\n out.append(charset[n % len(charset)])\n return ''.join(out)", "def _get_hex_digest(cls, message, secret):\n hmac_digester = hmac.new(secret.encode('utf-8'), message.encode('utf-8'), digestmod='sha512')\n return hmac_digester.hexdigest()", "def _hash_content(data):\n return hashlib.sha512(str(data).encode('utf-8')).hexdigest()", "def checksum(payload):\n return hashlib.sha256(hashlib.sha256(payload).digest()).digest()[:4]", "def sha512(self):\n return sha512file(self.abspath)", "def hash_hmac( self, msg ):\n result = hmac.new( self.secret, msg, hashlib.sha512 )\n return result.hexdigest()", "def HexDigest(self, name, truncation_length=None):\n\n if truncation_length is None:\n truncation_length = 64\n name_bytes = name.encode('UTF-8')\n return hashlib.sha256(name_bytes).hexdigest()[:truncation_length]", "def _Hash(self):\n fullhash = util.PrefixHash(self.key_bytes)\n return util.Base64WSEncode(fullhash[:constants.KEY_HASH_SIZE])", "def get_sha384_from_stream(src: io.IOBase) -> str:\n if not isinstance(src, io.IOBase) or not src.readable():\n raise Exception(\"src is not stream or unreadable\")\n m = hashlib.sha384()\n return calc_hash(src, m)", "def _sign_tx_hash(self, tx_message: TransactionMessage) -> str:\n if tx_message.ledger_id == OFF_CHAIN:\n crypto_object = 
self.wallet.crypto_objects.get(\"ethereum\")\n # TODO: replace with default_ledger when recover_hash function is available for FETCHAI\n else:\n crypto_object = self.wallet.crypto_objects.get(tx_message.ledger_id)\n tx_hash = tx_message.signing_payload.get(\"tx_hash\")\n is_deprecated_mode = tx_message.signing_payload.get(\"is_deprecated_mode\", False)\n tx_signature = crypto_object.sign_message(tx_hash, is_deprecated_mode)\n return tx_signature", "def shake128(data=None, digest_size=256):\n return SpongeHash(256, digest_size, data, \"SHAKE128\", KeccakSponge, PAD_SHAKE)", "def short_hash(hash, chars=11):\n ch_ea = int((chars - 3) / 2)\n if hash is None:\n return (\"0\" * ch_ea) + \"...\" + (\"0\" * ch_ea)\n return hash[:ch_ea] + \"...\" + hash[(-1 * ch_ea):]", "def card_id_sha512(self):\n return hashlib.sha512(self.card_id.encode('utf-8')).hexdigest() if self._card_type is not None else None", "def _get_prefix(self):\r\n return _sha512('health'.encode('utf-8'))[0:6]", "def hash_transaction(transaction: SignedRawTransaction) -> str:\n hashable_transaction = transaction.SerializeToString()\n return Verification.hash_bytes_256(hashable_transaction)", "def _Hash(self):\n fullhash = util.Hash(util.IntToBytes(len(self.key_bytes)), self.key_bytes)\n return util.Encode(fullhash[:keyczar.KEY_HASH_SIZE])", "def xml_get_sha512(xml, secret):\n xml_string = xml_to_string(xml, encode_base64=False) + secret\n return hashlib.sha512(xml_string).hexdigest()", "def getHash():\n return str(uuid.uuid4())[-17:].replace(\"-\", \"\")", "def sha512_file(file_name):\n\n hash_func = hashlib.sha256()\n\n with open(file_name, \"rb\") as fd:\n hash_func.update(fd.read())\n\n return hash_func.hexdigest()", "def calculate_hash(self):\n return sha256_2_string(str(self.header()))", "def calculate_hash(self):\n return sha256_2_string(str(self.header()))", "def generate_hash(passwd):\n return hashlib.sha512(passwd.encode(\"utf-8\")).hexdigest()", "def get_partial_sha512(self, nbytes):\n return sha512file(abspath=self.abspath, nbytes=nbytes)", "def _sha256(sha256):\n if not sha256:\n sha256 = \"0\" * 64\n\n return sha256", "def hash_password(password):\n\n return hashlib.sha224(password).hexdigest()[:20]", "def hash_str_length(self):\n return self.hash_byte_length() * 2", "def HmacSha512(key: Union[bytes, str],\n data: Union[bytes, str]) -> bytes:\n return hmac.new(AlgoUtils.Encode(key), AlgoUtils.Encode(data), hashlib.sha512).digest()", "def hash_str(c, hash_length):\n if isinstance(c, float):\n if numpy.isnan(c):\n return c\n raise ValueError(f\"numpy.nan expected, not {c}\")\n m = hashlib.sha256()\n m.update(c.encode(\"utf-8\"))\n r = m.hexdigest()\n if len(r) >= hash_length:\n return r[:hash_length]\n return r", "def sha256(message: bytes):\n # convert message bitarray\n bit_msg = bitarray(endian='big')\n bit_msg.frombytes(message)\n L = len(bit_msg)\n\n # additions done mod 2^32\n pow2 = pow(2,32)\n\n # append 1 followed by K 0s where K is the minimum number >= 0 such that \n # len(bit_msg) + 1 + K + 64 is a multiple of 512\n bit_msg = bit_msg + bitarray('1') + (bitarray('0') * ((-L-65) % 512))\n # append len(bit_msg) as a 64-bit int to bit_msg\n bit_msg = bit_msg + util.int2ba(L, length=64, endian='big')\n\n # initialize hash to predefined values\n current_hash = [h for h in initial_hash]\n\n # operate on each 512-bit chunk\n for chunk_index in range(len(bit_msg)//512):\n chunk = bit_msg[chunk_index * 512 : (chunk_index+1) * 512]\n # w is array of 64 32-bit words with first 16 equal to chunk\n w = [chunk[i*32 : 
(i+1)*32] for i in range(16)]\n w.extend([bitarray(32) for _ in range(48)])\n # create last 48 words in w from first 16\n for i in range(16, 64):\n s0 = rightrotate(w[i-15], 7) ^ rightrotate(w[i-15], 18) ^ rightshift(w[i-15], 3)\n s1 = rightrotate(w[i-2], 17) ^ rightrotate(w[i-2], 19) ^ rightshift(w[i-2], 10)\n w[i] = int2ba32(sum(map(util.ba2int, [w[i-16], s0, w[i-7], s1])) % pow2)\n\n # copy current hash (stored in hex) into working list v as bitarrays\n v = list(map(int2ba32, current_hash))\n # compression\n for i in range(64):\n S1 = rightrotate(v[4], 6) ^ rightrotate(v[4], 11) ^ rightrotate(v[4], 25)\n ch = (v[4] & v[5]) ^ ((~v[4]) & v[6])\n temp1 = (constants[i] + sum(map(util.ba2int, [v[7], S1, ch, w[i]]))) % pow2\n S0 = rightrotate(v[0], 2) ^ rightrotate(v[0], 13) ^ rightrotate(v[0], 22)\n maj = (v[0] & v[1]) ^ (v[0] & v[2]) ^ (v[1] & v[2])\n temp2 = (util.ba2int(S0) + util.ba2int(maj)) % pow2\n\n # shift elements of v by 1\n for j in reversed(range(1, len(v))):\n v[j] = v[j-1]\n v[0] = int2ba32((temp1 + temp2) % pow2)\n v[4] = int2ba32((util.ba2int(v[4]) + temp1) % pow2)\n\n # add compressed values (which are bitarrays) to current_hash (which are ints)\n current_hash = list(map(lambda a,b: (a + util.ba2int(b)) % pow2, current_hash, v))\n\n # each entry of current_hash is a 32-bit integer so convert to 4 bytes \n # adding bytes appends them\n return b''.join(x.to_bytes(4, 'big') for x in current_hash)", "def sha512(key: bytes, buffer: Optional[bytes] = None) -> Hmac:\n return new(key, buffer, \"sha512\")", "def _hash_fitting_schema(fitting_schema: BespokeOptimizationSchema) -> str:\n hash_string = (\n fitting_schema.smirk_settings.json() + fitting_schema.initial_force_field_hash\n )\n for stage in fitting_schema.stages:\n # drop the reference data form each target and the parameters from each stage\n hash_string += stage.json(\n exclude={\"targets\": {\"__all__\": {\"reference_data\"}}, \"parameters\": ...}\n )\n hash_string = hashlib.sha512(hash_string.encode()).hexdigest()\n return hash_string", "def sha(self):\n return self._sha", "def sha(self):\n return self._sha", "def sha(self):\n return self._sha", "def _hashsanitize(bytesin):\n # Used for converting raw byte data into a hex string. 
If the byte isn't a hex digit, use nothing instead.\n return \"\".join([x if x.lower() in 'abcdef0123456789' else '' for x in bytesin])", "def calc_statistics_hash(self) -> bytes:\n return b\"somehash\"", "def calculateHash(self):\n hashData = str(self.index) + str(self.data) + self.timestamp + self.previousHash + str(self.nonce)\n return hashlib.sha256(hashData.encode(encoding='UTF-8',errors='strict')).hexdigest()", "def strongHash(self):\n\t\treturn self._strongHash", "def hash_with_salt(self, s):\n\n data = f'{s} {self.salt}'.encode('ascii') # encode string to raw bytes object\n hash_obj = hashlib.md5(data) # hash it \n if self.trunc > 0:\n hash_txt = hash_obj.hexdigest()[0:self.trunc] # get truncated hash symbols\n else:\n hash_txt = hash_obj.hexdigest()\n return f'{s} {hash_txt}'", "def hash_1(self):\n return self.unpack_qword(0x18)", "def strongHashFunction(self):\n\t\treturn self._strongHashFunction", "def uniquely_shorten(string, length):\n\n if len(string) <= length and not (len(string) == length and\n string.startswith(SHORTENED_PREFIX)):\n return string\n\n h = hashlib.sha256()\n h.update(\"%s \" % length)\n h.update(string)\n hash_text = h.hexdigest()\n\n return SHORTENED_PREFIX + hash_text[:length-len(SHORTENED_PREFIX)]", "def hexdigest(self):\n # bytes.hex() is simpler, but not available For Python <= 3.4\n return \"\".join(\"{0:0>2x}\".format(b) for b in self.digest())", "def get_hash(self) -> str:\n return self.__hash.hexdigest()", "def SHA1(self) -> _n_0_t_3[_n_0_t_9]:", "def encoded_hash(sha):\n return urlsafe_b64encode(sha.digest()).decode('ascii').rstrip('=')", "def hex(self) -> str:\n return self.__hash.hexdigest()", "def dopplr(name):\n return \"#\" + hashlib.sha224(name).hexdigest()[:6]", "def _get_signature(value):\n mySha = hashlib.sha256()\n mySha.update(value)\n # print mySha.hexdigest()\n return mySha.hexdigest()", "def _hash(self, key):\n\n return long(hashlib.md5(key).hexdigest(), 16)", "def _hash(self, key):\n\n return long(hashlib.md5(key).hexdigest(), 16)", "def get_hash(s):\n hash_object = hashlib.md5(s.encode())\n return hash_object.hexdigest()", "def hash_password(password: str) -> str:\n return pbkdf2_sha512.hash(password)", "def hash(self) -> bytes:", "def obscure(data: bytes) -> bytes:\n return b64e(compress(data, 9))", "def sha256(self):\n return self._sha256", "def calculate_checksum(self, message):\n s = 0\n for i in range(0, len(message)-1, 2):\n w = (message[i]) + (message[i + 1] << 8) << 8\n s = ((w + s) & 0xffff) + ((w + s) >> 16)\n return s", "def make_hash(self, long_url: str, hash_length: int):\n hasher = hashlib.md5(long_url.encode())\n bytes_hash = base64.urlsafe_b64encode(hasher.digest())[:hash_length]\n str_hash = bytes_hash.decode()\n return str_hash", "def slophash(val):\n\n if not val:\n return None\n else:\n return sha256(val.encode('utf8')).hexdigest()[0:10]", "def get_min_hash(shingles: set) -> MinHash:\n track_min_hash = MinHash(num_perm=128)\n for shin in shingles:\n track_min_hash.update(str(shin).encode('utf-8'))\n return track_min_hash", "def short_token():\n hash = hashlib.sha1(force_bytes(shortuuid.uuid()))\n hash.update(force_bytes(settings.SECRET_KEY))\n return hash.hexdigest()[::2]", "def calculate_checksum(self, message):\n return sum([int(x, 16) if type(x) == str else x for x in message]) & 0xFF", "def get_hash(self):\n source = \"\"\n for cell in self.original_cells:\n source += \"\\n\".join(get_source(cell))\n return hashlib.sha256(source.encode(\"utf-8\")).hexdigest()", "def sha512(digest, fileName):\n\n rc = 
sha512sum(digest, fileName)\n logging.debug(\"SHA512 (%s) = %s\", fileName, 'ok' if rc else 'fail')\n return int(rc)", "def sha1(self) -> str:\n return self.data.sha1", "def geometry_hash(geometry):\n if hasattr(geometry, 'md5'):\n # for most of our trimesh objects\n md5 = geometry.md5()\n elif hasattr(geometry, 'tostring'):\n # for unwrapped ndarray objects\n md5 = str(hash(geometry.tostring()))\n\n if hasattr(geometry, 'visual'):\n # if visual properties are defined\n md5 += str(geometry.visual.crc())\n return md5", "def f(data=None):\n\n hsh = SHA512.new()\n hsh.update(b\"1\")\n hsh.update(data)\n return hsh", "def hexdigest(self):\n return self.hashObject.hexdigest()", "def hexdigest(self):\n return \"\".join(\"%02x\" % ord(x)\n for x in MegaCrypto.a32_to_str(self.digest()))", "def get_sha384(src: str) -> str:\n if not isinstance(src, str) or src == \"\":\n raise Exception(\"Invalid src str\")\n i = io.BytesIO(bytearray(src, encoding='utf-8'))\n return get_sha384_from_stream(i)", "def hashname(self):\n return hashlib.md5(self.name.encode('utf-8')).hexdigest()", "def outerHash(self) -> str:\r\n\r\n return self.__outer_hash", "def sha256(s: str) -> str:\n return hashlib.sha256(s.encode()).hexdigest()", "def sha256_fdh(message: bytes, target_length=None, seed=0):\n if target_length is None:\n return sha256(message)\n if target_length < 32:\n raise ValueError(\"target length must be a value in bytes >= 32, the length of one SHA256 output\")\n cycles = target_length // 32\n # number of bytes needed to store largest cycle index to append to message\n max_num_bytes = int(math.log(cycles, 2)//8) + 1\n # concatenate hashes together\n output = b''.join(sha256(message + (c + seed).to_bytes(max_num_bytes, 'big')) for c in range(cycles))\n # append 0s to output until it reaches target_length\n if target_length > len(output):\n return output + (0).to_bytes(target_length - len(output), 'big')\n return output", "def hash(self) -> str:\r\n ...", "def hexlify(self: str, verbose=False):\n nbytes = len(_chunk_bs(self))\n buf = b''\n strlen = ''\n for b in to_bytes(_chunk_bs(self)):\n buf+=b\n# for s in _from_list(_chunk_bs(self)):\n# strlen+=f'{ _bit_length(s): 02d}'\n if verbose:\n for n in range(nbytes):\n strlen += f'{_bit_length(_from_list(_chunk_bs(self))[n])} @[{n}] '\n print(strlen)\n return buf", "def hash_password(password):\n return pbkdf2_sha512.encrypt(password)", "def apply_hash (self, s):\r\n m = md5()\r\n m.update (s)\r\n d = m.digest()\r\n # base64.encodestring tacks on an extra linefeed.\r\n return encodestring (d)[:-1]", "def short_token():\n hash = hashlib.sha1(shortuuid.uuid().encode('utf-8'))\n hash.update(settings.SECRET_KEY.encode('utf-8'))\n return hash.hexdigest()[::2]", "def smallHash(number, text):\n m = hashlib.md5()\n m.update(bytes(number))\n m.update(text.encode('utf-8'))\n return int(m.hexdigest(), 16) % 1000000", "def _Hash(self):\n out = [self.key.string_id()]\n properties = self._PropList()\n for prop in properties:\n out.append(unicode(getattr(self, prop, '')))\n to_hash = ''.join(out)\n return hashlib.md5(to_hash.encode('utf-8')).hexdigest()", "def hash_string(self):\n return self._hash_string", "def hashLink(link):\n\n return str(md5.new(link).hexdigest())[:5]", "def _hash_value(value):\n return hashlib.md5(value.encode('utf-8')).hexdigest()[:9]", "def badhash(x):\n x = (((x >> 16) ^ x) * 0x045d9f3b) & 0xFFFFFFFF\n x = (((x >> 16) ^ x) * 0x045d9f3b) & 0xFFFFFFFF\n x = ((x >> 16) ^ x) & 0xFFFFFFFF\n return x", "def s3_md5(s3key, blocksize=65536):\n return 
s3key.etag.strip('\"').strip(\"'\")", "def hash_128_bit_pass(passwd):\n h = hashlib.sha256()\n h.update(passwd)\n return h.hexdigest()[:16]", "def _calculate_hash(self) -> str:\n data_str = str(self.version) + str(self.index) + self.pre_hash + str(self.timestamp) + str(self.data)\n return sha256(data_str.encode('utf-8')).hexdigest()" ]
[ "0.7193293", "0.6325299", "0.61913234", "0.5894766", "0.58219564", "0.57631546", "0.5762267", "0.5743988", "0.5704884", "0.56312925", "0.56283605", "0.56206113", "0.54951626", "0.54939073", "0.54397", "0.5402861", "0.539359", "0.5392251", "0.535287", "0.534708", "0.5297465", "0.527627", "0.52758586", "0.5230274", "0.5219051", "0.5214602", "0.52091676", "0.519477", "0.51771784", "0.51553243", "0.5140837", "0.5136747", "0.5136747", "0.51219237", "0.5108919", "0.50916123", "0.50722283", "0.50682503", "0.50572604", "0.5043888", "0.5029342", "0.5010934", "0.5000959", "0.49965325", "0.49965325", "0.49965325", "0.49958947", "0.49709347", "0.49558055", "0.49507305", "0.49480364", "0.49393752", "0.49378365", "0.4935544", "0.49122858", "0.48907435", "0.48709282", "0.48674995", "0.48672822", "0.48646253", "0.48561856", "0.48558626", "0.48558626", "0.48475695", "0.48457804", "0.4837152", "0.4835886", "0.4835049", "0.48302156", "0.4826386", "0.4824761", "0.48118803", "0.48114946", "0.4809859", "0.48015133", "0.47950196", "0.47901595", "0.47807318", "0.4778641", "0.4778378", "0.47708958", "0.47650325", "0.47644484", "0.47593102", "0.47553855", "0.4748707", "0.47467712", "0.4742451", "0.47385627", "0.4737625", "0.473613", "0.4731747", "0.47287205", "0.47278354", "0.47261664", "0.47231826", "0.47227347", "0.47225294", "0.47209764", "0.47083354" ]
0.5229747
24
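For the record above, the following short usage sketch shows how the truncated digest is kept no wider than a group order n, as in ECDSA-style signing. The function is repeated here only so the snippet runs on its own, and the order value (the NIST P-256 group order) is an assumed example, not part of the original entry.

import hashlib

def get_hash(data, n):
    message_hash = hashlib.sha512(data).digest()
    e = int.from_bytes(message_hash, 'big')
    # Keep only the leftmost bits so the result fits within the bit length of n.
    z = e >> (e.bit_length() - n.bit_length())
    assert z.bit_length() <= n.bit_length()
    return z

# Assumed example order: the NIST P-256 group order (256 bits).
n = 0xFFFFFFFF00000000FFFFFFFFFFFFFFFFBCE6FAADA7179E84F3B9CAC2FC632551
z = get_hash(b"example message", n)
print(z.bit_length() <= n.bit_length())  # True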
Computes the Modulation Spectrum-Based ECG Quality Index (MSQI) for one or many ECG signals defined in x, sampled with a sampling frequency fs
def msqi_ama(x, fs):
    # test ecg shape
    try:
        x.shape[1]
    except IndexError:
        x = x[:, np.newaxis]

    # Empirical values for the STFFT transformation
    win_size_sec = 0.125      # seconds
    win_over_sec = 0.09375    # seconds
    nfft_factor_1 = 16
    nfft_factor_2 = 4

    win_size_smp = int(win_size_sec * fs)  # samples
    win_over_smp = int(win_over_sec * fs)  # samples
    win_shft_smp = win_size_smp - win_over_smp

    # Computes Modulation Spectrogram
    modulation_spectrogram = ama.strfft_modulation_spectrogram(
        x, fs, win_size_smp, win_shft_smp,
        nfft_factor_1, 'cosine', nfft_factor_2, 'cosine')

    # Find fundamental frequency (HR)
    # f = (0, 40)Hz
    ix_f_00 = (np.abs(modulation_spectrogram['freq_axis'] - 0)).argmin(0)
    ix_f_40 = (np.abs(modulation_spectrogram['freq_axis'] - 40)).argmin(0) + 1

    # Look for the maximum only from 0.6 to 3 Hz (36 to 180 bpm)
    valid_f_ix = np.logical_or(modulation_spectrogram['freq_mod_axis'] < 0.66,
                               modulation_spectrogram['freq_mod_axis'] > 3)

    # number of epochs
    n_epochs = modulation_spectrogram['power_modulation_spectrogram'].shape[2]

    msqi_vals = np.zeros(n_epochs)
    hr_vals = np.zeros(n_epochs)

    for ix_epoch in range(n_epochs):
        B = np.sqrt(modulation_spectrogram['power_modulation_spectrogram'][:, :, ix_epoch])

        # Scale to maximum of B
        B = B / np.max(B)

        # Add B in the conventional frequency axis from 0 to 40 Hz
        tmp = np.sum(B[ix_f_00:ix_f_40, :], axis=0)

        # Look for the maximum only from 0.6 to 3 Hz (36 to 180 bpm)
        tmp[valid_f_ix] = 0
        ix_max = np.argmax(tmp)
        freq_funda = modulation_spectrogram['freq_mod_axis'][ix_max]

        # TME
        tme = np.sum(B)

        eme = 0
        for ix_harm in range(1, 5):
            ix_fm = (np.abs(modulation_spectrogram['freq_mod_axis'] - (ix_harm * freq_funda))).argmin(0)
            ix_b = int(round(.3125 / modulation_spectrogram['freq_mod_delta']))  # 0.3125Hz, half lobe
            # EME
            eme = eme + np.sum(B[0:ix_f_40, ix_fm - ix_b:ix_fm + ix_b + 1])

        # RME
        rme = tme - eme
        # MS-QI
        msqi_vals[ix_epoch] = eme / rme
        # HR
        hr_vals[ix_epoch] = freq_funda * 60

    return (msqi_vals, hr_vals, modulation_spectrogram)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def find_optimal_Q(x, y, data, min_Q, max_Q, fs=1., added_kernel = None, plot_BIC=True):\n Qs= np.arange(min_Q, max_Q)\n BIC = np.zeros((Qs.shape[0]))\n \n for i, q in enumerate(Qs):\n sm = SpectralMixture(q, x=x.flatten(),y=y.flatten(),fs=fs)\n for k in sm.kernels:\n if isinstance(k, SpectralMixtureComponent):\n k.lengthscale.prior = tfd.Gamma(f64(8.), f64(.6)) \n k.mixture_weight.prior = tfd.Gamma(f64(2.), f64(1.))\n\n if added_kernel is not None:\n sm += added_kernel\n \n# model = models.ContinuousModel(sm, (util.ensure_tf_matrix(x),util.ensure_tf_matrix(y)))\n model = models.ContinuousModel(sm, data)\n model.train(verbose=False)\n BIC[i] = model.log_posterior_density(\"bic\").numpy()\n \n if plot_BIC:\n fig = plt.figure()\n plt.plot(Qs, BIC)\n plt.xlabel('Number of Spectral Mixture components (Q)')\n plt.show()\n\n return np.argmax(BIC) + min_Q", "def harmonic_cqt(x_in, sr, hop_length=1024, fmin=27.5, n_bins=72,\n n_harmonics=5, bins_per_octave=36, tuning=0.0, filter_scale=1,\n aggregate=None, norm=1, sparsity=0.0, real=False):\n\n kwargs = dict(n_bins=n_bins, bins_per_octave=bins_per_octave,\n hop_length=hop_length, sr=sr, tuning=tuning,\n filter_scale=filter_scale, aggregate=aggregate, norm=norm,\n sparsity=sparsity, real=real)\n\n cqt_spectra = []\n min_tdim = np.inf\n for i in range(1, n_harmonics + 1):\n cqt_spectra += [np.array([librosa.cqt(x_c, fmin=i * fmin, **kwargs).T\n for x_c in x_in.T])[:, np.newaxis, ...]]\n min_tdim = min([cqt_spectra[-1].shape[2], min_tdim])\n cqt_spectra = [x[:, :, :min_tdim, :] for x in cqt_spectra]\n\n return np.concatenate(cqt_spectra, axis=1)", "def getUIQM(x):\n x = x.astype(np.float32)\n ### from https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7300447\n # c1 = 0.4680; c2 = 0.2745; c3 = 0.2576\n ### from https://ieeexplore.ieee.org/stamp/stamp.jsp?tp=&arnumber=7300447\n c1 = 0.0282\n c2 = 0.2953\n c3 = 3.5753\n\n uicm = _uicm(x)\n uism = _uism(x)\n uiconm = _uiconm(x, 8)\n uiqm = (c1 * uicm) + (c2 * uism) + (c3 * uiconm)\n return uiqm", "def test_sm_spectrum(self):\n\n spectrum_mg = np.asarray([3.41707366e-02, 1.02592426e-02, 3.20641729e-03, 9.63481603e-04,\n 2.81233386e-04, 8.12019322e-05, 2.13711295e-05, 5.30226309e-06,\n 1.14687576e-06])\n # Number of SM events generated in MG [66095., 25637., 33458., 48654., 18351., 6849., 59869., 32043., 9044.]\n\n s = 13e3**2\n logbins = np.linspace(np.log10(200),np.log10(2000),10)\n bins = 10**logbins\n nbins = len(bins)-1\n for i in range(nbins):\n center = 0.5*(bins[i]+bins[i+1])\n width = bins[i+1]-bins[i]\n spectrum = pplnu.sigma_qqlnu_int(s, bins[i], bins[i+1], 'mu', 0, par2, center**2, 0, newphys=False)*GeVtopb/width\n err = (spectrum-spectrum_mg[i])/spectrum_mg[i]\n self.assertAlmostEqual(err,0,delta=0.02,msg=f'error in bin {i}: {err}')", "def initSMParamsFourier(Q, x, y, sn, samplingFreq, nPeaks, relMaxOrder=2):\n x = np.atleast_2d(x)\n y = np.atleast_2d(y)\n n, D = x.shape\n w = np.zeros(Q)\n m = np.zeros((D,Q))\n s = np.zeros((D,Q))\n w[:] = np.std(y) / Q\n hypinit = {\n 'cov': np.zeros(Q+2*D*Q),\n 'lik': np.atleast_1d(np.log(sn)),\n 'mean': np.array([])\n }\n\n # Assign hyperparam weights\n hypinit['cov'][0:Q] = np.log(w)\n\n # Assign hyperparam frequencies (mu's)\n signal = np.array(y.ravel()).ravel() # Make into 1D array\n n = x.shape[0]\n k = np.arange(n)\n ts = n/samplingFreq\n frqx = k/float(ts)\n frqx = frqx[range(n/2)]\n frqy = np.fft.fft(signal)/n\n frqy = abs(frqy[range(n/2)])\n # Find the peaks in the frequency spectrum\n peakIdx = np.array([])\n while not peakIdx.any() and 
relMaxOrder > 0:\n peakIdx = spsig.argrelmax(np.log(frqy**2), order=relMaxOrder)[0]\n relMaxOrder -= 1\n if not peakIdx.any():\n raise ValueError(\"Data doesn't have any detectable peaks in Fourier space.\"\n \" Switching to a different kernel besides the spectral \"\n \"mixture is recommended.\")\n # Find specified number (nPeaks) largest peaks\n sortedIdx = frqy[peakIdx].argsort()[::-1][:nPeaks]\n sortedPeakIdx = peakIdx[sortedIdx]\n hypinit['cov'][Q + np.arange(0,Q*D)] = np.log(frqx[sortedPeakIdx])\n\n # Assign hyperparam length scales (sigma's)\n for i in range(0,D):\n xslice = np.atleast_2d(x[:,i])\n d2 = spat.distance.cdist(xslice, xslice, 'sqeuclidean')\n if n > 1:\n d2[d2 == 0] = d2[0,1]\n else:\n d2[d2 == 0] = 1\n maxshift = np.max(np.max(np.sqrt(d2)))\n s[i,:] = 1./np.abs(maxshift*np.random.ranf((1,Q)))\n hypinit['cov'][Q + Q*D + np.arange(0,Q*D)] = np.log(s[:]).T\n \n return hypinit", "def music(csi_corr, csi_target, Ntx, Nrx, d_tx, d_rx, t):\n\n In = 0\n s = phase_correction(csi_corr, csi_target)\n s_lin = (s[:, :, 0, t:t + 2].reshape(6, 2, order='F'))\n\n '''Compute the covariance matrix and the eigendecompositon'''\n R_hat = np.cov(s_lin)\n D, Q = ln.eig(R_hat)\n\n '''Sort the eigenvalues in D'''\n Do = np.abs(D)\n D = np.sort(Do)[::-1]\n I = np.argsort(Do)[::-1]\n Q = Q[:, I]\n\n ''' Compute the Number of signal that are significative'''\n T = np.cumsum(np.real(D))\n for i in range(1, 1, np.size(T)):\n if T(i) >= 0.99 * T(np.size(T)):\n In = i\n break\n\n ''' Get the signal eigenvectors'''\n In = 0 # take the first signal\n Qs = Q[:, :In]\n\n ''' Get the noise eigenvectors'''\n Qn = Q[:, In + 1:]\n\n ''' Angles at which MUSIC Pseudospectrum will be computed '''\n angles1 = np.arange(-90, 90, 1)\n angles2 = np.arange(-90, 90, 1)\n\n '''Compute steering vectors corresponding values in angles'''\n a1 = np.exp(-1.j * 2 * np.pi * d_rx * np.tensordot(arange(Nrx), sin(angles1 * np.pi / 180), 0))\n a2 = np.exp(-1.j * 2 * np.pi * d_tx * np.tensordot(arange(Ntx), sin(angles1 * np.pi / 180), 0))\n\n '''Compute MUSIC \"spectrum\" '''\n music_spectrum = np.zeros((np.size(angles1), np.size(angles2)), dtype=complex)\n for k in range(1, np.size(angles2)):\n for j in range(1, np.size(angles1)):\n K = np.kron(a1[:, j], a2[:, k])\n s = dot(K.T, Qn)\n music_spectrum[j, k] = 1 / dot(abs(s), abs(s).T)\n\n ''' compute the mesh and plot the surf of the pseudospectrum '''\n\n fig = plt.figure()\n ax = fig.gca(projection='3d')\n x = angles2\n y = angles1\n X, Y = np.meshgrid(x, y)\n Z = np.abs(np.squeeze(music_spectrum))\n ax = fig.add_subplot(111, projection='3d')\n ax.set_ylabel('AoA')\n ax.set_xlabel('AoD')\n ax.set_xlim3d(-90, 90)\n ax.set_ylim3d(-90, 90)\n ax.plot_surface(X, Y, Z, rstride=2, cstride=2, cmap=cm.jet, alpha=0.7, linewidth=0.25)\n\n ''' detect the peaks corresponding to DoD and DoA '''\n detect = detect_peaks(Z)\n index_max = np.column_stack(np.where(detect))\n x_ind = index_max[:, 0]\n y_ind = index_max[:, 1]\n tab = (np.transpose(np.array((Z[x_ind, y_ind], x[x_ind], y[y_ind])))).tolist()\n tab.sort(key=lambda e: e[0], reverse=True)\n myarray = np.asarray(tab[0])\n angles = myarray[1:]\n plt.show()\n\n return angles", "def _eta_sfr_scaling(self,x,q):\n i = self.enum[q]\n A = self.scaling_params['A'][i]\n b = self.scaling_params['b'][i]\n return A*x**b", "def smethod(fx,L=11,nh=2**8,tstep=2**7,ng=1,df=1.0,nfbins=2**10,sigmaL=None):\r\n \t\r\n df=float(df)\r\n \r\n if type(fx) is list:\r\n fx=np.array(fx)\r\n try:\r\n fn,fm=fx.shape\r\n if fm>fn:\r\n fm,fn=fx.shape\r\n except 
ValueError:\r\n fn=len(fx)\r\n fm=1\r\n if fm>1:\r\n print 'computing cross spectra'\r\n #compute the analytic signal of function f and dctrend\r\n #fa=sps.hilbert(dctrend(fx[0]))\r\n #fb=sps.hilbert(dctrend(fx[1]))\r\n fa=fx[0]\r\n fb=fx[1]\r\n fa=fa.reshape(fn)\r\n fb=fb.reshape(fn)\r\n pxa,tlst,flst=stft(fa,nh=nh,tstep=tstep,ng=ng,df=df,nfbins=nfbins)\r\n pxb,tlst,flst=stft(fb,nh=nh,tstep=tstep,ng=ng,df=df,nfbins=nfbins)\r\n pxx=pxa*pxb.conj()\r\n else:\r\n #compute the analytic signal of function f and dctrend\r\n #fa=sps.hilbert(dctrend(fx))\r\n fa=fx\r\n fa=fa.reshape(fn)\r\n fb=fa\r\n pxx,tlst,flst=stft(fa,nh=nh,tstep=tstep,ng=ng,df=df,nfbins=nfbins)\r\n# pxb=pxa\r\n\r\n #make an new array to put the new tfd in\r\n tfarray=abs(pxx)**2\r\n #get shape of spectrogram\r\n nf,nt=tfarray.shape\r\n #create a list of frequency shifts\r\n Llst=np.arange(start=-L/2+1,stop=L/2+1,step=1,dtype='int')\r\n #create a frequency gaussian window\r\n if sigmaL==None:\r\n sigmaL=L/(1*np.sqrt(2*np.log(2)))\r\n p=sps.gaussian(L,sigmaL)\r\n #make a matrix of windows\r\n pm=np.zeros((L,nt))\r\n for kk in range(nt):\r\n pm[:,kk]=p\r\n \r\n #loop over frequency and calculate the s-method \r\n for ff in range(L/2,nf-L/2):\r\n tfarray[ff,:]=tfarray[ff,:]+2*np.real(np.sum(pm*pxx[ff+Llst,:]*\r\n pxx[ff-Llst,:].conj(),axis=0))\r\n tfarray=tfarray/L\r\n \r\n return tfarray,tlst,flst,pxx", "def metric_iaf(self, x):\n data = np.asarray(x['data'])\n iaf = [10.0] * data.shape[0]\n for ch, ch_data in enumerate(data):\n pxx, freqs = mlab.psd(ch_data, Fs=128.0, NFFT=256)\n alpha_mask = np.abs(freqs - 10) <= 2.0\n alpha_pxx = 10*np.log10(pxx[alpha_mask])\n alpha_pxx = scipy.signal.detrend(alpha_pxx)\n # iaf[ch] = alpha_pxx.shape\n iaf[ch] = freqs[alpha_mask][np.argmax(alpha_pxx)]\n return iaf", "def spectrum_processing(s):\n s = default_filters(s)\n s = add_precursor_mz(s)\n s = normalize_intensities(s)\n s = reduce_to_number_of_peaks(s, n_required=5, ratio_desired=0.5, n_max=500)\n s = select_by_mz(s, mz_from=0, mz_to=1000)\n s = add_losses(s, loss_mz_from=10.0, loss_mz_to=200.0)\n s = require_minimum_number_of_peaks(s, n_required=5)\n return s", "def qgset(x):\n return 0.2855*x - 0.8565", "def get_mixture_evals(self, x):\n \n q_xs_list, _ = theano.scan(lambda i: \n self.get_importance_evals(x,\n T.transpose(self.mix_means[i,:]) \n ),\n sequences = [T.arange(self.num_comps)])\n \n max_q = T.max(q_xs_list)\n \n q_xs_list = q_xs_list - max_q\n \n q_xs_list = max_q + T.log(T.sum(T.exp(q_xs_list)))\n \n q_xs_list = -T.log(self.num_comps) + q_xs_list\n \n return q_xs_list", "def xs_retrival_FG(self, xs_ofinterest, domain_ofinterest, out_folder, out_alias, flag_FG2semiFG):\n self.iso_read = xs_ofinterest['i']\n # only isotopes of interest are going to be read. However, iso_A3 and\n # iso_read should be the same if macroscopic XS are going to be\n # calculated.\n # A list is generated. Each element is another list with the index\n # positions of the requested domain in the phase space. e.g. [[3], [1],\n # [1], [1], [1], [1], [1], [1, 2, 3, 4, 5]]. Self.order establishes the\n # link between the phase space index of a given dimension and its names\n # (keys). 
Any manipulation on the domain of interest must not invalidate\n # the search np.where(), otherwise empty arrays (array()) may come up.\n idx_tuple_calc = []\n for di in range(self.d):\n idx_tuple_calc.append([np.where(val == self.phase_space[self.order[di]])[0][\n 0] + 1 for val in domain_ofinterest[self.order[di]]])\n # print idx_tuple_calc\n idx_tuple_calc = self.FG2semiFG(idx_tuple_calc, flag_FG2semiFG)\n # print idx_tuple_calc;sys.exit()\n order = [self.order[i] for i in range(0, 6)]\n # I want to locate XS for index in phase space. So a USER DEFINED set of indexes is considerd\n # The parametrization is on iota, so only [0:6] is considered, but I do need to apply the rules on FG and tupleFG2tuple_semiFG for assuring consistancy of variables.\n #'''\n # generating anisotropy vector\n # This can be passed further up if in the future many files have different\n # number of groups or anisotropy levels\n anysotropy = 3\n anysotropy_vec = [str(lvl) for lvl in range(anysotropy + 1)]\n groups = 2\n groups_vec = [str(lvl) for lvl in range(1, groups + 1)]\n # generation of xs dictionary\n xs_dic = {}\n for i in xs_ofinterest['i']:\n xs_dic[i] = {}\n xs_dic[i]['R'] = {}\n for r in xs_ofinterest['r']:\n if r != 'tran':\n if xs_exists(i, r, None):\n xs_dic[i]['R'][r] = {}\n for g in xs_ofinterest['g']:\n xs_dic[i]['R'][r][g] = {}\n for tuple_i in itertools.product(*idx_tuple_calc):\n aux = tuple(self.tupleFG2tuple_semiFG(\n np.array(tuple_i), flag_FG2semiFG))\n # print aux\n xs_dic[i]['R'][r][g][aux[0:6]] = []\n else:\n \"\"\"\n tran XS are saved indexed as 'tran'+'anisotropy level'+'input group'+'output group'\n level 0 are the standard scaterring xs for a whole assembly flux. So:\n tran011=\\sigma_{1->1},tran012=\\sigma_{1->2},tran021=\\sigma_{2->1},tran022=\\sigma_{2->2}\n\n Note: scaterring xs for iso=MACR and anisotropy>1 is generated, i.e. tran2** and tran3** but then they won't be filled with anything\n \"\"\"\n for p in anysotropy_vec:\n for g1 in groups_vec:\n for g2 in groups_vec:\n # print r+p+g1+g2\n if xs_exists(i, r + p + g1 + g2, None):\n xs_dic[i]['R'][r + p + g1 + g2] = {}\n xs_dic[i]['R'][r + p + g1 + g2][g1] = {}\n for tuple_i in itertools.product(*idx_tuple_calc):\n aux = tuple(self.tupleFG2tuple_semiFG(\n np.array(tuple_i), flag_FG2semiFG))\n xs_dic[i]['R'][r + p + g1 + g2][g1][aux[0:6]] = []\n # From the list of required indices of d dimensions a list of tuples is\n # build. For the requested tuples, a point of calculation in the auxiliary\n # *.out files is found by self.conversion_table. The condition for a\n # requesting a point of calculation is a match between the tuple and the\n # available touples in the phase space. e.g [49, 50, 51, 52, 53, 54, 55,\n # 56, 57, 58]. If user-imposed specific 'non FG' whese consider in the\n # conversion table generation here they need to be considered as well\n\n point_calc = None\n for tuple_i in itertools.product(*idx_tuple_calc):\n # USER IMPOSED: the conversion table saves user defined relation in the\n # indexes of the nodes\n tuple_i = self.tupleFG2tuple_semiFG(np.array(tuple_i), flag_FG2semiFG)\n # print tuple_i\n # for the requested tuple_i the corresponding .out file is found\n for i in range(len(self.conversion_table)):\n if all(tuple_i == self.conversion_table[i][0]):\n # the conversion table permits to consider custom naming of .out files\n point_calc = self.conversion_table[i][1]\n break # calculation points are unique. 
After the first match the search for that tuple is abandoned\n if i == len(self.conversion_table):\n raise ValueError(\n 'a point not existing in the .out files has been requested. tuple=', tuple_i)\n\n # un-comment for locating specific .out files in the xs reading process\n \"\"\"\n if all(tuple_i==[ 2, 2, 1, 1, 2, 1 , 1, 1, 1, 1]) or all(tuple_i==[ 2, 2, 1, 1 , 2 , 1 , 1 ,24 ,24, 24]):\n print tuple_i, point_calc\n\n if all(tuple_i==[ 2, 2, 1, 1 , 1 , 1 , 2 ,1 ,24, 24]):\n print tuple_i, point_calc\n \"\"\"\n\n # Access auxiliary *.out files\n fout = open(out_folder + out_alias + '/' + str(point_calc) + \".out\", 'r')\n iso = None\n\n for line in fout:\n # Detect isotopes specification\n if line.find('isotope') != -1:\n iso = line.split()[1]\n tran_counter = 0\n\n # Append only xs of interest. tran is a special case and treated as group independent\n # print xs_ofinterest;sys.exit()\n if iso in xs_ofinterest[\"i\"]:\n for reac in ['abso', 'fiss', 'nufi', 'spec', 'tran', 'ener', 'difc', 'tota', 'excs']:\n # A xs may not be present, this automaticly handled by line.find(reac)!=-1\n # A xs may be present but not wanted, this is handled by: reac in xs_ofinterest[\"r\"]\n # A xs may be unphysical (nufi in MACR) this is handle by\n # xs_exists(iso,r,None)\n if line.find(reac) != -1 and reac in xs_ofinterest[\"r\"] and xs_exists(iso, reac, None):\n if reac != 'tran':\n # print iso, reac,xs_dic[iso]['R'].keys(), xs_exists(iso,reac,None)\n if '1' in str(xs_ofinterest[\"g\"]):\n xs_dic[iso]['R'][reac]['1'][\n tuple(tuple_i[0:6])].append(float(line.split()[1]))\n if '2' in str(xs_ofinterest[\"g\"]):\n xs_dic[iso]['R'][reac]['2'][\n tuple(tuple_i[0:6])].append(float(line.split()[2]))\n else:\n # this is for P3 anisotropy. Associating a group preservs structure\n # of dictionary.\n xs_dic[iso]['R'][\n reac + str(tran_counter) + '1' + '1']['1'][tuple(tuple_i[0:6])].append(float(line.split()[1]))\n xs_dic[iso]['R'][\n reac + str(tran_counter) + '1' + '2']['1'][tuple(tuple_i[0:6])].append(float(line.split()[3]))\n xs_dic[iso]['R'][\n reac + str(tran_counter) + '2' + '1']['2'][tuple(tuple_i[0:6])].append(float(line.split()[2]))\n xs_dic[iso]['R'][\n reac + str(tran_counter) + '2' + '2']['2'][tuple(tuple_i[0:6])].append(float(line.split()[4]))\n tran_counter += 1\n fout.close()\n self.domain_ofinterest = domain_ofinterest\n for i in xs_dic.keys():\n for r in xs_dic[i]['R'].keys():\n for g in xs_dic[i]['R'][r].keys():\n for iota in xs_dic[i]['R'][r][g].keys():\n if len(xs_dic[i]['R'][r][g][iota]) != len(domain_ofinterest['BURNUP']):\n print i, r, g, iota\n raise ValueError(\"empty entries for\")\n\n # if zero values are prefared to inexistent data (for isotopes associated\n # to CR and things like that)\n AD_HOC_ZERO = 'no'\n i0 = xs_dic.keys()[0]\n r0 = xs_dic[i0]['R'].keys()[0]\n g0 = xs_dic[i0]['R'][r0].keys()[0]\n iota0 = xs_dic[i0]['R'][r0][g0].keys()[0]\n aux = len(xs_dic[i0]['R'][r0][g0][iota0])\n\n if AD_HOC_ZERO == 'yes':\n for i in xs_dic.keys():\n for r in xs_dic[i]['R'].keys():\n for g in xs_dic[i]['R'][r].keys():\n for iota in xs_dic[i]['R'][r][g].keys():\n print iota, len(xs_dic[i]['R'][r][g][iota])\n if len(xs_dic[i]['R'][r][g][iota]) == 0:\n xs_dic[i]['R'][r][g][iota] = np.zeros(aux)\n\n return xs_dic, order", "def submitPirQuery(self,q,base):\n x,omega = self.db.shape\n print ('OMEGA IS ',omega)\n results = np.zeros(omega,dtype=np.uint64) \n for bit_idx in range(len(q)):\n if q[bit_idx]==0:\n continue\n results = (utilities.scaleArrayGF(self.db[bit_idx],q[bit_idx],base) + results) % 
base\n \n return results", "def qfunc(x):\n # Error check inputs\n if isinstance(x, np.ndarray):\n if x.dtype == np.complex128:\n raise TypeError(\"complex input not supported\")\n else:\n if isinstance(x, complex):\n raise TypeError(\"complex input not supported\")\n\n Q = 0.5 * erfc(x / np.sqrt(2.0))\n return Q", "def generate_measurements(\n self, x: numpy.ndarray\n ) -> Tuple[List[numpy.ndarray]]:\n # Generate A matrices\n signal_power = 0\n A_list = []\n for t in range(self.T):\n if self.A_type == 1:\n # IID Gausian with unit-norm colums\n A = (\n numpy.random.randn(self.M, self.N) +\n 1j * numpy.random.randn(self.M, self.N)\n ) / numpy.sqrt(2 * self.M)\n for n in range(self.N):\n A[:, n] /= numpy.linalg.norm(A[:, n])\n else:\n raise ValueError(\"Invalid A_type: {}\".format(self.A_type))\n\n A_list.append(A)\n signal_power += numpy.linalg.norm(A.dot(x[:, t])) ** 2\n\n # Extract noise variance for desired SNR\n sig2e = signal_power / (self.M * self.T) * 10 ** (-self.desired_SNR/10)\n\n # Generate noisy measurements\n y_list = []\n for t in range(self.T):\n e = numpy.sqrt(sig2e/2) * (\n numpy.random.randn(self.M, 2).dot([1, 1j]))\n y_list.append(\n A[t].dot(x[:, t]) + e\n )\n\n return y_list, A_list, sig2e", "def pureNi_solliq():\n # Given temperature.\n T = 800\n # Render thermodynamic database.\n db = Database(\"AlNiAnsara1997.TDB\")\n # Define the element.\n comp = \"NI\"\n # Two phases separated by the interface.\n phasenames = [\"FCC_A1\", \"LIQUID\"]\n\n # Molar volumes for elements.\n # Molar volume of Ni. \n vni = 6.718 * 10.0 ** (-6.0) + (2.936 * 10.0 ** (-5) * 10.0 ** (-6.0)) * (\n T ** 1.355\n )\n\n # Call the module for calculating solid/liquid interfacial energies in pure metals.\n sigma = SigmaPure(T, vni, db, comp, phasenames)\n\n # Print the calculated interfacial energy with xarray.Dataset type.\n print(sigma, \"\\n\")\n # Print the calculated interfacial energy with xarray.DataArray type.\n print(sigma.Interfacial_Energy, \"\\n\")\n # Print the calculated interfacial energy value.\n print(sigma.Interfacial_Energy.values)\n\n # Output\n \"\"\"\n <xarray.Dataset>\n Dimensions: ()\n Data variables:\n Component <U2 'NI'\n Temperature int64 800\n Melting_Enthalpy float64 1.748e+04\n Interfacial_Energy float64 0.3211 \n\n <xarray.DataArray 'Interfacial_Energy' ()>\n array(0.321081580921721) \n\n 0.321081580921721\n \"\"\"", "def gtgram_xe(wave, fs, channels, f_min, f_max):\n cfs = centre_freqs(fs, channels, f_min, f_max)\n fcoefs = np.flipud(make_erb_filters(fs, cfs))\n xf = erb_filterbank(wave, fcoefs)\n xe = np.power(xf, 2)\n return xe", "def gtgram_xe(wave, fs, channels, f_min, f_max):\n cfs = centre_freqs(fs, channels, f_min, f_max)\n fcoefs = np.flipud(gf.make_erb_filters(fs, cfs))\n xf = gf.erb_filterbank(wave, fcoefs)\n xe = np.power(xf, 2)\n return xe", "def SIDFT(X,D):\n N=len(X)\n x=np.zeros(N,'complex')\n for n in range(0,N,1):\n for k in range(0,N,1):\n x[n]=x[n]+np.exp(-1j*2*np.pi*k*D/N)*X[k]*np.exp(1j*2*np.pi*k*n/N)\n return x/N", "def collect_quantum_energies(quantum_outputs):\n #here we will cycle throught the outputs in order to detect SCF enery\n input_files = glob.glob(quantum_outputs)\n dict_energy = {}\n #now cycle through all the output gaussian files\n for f in input_files:\n #to be sure we take the last indexes\n phi =int( f.split(\"/\")[-2]) # to be more consistent, we know that in -2 there's phi\n psi =int( f.split(\"/\")[-1].split(\".out\")[0].split(\"structure_\")[1])\n #first fix phi and psi values:\n #plot from -180 to 180 so we can compare with 
Ramachandran\n if phi > 180.0:\n phi = phi - 360.0\n if psi > 180.0 :\n psi = psi - 360.0\n #open the output file\n gout = open(f,\"r\").readlines()\n #Extract energies\n scf = []\n for line in gout:\n if \"SCF Done\" in line:\n scf.append(line.split()[4])\n dict_energy[phi,psi] = float(scf[-1])*627.50\n print(\"Apparently quantum energies were correctly extracted\")\n\n return dict_energy", "def dfluxes(wavelength, s, line1, line2, lowlow= 25, lowhigh=15, highlow=15, highhigh = 25, \n lmin=0, lmax=0, fmin=0, fmax=0,\n broad1=2.355, broad2=2.355, sus_line1=True, sus_line2=True,\n plot=True, verbose=True, plot_sus = False, fcal = True, \n fit_continuum = True, median_kernel=35, warnings = True ): # Broad is FWHM for Gaussian sigma= 1, \n # Setup wavelength limits\n if lmin == 0 :\n lmin = line1-65. # By default, +-65 A with respect to line\n if lmax == 0 :\n lmax = line2+65.\n \n # Extract subrange to fit\n w_spec = []\n f_spec = []\n w_spec.extend((wavelength[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n f_spec.extend((s[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n \n \n if np.nanmedian(f_spec) == np.nan: print(\" NO HAY DATOS.... todo son NANs!\")\n\n \n # Setup min and max flux values in subrange to fit\n if fmin == 0 :\n fmin = np.nanmin(f_spec) \n if fmax == 0 :\n fmax = np.nanmax(f_spec) \n \n\n # We have to find some \"guess numbers\" for the Gaussian\n # Now guess_centre is line\n guess_centre1 = line1\n guess_centre2 = line2 \n guess_centre = (guess_centre1+guess_centre2)/2. \n # Define continuum regions: [-lowlow, -lowhigh] and [highlow,highhigh] in Angstroms with respect to guess_centre\n \n\n w_cont=[]\n f_cont=[]\n w_cont.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n f_cont.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n\n if fit_continuum:\n # Linear Fit to continuum \n f_cont_filtered=sig.medfilt(f_cont,np.int(median_kernel))\n try: \n mm,bb = np.polyfit(w_cont, f_cont_filtered, 1)\n except Exception:\n bb = np.nanmedian(f_cont_filtered)\n mm = 0.\n if warnings: \n print(\" WARNING: Impossible to get the continuum!\")\n print(\" Scaling the continuum to the median value\") \n continuum = mm*np.array(w_spec)+bb \n c_cont = mm*np.array(w_cont)+bb \n\n else: \n # Median value in each continuum range # NEW 15 Sep 2019\n w_cont_low = []\n f_cont_low = []\n w_cont_low.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n f_cont_low.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n median_w_cont_low = np.nanmedian(w_cont_low)\n median_f_cont_low = np.nanmedian(f_cont_low)\n w_cont_high = []\n f_cont_high = []\n w_cont_high.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n f_cont_high.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n median_w_cont_high = np.nanmedian(w_cont_high)\n median_f_cont_high = np.nanmedian(f_cont_high) \n \n b = 
(median_f_cont_low-median_f_cont_high)/(median_w_cont_low-median_w_cont_high)\n a = median_f_cont_low- b * median_w_cont_low\n \n continuum = a + b*np.array(w_spec)\n c_cont = b*np.array(w_cont)+ a \n \n # rms continuum\n rms_cont = np.nansum([ np.abs(f_cont[i] - c_cont[i]) for i in range(len(w_cont)) ]) / len(c_cont)\n\n # Search for index here w_spec(index) closest to line\n min_w = np.abs(np.array(w_spec)-line1)\n mini = np.nanmin(min_w)\n guess_peak1 = f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]\n min_w = np.abs(np.array(w_spec)-line2)\n mini = np.nanmin(min_w)\n guess_peak2 = f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]\n\n # Search for beginning/end of emission line, choosing line +-10 \n # 28th Feb 2019: Check central value between low_limit and high_limit\n\n # LOW limit\n low_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre1-15 and w_spec[i] < guess_centre1)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre1-15 and w_spec[i] < guess_centre1)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a\n \n\n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1,1,-1):\n if f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii-1]/c_fit[ii-1] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n# if f_fit[ii]/c_fit[ii] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if low_limit == 0: \n sorted_by_flux=np.argsort(fs)\n low_limit = ws[sorted_by_flux[0]]\n \n # HIGH LIMIT \n high_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre2 and w_spec[i] < guess_centre2+15)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre2 and w_spec[i] < guess_centre2+15)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a\n \n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1):\n if f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii+1]/c_fit[ii+1] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n# if f_fit[ii]/c_fit[ii] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if high_limit == 0: \n sorted_by_flux=np.argsort(fs)\n high_limit = ws[sorted_by_flux[0]] \n \n # Fit a Gaussian to data - continuum \n p0 = [guess_centre1, guess_peak1, broad1/2.355, guess_centre2, guess_peak2, broad2/2.355] # broad is the Gaussian sigma, 1.0 for emission lines\n try:\n fit, pcov = curve_fit(dgauss, w_spec, f_spec-continuum, p0=p0, maxfev=10000) # If this fails, increase maxfev...\n fit_error = np.sqrt(np.diag(pcov))\n\n\n # New 28th Feb 2019: Check central value between low_limit and high_limit\n # Better: between guess_centre - broad, guess_centre + broad\n # If not, redo fit fixing central value to the peak (it does not work... 
just fix FWHM= (high_limit-low_limit)/2.5 )\n\n if verbose != False: print(\" ----------------------------------------------------------------------------------------\")\n if fit[0] < guess_centre1 - broad1 or fit[0] > guess_centre1 + broad1 or fit[3] < guess_centre2 - broad2 or fit[3] > guess_centre2 + broad2:\n if warnings: \n if fit[0] < guess_centre1 - broad1 or fit[0] > guess_centre1 + broad1: \n print(\" Fitted center wavelength\", fit[0],\"is NOT in the expected range [\",guess_centre1 - broad1,\",\",guess_centre1 + broad1,\"]\")\n else:\n print(\" Fitted center wavelength\", fit[0],\"is in the expected range [\",guess_centre1 - broad1,\",\",guess_centre1 + broad1,\"]\")\n if fit[3] < guess_centre2 - broad2 or fit[3] > guess_centre2 + broad2: \n print(\" Fitted center wavelength\", fit[3],\"is NOT in the expected range [\",guess_centre2 - broad2,\",\",guess_centre2 + broad2,\"]\")\n else:\n print(\" Fitted center wavelength\", fit[3],\"is in the expected range [\",guess_centre2 - broad2,\",\",guess_centre2 + broad2,\"]\")\n print(\" Fit failed!\")\n \n fit[0]=guess_centre1\n fit_error[0] = 0.000001\n fit[1]=guess_peak1\n fit_error[1] = 0.000001\n fit[2] = broad1/2.355\n fit_error[2] = 0.000001 \n fit[3]=guess_centre2\n fit_error[3] = 0.000001\n fit[4]=guess_peak2\n fit_error[4] = 0.000001\n fit[5] = broad2/2.355\n fit_error[5] = 0.000001\n else:\n if warnings: print(\" Fitted center wavelength\", fit[0],\"is in the expected range [\",guess_centre1 - broad1,\",\",guess_centre1 + broad1,\"]\")\n if warnings: print(\" Fitted center wavelength\", fit[3],\"is in the expected range [\",guess_centre2 - broad2,\",\",guess_centre2 + broad2,\"]\")\n \n\n if warnings: \n print(\" Fit parameters = \", fit[0], fit[1], fit[2]) \n print(\" \", fit[3], fit[4], fit[5])\n if fit[2] == broad1/2.355 and warnings == True : \n print(\" WARNING: Fit in\",fit[0],\"failed! Using given centre wavelengths (cw), peaks at (cv) & sigmas=broad/2.355 given.\") # CHECK THIS \n\n gaussian_fit = dgauss(w_spec, fit[0], fit[1], fit[2],fit[3], fit[4], fit[5])\n \n gaussian_1 = gauss(w_spec, fit[0], fit[1], fit[2])\n gaussian_2 = gauss(w_spec, fit[3], fit[4], fit[5])\n \n\n # Estimate rms of the Gaussian fit in range [low_limit, high_limit]\n residuals = f_spec-gaussian_fit-continuum\n rms_fit = np.nansum([ ((residuals[i]**2)/(len(residuals)-2))**0.5 for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n \n # Fluxes, FWHM and Eq. 
Width calculations # CHECK THIS , not well done for dfluxes !!!\n \n gaussian_flux_1 = gauss_flux(fit[1],fit[2])\n gaussian_flux_2 = gauss_flux(fit[4],fit[5]) \n gaussian_flux = gaussian_flux_1+ gaussian_flux_2 \n if warnings: \n print(\" Gaussian flux = \", gaussian_flux_1, \" + \",gaussian_flux_2,\" = \",gaussian_flux)\n print(\" Gaussian ratio = \", gaussian_flux_1/gaussian_flux_2)\n \n error1 = np.abs(gauss_flux(fit[1]+fit_error[1],fit[2]) - gaussian_flux)\n error2 = np.abs(gauss_flux(fit[1],fit[2]+fit_error[2]) - gaussian_flux)\n gaussian_flux_error = 1 / ( 1/error1**2 + 1/error2**2 )**0.5\n \n fwhm=fit[2]*2.355\n fwhm_error = fit_error[2] *2.355\n fwhm_vel = fwhm / fit[0] * C \n fwhm_vel_error = fwhm_error / fit[0] * C \n \n gaussian_ew = gaussian_flux/np.nanmedian(f_cont)\n gaussian_ew_error = gaussian_ew * gaussian_flux_error/gaussian_flux \n \n # Integrated flux\n # IRAF: flux = sum ((I(i)-C(i)) * (w(i2) - w(i1)) / (i2 - i2) \n flux = np.nansum([ (f_spec[i]-continuum[i])*(w_spec[i+1]-w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n flux_error = rms_cont * (high_limit - low_limit)\n wave_resolution = (wavelength[-1]-wavelength[0])/len(wavelength)\n ew = wave_resolution * np.nansum ([ (1 - f_spec[i]/continuum[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n ew_error = np.abs(ew*flux_error/flux) \n gauss_to_integrated = gaussian_flux/flux * 100.\n \n # Plotting \n if plot :\n plt.figure(figsize=(10, 4))\n #Plot input spectrum\n plt.plot(np.array(w_spec),np.array(f_spec), \"blue\", lw=2, alpha = 0.7)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim((line1+line2)/2-40,(line1+line2)/2+40)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre1, color='r', linestyle='-', alpha=0.5)\n plt.axvline(x=guess_centre2, color='r', linestyle='-', alpha=0.5)\n\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n plt.axvline(x=fit[3], color='k', linestyle='-', alpha=0.5)\n # Plot Gaussians + cont\n plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.5, lw=3) \n plt.plot(w_spec, gaussian_1+continuum, color=\"navy\",linestyle='--', alpha=0.8)\n plt.plot(w_spec, gaussian_2+continuum, color=\"#1f77b4\",linestyle='--', alpha=0.8)\n plt.plot(w_spec, np.array(f_spec)-(gaussian_fit), 'orange', alpha=0.4, linewidth=5) \n\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n plt.title('Double Gaussian Fit') # Fit: x0=%.2f y0=%.2e sigma=%.2f flux=%.2e rms=%.3e' % (fit[0], fit[1], fit[2], gaussian_flux, rms_fit))\n plt.show()\n plt.close()\n \n # Plot residuals\n# plt.figure(figsize=(10, 1))\n# plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n# 
plt.ylabel(\"RMS\")\n# plt.xlim((line1+line2)/2-40,(line1+line2)/2+40)\n# plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n# plt.axvline(x=fit[3], color='k', linestyle='-', alpha=0.5)\n# plt.plot(w_spec, residuals, 'k')\n# plt.minorticks_on()\n# plt.show()\n# plt.close()\n\n \n # Printing results\n if verbose :\n #print \"\\n> WARNING !!! CAREFUL WITH THE VALUES PROVIDED BELOW, THIS TASK NEEDS TO BE UPDATED!\\n\"\n print(\"\\n> Gauss and continuum fitting + integrated flux calculations:\\n\")\n print(\" rms continuum = %.3e erg/cm/s/A \" % (rms_cont)) \n print(\" Gaussian Fit parameters: x0 = ( %.2f +- %.2f ) A \" % (fit[0], fit_error[0]))\n print(\" y0 = ( %.3f +- %.3f ) 1E-16 erg/cm2/s/A\" % (fit[1]/1E-16, fit_error[1]/1E-16 ))\n print(\" sigma = ( %.3f +- %.3f ) A\" % (fit[2], fit_error[2])) \n print(\" rms fit = %.3e erg/cm2/s/A\" % (rms_fit))\n print(\" Gaussian Flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent)\" % (gaussian_flux/1E-16, gaussian_flux_error/1E-16, gaussian_flux_error/gaussian_flux*100))\n print(\" FWHM = ( %.3f +- %.3f ) A = ( %.1f +- %.1f ) km/s \" % (fwhm, fwhm_error, fwhm_vel, fwhm_vel_error))\n print(\" Eq. Width = ( %.1f +- %.1f ) A\" % (-gaussian_ew, gaussian_ew_error)) \n print(\"\\n Integrated flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent) \" % ( flux/1E-16, flux_error/1E-16, flux_error/flux *100)) \n print(\" Eq. Width = ( %.1f +- %.1f ) A\" % (ew, ew_error))\n print(\" Gauss/Integrated = %.2f per cent \" % gauss_to_integrated)\n \n \n # New 22 Jan 2019: sustract Gaussian fit\n index=0\n s_s=np.zeros_like(s)\n sustract_this = np.zeros_like(gaussian_fit)\n if sus_line1:\n sustract_this = sustract_this + gaussian_1\n if sus_line2:\n sustract_this = sustract_this + gaussian_2 \n \n \n for wave in range(len(wavelength)):\n s_s[wave]=s[wave]\n if wavelength[wave] == w_spec[0] : \n s_s[wave] = f_spec[0]-sustract_this[0]\n index=1\n if wavelength[wave] > w_spec[0] and wavelength[wave] <= w_spec[-1]:\n s_s[wave] = f_spec[index]-sustract_this[index]\n index=index+1\n if plot_sus: \n plt.figure(figsize=(10, 4))\n plt.plot(wavelength,s, \"r\")\n plt.plot(wavelength,s_s, \"c\")\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n plt.show()\n plt.close()\n \n # This gaussian_flux in 3 is gaussian 1 + gaussian 2, given in 15, 16, respectively\n # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16\n resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, s_s, fit[3], fit[4],fit[5], gaussian_flux_1, gaussian_flux_2 ]\n return resultado \n except Exception:\n if verbose: print(\" Double Gaussian fit failed!\")\n resultado = [0, line1, 0, 0, 0, 0, 0, 0, 0, 0, 0, s, 0, 0, 0, 0, 0 ] # line was identified at lambda=line but Gaussian fit failed\n\n # NOTA: PUEDE DEVOLVER EL FLUJO INTEGRADO AUNQUE FALLE EL AJUSTE GAUSSIANO...\n\n # Plotting \n if plot :\n plt.figure(figsize=(10, 4))\n plt.plot(np.array(w_spec),np.array(f_spec), \"b\", lw=3, alpha = 0.5)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\") \n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre, color='r', linestyle='-', 
alpha=0.5)\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n# plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n# plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n # Plot residuals\n# plt.plot(w_spec, residuals, 'k')\n plt.title(\"No Gaussian fit obtained...\")\n plt.show()\n\n\n return resultado", "def test_energy_increment(self):\n sqw_ws = MuscatSofQW(SampleWorkspace=self._sample_ws,\n ResolutionWorkspace=self._resolution_ws,\n ParameterWorkspace=self._param_ws,\n OutputWorkspace='__MuscatSofQWTest_result',\n EnergyInc=0.1)\n\n self.assertEqual(sqw_ws.getNumberHistograms(), self._sample_ws.getNumberHistograms())\n self.assertEqual(sqw_ws.getAxis(0).getUnit().unitID(), 'Energy')\n self.assertEqual(sqw_ws.getAxis(1).getUnit().unitID(), 'MomentumTransfer')\n\n x_data = sqw_ws.dataX(0)\n self.assertAlmostEqual(x_data[0], -0.5)\n self.assertAlmostEqual(x_data[-1], 0.5)\n self.assertAlmostEqual(x_data[len(x_data)/2], 0.0)\n\n self.assertEquals(sqw_ws.blocksize(), 10)", "def ssc(signal,samplerate=16000,winlen=0.025,winstep=0.01,\n nfilt=26,nfft=512,lowfreq=0,highfreq=None,preemph=0.97): \n highfreq= highfreq or samplerate/2\n signal = sigproc.preemphasis(signal,preemph)\n frames = sigproc.framesig(signal, winlen*samplerate, winstep*samplerate)\n pspec = sigproc.powspec(frames,nfft)\n pspec = pylab.where(pspec == 0,pylab.finfo(float).eps,pspec) # if things are all zeros we get problems\n \n fb = get_filterbanks(nfilt,nfft,samplerate,lowfreq,highfreq)\n feat = pylab.dot(pspec,fb.T) # compute the filterbank energies\n R = pylab.tile(pylab.linspace(1,samplerate/2,pylab.size(pspec,1)),(pylab.size(pspec,0),1))\n \n return pylab.dot(pspec*R,fb.T) / feat", "def IvsQ(fileList):\n\n # Produce a list if it is not the case\n if not isinstance(fileList,list):\n fileList = [fileList]\n\n for file in fileList:\n sol = loadSol(file)\n histI = sol.histI[0:(sol.NIterGrad + 1)]\n histQ = sol.histQ[0:(sol.NIterGrad + 1)]\n plt.semilogx(histQ,histI,label=file)\n plt.grid(True)\n plt.legend()\n plt.xlabel('Hist Q')\n plt.ylabel('Hist I')\n plt.title('I vs. 
Q')\n plt.show()", "def measureDataComplexM_multiext(filename,sigma = 1.1,scale=0.27):\n hdu=pf.open(filename)\n nn = len(hdu)\n data = []\n colnames = ['x','y','M20','M22','M31','M33']\n sigma = sigma/scale\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n M20=np.zeros(Nobj)\n M22=np.zeros(Nobj).astype(complex)\n M31=np.zeros(Nobj).astype(complex)\n M33=np.zeros(Nobj).astype(complex)\n for i in range(Nobj):\n M20[i],M22[i],M31[i],M33[i]=complexMoments(data=hdui.data[i][4:].reshape(npix,npix),sigma=sigma)\n x=hdui.header['ccdXcen']\n y=hdui.header['ccdYcen']\n data.append([x,y,np.median(M20), np.median(M22), np.median(M31), np.median(M33)])\n data=np.array(data)\n hp.mwrfits(filename[:-7]+'_complexMoments_gausswt_'+str(sigma*scale)+'.fit',data.T,colnames=colnames)\n return '---done !-----'", "def ha(sf,sfn,mX,pX,params,verbose=[],onlySelected=False,hc=-2,div=8,L=30,fs=44100,gt=[]):\r\n \r\n M,N,H,B = params\r\n \r\n idx = candidSelection(sf,t=0.025,hw=25) \r\n idx = np.concatenate((np.zeros(1),idx,np.array([sf.shape[0]])))\r\n idx_orig = idx.copy()\r\n mask = np.ones(idx.shape)\r\n mask[0]=0\r\n mask[-1]=0\r\n errors = np.zeros(mX.shape[0])\r\n scores = np.zeros(idx.shape)\r\n freqs = []\r\n \r\n tFlag = False\r\n vFlag = False # flag to enable prints and plots\r\n \r\n rms = np.sum(mX,axis=1)\r\n rms = rms-np.mean(rms)\r\n rms = rms/np.max(rms)\r\n rms = savgol_filter(rms,3,1)\r\n \r\n rms_t = -0.1\r\n \r\n # sending every onset candidate to harmonic analysis\r\n for i in range(len(idx)-2,0,-1):\r\n \r\n if onlySelected:\r\n if idx[i] not in verbose:\r\n continue\r\n \r\n b = int((idx[i]-(10240/H)) if (idx[i]>(idx[i-1]+(10240/H))) else idx[i-1])\r\n e = int((idx[i]+(10240/H)) if (idx[i]<(idx[i+1]-(10240/H))) else idx[i+1])\r\n \r\n \r\n if np.mean(rms[int(idx[i]):int(idx[i])+50])<rms_t:\r\n continue\r\n \r\n onst = int(idx[i]-b)\r\n pmX = np.copy(mX[b:e])\r\n \r\n\r\n if idx[i] in verbose:\r\n print(\"\\nOnset candidate:\")\r\n print(\"onset frame: %d\" %idx[i])\r\n print(\"sf onset number: %d\" %i)\r\n vFlag = True\r\n y = MRStftSynth(pmX,pX[b:e],M,H,B)\r\n print(\"synthesized sound\")\r\n ipd.display(ipd.Audio(data=y, rate=fs))\r\n \r\n if vFlag:\r\n print(\"STFT around candidate\")\r\n plt.pcolormesh(np.arange(pmX.shape[0]), np.arange(pmX.shape[1]), np.transpose(pmX))\r\n plt.show()\r\n \r\n print(\"filtered spectral flux\")\r\n plt.plot(sf[b:e])\r\n plt.show()\r\n print(\"raw spectral flux\")\r\n plt.plot(sfn[b:e])\r\n plt.show()\r\n \r\n allErrors,allf0s,pmXv = f0detection(pmX,pX[b:e],sfn[b:e],-100,10,onst,vFlag,hc,div,params,fs,tFlag)\r\n\r\n aL = np.min((e-idx[i]/2,L)) \r\n segments = getSegments(allf0s,allErrors,onst,pmX,vFlag)\r\n scores[i],freq,segmentScores = harmonicScore(segments,aL,vFlag,tFlag)\r\n freqs.append(freq)\r\n \r\n if scores[i]<1: # prevent rejected candidates from creating boundary for adjacent onset\r\n idx[i] = sf.shape[0]\r\n \r\n if vFlag:\r\n print(\"Score for this onset: %d\" %scores[i])\r\n \r\n if tFlag and scores[i]<1:\r\n pred_time = np.abs(idx[i]*(H/fs))\r\n closest_gt_ind = np.argmin(pred_time-gt)[0]\r\n if np.abs(gt[closest_gt_ind]-pred_time)<0.05:\r\n if score[i]>1:\r\n tp.append[idx[i]]\r\n if score[i]<1:\r\n fn.append[idx[i]]\r\n \r\n print(\"STFT around onset\")\r\n plt.pcolormesh(np.arange(pmX.shape[0]), np.arange(pmX.shape[1]), np.transpose(pmX))\r\n plt.show()\r\n \r\n y = MRStftSynth(pmXv,pX,M,H,B)\r\n ipd.display(ipd.Audio(data=y, rate=fs))\r\n \r\n plt.pcolormesh(np.arange(pmXv.shape[0]), np.arange(pmXv.shape[1]), np.transpose(pmXv))\r\n 
plt.show()\r\n\r\n vFlag = False\r\n tFlag = False\r\n \r\n avg = np.mean(scores)\r\n mask[scores<1] = 0\r\n result = idx_orig[mask==1]\r\n return idx_orig[1:-1],result,freqs,scores[1:-1]", "def spectrum_test62(f):\n format_wav = ff.FortranRecordReader(\"(10f8.2)\")\n format_flux = ff.FortranRecordReader(\"(6e12.5)\")\n\n wav = []\n flux = []\n npts = int(f.readline()) # number of frequency points\n\n while len(wav) < npts:\n wav += format_wav.read(f.readline())\n wav = np.array(wav[:npts])\n\n test = f.readline() # atmospheric parameters\n if len(test.split()) == 6:\n flux += format_flux.read(test)\n\n while len(flux) < npts:\n flux += format_flux.read(f.readline())\n flux = np.array(flux[:npts])\n\n return wav, flux", "def irregularity(signal,fs, **kwargs):\n S = np.abs(np.fft.fft(signal))\n fv = np.fft.fftfreq(len(S), 1./fs)\n idx = fv >= 0\n S_plus = S[idx]\n fv_plus = fv[idx]\n S_k = S_plus[1:-1]\n S_left = S_plus[2:]\n S_right = S_plus[:-2]\n return np.log(20*np.sum(np.abs(np.log(S_k/(S_left*S_k*S_right)**(1./3)))))", "def music(idx, n_music=200):\n f = freqs[idx]\n Rxx = np.dot(X[:, idx], X[:, idx].H)\n lam, V = eig_sorted(Rxx)\n En = V[:, 1:] # Noise subspace for one source\n\n theta_range = np.linspace(0, 2*np.pi, n_music)\n P_music = np.zeros(n_music)\n for i in range(n_music):\n sv = ma.steering_vector(theta_range[i], f)\n vec = np.dot(En.H, ma.steering_vector(theta_range[i], f))\n P_music[i] = 1/np.linalg.norm(vec)**2\n\n vv = V[:, 0].flatten()\n print('----------')\n print('Performing MUSIC at {:.5} Hz'.format(f))\n print('-----------------------------')\n print('Steering vector subspace check:\\n')\n print('At the correct angle of {:.3}, '.format(theta*180/np.pi) +\n 'the real parts of the eigenvalues of R_xx are:')\n print('\\n'.join(' {:.3}'.format(np.real(l)) for l in lam))\n print('\\nSteering vector / eigenvector of max eigenvalue:')\n print((ma.steering_vector(theta, f) / vv).T)\n return P_music, theta_range", "def _signals(cls, idx, m, n):\n import numpy as np\n signal = []\n\n # Generating all the frequencies from a time series of length n\n fs = np.fft.fftfreq(n)\n\n # Loop through the frequencies in idx\n for i in idx:\n freq = fs[i]\n\n # Computing the sinusoids for the ith frequency\n signal.append(np.cos(2 * np.pi * m * freq) + complex(0, np.sin(2 * np.pi * m * freq)))\n return np.array(signal)", "def get_iPTF16asu():\n z = 0.187\n ebv = 0.0\n D = cosmo.luminosity_distance([z])[0].value * 1e+6 # in pc\n dis_mod = 5*np.log10(D / 10)\n \n tb = asci.read('../data/otherSN/Whitesides2017/table1.txt')\n tb = tb.to_pandas()\n tb = tb[tb[\"col4\"].values!=\">\"]\n \n tb = tb.rename(columns={'col1' : 'mjd',\n 'col2': 'tmax_rf',\n 'col3': 'filter',\n \"col4\": 'mag',\n 'col5': 'emag',\n 'col6': 'instrument'})\n \n ixg = tb['filter'].values == \"g\"\n ixr = tb['filter'].values == \"r\"\n ixi = tb['filter'].values == \"i\"\n tb['wave'] = np.zeros(len(tb))\n tb['wave'].values[ixg] = 4814\n tb['wave'].values[ixr] = 6422\n tb['wave'].values[ixi] = 7883\n tb[\"mag\"] = np.array(tb[\"mag\"].values, dtype = np.float)\n #tb[\"emag\"] = np.array(tb[\"emag\"].values, dtype = np.float)\n tb['mag0'] = tb['mag'] - extinction.ccm89(tb['wave'].values, 3.1*ebv, 3.1)\n tb['mag0_abs'] = tb['mag0'] - dis_mod\n tb = tb[tb.wave!=0]\n return tb", "def getSpectralEnergy(datatype, traceList, outfile, channelStart, channelEnd):\r\n assert(datatype in ['mat', 'segy']) \r\n if datatype=='segy':\r\n st = obspy.Stream(traceList) \r\n else:\r\n raise Exception('not implemented')\r\n sampleRate = 
traceList[0].stats.sampling_rate\r\n #for decimated data,sampleRate should be reflected\r\n #set wlen to 0.25 sec, high pass is 250\r\n wlen = 0.5*sampleRate\r\n nfft = int(_nearest_pow_2(wlen))\r\n npts = len(st[0].data)\r\n per_lap = 0.9\r\n if nfft > npts:\r\n nfft = int(_nearest_pow_2(npts / 8.0))\r\n nlap = int(nfft * float(per_lap))\r\n\r\n nTraces = len(traceList)\r\n Emat = None\r\n print ('sample rate is ', sampleRate, 'nfft=', nfft, 'noverlap', nlap)\r\n \r\n t_ = (traceList[0].stats.endtime-traceList[0].stats.starttime)\r\n dx_ = traceList[1].stats.distance - traceList[0].stats.distance\r\n extent = [0,len(traceList)*dx_/1e3,0,t_/100.0]\r\n\r\n for itr in range(0,nTraces):\r\n #F,T,SXX = signal.spectrogram(np.array(st[itr].data), fs=sampleRate, \r\n # window='hann', nfft=nfft, mode='magnitude')\r\n F,T,SXX = signal.spectrogram(np.array(st[itr].data), fs=sampleRate, \r\n window='hann', nfft=nfft)\r\n #sum along frequency axis \r\n #energy = np.sum((SXX[1:,:]/np.max(SXX[1:,:])),axis=0)\r\n energy = np.sum(SXX[1:,:],axis=0)\r\n #energy = np.log10(np.abs(energy/np.max(energy)))*10.0\r\n energy = np.log10(energy)*10.0\r\n if Emat is None:\r\n Emat = np.zeros((nTraces, len(T)))\r\n Emat[itr,:]=energy\r\n if DEBUG:\r\n plt.figure()\r\n im = plt.imshow(Emat,extent=extent)\r\n plt.colorbar(im)\r\n plt.savefig('spectralenergy{0}_ch{1}_{2}.png'.format(outfile,channelStart,channelEnd))\r\n plt.close()", "def IFourierSeries(input):\n N=len(input);\n w=2*cmath.pi/N;\n k=numpy.arange(0,N); \n output = [complex(0)] * N \n for n in range(N): \n r=input*cexp(-1j*w*n*k);\n output[n]=np.mean(r);\n\n print output.__class__ \n return output;", "def reassignedSmethod(fx,nh=2**7-1,tstep=2**4,nfbins=2**9,df=1.0,alpha=4,\r\n thresh=.01,L=5): \r\n \r\n# if type(fx) is list:\r\n# fx=np.array(fx)\r\n# try:\r\n# fn,fm=fx.shape\r\n# if fm>fn:\r\n# fm,fn=fx.shape\r\n# except ValueError:\r\n# fn=len(fx)\r\n# fm=1\r\n# if fm>1:\r\n# print 'computing cross spectra'\r\n# #compute the analytic signal of function f and dctrend\r\n# #fa=sps.hilbert(dctrend(fx[0]))\r\n# #fb=sps.hilbert(dctrend(fx[1]))\r\n# fa=fx[0]\r\n# fb=fx[1]\r\n# fa=fa.reshape(fn)\r\n# fb=fb.reshape(fn)\r\n# else:\r\n# fa=fx\r\n# fa=fa.reshape(fn)\r\n# fb=fa.copy()\r\n\r\n \r\n nx=len(fx) \r\n \r\n #compute gaussian window\r\n h=gausswin(nh,alpha=alpha)\r\n #h=np.hanning(nh)\r\n lh=(nh-1)/2\r\n \r\n #compute ramp window\r\n th=h*np.arange(start=-lh,stop=lh+1,step=1)\r\n \r\n #compute derivative of window\r\n dh=dwindow(h)\r\n \r\n #make a time list of indexes\r\n tlst=np.arange(start=0,stop=nx,step=tstep)\r\n nt=len(tlst)\r\n \r\n #make frequency list for plotting\r\n flst=np.fft.fftfreq(nfbins,1./df)[:nfbins/2]\r\n \r\n #initialize some time-frequency arrays\r\n tfh=np.zeros((nfbins,nt),dtype='complex128')\r\n tfth=np.zeros((nfbins,nt),dtype='complex128')\r\n tfdh=np.zeros((nfbins,nt),dtype='complex128')\r\n \r\n #compute components for reassignment\r\n for ii,tt in enumerate(tlst):\r\n #create a time shift list\r\n tau=np.arange(start=-min([np.round(nx/2.),lh,tt-1]),\r\n stop=min([np.round(nx/2.),lh,nx-tt-1])+1)\r\n #compute the frequency spots to be calculated\r\n ff=np.remainder(nfbins+tau,nfbins)\r\n #make lists of data points for each window calculation\r\n xlst=tt+tau\r\n hlst=lh+tau\r\n normh=np.sqrt(np.sum(abs(h[hlst])**2))\r\n tfh[ff,ii]=fx[xlst]*h[hlst].conj()/normh\r\n tfth[ff,ii]=fx[xlst]*th[hlst].conj()/normh\r\n tfdh[ff,ii]=fx[xlst]*dh[hlst].conj()/normh\r\n \r\n #compute Fourier Transform\r\n spech=np.fft.fft(tfh,axis=0)\r\n 
specth=np.fft.fft(tfth,axis=0)\r\n specdh=np.fft.fft(tfdh,axis=0)\r\n \r\n #get only positive frequencies\r\n spech=spech[nfbins/2:,:]\r\n specth=specth[nfbins/2:,:]\r\n specdh=specdh[nfbins/2:,:]\r\n \r\n #check to make sure no spurious zeros floating around\r\n szf=np.where(abs(spech)<1.E-6)\r\n spech[szf]=0.0+0.0j\r\n zerofind=np.nonzero(abs(spech))\r\n twspec=np.zeros((nfbins/2,nt),dtype='float')\r\n dwspec=np.zeros((nfbins/2,nt),dtype='float')\r\n twspec[zerofind]=np.round(np.real(specth[zerofind]/spech[zerofind]))\r\n dwspec[zerofind]=np.round(np.imag((nfbins/2.)*specdh[zerofind]/\r\n spech[zerofind])/(np.pi))\r\n \r\n #get shape of spectrogram\r\n nf,nt=spech.shape\r\n \r\n #-----calculate s-method-----\r\n Llst=np.arange(start=-L/2+1,stop=L/2+1,step=1,dtype='int')\r\n\r\n #make and empty array of zeros\r\n sm=np.zeros_like(spech)\r\n \r\n #put values where L cannot be value of L, near top and bottom\r\n sm[0:L/2,:]=abs(spech[0:L/2,:])**2\r\n sm[-L/2:,:]=abs(spech[-L/2:,:])**2\r\n\r\n #calculate s-method\r\n for ff in range(L/2,nf-L/2-1):\r\n sm[ff,:]=2*np.real(np.sum(spech[ff+Llst,:]*spech[ff-Llst,:].conj(),\r\n axis=0))/L\r\n \r\n #------compute reassignment----- \r\n\r\n \r\n rtfarray=np.zeros((nfbins/2,nt))\r\n \r\n threshold=thresh*np.max(abs(sm))\r\n \r\n for nn in range(nt):\r\n for kk in range(nf):\r\n if abs(spech[kk,nn])>threshold:\r\n #get center of gravity index in time direction from spectrogram \r\n nhat=int(nn+twspec[kk,nn])\r\n nhat=int(min([max([nhat,1]),nt-1]))\r\n #get center of gravity index in frequency direction from spec\r\n khat=int(kk-dwspec[kk,nn])\r\n khat=int(np.remainder(np.remainder(khat-1,nfbins/2)+nfbins/2,\r\n nfbins/2))\r\n rtfarray[khat,nhat]=rtfarray[khat,nhat]+abs(sm[kk,nn])\r\n else:\r\n rtfarray[kk,nn]=rtfarray[kk,nn]+sm[kk,nn]\r\n\r\n #place values where L cannot be L \r\n rtfarray[:L/2,:]=abs(sm[:L/2,:])\r\n rtfarray[-L/2:,:]=abs(sm[-L/2:,:])\r\n \r\n tz=np.where(rtfarray==0)\r\n rtfarray[tz]=1.0\r\n \r\n tz=np.where(sm==0.0)\r\n sm[tz]=1.0 \r\n \r\n #scale\r\n rtfarray=abs(rtfarray)\r\n \r\n return rtfarray,tlst,flst,sm", "def quality(self): \n\n subsetInt = [int(s) for s in self.subset.split() if s.isdigit()]\n columnNames = [] \n for i in range(len(subsetInt)):\n if subsetInt[i] == 1:\n columnNames.append(self.varNames[i])\n\n #qualityBand number of subset\n q = columnNames.index('Quality') \n\n if subsetInt[self.qualityBand] == 1:\n dataCount = self.subset.count('1')\n QC = np.repeat(self.DC[:,q].reshape((self.DC.shape[0],1)), dataCount-1, axis = 1)\n if self.dataset == 'MOD09A1.005' or self.dataset == 'MOD13Q1.005':\n QC = np.uint16(QC)\n else:\n QC = np.uint8(QC)\n\n QCm = QC & 1 #flips DCm mask\n DCm = np.delete(self.DC, q, 1) #looks good\n \n DCm = np.ma.masked_where(QCm == 1, DCm)\n DCm = np.ma.masked_where(DCm == 9999.0, DCm) \n \n if len(self.tiles) > 1:\n obs = self.observations/len(self.tiles)\n if len(self.tiles) == 1:\n obs = self.observations/2\n \n outArray = np.empty(shape = (self.rows*self.columns*obs, 0))\n for b in range(0, self.DC.shape[1]-1):\n cfull = DCm[:,b].reshape((self.observations, self.rows, self.columns))\n b16 = np.empty(shape = (self.rows*self.columns*obs, 0))\n for band in range(0,cfull.shape[0],2):\n c16 = np.ma.mean(cfull[band:band+1,:,:], axis=0)\n c16f = np.ma.filled(c16, 9999.0).astype(float).reshape((self.rows*self.columns))\n b16 = np.append(b16, c16f)\n outArray = np.append(outArray, b16.reshape((obs*self.rows*self.columns, 1)), axis = 1)\n \n self.finalDC = outArray\n \n np.save(str(self.directory) 
+ '/' + self.dataset + '.npy', self.finalDC)\n del outArray, QC, DCm\n\n outfile = str(self.directory) + '/' + self.dataset + '.txt'\n f = open(outfile, 'w')\n for name in columnNames:\n if name != 'Quality':\n f.write(name + '\\n')\n var = [a for a in columnNames if not a.startswith('Quality')]\n logger.log('SUCCESS', 'The final 16-day interval quality-masked matrix was created successfully. This matrix has dimensions %d rows by %d columns. Datasets included in the matrix are %s' % (self.finalDC.shape[0], self.finalDC.shape[1], var))\n \n \n if subsetInt[self.qualityBand] != 1:\n cleanDC = np.delete(self.DC, q, 1)\n \n \n if len(self.tiles) > 1:\n obs = self.observations/len(self.tiles)\n if len(self.tiles) == 1:\n obs = self.observations/2\n \n outArray = np.empty(shape = (self.rows*self.columns*obs, 0))\n for b in range(cleanDC.shape[1]):\n cfull = cleanDC[:,b].reshape((self.observations, self.rows, self.columns))\n b16 = np.empty(shape=(self.rows*self.columns*obs))\n for band in range(cfull.shape[0]):\n c16 = np.mean(cfull[band:band+1,:,:], axis=0)\n band16 = np.append(b16, c16, axis=0)\n outArray = np.append(outArray, b16.reshape((obs*self.rows*self.columns, 1)), axis = 1)\n\n np.save(self.directory + '/' + self.dataset + '.npy', self.finalDC)\n del cleanDC, outArray\n \n outfile = self.directory + '/' + self.dataset + '.txt'\n f = open(outfile, 'w')\n for name in columnNames:\n if name != 'Quality':\n f.write(str(name) + ' \\n')\n var = [a for a in columnNames if not a.startswith('Quality')]\n logger.log('SUCCESS', 'The final 16-day interval matrix was created successfully. A quality mask was not applied, though remaining no data values are set at 9999. This matrix has dimensions %d rows by %d columns. Datasets included in the matrix are %s' % (self.finalDC.shape[0], self.finalDC.shape[1], var))", "def getQiimeSffSamples(self, study_id,seq_run_id):\n try:\n con = self.getSFFDatabaseConnection()\n results = con.cursor()\n con.cursor().callproc('get_qiime_sff_samples', \\\n [study_id,seq_run_id,results])\n return results\n except Exception, e:\n print 'Exception caught: %s.\\nThe error is: %s' % (type(e), str(e))\n return False", "def processFile(filename,length = 256,q=1,fs_in=8000,divide=4,plot=False):\n length = length*divide\n #fs = sample rate, sound = multichannel sound signal\n try:\n fs1, sound = wavfile.read(filename)\n except ValueError:\n print(str(filename) + ' failed to process')\n return 'failed'\n if fs1 != fs_in:\n raise ValueError('Sampling rate should be ' + str(fs_in) + ' for: ' + filename)\n sig1 = sound[:0] #left channel\n pre_emphasis = 0.97\n sig1 = np.append(sig1[0], sig1[1:] - pre_emphasis * sig1[:-1])\n\n \n fs2, sig2 = downsample(sig1,fs1,q)\n N2 = len(sig2)\n sig3 = sig2[N2//2-length:N2//2+length]\n #print(len(sig3))\n\n FFT = abs(scipy.fft(sig3))\n FFT_side = FFT[range(len(FFT)//2)]\n #freqs = scipy.fftpack.fftfreq(sig3.size, 1/fs2)\n #plt.plot(freqs,FFT)\n if len(FFT_side) != length:\n print('ERROR MESSAGE DETAILS')\n print('filename: ' + filename)\n print('length = ' + str(length))\n print('fs_in = ' + str(fs_in))\n print('q = ' + str(q))\n print('divide = ' + str(divide))\n total_time = len(sig1)/fs1\n print('total_time = ' + str(total_time))\n print('Please check: length < total_time*fs//(2*q)')\n print('Check: ' + str(length) + ' < ' + str(total_time*fs1//(2*q)))\n raise ValueError('Length FFT_side != length: ' + str(len(FFT_side)) + ' != ' + str(length))\n \n \n FFT_log = []\n # normalize FFT\n for value in FFT_side:\n value = np.log(value)\n 
FFT_log.append(value)\n max_val = getMax(FFT_log)[1]\n FFT_norm = []\n for value in FFT_log:\n FFT_norm.append(value/max_val)\n \n \n FFT_side = np.array(FFT_norm)\n FFT_divided = FFT_side[range(length//divide)]\n #plot = True\n if plot == True:\n freqs = scipy.fftpack.fftfreq(sig3.size, 1/fs2)\n freqs_divided = np.array(freqs[range(len(FFT_divided))])\n plt.plot(freqs_divided,FFT_divided) # plotting the complete fft spectrum\n plt.show()\n \n return FFT_divided", "def icqt(X_cq, c_dc, c_nyq, multiscale, shift, window_lens):\n new_multiscale = nsdual(multiscale, shift, window_lens)\n X = nsgitf_real(X_cq, c_dc, c_nyq, new_multiscale, shift)\n return X", "def chlebus_divgi_sim_fitmetric(xi, x_hat_i, *args):\n\tif len(args) == 0:\n\t\tec = ecdf(xi)\n\t\txspace = ec[:,1]\n\telse:\n\t\txspace = args[0]\n\t\n\tyspace = np.minimum(xi, x_hat_i)/np.maximum(xi, x_hat_i)\n\treturn simps(yspace, xspace)", "def mce_filter(freq, f_raw, params):\n\tz = np.exp(-2j*np.pi*freq/f_raw)\n\tb11, b12, b21, b22 = np.array(params[:4])*0.5**14\n\tH = (1+z)**4 / (1-b11*z+b12*z**2) / (1-b21*z+b22*z**2)\n\tH /= 2**4 / (1-b11+b12) / (1-b21+b22)\n\treturn H", "def cqt(X, fs, n_bins=48, fmin=27.5, fmax=\"nyq\", gamma=20):\n # nyquist\n if fmax == \"nyq\":\n fmax = fs / 2.\n multiscale, shift, window_lens = nsgcwin(fmin, fmax, n_bins, fs,\n len(X), gamma)\n fbas = fs * np.cumsum(shift[1:]) / len(X)\n fbas = fbas[:len(window_lens) // 2 - 1]\n bins = window_lens.shape[0] // 2 - 1\n window_lens[1:bins + 1] = window_lens[bins + 2]\n window_lens[bins + 2:] = window_lens[1:bins + 1][::-1]\n norm = 2. * window_lens[:bins + 2] / float(len(X))\n norm = np.concatenate((norm, norm[1:-1][::-1]))\n multiscale = [norm[ii] * multiscale[ii] for ii in range(2 * (bins + 1))]\n\n c = nsgtf_real(X, multiscale, shift, window_lens)\n c_dc = c[0]\n c_nyq = c[-1]\n c_sub = c[1:-1]\n c = np.vstack(c_sub)\n return c, c_dc, c_nyq, multiscale, shift, window_lens", "def get_freq_grid():\n (bins_per_octave, n_octaves, _, _, f_min, _) = get_hcqt_params()\n freq_grid = librosa.cqt_frequencies(\n bins_per_octave*n_octaves, f_min, bins_per_octave=bins_per_octave\n )\n return freq_grid", "def spectralIndices(\n x: Union[ee.Image, ee.ImageCollection],\n index: Union[str, List[str]] = \"NDVI\",\n G: Union[float, int] = 2.5,\n C1: Union[float, int] = 6.0,\n C2: Union[float, int] = 7.5,\n L: Union[float, int] = 1.0,\n cexp: Union[float, int] = 1.16,\n nexp: Union[float, int] = 2.0,\n alpha: Union[float, int] = 0.1,\n slope: Union[float, int] = 1.0,\n intercept: Union[float, int] = 0.0,\n gamma: Union[float, int] = 1.0,\n kernel: str = \"RBF\",\n sigma: Union[float, str] = \"0.5 * (a + b)\",\n p: Union[float, int] = 2,\n c: Union[float, int] = 1.0,\n online: bool = False,\n drop: bool = False,\n) -> Union[ee.Image, ee.ImageCollection]:\n platformDict = _get_platform_STAC(x)\n\n if isinstance(sigma, int) or isinstance(sigma, float):\n if sigma < 0:\n raise Exception(f\"[sigma] must be positive! Value passed: sigma = {sigma}\")\n\n if p <= 0 or c < 0:\n raise Exception(\n f\"[p] and [c] must be positive! 
Values passed: p = {p}, c = {c}\"\n )\n\n additionalParameters = {\n \"g\": float(G),\n \"C1\": float(C1),\n \"C2\": float(C2),\n \"L\": float(L),\n \"cexp\": float(cexp),\n \"nexp\": float(nexp),\n \"alpha\": float(alpha),\n \"sla\": float(slope),\n \"slb\": float(intercept),\n \"gamma\": float(gamma),\n \"p\": float(p),\n \"c\": float(c),\n }\n\n spectralIndices = _get_indices(online)\n indicesNames = list(spectralIndices.keys())\n\n if not isinstance(index, list):\n if index == \"all\":\n index = list(spectralIndices.keys())\n elif index in [\n \"vegetation\",\n \"burn\",\n \"water\",\n \"snow\",\n \"drought\",\n \"urban\",\n \"kernel\",\n ]:\n temporalListOfIndices = []\n for idx in indicesNames:\n if spectralIndices[idx][\"type\"] == index:\n temporalListOfIndices.append(idx)\n index = temporalListOfIndices\n else:\n index = [index]\n\n for idx in index:\n if idx not in list(spectralIndices.keys()):\n warnings.warn(\n f\"Index {idx} is not a built-in index and it won't be computed!\"\n )\n else:\n\n def temporalIndex(img):\n lookupDic = _get_expression_map(img, platformDict)\n lookupDic = {**lookupDic, **additionalParameters}\n kernelParameters = _get_kernel_parameters(img, lookupDic, kernel, sigma)\n lookupDic = {**lookupDic, **kernelParameters}\n lookupDicCurated = _remove_none_dict(lookupDic)\n if all(\n band in list(lookupDicCurated.keys())\n for band in spectralIndices[idx][\"bands\"]\n ):\n return img.addBands(\n img.expression(\n spectralIndices[idx][\"formula\"], lookupDicCurated\n ).rename(idx)\n )\n else:\n warnings.warn(\n f\"This platform doesn't have the required bands for {idx} computation!\"\n )\n return img\n\n if isinstance(x, ee.imagecollection.ImageCollection):\n x = x.map(temporalIndex)\n elif isinstance(x, ee.image.Image):\n x = temporalIndex(x)\n\n if drop:\n x = x.select(index)\n\n return x", "def get_chisqrs(prf,diff,nbins): \n off_pulse = np.zeros(39)\n off_pulse[:20] = prf[:20]\n off_pulse[20:] = prf[45:] #Making off pulse region\n # print(\"Off pulse Region \",off_pulse)\n op_rms = np.var(off_pulse) #Rms\n # print(\"Off pulse RMS \",op_rms)\n s = 0\n for d in diff:\n s += d**2/op_rms\n\n s = s/(nbins - 1)\n # print(\"Chisqr value = \",s)\n\n return s", "def gen_psi(self, x):\n\n if isinstance(x, jnp.ndarray):\n x = x[:, None]\n return jnp.exp(-self.h * (x - self.c) ** 2)", "def get_modfreq_from_quantiles_many_samples(scores_per_sample, q=0.1):\n freqs = np.zeros(len(scores_per_sample))\n minc = min(map(len, scores_per_sample))\n q1, q2 = np.quantile(np.concatenate([s[:minc] for s in scores_per_sample]), [q, 1-q])\n for i, _scores in enumerate(scores_per_sample): \n confs = [(_scores<q1).sum(), (_scores>q2).sum()]\n if not sum(confs): continue\n mod_freq = confs[1]/sum(confs)\n freqs[i] = mod_freq\n return freqs", "def test_integrate_spectrum():\n e1 = Quantity(1, \"TeV\")\n e2 = Quantity(10, \"TeV\")\n einf = Quantity(1e10, \"TeV\")\n e = Quantity(1, \"TeV\")\n g = 2.3\n I = Quantity(1e-12, \"cm-2 s-1\")\n\n ref = power_law_energy_flux(I=I, g=g, e=e, e1=e1, e2=e2)\n norm = power_law_flux(I=I, g=g, e=e, e1=e1, e2=einf)\n f = lambda x: x * power_law_evaluate(x, norm, g, e)\n val = integrate_spectrum(f, e1, e2)\n assert_quantity_allclose(val, ref)\n\n # Test quantity handling\n e2_ = Quantity(1e4, \"GeV\")\n val_ = integrate_spectrum(f, e1, e2_)\n assert_quantity_allclose(val, val_)", "def _gen_ms(theta, nb_qubits):\n routine = QRoutine()\n\n for first_qb in range(nb_qubits):\n for second_qb in range(first_qb + 1, nb_qubits):\n routine.apply(RXX(theta), 
[first_qb, second_qb])\n\n return routine", "def calculateenergy_freqdomain(input_signal_or_spectrum):\n if isinstance(input_signal_or_spectrum, (sumpf.Signal)):\n ip = sumpf.modules.FourierTransform(signal=input_signal_or_spectrum).GetSpectrum()\n else:\n ip = input_signal_or_spectrum\n energy_allchannels = []\n for c in ip.GetChannels():\n energy_singlechannel = []\n for s in c:\n energy_singlechannel.append(abs(s) ** 2)\n energy_allchannels.append(numpy.sum(energy_singlechannel))\n return energy_allchannels", "def _uism(x):\n # get image channels\n R = x[:, :, 0]\n G = x[:, :, 1]\n B = x[:, :, 2]\n\n # first apply Sobel edge detector to each RGB component\n Rs = sobel(R)\n Gs = sobel(G)\n Bs = sobel(B)\n\n # multiply the edges detected for each channel by the channel itself\n R_edge_map = np.multiply(Rs, R)\n G_edge_map = np.multiply(Gs, G)\n B_edge_map = np.multiply(Bs, B)\n\n # get eme for each channel\n r_eme = eme(R_edge_map, 8)\n g_eme = eme(G_edge_map, 8)\n b_eme = eme(B_edge_map, 8)\n\n # coefficients\n lambda_r = 0.299\n lambda_g = 0.587\n lambda_b = 0.144\n\n return (lambda_r * r_eme) + (lambda_g * g_eme) + (lambda_b * b_eme)", "def emd(self, S, timeLine=None, maxImf=-1):\n \n Res = S.astype(self.DTYPE)\n scale = (max(Res) - min(Res))/self.scaleFactor\n Res, scaledS = Res/scale, S/scale\n imf = np.zeros(len(S), dtype=self.DTYPE)\n imfOld = Res.copy()\n \n if timeLine == None: timeLine = np.arange(len(S))\n\n N = len(S)\n\n if Res.dtype!=self.DTYPE: print 'Res.dtype: ', Res.dtype\n if scaledS.dtype!=self.DTYPE: print 'scaledS.dtype: ', scaledS.dtype\n if imf.dtype!=self.DTYPE: print 'imf.dtype: ', imf.dtype\n if imfOld.dtype!=self.DTYPE: print 'imfOld.dtype: ', imfOld.dtype\n if timeLine.dtype!=self.DTYPE: print 'timeLine.dtype: ', timeLine.dtype\n\n if S.shape != timeLine.shape:\n info = \"Time array should be the same size as signal.\"\n raise Exception(info)\n \n # Create arrays\n IMF = {} # Dic for imfs signals\n EXT = {} # Dic for number of extrema\n TIME = {} # Dic for time computing of single imf\n ITER = {} # Dic for number of iterations\n imfNo = 0\n notFinish = True\n\n time0 = time.time()\n\n corRes = np.zeros(N, dtype=self.DTYPE)\n y = np.zeros(N, dtype=self.DTYPE)\n t = np.zeros(N, dtype=self.DTYPE)\n oldMean = np.zeros(N, dtype=self.DTYPE)\n\n meanEnv = np.zeros(N, dtype=self.DTYPE)\n \n while(notFinish):\n print 'IMF -- ', imfNo\n\n #~ yRes = (-imf - corRes).astype(self.DTYPE)\n yRes = (-(imf+corRes)).astype(self.DTYPE)\n tRes = (Res + yRes).astype(self.DTYPE)\n cor = (tRes - Res) - yRes\n Res = tRes\n\n #~ Res -= imf\n imf = Res.copy()\n \n if imf.dtype!=self.DTYPE: print 'imf.dtype: ', imf.dtype\n mean = np.zeros(len(S), dtype=self.DTYPE)\n sumEnv = np.zeros(len(S), dtype=self.DTYPE)\n corEnv = np.zeros(len(S), dtype=self.DTYPE)\n if sumEnv.dtype != self.DTYPE: print 'sumEnv.dtype: ', sumEnv.dtype\n if corEnv.dtype != self.DTYPE: print 'corEnv.dtype: ', corEnv.dtype\n\n # Counters\n n = 0 # All iterations for current imf.\n n_h = 0 # counts when |#zero - #ext| <=1\n \n t0 = time.time()\n\n # Start on-screen displaying\n if self.PLOT and self.INTERACTIVE:\n py.ion()\n \n while(n<self.MAX_ITERATION):\n n += 1\n\n #~ maxPos, maxVal, minPos, minVal, indzer = self.findExtrema_simple(timeLine, imf)\n maxPos, maxVal, minPos, minVal, indzer = self.findExtremaGeneric(timeLine, imf)\n extNo = len(minPos)+len(maxPos)\n nzm = len(indzer)\n\n if extNo > 2:\n # If scale tiny it might be noise, thus no need for\n # further decomposition\n if np.max(imf) - np.min(imf) < 1e-4:\n 
print 'dS: ', np.max(imf) - np.min(imf)\n notFinish = False\n break\n \n # Plotting. Either into file, or on-screen display.\n if n>1 and self.PLOT:\n py.clf()\n py.plot(timeLine, imf*scale, 'g')\n py.plot(timeLine, maxEnv*scale, 'b')\n py.plot(timeLine, minEnv*scale, 'r')\n py.plot(timeLine, mean*scale, 'k--')\n if self.INTERACTIVE:\n py.draw()\n else:\n fName = \"imf{}_{:02}\".format(imfNo, n-1)\n py.savefig(os.path.join(self.plotPath,fName))\n\n if Res.dtype!=self.DTYPE: print 'Res.dtype: ', Res.dtype\n if mean.dtype!=self.DTYPE: print 'mean.dtype: ', mean.dtype\n \n imfOld = imf.copy()\n imf = Res - self.reduceScale*mean\n\n if imf.dtype!=self.DTYPE: print '2) imf.dtype: ', imf.dtype\n \n maxEnv, minEnv, eMax, eMin = self.extractMaxMinSpline(timeLine, imf)\n tmpMean = 0.5*(maxEnv+minEnv)\n \n if type(maxEnv) == type(-1):\n notFinish = True\n break\n\n y = 0.5*maxEnv - cor\n t = sumEnv + y\n cor = (t - sumEnv) - y\n sumEnv = t\n\n y = 0.5*minEnv - cor\n t = sumEnv + y\n cor = (t - sumEnv) - y\n sumEnv = t\n \n oldMean = mean.copy()\n mean = sumEnv\n if mean.dtype!=self.DTYPE: print '3) mean.dtype: ', mean.dtype\n \n if y.dtype != self.DTYPE: print 'y.dtype: ', y.dtype\n if t.dtype != self.DTYPE: print 't.dtype: ', t.dtype\n\n # Fix number of iterations\n if self.FIXE:\n if n>=self.FIXE+1: break\n\n # Fix number of iterations after number of zero-crossings\n # and extrema differ at most by one.\n elif self.FIXE_H:\n\n #~ maxPos, maxVal, minPos, minVal, indZer = self.findExtrema_simple(timeLine, imf)\n maxPos, maxVal, minPos, minVal, indZer = self.findExtremaGeneric(timeLine, imf)\n extNo = len(maxPos)+len(minPos)\n nzm = len(indZer)\n \n \n if n == 1: continue\n if abs(extNo-nzm)>1: n_h = 0 \n else: n_h += 1\n \n #if np.all(maxVal>0) and np.all(minVal<0):\n # n_h += 1\n #else:\n # n_h = 0\n\n # STOP\n if n_h >= self.FIXE_H: break\n \n # Stops after default stopping criteria are meet.\n else:\n \n #~ maxPos, maxVal, minPos, minVal, indZer = self.findExtrema_simple(timeLine, imf)\n maxPos, maxVal, minPos, minVal, indZer = self.findExtremaGeneric(timeLine, imf)\n extNo = len(maxPos) + len(minPos)\n nzm = len(indZer)\n \n f1 = self.checkImf(imf, maxEnv, minEnv, tmpMean, extNo)\n #f2 = np.all(maxVal>0) and np.all(minVal<0)\n f2 = abs(extNo - nzm)<2\n\n # STOP \n if f1 and f2: break\n \n else:\n EXT[imfNo] = extNo\n notFinish = False\n break\n\n #IMF[imfNo] = imf.copy() - 0.5*(corEnvMax+corEnvMin)\n IMF[imfNo] = imf.copy() - cor\n ITER[imfNo] = n\n EXT[imfNo] = extNo\n TIME[imfNo] = time.time() - t0\n imfNo += 1\n\n if self.endCondition(scaledS, IMF) or imfNo==maxImf:\n notFinish = False\n break\n\n #~ # Saving residuum\n #~ Res -= imf\n #~ #Res = scaledS - np.sum([IMF[i] for i in xrange(imfNo)],axis=0) \n #~ IMF[imfNo] = Res\n #~ ITER[imfNo] = 0\n #~ EXT[imfNo] = self.getExtremaNo(Res)\n #~ TIME[imfNo] = 0\n #~ imfNo += 1\n time1 = time.time()\n\n for key in IMF.keys():\n IMF[key] *= scale\n #return IMF, EXT, TIME, ITER, imfNo\n return IMF, EXT, ITER, imfNo", "def mfcc(x, L=128, hop=64, M=14, fs=8000, fl=0.0, fh=0.5):\n\n # perform STFT, X contains frames in rows\n X = stft.analysis(x, L, hop, transform=np.fft.rfft)\n\n # get and apply the mel filter bank\n # and compute log energy\n H = melfilterbank(M, L, fs=fs, fl=fl, fh=fh)\n S = np.log(np.dot(H, np.abs(X.T) ** 2))\n\n # Now take DCT of the result\n C = dct(S, type=2, n=M, axis=0)\n\n return C", "def mod_filterbank(signal, fs, modf):\n modf = np.asarray(modf)\n fcs = modf[1:]\n fcut = modf[0]\n # Make signal odd length\n signal = 
signal[0:-1] if (len(signal) % 2) == 0 else signal\n\n q = 1. # Q-factor of band-pass filters\n lp_order = 3. # order of the low-pass filter\n\n n = signal.shape[-1] # length of envelope signals\n X = fft(signal)\n X_mag = np.abs(X)\n X_power = np.square(X_mag) / n # power spectrum\n X_power_pos = X_power[0:np.floor(n / 2).astype('int') + 1]\n # take positive frequencies only and multiply by two to get the same total\n # energy\n X_power_pos[1:] = X_power_pos[1:] * 2\n\n pos_freqs = np.linspace(0, fs / 2, X_power_pos.shape[-1])\n # Concatenate vector of 0:fs and -fs:1\n freqs = np.concatenate((pos_freqs, -1 * pos_freqs[-1:0:-1]))\n\n # Initialize transfer function\n TFs = np.zeros((len(fcs) + 1, len(freqs))).astype('complex')\n # Calculating frequency-domain transfer function for each center frequency:\n for k in range(len(fcs)):\n TFs[k + 1, 1:] = 1. / (1. + (1j * q * (freqs[1:] / fcs[k] - fcs[k] /\n freqs[1:]))) # p287 Hambley.\n\n # squared filter magnitude transfer functions\n Wcf = np.square(np.abs(TFs))\n\n # Low-pass filter squared transfer function, third order Butterworth filter\n # TF from:\n # http://en.wikipedia.org/wiki/Butterworth_filter\n Wcf[0, :] = 1 / (1 + ((2 * pi * freqs / (2 * pi * fcut)) ** (2 * lp_order)))\n # Transfer function of low-pass filter\n TFs[0, :] = np.sqrt(Wcf[0, :])\n\n # initialize output product:\n vout = np.zeros((len(fcs) + 1, len(pos_freqs)))\n powers = np.zeros(len(modf))\n\n # ------------ DC-power, --------------------------\n # here divide by two such that a fully modulated tone has an AC-power of 1.\n dc_power = X_power_pos[0] / n / 2\n # ------------------------------------------------\n X_filt = np.zeros((Wcf.shape[0], X.shape[-1]), dtype='complex128')\n filtered_envs = np.zeros_like(X_filt, dtype='float')\n\n for k, (w, TF) in enumerate(zip(Wcf, TFs)):\n vout[k] = X_power_pos * w[:np.floor(n / 2).astype('int') + 1]\n # Integration estimated as a sum from f > 0\n # integrate envelope power in the passband of the filter. 
Index goes\n # from 2:end since integration is for f>0\n powers[k] = np.sum(vout[k, 1:]) / n / dc_power\n # Filtering and inverse Fourier transform to get time signal.\n X_filt[k] = X * TF\n filtered_envs[k] = np.real(ifft(X_filt[k]))\n return powers, filtered_envs", "def _sincfunc(x, dx, dampfac=3.25):\n if dx != 0.0:\n xx = (x+dx)*np.pi #- cache shifted array for 30% faster evals\n return np.exp( -(xx/(dampfac*np.pi))**2 ) * np.sin(xx) / xx\n else:\n xx = np.zeros(len(x))\n xx[len(x)//2] = 1.0\n return xx", "def get_beat_sync_spectrums(audio):\n y, sr = core.load(audio, sr=44100)\n eql_y = EqualLoudness()(y)\n tempo, framed_dbn = self_tempo_estimation(y, sr)\n np.append(framed_dbn, np.array(len(y)/sr))\n band1 = (0, 220)\n band2 = (220, 1760)\n band3 = (1760, sr / 2)\n band1list = []\n band2list = []\n band3list = []\n for i in range(1, len(framed_dbn)):\n fft_eq = abs(np.fft.fft(eql_y[int(framed_dbn[i - 1] * sr):int(framed_dbn[i] * sr)]))\n freqs = np.fft.fftfreq(len(fft_eq), 1 / sr)\n band1list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band1[0], freqs < band1[1]))]**2))))\n band2list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band2[0], freqs < band2[1]))]**2))))\n band3list.append(np.sqrt(np.mean(sum(fft_eq[np.where(np.logical_and(freqs > band3[0], freqs < band3[1]))]**2))))\n\n band1list = np.array(band1list).transpose()\n band2list = np.array(band2list).transpose()\n band3list = np.array(band3list).transpose()\n return np.vstack([band1list, band2list, band3list])", "def efc_calcs(df_param_indexed):\n \n df_param_indexed = df_param_indexed.copy()\n \n ''' commented 20180210 after Calmetrix update\n # Remove for cc1 data exported with cc2\n mix_start = datetime.strptime(\n df_param_indexed.loc['Mix Time', 1], \"%d-%b-%Y %H:%M:%S\")\n log_start = datetime.strptime(\n df_param_indexed.loc['Start Time', 1], \"%d-%b-%Y %H:%M:%S\")\n time_difference = (log_start - mix_start).total_seconds()\n '''\n\n # Calculate mass of binder in sample\n m_slag = float(df_param_indexed.loc['Suppl 1 Mass, g', 1])\n m_fa = float(df_param_indexed.loc['Suppl 2 Mass, g', 1])\n m_water = float(df_param_indexed.loc['Water Mass, g', 1])\n m_agg = float(df_param_indexed.loc['Aggr Mass, g', 1])\n m_sample = float(df_param_indexed.loc['Sample Mass, g', 1])\n m_sample_scm = m_sample / (m_slag + m_fa + m_water + m_agg) * (m_slag + m_fa)\n \n return m_sample_scm", "def find_signal_morphology(rr_intervals, fs: float = 4):\n baseline = calculate_time_features(rr_intervals=rr_intervals)['baseline']\n vhr = rr_intervals - baseline\n accel_values = np.sort(vhr[vhr > 15]) # Change for right value\n decel_values = np.sort(vhr[vhr < -15]) # Change for right value\n accel_args = np.zeros(accel_values.shape, dtype=int)\n decel_args = np.zeros(decel_values.shape, dtype=int)\n acceleration_array = []\n deceleration_array = []\n k = 0\n for i, x in enumerate(vhr):\n if x in accel_values:\n accel_args[k] = int(i)\n k += 1\n # Make acceleration array of tuples (start, end)\n if np.sum(accel_values > 0):\n start = accel_args[0]\n end = accel_args[0]\n for i in range(len(accel_args) - 1):\n if (accel_args[i + 1] - accel_args[i] >= 2) or (i + 1 == len(accel_args) - 1):\n acceleration_array.append((start, end))\n start = accel_args[i + 1]\n else:\n end = accel_args[i + 1]\n # Make deceleration array of tuples (start, end)\n k = 0\n for i, x in enumerate(vhr):\n if x in decel_values:\n decel_args[k] = i\n k += 1\n if np.sum(decel_values < 0) > 2:\n start = decel_args[0]\n end = decel_args[0]\n 
for i in range(len(decel_args) - 1):\n if (decel_args[i + 1] - decel_args[i] >= 2) or (i + 1 == len(decel_args)):\n deceleration_array.append((start, end))\n start = decel_args[i + 1]\n else:\n end = decel_args[i + 1]\n delete_array = np.concatenate((accel_args, decel_args))\n vhr_pure = np.delete(vhr, delete_array)\n AmpStd = np.sqrt(np.mean(np.square(vhr_pure)))\n return baseline, AmpStd, acceleration_array, deceleration_array", "def mfcc(signal,samplerate=16000,winlen=0.025,winstep=0.01,numcep=13,\n nfilt=26,nfft=512,lowfreq=0,highfreq=None,preemph=0.97,ceplifter=22,appendEnergy=True): \n feat, energy = fbank(signal,samplerate,winlen,winstep,nfilt,nfft,lowfreq,highfreq,preemph)\n feat = pylab.log(feat)\n feat = dct(feat, type=2, axis=1, norm='ortho')[:,:numcep]\n feat = lifter(feat,ceplifter)\n if appendEnergy: feat[:,0] = pylab.log(energy) # replace first cepstral coefficient with log of frame energy\n return feat", "def asaxsseqeval(data,param,asaxsenergies,chemshift,fprimefile,samples=None,seqname=None,element=0):\n if samples is None:\n samples=utils.unique([param[i]['Title'] for i in range(0,len(data))]);\n print \"Found samples: \", samples\n if type(samples)!=types.ListType:\n samples=[samples];\n if seqname is not None:\n logfile=open('%s.log' % seqname,'wt')\n logfile.write('ASAXS sequence name: %s\\n' % seqname)\n logfile.write('Time: %s' % time.asctime())\n asaxsecalib=[];\n #asaxsenergies=np.array(utils.unique(asaxsenergies,lambda a,b:(abs(a-b)<2)))\n asaxsenergies=np.array(asaxsenergies);\n for j in range(0,len(asaxsenergies)):\n asaxsecalib.append([param[i]['EnergyCalibrated']\n for i in range(0,len(data)) \n if abs(param[i]['Energy']-asaxsenergies[j])<2][0]);\n asaxsecalib=np.array(asaxsecalib);\n \n print \"Calibrated ASAXS energies:\", asaxsecalib\n fprimes=B1io.readf1f2(fprimefile);\n pylab.plot(fprimes[:,0],fprimes[:,1],'b-');\n pylab.plot(fprimes[:,0],fprimes[:,2],'r-');\n asaxsf1=np.interp(asaxsecalib-chemshift,fprimes[:,0],fprimes[:,1]);\n asaxsf2=np.interp(asaxsecalib-chemshift,fprimes[:,0],fprimes[:,2]);\n print \"f' values\", asaxsf1\n print \"f'' values\", asaxsf2\n if seqname is not None:\n logfile.write('Calibrated ASAXS energies:\\n')\n for i in range(len(asaxsenergies)):\n logfile.write(\"%f -> %f\\tf1=%f\\tf2=%f\\n\" % (asaxsenergies[i],asaxsecalib[i],asaxsf1[i],asaxsf2[i]))\n logfile.write('Chemical shift (eV): %f\\n' % chemshift)\n logfile.write('Atomic number supplied by the user: %d\\n' % element)\n logfile.write('fprime file: %s\\n' % fprimefile)\n pylab.plot(asaxsecalib-chemshift,asaxsf1,'b.',markersize=10);\n pylab.plot(asaxsecalib-chemshift,asaxsf2,'r.',markersize=10);\n pylab.legend(['f1','f2'],loc='upper left');\n pylab.xlabel('Photon energy (eV)');\n pylab.ylabel('Anomalous corrections (e.u.)');\n pylab.title('Anomalous correction factors')\n if seqname is not None:\n pylab.savefig('%s_f1f2.eps' % seqname,dpi=300,transparent='True',format='eps')\n if len(asaxsenergies)<3:\n print \"At least 3 energies should be given!\"\n return\n for s in samples:\n print \"Evaluating sample %s\" % s\n if seqname is not None:\n logfile.write('Sample: %s\\n' % s)\n q=None;\n counter=None;\n fsns=None\n for k in range(0,len(data)): #collect the intensities energy-wise.\n if param[k]['Title']!=s:\n continue\n if q is None:\n q=np.array(data[k]['q']);\n NQ=len(q);\n Intensity=np.zeros((len(q),len(asaxsenergies)))\n Errors=np.zeros((len(q),len(asaxsenergies)))\n counter=np.zeros((1,len(asaxsenergies)))\n fsns=[[] for l in range(len(asaxsenergies))]\n if 
np.sum(q-np.array(data[k]['q']))>0:\n print \"Check the datasets once again: different q-scales!\"\n continue;\n energyindex=np.absolute(asaxsenergies-param[k]['Energy'])<2\n Intensity[:,energyindex]=Intensity[:,energyindex]+np.array(data[k]['Intensity']).reshape(NQ,1);\n Errors[:,energyindex]=Intensity[:,energyindex]+(np.array(data[k]['Error']).reshape(NQ,1))**2;\n counter[0,energyindex]=counter[0,energyindex]+1;\n if pylab.find(len(energyindex))>0:\n print pylab.find(energyindex)[0]\n fsns[pylab.find(energyindex)[0]].append(param[k]['FSN']);\n Errors=np.sqrt(Errors)\n Intensity=Intensity/np.kron(np.ones((NQ,1)),counter)\n if seqname is not None:\n for i in range(0,len(asaxsenergies)):\n logfile.write('FSNs for energy #%d:' % i)\n for j in fsns[i]:\n logfile.write('%d' % j)\n logfile.write('\\n')\n datatosave=np.zeros((len(q),2*len(asaxsenergies)+1))\n datatosave[:,0]=q;\n for i in range(len(asaxsenergies)):\n datatosave[:,2*i+1]=Intensity[:,i]\n datatosave[:,2*i+2]=Errors[:,i]\n np.savetxt('%s_%s_ie.txt' % (seqname, s),datatosave,delimiter='\\t')\n # now we have the Intensity and Error matrices fit to feed to asaxsbasicfunctions()\n N,M,R,DN,DM,DR=asaxsbasicfunctions(Intensity,Errors,asaxsf1,asaxsf2,element=element);\n sep12,dsep12,sep23,dsep23,R1,dR1=asaxspureresonant(Intensity[:,0],Intensity[:,1],Intensity[:,2],\n Errors[:,0],Errors[:,1],Errors[:,2],\n asaxsf1[0],asaxsf1[1],asaxsf1[2],\n asaxsf2[0],asaxsf2[1],asaxsf2[2])\n Ireconst=N+M*2*asaxsf1[0]+R*(asaxsf1[0]**2+asaxsf2[0]**2)\n if seqname is not None:\n datatosave=np.zeros((len(q),7))\n datatosave[:,0]=q;\n datatosave[:,1]=N.flatten(); datatosave[:,2]=DN.flatten();\n datatosave[:,3]=M.flatten(); datatosave[:,4]=DM.flatten();\n datatosave[:,5]=R.flatten(); datatosave[:,6]=DR.flatten();\n np.savetxt('%s_%s_basicfun.txt' % (seqname, s),datatosave,delimiter='\\t')\n datatosave[:,1]=sep12.flatten(); datatosave[:,2]=dsep12.flatten();\n datatosave[:,3]=sep23.flatten(); datatosave[:,4]=dsep23.flatten();\n datatosave[:,5]=R1.flatten(); datatosave[:,6]=dR1.flatten();\n np.savetxt('%s_%s_separation.txt' % (seqname, s),datatosave,delimiter='\\t')\n pylab.figure()\n #pylab.errorbar(q,Intensity[:,0],Errors[:,0],label='I_0',marker='.')\n #pylab.errorbar(q,N.flatten(),DN.flatten(),label='Nonresonant',marker='.')\n #pylab.errorbar(q,M.flatten(),DM.flatten(),label='Mixed',marker='.')\n #pylab.errorbar(q,R.flatten(),DR.flatten(),label='Resonant',marker='o')\n pylab.plot(q,Intensity[:,0],label='I_0',marker='.')\n pylab.plot(q,N.flatten(),label='Nonresonant',marker='.')\n pylab.plot(q,M.flatten(),label='Mixed',marker='.')\n pylab.plot(q,R.flatten(),label='Resonant',marker='o')\n pylab.plot(q,Ireconst.flatten(),label='I_0_reconstructed',marker='.')\n pylab.title(\"ASAXS basic functions for sample %s\" % s)\n pylab.xlabel(u\"q (1/%c)\" % 197)\n pylab.ylabel(\"Scattering cross-section (1/cm)\")\n pylab.gca().set_xscale('log');\n pylab.gca().set_yscale('log');\n pylab.legend();\n pylab.savefig('%s_%s_basicfun.eps'%(seqname,s),dpi=300,format='eps',transparent=True)\n pylab.figure()\n #pylab.errorbar(q,Intensity[:,0],Errors[:,0],label='I_0',marker='.')\n #pylab.errorbar(q,sep12,dsep12,label='(I_0-I_1)/(f1_0-f1_1)',marker='.')\n #pylab.errorbar(q,sep23,dsep23,label='(I_1-I_2)/(f1_1-f1_2)',marker='.')\n #pylab.errorbar(q,R1.flatten(),dR1.flatten(),label='Pure resonant',marker='.')\n pylab.plot(q,Intensity[:,0],label='I_0',marker='.')\n pylab.plot(q,sep12,label='(I_0-I_1)/(f1_0-f1_1)',marker='.')\n pylab.plot(q,sep23,label='(I_1-I_2)/(f1_1-f1_2)',marker='.')\n 
pylab.plot(q,R1.flatten(),label='Pure resonant',marker='.')\n \n pylab.title(\"ASAXS separated and pure resonant terms for sample %s\" % s)\n pylab.xlabel(u\"q (1/%c)\" % 197)\n pylab.ylabel(\"Scattering cross-section (1/cm)\")\n pylab.gca().set_xscale('log');\n pylab.gca().set_yscale('log');\n pylab.legend();\n pylab.savefig('%s_%s_separation.eps'%(seqname,s),dpi=300,format='eps',transparent=True)\n logfile.close()\n pylab.show()", "def _get_scfinfo(self, file):\n f = open_general(file)\n tmptxt = f.readlines()\n f.close()\n # get rms and number of iterations\n itmp, niter, rms = 0, -1, -1\n while itmp >= 0:\n itmp = search_string('average rms-error', tmptxt)\n if itmp >= 0:\n tmp = tmptxt.pop(itmp).replace('D', 'E').split()\n niter = int(tmp[1])\n rms = float(tmp[-1])\n # get max number of scf steps\n itmp = search_string('SCFSTEPS', tmptxt)\n if itmp >= 0:\n nitermax = int(tmptxt.pop(itmp).split()[-1])\n # get qbound\n itmp = search_string('QBOUND', tmptxt)\n if itmp >= 0:\n qbound = float(tmptxt.pop(itmp).split()[-1])\n # get imix\n itmp = search_string('IMIX', tmptxt)\n if itmp >= 0:\n imix = int(tmptxt.pop(itmp).split()[-1])\n # get mixfac\n itmp = search_string('MIXFAC', tmptxt)\n if itmp >= 0:\n mixfac = float(tmptxt.pop(itmp).split()[-1])\n # get fcm\n itmp = search_string('FCM', tmptxt)\n if itmp >= 0:\n fcm = float(tmptxt.pop(itmp).split()[-1])\n # set mixinfo\n mixinfo = [imix, mixfac, qbound, fcm]\n # set converged and nmax_reached logicals\n converged, nmax_reached = False, False\n if nitermax==niter: nmax_reached = True\n if rms<qbound: converged = True\n # return values\n return niter, nitermax, converged, nmax_reached, mixinfo", "def calc_bulk_values(s, Qv, Qs, print_info=False): \n # use the find_extrema algorithm\n ind, minmax = find_extrema(Qv, print_info=print_info)\n \n # compute dividing salinities\n smin=s[0]\n DS=s[1]-s[0]\n div_sal=[]\n i=0\n while i < len(ind): \n div_sal.append(smin+DS*ind[i])\n i+=1\n \n #calculate transports etc.\n Q_in_m=[]\n Q_out_m=[]\n s_in_m=[]\n s_out_m=[]\n index=[]\n i=0\n while i < len(ind)-1:\n # compute the transports and sort to in and out\n Q_i=-(Qv[ind[i+1]]-Qv[ind[i]])\n F_i=-(Qs[ind[i+1]]-Qs[ind[i]])\n s_i=np.abs(F_i)/np.abs(Q_i)\n if Q_i<0 and np.abs(Q_i)>1:\n Q_out_m.append(Q_i)\n s_out_m.append(s_i)\n elif Q_i > 0 and np.abs(Q_i)>1:\n Q_in_m.append(Q_i)\n s_in_m.append(s_i)\n else:\n index.append(i)\n i+=1\n div_sal = np.delete(div_sal, index)\n \n return Q_in_m, Q_out_m, s_in_m, s_out_m, div_sal, ind, minmax", "def I_MP9701(ps, **kw) -> MAG:\n g, i = kw.get(\"g\", \"g\"), kw.get(\"i\", \"i\")\n gmi = kw.get(\"gmi\", \"g-i\")\n\n if gmi in ps.colnames:\n gmi = ps[gmi]\n else:\n gmi = ps[g] - ps[i]\n\n ind = (-1.0 * MAG < gmi) & (gmi < 4 * MAG)\n if not all(ind):\n warnings.warn(\"MCg1.I: not all -1 mag < (g-i)_ps < 4 mag\")\n\n c0 = 0.001 * MAG\n c1 = -0.021\n c2 = 0.00398 / MAG\n c3 = -0.00369 / MAG ** 2\n i_ps = ps[i]\n\n i_cfht = i_ps + c0 + (c1 * gmi) + (c2 * gmi ** 2) + (c3 * gmi ** 3)\n return i_cfht", "def IDFT_slow(x):\r\n x = np.asarray(x, dtype=complex)\r\n N = len(x)\r\n n = np.arange(N)\r\n k = n.reshape((N, 1))\r\n M = np.exp(2j * np.pi * k * n / N)\r\n return 1.0/N * np.dot(M, x)", "def generate_data(path=resource_filename('locals', 'data/fake/'), mag_range=(11.13,18)):\n # Get some random spectra\n try:\n files = glob.glob('/user/jfilippazzo/Models/ACES/default/*.fits')[::50]\n except:\n files = glob.glob('/Users/jfilippazzo/Documents/Modules/_DEPRECATED/limb_dark_jeff/limb/specint/*.fits')[::20]\n \n # Make a 
fake source catalog (with only essential columns for now)\n catpath = os.path.join(path,'fake_source_catalog.ecsv')\n ids = list(range(len(files)))\n coords = SkyCoord([89.7455]*len(ids), [-29.05744]*len(ids), unit='deg', frame='icrs')\n cat = at.QTable([ids,coords], names=('id','icrs_centroid'))\n cat.write(catpath)\n \n # Open the x1d file\n header = fits.getheader(resource_filename('locals', 'data/template_x1d.fits'))\n \n # Make Spectrum objects from models at R=150\n wavelength = np.arange(0.05,2.6,0.0001)[::66]*q.um\n \n # Normalize the spectra to a random F200W magnitude\n spectra = []\n f200w = Bandpass('NIRISS.F200W')\n f200w.wave_units = q.um\n for file in files:\n \n # Create Spectrum\n flux = fits.getdata(file)[-1][::66]*q.erg/q.s/q.cm**2/q.AA\n unc = flux/50.\n spec = Spectrum(wavelength, flux, unc)\n \n # Normalize to F200W\n mag = np.random.uniform(*mag_range)\n norm_spec = spec.renormalize(mag, f200w)\n spectra.append(norm_spec)\n \n # Make a separate x1d file and photometry file for each bandpass\n # containing data for each source\n for band in NIRISS_bands:\n \n try:\n \n # Get the Bandpass object\n bp = Bandpass(band)\n bp.wave_units = q.um\n \n # Make x1d file for spectra\n x1d_file = os.path.join(path,'{}_x1d.fits'.format(band))\n x1d_hdu = fits.HDUList(fits.PrimaryHDU(header=header))\n \n # Make csv file for photometry\n phot_file = os.path.join(path,'{}_phot.csv'.format(band))\n phot_data = at.Table(names=('id','band','magnitude','magnitude_unc'), dtype=(int,'S20',float,float))\n \n # Iterate over spectra\n for id,(f,spec) in enumerate(zip(files,spectra)):\n \n # Trim spectrum to bandpass for x1d file\n spec = Spectrum(*spec.spectrum, trim=[(0*q.um,bp.WavelengthMin*1E-4*q.um),(bp.WavelengthMax*1E-4*q.um,10*q.um)])\n \n # Calculate magnitude and add to photometry table\n mag, mag_unc = spec.synthetic_magnitude(bp, force=True)\n phot_data.add_row([id, band, mag, mag_unc])\n \n # Add source spectrum params for verification\n params = f.split('/')[-1].split('-')\n header['TEFF'] = int(params[0].replace('lte',''))\n header['LOGG'] = float(params[1][:4])\n header['FEH'] = float(params[-6][:-8].split('+')[-1])\n header['FILEPATH'] = f\n header['PUPIL'] = band\n\n # Put spectrum in x1d fits file\n data = fits.BinTableHDU(data=np.rec.array(list(zip(*spec.data)),\n formats='float32,float32,float32',\n names='WAVELENGTH,FLUX,ERROR'),\n header=header)\n data.name = 'EXTRACT1D'\n \n x1d_hdu.append(data)\n \n # Write the photometry file\n phot_data.write(phot_file, format='ascii.csv')\n del phot_data\n \n # Write the x1d file\n x1d_hdu.writeto(x1d_file, overwrite=True)\n del x1d_hdu\n \n except IOError:\n pass", "def fluxes(wavelength, s, line, lowlow= 14, lowhigh=6, highlow=6, highhigh = 14, lmin=0, lmax=0, fmin=0, fmax=0, \n broad=2.355, plot=True, verbose=True, plot_sus = False, fcal = True, fit_continuum = True, median_kernel=35, warnings = True ): # Broad is FWHM for Gaussian sigma= 1,\n # s must be an array, no a list\n try: \n index_maximo_del_rango = s.tolist().index(np.nanmax(s))\n #print \" is AN ARRAY\"\n except Exception:\n #print \" s is A LIST -> must be converted into an ARRAY\" \n s = np.array(s)\n \n # Setup wavelength limits\n if lmin == 0 :\n lmin = line-65. 
# By default, +-65 A with respect to line\n if lmax == 0 :\n lmax = line+65.\n \n # Extract subrange to fit\n w_spec = []\n f_spec = []\n w_spec.extend((wavelength[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n f_spec.extend((s[i]) for i in range(len(wavelength)) if (wavelength[i] > lmin and wavelength[i] < lmax) ) \n \n if np.isnan(np.nanmedian(f_spec)): \n # The data are NAN!! Nothing to do\n if verbose or warnings: print(\" There is no valid data in the wavelength range [{},{}] !!\".format(lmin,lmax))\n \n resultado = [0, line, 0, 0, 0, 0, 0, 0, 0, 0, 0, s ] \n\n return resultado\n \n else: \n \n ## 20 Sep 2020\n f_spec_m=signal.medfilt(f_spec,median_kernel) # median_kernel = 35 default\n \n \n # Remove nans\n median_value = np.nanmedian(f_spec)\n f_spec = [median_value if np.isnan(x) else x for x in f_spec] \n \n \n # Setup min and max flux values in subrange to fit\n if fmin == 0 :\n fmin = np.nanmin(f_spec) \n if fmax == 0 :\n fmax = np.nanmax(f_spec) \n \n # We have to find some \"guess numbers\" for the Gaussian. Now guess_centre is line\n guess_centre = line\n \n # Define continuum regions: [-lowlow, -lowhigh] and [highlow,highhigh] in Angstroms with respect to guess_centre\n \n w_cont=[]\n f_cont=[]\n w_cont.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n f_cont.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) or (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n \n if fit_continuum:\n # Linear Fit to continuum \n f_cont_filtered=sig.medfilt(f_cont,np.int(median_kernel))\n #print line #f_cont\n # if line == 8465.0:\n # print w_cont\n # print f_cont_filtered\n # plt.plot(w_cont,f_cont_filtered)\n # plt.show()\n # plt.close()\n # warnings=True\n try: \n mm,bb = np.polyfit(w_cont, f_cont_filtered, 1)\n except Exception:\n bb = np.nanmedian(f_cont_filtered)\n mm = 0.\n if warnings: \n print(\" WARNING: Impossible to get the continuum!\")\n print(\" Scaling the continuum to the median value b = \",bb,\": cont = 0 * w_spec + \", bb)\n continuum = mm*np.array(w_spec)+bb \n c_cont = mm*np.array(w_cont)+bb \n \n else: \n # Median value in each continuum range # NEW 15 Sep 2019\n w_cont_low = []\n f_cont_low = []\n w_cont_low.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n f_cont_low.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-lowlow and w_spec[i] < guess_centre-lowhigh) ) \n median_w_cont_low = np.nanmedian(w_cont_low)\n median_f_cont_low = np.nanmedian(f_cont_low)\n w_cont_high = []\n f_cont_high = []\n w_cont_high.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n f_cont_high.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre+highlow and w_spec[i] < guess_centre+highhigh) ) \n median_w_cont_high = np.nanmedian(w_cont_high)\n median_f_cont_high = np.nanmedian(f_cont_high) \n \n b = (median_f_cont_low-median_f_cont_high)/(median_w_cont_low-median_w_cont_high)\n a = median_f_cont_low- b * median_w_cont_low\n \n continuum = a + b*np.array(w_spec)\n c_cont = a + b*np.array(w_cont) \n \n \n # rms continuum\n rms_cont = np.nansum([ np.abs(f_cont[i] - c_cont[i]) for i in range(len(w_cont)) ]) / 
len(c_cont)\n \n # Search for index here w_spec(index) closest to line\n min_w = np.abs(np.array(w_spec)-line)\n mini = np.nanmin(min_w)\n # guess_peak = f_spec[min_w.tolist().index(mini)] # WE HAVE TO SUSTRACT CONTINUUM!!!\n guess_peak = f_spec[min_w.tolist().index(mini)] - continuum[min_w.tolist().index(mini)]\n \n # LOW limit\n low_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-15 and w_spec[i] < guess_centre)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre-15 and w_spec[i] < guess_centre)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a \n \n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1,1,-1):\n if f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii-1]/c_fit[ii-1] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n # if f_fit[ii]/c_fit[ii] < 1.05 and low_limit == 0: low_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if low_limit == 0: \n sorted_by_flux=np.argsort(fs)\n try:\n low_limit = ws[sorted_by_flux[0]]\n except Exception:\n plot=True\n low_limit = 0\n \n # HIGH LIMIT \n high_limit=0\n w_fit = []\n f_fit = []\n w_fit.extend((w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre and w_spec[i] < guess_centre+15)) \n f_fit.extend((f_spec[i]) for i in range(len(w_spec)) if (w_spec[i] > guess_centre and w_spec[i] < guess_centre+15)) \n if fit_continuum: \n c_fit=mm*np.array(w_fit)+bb \n else: \n c_fit=b*np.array(w_fit)+a\n \n fs=[]\n ws=[]\n for ii in range(len(w_fit)-1):\n if f_fit[ii]/c_fit[ii] < 1.05 and f_fit[ii+1]/c_fit[ii+1] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n # if f_fit[ii]/c_fit[ii] < 1.05 and high_limit == 0: high_limit = w_fit[ii]\n fs.append(f_fit[ii]/c_fit[ii])\n ws.append(w_fit[ii])\n if high_limit == 0: \n sorted_by_flux=np.argsort(fs)\n try:\n high_limit = ws[sorted_by_flux[0]] \n except Exception:\n plot=True\n high_limit = 0 \n \n # Guess centre will be the highest value in the range defined by [low_limit,high_limit]\n \n try: \n rango = np.where((high_limit >= wavelength ) & (low_limit <= wavelength)) \n index_maximo_del_rango = s.tolist().index(np.nanmax(s[rango]))\n guess_centre = wavelength[index_maximo_del_rango]\n except Exception:\n guess_centre = line #### It was 0 before\n \n \n # Fit a Gaussian to data - continuum \n p0 = [guess_centre, guess_peak, broad/2.355] # broad is the Gaussian sigma, 1.0 for emission lines\n try:\n fit, pcov = curve_fit(gauss, w_spec, f_spec-continuum, p0=p0, maxfev=10000) # If this fails, increase maxfev...\n fit_error = np.sqrt(np.diag(pcov))\n \n # New 28th Feb 2019: Check central value between low_limit and high_limit\n # Better: between guess_centre - broad, guess_centre + broad\n # If not, redo fit fixing central value to the peak (it does not work... 
just fix FWHM= (high_limit-low_limit)/2.5 )\n \n if verbose != False: print(\" ----------------------------------------------------------------------------------------\")\n # if low_limit < fit[0] < high_limit:\n if fit[0] < guess_centre - broad or fit[0] > guess_centre + broad:\n # if verbose: print \" Fitted center wavelength\", fit[0],\"is NOT in the range [\",low_limit,\",\",high_limit,\"]\"\n if verbose: print(\" Fitted center wavelength\", fit[0],\"is NOT in the expected range [\",guess_centre - broad,\",\",guess_centre + broad,\"]\")\n \n # print \"Re-do fitting fixing center wavelength\"\n # p01 = [guess_peak, broad]\n # fit1, pcov1 = curve_fit(gauss_fix_x0, w_spec, f_spec-continuum, p0=p01, maxfev=100000) # If this fails, increase maxfev...\n # fit_error1 = np.sqrt(np.diag(pcov1))\n # fit[0]=guess_centre\n # fit_error[0] = 0.\n # fit[1] = fit1[0]\n # fit_error[1] = fit_error1[0]\n # fit[2] = fit1[1]\n # fit_error[2] = fit_error1[1] \n \n fit[0]=guess_centre\n fit_error[0] = 0.000001\n fit[1]=guess_peak\n fit_error[1] = 0.000001\n fit[2] = broad/2.355\n fit_error[2] = 0.000001 \n else:\n if verbose: print(\" Fitted center wavelength\", fit[0],\"IS in the expected range [\",guess_centre - broad,\",\",guess_centre + broad,\"]\")\n \n \n if verbose: print(\" Fit parameters = \", fit[0], fit[1], fit[2])\n if fit[2] == broad and warnings == True : \n print(\" WARNING: Fit in\",fit[0],\"failed! Using given centre wavelength (cw), peak at (cv) & sigma = broad/2.355 given.\") \n gaussian_fit = gauss(w_spec, fit[0], fit[1], fit[2])\n \n \n # Estimate rms of the Gaussian fit in range [low_limit, high_limit]\n residuals = f_spec-gaussian_fit-continuum\n rms_fit = np.nansum([ ((residuals[i]**2)/(len(residuals)-2))**0.5 for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n \n # Fluxes, FWHM and Eq. 
Width calculations\n gaussian_flux = gauss_flux(fit[1],fit[2])\n error1 = np.abs(gauss_flux(fit[1]+fit_error[1],fit[2]) - gaussian_flux)\n error2 = np.abs(gauss_flux(fit[1],fit[2]+fit_error[2]) - gaussian_flux)\n gaussian_flux_error = 1 / ( 1/error1**2 + 1/error2**2 )**0.5\n \n \n fwhm=fit[2]*2.355\n fwhm_error = fit_error[2] *2.355\n fwhm_vel = fwhm / fit[0] * C \n fwhm_vel_error = fwhm_error / fit[0] * C \n \n gaussian_ew = gaussian_flux/np.nanmedian(f_cont)\n gaussian_ew_error = gaussian_ew * gaussian_flux_error/gaussian_flux \n \n # Integrated flux\n # IRAF: flux = sum ((I(i)-C(i)) * (w(i2) - w(i1)) / (i2 - i2) \n flux = np.nansum([ (f_spec[i]-continuum[i])*(w_spec[i+1]-w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n flux_error = rms_cont * (high_limit - low_limit)\n wave_resolution = (wavelength[-1]-wavelength[0])/len(wavelength)\n ew = wave_resolution * np.nansum ([ (1 - f_spec[i]/continuum[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n ew_error = np.abs(ew*flux_error/flux) \n gauss_to_integrated = gaussian_flux/flux * 100.\n \n index=0\n s_s=np.zeros_like(s)\n for wave in range(len(wavelength)):\n s_s[wave]=s[wave]\n if wavelength[wave] == w_spec[0] : \n s_s[wave] = f_spec[0]-gaussian_fit[0]\n index=1\n if wavelength[wave] > w_spec[0] and wavelength[wave] <= w_spec[-1]:\n s_s[wave] = f_spec[index]-gaussian_fit[index]\n index=index+1\n \n # Plotting \n ptitle = 'Fit: x0=%.2f y0=%.2e sigma=%.2f flux=%.2e rms=%.3e' % (fit[0], fit[1], fit[2], gaussian_flux, rms_fit)\n if plot :\n plt.figure(figsize=(10, 4))\n # Plot input spectrum\n plt.plot(np.array(w_spec),np.array(f_spec), \"b\", lw=3, alpha = 0.8)\n # Plot median input spectrum\n plt.plot(np.array(w_spec),np.array(f_spec_m), \"orange\", lw=3, alpha = 0.5) # 2021: era \"g\"\n # Plot spectrum - gauss subtracted\n plt.plot(wavelength,s_s,\"g\",lw=3, alpha = 0.6)\n \n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$ ]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\")\n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre, color='r', linestyle='-', alpha=0.3)\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n # Plot residuals\n plt.plot(w_spec, residuals, 'k')\n plt.title(ptitle)\n plt.show()\n \n # Printing results\n if verbose :\n print(\"\\n - Gauss and continuum fitting + integrated flux calculations:\\n\")\n print(\" rms continuum = %.3e erg/cm/s/A \" % (rms_cont)) \n print(\" Gaussian Fit parameters: x0 = ( %.2f +- %.2f ) A \" % (fit[0], fit_error[0]))\n print(\" y0 = ( %.3f +- %.3f ) 1E-16 erg/cm2/s/A\" % (fit[1]/1E-16, fit_error[1]/1E-16 ))\n print(\" sigma = ( 
%.3f +- %.3f ) A\" % (fit[2], fit_error[2])) \n print(\" rms fit = %.3e erg/cm2/s/A\" % (rms_fit))\n print(\" Gaussian Flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent)\" % (gaussian_flux/1E-16, gaussian_flux_error/1E-16, gaussian_flux_error/gaussian_flux*100))\n print(\" FWHM = ( %.3f +- %.3f ) A = ( %.1f +- %.1f ) km/s \" % (fwhm, fwhm_error, fwhm_vel, fwhm_vel_error))\n print(\" Eq. Width = ( %.1f +- %.1f ) A\" % (-gaussian_ew, gaussian_ew_error)) \n print(\"\\n Integrated flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent) \" % ( flux/1E-16, flux_error/1E-16, flux_error/flux *100)) \n print(\" Eq. Width = ( %.1f +- %.1f ) A\" % (ew, ew_error))\n print(\" Gauss/Integrated = %.2f per cent \" % gauss_to_integrated)\n \n \n # Plot independent figure with substraction if requested \n if plot_sus: plot_plot(wavelength,[s,s_s], xmin=lmin, xmax=lmax, ymin=fmin, ymax=fmax, fcal=fcal, frameon=True, ptitle=ptitle)\n \n # 0 1 2 3 4 5 6 7 8 9 10 11\n resultado = [rms_cont, fit[0], fit_error[0], gaussian_flux, gaussian_flux_error, fwhm, fwhm_error, flux, flux_error, ew, ew_error, s_s ]\n return resultado \n except Exception:\n if verbose: \n print(\" - Gaussian fit failed!\")\n print(\" However, we can compute the integrated flux and the equivalent width:\")\n \n flux = np.nansum([ (f_spec[i]-continuum[i])*(w_spec[i+1]-w_spec[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n flux_error = rms_cont * (high_limit - low_limit)\n wave_resolution = (wavelength[-1]-wavelength[0])/len(wavelength)\n ew = wave_resolution * np.nansum ([ (1 - f_spec[i]/continuum[i]) for i in range(len(w_spec)) if (w_spec[i] >= low_limit and w_spec[i] <= high_limit) ]) \n ew_error = np.abs(ew*flux_error/flux) \n \n if verbose:\n print(\" Integrated flux = ( %.2f +- %.2f ) 1E-16 erg/s/cm2 (error = %.1f per cent) \" % ( flux/1E-16, flux_error/1E-16, flux_error/flux *100)) \n print(\" Eq. 
Width = ( %.1f +- %.1f ) A\" % (ew, ew_error))\n \n resultado = [0, guess_centre, 0, 0, 0, 0, 0, flux, flux_error, ew, ew_error, s ] # guess_centre was identified at maximum value in the [low_limit,high_limit] range but Gaussian fit failed\n \n \n # Plotting \n if plot :\n plt.figure(figsize=(10, 4))\n plt.plot(np.array(w_spec),np.array(f_spec), \"b\", lw=3, alpha = 0.5)\n plt.minorticks_on() \n plt.xlabel(\"Wavelength [$\\mathrm{\\AA}$]\")\n if fcal:\n plt.ylabel(\"Flux [ erg cm$^{-2}$ s$^{-1}$ $\\mathrm{\\AA}^{-1}$ ]\")\n else:\n plt.ylabel(\"Flux [ counts ]\") \n plt.xlim(lmin,lmax)\n plt.ylim(fmin,fmax)\n \n # Vertical line at guess_centre\n plt.axvline(x=guess_centre, color='r', linestyle='-', alpha=0.5)\n # Horizontal line at y = 0\n plt.axhline(y=0, color='k', linestyle=':', alpha=0.5) \n # Dashed green regions for continuum, defined by [lowlow, lowhigh] and [highlow,highhigh]\n plt.axvspan(guess_centre+highlow, guess_centre+highhigh, facecolor='g', alpha=0.15,zorder=3)\n plt.axvspan(guess_centre-lowlow, guess_centre-lowhigh, facecolor='g', alpha=0.15,zorder=3)\n # Plot linear fit for continuum\n plt.plot(w_spec, continuum,\"g--\")\n # Plot Gaussian fit \n # plt.plot(w_spec, gaussian_fit+continuum, 'r-', alpha=0.8) \n # Vertical line at Gaussian center\n # plt.axvline(x=fit[0], color='k', linestyle='-', alpha=0.5)\n # Vertical lines to emission line\n plt.axvline(x= low_limit, color='k', linestyle=':', alpha=0.5)\n plt.axvline(x= high_limit, color='k', linestyle=':', alpha=0.5) \n # Plot residuals\n # plt.plot(w_spec, residuals, 'k')\n plt.title(\"No Gaussian fit obtained...\")\n plt.show()\n \n \n return resultado", "def signal_xs(mass, width_frac):\n width = mass*width_frac/100.\n return ROOT.getHiggsXS(mass, width)", "def sincint(x, nres, speclist) :\n\n dampfac = 3.25*nres/2.\n ksize = int(21*nres/2.)\n if ksize%2 == 0 : ksize +=1\n nhalf = ksize//2 \n\n #number of output and input pixels\n nx = len(x)\n nf = len(speclist[0][0])\n\n # integer and fractional pixel location of each output pixel\n ix = x.astype(int)\n fx = x-ix\n\n # outputs\n outlist=[]\n for spec in speclist :\n if spec[1] is None :\n outlist.append([np.full_like(x,0),None])\n else :\n outlist.append([np.full_like(x,0),np.full_like(x,0)])\n\n for i in range(len(x)) :\n xkernel = np.arange(ksize)-nhalf - fx[i]\n # in units of Nyquist\n xkernel /= (nres/2.)\n u1 = xkernel/dampfac\n u2 = np.pi*xkernel\n sinc = np.exp(-(u1**2)) * np.sin(u2) / u2\n sinc /= (nres/2.)\n\n lobe = np.arange(ksize) - nhalf + ix[i]\n vals = np.zeros(ksize)\n vars = np.zeros(ksize)\n gd = np.where( (lobe>=0) & (lobe<nf) )[0]\n\n for spec,out in zip(speclist,outlist) :\n vals = spec[0][lobe[gd]]\n out[0][i] = (sinc[gd]*vals).sum()\n if spec[1] is not None : \n var = spec[1][lobe[gd]]\n out[1][i] = (sinc[gd]**2*var).sum()\n\n for out in outlist :\n if out[1] is not None : out[1] = np.sqrt(out[1])\n \n return outlist", "def stft(x, fs, framesz, hop):\n framesamp = int(framesz*fs)\n hopsamp = int(hop*fs)\n w = scipy.hamming(framesamp)\n X = scipy.array([scipy.fft(w*x[i:i+framesamp]) \n for i in range(0, len(x)-framesamp, hopsamp)])\n return X", "def EGWD_fg(f):\n A = 4.2e-47\n res = np.zeros((len(f)))\n for i,freq in enumerate(f): \n if freq >=3e-3:\n # strain \n res[i] = A * freq**(-7/3) * np.exp(-2*(freq/5e-2)**2) \n else:\n res[i] = np.NaN\n return np.array(res)", "def __call__(self, x):\n f_beam_gaussian = self.i / (np.sqrt(2 * constants.pi) * constants.e * self.sigma * self.w_z) * \\\n np.exp(-(x - self.x_c) ** 2 / (2 * self.sigma ** 
2))\n\n # Convert the flux density unit atoms/nm^2s to atoms/cm^2s by multiplying with factor 1e14\n return f_beam_gaussian * 1e14", "def test_9(self):\n\n sq_qe = gen_step_qe(1.42, 0.9)\n test_ill = Illumination()\n # test_qef = qe_filter(sq_qe)\n\n filtered_ill = test_ill * sq_qe\n\n assert isinstance(filtered_ill, Illumination)\n\n #plt.plot(filtered_ill.get_spectrum('eV')[0, :], filtered_ill.get_spectrum('eV')[1, :], label=\"filtered\")\n #plt.plot(test_ill.get_spectrum('eV')[0, :], test_ill.get_spectrum('eV')[1, :], label=\"original\")\n\n #plt.xlabel('wavelength (eV)')\n #plt.ylabel('spectrum (W/eV/m^2)')\n\n #plt.legend()\n\n #plt.show()", "def calc_magnitude(box,octant):\n # Read the Mi(z=2) magnitudes for the box.\n miz2 = FH.read_file(box)['Miz2'][:]\n # Read the index for each QSO in the octant, and get the Mi(z=2).\n data = FH.read_file(octant)\n zz = data['Z']\n dmod = data['DMOD']\n miz2 = miz2[data['INDX']]\n # Now convert to apparent i-band magnitude using the k-correction.\n # If a tabulated k-correction is available, use that, otherwise\n # default to a power-law continuum approximation.\n # See discussion in Ross++13, Appendix B and Section 4.\n kfile=os.getenv('MOCKINGDESI_BASE')+\"/data/qso-iband-k-correction.txt\"\n if os.path.exists(kfile):\n print(\"Using K-correction from \"+kfile)\n kcorr = np.loadtxt(kfile)\n kcorr = np.interp(zz,kcorr[:,1],kcorr[:,2])\n else:\n print(\"Using power-law K-correction\")\n alpha = -0.5\n kcorr = -2.5*(1+alpha)*np.log10( (1+zz)/(1+2.0) )\n gmi = np.poly1d([0.1502,-0.9886,2.147,-1.758,0.6397])\t# See notes.\n rmi = np.poly1d([-0.1482,1.636,-6.716,12.55,-10.39,3.017])\n magi = miz2 + dmod + kcorr\t# e.g. Ross++13, Eq. 5\n magg = magi + gmi(zz.clip(0.5,3.5))\n magr = magi + rmi(zz.clip(0.5,3.5))\n # and write the results\n data = {}\n data['GMAG'] = magg.astype('f4')\n data['RMAG'] = magr.astype('f4')\n FH.write_file(octant,data)\n #", "def generateSound(amps_samples, channel_fs, sampleRate):\r\n\r\n samples_to_gen = len(amps_samples[0]) \r\n nb_channels = len(amps_samples)\r\n duration = samples_to_gen / sampleRate # in s\r\n\r\n \r\n t = np.linspace(0.0, duration, samples_to_gen) # Produces length of samples\r\n\r\n sines = amps_samples * np.sin(2 * np.pi * np.outer(channel_fs, t) )\r\n ySum = np.sum(sines, axis=0)\r\n\r\n\r\n # Normalize data, so that it is in playable amplitude\r\n res_data = 10* ySum / np.linalg.norm(ySum)\r\n\r\n return res_data", "def sc_QC(X: pd.DataFrame,\n min_lib_size: float = 1000,\n remove_outlier_cells: bool = True,\n min_percent: float = 0.05,\n max_mito_ratio: float = 0.1,\n min_exp_avg: float = 0,\n min_exp_sum: float = 0) -> pd.DataFrame:\n outlier_coef = 1.5\n X[X < 0] = 0\n lib_size = X.sum(axis=0)\n before_s = X.shape[1]\n X = X.loc[:, lib_size > min_lib_size]\n print(f\"Removed {before_s - X.shape[1]} cells with lib size < {min_lib_size}\")\n if remove_outlier_cells:\n lib_size = X.sum(axis=0)\n before_s = X.shape[1]\n Q3 = lib_size.to_frame().quantile(0.75, axis=0).values[0]\n Q1 = lib_size.to_frame().quantile(0.25, axis=0).values[0]\n interquartile_range = Q3 - Q1\n X = X.loc[:, (lib_size >= Q1 - interquartile_range * outlier_coef) &\n (lib_size <= Q3 + interquartile_range * outlier_coef)]\n print(f\"Removed {before_s - X.shape[1]} outlier cells from original data\")\n mt_genes = X.index.str.upper().str.match(\"^MT-\")\n if any(mt_genes):\n print(f\"Found mitochondrial genes: {X[mt_genes].index.to_list()}\")\n before_s = X.shape[1]\n mt_rates = X[mt_genes].sum(axis=0) / X.sum(axis=0)\n X = 
X.loc[:, mt_rates < max_mito_ratio]\n print(f\"Removed {before_s - X.shape[1]} samples from original data (mt genes ratio > {max_mito_ratio})\")\n else:\n warn(\"Mitochondrial genes were not found. Be aware that apoptotic cells may be present in your sample.\")\n before_g = X.shape[0]\n X = X[(X != 0).mean(axis=1) > min_percent]\n print(f\"Removed {before_g - X.shape[0]} genes expressed in less than {min_percent} of data\")\n\n before_g = X.shape[0]\n if X.shape[1] > 500:\n X = X.loc[X.mean(axis=1) >= min_exp_avg, :]\n else:\n X = X.loc[X.sum(axis=1) >= min_exp_sum, :]\n print(f\"Removed {before_g - X.shape[0]} genes with expression values: average < {min_exp_avg} or sum < {min_exp_sum}\")\n return X", "def Q_flux(self):\n fields = self.read_vars(['x','y','z'])\n Z, Y, X = np.meshgrid(fields['z']/self.params['Lz'],\n fields['y']/self.params['Ly'] - 0.5,\n fields['x']/self.params['Lx'] - 0.5, indexing='ij')\n\n r = np.sqrt(X**2 + Y**2)\n r0 = 0.01\n msk = 0.5*(1.-np.tanh(r/r0))\n delta = 1/(self.params[\"global_nz\"])\n Q =1e-5*np.exp(-Z/delta)/delta*msk\n\n return Q", "def flux(self, q):\n q1, q2 = q\n if q1 > 0:\n u = q2/q1\n else:\n u = 0\n return np.array([q1*u, q1 * u*u + 0.5*9.81 * q1*q1])", "def ADM_QED(nf):\n Qu = 2/3\n Qd = -1/3\n Qe = -1\n nc = 3\n gamma_QED = np.array([[8/3*Qu*Qu*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qu*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc],\n [8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc],\n [8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc],\n [8/3*Qu*Qu*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qu*nc, 8/3*Qu*Qd*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc, 8/3*Qu*Qe*nc],\n [8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qu*nc, 8/3*Qd*Qd*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc, 8/3*Qd*Qe*nc],\n [8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qd, 8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qe, 8/3*Qe*Qe, 8/3*Qe*Qe],\n [8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qd, 8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qe, 8/3*Qe*Qe, 8/3*Qe*Qe],\n [8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qd, 8/3*Qe*Qu, 8/3*Qe*Qd, 8/3*Qe*Qe, 8/3*Qe*Qe, 8/3*Qe*Qe]])\n gamma_QED_1 = np.zeros((2,154))\n gamma_QED_2 = np.hstack((np.zeros((8,2)),gamma_QED,np.zeros((8,144))))\n gamma_QED_3 = np.hstack((np.zeros((8,10)),gamma_QED,np.zeros((8,136))))\n gamma_QED_4 = np.zeros((136,154))\n gamma_QED = np.vstack((gamma_QED_1, gamma_QED_2, gamma_QED_3, gamma_QED_4))\n\n if nf == 5:\n return gamma_QED\n elif nf == 4:\n return np.delete(np.delete(gamma_QED, [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94, 102, 110, 118, 126, 134, 142, 150], 0)\\\n , [6, 14, 22, 30, 42, 50, 58, 66, 74, 82, 94, 102, 110, 118, 126, 134, 142, 150], 1)\n elif nf == 3:\n return np.delete(np.delete(gamma_QED, [5,6, 13,14, 21,22, 29,30, 41,42, 49,50, 57,58, 65,66, 73,74, 81,82,\\\n 93,94, 101,102, 109,110, 117,118, 125,126, 133,134, 141,142, 149,150], 0)\\\n , [5,6, 13,14, 21,22, 29,30, 41,42, 49,50, 57,58, 65,66, 73,74, 81,82,\\\n 93,94, 101,102, 109,110, 117,118, 125,126, 133,134, 141,142, 149,150], 1)\n else:\n raise Exception(\"nf has to be 3, 4 or 5\")", "def get_signal(self, chn):\n nsamples = self.get_samples_per_signal()\n if (chn < len(nsamples)):\n x = np.zeros(nsamples[chn], dtype=np.float64)\n\n v = x[chn * nsamples[chn]:(chn + 1) * nsamples[chn]]\n self.read_phys_signal(chn, 0, nsamples[chn], v)\n return x\n else:\n return np.array([])", "def _sigma_c(self, ndbqp, ndbhf):\n Eo = numpy.array(ndbqp['E-Eo'])\n Z = numpy.array(ndbqp['Z'])\n E_minus_Eo = 
numpy.array(ndbqp['E-Eo'])\n Sx = numpy.array(ndbhf['Sx'])\n Vxc = numpy.array(ndbhf['Vxc'])\n try:\n Sc = numpy.array(ndbqp['So'])\n except KeyError: \n Sc = 1/Z*E_minus_Eo -Sx + Vxc \n pdata = ArrayData()\n pdata.set_array('Eo', Eo)\n pdata.set_array('E_minus_Eo', E_minus_Eo)\n pdata.set_array('Z', Z)\n pdata.set_array('Sx', Sx)\n pdata.set_array('Sc', Sc)\n pdata.set_array('Vxc', Vxc)\n pdata.set_array('qp_table', numpy.array(ndbqp['qp_table']))\n return pdata", "def test_single_imf(self):\n\n def max_diff(a, b):\n return np.max(np.abs(a - b))\n\n emd = EMD()\n emd.FIXE_H = 2\n\n t = np.arange(0, 1, 0.001)\n c1 = np.cos(4 * 2 * np.pi * t) # 2 Hz\n S = c1.copy()\n\n # Input - linear function f(t) = sin(2Hz t)\n imfs = emd.emd(S, t)\n self.assertEqual(imfs.shape[0], 1, \"Expecting sin + trend\")\n\n diff = np.allclose(imfs[0], c1)\n self.assertTrue(diff, \"Expecting 1st IMF to be sin\\nMaxDiff = \" + str(max_diff(imfs[0], c1)))\n\n # Input - linear function f(t) = siin(2Hz t) + 2*t\n c2 = 5 * (t + 2)\n S += c2.copy()\n imfs = emd.emd(S, t)\n\n self.assertEqual(imfs.shape[0], 2, \"Expecting sin + trend\")\n diff1 = np.allclose(imfs[0], c1, atol=0.2)\n self.assertTrue(diff1, \"Expecting 1st IMF to be sin\\nMaxDiff = \" + str(max_diff(imfs[0], c1)))\n diff2 = np.allclose(imfs[1], c2, atol=0.2)\n self.assertTrue(diff2, \"Expecting 2nd IMF to be trend\\nMaxDiff = \" + str(max_diff(imfs[1], c2)))", "def box(times, signal, f0=None, fn=None, df=None, Nbin=10, qmi=0.005, qma=0.75 ):\n #-- initialize some variables needed in the FORTRAN module\n n = len(times)\n T = times.ptp()\n u = np.zeros(n)\n v = np.zeros(n)\n \n #-- frequency vector and variables\n nf = (fn-f0)/df\n if f0<2./T: f0=2./T\n \n #-- calculate EEBLS spectrum and model parameters\n power,depth,qtran,in1,in2 = eebls.eebls(times,signal,u,v,nf,f0,df,Nbin,qmi,qma,n)\n frequencies = np.linspace(f0,fn,nf)\n \n #-- to return parameters of fit, do this:\n # pars = [max_freq,depth,qtran+(1./float(nb)),(in1-1)/float(nb),in2/float(nb)]\n return frequencies,power", "def stochasticModelAnal(x, H, N, stocf):\n\n\thN = N/2+1 # positive size of fft\n\tNo2 = N/2 # half of N\n\tif (hN*stocf < 3): # raise exception if decimation factor too small\n\t\traise ValueError(\"Stochastic decimation factor too small\")\n\t\t\n\tif (stocf > 1): # raise exception if decimation factor too big\n\t\traise ValueError(\"Stochastic decimation factor above 1\")\n\t\t\n\tif (H <= 0): # raise error if hop size 0 or negative\n\t\traise ValueError(\"Hop size (H) smaller or equal to 0\")\n\n\tif not(isPower2(N)): # raise error if N not a power of two\n\t\traise ValueError(\"FFT size (N) is not a power of 2\")\n\t\t\n\tw = hanning(N) # analysis window\n\tx = np.append(np.zeros(No2),x) # add zeros at beginning to center first window at sample 0\n\tx = np.append(x,np.zeros(No2)) # add zeros at the end to analyze last sample\n\tpin = No2 # initialize sound pointer in middle of analysis window \n\tpend = x.size-No2 # last sample to start a frame\n\twhile pin<=pend: \n\t\txw = x[pin-No2:pin+No2] * w # window the input sound\n\t\tX = fft(xw) # compute FFT\n\t\tmX = 20 * np.log10(abs(X[:hN])) # magnitude spectrum of positive frequencies\n\t\tmY = resample(np.maximum(-200, mX), stocf*hN) # decimate the mag spectrum \n\t\tif pin == No2: # first frame\n\t\t\tstocEnv = np.array([mY])\n\t\telse: # rest of frames\n\t\t\tstocEnv = np.vstack((stocEnv, np.array([mY])))\n\t\tpin += H # advance sound pointer\n\treturn stocEnv", "def _amp_ ( self , x ) :\n v = self.amplitude ( x )\n #\n return 
complex( v.real () , v.imag () )", "def f_qmhosc(t, y, E):\n # replace the next line with your code\n raise NotImplementedError", "def wave_energy(F, df, rhow=1000, g=9.8):\n return rhow * g * np.sum(F * df)", "def test_get_Q_alt(self):\n vect_length = 50\n x_vect = np.random.normal(size=vect_length) \\\n + 1.j * np.random.normal(size=vect_length)\n y_vect = np.random.normal(size=vect_length) \\\n + 1.j * np.random.normal(size=vect_length)\n\n self.ds.spw_Nfreqs = vect_length\n\n for i in range(vect_length):\n Q_matrix = self.ds.get_Q_alt(i)\n # Test that if the number of delay bins hasn't been set\n # the code defaults to putting that equal to Nfreqs\n self.assertEqual(self.ds.spw_Ndlys, self.ds.spw_Nfreqs)\n\n xQy = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, y_vect))\n yQx = np.dot(np.conjugate(y_vect), np.dot(Q_matrix, x_vect))\n xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))\n\n # Test that Q matrix has the right shape\n self.assertEqual(Q_matrix.shape, (vect_length, vect_length))\n\n # Test that x^t Q y == conj(y^t Q x)\n self.assertAlmostEqual(xQy, np.conjugate(yQx))\n\n # x^t Q x should be real\n self.assertAlmostEqual(np.imag(xQx), 0.)\n\n x_vect = np.ones(vect_length)\n Q_matrix = self.ds.get_Q_alt(vect_length//2)\n xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))\n self.assertAlmostEqual(xQx, np.abs(vect_length**2.))\n # Sending in sinusoids for x and y should give delta functions\n\n # Now do all the same tests from above but for a different number\n # of delay channels\n self.ds.set_Ndlys(vect_length-3)\n for i in range(vect_length-3):\n Q_matrix = self.ds.get_Q_alt(i)\n xQy = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, y_vect))\n yQx = np.dot(np.conjugate(y_vect), np.dot(Q_matrix, x_vect))\n xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))\n\n # Test that Q matrix has the right shape\n self.assertEqual(Q_matrix.shape, (vect_length, vect_length))\n\n # Test that x^t Q y == conj(y^t Q x)\n self.assertAlmostEqual(xQy, np.conjugate(yQx))\n\n # x^t Q x should be real\n self.assertAlmostEqual(np.imag(xQx), 0.)\n\n x_vect = np.ones(vect_length)\n Q_matrix = self.ds.get_Q_alt((vect_length-2)//2-1)\n xQx = np.dot(np.conjugate(x_vect), np.dot(Q_matrix, x_vect))\n self.assertAlmostEqual(xQx, np.abs(vect_length**2.))\n # Sending in sinusoids for x and y should give delta functions\n\n # Make sure that error is raised when asking for a delay mode outside\n # of the range of delay bins\n pytest.raises(IndexError, self.ds.get_Q_alt, vect_length-1)\n\n # Ensure that in the special case where the number of channels equals\n # the number of delay bins, the FFT method gives the same answer as\n # the explicit construction method\n multiplicative_tolerance = 0.001\n self.ds.set_Ndlys(vect_length)\n for alpha in range(vect_length):\n Q_matrix_fft = self.ds.get_Q_alt(alpha)\n Q_matrix = self.ds.get_Q_alt(alpha, allow_fft=False)\n Q_diff_norm = np.linalg.norm(Q_matrix - Q_matrix_fft)\n self.assertLessEqual(Q_diff_norm, multiplicative_tolerance)\n\n # Check for error handling\n pytest.raises(ValueError, self.ds.set_Ndlys, vect_length+100)", "def test_energy_max(self):\n sqw_ws = MuscatSofQW(SampleWorkspace=self._sample_ws,\n ResolutionWorkspace=self._resolution_ws,\n ParameterWorkspace=self._param_ws,\n OutputWorkspace='__MuscatSofQWTest_result',\n EnergyMax=1.0)\n\n self.assertEqual(sqw_ws.getNumberHistograms(), self._sample_ws.getNumberHistograms())\n self.assertEqual(sqw_ws.getAxis(0).getUnit().unitID(), 'Energy')\n 
self.assertEqual(sqw_ws.getAxis(1).getUnit().unitID(), 'MomentumTransfer')\n\n x_data = sqw_ws.dataX(0)\n self.assertAlmostEqual(x_data[0], -1.0)\n self.assertAlmostEqual(x_data[-1], 1.0)\n self.assertAlmostEqual(x_data[len(x_data)/2], 0.0)\n\n self.assertEquals(sqw_ws.blocksize(), 400)", "def tt_irt1(q, f, xsf):\n c_irt1 = lib.tt_irt1\n c_irt1.restype = None\n c_irt1.argtypes = [c_int, POINTER(c_int), POINTER(c_double), POINTER(c_int), POINTER(c_double), c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)]\n # d n xsf ttrank ttcore M q Z lPz\n\n # Cores of f must be extracted carefully, since we might have discontinuous ps\n core = np.zeros((f.core).size, dtype=np.float64)\n ps_my = 0\n for i in range(0,f.d):\n cri = f.core[range(f.ps[i]-1,f.ps[i+1]-1)]\n core[range(ps_my,ps_my+f.r[i]*f.n[i]*f.r[i+1])] = cri\n ps_my = ps_my + f.r[i]*f.n[i]*f.r[i+1]\n\n d = c_int(f.d)\n n = (np.array(f.n)).ctypes.data_as(POINTER(c_int))\n xsfp = xsf.ctypes.data_as(POINTER(c_double))\n rf = (np.array(f.r)).ctypes.data_as(POINTER(c_int))\n corep = core.ctypes.data_as(POINTER(c_double))\n M = c_int(q.shape[0])\n qp = q.ctypes.data_as(POINTER(c_double))\n \n Z = np.zeros([q.shape[0], q.shape[1]], dtype=np.float64, order='F')\n lPz = np.zeros([q.shape[0]], dtype=np.float64, order='F')\n\n Zp = Z.ctypes.data_as(POINTER(c_double))\n lPzp = lPz.ctypes.data_as(POINTER(c_double))\n\n # Sampler is actually here\n c_irt1(d, n, xsfp, rf, corep, M, qp, Zp, lPzp)\n\n return (Z, lPz)", "def test_ssq_stft():\n th = 1e-1\n for N in (128, 129):\n x = np.random.randn(N)\n for n_fft in (120, 121):\n for window_scaling in (1., .5):\n if window_scaling == 1:\n window = None\n else:\n window = get_window(window, win_len=n_fft//1, n_fft=n_fft)\n window *= window_scaling\n\n Sx, *_ = ssq_stft(x, window=window, n_fft=n_fft)\n xr = issq_stft(Sx, window=window, n_fft=n_fft)\n\n txt = (\"\\nSSQ_STFT: (N, n_fft, window_scaling) = ({}, {}, {})\"\n ).format(N, n_fft, window_scaling)\n assert len(x) == len(xr), \"%s != %s %s\" % (N, len(xr), txt)\n mae = np.abs(x - xr).mean()\n assert mae < th, \"MAE = %.2e > %.2e %s\" % (mae, th, txt)", "def f(x):\n res = np.real(np.exp(-1j*x[1])*sum(y_obs[k+sample_size]*np.exp(1j*k*x[0]) \\\n for k in range(-sample_size,sample_size+1))) \n res = -res/np.sqrt(2*sample_size+1) \n return res", "def stft(x, fs, framesz, hop):\n framesamp = int(framesz*fs)\n hopsamp = int(hop*fs)\n w = scipy.hamming(framesamp)\n X = scipy.array([scipy.fft(w*x[i:i+framesamp],256)\n for i in range(0, len(x)-framesamp, hopsamp)])\n X=X[:,0:128]\n return X", "def spectra(num_energies, num_samples):\n fixed_header = (\n 1*8 # SSID\n + 4*8 # SCET Coarse time\n + 2*8 # SCET Fine time\n + 2*8 # Integration time\n + 1 # Spare\n + 1 # Comp Schema spectra S\n + 3 # Comp Schema spectra k\n + 3 # Comp Schema spectra M\n + 1 # Spare\n + 1 # Comp Schema trigger S\n + 3 # Comp Schema trigger S\n + 3 # Comp Schema trigger S\n + 4 # Spare\n + 12 # Pixel mask\n + 2*8 # Number of data samples\n )\n\n variable = (\n num_samples * (\n 1*8 # Detector index\n + 32*8 # Spectrum x 32\n + 1*8 # Trigger\n + 1*8 # Number of integrations\n )\n )\n\n return fixed_header, variable", "def f(self, x: np.array) -> np.array:\n return (1/np.sqrt(2*np.pi*self.sig**2))*np.exp(-1*((x - self.mu)**2/(2*self.sig**2)))", "def stochasticModel(x, H, N, stocf):\n\thN = N/2+1 \t\t# positive size of fft\n\tNo2 = N/2\t\t\t\t\t\t\t# half of N\n\tif (hN*stocf < 3): # raise exception if decimation factor too small\n\t\traise ValueError(\"Stochastic decimation factor 
too small\")\n\t\t\n\tif (stocf > 1): # raise exception if decimation factor too big\n\t\traise ValueError(\"Stochastic decimation factor above 1\")\n\t\n\tif (H <= 0): # raise error if hop size 0 or negative\n\t\traise ValueError(\"Hop size (H) smaller or equal to 0\")\n\t\t\n\tif not(isPower2(N)): # raise error if N not a power of twou\n\t\traise ValueError(\"FFT size (N) is not a power of 2\")\n\t\t\n\tw = hanning(N) # analysis/synthesis window\n\tx = np.append(np.zeros(No2),x) # add zeros at beginning to center first window at sample 0\n\tx = np.append(x,np.zeros(No2)) # add zeros at the end to analyze last sample\n\tpin = No2 # initialize sound pointer in middle of analysis window \n\tpend = x.size - No2 # last sample to start a frame\n\ty = np.zeros(x.size) # initialize output array\n\twhile pin<=pend: \n\t#-----analysis----- \n\t\txw = x[pin-No2:pin+No2]*w # window the input sound\n\t\tX = fft(xw) # compute FFT\n\t\tmX = 20 * np.log10(abs(X[:hN])) # magnitude spectrum of positive frequencies\n\t\tstocEnv = resample(np.maximum(-200, mX), hN*stocf) # decimate the mag spectrum \n\t#-----synthesis-----\n\t\tmY = resample(stocEnv, hN) # interpolate to original size\n\t\tpY = 2*np.pi*np.random.rand(hN) # generate phase random values\n\t\tY = np.zeros(N, dtype = complex)\n\t\tY[:hN] = 10**(mY/20) * np.exp(1j*pY) # generate positive freq.\n\t\tY[hN:] = 10**(mY[-2:0:-1]/20) * np.exp(-1j*pY[-2:0:-1]) # generate negative freq.\n\t\tfftbuffer = np.real(ifft(Y)) # inverse FFT\n\t\ty[pin-No2:pin+No2] += w*fftbuffer # overlap-add\n\t\tpin += H \t\t\t\t\t # advance sound pointer\n\ty = np.delete(y, range(No2)) # delete half of first window which was added \n\ty = np.delete(y, range(y.size-No2, y.size)) # delete half of last window which was added \n\treturn y", "def QMCF(sequence, kernel_type=STANDARD_KERNEL.RBF):\n quantile_lut = {STANDARD_KERNEL.RBF: qfs.standard_normal,\n STANDARD_KERNEL.M12: qfs.matern_12,\n STANDARD_KERNEL.M32: qfs.matern_32,\n STANDARD_KERNEL.M52: qfs.matern_52}\n qf = quantile_lut[kernel_type]\n s = qf(sequence.points)\n return s.T", "def read_cospectrum(path,d):\r\n spec = []\r\n timeseries = []\r\n for i in range(len(d)):\r\n filename = path + d[i]\r\n\r\n with open(filename, \"r\") as f:\r\n reader = csv.reader(f,delimiter=',')\r\n ct=1\r\n for row in reader:\r\n if ct==6:\r\n Hz = float(row[0].split('_')[-1])\r\n elif ct==7:\r\n height = float(row[0].split('_')[-1])\r\n elif ct==8:\r\n ws = float(row[0].split('_')[-1])\r\n elif ct==9:\r\n avg_period = float(row[0].split('_')[-1])\r\n elif ct==13:\r\n header = row\r\n elif ct>13:\r\n break\r\n ct+=1\r\n \r\n meta = [Hz,height,ws,avg_period]\r\n \r\n thisspec = np.genfromtxt(filename,delimiter=',',skip_header=13)\r\n spec.append(thisspec)\r\n thistime = re.findall('\\d{8}-\\d{4}',filename)[0]\r\n thisdate = datetime.strptime(thistime,'%Y%m%d-%H%M')\r\n timeseries.append(thisdate) \r\n \r\n return spec, timeseries, header, meta", "def test_QFTn(n):\n q = QuantumRegister(n, 'q') # +lost+lost2\n circ = QuantumCircuit(q)\n circ.x(q[0])\n RegX = [q[i] for i in range(n)]\n QFTn(circ, q, RegX)\n print(RegX)\n iQFTn(circ, q, RegX)\n launch2(circ)\n circ_m = measure_direct(circ, q, RegX)\n return circ_m", "def nsgitf_real(c, c_dc, c_nyq, multiscale, shift):\n c_l = []\n c_l.append(c_dc)\n c_l.extend([ci for ci in c])\n c_l.append(c_nyq)\n\n posit = np.cumsum(shift)\n seq_len = posit[-1]\n posit -= shift[0]\n out = np.zeros((seq_len,)).astype(c_l[1].dtype)\n\n for ii in range(len(c_l)):\n filt_len = len(multiscale[ii])\n win_range = 
posit[ii] + np.arange(-np.floor(filt_len / 2),\n np.ceil(filt_len / 2))\n win_range = (win_range % seq_len).astype(np.int)\n temp = np.fft.fft(c_l[ii]) * len(c_l[ii])\n\n fs_new_bins = len(c_l[ii])\n fk_bins = posit[ii]\n displace = int(fk_bins - np.floor(fk_bins / fs_new_bins) * fs_new_bins)\n temp = np.roll(temp, -displace)\n l = np.arange(len(temp) - np.floor(filt_len / 2), len(temp))\n r = np.arange(np.ceil(filt_len / 2))\n temp_idx = (np.concatenate((l, r)) % len(temp)).astype(np.int)\n temp = temp[temp_idx]\n lf = np.arange(filt_len - np.floor(filt_len / 2), filt_len)\n rf = np.arange(np.ceil(filt_len / 2))\n filt_idx = np.concatenate((lf, rf)).astype(np.int)\n m = multiscale[ii][filt_idx]\n out[win_range] = out[win_range] + m * temp\n\n nyq_bin = np.floor(seq_len / 2) + 1\n out_idx = np.arange(\n nyq_bin - np.abs(1 - seq_len % 2) - 1, 0, -1).astype(np.int)\n out[nyq_bin:] = np.conj(out[out_idx])\n t_out = np.real(np.fft.ifft(out)).astype(np.float64)\n return t_out", "def mel_frequencies(n_mels=128, fmin=0.0, fmax=11025.0, htk=False):\n\n # 'Center freqs' of mel bands - uniformly spaced between limits\n min_mel = hz_to_mel(fmin, htk=htk)\n max_mel = hz_to_mel(fmax, htk=htk)\n\n mels = np.linspace(min_mel, max_mel, n_mels)\n\n return mel_to_hz(mels, htk=htk)" ]
[ "0.5476735", "0.5465266", "0.5431895", "0.5348803", "0.5319814", "0.5249597", "0.5233212", "0.52282983", "0.51446587", "0.51347893", "0.51147515", "0.5109146", "0.50809836", "0.50781864", "0.5074541", "0.50507593", "0.5049618", "0.5042971", "0.50428605", "0.5027192", "0.5024835", "0.5012998", "0.5006414", "0.5004561", "0.4985085", "0.49617916", "0.49595293", "0.4952039", "0.4944486", "0.4938164", "0.49309403", "0.4930877", "0.49242696", "0.49169648", "0.4906885", "0.49058756", "0.4903733", "0.48953733", "0.48844314", "0.48786232", "0.4867429", "0.48551768", "0.48518613", "0.4849126", "0.48482534", "0.4840672", "0.48358604", "0.48301294", "0.4825506", "0.482155", "0.4817269", "0.4815175", "0.48109853", "0.48051655", "0.47987375", "0.47965991", "0.47962502", "0.4794352", "0.47878018", "0.47868332", "0.47789404", "0.47715428", "0.47641206", "0.47634703", "0.47622722", "0.47587097", "0.47581106", "0.4756976", "0.4748543", "0.4743778", "0.473737", "0.47373655", "0.4734936", "0.4734881", "0.4734355", "0.4729386", "0.47156295", "0.4713437", "0.4711971", "0.4711625", "0.4711423", "0.4708186", "0.4706289", "0.47038233", "0.4697696", "0.4695655", "0.46956187", "0.46908322", "0.46875635", "0.46756074", "0.46706796", "0.46703464", "0.4669873", "0.46688846", "0.46567494", "0.46535233", "0.46519762", "0.46506265", "0.4636759", "0.4630041" ]
0.64019674
0
Helper function to convert raw lines into a minibatch as a DotDict.
def process_batch(self, lines): batch_edges = [] batch_edges_values = [] batch_edges_target = [] # Binary classification targets (0/1) batch_nodes = [] batch_nodes_target = [] # Multi-class classification targets (`num_nodes` classes) batch_nodes_coord = [] batch_mst_edges = [] batch_mst_len = [] for line_num, line in enumerate(lines): line = line.split(" ") # Split into list # Compute signal on nodes nodes = np.ones(self.num_nodes) # All 1s for TSP... # Convert node coordinates to required format nodes_coord = [] for idx in range(0, 2 * self.num_nodes, 2): nodes_coord.append([float(line[idx]), float(line[idx + 1])]) # Compute distance matrix W_val = squareform(pdist(nodes_coord, metric='euclidean')) # Compute adjacency matrix if self.num_neighbors == -1: W = np.ones((self.num_nodes, self.num_nodes)) # Graph is fully connected else: W = np.zeros((self.num_nodes, self.num_nodes)) # Determine k-nearest neighbors for each node knns = np.argpartition(W_val, kth=self.num_neighbors, axis=-1)[:, self.num_neighbors::-1] # Make connections for idx in range(self.num_nodes): W[idx][knns[idx]] = 1 np.fill_diagonal(W, 2) # Special token for self-connections # Convert tour nodes to required format # Don't add final connection for tour/cycle _edges = [edge for edge in line[line.index('output') + 1:]] mst_edges = [literal_eval(' '.join(_edges[i:i+2])) for i in range(0, len(_edges),2)] # gets the edges from line --> equivalent to tour_nodes for TSP # Compute node and edge representation of mst + mst_len mst_len = 0 # length of mst nodes_target = np.zeros(self.num_nodes) edges_target = np.zeros((self.num_nodes, self.num_nodes)) for idx in range(len(mst_edges)): i = mst_edges[idx][0] j = mst_edges[idx][1] nodes_target[i] = idx # node targets: ordering of nodes in tour edges_target[i][j] = 1 edges_target[j][i] = 1 mst_len += W_val[i][j] # Concatenate the data batch_edges.append(W) batch_edges_values.append(W_val) batch_edges_target.append(edges_target) batch_nodes.append(nodes) batch_nodes_target.append(nodes_target) batch_nodes_coord.append(nodes_coord) batch_mst_edges.append(mst_edges) batch_mst_len.append(mst_len) # From list to tensors as a DotDict batch = DotDict() batch.edges = np.stack(batch_edges, axis=0) batch.edges_values = np.stack(batch_edges_values, axis=0) batch.edges_target = np.stack(batch_edges_target, axis=0) batch.nodes = np.stack(batch_nodes, axis=0) batch.nodes_target = np.stack(batch_nodes_target, axis=0) batch.nodes_coord = np.stack(batch_nodes_coord, axis=0) batch.mst_edges = np.stack(batch_mst_edges, axis=0) batch.mst_len = np.stack(batch_mst_len, axis=0) return batch
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parse_lines_to_dict(lines):\n res = {k: v.strip() for k, v in (m.split(':', 1) for m in lines)}\n return res", "def dictFromLines(lines,sep=None):\n reComment = re.compile('#.*')\n temp = [reComment.sub('',x).strip() for x in lines.split('\\n')]\n if sep == None or type(sep) == type(''):\n temp = dict([x.split(sep,1) for x in temp if x])\n else: #--Assume re object.\n temp = dict([sep.split(x,1) for x in temp if x])\n return temp", "def parse_entry(lines):\n entry = {}\n for line in lines:\n line = line.replace('\\n', '').replace('\\r', '')\n if ':: ' in line:\n (key, value) = line.split(':: ')\n value = base64.b64decode(value).decode('utf-8')\n elif ': ' in line:\n (key, value) = line.split(': ')\n else:\n continue\n if key not in entry:\n entry[key] = []\n entry[key].append(value)\n return entry", "def default_from_ruby(lines):\n newlines = lines[\n lines.index(\"[ruby] [INFO] Test docker hub official image first:\\n\"):\n lines.index(\"[ruby] [INFO] Test clear docker image:\\n\")].copy()\n\n line_str_key = \"Calculating\"\n line_dict = {}\n ret_lines = []\n for i in range(0, len(newlines)):\n line_dict[i] = newlines[i].split(\"\\n\")[0]\n\n for lineno, line_str in line_dict.items():\n if line_str.startswith(line_str_key):\n # print(lineno, \":\", line_str)\n tmp_line_no = lineno + 1\n while True:\n if newlines[tmp_line_no] != \"\\n\":\n if \"so_k_nucleotidepreparing\" in newlines[tmp_line_no]:\n ret_lines.append(\"so_k_nucleotidepreparing \" + newlines[tmp_line_no + 1])\n if \"so_reverse_complementpreparing\" in newlines[tmp_line_no]:\n ret_lines.append(\"so_reverse_complementpreparing \" + newlines[tmp_line_no + 1])\n ret_lines.append(newlines[tmp_line_no])\n else:\n break\n tmp_line_no += 1\n\n ret_line_list = []\n for line in ret_lines:\n # print(line)\n line_split = line.split()\n key_str = line_split[0]\n value = line_split[1]\n if \"Time\" in line:\n time_line_split = line.split(\"s -\")[0].split(\")\")\n # print(time_line_split)\n time_key = time_line_split[0].strip() + \")\"\n time_value = time_line_split[-1].strip()\n # print(time_value)\n ret_line_list.append({time_key: time_value})\n elif not value.startswith(\"/\"):\n # print(value)\n try:\n key_str = float(str(key_str))\n except Exception:\n pass\n if not isinstance(key_str, float):\n ret_line_list.append({key_str: value})\n # pprint(ret_line_list)\n # print(len(ret_line_list))\n for tmp_dict in ret_line_list:\n data.get(\"default\").get(\"ruby\").update(tmp_dict)\n\n # influs_list = [\"app_answer\", \"app_aobench\", \"app_erb\", \"app_factorial\",\n # \"app_fib\", \"app_lc_fizzbuzz\", \"app_mandelbrot\", \"app_pentomino\",\n # \"app_raise\", \"app_strconcat\", \"app_tak\", \"app_tarai\", \"app_uri\",\n # \"array_sample_100k_10\", \"array_sample_100k_11\", \"array_sample_100k__100\",\n # \"array_sample_100k__1k\", \"array_sample_100k__6k\", \"array_sample_100k___10k\",\n # \"array_sample_100k___50k\", \"array_shift\", \"array_small_and\", \"array_small_diff\",\n # \"array_small_or\", \"array_sort_block\", \"array_sort_float\", \"array_values_at_int\",\n # \"array_values_at_range\", \"bighash\", \"complex_float_add\", \"complex_float_div\",\n # \"complex_float_mul\", \"complex_float_new\", \"complex_float_power\", \"complex_float_sub\",\n # \"dir_empty_p\", \"enum_lazy_grep_v_100\", \"enum_lazy_grep_v_20\", \"enum_lazy_grep_v_50\",\n # \"enum_lazy_uniq_100\", \"enum_lazy_uniq_20\", \"enum_lazy_uniq_50\", \"erb_render\",\n # \"fiber_chain\", \"file_chmod\", \"file_rename\", \"hash_aref_dsym\", 
\"hash_aref_dsym_long\",\n # \"hash_aref_fix\", \"hash_aref_flo\", \"hash_aref_miss\", \"hash_aref_str\", \"hash_aref_sym\",\n # \"hash_aref_sym_long\", \"hash_flatten\", \"hash_ident_flo\", \"hash_ident_num\", \"hash_ident_obj\",\n # \"hash_ident_str\", \"hash_ident_sym\", \"hash_keys\", \"hash_literal_small2\", \"hash_literal_small4\",\n # \"hash_literal_small8\", \"hash_long\", \"hash_shift\", \"hash_shift_u16\", \"hash_shift_u24\",\n # \"hash_shift_u32\", \"hash_small2\", \"hash_small4\", \"hash_small8\", \"hash_to_proc\",\n # \"hash_values\", \"int_quo\", \"io_copy_stream_write\", \"io_copy_stream_write_socket\",\n # \"io_file_create\", \"io_file_read\", \"io_file_write\", \"io_nonblock_noex\", \"io_nonblock_noex2\",\n # \"io_pipe_rw\", \"io_select\", \"io_select2\", \"io_select3\", \"loop_for\", \"loop_generator\",\n # \"loop_times\", \"loop_whileloop\", \"loop_whileloop2\", \"marshal_dump_flo\", \"marshal_dump_load_geniv\",\n # \"marshal_dump_load_time\",\n # \"Calculating-(1..1_000_000).last(100)\",\n # \"Calculating-(1..1_000_000).last(1000)\",\n # \"Calculating-(1..1_000_000).last(10000)\",\n # \"capitalize-1\",\n # \"capitalize-10\",\n # \"capitalize-100\",\n # \"capitalize-1000\",\n # \"downcase-1\",\n # \"downcase-10\",\n # \"downcase-100\",\n # \"downcase-1000\",\n # \"require\", \"require_thread\", \"securerandom\", \"so_ackermann\",\n # \"so_array\", \"so_binary_trees\", \"so_concatenate\", \"so_count_words\", \"so_exception\", \"so_fannkuch\",\n # \"so_fasta\", \"so_k_nucleotidepreparing\", \"so_lists\", \"so_mandelbrot\", \"so_matrix\",\n # \"so_meteor_contest\",\n # \"so_nbody\", \"so_nested_loop\", \"so_nsieve\", \"so_nsieve_bits\", \"so_object\", \"so_partial_sums\",\n # \"so_pidigits\", \"so_random\", \"so_reverse_complementpreparing\", \"so_sieve\", \"so_spectralnorm\",\n # \"string_index\", \"string_scan_re\",\n # \"string_scan_str\",\n # \"to_chars-1\",\n # \"to_chars-10\",\n # \"to_chars-100\",\n # \"to_chars-1000\",\n # \"swapcase-1\",\n # \"swapcase-10\",\n # \"swapcase-100\",\n # \"swapcase-1000\",\n # \"upcase-1\",\n # \"upcase-10\",\n # \"upcase-100\",\n # \"upcase-1000\",\n # \"\"\"Time.strptime(\"28/Aug/2005:06:54:20 +0000\", \"%d/%b/%Y:%T %z\")\"\"\",\n # \"\"\"Time.strptime(\"1\", \"%s\")\"\"\",\n # \"\"\"Time.strptime(\"0 +0100\", \"%s %z\")\"\"\",\n # \"\"\"Time.strptime(\"0 UTC\", \"%s %z\")\"\"\",\n # \"\"\"Time.strptime(\"1.5\", \"%s.%N\")\"\"\",\n # \"\"\"Time.strptime(\"1.000000000001\", \"%s.%N\")\"\"\",\n # \"\"\"Time.strptime(\"20010203 -0200\", \"%Y%m%d %z\")\"\"\",\n # \"\"\"Time.strptime(\"20010203 UTC\", \"%Y%m%d %z\")\"\"\",\n # \"\"\"Time.strptime(\"2018-365\", \"%Y-%j\")\"\"\",\n # \"\"\"Time.strptime(\"2018-091\", \"%Y-%j\")\"\"\",\n # \"time_subsec\", \"vm1_attr_ivar\",\n # \"vm1_attr_ivar_set\",\n # \"vm1_block\", \"vm1_blockparam\", \"vm1_blockparam_call\", \"vm1_blockparam_pass\",\n # \"vm1_blockparam_yield\",\n # \"vm1_const\", \"vm1_ensure\", \"vm1_float_simple\", \"vm1_gc_short_lived\",\n # \"vm1_gc_short_with_complex_long\",\n # \"vm1_gc_short_with_long\", \"vm1_gc_short_with_symbol\", \"vm1_gc_wb_ary\", \"vm1_gc_wb_ary_promoted\",\n # \"vm1_gc_wb_obj\", \"vm1_gc_wb_obj_promoted\", \"vm1_ivar\", \"vm1_ivar_set\", \"vm1_length\",\n # \"vm1_lvar_init\",\n # \"vm1_lvar_set\", \"vm1_neq\", \"vm1_not\", \"vm1_rescue\", \"vm1_simplereturn\", \"vm1_swap\", \"vm1_yield\",\n # \"vm2_array\", \"vm2_bigarray\", \"vm2_bighash\", \"vm2_case\", \"vm2_case_lit\", \"vm2_defined_method\",\n # \"vm2_dstr\", \"vm2_eval\", \"vm2_fiber_switch\", 
\"vm2_freezestring\", \"vm2_method\",\n # \"vm2_method_missing\",\n # \"vm2_method_with_block\", \"vm2_module_ann_const_set\", \"vm2_module_const_set\", \"vm2_mutex\",\n # \"vm2_newlambda\",\n # \"vm2_poly_method\", \"vm2_poly_method_ov\", \"vm2_poly_singleton\", \"vm2_proc\", \"vm2_raise1\",\n # \"vm2_raise2\",\n # \"vm2_regexp\", \"vm2_send\", \"vm2_string_literal\", \"vm2_struct_big_aref_hi\",\n # \"vm2_struct_big_aref_lo\",\n # \"vm2_struct_big_aset\", \"vm2_struct_big_href_hi\", \"vm2_struct_big_href_lo\", \"vm2_struct_big_hset\",\n # \"vm2_struct_small_aref\", \"vm2_struct_small_aset\", \"vm2_struct_small_href\",\n # \"vm2_struct_small_hset\",\n # \"vm2_super\", \"vm2_unif1\", \"vm2_zsuper\", \"vm3_backtrace\", \"vm3_clearmethodcache\", \"vm3_gc\",\n # \"vm3_gc_old_full\",\n # \"vm3_gc_old_immediate\", \"vm3_gc_old_lazy\", \"vm_symbol_block_pass\", \"vm_thread_alive_check1\",\n # \"vm_thread_close\",\n # \"vm_thread_condvar1\", \"vm_thread_condvar2\", \"vm_thread_create_join\", \"vm_thread_mutex1\",\n # \"vm_thread_mutex2\",\n # \"vm_thread_mutex3\", \"vm_thread_pass\", \"vm_thread_pass_flood\", \"vm_thread_pipe\",\n # \"vm_thread_queue\",\n # \"vm_thread_sized_queue\", \"vm_thread_sized_queue2\", \"vm_thread_sized_queue3\",\n # \"vm_thread_sized_queue4\"\n # ]\n #\n # data_ruby = {}\n # for i in lines[\n # # lines.index(\"[ruby] [INFO] Test clear docker image:\\n\"):\n # # lines.index(\"Clr-Ruby-Server\\n\")]:\n # lines.index(\"[ruby] [INFO] Test docker hub official image first:\\n\"):\n # lines.index(\"Default-Ruby-Server\\n\")]:\n #\n # for startwith_item in influs_list:\n # # if i.startswith(startwith_item) or i.startswith(\"\\t\") and startwith_item in i:\n # if i.endswith(\"s/i)\\n\") and startwith_item in i:\n # num = re.findall(\"\\d+\\.?\\d* s|ERROR\", i)\n # data_ruby.update({startwith_item: num[-1][:-1]})\n #\n # if \"so_reverse_complementpreparing\" in i:\n # start = lines.index(i)\n # so_reverse_complementpreparing = lines[start + 1]\n # num = re.findall(\"\\d+\\.?\\d* s\", so_reverse_complementpreparing)\n # data_ruby.update({\"so_reverse_complementpreparing\": num[-1][:-1]})\n #\n # if \"so_k_nucleotidepreparing\" in i:\n # start = lines.index(i)\n # so_reverse_complementpreparing = lines[start + 1]\n # num = re.findall(\"\\d+\\.?\\d* s\", so_reverse_complementpreparing)\n # data_ruby.update({\"so_k_nucleotidepreparing\": num[-1][:-1]})\n #\n # lines = lines[\n # lines.index(\"[ruby] [INFO] Test docker hub official image first:\\n\"):\n # lines.index(\"Default-Ruby-Server\\n\")]\n #\n # for item in lines:\n # if item.startswith(\"Warming up --------------------------------------\\n\"):\n # up = lines.index(item)\n #\n # for item in lines[up:]:\n # if item.startswith(\"Comparison:\\n\"):\n # down = lines[up:].index(item) + up\n #\n # for i in lines[up:down]:\n #\n # if \"(1..1_000_000).last(100)\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"(1..1_000_000).last(100)\": num[-4]})\n #\n # if \"(1..1_000_000).last(1000)\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"(1..1_000_000).last(1000)\": num[-4]})\n #\n # if \"(1..1_000_000).last(10000)\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"(1..1_000_000).last(10000)\": num[-4]})\n #\n # for i in lines[down:]:\n #\n # if i.startswith(\"Warming up --------------------------------------\\n\"):\n # capit_start = lines[down:].index(i) + down\n #\n # for i in lines[capit_start:]:\n #\n # if i.startswith(\"Calculating 
-------------------------------------\\n\"):\n # calc_start = lines[capit_start:].index(i) + capit_start\n #\n # for i in lines[calc_start:]:\n #\n # if i.startswith(\"Comparison:\\n\"):\n # calc_end = lines[calc_start:].index(i) + calc_start\n #\n # for i in lines[calc_start:calc_end]:\n #\n # if \"capitalize-1 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"capitalize-1\": num[1]})\n #\n # if \"capitalize-10 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"capitalize-10\": num[1]})\n #\n # if \"capitalize-100 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"capitalize-100\": num[1]})\n #\n # if \"capitalize-1000 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"capitalize-1000\": num[1]})\n #\n # for i in lines[calc_end:]:\n # if i.startswith(\"Calculating -------------------------------------\\n\"):\n # downcase_start = lines[calc_end:].index(i) + calc_end\n #\n # for i in lines[downcase_start:]:\n #\n # if i.startswith(\"Comparison:\\n\"):\n # downcase_end = lines[downcase_start:].index(i) + downcase_start\n #\n # for i in lines[downcase_start:downcase_end]:\n #\n # if \"downcase-1 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"downcase-1\": num[1]})\n #\n # if \"downcase-10 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"downcase-10\": num[1]})\n #\n # if \"downcase-100 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"downcase-100\": num[1]})\n #\n # if \"downcase-1000 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"downcase-1000\": num[1]})\n #\n # for i in lines[downcase_end:]:\n # if i.startswith(\"Warming up --------------------------------------\\n\"):\n # to_chars = lines[downcase_end:].index(i) + downcase_end\n #\n # for i in lines[to_chars:]:\n #\n # if i.startswith(\"Calculating -------------------------------------\\n\"):\n # to_chars_start = lines[to_chars:].index(i) + to_chars\n #\n # for i in lines[to_chars_start:]:\n #\n # if i.startswith(\"Comparison:\\n\"):\n # to_chars_end = lines[to_chars_start:].index(i) + to_chars_start\n #\n # for i in lines[to_chars_start:to_chars_end]:\n #\n # if \"to_chars-1 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"to_chars-1\": num[1]})\n #\n # if \"to_chars-10 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"to_chars-10\": num[1]})\n #\n # if \"to_chars-100 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"to_chars-100\": num[1]})\n #\n # if \"to_chars-1000 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"to_chars-1000\": num[1]})\n #\n # for i in lines[to_chars_end:]:\n #\n # if i.startswith(\"Warming up --------------------------------------\\n\"):\n # swapcase = lines[to_chars_end:].index(i) + to_chars_end\n #\n # for i in lines[swapcase:]:\n #\n # if i.startswith(\"Calculating -------------------------------------\\n\"):\n # swapcase_start = lines[swapcase:].index(i) + swapcase\n #\n # for i in lines[swapcase_start:]:\n #\n # if i.startswith(\"Comparison:\\n\"):\n # swapcase_end = lines[swapcase_start:].index(i) + swapcase_start\n #\n # for i in lines[swapcase_start:swapcase_end]:\n #\n # if \"swapcase-1 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"swapcase-1\": num[1]})\n #\n # if \"swapcase-10 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"swapcase-10\": 
num[1]})\n #\n # if \"swapcase-100 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"swapcase-100\": num[1]})\n #\n # if \"swapcase-1000 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"swapcase-1000\": num[1]})\n #\n # for i in lines[swapcase_end:]:\n #\n # if i.startswith(\"Calculating -------------------------------------\\n\"):\n # upcase_start = lines[swapcase_end:].index(i) + swapcase_end\n #\n # for i in lines[upcase_start:]:\n #\n # if i.startswith(\"Comparison:\\n\"):\n # upcase_end = lines[upcase_start:].index(i) + upcase_start\n #\n # for i in lines[upcase_start:upcase_end]:\n #\n # if \"upcase-1 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"upcase-1\": num[1]})\n #\n # if \"upcase-10 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"upcase-10\": num[1]})\n #\n # if \"upcase-100 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"upcase-100\": num[1]})\n #\n # if \"upcase-1000 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"upcase-1000\": num[1]})\n #\n # for i in lines[upcase_end:]:\n #\n # if i.startswith(\"Calculating -------------------------------------\\n\"):\n # time_start = lines[upcase_end:].index(i) + upcase_end\n #\n # for i in lines[time_start:]:\n #\n # if i.startswith(\"Comparison:\\n\"):\n # time_end = lines[time_start:].index(i) + time_start\n #\n # for i in lines[time_start:time_end]:\n #\n # if \"\"\"Time.strptime(\"28/Aug/2005:06:54:20 +0000\", \"%d/%b/%Y:%T %z\") \"\"\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"\"\"Time.strptime(\"28/Aug/2005:06:54:20 +0000\", \"%d/%b/%Y:%T %z\")\"\"\": num[-4]})\n #\n # if \"\"\"Time.strptime(\"1\", \"%s\") \"\"\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"\"\"Time.strptime(\"1\", \"%s\")\"\"\": num[-4]})\n #\n # if \"\"\"Time.strptime(\"0 +0100\", \"%s %z\") \"\"\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"\"\"Time.strptime(\"0 +0100\", \"%s %z\")\"\"\": num[-4]})\n #\n # if \"\"\"Time.strptime(\"0 UTC\", \"%s %z\") \"\"\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"\"\"Time.strptime(\"0 UTC\", \"%s %z\")\"\"\": num[-4]})\n #\n # if \"\"\"Time.strptime(\"1.5\", \"%s.%N\") \"\"\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"\"\"Time.strptime(\"1.5\", \"%s.%N\")\"\"\": num[-4]})\n #\n # if \"\"\"Time.strptime(\"1.000000000001\", \"%s.%N\") \"\"\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"\"\"Time.strptime(\"1.000000000001\", \"%s.%N\")\"\"\": num[-4]})\n #\n # if \"\"\"Time.strptime(\"20010203 -0200\", \"%Y%m%d %z\") \"\"\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"\"\"Time.strptime(\"20010203 -0200\", \"%Y%m%d %z\")\"\"\": num[-4]})\n #\n # if \"\"\"Time.strptime(\"20010203 UTC\", \"%Y%m%d %z\") \"\"\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"\"\"Time.strptime(\"20010203 UTC\", \"%Y%m%d %z\")\"\"\": num[-4]})\n #\n # if \"\"\"Time.strptime(\"2018-365\", \"%Y-%j\") \"\"\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"\"\"Time.strptime(\"2018-365\", \"%Y-%j\")\"\"\": num[-4]})\n #\n # if \"\"\"Time.strptime(\"2018-091\", \"%Y-%j\") \"\"\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"\"\"Time.strptime(\"2018-091\", \"%Y-%j\")\"\"\": num[-4]})\n #\n # 
data.get(\"default\").get(\"ruby\").update(data_ruby)", "def _split_raw_file(raw_file: str) -> dict:\r\n input_file = raw_file.split(\"\\n\")\r\n\r\n line_count = 0\r\n statements = {}\r\n while line_count < len(input_file):\r\n line = input_file[line_count]\r\n if len(line) == 0:\r\n line_count += 1\r\n continue\r\n else:\r\n key = line\r\n value = input_file[line_count + 1]\r\n statements.update({key: value})\r\n line_count += 2\r\n return statements", "def process(raw):\n entry = { }\n cooked = [ ]\n\n for line in raw:\n line = line.strip()\n if len(line) == 0 or line[0]==\"#\" :\n continue\n parts = line.split(';')\n if len(parts) == 3:\n entry[\"description\"] = parts[0].strip() #adding key and values to the dict\n entry[\"long\"] = parts[1].strip()\n entry[\"lat\"] = parts[2].strip()\n cooked.append(entry) #add this dict entry into the array\n entry = { }\n continue\n else:\n raise ValueError(\"Trouble wiht line: '{}'\\n\".format(line))\n \n return cooked #returning an array of dicts", "def parse(line):\n return dict([pair.split(':') for pair in line.split()])", "def parse_bootstrap_support(lines):\r\n bootstraps = {}\r\n for line in lines:\r\n if line[0] == '#':\r\n continue\r\n wordlist = line.strip().split()\r\n bootstraps[wordlist[0]] = float(wordlist[1])\r\n\r\n return bootstraps", "def clr_from_ruby(lines):\n newlines = lines[\n lines.index(\"[ruby] [INFO] Test extra official docker image, 2.7.0:\\n\"):\n lines.index(\"Latest_Official\\n\")].copy()\n\n line_str_key = \"Calculating\"\n line_dict = {}\n ret_lines = []\n for i in range(0, len(newlines)):\n line_dict[i] = newlines[i].split(\"\\n\")[0]\n\n for lineno, line_str in line_dict.items():\n if line_str.startswith(line_str_key):\n # print(lineno, \":\", line_str)\n tmp_line_no = lineno + 1\n while True:\n if newlines[tmp_line_no] != \"\\n\":\n if \"so_k_nucleotidepreparing\" in newlines[tmp_line_no]:\n ret_lines.append(\"so_k_nucleotidepreparing \" + newlines[tmp_line_no + 1])\n if \"so_reverse_complementpreparing\" in newlines[tmp_line_no]:\n ret_lines.append(\"so_reverse_complementpreparing \" + newlines[tmp_line_no + 1])\n ret_lines.append(newlines[tmp_line_no])\n else:\n break\n tmp_line_no += 1\n\n ret_line_list = []\n for line in ret_lines:\n # print(line)\n line_split = line.split()\n key_str = line_split[0]\n value = line_split[1]\n if \"Time\" in line:\n time_line_split = line.split(\"s -\")[0].split(\")\")\n # print(time_line_split)\n time_key = time_line_split[0].strip() + \")\"\n time_value = time_line_split[-1].strip()\n # print(time_value)\n ret_line_list.append({time_key: time_value})\n elif not value.startswith(\"/\"):\n # print(value)\n try:\n key_str = float(str(key_str))\n except Exception:\n pass\n if not isinstance(key_str, float):\n ret_line_list.append({key_str: value})\n # print(len(ret_line_list))\n for tmp_dict in ret_line_list:\n data.get(\"clear\").get(\"ruby\").update(tmp_dict)\n # influs_list = [\"app_answer\", \"app_aobench\", \"app_erb\", \"app_factorial\",\n # \"app_fib\", \"app_lc_fizzbuzz\", \"app_mandelbrot\", \"app_pentomino\",\n # \"app_raise\", \"app_strconcat\", \"app_tak\", \"app_tarai\", \"app_uri\",\n # \"array_sample_100k_10\", \"array_sample_100k_11\", \"array_sample_100k__100\",\n # \"array_sample_100k__1k\", \"array_sample_100k__6k\", \"array_sample_100k___10k\",\n # \"array_sample_100k___50k\", \"array_shift\", \"array_small_and\", \"array_small_diff\",\n # \"array_small_or\", \"array_sort_block\", \"array_sort_float\", \"array_values_at_int\",\n # \"array_values_at_range\", 
\"bighash\", \"complex_float_add\", \"complex_float_div\",\n # \"complex_float_mul\", \"complex_float_new\", \"complex_float_power\", \"complex_float_sub\",\n # \"dir_empty_p\", \"enum_lazy_grep_v_100\", \"enum_lazy_grep_v_20\", \"enum_lazy_grep_v_50\",\n # \"enum_lazy_uniq_100\", \"enum_lazy_uniq_20\", \"enum_lazy_uniq_50\", \"erb_render\",\n # \"fiber_chain\", \"file_chmod\", \"file_rename\", \"hash_aref_dsym\", \"hash_aref_dsym_long\",\n # \"hash_aref_fix\", \"hash_aref_flo\", \"hash_aref_miss\", \"hash_aref_str\", \"hash_aref_sym\",\n # \"hash_aref_sym_long\", \"hash_flatten\", \"hash_ident_flo\", \"hash_ident_num\", \"hash_ident_obj\",\n # \"hash_ident_str\", \"hash_ident_sym\", \"hash_keys\", \"hash_literal_small2\", \"hash_literal_small4\",\n # \"hash_literal_small8\", \"hash_long\", \"hash_shift\", \"hash_shift_u16\", \"hash_shift_u24\",\n # \"hash_shift_u32\", \"hash_small2\", \"hash_small4\", \"hash_small8\", \"hash_to_proc\",\n # \"hash_values\", \"int_quo\", \"io_copy_stream_write\", \"io_copy_stream_write_socket\",\n # \"io_file_create\", \"io_file_read\", \"io_file_write\", \"io_nonblock_noex\", \"io_nonblock_noex2\",\n # \"io_pipe_rw\", \"io_select\", \"io_select2\", \"io_select3\", \"loop_for\", \"loop_generator\",\n # \"loop_times\", \"loop_whileloop\", \"loop_whileloop2\", \"marshal_dump_flo\", \"marshal_dump_load_geniv\",\n # \"marshal_dump_load_time\",\n # \"Calculating-(1..1_000_000).last(100)\",\n # \"Calculating-(1..1_000_000).last(1000)\",\n # \"Calculating-(1..1_000_000).last(10000)\",\n # \"capitalize-1\",\n # \"capitalize-10\",\n # \"capitalize-100\",\n # \"capitalize-1000\",\n # \"downcase-1\",\n # \"downcase-10\",\n # \"downcase-100\",\n # \"downcase-1000\",\n # \"require\", \"require_thread\", \"securerandom\", \"so_ackermann\",\n # \"so_array\", \"so_binary_trees\", \"so_concatenate\", \"so_count_words\", \"so_exception\", \"so_fannkuch\",\n # \"so_fasta\", \"so_k_nucleotidepreparing\", \"so_lists\", \"so_mandelbrot\", \"so_matrix\",\n # \"so_meteor_contest\",\n # \"so_nbody\", \"so_nested_loop\", \"so_nsieve\", \"so_nsieve_bits\", \"so_object\", \"so_partial_sums\",\n # \"so_pidigits\", \"so_random\", \"so_reverse_complementpreparing\", \"so_sieve\", \"so_spectralnorm\",\n # \"string_index\", \"string_scan_re\",\n # \"string_scan_str\",\n # \"to_chars-1\",\n # \"to_chars-10\",\n # \"to_chars-100\",\n # \"to_chars-1000\",\n # \"swapcase-1\",\n # \"swapcase-10\",\n # \"swapcase-100\",\n # \"swapcase-1000\",\n # \"upcase-1\",\n # \"upcase-10\",\n # \"upcase-100\",\n # \"upcase-1000\",\n # \"\"\"Time.strptime(\"28/Aug/2005:06:54:20 +0000\", \"%d/%b/%Y:%T %z\")\"\"\",\n # \"\"\"Time.strptime(\"1\", \"%s\")\"\"\",\n # \"\"\"Time.strptime(\"0 +0100\", \"%s %z\")\"\"\",\n # \"\"\"Time.strptime(\"0 UTC\", \"%s %z\")\"\"\",\n # \"\"\"Time.strptime(\"1.5\", \"%s.%N\")\"\"\",\n # \"\"\"Time.strptime(\"1.000000000001\", \"%s.%N\")\"\"\",\n # \"\"\"Time.strptime(\"20010203 -0200\", \"%Y%m%d %z\")\"\"\",\n # \"\"\"Time.strptime(\"20010203 UTC\", \"%Y%m%d %z\")\"\"\",\n # \"\"\"Time.strptime(\"2018-365\", \"%Y-%j\")\"\"\",\n # \"\"\"Time.strptime(\"2018-091\", \"%Y-%j\")\"\"\",\n # \"time_subsec\", \"vm1_attr_ivar\",\n # \"vm1_attr_ivar_set\",\n # \"vm1_block\", \"vm1_blockparam\", \"vm1_blockparam_call\", \"vm1_blockparam_pass\",\n # \"vm1_blockparam_yield\",\n # \"vm1_const\", \"vm1_ensure\", \"vm1_float_simple\", \"vm1_gc_short_lived\",\n # \"vm1_gc_short_with_complex_long\",\n # \"vm1_gc_short_with_long\", \"vm1_gc_short_with_symbol\", \"vm1_gc_wb_ary\", 
\"vm1_gc_wb_ary_promoted\",\n # \"vm1_gc_wb_obj\", \"vm1_gc_wb_obj_promoted\", \"vm1_ivar\", \"vm1_ivar_set\", \"vm1_length\",\n # \"vm1_lvar_init\",\n # \"vm1_lvar_set\", \"vm1_neq\", \"vm1_not\", \"vm1_rescue\", \"vm1_simplereturn\", \"vm1_swap\", \"vm1_yield\",\n # \"vm2_array\", \"vm2_bigarray\", \"vm2_bighash\", \"vm2_case\", \"vm2_case_lit\", \"vm2_defined_method\",\n # \"vm2_dstr\", \"vm2_eval\", \"vm2_fiber_switch\", \"vm2_freezestring\", \"vm2_method\",\n # \"vm2_method_missing\",\n # \"vm2_method_with_block\", \"vm2_module_ann_const_set\", \"vm2_module_const_set\", \"vm2_mutex\",\n # \"vm2_newlambda\",\n # \"vm2_poly_method\", \"vm2_poly_method_ov\", \"vm2_poly_singleton\", \"vm2_proc\", \"vm2_raise1\",\n # \"vm2_raise2\",\n # \"vm2_regexp\", \"vm2_send\", \"vm2_string_literal\", \"vm2_struct_big_aref_hi\",\n # \"vm2_struct_big_aref_lo\",\n # \"vm2_struct_big_aset\", \"vm2_struct_big_href_hi\", \"vm2_struct_big_href_lo\", \"vm2_struct_big_hset\",\n # \"vm2_struct_small_aref\", \"vm2_struct_small_aset\", \"vm2_struct_small_href\",\n # \"vm2_struct_small_hset\",\n # \"vm2_super\", \"vm2_unif1\", \"vm2_zsuper\", \"vm3_backtrace\", \"vm3_clearmethodcache\", \"vm3_gc\",\n # \"vm3_gc_old_full\",\n # \"vm3_gc_old_immediate\", \"vm3_gc_old_lazy\", \"vm_symbol_block_pass\", \"vm_thread_alive_check1\",\n # \"vm_thread_close\",\n # \"vm_thread_condvar1\", \"vm_thread_condvar2\", \"vm_thread_create_join\", \"vm_thread_mutex1\",\n # \"vm_thread_mutex2\",\n # \"vm_thread_mutex3\", \"vm_thread_pass\", \"vm_thread_pass_flood\", \"vm_thread_pipe\",\n # \"vm_thread_queue\",\n # \"vm_thread_sized_queue\", \"vm_thread_sized_queue2\", \"vm_thread_sized_queue3\",\n # \"vm_thread_sized_queue4\"\n # ]\n #\n # data_ruby = {}\n # for i in lines[\n # lines.index(\"[ruby] [INFO] Test clear docker image:\\n\"):\n # lines.index(\"Clr-Ruby-Server\\n\")]:\n #\n # for startwith_item in influs_list:\n # # if i.startswith(startwith_item) or i.startswith(\"\\t\") and startwith_item in i:\n # if i.endswith(\"s/i)\\n\") and startwith_item in i:\n # num = re.findall(\"\\d+\\.?\\d* s|ERROR\", i)\n # data_ruby.update({startwith_item: num[-1][:-1]})\n #\n # if \"so_reverse_complementpreparing\" in i:\n # start = lines.index(i)\n # so_reverse_complementpreparing = lines[start + 1]\n # num = re.findall(\"\\d+\\.?\\d* s\", so_reverse_complementpreparing)\n # data_ruby.update({\"so_reverse_complementpreparing\": num[-1][:-1]})\n #\n # if \"so_k_nucleotidepreparing\" in i:\n # start = lines.index(i)\n # so_reverse_complementpreparing = lines[start + 1]\n # num = re.findall(\"\\d+\\.?\\d* s\", so_reverse_complementpreparing)\n # data_ruby.update({\"so_k_nucleotidepreparing\": num[-1][:-1]})\n #\n # lines = lines[\n # lines.index(\"[ruby] [INFO] Test clear docker image:\\n\"):\n # lines.index(\"Clr-Ruby-Server\\n\")]\n #\n # for item in lines:\n # if item.startswith(\"Warming up --------------------------------------\\n\"):\n # up = lines.index(item)\n #\n # for item in lines[up:]:\n # if item.startswith(\"Comparison:\\n\"):\n # down = lines[up:].index(item) + up\n #\n # for i in lines[up:down]:\n #\n # if \"(1..1_000_000).last(100)\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"(1..1_000_000).last(100)\": num[-4]})\n #\n # if \"(1..1_000_000).last(1000)\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"(1..1_000_000).last(1000)\": num[-4]})\n #\n # if \"(1..1_000_000).last(10000)\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # 
data_ruby.update({\"(1..1_000_000).last(10000)\": num[-4]})\n #\n # for i in lines[down:]:\n #\n # if i.startswith(\"Warming up --------------------------------------\\n\"):\n # capit_start = lines[down:].index(i) + down\n #\n # for i in lines[capit_start:]:\n #\n # if i.startswith(\"Calculating -------------------------------------\\n\"):\n # calc_start = lines[capit_start:].index(i) + capit_start\n #\n # for i in lines[calc_start:]:\n #\n # if i.startswith(\"Comparison:\\n\"):\n # calc_end = lines[calc_start:].index(i) + calc_start\n #\n # for i in lines[calc_start:calc_end]:\n #\n # if \"capitalize-1 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"capitalize-1\": num[1]})\n #\n # if \"capitalize-10 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"capitalize-10\": num[1]})\n #\n # if \"capitalize-100 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"capitalize-100\": num[1]})\n #\n # if \"capitalize-1000 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"capitalize-1000\": num[1]})\n #\n # for i in lines[calc_end:]:\n # if i.startswith(\"Calculating -------------------------------------\\n\"):\n # downcase_start = lines[calc_end:].index(i) + calc_end\n #\n # for i in lines[downcase_start:]:\n #\n # if i.startswith(\"Comparison:\\n\"):\n # downcase_end = lines[downcase_start:].index(i) + downcase_start\n #\n # for i in lines[downcase_start:downcase_end]:\n #\n # if \"downcase-1 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"downcase-1\": num[1]})\n #\n # if \"downcase-10 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"downcase-10\": num[1]})\n #\n # if \"downcase-100 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"downcase-100\": num[1]})\n #\n # if \"downcase-1000 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"downcase-1000\": num[1]})\n #\n # for i in lines[downcase_end:]:\n # if i.startswith(\"Warming up --------------------------------------\\n\"):\n # to_chars = lines[downcase_end:].index(i) + downcase_end\n #\n # for i in lines[to_chars:]:\n #\n # if i.startswith(\"Calculating -------------------------------------\\n\"):\n # to_chars_start = lines[to_chars:].index(i) + to_chars\n #\n # for i in lines[to_chars_start:]:\n #\n # if i.startswith(\"Comparison:\\n\"):\n # to_chars_end = lines[to_chars_start:].index(i) + to_chars_start\n #\n # for i in lines[to_chars_start:to_chars_end]:\n #\n # if \"to_chars-1 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"to_chars-1\": num[1]})\n #\n # if \"to_chars-10 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"to_chars-10\": num[1]})\n #\n # if \"to_chars-100 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"to_chars-100\": num[1]})\n #\n # if \"to_chars-1000 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"to_chars-1000\": num[1]})\n #\n # for i in lines[to_chars_end:]:\n #\n # if i.startswith(\"Warming up --------------------------------------\\n\"):\n # swapcase = lines[to_chars_end:].index(i) + to_chars_end\n #\n # for i in lines[swapcase:]:\n #\n # if i.startswith(\"Calculating -------------------------------------\\n\"):\n # swapcase_start = lines[swapcase:].index(i) + swapcase\n #\n # for i in lines[swapcase_start:]:\n #\n # if i.startswith(\"Comparison:\\n\"):\n # swapcase_end = lines[swapcase_start:].index(i) + swapcase_start\n 
#\n # for i in lines[swapcase_start:swapcase_end]:\n #\n # if \"swapcase-1 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"swapcase-1\": num[1]})\n #\n # if \"swapcase-10 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"swapcase-10\": num[1]})\n #\n # if \"swapcase-100 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"swapcase-100\": num[1]})\n #\n # if \"swapcase-1000 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"swapcase-1000\": num[1]})\n #\n # for i in lines[swapcase_end:]:\n #\n # if i.startswith(\"Calculating -------------------------------------\\n\"):\n # upcase_start = lines[swapcase_end:].index(i) + swapcase_end\n #\n # for i in lines[upcase_start:]:\n #\n # if i.startswith(\"Comparison:\\n\"):\n # upcase_end = lines[upcase_start:].index(i) + upcase_start\n #\n # for i in lines[upcase_start:upcase_end]:\n #\n # if \"upcase-1 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"upcase-1\": num[1]})\n #\n # if \"upcase-10 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"upcase-10\": num[1]})\n #\n # if \"upcase-100 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"upcase-100\": num[1]})\n #\n # if \"upcase-1000 \" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"upcase-1000\": num[1]})\n #\n # for i in lines[upcase_end:]:\n #\n # if i.startswith(\"Calculating -------------------------------------\\n\"):\n # time_start = lines[upcase_end:].index(i) + upcase_end\n #\n # for i in lines[time_start:]:\n #\n # if i.startswith(\"Comparison:\\n\"):\n # time_end = lines[time_start:].index(i) + time_start\n #\n # for i in lines[time_start:time_end]:\n #\n # if \"\"\"Time.strptime(\"28/Aug/2005:06:54:20 +0000\", \"%d/%b/%Y:%T %z\") \"\"\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"\"\"Time.strptime(\"28/Aug/2005:06:54:20 +0000\", \"%d/%b/%Y:%T %z\")\"\"\": num[-4]})\n #\n # if \"\"\"Time.strptime(\"1\", \"%s\") \"\"\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"\"\"Time.strptime(\"1\", \"%s\")\"\"\": num[-4]})\n #\n # if \"\"\"Time.strptime(\"0 +0100\", \"%s %z\") \"\"\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"\"\"Time.strptime(\"0 +0100\", \"%s %z\")\"\"\": num[-4]})\n #\n # if \"\"\"Time.strptime(\"0 UTC\", \"%s %z\") \"\"\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"\"\"Time.strptime(\"0 UTC\", \"%s %z\")\"\"\": num[-4]})\n #\n # if \"\"\"Time.strptime(\"1.5\", \"%s.%N\") \"\"\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"\"\"Time.strptime(\"1.5\", \"%s.%N\")\"\"\": num[-4]})\n #\n # if \"\"\"Time.strptime(\"1.000000000001\", \"%s.%N\") \"\"\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"\"\"Time.strptime(\"1.000000000001\", \"%s.%N\")\"\"\": num[-4]})\n #\n # if \"\"\"Time.strptime(\"20010203 -0200\", \"%Y%m%d %z\") \"\"\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"\"\"Time.strptime(\"20010203 -0200\", \"%Y%m%d %z\")\"\"\": num[-4]})\n #\n # if \"\"\"Time.strptime(\"20010203 UTC\", \"%Y%m%d %z\") \"\"\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"\"\"Time.strptime(\"20010203 UTC\", \"%Y%m%d %z\")\"\"\": num[-4]})\n #\n # if \"\"\"Time.strptime(\"2018-365\", \"%Y-%j\") \"\"\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # 
data_ruby.update({\"\"\"Time.strptime(\"2018-365\", \"%Y-%j\")\"\"\": num[-4]})\n #\n # if \"\"\"Time.strptime(\"2018-091\", \"%Y-%j\") \"\"\" in i:\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data_ruby.update({\"\"\"Time.strptime(\"2018-091\", \"%Y-%j\")\"\"\": num[-4]})\n #\n # data.get(\"clear\").get(\"ruby\").update(data_ruby)\n\n # for item in lines:\n # if item.startswith(\"[perl] [INFO] Test clear docker image:\\n\"):\n # start = lines.index(item)\n #\n # for i in lines[start:]:\n # if i.startswith(\"Test: benchmarks/startup/noprog.b\"):\n # end = lines[start:].index(i) + start\n #\n # for item in lines[start:end]:\n # if item.startswith(\"Test-File: benchmarks/app/podhtml.b\\n\"):\n # up = lines[start:end].index(item) + start\n #\n # if item.startswith(\"Test: benchmarks/startup/noprog.b\"):\n # down = lines[start:end].index(item) + start\n #\n # for i in lines[up:down]:\n # if i.startswith(\"Avg\"):\n # num = re.findall(\"\\d+\\.?\\d*\", i)\n # data.get(\"clear\").get(\"perl\").update(\n # {\"podhtml.b\": num[0]}\n # )\n #\n # for item in lines[start:end]:\n # if item.startswith(\"Test: benchmarks/startup/noprog.b\"):\n # up = lines[start:end].index(item) + start\n #\n # if item.startswith(\"Test: benchmarks/statement/assign-int.b\"):\n # down = lines[start:end].index(item) + start\n #\n # for i in lines[up:down]:\n # if i.startswith(\"Avg:\"):\n # num = re.findall(\"\\d+\\.\\d*\", i)\n # data.get(\"clear\").get(\"perl\").update(\n # {\"noprog.b\": num[0]}\n # )", "def sParseBowtie(lines):\n d, s = None, None\n lines = lines.split(\"\\n\")\n s = lines[0]\n totalReads = int(lines[1].split(\";\")[0].split()[0])\n d1 = lines[4].strip().split()\n conUniqueMappedReads = int(d1[0])\n d2 = lines[8].strip().split()\n unconUniqueMappedReads = int(d2[0])\n #mapRatio = float(lines[15].split(\"%\")[0])\n mapRatio = float(lines[-2].split(\"%\")[0])\n d = {\n \"TotalRawReads\": totalReads,\n #\"ConcordantlyUniqueMapReads\": conUniqueMappedReads,\n #\"DisconcordantlyUniqueMapReads\": unconUniqueMappedReads,\n \"MappingRatio(%s)\": mapRatio\n #\"MultipleMapReads\": multipleMappedReads,\n #\"MultipleMapRatio\": multipleMappedRatio,\n }\n return d, s", "def parse_inputs(inputs):\n parsed = inputs.split('\\n')\n\n result_set = dict()\n this_tile = []\n tile_id = 0\n for line in parsed:\n if 'Tile' in line:\n tile_id = re.search('Tile ([0-9]+):', line).group(1)\n elif line:\n line = line.replace('#', '1').replace('.', '0')\n split_line = [int(x) for x in line]\n this_tile.append(split_line)\n else:\n result_set[tile_id] = array(this_tile)\n this_tile = []\n tile_id = 0\n\n return result_set", "def _raw_misc_to_dict(raw):\n ret = {}\n for elem in raw:\n key, _, val = elem.partition(',')\n key = key.lstrip(\"(\").strip()\n val = val[:-1].strip()\n ret[key] = val\n return ret", "def test_read_denoiser_mapping_empty_lines(self):\r\n\r\n mapping = \"\"\"1:\\t2\\t3\r\n4:\\t5\\t6\r\n7:\r\n\"\"\".split(\"\\n\")\r\n expected = {'1': ['2', '3'],\r\n '4': ['5', '6'],\r\n '7': []}\r\n self.assertEqual(read_denoiser_mapping(mapping),\r\n expected)\r\n\r\n # empty mapping gives empty result\r\n self.assertEqual(read_denoiser_mapping([]), {})", "def lineBuilders() :\n return dict(_lineBuilders)", "def _read_header_line_1(self, lines: list) -> dict:\n fields = (\n \"model_id\",\n \"unit_id\",\n \"software_level\",\n \"message_number\",\n \"message_subclass\",\n )\n if self._is_ct25k():\n indices = [1, 3, 4, 6, 7, 8]\n else:\n indices = [1, 3, 4, 7, 8, 9]\n values = [split_string(line, indices) for line in lines]\n 
return values_to_dict(fields, values)", "def parse_rating_dict(self, line):\n pass", "def from_lines(cls, lines: List[str], mode: str):\n for line in lines:\n if line.startswith('Original Input'):\n _input = line[line.find(':') + 1 :].strip()\n elif line.startswith('Predicted Str'):\n pred = line[line.find(':') + 1 :].strip()\n elif line.startswith('Ground-Truth'):\n target = line[line.find(':') + 1 :].strip()\n elif line.startswith('Ground Classes'):\n classes = line[line.find(':') + 1 :].strip()\n return cls(_input, target, pred, classes, mode)", "def parse_line(line):\n parts = line.strip().split('\\t')\n\n output = {}\n\n if len(parts) != len(COLUMNS):\n raise Exception('Incorrect number of columns in line.', parts, COLUMNS)\n\n for key, value in zip(COLUMNS, parts):\n if key == 'attributes':\n output[key] = parse_attributes(value)\n elif key == 'start' or key == 'stop':\n output[key] = int(value)\n else:\n output[key] = value\n\n return output", "def test_lines_class_parse(logger):\n raw_bytes = b''\n point_data_fragment = [\n # Only 1 line in this \"data\"\n (\n struct.pack(lines.Lines.fmt, 1),\n 1,\n ),\n # line:\n #\n # brush type\n (\n struct.pack(lines.BrushType.fmt, lines.BrushType.REVERSE['pen']),\n lines.BrushType.REVERSE['pen'],\n ),\n # colour\n (\n struct.pack(lines.Colour.fmt, lines.Colour.REVERSE['black']),\n lines.Colour.REVERSE['black']\n ),\n # magical unknown line attribute 1\n (\n struct.pack(lines.LineAttribute1.fmt, 0),\n 0\n ),\n # base brush size\n (\n struct.pack(\n lines.BrushBaseSize.fmt, lines.BrushBaseSize.REVERSE['small']\n ),\n lines.BrushBaseSize.REVERSE['small']\n ),\n # one point:\n (struct.pack(lines.Points.fmt, 1), 1),\n # the single point's data:\n (struct.pack(lines.X.fmt, 12.341), 12.341),\n (struct.pack(lines.Y.fmt, 107.301), 107.301),\n (struct.pack(lines.Pressure.fmt, 0.351), 0.351),\n (struct.pack(lines.RotX.fmt, 0.03), 0.03),\n (struct.pack(lines.RotY.fmt, 0.216), 0.216),\n ]\n for data in point_data_fragment:\n raw_bytes += data[0]\n\n # Set up the generator with the raw bytes:\n position = recover(raw_bytes)\n data = next(position)\n assert data == ''\n\n result = lines.Lines.load(position)\n assert result.count == 1\n assert len(result.lines) == 1\n result = result.lines[0]\n assert result.brush_type.name == 'pen'\n assert result.colour.name == 'black'\n assert result.line_attribute1.value == 0\n assert result.brush_base_size.name == 'small'\n assert result.points.count == 1\n result = result.points.points[0]\n assert round(result.x, 3) == 12.341\n assert round(result.y, 3) == 107.301\n assert round(result.pressure, 3) == 0.351\n assert round(result.rot_x, 3) == 0.03\n assert round(result.rot_y, 3) == 0.216", "def parse_line(cls, line):\n regex = re.compile(cls.pattern)\n m = regex.search(line)\n if m:\n data = m.groupdict()\n data = cls.post_process(data)\n if cls.date_format:\n data['time'] = cls.convert_time(data['time'])\n else:\n data['time'] = datetime.now()\n return data\n else:\n return {}", "def test_parse_mapping_file_to_dict(self):\r\n s1 = ['#sample\\ta\\tb', '#comment line to skip',\r\n 'x \\t y \\t z ', ' ', '#more skip', 'i\\tj\\tk']\r\n exp = ([['x', 'y', 'z'], ['i', 'j', 'k']],\r\n ['sample', 'a', 'b'],\r\n ['comment line to skip', 'more skip'])\r\n mapdict, comments = parse_mapping_file_to_dict(s1)\r\n expdict = {'x': {'a': 'y', 'b': 'z'}, 'i': {'a': 'j', 'b': 'k'}}\r\n self.assertEqual(mapdict, expdict)\r\n self.assertEqual(comments, ['comment line to skip', 'more skip'])", "def parse_to_dicts(lines, containers):\n\n 
pairs = [(a, b.strip()) for a, b in (m.split(':', 1) for m in lines)]\n item = {}\n kind, name = None, None\n for j in range(0, len(pairs)):\n if pairs[j][0] in containers.keys():\n if j != 0:\n containers[kind].append((name, item))\n item = {}\n kind = pairs[j][0]\n name = pairs[j][1]\n else:\n item[pairs[j][0]] = pairs[j][1]\n if kind is not None:\n containers[kind].append((name, item))\n\n return containers", "def from_lines(cls, lines):\n width = len(lines[0])\n height = len(lines)\n walls = set()\n pois = {}\n for y, line in enumerate(lines):\n for x, cell in enumerate(line):\n if cell == '#':\n walls.add((x, y))\n elif cell != '.':\n poi = int(cell)\n pois[poi] = (x, y)\n return cls(width, height, walls, pois)", "def build_tree(lines: []) -> {}:\n key_regex = re.compile(r\"(?P<key_val>^.*) bags contain(?P<contents>.*$)\")\n values_regex = re.compile(r\"(?P<count>\\d) (?P<color>.+?(?= bag))\")\n bag_map = {}\n for line in lines:\n match = key_regex.match(line)\n key = match['key_val']\n bag_map[key] = {}\n contents = match['contents']\n content_matches = values_regex.findall(contents)\n for color_match in content_matches:\n bag_map[key][color_match[1]] = int(color_match[0])\n\n return bag_map", "def load_lines():\n linelist = pkg_resources.resource_stream(__name__, \"lines.csv\")\n linedict = {}\n\n for line in linelist.readlines():\n ion, wav = line.split(b\",\")\n ion = ion.decode(\"utf-8\")\n wav = float(wav)\n\n try:\n linedict[ion].append(wav)\n\n except KeyError:\n linedict[ion] = [wav]\n\n for ion in linedict:\n linedict[ion] = np.array(linedict[ion])\n\n return linedict", "def lines():\n line_dict = {}\n #\n line_dict['ArI'] = 2**0\n line_dict['HgI'] = 2**1\n line_dict['KrI'] = 2**2\n line_dict['NeI'] = 2**3\n line_dict['XeI'] = 2**4\n line_dict['CdI'] = 2**5\n line_dict['ZnI'] = 2**6\n line_dict['HeI'] = 2**7\n line_dict['OH_R24000'] = 2**8\n line_dict['OH_triplespec'] = 2**9\n line_dict['CuI'] = 2**10\n line_dict['ArII'] = 2**11\n line_dict['OH_XSHOOTER'] = 2**12\n line_dict['OH_GNIRS'] = 2**13\n line_dict['OH_NIRES'] = 2**14\n line_dict['ThAr_XSHOOTER_VIS'] = 2**15\n line_dict['OH_GMOS'] = 2**16\n line_dict['OH_MODS'] = 2**17\n line_dict['ThAr_MagE'] = 2**18 # R=4100\n line_dict['OH_FIRE_Echelle'] = 2**19 # R=6000\n line_dict['Ar_IR_GNIRS'] = 2**20 # R=6000\n line_dict['FeI'] = 2**21\n line_dict['FeII'] = 2**22\n line_dict['UNKNWN'] = 2**23\n line_dict['Ar_IR_MOSFIRE'] = 2 ** 24\n line_dict['Ne_IR_MOSFIRE'] = 2 ** 25\n line_dict['OH_MOSFIRE_Y'] = 2 ** 26\n line_dict['OH_MOSFIRE_J'] = 2 ** 27\n line_dict['OH_MOSFIRE_H'] = 2 ** 28\n line_dict['OH_MOSFIRE_K'] = 2 ** 29\n line_dict['ThAr_XSHOOTER_UVB'] = 2**30\n #\n return line_dict", "def test_mapping_file_to_dict(self):\r\n s1 = ['#sample\\ta\\tb', '#comment line to skip',\r\n 'x \\t y \\t z ', ' ', '#more skip', 'i\\tj\\tk']\r\n exp = ([['x', 'y', 'z'], ['i', 'j', 'k']],\r\n ['sample', 'a', 'b'],\r\n ['comment line to skip', 'more skip'])\r\n mapres = parse_mapping_file(s1) # map_data, header, comments\r\n mapdict = mapping_file_to_dict(*mapres[:2])\r\n expdict = {'x': {'a': 'y', 'b': 'z'}, 'i': {'a': 'j', 'b': 'k'}}\r\n self.assertEqual(mapdict, expdict)", "def parse_block(lines):\n term = {\"alt_id\": [], \"relationship\": []}\n splitkv = re.compile(r\"(^[a-zA-Z_]+): (.+)\")\n for line in lines:\n m = re.search(splitkv, line)\n # assert m, f\"unexpected line: {line}\"\n key = m.group(1)\n value = m.group(2)\n if key in [\"id\", \"name\", \"namespace\", \"is_obsolete\"]:\n term[key] = value\n elif key == \"alt_id\":\n 
term[\"alt_id\"].append(value)\n elif key == \"is_a\":\n goid = value.split(\"!\")[0].strip()\n term[\"relationship\"].append({\"type\": \"is_a\", \"id\": goid})\n elif key == \"relationship\":\n typedef, goid = value.split(\"!\")[0].strip().split(\" \")\n term[\"relationship\"].append({\"type\": typedef, \"id\": goid})\n return term", "def prepare_looped_lines(self, alldict, comblist):\n loopline_dict=dict()\n for stridx in comblist:\n lidx = int(stridx.split('-')[0])\n loopidx = int(stridx.split('-')[1])\n loopline_dict[lidx] = alldict[lidx]['prepend'] + alldict[lidx]['looplist'][loopidx].strip() + alldict[lidx]['append'] + '\\n'\n return loopline_dict", "def test_line_class_parse(logger):\n raw_bytes = b''\n point_data_fragment = [\n # line:\n #\n # brush type\n (\n struct.pack(lines.BrushType.fmt, lines.BrushType.REVERSE['pen']),\n lines.BrushType.REVERSE['pen'],\n ),\n # colour\n (\n struct.pack(lines.Colour.fmt, lines.Colour.REVERSE['black']),\n lines.Colour.REVERSE['black']\n ),\n # magical unknown line attribute 1\n (\n struct.pack(lines.LineAttribute1.fmt, 0),\n 0\n ),\n # base brush size\n (\n struct.pack(\n lines.BrushBaseSize.fmt, lines.BrushBaseSize.REVERSE['small']\n ),\n lines.BrushBaseSize.REVERSE['small']\n ),\n # one point:\n (struct.pack(lines.Points.fmt, 1), 1),\n # the single point's data:\n (struct.pack(lines.X.fmt, 12.341), 12.341),\n (struct.pack(lines.Y.fmt, 107.301), 107.301),\n (struct.pack(lines.Pressure.fmt, 0.351), 0.351),\n (struct.pack(lines.RotX.fmt, 0.03), 0.03),\n (struct.pack(lines.RotY.fmt, 0.216), 0.216),\n ]\n for data in point_data_fragment:\n raw_bytes += data[0]\n\n # Set up the generator with the raw bytes:\n position = recover(raw_bytes)\n data = next(position)\n assert data == ''\n\n result = lines.Line.load(position)\n assert result.brush_type.name == 'pen'\n assert result.colour.name == 'black'\n assert result.line_attribute1.value == 0\n assert result.brush_base_size.name == 'small'\n assert result.points.count == 1\n result = result.points.points[0]\n assert round(result.x, 3) == 12.341\n assert round(result.y, 3) == 107.301\n assert round(result.pressure, 3) == 0.351\n assert round(result.rot_x, 3) == 0.03\n assert round(result.rot_y, 3) == 0.216", "def parseMetadataMap(lines):\r\n return MetadataMap(*parse_mapping_file_to_dict(lines))", "def test_parse_bootstrap_support(self):\r\n input_txt = \"\"\"#\\ta\\tb\\tc.\r\n#more comments here\r\nnode2\\t0\r\n17node\\t0.11922\r\n\"\"\"\r\n lines = input_txt.splitlines()\r\n exp = {'17node': 0.11922, 'node2': 0.00}\r\n obs = parse_bootstrap_support(lines)\r\n self.assertItemsEqual(obs, exp)", "def create_dicts(self, path):\n line_d = {}\n rel_d = {}\n\n with open(path) as f:\n for line in islice(f, 0, None, 4):\n lister = line.split('\"')\n line_number = int(lister[0].split('\\t')[0])\n line_d[line_number] = ''.join(str(s) for s in lister[1:])\n \n with open(path) as f:\n for i, line in enumerate(islice(f, 1, None, 4)):\n rel_d[i] = line.split('\\n')[0]\n \n return (line_d, rel_d)", "def _result_to_dict(line):\n f = line.split(':;')\n return {'server': f[0], 'os_name': f[1], 'status': f[2], 'ipv4': f[3]}", "def _parse_line(line: Match[str]) -> dict:\n request = line.group(\"request\")\n request = request.split()\n req_method = request[0] # GET, POST, PUT, etc.\n url = request[1]\n x = url.split(\"/\")[3:]\n uri = f'/{\"/\".join(x)}'\n\n timestamp = line.group(\"timestamp\") # timestamp in ISO format\n timestamp = MyTime._try_isoformat(timestamp, tzinfo=\"UTC\").dt\n\n res = {\n \"url\": url,\n 
\"uri\": uri,\n \"req_method\": req_method,\n \"timestamp\": timestamp,\n \"user_agent\": line.group(\"user_agent\"),\n }\n return res", "async def parse(self, raw: str) -> dict:", "def file2dict(file, dict, start_id):\n id = start_id\n line_number = 0\n file.seek(0)\n for line in file:\n if line_number == 0:\n n_atoms = int(float(line.strip()))\n if line_number >= 2 and line_number < n_atoms + 2:\n values_list = line.split()\n for i in range(1, 4):\n values_list[i] = float(values_list[i])\n dict[id] = {\n \"coor\": values_list[1:],\n \"element\": values_list[0]\n }\n id += 1\n line_number += 1\n return dict", "def split(self, line):\n parts = line.split()\n return {\n 'size': 0 if parts[9] == '-' else int(parts[9]), \n 'file_requested': parts[6]\n }", "def makeGcauCfgDictFromAgc(lineList): \r\n diction = {}\r\n withinCfgData = False\r\n for eachString in lineList:\r\n if re.match(RE_COMPILED_CFG_START, eachString):\r\n withinCfgData = True\r\n elif re.match(RE_COMPILED_CFG_END, eachString):\r\n withinCfgData = False\r\n elif withinCfgData:\r\n p = re.match(RE_COMPILED_CFG_ITEM, eachString)\r\n if p:\r\n obj = p.groups()[0]\r\n attr = p.groups()[1]\r\n val = p.groups()[2]\r\n if obj not in diction:\r\n diction[obj] = {}\r\n diction[obj][attr] = val\r\n return diction", "def build_feed_dict(model_variables_list, minibatch):\n\n in_vertex1, in_edge1, in_hood_indices1, in_vertex2, in_edge2, in_hood_indices2, examples, preds, labels, dropout_keep_prob = model_variables_list\n feed_dict = {\n in_vertex1: minibatch[\"l_vertex\"], in_edge1: minibatch[\"l_edge\"],\n in_vertex2: minibatch[\"r_vertex\"], in_edge2: minibatch[\"r_edge\"],\n in_hood_indices1: minibatch[\"l_hood_indices\"],\n in_hood_indices2: minibatch[\"r_hood_indices\"],\n examples: minibatch[\"label\"][:, :2],\n labels: minibatch[\"label\"][:, 2],\n dropout_keep_prob: dropout_keep\n }\n return feed_dict", "def _parse_raw_labels(self, lines):\r\n images = []\r\n labels = []\r\n idx = 0\r\n while idx < len(lines):\r\n image_path = lines[idx].strip()\r\n images.append(self._real_image_path(image_path))\r\n idx += 1\r\n\r\n num = int(lines[idx])\r\n idx += 1\r\n\r\n labels_ = []\r\n for _ in range(num):\r\n x1, y1, w, h, blur, expression, illumination, invalid, \\\r\n occlusion, pose = [int(v) \r\n for v in lines[idx].strip().split()]\r\n x2, y2 = x1 + w - 1, y1 + h - 1 # -1 to get the read x2, y2\r\n\r\n labels_.append([x1, y1, x2, y2])\r\n idx += 1\r\n \r\n labels.append(np.array(labels_))\r\n\r\n self._data_map[self._real_image_path(image_path)] = np.array(labels_)\r\n return np.array(images), np.array(labels)", "def parse(data, raw=False, quiet=False):\n jc.utils.compatibility(__name__, info.compatible, quiet)\n jc.utils.input_type_check(data)\n\n raw_output = {}\n\n if jc.utils.has_data(data):\n\n for line in filter(None, data.splitlines()):\n linedata = line.split(':', maxsplit=1)\n key = linedata[0].strip().lower().replace(' ', '_').replace('.', '_')\n value = linedata[1].strip()\n raw_output[key] = value\n\n if raw:\n return raw_output\n else:\n return _process(raw_output)", "def set_dict(self, lines):\n for line in lines:\n line = line.rstrip()\n split_line = line.split(\"\\t\")\n old_gene_id = split_line[0]\n new_gene_id = split_line[2]\n conv_dict = self.conversion_dict\n conv_dict[old_gene_id] = new_gene_id\n self.conversion_dict = conv_dict", "def _process_line(line, status):\n\n if line.startswith(ADAPTER_LINE_STARTSWITH):\n status.add_block('adapter', 'name', line)\n return\n elif line.startswith(EXIT_LINE_STARTSWITH):\n 
status.consolidate()\n return\n\n key, value = [el.strip(' \\t\\r\\n') for el in line.split(':', 1)]\n\n if key in KEY_TO_CONTEXT.keys():\n status.add_block(KEY_TO_CONTEXT[key], key, value)\n else:\n status.set_property(key, value)", "def read_line2(path):\n f = open(path, 'r', encoding='utf-8')\n lines = f.readlines()\n data = {}\n for idx, i in enumerate(lines):\n data[idx] = i.strip()\n return data", "def _read_header_line_2(lines: list) -> dict:\n fields = (\n \"detection_status\",\n \"warning\",\n \"cloud_base_data\",\n \"warning_flags\",\n )\n values = [[line[0], line[1], line[3:20], line[21:].strip()] for line in lines]\n return values_to_dict(fields, values)", "async def line_to_obj(raw_line: bytearray, ref: Ref) -> Optional[ObjectRec]:\n # secondary_update = None\n if raw_line[0:1] == b\"0\":\n return None\n\n if raw_line[0:1] == b'-':\n rec = ref.obj_store[int(raw_line[1:], 16)]\n rec.alive = 0\n await mark_dead(rec.id)\n\n if 'Weapon' in rec.Type:\n impacted = await determine_contact(rec, type='impacted', ref=ref)\n if impacted:\n rec.impacted = impacted[0]\n rec.impacted_dist = impacted[1]\n sql = create_impact_stmt()\n vals = (ref.session_id, rec.parent, rec.impacted, rec.id,\n ref.time_offset, rec.impacted_dist)\n await DB.execute(sql, *vals)\n return rec\n\n comma = raw_line.find(b',')\n rec_id = int(raw_line[0:comma], 16)\n try:\n rec = ref.obj_store[rec_id]\n rec.update_last_seen(ref.time_offset)\n rec.updates += 1\n\n except KeyError:\n # Object not yet seen...create new record...\n rec = ObjectRec(id_=rec_id,\n session_id=ref.session_id,\n first_seen=ref.time_offset,\n last_seen=ref.time_offset)\n ref.obj_store[rec_id] = rec\n\n while True:\n last_comma = comma + 1\n comma = raw_line.find(b',', last_comma)\n if comma == -1:\n break\n\n chunk = raw_line[last_comma:comma]\n eq_loc = chunk.find(b\"=\")\n key = chunk[0:eq_loc]\n val = chunk[eq_loc + 1:]\n\n if key == b\"T\":\n i = 0\n pipe_pos_end = -1\n while i < COORD_KEY_LEN:\n pipe_pos_start = pipe_pos_end + 1\n pipe_pos_end = chunk[eq_loc + 1:].find(b'|', pipe_pos_start)\n if pipe_pos_start == -1:\n break\n\n coord = chunk[eq_loc + 1:][pipe_pos_start:pipe_pos_end]\n if coord != b'':\n c_key = COORD_KEYS[i]\n if c_key == \"lat\":\n rec.lat = float(coord) + ref.lat\n elif c_key == \"lon\":\n rec.lon = float(coord) + ref.lon\n else:\n rec.update_val(c_key, float(coord))\n i += 1\n else:\n rec.update_val(\n key.decode('UTF-8') if key != b'Group' else 'grp', val.decode('UTF-8'))\n\n rec.compute_velocity(ref.time_since_last)\n\n if rec.updates == 1 and rec.should_have_parent():\n parent_info = await determine_contact(rec, type='parent', ref=ref)\n if parent_info:\n rec.parent = parent_info[0]\n rec.parent_dist = parent_info[1]\n\n return rec", "def generate_bed_dict(line, bed_header):\n out_dict = dict((key, value) for key, value in izip(bed_header, line))\n return(out_dict)", "def create_dicts(path):\n line_d = {}\n rel_d = {}\n\n with open(path) as f:\n for line in islice(f, 0, None, 4):\n lister = line.split('\"')\n line_number = int(lister[0].split('\\t')[0])\n line_d[line_number] = ''.join(str(s) for s in lister[1:])\n \n with open(path) as f:\n for i, line in enumerate(islice(f, 1, None, 4)):\n rel_d[i] = line.split('\\n')[0]\n \n return (line_d, rel_d)", "def parse_line(line):\n return parse('#{id_:d} @ {x:d},{y:d}: {w:d}x{h:d}', line)", "def _parse_spec(self):\n\n key, value = self._lines.current.split(':', 1)\n key, value = key.strip(), value.strip()\n value = int(value) if key in self._INT_PROPERTIES else value\n\n 
try:\n next(self._lines)\n except StopIteration:\n pass\n\n return {key: value}", "def _parse_spec(self):\n\n key, value = self._lines.current.split(':', 1)\n key, value = key.strip(), value.strip()\n value = int(value) if key in self._INT_PROPERTIES else value\n\n try:\n next(self._lines)\n except StopIteration:\n pass\n\n return {key: value}", "def lines_to_json():\n from os import walk\n lines = {}\n\n filenames = list(walk('lines'))[0][2]\n for file in filenames:\n line_name = file[:-4]\n dict = {\n \"name\": line_name,\n \"rulers\": [],\n \"stations\": [],\n }\n fp = open('lines/' + file, 'r', encoding='utf-8', errors='ignore')\n for i, s in enumerate(fp):\n s = s.strip()\n if i <= 2:\n continue\n if not s:\n continue\n\n try:\n st = {\n \"zhanming\": s.split(',')[0],\n \"licheng\": int(s.split(',')[1]),\n \"dengji\": int(s.split(',')[2])\n }\n except IndexError:\n print(s, file)\n dict[\"stations\"].append(st)\n lines[line_name] = dict\n fp.close()\n\n out = open('source/lines.json', 'w', encoding='utf-8')\n json.dump(lines, out, ensure_ascii=False)\n out.close()", "def _label_line_parser(record, splitter, strict=True):\n labels = []\n result = {}\n for line in record:\n try:\n key, val = splitter(line.rstrip())\n except:\n if strict:\n raise RecordError(\n \"Failed to extract key and value from line %s\" %\n line)\n else:\n continue # just skip the line if not strict\n\n if key in result:\n result[key].append(val)\n else:\n result[key] = [val]\n labels.append(key)\n return result, labels", "def parse_lines(lines):\n for line in lines:\n yield Record(line)", "def parsemeta(metalines):\n def parseline(line):\n res = [val.strip() for val in line[5:].split(u':', 1)]\n key, value = (res[0], res[1]) if len(res) == 2 else (res[0], u'')\n if re.match(r'^-?\\d*[\\.|,]?\\d+$', value):\n value = float(value)\n elif re.match(r'^\\[(.*)\\]', value):\n value = [val.strip() for val in value[1:-1].split(u',')]\n value = [float(val) if re.match(r'^-?\\d*[\\.|,]?\\d+$', val) else val for val in value]\n return key, value\n return OrderedDict(parseline(line) for line in metalines if line.startswith(u'#CTE_'))", "def testDictOnTwoLines(self):\n pl = Pipeline(loadInitFile=False)\n repl = REPL(pl)\n repl.runCommandLine('{ \"a\": 6, \"b\": ')\n repl.runCommandLine('7 }')\n self.assertEqual({'a': 6, 'b': 7}, pl.stdin)\n self.assertEqual(REPL.DEFAULT_PS1, repl.prompt)", "def parse_detection_file(detection_file):\n read_objects = {}\n with open(detection_file, 'r') as f:\n for line in f.readlines():\n line = line.rstrip() # remove newline character\n line = line.split(',')\n line[0] = int(line[0]) # timestamp_micro\n line[2: -1] = [float(x) for x in line[2: -1]] # from h to score\n\n o = WaymoObject(*line)\n try:\n read_objects[o.timestamp_micro].append(o)\n except KeyError:\n read_objects[o.timestamp_micro] = [o]\n\n return read_objects", "def __parseLine(line):\n\n # extract name\n name_len = line.index(\" \")\n name = line[:name_len]\n line = line[name_len + 3:]\n\n # array-ize 'electron' val\n elec_pos = line.index(\"electron\") + 9\n line = line[:elec_pos] + '[' + line[elec_pos:].replace(' ', ',') + ']'\n\n # quote 'small' val\n line = line.replace(' ', '')\n line = line.replace('small:', 'small:\"').replace(',molar', '\",molar')\n\n # quote all keys\n for i in [\"position\", \"number\", \"small\", \"molar\", \"electron\"]:\n line = line.replace(i, '\"' + i + '\"')\n\n return eval('{\"name\":\"' + name + '\",' + line + '}')", "def stage0_data(rust_root):\n nightlies = os.path.join(rust_root, 
\"src/stage0.txt\")\n with open(nightlies, 'r') as nightlies:\n lines = [line.rstrip() for line in nightlies\n if not line.startswith(\"#\")]\n return dict([line.split(\": \", 1) for line in lines if line])", "def createTraits(self,fileName,startLine,stopLine):\n traits_file = open(fileName,'r')\n \n \n read_file = ''\n temp_dict = {}\n temp_line = ''\n while read_file[:-2].lower() != startLine.lower():\n read_file = traits_file.readline()\n \n for line in traits_file:\n if line == \"\\n\":\n pass\n elif line[:-2] == stopLine or line[:-1] == stopLine:\n traits_file.close()\n return temp_dict \n elif len(line) > 0 and \":\" in line:\n temp_line = line[:line.index(\":\")] \n temp_dict[line[:line.index(\":\")]] = ''\n \n elif len(line) > 0:\n if len(temp_dict) == 0:\n pass\n else:\n temp_dict[temp_line] = line[:-1]", "def parse_lines(lines, options=None):\n if not options:\n options = {}\n res = []\n transaction = OrderedDict()\n for (idx, line) in enumerate(lines):\n line = line.strip()\n if not line:\n continue\n field_id = line[0]\n if field_id == \"^\":\n if transaction:\n res.append(transaction)\n transaction = OrderedDict([])\n elif field_id in list(config.FIELDS.keys()):\n transaction[config.FIELDS[field_id]] = line[1:]\n elif line:\n transaction[\"%s\" % idx] = line\n\n if len(list(transaction.keys())):\n res.append(transaction)\n\n # post-check to not interfere with present keys order\n for t in res:\n for field in list(config.FIELDS.values()):\n if field not in t:\n t[field] = None\n t[u\"filename\"] = options.get(\"src\", \"\")\n return res", "def parse_line(self, line):\n\n sep = ' ' if self.is_train else ', '\n line = line.split(sep)\n xmin, ymin, xmax, ymax = [int(x) for x in line[:4]]\n assert xmin < xmax\n assert ymin < ymax\n transcription = (sep.join(line[4:]))[1:-1]\n word_annotation = {\n 'bbox': [xmin, ymin, xmax - xmin, ymax - ymin],\n 'segmentation': [[xmin, ymin, xmax, ymin, xmax, ymax, xmin, ymax]],\n 'attributes': {\n 'transcription': transcription,\n 'legible': 1,\n 'language': 'english',\n }\n }\n return word_annotation", "def parse(data, raw=False, quiet=False):\n jc.utils.compatibility(__name__, info.compatible, quiet)\n jc.utils.input_type_check(data)\n\n raw_output = {}\n\n if jc.utils.has_data(data):\n data = data.splitlines()\n\n # linux uses = and bsd uses :\n if ' = ' in data[0]:\n delim = ' = '\n else:\n delim = ': '\n\n for line in data:\n linedata = line.split(delim, maxsplit=1)\n\n # bsd adds values to newlines, which need to be fixed up with this try/except block\n try:\n key = linedata[0]\n value = linedata[1]\n\n # syctl -a repeats some keys on linux. Append values from repeating keys\n # to the previous key value\n if key in raw_output:\n existing_value = raw_output[key]\n raw_output[key] = existing_value + '\\n' + value\n continue\n\n # fix for weird multiline output in bsd\n # if the key looks strange (has spaces or no dots) then it's probably a value field\n # on a separate line. in this case, just append it to the previous key in the dictionary.\n if '.' 
not in key or ' ' in key:\n previous_key = [*raw_output.keys()][-1]\n raw_output[previous_key] = raw_output[previous_key] + '\\n' + line\n continue\n\n # if the key looks normal then just add to the dictionary as normal\n else:\n raw_output[key] = value\n continue\n\n # if there is an IndexError exception, then there was no delimiter in the line.\n # In this case just append the data line as a value to the previous key.\n except IndexError:\n prior_key = [*raw_output.keys()][-1]\n raw_output[prior_key] = raw_output[prior_key] + '\\n' + line\n continue\n\n if raw:\n return raw_output\n else:\n return _process(raw_output)", "def dicts_from_lines(lines):\n lines = iter(lines)\n for line in lines:\n line = line.strip()\n if not line:\n continue # skip empty lines\n try:\n data = loads(line)\n if isinstance(data, list):\n yield from data\n else:\n yield data\n except ValueError:\n content = line + ''.join(lines)\n dicts = loads(content)\n if isinstance(dicts, list):\n yield from dicts\n else:\n yield dicts", "def ingest(in_info):\n if type(in_info) == str:\n with open(in_info) as infile:\n lines = (line.split(None) for line in infile)\n in_dict = {defn[0] : defn[1:] for defn in lines}\n else:\n in_dict = in_info\n return in_dict", "def fields_to_dict(lines, delim='\\t', strip_f=strip):\r\n result = {}\r\n for line in lines:\r\n # skip empty lines\r\n if strip_f:\r\n fields = map(strip_f, line.split(delim))\r\n else:\r\n fields = line.split(delim)\r\n if not fields[0]: # empty string in first field implies problem\r\n continue\r\n result[fields[0]] = fields[1:]\r\n return result", "def read_model_performances(lines):\n performances = {}\n patients = [str(x) for x in range(13)]\n current_model = ''\n for line in lines:\n words = line.split(' ')\n if (len(words) == 10) and (words[0] == 'starting'):\n if words[-1][:-1].split('/')[0] not in performances.keys():\n performances[words[-1][:-1].split('/')[0]] = []\n current_model = words[-1][:-1].split('/')[0]\n if (len(words) == 2) and (words[0] in patients):\n performances[current_model].append(float(words[1][:-1]))\n\n return performances", "def _get_objects(self,label_fh):\n objects = []\n for line in label_fh.readlines():\n try:\n object = {}\n line = line.replace(u'\\ufeff', '')\n if line != '':\n x1, y1, x2, y2, x3, y3, x4, y4= [int(i) for i in line.split(',')[:-1]]\n p1 = (x1, y1)\n p2 = (x2, y2)\n p3 = (x3, y3)\n p4 = (x4, y4)\n object['polygon'] = [p1,p2,p3,p4]\n objects.append(object)\n except:\n pass\n return objects", "def StaDefMemcached(lines):\n\n if_n = True\n for i in lines:\n if i.startswith(\"memcached\"):\n if \"latest\" in i:\n start = lines.index(i)\n\n while if_n:\n for i in lines[start:]:\n if i == '\\n':\n if_n = False\n end = lines[start:].index(i)\n\n for i in lines[start:end + start]:\n\n if i.startswith(\"memcached\"):\n if \"latest\" in i:\n num = re.findall(\"\\d+\\.?\\d*\", i)\n data.get(\"status_def\").get(\"memcached\").update(\n {\"Total\": num[-1] + \"MB\"}\n )\n\n if i.startswith(\"default base layer Size:\"):\n num = re.findall(\"\\d+\\.?\\d*\", i)\n data.get(\"status_def\").get(\"memcached\").update(\n {\"Base_Layer\": num[0]}\n )\n\n if i.startswith(\"default microservice added layer Size:\"):\n num = re.findall(\"\\d+\\.?\\d*\", i)\n data.get(\"status_def\").get(\"memcached\").update(\n {\"MicroService_layer\": num[0]}\n )", "def process(line, attributes):\n line = re.sub(attributes[\"regex\"], \"\", line)\n\n # Hack to support escaping of \\{ and \\}.\n line = line.replace(\"\\\\{\", \"\\0ob\\0\")\n line = 
line.replace(\"\\\\}\", \"\\0cb\\0\")\n\n line = line.replace(\"{}\", attributes.get(\"name\", \"\"))\n line = line.replace(\"{.}\", attributes.get(\"stem\", \"\"))\n line = line.replace(\"{..}\", attributes.get(\"extension\", \"\"))\n line = line.replace(\"{/}\", attributes.get(\"basename\", \"\"))\n line = line.replace(\"{//}\", attributes.get(\"dirname\", \"\"))\n line = line.replace(\"{/.}\", attributes.get(\"stembase\", \"\"))\n line = line.replace(\"{@}\", attributes.get(\"args\", \"\"))\n line = line.replace(\"{!}\", attributes.get(\"digest\", \"\"))\n\n # Replace escaped {}'s if any and cleanup.\n line = line.replace(\"\\0ob\\0\", \"{\")\n line = line.replace(\"\\0cb\\0\", \"}\")\n\n return line.strip()", "def get_lineid_content():\n lineid_content = {}\n lines_file_path = os.path.join(DATA_PATH + MOVIE_LINES_FILE)\n\n with open(lines_file_path, 'r', errors='ignore') as f:\n # +++$+++ is used to split the section in a single line\n # A correct formed line includes five sections\n # The first section is lineID\n # The last section is line content\n # Here we only need lineID and content\n\n for line in f:\n line_sections = line.split(' +++$+++ ')\n assert len(line_sections) == 5\n if line_sections[4][-1] == '\\n':\n line_sections[4] = line_sections[4][:-1]\n lineid_content[line_sections[0]] = line_sections[4]\n\n return lineid_content", "def env_lines_to_dict(self, env_lines):\n env_dict = {}\n for env_line in env_lines:\n split_env_line = shlex.split(env_line)\n if split_env_line[0] == \"export\":\n split_env_line = split_env_line[1:]\n for item in split_env_line:\n if \"=\" in item:\n k, v = item.split(\"=\", 1)\n env_dict[k] = v\n return env_dict", "def parseAttrLine(line):\n\tpre, post = line.strip().split(':')\n\tnumber, attr = pre.strip().split('.')\n\tattr = attr.strip().replace('%','').replace(' ', '-')\n\tvals = [clean(x) for x in post.strip().strip('.').split(',')]\n\treturn {'num':int(number), 'attr':clean(attr), 'vals':vals}", "def mapper(self, _, line):\n if line:\n try:\n line_values = re.match(self.REGEXP, line).groupdict()\n user_agent = user_agents.parse(line_values['user_agent']).browser.family\n self.increment_counter('Browsers', user_agent, 1)\n ip = line_values['ip']\n try:\n byte_count = int(line_values['byte_count'])\n except ValueError:\n byte_count = 0\n yield ip, ValueFormat(byte_count, 1)\n except AttributeError:\n self.increment_counter('ERRORS', 'ERRORS', 1)", "def agline2(line):\n \n vals = {}\n y = line.strip('\\n').split(',')\n y.extend(y[0].strip('.MTS').split('_'))\n \n #print(y)\n \n x = ['movie', 'moviecode', 'offset', 'well', 'agmin', 'agsec', 'agdur', \n 'agtype', 'agcomm', 'escmin', 'escsec', 'escdur', 'esctype', 'escbeh', \n 'esccomm', 'gen', 'date', 'assay', 'fps', 'flyid', 'side', 'moviepart']\n \n z = zip(x, y)\n\n for item in z:\n vals[item[0]] = item[1]\n\n return(vals)", "def _parse_program(raw_program: list):\n curr_mask = dict()\n masks = list()\n splitter = ' = '\n\n for p in raw_program:\n if 'mask' in p:\n split_pea = p.split(splitter)\n mask = split_pea[1]\n if curr_mask:\n masks.append(curr_mask)\n curr_mask = dict()\n curr_mask['mask'] = mask\n else:\n split_memory = p.split(splitter)\n mem = int(split_memory[0][4:-1])\n val = int(split_memory[1])\n curr_mask[mem] = val\n\n if curr_mask:\n masks.append(curr_mask)\n\n return masks", "def _parse_line(self, line):\n fields = line.split('|', 4) # stop splitting after fourth | found\n line_info = {'raw_message': line}\n if len(fields) == 5:\n 
line_info.update(dict(zip(self._fieldnames, fields)))\n return line_info", "def form_dict(path):\n data={}\n try:\n f=codecs.open(path, \"r\", \"utf-8\")\n text=f.read()\n f.close()\n except Exception:text=None\n if text!=None:\n #print text\n lines=text.split(\"\\n\")\n for sline in lines:\n if sline!=\"\" or sline==None:line_data=sline.partition(\":\")\n if len(line_data)==3:\n try:\n kin=line_data[0].strip().decode(\"utf-8\")\n data[kin.lower()]=line_data[2].strip()\n except:pass\n return data", "def read_denoiser_mapping(mapping_fh):\r\n denoiser_mapping = {}\r\n for i, line in enumerate(mapping_fh):\r\n if line == \"\":\r\n continue\r\n centroid, members = line.split(':')\r\n denoiser_mapping[centroid] = members.split()\r\n return denoiser_mapping", "def _parse_input(self):\n parser = re.compile(r'(\\w+)\\D+(\\d+)\\D+(\\d+)\\D+(\\d+)')\n reindeer = {}\n for line in self.puzzle_input.splitlines():\n instruction = parser.match(line)\n if not instruction:\n continue\n name, speed, flight, rest = instruction.groups()\n reindeer[name] = Reindeer(int(speed), int(flight), int(rest))\n return reindeer", "def StaClrMemcached(lines):\n\n if_n = True\n for i in lines:\n if i.startswith(\"memcached\"):\n if \"latest\" in i:\n start = lines.index(i)\n\n while if_n:\n for i in lines[start:]:\n if i == '\\n':\n if_n = False\n end = lines[start:].index(i)\n\n for i in lines[start:end + start]:\n\n if i.startswith(\"clearlinux/memcached\"):\n if \"latest\" in i:\n num = re.findall(\"\\d+\\.?\\d*\", i)\n data.get(\"status_Clr\").get(\"memcached\").update(\n {\"Total\": num[-1] + \"MB\"}\n )\n\n if i.startswith(\"clearlinux base layer Size:\"):\n num = re.findall(\"\\d+\\.?\\d*\", i)\n data.get(\"status_Clr\").get(\"memcached\").update(\n {\"Base_Layer\": num[0]}\n )\n\n if i.startswith(\"clearlinux microservice added layer Size:\"):\n num = re.findall(\"\\d+\\.?\\d*\", i)\n data.get(\"status_Clr\").get(\"memcached\").update(\n {\"MicroService_layer\": num[0]}\n )\n\n for i in lines[start:]:\n if i.startswith(\"clearlinux/memcached version:\\n\"):\n end = lines[start:].index(i) + 1\n num = re.findall(\"\\d+\\.?\\d*\", lines[start:][end])\n data.get(\"status_Clr\").get(\"memcached\").update(\n {\"VERSION_ID\": num[0]}\n )", "def parse_user_dict(self, line):\n pass", "def pre_process_raw(raw: dict) -> dict:\n api_data = raw.get(\"data\", {}).get(\"apiList\", [])\n return {api[\"id\"]: api for api in api_data}", "def __init__(self, lines):\n self.tiles = {}\n self.parse(lines)\n self.find_neighbors()\n self.find_corners()\n self.build_grid_top()\n self.build_grid_left()\n self.fill_grid()\n self.stitch_image()", "def from_line(self, line: str):\n raise NotImplementedError()", "def task1_mapper():\n line_count = 0\n for line in sys.stdin:\n # Clean input and split it\n lines = line.strip().split(\",\")\n line_count += 1\n # Check that the line is of the correct format and filtering the HEADER record \n # If line is malformed, we ignore the line and continue to the next line\n if line_count == 1:\n continue\n else:\n if len(lines) != 12:\n continue\n \n category = lines[3].strip()\n videoid = lines[0].strip()\n country = lines[11].strip()\n k_key = category+','+videoid\n\n print(\"{}\\t{}\".format(k_key, country))", "def get_id2exonlen_id2line(lines):\n id2exonlen = dict()\n id2line = dict()\n for line in lines:\n (chrom, chromStart, chromEnd, name, score, strand, thickStart, thickEnd, itemRgb, blockCount, blockSizes, blockStarts) = line.rstrip().split(\"\\t\") \n id2exonlen[name] = 
sum_comma_sep_str(blockSizes)\n id2line[name] = line\n return(id2exonlen, id2line)", "def parse(self, row, training=False):\n record = {}\n for alias, key in self.key_map.items():\n if key not in row:\n continue\n if key == 'image':\n image_raw = row[key]\n pil_img = Image.open(BytesIO(image_raw)).convert('RGB')\n img_tensor = self.transformer(pil_img)\n\n elif key == 'bbox/class':\n obj_cls = row[key]\n elif key == 'bbox/xmin':\n obj_xmin = row[key]\n elif key == 'bbox/ymin':\n obj_ymin = row[key]\n elif key == 'bbox/xmax':\n obj_xmax = row[key]\n elif key == 'bbox/ymax':\n obj_ymax = row[key]\n\n bboxes = []\n labels = []\n\n for i in range(len(obj_cls)):\n label = obj_cls[i]\n bbox = [\n float(obj_xmin[i]),\n float(obj_ymin[i]),\n float(obj_xmax[i]),\n float(obj_ymax[i])\n ]\n\n ignore = False\n if self.min_size:\n w = bbox[2] - bbox[0]\n h = bbox[3] - bbox[1]\n if w < self.min_size or h < self.min_size:\n ignore = True\n\n if not ignore:\n bboxes.append(bbox)\n labels.append(label)\n\n if not bboxes:\n bboxes = np.zeros((0, 4))\n labels = np.zeros((0,))\n else:\n bboxes = np.array(bboxes, ndmin=2)\n labels = np.array(labels).astype(np.int64)\n\n width = pil_img.size[0]\n height = pil_img.size[1]\n\n h_scale = 1.0 * self.img_shape[0] / height\n w_scale = 1.0 * self.img_shape[1] / width\n\n scale_factor = np.array([w_scale, h_scale, w_scale, h_scale], dtype=np.float32)\n\n bboxes = self._resize_bboxes(bboxes, scale_factor)\n\n record['image'] = img_tensor\n record['gt_bboxes'] = torch.from_numpy(bboxes)\n record['gt_labels'] = torch.from_numpy(labels)\n\n return record", "def ParseWorkload(contents):\n fp = io.StringIO(contents)\n result = {}\n for line in fp:\n if (\n line.strip()\n and not line.lstrip().startswith('#')\n and not line.lstrip().startswith('!')\n ):\n k, v = re.split(r'\\s*[:=]\\s*', line, maxsplit=1)\n result[k] = v.strip()\n return result", "def test_parse_denoiser_mapping(self):\r\n actual = parse_denoiser_mapping(self.denoiser_mapping1)\r\n expected = {'Read1': ['Read1', 'Read4', 'Read5 some comment'],\r\n 'Read2': ['Read2'],\r\n 'Read3': ['Read3', 'Read6']}\r\n self.assertDictEqual(actual, expected)", "def extract_data_trans_info(lines, PE_dims):\n data_trans_info = {}\n for line_id in range(len(lines)):\n line = lines[line_id]\n if line.find('read_channel_intel') != -1:\n # Check the start and end of the block\n block_start, block_end = locate_data_trans_block(line_id, lines) \n block_lines = lines[block_start : block_end + 1]\n # Parse the data type\n block_line = block_lines[1]\n data_type = block_line.strip().split(' ')[0]\n #print(data_type)\n # Parse the start PE index\n block_line = block_lines[2]\n m = re.search(r'\\((.+?)\\)', block_line)\n fifo_name = m.group(1)\n PE_index_start = fifo_name.split('_')[-len(PE_dims):]\n PE_index_start = [int(s) for s in PE_index_start]\n #print(PE_index_start)\n # Parse the IO group name\n group_name = fifo_name.split('_')[1]\n #print(group_name)\n data_trans_info[group_name] = {\\\n 'in_block_lines': block_lines, 'in_block_pos': [block_start, block_end], \\\n 'PE_index_start': PE_index_start, 'data_type': data_type}\n if line.find('write_channel_intel') != -1:\n m = re.search(r'\\((.+?)\\)', line)\n fifo_name = m.group(1).split(',')[0]\n group_name = fifo_name.split('_')[1]\n if group_name in data_trans_info: \n # Check the start and end of the block\n block_start, block_end = locate_data_trans_block(line_id, lines)\n block_lines = lines[block_start : block_end + 1]\n # Parse the end PE index\n block_line = block_lines[3]\n 
m = re.search(r'\\((.+?)\\)', block_line)\n fifo_name = m.group(1).split(',')[0]\n PE_index_end = fifo_name.split('_')[-len(PE_dims):]\n PE_index_end = [int(s) for s in PE_index_end]\n #print(PE_index_end)\n group_name = fifo_name.split('_')[1]\n data_trans_info[group_name]['PE_index_end'] = PE_index_end\n data_trans_info[group_name]['out_block_lines'] = block_lines\n data_trans_info[group_name]['out_block_pos'] = [block_start, block_end]\n\n return data_trans_info", "def _patchlines2cache(patchlines, left):\n if left:\n it = ((old, line) for old, _, line in patchlines)\n else:\n it = ((new, line) for _, new, line in patchlines)\n return dict(it)", "def parse_batch_db_entry(row) -> PlantBatch:\n\n # We need to reassign to a new variable since dicts are immutable\n print(row)\n row0 = ast.literal_eval(row[0])\n row1 = ast.literal_eval(row[1])\n row2 = ast.literal_eval(row[2])\n\n plant: Plant = Plant(name=row0['name'], family_name=row0['family_name'], metadata=row0['metadata'])\n location: Location = Location(row1['name'], row1['area'], Climate(row1['climate_type']))\n tray: Tray = Tray(row2['tray_type'], row2['footprint'], row2['capacity'])\n n_trays: int = int(row[3])\n planting_time: datetime = datetime.fromisoformat(row[4])\n\n batch: PlantBatch = PlantBatch(plant, location, tray, n_trays, planting_time)\n\n return batch", "def _parse_line(self, line):\n msg_info = {'raw_message': line}\n line_split = line.split(None, 2)\n try:\n msg_info['timestamp'] = datetime.strptime(' '.join(line_split[:2]), self.time_format)\n msg_info['message'] = line_split[2]\n except (ValueError, IndexError):\n pass\n return msg_info", "def _parse_handle_section(lines):\n data = {}\n key = ''\n next(lines)\n\n for line in lines:\n line = line.rstrip()\n if line.startswith('\\t\\t'):\n if isinstance(data[key], list):\n data[key].append(line.lstrip())\n elif line.startswith('\\t'):\n key, value = [i.strip() for i in line.lstrip().split(':', 1)]\n key = normalize(key)\n if value:\n data[key] = value\n else:\n data[key] = []\n else:\n break\n\n return data", "def get_vehicle_data() -> List[dict]:\n result = list()\n with open('./project/delijn/dummy-vehicles.txt') as f:\n d = dict()\n for line in f:\n line = line.strip()\n if line == '{':\n d.clear()\n elif line == '}':\n result.append(d.copy())\n else:\n contents = line.split(' ', 1)\n d[''.join(contents[0].split())] = try_convert(contents[1])\n return result", "def _parse_long(value):\n dict_value = {}\n\n for line in value.split('\\n'):\n if ':' in line:\n k, v = line.split(':', 1)\n dict_value[k.strip()] = v.strip()\n\n return dict_value", "def process_data(file: TextIO) -> 'Climatematch':\n climate_dict = {}\n line = file.readline()\n \n while line != '':\n username = line.strip()\n climate_dict[username] = {}\n \n line = file.readline().strip()\n climate_dict[username]['name'] = line\n line = file.readline().strip()\n climate_dict[username]['location'] = line\n \n climate_dict[username]['bio'] = ''\n line = file.readline()\n while line != 'ENDBIO\\n': \n climate_dict[username]['bio'] += line\n line = file.readline()\n \n climate_dict[username]['skills'] = []\n line = file.readline().strip()\n while line != 'ENDSKILL': \n climate_dict[username]['skills'].append(line)\n line = file.readline().strip()\n \n climate_dict[username]['interest'] = []\n line = file.readline().strip() \n while line != 'END': \n climate_dict[username]['interest'].append(line)\n line = file.readline().strip()\n line = file.readline()\n \n return climate_dict", "def process_line(nlp, 
line, parsed_file):\n m = json.loads(line)\n if \"highlights\" in m:\n if m['sentences'] != '' and m['sentences'] != [] and m['sentences'] != [''] and m['highlights'] != '':\n m[\"highlights_ud\"] = dependency_parse(nlp, m['highlights'])\n m[\"sentences_ud\"] = dependency_parse(nlp, m['sentences'])\n else:\n if m['sentences'] != '' and m['sentences'] != [] and m['sentences'] != ['']:\n m[\"sentences_ud\"] = dependency_parse(nlp, m['sentences'])\n if \"sentences_ud\" in m:\n parsed_file.write(json.dumps(m))\n parsed_file.write('\\n')", "def get_id2line(self):\n id2line = {}\n id_index = 0\n text_index = 4\n with open(self.movie_lines_filepath, 'r', encoding='iso-8859-1') as f:\n for line in f:\n items = line.split(self.DELIM)\n if len(items) == 5:\n line_id = items[id_index]\n dialog_text = items[text_index].strip()\n dialog_text = clean_text(dialog_text)\n id2line[line_id] = dialog_text\n return id2line" ]
[ "0.5950162", "0.57444644", "0.5603637", "0.54143274", "0.5345499", "0.53329337", "0.5319959", "0.53030837", "0.52169234", "0.5213757", "0.5200185", "0.51813257", "0.5157761", "0.5150942", "0.5118812", "0.5090973", "0.50735104", "0.5071656", "0.5063901", "0.50526667", "0.50393873", "0.5022623", "0.50160223", "0.4997892", "0.49955985", "0.49927756", "0.4986151", "0.49845743", "0.49692804", "0.49191627", "0.4918673", "0.4918", "0.48993912", "0.4889576", "0.48895624", "0.48843277", "0.48771808", "0.48761156", "0.48743102", "0.48706338", "0.48531523", "0.4829588", "0.48252785", "0.4824812", "0.4819935", "0.48046452", "0.4800122", "0.47963113", "0.47869143", "0.47849095", "0.47666767", "0.47666767", "0.47633478", "0.47579464", "0.475415", "0.4747509", "0.4745414", "0.47393107", "0.47255448", "0.4725232", "0.4717711", "0.47018102", "0.46977866", "0.46951118", "0.46927625", "0.4685614", "0.46799803", "0.46799672", "0.46677962", "0.4664159", "0.46553636", "0.4653567", "0.46509326", "0.4650808", "0.46482652", "0.4645534", "0.4645084", "0.4643738", "0.46397078", "0.4636307", "0.46345446", "0.46325645", "0.46297017", "0.4626813", "0.46223974", "0.46210897", "0.4620713", "0.46187872", "0.46036392", "0.45997718", "0.45958564", "0.45925957", "0.45916167", "0.45870376", "0.4565483", "0.45598006", "0.45589238", "0.45587122", "0.4556227", "0.4554312", "0.45530644" ]
0.0
-1
Load the image on initial load of the application
def OnInit(self): self.imageID = self.loadImage()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def OnInit( self ):\n self.imageID = self.loadImage ()", "def load_image(self, **kwargs):\n ...", "def load_image(self):\n # Minimal progress display while image is loaded.\n group = displayio.Group()\n group.append(centered_label('LOADING...', 40, 3))\n #self.rect = Rect(-board.DISPLAY.width, 120,\n # board.DISPLAY.width, 40, fill=0x00B000)\n #group.append(self.rect)\n board.DISPLAY.show(group)\n\n # pylint: disable=eval-used\n # (It's cool, is a 'trusted string' in the code)\n duration = eval(TIMES[self.time]) # Playback time in seconds\n # The 0.9 here is an empirical guesstimate; playback is ever-so-\n # slightly slower than benchmark speed due to button testing.\n rows = int(duration * self.rows_per_second * 0.9 + 0.5)\n # Remap brightness from 0.0-1.0 to brightness_range.\n brightness = (self.brightness_range[0] + self.brightness *\n (self.brightness_range[1] - self.brightness_range[0]))\n try:\n self.num_rows = self.bmp2led.process(self.path + '/' +\n self.images[self.image_num],\n self.tempfile,\n rows, brightness,\n self.loop,\n self.load_progress)\n except (MemoryError, BMPError):\n group = displayio.Group()\n group.append(centered_label('TOO BIG', 40, 3))\n board.DISPLAY.show(group)\n sleep(4)\n\n board.DISPLAY.show(displayio.Group()) # Clear display\n self.clear_strip() # LEDs off", "def initImages(self):\n pass", "def initImages(self):\n pass", "def initImages(self):\n pass", "def set_image(self):\r\n return loader.GFX['loadgamebox']", "def load_image_i(self, img_tk):\n\n self.p2_label_img.configure(image=img_tk)\n self.p2_label_img.image = img_tk", "def initial_image(path_images):\n\n path = os.getcwd()+path_images\n dirs = os.listdir(path)\n path = os.getcwd()+path_images+dirs[0]\n parent.ui.label_design_image.setPixmap(QtGui.QPixmap(path))", "def load_image(self):\n try:\n return Image.open(self._path, 'r')\n except IOError:\n messagebox.showerror(\"Error\", \"Wrong sprite file path!\")", "def _load_img_label(self):\n name = self._launch_file_b()\n self._img_label.configure(text=name)", "def load_background(self, image):\n self.bg = pygame.image.load(image).convert()", "def loaded_image(self, image):\r\n self.loaded_images.append(image)", "def load(self, step=0):\n \n # take a step, if requested\n self.step_and_validate(step)\n \n with self.img_output:\n clear_output(wait=True)\n display(Image(self.imgs[self.i], width=850, unconfined=True))", "def load_image(self, filename):\n return pygame.image.load(os.path.join('images', filename))", "def _load_img(self, name):\n try:\n img_path = os.path.join(global_var.PATH, \"maps\", name + \".png\")\n env_img = pygame.image.load(img_path)\n except Exception as e:\n print(e)\n print(\"Environment\", name, \"does not exist. 
Make sure that a PNG image exists\",\n \"under that name in the \\\"maps\\\" folder.\")\n sys.exit()\n\n return env_img", "def load(cls):\n\n cls.images[\"Wall\"] = pygame.image.load(\n \"ressources/images/wall.png\").convert()\n cls.images[\"MacGyver\"] = pygame.image.load(\n \"ressources/images/Mac.png\").convert()\n cls.images[\"Guardian\"] = pygame.image.load(\n \"ressources/images/Guardian.png\").convert()\n cls.images[\"Path\"] = pygame.image.load(\n \"ressources/images/path.png\").convert()\n cls.images[\"Tube\"] = pygame.image.load(\n \"ressources/images/tube.png\").convert()\n cls.images[\"Ether\"] = pygame.image.load(\n \"ressources/images/ether.png\").convert()\n cls.images[\"Needle\"] = pygame.image.load(\n \"ressources/images/needle.png\").convert()\n cls.images[\"gr\"] = pygame.image.load(\n \"ressources/images/but_du_jeu.png\").convert()", "def __start_loading_window(self):\n\n loading_img = ImageTk.PhotoImage(PIL.Image.open(r\"Images server\\loading screen.png\"))\n self.__main_window.geometry(f\"{loading_img.width()}x{loading_img.height()-20}\")\n self.__main_window.title(\"Loading\")\n self.__main_window.iconbitmap(r'Images server\\icon.ico') # put stuff to icon\n\n loading_label = Label(self.__main_window, image=loading_img, bg=\"#192b3d\")\n loading_label.place(x=0, y=0)\n self.__main_window.after(1000, self.__load_everything)\n self.__main_window.mainloop()", "def load_image(self, path):\n if path:\n self.original_image = cv2.imread(path, 1)\n self.prepare_images()", "def set_image(self, path):\r\n \r\n image = self._load_image(path)\r\n self.image_raw = image\r\n self.image = ImageTk.PhotoImage(image)\r\n self.image_panel.configure(image=self.image)", "def initImg(self):\n self.img = Image.new('RGBA',(self.width,self.height),color='#' + getConfigPart(self.theme,\"bg\"))\n self.draw = ImageDraw.Draw(self.img)", "def importImg(self):\n logger.info(\"import image \"+ str(self))\n file,types = QtWidgets.QFileDialog.getOpenFileName(self, 'Choose Image',\n BASE_DIR,\"Image files (*.jpg *.gif *.png)\")\n logger.debug(file)\n self.imageFile = file\n self.image.setPixmap(QtGui.QPixmap(file))\n self.image.adjustSize()", "def reload_image(self):\n img = self.img_manager.update_image()\n\n q_image = PyQt5.QtGui.QImage.fromData(img.read())\n q_pixmap = PyQt5.QtGui.QPixmap.fromImage(q_image)\n\n self.image_widget.setPixmap(q_pixmap)", "def load_background(self, filename):\n img = pygame.image.load(filename)\n return self.fit_image(img, self.width, self.height)", "def load(self):\n logger.debug(f\"Reading {self.path.name}\")\n self.label = int(Data.fromLabel(self.path.parent.name))\n self.image = skimg.data.imread(self.path)", "def initialize(self):\n super(WXImageView, self).initialize()\n shell = self.shell_obj\n self.set_image(shell.image)\n self.set_scale_to_fit(shell.scale_to_fit)\n self.set_preserve_aspect_ratio(shell.preserve_aspect_ratio)\n self.set_allow_upscaling(shell.allow_upscaling)", "def set_image(self, image_URL, bkg = None):\r\n\r\n self.image = self.image = pygame.image.load(image_URL).convert()\r\n if not bkg == None:\r\n # Set our transparent color\r\n self.image.set_colorkey(white)\r\n self.rect = self.image.get_rect()\r\n if self.drawable:\r\n self.set_drawable()", "def load_image(path_to_image, image_name):\n print(\"Loading: \", path_to_image + image_name, \" ...\")\n return Image.open(path_to_image + image_name)", "def load_image(self, image_name, piece_name):\n img = ImageTk.PhotoImage(Image.open(image_name))\n self.loaded_images[piece_name] = (img, 
image_name)\n return img", "def _load(self):\r\n\t\t\r\n\t\tself.image.blit(self.sheet.sheet, (0,0), (self.x, self.y, self.size, self.size))", "def load_image():\n # pylint: disable=global-statement\n global current_frame, current_loop, frame_count, frame_duration, bitmap\n while sprite_group:\n sprite_group.pop()\n\n filename = SPRITESHEET_FOLDER + \"/\" + file_list[current_image]\n\n bitmap = displayio.OnDiskBitmap(filename)\n ### Change the palette value proportional to BRIGHTNESS\n bitmap.pixel_shader[1] = image_brightness(brightness)\n sprite = displayio.TileGrid(\n bitmap,\n pixel_shader=bitmap.pixel_shader,\n tile_width=bitmap.width,\n tile_height=matrix.display.height,\n )\n\n sprite_group.append(sprite)\n\n current_frame = 0\n current_loop = 0\n frame_count = int(bitmap.height / matrix.display.height)\n frame_duration = DEFAULT_FRAME_DURATION", "def _setup_background(self):\r\n self.background_image = QtGui.QImage()\r\n data = self.model.get_background_image_data()\r\n self.background_image.loadFromData(data,'PNG')\r\n self.scene().addPixmap(QtGui.QPixmap.fromImage(self.background_image))\r\n self.fitInView(QtCore.QRectF(self.background_image.rect()), QtCore.Qt.KeepAspectRatio)", "def load_from_images(self):\n logging.debug(\"load_from_images called\")\n return True", "def on_load(self):\n self.__init__()", "def init():\n # Load images here\n assets[\"teapot\"] = pg.image.load(\"teapot.png\")\n\n # Load sounds here\n assets[\"plong\"] = pg.mixer.Sound(\"plong.wav\")", "def init():\n # Load images here\n assets[\"teapot\"] = pg.image.load(\"teapot.png\")\n\n # Load sounds here\n assets[\"plong\"] = pg.mixer.Sound(\"plong.wav\")", "def initialize(self):\n super(QtImageView, self).initialize()\n shell = self.shell_obj\n self.set_image(shell.image)\n self.set_scale_to_fit(shell.scale_to_fit)\n self.set_preserve_aspect_ratio(shell.preserve_aspect_ratio)\n self.set_allow_upscaling(shell.allow_upscaling)", "def set_image(self):\r\n return loader.GFX['title_box']", "def load_image(self, image_id):\n info = self.image_info[image_id]\n # bg_color = np.array(info['bg_color']).reshape([1, 1, 3])\n # image = np.ones([info['height'], info['width'], 3], dtype=np.uint8)\n # image = image * bg_color.astype(np.uint8)\n # for shape, color, dims in info['shapes']:\n # image = self.draw_shape(image, shape, dims, color)\n\n width, height = info['width'], info['height']\n\n if info['real']:\n # load image from disk\n impath = os.path.join(self.real_image_dirpath, info['real_image_path'])\n image = cv2.imread(impath,1)\n image = cv2.resize(image, (width, height), cv2.INTER_CUBIC)\n else:\n # synthesize image\n background_path = info['background_image_path']\n card_template_path = info['card_template_path']\n cornerpoints = info['cornerpoints']\n image = self.synthesize_image(card_template_path, background_path, cornerpoints, (width, height))\n return image", "def init_graphics(self):\n if type(self.image_ref) is Surface:\n # This is the case for the special visual effect\n self.image = self.image_ref\n else:\n image = GLOBAL.img(self.image_ref)\n if type(image) is tuple:\n # for decode purpose\n self.image = Surface(TILESIZE_SCREEN)\n self.image.fill(image)\n elif type(image) is list or type(image) is dict:\n self.animated = True\n self.current_frame = 0\n self.last_update = 0\n if type(image) is list:\n self.list_image = image\n self.image = self.list_image[self.current_frame]\n else:\n self.last_direction = (1, 0)\n self.dict_image = image\n self.image = self.dict_image['E'][self.current_frame]\n 
else:\n self.image = image\n self._reposition_rect()", "def appInit(self):\n glutInitDisplayMode( GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH )\n\n glClearColor(0.4, 0.4, 0.5, 1.0)\n glShadeModel(GL_SMOOTH)\n\n glEnable(GL_DEPTH_TEST)\n\n glEnable(GL_BLEND)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n\n self.texture = Texture()\n # self.texture.load_jpeg('Sunrise.jpg')\n self.x2yAspect = self.texture.GetWidth()/self.texture.GetHeight()\n glutReshapeFunc(self.reshape)\n glutDisplayFunc(self.redraw)", "def onclick_open_image(self):\n filename = select_file(\n \"Select Image\",\n \"../\",\n \"Image Files (*.jpeg *.jpg *.png *.gif *.bmg)\")\n if filename:\n param_name = select_file(\n \"Select Parameter\", \"../\", \"Parameter Files (*.json)\")\n if param_name:\n self.moildev = Moildev(param_name)\n self.image = read_image(filename)\n self.h, self.w = self.image.shape[:2]\n self.show_to_window()", "def _load_image(file: str) -> pyglet.image.AbstractImage:\n\n return pyglet.image.load(Config.RES_DIR + \"img\" + Config.FILE_SEPARATOR + file)", "def on_load(self):\n pass", "def on_load(self):\n pass", "def load_image(default=True):\n if default:\n print(\"in heres\")\n return self.img\n else:\n img = Image.fromarray(cv2.cvtColor(self.img, cv2.COLOR_BGR2RGB))\n self.size = img.shape\n return img", "def load_image(self, index):\n image_path = os.path.join(self.folder_path, self.image_ids[index] + '.jpg')\n img = Image.open(image_path).convert('RGB')\n if debug:\n print(\"Loaded image: \", image_path)\n return img", "def initialise(self):\r\n self.set_image(\"wall.png\")\r\n return self", "def __on_pre_processing_images_started(self):\n\n self.progress_window.show_pre_process_images_animation()", "def load_image(self):\n\n if not hasattr(self, 'user_id'):\n vk_common.dprint(1, \"Не определен атрибут user_id.\")\n return -1\n\n if not hasattr(self, 'top_id'):\n vk_common.dprint(1, \"Не определен атрибут top_id.\")\n return -1\n\n if self.top_id not in (1, 2, 3):\n vk_common.dprint(1, \"Атрибут top_id <> 1,2,3.\")\n return -1\n\n err = -1\n try:\n vk_common.mk_dir(vk_common.DIR_IMAGES)\n file_path = f\"{vk_common.DIR_IMAGES}{self.user_id}_{self.top_id}.jpg\"\n vk_common.dprint(2, vk_common.func_name(), f\"Загружаем фото на форму из файла {file_path}.\")\n\n img = Image.open(file_path)\n render = ImageTk.PhotoImage(img)\n\n if hasattr(self, 'label'):\n if self.label is not None:\n self.label.destroy()\n\n self.label = Label(self.f_image, image=render)\n self.label.image = render\n self.label.pack()\n\n err = 0\n except FileNotFoundError as error:\n vk_common.dprint(2, vk_common.func_name(), \"Возникло исключение: \", error)\n err = -2\n finally:\n return err", "def launch_image_manager(self):\n \n self._image_manager_view = ImageManagerView(self._file_table_model, self._image_manager_controller)\n self._image_manager_view.show()", "def __init__(self):\n pygame.init()\n self.screen = pygame.display.set_mode([c.SCREEN_WIDTH, c.SCREEN_HEIGHT])\n\n # initialise background to grey\n self.screen.fill(pygame.Color(100, 100, 100, 100))\n\n self.car = pygame.image.load(c.CAR_IMAGE).convert()\n self.red_light = pygame.image.load(c.LIGHT_IMAGE_RED).convert()\n self.green_light = pygame.image.load(c.LIGHT_IMAGE_GREEN).convert()", "def loadImage( self, imageName = \"nehe_wall.bmp\" ):\n try:\n from PIL.Image import open\n except ImportError, err:\n from Image import open\n glActiveTexture(GL_TEXTURE0_ARB);\n return texture.Texture( open(imageName) )", "def loadImage(j, im, opts={}):\n displayMessage(j, 
\"j.Load(%s, ...)\" % im)\n j.Load(im, opts)\n waitStatus(j)", "def get_image(self):\n self.attributes('-topmost', 'false')\n self.image_column_label.destroy()\n imagename = tkinter.filedialog.askopenfilename(initialdir=\"\",\n title=\"Select a File\",\n filetypes=((\"Image files\",\n \"*.png*\"),\n (\"Image files\",\n \"*.jpg\"),\n (\"Image files\",\n \"*.jpeg\"),\n (\"all files\",\n \"*.*\")))\n if imagename:\n img = ImageTk.PhotoImage(Image.open(imagename))\n self.habitat_image = imagename\n\n self.image_column_label = tkinter.Label(self.rightframe, text=self.habitat_image)\n self.image_column_label.pack()\n\n self.attributes('-topmost', 'true')", "def set_background_image(self, imagename):\n self.background.image = ui.get_image(imagename, '/home/pi/music/images/')", "def charger_image(nom):\n dir = os.path.dirname(__file__)\n return PhotoImage(file=os.path.join(dir, nom))", "def load_file(self):\n extensions = DataReader().get_supported_extensions_as_string()\n file_name, _ = QFileDialog.getOpenFileName(self, \"Open data set\", \"\",\n \"Images (\" + extensions + \")\")\n if not file_name:\n return\n\n self.render_widget.load_file(file_name)\n self.switch_to_simple()", "def on_asset_loaded(self, ev: AssetLoaded, dispatch):\r\n if isinstance(ev.asset, Image):\r\n if self._image is not None:\r\n # Load the asset if it has the same name as self\r\n if ev.asset.name == self._image.name and self._sprite is None:\r\n # print(f'Loading Sprite... for {self.entity}\\n')\r\n try:\r\n # Get batch & group from window service\r\n win = kge.ServiceProvider.getWindow()\r\n batch = win.batch\r\n layers = win.render_layers\r\n\r\n scale = self.entity.transform.scale\r\n\r\n for x in range(int(scale.y)):\r\n line = []\r\n for y in range(int(scale.x)):\r\n sprite = pyglet.sprite.Sprite(\r\n img=self._image.load(), subpixel=True,\r\n batch=batch,\r\n group=layers[self.entity.layer]\r\n )\r\n line.append(sprite)\r\n self._tiles.append(line)\r\n\r\n except Exception as e:\r\n import traceback\r\n # print(f\"Error : {e}\")\r\n traceback.print_exc()\r\n else:\r\n # print(f\"Asset loaded ==> {ev.asset.name} {self._image.name} Yes !\\n\")\r\n pass", "def showImage(self, image):\n \n self.image = img", "def load_images(self):\n for image in self.gltf.images:\n self.images.append(image.load(self.path.parent))", "def load_image(self, filename, colorkey=None): # -> image\n raise NotImplementedError(u'This should be implemented in a inherited class')", "def load_image(self, filename, colorkey=None): # -> image\n raise NotImplementedError(u'This should be implemented in a inherited class')", "def create_image(self):\n img = cv2.imread(self.url)\n self.img = cv2.resize(img, (self.window_x, self.window_y))", "def setImage(self, imagePath):\r\n if imagePath is not None:\r\n self.image = SquareImage.SquareImage(imagePath)\r\n self.statusBar.SetStatusText(\"Opened {}\".format(imagePath))\r\n self.imageView.Refresh()", "def on_load(self):", "def load_image(self, name, colorkey=None):\n dictname = name[0:name.find('.')]\n fullname = os.path.join('TeddyLevel','data', name)\n try:\n image = pygame.image.load(fullname)\n except pygame.error, message:\n print 'Cannot load image:', fullname\n raise SystemExit, message\n image = image.convert()\n if colorkey is not None:\n if colorkey is -1:\n colorkey = image.get_at((0,0))\n image.set_colorkey(colorkey, RLEACCEL)\n self.dict[dictname] = image, image.get_rect()", "def view(self):\n window = tk.Tk()\n label = tk.Label(window)\n label.pack()\n img = self.get_tkimage()\n 
label[\"image\"] = label.img = img\n window.mainloop()", "def loadImage(self, file_name):\n self.surf = pygame.image.load(file_name)\n self.draw_angle = 0 # In degrees\n self.bullets = []", "def setImage(self, imagePath=None):\n event = QtGui.QResizeEvent(\n self.studPhoto.sizeHint(),\n QtCore.QSize()\n )\n image = QtGui.QPixmap()\n image.load(imagePath)\n self.studPhoto.orgPixmap = image\n self.studPhoto.setPixmap(image)\n self.studPhoto.resizeEvent(event)", "def load_images(self):\r\n self.standing_frame = [load_image(\"cat1.png\")]\r\n self.walk_frames_r = [load_image(\"cat2.png\"), load_image(\"cat3.png\"),\r\n load_image(\"cat4.png\")]", "def __init__(self, images, loader):\n super().__init__()\n self._images = images\n self._loader = loader", "def get_image(self, image):\n self.original_image = image\n self.prepare_images()", "def loadImage(self, imagePath, customScaleFactor=None):\n\t\tif customScaleFactor: scaleFactor = customScaleFactor\n\t\telse: scaleFactor = self.IMAGESCALEUP\n\n\t\timg = pygame.image.load(imagePath)\n\t\timg = pygame.transform.scale(img, (img.get_width() * scaleFactor, img.get_height() * scaleFactor))\n\t\timg.convert_alpha()\n\t\treturn img", "def on_load_theme (self):\n\n\t\tif self.has_started:\n\t\t\tself.init_buffers()\n\t\t\tself.redraw_background()\n\t\t\tself.redraw_foreground()", "def on_start(self):\n self.init()", "def import_image():\n img = cv2.imread(\"resources/lena.png\")\n\n cv2.imshow(\"Output\", img)\n cv2.waitKey(0)", "def load_from_file(self, filename):\n\n loader = ImageLoader()\n loader.load(self, filename)", "def LoadImage(self, filename, mode):\n print(\"TODO: CHECK FOR >PNG?\")\n path = \"static/CVImages/\" + filename\n print(\" path \" + path)\n img = cv2.imread(path, mode) # 0 for black, 1 for rgb\n return img", "def show_img(self):\n if self.image is not None:\n cv2.imshow(self.image_window, self.image)\n cv2.waitKey(1)\n else:\n rospy.loginfo(\"No image to show yet\")", "def full_photo():\n top = Toplevel()\n top.title(\"Full APOD Photo\")\n top.iconbitmap('10.APOD Viewer/rocket.ico')\n\n #Load the full image to the top image\n img_label = Label(top, image=full_img)\n img_label.pack()", "def _set_image_from_pil_image(self, pil_image):\n\n nx_pix, ny_pix = pil_image.size\n self.config(scrollregion=(0, 0, nx_pix, ny_pix))\n self.variables._tk_im = ImageTk.PhotoImage(pil_image)\n self.variables.image_id = self.create_image(0, 0, anchor=\"nw\", image=self.variables._tk_im)\n self.tag_lower(self.variables.image_id)", "def showImage(self,image):\n if isinstance(image,QtGui.QImage):\n filename = None\n else:\n filename = str(image)\n image = QtGui.QImage(filename)\n if image.isNull():\n raise ValueError,\"Cannot load image file %s\" % filename\n #print(\"Size %sx%s\" % (image.width(),image.height()))\n self.setPixmap(QtGui.QPixmap.fromImage(image))\n self.filename = filename\n self.image = image \n self.zoom = 1.0", "def __load(self, node, path):\n\n self.firstgid = node['firstgid']\n self.margin = node['margin']\n self.spacing = node['spacing']\n\n # convierte la ruta de la imagen en una ruta relativa al proyecto\n directory = os.path.dirname(path)\n self.image_path = os.path.join(directory, *node['image'].split(r'\\/'))\n self.image_path = os.path.normpath(self.image_path)", "def _load_img(self, img_path):\n img = Image.open(img_path).convert('RGB')\n\n if self.use_landmarks:\n landmarks = np.array(self.landmarks[img_path[img_path.rfind('/')+1:]]).reshape(-1)\n img = FivePointsAligner.align(np.array(img), landmarks, show=False)\n img = 
Image.fromarray(img)\n\n if self.transform is None:\n return img\n\n return self.transform(img)", "def initAlertMsgImage(self):\n return", "def getimage(self):", "def __set_image__(self, image:pygame.image) -> None:\n if not self.graphicsLive:\n Graphic.__init__(self, image)\n self.graphicsLive = True", "def load_button_released(self, event):\r\n if self.winfo_containing(event.x_root, event.y_root) == self.load_button: # If the clicked area contains the\r\n # load button\r\n filename = filedialog.askopenfilename() # A file dialog opens asking the user to select the file\r\n img = cv2.imread(filename) # Image is read from that file location\r\n img = img.astype('float32') # Convert the pixels to 8 bit float to perform float operations on them\r\n\r\n if img is not None: # If image is selected\r\n self.master.filename = filename # Set the filename parameter\r\n self.master.images.append(img) # Append the selected image in the stack\r\n self.master.display_image.display_image(img=img) # Display the image on the window\r", "def setImage(self, img):\n self.img.setPixmap(QtGui.QPixmap(img))", "def __draw_image(self):\n if self.image_name is not None:\n img = mpimg.imread(self.image_name)\n extent = (0.5, self.xmax+0.5, -0.5, self.ymax-0.5)\n self.ax.imshow(img, extent=extent, origin='lower',\n alpha=self.image_alpha)", "def show(self):\n\n self.image.show()", "def on_image(self, image):", "def load_image(self, image_id):\n# logger.info(\"image {}\".format(image_id))\n info = self.image_info[image_id]\n if info[\"image\"] is None:\n im = self.gen_imgs[info[\"path\"]][\"input_images\"][info[\"image_index\"]]\n image = np.ones([info['height'], info['width'], 1], dtype=np.uint8)\n image[:,:,0] = self.gen_imgs[info[\"path\"]][\"input_images\"][info[\"image_index\"]]\n# image[:,:,1] = self.gen_imgs[info[\"path\"]][\"input_images\"][info[\"image_index\"]]\n# image[:,:,2] = self.gen_imgs[info[\"path\"]][\"input_images\"][info[\"image_index\"]]\n self.image_info[image_id][\"image\"] = image\n# logger.info(\"cached {}\".format(image_id))\n else:\n image = self.image_info[image_id][\"image\"]\n# logger.info(\"missed {}\".format(image_id))\n\n return image", "def load_image(self, image, get_meta=False):\n hardware_components.log_method(self, \"load_image\")\n communication_object = self._get_control_software().connection\n image = communication_object.load_image(image, get_meta)\n return image", "def set_img(self, img):\n self.img = img", "def get_image(self):\n image = None\n if self.image_path:\n image=ImageTk.PhotoImage(ImageOps.fit(\n Image.open(resolve_path(self.image_path)),self.size or (32,32)))\n self._hack.append(image)\n\n return image", "def __init__(self, image):\n self.image = image", "def initDevMsgImage(self):\n return", "def _display_img(self):\n if self._file_path is None:\n Debug.printi(\"No picture has been loaded to preview\", Debug.Level.ERROR)\n return\n photo = self._open_img(self._file_path)\n ImageViewDialog(self._parent, self._file_name, photo)" ]
[ "0.7676823", "0.72862685", "0.6990877", "0.69560045", "0.69560045", "0.69560045", "0.6736933", "0.66402304", "0.6628148", "0.65887284", "0.65709776", "0.6562708", "0.6558569", "0.6513278", "0.64529467", "0.6451759", "0.6443347", "0.64257944", "0.64138216", "0.6399771", "0.6373137", "0.63559926", "0.63493764", "0.63477296", "0.6342138", "0.6341588", "0.63383293", "0.632966", "0.6303601", "0.62960935", "0.62870777", "0.6221496", "0.61838734", "0.6183667", "0.61699605", "0.61699605", "0.61674315", "0.61524093", "0.61493737", "0.6126557", "0.61244553", "0.6123082", "0.61205274", "0.612027", "0.612027", "0.6090395", "0.60855764", "0.60635453", "0.60596436", "0.60278004", "0.60236645", "0.6015632", "0.6014338", "0.601163", "0.6009852", "0.6006691", "0.5997493", "0.5988737", "0.59865487", "0.59804213", "0.5971573", "0.5969403", "0.5969403", "0.5952472", "0.59492415", "0.59433055", "0.5937797", "0.5931574", "0.59297127", "0.5922971", "0.59147125", "0.5903716", "0.5883048", "0.58784527", "0.5863649", "0.5861588", "0.58557564", "0.5854032", "0.58360904", "0.58320016", "0.5831771", "0.5826028", "0.5824734", "0.5822872", "0.5818278", "0.5807747", "0.57977057", "0.5785091", "0.57829964", "0.57828337", "0.577923", "0.57781655", "0.5776111", "0.5774375", "0.5771733", "0.5768005", "0.57629055", "0.5760383", "0.5758543", "0.5756462" ]
0.77568126
0
Load an image file as a 2D texture using PIL
def loadImage(self, imageName="nehe_wall.bmp"): # PIL defines an "open" method which is Image specific! im = open(imageName) try: ix, iy, image = im.size[0], im.size[1], im.tostring("raw", "RGBA", 0, -1) except SystemError: ix, iy, image = im.size[0], im.size[1], im.tostring("raw", "RGBX", 0, -1) # Generate a texture ID ID = glGenTextures(1) # Make our new texture ID the current 2D texture glBindTexture(GL_TEXTURE_2D, ID) glPixelStorei(GL_UNPACK_ALIGNMENT, 1) # Copy the texture data into the current texture ID glTexImage2D( GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, image ) """ Note that only the ID is returned, no reference to the image object or the string data is stored in user space, the data is only present within the GL after this call exits. """ return ID
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_texture(file_name, x=0, y=0, width=0, height=0, scale=1):\n\n # See if we already loaded this file, and we can just use a cached version.\n if file_name in load_texture.texture_cache:\n return load_texture.texture_cache[file_name]\n\n source_image = PIL.Image.open(file_name)\n\n source_image_width, source_image_height = source_image.size\n\n if x != 0 or y != 0 or width != 0 or height != 0:\n if x > source_image_width:\n raise SystemError(\"Can't load texture starting at an x of {} \"\n \"when the image is only {} across.\"\n .format(x, source_image_width))\n if y > source_image_height:\n raise SystemError(\"Can't load texture starting at an y of {} \"\n \"when the image is only {} high.\"\n .format(y, source_image_height))\n if x + width > source_image_width:\n raise SystemError(\"Can't load texture ending at an x of {} \"\n \"when the image is only {} wide.\"\n .format(x + width, source_image_width))\n if y + height > source_image_height:\n raise SystemError(\"Can't load texture ending at an y of {} \"\n \"when the image is only {} high.\"\n .format(y + height, source_image_height))\n\n image = source_image.crop((x, y, x + width, y + height))\n else:\n image = source_image\n\n # image = _trim_image(image)\n\n image_width, image_height = image.size\n image_bytes = image.convert(\"RGBA\").tobytes(\"raw\", \"RGBA\", 0, -1)\n\n texture = GL.GLuint(0)\n GL.glGenTextures(1, ctypes.byref(texture))\n\n GL.glBindTexture(GL.GL_TEXTURE_2D, texture)\n GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)\n\n # Appveyor work-around\n appveyor = True\n if not appveyor:\n GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S,\n GL.GL_CLAMP_TO_BORDER)\n GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T,\n GL.GL_CLAMP_TO_BORDER)\n else:\n GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S,\n GL.GL_REPEAT)\n GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T,\n GL.GL_REPEAT)\n\n GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER,\n GL.GL_LINEAR)\n GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER,\n GL.GL_LINEAR_MIPMAP_LINEAR)\n GLU.gluBuild2DMipmaps(GL.GL_TEXTURE_2D, GL.GL_RGBA,\n image_width, image_height,\n GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, image_bytes)\n\n image_width *= scale\n image_height *= scale\n\n result = Texture(texture, image_width, image_height)\n load_texture.texture_cache[file_name] = result\n return result", "def load(file):\n img = open(file)\n\n try:\n ix, iy, image = img.size[0], img.size[1], img.tobytes(\"raw\", \"RGBA\", 0, -1)\n except SystemError:\n ix, iy, image = img.size[0], img.size[1], img.tobytes(\"raw\", \"RGBX\", 0, -1)\n\n imgID = glGenTextures(1)\n\n glBindTexture(GL_TEXTURE_2D, imgID)\n glPixelStorei(GL_UNPACK_ALIGNMENT, 1)\n\n glTexImage2D(GL_TEXTURE_2D, 0, 3, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, image)\n\n return imgID", "def BuildTexture(path):\n # Catch exception here if image file couldn't be loaded\n try:\n # Note, NYI, path specified as URL's could be access using python url lib\n # OleLoadPicturePath () supports url paths, but that capability isn't critcial to this tutorial.\n Picture = Image.open(path)\n except:\n print(\"Unable to open image file '%s'.\" % (path))\n return False, 0\n\n glMaxTexDim = glGetIntegerv(GL_MAX_TEXTURE_SIZE)\n\n WidthPixels = Picture.size[0]\n HeightPixels = Picture.size[1]\n\n if (WidthPixels > glMaxTexDim) or (HeightPixels > glMaxTexDim):\n # The image file is too large. 
Shrink it to fit within the texture dimensions\n # support by our rendering context for a GL texture.\n # Note, Feel free to experiemnt and force a resize by placing a small val into\n # glMaxTexDim (e.g. 32,64,128).\n if WidthPixels > HeightPixels:\n # Width is the domainant dimension.\n resizeWidthPixels = glMaxTexDim\n squash = float(resizeWidthPixels) / float(WidthPixels)\n resizeHeightPixels = int(HeighPixels * squash)\n else:\n resizeHeightPixels = glMaxTexDim\n squash = float(resizeHeightPixels) / float(HeightPixels)\n resizeWidthPixels = int(WidthPixels * squash)\n else:\n # // Resize Image To Closest Power Of Two\n if WidthPixels > HeightPixels:\n # Width is the domainant dimension.\n resizeWidthPixels = next_p2(WidthPixels)\n squash = float(resizeWidthPixels) / float(WidthPixels)\n resizeHeightPixels = int(HeighPixels * squash)\n else:\n resizeHeightPixels = next_p2(HeightPixels)\n squash = float(resizeHeightPixels) / float(HeightPixels)\n resizeWidthPixels = int(WidthPixels * squash)\n #\n # Resize the image to be used as a texture.\n # The Python image library provides a handy method resize ().\n # Several filtering options are available.\n # If you don't specify a filtering option will default NEAREST\n Picture = Picture.resize((resizeWidthPixels, resizeHeightPixels), Image.BICUBIC)\n lWidthPixels = next_p2(resizeWidthPixels)\n lHeightPixels = next_p2(resizeWidthPixels)\n # Now we create an image that has the padding needed\n newpicture = Image.new(\"RGB\", (lWidthPixels, lHeightPixels), (0, 0, 0))\n newpicture.paste(Picture)\n\n # Create a raw string from the image data - data will be unsigned bytes\n # RGBpad, no stride (0), and first line is top of image (-1)\n pBits = (\n newpicture.tostring(\"raw\", \"RGBX\", 0, -1)\n if hasattr(newpicture, \"tostring\")\n else newpicture.tobytes(\"raw\", \"RGBX\", 0, -1)\n )\n\n # // Typical Texture Generation Using Data From The Bitmap\n texid = glGenTextures(1)\n # // Create The Texture\n glBindTexture(GL_TEXTURE_2D, texid)\n # // Bind To The Texture ID\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n # // (Modify This For The Type Of Filtering You Want)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n # // (Modify This For The Type Of Filtering You Want)\n\n # // (Modify This If You Want Mipmaps)\n glTexImage2D(\n GL_TEXTURE_2D,\n 0,\n 3,\n lWidthPixels,\n lHeightPixels,\n 0,\n GL_RGBA,\n GL_UNSIGNED_BYTE,\n pBits,\n )\n\n # Cleanup (python actually handles all memory for you, so this isn't necessary)\n # // Decrements IPicture Reference Count\n Picture = None\n newpicture = None\n return True, texid # // Return True (All Good)", "def loadImage():\n\timageName = \"images/velazquez_texture_256.jpg\" \n\t# PIL defines an \"open\" method which is Image specific!\n\tim = open(imageName)\n\ttry:\n\t\tix, iy, image = im.size[0], im.size[1], im.tobytes(\"raw\", \"RGBA\", 0, -1)\n\texcept (SystemError, ValueError):\n\t\tix, iy, image = im.size[0], im.size[1], im.tobytes(\"raw\", \"RGBX\", 0, -1)\n\texcept AttributeError:\n\t\tix, iy, image = im.size[0], im.size[1], im.tostring(\"raw\", \"RGBX\", 0, -1)\n\n\t# Generate a texture ID\n\tID = glGenTextures(1)\n\n\t# Make our new texture ID the current 2D texture\n\tglBindTexture(GL_TEXTURE_2D, ID)\n\tglPixelStorei(GL_UNPACK_ALIGNMENT,1)\n\n\t# Copy the texture data into the current texture ID\n\tglTexImage2D(GL_TEXTURE_2D, 0, GL_RGBA, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, image)\n\n\t# Note that only the ID is returned, no reference to the image object or 
the \n\t# string data is stored in user space. \n\t# The data is only present within the GL after this call exits.\n\treturn ID", "def get_texture(filename, x, y):\r\n return arcade.load_texture(filename, x * 64, y * 64, width=64, height=64)", "def get_texture(filename, x, y):\r\n return arcade.load_texture(filename, x * 64, y * 64, width=64, height=64)", "def load(self):\r\n self._open_image()\r\n\r\n components, data = image_data(self.image)\r\n\r\n texture = self.ctx.texture(self.image.size, components, data,)\r\n texture.extra = {\"meta\": self.meta}\r\n\r\n if self.meta.mipmap_levels is not None:\r\n self.meta.mipmap = True\r\n\r\n if self.meta.mipmap:\r\n if isinstance(self.meta.mipmap_levels, tuple):\r\n texture.build_mipmaps(*self.meta.mipmap_levels)\r\n else:\r\n texture.build_mipmaps()\r\n\r\n if self.meta.anisotropy:\r\n texture.anisotropy = self.meta.anisotropy\r\n\r\n self._close_image()\r\n\r\n return texture", "def load(self):\r\n self._open_image()\r\n\r\n # Handle images with palettes\r\n if self.image.palette and self.image.palette.mode == 'RGB':\r\n logger.debug(\"Converting P image to RGB using palette\")\r\n self.image = self.image.convert('RGB', palette=self.image.palette)\r\n\r\n components, data = image_data(self.image)\r\n\r\n texture = self.ctx.texture(\r\n self.image.size,\r\n components,\r\n data,\r\n )\r\n texture.extra = {'meta': self.meta}\r\n\r\n if self.meta.mipmap_levels is not None:\r\n self.meta.mipmap = True\r\n\r\n if self.meta.mipmap:\r\n if isinstance(self.meta.mipmap_levels, tuple):\r\n texture.build_mipmaps(*self.meta.mipmap_levels)\r\n else:\r\n texture.build_mipmaps()\r\n\r\n if self.meta.anisotropy:\r\n texture.anisotropy = self.meta.anisotropy\r\n\r\n self._close_image()\r\n\r\n return texture", "def load_texture_pair(self, filename, x_inc, y_inc, width, height):\n return [(arcade.load_texture(filename, x=x_inc, y=y_inc, width= width, height=height )), (arcade.load_texture(filename, x=x_inc, y=y_inc, width= width, height=height, flipped_horizontally=True))]", "def loadImage( imageName ):\n im = Image.open(imageName)\n try:\n # get image meta-data (dimensions) and data\n ix, iy, image = im.size[0], im.size[1], im.tostring(\"raw\", \"RGBA\", 0, -1)\n except SystemError:\n # has no alpha channel, synthesize one, see the\n # texture module for more realistic handling\n ix, iy, image = im.size[0], im.size[1], im.tostring(\"raw\", \"RGBX\", 0, -1)\n # generate a texture ID\n ID = glGenTextures(1)\n # make it current\n glBindTexture(GL_TEXTURE_2D, ID)\n glPixelStorei(GL_UNPACK_ALIGNMENT,1)\n # copy the texture into the current texture ID\n #glTexImage2D(GL_TEXTURE_2D, 0, 4, ix, iy, 0, GL_RGBA, GL_UNSIGNED_BYTE, image)\n gluBuild2DMipmaps(GL_TEXTURE_2D, GL_RGBA, ix, iy, GL_RGBA, GL_UNSIGNED_BYTE, image)\n \n # return the ID for use\n return ID", "def prepare_texture(resource):\n global tex\n image = pygame.image.load(resource)\n image = pygame.transform.scale(image, (Case.pixel_size, Case.pixel_size))\n image_rect = image.get_rect()\n return image, image_rect", "def _texture_2d(self, data):\n # Start by resolving the texture format\n try:\n format_info = pixel_formats[self._dtype]\n except KeyError:\n raise ValueError(\n f\"dype '{self._dtype}' not support. 
Supported types are : {tuple(pixel_formats.keys())}\"\n )\n _format, _internal_format, self._type, self._component_size = format_info\n if data is not None:\n byte_length, data = data_to_ctypes(data)\n self._validate_data_size(data, byte_length, self._width, self._height)\n\n # If we are dealing with a multisampled texture we have less options\n if self._target == gl.GL_TEXTURE_2D_MULTISAMPLE:\n gl.glTexImage2DMultisample(\n self._target,\n self._samples,\n _internal_format[self._components],\n self._width,\n self._height,\n True, # Fixed sample locations\n )\n return\n\n # Make sure we unpack the pixel data with correct alignment\n # or we'll end up with corrupted textures\n gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT, self._alignment)\n gl.glPixelStorei(gl.GL_PACK_ALIGNMENT, self._alignment)\n\n # Create depth 2d texture\n if self._depth:\n gl.glTexImage2D(\n self._target,\n 0, # level\n gl.GL_DEPTH_COMPONENT24,\n self._width,\n self._height,\n 0,\n gl.GL_DEPTH_COMPONENT,\n gl.GL_UNSIGNED_INT, # gl.GL_FLOAT,\n data,\n )\n self.compare_func = \"<=\"\n # Create normal 2d texture\n else:\n try:\n self._format = _format[self._components]\n self._internal_format = _internal_format[self._components]\n\n if self._immutable:\n # Specify immutable storage for this texture.\n # glTexStorage2D can only be called once\n gl.glTexStorage2D(\n self._target,\n 1, # Levels\n self._internal_format,\n self._width,\n self._height,\n )\n if data:\n self.write(data)\n else:\n # Specify mutable storage for this texture.\n # glTexImage2D can be called multiple times to re-allocate storage\n gl.glTexImage2D(\n self._target, # target\n 0, # level\n self._internal_format, # internal_format\n self._width, # width\n self._height, # height\n 0, # border\n self._format, # format\n self._type, # type\n data, # data\n )\n except gl.GLException as ex:\n raise gl.GLException(\n (\n f\"Unable to create texture: {ex} : dtype={self._dtype} \"\n f\"size={self.size} components={self._components} \"\n f\"MAX_TEXTURE_SIZE = {self.ctx.info.MAX_TEXTURE_SIZE}\"\n )\n )", "def pygame_load_texture(filename, filter=True, mipmap=True):\n if filename not in _texture_cache:\n pygame = __import__(\"pygame\", {},{},[])\n if os.path.exists(filename):\n img = pygame.image.load(filename)\n else:\n img = pygame.image.load(os.path.join(data_directory, filename))\n data, size = pygame.image.tostring(img, 'RGBA', True), img.get_size()\n _texture_cache[filename] = load_texture(data, size, \"RGBA\",\n filter, mipmap), size\n return _texture_cache[filename]", "def __init__(self, filename, mipmaps=False):\n print(\"Loading Texture \" + filename)\n\n self.mipmaps = mipmaps\n self.filename = filename\n\n self.image = pyglet.resource.image(self.filename)\n self.texture = self.image.texture\n self._verify('width')\n self._verify('height')\n\n if self.mipmaps:\n glGenerateMipmap(self.texture.target)", "def _load_opengl(self):\r\n opengles.glGenTextures(4, ctypes.byref(self._tex), 0)\r\n from pi3d.Display import Display\r\n if Display.INSTANCE:\r\n Display.INSTANCE.textures_dict[str(self._tex)] = [self._tex, 0]\r\n opengles.glBindTexture(GL_TEXTURE_2D, self._tex)\r\n RGBv = GL_RGBA if self.alpha else GL_RGB\r\n opengles.glTexImage2D(GL_TEXTURE_2D, 0, RGBv, self.ix, self.iy, 0, RGBv,\r\n GL_UNSIGNED_BYTE,\r\n ctypes.string_at(self.image, len(self.image)))\r\n opengles.glEnable(GL_TEXTURE_2D)\r\n opengles.glGenerateMipmap(GL_TEXTURE_2D)\r\n opengles.glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\r\n if self.mipmap:\r\n opengles.glTexParameteri(GL_TEXTURE_2D, 
GL_TEXTURE_MIN_FILTER,\r\n GL_LINEAR_MIPMAP_NEAREST)\r\n opengles.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,\r\n GL_LINEAR)\r\n else:\r\n opengles.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,\r\n GL_NEAREST)\r\n opengles.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,\r\n GL_NEAREST)\r\n opengles.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,\r\n GL_MIRRORED_REPEAT)\r\n opengles.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,\r\n GL_MIRRORED_REPEAT)", "def loadImage( self, imageName = \"empty-wall.bmp\" ):\n im = open(imageName)\n try:\n ix, iy, image = im.size[0], im.size[1], im.tobytes(\"raw\", \"RGB\", 0, -1)\n except SystemError:\n ix, iy, image = im.size[0], im.size[1], im.tobytes(\"raw\", \"RGB\", 0, -1)\n\n img = open(imageName).transpose(FLIP_TOP_BOTTOM)\n img_data = numpy.fromstring(img.tobytes(), numpy.uint8)\n\n ID = glGenTextures(1)\n glBindTexture(GL_TEXTURE_2D, ID)\n glPixelStorei(GL_UNPACK_ALIGNMENT,1)\n\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n glTexImage2D(\n GL_TEXTURE_2D, 0, GL_RGB, ix, iy, 0,\n GL_RGB, GL_UNSIGNED_BYTE, image\n )\n return ID", "def setupTexture( self ):\n glEnable(GL_TEXTURE_2D)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)\n glBindTexture(GL_TEXTURE_2D, self.imageID)", "def load_texture_pair(filename):\n texture = arcade.load_texture(filename)\n return [texture, texture.flip_left_right()]", "def load_textures(file_name, image_location_list,\n mirrored=False, flipped=False):\n source_image = PIL.Image.open(file_name)\n\n source_image_width, source_image_height = source_image.size\n texture_info_list = []\n for image_location in image_location_list:\n x, y, width, height = image_location\n\n if width <= 0:\n raise ValueError(\"Texture has a width of {}, must be > 0.\"\n .format(width))\n if x > source_image_width:\n raise ValueError(\"Can't load texture starting at an x of {} \"\n \"when the image is only {} across.\"\n .format(x, source_image_width))\n if y > source_image_height:\n raise ValueError(\"Can't load texture starting at an y of {} \"\n \"when the image is only {} high.\"\n .format(y, source_image_height))\n if x + width > source_image_width:\n raise ValueError(\"Can't load texture ending at an x of {} \"\n \"when the image is only {} wide.\"\n .format(x + width, source_image_width))\n if y + height > source_image_height:\n raise ValueError(\"Can't load texture ending at an y of {} \"\n \"when the image is only {} high.\"\n .format(y + height, source_image_height))\n\n image = source_image.crop((x, y, x + width, y + height))\n # image = _trim_image(image)\n\n if mirrored:\n image = PIL.ImageOps.mirror(image)\n\n if flipped:\n image = PIL.ImageOps.flip(image)\n\n image_width, image_height = image.size\n image_bytes = image.convert(\"RGBA\").tobytes(\"raw\", \"RGBA\", 0, -1)\n\n texture = GL.GLuint(0)\n GL.glGenTextures(1, ctypes.byref(texture))\n\n GL.glBindTexture(GL.GL_TEXTURE_2D, texture)\n GL.glPixelStorei(GL.GL_UNPACK_ALIGNMENT, 1)\n\n # Appveyor work-around\n appveyor = True\n if not appveyor:\n GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S,\n GL.GL_CLAMP_TO_BORDER)\n GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T,\n 
GL.GL_CLAMP_TO_BORDER)\n else:\n GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_S,\n GL.GL_REPEAT)\n GL.glTexParameterf(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_WRAP_T,\n GL.GL_REPEAT)\n\n GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MAG_FILTER,\n GL.GL_LINEAR)\n GL.glTexParameteri(GL.GL_TEXTURE_2D, GL.GL_TEXTURE_MIN_FILTER,\n GL.GL_LINEAR_MIPMAP_LINEAR)\n GLU.gluBuild2DMipmaps(GL.GL_TEXTURE_2D, GL.GL_RGBA,\n image_width, image_height,\n GL.GL_RGBA, GL.GL_UNSIGNED_BYTE, image_bytes)\n\n texture_info_list.append(Texture(texture, width, height))\n\n return texture_info_list", "def texture_from_image(renderer, image_name):\n soft_surface = ext.load_image(image_name)\n texture = SDL_CreateTextureFromSurface(renderer.renderer, soft_surface)\n SDL_FreeSurface(soft_surface)\n return texture", "def loadImage( self, imageName = \"nehe_wall.bmp\" ):\n try:\n from PIL.Image import open\n except ImportError, err:\n from Image import open\n glActiveTexture(GL_TEXTURE0_ARB);\n return texture.Texture( open(imageName) )", "def load_texture_pair(filename):\n return [\n arcade.load_texture(filename),\n arcade.load_texture(filename, mirrored=True)\n ]", "def load_texture_pair(filename):\n return [\n arcade.load_texture(filename),\n arcade.load_texture(filename, mirrored=True)\n ]", "def load_texture_pair(self, filename):\n return [\n arcade.load_texture(filename),\n arcade.load_texture(filename, flipped_horizontally=True)\n ]", "def pyglet_load_texture(filename):\n if filename not in _texture_cache:\n image = __import__(\"pyglet\", {},{},[\"image\"]).image\n if os.path.exists(filename):\n img = image.load(filename)\n else:\n img = image.load(os.path.join(data_directory, filename))\n _texture_cache[filename] = img.texture\n return _texture_cache[filename]", "def load_image(filename):\r\n image = pygame.image.load(filename)\r\n image = image.convert_alpha()\r\n return image, image.get_rect()", "def load_image():\n # pylint: disable=global-statement\n global current_frame, current_loop, frame_count, frame_duration, bitmap\n while sprite_group:\n sprite_group.pop()\n\n filename = SPRITESHEET_FOLDER + \"/\" + file_list[current_image]\n\n bitmap = displayio.OnDiskBitmap(filename)\n ### Change the palette value proportional to BRIGHTNESS\n bitmap.pixel_shader[1] = image_brightness(brightness)\n sprite = displayio.TileGrid(\n bitmap,\n pixel_shader=bitmap.pixel_shader,\n tile_width=bitmap.width,\n tile_height=matrix.display.height,\n )\n\n sprite_group.append(sprite)\n\n current_frame = 0\n current_loop = 0\n frame_count = int(bitmap.height / matrix.display.height)\n frame_duration = DEFAULT_FRAME_DURATION", "def test_projectedtexture(pngfile):\n tex = omf.ProjectedTexture()\n tex.image = pngfile\n assert tex.validate()", "def atlas_load(filename):\n\tt = pyglet.image.load(os.path.join('textures','ground.png'))\n\ttex = atlas.add(t)\n\treturn tex", "def setupTexture(self):\r\n # Configure the texture rendering parameters\r\n glEnable(GL_TEXTURE_2D)\r\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\r\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\r\n glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)\r\n\r\n # Re-select our texture, could use other generated textures\r\n # if we had generated them earlier.\r\n glBindTexture(GL_TEXTURE_2D, self.imageID)", "def load_tile(path, tile_size):\n img = pyglet.resource.image(path)\n img.width = tile_size\n img.height = tile_size\n return img", "def load_file(path):\n with open(path, \"rb\") as f: # bsps are binary files\n 
byte_list = f.read() # stores all bytes in bytes1 variable (named like that to not interfere with builtin names\n header = load_header(byte_list)\n skin_names = [byte_list[header.ofs_skins + 64 * x:header.ofs_skins + 64 * x + 64].decode(\"ascii\", \"ignore\") for x in range(header.num_skins)]\n triangles = load_triangles(byte_list[header.ofs_tris:header.ofs_frames], header)\n frames = load_frames(byte_list[header.ofs_frames:header.ofs_glcmds], header)\n texture_coordinates = load_texture_coordinates(byte_list[header.ofs_st:header.ofs_tris], header)\n gl_commands = load_gl_commands(byte_list[header.ofs_glcmds:header.ofs_end])\n # print(header)\n # print(skin_names)\n # print(triangles)\n # print(frames)\n # print(texture_coordinates)\n for i in range(len(texture_coordinates)):\n texture_coordinates[i].s = texture_coordinates[i].s/header.skinwidth\n texture_coordinates[i].t = texture_coordinates[i].t / header.skinheight\n # print(texture_coordinates)\n # print(header.num_xyz)\n for i_frame in range(len(frames)):\n for i_vert in range((header.num_xyz)):\n frames[i_frame].verts[i_vert].v[0] = frames[i_frame].verts[i_vert].v[0]*frames[i_frame].scale.x+frames[i_frame].translate.x\n frames[i_frame].verts[i_vert].v[1] = frames[i_frame].verts[i_vert].v[1] * frames[i_frame].scale.y + frames[i_frame].translate.y\n frames[i_frame].verts[i_vert].v[2] = frames[i_frame].verts[i_vert].v[2] * frames[i_frame].scale.z + frames[i_frame].translate.z\n model = md2_object(header, skin_names, triangles, frames, texture_coordinates, gl_commands)\n return model", "def get_texture_sequence(filename, tilewidth=32, tileheight=32, margin=1, spacing=1, nearest=False):\n\n image = pyglet.resource.image(filename)\n region = image.get_region(margin, margin, image.width-margin*2, image.height-margin*2)\n\n # we've already thrown away the margins\n rows = calculate_columns(region.height, tileheight, margin=0, spacing=spacing)\n cols = calculate_columns(region.width, tilewidth, margin=0, spacing=spacing)\n\n grid = pyglet.image.ImageGrid(region,\n rows,\n cols,\n row_padding=spacing,\n column_padding=spacing,\n )\n\n\n texture = grid.get_texture_sequence()\n\n if nearest:\n gl.glTexParameteri(texture.target, gl.GL_TEXTURE_MIN_FILTER, gl.GL_NEAREST)\n gl.glTexParameteri(texture.target, gl.GL_TEXTURE_MAG_FILTER, gl.GL_NEAREST)\n\n return texture", "def load_image(fname):\n return load_tiff(fname)", "def populate_texture(self, texture):\n texture.blit_buffer(self._cbuffer, colorfmt='bgr', bufferfmt='ubyte')", "def load_image(name, colorkey=None, char_scale=False):\n fullname = os.path.join('..\\\\','sprites')\n fullname = os.path.join(fullname, name)\n try:\n image = pygame.image.load(fullname)\n except pygame.error, message:\n print 'Cannot load image:', fullname\n raise SystemExit, message\n image = image.convert()\n if char_scale:\n image = pygame.transform.scale2x(image)\n if colorkey is not None:\n if colorkey is -1:\n colorkey = image.get_at((0,0))\n image.set_colorkey(colorkey, RLEACCEL)\n return image, image.get_rect()", "def texture( self, mode ):\n texture = mode.cache.getData( self, 'texture' )\n if texture is None:\n texture = glGenTextures( 1 )\n holder = mode.cache.holder( self, texture, 'texture' )\n return texture", "def load_tmp_atlas(filename):\n fbase, ext = osp.splitext(filename)\n fimg = None\n if osp.isfile(fbase+\".nii\"): fimg = fbase+\".nii\"\n if osp.isfile(fbase+\".nii.gz\"): fimg = fbase+\".nii.gz\" \n\n try:\n img = nib.load(fimg)\n except ValueError as e:\n print(\"error {0}, cannot find file {1} 
.nii or .nii.gz \".format(fbase, e.errno))\n\n fjson = None\n if osp.isfile(fbase+\".txt\"): fjson= fbase+\".txt\"\n if osp.isfile(fbase+\".json\"): fjson= fbase+\".json\"\n\n if fjson == None:\n warn(\"cannot find file %s .txt or .json\" % fbase)\n return None\n\n with open(fjson) as f:\n j_labels = json.load(f)\n\n a_labels = [label[1] for label in j_labels]\n \n return (img.get_data(), img.get_affine(), a_labels)", "def loadimg(filename, rect=False):\n filename = os.path.join('data', filename)\n try:\n img = pygame.image.load(filename)\n if img.get_alpha is None:\n img = img.convert()\n else:\n img = img.convert_alpha()\n except pygame.error, message:\n print \"Impossible de charger l'image : \", filename\n raise SystemExit, message\n if rect:\n return img, img.get_rect()\n else:\n return img", "def _load_image(self, id_: str) -> Tensor:\n filename = os.path.join(self.root, \"output\", id_ + \".jpg\")\n with Image.open(filename) as img:\n array = np.array(img)\n tensor: Tensor = torch.from_numpy(array) # type: ignore[attr-defined]\n # Convert from HxWxC to CxHxW\n tensor = tensor.permute((2, 0, 1))\n return tensor", "def autodetect_load_texture(filename):\n\n pyglet = sys.modules.get(\"pyglet\", None)\n pygame = sys.modules.get(\"pygame\", None)\n\n if pyglet and pygame:\n # Both pygame and pyglet have been loaded, so we check if a pyglet\n # context has been created.\n get_current_context = __import__(\"pyglet.gl\",\n {},{},['get_current_context']).get_current_context\n if get_current_context():\n func = pyglet_load_texture\n else:\n func = pygame_load_texture\n elif pyglet:\n func = pyglet_load_texture\n else:\n func = pygame_load_texture\n\n set_load_texture_file_hook(func)\n return func(filename)", "def load(self):\n\t\tglTexImage3D(GL_TEXTURE_3D, 0, GL_LUMINANCE16_ALPHA16, \n\t\t\tself.width, self.width, self.width, 0, GL_LUMINANCE_ALPHA, \n\t\t\tGL_UNSIGNED_SHORT, ctypes.byref(self.data))", "def load(f, as_grey=False):\n use_plugin('pil')\n return imread(os.path.join(assets, f), as_grey=as_grey)", "def load_png(name):\n fullname = os.path.join('data', name)\n try:\n image = pygame.image.load(fullname)\n if image.get_alpha is None:\n image = image.convert()\n else:\n image = image.convert_alpha()\n except pygame.error, message:\n print 'Cannot load image:', fullname\n raise SystemExit, message\n return image, image.get_rect()", "def setup_texture(func):\n\n def new_func():\n \"\"\"Create png image and pass to func\"\"\"\n dirname, _ = os.path.split(os.path.abspath(__file__))\n pngfile = os.path.sep.join([dirname, \"out.png\"])\n img = [\"110010010011\", \"101011010100\", \"110010110101\", \"100010010011\"]\n img = [[int(val) for val in value] for value in img]\n writer = png.Writer(len(img[0]), len(img), greyscale=True, bitdepth=16)\n with open(pngfile, \"wb\") as file:\n writer.write(file, img)\n try:\n func(pngfile)\n finally:\n os.remove(pngfile)\n\n return new_func", "def loadLightMap( self, imageName = \"lightmap1.jpg\" ):\n try:\n from PIL.Image import open\n except ImportError, err:\n from Image import open\n glActiveTextureARB(GL_TEXTURE1);\n return texture.Texture( open(imageName) )", "def loadImageAsPlane(yresolution = 600):\n tex = Texture() #loader.loadTexture(filepath)\n tex.setBorderColor(Vec4(0,0,0,0))\n tex.setWrapU(Texture.WMBorderColor)\n tex.setWrapV(Texture.WMBorderColor)\n cm = CardMaker('card')\n #cm.setFrame(-tex.getOrigFileXSize(), tex.getOrigFileXSize(), -tex.getOrigFileYSize(), tex.getOrigFileYSize())\n k = 60\n cm.setFrame(-k,k,-k,k)\n card = 
NodePath(cm.generate())\n card.setTexture(tex)\n card.setScale(card.getScale()/ yresolution)\n card.flattenLight() # apply scale\n return tex, card", "def load_image(self, filename):\n return pygame.image.load(os.path.join('images', filename))", "def load_image(file_path):\r\n return Image.open(file_path)", "def import_PNG(filename, device='cuda:0'):\n\n return numpy2torch(numpy.array(Image.open(filename), dtype=float) / 255, dtype=torch.float, device=device)", "def load(image_path):\n\tpil_image = Image.open(image_path).convert(\"RGB\")\n\t# convert to BGR format\n\timage = np.array(pil_image)[:, :, [2, 1, 0]]\n\treturn image", "def parse_textures(\n file_path: str,\n node: Node,\n verbose=False,\n):\n file_path = Path(file_path)\n if file_path.is_file() and file_path.suffix.lower() == \".txv\":\n file_path = os.path.splitext(file_path)[0] + \".TXM\"\n\n binary_file = open(file_path, 'rb')\n node.name = os.path.splitext(os.path.basename(file_path))[0]\n g = BinaryReader(binary_file)\n g.endian = \">\"\n\n current_offset = g.tell()\n node.offset = current_offset\n\n # Handle TXM and TXV file\n g.seek(current_offset)\n A = g.unpack(\"4B3i\")\n logger.debug({\n \"A\": A,\n \"A[6]\": A[6],\n })\n g.seek(current_offset + 16)\n\n txv_file = os.path.splitext(file_path)[0] + \".TXV\"\n logger.debug(\"txv_file: %s\" % txv_file)\n try:\n with open(txv_file, 'rb') as f:\n txv_content = f.read()\n except FileNotFoundError:\n raise Exception(f\"{txv_file} not found! Make sure it is in the same directory\")\n dds_offset = list(find_substring_offset(txv_content, DDS_HEADER))\n dds_size = dds_offset[1]-dds_offset[0]\n logger.debug({\n \"dds_offset\": dds_offset,\n \"dds_size\": dds_size,\n })\n\n image_list = []\n for i, m in enumerate(range(A[6])):\n logger.debug(\"%s>\" % ('=' * 200))\n B = g.i(7)\n tm = g.tell()\n g.seek(tm - 4 + B[6])\n name = g.find(b\"\\x00\")\n current_total_offset = current_offset + A[4] + B[0]\n logger.debug({\n \"current_offset\": current_offset,\n \"A[4]\": A[4],\n \"B[0]\": B[0],\n \"current_total_offset\": current_total_offset,\n })\n g.seek(current_total_offset)\n\n texture_name = name + \".DDS\"\n logger.debug({\n \"texture_name\": texture_name,\n })\n\n image_data = {\n \"texture_name\": texture_name,\n \"dds_content\": txv_content[dds_offset[i]:dds_offset[i] + dds_size - 4],\n }\n\n image_list.append(image_data)\n logger.info({\n \"msg\": \"Successfully parsed texture\",\n \"texture_name\": texture_name,\n })\n logger.debug(\"tm: %s\" % tm)\n g.seek(tm)\n\n node.data[\"image_list\"] = image_list\n g.close()", "def compute_textures(\n src_image,\n src_profile,\n dst_dir,\n kind='simple',\n x_radius=5,\n y_radius=5,\n x_offset=1,\n y_offset=1,\n image_min=0,\n image_max=255,\n nb_bins=64,\n prefix='',\n):\n # Write input image to disk\n profile = src_profile.copy()\n profile.update(dtype='uint8', transform=None, count=1)\n tmp_dir = tempfile.TemporaryDirectory()\n tmp_image = os.path.join(tmp_dir.name, 'image.tif')\n with rasterio.open(tmp_image, 'w', **profile) as dst:\n dst.write(src_image.astype(np.uint8), 1)\n\n # Run OTB command\n tmp_glcm = os.path.join(tmp_dir.name, 'glcm.tif')\n subprocess.run([\n 'otbcli_HaralickTextureExtraction', '-in', tmp_image,\n '-parameters.xrad', str(x_radius), '-parameters.yrad', str(y_radius),\n '-parameters.xoff', str(x_offset), '-parameters.yoff', str(y_offset),\n '-parameters.min', str(image_min), '-parameters.max', str(image_max),\n '-parameters.nbbin', str(nb_bins), '-texture', kind,\n '-out', tmp_glcm, 'double'\n ], 
stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)\n\n # Save each texture in an individual GeoTIFF\n os.makedirs(dst_dir, exist_ok=True)\n with rasterio.open(tmp_glcm) as src:\n\n for i, texture in enumerate(TEXTURES[kind]):\n \n profile = src.profile\n img = src.read(i+1).astype(np.float64)\n\n # Linear rescale and convert to UINT16\n img = np.interp(img, (img.min(), img.max()), (0, 65535))\n img = img.astype(np.uint16)\n profile.update(dtype=img.dtype.name)\n\n # Save as 1-band compressed GeoTIFF\n profile.update(compression='LZW', count=1, transform=None)\n filename = f'{prefix}{texture}_{x_radius*2+1}x{y_radius*2+1}.tif'\n output_file = os.path.join(dst_dir, filename)\n with rasterio.open(output_file, 'w', **profile) as dst:\n dst.write(img, 1)\n\n tmp_dir.cleanup()\n return", "def get_texture(self, label: str) -> Union[moderngl.Texture, moderngl.TextureArray,\r\n moderngl.Texture3D, moderngl.TextureCube]:\r\n return self._get_resource(label, self._textures, \"texture\")", "def loadImage(self, file_name):\n self.surf = pygame.image.load(file_name)\n self.draw_angle = 0 # In degrees\n self.bullets = []", "def __draw_board_texture(self, texture):\n\n textureWidth, textureHeight = texture.size\n\n for x in range(0, self.width, textureWidth):\n for y in range(0, self.height, textureHeight):\n self.baseImage.paste(texture, (x, y))", "def load_image(filename):\n return tf.gfile.FastGFile(filename, 'rb').read()", "def _load_image(file: str) -> pyglet.image.AbstractImage:\n\n return pyglet.image.load(Config.RES_DIR + \"img\" + Config.FILE_SEPARATOR + file)", "def load_image(filename):\r\n \r\n # Load the file\r\n print \"INFO: Loading Image: \" +str(filename)\r\n image = Image.open(filename)\r\n pixels = image.load()\r\n print \"INFO: Image loaded.\"\r\n \r\n return (image, pixels)", "def import_texture(self, texture):\n logger.debug((\"texture\", texture))\n if texture in self._imported_assets:\n return self._imported_assets[texture]\n else:\n texture = texture.decode()\n tex = self.gridinfo.getAsset(texture)\n if \"name\" in tex:\n tex_name = tex[\"name\"]\n try:\n btex = bpy.data.textures[tex_name]\n # XXX should update\n return btex\n except:\n dest = self.decode_texture(texture, tex_name, tex[\"data\"])\n self.parse_texture(texture, tex_name, dest)", "def load_and_process_iuv(self, iuv_path, i):\n iuv_map = Image.open(iuv_path).convert('RGBA')\n iuv_map = transforms.ToTensor()(iuv_map)\n uv_map, mask, pids = self.iuv2input(iuv_map, i)\n return uv_map, mask, pids", "def load_image(filename):\n image = Image.open(filename).convert('RGB')\n image_tensor = loader(image).float()\n image_var = Variable(image_tensor).unsqueeze(0)\n return image_var.cuda()", "def _tile_image(self, data):\n image = Image.open(BytesIO(data))\n return image.convert('RGBA')", "def load_materials(file_data, headers, base_path):\n\n\n def load_material_texture(texture_file):\n filename = os.path.join(base_path, texture_file + \".jpg\")\n try:\n img = bpy.data.images.load(str(filename))\n cTex = bpy.data.textures.new('ColorTex', type = 'IMAGE')\n cTex.image = img\n return cTex\n except:\n print (\"Cannot load image {}\".format(filename))\n return None\n\n\n def material_from_pack(material):\n \"\"\" \n Extract just the data we want from the full chunk\n \"\"\"\n texture_file_name = material[0].decode(\"utf-8\").replace('\\x00', '').strip()\n return (\n texture_file_name,\n load_material_texture(texture_file_name)\n )\n texture_offset, texture_length = headers[1]\n texture_chunk = Struct(\"64sii\") \n texture_size = 
texture_chunk.size\n texture_count = int(texture_length / texture_size)\n\n textures = []\n for current_texture_idx in range(texture_count):\n texture_file_position = texture_offset + current_texture_idx * texture_size\n packed_texture = texture_chunk.unpack(file_data[texture_file_position : texture_file_position+texture_size])\n current_texture = material_from_pack(packed_texture)\n textures.append(current_texture)\n \n return textures", "def load_image(img_file_name):\n file_name = os.path.join('.', 'images', img_file_name)\n img = pygame.image.load(file_name)\n img.convert()\n return img", "def load_image(name, colorkey=None):\n fullname = os.path.join('img', name)\n image = pygame.image.load(fullname)\n image = image.convert()\n if colorkey is not None:\n if colorkey == -1:\n colorkey = image.get_at((0,0))\n image.set_colorkey(colorkey, RLEACCEL)\n return image, image.get_rect()", "def load_textures(self):\n for texture_meta in self.gltf.textures:\n texture = MaterialTexture()\n\n if texture_meta.source is not None:\n texture.texture = self.images[texture_meta.source]\n\n if texture_meta.sampler is not None:\n texture.sampler = self.samplers[texture_meta.sampler]\n\n self.textures.append(texture)", "def load_image(file):\n\n\tfile = os.path.join(DIR_MENU_PICTURES, file)\n\ttry:\n\t\tsurface = pygame.image.load(file)\n\texcept pygame.error:\n\t\terror = \"Could not load image \\\"%s\\\" %s\"%(file, pygame.get_error())\n\t\traise SystemExit(error)\n\treturn surface.convert()", "def load_image_file(filename, mode='RGB'):\n return imread(filename, mode=mode)", "def pil_loader(path):\n with open(path, 'rb') as f:\n with Image.open(f) as img:\n sqrWidth = np.ceil(np.sqrt(img.size[0]*img.size[1])).astype(int)\n return img.convert('L').resize((sqrWidth, sqrWidth))", "def load_image(self, **kwargs):\n ...", "def load_img(name):\n\tpath = os.path.join(IMG_DIR, name)\n\timage = pygame.image.load(path)\n\tif image.get_alpha is None:\n\t\timage = image.convert()\n\telse:\n\t\timage = image.convert_alpha()\n\timage_rect = image.get_rect()\n\treturn image, image_rect", "def load_shadowed_texture(file, shader, depth, tex_file=None, type=1):\n global light_dir\n try:\n pp = assimpcy.aiPostProcessSteps\n flags = pp.aiProcess_Triangulate | pp.aiProcess_FlipUVs\n scene = assimpcy.aiImportFile(file, flags)\n except assimpcy.all.AssimpError as exception:\n print('ERROR loading', file + ': ', exception.args[0].decode())\n return []\n\n # Note: embedded textures not supported at the moment\n path = os.path.dirname(file) if os.path.dirname(file) != '' else './'\n for mat in scene.mMaterials:\n if not tex_file and 'TEXTURE_BASE' in mat.properties: # texture token\n name = os.path.basename(mat.properties['TEXTURE_BASE'])\n # search texture in file's whole subdir since path often screwed up\n paths = os.walk(path, followlinks=True)\n found = [os.path.join(d, f) for d, _, n in paths for f in n\n if name.startswith(f) or f.startswith(name)]\n assert found, 'Cannot find texture %s in %s subtree' % (name, path)\n tex_file = found[0]\n if tex_file:\n mat.properties['diffuse_map'] = Texture(file=tex_file)\n # prepare textured mesh\n meshes = []\n for mesh in scene.mMeshes:\n mat = scene.mMaterials[mesh.mMaterialIndex].properties\n assert mat['diffuse_map'], \"Trying to map using a textureless material\"\n attributes = [mesh.mVertices, mesh.mNormals, mesh.mTextureCoords[0]]\n mesh = ShadowMesh(shader, mat['diffuse_map'], attributes, type, depth, mesh.mFaces, light_dir= light_dir, k_d=mat.get('COLOR_DIFFUSE', (0.002, 
0.002, 0.002)), k_s=mat.get('COLOR_SPECULAR', (0.001, 0.001, 0.001)),s=mat.get('SHININESS', 1.))\n meshes.append(mesh)\n\n size = sum((mesh.mNumFaces for mesh in scene.mMeshes))\n print('Loaded %s\\t(%d meshes, %d faces)' % (file, len(meshes), size))\n return meshes", "def load_image(filename, color=True):\n img = skimage.img_as_float(skimage.io.imread(filename, as_grey=not color)).astype(np.float32)\n if img.ndim == 2:\n img = img[:, :, np.newaxis]\n if color:\n img = np.tile(img, (1, 1, 3))\n elif img.shape[2] == 4:\n img = img[:, :, :3]\n return img", "def image_loader(image_name):\n image = Image.open(image_name)\n image = loader(image).float()\n # image = Variable(image, requires_grad=True)\n image = image.unsqueeze(0) #this is for VGG, may not be needed for ResNet\n return image.to(device) #assumes that you're using GPU", "def imread(fname):\r\n return skimage.io.imread(fname)", "def _load_disk(self):\r\n s = self.file_string + ' '\r\n im = Image.open(self.file_string)\r\n\r\n self.ix, self.iy = im.size\r\n s += '(%s)' % im.mode\r\n self.alpha = (im.mode == 'RGBA' or im.mode == 'LA')\r\n\r\n if self.mipmap:\r\n resize_type = Image.BICUBIC\r\n else:\r\n resize_type = Image.NEAREST\r\n\r\n # work out if sizes > MAX_SIZE or coerce to golden values in WIDTHS\r\n if self.iy > self.ix and self.iy > MAX_SIZE: # fairly rare circumstance\r\n im = im.resize((int((MAX_SIZE * self.ix) / self.iy), MAX_SIZE))\r\n self.ix, self.iy = im.size\r\n n = len(WIDTHS)\r\n for i in xrange(n-1, 0, -1):\r\n if self.ix == WIDTHS[i]:\r\n break # no need to resize as already a golden size\r\n if self.ix > WIDTHS[i]:\r\n im = im.resize((WIDTHS[i], int((WIDTHS[i] * self.iy) / self.ix)),\r\n resize_type)\r\n self.ix, self.iy = im.size\r\n break\r\n\r\n if VERBOSE:\r\n print('Loading ...{}'.format(s))\r\n\r\n if self.flip:\r\n im = im.transpose(Image.FLIP_TOP_BOTTOM)\r\n\r\n RGBs = 'RGBA' if self.alpha else 'RGB'\r\n self.image = im.convert(RGBs).tostring('raw', RGBs)\r\n self._tex = ctypes.c_int()\r\n if 'fonts/' in self.file_string:\r\n self.im = im", "def read_environment_map(fn):\n if not fn.is_file():\n print('Nonexistent texture file:', fn)\n return None\n suffix = Path(fn).suffix\n if suffix in ['.jpg', '.png']:\n reader_factory = vtk.vtkImageReader2Factory()\n img_reader = reader_factory.CreateImageReader2(str(fn))\n img_reader.SetFileName(str(fn))\n\n texture = vtk.vtkTexture()\n texture.SetInputConnection(img_reader.GetOutputPort())\n else:\n reader = vtk.vtkHDRReader()\n extensions = reader.GetFileExtensions()\n # Check the image can be read.\n if not reader.CanReadFile(str(fn)):\n print('CanReadFile failed for ', fn)\n return None\n if suffix not in extensions:\n print('Unable to read this file extension: ', suffix)\n return None\n reader.SetFileName(str(fn))\n reader.Update()\n\n texture = vtk.vtkTexture()\n texture.SetColorModeToDirectScalars()\n texture.SetInputConnection(reader.GetOutputPort())\n\n # Convert to a cube map.\n tcm = vtk.vtkEquirectangularToCubeMapTexture()\n tcm.SetInputTexture(texture)\n # Enable mipmapping to handle HDR image.\n tcm.MipmapOn()\n tcm.InterpolateOn()\n\n return tcm", "def test_uvmappedtexture(pngfile):\n tex = omf.UVMappedTexture()\n tex.image = pngfile\n with pytest.raises(properties.ValidationError):\n tex.uv_coordinates = [0.0, 1.0, 0.5]\n tex.uv_coordinates = [[0.0, -0.5], [0.5, 1]]\n assert tex.validate()\n tex.uv_coordinates = [[0.0, 0.5], [0.5, np.nan]]\n assert tex.validate()\n\n points = omf.PointSet()\n points.vertices = [[0.0, 0, 0], [1, 1, 1], [2, 2, 2]]\n 
points.textures = [tex]\n with pytest.raises(properties.ValidationError):\n points.validate()\n points.vertices = [[0.0, 0, 0], [1, 1, 1]]\n assert points.validate()", "def _load_image(path, filename, bits, mode):\n if filename.rsplit('.')[1].lower() == 'dcm':\n ds = pydicom.dcmread(os.path.join(path, filename))\n m = ('I;16' if bits == 16 else 'L') if mode == 'L' else 'RGB'\n image = Image.frombuffer(\n m, (ds.Columns, ds.Rows), ds.PixelData, 'raw', m, 0, 1)\n else:\n image = Image.open(os.path.join(path, filename)).convert(mode)\n return image", "def hload_pil(filepath):\n img = Image.open(filepath)\n return img", "def load(filepath):\n canvas = Canvas(100, 100)\n canvas.img = PIL.Image.open(filepath)\n if not canvas.img.mode in (\"RGB\",\"RGBA\"):\n canvas.img = canvas.img.convert(\"RGBA\")\n canvas.drawer = aggdraw.Draw(canvas.img)\n canvas.pixel_space()\n return canvas", "def load_image(filename):\n rgb = imread(filename)\n return UncertainImage(rgb)", "def load_image(self, name, colorkey=None):\n dictname = name[0:name.find('.')]\n fullname = os.path.join('TeddyLevel','data', name)\n try:\n image = pygame.image.load(fullname)\n except pygame.error, message:\n print 'Cannot load image:', fullname\n raise SystemExit, message\n image = image.convert()\n if colorkey is not None:\n if colorkey is -1:\n colorkey = image.get_at((0,0))\n image.set_colorkey(colorkey, RLEACCEL)\n self.dict[dictname] = image, image.get_rect()", "def load_image(path, height, width, mode='RGB'):\n image = PIL.Image.open(path)\n image = image.convert(mode)\n image = np.array(image)\n # squash\n image = scipy.misc.imresize(image, (height, width), 'bilinear')\n return image", "def load(image_path):\n out = None\n\n #####################################\n # START YOUR CODE HERE #\n #####################################\n # Use skimage io.imread\n out = io.imread(image_path)\n ######################################\n # END OF YOUR CODE #\n ######################################\n\n return out", "def createByImage(path):\n try:\n mapdata = pygame.image.load(path)\n except:\n m = PositionMap()\n m.setWidth(1)\n m.setHeight(1)\n return m\n return createBySurface(mapdata)", "def load_image(self, image_id):\n info = self.image_info[image_id]\n bg_color = np.array(info['bg_color']).reshape([1, 1, 3])\n image = np.ones([info['height'], info['width'], 3], dtype=np.uint8)\n image = image * bg_color.astype(np.uint8)\n for ship, dims in info['ships']:\n image = self.draw_ship(image, ship, dims)\n return image", "def __init__(self,file_name):\n \n #Load the graphic\n self.sprite_sheet = pygame.image.load(file_name).convert()", "def load_png(name):\n fullname = os.path.join('img', name)\n try:\n image = pygame.image.load(fullname)\n if image.get_alpha is None:\n image = image.convert()\n else:\n image = image.convert_alpha()\n return image\n except pygame.error:\n print('Cannot load image:', fullname)", "def load_and_process_image(self, im_path):\n image = Image.open(im_path).convert('RGB')\n image = transforms.ToTensor()(image)\n image = 2 * image - 1\n return image", "def psdTextureFile(*args, channelRGB: Union[List[AnyStr, int, int, int, int], List[List[AnyStr,\n int, int, int, int]]]=None, channels: Union[List[AnyStr, int, bool],\n List[List[AnyStr, int, bool]]]=None, imageFileName: Union[List[AnyStr,\n AnyStr, int], List[List[AnyStr, AnyStr, int]]]=None, psdFileName: AnyStr=\"\",\n snapShotImageName: AnyStr=\"\", uvSnapPostionTop: bool=True, xResolution:\n int=0, yResolution: int=0, **kwargs)->None:\n pass", "def 
test_load_jpg():\n parameters = {'path': 'green-dot.jpg'}\n\n images.load(parameters)", "def pil_loader(path, color=True):\n imgExt = os.path.splitext(path)[1]\n if imgExt == \".npy\":\n img = np.load(path)[0]\n return np.swapaxes(np.swapaxes(img, 0, 2), 0, 1)\n\n # open path as file to avoid ResourceWarning\n # (https://github.com/python-pillow/Pillow/issues/835)\n with open(path, 'rb') as f:\n img = Image.open(f)\n if color:\n return img.convert('RGB')\n else:\n return img.convert('L')", "def image_loader(image_name):\n image = Image.open(image_name)\n image = loader(image).float()\n image = Variable(image, requires_grad=True)\n image = image.unsqueeze(0) #this is for VGG, may not be needed for ResNet\n return image.cuda() #assumes that you're using GPU", "def load_tile_table(filename, width, height):\n\ttry: \n\t\ttile_table = []\n\t\timage = pygame.image.load(filename).convert()\n\texcept:\n\t\tprint(\"Could not load tileset:\", filename)\n\telse:\n\t\timage_width, image_height = image.get_size()\n\t\tfor tile_x in range(0, int(image_width/width)):\n\t\t\tline = []\n\t\t\ttile_table.append(line)\n\t\t\tfor tile_y in range(0, int(image_height/height)):\n\t\t\t\trect = (tile_x*width, tile_y*height, width, height)\n\t\t\t\tline.append(image.subsurface(rect))\n\treturn tile_table", "def load_image(self):\n try:\n return Image.open(self._path, 'r')\n except IOError:\n messagebox.showerror(\"Error\", \"Wrong sprite file path!\")", "def load_image(name, colorkey=None):\n fullname = os.path.join('assets', name)\n try:\n image = load(fullname)\n except error as message:\n print('Cannot load image:', name)\n raise SystemExit(message)\n image = image.convert()\n if colorkey is not None:\n if colorkey is -1:\n colorkey = image.get_at((0, 0))\n image.set_colorkey(colorkey, RLEACCEL)\n return image, image.get_rect()", "async def add_image_file(\n self,\n file: str,\n identifier: typing.Hashable = None,\n ) -> typing.Tuple[typing.Tuple[int, int], \"TextureAtlas\"]:\n return self.add_image(\n await mcpython.engine.ResourceLoader.read_image(file), identifier\n )", "def load_image(filename):\n with open(filename, 'rb') as img_handle:\n img = Image.open(img_handle)\n img_data = img.getdata()\n if img.mode.startswith('RGB'):\n pixels = [round(.299 * p[0] + .587 * p[1] + .114 * p[2])\n for p in img_data]\n elif img.mode == 'LA':\n pixels = [p[0] for p in img_data]\n elif img.mode == 'L':\n pixels = list(img_data)\n else:\n raise ValueError('Unsupported image mode: %r' % img.mode)\n w, h = img.size\n return {'height': h, 'width': w, 'pixels': pixels}" ]
[ "0.7242643", "0.71882534", "0.7155117", "0.7069824", "0.70061564", "0.70061564", "0.69536114", "0.68268985", "0.68221575", "0.6817364", "0.6731701", "0.6718896", "0.6691872", "0.6541551", "0.65246266", "0.6514948", "0.6454194", "0.6423829", "0.6403943", "0.63669056", "0.63510954", "0.63060516", "0.63060516", "0.62722975", "0.6233339", "0.6195044", "0.61944383", "0.61657184", "0.6165007", "0.6096856", "0.6039474", "0.5981012", "0.59675413", "0.5927355", "0.58577067", "0.5777063", "0.5768928", "0.5768224", "0.5754954", "0.5749167", "0.5748817", "0.5748727", "0.57433", "0.57344836", "0.5700195", "0.56973517", "0.56813204", "0.56559837", "0.56478465", "0.56408983", "0.5634908", "0.56318223", "0.5630144", "0.5609533", "0.5559402", "0.55510193", "0.5548731", "0.55429316", "0.5537902", "0.5513664", "0.55077213", "0.5507335", "0.5488901", "0.5465038", "0.5464384", "0.54639316", "0.5463404", "0.5463192", "0.54624003", "0.5451466", "0.5440667", "0.543993", "0.5438293", "0.5433309", "0.5427916", "0.5427123", "0.5425201", "0.54233736", "0.5419635", "0.5417668", "0.5403679", "0.5402269", "0.5376148", "0.5375246", "0.53729594", "0.5365971", "0.53643876", "0.53288966", "0.53228354", "0.531827", "0.531381", "0.5312179", "0.5311504", "0.5305521", "0.52983195", "0.52949214", "0.52893245", "0.5285536", "0.5282492", "0.52790725" ]
0.6440027
17
Render-time texture environment setup
def setupTexture(self):
    # Configure the texture rendering parameters
    glEnable(GL_TEXTURE_2D)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)
    glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)
    glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)

    # Re-select our texture, could use other generated textures
    # if we had generated them earlier.
    glBindTexture(GL_TEXTURE_2D, self.imageID)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def set_environment_texture(self, texture, is_srgb=False):\n # cube_map textures cannot use spherical harmonics\n if texture.cube_map:\n self.AutomaticLightCreationOff()\n # disable spherical harmonics was added in 9.1.0\n if hasattr(self, 'UseSphericalHarmonicsOff'):\n self.UseSphericalHarmonicsOff()\n\n self.UseImageBasedLightingOn()\n self.SetEnvironmentTexture(texture, is_srgb)\n self.Modified()", "def setupTexture( self ):\n glEnable(GL_TEXTURE_2D)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)\n glBindTexture(GL_TEXTURE_2D, self.imageID)", "def appInit(self):\n glutInitDisplayMode( GLUT_RGBA | GLUT_DOUBLE | GLUT_DEPTH )\n\n glClearColor(0.4, 0.4, 0.5, 1.0)\n glShadeModel(GL_SMOOTH)\n\n glEnable(GL_DEPTH_TEST)\n\n glEnable(GL_BLEND)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n\n self.texture = Texture()\n # self.texture.load_jpeg('Sunrise.jpg')\n self.x2yAspect = self.texture.GetWidth()/self.texture.GetHeight()\n glutReshapeFunc(self.reshape)\n glutDisplayFunc(self.redraw)", "def __call__(self):\n Texture()", "def setup_terrain(self):\r\n self.terrain_scale = LVector3(512, 512, 100)\r\n self.terrain_pos = LVector3(-256, -256, -70)\r\n # sample values for a 4096 x 4096px heightmap.\r\n #self.terrain_scale = LVector3(4096, 4096, 1000)\r\n #self.terrain_pos = LVector3(-2048, -2048, -70)\r\n \"\"\"\r\n Diamond_subdivision is an alternating triangulation scheme and may\r\n produce better results.\r\n \"\"\"\r\n use_diamond_subdivision = True\r\n \r\n \"\"\"\r\n Construct the terrain\r\n Without scaling, any ShaderTerrainMesh is 1x1x1 units.\r\n \"\"\"\r\n self.terrain_node = ShaderTerrainMesh()\r\n \"\"\"\r\n Set a heightfield, the heightfield should be a 16-bit png and\r\n have a quadratic size of a power of two.\r\n \"\"\"\r\n heightfield = Texture()\r\n heightfield.read(self.heightfield_fn)\r\n heightfield.set_keep_ram_image(True) \r\n self.terrain_node.heightfield = heightfield\r\n \r\n # Display characteristic values of the heightfield texture\r\n #minpoint, maxpoint, avg = LPoint3(), LPoint3(), LPoint3()\r\n #heightfield.calc_min_max(minpoint, maxpoint)\r\n #heightfield.calc_average_point(avg, 0.5, 0.5, 0.5)\r\n #print(\"avg: {} min: {} max: {}\".format(avg.x, minpoint.x, maxpoint.x))\r\n\r\n \"\"\"\r\n Set the target triangle width. For a value of 10.0 for example,\r\n the ShaderTerrainMesh will attempt to make every triangle 10 pixels\r\n wide on screen.\r\n \"\"\"\r\n self.terrain_node.target_triangle_width = 10.0\r\n if use_diamond_subdivision:\r\n \"\"\"\r\n This has to be specified before calling .generate()\r\n The default is false.\r\n \"\"\"\r\n load_prc_file_data(\"\", \"stm-use-hexagonal-layout true\")\r\n \r\n self.terrain_node.generate()\r\n \"\"\"\r\n Attach the terrain to the main scene and set its scale. With no scale\r\n set, the terrain ranges from (0, 0, 0) to (1, 1, 1)\r\n \"\"\"\r\n self.terrain = self.render.attach_new_node(self.terrain_node)\r\n self.terrain.set_scale(self.terrain_scale)\r\n self.terrain.set_pos(self.terrain_pos)\r\n \"\"\"\r\n Set a vertex and a fragment shader on the terrain. 
The\r\n ShaderTerrainMesh only works with an applied shader.\r\n \"\"\"\r\n terrain_shader = Shader.load(Shader.SL_GLSL, \r\n \"samples/shader-terrain/terrain.vert.glsl\", \r\n \"samples/shader-terrain/terrain.frag.glsl\")\r\n self.terrain.set_shader(terrain_shader)\r\n self.terrain.set_shader_input(\"camera\", base.camera)\r\n # Set some texture on the terrain\r\n grass_tex = self.loader.load_texture(\r\n \"samples/shader-terrain/textures/grass.png\")\r\n grass_tex.set_minfilter(SamplerState.FT_linear_mipmap_linear)\r\n grass_tex.set_anisotropic_degree(16)\r\n self.terrain.set_texture(grass_tex)\r\n\r\n \"\"\"\r\n Set up the DynamicHeightfield (it's a type of PfmFile). We load the\r\n same heightfield image as with ShaderTerrainMesh.\r\n \"\"\"\r\n self.DHF = DynamicHeightfield()\r\n self.DHF.read(self.heightfield_fn)\r\n \"\"\"\r\n Set up empty PfmFiles to prepare stuff in that is going to\r\n dynamically modify our terrain.\r\n \"\"\"\r\n self.StagingPFM = PfmFile()\r\n self.RotorPFM = PfmFile()\r\n \r\n \"\"\"\r\n Set up the BulletHeightfieldShape (=collision terrain) and give it\r\n some sensible physical properties.\r\n \"\"\"\r\n self.HFS = BulletHeightfieldShape(self.DHF, self.terrain_scale.z,\r\n STM=True)\r\n if use_diamond_subdivision:\r\n self.HFS.set_use_diamond_subdivision(True)\r\n HFS_rigidbody = BulletRigidBodyNode(\"BulletTerrain\")\r\n HFS_rigidbody.set_static(True)\r\n friction = 2.0\r\n HFS_rigidbody.set_anisotropic_friction(\r\n LVector3(friction, friction, friction/1.3))\r\n HFS_rigidbody.set_restitution(0.3)\r\n HFS_rigidbody.add_shape(self.HFS)\r\n self.world.attach(HFS_rigidbody)\r\n \r\n HFS_NP = NodePath(HFS_rigidbody)\r\n HFS_NP.reparent_to(self.worldNP)\r\n \"\"\"\r\n This aligns the Bullet terrain with the ShaderTerrainMesh rendered\r\n terrain. It will be exact as long as the terrain vertex shader from\r\n the STM sample is used and no additional tessellation shader.\r\n For Bullet (as for other physics engines) the origin of objects is at\r\n the center.\r\n \"\"\"\r\n HFS_NP.set_pos(self.terrain_pos + self.terrain_scale/2)\r\n HFS_NP.set_sx(self.terrain_scale.x / heightfield.get_x_size())\r\n HFS_NP.set_sy(self.terrain_scale.y / heightfield.get_y_size())\r\n \r\n # Disables Bullet debug rendering for the terrain, because it is slow.\r\n #HFS_NP.node().set_debug_enabled(False)\r\n \r\n \"\"\"\r\n Finally, link the ShaderTerrainMesh and the BulletHeightfieldShape to\r\n the DynamicHeightfield. 
From now on changes to the DynamicHeightfield\r\n will propagate to the (visible) ShaderTerrainMesh and the (collidable)\r\n BulletHeightfieldShape.\r\n \"\"\"\r\n self.HFS.set_dynamic_heightfield(self.DHF)\r\n self.terrain_node.set_dynamic_heightfield(self.DHF)", "def init():\n global tube, ball, faceTextureName, woodTextureName\n tube = gluNewQuadric()\n gluQuadricDrawStyle(tube, GLU_FILL)\n ball = gluNewQuadric()\n gluQuadricDrawStyle(ball, GLU_FILL)\n\n # Set up lighting and depth-test\n glEnable(GL_LIGHTING)\n glEnable(GL_NORMALIZE) # Inefficient...\n glEnable(GL_DEPTH_TEST) # For z-buffering!\n\n generateCheckerBoardTexture()\n faceTextureName = loadImageTexture(\"brick.jpg\")\n woodTextureName = loadImageTexture(\"wood.jpg\")", "def __init__(self, name):\r\n super(OffScreenTexture, self).__init__(name)\r\n from pi3d.Display import Display\r\n self.ix, self.iy = Display.INSTANCE.width, Display.INSTANCE.height\r\n self.im = Image.new(\"RGBA\",(self.ix, self.iy))\r\n self.image = self.im.convert(\"RGBA\").tostring('raw', \"RGBA\")\r\n self.alpha = True\r\n self.blend = False\r\n\r\n self._tex = ctypes.c_int()\r\n self.framebuffer = (ctypes.c_int * 1)()\r\n opengles.glGenFramebuffers(1, self.framebuffer)\r\n self.depthbuffer = (ctypes.c_int * 1)()\r\n opengles.glGenRenderbuffers(1, self.depthbuffer)", "def __init__(self):\n # Screen settings\n self.screen_width = 2400\n self.screen_height = 1600\n self.bg_color = (0, 0, 0)\n\n # Raindrop settings\n self.r_y_speed = 10", "def initTextureCache():\r\n\r\n\tglobal textureCache\r\n\r\n\ttextureCache = {}", "def main():\n base_dir = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n os.pardir,\n )\n default_output_path = os.path.join(base_dir, \"output\", \"out.png\")\n default_texture_path = os.path.join(base_dir, \"textures\", \"grid.png\")\n\n default_options = {\n \"resolution\": (1512, 762),\n \"texture_path\": default_texture_path,\n \"output_path\": default_output_path,\n \"iterations\": 200, # Increase this for good results\n \"camera_position\": [3.1, 1.570796, 0.],\n \"num_processes\": multi.cpu_count(),\n \"chunk_size\": 9000,\n \"gain\": 1,\n \"normalize\": 0,\n \"spin\": 0.7,\n }\n args = parse_args(default_options)\n\n output_path = os.path.dirname(args.output_path)\n if not os.path.exists(output_path):\n print(\"Error: Output path does not exist at:\")\n print(args.output_path)\n print(\"Create the directory or change the path then try again.\")\n print_help_and_exit()\n\n\n try:\n texture = spm.imread(args.texture_path)\n except FileNotFoundError as error:\n print(error)\n print(\"Error: Texture file not found at:\")\n print(args.texture_path)\n print_help_and_exit()\n\n # Convert to float to work in linear colour space\n texture = convert_image_to_float(texture)\n if not args.no_srgb:\n # Convert to sRGB before resizing for correct results\n srgbtorgb(texture)\n\n texture = convert_image_to_float(\n spm.imresize(texture, 2.0, interp=\"bicubic\"),\n )\n\n black_hole = KerrBlackHole(args.spin)\n raytracer = KerrRaytracer(\n black_hole,\n args.camera_position,\n texture,\n args.resolution,\n args.iterations,\n args.num_processes,\n args.chunk_size,\n shuffle=not args.disable_shuffle,\n )\n raytracer.generate_image()\n print(\"Raytracing Completed Succesfully.\")\n print(\n \"Total raytracing time:\",\n datetime.timedelta(seconds=(time.time() - raytracer.start_time)),\n )\n\n colour = post_process(raytracer.colour_buffer_preproc, args.gain, args.normalize)\n\n save_to_img(\n colour,\n args.output_path,\n 
args.resolution,\n srgb_out=not args.no_srgb,\n )", "def __init__(self, initial_x:int, initial_y:int, width:int, height:int, power_type:str, time_to_live:int, debug:bool = False):\n\n #Call the superclass contructor\n super().__init__(initial_x, initial_y, width, height, PowerUp.sprites[power_type], debug)\n\n #Store variables\n self.power_type = power_type\n self.ttl = time_to_live\n\n #Scale the image\n self.scale(30,30)", "def init():\n \n # General parameters\n exp_path = '/home/laura/Documents/stacks tif/1705_regMovie.tif' # experimental tif stack (grayscale)\n bin_path = '/home/laura/Documents/stacks tif/1705/1705_binarizedMovie.tif' # binarized tif stack\n vect_path = '/home/laura/Documents/STAGE3/1705_NET/' # gpickle directory\n dest_path = '/home/laura/Documents/STAGE3/1705_NET/superposition' # output directory\n verbose = True\n debug = True\n invert = True \n main_params = [exp_path, bin_path, vect_path, dest_path, verbose, debug, invert]\n \n # Output options\n doImg = -1 # image index\n doStack = False \n doVideo = False \n compress = 3 # advice: no more than 5\n output_params = [doImg, doStack, doVideo, compress]\n \n # Drawing options (colors as BGR)\n line = True # edges drawing\n line_color = (0, 255, 0) # green \n line_size = 1 \n apex_color = (0, 0, 255) # red\n apex_size = 5\n node_color = (255, 0, 0) # blue\n node_size = 5\n body_color = (0, 255, 0) # green\n body_size = 3\n drawing_params = [line, line_color, line_size, apex_color, apex_size,\n node_color, node_size, body_color, body_size]\n \n return main_params, output_params, drawing_params", "def populate_texture(self, texture):\n texture.blit_buffer(self._cbuffer, colorfmt='bgr', bufferfmt='ubyte')", "def OnInit( self ):\n if not glMultiTexCoord2f:\n print 'Multitexture not supported!'\n sys.exit(1)\n self.addEventHandler( \"keypress\", name=\"r\", function = self.OnReverse)\n self.addEventHandler( \"keypress\", name=\"s\", function = self.OnSlower)\n self.addEventHandler( \"keypress\", name=\"f\", function = self.OnFaster)\n print 'r -- reverse time\\ns -- slow time\\nf -- speed time'\n self.time = Timer( duration = 8.0, repeating = 1 )\n self.time.addEventHandler( \"fraction\", self.OnTimerFraction )\n self.time.register (self)\n self.time.start ()\n self.Load()", "def __init__(self):\n super().__init__()\n self.texture = arcade.load_texture(\":resources:/images/enemies/slimeBlue.png\")\n\n # Reset the viewport, necessary if we have a scrolling game and we need\n # to reset the viewport back to the start so we can see what we draw.\n arcade.set_viewport(0, constants.SCREEN_WIDTH - 1, 0, constants.SCREEN_HEIGHT - 1)", "def _load_textures(self):\n search_tex = RPLoader.load_texture(self.get_resource(\"search_tex.png\"))\n area_tex = RPLoader.load_texture(self.get_resource(\"area_tex.png\"))\n\n for tex in [search_tex, area_tex]:\n tex.set_minfilter(SamplerState.FT_linear)\n tex.set_magfilter(SamplerState.FT_linear)\n tex.set_wrap_u(SamplerState.WM_clamp)\n tex.set_wrap_v(SamplerState.WM_clamp)\n\n self._smaa_stage.area_tex = area_tex\n self._smaa_stage.search_tex = search_tex", "def init():\n # Load images here\n assets[\"teapot\"] = pg.image.load(\"teapot.png\")\n\n # Load sounds here\n assets[\"plong\"] = pg.mixer.Sound(\"plong.wav\")", "def init():\n # Load images here\n assets[\"teapot\"] = pg.image.load(\"teapot.png\")\n\n # Load sounds here\n assets[\"plong\"] = pg.mixer.Sound(\"plong.wav\")", "def setupRender():\n prefs = getPreferences()\n\n # Check of the built-in environment maps path can be located.\n 
# Discontinue if it cannot be found.\n envPath = prefs.path_value\n if not envPath:\n return {'WARNING'}, \"No environment images path defined\"\n\n # Discontinue if there is no output path defined.\n renderPath = outputPath()\n if not renderPath:\n return {'WARNING'}, \"The scene needs to be saved before rendering\"\n\n if prefs.image_value == 'NONE':\n return {'WARNING'}, \"No environment image defined\"\n\n setRenderSettings(os.path.join(renderPath, IMAGE_NAME))\n createCamera()\n createWorld(envPath)\n return renderPath", "def __init__(self,\n debug=False,\n urdf_version=None,\n control_time_step=0.005,\n action_repeat=5,\n control_latency=0,\n pd_latency=0,\n on_rack=False,\n motor_kp=1.0,\n motor_kd=0.02,\n render=False,\n num_steps_to_log=2000,\n env_randomizer=None,\n log_path=None,\n signal_type='ik',\n target_position=None,\n backwards=None,\n gait_type='trot',\n terrain_type='plane',\n terrain_id='plane',\n mark='base',\n ):\n self.phase = 0\n\n self._gait_type = gait_type \n # for observation space bounding \n self.max_speed = 1.0\n self.min_speed = 0.5 # change back to 0.2 for OLD TD3 model evaluation\n \n self.min_side_speed = 0.0\n self.max_side_speed = 0.0\n\n self.speed = np.random.uniform(self.min_speed, self.max_speed)\n self.side_speed = np.random.uniform(self.min_side_speed, self.max_side_speed)\n self.speed_des = [self.speed, self.side_speed]\n\n # Initialization variables for periodic reward sum composition\n self.theta_FL = phase_constants.PHASE_VALS[self._gait_type]['front_left']\n self.theta_FR = phase_constants.PHASE_VALS[self._gait_type]['front_right']\n self.theta_RL = phase_constants.PHASE_VALS[self._gait_type]['rear_left']\n self.theta_RR = phase_constants.PHASE_VALS[self._gait_type]['rear_right']\n\n self.min_swing_ratio = 0.6\n self.max_swing_ratio = 0.8\n self.ratio = np.random.uniform(self.min_swing_ratio, self.max_swing_ratio)\n\n super(rexPeriodicRewardEnv,\n self).__init__(urdf_version=urdf_version,\n accurate_motor_model_enabled=True,\n motor_overheat_protection=False,\n motor_kp=motor_kp,\n motor_kd=motor_kd,\n remove_default_joint_damping=False,\n control_latency=control_latency,\n pd_latency=pd_latency,\n on_rack=on_rack,\n render=render,\n num_steps_to_log=num_steps_to_log,\n env_randomizer=env_randomizer,\n log_path=log_path,\n control_time_step=control_time_step,\n action_repeat=action_repeat,\n target_position=target_position,\n signal_type=signal_type,\n backwards=backwards,\n debug=debug,\n terrain_id=terrain_id,\n terrain_type=terrain_type,\n mark=mark,\n ratio=self.ratio,\n forward_reward_cap=5\n )\n\n self.height_des = 0.206 # this is init standing height for rex\n\n self.cycle_complete = 0\n self.cycle_len = 1000 # this is L\n \n # vonmises variables\n self.kappa = phase_constants.VON_MISES_KAPPA\n\n rex_joints = p.getNumJoints(bodyUniqueId=self.rex.quadruped)\n link_name_to_ID = {}\n for i in range(rex_joints):\n name = p.getJointInfo(self.rex.quadruped, i)[12].decode('UTF-8')\n link_name_to_ID[name] = i\n\n self.link_name_to_ID = link_name_to_ID\n self.toe_pos_last = { 'front_left_toe_pos' : p.getLinkState(self.rex.quadruped, self.link_name_to_ID['front_left_toe_link'])[0],\n 'front_right_toe_pos' : p.getLinkState(self.rex.quadruped, self.link_name_to_ID['front_right_toe_link'])[0],\n 'rear_left_toe_pos' : p.getLinkState(self.rex.quadruped, self.link_name_to_ID['rear_left_toe_link'])[0],\n 'rear_right_toe_pos' : p.getLinkState(self.rex.quadruped, self.link_name_to_ID['rear_right_toe_link'])[0]\n\n } \n\n print('Using Periodic Reward 
Composition Rex Environment')", "def settexture(self, model, texture=None, wrapmode='clamp', scale=None):\n wraps={'repeat': Texture.WMRepeat, 'clamp': Texture.WMClamp,}\n if texture:\n tex = loader.loadTexture(texture)\n model.clearTexture()\n tex.setWrapU(wraps[wrapmode])\n tex.setWrapV(wraps[wrapmode])\n tex.setMinfilter(Texture.FTLinearMipmapNearest)\n ts = TextureStage('ts')\n model.setTexture(ts, tex, 1)\n if scale: model.setTexScale(ts, scale[0], scale[1])\n # autotransparent if png image file\n if texture.endswith('.png'):\n model.setTransparency(TransparencyAttrib.MAlpha)", "def __init__(self,\n shape=[300,300],\n ra=[ 64, 24, 9, 3, 1],\n ri=[ 96, 32, 12,4.5,1.5],\n dt=[.04,.03,.03,.02,.02], # wt=[1],\n pal=[[1,1,0],[1,0,0],[1,0,1],[0,0,1],[0,1,1],[1,1,1]]):\n\n \"Greyscale buffer that contains the actual Multiscale Turing Pattern.\"\n self.z = rand(*shape)\n \"Colour buffer with RGB values tracking the colour of local scales.\"\n self.c = ones(list(shape)+[3])\n \"Timestep per scale.\"\n self.dt = array(dt)\n# \"Weight per scale.\"\n# self.wt = array(wt)\n \"Activator radii\"\n self.ra = ra\n \"Inhibitor radii\"\n self.ri = ri\n \"Colourmap of scale to RGB.\"\n self.pal = array(pal)\n \"Transform function before filter.\"\n self._xform = lambda z: z\n \"Filter function.\"\n self._filter = boxG\n \"Colour buffer update speed.\"\n self._dc = .04\n # init these as instance variables so they don't have to be\n # allocated on each call to self.step()\n self._tmp = zeros_like(self.z)\n self._min_var = zeros_like(self.z).astype(int)\n self._variance = zeros([len(ra)] + list(shape))", "def make_t(self):\n self.img[1, 1:-1] = 1\n self.img[2:-1, self.l_i / 2] = 1\n self.img_name = 'T'", "def do_stuff(self):\n self.create_tourism_raster()", "def enable_texture_mode():\n for area in bpy.context.screen.areas:\n if area.type == \"VIEW_3D\":\n for space in area.spaces:\n if space.type == \"VIEW_3D\":\n space.viewport_shade = \"TEXTURED\"\n return", "def _export_texture_type_environment_map(self, bo, layer, slot):\n\n texture = slot.texture\n bl_env = texture.environment_map\n if bl_env.source in {\"STATIC\", \"ANIMATED\"}:\n # NOTE: It is assumed that if we arrive here, we are at lease dcm2dem on the\n # environment map export method. 
You're welcome!\n if bl_env.mapping == \"PLANE\" and self._mgr.getVer() >= pvMoul:\n pl_env = plDynamicCamMap\n else:\n pl_env = plDynamicEnvMap\n pl_env = self.export_dynamic_env(bo, layer, texture, pl_env)\n elif bl_env.source == \"IMAGE_FILE\":\n pl_env = self.export_cubic_env(bo, layer, texture)\n else:\n raise NotImplementedError(bl_env.source)\n layer.state.shadeFlags |= hsGMatState.kShadeEnvironMap\n if pl_env is not None:\n layer.texture = pl_env.key", "def __init__(self):\n pygame.init()\n self.rain_settings = RSettings()\n\n self.screen = pygame.display.set_mode((0, 0), pygame.FULLSCREEN)\n self.rain_settings.screen_width = self.screen.get_rect().width\n self.rain_settings.screen_height = self.screen.get_rect().height\n pygame.display.set_caption(\"Raindrops\")\n\n self.rain = pygame.sprite.Group()\n\n self._create_rain()", "def setup_ring_renderer(ring_texture_name, ring_inner_radius, ring_outer_radius, body):\n\n body.ring_inner_radius = ring_inner_radius\n body.ring_outer_radius = ring_outer_radius\n body.ring_texture = Texture(ring_texture_name)\n body.ring_disk = gluNewQuadric()\n gluQuadricNormals(body.ring_disk, GLU_SMOOTH)\n gluQuadricTexture(body.ring_disk, GL_TRUE)\n return body", "def __init__(self): # image, scale):\n super().__init__()\n self.textures = []\n self.center_x = random.randrange(SCREEN_WIDTH)\n self.center_y = random.randrange(SCREEN_HEIGHT)\n coin_img_path = \"img/silver_coins/Silver_%i.png\"\n for y in range(1, 11):\n self.textures.append(arcade.load_texture(coin_img_path % y, scale=0.1))\n self.cur_texture_index = random.randrange(len(self.textures))", "def __init__(self):\n self.index = 'r11_07_06c'\n self.parameters = {'run_index': 'r11_07_06c',\n 'h_1': 0.25,\n 'rho_0': 1.150,\n 'rho_1': 1.100,\n 'rho_2': 1.000,\n 'alpha': 0.5,\n 'D': 0.4,\n 'H': 0.25,\n 'sample': 1.0,\n 'perspective': 'old'}\n self.run_data = {'run_index': 'r11_07_06c',\n 'l0x': 2796,\n 'l0y': 1151,\n 'lsx': 2793,\n 'lsy': 716,\n 'j10x': 210,\n 'j10y': 1165,\n 'j1sx': 208,\n 'j1sy': 727,\n 'leakage': -76,\n 'odd_1': 'n',\n 'j20x': 2728,\n 'j20y': 1086,\n 'j2sx': 2730,\n 'j2sy': 670,\n 'r0x': 1097,\n 'r0y': 1095,\n 'rsx': 1093,\n 'rsy': 683,\n 'odd_2': 'n'}\n self.raw_image = 'tests/data/synced/r11_07_06c/cam1/img_0001.jpg'\n self.bc_image = 'tests/data/bc/r11_07_06c/cam1/img_0001.jpg'\n self.processed_path = 'tests/data/processed_ref/r11_07_06c/cam1/img_0001.jpg'", "def setup(self):\n self.total_time = 0.0\n self.timer_text = None\n arcade.set_background_color(arcade.color.WHITE)", "def rainfall_event(self):\n\n # assign local variables\n datatype = 'strds'\n increment = str(self.rain_interval)+' minutes'\n raster = 'raster'\n iterations = int(self.rain_duration)/int(self.rain_interval)\n rain_excess = 'rain_excess'\n net_difference = 'net_difference'\n\n # create raster space time datasets\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n 
temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n rain_duration=self.rain_duration,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # determine mode and run model\n if self.mode == 'simwe_mode':\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model\n # as a series of rainfall intervals in a rainfall event\n i = 1\n while i < iterations:\n\n # update the elevation\n evol.elevation = evolved_elevation\n print evol.elevation\n\n # update time\n evol.start = time\n print evol.start\n\n # derive excess water (mm/hr) from rainfall rate (mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n 
\"/{rain_interval}\"\n \"*60.\".format(\n rain_excess=rain_excess,\n rain_intensity=self.rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = {rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_excess'],\n flags='f')\n\n i = i+1\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"={evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)", "def __init__(self, scale=COIN_SCALE):#, image, scale):\n\n # Call the parent init\n super().__init__()#image, scale)\n self.textures_walk_left = []\n self.textures_walk_right = []\n self.textures_idle_left = []\n self.textures_idle_right = []\n self.textures_attack_right = []\n self.textures_attack_left = []\n self.textures_dead_right = []\n self.textures_dead_left = []\n self.textures_jump_right = []\n self.textures_jump_left = []\n self.textures = []\n self.center_x = random.randrange(SCREEN_WIDTH)\n self.center_y = random.randrange(SCREEN_HEIGHT)\n attack_img = \"img/sheriff_girl/Shoot (%i).png\"\n walk_img = \"img/sheriff_girl/Run (%i).png\"\n idle_img = \"img/sheriff_girl/Idle (%i).png\"\n dead_img = \"img/sheriff_girl/Dead (%i).png\"\n jump_img = \"img/sheriff_girl/Jump (%i).png\"\n self.action = 
\"left_idle\"\n\n for y in range(1, 4):\n self.textures_attack_right.append(arcade.load_texture(attack_img % y, scale=COIN_SCALE))\n self.textures_attack_left.append(arcade.load_texture(attack_img % y, mirrored=True, scale=COIN_SCALE))\n for y in range(1, 9):\n self.textures_walk_right.append(arcade.load_texture(walk_img % y, scale=COIN_SCALE))\n self.textures_walk_left.append(arcade.load_texture(walk_img % y, mirrored=True, scale=COIN_SCALE))\n for y in range(1, 11):\n self.textures_idle_left.append(arcade.load_texture(idle_img % y, mirrored=True, scale=COIN_SCALE))\n self.textures_idle_right.append(arcade.load_texture(idle_img % y, scale=COIN_SCALE))\n for y in range(1, 11):\n self.textures_dead_left.append(arcade.load_texture(dead_img % y, mirrored=True, scale=COIN_SCALE))\n self.textures_dead_right.append(arcade.load_texture(dead_img % y, scale=COIN_SCALE))\n for y in range(1, 11):\n self.textures_jump_left.append(arcade.load_texture(jump_img % y, mirrored=True, scale=COIN_SCALE))\n self.textures_jump_right.append(arcade.load_texture(jump_img % y, scale=COIN_SCALE))\n\n self.textures = self.textures_idle_left\n self.cur_texture_index = random.randrange(len(self.textures))\n\n # Create a variable to hold our speed. 'angle' is created by the parent\n self.speed = 0", "def load_textures(self):\n for texture_meta in self.gltf.textures:\n texture = MaterialTexture()\n\n if texture_meta.source is not None:\n texture.texture = self.images[texture_meta.source]\n\n if texture_meta.sampler is not None:\n texture.sampler = self.samplers[texture_meta.sampler]\n\n self.textures.append(texture)", "def InitEnvironment(self):\r\n\t\t\r\n\t\t# Turn antialiasing on\r\n\t\trender.setAntialias(AntialiasAttrib.MMultisample,1)\r\n\t\t\r\n\t\t# load the falcon model\r\n\t\tfalcon = loader.loadModel(\"Content/falcon/falcon.bam\")\r\n\t\tfalcon.setScale(30)\r\n\t\tfalcon.setPos(0, 0, 28.5)\r\n\t\tfalcon.reparentTo(render)", "def make_frame(t):\r\n while world['t'] < hours_per_second*t:\r\n update(world)\r\n return world_to_npimage(world)", "def _start(self):\r\n opengles.glBindFramebuffer(GL_FRAMEBUFFER, self.framebuffer[0])\r\n opengles.glFramebufferTexture2D(GL_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,\r\n GL_TEXTURE_2D, self._tex.value, 0)\r\n #thanks to PeterO c.o. 
RPi forum for pointing out missing depth attchmnt\r\n opengles.glBindRenderbuffer(GL_RENDERBUFFER, self.depthbuffer[0])\r\n opengles.glRenderbufferStorage(GL_RENDERBUFFER, GL_DEPTH_COMPONENT16,\r\n self.ix, self.iy)\r\n opengles.glFramebufferRenderbuffer(GL_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,\r\n GL_RENDERBUFFER, self.depthbuffer[0])\r\n opengles.glClear(GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT)\r\n\r\n #assert opengles.glCheckFramebufferStatus(GL_FRAMEBUFFER) == GL_FRAMEBUFFER_COMPLETE\r", "def remove_environment_texture(self):\n self.UseImageBasedLightingOff()\n self.SetEnvironmentTexture(None)\n self.Modified()", "def prepare_texture(resource):\n global tex\n image = pygame.image.load(resource)\n image = pygame.transform.scale(image, (Case.pixel_size, Case.pixel_size))\n image_rect = image.get_rect()\n return image, image_rect", "def __init__(self, gender=\"male\"):#, image, scale):\n\n # Call the parent init\n super().__init__() #image, scale)\n self.walk_direction = 1\n self.walk_from_x = 0\n self.walk_to_x = 0\n self.textures_walk_left = []\n self.textures_walk_right = []\n self.textures_idle_left = []\n self.textures_idle_right = []\n self.textures_attack_right = []\n self.textures_attack_left = []\n self.textures_dead_right = []\n self.textures_dead_left = []\n self.textures = []\n self.center_x = random.randrange(SCREEN_WIDTH)\n self.center_y = random.randrange(SCREEN_HEIGHT)\n attack_img = \"img/\" + gender + \"/Attack (%i).png\"\n walk_img = \"img/\" + gender + \"/Walk (%i).png\"\n idle_img = \"img/\" + gender + \"/Idle (%i).png\"\n dead_img = \"img/\" + gender + \"/Dead (%i).png\"\n self.direction = \"left\"\n for y in range(1, 9):\n self.textures_attack_right.append(arcade.load_texture(attack_img % y, scale=COIN_SCALE))\n self.textures_attack_left.append(arcade.load_texture(attack_img % y, mirrored=True, scale=COIN_SCALE))\n for y in range(1, 11):\n self.textures_walk_right.append(arcade.load_texture(walk_img % y, scale=COIN_SCALE))\n self.textures_walk_left.append(arcade.load_texture(walk_img % y, mirrored=True, scale=COIN_SCALE))\n for y in range(1, 16):\n self.textures_idle_left.append(arcade.load_texture(idle_img % y, mirrored=True, scale=COIN_SCALE))\n self.textures_idle_right.append(arcade.load_texture(idle_img % y, scale=COIN_SCALE))\n for y in range(1, 13):\n self.textures_dead_left.append(arcade.load_texture(dead_img % y, mirrored=True, scale=COIN_SCALE))\n self.textures_dead_right.append(arcade.load_texture(dead_img % y, scale=COIN_SCALE))\n\n self.textures = self.textures_walk_left\n self.cur_texture_index = random.randrange(len(self.textures))\n\n # Create a variable to hold our speed. 
'angle' is created by the parent\n self.speed = 0", "def simulate_cuda(initstate, t, timestep=genfb_py, nttname = False, bounds = [1, 1, 1, 1], saveinterval=10, drive=donothing, beta=0.281105, eps=0.013, gamma=0.0880, mu=0.3, nu=0, dudt_x=dudt, dvdt_x=dvdt, dndt_x=dndt, grav=True, cori=True, advx=True, advy=True, attn=True): # gives surface height array of the system after evert dt\n print(\"simulate start\")\n bounds = np.asarray(bounds, dtype=np.float32)\n h, n, u, v, f, dx, dy, dt = [initstate[k] for k in ('h', 'n', 'u', 'v', 'lat', 'dx', 'dy', 'dt')]#h, state.n, state.u, state.v, state.dx, state.dy, state.dt\n \n# h[np.logical_and(np.greater(h, -0.1), np.less(h, 0.2))] = np.float32(0.1)\n f = np.float32(((2*2*np.pi*np.sin(f*np.pi/180))/(24*3600))[:,np.newaxis])\n \n\n \n \n nu = (dx+dy)/1000\n # state = initstate\n mmax = np.max(np.abs(n))\n landthresh = 1.5*np.max(n) # threshhold for when sea ends and land begins\n itrs = int(np.ceil(t/dt))\n \n saveinterval = np.int(saveinterval//dt)\n assert (dt >= 0), 'negative dt!' # dont try if timstep is zero or negative\n \n ntt = np.zeros((np.int(np.ceil(itrs/saveinterval)),)+n.shape, dtype=np.float32)\n# ntt = np.memmap(str(nttname)+'_eed', dtype='float32', mode='w+', shape=(itrs,)+n.shape)\n maxn = np.zeros(n.shape, dtype=n.dtype) # max height in that area\n# minn = np.zeros(n.shape, dtype=n.dtype) # minimum height that was at each point\n# timemax = np.zeros(n.shape, dtype=n.dtype) # when the maximum height occured\n \n coastx = np.less(h, landthresh) # where the reflective condition is enforced on the coast\n# print('coastx', coastx)\n coastx = np.float32(coastx)\n \n \n ch = nb.cuda.to_device(h)\n cn = nb.cuda.to_device(n)\n cu = nb.cuda.to_device(u)\n cv = nb.cuda.to_device(v)\n# cout = nb.cuda.to_device(uout)\n# cnout = nb.cuda.to_device(nout)\n cf = nb.cuda.to_device(f)\n ccoastx= nb.cuda.to_device(coastx)\n cmaxn = nb.cuda.to_device(maxn)\n\n\n# cntt = nb.cuda.to_device(ntt)\n \n threadblock=(32,32)\n gridu = ( (u.shape[0]+threadblock[0]-1)//threadblock[0],\n (u.shape[1]+threadblock[1]-1)//threadblock[1])\n gridv = ( (v.shape[0]+threadblock[0]-1)//threadblock[0],\n (v.shape[1]+threadblock[1]-1)//threadblock[1])\n gridn = ( (n.shape[0]+threadblock[0]-1)//threadblock[0],\n (n.shape[1]+threadblock[1]-1)//threadblock[1])\n # other order.\n gridu = ( (u.shape[1]+threadblock[1]-1)//threadblock[1],\n (u.shape[0]+threadblock[0]-1)//threadblock[0])\n gridv = ( (v.shape[1]+threadblock[1]-1)//threadblock[1],\n (v.shape[0]+threadblock[0]-1)//threadblock[0])\n gridn = ( (n.shape[1]+threadblock[1]-1)//threadblock[1],\n (n.shape[0]+threadblock[0]-1)//threadblock[0])\n #gridv = ( (cv.shape[1]+32)//32,(cv.shape[0]+32)//32)\n #gridn = ( (cn.shape[1]+32)//32,(cn.shape[0]+32)//32)\n \n dudt_x = dudt_drive_cuda#[gridu,threadblock] # these override the inputs\n dvdt_x = dvdt_drive_cuda#[gridv,threadblock]\n dndt_x = dndt_drive_cuda#[gridn,threadblock]\n \n \n \n du0 = nb.cuda.device_array_like(u)\n dv0 = nb.cuda.device_array_like(v)\n dn0 = nb.cuda.device_array_like(n)\n \n cdu = (du0, nb.cuda.device_array_like(du0), nb.cuda.device_array_like(du0), nb.cuda.device_array_like(du0) )\n print(gridu,threadblock,dx,dy)\n print (du0.shape,h.shape,n.shape,f.shape,u.shape,v.shape)\n cdn = (dn0, nb.cuda.device_array_like(dn0), nb.cuda.device_array_like(dn0))\n \n dndt_x[gridn,threadblock](ch, cn, cu, cv, dx, dy, dn0)\n \n for d in cdn:\n d[:] = dn0[:]\n \n \n cdv = (dv0, nb.cuda.device_array_like(dv0), nb.cuda.device_array_like(dv0), 
nb.cuda.device_array_like(dv0))\n \n \n rr =dvdt_x[gridv,threadblock]\n \n h[:]=ch[:]\n print ('h',np.mean(h),h)\n n[:]=cn[:]\n print ('n',np.mean(n),n)\n f[:]=cf[:]\n # print ('f',np.mean(f),f)\n u[:]=cu[:]\n print ('u',np.mean(u),u)\n v[:]=cv[:]\n print ('v',np.mean(v),v)\n v0 =dv0.copy_to_host()\n print(\"dv0\",np.mean(v0),np.max(v0),np.min(v0),type(v0),type(dv0),dv0.shape,v0.shape,dv0,v0)\n print(\"dx,dy\",type(dx),type(dy),dx,dy)\n \n rr(ch, cn, cf, cu, cv, dx, dy, dv0)\n\n for d in cdv:\n d[:] = dv0[:]\n \n \n \n \n rr= dudt_x[gridu,threadblock]\n \n rr(ch, cn, cf, cu, cv, dx, dy, du0)\n\n for d in cdu:\n d[:] = du0[:]\n\n\n\n \n\n \n\n \n # cdu = tuple( [ nb.cuda.to_device(np.zeros_like(u)) for i in range(4)]) # not a device tuple!\n # cdv = tuple( [ nb.cuda.to_device(i) for i in dv]) # a host tuple of device arrays\n # cdn = tuple( [ nb.cuda.to_device(i) for i in dn])\n print (gridu,gridv,gridn)\n \n land = land_cuda#[(gridu[0],gridv[1]),threadblock] # is this grid right \n border = border_cuda#[(gridu[0],gridv[1]),threadblock]\n \n nb.cuda.synchronize() # needed?\n print('simulating...')\n try:\n for itr in range(itrs):# iterate for the given number of iterations\n if itr%saveinterval == 0:\n ntt[np.int(itr/saveinterval),:,:] = cn.copy_to_host()\n print(np.argmax( ntt[np.int(itr/saveinterval),:,:],axis=0)[5])\n \n cdu, cdv, cdn = timestep(ch, cn, cu, cv, cf, dt, dx, dy, cdu,cdv,cdn,gridu,gridv,gridn,threadblock, 0.281105, 0.013, 0.0880, 0.3, 0, dudt_x, dvdt_x, dndt_x, grav=True, cori=False, advx=False, advy=False, attn=False ) # pushes n, u, v one step into the future\n# if itr%100 == 0: \n# print(cdn)\n# print(cn)\n# land(h, n, u, v, coastx) # how to handle land/coast\n# border(n, u, v, 15, bounds) \n# drive(h, n, u, v, f, dt, dx, dy, nu, coastx, bounds, mu, itr)\n cudamax(cn,cmaxn)\n print('simulation complete')\n except Exception as e:\n print('timestep: ', itr)\n raise e\n maxn = cmaxn.copy_to_host()\n \n return ntt, maxn#.copy_to_host()#, minn, timemax # return surface height through time and maximum heights", "def __init__(self, *args, **kwargs):\r\n super(Target, self).__init__(*args, **kwargs)\r\n self.speed = kwargs.get('speed', 5)\r\n self.name = kwargs.get('name', 'andreas')\r\n self.state = kwargs.get('state', 'moving')\r\n self.z = kwargs.get('z', 2.5)\r\n pyglet.media.load('assets\\\\pappa1.wav', streaming=False).play()\r\n self.images = {\r\n 'andreas' : [\r\n pyglet.image.load('assets\\\\andreas.png'),\r\n pyglet.image.load('assets\\\\andreas2.png'),\r\n ]\r\n }\r\n\r\n for image in self.images[self.name]:\r\n image.anchor_x = int( image.width / 2 )\r\n image.anchor_y = int( image.height / 2 )\r\n\r\n self.image = self.images[self.name][0]\r\n self.width = self.image.width\r\n self.height = self.image.height\r\n self.anchor_x = self.image.anchor_x\r\n self.anchor_y = self.image.anchor_y\r\n self.sprite = pyglet.sprite.Sprite(self.image, self.x, self.y)\r\n\r\n self.min_x = self.x - self.anchor_x\r\n self.min_y = self.x - self.anchor_y\r\n self.max_x = self.y + self.anchor_x\r\n self.max_y = self.y + self.anchor_y\r\n\r\n self.x_direction = 1\r\n self.y_direction = 1\r\n self.hold = 64\r\n self.opacity = 255\r\n\r\n self.sprite.update(scale = self.z)\r\n self.mul_z = 2.5 # placeholder / mutator for z\r\n\r\n self.prob_change_x = random.random()\r\n self.prob_change_y = random.random()\r\n self.last_change_x_time = int(t.time())\r\n self.last_change_y_time = int(t.time())\r\n self.id = 0", "def generate_art_3(filename, x_size=350, y_size=350, t_size=30):\n # 
Functions for red, green, and blue channels - where the magic happens!\n r_lb = random.randint(1, 5)\n g_lb = random.randint(1, 10)\n b_lb = random.randint(1, 5)\n red_function = build_random_function_3(r_lb, r_lb+1)\n green_function = build_random_function_3(g_lb, g_lb+1)\n blue_function = build_random_function_3(b_lb, b_lb+1)\n\n # Create image and loop over all pixels\n im = Image.new(\"RGB\", (x_size, y_size))\n pixels = im.load()\n for time in range(t_size):\n for i in range(x_size):\n for j in range(y_size):\n t = remap_interval(time, 0, t_size, -1, 1)\n x = remap_interval(i, 0, x_size, -1, 1)\n y = remap_interval(j, 0, y_size, -1, 1)\n pixels[i, j] = (\n color_map(eval_r_func_3(red_function, x, y, t)),\n color_map(eval_r_func_3(green_function, x, y, t)),\n color_map(eval_r_func_3(blue_function, x, y, t))\n )\n str_num = '0' * (5 - len(str(time))) + str(time)\n print(str_num)\n im.save(filename + str_num + '.png')\n return 'saved'", "def setTexture(self,pathToNewTexture):\n self.spritePath=pathToNewTexture\n self.spriteImageFile=(Image.open(self.spritePath))\n self.reDraw()", "def __init__(self, parent):\n super(Demo5, self).__init__(parent)\n self.angle = 0.0\n self.replication = 1.0\n self.offset = 0.0\n self.deltaRep = 1\n self.revolution = 0\n self.stepsPer90 = 180\n self.stepsLeft = self.stepsPer90\n self.deltaAng = 90.0\n self.deltaOff = 0.15\n self.spin = True\n self.x2yAspect = 1.0\n self.texture = None", "def _init_anim(self):\n pass", "def draw_environment():\n rect(screen, LIGHT_GRAY, (0, 0, 800, 450)) # grey sky\n rect(screen, WHITE, (0, 450, 800, 1000)) # white ground", "def setup_texture(func):\n\n def new_func():\n \"\"\"Create png image and pass to func\"\"\"\n dirname, _ = os.path.split(os.path.abspath(__file__))\n pngfile = os.path.sep.join([dirname, \"out.png\"])\n img = [\"110010010011\", \"101011010100\", \"110010110101\", \"100010010011\"]\n img = [[int(val) for val in value] for value in img]\n writer = png.Writer(len(img[0]), len(img), greyscale=True, bitdepth=16)\n with open(pngfile, \"wb\") as file:\n writer.write(file, img)\n try:\n func(pngfile)\n finally:\n os.remove(pngfile)\n\n return new_func", "def get_movie_texture():\n\n global surface\n global surface_file\n\n playing = renpy.audio.music.get_playing(\"movie\")\n\n pss = renpy.audio.audio.pss\n\n if pss:\n size = pss.movie_size()\n else:\n size = (64, 64)\n\n if (surface is None) or (surface.get_size() != size) or (surface_file != playing):\n surface = renpy.display.pgrender.surface(size, False)\n surface_file = playing\n surface.fill((0, 0, 0, 255))\n\n tex = None\n\n if playing is not None:\n renpy.display.render.mutated_surface(surface)\n tex = renpy.display.draw.load_texture(surface, True)\n\n return tex", "def redraw(self):\n self.update_spin()\n glMatrixMode( GL_MODELVIEW )\n glLoadIdentity()\n\n glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT )\n self.SetCurrent()\n texture_id = self.texture.texture_id\n width = self.texture.GetWidth()\n height = self.texture.GetHeight()\n\n self.texture.load_jpeg('Sunrise.jpg')\n self.texture.enable()\n\n glTranslatef( 0.0, 0.0, -5.0 )\n glRotatef( self.angle, 0, 1.0, 0 )\n yscale = 1.75\n xscale = yscale * self.x2yAspect\n\n glScalef( xscale, yscale, 2.0 )\n\n glBegin( GL_QUADS )\n # Lower left quad corner\n glTexCoord2f( self.offset, self.offset )\n glVertex3f(-1.0, -1.0, 0.0)\n\n # Lower right quad corner\n glTexCoord2f( self.replication + self.offset, self.offset )\n glVertex3f(1.0, -1.0, 0.0)\n\n # Upper right quad corner\n glTexCoord2f( 
self.replication + self.offset, self.replication + self.offset )\n glVertex3f(1.0, 1.0, 0.0)\n\n # Upper left quad corner\n glTexCoord2f( self.offset, self.replication + self.offset )\n glVertex3f(-1.0, 1.0, 0.0)\n glEnd()\n\n self.texture.disable()\n glutSwapBuffers()", "def expose_test(self):\n with self.lock:\n self.dark = 1\n self.tstart = time.time()\n self.timestamp = time.strftime(\"%Y-%m-%dT%H:%M:%S\", time.localtime(self.tstart))\n imagesize = (self.expArea[3] - self.expArea[1],\n self.expArea[2] - self.expArea[0])\n self.data = np.ones(shape=imagesize, dtype=np.uint16)\n self.tend = time.time()", "def main():\n timer_total_start = time.time()\n rospy.init_node(\"ReplayBufferFiller\")\n rospy.loginfo(\"----- Replay Buffer Filler -----\")\n\n ground_list = [\"water1\",\n \"water2\",\n \"water3\",\n \"water4\",\n \"water5\",\n \"water6\",\n \"water7\",\n \"water8\",\n \"water9\",\n \"water10\"]\n\n replay_memory_size = 400000\n replay_buffer_path = \"./replay_buffer.pickle\"\n # replay_buffer_path_positive = \"./replay_buffer_positive.pickle\"\n # replay_buffer_path_negative = \"./replay_buffer_negative.pickle\"\n replay_buffer = ExperienceReplayBuffer(capacity=replay_memory_size)\n # replay_buffer_positive = ExperienceReplayBuffer(capacity=replay_memory_size)\n # replay_buffer_negative = ExperienceReplayBuffer(capacity=replay_memory_size)\n # Load the Replay buffer from file or accumulate experiences\n if(os.path.isfile(replay_buffer_path) == True): \n print(\"Replay buffer loading from file: \" +\n str(replay_buffer_path))\n replay_buffer.load(replay_buffer_path)\n else:\n\t print('No buffer_1 found')\n\n # if(os.path.isfile(replay_buffer_path_positive) == True): \n # print(\"Replay buffer loading from file: \" +\n # str(replay_buffer_path_positive))\n # replay_buffer_positive.load(replay_buffer_path_positive)\n # else:\n\t# print('No buffer_2 found') \n\n # if(os.path.isfile(replay_buffer_path_negative) == True): \n # print(\"Replay buffer loading from file: \" +\n # str(replay_buffer_path_negative))\n # replay_buffer_negative.load(replay_buffer_path_negative)\n # else:\n\t# print('No buffer_2 found') \n \n \n # Create a subscriber fot the greyscale image\n rospy.Subscriber(\"/quadrotor/ardrone/bottom/ardrone/bottom/image_raw\", ROSImage, image_callback)\n\n images_stack_size = 4\n tot_steps = 3000000 # finite-horizont simulation\n frame_preliminary = 0\n\n saving_every_tot_experiences = 2500\n is_buffer_saved = True\n\n noop_time = 2.0 # pause in seconds between actions\n steps_per_episodes = 30\n #saving_every_tot_experiences = 450 #TODO SET TO 250 JUST FOR TEST\n #r = rospy.Rate(10) # 10hz\n num_ground_plane = 15\n frame_per_ground_plane = int(replay_memory_size / num_ground_plane)\n frame_per_ground_plane = 3125 #!M positive / 4 classes / 10 grounds / 8 transformations\n actual_ground_index = 0\n episode_per_ground = 50\n #ground_counter = replay_buffer_positive.return_size() / frame_per_ground_plane\n ground_counter = 1\n positive_experience_counter = 0\n positive_experience_print_episode = 50\n old_positive_experience_counter = 0\n total_experience_counter = 0.0\n old_total_experience_counter = 0.0001\n episode = 1\n wrong_altitude = False\n quadrotor_pose = ModelState()\n quadrotor_pose.model_name = \"quadrotor\"\n quadrotor_pose.reference_frame = \"world\"\n while True:\n # if replay_buffer_positive.return_size() >= replay_memory_size:\n # break\n\n # if replay_buffer_positive.return_size() <= ground_counter * frame_per_ground_plane and episode != 1:\n # pass\n # else:\n 
# print ground_counter\n # generate_new_world(ground_list, ground_counter)\n # ground_counter = ground_counter + 1\n if(ground_counter < episode_per_ground) and episode != 1:\n ground_counter = ground_counter + 1\n else:\n ground = choose_random_ground(ground_list)\n generate_new_world(ground, ground_list)\n ground_counter = 1\n\n cumulated_reward = 0\n print \"\"\n print \"Preliminary Episode: \" + str(episode)\n print \"Ground counter value: \" + str(ground_counter)\n # Reset UAV at random pose\n reset_pose()\n send_action('stop')\n rospy.sleep(3.0)\n #get_image()\n image_t = _last_image\n # When the replay buffer is empty, fill it with the same picture 4\n # times\n image_t = np.stack([image_t] * images_stack_size, axis=2) # create a stack of X images\n timer_start = time.time()\n actual_time = rospy.get_rostime()\n \trospy_start_time = actual_time.secs + actual_time.nsecs / 1000000000.0\n frame_episode = 0\n \n done_reward = get_done_reward()\n update_quadrotor_pose(quadrotor_pose, done_reward)\n \n for step in range(tot_steps):\n # Execute a random action in the world and observe the reward and\n # state_t1.\n action = get_random_action()\n send_action(action)\n if action == \"descend\":\n # setpoint = round( quadrotor_pose.pose.position.z ) - 0.8\n # while True:\n # done_reward = get_done_reward()\n # update_quadrotor_pose(quadrotor_pose, done_reward)\n # if quadrotor_pose.pose.position.z < setpoint + 0.05 and quadrotor_pose.pose.position.z > setpoint - 0.05:\n # print \"Setpoint: \" + str(setpoint)\n # send_action(\"stop\")\n # rospy.sleep(2.0)\n # break\n rospy.sleep(5.0)\n send_action(\"stop\")\n rospy.sleep(1.0)\n #quadrotor_pose.pose.position.z = adjust_altitude(quadrotor_pose.pose.position.z)\n #set_pose(quadrotor_pose)\n else:\n #print \"Action taken: \" + action\n #send_action(action)\n rospy.sleep(noop_time)\n # Acquire a new frame and convert it in a numpy array\n image_t1 = _last_image\n done_reward = get_done_reward()\n send_action(\"stop\") #NOTE: moved here to fix problem with baricenter (partially reduced)\n\n # Get the reward and done status\n\n reward = done_reward.reward\n done = done_reward.done\n print \"Step(\" + str(step) + \"), Action: \" + action + \", Altitude: \" + str(done_reward.z) + \", Reward: \" + str(reward)\n wrong_altitude = done_reward.wrong_altitude\n if wrong_altitude == True:\n rospy.logerr(\"[ERROR] Wrong altitude!\")\n # Calculate the new cumulated_reward\n cumulated_reward += reward\n # state_t1, reward, done, info = env.step(action)\n image_t1 = np.expand_dims(image_t1, 2)\n # stack the images\n image_t1 = np.append(image_t[:, :, 1:], image_t1, axis=2)\n # Store the experience in the replay buffer\n if reward > 0:\n if action == \"descend\":\n # replay_buffer_positive.add_experience(image_t, action, reward, image_t1, done)\n # is_buffer_saved = False\n pass\n else:\n rospy.logerr(\"[POSITIVE]Wrong action for positive reward: %s\", action)\n elif reward == -1.0:\n if action == \"descend\":\n # replay_buffer_negative.add_experience(image_t, action, reward, image_t1, done)\n pass\n else:\n rospy.logerr(\"[NEGATIVE]Wrong action for negative reward: %s\", action)\n else:\n # pass\n replay_buffer.add_experience(image_t, action, reward, image_t1, done)\n frame_preliminary += 1 # To call every time a frame is obtained\n total_experience_counter += 1\n image_t = image_t1\n timer_episode_stop = time.time()\n frame_episode +=1\n update_quadrotor_pose(quadrotor_pose, done_reward)\n \n #rospy.sleep(2.0) #NOTE: fix the descend bug affecting the altitude\n 
if frame_episode >= steps_per_episodes:\n\t done = True\n # Save the buffer every 25000 experiences\n # if replay_buffer_positive.return_size() % saving_every_tot_experiences == 0 and is_buffer_saved == False:\n if replay_buffer.return_size() % saving_every_tot_experiences == 0 :\n timer_start = time.time()\n print(\"\")\n print(\"Saving the replay buffer in: \" + replay_buffer_path)\n print(\"Sit back and relax, it may take a while...\")\n replay_buffer.save(replay_buffer_path)\n timer_stop = time.time()\n print \"Time episode: \" + str(timer_stop - timer_start) + \" seconds\"\n print \"Time episode: \" + str((timer_stop - timer_start) / 60) + \" minutes\"\n print(\"Done!\")\n # timer_start = time.time()\n # print(\"\")\n # print(\"Saving the replay buffer in: \" + replay_buffer_path_positive)\n # print(\"Sit back and relax, it may take a while...\")\n # replay_buffer_positive.save(replay_buffer_path_positive)\n # timer_stop = time.time()\n # print \"Time episode: \" + str(timer_stop - timer_start) + \" seconds\"\n # print \"Time episode: \" + str((timer_stop - timer_start) / 60) + \" minutes\"\n # print(\"Done!\")\n # print(\"\")\n # print(\"Saving the replay buffer in: \" + replay_buffer_path_negative)\n # print(\"Sit back and relax, it may take a while...\")\n # replay_buffer_negative.save(replay_buffer_path_negative)\n # timer_stop = time.time()\n # print \"Time episode: \" + str(timer_stop - timer_start) + \" seconds\"\n # print \"Time episode: \" + str((timer_stop - timer_start) / 60) + \" minutes\"\n # print(\"Done!\")\n # print(\"\")\n # is_buffer_saved = True\n if done:\n episode += 1\n timer_stop = time.time()\n actual_time = rospy.get_rostime()\n rospy_stop_time = actual_time.secs + actual_time.nsecs / 1000000000.0\n rospy_time_elapsed = rospy_stop_time - rospy_start_time\n print \"Replay Buffer Size: \" + str(replay_buffer.return_size()) + \" out of \" + str(replay_memory_size)\n # print \"Replay Buffer Positive Size: \" + str(replay_buffer_positive.return_size()) + \" out of \" + str(replay_memory_size)\n # print \"Replay Buffer Negative Size: \" + str(replay_buffer_negative.return_size()) + \" out of \" + str(replay_memory_size)\n print \"Frame counter: \" + str(frame_preliminary)\n print \"Time episode: \" + str(timer_stop - timer_start) + \" seconds\"\n print( \"Ros time episode: \" + str(rospy_time_elapsed) + \" seconds\")\n if cumulated_reward >= 0:\n rospy.logwarn(\"Positive reward obtained!\")\n print \"Cumulated reward: \" + str(cumulated_reward)\n print \"Episode finished after {} timesteps\".format(step + 1)\n break\n\n # timer_total_stop = time.time()\n # print \"Total time simulation: \" + str((timer_total_stop - timer_total_start) / 60.0) + \" minutes\"\n # print \"Total time simulation: \" + str((timer_total_stop - timer_total_start) / 3600.0) + \" hours\"\n # # Once the buffer is filled, save it to disk\n # timer_saving_start = time.time()\n # print \"Saving the replay buffer in: \" + replay_buffer_positive_path\n # print \"Sit back and relax, it may take a while...\"\n # replay_buffer_positive.save(replay_buffer_positive_path)\n # print \"Done!\"\n # timer_saving_stop = time.time()\n # print \"Time to save the buffer: \" + str(timer_saving_stop - timer_saving_start) + \" seconds\"\n # print \"Time to save the buffer: \" + str((timer_saving_stop - timer_saving_start) / 60) + \" minutes\"\n # timer_saving_start = time.time()\n # print \"Saving the replay buffer in: \" + replay_buffer_negative_path\n # print \"Sit back and relax, it may take a while...\"\n # 
replay_buffer_negative.save(replay_buffer_negative_path)\n # print \"Done!\"\n # timer_saving_stop = time.time()\n # print \"Time to save the buffer: \" + str(timer_saving_stop - timer_saving_start) + \" seconds\"\n # print \"Time to save the buffer: \" + str((timer_saving_stop - timer_saving_start) / 60) + \" minutes\"\n # Shutdown the node\n rospy.signal_shutdown(\"Rospy Shutdown!\")", "def main():\n\n with its.device.ItsSession() as cam:\n\n props = cam.get_camera_properties()\n its.caps.skip_unless(its.caps.raw16(props) and\n its.caps.manual_sensor(props) and\n its.caps.read_3a(props) and\n its.caps.per_frame_control(props) and\n not its.caps.mono_camera(props))\n debug = its.caps.debug_mode()\n\n # Expose for the scene with min sensitivity\n exp_min, exp_max = props[\"android.sensor.info.exposureTimeRange\"]\n sens_min, _ = props[\"android.sensor.info.sensitivityRange\"]\n # Digital gains might not be visible on RAW data\n sens_max = props[\"android.sensor.maxAnalogSensitivity\"]\n sens_step = (sens_max - sens_min) / NUM_ISO_STEPS\n white_level = float(props[\"android.sensor.info.whiteLevel\"])\n black_levels = [its.image.get_black_level(i,props) for i in range(4)]\n # Get the active array width and height.\n aax = props[\"android.sensor.info.activeArraySize\"][\"left\"]\n aay = props[\"android.sensor.info.activeArraySize\"][\"top\"]\n aaw = props[\"android.sensor.info.activeArraySize\"][\"right\"]-aax\n aah = props[\"android.sensor.info.activeArraySize\"][\"bottom\"]-aay\n raw_stat_fmt = {\"format\": \"rawStats\",\n \"gridWidth\": aaw/IMG_STATS_GRID,\n \"gridHeight\": aah/IMG_STATS_GRID}\n\n e_test = []\n mult = 1.0\n while exp_min*mult < exp_max:\n e_test.append(int(exp_min*mult))\n mult *= EXP_MULT\n if e_test[-1] < exp_max * INCREASING_THR:\n e_test.append(int(exp_max))\n e_test_ms = [e / 1000000.0 for e in e_test]\n\n for s in range(sens_min, sens_max, sens_step):\n means = []\n means.append(black_levels)\n reqs = [its.objects.manual_capture_request(s, e, 0) for e in e_test]\n # Capture raw in debug mode, rawStats otherwise\n caps = []\n for i in range(len(reqs) / SLICE_LEN):\n if debug:\n caps += cam.do_capture(reqs[i*SLICE_LEN:(i+1)*SLICE_LEN], cam.CAP_RAW)\n else:\n caps += cam.do_capture(reqs[i*SLICE_LEN:(i+1)*SLICE_LEN], raw_stat_fmt)\n last_n = len(reqs) % SLICE_LEN\n if last_n == 1:\n if debug:\n caps += [cam.do_capture(reqs[-last_n:], cam.CAP_RAW)]\n else:\n caps += [cam.do_capture(reqs[-last_n:], raw_stat_fmt)]\n elif last_n > 0:\n if debug:\n caps += cam.do_capture(reqs[-last_n:], cam.CAP_RAW)\n else:\n caps += cam.do_capture(reqs[-last_n:], raw_stat_fmt)\n\n # Measure the mean of each channel.\n # Each shot should be brighter (except underexposed/overexposed scene)\n for i,cap in enumerate(caps):\n if debug:\n planes = its.image.convert_capture_to_planes(cap, props)\n tiles = [its.image.get_image_patch(p, 0.445, 0.445, 0.11, 0.11) for p in planes]\n mean = [m * white_level for tile in tiles\n for m in its.image.compute_image_means(tile)]\n img = its.image.convert_capture_to_rgb_image(cap, props=props)\n its.image.write_image(img, \"%s_s=%d_e=%05d.jpg\" % (NAME, s, e_test))\n else:\n mean_image, _ = its.image.unpack_rawstats_capture(cap)\n mean = mean_image[IMG_STATS_GRID/2, IMG_STATS_GRID/2]\n\n print \"ISO=%d, exposure time=%.3fms, mean=%s\" % (\n s, e_test[i] / 1000000.0, str(mean))\n means.append(mean)\n\n\n # means[0] is black level value\n r = [m[0] for m in means[1:]]\n gr = [m[1] for m in means[1:]]\n gb = [m[2] for m in means[1:]]\n b = [m[3] for m in 
means[1:]]\n\n pylab.plot(e_test_ms, r, \"r.-\")\n pylab.plot(e_test_ms, b, \"b.-\")\n pylab.plot(e_test_ms, gr, \"g.-\")\n pylab.plot(e_test_ms, gb, \"k.-\")\n pylab.xscale('log')\n pylab.yscale('log')\n pylab.title(\"%s ISO=%d\" % (NAME, s))\n pylab.xlabel(\"Exposure time (ms)\")\n pylab.ylabel(\"Center patch pixel mean\")\n matplotlib.pyplot.savefig(\"%s_s=%d.png\" % (NAME, s))\n pylab.clf()\n\n allow_under_saturated = True\n for i in xrange(1, len(means)):\n prev_mean = means[i-1]\n mean = means[i]\n\n if np.isclose(max(mean), white_level, rtol=SATURATION_TOL):\n print \"Saturated: white_level %f, max_mean %f\"% (white_level, max(mean))\n break;\n\n if allow_under_saturated and np.allclose(mean, black_levels, rtol=BLK_LVL_TOL):\n # All channel means are close to black level\n continue\n\n allow_under_saturated = False\n # Check pixel means are increasing (with small tolerance)\n channels = [\"Red\", \"Gr\", \"Gb\", \"Blue\"]\n for chan in range(4):\n err_msg = \"ISO=%d, %s, exptime %3fms mean: %.2f, %s mean: %.2f, TOL=%.f%%\" % (\n s, channels[chan],\n e_test_ms[i-1], mean[chan],\n \"black level\" if i == 1 else \"exptime %3fms\"%e_test_ms[i-2],\n prev_mean[chan],\n INCREASING_THR*100)\n assert mean[chan] > prev_mean[chan] * INCREASING_THR, err_msg", "def createBasicRenderSetup():\n\n pass", "def rrhr(band,skypos,tranges,skyrange,width=False,height=False,stepsz=1.,\n\t\t verbose=0,calpath='../cal/',tscale=1000.,response=True,hdu=False,\n\t\t retries=20):\n\timsz = gxt.deg2pix(skypos,skyrange)\n\t# TODO the if width / height\n\n\tflat = get_fits_data(flat_filename(band,calpath),verbose=verbose)\n\tflatinfo = get_fits_header(flat_filename(band,calpath))\n\tnpixx,npixy \t= flat.shape\n\tfltsz \t\t= flat.shape\n\tpixsz = flatinfo['CDELT2']\n\tdetsize = 1.25\n\n\t# Rotate the flat into the correct orientation to start.\n\tflat = np.flipud(np.rot90(flat))\n\n\t# NOTE: This upsample interpolation is done _last_ in the canonical\n\t#\tpipeline as part of the poissonbg.c routine.\n\t# \tThe interpolation function is \"congrid\" in the same file.\n\t# TODO: Should this be first order interpolation? (i.e. bilinear)\n\thrflat = scipy.ndimage.interpolation.zoom(flat,4.,order=0,prefilter=False)\n\timg = np.zeros(hrflat.shape)[\n\t\t\t\thrflat.shape[0]/2.-imsz[0]/2.:hrflat.shape[0]/2.+imsz[0]/2.,\n\t\t\t\thrflat.shape[1]/2.-imsz[1]/2.:hrflat.shape[1]/2+imsz[1]/2.]\n\n\tfor trange in tranges:\n\t\tt0,t1=trange\n\t\tentries = gQuery.getArray(gQuery.aspect(t0,t1),retries=retries)\n\t\tn = len(entries)\n\n\t\tasptime = np.float64(np.array(entries)[:,2])/tscale\n\t\taspra = np.float32(np.array(entries)[:,3])\n\t\taspdec = np.float32(np.array(entries)[:,4])\n\t\tasptwist= np.float32(np.array(entries)[:,5])\n\t\taspflags= np.float32(np.array(entries)[:,6])\n\t\tasptwist= np.float32(np.array(entries)[:,9])\n\t\taspra0 = np.zeros(n)+skypos[0]\n\t\taspdec0 = np.zeros(n)+skypos[1]\n\n\t\txi_vec, eta_vec = gnomonic.gnomfwd_simple(\n\t\t\t\t\t\t\taspra,aspdec,aspra0,aspdec0,-asptwist,1.0/36000.,0.)\n\n\t\tcol = 4.*( ((( xi_vec/36000.)/(detsize/2.)*(detsize/(fltsz[0]*pixsz)) + 1.)/2. * fltsz[0]) - (fltsz[0]/2.) )\n\t\trow = 4.*( (((eta_vec/36000.)/(detsize/2.)*(detsize/(fltsz[1]*pixsz)) + 1.)/2. * fltsz[1]) - (fltsz[1]/2.) 
)\n\n\t\tvectors = rotvec(np.array([col,row]),-asptwist)\n\n\t\tfor i in range(n):\n\t\t\tif verbose>1:\n\t\t\t\tprint_inline('Stamping '+str(asptime[i]))\n\t\t\t\t# FIXME: Clean this mess up a little just for clarity.\n\t \timg += scipy.ndimage.interpolation.shift(scipy.ndimage.interpolation.rotate(hrflat,-asptwist[i],reshape=False,order=0,prefilter=False),[vectors[1,i],vectors[0,i]],order=0,prefilter=False)[hrflat.shape[0]/2.-imsz[0]/2.:hrflat.shape[0]/2.+imsz[0]/2.,hrflat.shape[1]/2.-imsz[1]/2.:hrflat.shape[1]/2+imsz[1]/2.]*dbt.compute_exptime(band,[asptime[i],asptime[i]+1],verbose=verbose,retries=retries)*gxt.compute_flat_scale(asptime[i]+0.5,band,verbose=0)\n\n\treturn img", "def img_map(ts):\n image_map = \"\"\n texdata = bpy.data.textures[ts.texture]\n if ts.mapping == \"FLAT\":\n image_map = \"map_type 0 \"\n elif ts.mapping == \"SPHERE\":\n image_map = \"map_type 1 \"\n elif ts.mapping == \"TUBE\":\n image_map = \"map_type 2 \"\n\n # map_type 3 and 4 in development (?) (ENV in pov 3.8)\n # for POV-Ray, currently they just seem to default back to Flat (type 0)\n # elif ts.mapping==\"?\":\n # image_map = \" map_type 3 \"\n # elif ts.mapping==\"?\":\n # image_map = \" map_type 4 \"\n if ts.use_interpolation: # Available if image sampling class reactivated?\n image_map += \" interpolate 2 \"\n if texdata.extension == \"CLIP\":\n image_map += \" once \"\n # image_map += \"}\"\n # if ts.mapping=='CUBE':\n # image_map+= \"warp { cubic } rotate <-90,0,180>\"\n # no direct cube type mapping. Though this should work in POV 3.7\n # it doesn't give that good results(best suited to environment maps?)\n # if image_map == \"\":\n # print(\" No texture image found \")\n return image_map", "def __init__(self, start_color=Pigment(RGBColor(1.0, 1.0, 1.0)), end_color=Pigment(RGBColor(0.5, 0.7, 1.0)), axis=1):\n super().__init__(\"gradient_texture\")\n self.start_color = start_color\n self.end_color = end_color\n self.axis = axis", "def main():\n global TURRET\n os.environ['SDL_VIDEO_CENTERED'] = '1'\n pg.init()\n pg.display.set_caption(CAPTION)\n pg.display.set_mode(SCREEN_SIZE)\n TURRET = pg.image.load(\"turret.png\").convert()\n TURRET.set_colorkey(COLOR_KEY)\n Control().main_loop()\n pg.quit()\n sys.exit()", "def __init__(self):\n self.monsters_images = pg.sprite.Group()\n self.font_23 = pg.font.Font(prepare.FONTS['Timeless-Bold'], 23)\n self.font_20 = pg.font.Font(prepare.FONTS['Timeless'], 20)\n self.font_18 = pg.font.Font(prepare.FONTS['Timeless'], 18)\n self.bold_font = pg.font.Font(prepare.FONTS['Timeless-Bold'], 17)\n self.font_15 = pg.font.Font(prepare.FONTS['Timeless'], 15)\n\n self.init_left_zone()\n self.init_middle_zone()\n self.init_right_zone()", "def __init__(self, renderSurf):\n self.surf = renderSurf", "def preDraw(self, xform=None, bbox=None):\n\n self.modulateTexture.bindTexture(gl.GL_TEXTURE0)\n self.clipTexture .bindTexture(gl.GL_TEXTURE1)\n self.colourTexture .bindTexture(gl.GL_TEXTURE2)\n self.cmapTexture .bindTexture(gl.GL_TEXTURE3)", "def end_cast(self):\r\n #draw the actual map\r\n self.emap.draw(shader=self.mshader, camera=self.camera)\r\n super(ShadowCaster, self)._end()\r\n # set third texture to this ShadowCaster texture\r\n texs = self.emap.buf[0].textures\r\n if len(texs) == 2:\r\n texs.append(self)\r\n else:\r\n texs[2] = self\r\n # change background back to blue\r\n opengles.glClearColor(ctypes.c_float(0.4), ctypes.c_float(0.8), \r\n ctypes.c_float(0.8), ctypes.c_float(1.0))\r\n # work out left, top, right, bottom for shader\r\n self.emap.unif[48] = 0.5 * (1.0 + 
self.scaleu) # left [16][0]\r\n self.emap.unif[49] = 0.5 * (1.0 + self.scalev) # top [16][1]\r\n self.emap.unif[51] = 1.0 - self.emap.unif[48] # right [17][0]\r\n self.emap.unif[52] = 1.0 - self.emap.unif[49] # bottom [17][1]\r\n \r\n du = float(self.location[0] / self.emap.width)\r\n dv = float(self.location[2] / self.emap.depth)\r\n self.emap.unif[48] -= self.scaleu * (du if self.emap.unif[50] == 1.0 else dv)\r\n self.emap.unif[49] += self.scalev * (dv if self.emap.unif[50] == 1.0 else du)\r\n self.emap.unif[51] -= self.scaleu * (du if self.emap.unif[50] == 1.0 else dv)\r\n self.emap.unif[52] += self.scalev * (dv if self.emap.unif[50] == 1.0 else du)", "def enable(self):\n\t\tglEnable(GL_TEXTURE_3D)\n\t\tglTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_REPEAT)\n\t\tglTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_REPEAT)\n\t\tglTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_REPEAT)\n\t\tglTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n\t\tglTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)", "def texture( self, mode ):\n texture = mode.cache.getData( self, 'texture' )\n if texture is None:\n texture = glGenTextures( 1 )\n holder = mode.cache.holder( self, texture, 'texture' )\n return texture", "def texturemapGridSequence():\n fn = 'GridVideos/grid1.mp4'\n cap = cv2.VideoCapture(fn)\n drawContours = True;\n\n texture = cv2.imread('Images/ITULogo.jpg')\n texture = cv2.pyrDown(texture)\n\n\n mTex,nTex,t = texture.shape\n\n #load Tracking data\n running, imgOrig = cap.read()\n mI,nI,t = imgOrig.shape\n\n cv2.imshow(\"win2\",imgOrig)\n\n pattern_size = (9, 6)\n\n idx = [0,8,45,53]\n while(running):\n #load Tracking data\n running, imgOrig = cap.read()\n if(running):\n imgOrig = cv2.pyrDown(imgOrig)\n gray = cv2.cvtColor(imgOrig,cv2.COLOR_BGR2GRAY)\n found, corners = cv2.findChessboardCorners(gray, pattern_size)\n if found:\n term = ( cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_COUNT, 30, 0.1 )\n cv2.cornerSubPix(gray, corners, (5, 5), (-1, -1), term)\n cv2.drawChessboardCorners(imgOrig, pattern_size, corners, found)\n\n for t in idx:\n cv2.circle(imgOrig,(int(corners[t,0,0]),int(corners[t,0,1])),10,(255,t,t))\n cv2.imshow(\"win2\",imgOrig)\n cv2.waitKey(1)", "def workflow(now, realtime):\n szx = 7000\n szy = 3500\n # Create the image data\n imgdata = np.zeros((szy, szx), 'u1')\n sts = now - datetime.timedelta(minutes=2)\n metadata = {'start_valid': sts.strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n 'end_valid': now.strftime(\"%Y-%m-%dT%H:%M:%SZ\"),\n 'product': 'a2m',\n 'units': '0.02 mm'}\n\n gribfn = mrms.fetch('PrecipRate', now)\n if gribfn is None:\n print((\"mrms_rainrate_comp.py NODATA for PrecipRate: %s\"\n ) % (now.strftime(\"%Y-%m-%dT%H:%MZ\"),))\n return\n\n # http://www.nssl.noaa.gov/projects/mrms/operational/tables.php\n # Says units are mm/hr\n fp = gzip.GzipFile(gribfn, 'rb')\n (_, tmpfn) = tempfile.mkstemp()\n tmpfp = open(tmpfn, 'wb')\n tmpfp.write(fp.read())\n tmpfp.close()\n grbs = pygrib.open(tmpfn)\n grb = grbs[1]\n os.unlink(tmpfn)\n os.unlink(gribfn)\n\n val = grb['values']\n # Convert into units of 0.1 mm accumulation\n val = val / 60.0 * 2.0 * 50.0\n val = np.where(val < 0., 255., val)\n imgdata[:, :] = np.flipud(val.astype('int'))\n\n (tmpfp, tmpfn) = tempfile.mkstemp()\n\n # Create Image\n png = Image.fromarray(np.flipud(imgdata))\n png.putpalette(mrms.make_colorramp())\n png.save('%s.png' % (tmpfn,))\n\n mrms.write_worldfile('%s.wld' % (tmpfn,))\n # Inject WLD file\n routes = \"c\" if realtime else \"\"\n prefix = 'a2m'\n pqstr = 
(\"/home/ldm/bin/pqinsert -i -p 'plot a%s %s \"\n \"gis/images/4326/mrms/%s.wld GIS/mrms/%s_%s.wld wld' %s.wld\"\n \"\") % (routes, now.strftime(\"%Y%m%d%H%M\"), prefix, prefix,\n now.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n # Now we inject into LDM\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot a%s %s \"\n \"gis/images/4326/mrms/%s.png GIS/mrms/%s_%s.png png' %s.png\"\n \"\") % (routes, now.strftime(\"%Y%m%d%H%M\"), prefix, prefix,\n now.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n\n if realtime:\n # Create 900913 image\n cmd = (\"gdalwarp -s_srs EPSG:4326 -t_srs EPSG:3857 -q -of GTiff \"\n \"-tr 1000.0 1000.0 %s.png %s.tif\") % (tmpfn, tmpfn)\n subprocess.call(cmd, shell=True)\n # Insert into LDM\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot c %s \"\n \"gis/images/900913/mrms/%s.tif GIS/mrms/%s_%s.tif tif' %s.tif\"\n \"\") % (now.strftime(\"%Y%m%d%H%M\"), prefix, prefix,\n now.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n\n j = open(\"%s.json\" % (tmpfn,), 'w')\n j.write(json.dumps(dict(meta=metadata)))\n j.close()\n # Insert into LDM\n pqstr = (\"/home/ldm/bin/pqinsert -i -p 'plot c %s \"\n \"gis/images/4326/mrms/%s.json GIS/mrms/%s_%s.json json' \"\n \"%s.json\") % (now.strftime(\"%Y%m%d%H%M\"), prefix, prefix,\n now.strftime(\"%Y%m%d%H%M\"), tmpfn)\n subprocess.call(pqstr, shell=True)\n for suffix in ['tif', 'json', 'png', 'wld']:\n if os.path.isfile(\"%s.%s\" % (tmpfn, suffix)):\n os.unlink('%s.%s' % (tmpfn, suffix))\n\n os.close(tmpfp)\n os.unlink(tmpfn)", "def make_background(self):\n for x in range(self.env_list[0].size):\n for y in range(self.env_list[0].size):\n img = load_image(\"dirt.png\")[0]\n self.background.blit(img, (x*50, y*50))", "def on_run(self):\n self.set_illumination({'mode': 'breathe'})", "def bindTextures(self, textures, rstate):\n if GLExtension.multitexture:\n # We have multitexturing. 
Enable and bind the texture units we'll be using,\n # make sure the rest are disabled.\n texIndex = 0\n for unit in GLExtension.textureUnits:\n glActiveTextureARB(unit)\n for target in GLExtension.textureTargets:\n glDisable(target)\n if texIndex < len(textures):\n t = textures[texIndex]\n if t:\n t.bind(rstate)\n glEnable(t.glTarget)\n texIndex += 1\n\n # Leave the first texture unit active\n glActiveTextureARB(GLExtension.textureUnits[0])\n else:\n # No multitexturing, only enable the current texture unit\n for target in GLExtension.textureTargets:\n glDisable(target)\n if textures:\n glEnable(textures[0].glTarget)\n textures[0].bind(rstate)", "def __init__(self, env: gym.Env, eval_episodes: int, render_freq: int, \n fps: int, verbose=0):\n super().__init__(verbose=verbose)\n self.env = env\n self.eval_episodes = eval_episodes\n self.render_freq = render_freq\n self.fps = fps", "def render(self, mode='human'):\n\n if self.RENDER_ENV_ONLY:\n SCREEN_W = 600\n SCREEN_H = 600\n \n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(SCREEN_W, SCREEN_H)\n self.viewer.set_bounds(0, SCREEN_W, 0, SCREEN_H)\n\n self.viewer.draw_polygon([(0, 0), (SCREEN_W, 0), (SCREEN_W, SCREEN_H), (0, SCREEN_H)], color=np.array([120, 120, 120])/255.0)\n bezel = 10\n \n self._env_render(self.get_full_state,\n [bezel, bezel], [SCREEN_W-2*bezel, SCREEN_H-2*bezel])\n self._agent_render(self.get_full_state,\n [bezel, bezel], [SCREEN_W-2*bezel, SCREEN_H-2*bezel])\n return self.viewer.render(return_rgb_array = mode=='rgb_array')\n\n if (self.RENDER_INDIV_MEMORY == True and self.INDIV_MEMORY == \"fog\") or (self.RENDER_TEAM_MEMORY == True and self.TEAM_MEMORY == \"fog\"):\n SCREEN_W = 1200\n SCREEN_H = 600\n\n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(SCREEN_W, SCREEN_H)\n self.viewer.set_bounds(0, SCREEN_W, 0, SCREEN_H)\n \n self.viewer.draw_polygon([(0, 0), (SCREEN_W, 0), (SCREEN_W, SCREEN_H), (0, SCREEN_H)], color=(0, 0, 0))\n\n self._env_render(self._static_map,\n [7, 7], [SCREEN_H//2-10, SCREEN_H//2-10])\n self._env_render(self.get_obs_blue_render,\n [7+1.49*SCREEN_H//3, 7], [SCREEN_H//2-10, SCREEN_H//2-10])\n self._env_render(self.get_obs_red_render,\n [7+1.49*SCREEN_H//3, 7+1.49*SCREEN_H//3], [SCREEN_H//2-10, SCREEN_H//2-10])\n self._env_render(self.get_full_state,\n [7, 7+1.49*SCREEN_H//3], [SCREEN_H//2-10, SCREEN_H//2-10])\n\n # ind blue agent memory rendering\n for num_blue, blue_agent in enumerate(self._team_blue):\n if num_blue < 2:\n blue_agent.INDIV_MEMORY = self.INDIV_MEMORY\n if blue_agent.INDIV_MEMORY == \"fog\" and self.RENDER_INDIV_MEMORY == True:\n self._env_render(blue_agent.get_obs(self),\n [900+num_blue*SCREEN_H//4, 7], [SCREEN_H//4-10, SCREEN_H//4-10])\n else:\n blue_agent.INDIV_MEMORY = self.INDIV_MEMORY\n if blue_agent.INDIV_MEMORY == \"fog\" and self.RENDER_INDIV_MEMORY == True:\n self._env_render(blue_agent.get_obs(self),\n [900+(num_blue-2)*SCREEN_H//4, 7+SCREEN_H//4], [SCREEN_H//4-10, SCREEN_H//4-10])\n\n # ind red agent memory rendering\n for num_red, red_agent in enumerate(self._team_red):\n if num_red < 2:\n red_agent.INDIV_MEMORY = self.INDIV_MEMORY\n if red_agent.INDIV_MEMORY == \"fog\" and self.RENDER_INDIV_MEMORY == True:\n self._env_render(red_agent.get_obs(self),\n [900+num_red*SCREEN_H//4, 7+1.49*SCREEN_H//2], [SCREEN_H//4-10, SCREEN_H//4-10])\n \n else:\n red_agent.INDIV_MEMORY = self.INDIV_MEMORY\n if red_agent.INDIV_MEMORY == \"fog\" and self.RENDER_INDIV_MEMORY 
== True:\n self._env_render(red_agent.get_obs(self),\n [900+(num_red-2)*SCREEN_H//4, 7+SCREEN_H//2], [SCREEN_H//4-10, SCREEN_H//4-10])\n\n if self.TEAM_MEMORY == \"fog\" and self.RENDER_TEAM_MEMORY == True:\n # blue team memory rendering\n blue_visited = np.copy(self._static_map)\n blue_visited[self.blue_memory] = UNKNOWN\n self._env_render(blue_visited,\n [7+2.98*SCREEN_H//3, 7], [SCREEN_H//2-10, SCREEN_H//2-10])\n\n # red team memory rendering \n red_visited = np.copy(self._static_map)\n red_visited[self.red_memory] = UNKNOWN\n self._env_render(red_visited,\n [7+2.98*SCREEN_H//3, 7+1.49*SCREEN_H//3], [SCREEN_H//2-10, SCREEN_H//2-10])\n else:\n SCREEN_W = 600\n SCREEN_H = 600\n \n if self.viewer is None:\n from gym.envs.classic_control import rendering\n self.viewer = rendering.Viewer(SCREEN_W, SCREEN_H)\n self.viewer.set_bounds(0, SCREEN_W, 0, SCREEN_H)\n\n self.viewer.draw_polygon([(0, 0), (SCREEN_W, 0), (SCREEN_W, SCREEN_H), (0, SCREEN_H)], color=(0, 0, 0))\n \n self._env_render(self._static_map,\n [5, 10], [SCREEN_W//2-10, SCREEN_H//2-10])\n self._env_render(self.get_obs_blue_render,\n [5+SCREEN_W//2, 10], [SCREEN_W//2-10, SCREEN_H//2-10])\n self._agent_render(self.get_full_state,\n [5+SCREEN_W//2, 10], [SCREEN_W//2-10, SCREEN_H//2-10], self._team_blue)\n self._env_render(self.get_obs_red_render,\n [5+SCREEN_W//2, 10+SCREEN_H//2], [SCREEN_W//2-10, SCREEN_H//2-10])\n self._env_render(self.get_full_state,\n [5, 10+SCREEN_H//2], [SCREEN_W//2-10, SCREEN_H//2-10])\n self._agent_render(self.get_full_state,\n [5, 10+SCREEN_H//2], [SCREEN_W//2-10, SCREEN_H//2-10])\n\n if self.SILENCE_RENDER:\n return self.viewer.get_array()\n else:\n return self.viewer.render(return_rgb_array = mode=='rgb_array')", "def _create_background(self):\n\n if not hasattr(self, \"save_initial_seed\"):\n self.save_initial_seed = random.choice((1, 4, 7, 10))\n\n initial_seed = self.save_initial_seed\n grass_serie = initial_seed + 0\n rock_serie = initial_seed + 1\n dirt_serie = initial_seed + 11\n path_serie = initial_seed + 12\n water_serie = initial_seed + 13\n\n for y in range(self.tile_height):\n for x in range(self.tile_width):\n\n if self.tiles[x][y].tile_type == Tile.T_VOID:\n pass\n else:\n weight = self.tile_weight(x, y, self.tiles,\n tile_type=self.tiles[x][y].tile_type,\n tile_subtype=self.tiles[x][y].tile_subtype)\n\n if self.tiles[x][y].tile_type == Tile.T_BLOCK:\n self._background.blit(GLOBAL.img('FLOOR')[rock_serie][weight],\n (x * TILESIZE_SCREEN[0], y * TILESIZE_SCREEN[1]))\n elif self.tiles[x][y].tile_type == Tile.T_GROUND:\n if self.tiles[x][y].tile_subtype == Tile.S_FLOOR:\n self._background.blit(GLOBAL.img('FLOOR')[dirt_serie][weight],\n (x * TILESIZE_SCREEN[0], y * TILESIZE_SCREEN[1]))\n elif self.tiles[x][y].tile_subtype == Tile.S_GRASS:\n self._background.blit(GLOBAL.img('FLOOR')[grass_serie][weight],\n (x * TILESIZE_SCREEN[0], y * TILESIZE_SCREEN[1]))\n elif self.tiles[x][y].tile_subtype == Tile.S_PATH:\n self._background.blit(GLOBAL.img('FLOOR')[path_serie][weight],\n (x * TILESIZE_SCREEN[0], y * TILESIZE_SCREEN[1]))\n elif self.tiles[x][y].tile_type == Tile.T_LIQUID:\n if self.tiles[x][y].tile_subtype == Tile.S_WATER:\n self._background.blit(GLOBAL.img('FLOOR')[water_serie][weight],\n (x * TILESIZE_SCREEN[0], y * TILESIZE_SCREEN[1]))\n else:\n print(\"Unknown type {} subtype {}\".format(self.tiles[x][y].tile_type,\n self.tiles[x][y].tile_subtype))", "def _create_rain(self):\n r_calc = self._calculate_spacing()\n # Create the full screen of raindrops.\n for raindrop_y in range(r_calc[3]):\n 
self._create_raindrops_y(raindrop_y)", "def setup(self):\n self.fname = None\n self.remote = self.camera.get('remote', None)\n self.picture_directory = self.camera.get('directory', Bawt.DEFAULT_DIRECTORY)\n self.resolution = self.camera.get('resolution', Bawt.DEFAULT_RESOLUTION)\n LOG.info(\"Picture directory set to: %s\" % self.picture_directory)\n LOG.info(\"Resolution set to %s\" % self.resolution)\n self.timelapse = self.camera.get('timelapse', None)\n self._is_initialized = False", "def fill_dict(self):\n image_time = (self.nl_image - 1) * (self.tcycle * self.dec)\n slc_dict = default_slc_dict()\n ts = self.time_start\n sod = _dt.timedelta(hours=ts.hour, minutes=ts.minute,\n seconds=ts.second, microseconds=ts.microsecond).total_seconds()\n st0 = sod + self.nl_acc * self.tcycle * self.dec + \\\n (self.dec / 2.0) * self.tcycle # include time to center of decimation window\n az_step = self.ang_per_tcycle * self.dec\n prf = abs(1.0 / (self.tcycle * self.dec))\n seq = self.TX_RX_SEQ\n GPRI_TX_z = self.mapping_dict['TX_' + seq[0] + \"_position\"]\n GPRI_RX_z = self.mapping_dict['RX_' + seq[1] + seq[3] + \"_position\"]\n fadc = C / (2. * self.rps)\n # Antenna elevation angle\n ant_elev = _np.deg2rad(self.antenna_elevation)\n # Compute antenna position\n rx1_coord = [0., 0., 0.]\n rx2_coord = [0., 0., 0.]\n tx_coord = [0., 0., 0.]\n #\n # Topsome receiver\n rx1_coord[0] = xoff + ant_radius * _np.cos(\n ant_elev) # local coordinates of the tower: x,y,z, boresight is along +X axis, +Z is up\n rx1_coord[1] = 0.0 # +Y is to the right when looking in the direction of +X\n rx1_coord[2] = GPRI_RX_z + ant_radius * _np.sin(\n ant_elev) # up is Z, all antennas have the same elevation angle!\n # Bottomsome receiver\n rx2_coord[0] = xoff + ant_radius * _np.cos(ant_elev)\n rx2_coord[1] = 0.0\n rx2_coord[2] = GPRI_RX_z + ant_radius * _np.sin(ant_elev)\n tx_coord[0] = xoff + ant_radius * _np.cos(ant_elev)\n tx_coord[1] = 0.0\n tx_coord[2] = GPRI_TX_z + ant_radius * _np.sin(ant_elev)\n chan_name = 'CH1 lower' if seq[3] == 'l' else 'CH2 upper'\n slc_dict['title'] = str(ts) + ' ' + chan_name\n slc_dict['date'] = self.time_start.date()\n slc_dict['start_time'] = st0\n slc_dict['center_time'] = st0 + image_time / 2\n slc_dict['end_time'] = st0 + image_time\n slc_dict['range_samples'] = self.ns_out\n slc_dict['azimuth_lines'] = self.nl_tot_dec - 2 * self.nl_acc\n slc_dict['range_pixel_spacing'] = self.rps\n slc_dict['azimuth_line_time'] = self.tcycle * self.dec\n slc_dict['near_range_slc'] = self.rmin\n slc_dict['center_range_slc'] = (self.rmin + self.rmax) / 2\n slc_dict['far_range_slc'] = self.rmax\n slc_dict['radar_frequency'] = self.RF_center_freq\n slc_dict['adc_sampling_rate'] = fadc\n slc_dict['prf'] = prf\n slc_dict['chirp_bandwidth'] = self.RF_freq_max - self.RF_freq_min\n slc_dict['receiver_gain'] = 60 - self.IMA_atten_dB\n slc_dict['GPRI_TX_mode'] = self.TX_mode\n slc_dict['GPRI_TX_antenna'] = seq[0]\n slc_dict.add_parameter('GPRI_RX_antennas', seq[1] + seq[3])\n slc_dict['GPRI_tx_coord'] = [tx_coord[0], tx_coord[1], tx_coord[2]]\n slc_dict['GPRI_rx1_coord'] = [rx1_coord[0], rx1_coord[1], rx1_coord[2]]\n slc_dict['GPRI_rx2_coord'] = [rx2_coord[0], rx2_coord[1], rx2_coord[2]]\n slc_dict['GPRI_az_start_angle'] = self.az_start\n slc_dict['GPRI_az_angle_step'] = az_step\n slc_dict['GPRI_ant_elev_angle'] = self.antenna_elevation\n slc_dict['GPRI_ref_north'] = self.geographic_coordinates[0]\n slc_dict['GPRI_ref_east'] = self.geographic_coordinates[1]\n slc_dict['GPRI_ref_alt'] = 
self.geographic_coordinates[2]\n slc_dict['GPRI_geoid'] = self.geographic_coordinates[3]\n return slc_dict", "def __init__(self):\n self.screen = pg.display.get_surface()\n self.screen_rect = self.screen.get_rect()\n self.joys = initialize_all_gamepads()\n self.done = False\n self.clock = pg.time.Clock()\n self.fps = 60\n self.keys = pg.key.get_pressed()\n self.cannon = Turret(self.joys[0], (250,250))\n self.objects = pg.sprite.Group()", "def setup(self):\n self.debug(\"Setup ..\")\n\n if self.pipeline.settings.useHardwarePCF:\n self.error(\n \"Global Illumination does not work in combination with PCF!\")\n import sys\n sys.exit(0)\n return\n\n self.settings = VoxelSettingsManager()\n self.settings.loadFromFile(join(self.sceneRoot, \"voxels.ini\"))\n\n self.debug(\n \"Loaded voxels, grid resolution is\", self.settings.GridResolution)\n\n self.gridScale = self.settings.GridEnd - self.settings.GridStart\n self.voxelSize = self.gridScale / float(self.settings.GridResolution)\n self.entrySize = Vec2(\n 1.0 / float(self.settings.StackSizeX), 1.0 / float(self.settings.StackSizeY))\n self.frameIndex = 0\n\n invVoxelSize = Vec3(\n 1.0 / self.voxelSize.x, 1.0 / self.voxelSize.y, 1.0 / self.voxelSize.z)\n invVoxelSize.normalize()\n self.normalizationFactor = invVoxelSize / \\\n float(self.settings.GridResolution)\n\n # Debugging of voxels, VERY slow\n self.debugVoxels = False\n\n if self.debugVoxels:\n self.createVoxelDebugBox()\n\n # Load packed voxels\n packedVoxels = Globals.loader.loadTexture(\n join(self.sceneRoot, \"voxels.png\"))\n packedVoxels.setFormat(Texture.FRgba8)\n packedVoxels.setComponentType(Texture.TUnsignedByte)\n # packedVoxels.setKeepRamImage(False)\n\n # Create 3D Texture to store unpacked voxels\n self.unpackedVoxels = Texture(\"Unpacked voxels\")\n self.unpackedVoxels.setup3dTexture(self.settings.GridResolution, self.settings.GridResolution, self.settings.GridResolution,\n Texture.TFloat, Texture.FRgba8)\n self.unpackedVoxels.setMinfilter(Texture.FTLinearMipmapLinear)\n self.unpackedVoxels.setMagfilter(Texture.FTLinear)\n\n self.unpackVoxels = NodePath(\"unpackVoxels\")\n self.unpackVoxels.setShader(\n BetterShader.loadCompute(\"Shader/GI/UnpackVoxels.compute\"))\n\n print \"setting inputs ..\"\n self.unpackVoxels.setShaderInput(\"packedVoxels\", packedVoxels)\n print \"setting inputs ..\"\n self.unpackVoxels.setShaderInput(\n \"stackSizeX\", LVecBase3i(self.settings.StackSizeX))\n print \"setting inputs ..\"\n self.unpackVoxels.setShaderInput(\n \"gridSize\", LVecBase3i(self.settings.GridResolution))\n print \"setting inputs ..\"\n self.unpackVoxels.setShaderInput(\"destination\", self.unpackedVoxels)\n print \"executing shader ..\"\n self._executeShader(\n self.unpackVoxels, self.settings.GridResolution / 8, self.settings.GridResolution / 8, self.settings.GridResolution / 8)\n\n print \"creating direct radiance texture ..\"\n # Create 3D Texture to store direct radiance\n self.directRadianceCache = Texture(\"Direct radiance cache\")\n self.directRadianceCache.setup3dTexture(self.settings.GridResolution, self.settings.GridResolution, self.settings.GridResolution,\n Texture.TInt, Texture.FR32i)\n\n self.directRadiance = Texture(\"Direct radiance\")\n self.directRadiance.setup3dTexture(self.settings.GridResolution, self.settings.GridResolution, self.settings.GridResolution,\n Texture.TFloat, Texture.FRgba16)\n\n print \"setting texture states ..\"\n for prepare in [self.directRadiance, self.unpackedVoxels]:\n prepare.setMagfilter(Texture.FTLinear)\n 
prepare.setMinfilter(Texture.FTLinearMipmapLinear)\n prepare.setWrapU(Texture.WMBorderColor)\n prepare.setWrapV(Texture.WMBorderColor)\n prepare.setWrapW(Texture.WMBorderColor)\n prepare.setBorderColor(Vec4(0,0,0,1))\n\n self.unpackedVoxels.setBorderColor(Vec4(0))\n # self.directRadiance.setBorderColor(Vec4(0))\n\n self.populateVPLNode = NodePath(\"PopulateVPLs\")\n self.clearTextureNode = NodePath(\"ClearTexture\")\n self.copyTextureNode = NodePath(\"CopyTexture\")\n self.generateMipmapsNode = NodePath(\"GenerateMipmaps\")\n self.convertGridNode = NodePath(\"ConvertGrid\")\n\n\n if False:\n surroundingBox = Globals.loader.loadModel(\n \"Models/CubeFix/Model.egg\")\n surroundingBox.setPos(self.settings.GridStart)\n surroundingBox.setScale(self.gridScale)\n\n # surroundingBox.setTwoSided(True)\n surroundingBox.flattenStrong()\n surroundingBox.reparentTo(Globals.render)\n\n self.bindTo(self.populateVPLNode, \"giData\")\n self.reloadShader()\n\n self._generateMipmaps(self.unpackedVoxels)", "def run():\n scene = lm.scene_object()\n copy_latest_low()\n copy_latest_high()", "def __init__(self, world_map, GRID_LOCK, coordinates=None):\n\n ''' Take parameters, and Sprite Constants '''\n super(EagleSprite, self).__init__(world_map, EagleSprite.IMAGE, GRID_LOCK,\n EagleSprite.HEALTH_BAR, EagleSprite.AVG_SPEED,\n EagleSprite.VISION, coordinates)\n\n self.type = \"eagle\"\n self.prey = [\"fish\"]\n self.movable_terrain = world_map.tile_types\n self.shadow = self.SHADOW_IMAGE\n self.shadow_tile = self.world_map.get_tile_by_index((self.tile.location_t[1] + 1, self.tile.location_t[0]))", "def __init__(self, scene = base.render, ambient = 0.2, hardness = 16, fov = 40, near = 10, far = 100):\n \n # Read and store the function parameters\n self.scene = scene\n self.__ambient = ambient\n self.__hardness = hardness\n \n # By default, mark every object as textured.\n self.flagTexturedObject(self.scene)\n \n # Create the buffer plus a texture to store the output in\n buffer = createOffscreenBuffer(-3)\n depthmap = Texture()\n buffer.addRenderTexture(depthmap, GraphicsOutput.RTMBindOrCopy, GraphicsOutput.RTPColor)\n \n # Set the shadow filter if it is supported\n if(base.win.getGsg().getSupportsShadowFilter()):\n depthmap.setMinfilter(Texture.FTShadow)\n depthmap.setMagfilter(Texture.FTShadow) \n \n # Make the camera\n self.light = base.makeCamera(buffer)\n self.light.node().setScene(self.scene)\n self.light.node().getLens().setFov(fov)\n self.light.node().getLens().setNearFar(near, far)\n\n # Put a shader on the Light camera.\n lci = NodePath(PandaNode(\"lightCameraInitializer\"))\n lci.setShader(loader.loadShader(\"caster.sha\"))\n self.light.node().setInitialState(lci.getState())\n\n # Put a shader on the Main camera.\n mci = NodePath(PandaNode(\"mainCameraInitializer\"))\n mci.setShader(loader.loadShader(\"softshadow.sha\"))\n base.cam.node().setInitialState(mci.getState())\n\n # Set up the blurring buffers, one that blurs horizontally, the other vertically\n #blurXBuffer = makeFilterBuffer(buffer, \"Blur X\", -2, loader.loadShader(\"blurx.sha\"))\n #blurYBuffer = makeFilterBuffer(blurXBuffer, \"Blur Y\", -1, loader.loadShader(\"blury.sha\"))\n\n # Set the shader inputs\n self.scene.setShaderInput(\"light\", self.light)\n #self.scene.setShaderInput(\"depthmap\", blurYBuffer.getTexture())\n self.scene.setShaderInput(\"depthmap\", buffer.getTexture())\n self.scene.setShaderInput(\"props\", ambient, hardness, 0, 1)", "def GetTextureDimensions(self):\n ...", "def fully_sample_sky(region = \"allsky\", limitregion 
= False, adaptivep0 = True, useprior = \"RHTPrior\", velrangestring = \"-10_10\", \n gausssmooth_prior = False, tol=1E-5, sampletype = \"mean_bayes\", mcmc=False, deltafuncprior=False, testpsiproj=False, \n testthetas=False, save=True, baseprioramp = 1E-8, smoothprior=False, sig=30, fixwidth=False):\n \n print(\"Fully sampling sky with options: region = {}, limitregion = {}, useprior = {}, velrangestring = {}, gausssmooth_prior = {}, deltafuncprior = {}, testpsiproj = {}, testthetas = {}\".format(region, limitregion, useprior, velrangestring, gausssmooth_prior, deltafuncprior, testpsiproj, testthetas))\n\n out_root = \"/disks/jansky/a/users/goldston/susan/Wide_maps/\"\n\n # Get ids of all pixels that contain RHT data\n rht_cursor, tablename = get_rht_cursor(region = region, velrangestring = velrangestring)\n print(\"table name is\", tablename)\n all_ids = get_all_rht_ids(rht_cursor, tablename)\n \n if limitregion is True:\n print(\"Loading all allsky data points that are in the SC_241 region\")\n # Get all ids that are in both allsky data and SC_241\n all_ids_SC = pickle.load(open(\"SC_241_healpix_ids.p\", \"rb\"))\n all_ids = list(set(all_ids).intersection(all_ids_SC))\n \n print(\"beginning creation of all posteriors\")\n \n if testthetas is False:\n # Create and sample posteriors for all pixels\n if useprior is \"RHTPrior\":\n all_pMB, all_psiMB = sample_all_rht_points(all_ids, adaptivep0 = adaptivep0, rht_cursor = rht_cursor, region = region, useprior = useprior, gausssmooth_prior = gausssmooth_prior, tol=tol, sampletype = sampletype, mcmc = mcmc, deltafuncprior=deltafuncprior, testpsiproj=testpsiproj, baseprioramp=baseprioramp)\n elif useprior is \"ThetaRHT\":\n all_pMB, all_psiMB = sample_all_rht_points_ThetaRHTPrior(all_ids, adaptivep0 = adaptivep0, region = region, useprior = useprior, local = False, tol=tol, smoothprior=smoothprior, sig=sig, fixwidth=fixwidth)\n \n # Place into healpix map\n hp_psiMB = make_hp_map(all_psiMB, all_ids, Nside = 2048, nest = True)\n hp_pMB = make_hp_map(all_pMB, all_ids, Nside = 2048, nest = True)\n \n if limitregion is False:\n psiMB_out_fn = \"psiMB_allsky_\"+velrangestring+\"_smoothprior_\"+str(gausssmooth_prior)+\"_adaptivep0_\"+str(adaptivep0)+\".fits\"\n pMB_out_fn = \"pMB_allsky_\"+velrangestring+\"_smoothprior_\"+str(gausssmooth_prior)+\"_adaptivep0_\"+str(adaptivep0)+\".fits\"\n elif limitregion is True:\n if mcmc is True:\n psiMB_out_fn = \"psiMB_DR2_SC_241_mcmc_50_\"+velrangestring+\"_smoothprior_\"+str(gausssmooth_prior)+\"_adaptivep0_\"+str(adaptivep0)+\"_tol_{}.fits\".format(tol)\n pMB_out_fn = \"pMB_DR2_SC_241_mcmc_50_\"+velrangestring+\"_smoothprior_\"+str(gausssmooth_prior)+\"_adaptivep0_\"+str(adaptivep0)+\"_tol_{}.fits\".format(tol)\n else:\n if sampletype is \"mean_bayes\":\n print(\"saving mean bayes sampled planck+rht data\")\n #psiMB_out_fn = \"psiMB_DR2_SC_241_\"+velrangestring+\"_smoothprior_\"+str(gausssmooth_prior)+\"_adaptivep0_\"+str(adaptivep0)+\"_tol_{}.fits\".format(tol)\n #pMB_out_fn = \"pMB_DR2_SC_241_\"+velrangestring+\"_smoothprior_\"+str(gausssmooth_prior)+\"_adaptivep0_\"+str(adaptivep0)+\"_tol_{}.fits\".format(tol)\n #psiMB_out_fn = \"psiMB_DR2_SC_241_\"+velrangestring+\"_smoothprior_\"+str(gausssmooth_prior)+\"_adaptivep0_\"+str(adaptivep0)+\"_deltafuncprior_\"+str(deltafuncprior)+\"_fixedpsi0_reverseRHT.fits\"\n #pMB_out_fn = \"pMB_DR2_SC_241_\"+velrangestring+\"_smoothprior_\"+str(gausssmooth_prior)+\"_adaptivep0_\"+str(adaptivep0)+\"_deltafuncprior_\"+str(deltafuncprior)+\"_fixedpsi0_reverseRHT.fits\"\n if 
useprior is \"RHTPrior\":\n psiMB_out_fn = \"psiMB_DR2_SC_241_prior_\"+useprior+\"_\"+velrangestring+\"_smoothprior_\"+str(gausssmooth_prior)+\"_adaptivep0_\"+str(adaptivep0)+\"_deltafuncprior_\"+str(deltafuncprior)+\"_baseprioramp_\"+str(baseprioramp)+\".fits\"\n pMB_out_fn = \"pMB_DR2_SC_241_prior_\"+useprior+\"_\"+velrangestring+\"_smoothprior_\"+str(gausssmooth_prior)+\"_adaptivep0_\"+str(adaptivep0)+\"_deltafuncprior_\"+str(deltafuncprior)+\"_baseprioramp_\"+str(baseprioramp)+\".fits\"\n elif useprior is \"ThetaRHT\":\n psiMB_out_fn = \"psiMB_DR2_SC_241_prior_\"+useprior+\"_\"+velrangestring+\"_smoothprior_\"+str(smoothprior)+\"_sig_\"+str(sig)+\"_adaptivep0_\"+str(adaptivep0)+\"_fixwidth_\"+str(fixwidth)+\".fits\"\n pMB_out_fn = \"pMB_DR2_SC_241_prior_\"+useprior+\"_\"+velrangestring+\"_smoothprior_\"+str(smoothprior)+\"_sig_\"+str(sig)+\"_adaptivep0_\"+str(adaptivep0)+\"_fixwidth_\"+str(fixwidth)+\".fits\"\n \n \n elif sampletype is \"MAP\":\n psiMB_out_fn = \"psiMB_MAP_DR2_SC_241_\"+velrangestring+\"_smoothprior_\"+str(gausssmooth_prior)+\"_adaptivep0_\"+str(adaptivep0)+\"_baseprioramp_\"+str(baseprioramp)+\".fits\"\n pMB_out_fn = \"pMB_MAP_DR2_SC_241_\"+velrangestring+\"_smoothprior_\"+str(gausssmooth_prior)+\"_adaptivep0_\"+str(adaptivep0)+\"_baseprioramp_\"+str(baseprioramp)+\".fits\"\n \n if testpsiproj is True:\n psiMB_out_fn = \"psiMB_DR2_SC_241_\"+velrangestring+\"_smoothprior_\"+str(gausssmooth_prior)+\"_adaptivep0_\"+str(adaptivep0)+\"_deltafuncprior_\"+str(deltafuncprior)+\"_testpsiproj_\"+str(testpsiproj)+\"_smalloffset.fits\"\n pMB_out_fn = \"pMB_DR2_SC_241_\"+velrangestring+\"_smoothprior_\"+str(gausssmooth_prior)+\"_adaptivep0_\"+str(adaptivep0)+\"_deltafuncprior_\"+str(deltafuncprior)+\"_testpsiproj_\"+str(testpsiproj)+\"_smalloffset.fits\"\n \n if save:\n hp.fitsfunc.write_map(out_root + psiMB_out_fn, hp_psiMB, coord = \"G\", nest = True) \n hp.fitsfunc.write_map(out_root + pMB_out_fn, hp_pMB, coord = \"G\", nest = True) \n else:\n all_maxrhts, zzz = sample_all_rht_points(all_ids, adaptivep0 = adaptivep0, rht_cursor = rht_cursor, region = region, useprior = useprior, gausssmooth_prior = gausssmooth_prior, tol=tol, sampletype = sampletype, mcmc = mcmc, deltafuncprior=deltafuncprior, testpsiproj=testpsiproj, testthetas=testthetas)\n maxrhts = make_hp_map(all_maxrhts, all_ids, Nside = 2048, nest = True)\n hp.fitsfunc.write_map(out_root + \"vel_\" + velrangestring +\"_maxrht.fits\", maxrhts, coord = \"G\", nest = True)", "def initialize_rexarm(self):\n self.current_state = \"initialize_rexarm\"\n\n if not self.rexarm.initialize():\n print('Failed to initialize the rexarm')\n self.status_message = \"State: Failed to initialize the rexarm!\"\n time.sleep(5)\n self.next_state = \"idle\"", "def __init__(self, time_constant: float, sampling_time: float):\n self.alpha = sampling_time / (time_constant + sampling_time)\n self.state = None", "def simulate(initstate, t, timestep=forward, drive=donothing, bounds = [0.97, 0.97, 0.97, 0.97], saveinterval=10, beta=0.281105, eps=0.013, gamma=0.0880, mu=0.3, nu=0, dudt_x = dudt, dvdt_x = dvdt, dndt_x = dndt, grav=True, cori=True, advx=True, advy=True, attn=True): # gives surface height array of the system after evert dt\n bounds = np.asarray(bounds, dtype=np.float32)\n h, n, u, v, f, dx, dy, dt = [initstate[k] for k in ('h', 'n', 'u', 'v', 'lat', 'dx', 'dy', 'dt')]\n \n f = np.float32(((2*2*np.pi*np.sin(f*np.pi/180))/(24*3600))[:,np.newaxis])\n \n \n du0 = np.zeros_like(u)\n dv0 = np.zeros_like(v)\n dn0 = np.zeros_like(n)\n \n \n 
dndt_x(h, n, u, v, dx, dy, dn0)\n dn = (dn0, np.copy(dn0), np.copy(dn0))\n \n dudt_x(h, n, f, u, v, dx, dy, du0)\n du = (du0, np.copy(du0), np.copy(du0), np.copy(du0))\n \n dvdt_x(h, n, f, u, v, dx, dy, dv0)\n dv = (dv0, np.copy(dv0), np.copy(dv0), np.copy(dv0))\n \n nu = (dx+dy)/1000\n \n mmax = np.max(np.abs(n))\n landthresh = 1.5*np.max(n) # threshhold for when sea ends and land begins\n itrs = int(np.ceil(t/dt))\n saveinterval = np.int(saveinterval//dt)\n assert (dt >= 0), 'negative dt!' # dont try if timstep is zero or negative\n \n ntt = np.zeros((np.int(np.ceil(itrs/saveinterval)),)+n.shape, dtype=np.float32)\n maxn = np.zeros(n.shape, dtype=n.dtype) # max height in that area\n \n coastx = np.less(h, landthresh) # where the reflective condition is enforced on the coast\n \n print('simulating...')\n try:\n for itr in range(itrs):# iterate for the given number of iterations\n if itr%saveinterval == 0:\n ntt[np.int(itr/saveinterval),:,:] = n\n print(np.argmax( ntt[np.int(itr/saveinterval),:,:],axis=0)[5])\n \n \n maxn = np.max((n, maxn), axis=0) # record new maxes if they are greater than previous records \n \n # pushes n, u, v one step into the future\n n,u,v, du, dv, dn = timestep(h, n, u, v, f, dt, dx, dy, du, dv, dn, beta=beta, eps=eps, gamma=gamma, mu=mu, nu=nu, dudt_x=dudt_x, dvdt_x=dvdt_x, dndt_x=dndt_x, grav=grav, cori=cori, advx=advx, advy=advy, attn=attn)\n\n land(h, n, u, v, coastx) # how to handle land/coast\n border(n, u, v, 15, bounds) \n drive(h, n, u, v, f, dt, dx, dy, nu, coastx, bounds, mu, itr)\n print('simulation complete')\n except Exception as e:\n print('timestep: ', itr)\n raise e\n return ntt, maxn#, minn, timemax # return surface height through time and maximum heights", "def __init__(self):\n # Screen settings\n self.screen_width = 400\n self.screen_height = 300\n self.bg_color = (230, 230, 230)\n\n self.rocket_speed_factor= 1.5", "def gen_img_settings_quality(l):\n \n lhalf = 0.5*l\n \n ### sphere radius\n \n sphere_radius = 0.7\n #sphere_rgbcolor = [0.25,0.65,0.65]\n \n ### RESOLUTION\n \n img_widthpx = 1024\n img_heightpx = 1024\n\n ### includes and defaults\n\n povray_includes = [\"colors.inc\", \"textures.inc\", \"shapes.inc\"]\n povray_defaults = [vapory.Finish( 'ambient', 0.1,\n\t \t\t\t 'diffuse', 0.65,\n\t\t \t\t 'specular', 0.5,\n\t\t\t \t 'shininess', 0.53,\n\t\t\t\t 'opacity', 1.0)]\n\n\n ### light sources\n\n sun1 = vapory.LightSource([lhalf, lhalf, -1.01*lhalf], 'color', 'White')\n sun2 = vapory.LightSource([lhalf, lhalf, -1.01*lhalf], 'color', [0.7, 0.7, 0.7])\n\n ### background\n\n background = vapory.Background('color', [1,1,1])\n\n ### camera\n\n #povray_cam = vapory.Camera('angle', 75, 'location', [-15 , 15.0+0.5,15.0-0.25],'look_at', [0.25 , 15.0+0.5, 15.0-0.25])\n povray_cam = vapory.Camera('location', [lhalf, lhalf, -1.01*lhalf], 'look_at', [lhalf,lhalf,0], 'angle', 90)\n\n ### text\n # If desired include this in the povray_objects - array declared in the loop\n #text1 = vapory.Text( 'ttf', '\"timrom.ttf\"' ,'\"Division:\"', 0.01, 0.0, 'scale', [0.5,0.5,0.5],'rotate', [0,90,0], 'translate' , [0.0 , 15.0+2.75-1 , 15.0+1.5], vapory.Pigment('Black') ) \n\n ### render quality\n\n quality = 10\n \n return sphere_radius, img_widthpx, img_heightpx, povray_includes, povray_defaults, sun1, sun2, background, povray_cam, quality", "def rainfall_series(self):\n\n # assign local temporal variables\n datatype = 'strds'\n increment = str(self.rain_interval)+\" minutes\"\n raster = 'raster'\n rain_excess = 'rain_excess'\n net_difference = 
'net_difference'\n #iterations = sum(1 for row in precip)\n\n # create a raster space time dataset\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(\n elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # open txt file with precipitation data\n with open(evol.precipitation) as csvfile:\n\n # check for header\n has_header = csv.Sniffer().has_header(csvfile.read(1024))\n\n # rewind\n csvfile.seek(0)\n\n # skip header\n if has_header:\n next(csvfile)\n\n # parse time and precipitation\n precip = csv.reader(csvfile, delimiter=',', skipinitialspace=True)\n\n # initial run\n initial = next(precip)\n evol.start = initial[0]\n evol.rain_intensity = 'rain_intensity'\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=evol.rain_intensity,\n rain_observation=float(initial[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register 
the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model for each rainfall record\n for row in precip:\n\n # update the elevation\n evol.elevation=evolved_elevation\n\n # update time\n evol.start=row[0]\n\n # compute rainfall intensity (mm/hr)\n # from rainfall observation (mm)\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity}\"\n \"={rain_observation}\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_intensity=rain_intensity,\n rain_observation=float(row[1]),\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # derive excess water (mm/hr) from rainfall rate (mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_excess=rain_excess,\n rain_intensity=rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = {rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except 
(NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_excess'],\n flags='f')\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"= {evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)", "def __init__(self, width, height, platformType, theme):\n super().__init__()\n\n self.image = pygame.Surface([width, height])\n if platformType == 'edge':\n self.image.fill(constants.DARK_GREY)\n\n else:\n self.image.fill(constants.WHITE)\n self.image.set_colorkey(constants.WHITE)\n if theme == 'dirt':\n if platformType == 'top':\n self.image.blit(graphics.TILEDICT['dirt mid top'], graphics.TILEDICT['dirt mid top'].get_rect())\n elif platformType == 'round top':\n self.image.blit(graphics.TILEDICT['dirt round top'], graphics.TILEDICT['dirt round top'].get_rect())\n elif platformType == 'right':\n self.image.blit(graphics.TILEDICT['dirt plat right'], graphics.TILEDICT['dirt plat right'].get_rect())\n elif platformType == 'left':\n self.image.blit(graphics.TILEDICT['dirt plat left'], graphics.TILEDICT['dirt plat left'].get_rect())\n elif platformType == 'top left':\n self.image.blit(graphics.TILEDICT['dirt top left'], graphics.TILEDICT['dirt top left'].get_rect())\n elif platformType == 'top right':\n self.image.blit(graphics.TILEDICT['dirt top right'], graphics.TILEDICT['dirt top right'].get_rect())\n else:\n self.image.blit(graphics.TILEDICT['dirt center'], graphics.TILEDICT['dirt center'].get_rect())\n elif theme == 'snow':\n if platformType == 'top':\n self.image.blit(graphics.TILEDICT['snow mid top'], graphics.TILEDICT['snow mid top'].get_rect())\n elif platformType == 'round top':\n self.image.blit(graphics.TILEDICT['snow round top'], graphics.TILEDICT['snow round top'].get_rect())\n elif platformType == 'right':\n self.image.blit(graphics.TILEDICT['snow plat right'],graphics.TILEDICT['snow plat right'].get_rect())\n elif platformType == 'left':\n self.image.blit(graphics.TILEDICT['snow plat left'], graphics.TILEDICT['snow plat left'].get_rect())\n elif platformType == 'top left':\n self.image.blit(graphics.TILEDICT['snow top left'], graphics.TILEDICT['snow top left'].get_rect())\n elif platformType == 'top right':\n self.image.blit(graphics.TILEDICT['snow top right'], graphics.TILEDICT['snow top right'].get_rect())\n else:\n self.image.blit(graphics.TILEDICT['snow center'], graphics.TILEDICT['snow center'].get_rect())\n elif theme == 'castle':\n if platformType == 'top':\n self.image.blit(graphics.TILEDICT['castle mid top'], graphics.TILEDICT['castle mid top'].get_rect())\n elif platformType == 'round top':\n self.image.blit(graphics.TILEDICT['castle round top'], graphics.TILEDICT['castle round top'].get_rect())\n elif platformType == 'right':\n self.image.blit(graphics.TILEDICT['castle plat right'],graphics.TILEDICT['castle plat right'].get_rect())\n 
elif platformType == 'left':\n self.image.blit(graphics.TILEDICT['castle plat left'], graphics.TILEDICT['castle plat left'].get_rect())\n elif platformType == 'top left':\n self.image.blit(graphics.TILEDICT['castle top left'], graphics.TILEDICT['castle top left'].get_rect())\n elif platformType == 'top right':\n self.image.blit(graphics.TILEDICT['castle top right'], graphics.TILEDICT['castle top right'].get_rect())\n else:\n self.image.blit(graphics.TILEDICT['castle center'], graphics.TILEDICT['castle center'].get_rect())\n\n self.rect = self.image.get_rect()", "def setUp(self):\n self.t = Timew()", "def setUp(self):\n self.t = Timew()", "def setUp(self):\n self.t = Timew()", "def make_ptsrc_background(exp_time, fov, sky_center, absorb_model=\"wabs\", \r\n nH=0.05, area=40000.0, input_sources=None, \r\n output_sources=None, prng=None):\r\n prng = parse_prng(prng)\r\n\r\n exp_time = parse_value(exp_time, \"s\")\r\n fov = parse_value(fov, \"arcmin\")\r\n if nH is not None:\r\n nH = parse_value(nH, \"1.0e22*cm**-2\")\r\n area = parse_value(area, \"cm**2\")\r\n if input_sources is None:\r\n ra0, dec0, fluxes, ind = generate_sources(exp_time, fov, sky_center,\r\n area=area, prng=prng)\r\n num_sources = fluxes.size\r\n else:\r\n mylog.info(\"Reading in point-source properties from %s.\" % input_sources)\r\n t = ascii.read(input_sources)\r\n ra0 = t[\"RA\"].data\r\n dec0 = t[\"Dec\"].data\r\n fluxes = t[\"flux_0.5_2.0_keV\"].data\r\n ind = t[\"index\"].data\r\n num_sources = fluxes.size\r\n\r\n mylog.debug(\"Generating spectra from %d sources.\" % num_sources)\r\n\r\n # If requested, output the source properties to a file\r\n if output_sources is not None:\r\n t = Table([ra0, dec0, fluxes, ind],\r\n names=('RA', 'Dec', 'flux_0.5_2.0_keV', 'index'))\r\n t[\"RA\"].unit = \"deg\"\r\n t[\"Dec\"].unit = \"deg\"\r\n t[\"flux_0.5_2.0_keV\"].unit = \"erg/(cm**2*s)\"\r\n t[\"index\"].unit = \"\"\r\n t.write(output_sources, format='ascii.ecsv', overwrite=True)\r\n\r\n # Pre-calculate for optimization\r\n eratio = spec_emax/spec_emin\r\n oma = 1.0-ind\r\n invoma = 1.0/oma\r\n invoma[oma == 0.0] = 1.0\r\n fac1 = spec_emin**oma\r\n fac2 = spec_emax**oma-fac1\r\n\r\n fluxscale = get_flux_scale(ind, fb_emin, fb_emax, spec_emin, spec_emax)\r\n\r\n # Using the energy flux, determine the photon flux by simple scaling\r\n ref_ph_flux = fluxes*fluxscale*keV_per_erg\r\n # Now determine the number of photons we will generate\r\n n_photons = prng.poisson(ref_ph_flux*exp_time*area)\r\n\r\n all_energies = []\r\n all_ra = []\r\n all_dec = []\r\n\r\n for i, nph in enumerate(n_photons):\r\n if nph > 0:\r\n # Generate the energies in the source frame\r\n u = prng.uniform(size=nph)\r\n if ind[i] == 1.0:\r\n energies = spec_emin*(eratio**u)\r\n else:\r\n energies = fac1[i] + u*fac2[i]\r\n energies **= invoma[i]\r\n # Assign positions for this source\r\n ra = ra0[i]*np.ones(nph)\r\n dec = dec0[i]*np.ones(nph)\r\n\r\n all_energies.append(energies)\r\n all_ra.append(ra)\r\n all_dec.append(dec)\r\n\r\n mylog.debug(\"Finished generating spectra.\")\r\n\r\n all_energies = np.concatenate(all_energies)\r\n all_ra = np.concatenate(all_ra)\r\n all_dec = np.concatenate(all_dec)\r\n\r\n all_nph = all_energies.size\r\n\r\n # Remove some of the photons due to Galactic foreground absorption.\r\n # We will throw a lot of stuff away, but this is more general and still\r\n # faster. 
\r\n if nH is not None:\r\n if absorb_model == \"wabs\":\r\n absorb = get_wabs_absorb(all_energies, nH)\r\n elif absorb_model == \"tbabs\":\r\n absorb = get_tbabs_absorb(all_energies, nH)\r\n randvec = prng.uniform(size=all_energies.size)\r\n all_energies = all_energies[randvec < absorb]\r\n all_ra = all_ra[randvec < absorb]\r\n all_dec = all_dec[randvec < absorb]\r\n all_nph = all_energies.size\r\n mylog.debug(\"%d photons remain after foreground galactic absorption.\" % all_nph)\r\n\r\n all_flux = np.sum(all_energies)*erg_per_keV/(exp_time*area)\r\n\r\n output_events = {\"ra\": all_ra, \"dec\": all_dec, \r\n \"energy\": all_energies, \"flux\": all_flux}\r\n\r\n return output_events", "def __init__(self,world,renderer,a,b,x,y,ang,mode=0):\r\n\t\tself.world = world\r\n\t\tself.renderer = renderer\r\n\t\tself.a = a\r\n\t\tself.b = b\r\n\t\tself.x = x\r\n\t\tself.x_tgt = x\r\n\t\tself.y = y\r\n\t\tself.y_tgt = y\r\n\t\tself.ang = ang\r\n\t\tself.mode = mode\r\n\t\tself.tgt = []\r\n\t\tself.thrust_color = b2Color(1,1,1)\r\n\t\t\r\n\t\tself.__f = filter(0.9)\r\n\t\tself.__ftl = filter(0.65)\r\n\t\tself.__ftr = filter(0.65)\r\n\t\t\r\n\t\tself.AddTarget(b2Vec2(self.x,self.y))\r\n\t\tself.NextTarget()\r\n\t\t##self.__g = graph(self.renderer,45,300,10,100,100)\t\t\r\n\t\t\r\n\t\t##self.a_pid = PID(0.07, 0.00001, 0.009, 1./64., 0.5, -0.5)\r\n\t\t##self.y_pid = PID(0.5, 0.01, 0.9, 1./64., 0.3, 0)\r\n\t\t##self.x_pid = PID(0.04, 0.000001, 0.1, 1./64., 3.14/8, -3.14/8)\r\n\t\t\r\n\t\tself.__conf = config(\"config.cfg\")\r\n\t\tself.__conf.Load()\r\n\t\tif mode:\r\n\t\t\t(kp,ki,kd,dt,mi,mx)=self.__conf.Read(\"a_pid\")\r\n\t\t\tself.a_pid = PID(kp, ki, kd, dt, mx, mi, graph(self.renderer,300,10,300,100) if self.configer==0 else None)\r\n\t\t\t(kp,ki,kd,dt,mi,mx)=self.__conf.Read(\"y_pid\")\r\n\t\t\tself.y_pid = PID(kp, ki, kd, dt, mx, mi, graph(self.renderer,300,10,300,100) if self.configer==1 else None)\r\n\t\t\t(kp,ki,kd,dt,mi,mx)=self.__conf.Read(\"x_pid\")\r\n\t\t\tself.x_pid = PID(kp, ki, kd, dt, mx, mi ,graph(self.renderer,300,10,300,100) if self.configer==2 else None)\r\n\t\t\tself.thrust_color = b2Color(1,0,0.5)\r\n\t\telse:\r\n\t\t\tself.a_pid = PIDz(2.0, 0.1, 6.0, 0.5, -0.5)\r\n\t\t\tself.y_pid = PIDz(6.2, 0.2, 17.4, 0.5, 0)\r\n\t\t\tself.x_pid = PIDz(0.6, 0.02, 22, math.pi/8, -math.pi/8)\r\n\t\t\tself.thrust_color = b2Color(0,0,1)\r\n\t\t\r\n\t\trail=b2FixtureDef(\r\n\t\t\tshape=b2PolygonShape(box=(self.a,self.b)),\r\n\t\t\tdensity=0.02,\r\n\t\t\tfriction=0.1)\r\n\r\n\t\tbody=b2FixtureDef(\r\n\t\t\tshape=b2PolygonShape(box=(a/3.,a/6.,(0,a/6.),0)),\r\n\t\t\tdensity=0.10,\r\n\t\t\tfriction=0.1)\r\n\t\t#~ help(b2PolygonShape)\t\r\n\t\tself.body = self.world.CreateDynamicBody(fixtures=[rail,body], position=(self.x, self.y))", "def example_BSR():\n pts = [(1,1),(2,2),(3,3)]\n lines = [ [ (1,1), (1,2), (2,1)], [ (6,1), (1,6), (5,-1)] ]\n\n bloody_simple_2drender('2d_render.png', pts=pts, vecs=pts, lines=lines )", "def start(self):\n self.frame = 0\n self._init_level(1)\n self.reward = 0\n self.pcontinue = 1\n self.ghost_speed = self.ghost_speed_init\n return self._make_image(), self.reward, self.pcontinue", "def RenderTexture(self, vtkVolume, vtkRenderer, p_int=..., p_int=..., *args, **kwargs):\n ...", "def rdmb_povray_color(file_base,\n time_point=2000,\n width=800, height=600,\n rotx=0, roty=0, rotz=0,\n angle=14,\n mode=\"C\"):\n\n vs, ucs, As, Cs = load_rd_mb(file_base)\n \n file_png = file_base + \"_color_{:05}.png\".format(time_point)\n \n tempfile = file_png[:-4] + \"__temp__\" + 
\".pov\"\n\n camera = Camera('location', [0, 0, -25],\n 'look_at', [0, 0, 0],\n 'angle', angle,\n 'right x*image_width/image_height')\n \n light = LightSource([-3, 2, -6],\n 'color', [1.0, 1.0, 1.0], 'parallel')\n light2 = LightSource([2, -2, -6],\n 'color', [0.2, 0.2, 0.2], 'parallel')\n background = Background('color', [1, 1, 1, 1])\n \n spheres = []\n spheres += sph(vs, ucs, As, Cs,\n 0, 0, 0,\n rotx=rotx, roty=roty, rotz=rotz,\n mode=mode)\n \n objects = [light, light2, background] + spheres\n \n scene = Scene(camera, objects=objects)\n \n scene.render(file_png,\n width=width, height=height,\n tempfile=tempfile,\n output_alpha=True, antialiasing=0.001)\n\n return file_png", "def GetOutTextureCoord(self):\n ...", "def run(self, *args, **kwargs):\n verbose = kwargs.get('verbose', False)\n \n self.checkMetadata()\n \n rhessysDir = self.metadata['rhessys_dir']\n self.paths = RHESSysPaths(self.context.projectDir, rhessysDir)\n \n templateFilename = os.path.basename(self.metadata['template'])\n templateFilepath = os.path.join(self.context.projectDir, self.metadata['template'])\n \n g2wPath = os.path.join(self.context.projectDir, self.metadata['g2w_bin'])\n \n # Make sure g2w can find rat\n g2wEnv = dict(os.environ)\n g2wEnv['PATH'] = self.paths.RHESSYS_BIN + os.pathsep + g2wEnv['PATH']\n \n # Make sure region is properly set\n demRast = self.grassMetadata['dem_rast']\n result = self.grassLib.script.run_command('g.region', rast=demRast)\n if result != 0:\n raise RunException(\"g.region failed to set region to DEM, returning {0}\".format(result))\n \n # Mask subbasin to basin\n basin_rast = self.grassMetadata['basin_rast']\n result = self.grassLib.script.run_command('r.mask', flags='o', input=basin_rast, maskcats='1',\n quiet=True)\n if result != 0:\n sys.exit(\"r.mask failed to set mask to basin, returning %s\" % (result,))\n subbasin_raster = self.grassMetadata['subbasins_rast']\n subbasin_mask = \"{0}_mask\".format(subbasin_raster)\n mapcalc_input = \"{subbasin_mask}={subbasins}\".format(subbasin_mask=subbasin_mask,\n subbasins=subbasin_raster)\n result = self.grassLib.script.write_command('r.mapcalc',\n stdin=mapcalc_input,\n stdout=PIPE,\n stderr=PIPE)\n if result != 0:\n raise RunException(\"r.mapcalc failed to generate masked subbasin map {0}, input: {1}\".format(subbasin_raster,\n mapcalc_input))\n \n # Get list of subbasins\n result = self.grassLib.script.read_command('r.stats', flags='n', input=subbasin_raster, quiet=True)\n if result is None or result == '':\n raise RunException(\"Error reading subbasin map {0}\".format(subbasin_raster))\n \n subbasins = result.split()\n subbasin_masks = []\n worldfiles = []\n for subbasin in subbasins:\n # Remove mask\n result = self.grassLib.script.run_command('r.mask', flags='r', quiet=True)\n if result != 0:\n raise RunException(\"r.mask failed to remove mask\")\n \n # Make a mask layer for the sub-basin\n mask_name = \"subbasin_{0}\".format(subbasin)\n subbasin_masks.append(mask_name)\n result = self.grassLib.script.write_command('r.mapcalc',\n stdin=\"{mask_name}={subbasins} == {subbasin_number}\".format(mask_name=mask_name,\n subbasins=subbasin_mask,\n subbasin_number=subbasin),\n stdout=PIPE,\n stderr=PIPE)\n if result != 0:\n raise RunException(\"r.mapcalc failed to generate mask for subbasin {0}\".format(subbasin))\n \n # Mask to the sub-basin\n result = self.grassLib.script.run_command('r.mask', flags='o', input=mask_name, maskcats='1',\n quiet=True)\n if result != 0:\n raise RunException(\"r.mask failed to set mask to sub-basin {0}, 
returning {1}\".format(mask_name,\n result))\n \n worldfileName = \"world_subbasin_{0}_init\".format(subbasin)\n worldfilePath = os.path.join(self.paths.RHESSYS_WORLD, worldfileName)\n worldfiles.append(worldfilePath)\n g2wCommand = \"{g2w} -t {template} -w {worldfile}\".format(g2w=g2wPath, \n template=templateFilepath, \n worldfile=worldfilePath)\n \n if verbose:\n self.outfp.write(\"{0}\\n\".format(g2wCommand))\n self.outfp.write(\"\\nRunning grass2world from {0}...\".format(self.paths.RHESSYS_BIN))\n self.outfp.flush()\n\n cmdArgs = g2wCommand.split()\n process = Popen(cmdArgs, cwd=self.paths.RHESSYS_BIN, env=g2wEnv, \n stdout=PIPE, stderr=PIPE)\n (process_stdout, process_stderr) = process.communicate()\n if process.returncode != 0:\n raise RunException(\"grass2world failed, returning {0}\".format(process.returncode))\n \n if verbose:\n self.outfp.write(process_stdout)\n self.outfp.write(process_stderr)\n \n # Remove mask\n result = self.grassLib.script.run_command('r.mask', flags='r', quiet=True)\n if result != 0:\n raise RunException(\"r.mask failed to remove mask\") \n \n # Write metadata\n RHESSysMetadata.writeRHESSysEntry(self.context, 'worldfiles_init', \n RHESSysMetadata.VALUE_DELIM.join([self.paths.relpath(w) for w in worldfiles]))\n RHESSysMetadata.writeRHESSysEntry(self.context, 'subbasin_masks', \n RHESSysMetadata.VALUE_DELIM.join([m for m in subbasin_masks]))\n\n if verbose:\n self.outfp.write('\\n\\nFinished creating worldfiles\\n')\n\n # Write processing history\n RHESSysMetadata.appendProcessingHistoryItem(self.context, RHESSysMetadata.getCommandLine())" ]
[ "0.61697626", "0.6057704", "0.57795113", "0.5687284", "0.56449056", "0.55825335", "0.5524036", "0.5497377", "0.5459231", "0.5422027", "0.5419896", "0.5402457", "0.5394463", "0.5369188", "0.53376245", "0.5336224", "0.5335161", "0.5335161", "0.5328675", "0.5311985", "0.5309703", "0.53080434", "0.5281215", "0.5267542", "0.5254202", "0.52378064", "0.522492", "0.5221929", "0.52175444", "0.5206659", "0.519221", "0.51850384", "0.51816756", "0.51727796", "0.5165376", "0.5148668", "0.513752", "0.5137447", "0.51319396", "0.51151866", "0.51043063", "0.5104181", "0.5080107", "0.50648046", "0.50558794", "0.5053955", "0.50440186", "0.50427973", "0.50304157", "0.50252396", "0.502437", "0.50034344", "0.49741885", "0.49631673", "0.49532413", "0.4941099", "0.4935449", "0.49289885", "0.4921715", "0.49180114", "0.4917355", "0.4916836", "0.4915857", "0.49133447", "0.48979953", "0.4890634", "0.48793435", "0.48780775", "0.4877663", "0.48768172", "0.4871213", "0.4867953", "0.48661554", "0.48600724", "0.48573583", "0.4852385", "0.48488203", "0.48443744", "0.48405752", "0.48351383", "0.48325905", "0.48279455", "0.4827022", "0.48252153", "0.48234978", "0.48135206", "0.48094204", "0.4803376", "0.4794273", "0.47936776", "0.47936776", "0.47936776", "0.47905195", "0.478944", "0.47830382", "0.47800434", "0.47780028", "0.47765437", "0.4776434", "0.47758254" ]
0.5953139
2
Draw a cube with texture coordinates
def drawCube(self): glBegin(GL_QUADS); glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0); glTexCoord2f(1.0, 0.0); glVertex3f(1.0, -1.0, 1.0); glTexCoord2f(1.0, 1.0); glVertex3f(1.0, 1.0, 1.0); glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, 1.0); glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, -1.0); glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, -1.0); glTexCoord2f(0.0, 1.0); glVertex3f(1.0, 1.0, -1.0); glTexCoord2f(0.0, 0.0); glVertex3f(1.0, -1.0, -1.0); glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0); glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, 1.0, 1.0); glTexCoord2f(1.0, 0.0); glVertex3f(1.0, 1.0, 1.0); glTexCoord2f(1.0, 1.0); glVertex3f(1.0, 1.0, -1.0); glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, -1.0, -1.0); glTexCoord2f(0.0, 1.0); glVertex3f(1.0, -1.0, -1.0); glTexCoord2f(0.0, 0.0); glVertex3f(1.0, -1.0, 1.0); glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0); glTexCoord2f(1.0, 0.0); glVertex3f(1.0, -1.0, -1.0); glTexCoord2f(1.0, 1.0); glVertex3f(1.0, 1.0, -1.0); glTexCoord2f(0.0, 1.0); glVertex3f(1.0, 1.0, 1.0); glTexCoord2f(0.0, 0.0); glVertex3f(1.0, -1.0, 1.0); glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, -1.0); glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0); glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, 1.0); glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0); glEnd()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def drawCube( self ):\n glBegin(GL_QUADS);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n glTexCoord2f(0.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n glTexCoord2f(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n glTexCoord2f(1.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n glTexCoord2f(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glEnd()", "def drawCube( self ):\n glBegin(GL_QUADS);\n mTexture(0.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n mTexture(1.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n mTexture(0.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n mTexture(1.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n mTexture(0.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n mTexture(0.0, 0.0); glVertex3f(-1.0, 1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f( 1.0, 1.0, 1.0);\n mTexture(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n mTexture(1.0, 1.0); glVertex3f(-1.0, -1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f( 1.0, -1.0, -1.0);\n mTexture(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n mTexture(1.0, 0.0); glVertex3f( 1.0, -1.0, -1.0);\n mTexture(1.0, 1.0); glVertex3f( 1.0, 1.0, -1.0);\n mTexture(0.0, 1.0); glVertex3f( 1.0, 1.0, 1.0);\n mTexture(0.0, 0.0); glVertex3f( 1.0, -1.0, 1.0);\n mTexture(0.0, 0.0); glVertex3f(-1.0, -1.0, -1.0);\n mTexture(1.0, 0.0); glVertex3f(-1.0, -1.0, 1.0);\n mTexture(1.0, 1.0); glVertex3f(-1.0, 1.0, 1.0);\n mTexture(0.0, 1.0); glVertex3f(-1.0, 1.0, -1.0);\n glEnd()", "def __drawCube(self):\n self.cubePos = [[[(160, 160), (200, 160), (240, 160)],\n [(160, 200), (200, 200), (240, 200)],\n [(160, 240), (200, 240), (240, 240)]],\n [[(400, 160), (440, 160), (480, 160)],\n [(400, 200), (440, 200), (480, 200)],\n [(400, 240), (440, 240), (480, 240)]],\n [[(280, 160), (320, 160), (360, 160)],\n [(280, 200), (320, 200), (360, 200)],\n [(280, 240), (320, 240), (360, 240)]],\n [[(40, 160), (80, 160), (120, 160)],\n [(40, 200), (80, 200), (120, 200)],\n [(40, 240), (80, 240), (120, 240)]],\n [[(160, 40), (200, 40), (240, 40)],\n [(160, 80), (200, 80), (240, 80)],\n [(160, 120), (200, 120), (240, 120)]],\n [[(160, 280), (200, 280), (240, 280)],\n [(160, 320), (200, 320), (240, 320)],\n [(160, 360), (200, 360), (240, 360)]]]\n self.cubeColor = {1: 'green', 2: 'blue', 3: 'red', 4: 'orange',\\\n 5: 'white', 6: 'yellow'}\n for x in range(6):\n for y in 
range(3):\n for z in range(3):\n pos = self.cubePos[x][y][z]\n color = self.cubeColor[self.cube.cube[x][y][z]]\n self.cv.create_rectangle(pos[0], pos[1], pos[0]+40, pos[1]+40,\n fill=color, width='2')", "def create_cube_textured(texture_list):\n a = Point3(-1.0, -1.0, -1.0)\n b = Point3(1.0, -1.0, -1.0)\n c = Point3(1.0, -1.0, 1.0)\n d = Point3(-1.0, -1.0, 1.0)\n e = Point3(-1.0, 1.0, -1.0)\n f = Point3(1.0, 1.0, -1.0)\n g = Point3(1.0, 1.0, 1.0)\n h = Point3(-1.0, 1.0, 1.0)\n t_list = [Point2(0, 0), Point2(1, 0), Point2(1, 1), Point2(0, 1)]\n\n obj = glGenLists(1)\n glNewList(obj, GL_COMPILE)\n glPushMatrix()\n for _i in range(len(texture_list)):\n glActiveTexture(GL_TEXTURE0 + _i)\n glEnable(GL_TEXTURE_2D)\n glBindTexture(GL_TEXTURE_2D, texture_list[_i])\n glBegin(GL_QUADS)\n drawVertexListCreateNormal_textured([a, b, c, d], t_list)\n drawVertexListCreateNormal_textured([b, f, g, c], t_list)\n drawVertexListCreateNormal_textured([f, e, h, g], t_list)\n drawVertexListCreateNormal_textured([e, a, d, h], t_list)\n drawVertexListCreateNormal_textured([d, c, g, h], t_list)\n drawVertexListCreateNormal_textured([a, e, f, b], t_list)\n glEnd()\n for _i in range(len(texture_list)):\n glActiveTexture(GL_TEXTURE0 + _i)\n glDisable(GL_TEXTURE_2D)\n glPopMatrix()\n glEndList()\n return obj", "def draw_cube(self, vec):\n # TOP FACE\n gl.glBegin(gl.GL_QUADS)\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2] + self.spacer)\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2] + \\\n self.spacer)\n # BOTTOM FACE\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2])\n gl.glVertex3f(vec[0], vec[1], vec[2])\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2])\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2])\n # FRONT FACE\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2] + \\\n self.spacer)\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2])\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2])\n # BACK FACE\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2])\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2])\n # RIGHT FACE\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2] + \\\n self.spacer)\n gl.glVertex3f(vec[0] + self.spacer, vec[1] + self.spacer, vec[2])\n gl.glVertex3f(vec[0] + self.spacer, vec[1], vec[2])\n # LEFT FACE\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2] + self.spacer)\n gl.glVertex3f(vec[0], vec[1], vec[2])\n gl.glVertex3f(vec[0], vec[1] + self.spacer, vec[2])\n gl.glEnd()", "def draw_cube(self, window):\n size = pygame.display.get_surface().get_size()\n width = (size[0]/4)\n\n window.fill((000,000,000))\n\n self.draw_face(\"U\", window, (0 + (width*1), 0 + (width*0)), width)\n self.draw_face(\"L\", window, (0 + (width*0), 0 + (width*1)), width)\n self.draw_face(\"F\", window, (0 + (width*1) * 1, 0 + (width*1)), width)\n self.draw_face(\"R\", window, (0 + (width*2), 0 + (width*1)), width)\n self.draw_face(\"B\", window, (0 + (width*3), 0 + (width*1)), width)\n self.draw_face(\"D\", window, (0 + (width*1), 0 + (width*2)), width)\n\n pygame.display.update()", "def draw( self ):\r\n 
print \"Drawing cuboid!\"\r\n glTranslated( *self.pos3D ) # This moves the origin of drawing , so that we can use the above coordinates at each draw location\r\n if self.rotnByOGL:\r\n glRotated( self.thetaDeg , *self.rotAxis )\r\n # glTranslated( 0 , 0 , 0 ) # This moves the origin of drawing , so that we can use the above coordinates at each draw location\r\n print \"DEBUG:\" , \"Translated to\" , 0 , 0 , 0\r\n glColor3ub( *self.color ) # Get the color according to the voxel type\r\n print \"DEBUG:\" , \"Set color to\" , self.color\r\n pyglet.graphics.draw_indexed( \r\n 8 , # --------------------- Number of seqential triplet in vertex list\r\n GL_QUADS , # -------------- Draw quadrilaterals\r\n self.indices , # ---------- Indices where the coordinates are stored\r\n ( 'v3f' , self.vertX ) # vertex list , OpenGL offers an optimized vertex list object , but this is not it\r\n ) # 'v3i' # This is for integers I suppose!\r\n \r\n glColor3ub( *self.colorLine )\r\n pyglet.gl.glLineWidth( 3 )\r\n pyglet.graphics.draw_indexed( \r\n 8 , # --------------------- Number of seqential triplet in vertex list\r\n GL_LINES , # -------------- Draw quadrilaterals\r\n self.linDices , # ---------- Indices where the coordinates are stored\r\n ( 'v3f' , self.vertX ) # vertex list , OpenGL offers an optimized vertex list object , but this is not it\r\n ) # 'v3i' # This is for integers I suppose!\r\n \r\n print \"DEBUG:\" , \"Indices\"\r\n print self.indices \r\n print \"DEBUG:\" , \"Vertices\"\r\n print self.vertices \r\n \"\"\" URL: http://pyglet.readthedocs.io/en/pyglet-1.2-maintenance/programming_guide/graphics.html#vertex-lists\r\n \r\n There is a significant overhead in using pyglet.graphics.draw and pyglet.graphics.draw_indexed due to pyglet \r\n interpreting and formatting the vertex data for the video device. Usually the data drawn in each frame (of an animation) \r\n is identical or very similar to the previous frame, so this overhead is unnecessarily repeated.\r\n \r\n A VertexList is a list of vertices and their attributes, stored in an efficient manner that’s suitable for direct \r\n upload to the video card. 
On newer video cards (supporting OpenGL 1.5 or later) the data is actually stored in video memory.\r\n \"\"\"\r\n if self.rotnByOGL:\r\n glRotated( -self.thetaDeg , *self.rotAxis )\r\n glTranslated( *np.multiply( self.pos3D , -1 ) ) # Reset the transform coordinates\r\n print \"DEBUG:\" , \"Translated to\" , 0 , 0 , 0\r\n print \"Done drawing!\"", "def draw_cuboid(self, x_pos, z_pos, half_width, half_depth, height):\n GL.glBegin(GL.GL_QUADS)\n GL.glNormal3f(0, -1, 0)\n GL.glVertex3f(x_pos - half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos + half_depth)\n GL.glVertex3f(x_pos - half_width, -6, z_pos + half_depth)\n GL.glNormal3f(0, 1, 0)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos + half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos + half_depth)\n GL.glNormal3f(-1, 0, 0)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6, z_pos + half_depth)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos + half_depth)\n GL.glNormal3f(1, 0, 0)\n GL.glVertex3f(x_pos + half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos + half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos + half_depth)\n GL.glNormal3f(0, 0, -1)\n GL.glVertex3f(x_pos - half_width, -6, z_pos - half_depth)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos - half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos - half_depth)\n GL.glNormal3f(0, 0, 1)\n GL.glVertex3f(x_pos - half_width, -6 + height, z_pos + half_depth)\n GL.glVertex3f(x_pos - half_width, -6, z_pos + half_depth)\n GL.glVertex3f(x_pos + half_width, -6, z_pos + half_depth)\n GL.glVertex3f(x_pos + half_width, -6 + height, z_pos + half_depth)\n GL.glEnd()", "def drawPlane(width, height, texture):\n glBindTexture(GL_TEXTURE_2D, texture)\n glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL) # try GL_DECAL/GL_REPLACE/GL_MODULATE\n glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST) # try GL_NICEST/GL_FASTEST\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT) # try GL_CLAMP/GL_REPEAT/GL_CLAMP_TO_EDGE\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR) # try GL_LINEAR/GL_NEAREST\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)\n\n # Enable/Disable each time or OpenGL ALWAYS expects texturing!\n glEnable(GL_TEXTURE_2D)\n\n ex = width / 2\n sx = -ex\n ey = height\n sy = 0\n glBegin(GL_QUADS)\n glNormal3f(0, 0, 1)\n glTexCoord2f(0, 0)\n glVertex3f(sx, sy, 0)\n glTexCoord2f(2, 0)\n glVertex3f(ex, sy, 0)\n glTexCoord2f(2, 2)\n glVertex3f(ex, ey, 0)\n glTexCoord2f(0, 2)\n glVertex3f(sx, ey, 0)\n glEnd()\n\n glDisable(GL_TEXTURE_2D)", "def create_cube(color=COLOR_WHITE):\n a = Point3(-1.0, -1.0, -1.0)\n b = Point3(1.0, -1.0, -1.0)\n c = Point3(1.0, -1.0, 1.0)\n d = Point3(-1.0, -1.0, 1.0)\n e = Point3(-1.0, 1.0, -1.0)\n f = Point3(1.0, 1.0, -1.0)\n g = Point3(1.0, 1.0, 1.0)\n h = Point3(-1.0, 1.0, 1.0)\n\n obj = glGenLists(1)\n glNewList(obj, GL_COMPILE)\n glPushMatrix()\n glBegin(GL_QUADS)\n glColor4fv(color)\n 
drawVertexListCreateNormal([a, b, c, d])\n drawVertexListCreateNormal([b, f, g, c])\n drawVertexListCreateNormal([f, e, h, g])\n drawVertexListCreateNormal([e, a, d, h])\n drawVertexListCreateNormal([d, c, g, h])\n drawVertexListCreateNormal([a, e, f, b])\n glEnd()\n glPopMatrix()\n glEndList()\n return obj", "def draw_cube(self, points, color=(255, 0, 0)):\n\n # draw front\n self.draw_line(points[0], points[1], color)\n self.draw_line(points[1], points[2], color)\n self.draw_line(points[3], points[2], color)\n self.draw_line(points[3], points[0], color)\n\n # draw back\n self.draw_line(points[4], points[5], color)\n self.draw_line(points[6], points[5], color)\n self.draw_line(points[6], points[7], color)\n self.draw_line(points[4], points[7], color)\n\n # draw sides\n self.draw_line(points[0], points[4], color)\n self.draw_line(points[7], points[3], color)\n self.draw_line(points[5], points[1], color)\n self.draw_line(points[2], points[6], color)\n\n # draw dots\n self.draw_dot(points[0], point_color=color, point_radius=4)\n self.draw_dot(points[1], point_color=color, point_radius=4)\n\n # draw x on the top\n self.draw_line(points[0], points[5], color)\n self.draw_line(points[1], points[4], color)", "def render(self):\n GL.glColor(*self._color)\n\n GL.glLoadIdentity()\n GL.glTranslate(self._x, self._y, 0)\n\n GL.glBegin(GL.GL_QUADS)\n GL.glVertex3f(0, 0, 0)\n GL.glVertex3f(self._width, 0, 0)\n GL.glVertex3f(self._width, self._height, 0)\n GL.glVertex3f(0, self._height, 0)\n GL.glEnd()", "def draw(self):\n\n glEnable(self.texture.target)\n glBindTexture(self.texture.target, self.texture.id)\n if self.mipmaps:\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR_MIPMAP_LINEAR)", "def testRendersSimpleCube(self):\n\n model_transforms = camera_utils.euler_matrices(\n [[-20.0, 0.0, 60.0], [45.0, 60.0, 0.0]])[:, :3, :3]\n\n vertices_world_space = torch.matmul(\n torch.stack([self.cube_vertices, self.cube_vertices]),\n model_transforms.transpose())\n\n normals_world_space = torch.matmul(\n torch.stack([self.cube_normals, self.cube_normals]),\n model_transforms.transpose())\n\n # camera position:\n eye = torch.tensor([[0.0, 0.0, 6.0], [0.0, 0.0, 6.0]], dtype=torch.float32)\n center = torch.tensor([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]], dtype=torch.float32)\n world_up = torch.tensor([[0.0, 1.0, 0.0], [0.0, 1.0, 0.0]], dtype=torch.float32)\n image_width = 640\n image_height = 480\n light_positions = torch.tensor([[[0.0, 0.0, 6.0]], [[0.0, 0.0, 6.0]]])\n light_intensities = torch.ones([2, 1, 3], dtype=torch.float32)\n vertex_diffuse_colors = torch.ones_like(vertices_world_space, dtype=torch.float32)\n\n renders = mesh_renderer.mesh_renderer(\n vertices_world_space, self.cube_triangles, normals_world_space,\n vertex_diffuse_colors, eye, center, world_up, light_positions,\n light_intensities, image_width, image_height)\n\n for image_id in range(renders.shape[0]):\n target_image_name = \"Gray_Cube_%i.png\" % image_id\n baseline_image_path = os.path.join(self.test_data_directory,\n target_image_name)\n test_utils.expect_image_file_and_render_are_near(\n self, baseline_image_path, renders[image_id, :, :, :])", "def cube(im_in, azimuth=30., elevation=45., filename=None,\n do_axis=True, show_label=True,\n cube_label = {'x':'x', 'y':'y', 't':'t'},\n colormap='gray', roll=-180., vmin=0., vmax=1.,\n figsize=figsize, dpi=300, **kwargs):\n im = im_in.copy()\n\n N_X, N_Y, N_frame = im.shape\n fx, fy, ft = get_grids(N_X, N_Y, N_frame)\n import numpy as np\n from vispy import app, scene, use\n try:\n 
AffineTransform = scene.transforms.AffineTransform\n except:\n AffineTransform = scene.transforms.MatrixTransform\n\n use(app='pyglet', gl='pyopengl2')\n from vispy.util.transforms import perspective, translate, rotate\n canvas = scene.SceneCanvas(size=figsize, bgcolor='white', dpi=dpi)\n view = canvas.central_widget.add_view()\n\n# frame = scene.visuals.Cube(size = (N_X/2, N_frame/2, N_Y/2), color=(0., 0., 0., 0.),\n# edge_color='k',\n# parent=view.scene)\n for p in ([1, 1, 1, -1, 1, 1], [1, 1, -1, -1, 1, -1], [1, -1, 1, -1, -1, 1],[1, -1, -1, -1, -1, -1],\n [1, 1, 1, 1, -1, 1], [-1, 1, 1, -1, -1, 1], [1, 1, -1, 1, -1, -1], [-1, 1, -1, -1, -1, -1],\n [1, 1, 1, 1, 1, -1], [-1, 1, 1, -1, 1, -1], [1, -1, 1, 1, -1, -1], [-1, -1, 1, -1, -1, -1]):\n# line = scene.visuals.Line(pos=np.array([[p[0]*N_Y/2, p[1]*N_X/2, p[2]*N_frame/2], [p[3]*N_Y/2, p[4]*N_X/2, p[5]*N_frame/2]]), color='black', parent=view.scene)\n line = scene.visuals.Line(pos=np.array([[p[0]*N_X/2, p[1]*N_frame/2, p[2]*N_Y/2],\n [p[3]*N_X/2, p[4]*N_frame/2, p[5]*N_Y/2]]), color='black', parent=view.scene)\n\n opts = {'parent':view.scene, 'cmap':'grays', 'clim':(0., 1.)}\n image_xy = scene.visuals.Image(np.rot90(im[:, :, 0], 3), **opts)\n tr_xy = AffineTransform()\n tr_xy.rotate(90, (1, 0, 0))\n tr_xy.translate((-N_X/2, -N_frame/2, -N_Y/2))\n image_xy.transform = tr_xy\n\n image_xt = scene.visuals.Image(np.fliplr(im[:, -1, :]), **opts)\n tr_xt = AffineTransform()\n tr_xt.rotate(90, (0, 0, 1))\n tr_xt.translate((N_X/2, -N_frame/2, N_Y/2))\n image_xt.transform = tr_xt\n\n image_yt = scene.visuals.Image(np.rot90(im[-1, :, :], 1), **opts)\n tr_yt = AffineTransform()\n tr_yt.rotate(90, (0, 1, 0))\n tr_yt.translate((+N_X/2, -N_frame/2, N_Y/2))\n image_yt.transform = tr_yt\n\n if do_axis:\n t = {}\n for text in ['x', 'y', 't']:\n t[text] = scene.visuals.Text(cube_label[text], parent=canvas.scene, face='Helvetica', color='black')\n t[text].font_size = 8\n t['x'].pos = canvas.size[0] // 3, canvas.size[1] - canvas.size[1] // 8\n t['t'].pos = canvas.size[0] - canvas.size[0] // 5, canvas.size[1] - canvas.size[1] // 6\n t['y'].pos = canvas.size[0] // 12, canvas.size[1] // 2\n\n cam = scene.TurntableCamera(elevation=35, azimuth=30)\n cam.fov = 45\n cam.scale_factor = N_X * 1.7\n if do_axis: margin = 1.3\n else: margin = 1\n cam.set_range((-N_X/2, N_X/2), (-N_Y/2*margin, N_Y/2/margin), (-N_frame/2, N_frame/2))\n view.camera = cam\n if not(filename is None):\n im = canvas.render()\n app.quit()\n import vispy.io as io\n io.write_png(filename, im)\n else:\n app.quit()\n return im", "def enable(self):\n\t\tglEnable(GL_TEXTURE_3D)\n\t\tglTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_S, GL_REPEAT)\n\t\tglTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_T, GL_REPEAT)\n\t\tglTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_WRAP_R, GL_REPEAT)\n\t\tglTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MAG_FILTER, GL_LINEAR)\n\t\tglTexParameteri(GL_TEXTURE_3D, GL_TEXTURE_MIN_FILTER, GL_LINEAR)", "def drawFloor(width, height, texture):\n glBindTexture(GL_TEXTURE_2D, texture)\n glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_REPLACE) # try GL_DECAL/GL_REPLACE/GL_MODULATE\n glHint(GL_PERSPECTIVE_CORRECTION_HINT, GL_NICEST) # try GL_NICEST/GL_FASTEST\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE) # try GL_CLAMP/GL_REPEAT/GL_CLAMP_TO_EDGE\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR) # try GL_LINEAR/GL_NEAREST\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, 
GL_LINEAR)\n\n sx = width / 2\n ex = -sx\n sz = height / 2\n ez = -sz\n\n # Enable/Disable each time or OpenGL ALWAYS expects texturing!\n glEnable(GL_TEXTURE_2D)\n\n glBegin(GL_QUADS)\n glTexCoord2f(0, 0)\n glVertex3f(sx, 0, sz)\n glTexCoord2f(0, 1)\n glVertex3f(sx, 0, ez)\n glTexCoord2f(1, 1)\n glVertex3f(ex, 0, ez)\n glTexCoord2f(1, 0)\n glVertex3f(ex, 0, sz)\n glEnd()\n\n glDisable(GL_TEXTURE_2D)", "def cube(self):\n\n dims = self.voxels.shape\n max_dim = max(dims)\n \n x_target = (max_dim - dims[0]) / 2\n y_target = (max_dim - dims[1]) / 2\n z_target = (max_dim - dims[2]) / 2\n\n self.voxels = np.pad(self.voxels,\n ((int(np.ceil(x_target)), int(np.floor(x_target))),\n (int(np.ceil(y_target)), int(np.floor(y_target))),\n (int(np.ceil(z_target)), int(np.floor(z_target)))),\n 'constant',\n constant_values=(0))\n\n self.point_position = self.point_position + [np.ceil(z_target),\n np.ceil(y_target),\n np.ceil(x_target)]\n\n return(self)", "def mlab_plt_cube(xmin, xmax, ymin, ymax, zmin, zmax):\n faces = cube_faces(xmin, xmax, ymin, ymax, zmin, zmax)\n for grid in faces:\n x, y, z = grid\n mlab.mesh(x, y, z, opacity=0.1, color=(0.1, 0.2, 0.3))", "def create_cube(scale=(1.0,1.0,1.0), st=False, rgba=False, dtype='float32', type='triangles'):\n\n shape = [24, 3]\n rgba_offset = 3\n\n width, height, depth = scale\n # half the dimensions\n width /= 2.0\n height /= 2.0\n depth /= 2.0\n\n vertices = np.array([\n # front\n # top right\n ( width, height, depth,),\n # top left\n (-width, height, depth,),\n # bottom left\n (-width,-height, depth,),\n # bottom right\n ( width,-height, depth,),\n\n # right\n # top right\n ( width, height,-depth),\n # top left\n ( width, height, depth),\n # bottom left\n ( width,-height, depth),\n # bottom right\n ( width,-height,-depth),\n\n # back\n # top right\n (-width, height,-depth),\n # top left\n ( width, height,-depth),\n # bottom left\n ( width,-height,-depth),\n # bottom right\n (-width,-height,-depth),\n\n # left\n # top right\n (-width, height, depth),\n # top left\n (-width, height,-depth),\n # bottom left\n (-width,-height,-depth),\n # bottom right\n (-width,-height, depth),\n\n # top\n # top right\n ( width, height,-depth),\n # top left\n (-width, height,-depth),\n # bottom left\n (-width, height, depth),\n # bottom right\n ( width, height, depth),\n\n # bottom\n # top right\n ( width,-height, depth),\n # top left\n (-width,-height, depth),\n # bottom left\n (-width,-height,-depth),\n # bottom right\n ( width,-height,-depth),\n ], dtype=dtype)\n\n st_values = None\n rgba_values = None\n\n if st:\n # default st values\n st_values = np.tile(\n np.array([\n (1.0, 1.0,),\n (0.0, 1.0,),\n (0.0, 0.0,),\n (1.0, 0.0,),\n ], dtype=dtype),\n (6,1,)\n )\n\n if isinstance(st, bool):\n pass\n elif isinstance(st, (int, float)):\n st_values *= st\n elif isinstance(st, (list, tuple, np.ndarray)):\n st = np.array(st, dtype=dtype)\n if st.shape == (2,2,):\n # min / max\n st_values *= st[1] - st[0]\n st_values += st[0]\n elif st.shape == (4,2,):\n # per face st values specified manually\n st_values[:] = np.tile(st, (6,1,))\n elif st.shape == (6,2,):\n # st values specified manually\n st_values[:] = st\n else:\n raise ValueError('Invalid shape for st')\n else:\n raise ValueError('Invalid value for st')\n\n shape[-1] += st_values.shape[-1]\n rgba_offset += st_values.shape[-1]\n\n if rgba:\n # default rgba values\n rgba_values = np.tile(np.array([1.0, 1.0, 1.0, 1.0], dtype=dtype), (24,1,))\n\n if isinstance(rgba, bool):\n pass\n elif isinstance(rgba, (int, float)):\n # int / 
float expands to RGBA with all values == value\n rgba_values *= rgba \n elif isinstance(rgba, (list, tuple, np.ndarray)):\n rgba = np.array(rgba, dtype=dtype)\n\n if rgba.shape == (3,):\n rgba_values = np.tile(rgba, (24,1,))\n elif rgba.shape == (4,):\n rgba_values[:] = np.tile(rgba, (24,1,))\n elif rgba.shape == (4,3,):\n rgba_values = np.tile(rgba, (6,1,))\n elif rgba.shape == (4,4,):\n rgba_values = np.tile(rgba, (6,1,))\n elif rgba.shape == (6,3,):\n rgba_values = np.repeat(rgba, 4, axis=0)\n elif rgba.shape == (6,4,):\n rgba_values = np.repeat(rgba, 4, axis=0)\n elif rgba.shape == (24,3,):\n rgba_values = rgba\n elif rgba.shape == (24,4,):\n rgba_values = rgba\n else:\n raise ValueError('Invalid shape for rgba')\n else:\n raise ValueError('Invalid value for rgba')\n\n shape[-1] += rgba_values.shape[-1]\n\n data = np.empty(shape, dtype=dtype)\n data[:,:3] = vertices\n if st_values is not None:\n data[:,3:5] = st_values\n if rgba_values is not None:\n data[:,rgba_offset:] = rgba_values\n\n if type == 'triangles':\n # counter clockwise\n # top right -> top left -> bottom left\n # top right -> bottom left -> bottom right\n indices = np.tile(np.array([0, 1, 2, 0, 2, 3], dtype='int'), (6,1))\n for face in range(6):\n indices[face] += (face * 4)\n indices.shape = (-1,)\n elif type == 'triangle_strip':\n raise NotImplementedError\n elif type == 'triangle_fan':\n raise NotImplementedError\n elif type == 'quads':\n raise NotImplementedError\n elif type == 'quad_strip':\n raise NotImplementedError\n else:\n raise ValueError('Unknown type')\n\n return data, indices", "def cube_vertices(x, y, z, n):\n #def cube_vertices(self):\n # \"\"\" Return the vertices of the cube at position x, y, z with size 2*n.\n #\n # \"\"\"\n # return [\n # x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top\n # x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom\n # x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left\n # x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right\n # x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front\n # x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back\n # ]\n return [\n x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top\n x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom\n x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left\n x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right\n x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front\n x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back\n ]", "def generate(self, x, y=0, z=0):\n lerp, grad, fade, p = self._lerp, self._grad, self._fade, self._p\n # Find unit cuve that contains point (x,y,z).\n X = int(floor(x)) & 255\n Y = int(floor(y)) & 255\n Z = int(floor(z)) & 255\n # Find relative (x,y,z) of point in cube.\n # Compute fade curves.\n x, y, z = x-floor(x), y-floor(y), z-floor(z)\n u, v, w = fade(x), fade(y), fade(z)\n # Hash coordinates of the cube corners.\n A = Y + p[X]\n B = Y + p[X+1]\n AA, AB, BA, BB = Z+p[A], Z+p[A+1], Z+p[B], Z+p[B+1]\n # Add blended results from the cube corners.\n return lerp(w, \n lerp(v, lerp(u, grad(p[AA ], x , y , z ), \n grad(p[BA ], x-1, y , z )),\n lerp(u, grad(p[AB ], x , y-1, z ), \n grad(p[BB ], x-1, y-1, z ))),\n lerp(v, lerp(u, grad(p[AA+1], x , y , z-1), \n grad(p[BA+1], x-1, y , z-1)),\n lerp(u, grad(p[AB+1], x , y-1, z-1), \n grad(p[BB+1], x-1, y-1, z-1))))", "def load(self):\n\t\tglTexImage3D(GL_TEXTURE_3D, 0, GL_LUMINANCE16_ALPHA16, \n\t\t\tself.width, self.width, self.width, 0, GL_LUMINANCE_ALPHA, 
\n\t\t\tGL_UNSIGNED_SHORT, ctypes.byref(self.data))", "def __init__(self, camera=None, light=None, name=\"\", z=0.1):\r\n super(Canvas, self).__init__(camera, light, name, x=0.0, y=0.0, z=0.0,\r\n rx=0.0, ry=0.0, rz=0.0, sx=1.0, sy=1.0, sz=1.0,\r\n cx=0.0, cy=0.0, cz=0.0)\r\n self.ttype = GL_TRIANGLES\r\n self.verts = []\r\n self.norms = []\r\n self.texcoords = []\r\n self.inds = []\r\n self.depth = z\r\n\r\n ww = 20.0\r\n hh = 20.0\r\n\r\n self.verts = ((-ww, -hh, z), (0.0, hh, z), (ww, -hh, z))\r\n self.norms = ((0, 0, -1), (0, 0, -1), (0, 0, -1))\r\n self.texcoords = ((0.0, 0.0), (0.5, 1.0), (1.0, 0.0))\r\n\r\n self.inds = ((0, 1, 2), ) #python quirk: comma for tuple with only one val\r\n\r\n self.buf = []\r\n self.buf.append(Buffer(self, self.verts, self.texcoords, self.inds, self.norms))", "def dessinerRectangle(p0, p1, p2, p3,texture=None, textureRepeat = True, color = (0,1,0)):\n \n \n if texture == None:\n r,v,b = color\n glDisable(GL_TEXTURE_2D)\n glColor3f(r,v,b)\n glBegin(GL_QUADS)\n glVertex3f(p0[0],p0[1],p0[2])\n glVertex3f(p1[0],p1[1],p1[2])\n glVertex3f(p2[0],p2[1],p2[2])\n glVertex3f(p3[0],p3[1],p3[2])\n glEnd()\n glEnable(GL_TEXTURE_2D)\n else:\n\n if textureRepeat:\n a = fabs(p0[0] - p1[0])\n b = fabs(p0[1] - p1[1])\n c = fabs(p0[2] - p1[2])\n\n if a >= b and a >= c:\n d = a\n elif b >= a and b >= c:\n d = b\n elif c >= a and c >= b:\n d = c\n else:\n d = a\n\n a = fabs(p1[0] - p2[0])\n b = fabs(p1[1] - p2[1])\n c = fabs(p1[2] - p2[2])\n\n if a >= b and a >= c:\n e = a\n elif b >= a and b >= c:\n e = b\n elif c >= a and c >= b:\n e = c\n else:\n e = a\n\n del a\n del b\n del c\n\n glColor4f(1,1,1,1)\n glBindTexture(GL_TEXTURE_2D,texture.id)\n glBegin(GL_QUADS)\n glTexCoord2f(0.0,0.0)\n glVertex3f(p0[0],p0[1],p0[2])\n glTexCoord2f(d,0.0)\n glVertex3f(p1[0],p1[1],p1[2])\n glTexCoord2f(d,e)\n glVertex3f(p2[0],p2[1],p2[2])\n glTexCoord2f(0,e)\n glVertex3f(p3[0],p3[1],p3[2])\n glEnd()\n else:\n glColor4f(1,1,1,1)\n glBindTexture(GL_TEXTURE_2D,texture.id)\n glBegin(GL_QUADS)\n glTexCoord2f(0.0,0.0)\n glVertex3f(p0[0],p0[1],p0[2])\n glTexCoord2f(0.0,1.0)\n glVertex3f(p1[0],p1[1],p1[2])\n glTexCoord2f(1.0,1.0)\n glVertex3f(p2[0],p2[1],p2[2])\n glTexCoord2f(1.0,0.0)\n glVertex3f(p3[0],p3[1],p3[2])\n glEnd()", "def setupTexture( self ):\n glEnable(GL_TEXTURE_2D)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)\n glBindTexture(GL_TEXTURE_2D, self.imageID)", "def GetInTextureCoord(self):\n ...", "def redraw(self):\n self.update_spin()\n glMatrixMode( GL_MODELVIEW )\n glLoadIdentity()\n\n glClear( GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT )\n self.SetCurrent()\n texture_id = self.texture.texture_id\n width = self.texture.GetWidth()\n height = self.texture.GetHeight()\n\n self.texture.load_jpeg('Sunrise.jpg')\n self.texture.enable()\n\n glTranslatef( 0.0, 0.0, -5.0 )\n glRotatef( self.angle, 0, 1.0, 0 )\n yscale = 1.75\n xscale = yscale * self.x2yAspect\n\n glScalef( xscale, yscale, 2.0 )\n\n glBegin( GL_QUADS )\n # Lower left quad corner\n glTexCoord2f( self.offset, self.offset )\n glVertex3f(-1.0, -1.0, 0.0)\n\n # Lower right quad corner\n glTexCoord2f( self.replication + self.offset, self.offset )\n glVertex3f(1.0, -1.0, 0.0)\n\n # Upper right quad corner\n glTexCoord2f( self.replication + self.offset, self.replication + self.offset )\n glVertex3f(1.0, 1.0, 0.0)\n\n # Upper left quad corner\n glTexCoord2f( self.offset, self.replication + 
self.offset )\n glVertex3f(-1.0, 1.0, 0.0)\n glEnd()\n\n self.texture.disable()\n glutSwapBuffers()", "def polyCube(*args, axis: Union[List[float, float, float], bool]=None, caching: bool=True,\n constructionHistory: bool=True, createUVs: Union[int, bool]=3, depth: Union[float,\n bool]=1.0, height: Union[float, bool]=1.0, name: AnyStr=\"\", nodeState: Union[int,\n bool]=0, object: bool=True, subdivisionsDepth: Union[int, bool]=1,\n subdivisionsHeight: Union[int, bool]=1, subdivisionsWidth: Union[int, bool]=1,\n subdivisionsX: Union[int, bool]=1, subdivisionsY: Union[int, bool]=1,\n subdivisionsZ: Union[int, bool]=1, texture: Union[int, bool]=1, width: Union[float,\n bool]=1.0, q=True, query=True, e=True, edit=True, **kwargs)->Union[List[AnyStr],\n Any]:\n pass", "def drawReference(x, y, z, l):\r\n\r\n glPushMatrix()\r\n\r\n glColor3f(1.0, 0.0, 0.0)\r\n\r\n glBegin(GL_LINES)\r\n glNormal3f(0.0, 0.0, 1.0)\r\n glVertex3f(x, y, z)\r\n glVertex3f(x + l, y, z)\r\n glEnd()\r\n\r\n glColor3f(0.0, 1.0, 0.0)\r\n\r\n glBegin(GL_LINES)\r\n glNormal3f(0.0, 0.0, 1.0)\r\n glVertex3f(x, y, z)\r\n glVertex3f(x, y + l, z)\r\n glEnd()\r\n\r\n glColor3f(0.0, 0.0, 1.0)\r\n\r\n glBegin(GL_LINES)\r\n glNormal3f(0.0, 0.0, 1.0)\r\n glVertex3f(x, y, z)\r\n glVertex3f(x, y, z + l)\r\n glEnd()\r\n\r\n glPopMatrix()", "def enable_texture_mode():\n for area in bpy.context.screen.areas:\n if area.type == \"VIEW_3D\":\n for space in area.spaces:\n if space.type == \"VIEW_3D\":\n space.viewport_shade = \"TEXTURED\"\n return", "def draw_cube(ax, xy, size, depth=0.3,\n edges=None, label=None, label_kwargs=None, **kwargs):\n if edges is None:\n edges = range(1, 13)\n\n x, y = xy\n y -= size # set left/up corner as the first (0,0) for one cube\n \n # first plot background edges\n if 9 in edges:\n ax.plot([x + depth, x + depth + size],\n [y + depth + size, y + depth + size], **kwargs)\n if 10 in edges:\n ax.plot([x + depth + size, x + depth + size],\n [y + depth, y + depth + size], **kwargs)\n if 11 in edges:\n ax.plot([x + depth, x + depth + size],\n [y + depth, y + depth], **kwargs)\n if 12 in edges:\n ax.plot([x + depth, x + depth],\n [y + depth, y + depth + size], **kwargs)\n \n # second plot middile edges\n if 5 in edges:\n ax.plot([x, x + depth],\n [y + size, y + depth + size], **kwargs)\n if 6 in edges:\n ax.plot([x + size, x + size + depth],\n [y + size, y + depth + size], **kwargs)\n if 7 in edges:\n ax.plot([x + size, x + size + depth],\n [y, y + depth], **kwargs)\n if 8 in edges:\n ax.plot([x, x + depth],\n [y, y + depth], **kwargs)\n \n # last plot foreground edges \n if 1 in edges: # top edge\n ax.plot([x, x + size],\n [y + size, y + size], **kwargs)\n if 2 in edges: # right \n ax.plot([x + size, x + size],\n [y, y + size], **kwargs)\n if 3 in edges: # bottom\n ax.plot([x, x + size],\n [y, y], **kwargs)\n if 4 in edges: # left\n ax.plot([x, x],\n [y, y + size], **kwargs)\n\n if label:\n if label_kwargs is None:\n label_kwargs = {}\n \n ax.text(x + 0.5 * size, y + 0.5 * size - font_height() / 2, \n label, ha='center', va='center', **label_kwargs)", "def plot_cube(ax: Axes, x: ArrayLike, y: ArrayLike, f_low: callable, f_upp: callable, **kwargs):\n # lower\n xm, ym = np.meshgrid(x, y)\n zm = f_low(xm, ym)\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # upper\n zm = f_upp(xm, ym)\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # north\n xm, ym = np.array([x, x]), y[0]*np.ones([2, len(y)])\n zm = np.array([f_low(x, y[0]), f_upp(x, y[0])])\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # south\n xm, ym = np.array([x, x]), 
y[-1]*np.ones([2, len(y)])\n zm = np.array([f_low(x, y[-1]), f_upp(x, y[-1])])\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # east\n xm, ym = x[0]*np.ones([2, len(x)]), np.array([y, y])\n zm = np.array([f_low(x[0], y), f_upp(x[0], y)])\n ax.plot_surface(xm, ym, zm, **kwargs)\n\n # west\n xm, ym = x[-1]*np.ones([2, len(x)]), np.array([y, y])\n zm = np.array([f_low(x[-1], y), f_upp(x[-1], y)])\n ax.plot_surface(xm, ym, zm, **kwargs)", "def place_cube(self,\n cube_xy,\n player=None,\n weight=1,\n azimuth=None,\n return_azimuth=False):\n\n self.color_idx += 1\n if self.color_idx == len(self.colors):\n self.color_idx = 0\n if azimuth is None:\n azimuth = np.random.randint(0, 180)\n else:\n assert azimuth >= 0 and azimuth <= 180\n cube_rot = self.p0.getQuaternionFromEuler([\n 0, 0, np.deg2rad(azimuth)\n ]) # rotated around which axis? # np.deg2rad(90)\n\n alpha = 1 # this could be set to .5 for some transparency\n\n if weight == 1:\n if player is None or self.four_colors:\n color = self.colors[self.color_idx] + [alpha]\n elif player == Player.Player:\n color = [0, 0, 1, 1]\n if DEBUG:\n print(\"Player putting down cube at\", cube_xy)\n elif player == Player.Enemy:\n color = [1, 0, 0, 1]\n if DEBUG:\n print(\"Opponent putting down cube at\", cube_xy)\n elif player == Player.Starter:\n color = [0, 0, 0, 1]\n if self.dark:\n color = [1, 1, 1, 1]\n if DEBUG:\n print(\"Starter cube at\", cube_xy)\n else:\n color = WEIGHT_COLORS[weight]\n\n max_z = self.find_highest_z(cube_xy, azimuth)\n\n cube_pos = [cube_xy[0], cube_xy[1], max_z + 1.0001]\n # print (\"placing cube at\",cube_pos)\n\n cube_visual = self.p0.createVisualShape(\n shapeType=self.p0.GEOM_BOX,\n rgbaColor=color,\n halfExtents=[1, 1, 1]\n # specularColor=[0.4, .4, 0],\n )\n\n cube = self.p0.createMultiBody(\n baseMass=weight,\n # baseInertialFramePosition=[0, 0, 0],\n baseCollisionShapeIndex=self.cube_collision,\n baseVisualShapeIndex=cube_visual,\n basePosition=cube_pos,\n baseOrientation=cube_rot,\n useMaximalCoordinates=True)\n\n self.cubes.append(cube)\n\n if max_z > self.current_max_z:\n self.current_max_z = np.around(max_z)\n out = True\n else:\n out = False\n\n if not return_azimuth:\n return out\n else:\n return out, azimuth", "def draw_background(imname, width, height):\n \n # load background image (should be .bmp) to OpenGL texture\n bg_image = pygame.image.load(imname).convert()\n bg_data = pygame.image.tostring(bg_image,\"RGBX\",1)\n \n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n \n # bind the texture\n glEnable(GL_TEXTURE_2D)\n glBindTexture(GL_TEXTURE_2D,glGenTextures(1))\n glTexImage2D(GL_TEXTURE_2D,0,GL_RGBA,width,height,0,GL_RGBA,GL_UNSIGNED_BYTE,bg_data)\n glTexParameterf(GL_TEXTURE_2D,GL_TEXTURE_MAG_FILTER,GL_NEAREST)\n glTexParameterf(GL_TEXTURE_2D,GL_TEXTURE_MIN_FILTER,GL_NEAREST)\n \n # create quad to fill the whole window\n glBegin(GL_QUADS)\n glTexCoord2f(0.0,0.0); glVertex3f(-1.0,-1.0,-1.0)\n glTexCoord2f(1.0,0.0); glVertex3f( 1.0,-1.0,-1.0)\n glTexCoord2f(1.0,1.0); glVertex3f( 1.0, 1.0,-1.0)\n glTexCoord2f(0.0,1.0); glVertex3f(-1.0, 1.0,-1.0)\n glEnd()\n \n # clear the texture\n glDeleteTextures(1)", "def _load_opengl(self):\r\n opengles.glGenTextures(4, ctypes.byref(self._tex), 0)\r\n from pi3d.Display import Display\r\n if Display.INSTANCE:\r\n Display.INSTANCE.textures_dict[str(self._tex)] = [self._tex, 0]\r\n opengles.glBindTexture(GL_TEXTURE_2D, self._tex)\r\n RGBv = GL_RGBA if self.alpha else GL_RGB\r\n opengles.glTexImage2D(GL_TEXTURE_2D, 0, RGBv, 
self.ix, self.iy, 0, RGBv,\r\n GL_UNSIGNED_BYTE,\r\n ctypes.string_at(self.image, len(self.image)))\r\n opengles.glEnable(GL_TEXTURE_2D)\r\n opengles.glGenerateMipmap(GL_TEXTURE_2D)\r\n opengles.glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\r\n if self.mipmap:\r\n opengles.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,\r\n GL_LINEAR_MIPMAP_NEAREST)\r\n opengles.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,\r\n GL_LINEAR)\r\n else:\r\n opengles.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER,\r\n GL_NEAREST)\r\n opengles.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER,\r\n GL_NEAREST)\r\n opengles.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S,\r\n GL_MIRRORED_REPEAT)\r\n opengles.glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T,\r\n GL_MIRRORED_REPEAT)", "def plot_surface(self):\n X, Y = np.meshgrid(self.x, self.y)\n fig = plt.figure()\n ax = fig.add_subplot(111, projection='3d')\n ax.plot_surface(X=X, Y=Y, Z=self.z)\n plt.show()", "def view(self):\n plt.imshow(self.texture_array, vmin = 0, vmax = 255)\n if self.texture_array.ndim == 2:\n plt.set_cmap('gray')\n \n plt.title(self.texture_name)\n plt.show()", "def box(x, y, z):\n global _cmds\n _cmds = (f\"cube({[x,y,z]},\"\n f\"center=false);\\n\\n\") + _cmds", "def dessinerTriangle(p0, p1, p2,texture=None):\n \n \n if texture == None:\n r,v,b = 0,1,0\n glColor3f(r,v,b)\n glDisable(GL_TEXTURE_2D)\n glColor3f(1.0,0.0,0.0)\n glBegin(GL_TRIANGLES)\n glVertex3f(p0[0],p0[1],p0[2])\n glVertex3f(p1[0],p1[1],p1[2])\n glVertex3f(p2[0],p2[1],p2[2])\n glEnd()\n glEnable(GL_TEXTURE_2D)\n else:\n glBindTexture(GL_TEXTURE_2D,texture.id)\n glBegin(GL_TRIANGLES)\n glTexCoord2f(0.0,0.0)\n glVertex3f(p0[0],p0[1],p0[2])\n glTexCoord2f(0.0,0.0)\n glVertex3f(p1[0],p1[1],p1[2])\n glTexCoord2f(1,1)\n glVertex3f(p2[0],p2[1],p2[2])\n glEnd()", "def __bind_pca_texture(self):\n size = View.__principal_components.size // 3\n data = View.__principal_components.transpose() * View.__deviations\n\n columns = 2**13\n rows = ceil(size / columns)\n\n padding = [0] * (rows * columns - size) * 3\n data = concatenate((data.flatten(), padding))\n\n self.__sh.create_float_texture(data, (columns, rows), 2, 3)", "def renderTiles(self, window):\n window.blit(self.TileSurface, (self.x, self.y),(self.textures))", "def __draw_board_texture(self, texture):\n\n textureWidth, textureHeight = texture.size\n\n for x in range(0, self.width, textureWidth):\n for y in range(0, self.height, textureHeight):\n self.baseImage.paste(texture, (x, y))", "def __init__(self, name):\r\n super(OffScreenTexture, self).__init__(name)\r\n from pi3d.Display import Display\r\n self.ix, self.iy = Display.INSTANCE.width, Display.INSTANCE.height\r\n self.im = Image.new(\"RGBA\",(self.ix, self.iy))\r\n self.image = self.im.convert(\"RGBA\").tostring('raw', \"RGBA\")\r\n self.alpha = True\r\n self.blend = False\r\n\r\n self._tex = ctypes.c_int()\r\n self.framebuffer = (ctypes.c_int * 1)()\r\n opengles.glGenFramebuffers(1, self.framebuffer)\r\n self.depthbuffer = (ctypes.c_int * 1)()\r\n opengles.glGenRenderbuffers(1, self.depthbuffer)", "def drawScene(self):\n glBegin(GL_LINES)\n # draw axes\n glColor3f(1, 0, 0)\n glVertex3f(0, 0, 0)\n glVertex3f(self.worldSize / 2, 0, 0)\n glColor3f(0, 1, 0)\n glVertex3f(0, 0, 0)\n glVertex3f(0, self.worldSize / 2, 0)\n glColor3f(0, 0, 1)\n glVertex3f(0, 0, 0)\n glVertex3f(0, 0, self.worldSize / 2)\n # draw bounding box\n glColor3f(1, 1, 1)\n scalar = (self.worldSize - 1) / 2\n for x in [-1, 1]:\n for y in [-1, 1]:\n for z in [-1, 1]:\n 
glVertex3f(scalar * x, scalar * y, scalar * z)\n for z in [-1, 1]:\n for x in [-1, 1]:\n for y in [-1, 1]:\n glVertex3f(scalar * x, scalar * y, scalar * z)\n for y in [-1, 1]:\n for z in [-1, 1]:\n for x in [-1, 1]:\n glVertex3f(scalar * x, scalar * y, scalar * z)\n glEnd()\n # draw spheres if in POINTS mode\n if self.displayMode is self.DISPLAYMODE_POINTS:\n prev = (0, 0, 0)\n offset = int(self.worldSize / 2)\n for x in range(self.worldSize):\n for y in range(self.worldSize):\n for z in range(self.worldSize):\n glTranslatef(x - offset - prev[0], y - offset - prev[1], z - offset - prev[2])\n # use threshold for black/white coloring\n if self.world[x][y][z] > self.worldThreshold:\n glColor3f(1, 1, 1)\n else:\n glColor3f(0, 0, 0)\n gluSphere(self.sphere, 0.1, 8, 4)\n prev = (x - offset, y - offset, z - offset)\n # draw mesh if in MESH mode\n elif self.displayMode is self.DISPLAYMODE_MESH:\n offset = int(self.worldSize / 2)\n for x in range(self.worldSize - 1):\n for y in range(self.worldSize - 1):\n for z in range(self.worldSize - 1):\n if self.polygons[x][y][z]:\n glBegin(GL_POLYGON)\n glColor3f(x / self.worldSize, y / self.worldSize, z / self.worldSize)\n for vertex in self.polygons[x][y][z]:\n glVertex3f(x + vertex[0] - offset, y + vertex[1] - offset, z + vertex[2] - offset)\n glEnd()\n # draw wireframe in in WIRE mode\n elif self.displayMode is self.DISPLAYMODE_WIREFRAME:\n offset = int(self.worldSize / 2)\n for x in range(self.worldSize - 1):\n for y in range(self.worldSize - 1):\n for z in range(self.worldSize - 1):\n glBegin(GL_LINES)\n glColor3f(x / self.worldSize, y / self.worldSize, z / self.worldSize)\n for vertex in self.polygons[x][y][z]:\n glVertex3f(x + vertex[0] - offset, y + vertex[1] - offset, z + vertex[2] - offset)\n glEnd()\n # draw background in the distance\n glLoadIdentity()\n glBegin(GL_QUADS)\n glColor3f(59 / 256, 102 / 256, 212 / 256)\n glVertex3f(-30, -23, -49.5)\n glVertex3f(30, -23, -49.5)\n glColor3f(184 / 256, 201 / 256, 242 / 256)\n glVertex3f(30, 23, -49.5)\n glVertex3f(-30, 23, -49.5)\n glEnd()\n # HUD in white\n glColor3f(1, 1, 1)\n # lower left\n glWindowPos2f(10, 10)\n for ch in 'WASD: Rotate':\n glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch))\n glWindowPos2f(10, 25)\n for ch in 'Wheel: Thresh':\n glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch))\n glWindowPos2f(10, 40)\n for ch in 'R: Randomize':\n glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch))\n glWindowPos2f(10, 55)\n for ch in 'O: Object':\n glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch))\n glWindowPos2f(10, 70)\n for ch in 'I: Wireframe':\n glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch))\n glWindowPos2f(10, 85)\n for ch in 'P: Points':\n glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch))\n # upper right\n glWindowPos2f(self.displaySize[0] - 118, self.displaySize[1] - 25)\n for ch in 'Thresh: %0.2f' % self.worldThreshold:\n glutBitmapCharacter(GLUT_BITMAP_9_BY_15, ord(ch))", "def draw_face(self, face, window, xy, width):\n width = width / 2 - (width/2/20)\n\n if face == \"U\":\n face = self.state[0:4]\n elif face == \"L\":\n face = self.state[4:8]\n elif face == \"F\":\n face = self.state[8:12]\n elif face == \"R\":\n face = self.state[12:16]\n elif face == \"B\":\n face = self.state[16:20]\n elif face == \"D\":\n face = self.state[20:24]\n\n rect1 = pygame.Rect((xy[0],xy[1], width, width))\n rect2 = pygame.Rect((xy[0]+(width+(width/10)),xy[1], width, width))\n rect3 = pygame.Rect((xy[0],xy[1]+(width+(width/10)), width, width))\n rect4 = 
pygame.Rect((xy[0]+(width+(width/10)),xy[1]+(width+(width/10)), width, width))\n pygame.draw.rect(window, self.colors[face[0]], rect2)\n pygame.draw.rect(window, self.colors[face[1]], rect1)\n pygame.draw.rect(window, self.colors[face[2]], rect3)\n pygame.draw.rect(window, self.colors[face[3]], rect4)", "def __call__(self):\n Texture()", "def __init__(self, cube_file):\n\t\timport pyfits, pywcs\n\t\t# Put the cube in RA - DEC - RM order and save it\n\t\tCube.__init__(self, np.transpose(pyfits.getdata(cube_file), (2, 1, 0)))\n\t\tself.wcs = pywcs.WCS(pyfits.getheader(cube_file))\n\n\t\tsky0 = self.pix2sky([0,0,0])\n\t \tskyN = self.pix2sky([self.x_max,self.y_max,self.z_max])\n\t \tself.ra_min = min(sky0[0],skyN[0])\n\t\tself.ra_max = max(sky0[0],skyN[0])\n\t\tself.ra_step = (self.ra_max-self.ra_min)/self.x_max\n\t \tself.dec_min = min(sky0[1],skyN[1])\n\t self.dec_max = max(sky0[1],skyN[1])\n\t\tself.dec_step = (self.dec_max-self.dec_min)/self.y_max\n\t\tself.fd_min = min(sky0[2],skyN[2])\n\t\tself.fd_max = max(sky0[2],skyN[2])\n\t\tself.fd_step = (self.fd_max-self.fd_min)/self.z_max", "def tex(self):\r\n self.load_opengl()\r\n return self._tex", "def imshow_mesh_3d(img, vertices, faces, camera_center, focal_length, colors=(76, 76, 204)):\n H, W, C = img.shape\n if not has_pyrender:\n warnings.warn('pyrender package is not installed.')\n return img\n if not has_trimesh:\n warnings.warn('trimesh package is not installed.')\n return img\n try:\n renderer = pyrender.OffscreenRenderer(viewport_width=W, viewport_height=H)\n except (ImportError, RuntimeError):\n warnings.warn('pyrender package is not installed correctly.')\n return img\n if not isinstance(colors, list):\n colors = [colors for _ in range(len(vertices))]\n colors = [color_val(c) for c in colors]\n depth_map = np.ones([H, W]) * np.inf\n output_img = img\n for idx in range(len(vertices)):\n color = colors[idx]\n color = [(c / 255.0) for c in color]\n color.append(1.0)\n vert = vertices[idx]\n face = faces[idx]\n material = pyrender.MetallicRoughnessMaterial(metallicFactor=0.2, alphaMode='OPAQUE', baseColorFactor=color)\n mesh = trimesh.Trimesh(vert, face)\n rot = trimesh.transformations.rotation_matrix(np.radians(180), [1, 0, 0])\n mesh.apply_transform(rot)\n mesh = pyrender.Mesh.from_trimesh(mesh, material=material)\n scene = pyrender.Scene(ambient_light=(0.5, 0.5, 0.5))\n scene.add(mesh, 'mesh')\n camera_pose = np.eye(4)\n camera = pyrender.IntrinsicsCamera(fx=focal_length[0], fy=focal_length[1], cx=camera_center[0], cy=camera_center[1], zfar=100000.0)\n scene.add(camera, pose=camera_pose)\n light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=1)\n light_pose = np.eye(4)\n light_pose[:3, 3] = np.array([0, -1, 1])\n scene.add(light, pose=light_pose)\n light_pose[:3, 3] = np.array([0, 1, 1])\n scene.add(light, pose=light_pose)\n light_pose[:3, 3] = np.array([1, 1, 2])\n scene.add(light, pose=light_pose)\n color, rend_depth = renderer.render(scene, flags=pyrender.RenderFlags.RGBA)\n valid_mask = (rend_depth < depth_map) * (rend_depth > 0)\n depth_map[valid_mask] = rend_depth[valid_mask]\n valid_mask = valid_mask[:, :, None]\n output_img = valid_mask * color[:, :, :3] + (1 - valid_mask) * output_img\n return output_img", "def b_transform_cube(b_obj):\n \n b_scale_object()\n b_scale_single_face(b_obj)", "def RenderTexture(self, vtkVolume, vtkRenderer, p_int=..., p_int=..., *args, **kwargs):\n ...", "def testComplexShading(self):\n\n model_transforms = camera_utils.euler_matrices(\n [[-20.0, 0.0, 60.0], [45.0, 60.0, 
0.0]])[:, :3, :3]\n\n vertices_world_space = torch.matmul(\n torch.stack([self.cube_vertices, self.cube_vertices]),\n model_transforms.transpose())\n\n normals_world_space = torch.matmul(\n torch.stack([self.cube_normals, self.cube_normals]),\n model_transforms.transpose())\n\n # camera position:\n eye = torch.tensor([[0.0, 0.0, 6.0], [0.0, 0.2, 18.0]], dtype=torch.float32)\n center = torch.tensor([[0.0, 0.0, 0.0], [0.1, -0.1, 0.1]], dtype=torch.float32)\n world_up = torch.constant([[0.0, 1.0, 0.0], [0.1, 1.0, 0.15]], dtype=torch.float32)\n fov_y = torch.tensor([40.0, 13.3], dtype=torch.float32)\n near_clip = 0.1\n far_clip = 25.0\n image_width = 640\n image_height = 480\n light_positions = torch.tensor([[[0.0, 0.0, 6.0], [1.0, 2.0, 6.0]],\n [[0.0, -2.0, 4.0], [1.0, 3.0, 4.0]]])\n light_intensities = torch.tensor(\n [[[1.0, 1.0, 1.0], [1.0, 1.0, 1.0]],\n [[2.0, 0.0, 1.0], [0.0, 2.0, 1.0]]],\n dtype=torch.float32)\n vertex_diffuse_colors = torch.tensor(2*[[[1.0, 0.0, 0.0],\n [0.0, 1.0, 0.0],\n [0.0, 0.0, 1.0],\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 0.0],\n [1.0, 0.0, 1.0],\n [0.0, 1.0, 1.0],\n [0.5, 0.5, 0.5]]],\n dtype=torch.float32)\n vertex_specular_colors = torch.tensor(2*[[[0.0, 1.0, 0.0],\n [0.0, 0.0, 1.0],\n [1.0, 1.0, 1.0],\n [1.0, 1.0, 0.0],\n [1.0, 0.0, 1.0],\n [0.0, 1.0, 1.0],\n [0.5, 0.5, 0.5],\n [1.0, 0.0, 0.0]]],\n dtype=torch.float32)\n shininess_coefficients = 6.0 * torch.ones([2, 8], dtype=torch.float32)\n ambient_color = torch.tensor([[0.0, 0.0, 0.0], [0.1, 0.1, 0.2]], dtype=torch.float32)\n renders = mesh_renderer.mesh_renderer(\n vertices_world_space,\n self.cube_triangles,\n normals_world_space,\n vertex_diffuse_colors,\n eye,\n center,\n world_up,\n light_positions,\n light_intensities,\n image_width,\n image_height,\n vertex_specular_colors,\n shininess_coefficients,\n ambient_color,\n fov_y,\n near_clip,\n far_clip)\n tonemapped_renders = torch.cat([\n mesh_renderer.tone_mapper(renders[:, :, :, 0:3], 0.7),\n renders[:, :, :, 3:4]\n ],\n dim=3)\n\n # Check that shininess coefficient broadcasting works by also rendering\n # with a scalar shininess coefficient, and ensuring the result is identical:\n broadcasted_renders = mesh_renderer.mesh_renderer(\n vertices_world_space,\n self.cube_triangles,\n normals_world_space,\n vertex_diffuse_colors,\n eye,\n center,\n world_up,\n light_positions,\n light_intensities,\n image_width,\n image_height,\n vertex_specular_colors,\n 6.0,\n ambient_color,\n fov_y,\n near_clip,\n far_clip)\n tonemapped_broadcasted_renders = torch.cat([\n mesh_renderer.tone_mapper(broadcasted_renders[:, :, :, 0:3], 0.7),\n broadcasted_renders[:, :, :, 3:4]\n ],\n dim=3)\n\n for image_id in range(renders.shape[0]):\n target_image_name = \"Colored_Cube_%i.png\" % image_id\n baseline_image_path = os.path.join(self.test_data_directory,\n target_image_name)\n test_utils.expect_image_file_and_render_are_near(\n self, baseline_image_path, tonemapped_renders[image_id, :, :, :])\n test_utils.expect_image_file_and_render_are_near(\n self, baseline_image_path, tonemapped_broadcasted_renders[image_id, :, :, :])", "def draw_kame(self):\r\n #pygame.draw.rect(self.screen, self.color, self.rect, self.image)\r\n self.screen.blit(self.image, self.rect)", "def setupTexture(self):\r\n # Configure the texture rendering parameters\r\n glEnable(GL_TEXTURE_2D)\r\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\r\n glTexParameterf(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\r\n glTexEnvf(GL_TEXTURE_ENV, GL_TEXTURE_ENV_MODE, GL_DECAL)\r\n\r\n # Re-select our 
texture, could use other generated textures\r\n # if we had generated them earlier.\r\n glBindTexture(GL_TEXTURE_2D, self.imageID)", "def map_face(self):\n #Array Order: U0,D1,R2,L3,F4,B5,\n \n cube_list = []\n cube_list = self.cube.definition()\n \n for index, cubit in enumerate(self.faces['Up']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index]])\n for index, cubit in enumerate(self.faces['Ri']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+9]])\n for index, cubit in enumerate(self.faces['Ft']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+18]])\n for index, cubit in enumerate(self.faces['Dn']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+27]])\n for index, cubit in enumerate(self.faces['Le']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+36]])\n for index, cubit in enumerate(self.faces['Bk']):\n self.canvas.itemconfig(cubit,fill=self.face_colours[cube_list[index+45]])", "def view_draw(self, context):\n self.override_context = context.copy()\n region = context.region\n view = context.region_data\n\n vmat = view.view_matrix.copy()\n vmat_inv = vmat.inverted()\n pmat = view.perspective_matrix * vmat_inv\n\n viewport = [region.x, region.y, region.width, region.height]\n\n self.update_view(vmat, pmat, viewport)\n\n glPushAttrib(GL_ALL_ATTRIB_BITS)\n\n glDisable(GL_DEPTH_TEST)\n glDisable(GL_CULL_FACE)\n glDisable(GL_STENCIL_TEST)\n glEnable(GL_TEXTURE_2D)\n\n glClearColor(0, 0, 1, 1)\n glClear(GL_COLOR_BUFFER_BIT)\n\n glMatrixMode(GL_MODELVIEW)\n glPushMatrix()\n glLoadIdentity()\n glMatrixMode(GL_PROJECTION)\n glPushMatrix()\n glLoadIdentity()\n\n glActiveTexture(GL_TEXTURE0)\n glBindTexture(GL_TEXTURE_2D, self.tex)\n glTexImage2D(GL_TEXTURE_2D, 0, GL_RGB8, self.width, self.height, 0, GL_RGB,\n GL_UNSIGNED_BYTE, self.processor.image_buffer)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n\n glBegin(GL_QUADS)\n glColor3f(1.0, 1.0, 1.0)\n glTexCoord2f(0.0, 0.0)\n glVertex3i(-1, -1, 0)\n glTexCoord2f(1.0, 0.0)\n glVertex3i(1, -1, 0)\n glTexCoord2f(1.0, 1.0)\n glVertex3i(1, 1, 0)\n glTexCoord2f(0.0, 1.0)\n glVertex3i(-1, 1, 0)\n glEnd()\n\n glPopMatrix()\n glMatrixMode(GL_MODELVIEW)\n glPopMatrix()\n\n glPopAttrib()", "def cube_vertices(x, y, z, n):\r\n return [\r\n x-n,y+n,z-n, x-n,y+n,z+n, x+n,y+n,z+n, x+n,y+n,z-n, # top\r\n x-n,y-n,z-n, x+n,y-n,z-n, x+n,y-n,z+n, x-n,y-n,z+n, # bottom\r\n x-n,y-n,z-n, x-n,y-n,z+n, x-n,y+n,z+n, x-n,y+n,z-n, # left\r\n x+n,y-n,z+n, x+n,y-n,z-n, x+n,y+n,z-n, x+n,y+n,z+n, # right\r\n x-n,y-n,z+n, x+n,y-n,z+n, x+n,y+n,z+n, x-n,y+n,z+n, # front\r\n x+n,y-n,z-n, x-n,y-n,z-n, x-n,y+n,z-n, x+n,y+n,z-n, # back\r\n ]", "def cube_vertices(x, y, z, nx, ny=None, nz=None):\n if ny == None: ny = nx\n if nz == None: nz = nx\n return [\n x - nx, y + ny, z - nz, x - nx, y + ny, z + nz, x + nx, y + ny, z + nz, x + nx, y + ny, z - nz, # top\n x - nx, y - ny, z - nz, x + nx, y - ny, z - nz, x + nx, y - ny, z + nz, x - nx, y - ny, z + nz, # bottom\n x - nx, y - ny, z - nz, x - nx, y - ny, z + nz, x - nx, y + ny, z + nz, x - nx, y + ny, z - nz, # left\n x + nx, y - ny, z + nz, x + nx, y - ny, z - nz, x + nx, y + ny, z - nz, x + nx, y + ny, z + nz, # right\n x - nx, y - ny, z + nz, x + nx, y - ny, z + nz, x + nx, y + ny, z + nz, x - nx, y + ny, z + nz, # front\n x + nx, y - ny, z - nz, x - nx, y - ny, z - nz, x - nx, y + ny, z - nz, x + nx, y + ny, z - nz, # back\n ]", "def 
__init__(self, x, y, x2, y2, x3, y3, color=(255, 255, 255, 255),\n batch=None, group=None):\n self._x = x\n self._y = y\n self._x2 = x2\n self._y2 = y2\n self._x3 = x3\n self._y3 = y3\n self._rotation = 0\n self._num_verts = 3\n\n r, g, b, *a = color\n self._rgba = r, g, b, a[0] if a else 255\n\n program = get_default_shader()\n self._batch = batch or Batch()\n self._group = self.group_class(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA, program, group)\n\n self._create_vertex_list()\n self._update_vertices()", "def _draw_surface(terrain, row, rowCount):\n if (row + 1 == rowCount):\n return\n glBegin(GL_TRIANGLES)\n\n point = 0\n rowLength = len(terrain.grid[row])\n while (point < rowLength - 1):\n top_left = (point, row, terrain.grid[row][point])\n top_right = (point + 1, row, terrain.grid[row][point + 1])\n bottom_left = (point, row + 1, terrain.grid[row + 1][point])\n bottom_right = (point + 1, row + 1, terrain.grid[row + 1][point + 1])\n\n middle_height = (top_left[2] + top_right[2] + bottom_left[2] + bottom_right[2]) / 4\n middle = (point + 0.5, row + 0.5, middle_height)\n draw_color(top_left)\n draw_color(top_right)\n draw_color(middle)\n\n draw_color(top_right)\n draw_color(bottom_right)\n draw_color(middle)\n\n draw_color(bottom_right)\n draw_color(bottom_left)\n draw_color(middle)\n\n draw_color(bottom_left)\n draw_color(top_left)\n draw_color(middle)\n point += 1\n glEnd()", "def render(self):\n glPushMatrix()\n glMultMatrixf(np.transpose(self.translation_matrix))\n glMultMatrixf(self.scaling_matrix)\n color = color.COLORS[self.color_index]\n glColor3f(color[0], color[1], color[2])\n\n if self.selected:\n # Emit light\n glMaterialfv(GL_FRONT, GL_EMISSION, [0.0, 0.0, 0.0])\n\n glPopMatrix()", "def imageCube(imagepath=None,position=(0,0,0),initialorientation='x'):\n\n # first parse the image path into folder and basename\n\n bpy.ops.import_image.to_plane(files=[{'name':imagepath}])\n obj0 = bpy.context.object\n\n obj0.location = position\n obj0.location.x = obj0.location.x - 0.5\n obj1 = copyObject(obj0,newlocation=(1,0,0))\n obj0.rotation_euler = (0,math.pi/2,0)\n obj1.rotation_euler = (0,-math.pi/2,0)\n\n\n obj2 = copyObject(obj0,newlocation=(0.5,-0.5,0))\n obj3 = copyObject(obj0,newlocation=(0.5,0.5,0))\n obj2.rotation_euler = (math.pi/2,0,0)\n obj3.rotation_euler = (-math.pi/2,0,0)\n\n\n obj4 = copyObject(obj0,newlocation=(0.5,0,0.5))\n obj5 = copyObject(obj0,newlocation=(0.5,0,-0.5))\n obj4.rotation_euler = (0,0,0)\n obj5.rotation_euler = (0,-math.pi,0)", "def __call__(self, verts, cams=None, texture=None, rend_mask=False):\n if texture is None:\n texture = self.default_tex\n elif texture.dim() == 5:\n # Here input it F x T x T x T x 3 (instead of F x T x T x 3)\n # So add batch dim.\n texture = torch.unsqueeze(texture, 0)\n if cams is None:\n cams = self.default_cam\n elif cams.dim() == 1:\n cams = torch.unsqueeze(cams, 0)\n\n if verts.dim() == 2:\n verts = torch.unsqueeze(verts, 0)\n\n verts = asVariable(verts)\n cams = asVariable(cams)\n texture = asVariable(texture)\n\n if rend_mask:\n rend = self.renderer.forward(verts, self.faces, cams)\n rend = rend.repeat(3, 1, 1)\n rend = rend.unsqueeze(0)\n else:\n rend = self.renderer.forward(verts, self.faces, cams, texture)\n\n rend = rend.data.cpu().numpy()[0].transpose((1, 2, 0))\n rend = np.clip(rend, 0, 1) * 255.0\n\n return rend.astype(np.uint8)", "def draw_plane(env, transform, extents=(4,4), texture=None):\n if texture is None:\n texture = np.zeros((100,100,4))\n texture[:,:,1] = 0.2\n texture[:,:,2] = 0.2\n texture[:,:,3] = 0.2\n 
with env:\n h = env.drawplane(transform, extents=extents, texture=texture)\n return h", "def __init__(self, texture, texcoords, enabled=True):\n vfunc = Function(\"\"\"\n void pass_coords() {\n $v_texcoords = $texcoords;\n }\n \"\"\")\n ffunc = Function(\"\"\"\n void apply_texture() {\n if ($enabled == 1) {\n gl_FragColor *= texture2D($u_texture, $texcoords);\n }\n }\n \"\"\")\n self._texcoord_varying = Varying('v_texcoord', 'vec2')\n vfunc['v_texcoords'] = self._texcoord_varying\n ffunc['texcoords'] = self._texcoord_varying\n self._texcoords_buffer = VertexBuffer(\n np.zeros((0, 2), dtype=np.float32)\n )\n vfunc['texcoords'] = self._texcoords_buffer\n super().__init__(vcode=vfunc, vhook='pre', fcode=ffunc)\n\n self.enabled = enabled\n self.texture = texture\n self.texcoords = texcoords", "def __init__(self, texture, texcoords, enabled=True):\n vfunc = Function(\"\"\"\n void pass_coords() {\n $v_texcoords = $texcoords;\n }\n \"\"\")\n ffunc = Function(\"\"\"\n void apply_texture() {\n if ($enabled == 1) {\n gl_FragColor *= texture2D($u_texture, $texcoords);\n }\n }\n \"\"\")\n self._texcoord_varying = Varying('v_texcoord', 'vec2')\n vfunc['v_texcoords'] = self._texcoord_varying\n ffunc['texcoords'] = self._texcoord_varying\n self._texcoords_buffer = VertexBuffer(\n np.zeros((0, 2), dtype=np.float32)\n )\n vfunc['texcoords'] = self._texcoords_buffer\n super().__init__(vcode=vfunc, vhook='pre', fcode=ffunc)\n\n self.enabled = enabled\n self.texture = texture\n self.texcoords = texcoords", "def render(self, proj):\n if self.text == '' or not self.mesh:\n return\n\n model = self.model.getTransformation()\n mvp = proj * self.transform.getTransformation() * model\n\n gl.glEnable(gl.GL_FRAMEBUFFER_SRGB)\n\n gl.glEnable(gl.GL_BLEND)\n gl.glBlendFunc(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA)\n self.shader.bind()\n if self.color:\n self.shader.setUniform('u_color', self.color)\n self.font.bindAtlas()\n self.shader.setUniform('T_MVP', mvp)\n self.mesh.draw()\n gl.glDisable(gl.GL_BLEND)\n self.shader.unbind()\n self.font.unbindAtlas()\n gl.glDisable(gl.GL_FRAMEBUFFER_SRGB)", "def get_map_3d_tex(self, size, filename = None, charPos = None):\n mod = self.world_size / size\n image = PNMImage(size, size)\n for x in xrange(size):\n for y in xrange(size):\n px = x * mod\n py = y * mod\n height = self[px, py]\n if height <= 0:\n color = (abs(height) / 50) + 50\n if color > 255:\n color = 255\n image.setPixel(x, y, (0, 0, 255-color))\n else:\n if height <= self.config.low_mount_level[1]:\n color = height / 20\n r = 0\n g = 50+color\n b = 0\n image.setPixel(x, y, (r, g, b))\n elif height > self.config.low_mount_level[1]:\n color = height / 50\n r = color\n g = color\n b = color\n if r > 255:\n r = 255\n if g > 255:\n r = 255\n if b > 255:\n b = 255\n image.setPixel(x, y, (r, g, b))\n\n if filename != None:\n image.write(filename)\n\n if charPos != None:\n charX, charY = charPos\n for x in xrange(-1, 2):\n for y in xrange(-1, 2):\n image.setPixel(int(charX/mod)+x, int(charY/mod)+y, (255, 0, 0))\n\n texture = Texture()\n texture.load(image)\n return texture", "def draw(self, screen):\n halfScale = int(self.screenScale / 2)\n\n x = int(self.x)\n y = int(self.y)\n for i in range(-halfScale, halfScale):\n for j in range(-halfScale, halfScale):\n\n pygame.Surface.set_at(\n screen, (x * self.screenScale + i, y * self.screenScale + j), self.color)", "def GUI_Cube(self,canvas,XYS):\n X,Y,S = XYS\n cUp = [];cFt = [];cDn = [];cBk = [];cRi = [];cLe = []\n cUp_xi=[S + X+S*i for i in range(3)]\n cUp_yi=[Y+S*i for i in 
range(3)]\n cFt_xi=[S + X+S*i for i in range(3)]\n cFt_yi=[4*S+Y+S*i for i in range(3)]\n cLe_xi=[X+S*i-3*S for i in range(3)]\n cLe_yi=[4*S+Y+S*i for i in range(3)]\n cRi_xi=[X+S*i+5*S for i in range(3)]\n cRi_yi=[4*S+Y+S*i for i in range(3)]\n cDn_xi=[S + X+S*i for i in range(3)]\n cDn_yi=[2*S+2*3*S+Y+S*i for i in range(3)]\n cBk_xi=[X+S*i+9*S for i in range(3)]\n cBk_yi=[4*S+Y+S*i for i in range(3)]\n\n x=0\n for j in range(3):\n for i in range(3):\n cUp.append(canvas.create_rectangle(cUp_xi[i],cUp_yi[j],cUp_xi[i]+S,cUp_yi[j]+S,fill='white',tags = ('Up',x+0)))\n cFt.append(canvas.create_rectangle(cFt_xi[i],cFt_yi[j],cFt_xi[i]+S,cFt_yi[j]+S,fill='green',tags = ('Ft',x+18)))\n cDn.append(canvas.create_rectangle(cDn_xi[i],cDn_yi[j],cDn_xi[i]+S,cDn_yi[j]+S,fill='yellow',tags = ('Dn',x+27))) \n cBk.append(canvas.create_rectangle(cBk_xi[i],cBk_yi[j],cBk_xi[i]+S,cBk_yi[j]+S,fill='blue',tags = ('Bk',x+45)))\n cRi.append(canvas.create_rectangle(cRi_xi[i],cRi_yi[j],cRi_xi[i]+S,cRi_yi[j]+S,fill='red',tags = ('Ri',x+9)))\n cLe.append(canvas.create_rectangle(cLe_xi[i],cLe_yi[j],cLe_xi[i]+S,cLe_yi[j]+S,fill='orange',tags = ('Le',x+36))) \n x+=1\n\n return {'Up':cUp,'Dn':cDn,'Ri':cRi,'Le':cLe,'Ft':cFt,'Bk':cBk}", "def drawSphere3D(x0,y0,z0, radius, hres, vres):\n dislin.sphe3d(x0,y0,z0, radius, hres, vres)", "def __init__(self, screen_width, screen_height):\n self.display = (screen_width, screen_height)\n self.screen = pygame.display.set_mode(self.display, DOUBLEBUF | OPENGL)\n self.percpective = gluPerspective(45, (self.display[0]/self.display[1]), 0.1, 50.0)\n self.step_back = glTranslatef(0.0, 0.0, -15)\n self.red_cube = Red_cube()\n self.green_cube = Green_cube()\n self.blue_cube = Blue_cube()\n self.black_cube = Black_cube()\n self.looper()", "def test_colormap_CubeHelix():\n with TestingCanvas(size=size, bgcolor='w') as c:\n idata = np.linspace(255, 0, size[0]*size[1]).astype(np.ubyte)\n data = idata.reshape((size[0], size[1]))\n image = Image(cmap=get_colormap('cubehelix', rot=0, start=0),\n clim='auto', parent=c.scene)\n image.set_data(data)\n assert_image_approved(c.render(), \"visuals/colormap_cubehelix.png\")", "def testThatCubeRotates(self):\n image_height = 480\n image_width = 640\n initial_euler_angles = [[0.0, 0.0, 0.0]]\n\n euler_angles = torch.tensor(initial_euler_angles, requires_grad=True)\n model_rotation = camera_utils.euler_matrices(euler_angles)[0, :3, :3]\n model_rotation.requires_grad = True\n\n vertices_world_space = torch.reshape(\n torch.matmul(self.cube_vertices, model_rotation.transpose()),\n [1, 8, 3])\n\n normals_world_space = torch.reshape(\n torch.matmul(self.cube_normals, model_rotation.transpose()),\n [1, 8, 3])\n\n # camera position:\n eye = torch.tensor([[0.0, 0.0, 6.0]], dtype=torch.float32)\n center = torch.tensor([[0.0, 0.0, 0.0]], dtype=torch.float32)\n world_up = torch.tensor([[0.0, 1.0, 0.0]], dtype=torch.float32)\n\n vertex_diffuse_colors = torch.ones_like(vertices_world_space)\n light_positions = torch.reshape(eye, [1, 1, 3])\n light_intensities = torch.ones([1, 1, 3], dtype=torch.float32)\n\n # Pick the desired cube rotation for the test:\n test_model_rotation = camera_utils.euler_matrices([[-20.0, 0.0, 60.0]])[0, :3, :3]\n\n desired_vertex_positions = torch.reshape(\n torch.matmul(self.cube_vertices, test_model_rotation.transpose())\n [1, 8, 3])\n desired_normals = torch.reshape(\n torch.matmul(self.cube_normals, test_model_rotation.transpose()),\n [1, 8, 3])\n\n optimizer = torch.optim.SGD([euler_angles], lr=0.7, momentum=0.1)\n for _ in range(35):\n 
optimizer.zero_grad()\n render = mesh_renderer.mesh_renderer(\n vertices_world_space,\n self.cube_triangles,\n normals_world_space,\n vertex_diffuse_colors,\n eye,\n center,\n world_up,\n light_positions,\n light_intensities,\n image_width,\n image_height)\n desired_render = mesh_renderer.mesh_renderer(\n desired_vertex_positions,\n self.cube_triangles,\n desired_normals,\n vertex_diffuse_colors,\n eye,\n center,\n world_up,\n light_positions,\n light_intensities,\n image_width,\n image_height)\n loss = torch.mean(torch.abs(render - desired_render))\n loss.backward()\n optimizer.step()\n\n render = torch.reshape(render, [image_height, image_width, 4])\n desired_render = torch.reshape(desired_render, [image_height, image_width, 4])\n target_image_name = \"Gray_Cube_0.png\"\n baseline_image_path = os.path.join(self.test_data_directory,\n target_image_name)\n test_utils.expect_image_file_and_render_are_near(\n self, baseline_image_path, desired_render)\n test_utils.expect_image_file_and_render_are_near(\n self,\n baseline_image_path,\n render,\n max_outlier_fraction=0.01,\n pixel_error_threshold=0.04)", "def draw_offscreen(context):\n offscreen = SprytileGui.offscreen\n target_img = SprytileGui.texture_grid\n tex_size = SprytileGui.tex_size\n\n offscreen.bind()\n glClear(GL_COLOR_BUFFER_BIT)\n glDisable(GL_DEPTH_TEST)\n glEnable(GL_BLEND)\n glMatrixMode(GL_PROJECTION)\n glLoadIdentity()\n gluOrtho2D(0, tex_size[0], 0, tex_size[1])\n\n def draw_full_quad():\n texco = [(0, 0), (0, 1), (1, 1), (1, 0)]\n verco = [(0, 0), (0, tex_size[1]), (tex_size[0], tex_size[1]), (tex_size[0], 0)]\n glBegin(bgl.GL_QUADS)\n for i in range(4):\n glTexCoord2f(texco[i][0], texco[i][1])\n glVertex2f(verco[i][0], verco[i][1])\n glEnd()\n\n glColor4f(0.0, 0.0, 0.0, 0.5)\n draw_full_quad()\n\n if target_img is not None:\n glColor4f(1.0, 1.0, 1.0, 1.0)\n target_img.gl_load(0, GL_NEAREST, GL_NEAREST)\n glBindTexture(GL_TEXTURE_2D, target_img.bindcode[0])\n # We need to backup and restore the MAG_FILTER to avoid messing up the Blender viewport\n old_mag_filter = Buffer(GL_INT, 1)\n glGetTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, old_mag_filter)\n glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST)\n glEnable(GL_TEXTURE_2D)\n draw_full_quad()\n glTexParameteriv(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, old_mag_filter)\n\n # Translate the gl context by grid matrix\n grid_matrix = sprytile_utils.get_grid_matrix(SprytileGui.loaded_grid)\n matrix_vals = [grid_matrix[j][i] for i in range(4) for j in range(4)]\n grid_buff = bgl.Buffer(bgl.GL_FLOAT, 16, matrix_vals)\n\n glMatrixMode(GL_MODELVIEW)\n glPushMatrix()\n glLoadIdentity()\n glLoadMatrixf(grid_buff)\n\n glDisable(GL_TEXTURE_2D)\n\n # Get data for drawing additional overlays\n grid_size = SprytileGui.loaded_grid.grid\n padding = SprytileGui.loaded_grid.padding\n margin = SprytileGui.loaded_grid.margin\n curr_sel = SprytileGui.loaded_grid.tile_selection\n is_pixel_grid = sprytile_utils.grid_is_single_pixel(SprytileGui.loaded_grid)\n is_use_mouse = context.scene.sprytile_ui.use_mouse\n is_selecting = SprytileGui.is_selecting\n\n glLineWidth(1)\n\n # Draw box for currently selected tile(s)\n # Pixel grid selection is drawn in draw_tile_select_ui\n sprytile_data = context.scene.sprytile_data\n is_not_base_layer = sprytile_data.work_layer != \"BASE\"\n draw_outline = sprytile_data.outline_preview or is_not_base_layer\n if draw_outline and is_selecting is False and not is_pixel_grid:\n if is_not_base_layer:\n glColor4f(0.98, 0.94, 0.12, 1.0)\n elif 
SprytileGui.is_moving:\n glColor4f(1.0, 0.0, 0.0, 1.0)\n else:\n glColor4f(1.0, 1.0, 1.0, 1.0)\n curr_sel_min, curr_sel_max = SprytileGui.get_sel_bounds(\n grid_size, padding, margin,\n curr_sel[0], curr_sel[1],\n curr_sel[2], curr_sel[3]\n )\n SprytileGui.draw_selection(curr_sel_min, curr_sel_max)\n\n # Inside gui, draw appropriate selection for under mouse\n if is_use_mouse and is_selecting is False and SprytileGui.cursor_grid_pos is not None:\n\n cursor_pos = SprytileGui.cursor_grid_pos\n # In pixel grid, draw cross hair\n if is_pixel_grid and SprytileGui.is_moving is False:\n glColor4f(1.0, 1.0, 1.0, 0.5)\n glBegin(GL_LINE_STRIP)\n glVertex2i(0, int(cursor_pos.y + 1))\n glVertex2i(tex_size[0], int(cursor_pos.y + 1))\n glEnd()\n\n glBegin(GL_LINE_STRIP)\n glVertex2i(int(cursor_pos.x + 1), 0)\n glVertex2i(int(cursor_pos.x + 1), tex_size[1])\n glEnd()\n # Draw box around selection\n elif SprytileGui.is_moving is False:\n glColor4f(1.0, 0.0, 0.0, 1.0)\n cursor_min, cursor_max = SprytileGui.get_sel_bounds(grid_size, padding, margin,\n int(cursor_pos.x), int(cursor_pos.y),)\n SprytileGui.draw_selection(cursor_min, cursor_max)\n\n glPopMatrix()\n offscreen.unbind()", "def init(width, height):\n\tglClearColor(0.0, 0.0, 1.0, 0.0) #blue bg\n\tglMatrixMode(GL_PROJECTION)\n\tglLoadIdentity()\n\tglOrtho(-0.5, 2.5, -1.5, 1.5, -1.0, 1.0)", "def draw():\n global trackball, flashlight, \\\n vertex_buffer, normal_buffer, \\\n colors, color_buffer, selected_face, add_face, \\\n shaders\n\n # Clear the rendering information.\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n # Clear the transformation stack.\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n\n glPushMatrix()\n\n # Transform the objects drawn below by a rotation.\n trackball.glRotate()\n\n # * * * * * * * * * * * * * * * *\n # Draw all the triangular facets.\n glUseProgram(shaders)\n\n h_vertex = glGetAttribLocation(shaders,'vertex')\n h_normal = glGetAttribLocation(shaders,'normal')\n h_color = glGetAttribLocation(shaders,'color')\n h_eye = glGetUniformLocation(shaders,'eye')\n h_light = glGetUniformLocation(shaders,'light')\n\n # all the vertex positions\n glEnableVertexAttribArray(h_vertex)\n glBindBuffer (GL_ARRAY_BUFFER, vertex_buffer)\n glVertexAttribPointer(h_vertex, 3, GL_FLOAT, GL_FALSE, 0, None)\n \n # all the vertex normals\n glEnableVertexAttribArray(h_normal)\n glBindBuffer (GL_ARRAY_BUFFER, normal_buffer)\n glVertexAttribPointer(h_normal, 3, GL_FLOAT, GL_FALSE, 0, None)\n\n # all the face vertex colors\n glEnableVertexAttribArray(h_color)\n glBindBuffer (GL_ARRAY_BUFFER, color_buffer)\n\n if selected_face and add_face:\n # paint that face's vertices Green\n rgb_selected = [0.7,0.9,0.6] #GREEN\n for change in range(9):\n colors[selected_face.id * 9 + change] = rgb_selected[change % 3]\n # update the color buffer\n glBufferData (GL_ARRAY_BUFFER, len(colors)*4, \n (c_float*len(colors))(*colors), GL_STATIC_DRAW)\n add_face = False\n\n glVertexAttribPointer(h_color, 3, GL_FLOAT, GL_FALSE, 0, None)\n \n # position of the flashlight\n light = flashlight.rotate(vector(0.0,0.0,1.0));\n glUniform3fv(h_light, 1, (2.0*radius*light).components())\n\n # position of the viewer's eye\n eye = trackball.recip().rotate(vector(0.0,0.0,1.0))\n glUniform3fv(h_eye, 1, eye.components())\n\n glDrawArrays (GL_TRIANGLES, 0, len(face.instances) * 3)\n\n glDisableVertexAttribArray(h_vertex)\n glDisableVertexAttribArray(h_normal)\n glDisableVertexAttribArray(h_color)\n\n glPopMatrix()\n\n # Render the scene.\n glFlush()\n\n glutSwapBuffers()", "def 
__init__(self, camera=None, light=None, name=\"\", \r\n corners=((-0.5, -0.28868), (0.0, 0.57735), (0.5, -0.28868)),\r\n x=0.0, y=0.0, z=0.0, sx=1.0, sy=1.0, sz=1.0,\r\n rx=0.0, ry=0.0, rz=0.0, cx=0.0, cy=0.0, cz=0.0):\r\n super(Triangle, self).__init__(camera, light, name, x, y, z, rx, ry, rz,\r\n sx, sy, sz, cx, cy, cz)\r\n self.ttype = GL_TRIANGLES\r\n self.verts = []\r\n self.norms = []\r\n self.texcoords = []\r\n self.inds = []\r\n c = corners # alias for convenience\r\n\r\n self.verts = ((c[0][0], c[0][1], 0.0), (c[1][0], c[1][1], 0.0), (c[2][0], c[2][1], 0.0))\r\n self.norms = ((0, 0, -1), (0, 0, -1), (0, 0, -1))\r\n self.texcoords = ((0.0, 0.0), (0.5, 0.86603), (1.0, 0.0))\r\n\r\n self.inds = ((0, 1, 2), ) #python quirk: comma for tuple with only one val\r\n\r\n self.buf = []\r\n self.buf.append(Buffer(self, self.verts, self.texcoords, self.inds, self.norms))", "def create_sprite(self, pos):\n group = pyglet.sprite.SpriteGroup(\n self.TEXTURE, gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA\n )\n texcoords = []\n for i in xrange(self.length + 1):\n texcoords.extend([\n self.TEXTURE.tex_coords[0], i,\n self.TEXTURE.tex_coords[3], i,\n ])\n count = 2 * (self.length + 1)\n verts = [0, 0] * count # set vertices later from body\n self.vlist = batch.add(\n count, gl.GL_TRIANGLE_STRIP, group,\n ('v2f', verts),\n ('t2f', texcoords)\n )", "def GetOutTextureCoord(self):\n ...", "def populate_texture(self, texture):\n texture.blit_buffer(self._cbuffer, colorfmt='bgr', bufferfmt='ubyte')", "def draw_texture_rectangle(center_x, center_y, width, height, texture,\n angle=0, alpha=1, transparent=True):\n\n if transparent:\n GL.glEnable(GL.GL_BLEND)\n GL.glBlendFunc(GL.GL_SRC_ALPHA, GL.GL_ONE_MINUS_SRC_ALPHA)\n else:\n GL.glDisable(GL.GL_BLEND)\n\n GL.glEnable(GL.GL_TEXTURE_2D)\n GL.glHint(GL.GL_POLYGON_SMOOTH_HINT, GL.GL_NICEST)\n GL.glHint(GL.GL_PERSPECTIVE_CORRECTION_HINT, GL.GL_NICEST)\n\n GL.glLoadIdentity()\n GL.glTranslatef(center_x, center_y, 0)\n if angle != 0:\n GL.glRotatef(angle, 0, 0, 1)\n\n GL.glColor4f(1, 1, 1, alpha)\n z = 0.5 # pylint: disable=invalid-name\n\n GL.glBindTexture(GL.GL_TEXTURE_2D, texture.texture_id)\n GL.glBegin(GL.GL_POLYGON)\n GL.glNormal3f(0.0, 0.0, 1.0)\n GL.glTexCoord2f(0, 0)\n GL.glVertex3f(-width / 2, -height / 2, z)\n GL.glTexCoord2f(1, 0)\n GL.glVertex3f(width / 2, -height / 2, z)\n GL.glTexCoord2f(1, 1)\n GL.glVertex3f(width / 2, height / 2, z)\n GL.glTexCoord2f(0, 1)\n GL.glVertex3f(-width / 2, height / 2, z)\n GL.glEnd()", "def render(self, camera=None):\r\n glPushMatrix()\r\n x,y,z = self.pos\r\n glTranslatef(x,y,-z)\r\n a, b, c = self.rotation\r\n glRotatef(a, 1, 0, 0)\r\n glRotatef(b, 0, 1, 0)\r\n glRotatef(c, 0, 0, 1)\r\n try:\r\n glScalef(*self.scale)\r\n except:\r\n glScalef(self.scale, self.scale, self.scale)\r\n glColor(*self.colorize)\r\n\r\n if self.outline:\r\n misc.outline(misc.OutlineGroup([i[0] for i in self.gl_lists]),\r\n self.outline_color, self.outline_size)\r\n\r\n for i in self.gl_lists:\r\n i[1].bind()\r\n i[0].render()\r\n glPopMatrix()", "def print_cube(num):\n print(\"Cube: {}\".format(num * num * num))", "def print_cube(num):\n print(\"Cube: {}\".format(num * num * num))", "def textureWindow(*args, activeSelectionOnTop: bool=True, axesColor: Union[List[float, float,\n float], bool]=None, backFacingColor: Union[List[float, float, float, float],\n bool]=None, capture: AnyStr=\"\", captureSequenceNumber: int=0, changeCommand:\n Union[List[AnyStr, AnyStr, AnyStr, AnyStr], bool]=None, checkerColor1:\n Union[List[float, float, float], 
bool]=None, checkerColor2: Union[List[float,\n float, float], bool]=None, checkerColorMode: Union[int, bool]=0,\n checkerDensity: Union[int, bool]=0, checkerDrawTileLabels: bool=True,\n checkerGradient1: Union[List[float, float, float], bool]=None,\n checkerGradient2: Union[List[float, float, float], bool]=None,\n checkerGradientOverlay: bool=True, checkerTileLabelColor: Union[List[float,\n float, float], bool]=None, clearImage: bool=True, cmEnabled: bool=True,\n control: bool=True, defineTemplate: AnyStr=\"\", displayAxes: bool=True,\n displayCheckered: bool=True, displayDistortion: bool=True,\n displayDivisionLines: bool=True, displayGridLines: bool=True, displayImage:\n Union[int, bool]=0, displayIsolateSelectHUD: bool=True, displayLabels:\n bool=True, displayOverlappingUVCountHUD: bool=True, displayPreselection:\n bool=True, displayReversedUVCountHUD: bool=True, displaySolidMap: bool=True,\n displayStyle: Union[AnyStr, bool]=\"\", displayTextureBorder: bool=True,\n displayUVShellCountHUD: bool=True, displayUVStatisticsHUD: bool=True,\n displayUsedPercentageHUD: bool=True, distortionAlpha: Union[float, bool]=0.0,\n distortionPerObject: bool=True, divisions: Union[int, bool]=0, docTag:\n Union[AnyStr, bool]=\"\", doubleBuffer: bool=True, drawAxis: bool=True,\n drawSubregions: bool=True, exists: bool=True, exposure: Union[float,\n bool]=0.0, filter: Union[AnyStr, bool]=\"\", forceMainConnection: Union[AnyStr,\n bool]=\"\", forceRebake: bool=True, frameAll: bool=True, frameSelected:\n bool=True, frontFacingColor: Union[List[float, float, float, float],\n bool]=None, gamma: Union[float, bool]=0.0, gridLinesColor: Union[List[float,\n float, float], bool]=None, gridNumbersColor: Union[List[float, float, float],\n bool]=None, highlightConnection: Union[AnyStr, bool]=\"\", imageBaseColor:\n Union[List[float, float, float], bool]=None, imageDim: bool=True,\n imageDisplay: bool=True, imageNames: bool=True, imageNumber: Union[int,\n bool]=0, imagePixelSnap: bool=True, imageRatio: bool=True, imageRatioValue:\n Union[float, bool]=0.0, imageSize: bool=True, imageTileRange:\n Union[List[float, float, float, float], bool]=None, imageUnfiltered:\n bool=True, internalFaces: bool=True, labelPosition: Union[AnyStr, bool]=\"\",\n loadImage: AnyStr=\"\", lockMainConnection: bool=True, mainListConnection:\n Union[AnyStr, bool]=\"\", maxResolution: Union[int, bool]=0, multiColorAlpha:\n Union[float, bool]=0.0, nbImages: bool=True, nextView: bool=True, numUvSets:\n bool=True, numberOfImages: Union[int, bool]=0, numberOfTextures: Union[int,\n bool]=0, panel: Union[AnyStr, bool]=\"\", parent: Union[AnyStr, bool]=\"\",\n previousView: bool=True, realSize: bool=True, refresh: bool=True,\n relatedFaces: bool=True, removeAllImages: bool=True, removeImage: bool=True,\n rendererString: Union[AnyStr, bool]=\"\", reset: bool=True, saveImage:\n bool=True, scaleBlue: Union[float, bool]=0.0, scaleGreen: Union[float,\n bool]=0.0, scaleRed: Union[float, bool]=0.0, selectInternalFaces: bool=True,\n selectRelatedFaces: bool=True, selectionConnection: Union[AnyStr, bool]=\"\",\n setUvSet: int=0, singleBuffer: bool=True, size: Union[float, bool]=0.0,\n solidMap3dView: bool=True, solidMapColorSeed: Union[int, bool]=0,\n solidMapPerShell: bool=True, spacing: float=0.0, stateString: bool=True,\n style: Union[int, bool]=0, subdivisionLinesColor: Union[List[float, float,\n float], bool]=None, textureBorder3dView: bool=True, textureBorderColor:\n Union[List[float, float, float], bool]=None, textureBorderWidth: Union[int,\n bool]=0, 
textureNames: bool=True, textureNumber: Union[int, bool]=0,\n tileLabels: bool=True, tileLinesColor: Union[List[float, float, float],\n bool]=None, toggle: bool=True, toggleExposure: bool=True, toggleGamma:\n bool=True, unParent: bool=True, unlockMainConnection: bool=True,\n updateMainConnection: bool=True, useFaceGroup: bool=True, useTemplate:\n AnyStr=\"\", usedPercentageHUDRange: Union[List[float, float, float, float],\n bool]=None, uvSets: bool=True, viewPortImage: bool=True, viewTransformName:\n Union[AnyStr, bool]=\"\", wireframeComponentColor: Union[List[float, float,\n float, float], bool]=None, wireframeObjectColor: Union[List[float, float,\n float, float], bool]=None, writeImage: AnyStr=\"\", q=True, query=True, e=True,\n edit=True, **kwargs)->Union[AnyStr, Any]:\n pass", "def plasm_cube(self, size=0.1, color=WHITE):\n return COLOR(color)(T([1,2,3])(self.coords)(CUBOID([size, size, size])))", "def make_cube(r, g, b):\n ny, nx = r.shape\n R = np.zeros([ny, nx, 3])\n R[:,:,0] = r\n G = np.zeros_like(R)\n G[:,:,1] = g\n B = np.zeros_like(R)\n B[:,:,2] = b\n\n RGB = R + G + B\n\n return R, G, B, RGB", "def Draw(self):\n\t\tGameImage.Draw(self, self.coords)", "def surface(func, umin=0, umax=2*np.pi, ucount=64, urepeat=1.0,\n vmin=0, vmax=2*np.pi, vcount=64, vrepeat=1.0):\n\n vtype = [('position', np.float32, 3),\n ('texcoord', np.float32, 2),\n ('normal', np.float32, 3)]\n itype = np.uint32\n\n # umin, umax, ucount = 0, 2*np.pi, 64\n # vmin, vmax, vcount = 0, 2*np.pi, 64\n\n vcount += 1\n ucount += 1\n n = vcount*ucount\n\n Un = np.repeat(np.linspace(0, 1, ucount, endpoint=True), vcount)\n Vn = np.tile (np.linspace(0, 1, vcount, endpoint=True), ucount)\n U = umin+Un*(umax-umin)\n V = vmin+Vn*(vmax-vmin)\n\n vertices = np.zeros(n, dtype=vtype)\n for i,(u,v) in enumerate(zip(U,V)):\n vertices[\"position\"][i] = func(u,v)\n\n vertices[\"texcoord\"][:,0] = Un*urepeat\n vertices[\"texcoord\"][:,1] = Vn*vrepeat\n\n indices = []\n for i in range(ucount-1):\n for j in range(vcount-1):\n indices.append(i*(vcount) + j )\n indices.append(i*(vcount) + j+1 )\n indices.append(i*(vcount) + j+vcount+1)\n indices.append(i*(vcount) + j+vcount )\n indices.append(i*(vcount) + j+vcount+1)\n indices.append(i*(vcount) + j )\n indices = np.array(indices, dtype=itype)\n vertices[\"normal\"] = normals(vertices[\"position\"],\n indices.reshape(len(indices)//3,3))\n\n return vertices.view(gloo.VertexBuffer), indices.view(gloo.IndexBuffer)", "def draw3d(self, zoom):\n # For \"disappearing\" animation\n # For all balls except white and black\n if ((self.number == 0 and self.visible == False) or (self.number == 8 and self.visible == False)) == False:\n\n # Set position of light source because it depends on the position of the ball\n light_position = [zoom * (self.x - 200.0), zoom * (self.y + 200.0), zoom * 200.0, 1.0]\n light_direction = [zoom * self.x, zoom * self.y, 0.0, 1.0]\n\n glLightfv(GL_LIGHT0, GL_POSITION, light_position)\n glLightfv(GL_LIGHT0, GL_SPOT_DIRECTION, light_direction)\n\n # Turn on textures\n glEnable(GL_TEXTURE_2D)\n glBindTexture(GL_TEXTURE_2D, self.texture)\n\n # Here we move, rotate and draw the ball\n glPushMatrix()\n glScalef(zoom, zoom, zoom)\n glTranslatef(self.x, self.y, 0.0)\n glMultMatrixd(self.matrix)\n if self.radius > 1.0:\n graphicsBall3D(self.radius)\n glPopMatrix()\n\n # Turn off textures\n glDisable(GL_TEXTURE_2D)", "def cube_colors(self, cubes):\n n = cubes.shape[0]\n col = np.zeros((n ** 3, 3))\n terrain_col = (66, 244, 72)\n empty_col = self.background\n for i in 
range(n):\n for j in range(n):\n for k in range(n):\n c = cubes[i, j, k]\n col[i * n ** 2 + j * n + k] = empty_col if c.state == 'empty' else terrain_col\n self.wireframe_col = col", "def drawFace():\r\n\tglPushMatrix()\r\n\tglTranslatef(-0.5,-0.5,0)\r\n\tglBegin(GL_LINE_LOOP)\r\n\t\r\n\tglVertex3f(0,VALUE,0)\r\n\tglVertex3f(VALUE,0,0)\r\n\t\r\n\tglVertex3f(LENGTH-VALUE,0,0)\r\n\tglVertex3f(LENGTH,VALUE,0)\r\n\t\r\n\tglVertex3f(LENGTH,LENGTH-VALUE,0)\r\n\tglVertex3f(LENGTH-VALUE,LENGTH,0)\r\n\t\r\n\tglVertex3f(VALUE,LENGTH,0)\r\n\tglVertex3f(0,LENGTH-VALUE,0)\r\n\t\r\n\tglEnd()\r\n\tglPopMatrix()", "def end_cast(self):\r\n #draw the actual map\r\n self.emap.draw(shader=self.mshader, camera=self.camera)\r\n super(ShadowCaster, self)._end()\r\n # set third texture to this ShadowCaster texture\r\n texs = self.emap.buf[0].textures\r\n if len(texs) == 2:\r\n texs.append(self)\r\n else:\r\n texs[2] = self\r\n # change background back to blue\r\n opengles.glClearColor(ctypes.c_float(0.4), ctypes.c_float(0.8), \r\n ctypes.c_float(0.8), ctypes.c_float(1.0))\r\n # work out left, top, right, bottom for shader\r\n self.emap.unif[48] = 0.5 * (1.0 + self.scaleu) # left [16][0]\r\n self.emap.unif[49] = 0.5 * (1.0 + self.scalev) # top [16][1]\r\n self.emap.unif[51] = 1.0 - self.emap.unif[48] # right [17][0]\r\n self.emap.unif[52] = 1.0 - self.emap.unif[49] # bottom [17][1]\r\n \r\n du = float(self.location[0] / self.emap.width)\r\n dv = float(self.location[2] / self.emap.depth)\r\n self.emap.unif[48] -= self.scaleu * (du if self.emap.unif[50] == 1.0 else dv)\r\n self.emap.unif[49] += self.scalev * (dv if self.emap.unif[50] == 1.0 else du)\r\n self.emap.unif[51] -= self.scaleu * (du if self.emap.unif[50] == 1.0 else dv)\r\n self.emap.unif[52] += self.scalev * (dv if self.emap.unif[50] == 1.0 else du)", "def add_object(self, obj): # DEFINE OBJ!\n obj.spritesheet_width = self.spritesheet.size['width']\n obj.spritesheet_height = self.spritesheet.size['height']\n \n obj._layer_added(self)\n \n\n obj.buffer_index = len(self.objects)\n self.objects.append(obj)\n\n x = obj.x\n y = obj.y\n \n self.verts.extend(((x, y, 0.0), (x+obj.width, y, 0.0), (x+obj.width, y-obj.height, 0.0), (x, y-obj.height, 0.0)))\n self.texcoords.extend(obj.uv_texture)\n self.norms.extend(((0, 0, -1), (0, 0, -1), (0, 0, -1), (0, 0, -1)))\n\n if pi3d.PLATFORM == pi3d.PLATFORM_PI:\n self.inds.append((self.a,self.b,self.c))\n self.inds.append((self.d,self.a,self.c))\n else:\n self.inds.extend((self.a,self.b,self.c))\n self.inds.extend((self.d,self.a,self.c))\n\n self.a += 4\n self.b += 4\n self.c += 4\n self.d += 4\n\n \n #~ return len(self.sprites)-1", "def draw(self, fig, ax):\n\n\t\tpoints = self.apply_transformations() \n\t\tax.scatter3D(points[:, 0], points[:, 1], points[:, 2])\n\n\t\t# cube polygons collection\n\t\tverts = [\n\t\t\t[points[0], points[1], points[2], points[3]],\n\t\t\t[points[4], points[5], points[6], points[7]], \n\t\t\t[points[0], points[1], points[5], points[4]], \n\t\t\t[points[2], points[3], points[7], points[6]], \n\t\t\t[points[1], points[2], points[6], points[5]],\n\t\t\t[points[4], points[7], points[3], points[0]]\n\t\t]\n\t\t\n\t\t# render polygons\n\t\tax.add_collection3d(Poly3DCollection(verts, \n\t\tfacecolors='blue', linewidths=1, edgecolors='b', alpha=0.3))", "def __init__(self, shape, pts, texcoords, faces, normals=None, smooth=True):\r\n super(Buffer, self).__init__()\r\n\r\n # Uniform variables all in one array!\r\n self.unib = (c_float * 12)(0.0, 0.0, 0.0,\r\n 0.5, 0.5, 0.5,\r\n 1.0, 1.0, 0.0,\r\n 0.0, 
0.0, 0.0)\r\n \"\"\" pass to shader array of vec3 uniform variables:\r\n\r\n ===== ============================ ==== ==\r\n vec3 description python\r\n ----- ---------------------------- -------\r\n index from to\r\n ===== ============================ ==== ==\r\n 0 ntile, shiny, blend 0 2\r\n 1 material 3 5\r\n 2 umult, vmult, point_size 6 8\r\n 3 u_off, v_off (only 2 used) 9 10\r\n ===== ============================ ==== ==\r\n \"\"\"\r\n #self.shape = shape\r\n self.textures = []\r\n pts = np.array(pts, dtype=float)\r\n texcoords = np.array(texcoords, dtype=float)\r\n faces = np.array(faces)\r\n\r\n if normals == None: #i.e. normals will only be generated if explictly None\r\n LOGGER.debug('Calculating normals ...')\r\n\r\n normals = np.zeros(pts.shape, dtype=float) #empty array rights size\r\n\r\n fv = pts[faces] #expand faces with x,y,z values for each vertex\r\n #cross product of two edges of triangles\r\n fn = np.cross(fv[:][:][:,1] - fv[:][:][:,0], fv[:][:][:,2] - fv[:][:][:,0])\r\n fn = Utility.normalize_v3(fn)\r\n normals[faces[:,0]] += fn #add up all normal vectors for a vertex\r\n normals[faces[:,1]] += fn\r\n normals[faces[:,2]] += fn\r\n normals = Utility.normalize_v3(normals)\r\n else:\r\n normals = np.array(normals)\r\n \r\n # keep a copy for speeding up the collision testing of ElevationMap\r\n self.vertices = pts\r\n self.normals = normals\r\n self.tex_coords = texcoords\r\n self.indices = faces\r\n self.material = (0.5, 0.5, 0.5, 1.0)\r\n\r\n # Pack points,normals and texcoords into tuples and convert to ctype floats.\r\n n_verts = len(pts)\r\n if len(texcoords) != n_verts:\r\n if len(normals) != n_verts:\r\n self.N_BYTES = 12 # only use pts\r\n self.array_buffer = c_floats(pts.reshape(-1).tolist())\r\n else:\r\n self.N_BYTES = 24 # use pts and normals\r\n self.array_buffer = c_floats(np.concatenate((pts, normals),\r\n axis=1).reshape(-1).tolist())\r\n else:\r\n self.N_BYTES = 32 # use all three NB doesn't check that normals are there\r\n self.array_buffer = c_floats(np.concatenate((pts, normals, texcoords),\r\n axis=1).reshape(-1).tolist())\r\n\r\n self.ntris = len(faces)\r\n self.element_array_buffer = c_shorts(faces.reshape(-1))\r\n from pi3d.Display import Display\r\n self.disp = Display.INSTANCE # rely on there always being one!\r", "def render( self, shader, mode, index ):\n location = shader.getLocation( mode, self.name, uniform=True )\n if location is not None and location != -1:\n value = self.currentValue( shader, mode )\n if value:\n self.baseFunction( location, index )\n glActiveTexture( GL_TEXTURE0 + index )\n glBindTexture( GL_TEXTURE_BUFFER, self.texture( mode ) )\n vbo = value.vbo(mode)\n vbo.bind()\n try:\n glTexBuffer( GL_TEXTURE_BUFFER, self.get_format(), int(vbo) )\n finally:\n vbo.unbind()\n return True \n return False", "def drawVector3D(x0,y0,z0,x1,y1,z1, vtype='normal'):\n dislin.vectr3(x0,y0,z0,x1,y1,z1, vectordict[vtype])" ]
[ "0.8082414", "0.790963", "0.73804325", "0.7115595", "0.7054056", "0.70481575", "0.6607423", "0.6532412", "0.64749116", "0.6449474", "0.6229988", "0.6222093", "0.6187685", "0.61688286", "0.6162942", "0.6152132", "0.6028768", "0.59879607", "0.5984257", "0.59456533", "0.5933637", "0.5922008", "0.588329", "0.5874939", "0.58282906", "0.5820203", "0.58158016", "0.5789243", "0.5743865", "0.5733173", "0.57237595", "0.57229877", "0.57083565", "0.5699348", "0.568006", "0.56705785", "0.56678617", "0.5637324", "0.56058043", "0.56005794", "0.55976146", "0.5575043", "0.5571767", "0.5555994", "0.55374736", "0.5534736", "0.55095434", "0.5506222", "0.5505048", "0.54940116", "0.54928994", "0.5488861", "0.5472803", "0.547267", "0.5466227", "0.5460862", "0.5460352", "0.5435024", "0.5432242", "0.5424846", "0.5403749", "0.5401808", "0.5399284", "0.5393053", "0.53915775", "0.53908306", "0.53908306", "0.53767484", "0.5371917", "0.53716636", "0.53645164", "0.53597134", "0.5356797", "0.53423214", "0.53380334", "0.53351235", "0.5327497", "0.532383", "0.5323594", "0.53082186", "0.5307563", "0.5276187", "0.52694035", "0.5259157", "0.5253607", "0.5253607", "0.52530915", "0.5253084", "0.524997", "0.52301633", "0.5229689", "0.5223696", "0.5217121", "0.5211219", "0.52045417", "0.5199141", "0.51945585", "0.51916015", "0.5191006", "0.5186947" ]
0.80746436
1
Request refresh of the context whenever idle
def OnIdle(self, ): self.triggerRedraw(1) return 1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def idle(self):\n return", "def on_refresh(self):\n pass", "def idle():", "def _idle(self):\n # self._purge_timedout()\n # ...", "def refresh(self):\n pass", "def refresh(self):\n pass", "def refresh(self):\n\n self._refreshed_on = time.time() * 1000", "def refresh(self):\n self.fetch(False)", "def context_reset(self):\n self._context_state = None\n logging.info('Resetting the context')", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def refresh(self) -> None:\n pass", "def Refresh(self):\n pass", "def in_context(self):\n pass", "def update_state(self, context):\n pass", "def _update_context(self):\n # update file for context\n self._engine.update_file_for_context()", "def refresh(self, context=None):\n current = self.get_by_uuid(self._context, uuid=self.uuid)\n self.obj_refresh(current)", "def refresh(self):\n self.__refresh()", "def refresh(self):\n raise NotImplementedError", "def refresh(self):\n raise NotImplementedError", "def _Refresh(self):\n raise NotImplementedError", "def refresh_view():\n pass", "def keep_alive(self, context: ResourceCommandContext, cancellation_context: CancellationContext) -> None:\n super().keep_alive(context, cancellation_context)", "def reset(self, *_):\n with self._context.lock:\n super().reset()\n self.__context_init()", "def refresh_status(self):\n\n pass", "def reloadMode(self): \n\t\tpass", "def context(self, context):\n self._context = context", "def refresh(self):\n raise NotImplementedError(\"To be implemented\")", "def context(self, context):\n\n self._context = context", "def __init__(self):\n self._context = {}", "def refresh_status() -> None:\n ...", "async def _timein_refresh(self):\n\t\t\n\t\tawait self.refresh_cache()", "def refresh_source(self):\n pass", "def perform_idle_actions(self):\n self._close_old_django_connections()", "def reinitialize(self, requestContext):\n pass", "def reload(self):", "def reload(self):", "def context_started(self, cls, example):", "def setCurrent(ctx):\n THREAD_CONTEXT.current = ctx", "def refresh(self):\n self.Refresh()", "def _refresh(self):\n resp = self._cb.get_object(self._build_api_request_uri())\n self._info = resp\n self._last_refresh_time = time.time()\n return True", "def _refresh(self):\n resp = self._cb.get_object(self._build_api_request_uri())\n self._info = resp\n self._last_refresh_time = time.time()\n return True", "def idle(self):\n stream=self.get_stream()\n if stream:\n stream.idle()", "def handle_reload_toolbox(self):", "def reload(self):\n\n pass", "def refresh(self) -> None:\n self._itempage.get()", "def shutdown(self):\n yield self.cxn.manager.expire_context(self.server.ID,\n context=self.ctx)", "def context(self) -> Any:\n ...", "async def ensure_active(self):\n if not self.active:\n await self.refresh()", "def refresh(self):\n self._refresh_method()", "def at_server_reload(self):\n self.db.started = True", "def refresh_session():\n\n hruntime.response.headers['Cache-Control'] = 'must-revalidate, no-cache, no-store'\n\n hruntime.user = hruntime.dbroot.users[hruntime.session.name]\n hruntime.i18n = hruntime.dbroot.localization.languages['cz']", "def context():\n return dict()", "def context(self) -> CONTEXT:", "def __context_init(self):\n self._context.data[\"services\"] = copy.deepcopy(INITIAL_SRVDATA)", "def context_ended(self, cls, example):", "def refresh_session(self):\n if self.session:\n try:\n yield from self.session.close()\n except:\n # we don't care if closing the session does nothing\n pass \n\n self.session = aiohttp.ClientSession()\n 
self._session_start = time.time()", "def ensure_context(self):\n with driver.get_active_context():\n oldctx = self._get_attached_context()\n newctx = self.get_or_create_context(None)\n self._set_attached_context(newctx)\n try:\n yield\n finally:\n self._set_attached_context(oldctx)", "def update_store(self):\n _request_store.context = self", "def keepalive(self) -> None:", "def refresh(self, view_manager):\n pass", "def context_set(context):\n global __context\n if context == DefaultContext:\n context = context.copy()\n __context = context", "def recurrent(self):\n pass", "def invalidate(self, context):\n self.dictionary = None", "def refresh_screen(self):", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def __init__(self):\n self.context={}", "def refresh(self): \n return self._config.refreshObj(self)", "def new_context(self):\n return dict()", "def _context(self, context):\n self.context = context\n # if there have been changed lines encountered that haven't yet\n # been add to a hunk.\n if self.changedlines:\n self.add_new_hunk()", "def __getstate__(self):\n odict = self.__dict__.copy()\n odict['_contexts'] = []\n return odict", "def startEvaluationMode(self):\r\n self.storeDataRef = self.dataRef", "def _update_on_active(self):\n pass", "def push_context(self):\n raise NotImplementedError()", "def handleReload(self, confInfo=None):", "def _context_allow_change(self):\n try:\n self.__dict__['ALLOW_CHANGE'] = True\n yield\n finally:\n del self.__dict__['ALLOW_CHANGE']", "def refresh(self):\n return self._refresh", "def OnIdle( self, ):\n self.triggerRedraw(1)\n return 1", "def OnIdle( self, ):\n self.triggerRedraw(1)\n return 1", "def refresh(modeladmin, request, queryset):\n if not queryset.exists():\n return # 404\n opts = queryset[0].content_object._meta\n module = '%s.%s' % (opts.app_label, opts.object_name)\n ids = queryset.values_list('object_id', flat=True)\n # Execute get_state isolated on a process to avoid gevent polluting the stack\n # and preventing this silly complain \"gevent is only usable from a single thread\"\n # ids listqueryset is converted in a list in order to be properly serialized\n result = get_state.delay(module, ids=list(ids), lock=False)\n try:\n # Block until finish\n result.get()\n except OperationLocked:\n msg = 'This operation is currently being executed by another process.'\n messages.error(request, msg)\n else:\n msg = 'The state of %d %ss has been updated.' 
% (queryset.count(), opts.object_name)\n modeladmin.message_user(request, msg)", "def reload_info(self):\n self.__loop.run_until_complete(self.__reload_info())", "def handle_context_missing(self):", "def refresh(self):\n self.lease = self.blazar.lease.get(self.id)", "def _refresh_cache(self, data_dict):\r\n pass", "def refresh(self) -> None:\n self.tesla_device.refresh()\n self._attributes = self.tesla_device.attrs.copy()\n self.async_write_ha_state()", "async def refresh(self):\n while True:\n await asyncio.sleep(5/6 * self.lifetime)\n\n request = stun.Message(message_method=stun.Method.REFRESH,\n message_class=stun.Class.REQUEST)\n request.attributes['LIFETIME'] = self.lifetime\n self.__add_authentication(request)\n await self.request(request, self.server)", "def refresh_configuration(self):\n pass", "def __init__(self):\n self._data = dict() # stores the data in the context\n self._last_context = None # stores previous context if the current one is used in a with block\n self._thread_id = self._get_thread_id() # the ID of the thread that context lives in", "def syncrepl_refreshdone(self):\n pass", "async def refresh_entity_state(self):", "def _refresh(self):\n url = self.urlobject_single.format(self._cb.credentials.org_key, self._model_unique_id)\n resp = self._cb.get_object(url)\n self._info = resp\n self._last_refresh_time = time.time()\n return True", "def start_refresh(self, widget, context):\n\n self.source_id = gobject.timeout_add(38, self.continuous_scroll, context)", "def refresh():\n return __apf_cmd(\"-e\")", "def current_context():\n return _current.get()", "def refresh(self):\n self.dto = self.res.get()\n log.debug(f\"Refreshed {self.url}\")", "def restore_context(self):\r\n self.current_context = self.context_stack.pop()" ]
[ "0.6957455", "0.6568586", "0.65202016", "0.64047533", "0.6379513", "0.6379513", "0.63642436", "0.6357043", "0.6318813", "0.6293459", "0.6293459", "0.6293459", "0.6279487", "0.6272733", "0.6181421", "0.6174326", "0.61006075", "0.60482174", "0.60439646", "0.60439646", "0.6043728", "0.6025652", "0.6020738", "0.6004129", "0.5958445", "0.5953644", "0.5953369", "0.59002537", "0.58910453", "0.5877513", "0.5870844", "0.5856948", "0.5838552", "0.58305836", "0.5789563", "0.5786058", "0.5786058", "0.57801056", "0.57739055", "0.5747157", "0.5727174", "0.5727174", "0.57251614", "0.5674736", "0.56521374", "0.564835", "0.5646359", "0.5645227", "0.5636359", "0.5625652", "0.55906177", "0.5587889", "0.558647", "0.5579708", "0.55791605", "0.5572257", "0.5570948", "0.5551374", "0.5547961", "0.5547096", "0.55465555", "0.5538767", "0.55200166", "0.551793", "0.55174387", "0.5516626", "0.5516626", "0.5516626", "0.5516626", "0.5516626", "0.5516626", "0.5496661", "0.54922396", "0.54801077", "0.5479161", "0.5471161", "0.5471076", "0.54635656", "0.5461302", "0.54610914", "0.5460767", "0.5442632", "0.5442632", "0.5435659", "0.54343104", "0.5424755", "0.5422572", "0.5417836", "0.54101235", "0.5405306", "0.5402262", "0.53989637", "0.5394453", "0.5388826", "0.53888035", "0.53822094", "0.5366681", "0.53653574", "0.53607184", "0.535875" ]
0.5446337
81
Continually read and add messages to the chat.
def _receive_message_loop(self):
    while True:
        try:
            message = self.connection_socket.recv(4096)
            if len(message) > 0:
                self.add_message_to_chat(message.decode('utf-8'))
            sleep(0.2)
        except ConnectionResetError:
            # messagebox.showerror("Client dropped", "The other person has dropped from the connection.")
            self.root.destroy()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def handle_chat(self):\n while True:\n if self.chat_breakout:\n return\n\n time.sleep(1)\n messages = \"\"\n for i in range(5):\n try:\n messages += f\"{self.queue.popleft()}\\n\"\n except IndexError:\n # Queue is empty but no worries\n continue\n\n if messages != \"\":\n self.loop.create_task(\n self.ingame_cog.send_chat_to_discord(\n self.bot, self.channel, messages\n )\n )", "def list_add_chat_message(self, chat_message):\n self.chat_messages.append(chat_message)\n\n #logging.info(\"adding message: %s\" % chat_message.message)\n\n if len(self.chat_messages) > ENVIRONMENT['BUFFER_SIZE']:\n self.chat_messages.pop(0)\n\n # alert our polling clients\n self.new_message_event.set()\n self.new_message_event.clear()", "async def start(self):\n\n while True:\n try:\n data = await self.reader.read(8192)\n\n if self._trace_enabled:\n self._logger.trace(\n \"Received %d bytes from remote server:\\n%s\",\n len(data),\n msg.dump(data),\n )\n await self.process(data)\n except asyncio.CancelledError:\n return\n except:\n logging.exception(\"Unhandled error in Message Reader\")\n raise", "def processIncoming(self):\n while (self.queue.qsize()):\n try:\n message = self.queue.get_nowait()\n \n self.terminal.insert(END,message)\n\n # Autoscroll the terminal if set\n if (self.autoscroll_value.get()):\n self.terminal.yview(END)\n\n except Queue.Empty:\n pass", "async def messages(self, ctx):\n\n\t\tawait self.message_leaderboard(ctx, \"messages\")", "def _keep_getting_new_messages(self):\n while True:\n new_messages = self.get_new_messages()\n for message in new_messages:\n self.handle(message)\n time.sleep(self.refresh_delay)", "def recv_messages(self):\n while True:\n b = unwrap_read(self.sock.recv(4096))\n msgs = self.parser.feed(b)\n if msgs:\n for msg in msgs:\n self.router.incoming(msg)\n return", "def process_chatter(self, msg):\n # note, nothing in here is ROS specific, it's just python code that\n # runs when new info appears\n\n print msg.data # print the recieved message\n\n self.msgs_recieved += 1 # increase msg count\n self.msgs_recieved %= 500 # mod 500 so we don't get enormous numbers\n self.msg = \"%d messages recieved\" % self.msgs_recieved # set message", "def redis_chat_messages_listener(redis_server, redis_new_chat_messages):\n logging.info(\"Spun up a redis chat message listener.\")\n while True:\n raw = redis_new_chat_messages.next()\n msg = (ChatMessage(**json.loads(raw['data'])))\n ## just hook into our existing way for now\n ## a bit redundant but allows server to be run without redis\n logging.info(\"new chat message subscribed to: %s\" % raw['data'])\n ## add o our local buffer to push to clients\n chat_channel = get_chat_channel(redis_server, msg.channel_name)\n chat_channel.list_add_chat_message(msg)", "def joinchat():\n Loading = True\n while Loading:\n readbuffer_join = sock.recv(1024).decode()\n for line in readbuffer_join.split(\"\\n\")[0:-1]:\n print(line)\n Loading = loadingComplete(line)", "def run_chat(self, auto_send_receipts=False):\n for message in self.receive_messages():\n print(message)\n\n if message.payment:\n for func in self._payment_handlers:\n func(message.source, message.payment)\n continue\n\n if not message.text:\n continue\n\n for _, regex, func in self._chat_handlers:\n match = re.search(regex, message.text)\n if not match:\n continue\n\n try:\n reply = func(message, match)\n except Exception as e: # noqa - We don't care why this failed.\n print(e)\n continue\n\n if isinstance(reply, tuple):\n stop, reply = reply\n else:\n stop = True\n\n\n # In case a 
message came from a group chat\n group_id = message.group_info.get(\"groupId\")\n\n # mark read and get that sweet filled checkbox\n try:\n if auto_send_receipts and not group_id:\n self.send_receipt(recipient=message.source, timestamps=[message.timestamp])\n\n if group_id:\n self.send_group_message(recipient_group_id=group_id, text=reply)\n else:\n self.send_message(recipient=message.source, text=reply)\n except Exception as e:\n print(e)\n\n if stop:\n # We don't want to continue matching things.\n break", "def listener(messages):\n for m in messages:\n chatid = m.chat.id\n print(str(chatid))\n if m.content_type == 'text':\n text = m.text\n tb.send_message(chatid, text)", "def recv(self, *messages):\n for message in messages:\n self.input.put(message)", "async def read(self) -> None:\n make_non_blocking(self.stream)\n\n while not self.stream.closed:\n message = None\n try:\n message = await self.read_one()\n\n if not message:\n await self.sleep()\n continue\n else:\n self.wake()\n\n IOLoop.current().add_callback(self.queue.put_nowait, message)\n except Exception as e: # pragma: no cover\n self.log.exception(\n \"%s couldn't enqueue message: %s (%s)\", self, message, e\n )\n await self.sleep()", "def work(self):\n while True:\n message = self.get()\n self.handle(message)", "def _read_loop(self):\n while True:\n self.read()", "def handle_messages(self):\n\n #Get the time at which the code started running\n current_time = datetime.datetime.now()\n\n #get all messages between now and the time where a message was last received\n messages = self.client.messages.list(\n date_sent_before = datetime.datetime.now()+ datetime.timedelta(hours = TIMEDIFFERENCE),\n date_sent_after = self.last_message_timing + datetime.timedelta(hours = TIMEDIFFERENCE)\n )\n\n #Iterate through all the new messages\n for record in messages:\n #If it is not from the Twilio Client\n if record.from_ != 'whatsapp:+14155238886':\n #Then update the timing of the last message to the current time\n self.last_message_timing = current_time\n #If the message sent is the '?' that seeks to get the number\n #of people in the queue\n if record.body == '?':\n #Get the data about people from firebase\n people_data = self.firebase.get_data('people_count')\n #Get the number of people queueing\n no_of_people = people_data['people_count']\n #Create a message from the API to tell the person\n #asking the number of people in the queue\n message = self.client.messages.create(\n body='The number of the people in the queue is {}'.format(no_of_people),\n from_='whatsapp:{sender_number}'.format(**self.config),\n to=record.from_\n )", "async def consumer(self):\n while True:\n logging.info(\"Consuming telephony log...\")\n logs = await self.telephonylog_queue.get()\n\n if logs is None:\n logging.info(\n \"Telephony logs empty. 
Nothing to write...\")\n continue\n logging.info(\n \"Consumed {} telephony logs...\".format(len(logs)))\n\n try:\n for log in logs:\n self.writer.write(json.dumps(log).encode() + b'\\n')\n await self.writer.drain()\n logging.info(\"Wrote data over tcp socket...\")\n except Exception as e:\n logging.error(\"Failed to write data to transport with {}\".format(e))\n sys.exit(1)\n\n # Idea is to write to last_offset_read file after data is sent\n # When user sets recover=True in toml, we will read from this file\n # if it exists and grab data from that offset\n # Still testing out this logic\n logging.info(self.last_offset_read)\n checkpoint_file = os.path.join(self.config['logs']['checkpointDir'],\n \"telephony_checkpoint_data.txt\")\n checkpointing_data = open(checkpoint_file, \"w\")\n checkpointing_data.write(json.dumps(self.last_offset_read['telephony_last_fetched']))\n checkpointing_data.flush()\n checkpointing_data.close()\n self.writer.close()", "def run(self):\n while True:\n time.sleep(RTM_READ_DELAY)\n for event in self._slack_client.rtm_read():\n self.handle_event(event)", "def run(self):\n while True:\n line = self.stream.readline()\n if not len(line):\n # EOF, stop!\n break\n else:\n # Put the text on the queue, along with the time it was read.\n self.callback_queue.put(line)", "def listen(self):\n\n\t\tprint(\"Connected to the room\")\n\n\t\t#: Watch for messages coming from the server.\n\t\twhile self.joined:\n\n\t\t\t#: Wait for a message to be recieved from the server.\n\t\t\ttry:\n\t\t\t\t#: Store a most recent message for testing purposes.\n\t\t\t\tself.most_recent_message = self.client.recv(1024).decode()\n\t\t\t\tself.messages.append(self.most_recent_message)\n\t\t\texcept OSError:\n\t\t\t\tprint(\"Connection to the server has been lost.\")\n\n\t\t\t\t#: Quit from the server to do cleanup.\n\t\t\t\tself.quit(False)", "def _process_chat_queue(self, chat_queue):\n while self.allowed_to_chat:\n if len(chat_queue) > 0:\n self.ts.send_message(chat_queue.pop())\n time.sleep(.5)", "def run_chat(self, auto_send_receipts=False):\n signal.signal(signal.SIGINT, self._stop_handler)\n signal.signal(signal.SIGQUIT, self._stop_handler)\n messages_iterator = self.receive_messages()\n while self._run:\n try:\n message = next(messages_iterator)\n\n except ConnectionResetError as e:\n self.logger.exception(\"Got an error attempting to get a message from signal!\")\n raise\n except Exception as e:\n self.logger.exception(\"Got an error attempting to get a message from signal!\")\n continue\n\n self.logger.info(f\"Receiving message {message}\")\n\n\n if message.payment:\n for func in self._payment_handlers:\n func(message.source, message.payment)\n continue\n\n if not message.text:\n continue\n\n for _, regex, func in self._chat_handlers:\n match = re.search(regex, message.text)\n if not match:\n continue\n\n try:\n reply = func(message, match)\n except Exception as e: # noqa - We don't care why this failed.\n self.logger.exception(f\"Failed to process message {message}\")\n continue\n\n if isinstance(reply, tuple):\n stop, reply = reply\n else:\n stop = True\n\n # In case a message came from a group chat\n group_id = message.group_v2 and message.group_v2.get(\"id\") # TODO - not tested\n\n # mark read and get that sweet filled checkbox\n try:\n if auto_send_receipts and not group_id:\n self.send_read_receipt(recipient=message.source['number'], timestamps=[message.timestamp])\n\n if group_id:\n self.send_group_message(recipient_group_id=group_id, text=reply)\n else:\n 
self.send_message(recipient=message.source['number'], text=reply)\n except Exception as e:\n self.logger.exception(e)\n raise\n\n if stop:\n # We don't want to continue matching things.\n break\n return", "def run(self):\n while True:\n msg = self.recv()", "def listening(self):\n # starting point (CheckPoint)\n try:\n last_index = len(re.split('\\n', open(self.path, 'r').read())) - 1\n \n while True:\n \n curr_size = path.getsize(self.path)\n modified_time = path.getmtime(self.path)\n \n time.sleep(.2)\n # Latest.log Either got Archived by Minecraft or a new Instance of Minecraft Opened\n if self.fileSize > curr_size:\n print('\\033[31mDetected Change in Size')\n print('\\033[32mDid You reopen Minecraft?')\n self.fileSize = curr_size\n last_index = len(re.split('\\n', open(self.path, 'r').read())) - 1\n \n # MODIFIED??? must be minecraft dumping chat onto lastest.log\n if self.last_time_modified != modified_time:\n \n self.last_time_modified = modified_time\n chat = open(self.path, 'r').read()\n newChatLines = re.split('\\n', chat)[last_index:] # Reads Lines From the last checkpoint\n \n \n \n curr_index = -1\n\n for line in newChatLines:\n\n curr_index += 1\n # if line is not a \\n or \\r tag then our Line checkpoint is the current line\n if line:\n last_index += 1\n \n # Ignores ERRORS / WARNINGS focuses on chat logs\n if '[Client thread/INFO]: [CHAT]' in line:\n\n self.newLineEvent(line)\n # TODO LOGING\n except (FileExistsError, FileNotFoundError, PermissionError, NotADirectoryError) as e:\n err_helper.showError('0x1', e, crash=True)", "def read_messages(self, msg_num):\r\n self.clear_screen()\r\n user_label = Label(self.root, text=\"Hello \" + self.username, font=self.title_font,\r\n bg=self.bg_color, height=2)\r\n user_label.pack(pady=5, padx=50)\r\n lbl_msg = Label(self.root, text=\"Message \" + str(msg_num), font=self.title_font,\r\n bg=self.bg_color)\r\n lbl_msg.pack(pady=5, padx=10)\r\n self.refresh_button = Button(self.root, text=\"Refresh page\", font=self.text_font,\r\n bg=self.bg_color, command=lambda: self.refresh(msg_num))\r\n self.refresh_button.pack(padx=10, pady=10)\r\n messages_frame = Frame(self.root)\r\n messages_frame.pack(padx=30, pady=15)\r\n scrollbar_msg = Scrollbar(messages_frame)\r\n scrollbar_msg.pack(side=RIGHT, fill=Y)\r\n text_widget = Text(messages_frame, width=50, height=15, font=self.text_font,\r\n yscrollcommand=scrollbar_msg.set)\r\n text_widget.pack()\r\n scrollbar_msg.config(command=text_widget.yview)\r\n button_send = Button(self.root, text=\"go back\", font=self.text_font,\r\n height=2, width=20, command=self.go_back_read)\r\n button_send.pack(pady=5, side=BOTTOM)\r\n button_send = Button(self.root, text=\"see/close message\\ncontrol panel\",\r\n font=self.text_font,\r\n height=2, width=20,\r\n command=lambda: self.new_window_messages(button_send))\r\n button_send.pack(pady=5, side=BOTTOM)\r\n if self.msg_list:\r\n if msg_num < len(self.msg_list):\r\n next_msg = Button(self.root, text=\"next message\", font=self.text_font,\r\n height=2, width=20,\r\n command=lambda: self.read_messages(msg_num + 1))\r\n next_msg.pack(pady=5, padx=5, side=RIGHT)\r\n if msg_num > 1:\r\n previous_msg = Button(self.root, text=\"previous message\", font=self.text_font,\r\n height=2, width=20,\r\n command=lambda: self.read_messages(msg_num - 1))\r\n previous_msg.pack(pady=5, padx=5, side=LEFT)\r\n text_widget.insert(END, \"from: \" + self.msg_list[msg_num - 1][2] + \"\\n\")\r\n text_widget.tag_add('sender', '1.0', '1.end')\r\n text_widget.tag_config('sender', font='none 
14')\r\n\r\n text_widget.insert(END, self.msg_list[msg_num - 1][0])\r\n text_widget.tag_add('msg', '2.0', END)\r\n text_widget.tag_config('msg', font='none 12')\r\n\r\n text_widget.config(state=DISABLED)", "async def on_chat_message(self, chat_message):\n pass", "def on_message(self, _, message):\n with self.message_lock:\n self.messages.append(Message.deserialize(message))\n self.new_message_available.set()\n super().on_message(_, message)", "def process_messages(self):\n pass", "def poll(self):\n msgs = self._read()\n\n if msgs and self.callback:\n for msg in msgs:\n self.callback(msg)", "def _add_to_chat_queue(self, message):\n self.chat_message_queue.appendleft(message)", "async def handle_commands(self, reader, queue):\n await queue.put(self.introduction_message)\n\n logged_username = None # temporary logged in user\n to = None # temporary to user\n\n async for data in read_util(reader): # ended when we close the connection of client\n print(\"Received:\", data.decode())\n # self.write_handler(data, writer)\n text = data.decode()\n if text.startswith(\"$\"):\n logged_username = text[1:]\n self.users[logged_username] = queue # assigning the queue to the logged in user\n\n elif text.startswith(\"@\"):\n if not logged_username:\n await queue.put(self.introduction_message)\n continue\n to, message = text.split(\" \", 1)\n to = to[1:]\n if to not in self.users:\n await queue.put(b\"Invalid username\\n\")\n continue # what if its not there think about it\n\n user_message = f\"<{logged_username}> {message}\\n\"\n await self.users.get(to).put(user_message.encode()) # putting message in respective queue of `to` user", "def receive_messages(self):\n messages = self.incoming_messages\n self.incoming_messages = []\n return messages", "def read_incoming(self):\r\n buf = ''\r\n debug_prompt = re.compile(r'\\A[\\w]+>>? 
')\r\n while 1:\r\n try:\r\n buf += os.read(self.fid, 100).decode('utf8')\r\n except:\r\n self.queue.put(None)\r\n return\r\n lines = buf.splitlines()\r\n for line in lines[:-1]:\r\n self.queue.put(line)\r\n if buf.endswith('\\n'):\r\n self.queue.put(lines[-1])\r\n buf = ''\r\n elif re.match(debug_prompt, lines[-1]):\r\n self.queue.put(lines[-1])\r\n buf = ''\r\n else:\r\n buf = lines[-1]", "def receive_chat(self, chat_packet):\n message = self.parser(chat_packet.json_data)\n if not message:\n # This means our Parser failed to extract the message\n return\n\n self.queue.append(message)", "def handle_read(self):\n while True:\n try:\n content = self.recv(1024)\n if content:\n self.rbuf.write(content.decode('utf-8'))\n if len(content) < 1024:\n break\n except Exception as e:\n print(e)\n self.handle_rpc()", "def _process_messages(self, room, new_messages):\n\t\tfor message in new_messages:\n\t\t\tself._log.info(\"handling message {}\".format(message[\"id\"]))\n\n\t\t\tfor reactive in self._reactives:\n\t\t\t\ttry:\n\t\t\t\t\treactive(room, message, self, self._hipchat)\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tself._log.error(\"reactive {!r} errored while handling message\".format(reactive), exc_info=True)", "async def relay_iopub_messages(self):\n while True:\n msg = await self.iosub.recv_multipart()\n # Send the message up to the consumer (for example, the notebook)\n self.iopub_socket.send_multipart(msg)", "def listen(self):\n self.channel.start_consuming()", "def receive():\n while True:\n try:\n msg = client_socket.recv(BUFSIZ).decode(\"utf8\")\n msg_list.insert(tkinter.END, msg)\n \n except OSError: # Possibly client has left the chat.\n break", "async def listen(self):\n\n while True:\n if not self.connected:\n # sleep and hope the checker fixes us\n await asyncio.sleep(5)\n continue\n data = await self.read_one_message()\n if data is None:\n await asyncio.sleep(1)\n continue\n mtype = self.find_balboa_mtype(data)\n\n if mtype is None:\n self.log.error(\"Spa sent an unknown message type.\")\n await asyncio.sleep(0.1)\n continue\n if mtype == BMTR_CONFIG_RESP:\n (self.macaddr, junk, morejunk) = self.parse_config_resp(data)\n await asyncio.sleep(0.1)\n continue\n if mtype == BMTR_STATUS_UPDATE:\n await self.parse_status_update(data)\n await asyncio.sleep(0.1)\n continue\n if mtype == BMTR_PANEL_RESP:\n self.parse_panel_config_resp(data)\n await asyncio.sleep(0.1)\n continue\n if mtype == BMTR_PANEL_NOCLUE1:\n self.parse_noclue1(data)\n await asyncio.sleep(0.1)\n continue\n self.log.error(\"Unhandled mtype {0}\".format(mtype))", "def start_speaking(self):\n self.allowed_to_chat = True\n self.chat_message_queue.clear()\n self.chat_thread = threading.Thread(target=self._process_chat_queue,\n kwargs={'chat_queue': self.chat_message_queue})\n self.chat_thread.daemon = True\n self.chat_thread.start()", "async def client_read(reader, connection):\n while connection.state[\"connected\"]:\n inp = await reader.readline()\n\n if not inp: # This is an EOF. 
Hard disconnect.\n connection.state[\"connected\"] = False\n return\n\n payload = {\n \"uuid\": connection.uuid,\n \"addr\": connection.addr,\n \"port\": connection.port,\n \"msg\": inp.strip(),\n }\n msg = {\n \"event\": \"player/input\",\n \"secret\": WS_SECRET,\n \"payload\": payload,\n }\n\n asyncio.create_task(messages_to_game.put(Message(\"IO\", message=json.dumps(msg, sort_keys=True, indent=4))))", "def useChat(self):\n # Implements a subprocess to run the Kuri robot simultaneously with the user input loop\n proc_stdin = io.TextIOWrapper(self.proc.stdin, encoding='utf-8', line_buffering=True)\n\n while True:\n txt = input(\"Talk to me! (Type 'q' to quit) \").lower()\n if txt == 'q':\n proc_stdin.write('q\\n')\n quit()\n else:\n sentiment = self.sd.getSentiment(txt)\n proc_stdin.write(sentiment + '\\n')\n print(\"Sentiment: \" + sentiment + '\\n')", "def update_messages():\n\n scrollbar = Scrollbar(root)\n scrollbar.pack(side=RIGHT, fill=Y)\n listbox = Text(root, wrap =WORD, yscrollcommand=scrollbar.set, background=\"#CCFFCC\", fg=\"black\", selectbackground=\"#003300\",\n highlightcolor=\"#0033CC\")\n\n msgs = []\n run = True\n while run:\n\n time.sleep(0.1) # update every 1/10 of a second\n new_messages = c1.get_messages() # get any new messages from client\n msgs.extend(new_messages) # add to local list of messages\n\n for msg in new_messages: # display new messages\n print(msg)\n #title_label = Label(text=str(msg), bg=\"#CCFFCC\", fg=\"black\", padx=34, pady=5, font=\"comicsansms 9 bold\",borderwidth=3,wraplength=300, relief=SUNKEN)\n #title_label.pack(side=TOP)\n\n listbox.insert(END, str(msg)+'\\n\\n')\n listbox.pack(fill=BOTH, padx=36)\n scrollbar.config(command=listbox.yview)\n\n if msg == \"{quit}\":\n root.destroy()\n run = False\n break", "def receive():\r\n while True:\r\n try:\r\n msg = client_socket.recv(BUFSIZ).decode(\"utf8\")\r\n msg_list.insert(tkinter.END, msg)\r\n except OSError: # Possibly client has left the chat.\r\n break", "def on_message(\n self, client: mqtt.Client, userdata: typing.Any, msg: mqtt.MQTTMessage\n ) -> None:\n self.msgs.append(msg)", "def request_messages(self):\n # we fetch messages from all rooms at the same time, we will route them at writing time\n base_query = (\n \"SELECT e.received_ts, j.json \"\n \"FROM events AS e \"\n \"INNER JOIN event_json AS j \"\n \"USING (event_id) \"\n \"WHERE e.room_id IN %s \"\n \"AND e.received_ts>%s \"\n \"AND e.type='m.room.message' \"\n \"ORDER BY e.received_ts;\"\n )\n\n self.read_last_ts_written()\n\n try:\n logging.info(\"Connecting to database ...\")\n with psycopg2.connect(**self.db_config) as conn:\n logging.info(\"Connected to database.\")\n with conn.cursor() as cur:\n cur.execute(\n base_query, (tuple(self.rooms.keys()), self.last_ts_written)\n )\n\n for row in cur:\n # there are two fields per row : timestamp at reception, and json data\n line = self.process_message_row(row)\n\n # we get the room nickname in the config at the room_id key\n room_name = self.rooms[line[\"room_id\"]]\n file_path = self.ts_to_filepath(\n timestamp=line[\"ts\"], room_name=room_name\n )\n\n if self.append_line(file_path, json.dumps(line)):\n logging.info(\n \"Message with timestamp {} written to {}\".format(\n line[\"ts\"], file_path\n )\n )\n self.last_ts_written = line[\"ts\"]\n else:\n logging.error(\n \"We couldn't write message {}, we will exit.\".format(\n line[\"ts\"]\n )\n )\n return False\n\n self.write_last_ts_written()\n\n logging.info(\"Disconnecting from database ...\")\n conn.close()\n 
logging.info(\"Disconnected from database.\")\n return True\n\n except psycopg2.OperationalError as e:\n logging.error(\n 'Could not connect to database : \"{}\"'.format(str(e).replace(\"\\n\", \"\"))\n )\n\n return False\n\n return True", "def startReceiving(self):\n self.listening = True\n self.start()", "def startReading(self):\n self.reading = True\n self.thread = ReadSocket(self)\n self.thread.start()", "def read_message(stdscr, conversation):\n backlog = []\n tail = filesystem.tail_conversation(conversation)\n old_backlog = 0\n while True:\n # These settings are inside the loop because the reply mode disables them.\n stdscr.nodelay(1)\n curses.noecho()\n for line in tail:\n if line:\n backlog.append(line.replace(\"\\r\", \"\"))\n else:\n break\n if old_backlog != len(backlog):\n stdscr.erase()\n safe_put(stdscr, \"Viewing conversation with {user}. You can (r)eply or (q)uit.\".format(user=conversation), (2, 0))\n safe_put(stdscr, \"\\r\".join(backlog[-20:]), (4, 0))\n stdscr.refresh()\n old_backlog = len(backlog)\n\n selection = stdscr.getch()\n if selection == ord(\"q\"):\n break\n if selection == ord(\"r\"):\n stdscr.nodelay(0)\n send_message(stdscr, conversation)\n # Trigger a redraw after sending a message\n old_backlog = 0\n time.sleep(0.1)\n stdscr.nodelay(0)\n stdscr.clear()\n stdscr.refresh()", "def _workout_messages(self, msgs_bunch):\n if msgs_bunch != []:\n while True:\n r = requests.post(self.url, headers = self.headers, data = json.dumps(msgs_bunch))\n # request success condition below - to end the handler\n if r.status_code == 200:\n break\n print('http_handler: failed to retranslate messages, try again in ' + str(self.timeout) + ' sec')\n time.sleep(self.timeout)\n # next bunch of messages will not be read until this function ends\n # current bunch of messags will be deleted in next request if delete_flag = True is set", "def text(message):\n global list_messages\n room = session.get('room')\n msg = session.get('name') + ':' + message['msg']\n list_messages.append(msg)\n addNewMsg(message,session)\n print ('size of list_messages ' + str(len(list_messages)) + ', session ' + str(session))\n emit('message', {'msg': msg}, room=room)", "def doRead(self):\n if self.read_scheduled is not None:\n if not self.read_scheduled.called:\n self.read_scheduled.cancel()\n self.read_scheduled = None\n\n while True:\n if self.factory is None: # disconnected\n return\n\n events = self.socket_get(constants.EVENTS)\n\n if (events & constants.POLLIN) != constants.POLLIN:\n return\n\n try:\n message = self._readMultipart()\n except error.ZMQError as e:\n if e.errno == constants.EAGAIN:\n continue\n\n raise e\n\n log.callWithLogger(self, self.messageReceived, message)", "def new_message(self, message):\n self.message_counter += 1\n self.message_buffer.append(str(message))\n self.event_loop()", "async def _send_messages(self, message: str) -> None:\n for chat_id in self.chat_ids_list:\n await self._send_request_to_api(message, chat_id)", "def handle(self):\n try:\n while True:\n\n # Pop the message from the queue\n\n msg = self.queue.get_nowait()\n\n # Log anything if necesary\n\n self.log_message(msg)\n\n # Identify the src peer\n\n if 'src_id' in msg:\n\n if msg['src_id'] == -1:\n\n this_peer = None # Server message\n\n else:\n\n this_peer = self.peers[msg['src_id']]\n\n # If we are not up-to-date with server, only accept MSG_CONNECT and MSG_SET_ALL\n\n if isinstance(msg, MSG_CONNECT):\n\n if self.marker.id != msg['src_id']:\n\n print(\"Peer '{}' has joined the 
session\".format(msg['name']))\n\n elif type(msg) == MSG_SET_ALL:\n\n # Set the contents of the text box\n\n self.handle_setall(msg['data'])\n\n # Move the peers to their position\n\n for _, peer in self.peers.items():\n \n peer.move(peer.row, peer.col)\n\n # self.mark_set(peer.mark, peer.index())\n\n # Format the lines\n\n self.format_text()\n\n # Move the local peer to the start\n\n self.marker.move(1,0)\n\n # Flag that we've been update\n\n self.is_up_to_date = True\n\n elif self.is_up_to_date:\n\n # If the server responds with a console message\n\n if isinstance(msg, MSG_RESPONSE):\n\n if hasattr(self.root, \"console\"):\n\n self.root.console.write(msg['string']) \n\n # Stop running when server is manually killed \n\n elif isinstance(msg, MSG_KILL):\n\n if hasattr(self.root, \"console\"):\n\n self.root.console.write(msg['string']) \n\n self.root.push.kill()\n self.root.pull.kill()\n\n # Handles selection changes\n\n elif isinstance(msg, MSG_SELECT):\n\n sel1 = str(msg['start'])\n sel2 = str(msg['end'])\n \n this_peer.select(sel1, sel2)\n\n # Handles keypresses\n\n elif isinstance(msg, MSG_DELETE):\n\n self.handle_delete(this_peer, msg['row'], msg['col'])\n\n self.root.colour_line(msg['row'])\n\n elif type(msg) == MSG_BACKSPACE:\n\n self.handle_backspace(this_peer, msg['row'], msg['col'])\n\n self.root.colour_line(msg['row'])\n\n elif isinstance(msg, MSG_EVALUATE_BLOCK):\n\n lines = (int(msg['start_line']), int(msg['end_line']))\n\n this_peer.highlightBlock(lines)\n\n # Experimental -- evaluate code based on highlight\n\n string = self.get(\"{}.0\".format(lines[0]), \"{}.end\".format(lines[1]))\n \n self.root.lang.evaluate(string, name=str(this_peer), colour=this_peer.bg)\n\n elif isinstance(msg, MSG_EVALUATE_STRING):\n\n # Handles single lines of code evaluation, e.g. 
\"Clock.stop()\", that\n # might be evaluated but not within the text\n\n self.root.lang.evaluate(msg['string'], name=str(this_peer), colour=this_peer.bg)\n\n elif isinstance(msg, MSG_SET_MARK):\n\n row = msg['row']\n col = msg['col']\n\n this_peer.move(row, col)\n\n # If this is a local peer, make sure we can see the marker\n\n if this_peer == self.marker:\n\n self.mark_set(INSERT, \"{}.{}\".format(row, col))\n\n self.see(self.marker.mark)\n\n elif isinstance(msg, MSG_INSERT):\n\n self.handle_insert(this_peer, msg['char'], msg['row'], msg['col'])\n\n # Update IDE keywords\n\n self.root.colour_line(msg['row'])\n\n # If the msg is from the local peer, make sure they see their text AND marker\n\n if this_peer == self.marker:\n\n self.see(self.marker.mark)\n\n self.edit_separator()\n\n elif isinstance(msg, MSG_GET_ALL):\n\n # Return the contents of the text box\n\n data = self.handle_getall()\n\n reply = MSG_SET_ALL(-1, data, msg['src_id'])\n\n self.root.push_queue.put( reply ) \n\n elif isinstance(msg, MSG_REMOVE):\n\n # Remove a Peer\n this_peer.remove()\n \n del self.peers[msg['src_id']]\n \n print(\"Peer '{}' has disconnected\".format(this_peer)) \n\n elif isinstance(msg, MSG_BRACKET):\n\n # Highlight brackets on local client only\n\n if this_peer.id == self.marker.id:\n\n row1, col1 = msg['row1'], msg['col1']\n row2, col2 = msg['row2'], msg['col2']\n\n peer_col = int(self.index(this_peer.mark).split(\".\")[1])\n\n # If the *actual* mark is a ahead, adjust\n\n col2 = col2 + (peer_col - col2) - 1\n\n self.tag_add(\"tag_open_brackets\", \"{}.{}\".format(row1, col1), \"{}.{}\".format(row1, col1 + 1))\n self.tag_add(\"tag_open_brackets\", \"{}.{}\".format(row2, col2), \"{}.{}\".format(row2, col2 + 1))\n\n elif type(msg) == MSG_CONSTRAINT:\n\n new_name = msg['name']\n\n print(\"Changing to constraint to '{}'\".format(new_name))\n\n for name in self.root.creative_constraints:\n\n if name == new_name:\n\n self.root.creative_constraints[name].set(True)\n self.root.__constraint__ = constraints[name](msg['src_id'])\n\n else:\n\n self.root.creative_constraints[name].set(False)\n\n elif type(msg) == MSG_SYNC:\n\n # Set the contents of the text box\n\n self.handle_setall(msg['data'])\n\n # Move the peers to their position\n\n for _, peer in self.peers.items():\n \n peer.move(peer.row, peer.col)\n\n # Format the lines\n\n self.format_text()\n\n elif type(msg) == MSG_UNDO:\n\n self.handle_undo()\n\n # Give some useful information about what the message looked like if error\n\n else:\n\n print(\"Error in text box handling. 
Message was {}\".format(msg.info()))\n\n raise e\n\n # Update any other idle tasks\n\n self.update_idletasks()\n\n # This is possible out of date - TODO check\n\n if msg == self.root.wait_msg:\n\n self.root.waiting = False\n self.root.wait_msg = None\n self.root.reset_title()\n\n self.refreshPeerLabels()\n\n # Break when the queue is empty\n except queue.Empty:\n \n self.refreshPeerLabels()\n\n # Recursive call\n self.after(30, self.handle)\n return", "async def receiver(self):\n socket_input = await self.websocket.recv()\n logger.debug(\"<<< Received:\\n{}\".format(socket_input))\n\n # Showdown sends this response on initial connection\n if socket_input == \"o\":\n logger.info(\"Connected on {}\".format(self.websocket_url))\n self.connected = True\n self.add_task(self.on_connect())\n return\n\n inputs = utils.parse_socket_input(socket_input)\n for room_id, inp in inputs:\n room_id = room_id or \"lobby\"\n logger.debug(\"||| Parsing:\\n{}\".format(inp))\n inp_type, params = utils.parse_text_input(inp)\n\n # Set challstr attributes and autologin\n if inp_type == \"challstr\":\n self.challengekeyid, self.challstr = params\n if self.name and self.password and self.autologin:\n await self.login()\n elif self.autologin:\n msg = (\n \"Cannot login without username and password. If \"\n \"you don't want your client to be logged in, \"\n \"you can use Client.start(autologin=False).\"\n )\n raise Exception(msg)\n\n # Process query response\n elif inp_type == \"queryresponse\":\n response_type, data = params[0], \"|\".join(params[1:])\n data = json.loads(data)\n self.add_task(\n self.on_query_response(response_type, data), transient=True\n )\n if response_type == \"savereplay\":\n self.add_task(\n self.server.save_replay_async(data), transient=True\n )\n\n # Challenge updates\n elif inp_type == \"updatechallenges\":\n self.challenges = json.loads(params[0])\n self.add_task(\n self.on_challenge_update(self.challenges), transient=True\n )\n\n # Messages\n elif inp_type == \"c:\" or inp_type == \"c\":\n timestamp = None\n if inp_type == \"c:\":\n timestamp, params = int(params[0]), params[1:]\n author_str, *content = params\n content = \"|\".join(content)\n chat_message = message.ChatMessage(\n room_id, timestamp, author_str, content, client=self\n )\n self.add_task(\n self.on_chat_message(chat_message), transient=True\n )\n elif inp_type == \"pm\":\n author_str, recipient_str, *content = params\n content = \"|\".join(content)\n private_message = message.PrivateMessage(\n author_str, recipient_str, content, client=self\n )\n self.add_task(\n self.on_private_message(private_message), transient=True\n )\n\n # Rooms\n elif inp_type == \"init\":\n room_type = params[0]\n room_obj = room.class_map.get(room_type, room.Room)(\n room_id, client=self, max_logs=self.max_room_logs\n )\n self.rooms[room_id] = room_obj\n self.add_task(self.on_room_init(room_obj), transient=True)\n elif inp_type == \"deinit\":\n if room_id in self.rooms:\n self.add_task(\n self.on_room_deinit(self.rooms.pop(room_id)),\n transient=True,\n )\n\n # add content to proper room\n if isinstance(self.rooms.get(room_id, None), room.Room):\n self.rooms[room_id].add_content(inp)\n\n self.add_task(\n self.on_receive(room_id, inp_type, params), transient=True\n )", "def listen(self):\n\n # It's ideal to start listening before the game starts, but the\n # down-side\n # is that object construction may not be done yet. Here we pause\n # shortly\n # to let initialization finish, so all functionality (e.g. 
self.log)\n # is\n # available.\n time.sleep(0.1)\n\n for st in self.sentences():\n if st:\n self.onMessage(source=None, message=st)", "def twitch_receive_messages(self):\r\n self._push_from_buffer()\r\n result = []\r\n while True:\r\n # process the complete buffer, until no data is left no more\r\n try:\r\n time.sleep(.01)\r\n if self.s is None:\r\n raise Exception('socket is closed')\r\n msg = self.s.recv(4096).decode() # NON-BLOCKING RECEIVE!\r\n except socket.error as e:\r\n err = e.args[0]\r\n if err == errno.EAGAIN or err == errno.EWOULDBLOCK:\r\n # There is no more data available to read\r\n if len(result):\r\n self._maybe_print('returning with {}'.format(result))\r\n\r\n return result\r\n else:\r\n # a \"real\" error occurred\r\n # import traceback\r\n # import sys\r\n # print(traceback.format_exc())\r\n if not self.in_shutdown:\r\n print(\"Trying to recover...\")\r\n self.connect()\r\n return result\r\n else:\r\n self._maybe_print('twitch in: ' + msg)\r\n rec = [self._parse_message(line)\r\n for line in filter(None, msg.split('\\r\\n'))]\r\n rec = [r for r in rec if r] # remove Nones\r\n result.extend(rec)\r\n self._maybe_print(\"result length {} {}\".format(len(result), result))", "def run(self):\n self.logger.info(\"Starting messenger.\")\n self.recv()", "def receive(get_chat):\n # while True:\n try:\n final_chat = {\"final_chat\":get_chat}\n # headers = {'Content-type': 'application/json', 'Accept': 'text/plain'}\n # r = requests.post(url = API_ENDPOINT, data = json.dumps(final_chat), headers= headers).json()\n r = home(final_chat)\n print(r)\n r_text = r['text']\n if r[\"file\"] =='':\n pass\n else:\n r_file = r['file']\n run_file = os.path.join(os.sep,'new_folder',r_file)\n if os.path.isfile(run_file):\n print(True)\n genie_text = 'Genie: '+r_text\n msg_list.insert(END, genie_text)\n import subprocess\n shell_script = 'powershell.exe' + ' ' + run_file\n data = subprocess.call(shell_script, shell=True)\n r_text = r_file + \" ran successfully\"\n else:\n r_text = \"Sorry Diagnostics Scripts are not available\"\n\n except Exception as e: # Possibly client has left the chat.\n # print(e)\n r_text = \"I am afraid there is some issue while accessing to the data\"\n return r_text", "def add_messages(username, message):\n now = datetime.now().strftime(\"%H:%M:%S\")\n message_dict = {'timestamp': now, 'from':username, 'message':message}\n \n \"\"\"Write the chat message to messages.txt\"\"\"\n write_to_file(\"data/messages.txt\", \"{0} - {1}: {2} \\n\".format(message_dict['timestamp'], message_dict['from'].title(), message_dict['message']))", "def Start(self):\n for unused_i in range(0, self.args.message_count):\n self.CallClient(\n standard.ReadBuffer, offset=0, length=100, next_state=\"Process\")", "def onRecv(self, data):\n self.stream += data\n while self.handleStream(): pass", "async def _process_messages(self) -> None:\n try:\n while not self._client.closed:\n msg = await self._client.receive()\n\n if msg.type in (WSMsgType.CLOSE, WSMsgType.CLOSED, WSMsgType.CLOSING):\n break\n\n if msg.type == WSMsgType.ERROR:\n raise ConnectionFailed()\n\n if msg.type != WSMsgType.TEXT:\n raise InvalidMessage(f\"Received non-Text message: {msg.type}\")\n\n try:\n data = msg.json(loads=ujson.loads)\n except ValueError as err:\n raise InvalidMessage(\"Received invalid JSON.\") from err\n\n if LOGGER.isEnabledFor(logging.DEBUG):\n LOGGER.debug(\"Received message:\\n%s\\n\", pprint.pformat(msg))\n\n self._handle_incoming_message(data)\n\n finally:\n # TODO: handle reconnect!\n 
LOGGER.debug(\"Listen completed. Cleaning up\")\n\n for future in self._result_futures.values():\n future.cancel()\n\n if not self._client.closed:\n await self._client.close()\n\n if self._shutdown_complete_event:\n self._shutdown_complete_event.set()\n else:\n LOGGER.debug(\"Connection lost, will reconnect in 10 seconds...\")\n self._loop.create_task(self._auto_reconnect())", "async def _incoming_tcp(self, pid, reader):\n\n # When the user disconnects, asyncio will call it \"EOF\" (end of\n # file). Until then, we simply try to read a line from the\n # user.\n while not reader.at_eof():\n # reader.readline() is an asynchronous method\n # This means that it won't actually execute on its own\n # unless we 'await' it.\n # Under the hood, using this 'await' actually switches to\n # execute some other code until this player sends us\n # a message.\n msg = await reader.readline()\n\n # The player just sent us a message!\n # Remove any whitespace and convert from bytes to str\n msg = msg.strip().decode(encoding=\"latin1\")\n\n if msg:\n # Pass the message to server.on_player_msg().\n # The method there will send the message to the\n # Character that the player controls.\n # This function can be overriden for custom behavior.\n self.on_player_msg(pid, msg)\n\n logging.debug(\"_incoming_tcp closed for %s\", pid)", "def __receive_messages(self) -> [str]:\n while True:\n try:\n data = self.__socket.recv(4096)\n if data:\n msgs = self.__json_serializer.bytes_to_jsons(data)\n if RemotePlayerProxy.DEBUG:\n for msg in msgs:\n print(f'[RPP] [RECV] <- [{self.name}]: {msg}')\n return msgs\n except Exception as e:\n if RemotePlayerProxy.DEBUG:\n print(f'Lost client {self.name} because: ', e)\n return []", "def split_chat(self, chat):\n senders_messages = self.datetime_split(chat)\n for sender_message in senders_messages:\n message = self._get_message(sender_message)\n if message:\n self.messages.append(message)\n self.speakers.add(message.speaker)", "def listen(self):\n while self.active:\n self.handle_input()", "def text_e(self, event):\n directory=os.getcwd()+ '/messages'\n filename=str(self.user)+'_'+str(self.friend)\n text = self.text_send.GetValue()\n messages = mf.addMessage(self.user, self.friend, self.passw, text)\n mf.makeTextFile(self.user, self.friend, self.passw, messages)\n \n self.chat_log.LoadFile('/'.join((directory, filename)))\n self.text_send.SetValue(\"\")\n event.Skip()", "def listen(self):\n while True:\n peer, address = self.server.accept()\n peer_port = int(peer.recv(self.CHUNK_SIZE).decode(self.FORMAT))\n peer.send(bytes(str(self.PEER_LIST),self.FORMAT))\n if (address[0], peer_port) not in self.PEER_LIST:\n self.PEER_LIST.append((address[0], peer_port))\n output_message = f\"{datetime.now().timestamp()} : Added <{address[0]}:{peer_port}> to the Peer_list\"\n self.dump_to_file(output_message)\n print(output_message)\n thread = threading.Thread(target=self.handlePeers, args=(peer, ))\n thread.start()", "def ioloop_callback(self):\n\n logger.debug(\"ioloop_callback\")\n with self.lock:\n messages = list(self.messages)\n\n logger.debug(repr(messages))\n new_listeners = []\n smallest_offset = None\n for mid, listener in self.listeners:\n logger.debug(\"listener, waiting for lmid: %r\", mid)\n offset = bisect.bisect(messages, (mid, None))\n if messages[offset:]:\n logger.debug(\"offset: %i\", offset)\n listener.dispatch_messages([m for _, m in messages[offset:]])\n smallest_offset = min(offset, smallest_offset) if smallest_offset is not None else offset\n else:\n # we didn't yet have 
enough messages to\n # dispatch, so we re-append this bugger.\n logger.debug(\"re-appended listener\")\n new_listeners.append((mid, listener))\n \n self.listeners[:] = new_listeners\n logger.debug(\"remaining listeners: %r\", self.listeners)\n # if smallest_offset is not None:\n # with self.lock:\n # self.messages = self.messages[smallest_offset+1:]", "def process(self, message=None):\n\n while self.running:\n message = self.channel.basic.get(self.queue)\n if message:\n content = message.body\n\n # log message\n if self.debug:\n self.log(\"Recieved: \" + str(content))\n\n # send to child nodes\n self.scatter(Message(**self.parse(content)))\n else:\n # yield to other greenlet\n # self.tick()\n self.sleep(1)", "def refresh_chat(self):\n self.chat_container.noutrefresh()\n self.chat_win.noutrefresh()\n curses.doupdate()", "def run(self):\n if has_GUI:\n self.GUI(self.buffer)\n else:\n while True:\n message = input(\"Write your command:\\n\")\n # print(message)\n self.buffer.append(message)", "def run_chat_client():\r\n while must_run:\r\n print_menu()\r\n action = select_user_action()\r\n perform_user_action(action)\r\n print(\"Thanks for watching. Like and subscribe! 👍\")", "def get_all_messages():\n with open(\"data/messages.txt\", \"r\") as chat_list:\n messages = chat_list.readlines()\n return messages", "def _read_data(self):\n while True:\n try:\n data = yield from asyncio.wait_for(self._socket.recv(), 1)\n except asyncio.TimeoutError:\n continue\n except asyncio.CancelledError:\n break\n except ConnectionClosed:\n break\n\n self._push_packet(data)\n\n self._loop.call_soon(self.close)", "def handle_read(self):\n packet = self.recv(8192)\n if packet == \"\":\n #print \"[WARNING] Socket closed by remote host %s:%s\" % (\n # self.address,self.port)\n self.close()\n return\n packet_list = messages.separate_messages(packet)\n #received_types = \" + \".join(\n # messages.get_message_type(messages.parse(packet))\n # for packet in packet_list)\n #print \"From %s:%s received: \" % (self.address, self.port), received_types\n # Process a single message at a time\n for packet in packet_list:\n message = messages.parse(packet)\n if messages.get_message_type(message) == \"OFPT_ECHO_REQUEST\":\n self.buffer.append(messages.of_echo_reply)\n else:\n self.handle_message(message)", "def characters(self, ch):\n if self.inMessageContent:\n self.message = self.message + ch\n self.messages.append(self.message)", "def consume_messages(self):\n\n method_frame, properties, body = self.channel.basic_get(self.queue_name, no_ack=False)\n\n while method_frame:\n\n LOGGER.info(\"Message received\")\n\n self.channel.basic_ack(method_frame.delivery_tag)\n payload = json.loads(body)\n if not isinstance(payload, dict):\n return\n\n # Process the message\n if 'control' in payload:\n LOGGER.info(\"A control signal received!\")\n # self.set_control(payload['control'])\n print(payload['control'])\n\n # Continue getting messages\n method_frame, properties, body = self.channel.basic_get(self.queue_name, no_ack=False)\n\n # TODO\n # return control_signal", "def _on_read(self, line):\n # Some game logic (or magic)\n line = line.strip()\n logger.info(\"RCV> %s\", line)\n if not line:\n self.stream.close()\n return\n\n self.stream.write(\"echo: %s\\n\" % line)\n\n # Wait for further input on this connection\n self.wait()", "def add_chat_message(self, message):\n try:\n data = message.to_json()\n key = ENVIRONMENT['REDIS_PREFIX'] + \"chat_messages:%s\" % self.channel_id\n \n logging.info(data)\n \n self.redis_server.rpush(key, 
data)\n self.redis_server.publish(ENVIRONMENT['REDIS_PREFIX'] + 'chat_messages', data)\n except Exception, e:\n logging.info(\"ERROR adding message %s: %s\" % (message, e))\n raise", "def send_messages(self):\r\n self.clear_screen()\r\n user_label = Label(self.root, text=\"Hello \" + self.username,\r\n font=self.title_font, bg=self.bg_color, height=2)\r\n user_label.pack(pady=10, padx=50)\r\n messages_frame = Frame(self.root)\r\n messages_frame.pack(padx=30, pady=10)\r\n scrollbar_msg = Scrollbar(messages_frame)\r\n scrollbar_msg.pack(side=RIGHT, fill=Y)\r\n write_message = Text(messages_frame, width=50, height=15, font=self.text_font,\r\n yscrollcommand=scrollbar_msg.set)\r\n write_message.pack()\r\n scrollbar_msg.config(command=write_message.yview)\r\n button_speech_rec = Button(self.root, text=\"listen\\nto speech\", font=self.text_font,\r\n height=2, width=20,\r\n command=lambda: self.create_speech_thread(write_message))\r\n button_speech_rec.pack(pady=10)\r\n button_send = Button(self.root, text=\"send\", font=self.text_font,\r\n height=2, width=20, command=lambda: self.send(write_message))\r\n button_send.pack(pady=10)\r\n button_send = Button(self.root, text=\"go back\", font=self.text_font,\r\n height=2, width=20, command=self.choose_path)\r\n button_send.pack(pady=10)", "def _start_receive_from_queue(self):\n while True:\n received_message = recv_msg(self.TCPSock)\n # received_message = self.TCPSock.recv(self.buf)\n if self.verbose: print \"Server sends: \" + received_message\n self.receive_message_queue.put(received_message)", "async def receive(self, text_data):\n if self.user and not self.user.is_authenticated:\n return\n\n text_data_json = json.loads(text_data)\n message = text_data_json['message']\n\n full_name = \"{} {}\".format(self.user.first_name, self.user.last_name)\n if full_name == \" \":\n full_name = \"--\"\n\n try:\n room = Rooms.objects.get(name=self.room_name)\n except Rooms.DoesNotExist:\n return\n\n chat_object = Chat.objects.create(user_id=self.user.id, message=message, room=room)\n\n created_at = chat_object.created_at.strftime('%H:%M:%S %Y/%m/%d')\n\n await self.channel_layer.group_send(\n self.room_group_name,\n {\n 'type': \"chat_message\",\n 'message': message,\n 'user_id': self.user.id,\n 'publisher_full_name': full_name,\n 'created_at': created_at,\n }\n )", "def process(self, chunk):\n self._buf += chunk\n\n # Streams are `\\r\\n`-separated JSON messages.\n raw_lines = self._buf.split(b\"\\r\\n\")\n\n # If only one element in the split, then there wasn't a CRLF.\n if len(raw_lines) > 1:\n\n # The last element may be a b'', which is perfectly fine.\n self._buf = raw_lines[-1]\n\n # Blank lines are keep-alive messages.\n self._mailbox.extend(l for l in raw_lines[:-1] if l.strip())", "def _hear_message_from_server(self):\n while self.is_alive:\n data = self._socket.recv(1024)\n content = loads(data)\n self._current_list = content\n print(\"Servidor: {}\".format(content))", "def read(self):\n from x84.bbs import getch\n from x84.bbs.session import getsession\n from x84.bbs.output import echo\n session = getsession()\n self._quit = False\n echo(self.refresh())\n while not self.quit:\n echo(self.process_keystroke(getch()))", "def start_consuming(self):\n\n for queue in self._handlers.keys():\n self._consumer_tags += self._channel.basic_consume(self.on_message,\n queue=queue)", "async def read_loop(self):\n message: WSMessage # Fix typehinting\n async for message in self.ws:\n if message.type == WSMsgType.TEXT:\n await 
self.client.gateway_handler.on_receive(\n message.json(loads=loads), self)\n elif message.type in [WSMsgType.CLOSE, WSMsgType.CLOSING,\n WSMsgType.CLOSED]:\n self.logger.warning(\n f\"WebSocket is closing! Details: {message.json()}. Close \"\n f\"code: {self.ws.close_code}\")\n else:\n self.logger.warning(\n \"Unknown message type: \" + str(type(message)))", "def updateChat(self, ):\n self.__redrawChat()", "def start(update, context):\n chats = load_chats()\n chats.append( str( update.message.chat_id ) )\n save_channels(chats)\n update.message.reply_text('Chat registered!')", "def run(self) -> None:\n while self.data_incoming or len(self._queue):\n if not self._queue:\n logging.info(\"Consumer %d is sleeping since queue is empty\", self._name)\n time.sleep(0.75)\n print(self._queue.get())\n time.sleep(0.5)", "async def __bufferedReader():\n while True:\n # Get char and then append to prevent a race condition caused by the async await\n charIn = await __terminalState.osSupport.getInputChar()\n\n wasHandled = False\n for key, handlers in __terminalState.inputHandlers.items():\n if key is None or charIn in key:\n for handler in handlers:\n asyncio.get_event_loop().call_soon(handler, charIn)\n wasHandled = True\n\n if not wasHandled:\n __terminalState.inputBuffer += charIn", "def recv_input_locally(self):\n\n while self.commands.poll():\n command = self.commands.recv()\n self.local_commands.append(command)\n\n while self.audio_play.poll():\n frame = self.audio_play.recv()\n self.local_audio_play.append(frame)", "def background_thread():\n count = 0\n with open(\"logs.txt\", \"r\") as logfile:\n while True:\n socketio.sleep(1)\n count += 1\n\n line = logfile.readline()\n if line:\n socketio.emit('my_response',\n {'data': line, 'count': count},\n namespace='/test')", "def chatReceiveMessage(self, chat, user, message):\n self.on_message(user, message, False, False, chat)", "def start(self):\n while True:\n ident = self.reply_socket.recv()\n assert self.reply_socket.rcvmore(), \"Missing message part.\"\n msg = self.reply_socket.recv_json()\n omsg = Message(msg)\n print>>sys.__stdout__\n print>>sys.__stdout__, omsg\n handler = self.handlers.get(omsg.msg_type, None)\n if handler is None:\n print >> sys.__stderr__, \"UNKNOWN MESSAGE TYPE:\", omsg\n else:\n handler(ident, omsg)" ]
[ "0.69138986", "0.6706282", "0.6597729", "0.65439147", "0.64965916", "0.64251614", "0.6425156", "0.64248693", "0.64159244", "0.6353749", "0.63389343", "0.63240904", "0.6288118", "0.6244579", "0.6240275", "0.621565", "0.61916894", "0.61845356", "0.6172189", "0.610745", "0.61024016", "0.61013895", "0.60933006", "0.6085623", "0.6084471", "0.60711896", "0.6068645", "0.6064049", "0.60614014", "0.6045931", "0.6034912", "0.6025307", "0.60158837", "0.60099417", "0.60009694", "0.5997274", "0.5991899", "0.5982215", "0.59803605", "0.5963686", "0.59594065", "0.59496397", "0.5924848", "0.59102947", "0.58758295", "0.58687073", "0.5863497", "0.58599424", "0.58564836", "0.5856199", "0.58302504", "0.58253646", "0.5813231", "0.58040565", "0.5798157", "0.5797907", "0.5795033", "0.5787196", "0.57815385", "0.5778745", "0.5778539", "0.5765396", "0.5748192", "0.5748089", "0.5743681", "0.573709", "0.57262856", "0.5718587", "0.5714636", "0.5713254", "0.5699559", "0.5680946", "0.56676185", "0.5652752", "0.5640311", "0.56390756", "0.56374186", "0.56034565", "0.55956626", "0.55940676", "0.55892915", "0.558777", "0.5586469", "0.5585988", "0.5575487", "0.5558539", "0.5555858", "0.5551685", "0.554757", "0.5531986", "0.55305535", "0.5528147", "0.55263853", "0.55172586", "0.5515335", "0.5512", "0.5508078", "0.5507566", "0.5505968", "0.5504856" ]
0.5676125
72
Sends the message over the socket and also adds it to the chat.
def _send_message(self, e: Event):

    message = self.message_text.get("1.0", 'end-1c').replace('\n', "")

    if len(message) > 0:
        self.add_message_to_chat('you: ' + message)
        self._clear_message_text()
        self.connection_socket.send(bytes('them: ' + message, 'utf-8'))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def sendChatMessage(self, msg):\n self.transport.write(msg)", "def send_message(self, message):\n \n msgPacket = serverbound.play.ChatPacket()\n msgPacket.message = message\n self.connection.write_packet(msgPacket)", "def send_message(self, message):\r\n\t\tself.__tcpSocket.write(message.encode('utf8'))", "def send(self, message):\n self.sock.send(message)", "def write(self, msg):\n self.sock.send(msg.encode())", "def send_message(self, message):\n\n self.socket.send(message.serialize())", "def send(self, msg):\n self.__sock.send(msg)", "def _send(self, message):\n self.sock.sendall('%s\\n' % message)", "def send(msg): # event is passed by binders.\n # print(\"i sended: \" + msg)\n msg = msg + \";\"\n client_socket.send(bytes(msg, \"utf8\"))", "async def chat_message(self, event):\n await self.send(\n {'type': \"websocket.send\",\n 'text': event['response_data']}\n )", "def sendMessage(self, msg):\n # Socket Object\n self.sock.connect((self.host, self.port))\n self.sock.send(msg)\n self.sock.close()", "def send_message(self, message):\n encoded_message = self.encode_message(message)\n self.socket.send(encoded_message)", "def send_message(self, message, socket):\n socket.send(bytes(message, 'UTF-8'))", "def write(self, msg):\n cmd = self.__compose(msg)\n self.sock.send(cmd)", "def send_message(self, message):\r\n if not self.is_connected():\r\n self.__root.after(self.WAIT_PERIOD, lambda: self.\r\n send_message(message))\r\n return\r\n self.__socket.send(str(message).encode())", "def send_message(self, message):\n self.print_debug_message(message)\n self.socket.send(message)", "async def send_msg(self, message: str) -> None:\n await self.socket.sendall(message.encode())", "def send(self):\n if(self.target):\n try:\n self.message = self.message +\"\\r\\n\"\n self.target[0].send(self.message)\n except socket.error, err:\n print err", "def send(self, message):\n if self.connection:\n self.connection.send(message)", "def send_message(self, message:str):\r\n msg_send = message.encode()\r\n self.server_connection.send(msg_send)", "def _send(self, message):\n logger.info(message)\n self.buffer.put(message)", "def send_message(self, message):\n\t\tself.logger.send(\"{0} - {1}\".format(self.peerip, str(message)))\n\t\ttry:\n\t\t\tself.socket.sendall(message.get_message(self.coin))\n\t\texcept socket.error as err:\n\t\t\tself.stop(err.errno,'send_message')", "def send(self, message):\n form_message = message + protocol.MESSAGE_SEPARATOR\n sent = 0\n while sent < len(form_message):\n try:\n sent += self.socket.send(form_message[sent:])\n except (OSError, AttributeError) as e:\n self.logger.critical(\"Error while sending message.\", exc_info=True)\n raise MessengerException(\"Socket not connected.\") from e", "def chat(sock, msg):\r\n message = \"PRIVMSG {} :{}\\r\\n\".format(cfg.CHAN, msg)\r\n #print(\"Sending: \"+message)\r\n sock.send(message.encode(\"utf-8\"))", "async def chat_message(self, event):\n message = event['message']\n await self.send_json({\n 'message': message\n })", "def sendMessage(sock, message):\n messageTemp = \"PRIVMSG \" + channel +\" :\" +message\n sock.send((messageTemp+ \"\\n\").encode())", "def send(self, msg):\n if self.sock is not None:\n try:\n send_msg(self.sock, msg)\n except socket.error, msg:\n self.sock = None\n print 'Send failed. 
Error Code : ' + str(msg[0]) + ' Message ' + msg[1]", "def _send(self, message):\r\n if not message:\r\n return\r\n\r\n self._maybe_print('twitch out queued: ' + message)\r\n self.buffer.append(message + \"\\n\")", "def __send_message(self, data):\n if RemotePlayerProxy.DEBUG:\n print(f'[RPP] [SEND] -> [{self.name}]: {data}')\n\n try:\n self.__socket.sendall(bytes(data, 'ascii'))\n except Exception as e:\n if RemotePlayerProxy.DEBUG:\n print(e)", "def send_message(self, msg):\n if msg is None:\n raise ValueError('message cannot be None!')\n\n if not isinstance(msg, message.Message):\n raise ValueError('message must be a type of Message')\n\n message_json = json.dumps(msg.__dict__)\n message_length = len(message_json)\n message_length_binary = struct.pack('>I', message_length)\n\n logging.info(\"Send: {0}\".format(message_json))\n\n self.sck.send(message_length_binary)\n self.sck.send(message_json)", "def send_message(self, message):\n msg_bytes = (\n f'{self.username}{self.delimiter}{message}'\n ).encode('utf-8')\n self.socket.writeDatagram(\n qtc.QByteArray(msg_bytes),\n qtn.QHostAddress.Broadcast,\n self.port\n )", "def send(self, message):\n self.logger.info(\"Sending to server: %s\" % message)\n self.sendLine(message)", "def send(self, msg):\n sleep(self.m_to)\n self.conn.send(msg)", "def send(self, recipient_socket, message):\n try:\n recipient_socket.send(message.encode('utf-8'))\n for client, sock in self.connection_dict.items():\n if sock == recipient_socket:\n self.log(\"A message '{}' has been sent to client '{}'\".format(message, client))\n except:\n # Broken socket connection may be, chat client pressed ctrl+c for example\n recipient_socket.close()\n self.remove_socket(recipient_socket)", "def add_chat_message(self, message):\n try:\n data = message.to_json()\n key = ENVIRONMENT['REDIS_PREFIX'] + \"chat_messages:%s\" % self.channel_id\n \n logging.info(data)\n \n self.redis_server.rpush(key, data)\n self.redis_server.publish(ENVIRONMENT['REDIS_PREFIX'] + 'chat_messages', data)\n except Exception, e:\n logging.info(\"ERROR adding message %s: %s\" % (message, e))\n raise", "def send(self, msg):\r\n if isinstance(msg, str):\r\n msg = msg.encode()\r\n logger.debug('Sending message: %s ...', repr(msg))\r\n self._socket.sendall(msg)", "def on_message(self, message):\n #print(f\"This message was sent: {message}\") # Writes to the console window (server side)\n self.write_message(f\"This message was sent: {message}\") # Writes message to sender", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def send(self, msg):\n pass", "def transmit(self, msg):\r\n # send our message to the client\r\n self.conn.sendall(msg)", "def send(self, message):\n _check_message_type(message=message)\n response = requests.post(\n self._server_url + _SEND_URL,\n data={\"id\": self._chat_id, \"msg\": message}\n )", "def send(message):\n\tmessage = message.encode()\n\tconn.send(message)", "def send(self, message):\n pass", "def sendMsg(self, msg):\n self.sockUDP.sendto(bytes(msg), self.serverAddress)\n logger.debug(\"sent: %r\", msg)", "def send_message(sock, message) -> None:\n print('[CLIENT LOG] sending message to server: {}'.format(str(message)))\n if type(message) == bytes:\n \n sock.sendall(message)\n else:\n sock.sendall(str.encode(str(message)))", "def send_msg(sock, msg):\n msg += '\\0'\n data = msg.encode('utf-8')\n sock.sendall(data)", "def send_chat_message(self, channel, message):\r\n self._send(\"PRIVMSG #{0} :{1}\".format(channel, message))", "def send_message(self, data):\n header, 
data = format_msg(data)\n self.server_socket.sendto(header, self.client_address)\n self.server_socket.sendto(data, self.client_address)", "def send(self, msg):\n # keep track of the total sent\n # so we can make sure the whole message is sent\n msg = (msg+'\\n').encode('utf-8')\n totalsent = 0\n while totalsent < len(msg):\n sent = self.sock.send(msg[totalsent:])\n # it is bad if we still have things to send\n # but do not send anything\n if sent == 0:\n raise RuntimeError(\"connection broken\")\n totalsent += sent", "def send_message(self, message):\n pass", "def send(self, msg: str):\n\t\tself.client.send(msg.encode())", "def send(self):\r\n if self.connection:\r\n self.connection.send(self.getLine())\r\n else:\r\n print \"(0) message without connection could not be sent\"", "def s_send(self, command_type, msg):\n # A 1 byte command_type character is put at the front of the message\n # as a communication convention\n try:\n self.client_socket.send((command_type + msg).encode())\n except:\n # If any error occurred, the connection might be lost\n self.__connection_lost()", "async def on_socket_send(self, msg: \"Msg | MsgProto\") -> None:", "def sendMessage(self, message):\n message.counter = self.counter\n self.socket.send(message.tobytes())\n self.counter += 1", "async def chat_message(self, event):\n await self.send_json(\n return_value(\n ACTION_MESSAGE,\n event['label'],\n event['username'],\n MSG_MESSAGE,\n event['message']\n )\n )", "def send_message(self, msg: dict):\n txrx_debug('{} sending {} msg to {}'.format(msg['src'], msg['type'], msg['dst']))\n self.sock.send(dumps(msg).encode('utf-8'))", "def sendchat(self, the_id, msg):\r\n the_id = Client.toroomid(the_id)\r\n self.tx_cmd(FCTYPE.CMESG, the_id, 0, 0, msg)\r\n #@TODO - Emote encoding\r", "def send_protocol_message(self, msg):\n self.conn.send(msg + \"\\0\")", "def send_message(self, message):\n if self.connected:\n self.send(\n json.dumps(message.request))", "def sendMessage(self, message):\n self.connection.sendMessage(self, message.encode('ascii', 'ignore'))", "def send_message(self, data):\n self.transport.write(data)", "def send_message():\n try:\n sock = socket(AF_INET, SOCK_STREAM)\n sock.connect((SERVER_IP, SERVER_PORT))\n print('[+] ' + SERVER_IP + ' connected!')\n position = MESSAGE.encode('utf-8')\n sock.send(bytes(position))\n sock.close()\n print('[+] Transfer completed!')\n except Exception as e:\n print('[-]', e)", "def send(self, msg):\n if self.verbose:\n print('<- out ' + msg)\n self._socket.send_string(msg)\n return", "def chat(sock, msg):\n full_msg = \"PRIVMSG {} :{}\\n\".format('#' + encryption_key.decrypted_chan, msg)\n msg_encoded = full_msg.encode(\"utf-8\")\n print(msg_encoded)\n sock.send(msg_encoded)", "def send_message(self, message, user, msg_type=MSG_TYPE_MESSAGE):\n final_msg = {'room': str(self.id), 'message': message, 'username': user.username, 'msg_type': msg_type}\n\n # Send out the message to everyone in the room\n self.websocket_group.send(\n {\"text\": json.dumps(final_msg)}\n )", "def sendMessage(self, msg):\r\n binaries, msg = recursiveBinarySearch(msg)\r\n msg = json.dumps(msg)\r\n\r\n if isInIOThread():\r\n self._send(msg, binaries)\r\n else:\r\n self._connection.reactor.callFromThread(self._send, msg, binaries)", "def send(self, msg):\n msg = stc.pack('>I', len(msg)) + msg\n self.sendall(msg)", "def send(self, message, sender):\n chatclient.receive_chat_message(message, sender)\n return {}", "def recieved_message(json, methods=['GET', 'POST']):\n json['username'] = 
session['username']\n socketio.emit('server message', json)\n message = Message(\n user_id = session['user_id'],\n room_id = json[\"room_id\"],\n sendTime = datetime.now(),\n content = json[\"content\"]\n )\n db.session.add(message)\n db.session.commit()", "def send(self, msg):\n\n self.sock.sendto(msg, (self.UDP_IP, self.UDP_PORT))", "def write(self):\r\n assert self.status == SEND_ANSWER\r\n sent = self.socket.send(self.message)\r\n if sent == len(self.message):\r\n self.status = WAIT_LEN\r\n self.message = ''\r\n self.len = 0\r\n else:\r\n self.message = self.message[sent:]", "def send(self, msg):\n self.message('Me', msg)", "def send(event=None): # event is passed by binders.\n print(\"socket\")\n print(client_socket)\n msg = my_msg.get()\n my_msg.set(\"\") # Clears input field.\n try:\n client_socket.send(bytes(msg, \"utf8\"))\n except BrokenPipeError:\n error_msg = \"Unable to send\"\n msg_list.insert(tkinter.END, error_msg)\n \n if msg == \"{quit}\":\n client_socket.close()\n top.quit()", "def _send(self, message: str) -> None:\n logger.info(\"Send: {}\".format(message['type']))\n logger.debug(\"Send: {}\".format(message))\n\n message_b = (json.dumps(message) + '\\r\\n').encode()\n self.transport.write(message_b)", "async def chat_message(self, event):\n if self.user and not self.user.is_authenticated:\n return\n\n user_id = event['user_id']\n message = event['message']\n created_at = event['created_at']\n publisher_full_name = event['publisher_full_name']\n\n await self.send(text_data=json.dumps({\n 'user_id': user_id,\n 'created_at': created_at,\n 'message': \"{}\".format(message),\n 'publisher_full_name': publisher_full_name,\n }))", "def send(self, msg):\n #assert(isinstance(msg, Message))\n\n msg = envelp(msg, self.get_msg_id())\n self.send_raw(msg)\n\n # TODO: Fix this: this little delay is to be able to\n # send messages one after the other\n #\n # without this delay, following code is not working:\n #\n # the_actor.send({'a': 'message'})\n # the_actor.send({'a': 'different message'})\n #\n gevent.sleep(0.000000000000000000000000001)", "async def send(self, message):", "def send_message(channel, data):\n try:\n socketio.emit(channel, data)\n logging.info('Message was sent.')\n logging.debug(data)\n except Exception as e:\n logging.error(e)\n logging.error(\"Can't send message. 
Exeption occured\")", "def send_message(self, to, message):\n\t\tmessage_dict = {\n\t\t\tACTION: MESSAGE,\n\t\t\tSENDER: self.username,\n\t\t\tDESTINATION: to,\n\t\t\tTIME: time.time(),\n\t\t\tMESSAGE_TEXT: message\n\t\t}\n\t\tclient_log.debug(f'Сформирован словарь сообщения: {message_dict}')\n\t\t# Необходимо дождаться освобождения сокета для отправки сообщения\n\t\twith socket_lock:\n\t\t\tsend_message(self.transport, message_dict)\n\t\t\tself.process_server_ans(get_message(self.transport))\n\t\t\tclient_log.info(f'Отправлено сообщение для пользователя {to}')", "def send(self, msg):\r\n self.msgLock.acquire()\r\n self.msg.append(msg)\r\n self.numMsg += 1\r\n self.msgLock.release()", "def sendMsg(self, chat, msg):\n try:\n self.chats[chat].SendMessage(msg)\n return \"Message sent\\n\"\n except KeyError:\n raise RuntimeError(\"No chat %s\" % chat)", "def send_msg(self, msg):\n self.msg_queue.put(dict(to=settings.IOTTLY_XMPP_SERVER_USER,msg='/json ' + json.dumps(msg)))", "def sendMsg(self, channel, message, length=None):\n self.logger.info(\"Sending in %s: %s\" % (channel, message))\n self.msg(channel, message, length)", "def send_message(self, message: str):\n self.client.chat_postMessage(\n channel=f\"@{self.username}\", text=message,\n )", "async def chat_message(self, event):\n\n print(\"PublicChatConsumer\", \"chat_message from user\", event[\"user_id\"])\n await self.send_json({\n \"msg_type\": MSG_TYPE_MESSAGE,\n \"profile_image\": event[\"profile_image\"],\n \"username\": event[\"username\"],\n \"user_id\": event[\"user_id\"],\n \"message\": event[\"message\"],\n \"natural_timestamp\": humanize_or_normal(timezone.now())\n })", "def tell(self, message):\n message_bytes = message.encode(\"utf8\") + b\"\\x00\" # TODO: move to messenger\n self.messenger.send(message_bytes)", "def send(self, sock, message):\r\n try:\r\n sock.send(message)\r\n except:\r\n ip = sock.getpeername()[0]\r\n thread_message = protocol.thread.disconnected(\r\n client=ip)\r\n self.logic_queue.put(thread_message)\r\n del self.sockets[ip]", "def send(self, msg):\n with self._send_lock:\n self._rt.send_message(msg.bytes())", "def sendCommand(self,command,message):\n \n msg_temp = command + \" \" + message +'\\n'\n msg = msg_temp.encode('UTF-8')\n self.socket.send(msg)", "def sendCommand(self,command,message):\n \n msg_temp = command + \" \" + message +'\\n'\n msg = msg_temp.encode('UTF-8')\n self.socket.send(msg)", "def set_message(self,message):\n message_length=len(message)\n if isinstance(message,str):\n message=message.encode()\n self._messageToSend=message\n elif isinstance(message,bytes):\n pass\n\n # print(self.type, \" sending \", message)\n else:\n print (\"set_message not string or bytes error type(message):\",type(message))\n message=\"\"\n\n while len(message)<self.BUFFER_SIZE-1:\n message=message + b'\\0'\n message=message_length.to_bytes(1,\"big\")+message\n\n\n if len(message)>0:\n try:\n self._s.sendto(message,(self.address,self._sendToPort))\n except Exception as e:\n print(\"cannot send message, socket may be closed,\",e)", "def send_message(self, msg):\n self.logger.debug(msg)\n self.writer.send(json.dumps(msg))", "def send_msg(self):\n while True:\n msg = input()\n # Added to show logs clean at the first time\n # a conncetion send a message.\n if(self.flag):\n self.k = self.k + 1\n self.flag = False\n self.srvsock.send(bytes(msg, encoding='utf-8'))", "def send(self, data):\n self.sock.send(data)", "def send(self, data):\n self.sock.send(data)", "def write_message(self, message):\r\n 
logging.debug(\"Sending message {mes} to {usr}\".format(mes=message, usr=self.id))\r\n self.handler.write_message(message)", "def send_message(self, message):\n self.send_message_queue.put(message)", "def send(self, msg):\n body = json.dumps(msg)\n body = \"Content-Length: \" + str(len(body)) + \"\\r\\n\\r\\n\" + body\n body = bytes(body, \"ascii\")\n totalsent = 0\n while totalsent < len(body):\n sent = self.sock.send(body[totalsent:])\n if sent == 0:\n raise RuntimeError(\"socket connection broken\")\n totalsent = totalsent + sent" ]
[ "0.8096262", "0.7986446", "0.781855", "0.77682155", "0.7713088", "0.7641384", "0.7619159", "0.75668764", "0.75613767", "0.75334066", "0.7495466", "0.74901545", "0.74558794", "0.74528867", "0.73387706", "0.7315095", "0.72622466", "0.7216694", "0.7187263", "0.7170448", "0.7164233", "0.7163439", "0.7153602", "0.71477795", "0.7138391", "0.7122128", "0.71065", "0.70962983", "0.70766056", "0.70756406", "0.706222", "0.70590895", "0.70563465", "0.7052954", "0.7027868", "0.7000386", "0.69919026", "0.6983129", "0.6983129", "0.6983129", "0.6979263", "0.6954702", "0.6948111", "0.6943357", "0.6943007", "0.6934484", "0.6927295", "0.69220614", "0.6918031", "0.68984133", "0.6898037", "0.6897062", "0.6890065", "0.6885293", "0.68829346", "0.6880482", "0.6876011", "0.68738925", "0.68732977", "0.6869168", "0.68653667", "0.6855709", "0.6848042", "0.68469", "0.6818854", "0.681423", "0.6810716", "0.67941034", "0.6772601", "0.67680955", "0.67676216", "0.67669713", "0.67660207", "0.675998", "0.67516935", "0.67205155", "0.6717293", "0.67053634", "0.67047733", "0.66972053", "0.669358", "0.6691726", "0.6690731", "0.6685171", "0.6671894", "0.66643584", "0.66507477", "0.6630494", "0.66274923", "0.66265804", "0.6622639", "0.6622639", "0.6621698", "0.66159654", "0.66108954", "0.6610771", "0.6610771", "0.66035795", "0.65995526", "0.65980417" ]
0.70444596
34
Adds a message to the chat and scrolls down.
def add_message_to_chat(self, message: str):

    scroll_length = (len(message) // Client.TEXTBOX_CHARACTER_LENGTH) + 1

    self.chat_text.config(state=NORMAL)
    self.chat_text.insert(END, message + '\n')
    self.chat_text.yview_scroll(scroll_length, "units")
    self.chat_text.config(state=DISABLED)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def list_add_chat_message(self, chat_message):\n self.chat_messages.append(chat_message)\n\n #logging.info(\"adding message: %s\" % chat_message.message)\n\n if len(self.chat_messages) > ENVIRONMENT['BUFFER_SIZE']:\n self.chat_messages.pop(0)\n\n # alert our polling clients\n self.new_message_event.set()\n self.new_message_event.clear()", "def _add_to_chat_queue(self, message):\n self.chat_message_queue.appendleft(message)", "def add_msg(self, msg):\n self.chat_win.addch('\\n')\n self.chat_win.addstr(\"[{}] {}\".format(\n datetime.strftime(datetime.now(), \"%H:%M\"), msg)\n )\n self.refresh_all()", "def new_message(self, message):\n self.message_counter += 1\n self.message_buffer.append(str(message))\n self.event_loop()", "def add_message(self, msg):\n self.messages.append(msg)", "def messageScrolled(self,message):\n from dialogs import speDialog\n if sys.platform!='win32':message='<font size=-2>%s</font>'%message\n speDialog.create(self, message, self.path)", "def add_message(self, message):\n self.message_list.append(message)", "def scrollUp(self, messages=1):\n self.scrollOffset -= messages\n self._recalculateCoordinates()", "def scrollDown(self, messages=1):\n if self.scrollOffset < 1:\n self.scrollOffset += messages\n self._recalculateCoordinates()", "def _log_append(self, msg):\n\t\tp = self._edit.get_buffer()\n\t\tstart,end = p.get_bounds()\n\t\tp.insert(end, msg)\n\t\tself._trunc_lines()\n\t\tself._edit.scroll_to_iter(p.get_end_iter(), 0.0)", "def add_chat_message(self, message):\n try:\n data = message.to_json()\n key = ENVIRONMENT['REDIS_PREFIX'] + \"chat_messages:%s\" % self.channel_id\n \n logging.info(data)\n \n self.redis_server.rpush(key, data)\n self.redis_server.publish(ENVIRONMENT['REDIS_PREFIX'] + 'chat_messages', data)\n except Exception, e:\n logging.info(\"ERROR adding message %s: %s\" % (message, e))\n raise", "def show_message(self, message):\n self.sense.show_message(\n message,\n scroll_speed=self.SCROLL_SPEED,\n text_colour=self.TEXT_COLOUR\n )", "def __draw_message(self, message):\n x_offset = (curses.COLS - len(message)) // 2\n self.message_win.addstr(0, x_offset, message)", "def send_message(self, message:str):\n self.chat.click()\n text_box = self.chat.find_element_by_xpath(\"//div[@class='_2_1wd copyable-text selectable-text' and @data-tab='6']\")\n text_box.click()\n text_box.send_keys(message)\n time.sleep(0.1)\n send_button = self.chat.find_element_by_xpath(\"//button[@class='_1E0Oz']\")\n send_button.click()", "def display_message(self, message):\n with self.lock:\n self.messages_list.configure(state='normal')\n self.messages_list.insert(tk.END, message)\n self.messages_list.configure(state='disabled')\n self.messages_list.see(tk.END)", "def display_message(self, message):\n with self.lock:\n self.messages_list.configure(state='normal')\n self.messages_list.insert(tk.END, message)\n self.messages_list.configure(state='disabled')\n self.messages_list.see(tk.END)", "def __addmsg(self, msg: str) -> None:\n # region Docstring\n # endregion\n self.record += msg\n self.textbox.kill()\n self.textbox = UITextBox(\n html_text=self.record,\n relative_rect=Rect((0, 0), (self.size[0], self.size[1] - 25)),\n container=self,\n manager=self.ui_manager,\n )", "async def chat_message(self, event):\n\t\t# Send a message down to the client\n\t\tprint(\"DocumentChatConsumer: chat_message from user #\" + str(event))\n\t\ttimestamp = calculate_timestamp(timezone.now())\n\t\tawait self.send_json(\n\t\t\t{\n\t\t\t\t\"msg_type\": MSG_TYPE_MESSAGE,\n\t\t\t\t\"annotationId\": 
event['annotationId'],\n\t\t\t\t\"username\": event[\"username\"],\n\t\t\t\t\"user_id\": event[\"user_id\"],\n\t\t\t\t\"xfdfString\": event[\"message\"],\n\t\t\t\t\"natural_timestamp\": timestamp,\n\t\t\t},\n\t\t)", "async def chat_message(self, event):\n message = event['message']\n await self.send_json({\n 'message': message\n })", "def append_message(self, message):\n if message['message_id'] in self.message_ids:\n return\n self.message_ids.append(message['message_id'])\n self.messages.append(message)", "def scrollUp(self):\n if self.__firstShownLine > 0:\n self.__firstShownLine -= 1\n self.__refreshContent()\n else:\n curses.beep()", "def ScrollMessage(text, color, repeat):\n text_area.text = text\n text_area.color = color\n\n # Start the message just off the side of the glasses\n x = display.width\n text_area.x = x\n\n # Determine the width of the message to scroll\n width = text_area.bounding_box[2]\n\n for _ in range(repeat):\n while x != -width:\n x = x - 1\n text_area.x = x\n\n # Update the switch and if it has been pressed abort scrolling this message\n switch.update()\n if not switch.value:\n return\n\n time.sleep(0.025) # adjust to change scrolling speed\n x = display.width", "def updateChat(self, ):\n self.__redrawChat()", "def send_messages(self):\r\n self.clear_screen()\r\n user_label = Label(self.root, text=\"Hello \" + self.username,\r\n font=self.title_font, bg=self.bg_color, height=2)\r\n user_label.pack(pady=10, padx=50)\r\n messages_frame = Frame(self.root)\r\n messages_frame.pack(padx=30, pady=10)\r\n scrollbar_msg = Scrollbar(messages_frame)\r\n scrollbar_msg.pack(side=RIGHT, fill=Y)\r\n write_message = Text(messages_frame, width=50, height=15, font=self.text_font,\r\n yscrollcommand=scrollbar_msg.set)\r\n write_message.pack()\r\n scrollbar_msg.config(command=write_message.yview)\r\n button_speech_rec = Button(self.root, text=\"listen\\nto speech\", font=self.text_font,\r\n height=2, width=20,\r\n command=lambda: self.create_speech_thread(write_message))\r\n button_speech_rec.pack(pady=10)\r\n button_send = Button(self.root, text=\"send\", font=self.text_font,\r\n height=2, width=20, command=lambda: self.send(write_message))\r\n button_send.pack(pady=10)\r\n button_send = Button(self.root, text=\"go back\", font=self.text_font,\r\n height=2, width=20, command=self.choose_path)\r\n button_send.pack(pady=10)", "def add_message(self, message):\n try:\n self.send_loop(message)\n except AttributeError:\n raise UnsupportedMessageTypeError(message.__class__.__name__)", "def append_message(self, message_object):\n self.messages.append(message_object)", "async def new_message(self, message):\n user = self.scope['user']\n response_data = {\n 'message': message,\n 'username': user.get_full_name()\n }\n await self.create_chat_message(user, message)\n await self.channel_layer.group_send(\n self.conversation_name,\n {\n 'type': 'chat_message',\n 'response_data': json.dumps(response_data)\n }\n )", "def text(message):\n global list_messages\n room = session.get('room')\n msg = session.get('name') + ':' + message['msg']\n list_messages.append(msg)\n addNewMsg(message,session)\n print ('size of list_messages ' + str(len(list_messages)) + ', session ' + str(session))\n emit('message', {'msg': msg}, room=room)", "def add_message(self, msg_id, location, msg):\n\n self._messages.append((msg_id,location,msg))", "def add_message(self, msg):\n msg_string = json.dumps(msg)\n self.redis_client.publish(self.message_channel, msg_string)\n self.redis_client.lpush(self.message_list, msg_string)\n 
self.redis_client.ltrim(self.message_list, 0,\n app.config[\"MAX_MESSAGES\"]-1)", "def do_message(self, message):\r\n \r\n if not self.display_game:\r\n return\r\n \r\n if SlTrace.trace(\"message\"):\r\n if (self.prev_message is None\r\n or len(message.text) > len(self.prev_message)\r\n or len(message.text) > SlTrace.trace(\"message_len\", default=25) > 25):\r\n SlTrace.lg(f\"{len(message.text)}: {message}\")\r\n self.prev_message = message.text\r\n message.text = message.text[0:SlTrace.trace(\"message_len\", default=25)]\r\n SlTrace.lg(\"do_message(%s)\" % (message.text), \"execute\")\r\n if not self.run:\r\n return\r\n \r\n if (self.mw is None or not self.mw.winfo_exists()\r\n or self.msg_frame_base is None\r\n or not self.msg_frame_base.winfo_exists()):\r\n return\r\n \r\n self.wait_message(message)\r\n if self.msg_frame is not None:\r\n self.msg_frame.destroy() # Remove all message frames\r\n self.msg_frame = None\r\n self.msg_frame = Frame(self.msg_frame_base)\r\n self.msg_frame.pack(side=\"top\", expand=NO, fill=NONE)\r\n text = f'{message.text:40}'\r\n color = message.color\r\n font_size = message.font_size\r\n if font_size is None:\r\n font_size=40\r\n time_sec = message.time_sec\r\n\r\n \r\n if (self.mw is None or not self.mw.winfo_exists()\r\n or self.msg_frame is None\r\n or not self.msg_frame.winfo_exists()):\r\n return\r\n \r\n if self.mw is not None and self.mw.winfo_exists():\r\n if self.cur_message is not None:\r\n self.cur_message.destroy()\r\n self.cur_message = None\r\n width = self.get_width()\r\n if width < 500:\r\n width = 500\r\n message.msg = Message(self.msg_frame, text=text, width=width) # Seems to be pixels!\r\n message.msg.config(fg=color, bg='white',\r\n anchor=S,\r\n font=('times', font_size, 'italic'))\r\n message.msg.pack(side=\"top\")\r\n ###message.msg.pack(side=\"bottom\")\r\n self.cur_message = message\r\n if time_sec is not None:\r\n if self.speed_step >= 0:\r\n time_sec = self.speed_step # Modify for view / debugging\r\n end_time = datetime.now() + timedelta(seconds=time_sec)\r\n message.end_time = end_time", "def read_messages(self, msg_num):\r\n self.clear_screen()\r\n user_label = Label(self.root, text=\"Hello \" + self.username, font=self.title_font,\r\n bg=self.bg_color, height=2)\r\n user_label.pack(pady=5, padx=50)\r\n lbl_msg = Label(self.root, text=\"Message \" + str(msg_num), font=self.title_font,\r\n bg=self.bg_color)\r\n lbl_msg.pack(pady=5, padx=10)\r\n self.refresh_button = Button(self.root, text=\"Refresh page\", font=self.text_font,\r\n bg=self.bg_color, command=lambda: self.refresh(msg_num))\r\n self.refresh_button.pack(padx=10, pady=10)\r\n messages_frame = Frame(self.root)\r\n messages_frame.pack(padx=30, pady=15)\r\n scrollbar_msg = Scrollbar(messages_frame)\r\n scrollbar_msg.pack(side=RIGHT, fill=Y)\r\n text_widget = Text(messages_frame, width=50, height=15, font=self.text_font,\r\n yscrollcommand=scrollbar_msg.set)\r\n text_widget.pack()\r\n scrollbar_msg.config(command=text_widget.yview)\r\n button_send = Button(self.root, text=\"go back\", font=self.text_font,\r\n height=2, width=20, command=self.go_back_read)\r\n button_send.pack(pady=5, side=BOTTOM)\r\n button_send = Button(self.root, text=\"see/close message\\ncontrol panel\",\r\n font=self.text_font,\r\n height=2, width=20,\r\n command=lambda: self.new_window_messages(button_send))\r\n button_send.pack(pady=5, side=BOTTOM)\r\n if self.msg_list:\r\n if msg_num < len(self.msg_list):\r\n next_msg = Button(self.root, text=\"next message\", font=self.text_font,\r\n height=2, 
width=20,\r\n command=lambda: self.read_messages(msg_num + 1))\r\n next_msg.pack(pady=5, padx=5, side=RIGHT)\r\n if msg_num > 1:\r\n previous_msg = Button(self.root, text=\"previous message\", font=self.text_font,\r\n height=2, width=20,\r\n command=lambda: self.read_messages(msg_num - 1))\r\n previous_msg.pack(pady=5, padx=5, side=LEFT)\r\n text_widget.insert(END, \"from: \" + self.msg_list[msg_num - 1][2] + \"\\n\")\r\n text_widget.tag_add('sender', '1.0', '1.end')\r\n text_widget.tag_config('sender', font='none 14')\r\n\r\n text_widget.insert(END, self.msg_list[msg_num - 1][0])\r\n text_widget.tag_add('msg', '2.0', END)\r\n text_widget.tag_config('msg', font='none 12')\r\n\r\n text_widget.config(state=DISABLED)", "def __sendMessage(self):\n # TODO: Switch to this when implemented\n \n msg = self.ui.inputWidget.toPlainText()\n self.ui.inputWidget.clear()\n strv = StringView()\n strv.appendText(unicode(msg))\n self._amsn_conversation.sendMessage(strv)\n self.ui.textEdit.append(\"<b>/me says:</b><br>\"+unicode(msg)+\"\")", "def scrollDown(self):\n if self.__firstShownLine < len(self.__data) - 1:\n self.__firstShownLine += 1\n self.__refreshContent()\n self.__printRow(self.__firstShownLine + self.height - 2)\n else:\n curses.beep()", "async def on_chat_message(self, chat_message):\n pass", "def display_message(self, message):\n with self.lock:\n self.url_list.configure(state='normal')\n self.url_list.insert(tk.END, message)\n self.url_list.see(tk.END)", "def _show_message(self, message, message_color, background_color=(0, 0, 0)):\n\n # Need to be sure we revert any changes to rotation\n self._sense_hat.rotation = 0\n self._sense_hat.show_message(message, Config.SCROLL_TEXT_SPEED, message_color, background_color)", "async def chat_message(self, event):\n await self.send_json(\n return_value(\n ACTION_MESSAGE,\n event['label'],\n event['username'],\n MSG_MESSAGE,\n event['message']\n )\n )", "def update_messages():\n\n scrollbar = Scrollbar(root)\n scrollbar.pack(side=RIGHT, fill=Y)\n listbox = Text(root, wrap =WORD, yscrollcommand=scrollbar.set, background=\"#CCFFCC\", fg=\"black\", selectbackground=\"#003300\",\n highlightcolor=\"#0033CC\")\n\n msgs = []\n run = True\n while run:\n\n time.sleep(0.1) # update every 1/10 of a second\n new_messages = c1.get_messages() # get any new messages from client\n msgs.extend(new_messages) # add to local list of messages\n\n for msg in new_messages: # display new messages\n print(msg)\n #title_label = Label(text=str(msg), bg=\"#CCFFCC\", fg=\"black\", padx=34, pady=5, font=\"comicsansms 9 bold\",borderwidth=3,wraplength=300, relief=SUNKEN)\n #title_label.pack(side=TOP)\n\n listbox.insert(END, str(msg)+'\\n\\n')\n listbox.pack(fill=BOTH, padx=36)\n scrollbar.config(command=listbox.yview)\n\n if msg == \"{quit}\":\n root.destroy()\n run = False\n break", "def new_message(self, room, mess):\n pass", "async def chat_message(self, event):\n await self.send(\n {'type': \"websocket.send\",\n 'text': event['response_data']}\n )", "def add_message(self,message,room):\n\n # strip that message\n message = message.strip()\n\n log.debug('adding message: %s %s' % (message,room))\n\n # add a dequeue for the room if its not there\n if room not in self.rooms:\n d = deque(maxlen=self.maxlen)\n self.rooms[room] = d\n\n # add the msg to the collection\n self.rooms.get(room).appendleft(\n Message(time(),message))\n\n # return the message for good measure\n return self.rooms.get(room)[-1]", "def add(self, message):\n if self.verify_message( message ):\n 
self.vis_data.addElem( message )", "def send_message(self,message):\n connected=False\n self.driver_Lock.acquire()\n while(not connected):\n try:\n whatsapp_msg = self.driver.find_element_by_class_name('_2S1VP') #find text box element\n connected=True\n except Exception as exc:\n print(exc)\n sleep(1)\n\n if(isinstance(message,str)): #check if the message is of type string\n whatsapp_msg.send_keys(message) #input message\n whatsapp_msg.send_keys(Keys.SHIFT+Keys.ENTER) #create new line\n\n elif(isinstance(message,list)): #check if the message is of type list\n for line in message: #run through all the lines\n whatsapp_msg.send_keys(line) #input line\n whatsapp_msg.send_keys(Keys.SHIFT+Keys.ENTER) #create new line\n\n whatsapp_msg.send_keys(Keys.SHIFT+Keys.ENTER) #create new line\n whatsapp_msg.send_keys(\"-{}\".format(bot_name)) #add bot name tag\n\n whatsapp_msg.send_keys(Keys.ENTER) #send message\n self.driver_Lock.release() #release driver lock", "async def add_message(self, message_id: int, chat_id: int):\n logging.info('Db: Add new message.')\n command = self.ADD_MESSAGE\n return await self.pool.fetchval(command, message_id, chat_id)", "def on_message(\n self, client: mqtt.Client, userdata: typing.Any, msg: mqtt.MQTTMessage\n ) -> None:\n self.msgs.append(msg)", "def characters(self, ch):\n if self.inMessageContent:\n self.message = self.message + ch\n self.messages.append(self.message)", "def comment(self, msg):\n\t\tself._client.add_comment(self, msg)", "def printToUser(self, message):\n \n self.messages_to_user.setText(message)\n self.messages_to_user.adjustSize()", "def log_msg(self, msg):\n self.log.append(msg + \"\\n\")\n self.log.setCaretPosition(self.log.getDocument().getLength())", "def add_message(username, message):\n now = datetime.now().strftime(\"%H:%M:%S\")\n messages.append(\"({}) {}: {}\".format(now, username, message))", "def on_message(self, message):\n self.write_message(u\"%s\" % message)", "def add_messages(username, message):\n now = datetime.now().strftime(\"%H:%M:%S\")\n message_dict = {'timestamp': now, 'from':username, 'message':message}\n \n \"\"\"Write the chat message to messages.txt\"\"\"\n write_to_file(\"data/messages.txt\", \"{0} - {1}: {2} \\n\".format(message_dict['timestamp'], message_dict['from'].title(), message_dict['message']))", "def show_message(message, col=c.r, update=False):\n g.content = generate_songlist_display()\n g.message = col + message + c.w\n\n if update:\n screen_update()", "def on_message(self, _, message):\n with self.message_lock:\n self.messages.append(Message.deserialize(message))\n self.new_message_available.set()\n super().on_message(_, message)", "def scrollReceiveToBottom(self, parent, start, end):\n QtCore.QTimer.singleShot(0, self.receiveTable.scrollToBottom)\n self.receiveTable.resizeColumnToContents(1)", "def add_to_queue(self, msg):\n if not self.queue.full():\n self.queue.put(msg)", "def __send_msg(self, msg):\n self.frame_nb += 1\n self.__send_frame(self.frame_nb, msg)", "def send_message(self, message: str):\n self.client.chat_postMessage(\n channel=f\"@{self.username}\", text=message,\n )", "def chatReceiveMessage(self, chat, user, message):\n self.on_message(user, message, False, False, chat)", "async def chat_message(self, event):\n if self.user and not self.user.is_authenticated:\n return\n\n user_id = event['user_id']\n message = event['message']\n created_at = event['created_at']\n publisher_full_name = event['publisher_full_name']\n\n await self.send(text_data=json.dumps({\n 'user_id': user_id,\n 
'created_at': created_at,\n 'message': \"{}\".format(message),\n 'publisher_full_name': publisher_full_name,\n }))", "def new_message_from_conn(self, friend, msg):\n print(\"new_msg signal activated with friend\",friend,\"and msg\",msg)\n\n if not self.stack.get_child_by_name(friend):\n new_chat_window = chat_layout.ChatLayout(orientation=Gtk.Orientation.VERTICAL,friend=friend)\n new_chat_window.show_all()\n self.stack.add_titled(new_chat_window, friend, friend)\n\n child = self.move_to_child(friend)\n child.append_friend_text(msg)", "def msg_handler(self, msg):\n self.view.frame.log.append(msg)", "def AddMessage(self, name, time, message):\n pass", "def msg(self, message):\n\n message = PushoverMessage(message)\n self.messages.append(message)\n return message", "def update(self, msg):\r\n self.msgVar.set(msg)", "def update(self, msg):\r\n self.msgVar.set(msg)", "def enter_message(self, message):\n self.selib.input_text(self.locator.message, message)", "def characters(self, message):\n self._message = self._message + message", "def send_message(self, robot_id, message):\n self.messages_to_send.append([robot_id, message])", "def send_message(self, robot_id, message):\n self.messages_to_send.append([robot_id, message])", "def send_message(self, robot_id, message):\n self.messages_to_send.append([robot_id, message])", "def _log_prepend(self, msg):\n\t\tp = self._edit.get_buffer()\n\t\tstart = p.get_start_iter()\n\t\tp.insert(start, msg)\n\t\tself._trunc_lines()\n\t\tself._edit.scroll_to_iter(p.get_start_iter(), 0.0)", "def update(self, msg):\n self.msgVar.set(msg)", "async def chat_message(self, event):\n\n print(\"PublicChatConsumer\", \"chat_message from user\", event[\"user_id\"])\n await self.send_json({\n \"msg_type\": MSG_TYPE_MESSAGE,\n \"profile_image\": event[\"profile_image\"],\n \"username\": event[\"username\"],\n \"user_id\": event[\"user_id\"],\n \"message\": event[\"message\"],\n \"natural_timestamp\": humanize_or_normal(timezone.now())\n })", "def sendMessage(self, message):\n message.counter = self.counter\n self.socket.send(message.tobytes())\n self.counter += 1", "async def handle_add(message: types.Message):\n chat_id = message[\"chat\"][\"id\"]\n text = message[\"text\"].lower().strip()\n\n if len(text.split()) > 1:\n await add_subscriptions(\n chat_id, text.replace(\",\", \" \").replace(\"+\", \" \").split()[1:]\n )\n else:\n await StateMachine.asked_add.set()\n inline_keyboard = types.InlineKeyboardMarkup()\n inline_keyboard.add(\n types.InlineKeyboardButton(\"cancel\", callback_data=\"cancel\")\n )\n await reply(\n message,\n \"What would you like to subscribe to?\",\n reply_markup=inline_keyboard,\n )", "def send(self, msg):\r\n self.msgLock.acquire()\r\n self.msg.append(msg)\r\n self.numMsg += 1\r\n self.msgLock.release()", "def send_message(self, message):\n source_guid = str(uuid.uuid1())\n date = time.strftime(\"%H:%M:%S\")\n self.api.send_message(\n self.conversation_type,\n self.cid,\n source_guid,\n message[:1000]\n )\n if self.api.send_message(self.conversation_type, self.cid, source_guid, message):\n self.append_message(source_guid, 'me', date, message[:1000])\n if len(message) > 1000:\n self.send_message(message[1000:])", "def client(self,message):\n self.message = message\n self.run()", "def _send(self, message):\r\n if not message:\r\n return\r\n\r\n self._maybe_print('twitch out queued: ' + message)\r\n self.buffer.append(message + \"\\n\")", "def reply(self, text=None):\n self.message.click()\n self.message.send_keys(Keys.ARROW_RIGHT)\n try:\n 
self.message.find_element_by_xpath(\"//div[@aria-label='Reply']\").click()\n except NoSuchElementException:\n raise Exception(\"Message has been been deleted\")\n if text is not None:\n self.get_chat().send_message(text)", "def add_message():\n if 'user_id' not in session:\n abort(401)\n if request.form['text']:\n db = get_db()\n db.execute('''insert into message (author_id, text, pub_date)\n values (?, ?, ?)''', (session['user_id'], request.form['text'],\n int(time.time())))\n db.commit()\n flash('Your message was recorded')\n return redirect(url_for('timeline'))", "def add_message_to_queue(self, message):\n\t\t\t\tself.message_queue.append(message)\n\t\t\t\treturn self.message_queue", "def addMessage(self, message, flags, date = None):\n self.messages.append((message, flags, date, self.mUID))\n self.mUID += 1\n return defer.succeed(None)", "def _send_message(self, e: Event):\n\n message = self.message_text.get(\"1.0\", 'end-1c').replace('\\n', \"\")\n\n if len(message) > 0:\n self.add_message_to_chat('you: ' + message)\n self._clear_message_text()\n self.connection_socket.send(bytes('them: ' + message, 'utf-8'))", "def provide_command_feedback(self, message):\n self.refresh() # reset the display\n\n self.command_feedback_bar.draw_text(message)\n self.input_box.clear()\n self.input_box.draw_text(\"PRESS (C) to CONTINUE\")\n selection = \"\"\n while selection != ord(\"C\") and selection != ord(\"c\"):\n selection = self.get_input_ch()\n\n self.refresh() # reset it again", "def send_message(self, message):\n \n msgPacket = serverbound.play.ChatPacket()\n msgPacket.message = message\n self.connection.write_packet(msgPacket)", "def add_message(self, username, message, mode=None):\n username = filter_printable(username)\n message = filter_printable(message)\n if curses.has_colors():\n if mode == 'REVERSE':\n mode = curses.A_REVERSE\n elif mode == 'ENCRYPTED':\n mode = curses.color_pair(1)\n elif mode: # If mode is not None\n raise ValueError('Invalid mode')\n else:\n mode = None\n self.message_log.append(\n {'user': username, 'msg': message, 'mode': mode}\n )\n if self.lanchat.beep:\n curses.beep()", "def add_item(self, command, screen_output, scroll_position):\n if self._pointer + 1 < len(self._items):\n self._items = self._items[:self._pointer + 1]\n self._items.append(\n NavigationHistoryItem(command, screen_output, scroll_position))\n if len(self._items) > self._capacity:\n self._items = self._items[-self._capacity:]\n self._pointer = len(self._items) - 1", "def setMessage(self, message):\n self.message = str(message)\n self.app.processEvents()\n QtCore.QTimer(self).singleShot(5,self._messageDelayed)\n self.app.processEvents()", "def __redrawChat(self):\n self.__chatWin.clear()\n chats = self._client.currentChannel().chatHistory()\n count = min(len(chats), self.__chatWin.getmaxyx()[0])\n shown = chats[-count:]\n for c in shown:\n self.__chatWin.addstr(c + \"\\n\")\n\n self.__update()", "async def send_message(self, message: dict) -> None:\n await self.client.chat_postMessage(channel=self.channel_id, **message)", "def sendChatMessage(self, msg):\n self.transport.write(msg)", "def text_e(self, event):\n directory=os.getcwd()+ '/messages'\n filename=str(self.user)+'_'+str(self.friend)\n text = self.text_send.GetValue()\n messages = mf.addMessage(self.user, self.friend, self.passw, text)\n mf.makeTextFile(self.user, self.friend, self.passw, messages)\n \n self.chat_log.LoadFile('/'.join((directory, filename)))\n self.text_send.SetValue(\"\")\n event.Skip()", "def direct_message(self, user, 
msg, num):\n PAUSE = 1\n logging.info('Send message {} to {}'.format(msg,user))\n self.driver.get(self.direct_url)\n self.driver.find_elements_by_xpath('/html/body/div[2]/div/div/div[2]/div[1]/div/div[2]/input')[0].send_keys(user)\n time.sleep(PAUSE)\n self.driver.find_elements_by_xpath('/html/body/div[5]/div/div/div/div[3]/button[2]')[0].click() #Edge case to get rid of notification\n time.sleep(PAUSE)\n self.driver.find_elements_by_xpath('/html/body/div[2]/div/div/div[2]/div[2]/div/div/div[3]/button')[0].click()\n self.driver.find_elements_by_xpath('/html/body/div[2]/div/div/div[1]/div/div[2]/div/button')[0].click()\n time.sleep(PAUSE)\n # The message will be placed and sent\n self.driver.find_elements_by_xpath('//*[@id=\"react-root\"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[2]/textarea')[0].send_keys(msg)\n time.sleep(PAUSE)\n self.driver.find_elements_by_xpath('//*[@id=\"react-root\"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/div[3]/button')[0].click()\n # Special feature involving reacting with heart\n for x in range(num):\n self.driver.find_elements_by_xpath('//*[@id=\"react-root\"]/section/div/div[2]/div/div/div[2]/div[2]/div/div[2]/div/div/button[2]')[0].click()\n time.sleep(PAUSE)", "def appendToReceiveMsgBuffer(self, Message):\n self.ReceiveMessageBuffer.append(Message)", "def receive_message(self, message):\r\n return", "async def send_room_previous_chats(self, message_data: dict):\n print(\"PublicChatConsumer\", \"send_room_previous_chats\")\n await self.send_json({\n \"messages_payload\": \"messages_payload\",\n \"messages\": message_data[\"messages\"],\n \"new_page_number\": message_data[\"new_page_number\"]\n })", "def on_message(self, message):\n #print(f\"This message was sent: {message}\") # Writes to the console window (server side)\n self.write_message(f\"This message was sent: {message}\") # Writes message to sender" ]
[ "0.6858262", "0.68089944", "0.6778456", "0.67170316", "0.66270477", "0.65076065", "0.6480786", "0.6332747", "0.6320004", "0.6282979", "0.6253987", "0.6231954", "0.6195", "0.6192585", "0.61682373", "0.61682373", "0.6153406", "0.61134017", "0.6108938", "0.60916406", "0.60653615", "0.6035066", "0.6014792", "0.60144955", "0.60139024", "0.6002169", "0.59412557", "0.5919042", "0.59148926", "0.5905361", "0.5897642", "0.5894144", "0.5869653", "0.5854924", "0.5850056", "0.58459264", "0.58313745", "0.58183724", "0.5812973", "0.58039993", "0.57856816", "0.5781202", "0.5773801", "0.5772236", "0.5770621", "0.5754375", "0.5714087", "0.5699602", "0.5693328", "0.5679895", "0.5653702", "0.56441236", "0.564028", "0.5632957", "0.56175995", "0.5615125", "0.56094575", "0.5600561", "0.55892247", "0.55866724", "0.5569616", "0.55447507", "0.5541027", "0.5535905", "0.55283755", "0.55282694", "0.55282694", "0.5517936", "0.5514488", "0.5511331", "0.5511331", "0.5511331", "0.5503937", "0.55020636", "0.549134", "0.5490972", "0.54847556", "0.5484431", "0.54742813", "0.54620373", "0.54584", "0.54558986", "0.5443844", "0.5443602", "0.54420835", "0.5435855", "0.5434108", "0.54173696", "0.54169875", "0.5412417", "0.5405625", "0.5405087", "0.5404402", "0.5403917", "0.5401161", "0.5394911", "0.53905797", "0.5388096", "0.53855276", "0.5382066" ]
0.799143
0
Creates a data folder containing a 100-class subset of ImageNet, then creates a zipped copy of it
def zip_imagenet100c():
    #First make sure the directory we are given is correct!
    if not os.path.isdir(DATA_SRC_ROOT):
        raise Exception("Bad filepath given")

    #create the destination directories if they don't exist
    if not os.path.isdir(IMAGENET100_DIR):
        os.mkdir(IMAGENET100_DIR)

    #grab the subset wnids for the 100 class-subset
    with open(IMAGENET100_CLASSES) as f:
        subset_wnids = f.readlines()
    subset_wnids = [x.strip() for x in subset_wnids] #list of the 100 WNIDs we grab

    #Grab the names of all of the folders inside the root data source
    #Structure is distortion/sub_distortion/level/wnids
    for distortion in os.listdir(DATA_SRC_ROOT):
        if distortion != "meta.bin":
            print(distortion)
            folder_path = os.path.join(DATA_SRC_ROOT, distortion)
            if not os.path.isdir(folder_path):
                continue
            for sub_distortion in os.listdir(folder_path):
                print(sub_distortion)
                subfolder_path = os.path.join(folder_path, sub_distortion)
                if not os.path.isdir(subfolder_path):
                    continue
                for level in os.listdir(subfolder_path):
                    print(level)
                    level_path = os.path.join(subfolder_path, level)
                    #grab the correct validation directories
                    for wnid in os.listdir(level_path):
                        wnid_path = os.path.join(level_path, wnid)
                        if not os.path.isdir(wnid_path):
                            continue
                        if wnid in subset_wnids:
                            dest_path = os.path.join(IMAGENET100_DIR, distortion, sub_distortion, level, wnid)
                            shutil.copytree(wnid_path, dest_path)

    #copy the metadata bin file
    meta_file = os.path.join(DATA_SRC_ROOT, 'meta.bin')
    meta_dest = os.path.join(IMAGENET100_DIR, 'meta.bin')
    shutil.copy(meta_file, meta_dest)

    #Zip the destination directory
    shutil.make_archive(ZIP_PATH + '/ImageNet100C', 'tar', IMAGENET100_DIR)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_dataset(data_folder: str, dataset_file: str, targets_file: str = os.path.join('data', 'targets.pkl')):\n files = sorted(glob.glob(os.path.join(data_folder, '**/*.jpg'), recursive=True))\n images = []\n crop_sizes = []\n crop_centers = []\n targets = []\n for image in tqdm(files, desc='creating dataset', total=len(files)):\n img = Image.open(image)\n # quadruple dataset by vertical and horizontal flipping\n for i in range(4):\n if i == 1 or i == 3:\n img = img.transpose(Image.FLIP_LEFT_RIGHT)\n if i == 2:\n img = img.transpose(Image.FLIP_TOP_BOTTOM)\n x, y, w, h, cx, cy = get_random_image_values()\n resized = img.resize((y, x), Image.LANCZOS) # mind thee: x and y swapped\n arr = np.array(resized, dtype=np.float32)\n arr, target_array = create_cropped_data(np.copy(arr), (w, h), (cx, cy), crop_only=False)\n images.append(arr)\n crop_sizes.append((w, h))\n crop_centers.append((cx, cy))\n targets.append(target_array)\n data = {'images': images, 'crop_sizes': crop_sizes, 'crop_centers': crop_centers}\n # persist on harddrive\n with open(dataset_file, 'wb') as f:\n pickle.dump(data, f)\n with open(targets_file, 'wb') as f:\n pickle.dump(targets, f)\n print(f'created datset and saved it to {dataset_file} and targets to {targets_file}')", "def zip_files():\n zipper = ZipFile(\"Moritz_Bunse_ML_project.zip\", \"w\")\n files_to_write = [\"poi_id.py\",\n \"my_classifier.pkl\",\n \"my_dataset.pkl\",\n \"my_feature_list.pkl\",\n \"tester.py\",\n \"Look+At+Enron+data+set.html\",\n \"Look At Enron data set.ipynb\",\n \"data_dict.pkl\",\n \"final_project_dataset.pkl\",\n \"img/Flow chart feature selection.png\"\n ]\n for filename in files_to_write:\n zipper.write(filename)\n\n zipper.close()", "def data_directory(class_labels):\n\n dataset_folders = ['train','validation','test']\n object_class = class_labels\n os.mkdir(BASE_DIR)\n\n for folder in dataset_folders:\n for obj_cls in object_class:\n training_dir = BASE_DIR + os.sep +'{}'.format(folder)\n if not os.path.exists(BASE_DIR+os.sep +'{}'.format(folder)):\n os.mkdir(training_dir)\n class_dir = training_dir + os.sep + '{}'.format(obj_cls)\n if not os.path.exists(training_dir + os.sep + '{}'.format(obj_cls)):\n os.mkdir(class_dir)", "def create_folders():\n if not os.path.exists(\"data/train-npy/\"):\n os.makedirs(\"data/train-npy/\")\n if not os.path.exists(\"data/test-npy/\"):\n os.makedirs(\"data/test-npy/\")\n if not os.path.exists(\"data/valid-npy/\"):\n os.makedirs(\"data/valid-npy/\")", "def MakeDataSetFiles(dirname):\n\n\n if not os.path.exists(dirname):\n os.mkdir(dirname)\n if not os.path.exists(os.path.join(dirname, 'train')):\n os.mkdir(os.path.join(dirname, 'train'))\n if not os.path.exists(os.path.join(dirname, 'test')):\n os.mkdir(os.path.join(dirname, 'test'))\n data_train = fetch_20newsgroups(subset='train', categories=None, shuffle=True, random_state=42)\n data_test = fetch_20newsgroups(subset='test', categories=None, shuffle=True, random_state=42)\n\n if dirname[-1] == '/' or dirname[-1] == '\\\\':\n dirname = dirname[:-1]\n \n Util.WriteClassFile(data_train.target, os.path.join(dirname, 'train_classes.txt'))\n Util.WriteClassFile(data_test.target,os.path.join(dirname, 'test_classes.txt'))\n\n\n train_counter = 0;\n for doc in data_train.data:\n filename = 'train_' + str(train_counter).zfill(5);\n f = file(os.path.join(dirname, 'train', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n train_counter = train_counter + 1;\n\n test_counter = 0;\n for doc in data_test.data:\n filename = 'test_' + 
str(test_counter).zfill(5);\n f = file(os.path.join(dirname, 'test', filename), 'w');\n f.write(doc.encode('ascii', 'ignore'));\n f.close();\n test_counter = test_counter + 1;\n\n class_index = file(os.path.join(dirname, 'class_label_index.txt'), 'w')\n for label in data_train.target_names:\n class_index.write(label + '\\n')\n class_index.close()", "def distributeDataset(destinationFolder, testFolder, trainFolder):\n \n # Set up directories for test and training data sets\n if not os.path.exists(testFolder):\n os.makedirs(testFolder)\n if not os.path.exists(trainFolder):\n os.makedirs(trainFolder)\n\n # Generate list of directories\n dirs = []\n for i in range(0,8):\n dirs.append(os.path.join(destinationFolder, \"NISTSpecialDatabase4GrayScaleImagesofFIGS\\\\sd04\\\\png_txt\\\\figs_\" + str(i)))\n\n # Extract Test data\n files = os.listdir(dirs[0])\n\n for filename in files:\n shutil.copy(os.path.join(dirs[0], filename), testFolder)\n shutil.rmtree(dirs[0])\n\n # Extract Train data\n for i in range(1,8):\n\n files = os.listdir(dirs[i])\n for filename in files:\n shutil.copy(os.path.join(dirs[i], filename), trainFolder)\n shutil.rmtree(dirs[i])\n shutil.rmtree(os.path.join(destinationFolder, \"NISTSpecialDatabase4GrayScaleImagesofFIGS\"))", "def prepare_data_for_training(args):\n # Form the train/test splits and write them to disk\n dataset = data.Dataset(args)\n # get image classes and image counts in each class\n label_map = dataset.get_class_info()\n class_count = len(list(label_map.values()))\n # split the data and store it in log dir\n df_train, df_test = dataset.split_dataset()\n\n # perform dataset augmentations\n image_data = augment.Augmentation(args)\n # get the data gens for training and test images\n train_data_gen, _ = image_data.map_fn_train(df_train)\n test_data_gen, _ = image_data.map_fn_test(df_test)\n\n return train_data_gen, test_data_gen, df_train, df_test, class_count", "def training_data_generation(DATA_DIR, img_height_size, img_width_size, label_list):\r\n \r\n img_ms_files = glob.glob(DATA_DIR + '\\\\Train_MS' + '\\\\Train_*.tif')\r\n img_pan_files = glob.glob(DATA_DIR + '\\\\Train_Pan' + '\\\\Train_*.tif')\r\n polygon_files = glob.glob(DATA_DIR + '\\\\Train_Polygons' + '\\\\Train_*.geojson')\r\n \r\n img_ms_array_list = []\r\n img_pan_array_list = []\r\n mask_array_list = []\r\n \r\n for file in range(len(img_ms_files)):\r\n with rasterio.open(img_ms_files[file]) as f:\r\n metadata = f.profile\r\n img_ms = np.transpose(f.read(tuple(np.arange(metadata['count']) + 1)), [1, 2, 0])\r\n \r\n with rasterio.open(img_pan_files[file]) as g:\r\n metadata_pan = g.profile\r\n img_pan = np.expand_dims(g.read(1), axis = 2)\r\n \r\n ms_to_pan_ratio = metadata['transform'][0] / metadata_pan['transform'][0]\r\n \r\n if (img_height_size % ms_to_pan_ratio) != 0 or (img_width_size % ms_to_pan_ratio) != 0:\r\n raise ValueError('Please make sure that both img_height_size and img_width_size can be divided by {}'.format(int(ms_to_pan_ratio)))\r\n \r\n mask = training_mask_generation(img_pan_files[file], polygon_files[file], labels = label_list)\r\n \r\n img_ms_array, img_pan_array, mask_array = image_clip_to_segment_and_convert(img_ms, img_pan, mask, ms_to_pan_ratio, \r\n img_height_size, img_width_size)\r\n \r\n img_ms_array_list.append(img_ms_array)\r\n img_pan_array_list.append(img_pan_array)\r\n mask_array_list.append(mask_array)\r\n \r\n img_ms_full_array = np.concatenate(img_ms_array_list, axis = 0)\r\n img_pan_full_array = np.concatenate(img_pan_array_list, axis = 0)\r\n 
mask_full_array = to_categorical(np.concatenate(mask_array_list, axis = 0), num_classes = len(label_list))\r\n \r\n return img_ms_full_array, img_pan_full_array, mask_full_array", "def create_train_folder(df_train, target_path):\n folder_path = os.path.join(target_path, 'xray_preprocess/train')\n print(f'Create train set at: {folder_path}')\n for _, row in tqdm(df_train.iterrows(), total=df_train.shape[0]):\n if row['class']=='negative':\n destination_path = os.path.join(folder_path, 'negative')\n elif row['class']=='positive':\n destination_path = os.path.join(folder_path, 'positive')\n if not os.path.exists(destination_path):\n os.makedirs(destination_path) \n img = os.path.join(target_path, 'xray', 'train', row['filename'])\n shutil.copy(img, destination_path )", "def prepare_data(src, dst):\n\n data_prefix = 'miniCelebA_'\n for split in ['train', 'val', 'test']:\n print('processing %s split' % split)\n if (not os.path.exists(os.path.join(dst, 'x_' + split + '.npy')) or not\n os.path.exists(os.path.join(dst, 'y_' + split + '.npy'))):\n labels = glob(os.path.join(src, split, '*'))\n no_sample = 0\n for lb in labels:\n no_sample += len(os.listdir(lb))\n\n x = np.zeros((no_sample, 224, 224, 3))\n y = np.zeros((no_sample, 20))\n count = 0\n for lb in labels:\n files = glob(os.path.join(lb, '*.png'))\n for f in files:\n print('processing file: %s, with label %s' % (f, lb.split('/')[-1]))\n y[count] = to_categorical(int(lb.split('/')[-1]), 20)\n img = misc.imresize(misc.imread(f), (224, 224), 'bicubic')\n if img.ndim == 2:\n img = np.expand_dims(img, -1)\n img = np.concatenate((img, img, img), axis=-1)\n x[count] = img\n\n count += 1\n\n assert count == no_sample, \"number of sample (%d) is different than number of read image (%d)\" % (\n no_sample, count)\n\n x = get_deep_feature(x)\n np.save(os.path.join(dst, data_prefix + 'x_' + split + '.npy'), x)\n np.save(os.path.join(dst, data_prefix + 'y_' + split + '.npy'), y)", "def _make_dataset(input_dir, output_dir, image_size, margin, split='train'):\n input_dir = os.path.join(input_dir, split)\n\n output_root = os.path.join(output_dir, split)\n if not os.path.exists(output_root):\n os.makedirs(output_root)\n\n class_folders = glob.glob(os.path.join(input_dir, '*'))\n detector = MTCNN()\n\n for class_folder in class_folders:\n target_output_dir = os.path.join(output_root, class_folder.split('/')[-1])\n if not os.path.exists(target_output_dir):\n os.makedirs(target_output_dir)\n\n target_files = glob.glob(os.path.join(class_folder, '*'))\n logger.debug('processing %s...', class_folder)\n for file in target_files:\n img = cv2.imread(file)\n detect_result = detector.detect_faces(img)\n\n if not detect_result:\n logger.warning('WARNING: failed to detect face in file %s, skip', file)\n continue\n\n x0, y0, width, height = detect_result[0]['box']\n x1, y1 = x0 + width, y0 + height\n\n x0 = max(x0 - margin // 2, 0)\n y0 = max(y0 - margin // 2, 0)\n x1 = min(x1 + margin // 2, img.shape[1])\n y1 = min(y1 + margin // 2, img.shape[0])\n\n face_img = img[y0:y1, x0:x1, :]\n face_img = cv2.resize(face_img, dsize=(image_size, image_size),\n interpolation=cv2.INTER_LINEAR)\n\n filename = file.split('/')[-1]\n img_name = filename.split('.')[0]\n cv2.imwrite(os.path.join(target_output_dir, filename),\n face_img)\n with open(os.path.join(target_output_dir, img_name + '.txt'), 'w') as f:\n f.write('%d %d %d %d\\n' % (x0, y0, x1, y1))\n logger.debug('processing %s finished!', class_folder)", "def make_dataset():\n\n\tnumberOfTrials = 
dataset_params.num_of_samples\n\tnumberOfTrials_train = int(numberOfTrials*0.8)\n\tnumberOfTrials_test = int(numberOfTrials*0.2)\n\n\tprint(\"==================================================\")\n\tprint(\"1. Generating Train images ......\")\n\tprint(\"\\nTrain image per variation\", numberOfTrials_train)\n\tmakeDataset(numberOfTrials_train, \"train\")\n\n\tprint(\"==================================================\")\n\tprint(\"2. Generating Test images ......\")\n\tprint(\"\\nTest image per variation\", numberOfTrials_test)\n\tmakeDataset(numberOfTrials_test, \"test\")\n\n\tprint(\"==================================================\")\n\tprint(\"Done!!!\")", "def save_all_features(nb_samples, source=\"./datasets/D1/images/\", dest=\"./datasets/D1/features/\", input_size=(416, 416), batch_size=16):\n\n # check if the directory exists, and if not make it\n if not os.path.exists(dest):\n os.makedirs(dest)\n\n # define image height and width\n (img_height, img_width) = input_size\n\n # build the VGG16 network and extract features after every MaxPool layer\n model = VGG16(weights='imagenet', include_top=False)\n\n c1 = model.layers[-16].output\n c1 = GlobalAveragePooling2D()(c1)\n\n c2 = model.layers[-13].output\n c2 = GlobalAveragePooling2D()(c2)\n\n c3 = model.layers[-9].output\n c3 = GlobalAveragePooling2D()(c3)\n\n c4 = model.layers[-5].output\n c4 = GlobalAveragePooling2D()(c4)\n\n c5 = model.layers[-1].output\n c5 = GlobalAveragePooling2D()(c5)\n\n\n model = Model(inputs=model.input, outputs=(c1, c2, c3, c4, c5))\n\n # always save your weights after training or during training\n model.save_weights('first_try.h5')\n model.save('model_save')\n\n # define image generator without augmentation\n datagen = ImageDataGenerator(rescale=1. / 255.)\n\n generator = datagen.flow_from_directory(\n source,\n target_size=(img_height, img_width),\n batch_size=batch_size,\n class_mode=\"sparse\",\n shuffle=False)\n\n # generate and save features, labels and respective filenames\n steps = nb_samples / batch_size + 1\n X = model.predict_generator(generator, steps)\n Y = np.concatenate([generator.next()[1] for i in range(0, generator.samples, batch_size)])\n names = generator.filenames\n\n for n, i in enumerate(X):\n print(\"Saving \" + n + \" and \" + i)\n with open(dest + \"X-\" + str(img_height) + \"-c\" + str(n + 1) + \"-AVG.npy\", 'w') as f:\n np.save(f.name, i)\n\n if not os.path.exists(dest + \"Y.npy\"):\n with open(dest + \"Y.npy\", 'w') as f:\n np.save(f.name, Y)\n\n if not os.path.exists(dest + \"filenames.npy\"):\n with open(dest + \"filenames.npy\", 'w') as f:\n np.save(f.name, names)", "def gen_data_dir(img_dir, id_label_dict, num_class, shuffle=True):\n img_file_path = gen_img_files(img_dir, shuffle)\n return gen_data_file(img_file_path, id_label_dict, num_class)", "def create_noobj_folder(\n folder: PathLike, \n img_ext: str = \".jpg\",\n):\n folder = Path(folder).expanduser().resolve()\n images = glob(folder, img_ext)\n \n for image in images:\n filename = image.name\n _folder = image.parent.name\n path = folder / (image.stem + \".xml\")\n img_w, img_h = get_image_size(image)\n\n tree = ET.Element(\"annotation\")\n\n et_folder = ET.SubElement(tree, \"folder\")\n et_folder.text = _folder\n\n et_filename = ET.SubElement(tree, \"filename\")\n et_filename.text = filename\n\n et_path = ET.SubElement(tree, \"path\")\n et_path.text = str(path)\n\n et_img_size = ET.SubElement(tree, \"size\")\n ET.SubElement(et_img_size, \"width\").text = str(img_w)\n ET.SubElement(et_img_size, \"height\").text = 
str(img_h)\n ET.SubElement(et_img_size, \"depth\").text = \"3\"\n\n content = ET.tostring(tree, encoding=\"unicode\", pretty_print=True)\n try: \n path.write_text(content)\n except KeyboardInterrupt:\n path.write_text(content)\n exit()", "def preprocess(data_path, dataset):\n il_data_path = os.path.join(data_path, 'il' + dataset)\n train_path = os.path.join(il_data_path, 'train')\n val_path = os.path.join(il_data_path, 'val')\n\n if os.path.isdir(il_data_path):\n return\n\n os.makedirs(train_path)\n os.makedirs(val_path)\n\n train_set = _datasets[dataset](data_path, train=True, download=True)\n val_set = _datasets[dataset](data_path, train=False, download=True)\n\n # dump pickles for each class\n for cur_set, cur_path in [[train_set, train_path], [val_set, val_path]]:\n for idx, item in enumerate(cur_set):\n label = item[1]\n if not os.path.exists(os.path.join(cur_path, str(label))):\n os.makedirs(os.path.join(cur_path, str(label)))\n with open(os.path.join(cur_path, str(label), str(idx) + '.p'), 'wb') as f:\n pickle.dump(item, f)", "def exporting_cropped_images (fpath_tiff):\n src = rasterio.open(fpath_tiff, 'r')\n outfolder_irregular = '/train/irregular'\n outfolder_healthy = '/train/healthy'\n outfolder_concrete = '/train/concrete'\n outfolder_incomplete = '/train/incomplete'\n outfolder_other = '/train/other'\n outfolder = '/train/batch'\n #os.makedirs (outfolder, exist_ok = True)", "def generate_nmnist_dataset(initial_size, input_dir, num_spikes, step_factor):\n image_dataset = np.rec.array(None, dtype=[('height', np.uint16),\n ('width', np.uint16),\n ('image_data', 'object'),\n ('label', np.uint32)],\n shape=initial_size)\n num_images = 0\n\n # loop through each folder within the test directories\n for i in range(0, 10):\n current_dir = input_dir + os.path.sep + str(i) + os.path.sep + '*.bin'\n print('Processing {}...'.format(current_dir))\n for filename in glob.iglob(current_dir):\n images = prepare_n_mnist(filename, True, num_spikes, step_factor)\n if num_images + len(images) >= image_dataset.size:\n image_dataset = np.resize(image_dataset,\n (num_images + len(images)) * 2)\n add_images_to_dataset(image_dataset, images, num_images, i, 28, 28)\n num_images += len(images)\n\n return image_dataset[0:num_images]", "def compress_wrapper(args: Namespace) -> None:\n directory_path = os.path.join(DATASETS_DIR, args.directory)\n compress_datasets(directory_path, args.holdout)", "def create_random_data(output_path: str, num_images: int = 5) -> None:\n train_path = os.path.join(output_path, \"train\")\n class1_train_path = os.path.join(train_path, \"class1\")\n class2_train_path = os.path.join(train_path, \"class2\")\n\n val_path = os.path.join(output_path, \"val\")\n class1_val_path = os.path.join(val_path, \"class1\")\n class2_val_path = os.path.join(val_path, \"class2\")\n\n test_path = os.path.join(output_path, \"test\")\n class1_test_path = os.path.join(test_path, \"class1\")\n class2_test_path = os.path.join(test_path, \"class2\")\n\n paths = [\n class1_train_path,\n class1_val_path,\n class1_test_path,\n class2_train_path,\n class2_val_path,\n class2_test_path,\n ]\n\n for path in paths:\n try:\n os.makedirs(path)\n except FileExistsError:\n pass\n\n for i in range(num_images):\n pixels = numpy.random.rand(64, 64, 3) * 255\n im = Image.fromarray(pixels.astype(\"uint8\")).convert(\"RGB\")\n im.save(os.path.join(path, f\"rand_image_{i}.jpeg\"))\n\n process_images(output_path)", "def makeDataset(numberOfTrials, data_type):\n\n\tdata_folder = data_type + \"_images\"\n\tlabel_file = 
os.path.join(dataset_params.data_path, data_type + \"_lables.csv\")\n\n\tutils.create_directory(dataset_params.data_path)\n\tutils.create_directory(os.path.join(dataset_params.data_path, data_folder))\n\n\tallowedRadius = utils.defineShapePerimeter()\n\tcolorsRGB = utils.defineColorValues()\n\tshapeDict = utils.defineShapeSides()\n\tpadding = dataset_params.padding\n\n\tnum = 0\n\toutput_images = [[\"figNum\", \"shape\", \"color\", \"size\", \"background\", \"quadrant\", \"radius\"]]\n\tfor c in dataset_params.colors: # for all 7 foreground colors \n\t\tfor q in dataset_params.quadrants: # for all 4 quadratns \n\t\t\tfor s in dataset_params.shapes: # for all 5 shapes\n\t\t\t\tfor k in dataset_params.sizes: # for all 3 sizes\n\t\t\t\t\tfor b in dataset_params.backgrounds: # for all 3 background colors\n\t\t\t\t\t\tfor i in range(numberOfTrials):\n\t\t\t\t\t\t\tfileName = os.path.join(dataset_params.data_path, data_folder, str(num) + \".png\")\n\t\t\t\t\t\t\tpresentQuadrant = dataset_params.quadrants[q]\n\t\t\t\t\t\t\tradius = random.randint(allowedRadius[s][k][0],allowedRadius[s][k][1])\n\n\t\t\t\t\t\t\tif(presentQuadrant == 3):\n\t\t\t\t\t\t\t\txMin = 128 + padding\n\t\t\t\t\t\t\t\txMax = 255 - radius\n\t\t\t\t\t\t\t\tyMin = 128 + padding\n\t\t\t\t\t\t\t\tyMax = 255 - radius\n\n\t\t\t\t\t\t\telif(presentQuadrant == 2):\n\t\t\t\t\t\t\t\txMin = 0 + radius\n\t\t\t\t\t\t\t\txMax = 128 - padding\n\t\t\t\t\t\t\t\tyMin = 128 + padding\n\t\t\t\t\t\t\t\tyMax = 255 - radius\n\n\t\t\t\t\t\t\telif(presentQuadrant == 1):\n\t\t\t\t\t\t\t\txMin = 0 + radius\n\t\t\t\t\t\t\t\txMax = 128 - padding\n\t\t\t\t\t\t\t\tyMin = 0 + radius\n\t\t\t\t\t\t\t\tyMax = 128 - padding\n\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\txMin = 128 + padding\n\t\t\t\t\t\t\t\txMax = 255 - radius\n\t\t\t\t\t\t\t\tyMin = 0 + radius\n\t\t\t\t\t\t\t\tyMax = 128 - padding\n\n\t\t\t\t\t\t\txCenter = random.randint(xMin, xMax)\n\t\t\t\t\t\t\tyCenter = random.randint(yMin, yMax)\n\t\t\t\t\t\t\tcenter = [xCenter, yCenter]\n\n\t\t\t\t\t\t\tif(s == \"circle\"):\n\t\t\t\t\t\t\t\toutput_images.append([num, \"circle\", c, k, b, presentQuadrant, radius])\n\t\t\t\t\t\t\t\timg = makeCircle(c, radius, center, b, colorsRGB)\n\t\t\t\t\t\t\t\timg = img[:,:,::-1]\n\t\t\t\t\t\t\t\tcv2.imwrite(fileName, img)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tn = shapeDict[s]\n\t\t\t\t\t\t\t\timg = makePolygon(center, n, radius, b, c, colorsRGB)\n\t\t\t\t\t\t\t\timg = img[:,:,::-1]\n\t\t\t\t\t\t\t\tcv2.imwrite(fileName, img)\n\t\t\t\t\t\t\t\toutput_images.append([num, s, c, k, b, presentQuadrant, radius])\n\t\t\t\t\t\t\tnum += 1\n\t\n\tprint(\"Number of image generated\", num)\n\n\tprint(\"Saving \" + data_type + \" data meta information to CSV ......\")\n\tdf = pd.DataFrame(output_images[1:], columns=output_images[0])\n\tdf.to_csv(label_file, index=False)\n\tprint(\"Saved \" + data_type + \" data meta information: \" + data_folder)\n\t\n\n\tprint(\"Saving \" + data_type + \" images data to npz(numpy) compressed file ......\")\n\tmake_npz_file(data_type)\n\tprint(\"Saved \" + data_type + \" images data to npz(numpy) compressed file!\")\n\t\n\treturn None", "def creation_data_sets(quality, dataset, test_case=False):\n current_path = Path.cwd()\n if dataset == 0:\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n del y_train, y_test\n train_path = current_path.joinpath(\"Mnist_{}\".format(quality))\n test_path = current_path.joinpath(\"Mnist_{}_test\".format(quality))\n else:\n (x_train, y_train), (x_test, y_test) = cifar10.load_data()\n del y_train, y_test\n 
train_path = current_path.joinpath(\"Cifar-10_{}\".format(quality))\n test_path = current_path.joinpath(\"Cifar-10_{}_test\".format(quality))\n\n create_directories(train_path, test_path)\n convert(train_path, x_train, dataset, quality, test_case)\n convert(test_path, x_test, dataset, quality, test_case)", "def make_data(config, data, label):\n if not os.path.isdir(os.path.join(os.getcwd(), config.checkpoint_dir)):\n os.makedirs(os.path.join(os.getcwd(), config.checkpoint_dir))\n\n if config.is_train:\n savepath = os.path.join(os.getcwd(), config.checkpoint_dir +'/train.h5')\n else:\n savepath = os.path.join(os.getcwd(), config.checkpoint_dir +'/test.h5')\n\n with h5py.File(savepath, 'w') as hf:\n hf.create_dataset('data', data=data)\n hf.create_dataset('label', data=label)", "def copy_files():\n\n # Load the Knifey-Spoony dataset.\n # This is very fast as it only gathers lists of the files\n # and does not actually load the images into memory.\n dataset = load()\n\n # Copy the files to separate training- and test-dirs.\n dataset.copy_files(train_dir=train_dir, test_dir=test_dir)", "def make_npz_file(data_type):\n\n\tdata_folder = data_type + \"_images\"\n\tlabel_file = os.path.join(dataset_params.data_path, data_type + \"_lables.csv\")\n\toutput_file = os.path.join(dataset_params.data_path, \"synthetic_\" + data_type + \"_data\")\n\tline_reader = csv.DictReader(open(label_file,\"r\"))\n\n\tdata = []\n\tlabels = []\n\tdata_points = 0\n\tfor row in line_reader:\n\t\timage_name = os.path.join(dataset_params.data_path,data_folder,row[\"figNum\"] + \".png\")\n\t\timage_data = cv2.imread(image_name, cv2.IMREAD_COLOR)\n\t\timage_data = cv2.cvtColor(image_data, cv2.COLOR_BGR2RGB)\n\t\timage_label = [int(dataset_params.shapes[row[\"shape\"]]), int(dataset_params.colors[row[\"color\"]]), int(dataset_params.sizes[row[\"size\"]]), int(row[\"quadrant\"]), int(dataset_params.backgrounds[row[\"background\"]]) ]\n\t\tdata.append(image_data)\n\t\tlabels.append(image_label)\n\t\tdata_points += 1\n\n\t# Converting list to data to np array\n\tdata = np.asarray(data)\n\tlabels = np.asarray(labels)\n\n\t# Printing log information\n\tprint(data_type, \"statistics being saved: \")\n\tprint(data_type, \"data shape\", data.shape)\n\tprint(data_type, \"label shape\", labels.shape)\n\n\t# saveing the file as npz file\n\tnp.savez_compressed(output_file, data=data, lables=labels)", "def createDataset(sources,output,labels,sparse):\n global has_joblib\n out_path = str(output)\n # delete the output file\n if os.path.exists(os.path.abspath(out_path)):\n os.remove(os.path.abspath(out_path))\n \n # first, list the source files\n fpaths_src, fnames_src = utils.listFiles(directory=os.path.abspath(sources), ext='png')\n \n label_map={}\n \n # read the label file\n if not (labels == None):\n label_map = utils.readLabelMap(labels)\n # check that the numbers match\n print(\"Number of images in label map : %s\"%str(len(label_map.keys())-1))\n print(\"Number of images in source dir: %s\"%str(len(fpaths_src)))\n assert len(label_map.keys())-1 == len(fpaths_src)\n \n # generate KNN classifier\n if not (args.codebook == 'None' or args.codebook == None):\n args.knn = getKNNClassifier() \n else:\n args.knn = None\n \n # precompute number of images\n n_imgs = len(fpaths_src)\n \n # preallocate array\n # if augmentation, calculate (9*4+1)*n samples\n all_features_list = []\n \n # parallel implementation (default, if joblib available)\n if has_joblib:\n image_features = Parallel(n_jobs=args.njobs,verbose=5) 
(delayed(processImage)(fpaths_src, label_map, fnames_src, img_idx) for img_idx in range(n_imgs))\n # collect all images into a single matrix\n image_features = np.concatenate(image_features, axis=0)\n all_features_list.append(image_features)\n else:\n for img_idx in xrange(n_imgs):\n image_features = processImage(fpaths_src, label_map, fnames_src, img_idx)\n all_features_list.append(image_features)\n \n # make a 2D matrix from the list of features (stack all images vertically)\n feat_matrix = np.concatenate(all_features_list, axis=0).astype(np.float32) \n \n # do scaling of each feature dimension \n #if False:\n if not (args.scale == 0):\n print \"Scaling data...\"\n \n # preserve the labels\n label_vec = feat_matrix[:,0]\n feat_matrix = np.delete(feat_matrix,0,1)\n \n featurestats = np.zeros((2,feat_matrix.shape[1]))\n \n # use soft-normalization (zero-mean, unit var whitening)\n if (args.scale == 1):\n # if we specified featurestats from a training set, use them\n if not (args.featurestats == None):\n # load the statistics\n featurestats = loadFeatureStats()\n # featurestats contains 2 rows, first row = mean, second row = std\n # and n feature dimensions\n assert feat_matrix.shape[1]==featurestats.shape[1]\n else:\n pass\n \n \n # use hard-normalization \n elif (args.scale == 2):\n # if we specified featurestats from a training set, use them\n if not (args.featurestats == None):\n # load the statistics\n featurestats = loadFeatureStats()\n # the featurestats contains 2 rows, first row = min, second row = max \n # and n feature dimensions\n assert feat_matrix.shape[1]==featurestats.shape[1]\n else:\n pass\n \n \n # normalize each feature dimension\n for feat_idx in xrange(feat_matrix.shape[1]):\n feat_vec = feat_matrix[:,feat_idx]\n \n # soft-normalization (zero-mean, approx. 
unit variance)\n if (args.scale == 1): \n # if feature statistics are specified\n if not (args.featurestats == None):\n feat_mean = featurestats[0,feat_idx]\n feat_std = featurestats[1,feat_idx]\n else:\n # compute them from the data\n feat_mean = feat_vec.mean()\n feat_std = (feat_vec.std() + 1e-10)\n # store them \n featurestats[0,feat_idx] = feat_mean\n featurestats[1,feat_idx] = feat_std\n \n # shift to zero mean and (unit) variance\n feat_vec_scaled = (feat_vec - feat_mean) / (1.*feat_std)\n \n \n # hard-normalization (min/max = borders estimated from the (training) dataset)\n elif (args.scale == 2):\n if not (args.featurestats == None):\n feat_min = featurestats[0,feat_idx]\n feat_max = featurestats[1,feat_idx]\n else:\n # compute them freshly\n feat_min = np.min(feat_vec)\n feat_max = np.max(feat_vec)\n # store them \n featurestats[0,feat_idx] = feat_min\n featurestats[1,feat_idx] = feat_max\n \n # standardize/normalize between 0 and 1\n feat_vec_std = (feat_vec - feat_min) / (feat_max - feat_min + 1e-10) \n \n # linearly scale between -1 and 1 \n feat_vec_scaled = (1.0*feat_vec_std * (1 - -1)) - 1\n \n \n # set column back to matrix\n feat_matrix[:,feat_idx] = feat_vec_scaled\n \n # finally prepend the label_vec again\n feat_matrix = np.concatenate((np.reshape(label_vec,(feat_matrix.shape[0],1)),feat_matrix), axis=1)\n \n print \"Done.\"\n else:\n print \"Data may not be properly scaled, use the 'svm-scale' implementation of libsvm.\"\n \n if not (args.savefeaturestats == None):\n saveFeatureStats(featurestats) \n\n #Parallel(n_jobs=args.njobs, verbose=5)(delayed(function)(params) for i in range(10))\n # open the output file\n output_file = open(os.path.abspath(out_path), 'wb')\n\n # run through the feature matrix \n print \"Writing %s rows and %s cols to file...\"%(feat_matrix.shape)\n # parallel implementation (default, if joblib available)\n if has_joblib:\n lines = Parallel(n_jobs=args.njobs, verbose=5)(delayed(writeLine)(i, feat_matrix) for i in range(feat_matrix.shape[0]))\n output_file.writelines(lines) \n else:\n for i in xrange(feat_matrix.shape[0]):\n line = writeLine(i, feat_matrix)\n output_file.writelines(line)\n \n output_file.close()\n \n return 0", "def download():\n\n trainset = torchvision.datasets.CIFAR10(root=paths.raw_cifar10_dir(), train=True, download=True)\n testset = torchvision.datasets.CIFAR10(root=paths.raw_cifar10_dir(), train=False, download=True)\n train_images = numpy.array(trainset.data)\n train_labels = numpy.array(trainset.targets)\n test_images = numpy.array(testset.data)\n test_labels = numpy.array(testset.targets)\n\n assert numpy.max(train_images) == 255\n\n train_images = train_images/255.\n test_images = test_images/255.\n\n utils.write_hdf5(paths.cifar10_train_images_file(), train_images.astype(numpy.float32))\n log('wrote %s' % paths.cifar10_train_images_file())\n utils.write_hdf5(paths.cifar10_test_images_file(), test_images.astype(numpy.float32))\n log('wrote %s' % paths.cifar10_test_images_file())\n utils.write_hdf5(paths.cifar10_train_labels_file(), train_labels.reshape(-1, 1).astype(numpy.int))\n log('wrote %s' % paths.cifar10_train_labels_file())\n utils.write_hdf5(paths.cifar10_test_labels_file(), test_labels.reshape(-1, 1).astype(numpy.int))\n log('wrote %s' % paths.cifar10_test_labels_file())", "def prepare_data(self, *args, **kwargs):\n # get paths to train and test splits\n _split_paths = [os.path.join(self.path_to_data, split)\n for split in os.listdir(self.path_to_data)]\n\n # for each split [train, test]\n for _path in 
_split_paths:\n _img_classes = os.listdir(_path) # get subfolders representing each class\n self.splits[os.path.basename(_path)] = []\n\n # get the images in pairs with its corresponding class\n for _class in _img_classes:\n _data = self.get_img_text_pair(os.path.join(_path, _class))\n\n if os.path.basename(_path) == 'train':\n self.weights[self.encode_label(_class)] = len(_data)\n self.splits[os.path.basename(_path)].extend(_data)", "def create_train_file(img_folder_path: str, train_file_path: str) -> None:\n files = []\n for ext in (\"*.gif\", \"*.png\", \"*.jpg\", \"*.bmp\"):\n img_path = glob(join(img_folder_path, ext))\n if img_path:\n files.extend(img_path)\n\n write_to_train_file(files, train_file_path)\n\n print(\"Training files are created in \" + img_folder_path)", "def _create_layout(root_dir, subsets):\n _create_folder(os.path.join(root_dir, \"images\"))\n _create_folder(os.path.join(root_dir, \"labels\"))\n\n for subset in subsets:\n _create_folder(os.path.join(root_dir, \"images\", subset))\n _create_folder(os.path.join(root_dir, \"labels\", subset))", "def create_directories(train_path, test_path):\n train_path.joinpath(\"images\").mkdir(parents=True)\n test_path.joinpath(\"images\").mkdir(parents=True)", "def convert_dataset(src_dir, dest_dir):\n subdirs = get_subdirs(src_dir)\n detector = dlib.simple_object_detector(MODEL_PATH)\n for img_dir in tqdm(subdirs):\n\tprint(img_dir)\n jpegs = get_img_paths_in_dir(img_dir)\n target_dir = dest_dir + img_dir.split('/')[-1]\n if not os.path.exists(target_dir):\n os.makedirs(target_dir)\n for src_path in jpegs:\n target_path = target_dir + '/' + src_path.split('/')[-1]\n img = io.imread(src_path)\n dets = detector(img)\n bounding_boxes = get_bounding_boxes(dets)\n if bounding_boxes:\n square_box = find_square_box(bounding_boxes[0])\n if is_valid(square_box, img):\n box = bounding_boxes[0]\n square_box = find_square_box(box)\n cropped_img = crop_frame(img, square_box)\n PIL_img = PIL.Image.fromarray(cropped_img)\n resized_img = PIL_img.resize((54,54), PIL.Image.BILINEAR)\n\t\t resized_img.save(target_path)\n print(target_path)\n # grey_img = resized_img.convert('L')\n # grey_img.save(target_path)", "def save_data(data_dir):\r\n for k in range(1,11):\r\n fold_name = 'fold' + str(k)\r\n print \"Saving\" + fold_name\r\n features, labels = process_audio(parent_path, [fold_name])\r\n labels = encode(labels)\r\n print \"Features of\", fold_name , \" = \", features.shape\r\n print \"Labels of\", fold_name , \" = \", labels.shape\r\n feature_file = os.path.join(data_dir, fold_name + '_x.npy')\r\n labels_file = os.path.join(data_dir, fold_name + '_y.npy')\r\n np.save(feature_file, features)\r\n print \"Saved \" + feature_file\r\n np.save(labels_file, labels)\r\n print \"Saved \" + labels_file", "def generate_dataset(self):\n\t\timg_set = []\n\t\tqa_set = []\n\t\tfor i in range(self.config.dataset_size):\n\t\t\timg, r = self.generate_image()\n\t\t\tq = self.generate_question()\n\t\t\ta = self.generate_answer(r, q)\n\t\t\timg_sample = {\n\t\t\t\t'id': i,\n\t\t\t\t'image': img.tolist()\n\t\t\t}\n\t\t\timg_set.append(img_sample)\n\t\t\tfor j in range(len(q)):\n\t\t\t\tqa_sample = {\n\t\t\t\t\t'id': i,\n\t\t\t\t\t'question': q[j].tolist(),\n\t\t\t\t\t'answer': a[j].tolist()\n\t\t\t\t}\n\t\t\t\tqa_set.append(qa_sample)\n\t\tprint('Finished creating smaples')\n\t\tdataset = {\n\t\t\t'image':\timg_set,\n\t\t\t'qa':\tqa_set\n\t\t}\n\t\twith open(self.path, 'w') as f:\n\t\t\tjson.dump(dataset, f)", "def generate_data(dataset, target_filename, 
label):\n\n data_dir = check_data(dataset)\n\n data_x = np.empty((0, NB_SENSOR_CHANNELS))\n data_y = np.empty((0))\n\n zf = zipfile.ZipFile(dataset)\n print (\"Processing dataset files ...\")\n for filename in OPPORTUNITY_DATA_FILES:\n try:\n data = np.loadtxt(BytesIO(zf.read(filename)))\n print (\"... file {0}\".format(filename))\n x, y = process_dataset_file(data, label)\n data_x = np.vstack((data_x, x))\n data_y = np.concatenate([data_y, y])\n except KeyError:\n print (\"ERROR: Did not find {0} in zip file\".format(filename))\n\n # Dataset is divided into train and test\n nb_training_samples = 557963\n # The first 18 OPPORTUNITY data files are used for the traning dataset, having 557963 samples\n X_train, y_train = data_x[:nb_training_samples,:], data_y[:nb_training_samples]\n X_test, y_test = data_x[nb_training_samples:,:], data_y[nb_training_samples:]\n\n print (\"Final datasets with size: | train {0} | test {1} | \".format(X_train.shape,X_test.shape))\n\n obj = [(X_train, y_train), (X_test, y_test)]\n f = open(os.path.join(data_dir, target_filename), 'wb')\n cp.dump(obj, f, protocol=cp.HIGHEST_PROTOCOL)\n f.close()", "def copy_database(path_images, path_labels, path_final_images):\n\n try:\n labels = sorted(os.listdir(path_labels))\n except FileNotFoudError:\n print(\"No such file or directory \", path_labels)\n\n try:\n images = sorted(os.listdir(path_images)) #+ \"RetinaNet_I04590/\"))\n except FileNotFoudError:\n print(\"No such file or directory \", path_images)\n\n \"\"\"if not os.path.exists(path_final_images + \"I04590/\"):\n os.mkdir(path_final_images + \"I04590/\")\n\n if not os.path.exists(path_final_images + \"I045135/\"):\n os.mkdir(path_final_images + \"I045135/\")\n\n if not os.path.exists(path_final_images + \"I090135/\"):\n os.mkdir(path_final_images + \"I090135/\")\n\n if not os.path.exists(path_final_images + \"I4590135/\"):\n os.mkdir(path_final_images + \"I4590135/\")\n\n if not os.path.exists(path_final_images + \"Params/\"):\n os.mkdir(path_final_images + \"Params/\")\n\n if not os.path.exists(path_final_images + \"Pauli2/\"):\n os.mkdir(path_final_images + \"Pauli2/\")\n\n if not os.path.exists(path_final_images + \"Pauli3/\"):\n os.mkdir(path_final_images + \"Pauli3/\")\n\n if not os.path.exists(path_final_images + \"Stokes/\"):\n os.mkdir(path_final_images + \"Stokes/\")\n\n if not os.path.exists(path_final_images + \"Rachel/\"):\n os.mkdir(path_final_images + \"Rachel/\")\n\n if not os.path.exists(path_final_images + \"Rachel2/\"):\n os.mkdir(path_final_images + \"Rachel2/\")\"\"\"\n\n for k in range(len(images)):\n if str(k) + \".xml\" in labels:\n copyfile(path_images + \"/\" + images[k],\n path_final_images + \"/\" + images[k])\n \"\"\"copyfile(path_images + \"RetinaNet_I04590/\" + str(k) + \".png\",\n path_final_images + \"I04590/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_I045135/\" + str(k) + \".png\",\n path_final_images + \"I045135/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_I090135/\" + str(k) + \".png\",\n path_final_images + \"I090135/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_I4590135/\" + str(k) + \".png\",\n path_final_images + \"I4590135/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Params/\" + str(k) + \".png\",\n path_final_images + \"Params/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Pauli2/\" + str(k) + \".png\",\n path_final_images + \"Pauli2/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Pauli3/\" + str(k) + \".png\",\n path_final_images + 
\"Pauli3/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Stokes/\" + str(k) + \".png\",\n path_final_images + \"Stokes/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Rachel/\" + str(k) + \".png\",\n path_final_images + \"Rachel/\" + str(k) + \".png\")\n copyfile(path_images + \"RetinaNet_Rachel2/\" + str(k) + \".png\",\n path_final_images + \"Rachel2/\" + str(k) + \".png\")\n copyfile(path_labels + str(k) + \".xml\",\n path_final_labels + str(k) + \".xml\")\"\"\"\n print(k)", "def generate_transformed_data(self):\n for each_class in self.classes:\n class_directory = \"data/test/test_folder/\"\n class_directory += each_class\n class_directory += \"_test.jpg\"\n test_image = keras.preprocessing.image.load_img(class_directory)\n image_set = keras.preprocessing.image.img_to_array(test_image)\n image_set = image_set.reshape((1,) + image_set.shape)\n i = 0\n for each_batch in self.transformed_data_generator.flow(image_set, batch_size=1,\n save_to_dir=\"data/test_transformed/test_folder\", save_prefix=each_class, save_format=\"jpeg\"):\n i += 1\n if i > 20:\n break", "def GenerateImageSamples(self):\n self.generateImageSamples = GenerateImageSamples(self.trainDataDir,\n self.testDataDir,\n self.trainClassDir,\n self.testClassDir,\n self.cfgData)\n\n self.generateImageSamples.LoadDataSave(self.trainDataDir, 'train')\n # self.generateImageSamples.CopyFiles(self.trainClassDir)\n\n self.generateImageSamples.LoadDataSave(self.testDataDir, 'test')\n # self.generateImageSamples.CopyFiles(self.testClassDir)", "def Dev_Image_data_generator(folderlist,resize = (920,1200),Transformation = True, scaling = True, batch_size = 16):\n\n while True:\n total_classes = len(folderlist.keys())\n keys = folderlist.keys()\n Images = []\n Image_label = []\n for key in folderlist.keys():\n img_label = random.choice(folderlist[key])\n img = Image.open(img_label,'r')\n h = resize[1]\n l = int(img.size[1]*h/img.size[0])\n img = img.resize((h,l), Image.ANTIALIAS)\n background = Image.new('RGB', (resize[1], resize[0]), (255, 255, 255))\n img_w, img_h = img.size\n bg_w, bg_h = background.size\n offset = (int((bg_w - img_w) / 2), int((bg_h - img_h) / 2))\n background.paste(img, offset)\n background = np.asarray(background)\n if Transformation == True:\n rotation = rotate(background,random.choice(range(360)))\n translate = translate_xy(background,random.choice(range(resize[0]/4)),random.choice(range(resize[1]/4)))\n flip = cv2.flip(rotation,1)\n Y = np.concatenate((rotation[np.newaxis,:,:,:],flip[np.newaxis,:,:,:],translate[np.newaxis,:,:,:]))\n Images.append(Y)\n Images.append(background[np.newaxis,:,:,:])\n Image_label.append([key for i in range(4)])\n else:\n Images.append(background[np.newaxis,:,:,:])\n Image_label.append([key])\n Image_label = np.concatenate(Image_label)\n Images = np.concatenate(Images)\n Image_label = np.array(pd.get_dummies(Image_label))\n X_Image , Y_Image = shuffle(Images,Image_label,random_state=0)\n if scaling == True:\n X_Image = X_Image/255\n else:\n X_Image = X_Image\n batches = int(len(X_Image)/batch_size)\n for batch in range(batches):\n x = X_Image[batch*batch_size:(batch+1)*batch_size,:,:,:]\n y = Y_Image[batch*batch_size:(batch+1)*batch_size]\n yield((x,y))", "def __download(self):\n\n if self.__check_exists():\n return\n\n print(\"Downloading AudioMNIST dataset\")\n\n # download files\n try:\n os.makedirs(self.__path)\n except OSError as e:\n if e.errno == errno.EEXIST:\n pass\n else:\n raise\n\n if not os.path.exists(os.path.join(self.__path, 
'AudioMNIST-master.zip')):\n url = 'https://github.com/soerenab/AudioMNIST/archive/master.zip'\n wget_data = wget.download(url, out=self.__path)\n\n archive = zipfile.ZipFile(wget_data)\n\n for file in archive.namelist():\n if file.startswith('AudioMNIST-master/data/'):\n archive.extract(file, self.__path)\n\n print(\"Download successful\")\n\n audio_mnist_src = os.path.join(self.__path, 'AudioMNIST-master/data/')\n data = np.array(glob.glob(os.path.join(audio_mnist_src, \"**/*.wav\")))\n\n train_images = []\n train_labels = []\n test_images = []\n test_labels = []\n\n # first 5-cross-validation set from https://github.com/soerenab/AudioMNIST/blob/master/preprocess_data.py\n train_folders = [28, 56, 7, 19, 35, 1, 6, 16, 23, 34, 46, 53, 36, 57, 9, 24, 37, 2,\n 8, 17, 29, 39, 48, 54, 43, 58, 14, 25, 38, 3, 10, 20, 30, 40, 49, 55,\n 12, 47, 59, 15, 27, 41, 4, 11, 21, 31, 44, 50]\n test_folders = [26, 52, 60, 18, 32, 42, 5, 13, 22, 33, 45, 51]\n\n print(\"Converting audio to images\")\n # create train and test folders and save audios as images\n for filepath in tqdm(data):\n # the last one is just a counter for repeat of each digit, e.g. say zero once, twice, third time..\n\n dig, vp, rep = filepath.rstrip(\".wav\").split(\"/\")[-1].split(\"_\")\n\n # according to https://github.com/soerenab/AudioMNIST/blob/master/preprocess_data.py\n fs, data = wavf.read(filepath)\n\n # resample\n data = librosa.core.resample(y=data.astype(np.float32), orig_sr=fs, target_sr=8000, res_type=\"scipy\")\n # zero padding\n if len(data) > 8000:\n raise ValueError(\"data length cannot exceed padding length.\")\n elif len(data) < 8000:\n embedded_data = np.zeros(8000)\n offset = np.random.randint(low=0, high=8000 - len(data))\n embedded_data[offset:offset + len(data)] = data\n elif len(data) == 8000:\n # nothing to do here\n embedded_data = data\n pass\n\n # 1. 
fourier transform\n # stft, with selected parameters, spectrogram will have shape (228, 230)\n f, t, zxx = scipy.signal.stft(embedded_data, 8000, nperseg=455, noverlap=420, window='hann')\n # get amplitude\n zxx = np.abs(zxx[0:227, 2:-1])\n\n # if not 2, then convert to decibel\n zxx = librosa.amplitude_to_db(zxx, ref=np.max)\n\n # normalize from range -80,0 to 0,1\n zxx = (zxx - zxx.min()) / (zxx.max() - zxx.min())\n\n zxx = zxx[::-1] # reverse the order of frequencies to fit the images in the paper\n zxx = np.atleast_3d(zxx).transpose(2, 0, 1) # reshape to (1, img_dim_h, img_dim_w)\n\n # decide to which list to add (train or test)\n if int(vp) in train_folders:\n train_images.append(zxx)\n train_labels.append(int(dig))\n elif int(vp) in test_folders:\n test_images.append(zxx)\n test_labels.append(int(dig))\n else:\n raise Exception('Person neither in train nor in test set!')\n\n train_images = torch.Tensor(train_images).float()\n train_labels = torch.Tensor(train_labels).long()\n test_images = torch.Tensor(test_images).float()\n test_labels = torch.Tensor(test_labels).long()\n\n torch.save(train_images, os.path.join(self.__path, 'train_images_tensor.pt'))\n torch.save(train_labels, os.path.join(self.__path, 'train_labels_tensor.pt'))\n torch.save(test_images, os.path.join(self.__path, 'test_images_tensor.pt'))\n torch.save(test_labels, os.path.join(self.__path, 'test_labels_tensor.pt'))\n\n print('Done!')", "def make_datasets(class_names, dataset_dict, path_source, path_dest, seed):\n \n create_directory_structure(path_dest)\n\n path_alldata = [path_source.joinpath(f'label_{class_}')\n for class_ in class_names]\n\n path_imagefiles = [class_path.glob('*.bin')\n for class_path in path_alldata]\n\n size = sum([v for k, v in dataset_dict.items()])\n rng = default_rng(seed)\n\n datasets_by_class = np.array([rng.choice([image_file.name\n for image_file in image_filelist],\n size=size, replace=False)\n for image_filelist in path_imagefiles])\n\n dataset_labels = np.array([np.full(size, class_)\n for class_ in class_names])\n\n if not path_dest.exists():\n path_dest.mkdir(parents=True)\n\n start=0\n for set_name, n_examples in dataset_dict.items():\n stop = start + n_examples\n\n filename = f'{set_name}_set.csv'\n path_file = path_dest.joinpath(filename)\n \n images = datasets_by_class[:,start:stop].flatten()\n labels = dataset_labels[:,start:stop].flatten()\n rows = np.transpose(np.vstack((images, labels))).tolist()\n\n with path_file.open(mode='w', newline='') as f:\n csv_writer = writer(f)\n csv_writer.writerows(rows)\n\n start = n_examples", "def decompress_data(src, dst):\n assert os.path.exists(src), \"{} does not exist. 
Please download the \\\n entire repository and keep it as it originally is\".format(src)\n\n # create folder layout at the destination folder\n subset_list = [\"train\", \"val\"]\n _create_layout(dst, subset_list)\n\n # extract data\n for subset in subset_list:\n subset_img_src = os.path.join(src, \"images\", subset + \".zip\")\n subset_img_dst = os.path.join(dst, \"images\", subset)\n _extract_multi_vol_zip(subset_img_src, subset_img_dst)\n _extract_all_gz_in_dir(subset_img_dst)\n\n subset_lbl_src = os.path.join(src, \"labels\", subset + \".zip\")\n subset_lbl_dst = os.path.join(dst, \"labels\", subset)\n _extract_zip(subset_lbl_src, subset_lbl_dst)\n _extract_all_gz_in_dir(subset_lbl_dst)\n\n print(\"Finished decompressing {}.\".format(subset))", "def build_dataset(\n is_train, \n data_dir: str,\n image_size: int = 224,\n color_jitter: float = 0.4, \n aa: str = \"rand-m9-mstd0.5-inc1\",\n train_interpolation: str = \"bicubic\",\n reprob: float = 0.25, \n remode: str = \"pixel\", \n recount: int = 1\n):\n transforms = build_transform(\n is_train, \n image_size, \n color_jitter, \n aa, \n train_interpolation, \n reprob, \n remode, \n recount\n )\n\n root = os.path.join(data_dir, 'train' if is_train else 'val')\n dataset = datasets.ImageFolder(root, transform=transforms)\n nb_classes = 1000\n\n return dataset, nb_classes", "def create_dataset(data_path, batch_size=32, num_parallel_workers=1):\n # Define dataset\n mnist_ds = ds.MnistDataset(data_path)\n\n resize_height, resize_width = 32, 32\n rescale = 1.0 / 255.0\n shift = 0.0\n rescale_nml = 1 / 0.3081\n shift_nml = -1 * 0.1307 / 0.3081\n\n # Define map operations\n resize_op = vision.Resize((resize_height, resize_width), interpolation=Inter.LINEAR)\n rescale_nml_op = vision.Rescale(rescale_nml, shift_nml)\n rescale_op = vision.Rescale(rescale, shift)\n hwc2chw_op = vision.HWC2CHW()\n type_cast_op = transforms.TypeCast(mstype.int32)\n\n # Apply map operations on images\n mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns=\"label\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=resize_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=rescale_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)\n\n return mnist_ds", "def read_classified_data(root_path, to_size = (200,200), transformation = transforms.ToTensor()):\n label_dict = {}\n # for each folder in the dataset\n # get the label\n for i, label in tqdm(enumerate(sorted(os.listdir(root_path))), desc = \"Read in...\", leave = False):\n if len(os.listdir(sub_path)) == 0:\n continue\n sub_path = os.path.join(root_path, label)\n # write the label in the label dict\n label_dict[i] = label\n # find the csv, there should be one and only one csv\n csv_path = glob.glob(os.path.join(sub_path,\"*.csv\"))[0]\n df = pd.read_csv(csv_path)\n # the csv should have a image_name list indicating the 1-1 correspondense\n image_origin = df[\"image_name\"]\n # get the rest and the features\n df.drop(labels = \"image_name\", axis = \"columns\", inplace = True)\n # concate them to our dataset\n if i == 0:\n features = torch.from_numpy(df.to_numpy())\n images = 
torch.stack([preprocess(Image.open(os.path.join(sub_path, i)).convert(\"RGB\"),\n to_size = to_size,\n transformation = transformation) for i in image_origin])\n labels = torch.ones(image_origin.shape[0])*label\n else:\n features = torch.cat((features,torch.from_numpy(df.to_numpy())))\n images = torch.cat(images,torch.stack([preprocess(Image.open(os.path.join(sub_path, i)).convert(\"RGB\"),\n to_size = to_size,\n transformation = transformation) for i in image_origin]))\n labels = torch.cat(labels, torch.ones(image_origin.shape[0])*label)\n # return the dataset with our label_dict\n return TensorDataset(images,features, labels),label_dict", "def generate_nmnist_continuous_dataset(initial_size, input_dir):\n image_dataset = np.rec.array(None, dtype=[('height', np.uint16),\n ('width', np.uint16),\n ('image_data', 'object'),\n ('label', np.uint32)],\n shape=initial_size)\n num_images = 0\n\n # loop through each folder within the test directories\n for i in range(0, 10):\n current_dir = input_dir + os.path.sep + str(i) + os.path.sep + '*.bin'\n print('Processing {}...'.format(current_dir))\n for filename in glob.iglob(current_dir):\n image = prepare_n_mnist_continuous(filename, False, False)\n if num_images + 1 >= image_dataset.size:\n image_dataset = np.resize(image_dataset, (num_images * 2))\n add_images_to_dataset(image_dataset, image, num_images, i, 28, 28)\n num_images += 1\n\n return image_dataset[0:num_images]", "def split_folder(data_dir, train_pct, val_pct):\n\n random.seed(1)\n\n IMG_SUFFIX = '*_sat.jpg'\n MASK_SUFFIX = '*_msk.png'\n\n glob_imgs = os.path.join(data_dir,IMG_SUFFIX)\n glob_masks = os.path.join(data_dir, MASK_SUFFIX)\n\n img_paths = np.array(sorted(glob.glob(glob_imgs)))\n mask_paths = np.array(sorted(glob.glob(glob_masks)))\n \n num_imgs = len(img_paths)\n index_lst = list(range(num_imgs))\n\n random.shuffle(index_lst)\n\n train_idx_bound = int(train_pct * num_imgs)\n train_imgs = img_paths[index_lst[:train_idx_bound]]\n train_masks = mask_paths[index_lst[:train_idx_bound]]\n\n val_idx_bound = int((train_pct + val_pct) * num_imgs)\n val_imgs = img_paths[index_lst[train_idx_bound: val_idx_bound]]\n val_masks = mask_paths[index_lst[train_idx_bound: val_idx_bound]]\n\n test_imgs = img_paths[index_lst[val_idx_bound:]]\n test_masks = mask_paths[index_lst[val_idx_bound:]]\n\n # Write the lists to their own directories\n copy_list_to_dir(train_imgs, \"train\")\n print(\"Moved images into: train\")\n copy_list_to_dir(train_masks, \"train\")\n print(\"Moved masks into: train\")\n copy_list_to_dir(val_imgs, \"val\")\n print(\"Moved images into: val\")\n copy_list_to_dir(val_masks, \"val\")\n print(\"Moved masks into: val\")\n copy_list_to_dir(test_imgs, \"test\")\n print(\"Moved images into: test\")\n copy_list_to_dir(test_masks, \"test\")\n print(\"Moved masks into: test\")", "def make_data(sess, data, data_dir):\n if FLAGS.is_train:\n #savepath = os.path.join(os.getcwd(), os.path.join('checkpoint',data_dir,'train.h5'))\n savepath = os.path.join('.', os.path.join('checkpoint',data_dir,'train.h5'))\n if not os.path.exists(os.path.join('.',os.path.join('checkpoint',data_dir))):\n os.makedirs(os.path.join('.',os.path.join('checkpoint',data_dir)))\n with h5py.File(savepath, 'w') as hf:\n hf.create_dataset('data', data=data)", "def generateDataset(self):\n if self.outdir[-1] != \"/\": \n self.outdir += \"/\"\n self.outdir += \"dataset_trackml\"\n i = 1\n while os.path.exists(self.outdir):\n self.outdir.replace(\"_\"+str(i-1), \"\")\n self.outdir += (\"_\"+str(i))\n i += 1\n cmd = 
\"mkdir -p \"+ self.outdir\n os.system(cmd)\n\n cont = pc.particleController()\n cont.generateEvents(self.numevents, self.hpe, self.detectors)\n\n self.generateHits(cont)\n self.generateTruths(cont)\n self.generateSolution(cont)", "def create_data_folders() -> None:\n if not os.path.exists(\"data/save\"):\n os.mkdir(\"./data\")\n os.mkdir(\"./data/save\")\n if not os.path.exists(\"data/critics\"):\n os.mkdir(\"./data/critics\")\n if not os.path.exists('data/policies/'):\n os.mkdir('data/policies/')\n if not os.path.exists('data/results/'):\n os.mkdir('data/results/')", "def __init__(self, data_folder: str = os.path.join('data', 'user_images'),\n dataset_file: str = os.path.join('data', 'dataset.pkl'),\n targets: str = os.path.join('data', 'dataset.pkl')):\n # check for existing dataset\n if not os.path.exists(dataset_file):\n create_dataset(data_folder, dataset_file)\n with open(dataset_file, 'rb') as f:\n data = pickle.load(f)\n print(f'loaded dataset from {dataset_file}')\n self.images = data['images']\n self.crop_sizes = data['crop_sizes']\n self.crop_centers = data['crop_centers']", "def create_test_folder(df_test, target_path):\n folder_path = os.path.join(target_path, 'xray_preprocess/test')\n print(f'Create test set at: {folder_path}')\n for _, row in tqdm(df_test.iterrows(), total=df_test.shape[0]):\n if row['class']=='negative':\n destination_path = os.path.join(folder_path, 'negative')\n elif row['class']=='positive':\n destination_path = os.path.join(folder_path, 'positive')\n if not os.path.exists(destination_path):\n os.makedirs(destination_path) \n img = os.path.join(target_path, 'xray', 'test', row['filename'])\n shutil.copy(img, destination_path )", "def load_datasets():\n from .dataset import num_classes, image_size\n\n train_filename = maybe_download('notMNIST_large.tar.gz', 247336696)\n test_filename = maybe_download('notMNIST_small.tar.gz', 8458043)\n\n train_folders = maybe_extract(train_filename)\n test_folders = maybe_extract(test_filename)\n if not (len(train_folders) == len(test_folders) == num_classes):\n raise Exception('Expected %d folders, one per class. Found %d and %d instead.' 
% (\n num_classes, len(train_folders), len(test_folders)))\n print(\"Dataset folders: %s, %s\" % (train_folders, test_folders))\n\n # load datasets\n train_datasets = maybe_pickle(train_folders, 45000, image_size)\n test_datasets = maybe_pickle(test_folders, 1800, image_size)\n\n return train_datasets, test_datasets", "def compress_skim_dir(directory, output=\"zarr\"):\n\n if output not in (\"zarr\", \"zarr.zip\"):\n raise NotImplementedError(output)\n\n if output == \"zarr\":\n if not os.path.exists(directory+\".zarr\"):\n os.makedirs(directory+\".zarr\")\n elif output == \"zarr.zip\":\n if os.path.exists(directory+\".zarr.zip\"):\n raise FileExistsError(directory+\".zarr.zip\")\n\n master = {}\n for f in os.walk(directory):\n for fi in f[2]:\n if \".emx\" in fi:\n arr = np.fromfile(fi, dtype='f4')\n side = int(np.sqrt(arr.size))\n arr = arr.reshape(side, side)\n tazrange = pd.RangeIndex(1, side+1)\n master[fi.replace(\".emx\", \"\")] = xr.DataArray(\n arr,\n dims=['otaz', 'dtaz'],\n coords={'otaz': tazrange, 'dtaz': tazrange}\n )\n\n master = sh.Dataset(master)\n\n if output == \"zarr\":\n master.to_zarr(directory+\".zarr\", mode='a')\n elif output == \"zarr.zip\":\n with zarr.ZipStore(directory+\".zarr.zip\", mode='w') as store:\n master.to_zarr(store)\n return master", "def get_training_data(data_dir):\n data = []\n for label in labels:\n path = os.path.join(data_dir, label)\n class_num = labels.index(label)\n img_set = os.listdir(path)\n n = len(img_set)\n for i in range(n):\n try:\n img = img_set[i]\n img_arr = cv2.imread(os.path.join(path, img))\n resized_arr = cv2.resize(img_arr, (img_size, img_size)) # Reshaping images to preferred size\n data.append([resized_arr, class_num])\n if i % 100 == 0:\n print(\"Processing images: {}/{}\".format(i + 1, n))\n except Exception as e:\n print(e)\n return np.array(data)", "def pickle_dump_files():\n with open('data/' + dataset_name + '_' + model_name + '_' + 'predictions', 'wb') as f:\n pickle.dump(predictions, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'state_sentences', 'wb') as f:\n pickle.dump(final_state_sentences, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'decoded_sentences', 'wb') as f:\n pickle.dump(final_decoded_sentences, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'ids', 'wb') as f:\n pickle.dump(idx, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'exemplars', 'wb') as f:\n pickle.dump(exemplars, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'counter_exemplars', 'wb') as f:\n pickle.dump(counter_exemplars, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'top_exemplar_words', 'wb') as f:\n pickle.dump(top_exemplar_words, f)\n with open('data/' + dataset_name + '_' + model_name + '_' + 'top_counter_exemplar_words', 'wb') as f:\n pickle.dump(top_counter_exemplar_words, f)", "def generate_test_environment(tmpdir, dataset):\n\n # Overwrite settings with test settings\n generate_test_settings(tmpdir, dataset)\n\n # Generate the archive files\n for usage in ['train', 'test']:\n \n for dstype in ['images', 'labels']:\n \n dataset_type = usage + '.' + dstype\n \n mnist_dataset = 'datasets.mnist.' + dataset_type\n filepath = get_setting(mnist_dataset)\n\n test_dataset = dataset + '.' 
+ dataset_type\n generate_test_dataset_archive(filepath, test_dataset)", "def load_data(self):\n # make sure preprocessing is same as preprocessing as the network\n # reduce mean, and divide by a value to do scaling\n self.train_datagen = ImageDataGenerator(\n rescale=1./ 255,\n shear_range=0.05,\n rotation_range=20, # randomly rotate images in the range (degrees, 0 to 180)\n zoom_range=[0.9, 1.1], # Randomly zoom image\n width_shift_range=0.1, # randomly shift images horizontally (fraction of total width)\n height_shift_range=0.1, # randomly shift images vertically (fraction of total height)\n horizontal_flip=True, # randomly flip images\n brightness_range=[0.8, 1.2],\n fill_mode='reflect',\n validation_split=0.2)\n\n self.test_datagen = ImageDataGenerator(rescale=1. / 255)\n\n self.train_generator = self.train_datagen.flow_from_directory(\n self.train_dir,\n target_size=(224, 224),\n shuffle=True,\n batch_size=self.batchsize,\n class_mode='categorical',\n subset=\"training\")\n\n self.validation_generator = self.train_datagen.flow_from_directory(\n self.train_dir,\n target_size=(224, 224),\n shuffle=True,\n batch_size=self.batchsize,\n class_mode='categorical',\n subset=\"validation\")\n\n self.test_generator = self.test_datagen.flow_from_directory(\n self.test_dir,\n target_size=(224, 224),\n shuffle=False,\n batch_size=1,\n class_mode='categorical')", "def get_dataset(root_folder):\n # Parameters for data loader \n RESIZE_HEIGHT = 100 \n RESIZE_WIDTH = 100 \n\n cal_transform = transforms.Compose([\n transforms.Resize((RESIZE_HEIGHT,RESIZE_WIDTH)),\n transforms.ToTensor(),\n transforms.Lambda(lambda x: x.flatten())\n ])\n\n root_folder = \"../101_ObjectCategories/\"\n caltech101 = ImageFolder(root=root_folder, transform=cal_transform)\n\n return caltech101", "def create_and_write_output(predictions_path,output_path,inpDir):\n \n filenames= sorted(os.listdir(predictions_path)) \n for filename in filenames:\n \n # read the 3 channel output image from the neural network\n image=cv2.imread(os.path.join(predictions_path,filename))\n \n # create binary image output using the create_binary function\n out_image=create_binary(image) \n \n # read and store the metadata from the input image\n with BioReader(os.path.join(inpDir,filename)) as br:\n metadata = br.metadata\n\n # Write the binary output consisting of the metadata using bfio.\n output_image_5channel=np.zeros((out_image.shape[0],out_image.shape[1],1,1,1),dtype=np.uint8)\n output_image_5channel[:,:,0,0,0]=out_image \n\n with BioWriter(os.path.join(output_path,filename), metadata=metadata) as bw:\n bw.dtype = output_image_5channel.dtype\n bw.write(output_image_5channel)", "def main(args):\n\n for dir in args.dirs:\n # prepdir = mdssprep.Directory(dir,exclude=['file_*3*','file_2??'],include=['file_*5*'],maxarchivesize=mdssprep.one_meg*200.,minsize=mdssprep.one_meg*100.)\n prepdir = mdssprep.Directory(dir)\n prepdir.archive(dryrun=False)", "def generate_test_data(root: str) -> str:\n size = (64, 64)\n folder_path = os.path.join(root, \"enviroatlas_lotp\")\n\n if not os.path.exists(folder_path):\n os.makedirs(folder_path)\n\n for prefix in tile_list:\n for suffix, data_profile in layer_data_profiles.items():\n img_path = os.path.join(folder_path, f\"{prefix}_{suffix}.tif\")\n img_dir = os.path.dirname(img_path)\n if not os.path.exists(img_dir):\n os.makedirs(img_dir)\n\n data_profile[\"profile\"][\"height\"] = size[0]\n data_profile[\"profile\"][\"width\"] = size[1]\n data_profile[\"profile\"][\"transform\"] = Affine(\n 1.0, 0.0, 608170.0, 
0.0, -1.0, 3381430.0\n )\n\n write_data(\n img_path,\n data_profile[\"profile\"],\n data_profile[\"data_type\"],\n data_profile[\"vals\"],\n )\n\n # build the spatial index\n schema = {\n \"geometry\": \"Polygon\",\n \"properties\": {\n \"split\": \"str\",\n \"naip\": \"str\",\n \"nlcd\": \"str\",\n \"roads\": \"str\",\n \"water\": \"str\",\n \"waterways\": \"str\",\n \"waterbodies\": \"str\",\n \"buildings\": \"str\",\n \"lc\": \"str\",\n \"prior_no_osm_no_buildings\": \"str\",\n \"prior\": \"str\",\n },\n }\n with fiona.open(\n os.path.join(folder_path, \"spatial_index.geojson\"),\n \"w\",\n driver=\"GeoJSON\",\n crs=\"EPSG:3857\",\n schema=schema,\n ) as dst:\n for prefix in tile_list:\n img_path = os.path.join(folder_path, f\"{prefix}_a_naip.tif\")\n with rasterio.open(img_path) as f:\n geom = shapely.geometry.mapping(shapely.geometry.box(*f.bounds))\n geom = fiona.transform.transform_geom(\n f.crs.to_string(), \"EPSG:3857\", geom\n )\n\n row = {\n \"geometry\": geom,\n \"properties\": {\n \"split\": prefix.split(\"/\")[0].replace(\"_tiles-debuffered\", \"\")\n },\n }\n for suffix, data_profile in layer_data_profiles.items():\n key = suffix_to_key_map[suffix]\n row[\"properties\"][key] = f\"{prefix}_{suffix}.tif\"\n dst.write(row)\n\n # Create archive\n archive_path = os.path.join(root, \"enviroatlas_lotp\")\n shutil.make_archive(archive_path, \"zip\", root_dir=root, base_dir=\"enviroatlas_lotp\")\n shutil.rmtree(folder_path)\n md5: str = calculate_md5(archive_path + \".zip\")\n return md5", "def dataset_convertor(dataset_directory, outfolder_random, outfolder_art):\n print(\"converting dataset...\")\n directories = next(os.walk(dataset_directory))[1]\n for directory in directories:\n for i, file_name in enumerate(next(os.walk(dataset_directory + \"/\" + directory))[2]):\n image_splitter(Image.open(dataset_directory + \"/\" + directory + \"/\" + file_name, \"r\"), file_name,\n outfolder_random, outfolder_art, directory)\n print(\"converted\", file_name, \"successfully.\")", "def populate_train_test_val_dirs_nonrandomly(root_dir, val_ratio=0.15, test_ratio=0.05, preliminary_clahe=True,\n apply_masks=True):\n\n ''' Creating partitions of the data after shuffling '''\n # Folder to copy images from\n src = join(root_dir, 'CoregisteredBlurryImages')\n\n all_file_names = [f for f in os.listdir(src) if isfile(join(src, f))]\n\n if val_ratio == 0.0:\n # Select the number of images to skip between validation images\n val_skip_number = len(all_file_names) + 1\n else:\n # Select the number of images to skip between validation images\n val_skip_number = len(all_file_names) / (val_ratio * len(all_file_names))\n\n if test_ratio == 0.0:\n # Select the number of images to skip between test images\n test_skip_number = len(all_file_names) + 1\n else:\n # Select the number of images to skip between test images\n test_skip_number = len(all_file_names) / (test_ratio * len(all_file_names))\n\n # Get the list of validation file names, test file names, and train file names\n val_file_names = all_file_names[::int(val_skip_number)]\n test_file_names = [filename for filename in all_file_names[::int(test_skip_number + 1)]\n if filename not in val_file_names]\n train_file_names = [filename for filename in all_file_names\n if filename not in val_file_names and filename not in test_file_names]\n\n # Print the file distribution among the folders\n logger.print_file_distribution(len(all_file_names), len(train_file_names), len(val_file_names),\n len(test_file_names))\n\n # Copy-Pasting images into train dataset\n for 
name in train_file_names:\n shutil.copy(join(root_dir, 'CoregisteredBlurryImages', name), root_dir + '/train/CoregisteredBlurryImages')\n shutil.copy(join(root_dir, 'ClearImages', name), root_dir + '/train/ClearImages')\n if apply_masks:\n shutil.copy(join(root_dir, 'Masks', name), root_dir + '/train/Masks')\n\n # Copy-Pasting images into val dataset\n for name in val_file_names:\n shutil.copy(join(root_dir, 'CoregisteredBlurryImages', name), root_dir + '/val/CoregisteredBlurryImages')\n shutil.copy(join(root_dir, 'ClearImages', name), root_dir + '/val/ClearImages')\n if apply_masks:\n shutil.copy(join(root_dir, 'Masks', name), root_dir + '/val/Masks')\n\n # Copy-Pasting images into test dataset\n for name in test_file_names:\n shutil.copy(join(root_dir, 'CoregisteredBlurryImages', name), root_dir + '/test/CoregisteredBlurryImages')\n shutil.copy(join(root_dir, 'ClearImages', name), root_dir + '/test/ClearImages')\n if apply_masks:\n shutil.copy(join(root_dir, 'Masks', name), root_dir + '/test/Masks')\n\n ''' Augment the images in each new folder '''\n # If we want to use preliminary adaptive equalization...\n if preliminary_clahe:\n pass\n # ... then first, apply Contrast Limited Adaptive Histogram Equalization to clear images in all folders\n CLAHE_image_folder(root_dir + '/train/ClearImages')\n CLAHE_image_folder(root_dir + '/val/ClearImages')\n CLAHE_image_folder(root_dir + '/test/ClearImages')\n\n # Then, apply histogram equalization to make the blurry images' histogram match that of the clear images\n hist_match_image_folder(root_dir=join(root_dir, 'train'),\n clear_dir_name='ClearImages',\n blurry_dir_name='CoregisteredBlurryImages',\n match_to_clear=True)\n hist_match_image_folder(root_dir=join(root_dir, 'val'),\n clear_dir_name='ClearImages',\n blurry_dir_name='CoregisteredBlurryImages',\n match_to_clear=True)\n hist_match_image_folder(root_dir=join(root_dir, 'test'),\n clear_dir_name='ClearImages',\n blurry_dir_name='CoregisteredBlurryImages',\n match_to_clear=True)", "def get_files(self):\n train_images = glob(os.path.join(self.images_dir, '*%s' % self.im_extension)) \n train_labels = [x.replace(self.im_extension, '.npy').replace('images', 'groundTruth') for x in train_images]\n val_images = glob(os.path.join(self.val_images_dir, '*%s' % self.im_extension))\n val_labels = [x.replace(self.im_extension, '.npy').replace('images', 'groundTruth') for x in val_images]\n train_images = np.array(train_images)\n train_labels = np.array(train_labels)\n val_images = np.array(val_images)\n val_labels = np.array(val_labels)\n test_images = np.array(\n glob('/media/data_cifs/pytorch_projects/datasets/BSDS500_crops/data/images/test_nocrop/*.jpg'))\n test_labels = np.array(\n [x.replace('images', 'groundTruth').replace('.jpg', '.npy') for x in test_images])\n test_labels = np.array(\n [np.load(x) for x in test_labels])\n keep_idx = np.array([True if x.shape[0] > x.shape[1] else False for x in test_labels])\n test_images = test_images[keep_idx]\n test_labels = test_labels[keep_idx]\n test_images = np.stack([misc.imread(x) for x in test_images], 0)\n test_labels = np.stack(test_labels, 0)\n test_labels = test_labels[..., None]\n\n # Add constant padding to bottom/right\n if self.pad:\n test_images = util.pad(test_images, ((0, 0), (self.pad // 2, self.pad - self.pad // 2), (self.pad // 2, self.pad - self.pad // 2), (0, 0)), mode='linear_ramp')\n test_labels = util.pad(test_labels, ((0, 0), (self.pad // 2, self.pad - self.pad // 2), (self.pad // 2, self.pad - self.pad // 2), (0, 0)), 
mode='constant', constant_values=0)\n\n # Select images for training\n sort_idx = np.argsort(train_images)\n train_images = train_images[sort_idx[:self.train_size]]\n train_labels = train_labels[sort_idx[:self.train_size]]\n\n # Build CV dict\n cv_files, cv_labels = {}, {}\n cv_files[self.folds['train']] = train_images\n cv_files[self.folds['val']] = val_images\n cv_files[self.folds['test']] = test_images\n cv_labels[self.folds['train']] = train_labels\n cv_labels[self.folds['val']] = val_labels\n cv_labels[self.folds['test']] = test_labels\n return cv_files, cv_labels", "def package_datasets(ds_all, dirname=''):\n ds_all = copy.deepcopy(ds_all)\n assert dirname != '', \"dirname required\"\n package_dataset(ds_all['ds_train_um'], dirname=join('.', dirname, 'train'))\n package_dataset(ds_all['ds_valid_um'], dirname=join('.', dirname, 'valid'))\n package_dataset(ds_all['ds_test_um'], dirname=join('.', dirname, 'test'))", "def prepare_nfold_datasets(self): # i.e. split into different train/ground-truth(test) dataset\n for alpha in range(1, self.ALPHAs+1):\n if alpha != self.ALPHAs:\n gt_years = np.array2string(self.tl_model.years[(alpha-1)*self.PSI : alpha*self.PSI], separator='-')\n else:\n gt_years = np.array2string(self.tl_model.years[(alpha-1)*self.PSI : alpha*self.PSI+self.runoff_years], separator='-')\n new_cluster_dir = str(Path(self.tl_model.cluster_dir) / f'alpha_{alpha}_GT-{gt_years}')\n os.makedirs(new_cluster_dir, exist_ok=True)\n\n new_prepared_data_dir = str(Path(self.tl_model.prepared_data_dir) / f'alpha_{alpha}')\n os.makedirs(new_prepared_data_dir, exist_ok=True)\n \n if utils.find(f'*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir) and utils.find(f'*alpha_{alpha}_standardized_stacked_arr.pkl', new_prepared_data_dir):\n pass\n else:\n if not utils.find(f'*target*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir):\n print(f\"=> No input datasets pre-processed for alpha of {alpha}\")\n prepare.cut_target_dataset(self, alpha, new_prepared_data_dir)\n\n if not utils.find(f'*rf*alpha_{alpha}_preprocessed.pkl', new_prepared_data_dir):\n print(f\"=> No rainfall datasets pre-processed for alpha of {alpha}\")\n prepare.cut_rf_dataset(self, alpha, new_prepared_data_dir)\n \n print(f'Preprocessed pickles for alpha split {alpha} can be found @:\\n{new_prepared_data_dir}')", "def splitMerge(self):\n\t\tpath_merge = self.aug_merge_path\n\t\tpath_train = self.aug_train_path\n\t\tpath_label = self.aug_label_path\n\t\tfor i in range(self.slices):\n\t\t\tpath = path_merge + \"/\" + str(i)\n\t\t\ttrain_imgs = glob.glob(path+\"/*.\"+self.img_type)\n\t\t\tsavedir = path_train + \"/\" + str(i)\n\t\t\tif not os.path.lexists(savedir):\n\t\t\t\tos.mkdir(savedir)\n\t\t\tsavedir = path_label + \"/\" + str(i)\n\t\t\tif not os.path.lexists(savedir):\n\t\t\t\tos.mkdir(savedir)", "def test_train_split(folder_name):\n\n class_folders = glob.glob(os.path.join(folder_name, '*'))\n\n class_names = [i.split('/')[-1] for i in class_folders]\n\n print(class_folders)\n\n train_folder_path = os.path.join(folder_name, 'train_dir')\n validation_folder_path = os.path.join(folder_name, 'val_dir')\n\n if not os.path.exists(train_folder_path):\n os.makedirs(train_folder_path)\n if not os.path.exists(validation_folder_path):\n os.makedirs(validation_folder_path)\n\n # Create the folder structure\n class_folders_train = []\n class_folders_val = []\n for class_name in class_names:\n # Create calss folder in the training directory\n class_folders_train.append(os.path.join(train_folder_path, class_name))\n if not 
os.path.exists(class_folders_train[-1]):\n os.makedirs(class_folders_train[-1])\n # Create class folder in the validation_directory\n class_folders_val.append(os.path.join(\n validation_folder_path, class_name))\n if not os.path.exists(class_folders_val[-1]):\n os.makedirs(class_folders_val[-1])\n\n class_files = []\n\n for idx, class_folder in enumerate(class_folders):\n class_files = glob.glob(os.path.join(class_folder, '*.jpg'))\n for file in class_files[:int(len(class_files) * 0.7)]:\n copyfile(file, os.path.join(\n class_folders_train[idx], file.split('/')[-1]))\n for file in class_files[int(len(class_files) * 0.7):]:\n print(file)\n print(os.path.join(class_folders_val[idx], file.split('/')[-1]))\n copyfile(file, os.path.join(\n class_folders_val[idx], file.split('/')[-1]))", "def preprocess_dir(data_path,\n output_path,\n dataset,\n n_train,\n new_size,\n ):\n img_type_dict = get_class_labels()\n\n print('Preprocessing:', dataset)\n target_data_path = data_path\n disease_dirs = os.listdir(target_data_path)\n disease_dirs = [d for d in disease_dirs if\n os.path.isdir(os.path.join(target_data_path, d))]\n img_stack, target_list = [], []\n img_names = []\n for img_type in disease_dirs:\n class_lbl = img_type_dict[img_type]\n n_class = int(n_train / len(disease_dirs))\n print('\\t', img_type)\n img_files_path = os.path.join(target_data_path, img_type)\n if not (os.path.isdir(img_files_path)):\n continue\n img_files = os.listdir(img_files_path)\n img_files = [f for f in img_files if f.endswith('.jpeg')]\n if dataset == 'train':\n img_files = img_files[0:n_class]\n for img_fname in img_files:\n img_path = os.path.join(img_files_path, img_fname)\n img_arr = np.array(Image.open(img_path))\n img_arr = skimage.transform.resize(img_arr, new_size)\n img_arr = (img_arr - img_arr.min()) / img_arr.max()\n img_stack.append(img_arr)\n target_list.append(class_lbl)\n img_names += [n.split('.')[0] for n in img_files]\n # Save preprocessed data\n save_data(output_path, img_stack, target_list,\n new_size, dataset, n_train, img_names)", "def label_training_data(input_path, output_path):\r\n import shutil\r\n image_files = [file for file in os.listdir(path=input_path) if '.JPG' in file or '.jpeg' in file]\r\n \r\n for file in image_files:\r\n file_input_path = os.path.join(input_path,file)\r\n \r\n img = cv2.imread(file_input_path)\r\n \r\n file_output_path = os.path.join(output_path, classify_face(img))\r\n \r\n try:\r\n os.makedirs(file_output_path)\r\n except FileExistsError:\r\n # directory already exists\r\n pass\r\n shutil.move(file_input_path, file_output_path)", "def _preprocess_data(self, name, directory):\n if name.endswith('data'):\n for path in glob(str(directory / '**/*.jpg'), recursive=True):\n try:\n with Image.open(path) as img:\n if not name.startswith('feature'):\n img = img.rotate(-90, 0, 1)\n img = img.resize(self.input_shape)\n except (ValueError, OSError):\n print(\"Couldn't open {}\".format(path))\n else:\n path = Path(path)\n filename = path.name.split('img-')[1]\n target = (path.parent / filename).with_suffix('.image.png')\n img.save(target, 'PNG')\n os.remove(str(path))\n elif name.endswith('targets'):\n for path in glob(str(directory / '**/*.mat'), recursive=True):\n try:\n mat = spio.loadmat(path)['depthMap']\n img = spmisc.toimage(mat).resize(self.target_shape)\n except ValueError:\n print(\"Couldn't open {}\".format(path))\n else:\n path = Path(path)\n name = path.name[path.name.index('-') + 1:]\n target = (path.parent / name).with_suffix('.depth.png')\n img.save(target, 
'PNG')\n os.remove(str(path))", "def create_dataset(data_path, batch_size=32, repeat_size=1,\n num_parallel_workers=1):\n # define dataset\n mnist_ds = ds.MnistDataset(data_path)\n\n # define operation parameters\n resize_height, resize_width = 32, 32\n rescale = 1.0 / 255.0\n shift = 0.0\n rescale_nml = 1 / 0.3081\n shift_nml = -1 * 0.1307 / 0.3081\n\n # define map operations\n resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR) # Resize images to (32, 32)\n rescale_nml_op = CV.Rescale(rescale_nml, shift_nml) # normalize images\n rescale_op = CV.Rescale(rescale, shift) # rescale images\n hwc2chw_op = CV.HWC2CHW() # change shape from (height, width, channel) to (channel, height, width) to fit network.\n type_cast_op = C.TypeCast(mstype.int32) # change data type of label to int32 to fit network\n\n # apply map operations on images\n mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns=\"label\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=resize_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=rescale_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=hwc2chw_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n\n # apply DatasetOps\n buffer_size = 10000\n mnist_ds = mnist_ds.shuffle(buffer_size=buffer_size) # 10000 as in LeNet train script\n mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)\n mnist_ds = mnist_ds.repeat(repeat_size)\n\n return mnist_ds", "def get_data(folder: str, dimensions: int):\n preprocess = transforms.Compose(\n [\n transforms.Resize(256),\n transforms.CenterCrop(dimensions),\n transforms.ToTensor(),\n transforms.Normalize(\n mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]\n )\n ]\n )\n return datasets.ImageFolder(folder, transform=preprocess)", "def convert_hdf5_to_npz(in_dir_name, out_dir_name, cutoff=None,\n num_sampled_shards=None, max_num_atoms=None):\n\n tr_env_fn = in_dir_name+'/split/pairs_train@40'\n va_env_fn = in_dir_name+'/split/pairs_val@40'\n te_env_fn = in_dir_name+'/split/pairs_test@40'\n\n # Create the internal data sets\n ds_tr = MoleculesDataset(tr_env_fn, cutoff=cutoff, name='training')\n ds_va = MoleculesDataset(va_env_fn, cutoff=cutoff, name='validation')\n ds_te = MoleculesDataset(te_env_fn, cutoff=cutoff, name='test')\n\n print('Training: %i molecules. Validation: %i molecules. 
Test: %i molecules.'%(len(ds_tr),len(ds_va),len(ds_te)))\n\n # Make a directory\n try:\n os.mkdir(out_dir_name)\n except FileExistsError:\n pass\n\n # Save the data sets as compressed numpy files\n tr_file_name = out_dir_name+'/train.npz'\n va_file_name = out_dir_name+'/valid.npz'\n te_file_name = out_dir_name+'/test.npz'\n ds_tr.write_compressed(tr_file_name)\n ds_va.write_compressed(va_file_name)\n ds_te.write_compressed(te_file_name)\n\n return ds_tr, ds_va, ds_te", "def download(self):\n\n if self._check_exists():\n return\n\n os.makedirs(self.raw_folder, exist_ok=True)\n os.makedirs(self.processed_folder, exist_ok=True)\n\n # download files\n for url in self.resources:\n filename = url.rpartition('/')[2]\n download_and_extract_archive(url, download_root=self.raw_folder, filename=filename, md5=None)\n\n print('Processing...')\n\n training_set = (\n self.read_image_label_file(os.path.join(self.raw_folder, 'mnist_all_rotation_normalized_float_train_valid.amat'))\n )\n test_set = (\n self.read_image_label_file(os.path.join(self.raw_folder, 'mnist_all_rotation_normalized_float_test.amat'))\n )\n\n with open(os.path.join(self.processed_folder, self.training_file), 'wb') as f:\n torch.save(training_set, f)\n with open(os.path.join(self.processed_folder, self.test_file), 'wb') as f:\n torch.save(test_set, f)\n\n print('Done!')", "def create_output_folder(output_folder_name: str, finding_labels: list):\n if not os.path.isdir(output_folder_name):\n os.mkdir(output_folder_name)\n for type in ['/train', '/val', '/test']:\n if not os.path.isdir(output_folder_name + type):\n os.mkdir(output_folder_name + type)\n for disease in finding_labels:\n if not os.path.isdir(output_folder_name + type + '/' + disease):\n os.mkdir(output_folder_name + type + '/' + disease)", "def train_classifier_bootstrap(iterations: int = 3, files_per_iteration: int = 50,\n test_partition: float = 0.2) -> None:\n\n classifier = CnnClassifier(device).to(device)\n classifier.load_from_file()\n classifier = classifier.to(device)\n\n full_res_folder = os.path.dirname(__file__) + '/dataset/GTSRB_Negative/bootstrap_full_res/'\n output_folder = os.path.dirname(__file__) + '/dataset/GTSRB_Negative/images/'\n print(\"Searching for new pictures in\", full_res_folder, \"that are classified as street signs\")\n\n jpg_file_names = []\n for root, dirs, files in os.walk(full_res_folder):\n for name in files:\n if name.endswith(\".jpg\"):\n jpg_file_names.append((root, name))\n\n for i in range(1, iterations):\n random.shuffle(jpg_file_names)\n print(\">>>>>>>>>>>>>>>>>>>>>>>>>>> Iteration {}\".format(i))\n dirty = False\n new_files = []\n for root, jpg_file_name in jpg_file_names[:files_per_iteration]:\n print(\"\\tLoading\", jpg_file_name)\n pil_img = Image.open(root + jpg_file_name)\n fa = FrameAnalyzer(device, classifier=classifier, width=pil_img.size[0], height=pil_img.size[1])\n if pil_img is not None:\n rectangles = fa.get_detected_street_signs(pil_img, limit=50)\n print(\"\\t\\t{} patches classified as street signs\".format(len(rectangles)))\n if len(rectangles):\n dirty = True\n for rect in rectangles:\n x1, y1, x2, y2, label_idx, score = rect\n x1, y1, x2, y2 = int(x1), int(y1), int(x2), int(y2)\n fn = '{}_{}_'.format(x1, y1) + jpg_file_name\n # crop and save\n pil_img.crop((x1, y1, x2, y2)).save(output_folder + fn)\n new_files.append(fn)\n\n if dirty:\n print(\"Adding\", len(new_files), \"negative examples\")\n # shuffle and partition into training and test\n random.shuffle(new_files)\n n_test = int(test_partition * 
len(new_files))\n new_files_test = new_files[:n_test]\n new_files_training = new_files[n_test:]\n GTSRBDataset.add_images_to_negative_examples_json(new_files_test, overwrite=False, test=True)\n GTSRBDataset.add_images_to_negative_examples_json(new_files_training, overwrite=False, test=False)\n # train the model\n # epochs = 2 # max(1, int(iterations / 3))\n classifier.train_model(max_epochs=20, max_patience=5)\n classifier.store_to_file()", "def before_process(self,data,labels):\n # JM: if integer labels are given, then create different output\n # directories for each new label\n if all(isinstance(lbl,int) for lbl in labels):\n self.batch_dirs = \\\n [os.path.join(self.output_dir,str(lbl)) for lbl in labels]\n # JM: otherwise create the same output directory for each image\n else:\n self.batch_dirs = [self.output_dir] * len(data)\n\n # create output directories if they don't already exist\n uniques = set(self.batch_dirs)\n for out_dir in uniques:\n if not os.path.exists(out_dir):\n os.makedirs(out_dir)\n\n self.batch_index = 0", "def make_data(_is_train, data, label):\n save_path = os.path.join(os.getcwd(), \"SRCNN\", 'checkpoint')\n if not os.path.exists(save_path):\n os.makedirs(save_path)\n\n if _is_train:\n save_path = os.path.join(save_path, 'train.h5')\n else:\n save_path = os.path.join(save_path, 'test.h5')\n\n # data 和 label 預設類型是 numpy array ,但若建立時內部陣列維度不相等,內部數據將被轉為 dtype=object\n # 導致 h5py 無法儲存: TypeError: Object dtype dtype('O') has no native HDF5 equivalent\n with h5py.File(save_path, 'w') as hf:\n hf.create_dataset('data', data=data)\n hf.create_dataset('label', data=label)", "def create_train_sets(self, proportion_val):\n l_path = os.listdir(self.image_folder_path)\n lr_path = random.sample(l_path, len(l_path))\n val_files = lr_path[: round(proportion_val * len(lr_path))]\n train_files = lr_path[round(proportion_val * len(lr_path)) :]\n delete_files(self.root_name, \"/VOC2021/ImageSets/Main\")\n write_txt(\"train.txt\", self.txt_path, train_files)\n write_txt(\"val.txt\", self.txt_path, val_files)", "def _create_tensor_dataset(self, img_size):\n with tempfile.TemporaryDirectory() as tmpdirname:\n dir_mimic = '/cluster/work/vogtlab/Projects/mimic-cxr/physionet.org/files/mimic-cxr-jpg/2.0.0/files'\n if os.path.exists(dir_mimic):\n # only run test if original data exists\n\n if os.path.exists('/cluster/home/klugh/'):\n tmpdirname = os.path.join(os.path.expandvars('$TMPDIR'), 'test_create_tensor_dataset')\n if not os.path.exists(tmpdirname):\n os.mkdir(tmpdirname)\n dir_out = os.path.expanduser(os.path.join(tmpdirname, 'dir_out'))\n\n dir_base_resized_compressed = os.path.expanduser(os.path.join(tmpdirname))\n self.assertTrue(os.path.exists(tmpdirname))\n\n dir_base_resize = os.path.join(tmpdirname, f'files_small_{img_size[0]}')\n dataset_creator = CreateTensorDataset(dir_base_resize=dir_base_resize, dir_mimic=dir_mimic,\n dir_out=dir_out,\n img_size=img_size,\n dir_base_resized_compressed=dir_base_resized_compressed,\n max_it=10)\n dataset_creator()\n self.assertTrue(os.path.exists(dir_out))\n self.assertTrue(os.path.exists(dir_base_resized_compressed))\n\n assert os.path.exists(os.path.join(dir_base_resized_compressed,\n f'mimic_resized_{img_size[0]}.zip')), \\\n 'dir_resized_compressed {} does not exist \\n {}'.format(\n os.path.join(dir_base_resized_compressed, f'mimic_resized_{img_size[0]}.zip'),\n os.listdir(dir_base_resized_compressed))", "def split_data(train_split, src_dir, train_dir, test_dir, classes):\n for cls in classes:\n # get all dat files of this class\n data = 
get_instances_of_class(cls, src_dir)\n \n # how many of the data points are for training?\n train_count = round(len(data) * train_split / 100)\n \n # randomly choose indexes\n train_indexes = set()\n while len(train_indexes) < train_count:\n train_indexes.add(random.randrange(len(data)))\n \n # move all train_indexes to train_dir, others to test_dir\n COPY = lambda src, dst, filename:\\\n shutil.copy2(\n \"{}/{}\".format(src, data[i]),\n \"{}/{}\".format(dst, data[i])\n )\n \n for i in range(len(data)):\n if i in train_indexes:\n COPY(src_dir, train_dir, data[i])\n else:\n COPY(src_dir, test_dir, data[i])", "def main():\n train_src = read_file(SRC_TRAIN)\n train_tgt = read_file(TRGT_TRAIN)\n val_src = read_file(SRC_VAL)\n val_tgt = read_file(TRGT_VAL)\n # val = read_files(VAL_FILES)\n np.savez(\n DATA_NPZ_NAME, train_src=train_src, train_tgt=train_tgt, val_src=val_src, val_tgt=val_tgt)", "def generate_dataset():\n if not os.path.exists(\"../data/COVID-19/COVID-19.npy\"):\n print(\"Processing Training Data.\")\n training_data = get_training_data('../data/COVID-19/train')\n print(\"Processing Test Data.\")\n test_data = get_training_data('../data/COVID-19/test')\n\n x_train, y_train, x_test, y_test = [], [], [], []\n\n for feature, label in training_data:\n x_train.append(feature)\n y_train.append(label)\n\n for feature, label in test_data:\n x_test.append(feature)\n y_test.append(label)\n\n # Normalize the data\n x_train = np.array(x_train) / 255\n x_test = np.array(x_test) / 255\n\n # resize data for deep learning\n x_train = x_train.reshape(-1, 3, img_size, img_size)\n y_train = np.array(y_train)\n x_test = x_test.reshape(-1, 3, img_size, img_size)\n y_test = np.array(y_test)\n\n # With data augmentation to prevent overfitting and handling the imbalance in dataset\n dataset = {\"x_train\": x_train, \"y_train\": y_train, \"x_test\": x_test, \"y_test\": y_test}\n np.save(\"../data/COVID-19/COVID-19.npy\", dataset)\n else:\n dataset = np.load(\"../data/COVID-19/COVID-19.npy\", allow_pickle=True).item()\n x_train, y_train, x_test, y_test = dataset[\"x_train\"], dataset[\"y_train\"], dataset[\"x_test\"], dataset[\"y_test\"]\n\n x_train_tensor = torch.from_numpy(x_train)\n x_train_tensor = x_train_tensor.type(torch.FloatTensor)\n y_train_tensor = torch.from_numpy(y_train)\n y_train_tensor = y_train_tensor.type(torch.LongTensor)\n x_test_tensor = torch.from_numpy(x_test)\n x_test_tensor = x_test_tensor.type(torch.FloatTensor)\n y_test_tensor = torch.from_numpy(y_test)\n y_test_tensor = y_test_tensor.type(torch.LongTensor)\n\n train_dataset = TensorDataset(x_train_tensor, y_train_tensor)\n test_dataset = TensorDataset(x_test_tensor, y_test_tensor)\n\n return train_dataset, test_dataset", "def build_dataset(self):\n print(\"reading data of images currently , please wait......\")\n x_train, y_train, _ = get_images(self.train_directory)\n x_test, y_test, _ = get_images(self.test_directory)\n x_train, y_train = image_subset(self.num_classes, x_train, y_train)\n x_test, y_test = image_subset(self.num_classes, x_test, y_test)\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n self.x_train = x_train / 255\n self.x_test = x_test / 255\n self.y_train = utils.to_categorical(y_train, self.num_classes)\n self.y_test = utils.to_categorical(y_test, self.num_classes)", "def __init__(self, dataset, width=512, height=512, pictures=10, generate_classes=True, generate_objects=True):\n super().__init__(dataset)\n\n cropper = Cropper(width=width, height=height)\n\n dir_name = 
\"tmp-data-{}x{}-from-{}-pictures\".format(width, height, pictures)\n origins = os.path.join(dir_name, \"origins\")\n classes = os.path.join(dir_name, \"classes\")\n origins_classes_v_join = os.path.join(dir_name, \"origin-classes-v-join\")\n objects = os.path.join(dir_name, \"objects\")\n origins_objects_v_join = os.path.join(dir_name, \"origin-objects-v-join\")\n\n if not os.path.exists(origins):\n os.makedirs(origins)\n\n trains = self.get_iterable_trains()\n vals = self.get_iterable_evals()\n\n selection_set = []\n for _, val in enumerate(trains):\n origin, class_v, object_v = self.get_train_triple(val)\n selection_set.append((origin, class_v, object_v))\n for _, val in enumerate(vals):\n origin, class_v, object_v = self.get_val_triple(val)\n selection_set.append((origin, class_v, object_v))\n\n final_set = random.sample(selection_set, pictures)\n\n if generate_classes:\n if not os.path.exists(classes):\n os.makedirs(classes)\n if not os.path.exists(origins_classes_v_join):\n os.makedirs(origins_classes_v_join)\n\n if generate_objects:\n if not os.path.exists(objects):\n os.makedirs(objects)\n if not os.path.exists(origins_objects_v_join):\n os.makedirs(origins_objects_v_join)\n\n for _, (origin, class_v, object_v) in enumerate(final_set):\n print(\"Processing {}, {}, {}\".format(origin, class_v, object_v))\n cropper.set_imgs(origin, class_v, object_v, add_randomly=5)\n counter = 1\n while not cropper.is_finished:\n origin_i, class_i, object_i = cropper.next_crop()\n # Check that classes are involved\n finded = False\n for l in class_i:\n for pix in l:\n for c in pix:\n if c != 0:\n finded = True\n break\n if finded:\n break\n if finded:\n break\n if not finded:\n continue\n path = \"{}-{}.png\".format(get_origin_name(origin), counter)\n # print(\"Writing: {}\".format(os.path.join(origins, path)))\n cv2.imwrite(os.path.join(origins, path), origin_i)\n if generate_classes:\n cv2.imwrite(os.path.join(classes, path), class_i)\n cv2.imwrite(os.path.join(origins_classes_v_join, path), cv2.hconcat([origin_i, class_i]))\n if generate_objects:\n cv2.imwrite(os.path.join(objects, path), object_i)\n cv2.imwrite(os.path.join(origins_objects_v_join, path), cv2.hconcat([origin_i, object_i]))\n counter += 1\n\n print(\"Generating of {}-pictures-subset done. 
You find it in: {}\".format(pictures, dir_name))", "def setup(self):\n\n folder_name, file_name, url, md5 = self.resource\n dataset_folder = os.path.join(self.data_root, folder_name)\n if not os.path.exists(dataset_folder):\n sh_utils.download_and_extract_archive(url, dataset_folder, md5, file_name)\n\n test_transform = tv_transforms.Compose(\n [\n tv_transforms.ToTensor(),\n tv_transforms.Lambda(lambda x: x.permute(1, 2, 0)),\n ]\n )\n\n dataset_out = tv_datasets.ImageFolder(\n root=dataset_folder, transform=test_transform\n )\n self.images_only_dataset_out = sh_data_torch.IndexedTorchDataset(\n sh_data_torch.ImagesOnlyTorchDataset(dataset_out)\n )", "def fixture_image_data(tmp_path_factory, request):\n # Make root dir\n root = tmp_path_factory.mktemp(\"data\")\n\n # Set params\n num_images = request.param\n\n # Create image files\n paths = [root / Path(f\"{idx}.png\") for idx in range(num_images)]\n dimensions = [(idx % 10 + 1, (10 - idx) % 10 + 1) for idx in range(num_images)]\n for path, dim in zip(paths, dimensions):\n image = Image.new(mode=\"RGB\", size=dim)\n if not path.parent.exists():\n path.parent.mkdir(parents=True)\n with open(path, \"wb\") as img_file:\n image.save(img_file)\n return root", "def prepare_test_data(args):\n image_dir = args.test_image_dir\n\n files = os.listdir(image_dir)\n files = [f for f in files if f.lower().endswith('.png')]\n\n img_ids = list(range(len(files)))\n img_files = []\n img_heights = []\n img_widths = []\n \n for f in files:\n img_path = os.path.join(image_dir, f)\n img_files.append(img_path)\n img = cv2.imread(img_path)\n img_heights.append(img.shape[0]) \n img_widths.append(img.shape[1]) \n\n print(\"Building the testing dataset...\")\n dataset = DataSet(img_ids, img_files, img_heights, img_widths)\n print(\"Dataset built.\")\n return dataset", "def create_dataset(data_path, batch_size=32, repeat_size=1, num_parallel_workers=1):\n # define dataset\n mnist_ds = ds.MnistDataset(data_path)\n\n # define operation parameters\n resize_height, resize_width = 32, 32\n rescale = 1.0 / 255.0\n shift = 0.0\n rescale_nml = 1 / 0.3081\n shift_nml = -1 * 0.1307 / 0.3081\n\n # define map operations\n resize_op = CV.Resize(\n (resize_height, resize_width), interpolation=Inter.LINEAR\n ) # Resize images to (32, 32)\n rescale_nml_op = CV.Rescale(rescale_nml, shift_nml) # normalize images\n rescale_op = CV.Rescale(rescale, shift) # rescale images\n hwc2chw_op = (\n CV.HWC2CHW()\n ) # change shape from (height, width, channel) to (channel, height, width) to fit network.\n type_cast_op = C.TypeCast(\n mstype.int32\n ) # change data type of label to int32 to fit network\n\n # apply map operations on images\n mnist_ds = mnist_ds.map(\n input_columns=\"label\",\n operations=type_cast_op,\n num_parallel_workers=num_parallel_workers,\n )\n mnist_ds = mnist_ds.map(\n input_columns=\"image\",\n operations=resize_op,\n num_parallel_workers=num_parallel_workers,\n )\n mnist_ds = mnist_ds.map(\n input_columns=\"image\",\n operations=rescale_op,\n num_parallel_workers=num_parallel_workers,\n )\n mnist_ds = mnist_ds.map(\n input_columns=\"image\",\n operations=rescale_nml_op,\n num_parallel_workers=num_parallel_workers,\n )\n mnist_ds = mnist_ds.map(\n input_columns=\"image\",\n operations=hwc2chw_op,\n num_parallel_workers=num_parallel_workers,\n )\n\n # apply DatasetOps\n buffer_size = 10000\n mnist_ds = mnist_ds.shuffle(\n buffer_size=buffer_size\n ) # 10000 as in LeNet train_lenet script\n mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)\n mnist_ds = 
mnist_ds.repeat(repeat_size)\n\n return mnist_ds", "def main(src_dir, dst_dir='pleiades', print_cfg_ipol=False):\n for dataset in os.listdir(src_dir):\n dataset_abspath = os.path.join(src_dir, dataset)\n if os.path.isdir(dataset_abspath):\n if 'dataset_1' in os.listdir(dataset_abspath): # the dataset has subdatasets (multidate)\n for subdataset in os.listdir(dataset_abspath):\n if os.path.isdir(os.path.join(dataset_abspath, subdataset)):\n l = list_images_in_dataset(os.path.join(dataset_abspath, subdataset))\n mkdir_p(os.path.join(dst_dir, dataset, subdataset))\n create_links(l, os.path.join(dst_dir, dataset, subdataset), print_cfg_ipol)\n else: # the dataset doesn't have subdatasets (monodate)\n l = list_images_in_dataset(dataset_abspath)\n mkdir_p(os.path.join(dst_dir, dataset))\n create_links(l, os.path.join(dst_dir, dataset), print_cfg_ipol)", "def CreateFolders(self,block):\n \n if self.mode=='first_layer' or self.mode=='greedy':\n name = 'block_'+str(block)\n if not os.path.exists(os.path.join(self.path_save,name)):\n os.makedirs(os.path.join(self.path_save,name,'training'))\n elif self.mode=='all_layers':\n name = 'block_'+str(0)+'_'+str(self.nb_blocks-1)\n if not os.path.exists(os.path.join(self.path_save,name)):\n os.makedirs(os.path.join(self.path_save,name,'training')) \n elif self.mode=='lpp':\n name = 'lpp'\n if not os.path.exists(os.path.join(self.path_save,name)):\n os.makedirs(os.path.join(self.path_save,name,'training')) \n if self.mode!='test':\n folder = os.path.join(self.path_save,'ImagesLastBlock')\n if not os.path.exists(folder):\n subfolders = ['train','val']\n subsubfolders = ['trueimage','blurredimage','trueblur','noise_std','mk_vec','diagSigma_vec','newmh_vec','newSigmah_vec','Gammap_vec','LAMBDAk_vec']\n paths = [os.path.join(folder, sub, subsub) for sub in subfolders for subsub in subsubfolders]\n for path in paths:\n os.makedirs(path)", "def __init__(self, folder_path, image_size=(320,240), batch_size=4, mode='seg', target_classes=[\"Good Crypts\"], filter_classes=[], augment=True):\n print(\"Initialising data generator\")\n # Making the image ids list\n self.folder_path = folder_path\n image_paths = [f for f in os.listdir(folder_path) if f.endswith(\".jpg\")]\n self.image_ids = [f.replace('.jpg', '') for f in image_paths]\n self.orig_image_ids = self.image_ids.copy()\n self.filter_classes = filter_classes\n self.filter_data()\n\n self.image_size = image_size\n self.batch_size = batch_size\n self.mode = mode\n self.target_classes = target_classes\n self.augment = augment\n print(\"Image count in {} path: {}\".format(self.folder_path,len(self.image_ids)))\n self.on_epoch_end()", "def main():\n if len(sys.argv) < 3:\n message = \"\"\"\n Usage: python generate_dataset.py <dataset_name> <number of files> <size of each file in bytes>\n \"\"\"\n print(message)\n sys.exit(0)\n dataset_name = sys.argv[1]\n file_number = int(sys.argv[2])\n file_size = int(sys.argv[3])\n\n if not os.path.exists(dataset_name):\n os.makedirs(dataset_name)\n\n for i in range(file_number):\n tmp_file = open('./' + dataset_name + '/' + dataset_name + '.file' + str(i), 'w+')\n tmp_file.write(os.urandom(file_size))\n tmp_file.close()", "def main(src, dst, size):\r\n\ttrain_dst = os.path.join(dst, 'train')\r\n\ttest_dst = os.path.join(dst, 'test')\r\n\tlabel_paths = [os.path.join(src, 'labels', i) for i in os.listdir(os.path.join(src, 'labels'))]\r\n\timage_folders = [os.path.join(src, i) for i in os.listdir(src) if i != \"labels\"]\r\n\r\n\timage_paths = {}\r\n\tfor folder in 
image_folders:\r\n\t\timages = os.listdir(folder)\r\n\t\timage_paths[os.path.basename(folder)] = [os.path.join(folder, i) for i in images]\r\n\tif DEBUG:\r\n\t\tprint(\"image folders are : {}\".format(image_paths.keys()))\r\n\r\n\t# for each image assign its xyz coordinate\r\n\targs = []\r\n\r\n\ttrain_labels = [\"B1\", \"B2\", \"B3\", \"B5\", \"B6\"]\r\n\ttest_labels = [\"B4\"]\r\n\r\n\tfor l_p in label_paths:\r\n\t\tfolder = os.path.basename(l_p).split('_')[0]\r\n\t\tcamera = os.path.basename(l_p).split('_')[-1][0:-4]\r\n\r\n\t\timages = image_paths[folder]\r\n\t\tlabels = get_xyz_coord(l_p)\r\n\t\timages = list(filter(lambda x: os.path.basename(x).split(\"_\")[0] == camera, images))\r\n\t\tif DEBUG:\r\n\t\t\tprint(l_p, camera)\r\n\t\tfor i in images:\r\n\t\t\tindex = int(os.path.basename(i).split('_')[-1][0:-4])\r\n\t\t\tif os.path.basename(l_p)[0:2] in train_labels:\r\n\t\t\t\tdestination = os.path.join(train_dst, folder, os.path.basename(i))\r\n\t\t\telif os.path.basename(l_p)[0:2] in test_labels:\r\n\t\t\t\tdestination = os.path.join(test_dst, folder, os.path.basename(i))\r\n\t\t\telse:\r\n\t\t\t\traise ValueError\r\n\t\t\targs.append([i, destination, reorder(labels[index]), size])\r\n\r\n\tp = Pool()\r\n\tresults = list(tqdm.tqdm(p.imap(image_process, args), ascii=True, total=len(args)))\r\n\tp.close()\r\n\tp.join()\r\n\r\n\tannotations_train = edict()\r\n\tannotations_test = edict()\r\n\tfor r in results:\r\n\t\tdestination, uv_coord, depth, xyz, k = r\r\n\t\tfolder = os.path.basename(os.path.dirname(destination))\r\n\t\timage = os.path.basename(destination)\r\n\r\n\t\tif folder[0:2] in train_labels:\r\n\t\t\tannotations = annotations_train\r\n\t\telif folder[0:2] in test_labels:\r\n\t\t\tannotations = annotations_test\r\n\t\telse:\r\n\t\t\traise ValueError\r\n\r\n\t\tif folder not in annotations:\r\n\t\t\tannotations[folder] = edict()\r\n\t\t\tannotations[folder][image] = edict()\r\n\t\telse:\r\n\t\t\tannotations[folder][image] = edict()\r\n\t\tannotations[folder][image].uv_coord = uv_coord\r\n\t\tannotations[folder][image].k = k\r\n\t\tannotations[folder][image].depth = depth\r\n\t\tannotations[folder][image].xyz = xyz\r\n\r\n\twith open(os.path.join(train_dst, \"annotation.pickle\"), \"wb\") as handle:\r\n\t\tpickle.dump(annotations_train, handle)\r\n\r\n\twith open(os.path.join(test_dst, \"annotation.pickle\"), \"wb\") as handle:\r\n\t\tpickle.dump(annotations_test, handle)", "def create_dataset(data_path, batch_size=32, repeat_size=1, num_parallel_workers=1):\n # define dataset\n mnist_ds = ds.MnistDataset(data_path, num_samples=batch_size * 10)\n\n resize_height, resize_width = 32, 32\n rescale = 1.0 / 255.0\n rescale_nml = 1 / 0.3081\n shift_nml = -1 * 0.1307 / 0.3081\n\n # define map operations\n resize_op = CV.Resize((resize_height, resize_width), interpolation=Inter.LINEAR) # Bilinear mode\n rescale_nml_op = CV.Rescale(rescale_nml, shift_nml)\n rescale_op = CV.Rescale(rescale, shift=0.0)\n hwc2chw_op = CV.HWC2CHW()\n type_cast_op = C.TypeCast(mstype.int32)\n\n # apply map operations on images\n mnist_ds = mnist_ds.map(operations=type_cast_op, input_columns=\"label\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=resize_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=rescale_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = mnist_ds.map(operations=rescale_nml_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n mnist_ds = 
mnist_ds.map(operations=hwc2chw_op, input_columns=\"image\", num_parallel_workers=num_parallel_workers)\n\n # apply DatasetOps\n mnist_ds = mnist_ds.batch(batch_size, drop_remainder=True)\n mnist_ds = mnist_ds.repeat(repeat_size)\n\n return mnist_ds", "def init_data(dataset_config: dict):\n # train and dev will be in random order, test may be ordered according to labels\n if dataset_config[\"name\"] == \"CoLA\":\n train, dev, test, num_classes = load_cola(dataset_config)\n elif dataset_config[\"name\"] == \"AGNews\":\n train, dev, test, num_classes = load_ag_news(dataset_config)\n elif dataset_config[\"name\"] == \"DBPedia\":\n train, dev, test, num_classes = load_dbpedia(dataset_config)\n elif dataset_config[\"name\"] == \"YRF\":\n train, dev, test, num_classes = load_yrf(dataset_config)\n else:\n raise NameError(f\"Dataset {dataset_config['name']} not implemented.\")\n # etc.\n\n # shrink size if debugging\n if dataset_config[\"debug\"]:\n # choose a random subset using huggingface select function\n train = train.select(random.sample(range(len(train)), k=200))\n dev = dev.select(random.sample(range(len(dev)), k=40))\n test = test.select(random.sample(range(len(test)), k=200))\n\n # create class imbalance\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"pool_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"pool_balance\"] == \"imbalanced\":\n train = train.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"pool_balance = {dataset_config['pool_balance']} not allowed\")\n\n if dataset_config[\"dev_balance\"] == \"balanced\":\n pass\n elif dataset_config[\"dev_balance\"] == \"imbalanced\":\n dev = dev.filter(lambda example: create_imbalanced_dataset(example, dataset_config[\"imbalance_prop\"], dataset_config['imbalance_cls']))\n else:\n NameError(f\"dev_balance = {dataset_config['dev_balance']} not allowed\")\n\n # get seed labelled pool indices (using the same seed data every time)\n random.seed(dataset_config[\"seed\"])\n if dataset_config[\"seed_balance\"] == \"balanced\":\n # this is random (will have some variance vs pool)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"]\n )\n elif dataset_config[\"seed_balance\"] == \"stratified\":\n # this is the same as the underlying train set (which may be unbalanced)\n indices = list(range(len(train)))\n unlabelled_pool_idx, labelled_pool_idx = split(\n indices,\n random_state=dataset_config[\"seed\"],\n test_size=dataset_config[\"seed_size\"],\n stratify=train['label']\n )\n elif dataset_config[\"seed_balance\"] == \"imbalanced\":\n # artificially sample an imbalanced seed set from the pool\n unlabelled_pool_idx, labelled_pool_idx = create_imbalanced_seed(\n train,\n num_classes,\n dataset_config[\"seed_size\"],\n dataset_config['imbalance_prop'],\n dataset_config['imbalance_cls']\n )\n else:\n raise NameError(f\"seed_balance = {dataset_config['seed_balance']} not allowed\")\n\n return train, dev, test, num_classes, labelled_pool_idx, unlabelled_pool_idx", "def _create_data_directory(self):\n self.src_data_dir.mkdir(exist_ok=True, parents=True)", "def cli(suffix, partspec, target_person_size, crop=513, only_missing=False, up3d_fp=UP3D_FP): # pylint: disable=too-many-locals, too-many-arguments\n np.random.seed(1)\n LOGGER.info(\"Creating segmentation dataset for %s classes with target \"\n \"person 
size %f and suffix `%s`.\",\n partspec, target_person_size, suffix)\n assert ' ' not in suffix\n dset_fromroot = path.join(partspec, str(target_person_size), suffix)\n dset_fp = path.join(DSET_ROOT_FP, dset_fromroot)\n if path.exists(dset_fp):\n if not only_missing:\n if not click.confirm(\"Dataset folder exists: `%s`! Continue?\" % (dset_fp)):\n return\n else:\n os.makedirs(dset_fp)\n LOGGER.info(\"Creating list files...\")\n list_fp = path.join(path.dirname(__file__), '..', 'training', 'list')\n if not path.exists(list_fp):\n os.makedirs(list_fp)\n train_list_f = open(path.join(list_fp, 'train_%s_%d_%s.txt' % (\n partspec, target_person_size, suffix)), 'w')\n val_list_f = open(path.join(list_fp, 'val_%s_%d_%s.txt' % (\n partspec, target_person_size, suffix)), 'w')\n test_list_f = open(path.join(list_fp, 'test_%s_%d_%s.txt' % (\n partspec, target_person_size, suffix)), 'w')\n with open(path.join(up3d_fp, 'train.txt'), 'r') as f:\n train_spec = [line.strip() for line in f.readlines()]\n with open(path.join(up3d_fp, 'val.txt'), 'r') as f:\n val_spec = [line.strip() for line in f.readlines()]\n with open(path.join(up3d_fp, 'test.txt'), 'r') as f:\n test_spec = [line.strip() for line in f.readlines()]\n\n LOGGER.info(\"Processing...\")\n add_dataset(\n dset_fp,\n dset_fromroot,\n up3d_fp,\n train_list_f, val_list_f, test_list_f,\n train_spec, val_spec, test_spec,\n target_person_size, partspec,\n crop, 0,\n only_missing=only_missing)\n train_list_f.close()\n val_list_f.close()\n test_list_f.close()\n LOGGER.info(\"Creating trainval file...\")\n trainval_list_f = open(path.join(list_fp, 'trainval_%s_%d_%s.txt' % (\n partspec, target_person_size, suffix)), 'w')\n train_list_f = open(path.join(list_fp, 'train_%s_%d_%s.txt' % (\n partspec, target_person_size, suffix)), 'r')\n val_list_f = open(path.join(list_fp, 'val_%s_%d_%s.txt' % (\n partspec, target_person_size, suffix)), 'r')\n for line in train_list_f:\n trainval_list_f.write(line)\n for line in val_list_f:\n trainval_list_f.write(line)\n trainval_list_f.close()\n train_list_f.close()\n val_list_f.close()\n LOGGER.info(\"Done.\")" ]
[ "0.6529704", "0.6481513", "0.6435279", "0.6418067", "0.63397163", "0.6227401", "0.62035155", "0.61983734", "0.6165103", "0.61225355", "0.612228", "0.60964", "0.60848254", "0.6073827", "0.60563177", "0.6047265", "0.60234", "0.6019958", "0.60174394", "0.6004536", "0.5983722", "0.5966851", "0.59544486", "0.59262556", "0.5918337", "0.591189", "0.58968794", "0.58944523", "0.58807254", "0.58793026", "0.5871399", "0.58556706", "0.5851229", "0.58354205", "0.58233756", "0.58172244", "0.57947797", "0.5782247", "0.5777168", "0.5774015", "0.5772425", "0.5770133", "0.5766006", "0.5765337", "0.5765043", "0.57493174", "0.57476", "0.57440317", "0.57390124", "0.5719708", "0.5712652", "0.5707615", "0.5693829", "0.5691468", "0.5671326", "0.5667025", "0.56623685", "0.56515706", "0.5650346", "0.5646674", "0.5635191", "0.56300783", "0.56294036", "0.5623636", "0.5620676", "0.56172675", "0.5613995", "0.5613662", "0.5607737", "0.5605935", "0.560168", "0.5601285", "0.56000787", "0.55907685", "0.55793387", "0.5576717", "0.55765754", "0.55756813", "0.5573758", "0.5572673", "0.5571896", "0.5570903", "0.55697584", "0.55687547", "0.556801", "0.55678827", "0.55598503", "0.55552244", "0.5552731", "0.5547544", "0.55471724", "0.55348134", "0.5534614", "0.55288994", "0.55208385", "0.55152154", "0.551271", "0.5511608", "0.5510444", "0.55075467" ]
0.7347322
0
Given a wav file, use Praat to return a dictionary containing pitch (in Hz) at each millisecond.
def praat_analyze_pitch(audio_file):
    praatpath = path.abspath('Praat.app/Contents/MacOS/Praat')  # locate Praat executable
    pl = PraatLoader(praatpath=praatpath)  # create instance of PraatLoader object
    praat_output = pl.run_script('pitch.praat', audio_file)  # run pitch script in Praat
    pitch_data = pl.read_praat_out(praat_output)  # turn Praat's output into Python dict
    return pitch_data
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_pitch_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE):\n if os.path.isfile(filename) is False:\n raise Exception('File not found with filename = %s' % filename)\n\n print(\"====> reading pitch from sound file\")\n win_s = 4096 // DOWN_SAMPLE # fft size\n hop_s = 512 // DOWN_SAMPLE # hop size\n\n s = source(filename, samplerate, hop_s)\n samplerate = s.samplerate\n\n tolerance = 0.8\n\n pitch_o = pitch(\"yin\", win_s, hop_s, samplerate)\n pitch_o.set_unit(\"midi\")\n pitch_o.set_tolerance(tolerance)\n\n result = []\n\n # total number of frames read\n total_frames = 0\n while True:\n samples, read = s()\n # the pitch value is not rounded and many zeroes occur\n that_pitch = pitch_o(samples)[0]\n confidence = pitch_o.get_confidence()\n result.append(dict(time=total_frames / float(samplerate), pitch=that_pitch, confidence=confidence))\n total_frames += read\n if read < hop_s:\n break\n\n group_result_with_log_density = compute_density_from_pitch_result(result)\n density_level_list = compute_density_level(group_result_with_log_density, result[len(result) - 1]['time'])\n print(\"====> density level list length %s\" % len(density_level_list))\n proportion_list = get_emphasis_start_times(group_result_with_log_density, result[len(result) - 1]['time'])\n print(\"====> emphasis proportion list length = %d\" % len(proportion_list))\n return dict(pitch_result=result, emphasis_proportion_list=proportion_list, density_level_list=density_level_list)", "def load_wav(wav_file):\n rate, data = wavfile.read(wav_file)\n return rate, data", "def filesample(filename):\n sampling_rate, samples = wavfile.read(filename)\n times = np.arange(len(samples)) / sampling_rate\n return samples, sampling_rate", "def fingerprint_wave(file):\n\n\twav = wave.open(file, 'rb')\n\tif wav.getnchannels() == 1:\n\t\tstereo = 0\n\telif wav.getnchannels() == 2:\n\t\tstereo = 1\n\telse:\n\t\twav.close()\n\t\traise Exception(\"Only 1 or 2 channel WAV files supported\")\n\n\twidth = wav.getsampwidth()\n\tif width != 2:\n\t\twav.close()\n\t\traise Exception(\"Only 16-bit sample widths supported\")\n\n\tsrate = wav.getframerate()\t\n\n\tbuffer = wav.readframes(wav.getnframes())\n\twav.close()\n\n\tms = (len(buffer) / 2)/(srate/1000)\n\tif stereo == 1:\n\t\tms = ms / 2\n\t\n\tfprint = libofa.create_print(buffer, libofa.BYTE_ORDER_LE, len(buffer) / 2,\n\t\t\t\t\t\t\t\tsrate, stereo);\n\n\treturn (fprint, ms)", "def load_wav(file_path):\n sample_rate, data = wavfile.read(file_path)\n return data, sample_rate", "def analyzeWAV(inputFile):\n data, fs, nbits = audiolab.wavread(inputFile)\n samplingRate = fs\n return [data, samplingRate]", "def load_wav_file(file_path: str):\n rate, data = wavfile.read(file_path)\n return rate, data", "def read_wav(wav_file):\n w = wave.open(wav_file)\n n = 60 * 10000\n if w.getnframes() < n * 2:\n raise ValueError('Le fichier est trop court')\n frames = w.readframes(n)\n wav_data1 = struct.unpack('%dh' % n, frames)\n frames = w.readframes(n)\n wav_data2 = struct.unpack('%dh' % n, frames)\n return wav_data1, wav_data2", "def parse(cls, file: Keyvalues) -> Dict[str, 'Sound']:\n sounds = {}\n for snd_prop in file:\n volume = split_float(\n snd_prop, 'volume',\n VOLUME.__getitem__,\n 1.0,\n )\n pitch = split_float(\n snd_prop, 'pitch',\n Pitch.__getitem__,\n 100.0,\n )\n\n if 'soundlevel' in snd_prop:\n level = split_float(\n snd_prop, 'soundlevel',\n Level.__getitem__,\n Level.SNDLVL_NORM,\n )\n elif 'attenuation' in snd_prop:\n atten_min, atten_max = split_float(\n snd_prop, 
'attenuation',\n ATTENUATION.__getitem__,\n ATTENUATION['ATTN_IDLE'],\n )\n # Convert to a soundlevel.\n # See source_sdk/public/soundflags.h:ATTN_TO_SNDLVL()\n level = (\n (50.0 + 20.0 / atten_min) if atten_min else 0.0,\n (50.0 + 20.0 / atten_max) if atten_max else 0.0,\n )\n else:\n level = (Level.SNDLVL_NORM, Level.SNDLVL_NORM)\n\n # Either 1 \"wave\", or multiple in \"rndwave\".\n wavs: List[str] = []\n for prop in snd_prop:\n if prop.name == 'wave':\n wavs.append(prop.value)\n elif prop.name == 'rndwave':\n for subprop in prop:\n wavs.append(subprop.value)\n\n channel_str = snd_prop['channel', 'CHAN_AUTO'].upper()\n channel: Union[int, Channel]\n if channel_str.startswith('CHAN_'):\n channel = Channel(channel_str)\n else:\n channel = int(channel_str)\n\n sound_version = snd_prop.int('soundentry_version', 1)\n\n if 'operator_stacks' in snd_prop:\n if sound_version == 1:\n raise ValueError(\n 'Operator stacks used with version '\n f'less than 2 in \"{snd_prop.real_name}\"!'\n )\n\n start_stack, update_stack, stop_stack = (\n Keyvalues(stack_name, [\n prop.copy()\n for prop in\n snd_prop.find_children('operator_stacks', stack_name)\n ])\n for stack_name in\n ['start_stack', 'update_stack', 'stop_stack']\n )\n else:\n start_stack, update_stack, stop_stack = [None, None, None]\n\n sounds[snd_prop.name] = Sound(\n snd_prop.real_name,\n wavs,\n volume,\n channel,\n level,\n pitch,\n start_stack,\n update_stack,\n stop_stack,\n sound_version == 2,\n )\n return sounds", "def load_and_get_stats(filename):\n\n import scipy.io.wavfile as siow\n sampling_rate, amplitude_vector = siow.read(filename)\n\n wav_length = amplitude_vector.shape[0] / sampling_rate\n\n return sampling_rate, amplitude_vector, wav_length", "def read_wav(wav_file):\n w = wave.open(wav_file)\n n = 60 * 10000\n if w.getnframes() < n * 2:\n raise ValueError('Wave file too short')\n frames = w.readframes(n)\n wav_data1 = struct.unpack('%dh' % n, frames)\n frames = w.readframes(n)\n wav_data2 = struct.unpack('%dh' % n, frames)\n return wav_data1, wav_data2", "def wav_to_raw(path, log=False):\n rate, data = wavfile.read(path)\n if log:\n m, s = divmod(float(len(data))/rate, 60)\n h, m = divmod(m, 60)\n logging.info(\"Original recording length: %d h %d m %d s\" % (h, m, s))\n try:\n if data.shape[1] == 2:\n # If stereo (2-channel), take the average of the two channels.\n data = 0.5 * (data[:, 0] + data[:, 1])\n if log:\n logging.info('Stereo audio')\n except IndexError:\n if log:\n logging.info('Mono audio')\n return rate, data", "def inputwav(filename):\n data, sr = sf.read(filename)\n print('Decoding \"'+filename+'\"...')\n print('Sample rate is '+str(sr)+'...')\n try:\n ch=len(data[0,])\n except:\n ch=1\n print('File contains '+str(ch)+' audio channel(s)...')\n #Reshape the data so other functions can interpret the array if mono.\n #basically transposing the data\n if ch==1:\n data=data.reshape(-1,1)\n n=len(data)\n #This prevents log(data) producing nan when data is 0\n data[np.where(data==0)]=0.00001\n #convert to dB\n data_dB=20*np.log10(abs(data))\n return n, data,data_dB,sr, ch", "def read_wave(path):\n with contextlib.closing(wave.open(path, 'rb')) as wf:\n num_channels = wf.getnchannels()\n assert num_channels == 1\n sample_width = wf.getsampwidth()\n assert sample_width == 2\n sample_rate = wf.getframerate()\n assert sample_rate in (8000, 16000, 32000)\n pcm_data = wf.readframes(wf.getnframes())\n return pcm_data, sample_rate", "def read_wav_file(wave_file):\n return wavfile.read(wave_file)", "def read(filename):\n\n 
fileName, fileExtension = os.path.splitext(filename)\n wav_filename = filename\n rate, data = scipy.io.wavfile.read(str(wav_filename)) # the data is read in its native format\n if data.dtype =='int16':\n data = numpy.cast['float'](data)\n return [rate,data]", "def load_wav(file_name):\n fs, signal = wavfile.read(file_name)\n signal = np.float32(signal) / (2**(16)/2-1)\n return fs, signal", "def read_wave(f):\n # w will be an object of type wave.Wav_read.\n file = file_utils.open_or_fd(f, encoding=None)\n wav = wavio.read(file)\n # see https://github.com/WarrenWeckesser/wavio/blob/master/wavio.py for\n # format of `wav`\n\n # we want data as (num_channels, num_samples).. this is the\n # format that seems most compatible with convolutional code and\n # resampling.\n data = wav.data.swapaxes(0, 1)\n if data.dtype == np.int16:\n data = data.astype(np.float32) * (1.0 / 2**15)\n elif data.dtype == np.int24:\n data = data.astype(np.float32) * (1.0 / 2**23)\n else:\n if data.dtype != np.float32:\n raise RuntimeError(\"Array returned from wavio.read had \"\n \"unexpected dtype \".format(data.dtype))\n return (data, float(wav.rate))", "def get_wav_info(file_name):\n wr = wave.open(file_name, 'r')\n sample_width = wr.getsampwidth()\n frame_rate = wr.getframerate()\n num_frames = wr.getnframes()\n n_channels = wr.getnchannels()\n s = \"sample width: {} bytes\\n\".format(sample_width) + \\\n \"frame rate: {} Hz\\n\".format(frame_rate) + \\\n \"num frames: {}\\n\".format(num_frames) + \\\n \"track length: {} s\\n\".format(num_frames / frame_rate) + \\\n \"num channels: {}\\n\".format(n_channels)\n\n return s", "def wav_to_prosodic(path, sr=16000, offset=10):\n sound = parselmouth.Sound(path)\n pitch = sound.to_pitch() #timestep, pitch_floor, pitch_ceiling\n intensity = sound.to_intensity()\n\n features = []\n\n max_time = sound.get_total_duration()\n\n for time in np.arange(0, max_time, 0.001):\n f0 = pitch.get_value_at_time(time)\n f0_nan = 0\n if np.isnan(f0):\n f0 = 0\n f0_nan = 1\n int_db = intensity.get_value(time)\n if np.isnan(int_db):\n int_db = 0\n\n features.append([f0, f0_nan, int_db])\n\n array_feats = np.array(features).T\n\n print(\"SHAPE OF THE FEATURES:\", array_feats.shape)\n assert(not np.any(np.isnan(array_feats)))\n\n return array_feats, max_time", "def wavread(fname):\n fh = wave.open(fname,'rb')\n (nchannels, sampwidth, framerate, nframes, comptype, compname) = fh.getparams()\n if sampwidth == 2:\n frames = fh.readframes(nframes * nchannels)\n dn = struct.unpack_from('%dh' % nframes*nchannels, frames)\n if nchannels > 1:\n out = np.array([dn[i::nchannels] for i in range(nchannels)])/float(2**15)\n else:\n out = np.array(dn)/float(2**15)\n else:\n print('not a 16 bit wav-file')\n out = [0]\n fh.close()\n return (out,framerate)", "def from_wav(cls, fps):\n fpi = iter(fps)\n fs, data = wavfile.read(next(fpi))\n hlist = [data] + [wavfile.read(fp)[1] for fp in fpi]\n\n h = np.array(hlist)\n if data.dtype in [np.uint8, np.int16, np.int32]:\n lim_orig = (np.iinfo(data.dtype).min, np.iinfo(data.dtype).max)\n lim_new = (-1.0, 1.0)\n h = _rescale(h, lim_orig, lim_new).astype(np.double)\n\n return cls.from_time(fs, h)", "def tone_to_freq(tone):\n return math.pow(2, (tone - 69.0) / 12.0) * 440.0", "def sp_audio_pipeline(wav):\n sig = sb.dataio.dataio.read_audio(wav)\n sig = sig.unsqueeze(0)\n sig = hparams[\"speed_perturb\"](sig)\n sig = sig.squeeze(0)\n return sig", "def load_wav(wav_filepath):\n wv, _ = librosa.load(wav_filepath, sr=44100, mono=False) \n return wv", "def 
wavread(filename):\n\n\tif (os.path.isfile(filename) == False): # raise error if wrong input file\n\t\traise ValueError(\"Input file is wrong\")\n\n\tfs, x = read(filename)\n\n\tif (len(x.shape) !=1): # raise error if more than one channel\n x = np.mean(x,axis = 1)\n\t\tprint \"Audio file is stereo, converting to mono\"\n\n\t#scale down and convert audio into floating point number in range of -1 to 1\n\tx = np.float32(x)/norm_fact[x.dtype.name]\n\treturn fs, x", "def read_note_from_sound_file(filename: str, samplerate: int = DEFAULT_SAMPLE_RATE):\n print(\"====> reading notes from sound file\")\n win_s = 512 // DOWN_SAMPLE # fft size\n hop_s = 256 // DOWN_SAMPLE # hop size\n # adjust sample rate\n s = source(filename, samplerate, hop_s)\n samplerate = s.samplerate\n notes_o = notes(\"default\", win_s, hop_s, samplerate)\n\n result = []\n total_frames = 0\n while True:\n samples, read = s()\n new_note = notes_o(samples)\n # note too high considered as noise\n if new_note[0] != 0 and new_note[0] <= 120:\n note_klass = Note(time=total_frames / float(samplerate), pitch=new_note[0], volume=new_note[1] - 20,\n duration=new_note[2])\n result.append(note_klass)\n total_frames += read\n if read < hop_s:\n break\n\n return result", "def _read_pha(file):\n with fits.open(file) as hdul:\n data = hdul[1].data\n header_for_livetime = hdul[0].header\n\n return data['channel'], data['counts'], header_for_livetime['LIVETIME']", "def audioRead(path):\n data, samplerate = sf.read(path)\n frames = data.shape[0]\n channels = len(data.shape)\n duration = 1/samplerate*frames\n return data, samplerate, path, duration, frames, channels", "def get_metadata(my_path):\n with wave.open(my_path, \"rb\") as wave_file:\n frame_rate = wave_file.getframerate()\n channels = wave_file.getnchannels()\n my_format = pyaudio.get_format_from_width(wave_file.getsampwidth())\n return str(frame_rate), str(channels), str(my_format)", "def get_line_wavelengths():\n line_wavelengths = OrderedDict() ; line_ratios = OrderedDict()\n \n line_wavelengths['PaB'] = [12821]\n line_ratios['PaB'] = [1.]\n line_wavelengths['Ha'] = [6564.61]\n line_ratios['Ha'] = [1.]\n line_wavelengths['Hb'] = [4862.68]\n line_ratios['Hb'] = [1.]\n line_wavelengths['Hg'] = [4341.68]\n line_ratios['Hg'] = [1.]\n line_wavelengths['Hd'] = [4102.892]\n line_ratios['Hd'] = [1.]\n \n line_wavelengths['OIII-4363'] = [4364.436]\n line_ratios['OIII-4363'] = [1.]\n line_wavelengths['OIII'] = [5008.240, 4960.295]\n line_ratios['OIII'] = [2.98, 1]\n \n # Split doublet, if needed\n line_wavelengths['OIII4959'] = [4960.295]\n line_ratios['OIII4959'] = [1]\n line_wavelengths['OIII5007'] = [5008.240]\n line_ratios['OIII5007'] = [1]\n \n line_wavelengths['OII'] = [3727.092, 3729.875]\n line_ratios['OII'] = [1, 1.] \n \n line_wavelengths['OI-6302'] = [6302.046, 6363.67]\n line_ratios['OI-6302'] = [1, 0.33]\n\n line_wavelengths['NeIII'] = [3869]\n line_ratios['NeIII'] = [1.]\n line_wavelengths['NeV'] = [3346.8]\n line_ratios['NeV'] = [1.]\n line_wavelengths['NeVI'] = [3426.85]\n line_ratios['NeVI'] = [1.]\n \n line_wavelengths['SIII'] = [9068.6, 9530.6][::-1]\n line_ratios['SIII'] = [1, 2.44][::-1]\n \n # Split doublet, if needed\n line_wavelengths['SIII9068'] = [9068.6]\n line_ratios['SIII9068'] = [1]\n line_wavelengths['SIII9531'] = [9530.6]\n line_ratios['SIII9531'] = [1]\n \n line_wavelengths['SII'] = [6718.29, 6732.67]\n line_ratios['SII'] = [1., 1.] 
\n \n line_wavelengths['HeII'] = [4687.5]\n line_ratios['HeII'] = [1.]\n line_wavelengths['HeI-5877'] = [5877.2]\n line_ratios['HeI-5877'] = [1.]\n line_wavelengths['HeI-3889'] = [3889.5]\n line_ratios['HeI-3889'] = [1.]\n line_wavelengths['HeI-1083'] = [10830.]\n line_ratios['HeI-1083'] = [1.]\n \n line_wavelengths['MgII'] = [2799.117]\n line_ratios['MgII'] = [1.]\n \n line_wavelengths['CIV-1549'] = [1549.480]\n line_ratios['CIV-1549'] = [1.]\n line_wavelengths['CIII-1908'] = [1908.734]\n line_ratios['CIII-1908'] = [1.]\n line_wavelengths['OIII-1663'] = [1665.85]\n line_ratios['OIII-1663'] = [1.]\n line_wavelengths['HeII-1640'] = [1640.4]\n line_ratios['HeII-1640'] = [1.]\n \n line_wavelengths['NII'] = [6549.86, 6585.27]\n line_ratios['NII'] = [1., 3]\n line_wavelengths['NIII-1750'] = [1750.]\n line_ratios['NIII-1750'] = [1.]\n line_wavelengths['NIV-1487'] = [1487.]\n line_ratios['NIV-1487'] = [1.]\n line_wavelengths['NV-1240'] = [1240.81]\n line_ratios['NV-1240'] = [1.]\n\n line_wavelengths['Lya'] = [1215.4]\n line_ratios['Lya'] = [1.]\n \n line_wavelengths['Lya+CIV'] = [1215.4, 1549.49]\n line_ratios['Lya+CIV'] = [1., 0.1]\n \n line_wavelengths['Ha+SII'] = [6564.61, 6718.29, 6732.67]\n line_ratios['Ha+SII'] = [1., 1./10, 1./10]\n line_wavelengths['Ha+SII+SIII+He'] = [6564.61, 6718.29, 6732.67, 9068.6, 9530.6, 10830.]\n line_ratios['Ha+SII+SIII+He'] = [1., 1./10, 1./10, 1./20, 2.44/20, 1./25.]\n\n line_wavelengths['Ha+NII+SII+SIII+He'] = [6564.61, 6549.86, 6585.27, 6718.29, 6732.67, 9068.6, 9530.6, 10830.]\n line_ratios['Ha+NII+SII+SIII+He'] = [1., 1./(4.*4), 3./(4*4), 1./10, 1./10, 1./20, 2.44/20, 1./25.]\n \n line_wavelengths['OIII+Hb'] = [5008.240, 4960.295, 4862.68]\n line_ratios['OIII+Hb'] = [2.98, 1, 3.98/6.]\n \n line_wavelengths['OIII+Hb+Ha'] = [5008.240, 4960.295, 4862.68, 6564.61]\n line_ratios['OIII+Hb+Ha'] = [2.98, 1, 3.98/10., 3.98/10.*2.86]\n\n line_wavelengths['OIII+Hb+Ha+SII'] = [5008.240, 4960.295, 4862.68, 6564.61, 6718.29, 6732.67]\n line_ratios['OIII+Hb+Ha+SII'] = [2.98, 1, 3.98/10., 3.98/10.*2.86*4, 3.98/10.*2.86/10.*4, 3.98/10.*2.86/10.*4]\n\n line_wavelengths['OIII+OII'] = [5008.240, 4960.295, 3729.875]\n line_ratios['OIII+OII'] = [2.98, 1, 3.98/4.]\n \n line_wavelengths['OII+Ne'] = [3729.875, 3869]\n line_ratios['OII+Ne'] = [1, 1./5]\n \n return line_wavelengths, line_ratios", "def read_wav_data(timestamps, wavfile, snapint=[-0.3, -0.2, -0.1, 0, 0.1, 0.2, 0.3], fft_size=1024):\n sig, samplerate = librosa.load(wavfile, sr=None, mono=True)\n data = list()\n\n # normalize sound wave\n # sig = sig / np.sqrt(np.mean(sig**2, axis=0));\n # sig = sig / np.max(np.max(np.abs(sig), axis=0));\n sig = sig / np.max(np.abs(sig))\n\n # calc a length array\n tmpts = np.array(timestamps)\n timestamp_interval = tmpts[1:] - tmpts[:-1]\n timestamp_interval = np.append(timestamp_interval, timestamp_interval[-1])\n\n for sz in snapint:\n data_r = np.array([get_wav_data_at(max(0, min(len(sig) - fft_size, coord + timestamp_interval[i] * sz)),\n sig, samplerate, fft_size=fft_size, freq_high=samplerate//4) for i, coord in enumerate(timestamps)])\n data.append(data_r)\n\n raw_data = np.array(data)\n norm_data = np.tile(np.expand_dims(\n np.mean(raw_data, axis=1), 1), (1, raw_data.shape[1], 1, 1))\n std_data = np.tile(np.expand_dims(\n np.std(raw_data, axis=1), 1), (1, raw_data.shape[1], 1, 1))\n return (raw_data - norm_data) / std_data", "def preprocessing(filename):\n reporting(\"Preprocessing file...\", True)\n chdir(path.dirname(filename))\n (rate, sig) = 
wavefile.load(path.split(filename)[1])\n signal = sig[0]\n\n duration = len(signal) / rate\n reporting(f\"Done. Duration={duration}\")\n return signal", "def load_wav_16k_mono(self, filename):\n filename = utils.get_file_path('webapp/static/processed', filename)\n\n file_contents = tf.io.read_file(filename)\n wav, sample_rate = tf.audio.decode_wav(file_contents,\n desired_channels=1)\n wav = tf.squeeze(wav, axis=-1)\n sample_rate = tf.cast(sample_rate, dtype=tf.int64)\n wav = tfio.audio.resample(wav, rate_in=sample_rate, rate_out=16000)\n return wav", "def read_wav(fname, normalize=True):\n # samps_int16: N x C or N\n # N: number of samples\n # C: number of channels\n sampling_rate, samps_int16 = wavfile.read(fname)\n # N x C => C x N\n samps = samps_int16.astype(np.float)\n # tranpose because I used to put channel axis first\n if samps.ndim != 1:\n samps = np.transpose(samps)\n # normalize like MATLAB and librosa\n if normalize:\n samps = samps / MAX_INT16\n return sampling_rate, samps", "def readNextGenSpectrum(fname=''):\n\n print('Reading : ', fname)\n\n with open(fname, 'r') as rfile:\n dum = rfile.readline()\n sdum = dum.split()\n teff = float(sdum[0])\n logg = float(sdum[1])\n mph = float(sdum[2])\n dum = rfile.readline()\n nwav = float(dum.split()[0])\n\n bigline = []\n dum = rfile.readline()\n while dum.strip() != '':\n sdum = dum.split()\n for i in range(len(sdum)):\n bigline.append(float(sdum[i]))\n dum = rfile.readline()\n\n bigline = np.array(bigline)\n # Convert wavelength from angstrom to micron\n wav = bigline[:nwav] / 1e4\n inu = bigline[nwav:2 * nwav]\n bnu = bigline[nwav * 2:nwav * 3]\n\n ii = wav.argsort()\n wav = wav[ii]\n inu = inu[ii] * 1e-8 * wav * 1e4 / np.pi / (29979245800.0 / wav * 1e4)\n bnu = bnu[ii] * 1e-8 * wav * 1e4 / np.pi / (29979245800.0 / wav * 1e4)\n\n #\n # The unit is now erg/s/cm/Hz/ster\n #\n\n return {'teff': teff, 'logg': logg, 'mph': mph, 'nwav': nwav, 'wav': wav, 'inu': inu, 'bnu': bnu}", "def record_and_get_wav(self, time):\n sample_width, frames = self.record_audio(time)\n return WavFile(samples=frames, sample_width=sample_width, time=time)", "def mono(filename,wout=True):\n n, data, data_dB,sr,ch=inputwav(filename)\n if ch==2:\n print('Converting to mono...')\n L=data[:,0]\n R=data[:,1]\n n=len(data)\n data_m=np.zeros((n,1))\n data_m=L/2.0+R/2.0\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_mono.wav',data_m,sr,'PCM_16')\n print('Done!')\n return data_m\n else:\n print( \"Error: input is already mono stoooooooooopid!\")", "def wavPlayer(data, rate, scale=False, autoplay=False):\r\n #if np.max(abs(data)) > 1 or scale:\r\n # data = data/np.max(abs(data))\r\n #data = (2**13*data).astype(np.int16)\r\n \r\n buffer = BytesIO()\r\n buffer.write(b'RIFF')\r\n buffer.write(b'\\x00\\x00\\x00\\x00')\r\n buffer.write(b'WAVE')\r\n \r\n buffer.write(b'fmt ')\r\n if data.ndim == 1:\r\n noc = 1\r\n else:\r\n noc = data.shape[1]\r\n \r\n bits = data.dtype.itemsize * 8\r\n sbytes = rate*(bits // 8)*noc\r\n ba = noc * (bits // 8)\r\n buffer.write(struct.pack('<ihHIIHH', 16, 1, noc, rate, sbytes, ba, bits))\r\n\r\n # data chunk\r\n buffer.write(b'data')\r\n buffer.write(struct.pack('<i', data.nbytes))\r\n\r\n if data.dtype.byteorder == '>' or (data.dtype.byteorder == '=' and sys.byteorder == 'big'):\r\n data = data.byteswap()\r\n\r\n buffer.write(data.astype(np.int16).tostring())\r\n\r\n # Determine file size and place it in correct position at start of the file.\r\n size = buffer.tell()\r\n buffer.seek(4)\r\n 
buffer.write(struct.pack('<i', size-8))\r\n \r\n val = buffer.getvalue()\r\n autoplay = \" autoplay=\\\"autoplay\\\"\"*autoplay + \"\"\r\n \r\n src = \"\"\"<audio controls=\"controls\" style=\"width:600px\"{autoplay}>\r\n <source controls src=\"data:audio/wav;base64,{base64}\" type=\"audio/wav\" />\r\n Your browser does not support the audio element.\r\n </audio>\"\"\".format(base64=base64.b64encode(val).decode(\"ascii\"), autoplay=autoplay)\r\n display(HTML(src))", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n num_silent = 0\n snd_started = False\n\n r = array('h')\n\n while 1:\n # little endian, signed short\n snd_data = array('h', stream.read(CHUNK_SIZE))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n snd_started = True\n\n if snd_started and num_silent > 30:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n r = trim(r)\n r = add_silence(r, 0.5)\n return sample_width, r", "def read_audio(filename, sample_rate = 44100):\n loader = essentia.standard.MonoLoader(filename = filename, sampleRate = sample_rate)\n audio = loader()\n return audio", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, channels=1, rate=RATE,\n input=True, output=True,\n frames_per_buffer=CHUNK_SIZE)\n\n num_silent = 0\n snd_started = False\n\n r = array('h')\n\n while 1:\n # little endian, signed short\n snd_data = array('h', stream.read(CHUNK_SIZE))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n snd_started = True\n\n if snd_started and num_silent > SILENCE:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n r = trim(r)\n r = add_silence(r, 0.5)\n return sample_width, r", "def read_wavetxt(path):\n with open(path) as f:\n for line in f.readlines():\n line = line.strip()\n if 'SampleFrequence' in line:\n freq = int(line[16:])\n elif 'DataInput' in line:\n series = np.array(line[10:].split(',')).astype(np.float64)\n return (freq, series)", "def wav_to_sig(wav_file):\n spf = wave.open(wav_file,'r')\n sig = spf.readframes(-1)\n sig = np.fromstring(sig, 'Int16')\n fs = spf.getframerate()\n return (sig, fs)", "def load_wav_file(fname):\n fp = wave.open(fname, \"rb\")\n channels = fp.getnchannels()\n bitrate = fp.getsampwidth() * 8\n samplerate = fp.getframerate()\n buf = fp.readframes(fp.getnframes())\n return SoundData(buf, channels, bitrate, len(buf), samplerate)", "def signal_dictionary(music_filename):\n\tx = []\n\ty = []\n\tassign_points = {}\n\n\tsignal = input_waves.WAVtoSignal(music_filename)\n\tfor i in range(len(signal)):\n\t\tx = signal[i][0]\n\t\ty = signal[i][1]\n\t\tassign_points.update({x:y})\n\tprint 'assign dictionary complete'\n\treturn assign_points", "def graph_spectrogram(wav_file):\n rate, data = get_wav_info(wav_file)\n nfft = 200 # Length of each window segment\n fs = 8000 # Sampling frequencies\n noverlap = 120 # Overlap between windows\n nchannels = data.ndim\n if nchannels == 1:\n pxx, freqs, bins, im = plt.specgram(data, nfft, fs, noverlap = noverlap)\n elif nchannels == 2:\n pxx, freqs, bins, im = 
plt.specgram(data[:,0], nfft, fs, noverlap = noverlap)\n return pxx", "def samplerate(self):\n return self.sound.samplerate", "def silence_intervals(file_path,file_name):\r\n nsil_start_time=[]\r\n nsil_end_time=[]\r\n sil_start_time=[]\r\n sil_end_time=[]\r\n #read file \r\n audio, sample_rate = librosa.load(os.path.join(file_path,file_name))\r\n \r\n #silence extraction using librosa\r\n nsil_intv=librosa.effects.split(audio, top_db=30).astype('float32') / sample_rate\r\n \r\n #silence extraction using pyAudioanalysis\r\n # [Fs, x] = aIO.readAudioFile(os.path.join(file_path,file_name))\r\n # nsil_intv = np.array(aS.silenceRemoval(x, Fs, 0.020, 0.020, smoothWindow = 0.7, Weight = 0.3, plot = False))\r\n # print \"non-sil segments=\"+str(nsil_intv)\r\n\r\n #silence detection using webrtcvad (voice activity detection)\r\n #nsil_intv=np.array(vad_webrtcvad(file_path,file_name))\r\n\r\n\r\n dur=librosa.get_duration(y=audio, sr=sample_rate)\r\n print nsil_intv\r\n print dur\r\n print sample_rate\r\n curr_sil_start=0.0\r\n curr_sil_end=0.0\r\n for i in range(nsil_intv.shape[0]):\r\n nsil_start_time.append(nsil_intv[i][0])\r\n #sil_start_time=list(np.array(sil_start_time)/sample_rate)\r\n\r\n nsil_end_time.append(nsil_intv[i][1])\r\n #sil_end_time=list(np.array(sil_end_time)/sample_rate)\r\n\r\n for i in range(len(nsil_start_time)):\r\n curr_sil_end=nsil_start_time[i]\r\n sil_start_time.append(str(curr_sil_start))\r\n sil_end_time.append(str(curr_sil_end))\r\n curr_sil_start=nsil_end_time[i]\r\n\r\n print sil_start_time\r\n print sil_end_time\r\n return sil_start_time,sil_end_time", "def read_audio(self, path_to_wav):\n y, sr = librosa.load(path_to_wav, sr=None)\n return (y, sr)", "def record():\n p = pyaudio.PyAudio()\n stream = p.open(format=FORMAT, input_device_index=0, channels=1, rate=RATE, input=True, output=True, frames_per_buffer=CHUNK_SIZE)\n num_silent = 0\n snd_started = False\n\n r = array('h')\n while 1:\n snd_data = array('h', stream.read(CHUNK_SIZE, exception_on_overflow = False))\n if byteorder == 'big':\n snd_data.byteswap()\n r.extend(snd_data)\n\n silent = is_silent(snd_data)\n if silent and snd_started:\n num_silent += 1\n elif not silent and not snd_started:\n print(\"Sound started.\")\n snd_started = True\n\n if snd_started and num_silent> 10:\n break\n\n sample_width = p.get_sample_size(FORMAT)\n stream.stop_stream()\n stream.close()\n p.terminate()\n\n r = normalize(r)\n #r = trim(r)\n #r = add_silence(r, 0.5)\n return sample_width, r", "def _synthesize_tone(self, duration_in_msec):\n sample_count = int(float(self.sample_rate) * duration_in_msec * 0.001)\n # There are two bytes per 16-bit sample.\n tmp_buffer = bytearray(sample_count + sample_count)\n fscale = 2.0 * math.pi * self.tone_frequency * self.sample_period;\n # Loop and create the audio samples.\n index = 0\n # Create the rising envelope part of the tone.\n for i, gain in enumerate(self.pulse_shaping_list):\n angle = float(i) * fscale\n value = gain * math.sin(angle)\n byte0, byte1 = MorseCodeSender._float_to_16_bit_sample(value)\n # Write the bytes in little-endian order.\n tmp_buffer[index] = byte0\n tmp_buffer[index + 1] = byte1\n index += 2\n # Create the level part of the tone. 
Start at the next\n # sample index so that the phase is a continuous function.\n rising_falling_count = len(self.pulse_shaping_list)\n middle_sample_count = sample_count - (2 * rising_falling_count)\n for i in range(0, middle_sample_count):\n angle = float(i + rising_falling_count) * fscale\n value = math.sin(angle)\n byte0, byte1 = MorseCodeSender._float_to_16_bit_sample(value)\n # Write the bytes in little-endian order.\n tmp_buffer[index] = byte0\n tmp_buffer[index + 1] = byte1\n index += 2\n # Create the decaying part of the tone. Start at the next\n # sample index so that the phase is a continuous function.\n temp_count = rising_falling_count + middle_sample_count;\n for i, rev_gain in enumerate(self.pulse_shaping_list):\n angle = float(i + temp_count) * fscale\n value = (1.0 - rev_gain) * math.sin(angle)\n byte0, byte1 = MorseCodeSender._float_to_16_bit_sample(value)\n # Write the bytes in little-endian order.\n tmp_buffer[index] = byte0\n tmp_buffer[index + 1] = byte1\n index += 2\n return tmp_buffer", "def process_sound_file(file_path):\n\n return to_mfcc(get_wav(file_path))", "def get_file_bpm(path, params = {}):\n try:\n win_s = params['win_s']\n samplerate = params['samplerate']\n hop_s = params['hop_s']\n except:\n \"\"\"\n # super fast\n samplerate, win_s, hop_s = 4000, 128, 64 \n # fast\n samplerate, win_s, hop_s = 8000, 512, 128\n \"\"\"\n # default:\n samplerate, win_s, hop_s = 44100, 1024, 512\n\n s = source(path, samplerate, hop_s)\n samplerate = s.samplerate\n o = tempo(\"specdiff\", win_s, hop_s, samplerate)\n # List of beats, in samples\n beats = []\n # Total number of frames read\n total_frames = 0\n\n while True:\n samples, read = s()\n is_beat = o(samples)\n if is_beat:\n this_beat = o.get_last_s()\n beats.append(this_beat)\n #if o.get_confidence() > .2 and len(beats) > 2.:\n # break\n total_frames += read\n if read < hop_s:\n break\n\n # Convert to periods and to bpm \n bpms = 60./diff(beats)\n b = median(bpms)\n return b", "def load(filename):\n root,ext = _os_path.splitext(filename)\n loader = LOADER[ext]\n frequency,raw_signal = loader(filename)\n iinfo = _numpy.iinfo(raw_signal.dtype)\n raw_signal_midpoint = (iinfo.max + iinfo.min)/2.\n raw_signal_range = iinfo.max - raw_signal_midpoint\n unit_output_signal = (raw_signal - raw_signal_midpoint)/raw_signal_range\n return (frequency, unit_output_signal)", "def micsample(listentime):\n frames, sampling_rate = record_audio(listentime)\n samples = np.hstack([np.frombuffer(i, np.int16) for i in frames])\n times = np.arange(samples.size) / sampling_rate\n return samples, times", "def read_speeches(filename):\n\n # Open a speech file\n speech_file = open(filename)\n\n # Create a new dictionary\n speech_dict = {}\n\n # Iterate over lines\n for line in speech_file:\n # Replace whitespace, including /n, at the end of a line with a single space\n line = line.rstrip() + ' '\n\n # Given that a title begins with #\n if line.startswith('#'):\n # Remove '# ' at the beginning and ': ' at the end, to be used as a title\n title = line[2:-2]\n # Assign the tile as a key in the dictionary\n speech_dict[title] = ''\n # A speech line does not begins with #\n else:\n # Not begins with [ either\n if line.startswith('[') is False:\n # Append the speech line to the already existing string of the corresponding title\n # The tile variable is kept from the previous loop(s)\n speech_dict[title] += line\n\n # Close the file\n speech_file.close()\n\n return speech_dict", "def read(f, normalized=False):\r\n a = pydub.AudioSegment.from_mp3(f)\r\n 
y = np.array(a.get_array_of_samples())\r\n if a.channels == 2:\r\n y = y.reshape((-1, 2))\r\n if normalized:\r\n return a.frame_rate, np.float32(y) / 2**15\r\n else:\r\n return a.frame_rate, y", "def sampling_rate(self):\n with audioread.audio_open(self.path) as f:\n return f.samplerate", "def from_frequency(frequency:float, detune=0) -> 'Pitch':\n return Pitch(1200*np.log2(frequency/440) + detune)", "def record_audio_to_file_and_get_wav(self, time, file_name):\n sample_width, frames = self.record_audio(time)\n wf = wave.open(file_name, 'wb')\n wf.setnchannels(self.channels)\n wf.setsampwidth(sample_width)\n wf.setframerate(self.rate)\n wf.writeframes(frames)\n wf.close()\n return WavFile(samples=frames, sample_width=sample_width, time=time, word=file_name)", "def save_timit_pitch():\n timit_names = []\n pitch_intensity_tables = []\n\n wav_txt_file_names = glob.glob(os.path.join(timit_pitch_data_path, '*.wav.txt'))\n for wav_txt_file in wav_txt_file_names:\n pitch_intensity = pd.read_csv(wav_txt_file, delimiter='\\t', dtype=np.float64, na_values=['?'])\n pitch_intensity = pitch_intensity.dropna()\n pitch_intensity.loc[pitch_intensity.pitch == 0, 'pitch'] = np.NaN\n pitch_intensity.loc[pitch_intensity.intensity == 0, 'intensity'] = np.NaN\n pitch_intensity['log_hz'] = np.log(pitch_intensity['pitch'])\n pitch_intensity['erb_rate'] = convert_hz(pitch_intensity['pitch'], \"erb\")\n pitch = pitch_intensity['log_hz']\n pitch_intensity['rel_pitch_global'] = (pitch - np.mean(pitch))/np.std(pitch)\n pitch = pitch_intensity['erb_rate']\n pitch_intensity['rel_pitch_global_erb'] = (pitch - np.mean(pitch))/np.std(pitch)\n\n timit_name = wav_txt_file.split(os.sep)[-1][:-8]\n\n timit_names.append(timit_name)\n pitch_intensity_tables.append(pitch_intensity)\n\n timit_pitch = pd.concat(pitch_intensity_tables, keys=timit_names)\n #print(np.mean(timit_pitch['log_hz'])) # -> 4.9406, (no log: 147.0387)\n #print(np.std(timit_pitch['log_hz'])) # -> 0.3112, (no log: 48.59846)\n timit_pitch['abs_pitch'] = (timit_pitch['log_hz'] - np.mean(timit_pitch['log_hz']))/np.std(timit_pitch['log_hz'])\n timit_pitch['abs_pitch_erb'] = (timit_pitch['erb_rate'] - np.mean(timit_pitch['erb_rate']))/np.std(timit_pitch['erb_rate'])\n timit_pitch['abs_pitch_change'] = timit_pitch['abs_pitch'].diff()\n timit_pitch['abs_pitch_erb_change'] = timit_pitch['abs_pitch_erb'].diff()\n #print(np.mean(timit_pitch.intensity)) # -> 63.000\n #print(np.std(timit_pitch.intensity)) # -> 15.537\n timit_pitch['zscore_intensity'] = (timit_pitch.intensity - np.mean(timit_pitch.intensity))/np.std(timit_pitch.intensity)\n\n filename = os.path.join(processed_timit_data_path, 'timit_pitch.h5')\n timit_pitch.to_hdf(filename, 'timit_pitch')\n return timit_pitch", "def get_sig(filename):\n\n (rate, data) = readAudioFile(filename)\n\n return rate, data", "def format_pitch_data(pd):\n\n\tfor t in pd.keys():\n\t\tpd[t] = pd[t]['Pitch'] \t # make each value just the pitch, instead of a sub-dict\n\t\tif pd[t] == 0:\n\t\t\tdel pd[t]\t\t # if pitch is 0, remove from dictionary\n\n\t# now, pd is dict where each key is time (x value) and each value is pitch (y value)\n\t# to format for graph input, make list of dicts containing x-y pairs\n\tdatapoints_list = []\n\tfor t in pd.keys():\n\t\tdatapoint = {}\n\t\tdatapoint[\"x\"] = t\n\t\tdatapoint[\"y\"] = pd[t]\n\t\tdatapoints_list.append(datapoint)\n\n\t# sort the list by the value of \"x\"\n\tdatapoints_sorted = sorted(datapoints_list, key=itemgetter(\"x\"))\n\n\t# with this sorted list, do some data smoothing\n\t# pull 
out every nth item\n\ti = 0\n\tdatapoints_keep = []\n\twhile i < len(datapoints_sorted):\n\t\tdatapoints_keep.append(datapoints_sorted[i])\n\t\ti += 50\n\t# make sure last item is included so length of curve isn't lost\n\tdatapoints_keep.append(datapoints_sorted[-1])\n\n\t# print \"num of datapoints:\", len(datapoints_keep)\n\t# print datapoints_keep[:100]\n\n\treturn json.dumps(datapoints_keep, sort_keys=True)", "def mic_audio(dur):\n\n audio,b = microphone.record_audio(dur)\n audio = np.hstack([np.frombuffer(i,np.int16) for i in audio])\n return audio", "def single_analyze_wav(self, filePath):\n\n tChopped, vChopped, fVals,\\\n powerFFT, peakFreqs, peakAmps = Utils.AnalyzeFFT(filePath, tChop=self.settings['processing']['tChop'],\n detail=self.settings['processing']['detail'])\n\n self.analyzeDone.emit(tChopped, vChopped, fVals, powerFFT, peakFreqs, peakAmps, filePath)\n self.update_table(peakFreqs, peakAmps)", "def process_audio_file(self, file_name):\n sig, sr = librosa.load(file_name, mono=True)\n return self._extract_function(sig, sr)", "def decode_wav(raw_data):\n return _kaldi_module.decode_wav(raw_data)", "def slow(filename,p=10,wout=True):\n n, data, data_dB,sr,ch=inputwav(filename)\n if p>0:\n print('Slowing...')\n if p<0:\n print('Warning: You are speeding up the audio! Use positive value'\n +' for p to slow.')\n f=resample(data,int(len(data)*(1+p/100.0)))\n if wout==True:\n print('Exporting...')\n sf.write(filename[0:len(filename)-4]+'_slow.wav',f,sr,'PCM_16')\n print('Done!')\n return f", "def wav_to_fourier(wav_file,\n rate_limit=6000.0,\n step=1.0):\n rate, aud_data = read(wav_file)\n # Should be mono\n if len(aud_data) != len(aud_data.ravel()):\n aud_data = np.mean(aud_data, axis=1)\n\n # Zero padding\n len_data = aud_data.shape[0]\n channel_1 = np.zeros(2 ** (int(np.ceil(np.log2(len_data)))))\n channel_1[0:len_data] = aud_data\n\n # Fourier analysis\n fourier = np.abs(np.fft.fft(channel_1))\n freq = np.linspace(0, rate, fourier.shape[0])\n\n freq, fourier = limit_by_freq(freq,\n fourier,\n upper_limit=rate_limit)\n freq, fourier = group_by_freq(freq,\n fourier,\n step=step)\n # Max frequency should be 100.0\n a = np.max(np.abs(fourier)) / 100.0\n fourier = fourier / a\n\n return freq, fourier", "def read_audio(file_path, resample_rate=None, to_mono=False):\n return librosa.load(file_path, sr=resample_rate, mono=to_mono)", "def load_wav_to_torch(full_path):\n sampling_rate, data = read(full_path)\n return torch.from_numpy(data).float(), sampling_rate", "def load_wav_dic(wav_dic):\n noisy_path, clean_path = wav_dic[\"noisy\"], wav_dic[\"clean\"]\n noisy, fs = sf.read(noisy_path, dtype=\"float32\")\n clean, fs = sf.read(clean_path, dtype=\"float32\")\n return noisy, clean, fs", "def simp(filename, seconds_per_average=0.001):\n wavefile = wave.open(filename, 'rb')\n print \"# gnuplot data for %s, seconds_per_average=%s\" % \\\n (filename, seconds_per_average)\n print \"# %d channels, samplewidth: %d, framerate: %s, frames: %d\\n# Compression type: %s (%s)\" % wavefile.getparams()\n\n framerate = wavefile.getframerate() # frames / second\n frames_to_read = int(framerate * seconds_per_average)\n print \"# frames_to_read=%s\" % frames_to_read\n\n time_and_max = []\n values = []\n count = 0\n while 1:\n fragment = wavefile.readframes(frames_to_read)\n if not fragment:\n break\n\n # other possibilities:\n # m = audioop.avg(fragment, 2)\n # print count, \"%s %s\" % audioop.minmax(fragment, 2)\n\n m = audioop.rms(fragment, wavefile._framesize)\n time_and_max.append((count, m))\n 
values.append(m)\n count += frames_to_read\n # if count>1000000:\n # break\n\n # find the min and max\n min_value, max_value = min(values), max(values)\n points = [] # (secs,height)\n for count, value in time_and_max:\n points.append((count/framerate,\n (value - min_value) / (max_value - min_value)))\n return points", "def decode_audio(fp, fs=None, mono=False, normalize=False, fastwav=False, measured = False):\n if measured:\n fp = fp.decode('latin').replace(\"clean\", \"measured\")\n\n if fastwav:\n # Read with scipy wavread (fast).\n _fs, _wav = wavread(fp)\n if fs is not None and fs != _fs:\n raise NotImplementedError('Fastwav cannot resample audio.')\n if _wav.dtype == np.int16:\n _wav = _wav.astype(np.float32)\n _wav /= 32768.\n elif _wav.dtype == np.float32:\n pass\n else:\n raise NotImplementedError('Fastwav cannot process atypical WAV files.')\n else:\n # TODO: librosa currently optional due to issue with cluster installation\n import librosa\n # Decode with librosa load (slow but supports file formats like mp3).\n _wav, _fs = librosa.core.load(fp, sr=fs, mono=False)\n if _wav.ndim == 2:\n _wav = np.swapaxes(_wav, 0, 1)\n\n assert _wav.dtype == np.float32\n\n # At this point, _wav is np.float32 either [nsamps,] or [nsamps, nch].\n # We want [nsamps, 1, nch] to mimic 2D shape of spectral feats.\n if _wav.ndim == 1:\n nsamps = _wav.shape[0]\n nch = 1\n else:\n nsamps, nch = _wav.shape\n _wav = np.reshape(_wav, [nsamps, 1, nch])\n \n # Average channels if we want monaural audio.\n if mono:\n _wav = np.mean(_wav, 2, keepdims=True)\n\n if normalize:\n _wav /= np.max(np.abs(_wav))\n\n return _wav", "def load_audio(file_path):\n # load the audio file in its original sampling rate\n audio_data, sr = librosa.load(file_path, sr=sampling_rate)\n\n # get the common file name\n file_name = file_path.split(\"/\")[-1]\n file_name = file_name.split(\".wav\")[0]\n\n # calculate number of samples in the time duration needed\n num_samples = int(sr*time_duration)\n\n # get the cut-off audio signals and save them\n for i in np.arange(num_audio_files):\n audio_cut_data = cut_sample(audio_data, num_samples)\n file_path=dir_to_save + file_name + \"_\" + str(i+1) + \".wav\"\n save_sample(audio_cut_data, file_path, sr)\n print(f\"generating signal {str(i)}, its length {len(audio_cut_data)} by cutting the original signal\")", "def load_audio(self):\n df = pd.read_csv(\"{dir}/iteration_{iter}.csv\".format(dir=self.directory, iter=self.iteration),\n usecols=[1, 2, 3])\n\n doa_from_file = df.iloc[0][1]\n wav_name = df.iloc[0][0]\n filename = \"{dir}/{wav_name}\".format(dir=self.directory, wav_name=wav_name)\n\n y, sr = librosa.load(filename, mono=False)\n\n y_8k = librosa.resample(y, sr, 8000)\n result_x = librosa.util.fix_length(y_8k, 8000)\n\n return result_x, doa_from_file", "def test_wav(self, dtype, sample_rate, num_channels):\n duration = 1\n path = self.get_temp_path(\"data.wav\")\n data = get_wav_data(dtype, num_channels, normalize=False, num_frames=duration * sample_rate)\n save_wav(path, data, sample_rate)\n info = self._info(path)\n assert info.sample_rate == sample_rate\n assert info.num_frames == sample_rate * duration\n assert info.num_channels == num_channels\n assert info.bits_per_sample == sox_utils.get_bit_depth(dtype)\n assert info.encoding == get_encoding(\"wav\", dtype)", "def get_pitch(self, start):\n spectrum, amplitude = self.frequency_spectrum(start)\n peaks = self.get_peaks(spectrum, amplitude)\n\n if self.print:\n self.plot_clip(spectrum, amplitude, peaks)\n \n return 
self.get_note_probabilities(peaks)", "def pitch_gen(freq: float, duration: float, signal: np.array, sample_freq: int, alpha=0.99, ref_length=50):\n total_sample_number = int(sample_freq * duration)\n desire_signal_length = int(sample_freq / freq)\n # pad or cur signal\n if len(signal) >= desire_signal_length:\n input_signal = signal[: desire_signal_length]\n else: # pad\n input_signal = np.pad(signal, (0, desire_signal_length - len(signal)), 'constant')\n\n result = KS(input_signal, N=total_sample_number, alpha=alpha, ref_length=ref_length)\n\n return result", "def loadTTLPulse(file, n_channels = 2, fs = 20000, track = 0, mscope = 1):\n f = open(file, 'rb')\n startoffile = f.seek(0, 0)\n endoffile = f.seek(0, 2)\n bytes_size = 2 \n n_samples = int((endoffile-startoffile)/n_channels/bytes_size)\n f.close()\n with open(file, 'rb') as f:\n data = np.fromfile(f, np.uint16).reshape((n_samples, n_channels))\n \n ch_track = data[:,track].astype(np.int32)\n peaks,_ = scipy.signal.find_peaks(np.diff(ch_track), height=30000)\n timestep = np.arange(0, len(data))/fs\n peaks+=1\n ttl_track = pd.Series(index = timestep[peaks], data = data[peaks,track]) \n\n ch_mscope = data[:,mscope].astype(np.int32)\n peaks,_ = scipy.signal.find_peaks(np.abs(np.diff(ch_mscope)), height=30000)\n peaks+=1\n ttl_mscope = pd.Series(index = timestep[peaks], data = data[peaks,mscope])\n\n return ttl_track, ttl_mscope", "def extract_beat(audio, sample_rate = 44100):\n beat_tracker = BeatTrackerDegara()\n beats_time = beat_tracker(audio)\n beats = np.array(map(lambda time : round(time * sample_rate), beats_time))\n beats = np.append(0, beats)\n beats_time = np.append(0, beats_time)\n\n return beats, beats_time", "def write_wav(filename, data, rate = 44100):\r\n \r\n # Compress the data (the input format is likely to be float64)\r\n # Make sure that the format is readable by Librosa\r\n maxv = np.iinfo(np.int16).max\r\n lb_write_wav(filename, (data * maxv).astype(np.int16), rate) \r\n \r\n return(None)", "def _save_wav(buff, data, rate) -> None:\n # Code inspired from `IPython.display.Audio`\n data = np.array(data, dtype=float)\n\n bit_depth = 16\n max_sample_value = int(2**(bit_depth - 1)) - 1\n\n num_channels = data.shape[1] if len(data.shape) > 1 else 1\n scaled = np.int16(data / np.max(np.abs(data)) * max_sample_value)\n # The WAVE spec expects little-endian integers of \"sampwidth\" bytes each.\n # Numpy's `astype` accepts array-protocol type strings, so we specify:\n # - '<' to indicate little endian\n # - 'i' to specify signed integer\n # - the number of bytes used to represent each integer\n # See: https://numpy.org/doc/stable/reference/arrays.dtypes.html\n encoded_wav = scaled.astype(f'<i{bit_depth // 8}', copy=False).tobytes()\n\n with wave.open(buff, mode='wb') as waveobj:\n waveobj.setnchannels(num_channels)\n waveobj.setframerate(rate)\n waveobj.setsampwidth(bit_depth // 8)\n waveobj.setcomptype('NONE', 'NONE')\n waveobj.writeframes(encoded_wav)", "def decodeSpeech(hmmd, lmdir, dictp, wavfile):\n\n try:\n import sphinxbase\n import pocketsphinx as ps\n\n except:\n import pocketsphinx as ps\n print \"\"\"Pocket sphinx and sphixbase is not installed\n in your system. 
Please install it with package manager.\n \"\"\"\n speechRec = ps.Decoder(hmm=hmmd, lm=lmdir, dict=dictp)\n wavFile = file(wavfile, 'rb')\n speechRec.decode_raw(wavFile)\n result = speechRec.get_hyp()\n print result[0]\n return result[0]", "def checkfrequency(inputgiven):\n data_size = 40000\n wav_file = wave.open(inputgiven, 'r')\n data = wav_file.readframes(data_size)\n wav_file.close()\n data = struct.unpack('{n}h'.format(n=data_size), data)\n print max(data)", "def play(sampler, name=\"/Users/Jxie0755/Documents/DXcodings/Learning_Python/CS_61A/week03/mario.wav\", seconds=2):\n out = open(name, \"wb\")\n out.setnchannels(1)\n out.setsampwidth(2)\n out.setframerate(frame_rate)\n t = 0\n while t < seconds * frame_rate:\n sample = sampler(t)\n out.writeframes(encode(sample))\n t = t + 1\n out.close()", "def extract_duration(path, out_file):\n\n # sanity_check: check if the paths are correct\n # sanity_check: check if the out_file exists; if not then create one\n\n metadata_filepath_duration = open(out_file, 'w')\n\n for subdir, dirs, files in os.walk(path):\n for file in files:\n file_path = os.path.join(subdir, file)\n wavfile, sampling_rate = librosa.load(file_path)\n wavfile_duration = librosa.get_duration(y=wavfile, sr=sampling_rate)\n metadata_filepath_duration.write(file_path + ' | ' + str(wavfile_duration) + '\\n')\n\n metadata_filepath_duration.close()\n\n # sorting the wavfiles alphabetically to maintain order\n subprocess.call(['sort', out_file, '-o', out_file])", "def read_audio(f, downmix):\n if f.endswith('.mp3'):\n f = _mp3_hook(f)\n sr, audio = scipy.io.wavfile.read(f)\n if not audio.dtype is np.float32:\n audio = _normalize_pcm(audio)\n if downmix and len(audio.shape) == 2:\n audio = down_mix(audio)\n return sr, audio", "def tonify(self, tone_generator=None, verbose=False):\n if tone_generator is None:\n tone_generator = ToneGenerator('tonifyoutput.wav')\n tone_generator.file.setnchannels(len(self.sheets))\n # Find the max length (in seconds) of the data sheets\n max_length = 0.0\n for sheet in self.sheets:\n if len(sheet) > max_length:\n max_length = len(sheet)\n nframes = int(max_length * tone_generator.sample_rate)\n tone_generator.file.setnframes(nframes)\n\n tone_strs = []\n for d in self.sheets:\n if verbose:\n print \"File:\", d.data.name\n print \"Frequencies:\", self.freqs[self.sheets.index(d)]\n values = []\n tone_generator.setfreqs(self.freqs[self.sheets.index(d)])\n for i in range(0, len(d.times)):\n duration = d.durations[i]\n calls = d.calls[i]\n if verbose:\n print \"\\ttone: (%d, %d, %d) for %f seconds\" % (calls[0], calls[1],\n calls[2], duration)\n tone = tone_generator.get_tone((calls[0], calls[1], calls[2]), duration)\n values.append(str(tone))\n try:\n delta = float((d.times[i + 1] - d.times[i]).seconds)\n if float(delta) - duration < 0.0:\n silence_duration = 0.0\n else:\n silence_duration = float(delta) - duration\n except IndexError:\n break\n if verbose:\n print \"\\tsilence for\", silence_duration,\"seconds\"\n silence = tone_generator.get_silence(silence_duration)\n values.append(str(silence))\n if len(d) < max_length:\n end_silence = tone_generator.get_silence(max_length - len(d))\n values.append(str(end_silence))\n value_str = ''.join(values)\n tone_strs.append(value_str)\n \n if verbose:\n print \"Writing to file... 
(may take several minutes)\"\n combined = interleave_binarystr(tone_strs)\n tone_generator.file.writeframes(combined)\n if verbose:\n print \"Finished writing.\"\n tone_generator.close()", "def read(self, path):\n pbase = os.path.splitext(path)[0]\n gsid = pbase.split('/')[-2]\n gender, sid = gsid[0], gsid[1:]\n assert sid in self._spkr_table\n phoneseq = phnread(pbase+'.PHN')\n wrdseq = phnread(pbase+'.WRD')\n transcrpt = txtread(pbase+'.TXT')\n sample = TIMITSpeech(\n *audioread(path), speaker=sid, gender=gender,\n transcript=transcrpt, phonemeseq=phoneseq,\n wordseq=wrdseq\n )\n #sample.phonemeseq = [\n # (t, PHONETABLE[p]) for t, p in sample.phonemeseq]\n return sample", "def raw_to_wav(data, path, rate=44100):\n wavfile.write(path, rate, data)", "def _process_utterance(pml_dir, wav_dir, index, wav_path, pml_path, hparams):\n try:\n # Load the audio as numpy array\n wav = audio.load_wav(wav_path)\n except FileNotFoundError: # catch missing wav exception\n print('file {} present in csv metadata is not present in wav folder. skipping!'.format(\n wav_path))\n return None\n\n # rescale wav\n if hparams.rescale:\n wav = wav / np.abs(wav).max() * hparams.rescaling_max\n\n # Assert all audio is in [-1, 1]\n if (wav > 1.).any() or (wav < -1.).any():\n raise RuntimeError('wav has invalid value: {}'.format(wav_path))\n\n # Mu-law quantize\n if is_mulaw_quantize(hparams.input_type):\n # [0, quantize_channels)\n out = mulaw_quantize(wav, hparams.quantize_channels)\n\n constant_values = mulaw_quantize(0, hparams.quantize_channels)\n out_dtype = np.int16\n\n elif is_mulaw(hparams.input_type):\n # [-1, 1]\n out = mulaw(wav, hparams.quantize_channels)\n constant_values = mulaw(0., hparams.quantize_channels)\n out_dtype = np.float32\n\n else:\n # [-1, 1]\n out = wav\n constant_values = 0.\n out_dtype = np.float32\n\n # Get the PML features from the cmp file\n pml_cmp = np.fromfile(pml_path, dtype=np.float32)\n pml_features = pml_cmp.reshape((-1, hparams.pml_dimension))\n pml_frames = pml_features.shape[0]\n\n if pml_frames > hparams.max_pml_frames and hparams.clip_pmls_length:\n return None\n\n # Find parameters\n n_fft = (hparams.num_freq - 1) * 2\n\n if hparams.use_lws:\n # Ensure time resolution adjustement between audio and mel-spectrogram\n l, r = audio.pad_lr(wav, n_fft, audio.get_hop_size(hparams))\n\n # Zero pad audio signal\n out = np.pad(out, (l, r), mode='constant', constant_values=constant_values)\n else:\n # Ensure time resolution adjustement between audio and mel-spectrogram\n l_pad, r_pad = audio.librosa_pad_lr(wav, n_fft, audio.get_hop_size(hparams))\n\n # Reflect pad audio signal (Just like it's done in Librosa to avoid frame inconsistency)\n out = np.pad(out, (l_pad, r_pad), mode='constant', constant_values=constant_values)\n\n # print(len(out), pml_frames, audio.get_hop_size(hparams), pml_frames * audio.get_hop_size(hparams))\n assert len(out) >= pml_frames * audio.get_hop_size(hparams)\n\n # time resolution adjustment\n # ensure length of raw audio is multiple of hop size so that we can use\n # transposed convolution to upsample\n out = out[:pml_frames * audio.get_hop_size(hparams)]\n assert len(out) % audio.get_hop_size(hparams) == 0\n time_steps = len(out)\n\n # Write the spectrogram and audio to disk\n audio_filename = os.path.join(wav_dir, 'audio-{}.npy'.format(index))\n pml_filename = os.path.join(pml_dir, 'pml-{}.npy'.format(index))\n np.save(audio_filename, out.astype(out_dtype), allow_pickle=False)\n np.save(pml_filename, pml_features, allow_pickle=False)\n\n # global 
condition features\n if hparams.gin_channels > 0:\n raise RuntimeError('When activating global conditions, please set your speaker_id rules in line 129 of '\n 'datasets/wavenet_preprocessor.py to use them during training')\n else:\n speaker_id = '<no_g>'\n\n # Return a tuple describing this training example\n return audio_filename, pml_path, pml_filename, speaker_id, time_steps, pml_frames", "def get_wav_duration(wav_bytes: bytes) -> float:\n with io.BytesIO(wav_bytes) as wav_buffer:\n wav_file: wave.Wave_read = wave.open(wav_buffer, \"rb\")\n with wav_file:\n frames = wav_file.getnframes()\n rate = wav_file.getframerate()\n return frames / float(rate)", "def read_wav(filename, offset=0, nframes=None, dtype=torch.double):\n\n if nframes is None: # Load whole file\n fs, x = wavfile.read(filename, mmap=False)\n x = torch.tensor(x, dtype=dtype)\n x.unsqueeze_(dim=0)\n\n else: # Load a part\n with wave.open(filename) as f:\n fs = f.getframerate()\n f.setpos(offset)\n buff = f.readframes(nframes)\n x = torch.tensor(np.frombuffer(buff, np.int16), dtype=dtype)\n x.unsqueeze_(dim=0)\n x -= x.mean()\n\n return x.to(DEVICE), fs", "def readAudioFile(path):\n\n extension = os.path.splitext(path)[1]\n\n try:\n # Commented below, as we don't need this\n # #if extension.lower() == '.wav':\n # #[Fs, x] = wavfile.read(path)\n # if extension.lower() == '.aif' or extension.lower() == '.aiff':\n # s = aifc.open(path, 'r')\n # nframes = s.getnframes()\n # strsig = s.readframes(nframes)\n # x = numpy.fromstring(strsig, numpy.short).byteswap()\n # Fs = s.getframerate()\n if extension.lower() == '.mp3' or extension.lower() == '.wav' or extension.lower() == '.au' or extension.lower() == '.ogg':\n try:\n audiofile = AudioSegment.from_file(path)\n except:\n print(\"Error: file not found or other I/O error. 
\"\n \"(DECODING FAILED)\")\n return -1 ,-1\n\n if audiofile.sample_width == 2:\n data = numpy.fromstring(audiofile._data, numpy.int16)\n elif audiofile.sample_width == 4:\n data = numpy.fromstring(audiofile._data, numpy.int32)\n else:\n return -1, -1\n Fs = audiofile.frame_rate\n x = numpy.array(data[0::audiofile.channels]).T\n else:\n print(\"Error in readAudioFile(): Unknown file type!\")\n return -1, -1\n except IOError:\n print(\"Error: file not found or other I/O error.\")\n return -1, -1\n\n if x.ndim == 2:\n if x.shape[1] == 2:\n x = x.flatten()\n\n return Fs, x", "def get_wav_name(wav_path):\r\n filename = wav_path.rsplit(\"\\\\\")[-1]\r\n id_list = filename.split(\".\")[0]\r\n wav_speaker = id_list.split(\"_\")\r\n return wav_speaker[0], wav_speaker[1]", "def generate_wavplot(song_name):\n\n filepath = features[features.inferred_name.str.title() == song_name].feature_file.values[0]\n rate, wave = wavfile.read(filepath)\n mono = np.mean(wave, axis=1)\n mono.shape\n plt.figure(figsize=(20,6))\n plt.axis('off')\n plt.plot(mono[::mono.shape[0]//6000], color='white')\n plt.tight_layout;\n friendly_song_name = '_'.join(song_name.split()).lower()\n output_filepath = './static/wavplots/' + friendly_song_name + '.png'\n plt.savefig(output_filepath, bbox_inches='tight', pad_inches=0, transparent=True)\n return output_filepath", "def play_audio_file(self, fname=DETECT_DONG):\n ding_wav = wave.open(fname, 'rb')\n ding_data = ding_wav.readframes(ding_wav.getnframes())\n # with no_alsa_error():\n audio = pyaudio.PyAudio()\n stream_out = audio.open(\n format=audio.get_format_from_width(ding_wav.getsampwidth()),\n channels=ding_wav.getnchannels(),\n rate=ding_wav.getframerate(), input=False, output=True)\n stream_out.start_stream()\n stream_out.write(ding_data)\n time.sleep(0.2)\n stream_out.stop_stream()\n stream_out.close()\n audio.terminate()", "def wave(pi, gpio, hz, secs, on=1, offset=0):\n micros_left = int(secs * 1000000)\n transitions = int(2 * hz * secs)\n micros = micros_left / transitions\n\n if (offset < 0) or (offset > micros):\n print(\"Illegal offset {} for hz {}\".format(offset, hz))\n exit()\n\n pi.set_mode(gpio, pigpio.OUTPUT)\n\n wf = [] # Empty waveform.\n\n if offset:\n wf.append(pigpio.pulse(0, 0, offset))\n micros_left -= micros\n last_micros = micros - offset\n transitions -= 1\n\n for t in range(transitions, 0, -1):\n micros = micros_left / t\n if (t & 1) == (on & 1):\n wf.append(pigpio.pulse(0, 1<<gpio, micros))\n else:\n wf.append(pigpio.pulse(1<<gpio, 0, micros))\n micros_left -= micros\n\n if offset:\n if on:\n wf.append(pigpio.pulse(1<<gpio, 0, last_micros))\n else:\n wf.append(pigpio.pulse(0, 1<<gpio, last_micros))\n\n pi.wave_add_generic(wf)\n pi.wave_send_repeat(pi.wave_create())" ]
[ "0.70273054", "0.70135546", "0.68500084", "0.6821027", "0.6812894", "0.6796246", "0.65991676", "0.6327907", "0.6325302", "0.62763965", "0.62052196", "0.61778903", "0.60919714", "0.60821027", "0.60718834", "0.60426664", "0.59239006", "0.59204537", "0.5904547", "0.58931905", "0.58914626", "0.5872094", "0.5866617", "0.58492965", "0.5761596", "0.5707479", "0.5656295", "0.5656023", "0.5653055", "0.564231", "0.56335354", "0.560535", "0.56025934", "0.5597245", "0.55909026", "0.5574474", "0.5539481", "0.5534304", "0.5522624", "0.5520059", "0.55175614", "0.550967", "0.5509119", "0.55017835", "0.54978997", "0.5497711", "0.54865444", "0.54853845", "0.5484697", "0.5473517", "0.5465837", "0.5460034", "0.54588896", "0.545026", "0.5445254", "0.5437466", "0.54361683", "0.5431766", "0.54274046", "0.5426893", "0.5414216", "0.54095185", "0.53995675", "0.53924966", "0.53743005", "0.53598315", "0.53539485", "0.5342214", "0.5326171", "0.53151184", "0.53137136", "0.52978605", "0.52967584", "0.5294765", "0.52905804", "0.5283515", "0.5279952", "0.5263066", "0.52577126", "0.52519506", "0.5243364", "0.52353483", "0.5233773", "0.5228164", "0.522146", "0.5215236", "0.5208882", "0.52087027", "0.52056855", "0.52038515", "0.5203597", "0.5200142", "0.51992565", "0.5196355", "0.5195985", "0.5195538", "0.51953286", "0.5187094", "0.5186792", "0.5157119" ]
0.6584172
7
Clean up the dictionary returned by praatinterface, put it in the format needed for graphing, smooth data by reducing number of datapoints, and return it as JSON.
def format_pitch_data(pd):
    for t in pd.keys():
        pd[t] = pd[t]['Pitch'] # make each value just the pitch, instead of a sub-dict
        if pd[t] == 0:
            del pd[t] # if pitch is 0, remove from dictionary

    # now, pd is dict where each key is time (x value) and each value is pitch (y value)
    # to format for graph input, make list of dicts containing x-y pairs
    datapoints_list = []
    for t in pd.keys():
        datapoint = {}
        datapoint["x"] = t
        datapoint["y"] = pd[t]
        datapoints_list.append(datapoint)

    # sort the list by the value of "x"
    datapoints_sorted = sorted(datapoints_list, key=itemgetter("x"))

    # with this sorted list, do some data smoothing
    # pull out every nth item
    i = 0
    datapoints_keep = []
    while i < len(datapoints_sorted):
        datapoints_keep.append(datapoints_sorted[i])
        i += 50

    # make sure last item is included so length of curve isn't lost
    datapoints_keep.append(datapoints_sorted[-1])

    # print "num of datapoints:", len(datapoints_keep)
    # print datapoints_keep[:100]

    return json.dumps(datapoints_keep, sort_keys=True)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_json(self):\n data = {}\n data['ip'] = self.ip\n\n try:\n data['country'] = self.processedvtdata[\"country\"]\n except KeyError:\n data['country'] = 'None'\n try:\n data['as'] = self.processedvtdata[\"as_owner\"]\n except KeyError:\n data['as'] = 'None'\n try:\n data['rdns'] = self.processedvtdata[\"self.reversedns\"]\n except KeyError:\n data['rdns'] = 'None'\n try:\n data['label'] = self.expertlabel\n except AttributeError:\n data['label'] = ''\n\n # geodata\n #{\"status\":\"success\",\"country\":\"Yemen\",\"countryCode\":\"YE\",\"region\":\"SA\",\"regionName\":\"Amanat Alasimah\",\"city\":\"Sanaa\",\"zip\":\"\",\"lat\":15.3522,\"lon\":44.2095,\"timezone\":\"Asia/Aden\",\"isp\":\"Public Telecommunication Corporation\",\"org\":\"YemenNet\",\"as\":\"AS30873 Public Telecommunication Corporation\",\"query\":\"134.35.218.63\"}\n if self.geodata:\n data['geodata'] = self.geodata\n \n # vt resolutions. Is a list\n data['vt'] = {}\n try:\n if self.processedvtdata['resolutions'] != 'None':\n data['vt']['resolutions'] = []\n for count, resolution_tuple in enumerate(self.processedvtdata['resolutions']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['date'] = resolution_tuple[0]\n temp['domain'] = resolution_tuple[1]\n data['vt']['resolutions'].append(temp)\n except KeyError:\n pass\n\n # vt urls. Is a list\n try:\n if self.processedvtdata['detected_urls'] != 'None':\n data['vt']['detected_urls'] = []\n for count, url_tuple in enumerate(self.processedvtdata['detected_urls']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['date'] = url_tuple[0]\n temp['url'] = url_tuple[1][0]\n temp['detections'] = str(url_tuple[1][1]) + '/' + str(url_tuple[1][2])\n data['vt']['detected_urls'].append(temp)\n except KeyError:\n pass\n\n\n # vt detected communicating samples. Is a list\n try:\n if self.processedvtdata['detected_communicating_samples'] != 'None':\n data['vt']['detected_communicating_samples'] = []\n for count, communcating_tuple in enumerate(self.processedvtdata['detected_communicating_samples']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['date'] = communcating_tuple[0]\n temp['detections'] = str(communcating_tuple[1][0]) + '/' + str(communcating_tuple[1][1])\n temp['sha256'] = communcating_tuple[1][2]\n data['vt']['detected_communicating_samples'].append(temp)\n except AttributeError:\n pass\n\n # vt detected downloaded samples. Is a list\n try:\n if self.processedvtdata['detected_downloaded_samples'] != 'None':\n data['vt']['detected_downloaded_samples'] = []\n for count, detected_tuple in enumerate(self.processedvtdata['detected_downloaded_samples']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['date'] = detected_tuple[0]\n temp['detections'] = str(detected_tuple[1][0]) + '/' + str(detected_tuple[1][1])\n temp['sha256'] = detected_tuple[1][2]\n data['vt']['detected_downloaded_samples'].append(temp)\n except AttributeError:\n pass\n\n # vt referrer downloaded samples. 
Is a list\n try:\n if self.processedvtdata['detected_referrer_samples'] != 'None':\n data['vt']['detected_referrer_samples'] = []\n for count, referrer_tuple in enumerate(self.processedvtdata['detected_referrer_samples']):\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['sha256'] = referrer_tuple[0]\n temp['detections'] = str(referrer_tuple[1][0]) + '/' + str(referrer_tuple[1][1])\n data['vt']['detected_referrer_samples'].append(temp)\n except AttributeError:\n pass\n\n # pt data\n data['pt'] = {}\n if self.processedptdata:\n count = 0\n data['pt']['passive_dns'] = []\n for result in self.processedptdata_results:\n if count >= self.amount_to_print:\n break\n temp = {}\n temp['lastseen'] = result[0]\n temp['firstseen'] = result[1][0]\n temp['hostname'] = result[1][1]\n data['pt']['passive_dns'].append(temp)\n count += 1\n\n # shodan data\n try:\n if self.shodandata:\n data['shodan'] = self.shodandata\n except AttributeError:\n pass\n\n data = json.dumps(data)\n return data", "def getcleanjson(self):\n dico=self.dico.copy() # shalow copie as we remove second level data in the copied\n for key in [\"basefiles\",\"grader\",\"soluce\"]:\n if key in dico:\n del dico[key]\n return str(json.dumps(dico))", "def beautify_json(self) -> None:\n for letter in self.data:\n for category in self.data[letter]:\n self.data[letter][category] = str(self.data[letter][category.lower()])\n self.save()\n with open(dict_path, encoding='utf-8') as x:\n data = x.read()\n with open(dict_path, 'w', encoding='utf-8') as y:\n data2 = data.replace('\"[', '[').replace(']\"', ']').replace(\"'\", '\"')\n y.write(data2)", "def make_json_airspace_format(self):\n # The previous fct make_open_airspace_format already stored, coordinates_kml, name and type\n # This data is collected in an dictionary, which then is stored as json.\n # initialize dict\n coordinates_as_list_of_floats = []\n # run through coordinates\n coordinates_as_list_of_floats = []\n for coo_pt in self.coordinates_kml.split(' ')[:-1]:\n lat_long = coo_pt.split(',')\n coordinates_as_list_of_floats.append([float(lat_long[1]), float(lat_long[0])])\n # make json dict\n # rename name if not thermal space\n if self.name.startswith('TS_') and not (self.as_type == 'A' or self.as_type == 'B'):\n name_for_json = self.name[3:]\n else:\n name_for_json = self.name\n # rename airspace type for json:\n if self.as_type == 'A':\n self.as_type = 'Good_thermals'\n if self.as_type == 'B':\n self.as_type = 'Bad_thermals'\n self.json_dict = {\"AL\": \"FL98\", \"AH\": \"FL99\", \"AC\": self.as_type, \"AN\": name_for_json, \"data\": coordinates_as_list_of_floats}", "def format(self, table):\n #return table.data.to_json()\n data = _replace_nans(table.as_array().tolist())\n if hasattr(data, \"strip\") or \\\n (not hasattr(data, \"__getitem__\") and \\\n not hasattr(data, \"__iter__\")):\n # data is not a list/tuple => wrap it\n data = [ data ]\n v = {\n 'offset': table.offset,\n 'data': data,\n 'headers': table.headers,\n 'types': table.types,\n }\n if table.sizes is not None:\n v[\"sizes\"] = table.sizes\n return json.dumps(v, cls=ExtEncoder)", "def to_json(self) -> Dict[str, any]:\n return {\n 'x_0': self.point_1_moment.strftime(DATE_TIME_FORMAT),\n 'y_0': self.point_1_price,\n 'x_1': self.point_2_moment.strftime(DATE_TIME_FORMAT),\n 'y_1': self.point_2_price,\n 'x_first': self.first_moment.strftime(DATE_TIME_FORMAT),\n 'y_first': self.y_x(self.first_moment),\n 'x_last': self.last_moment.strftime(DATE_TIME_FORMAT),\n 'y_last': self.y_x(self.last_moment)\n }", "def 
export_data(self):\n\n # We need to remove spaces for TW-1504, use custom separators\n data_tuples = ((key, self._serialize(key, value))\n for key, value in six.iteritems(self._data))\n\n # Empty string denotes empty serialized value, we do not want\n # to pass that to TaskWarrior.\n data_tuples = filter(lambda t: t[1] is not '', data_tuples)\n data = dict(data_tuples)\n return json.dumps(data, separators=(',',':'))", "def package_data(data_dict):\r\n return json.dumps(data_dict)", "def _json(self, data):\n if len(data) == 0:\n return \"\"\n if self.meta:\n data['meta_history'] = [{'prog': __prog__,\n 'release': __release__,\n 'author': __author__,\n 'date': __now__},]\n return json.dumps(data) + \"\\n\"", "def json_data(self):\n self.check_proof()\n return {\n \"vars\": [{'name': v.name, 'T': str(v.T)} for v in self.vars],\n \"proof\": sum([printer.export_proof_item(self.thy, item, unicode=True, highlight=True)\n for item in self.prf.items], []),\n \"report\": self.rpt.json_data(),\n \"method_sig\": self.get_method_sig()\n }", "def get_json(self):\n return {\n \"power\": self.get_power(), \n \"timestamp\": self.get_timestamp(), \n \"shortage\": self.get_shortage()\n }", "def _format_data(self, data):\n return json.dumps(data)", "def json_friendly(self):", "def new_police_report(self):\n\n d = {'category':'',\n 'original_text':'',\n 'line_num':0,\n 'address':'',\n 'map_scale':mapscale.UNKNOWN,\n 'date_month':0,\n 'date_day':0,\n 'date_year':0,\n 'lat':'',\n 'long':''}\n\n return d", "def to_dict(self):\n# \"\"\" The JSON model used is like:\n# <code>\n#{\n# \"duration\": 15,\n# \"url\": \"url1\",\n# \"selections\": [{\n# \"annotations\": [{\n# \"author\": \"\",\n# \"description\": \"speaker\",\n# \"keyword\": \"john\",\n# \"lang\": \"EN\"\n# },\n# {\n# \"author\": \"\",\n# \"description\": \"speakerLabel\",\n# \"keyword\": \"S0\",\n# \"lang\": \"EN\"\n# }\n# , {\n# \"author\": \"\",\n# \"description\": \"gender\",\n# \"keyword\": \"F\",\n# \"lang\": \"EN\" \n# }],\n# \"resolution\": \"0x0\",\n# \"selW\": 20,\n# \"selH\": 15,\n# \"selY\": 10,\n# \"selX\": 10,\n# \"startTime\" : 0,\n# \"endTime\" : 10\n# \n# }]\n#}\n# </code>\n# \n# \"\"\"\n\n dic = {\"duration\": self.get_duration(),\n \"url\": self._filename,\n \"db\":self.get_db().get_path(),\n \"selections\": [] }\n for seg in self.get_time_slices():\n dic['selections'].append({\n \"startTime\": float(seg[0]) / 100.0,\n \"endTime\": float(seg[1]) / 100.0,\n 'speaker': seg[-2],\n 'speakerLabel': seg[-1],\n 'gender': seg[2],\n 'speakers': seg[3]\n })\n return dic", "def linedata():\n get_values = request.args\n pc = get_values.get('pc') is not None # Per Capita\n gr = get_values.get('gr') is not None # Growth Rate\n place_args, _ = get_place_args(get_values)\n plot_data, _ = datachart_handler.get_plot_data(place_args, pc, gr)\n return json.dumps(plot_data)", "def format(self, table):\n #return table.data.to_json()\n m = table.as_array()\n rank = len(m.shape)\n is_table = len(table.headers)<=5 or (len(table.headers)>5 and (table.headers[0] != '0' or table.headers[1] != '1' or table.headers[2] != '2' ))\n\n if rank<3 and is_table:\n v = []\n for i in range(len(table.headers)):\n vv = {\n 'offset': table.offset,\n 'header': table.headers[i],\n 'type': table.types[i],\n 'data': _replace_nans(m[:,i].tolist()) if rank>1 else _replace_nans(m.tolist()),\n }\n if table.sizes is not None:\n vv[\"size\"] = table.sizes[0]\n v.append(vv)\n else:\n # if hasattr(data, \"strip\") or \\\n # (not hasattr(data, \"__getitem__\") and \\\n # not hasattr(data, 
\"__iter__\")):\n # # data is not a list/tuple => wrap it\n # data = [ data ]\n v = {\n 'offset': table.offset,\n #'headers': table.headers,\n 'type': table.types[0],\n 'data': _replace_nans(m.tolist()),\n }\n if table.sizes is not None:\n v[\"size\"] = table.sizes\n\n return json.dumps(v, cls=ExtEncoder)", "def precipitation():\n\n return jsonify(prcp_df)", "def json(self):\n beat = self.beat + 1.4 # replace with hjd\n w, h = self.getWidth(), self.getHeight()\n \n return {\n \"_time\": beat,\n \"_duration\": self.dur,\n #\"_lineIndex\": 0,\n #\"_type\": 0,\n #\"_width\": 0,\n \"_customData\": {\n # to undo the local rotation z transform we have to take trig parts of it and multiply them by the dimensions of the wall, then add them to the position\n \"_position\": [self.l + math.cos(math.radians(self.lrot[2] - 90)) * h / 2, self.d + math.sin(math.radians(self.lrot[2]-90)) * h / 2 + h / 2],\n \"_scale\": [w, h],\n \"_rotation\": self.rot,\n \"_localRotation\": self.lrot\n }\n }", "def to_dict(self) -> dict:\n\n return {\n \"data\": {\n \"avg_tone\": self.average_tone,\n \"goldstein\": self.goldstein,\n \"actor_code\": self.actor_code,\n \"lat\": self.latitute,\n \"lon\": self.longitude,\n \"date\": self.timestamp.strftime(r\"%Y-%m-%d %H:%M:%S\"),\n }\n }", "def to_json_string(self):\n\t\treturn json.dumps(dataclasses.asdict(self), indent=2, sort_keys=True) + \"\\n\"", "def json(self):\r\n return {\"id\": self.id, \"code\": self.code, \"description\": self.description, \"xCoor\": self.x_coor, \"yCoor\": self.y_coor, \"latitude\": self.latitude,\r\n \"longitude\": self.longitude, \"waterschapId\": self.waterschap_id, \"watertypeId\": self.watertype_id, \"watertypeKrwId\": self.watertype_krw_id}", "def to_json(self):\n # only when regenerate an dict is effective enough\n if none_count(self) * 10 / len(self.keys()) < 3:\n return self\n\n ret = {}\n for k, v in six.iteritems(self):\n if v == None:\n continue\n ret.update({k: v})\n return ret", "def precipitation(pre_date):\n # Query for the dates and temperature observations from the last year.\n # Convert the query results to a Dictionary using `date` as the key and `tobs` as the value.\n # Return the json representation of your dictionary.\n results = session.query(Measurement).filter(Measurement.date == pre_date)\n\n # Print all passengers of that gender\n all_tobs = [] \n for measure in results:\n measure_dict = {}\n measure_dict[\"station\"] = measure.station\n measure_dict[\"date\"] = measure.date\n measure_dict[\"prcp\"] = measure.prcp\n all_tobs.append(measure_dict)\n\n return jsonify(all_tobs)\n\n\n # print(results)\n # Convert list of tuples into normal list\n # all_names = list(np.ravel(results))\n # print(all_names)\n\n # print(all_names)\n return jsonify({'TempMin': tmp_min, 'TempMax': tmp_max, 'TempAvg': tmp_avg})", "def keypoints_to_json(datum):\n jsonDict = dict()\n jsonDict[\"pose_keypoints_2d\"] = datum.poseKeypoints.tolist()\n if datum.faceKeypoints.size > 0 :\n jsonDict[\"face_keypoints_2d\"] = []\n else : \n jsonDict[\"face_keypoints_2d\"] = datum.faceKeypoints.tolist()\n jsonDict[\"hand_left_keypoints_2d\"] = datum.handKeypoints[0].tolist()\n jsonDict[\"hand_right_keypoints_2d\"] = datum.handKeypoints[1].tolist()\n return jsonDict", "def json_temp_arima(df_arima):\n # The shift_hours=1 accounts for df_arima starting its indexing from 1\n # instead of 0.\n df_arima = add_time_columns(df_arima, shift_hours=1)\n if len(df_arima) < 1:\n return \"{}\"\n json_str = (\n df_arima.groupby([\"sensor_id\", \"measure_name\", 
\"run_id\"], as_index=True)\n .apply(\n lambda x: x[\n [\n \"prediction_value\",\n \"prediction_index\",\n \"run_id\",\n \"time\",\n \"timestamp\",\n ]\n ].to_dict(orient=\"records\")\n )\n .reset_index()\n .rename(columns={0: \"Values\"})\n .to_json(orient=\"records\")\n )\n return json_str", "def format_json(self,query_results):\n results=query_results.data\n factory=factory_json()\n dump=factory.dumps(results)\n print(dump)\n # TODO return output for this\n return \"\"", "def create_json_report(output):\n # Initial work, just dump mia_metrics and dummy_metrics into a json structure\n return json.dumps(output, cls=NumpyArrayEncoder)", "def json_temp_ges(df):\n\n json_str = (\n df.groupby(\n [\n \"sensor_id\",\n \"measure_name\",\n \"run_id\",\n \"ventilation_rate\",\n \"num_dehumidifiers\",\n \"lighting_shift\",\n \"scenario_type\",\n ],\n as_index=True,\n )\n .apply(\n lambda x: x[\n [\n \"prediction_value\",\n \"prediction_index\",\n \"run_id\",\n \"time\",\n \"timestamp\",\n ]\n ].to_dict(orient=\"records\")\n )\n .reset_index()\n .rename(columns={0: \"Values\"})\n .to_json(orient=\"records\")\n )\n return json_str", "def to_json(self):\n\n scaler_json=self.__dict__.copy()\n scaler_json['scale_']=scaler_json['scale_'].tolist()\n scaler_json['min_']=scaler_json['min_'].tolist()\n scaler_json['data_min_']=scaler_json['data_min_'].tolist()\n scaler_json['data_max_']=scaler_json['data_max_'].tolist()\n scaler_json['data_range_']=scaler_json['data_range_'].tolist()\n\n return json.dumps(scaler_json)", "def to_json(self):\n pass", "def lineup_json() -> Response:\n watch = \"watch_direct\" if config.direct else \"watch\"\n\n return jsonify([{\n \"GuideNumber\": station.get('channel_remapped') or station['channel'],\n \"GuideName\": station['name'],\n \"URL\": f\"http://{host_and_port}/{watch}/{station['id']}\"\n } for station in locast_service.get_stations()])", "def serialize_to_json(self):\n d = self.__dict__\n x = d.pop('epicenter_x')\n y = d.pop('epicenter_y')\n z = d.pop('epicenter_z')\n d['epicenter'] = [x, y, z]\n\n return dict_to_safe_for_json(d)", "def save_to_json(self):\r\n file = col.defaultdict(list)\r\n data_sources = [\"http://www.gcmap.com/\",\r\n \"http://www.theodora.com/country_digraphs.html\",\r\n \"http://www.citypopulation.de/world/Agglomerations.html\",\r\n \"http://www.mongabay.com/cities_urban_01.htm\",\r\n \"http://en.wikipedia.org/wiki/Urban_agglomeration\",\r\n \"http://www.worldtimezone.com/standard.html\"]\r\n file[\"data_sources\"] = data_sources\r\n for code, city in self.vertices.items():\r\n metros = {}\r\n for key, val in vars(city).items():\r\n metros[key] = val\r\n file[\"metros\"].append(metros)\r\n for code, _list in self.edges.items():\r\n for edge in _list:\r\n routes = {\"ports\": [edge.start, edge.destination], \"distance\": edge.distance}\r\n second_route = {\"ports\": [edge.destination, edge.start], \"distance\": edge.distance}\r\n if second_route not in file[\"routes\"]:\r\n file[\"routes\"].append(routes)\r\n with open('../Data/save.json', 'w') as outfile:\r\n json.dump(file, outfile, indent=4)", "def to_json_string(self) -> None:\n return json.dumps(dataclasses.asdict(self)) + \"\\n\"", "def parse_jsons(self):\n return super(VegaGraphScatter, self).parse_jsons()", "def __json_encode__(self) -> Dict[str, Any]:\n return {\"figure\": self.figure, \"name\": self.name, \"metadata\": self.metadata}", "def _process_dict(data):\n new_dict = {}\n for key in data.keys():\n\tnew_dict['name'] = data['printerName']\n #new_dict[key] = data[key]\n\n #FIGURE 
OUT AND UPDATE PRINTER STATUS\n status = BUSY_STATUS\n error_msg = \"\"\n if \"FrontPanelMessage\" in data:\n if data[\"FrontPanelMessage\"].lower() in READY_MESSAGES:\n status = READY_STATUS\n elif \"error\" in data[\"FrontPanelMessage\"].lower():\n status = ERROR_STATUS\n error_msg = \"general error\"\n \n if \"TonerStatus\" in data:\n if data[\"TonerStatus\"].find(\"2\") != -1:\n status = ERROR_STATUS\n error_msg = \"Toner Error\"\n #if len(new_dict[\"TonerStatus\"]) > 4:\n #new_dict[\"TonerStatus\"] = new_dict[\"TonerStatus\"][4:]\n\n if \"PaperStatus\" in data:\n if data[\"PaperStatus\"].find(\"2\") != -1:\n status = ERROR_STATUS\n error_msg = \"Paper Status Error\"\n elif data[\"PaperStatus\"].find(\"1\") != -1:\n status = ERROR_STATUS\n error_msg = \"Out of Paper\"\n #if len(new_dict[\"PaperStatus\"]) > 4:\n #new_dict[\"PaperStatus\"] = new_dict[\"PaperStatus\"][4:]\n\n if \"PaperJamStatus\" in data:\n if data[\"PaperJamStatus\"].find(\"1\") != -1:\n status = ERROR_STATUS\n error_msg = \"Paper Jam\"\n #if len(new_dict[\"PaperJamStatus\"]) > 4:\n #new_dict[\"PaperJamStatus\"] = new_dict[\"PaperJamStatus\"][4:]\n\n new_dict[\"status\"] = status\n new_dict[\"error_msg\"] = error_msg\n new_dict[\"location\"] = PRINTERS[new_dict[\"name\"]][0]\n new_dict[\"building_name\"] = PRINTERS[new_dict[\"name\"]][1]\n new_dict[\"latitude\"] = PRINTERS[new_dict[\"name\"]][2]\n new_dict[\"longitude\"] = PRINTERS[new_dict[\"name\"]][3]\n new_dict[\"atResidence\"] = PRINTERS[new_dict[\"name\"]][4]\n return new_dict", "def render(self, data):\n logging.info(\"render (start)\")\n\n seria = json.dumps(data, ensure_ascii=False, indent=4)\n logging.info(\"rendered %s characters (end)\" % len(seria))\n return seria", "def _jsonify(self):\n return self.experiment_record.to_ddb_record()", "def to_json(self):\n\n d = {\n \"title\": self.title,\n \"abstract\": self.abstract,\n \"intellectual_merit\": self.intellectual_merit,\n \"broader_impact\": self.broader_impact,\n \"use_of_fg\": self.use_of_fg,\n \"scale_of_use\": self.scale_of_use,\n \"categories\": self.categories,\n \"keywords\": self.keywords,\n \"primary_discipline\": self.primary_discipline,\n \"orientation\": self.orientation,\n \"contact\": self.contact,\n \"url\": self.url,\n \"active\": self.active,\n \"status\": self.status,\n \"lead\": self.lead,\n \"members\": self.members,\n \"resources_services\": self.resources_services,\n \"resources_software\": self.resources_software,\n \"resources_clusters\": self.resources_clusters,\n \"resources_provision\": self.resources_provision\n }\n return d", "def to_json(self):\n capsule = {}\n capsule[\"Hierarchy\"] = []\n for (\n dying,\n (persistence, surviving, saddle),\n ) in self.merge_sequence.items():\n capsule[\"Hierarchy\"].append(\n {\n \"Dying\": dying,\n \"Persistence\": persistence,\n \"Surviving\": surviving,\n \"Saddle\": saddle,\n }\n )\n capsule[\"Partitions\"] = []\n base = np.array([None, None] * len(self.Y)).reshape(-1, 2)\n for (min_index, max_index), items in self.base_partitions.items():\n base[items, :] = [min_index, max_index]\n capsule[\"Partitions\"] = base.tolist()\n\n return json.dumps(capsule)", "def somef_data_to_graph(self, somef_data):\n current_date = datetime.datetime.now()\n data = self.reconcile_somef_data(somef_data)\n if len(data.keys()) == 0:\n logging.warning(\"No fields were found in file\")\n return\n if constants.CAT_NAME not in data.keys():\n data['name'] = 'Software' + current_date.strftime(\"%Y%m%d%H%M%S\")\n if constants.CAT_FULL_NAME not in data.keys():\n 
data['fullName'] = data['name']\n # save JSON in temp file\n temp_file = \"tmp\"+current_date.strftime(\"%Y%m%d%H%M%S\")+\".json\"\n with open(temp_file, 'w') as output:\n json.dump(data, output)\n result_graph = self.apply_mapping(constants.mapping_path, output.name)\n self.g += self.g + result_graph\n os.remove(output.name)\n # temp_file = tempfile.NamedTemporaryFile()\n # with open(temp_file.name, 'w') as output:\n # json.dump(data, output)\n # result_graph = self.apply_mapping(constants.mapping_path, output.name)\n # self.g += self.g + result_graph", "def npdict(self):\n\n d = {}\n\n # per profile\n d['cruise'] = self.cruise()\n d['day'] = self.day()\n d['latitude'] = self.latitude()\n d['latitude_unc'] = self.latitude_unc()\n d['longitude'] = self.longitude()\n d['longitude_unc'] = self.longitude_unc()\n d['month'] = self.month()\n d['n_levels'] = self.n_levels()\n d['primary_header_keys'] = self.primary_header_keys()\n d['probe_type'] = self.probe_type()\n d['time'] = self.time()\n d['uid'] = self.uid()\n d['year'] = self.year()\n d['PIs'] = self.PIs()\n d['originator_station'] = self.originator_station()\n d['originator_cruise'] = self.originator_cruise()\n d['originator_flag_type'] = self.originator_flag_type()\n d['t_metadata'] = self.t_metadata()\n d['s_metadata'] = self.s_metadata()\n # per level\n d['s'] = self.s()\n d['s_unc'] = self.s_unc()\n d['s_level_qc'] = self.s_level_qc()\n d['s_profile_qc'] = self.s_profile_qc()\n d['s_qc_mask'] = self.s_qc_mask()\n d['t'] = self.t()\n d['t_unc'] = self.t_unc()\n d['t_level_qc'] = self.t_level_qc()\n d['t_profile_qc'] = self.t_profile_qc()\n d['t_qc_mask'] = self.t_qc_mask()\n d['z'] = self.z()\n d['z_unc'] = self.z_unc()\n d['z_level_qc'] = self.z_level_qc()\n d['oxygen'] = self.oxygen()\n d['phosphate'] = self.phosphate()\n d['silicate'] = self.silicate()\n d['pH'] = self.pH()\n d['p'] = self.p()\n\n return d", "def to_json(self):\n return json.dumps({\"data\": self._data.tolist(),\n \"header\": self._header.tolist(),\n \"dates\": self._dates.tolist()})", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def to_json(self):\n return json.dumps(sanitize_for_serialization(self.to_dict()))", "def get_datalist_fr_json(self):\n raw_data = json.load(open(self.saved_json_file, 'r'))\n for indivdual_set in raw_data['query']['results']['stats']:\n temp_dict_data = {}\n if type(indivdual_set) == str:\n #for single data\n continue # temp do not use\n for parameters in indivdual_set.keys():\n if type(indivdual_set[parameters]) == str:\n temp_dict_data[parameters] = indivdual_set[parameters]#for symbol\n elif type(indivdual_set[parameters]) == dict:\n if indivdual_set[parameters].has_key('content'):\n temp_dict_data[parameters] = indivdual_set[parameters]['content']\n\n ## append to list\n 
self.com_data_allstock_list.append(temp_dict_data)", "def as_json(self):", "def tojson(self,filenames={},alias=None,axes={}):\n\n out = \"[\"\n firstser = True\n for study in self.studies:\n if not firstser:\n out += ',\\n'\n else:\n firstser = False\n out += '{\\n'\n out += ' \"type\": \"study\",\\n'\n out += ' \"id\": \"%s\",\\n' % study[0]\n if self.change_name:\n out += ' \"name\": \"%s\",\\n' % self.change_name\n else:\n out += ' \"name\": \"%s\",\\n' % study[1]\n\n out += ' \"series\": [\\n'\n ksort = self.studies[study].keys()[:]\n ksort.sort(lambda a,b: cmp(fixser(a),fixser(b)))\n first = True\n for k in ksort:\n\n if alias:\n if not alias.match(study[0], study[1], k):\n continue\n\n e = self.studies[study][k]\n if not first:\n out += ',\\n'\n else:\n first = False\n\n out += ' {\\n'\n out += ' \"type\": \"series\",\\n'\n out += ' \"id\": \"%s\",\\n' % k\n out += ' \"rows\": %d,\\n' % e.shape[0]\n out += ' \"cols\": %d,\\n' % e.shape[1]\n out += ' \"slices\": %d,\\n' % len(e.slices)\n out += ' \"times\": %d,\\n' % len(e.times)\n out += ' \"echoes\": %d,\\n' % len(e.echoes)\n out += ' \"flip_var\": \"%s\",\\n' % e.vflip\n out += ' \"flip\": %g,\\n' % e.flip\n out += ' \"reptimes\": [%g],\\n' % e.tr\n \n tmpechoes = [(\"%g\"% (float(e.te[x]),)) for x in e.echoes.keys()]\n out += ' \"echotimes\": [%s],\\n' % (', '.join(tmpechoes))\n\n if not e.table is None:\n out += ' \"table\": [%s],\\n' % (', '.join([str(x) for x in e.table]),)\n\n if not e.patient_cmt is None:\n out += ' \"patient_cmt\": \"%s\",\\n' % e.patient_cmt\n \n if not e.image_cmt is None:\n out += ' \"image_cmt\": \"%s\",\\n' % e.image_cmt\n\n if not e.sar is None:\n out += ' \"sar\": {\\n'\n out += ' \"mode\": %d,\\n' % (e.sar['mode'],)\n out += ' \"most_crit\": \"%s\",\\n' % (e.sar['most_crit'],)\n out += ' \"value_lim\": %g,\\n' % (e.sar['values'][0],)\n out += ' \"value_1\": %g,\\n' % (e.sar['values'][1],)\n out += ' \"value_2\": %g,\\n' % (e.sar['values'][2],)\n out += ' \"value_body\": %g\\n' % (e.sar['body'],)\n out += ' },\\n'\n\n if not e.phase is None:\n out += ' \"phase\": {\\n'\n out += ' \"axis\": \"%s\",\\n' % (e.phase['axis'],)\n\n if axes.has_key((study[0],study[1],k)):\n a = axes[study[0],study[1],k]\n out_axis = orient.map_axis(e.phase['axis'],a)\n out += ' \"axis_out\": \"%s\",\\n' % (out_axis,)\n \n out += ' \"direction\": \"%s\",\\n' % (e.phase['direction'],)\n out += ' \"positive\": %d\\n' % (e.phase['positive'],)\n\n out += ' },\\n'\n \n if len(e.times) > 1:\n out += ' \"interval\": %.4g,\\n' % (self.interval(e),)\n \n if e.bval.values()[0] != None:\n t = [int(x) for x in e.times]; t.sort(); t = [str(x) for x in t]\n\n # diffusion in the DICOM [x,y,z] co-ordinates\n out += ' \"diffusion\": [\\n'\n first=1\n for tn in t:\n if first==0:\n out += ',\\n'\n first=0\n if len(e.diff[tn]) < 3:\n out += ' [%g, null]' % (e.bval[tn])\n else:\n out += ' [%g, [%g, %g, %g]]' % (e.bval[tn], e.diff[tn][0], e.diff[tn][1], e.diff[tn][2])\n out += '\\n ],\\n'\n \n # diffusion aligned to the DICOM [i,j,k] image grid\n #\n # (NB we use OrientedImage here re-initialized from the original DICOM\n # orientation field for this series)\n out += ' \"diffusiongrid\": [\\n'\n o = orient.OrientedImage(None,e.res,e.orient.keys())\n first=1\n for tn in t:\n if first==0:\n out += ',\\n'\n first=0\n if len(e.diff[tn]) < 3:\n out += ' [%g, null]' % (e.bval[tn])\n else:\n egrid = o.dcm_to_grid(e.diff[tn])\n out += ' [%g, [%g, %g, %g]]' % (e.bval[tn], egrid[0], egrid[1], egrid[2])\n out += '\\n ],\\n'\n\n out += ' \"desc\": \"%s\",\\n' 
% e.desc\n out += ' \"type\": \"%s\",\\n' % e.type\n\n # Axes mappings:\n #\n # This records the DICOM-to-output axis mappings for the conversion. We only allow\n # for very simple mappings, flips and 90-degree rotations: a major point of volconv\n # is to preserve the image and DICOM co-ordinate systems.\n #\n # The [i j k] system is the image array: [0 0 0] is the corner voxel of the lowest\n # slice. [I J K] is the output image array with [0 0 0] the first voxel stored.\n # [i j k] and [I J K] indices are always positive; if an axis is flipped, the corner\n # voxel moves to the opposite end of that axis.\n #\n # The [x y z] system is the patient co-ordinate system: [0 0 0] is the DICOM origin.\n # [X Y Z] of the output format will be an appropriate patient/world co-ordinate\n # system with the same origin and orthogonal axes. The anatomical meaning of DICOM\n # axes is preserved if the output format defines them (eg DICOM LPS -> Nifti RAS).\n\n if axes.has_key((study[0],study[1],k)):\n a = axes[study[0],study[1],k]\n\n # image axis mappings: depends on reorient/flip parameters\n out += ' \"grid_axes_map\": [\"%s\", \"%s\", \"%s\"],\\n' % \\\n (a[0], a[1], a[2])\n\n # patient axis mappings: for DICOM->Nifti, depends purely on standards\n out += ' \"patient_axes_map\": [\"-x\", \"-y\", \"z\"],\\n'\n\n if self.use_exdcm:\n if self.exdcm_path:\n fn = e.file[sorted(e.file.keys())[0]]\n else:\n fn = basename(e.file[sorted(e.file.keys())[0]])\n out += ' \"exdcm\": \"%s\",\\n' % fn\n\n if filenames.has_key((study[0],study[1],k)):\n fn = basename(filenames[study[0],study[1],k])\n if fn.endswith('.nii') or fn.endswith('.nii.gz'):\n out += ' \"nii\": \"%s\",\\n' % fn\n elif fn.endswith('.gipl') or fn.endswith('.gipl.gz'):\n out += ' \"gipl\": \"%s\",\\n' % fn\n\n out += ' \"date\": \"%s\",\\n' % e.date\n out += ' \"time\": \"%s\"\\n' % e.time\n out += ' }'\n out += '\\n ]\\n'\n out += \"}\"\n out += ']\\n'\n return(out)", "def loadAndClean(jfile):\n with open(jfile) as json_file:\n data = json.load(json_file)[\"G15\"]\n newDict = {}\n # Print the type of data variable\n \n for entry in data:\n\n if \"version\" in data[entry] and data[entry][\"assessed_by\"] not in filterList:\n\n if data[entry][\"assessed_by\"] in dictTries:\n dictTries[data[entry][\"assessed_by\"]].append(data[entry])\n\n \n\n if len(dictTries[data[entry][\"assessed_by\"]]) == 2:\n ml = dictTries[data[entry][\"assessed_by\"]]\n #vou querer calcular o maior \n # comparisson = \"accuracy\"\n comparisson = \"target_w_penalty\"\n \n if ml[0][comparisson] <= ml[1][comparisson]:\n if ml[0][\"accuracy\"] > 90:\n printSingle(ml[0])\n newDict[entry] = ml[0]\n\n else:\n if ml[1][\"accuracy\"] > 90:\n printSingle(ml[1])\n newDict[entry] = ml[1]\n dictTries[data[entry][\"assessed_by\"]] = []\n\n else:\n dictTries[data[entry][\"assessed_by\"]] = [data[entry]]\n\n\n\n # dictTries[data[\"assessed_by\"]]\n\n # newDict[entry] = data[entry]\n # print(data[entry])\n # print()\n # printDict(newDict)\n return newDict", "def weatherPrecipitation():\n prcp = session.query(measurement.date,measurement.prcp).all()\n test = [{row[0]:row[1]}for row in prcp]\n return jsonify(test)", "def to_json(self) -> str:\n data_dict = self._to_list_dict()\n return json.dumps(data_dict, indent=4, cls=NumpyEncoder)", "def to_json(self):\n\n columns = list()\n points = list()\n\n if self._collection.type() == Event:\n columns += ['time']\n elif self._collection.type() == TimeRangeEvent:\n columns += ['timerange']\n elif self._collection.type() == IndexedEvent:\n columns 
+= ['index']\n\n columns += self.columns()\n\n for i in self._collection.events():\n points.append(i.to_point(columns[1:]))\n\n cols_and_points = dict(\n columns=columns,\n points=points,\n )\n\n # fold in the rest of the payload\n cols_and_points.update(self._data)\n\n # Turn the index back into a string for the json representation.\n # The Index object can still be accessed via TimeSeries.index()\n if 'index' in cols_and_points and \\\n isinstance(cols_and_points.get('index'), Index):\n cols_and_points['index'] = cols_and_points.get('index').to_string()\n\n return cols_and_points", "def _get_data_as_json(self, data):\n data = self._get_data_as_df(data)\n data = data.to_json(orient=\"records\")\n char_per_line = min(len(data), self.SAMPLES_PER_LINE_DEFAULT)\n return list(map(''.join, zip(*[iter(data)] * char_per_line)))", "def to_string(self):\n\thistory_items = self.parse()\n\tvalue_counter = history_items[\"data\"]\n\tresp = { \"slots\": history_items[\"slots\"], \"num_data\": history_items[\"num_data\"], \"top_num_values\": [] }\n\n\tfor i in range(0, min(len(value_counter), 10)):\n\t resp[\"top_num_values\"].append({ \"itemid\": value_counter[i][0], \"num\": value_counter[i][1]})\n\t\n return json.dumps(self.parse(), indent=4)", "def to_json(peaklist):\n pl = [{\"Assignment\": peak.assignments_list,\n \"Dimensions\": peak.chem_shifts_list,\n \"DataHeight\": peak.extra_attr} for peak in peaklist]\n json_str = json.dumps(pl)\n return json_str", "def to_json_full(self):\n data = self.to_json()\n # TODO: Enable this once custom resource_links are supported again.\n #data['resource_links'] = [\n # r.to_json() for r in self.resource_links\n # if self.resource_links is not None\n #]\n data['commits'] = [\n c.to_json() for c in self.commits if self.commits is not None\n ]\n data['nvd'] = self.nvd._to_json_full(\n ) if self.nvd is not None else None\n data['creator'] = self.creator.to_json(\n ) if self.creator is not None else None\n data['date_created'] = self.date_created\n data['date_modified'] = self.date_modified\n\n return data", "def json(self) -> Dict[str, Any]:\n return {\n \"product_id\": self.product_id,\n \"detection_index\": self.detection_index,\n \"product_name\": self.class_name,\n \"confidence\": self.conf,\n \"bounding_box\": [int(coord)\n for coord in self.scale_coordinates.round()],\n \"top_k_product_names\": self.top_k_names,\n \"top_k_confidences\": self.top_k_confidences,\n \"top_k_product_ids\": self.top_k_product_ids,\n \"top_k_detection_indices\": self.top_k_indices\n }", "def json(self):\n ar_min_date, ar_max_date = self.get_ar_dates(\n (self.last_ar_year if self.last_ar_year else self.founding_date.year) + 1\n )\n d = {\n 'foundingDate': self.founding_date.isoformat(),\n 'identifier': self.identifier,\n 'lastModified': self.last_modified.isoformat(),\n 'lastAnnualReport': datetime.date(self.last_ar_date).isoformat() if self.last_ar_date else '',\n 'nextAnnualReport': LegislationDatetime.as_legislation_timezone_from_date(\n self.next_anniversary\n ).astimezone(timezone.utc).isoformat(),\n 'lastAnnualGeneralMeetingDate': datetime.date(self.last_agm_date).isoformat() if self.last_agm_date else '',\n 'lastLedgerTimestamp': self.last_ledger_timestamp.isoformat(),\n 'legalName': self.legal_name,\n 'legalType': self.legal_type,\n 'hasRestrictions': self.restriction_ind,\n 'goodStanding': self.good_standing,\n 'arMinDate': ar_min_date.isoformat(),\n 'arMaxDate': ar_max_date.isoformat()\n }\n # if self.last_remote_ledger_timestamp:\n # # this is not a typo, we want the 
external facing view object ledger timestamp to be the remote one\n # d['last_ledger_timestamp'] = self.last_remote_ledger_timestamp.isoformat()\n # else:\n # d['last_ledger_timestamp'] = None\n\n if self.dissolution_date:\n d['dissolutionDate'] = datetime.date(self.dissolution_date).isoformat()\n if self.fiscal_year_end_date:\n d['fiscalYearEndDate'] = datetime.date(self.fiscal_year_end_date).isoformat()\n if self.tax_id:\n d['taxId'] = self.tax_id\n\n return d", "def render_dictionary(self): \n asset_json = {\n 'name': self.name,\n 'product_name': self.product_name,\n 'product_vendor': self.product_vendor,\n 'configuration': self.configuration,\n 'description': self.description,\n 'primary_users': self.primary_users,\n 'primary_voting': self.primary_voting,\n 'secondary_users': self.secondary_users,\n 'secondary_voting': self.secondary_voting,\n 'tags': self.tags,\n 'type': self.asset_type,\n 'action_whitelist': self.action_whitelist\n }\n\n if self.ingest_container_label:\n asset_json['ingest'] = {\n 'container_label': self.ingest_container_label,\n 'interval_mins': self.ingest_interval_mins,\n 'poll': self.ingest_poll,\n 'start_time_epoch_utc': self.ingest_start_time\n }\n\n return asset_json", "def json_raw(self):\n return json.dumps(self.data, cls=ComplexEncoder)", "def jsonify(data):\n\n for key in data:\n if type(data[key]) == numpy.ndarray:\n data[key] = data[key].tolist()\n\n if isinstance(data[key], list):\n data[key] = [0 if isinstance(x, float) and math.isnan(x) else x for x in data[key]]\n\n return data", "def get_data(self):\n return self.data.to_json()", "def _jsonify(data: dict):\n j = data.pop('json', None)\n if isinstance(j, dict):\n return j\n if j is None:\n for k, v in data.items():\n if isinstance(v, datetime) or isinstance(v, date):\n data[k] = arrow.get(v).isoformat()\n\n # Create json from kwargs\n j = json.dumps(data)\n return json.loads(j)", "def serialize(self, data):\n if isinstance(data, dict):\n return json.dumps(\n {\n key: value.tolist() if isinstance(value, np.ndarray) else value\n for key, value in data.items()\n }\n )\n\n if hasattr(data, \"read\"):\n return data.read()\n\n if isinstance(data, np.ndarray):\n return json.dumps(data.tolist())\n\n return json.dumps(data)", "def precipitation():\n\n # Query all Measurement\n prcp_results = session.query(Measurement).all()\n\n # Creating a dictionary and appending a list of Measurements to it\n all_prcp_results = []\n for prcp in prcp_results:\n prcp_dict = {}\n prcp_dict[\"date\"] = prcp.date\n prcp_dict[\"precipitation\"] = prcp.prcp\n all_prcp_results.append(prcp_dict)\n\n return jsonify(all_prcp_results)", "def precip():\n date_prcp=session.query(measurements.date,measurements.prcp).all()\n date_prcp_df=pd.DataFrame(date_prcp).set_index('date')\n date_prcp_dict=date_prcp_df.to_dict()\n return jsonify(date_prcp_dict)", "def json(self):\n # Response legacy data: allow for any column to be null.\n document = {\n 'mhrNumber': self.mhr_number,\n 'documentType': self.document_type,\n 'documentRegistrationNumber': self.document_reg_id,\n 'interimed': self.interimed,\n 'ownerCrossReference': self.owner_cross_reference,\n 'interestDenominator': self.interest_denominator,\n 'declaredValue': self.declared_value,\n 'ownLand': self.own_land,\n 'routingSlipNumber': self.routing_slip_number,\n 'lastService': self.last_service,\n 'bcolAccount': self.bcol_account,\n 'datNumber': self.dat_number,\n 'examinerId': self.examiner_id,\n 'updateId': self.update_id,\n 'phoneNumber': self.phone_number,\n 'attentionReference': 
self.attention_reference,\n 'name': self.name,\n 'legacyAddress': self.legacy_address,\n 'numberOfPages': self.number_of_pages,\n 'considerationValue': self.consideration_value,\n 'affirmByName': self.affirm_by_name,\n 'liensWithConsent': self.liens_with_consent,\n 'clientReferenceId': self.client_reference_id\n }\n if self.draft_ts:\n document['draftDateTime'] = model_utils.format_local_ts(self.draft_ts)\n if self.registration_ts:\n document['createDateTime'] = model_utils.format_local_ts(self.registration_ts)\n if self.transfer_execution_date and self.transfer_execution_date.year > 1:\n document['transferDate'] = model_utils.format_local_date(self.transfer_execution_date)\n return document", "def ser(self):\n return {\n 'lat': self.lat,\n 'lon': self.lon,\n 'title': self.title,\n 'datasource': self.datasource,\n 'author': self.author,\n 'start_time': self.start_time\n }", "def json(self):\n class ExtendedJSONEncoder(json.JSONEncoder):\n def default(self, obj):\n if isinstance(obj, datetime.date) or isinstance(obj, datetime.time):\n encoded_object = obj.isoformat()\n else:\n encoded_object = json.JSONEncoder.default(self, obj)\n return encoded_object\n\n obj = {\n 'operation': self.operation,\n 'version': self.version,\n 'language': self.language,\n 'identifiers': self.identifiers,\n 'store_execute': self.store_execute,\n 'status': self.status,\n 'lineage': self.lineage,\n 'inputs': dict((i, [inpt.json for inpt in self.inputs[i]]) for i in self.inputs),\n 'outputs': self.outputs,\n 'raw': self.raw\n }\n\n return json.dumps(obj, allow_nan=False, cls=ExtendedJSONEncoder)", "def to_json_string(self):\n\t\treturn json.dumps(self.to_dict(), indent=2, sort_keys=True) + \"\\n\"", "def _to_json(self, output):\n out_dict = {\"predictions\": output}\n return json.dumps(out_dict)", "def formatting(self):\n trajectory = self.query\n # determine direction of trajectory\n regres = Regression.linear_regression(trajectory[-10:])\n first_point = regres[0]\n last_point = regres[-1]\n\n # path manipulation (filtering, road matching and so on)\n h_subpath = geoutil.filter_by_longitude(self.horizontal,trajectory[-1],0.0005)\n v_subpath = geoutil.filter_by_latitude(self.vertical,trajectory[-1],0.0005)\n\n # calculate direction vector prediction\n predicted_path = []\n try:\n # remove first third of each regression\n h_start = int(len(h_subpath)/2)\n v_start = int(len(v_subpath)/2)\n base_point = trajectory[-1]\n for i in range(10):\n deltaX = (h_subpath[h_start+i+1][0]-h_subpath[h_start+i][0] + v_subpath[v_start+i+1][0] - v_subpath[v_start+i][0])*2\n deltaY = (h_subpath[h_start+i+1][1]-h_subpath[h_start+i][1] + v_subpath[v_start+i+1][1] - v_subpath[v_start+i][1])*2\n dirX = (last_point[0]-first_point[0])/20\n dirY = (last_point[1]-first_point[1])/20\n new_point = [base_point[0]+(deltaX+dirX),base_point[1]+(deltaY+dirY)]\n predicted_path.append(new_point)\n base_point = new_point\n except Exception as e:\n print(\"Predicted trajectory error: \"+str(e))\n\n self.predicted = predicted_path\n\n return {\n \"maroon\": geoutil.geojson_path_converter(geoutil.roads_matching(predicted_path),\"road_matching\"),\n \"orange\": geoutil.geojson_path_converter(self.training,\"training\"),\n \"red\": geoutil.geojson_path_converter(regres,\"lin regression\"),\n \"blue\": geoutil.geojson_path_converter(h_subpath+v_subpath,\"h and v regression\"),\n \"black\": geoutil.geojson_path_converter(predicted_path,\"prediction\")\n }", "def output_json(data, code, headers=None):\n #data[\"timestamp\"] = datetime.now()\n return 
jsonify(data)", "def toJSON(self):\r\n\r\n jsonToRet = []\r\n rowJson = []\r\n matrixJson = []\r\n\r\n if len(self.slctData) > 100:\r\n self.getSimMatSummary(100)\r\n jsonToRet.append(self.summaryOrdering)\r\n for i in range(0,len(self.simMatSmm)):\r\n for n in self.simMatSmm[i]:\r\n rowJson.append(n)\r\n matrixJson.append(rowJson)\r\n rowJson = []\r\n jsonToRet.append(matrixJson)\r\n\r\n jsonToRet.append(self.patchOrdering)\r\n # jsonToRet = []\r\n rowJson = []\r\n matrixJson = []\r\n\r\n for i in range(0,len(self.simMat)):\r\n for n in self.simMat[i]:\r\n rowJson.append(n)\r\n matrixJson.append(rowJson)\r\n rowJson = []\r\n jsonToRet.append(matrixJson)\r\n return jsonToRet", "def Export(self):\n\n current_time = datetime.datetime.now(tz.UTC)\n\n self.data = np.load(self.cache_path, allow_pickle=True)[()]\n\n news = []\n\n ID = self.data.keys()\n\n for id in ID:\n v = self.data[id]\n if 'address' in v and \"河南\" in v['address'] and v['valid'] == 1\\\n and current_time - parse(v['time']) < datetime.timedelta(hours=12):\n news.append({\"Time\": v['time'], \"address\": v['address'], \"location\": v['location'], \"post\": v['post'],\n \"link\": v[\"link\"]})\n\n with open(self.output_path, \"w\", encoding=\"utf-8\") as fp:\n json.dump(news, fp, ensure_ascii=False, indent=4)\n\n print(\"Export %d info\" % len(news))", "def as_json(self):\n # if we don't convert it to a dict we'll get a whole bunch of 'can't be serialized' things\n # match = self.__dict__\n # match.pop('_sa_instance_state', None)\n # for k in match:\n #\n # match['date'] = match['date'].isoformat()\n m = self.__dict__\n m['explosions'] = self.explosions.all()\n m['deaths'] = self.deaths.all()\n m['antagobjs'] = self.antagobjs.all()\n m['uplinkbuys'] = self.uplinkbuys.all()\n m['badassbuys'] = self.badassbuy.all()\n m['populationstats'] = self.populationstats.all()\n\n return dict_to_json(m)", "def format_data(self, params):\n return json.dumps(params)", "def to_json(self):\n quantille_data = dict(((key, quantille.dumps()) for key, quantille in self.__data.items()))\n return json.dumps((quantille_data, self.__keys, self.__value_keynames))", "def to_json(self):\n return None", "def __data_row_to_json(self, row):\n raw_data = {}\n raw_data[\"body\"] = row.body\n raw_data[\"score_hidden\"] = row.score_hidden\n raw_data[\"archived\"] = row.archived\n raw_data[\"name\"] = row.name\n raw_data[\"author\"] = row.author\n raw_data[\"author_flair_text\"] = row.author_flair_text\n raw_data[\"downs\"] = row.downs\n raw_data[\"created_utc\"] = row.created_utc\n raw_data[\"subreddit_id\"] = row.subreddit_id\n raw_data[\"link_id\"] = row.link_id\n raw_data[\"parent_id\"] = row.parent_id\n raw_data[\"score\"] = row.score\n raw_data[\"retrieved_on\"] = row.retrieved_on\n raw_data[\"controversiality\"] = row.controversiality\n raw_data[\"gilded\"] = row.gilded\n raw_data[\"id\"] = row.id\n raw_data[\"subreddit\"] = row.subreddit\n raw_data[\"ups\"] = row.ups\n raw_data[\"distinguished\"] = row.distinguished\n raw_data[\"author_flair_css_class\"] = row.author_flair_css_class\n\n return json.dumps(raw_data)", "def to_single_json(self):\n self.error_throw('output')\n \n if self.rank_method == methods_of_ranking[3]: #'diversified_ranking'\n self.output_div('single_json')\n else:\n self.output('single_json')", "def _format_data(self) -> None:\n for row in self._db_data:\n if row['age_start'] is None:\n continue\n # entry = {'x': 'Celkem', 'y': int(row['count'])}\n elif row['age_start'] == 95:\n entry = {'x': f\"{int(row['age_start'])}+\", 'y': 
int(row['count'])}\n else:\n entry = {'x': f\"{int(row['age_start'])}-{int(row['age_start'])+4}\", 'y': int(row['count'])}\n self.return_data['data'].append(entry)", "def to_json(self) -> Dict[str, Any]:\n return {\n \"client_order_id\": self.client_order_id,\n \"exchange_order_id\": self.exchange_order_id,\n \"trading_pair\": self.trading_pair,\n \"order_type\": self.order_type.name,\n \"trade_type\": self.trade_type.name,\n \"price\": str(self.price),\n \"amount\": str(self.amount),\n \"executed_amount_base\": str(self.executed_amount_base),\n \"executed_amount_quote\": str(self.executed_amount_quote),\n \"last_state\": str(self.current_state.value),\n \"leverage\": str(self.leverage),\n \"position\": self.position.value,\n \"creation_timestamp\": self.creation_timestamp,\n \"last_update_timestamp\": self.last_update_timestamp,\n \"order_fills\": {key: fill.to_json() for key, fill in self.order_fills.items()}\n }", "def cache_to_json(self):\n # {\"team_number\": 0000000, \"student1\": \"\", \"student2\": \"\", \"student3\": \"\",\n # \"advisor_type\": \"\", \"advisor\": \"\", \"school\": \"\", \"prize\": \"\"}\n json_details = {\"fields\": [\"teams counts\", \"teams numbers\",\n \"student1\", \"student2\", \"student3\",\n \"advisor_type\", \"advisor\", \"school\", \"prize\"],\n \"teams counts\": 0,\n \"teams numbers\": [],\n \"info\": []}\n self.cache_result_file.seek(0, 0)\n lines = self.cache_result_file.readlines()\n json_details[\"teams counts\"] = len(lines)\n for line in lines:\n info = eval(line)\n json_details[\"teams numbers\"].append(info[\"team_number\"])\n json_details[\"info\"].append(info)\n\n with open(self.result_filename, \"w\") as f:\n json.dump(obj=json_details, fp=f, indent=4)\n\n self.logger.debug(\"Info Result Updated to JSON\")", "def json(self, update=False):\n return json.dumps(self.export(update=update), indent=4)", "def json_format(data):\n return {\n 'Title': data[\"title\"],\n 'Publication date': data['pubDate'],\n 'News link': data['link'],\n 'Image link': data['media'],\n }", "def to_dict(self):\n out_dict = _deepcopy(self.__dict__)\n out_dict[\"drawdown\"] = self.drawdown.to_dict()\n out_data = []\n for i in range(self.data_count()):\n out_data.append(self.data[i].to_dict())\n out_dict[\"data\"] = out_data\n return(out_dict)", "def precipitation():\r\n # Query all measurements\r\n results = session.query(Measurement).all()\r\n\r\n # Create a dictionary from the row data and append to a list of all_precipitation\r\n all_precipitation = []\r\n\r\n for row in results:\r\n precipitation_dict = { row.date : row.prcp}\r\n all_precipitation.append(precipitation_dict)\r\n # print(all_precipitation)\r\n return jsonify(all_precipitation)", "def json_out(self, data):\n\t\treturn json.dumps(data)", "def toJson(p):\n data = {p.name: {'Memory': p.memory, 'Camera': p.camera, 'Battery': p.battery, 'Ram': p.ram, 'Price': p.price,\n 'Image url': p.image}}\n return data" ]
[ "0.65398073", "0.63518214", "0.6286521", "0.61414844", "0.6091616", "0.6031922", "0.6015514", "0.6000563", "0.59230435", "0.59183973", "0.590805", "0.5886959", "0.5885497", "0.5877358", "0.5861081", "0.58608174", "0.58516765", "0.58342355", "0.57904774", "0.5777289", "0.5751878", "0.57506096", "0.57440114", "0.5739263", "0.5733976", "0.5730267", "0.57127184", "0.5698137", "0.5681085", "0.5660584", "0.5653313", "0.56498677", "0.56445795", "0.5637852", "0.5636569", "0.5623234", "0.5619551", "0.56121635", "0.56046724", "0.5601997", "0.559125", "0.5589514", "0.55750555", "0.5572174", "0.557053", "0.5569957", "0.5569957", "0.5569957", "0.5569957", "0.5569957", "0.5569957", "0.5569957", "0.5569957", "0.5569957", "0.5569957", "0.5565475", "0.55614716", "0.5553061", "0.55464655", "0.5545285", "0.5544039", "0.55233574", "0.55119795", "0.5500705", "0.54977834", "0.5497328", "0.5495724", "0.5493581", "0.5491933", "0.54856384", "0.5484223", "0.54838836", "0.54787034", "0.54769164", "0.5473891", "0.54701036", "0.54661244", "0.54648364", "0.5456681", "0.5452575", "0.54519784", "0.54512644", "0.54460174", "0.543681", "0.54366666", "0.5433667", "0.5432198", "0.54308385", "0.54206884", "0.54177165", "0.54176205", "0.54125464", "0.54119766", "0.5402241", "0.5400928", "0.5400337", "0.5396692", "0.5395917", "0.53915226", "0.53858906" ]
0.5780186
19
Show the popup and return True if accepted, False if canceled.
def popup(self):
    return self.exec_() == QDialog.Accepted
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def verify_popup(self, type):", "def _show_popup(self) -> None:\n\n top = tk.Toplevel()\n email_list_len = len(self.get_recipients())\n msg = tk.messagebox.askquestion('Confirm send emails', 'Are you sure you want to email {} client{}?'\n .format(email_list_len, \"s\" if email_list_len > 1 else \"\"),\n icon='warning')\n if msg == \"yes\":\n self._disable_buttons()\n email_process(self.get_recipients())\n top.destroy()\n else:\n top.destroy()", "def __window_confirm(self, text):\n return True", "def show_confirm_dialog(text):\n dialog = QDialog()\n interface = confirmGenerated.Ui_Dialog()\n interface.setupUi(dialog)\n interface.label.setText(text)\n if dialog.exec_() == 1:\n return True\n return False", "def showOk(parent,message,title=''):\r\n return askStyled(parent,message,title,wx.OK)", "def IsOk(self):\r\n \r\n return self.window != None", "def acceptAlert(self):\n self.log_info(f\"Browser.acceptAlert: Accepting alert\")\n alert = self.CORE.switch_to.alert\n alert.accept()\n return", "def alert_accept(self):\n self._alert_accept_cancel(True)", "def is_shown(self):\n return self.page.q(css=self.MODAL_SELECTOR).present", "def on_okButton_clicked(self):\n self.accept=True", "def askOk(parent,message,title=''):\r\n return askStyled(parent,message,title,wx.OK|wx.CANCEL)", "def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "def confirm(self):\n with self.handle_alert(confirm=True):\n self.q(css='button#confirm').first.click()", "def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "def run(self):\n # show the dialog\n self.dlg.show()\n # Run the dialog event loop\n result = self.dlg.exec_()\n # See if OK was pressed\n if result:\n # Do something useful here - delete the line containing pass and\n # substitute with your code.\n pass", "def show_ok_message(self, msg, msecs=3):\n\n message.PopupMessage.success(msg, parent=self, duration=msecs, closable=True)", "def askStyled(parent,message,title,style):\r\n dialog = wx.MessageDialog(parent,message,title,style)\r\n result = dialog.ShowModal()\r\n dialog.Destroy()\r\n return result in (wx.ID_OK,wx.ID_YES)", "def _onOk(self):\n\n self.accepted = True\n self.close()", "def _onOk(self):\n\n self.accepted = True\n self.close()", "def show_popup(self, popup_type, popup_msg):\n # Setup the MessageBox\n msg = QMessageBox()\n\n # Title the window\n msg.setWindowTitle(f\"{popup_type}\")\n\n # Set text inside the window\n if popup_type == \"Error\":\n msg.setText(f\"Error: {popup_msg}\")\n elif popup_type == \"Success\":\n msg.setText(f\"Success: {popup_msg}\")\n\n # Set the icon\n if popup_type == \"Error\":\n msg.setIcon(QMessageBox.Warning)\n elif popup_type == \"Success\":\n msg.setIcon(QMessageBox.Information)\n\n # Add buttons to the bottom\n msg.setStandardButtons(QMessageBox.Cancel)\n\n x = msg.exec_()", "def popup():\n msg = messagebox.askyesno('Warning', 'Are you sure you would like 
to submit?')\n if msg: # if user clicked yes\n save_txt()\n save_db()\n root.destroy()", "def consent(s, eType, eVal):\n try:\n import maya.cmds as cmds # Is Maya active? Ask using their GUI\n answer = cmds.confirmDialog(t=eType.__name__, m=CONFIRM_MSG, b=(\"Yes\",\"No\"), db=\"Yes\", cb=\"No\", ds=\"No\")\n return \"Yes\" == answer\n except ImportError:\n return True # No means to ask? Ah well ...", "def dialog(message, timeout=0, buttons=DIALOGBUTTON_OK):\n box = Dialogs(__name__, message, buttons)\n box.timeout = timeout\n return _retcode2bool(box.show())", "def onAccepted():\n dialog.done(1)", "def yes_no_cancel_popup(title=None,\n text=None):\n d = gtk.Dialog(title=title,\n parent=None,\n flags=gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,\n buttons=( gtk.STOCK_YES, gtk.RESPONSE_YES,\n gtk.STOCK_NO, gtk.RESPONSE_NO,\n gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL ))\n hb=gtk.HBox()\n hb.show()\n d.vbox.add(hb)\n\n i=gtk.Image()\n i.set_from_stock(gtk.STOCK_DIALOG_QUESTION, gtk.ICON_SIZE_DIALOG)\n i.show()\n hb.pack_start(i, expand=False)\n\n if text is not None:\n l=gtk.Label(text)\n l.show()\n hb.add(l)\n d.connect('key-press-event', dialog_keypressed_cb)\n\n d.show()\n center_on_mouse(d)\n retval=d.run()\n d.destroy()\n return retval", "def isModal(self) -> bool:\n ...", "def isModal(self) -> bool:\n ...", "def isModal(self) -> bool:\n ...", "def isModal(self) -> bool:\n ...", "def is_ime_popup(self,ignore_error_handle =False):\n message = {};\n step = 'is ime popup'\n try:\n isPopup = self.driver.is_ime_active();\n message = self.feedback.feedback_action_ok(step);\n message['is_popup'] = isPopup;\n except BaseException,e:\n message = self.feedback.feedback_action_fail(step,str(e),ignore_error_handle);\n finally:\n return message;", "def is_statement_status_changed_successfully(self):\n pop_up_not_present = None\n try:\n self.wait(10).until(EC.presence_of_element_located(self.info_pop_up_locator))\n pop_up_not_present = False\n self.click_element(self.ok_button_locator)\n except:\n pop_up_not_present = True\n finally:\n return pop_up_not_present", "def decision(question):\n return click.confirm(question, show_default=True)", "def popupcheck(correcte):\r\n popup = Tk()\r\n def destroy():\r\n popup.destroy()\r\n if correcte:\r\n root.destroy\r\n if correcte:\r\n popup.wm_title(\"Hidato\")\r\n label = Label(popup, text=\"La grille est correcte vous avez gagné!\")\r\n else:\r\n popup.wm_title(\"Hidato\")\r\n label = Label(popup, text=\"La grille est incorrecte, veuillez rééssayer\")\r\n label.pack(side=\"top\", fill=\"x\", pady=10)\r\n B1 = Button(popup, text=\"Okay\", command = destroy)\r\n B1.pack()\r\n popup.mainloop()", "def confirm(self, action):\n title = \"%s : P L E A S E C O N F I R M\" % action\n question_text = \"<html><b>%s - PLEASE CONFIRM.</b><br/>\"\\\n \"<br/>Do you want to %s %s recordings for the following project?\"\\\n \"<br/><br/>PROJECT : %s\"\\\n \"<br/>CLIENT : %s\"\\\n \"<br/>DATE : %s<br/></html>\" % (\n action.upper(),\n action,\n \" & \".join(self.selected_formats),\n self.recordings_table.project_details()[2],\n self.recordings_table.project_details()[3],\n self.recordings_table.project_details()[0]\n )\n\n self.hide()\n if action == 'upload':\n self.confirmation_dialog.setText(title, question_text)\n self.confirmation_dialog.exec_()\n self.show()\n\n if self.confirmation_dialog.cancelled:\n return (False, False)\n\n return (True, self.confirmation_dialog.immediate_upload)\n else:\n self.confirmation_dialog.showQuestion(title, question_text)\n self.show()\n 
return self.confirmation_dialog.copy_confirmed", "def _launch_click_through_dialog(self):\n text = \"The port test did not complete successfully. If you are certain that you really did forward the port and would like to continue anyway, you can do so.\\\n Otherwise, you may want to try again.\"\n self.controller.show_msgbox(text, title=\"Do You Really Want to Do That?\", cb=self._click_through_dialog_cb, buttons=(gtk.STOCK_CANCEL, 0, gtk.STOCK_OK, 1), width=300)", "def __window_prompt(self, text):\n return True", "def modal_call(self, after_ok_func=None):\n #self.show()\n while True:\n response = self.window.run()\n if response == Gtk.ResponseType.OK:\n # dialog will be closed by connect, now continue work while\n # rest of dialog is unresponsive, release when finished\n self.close()\n if after_ok_func is not None:\n after_ok_func()\n break\n elif (response == Gtk.ResponseType.DELETE_EVENT or\n response == Gtk.ResponseType.CANCEL):\n # connect buttons generating this to a close call\n break", "def verify_dialogue(title, message, name_of_control_to_click='popup_default_button'):\r\n msg, flag = \"\", False\r\n try:\r\n if g.platform =='android':\r\n sleep(5)\r\n popup = ui_controls.ui_element(get_obj_identifier('popup'))\r\n flag1, flag2, flag3, flag4 = False, False, False, False\r\n\r\n if popup is not None:\r\n flag1 = True\r\n title_actual = ui_controls.text_view(get_obj_identifier('popup_title'))\r\n sleep(3)\r\n message_actual = ui_controls.text_view(get_obj_identifier('popup_message'))\r\n if title_actual.lower() == title.lower():\r\n flag2 = True\r\n print 'pop up title matched'\r\n if message_actual.lower() == message.lower():\r\n flag3 = True\r\n print 'pop up message matched'\r\n sleep(3) \r\n flag4 = ui_controls.button(get_obj_identifier(name_of_control_to_click))\r\n flag = flag1 and flag2 and flag3 and flag4\r\n else:\r\n print 'pop up not available'\r\n else:\r\n \r\n sleep(3)\r\n \r\n flag1, flag2, flag3, flag4 = False, False, False, False\r\n\r\n \r\n flag1 = True\r\n title_actual = ui_controls.text_view(get_obj_identifier('popup_title'),value=True)\r\n sleep(3)\r\n message_actual = ui_controls.text_view(get_obj_identifier('popup_message'),label=True)\r\n if title_actual.lower() == title.lower():\r\n flag2 = True\r\n print 'pop up title matched'\r\n if message_actual.lower() == message.lower():\r\n flag3 = True\r\n print 'pop up message matched'\r\n sleep(3) \r\n flag4 = ui_controls.button(get_obj_identifier(name_of_control_to_click))\r\n flag = flag1 and flag2 and flag3 and flag4\r\n \r\n except Exception as excp:\r\n traceback.print_exc()\r\n msg += str(excp)\r\n return flag, msg", "def _show_dialog(self, content: dict):\n raise NotImplementedError", "def accept_ok(self):\n self.ok = True\n self.destroy()", "def ok(self, _=None):\r\n\r\n if not self.validate():\r\n self.initial_focus.focus_set() # put focus back\r\n return\r\n self.withdraw()\r\n self.update_idletasks()\r\n self.temp.set(True) # set boolean variable temp equal to True\r\n self.apply()\r\n self.parent.focus_set()\r\n self.destroy()", "def ask_ok_cancel(message=\"\", title=None):\n return dialog(\"ask_ok_cancel\", message=message, title=title)", "def is_shown(self, request):\n return True", "def PresentDialog_Confirm_Call( message ):\n return call( message, [ 'Ok', 'Cancel' ] )", "def TransferToWindow(self):\n\t\treturn True # Prevent wxDialog from complaining.", "def wait_dialog_box(self):\n while True:\n time.sleep(0.5)\n dialog = AppWindow.locate_on(SummonSelector.dialog_ok.path, (1 / 3, 2 / 3, 2 / 3, 1 / 
3))\n if dialog is not None:\n self.logger.info(\"dialog popped up\")\n return", "def messageConfirm(self,message):\n answer=self.message(message,style=wx.YES_NO|wx.ICON_QUESTION)\n return self.messageIsOk(answer)", "def confirm():\r\n if(PsdEntry.get() == \"Psd\" and UserEntry.get() == \"User\"):\r\n open()\r\n else:\r\n messagebox.showerror(\"Error\",\"Invalid Username\")", "def action_confirm(self):\n self.check_txt_ids()\n self.write({'state': 'confirmed'})\n return True", "def AcceptsFocus(self):\n\n return self.IsShown() and self.IsEnabled()", "def show_popup(self, view, docstring, location=None):", "def show_dialog(self, widget, data):\n\t\twidget.show()", "def test_alert_pop_up(self):\n\n # locators\n alert_button = 'alertbtn'\n\n # steps\n locate_alert_button = WebDriverWait(self.driver, 10).until(\n ec.visibility_of_element_located((By.ID, alert_button))\n )\n locate_alert_button.click()\n alert = self.driver.switch_to.alert\n print(alert.text)\n alert.accept()", "def show_question_dialog(self, title, message):\n dialog = QMessageBox.question(self, title, message, QMessageBox.Yes | QMessageBox.No | QMessageBox.Cancel)\n '''dialog.setText(title) # format_secondary_text(message)\n dialog.setInformativeText(message)\n dialog.setStandardButtons(QMessageBox.Save | QMessageBox.Discard | QMessageBox.Cancel)\n #dialog.addButton(QPushButton('Accept'), QMessageBox.YesRole)\n #dialog.addButton(QPushButton('Cancel'), QMessageBox.RejectRole)\n dialog.setDefaultButton(QMessageBox.Cancel)'''\n #response = dialog.exec_()\n #dialog dialog.destroy()\n return dialog # response", "def confirm_exit(self):\n return True", "def show_popup(self, process_info, label_info):\n layout = FloatLayout()\n\n self.popup_label = Label(text= process_info)\n self.close_button = Button(text=\"OK\", size_hint=(.2,.1), pos_hint={'x': .4, 'y': .1})\n\n layout.add_widget(self.popup_label)\n\n layout.add_widget(self.close_button)\n\n # Instantiate the modal popup and display\n\n popup = Popup(title=label_info, content=layout)\n\n popup.open()\n\n # Attach close button press with popup.dismiss action\n self.close_button.disabled = True\n self.close_button.bind(on_press=popup.dismiss)", "def dlg_validate(self):\n return(True) # override", "def is_modal(self) -> bool:\n return False", "def get_confirmation():\n inp = PInput(\"#> \")\n\n inp.add_keyword(\"yes\")\n inp.add_keyword(\"no\")\n\n inp.ask()\n ans = inp.get_input()\n\n if ans == \"yes\":\n return True\n else:\n return False", "def confirm(text, window=None):\n return message(text, u'Confirma', M_QUESTION, B_YES_NO, window) == R_YES", "def display_correct_window(self):\r\n all_buttons = ['New Question', 'Show Solution', 'Main Menu']\r\n if self.solution is None:\r\n all_buttons.remove('Show Solution')\r\n window = SimpleWindow(self.title, msg=\"That's right!\",\r\n buttons=all_buttons)\r\n window.run()\r\n return window.clicked", "def accept_cancel(self):\n self.ok = False\n self.destroy()", "def ok(self, event=None):\n if not self.validate():\n self.initial_focus.focus_set()\n return\n\n self.withdraw()\n self.update_idletasks()\n self.apply()\n self.cancel()", "def input(self, message=''):\r\n from javax.swing import JOptionPane\r\n return JOptionPane.showInputDialog(frame, message)", "def AskYesNo(msg,\r\n title='Please confirm',\r\n width=70,\r\n yesno_keys={1: \"Yes\", 2: \"No\"}):\r\n w = Window()\r\n k = w.Create(title,\r\n msg.split('\\n'),\r\n width=width,\r\n main_keys=yesno_keys)\r\n if not k:\r\n return False\r\n\r\n while True:\r\n ln, k = w.Show()\r\n 
if k == HEM_FNKEY_F1:\r\n return True\r\n elif k == HEM_FNKEY_F2:\r\n return False", "def show_dlg(self, dlg):\n if not self.test_mode:\n dlg.Center()\n return dlg.ShowModal()\n else:\n return dlg.GetAffirmativeId()", "def ask_ok(title='Confirm', message=''):\n if not isinstance(title, string_types):\n raise TypeError('ask_ok() title must be a string.')\n if not isinstance(message, string_types):\n raise TypeError('ask_ok() message must be a string.')\n return _get_app().ask_ok(title, message)", "def confirm_dialog(self, title, message):\n return self._impl.confirm_dialog(title, message)", "def show_dialog(self):\n self.showMaximized()\n sys.exit(self.app.exec_())", "def alertIsPresent(self, *, accept_if_present=False):\n self.disable_logging()\n result = self.waitForAlertPresent(timeout=0)\n if accept_if_present and result:\n self.acceptAlert()\n self.revert_logging()\n self.log_info(f\"Browser.alertIsPresent: Alert is present, and has been accepted as accept_if_present=True\")\n else:\n self.revert_logging()\n self.log_info(f\"Browser.alertIsPresent: Alert is {'' if result else 'not '}present\")\n return result", "def show_question(title, message, cancel=True):\n\n pass", "def show_popup(self, data):\r\n message = add_color(data[\"errtext\"], \"FF0000\")\r\n self.ids.errormessage.text = message\r\n self.open()", "def show_dialog_ync(self, title: str, message: str) -> Gtk.ResponseType:\n dialog = self.ui.get_object(\"ync_dialog\")\n dialog.set_transient_for(self.ui.get_object(\"mainWindow\"))\n dialog.set_title(title)\n self.ui.get_object(\"ync_label\").set_markup(message)\n response = dialog.run()\n dialog.hide()\n return response", "def confirmCall(self, activePlayer, action):\n # todo: raise notImplemented. should be overriden\n return False", "def TransferFromWindow(self):\n\t\treturn True # Prevent wxDialog from complaining.", "def show_dialog_yn(self, title: str, message: str) -> Gtk.ResponseType:\n dialog = self.ui.get_object(\"yn_dialog\")\n dialog.set_transient_for(self.ui.get_object(\"mainWindow\"))\n dialog.set_title(title)\n self.ui.get_object(\"yn_label\").set_markup(message)\n response = dialog.run()\n dialog.hide()\n return response", "def sd_yes_clicked(self, widget, data=None):\n return True", "def win_popup(self):\n content = BoxLayout(orientation='vertical')\n message_label = Label(text=self.win_message)\n button_layer = BoxLayout(orientation='horizontal')\n dismiss_button = Button(text='QUIT', size_hint=(1, 1))\n next_button = Button(id='next', text='NEXT ROUND', size_hint=(1, 1))\n button_layer.add_widget(dismiss_button)\n button_layer.add_widget(next_button)\n content.add_widget(message_label)\n content.add_widget(button_layer)\n popup = Popup(title=self.winner,\n content=content, size_hint=(0.3, 0.25))\n dismiss_button.bind(on_release=(lambda a: self.exit_game()),\n on_press=popup.dismiss)\n next_button.bind(on_release=(lambda a: self.next_round()),\n on_press=popup.dismiss)\n popup.open()", "def _confirm_action(self, action):\n\t\treturn True", "def confirm(message: str = \"Confirm?\", suffix: str = \" (y/n) \") -> bool:\n session = create_confirm_session(message, suffix)\n return session.prompt()", "def questionbox(parent, message):\n return QMessageBox.question(parent,\n \"Warning\",\n message) == QMessageBox.Yes", "def ok(self, event=None):\n\t\tself.withdraw()\n\t\tself.update_idletasks()\n\t\tself.result = self.provider.apply()\n\t\tself.parent.focus_set()\n\t\tself.destroy()", "def runAskOkDialog(self, c: Cmdr, title: str, message: str=None, text: str=\"Ok\") 
-> None:\n if g.unitTesting:\n return\n dialog = QtWidgets.QMessageBox(c and c.frame.top)\n dialog.setWindowTitle(title)\n if message:\n dialog.setText(message)\n dialog.setIcon(Information.Information)\n dialog.addButton(text, ButtonRole.YesRole)\n try:\n c.in_qt_dialog = True\n dialog.raise_()\n dialog.exec_()\n finally:\n c.in_qt_dialog = False", "def buttonOK_Clicked( self, event ):\n\t\tself.EndModal(wx.ID_OK)", "def confirmCloseEvent(self):\n dlg = simpleDialogs.ConfirmCloseDialog(self)\n\n close = False\n clearSettings = False\n\n reply = dlg.exec_()\n\n if reply:\n close = True\n\n if dlg.clearSettingsCheck.isChecked():\n clearSettings = True\n\n return close, clearSettings", "def user_popup(request):\n try:\n return _user_popup(request)\n except Exception as err:\n logging.exception('Exception in user_popup processing:')\n # Return HttpResponse because the JS part expects a 200 status code.\n return HttpHtmlResponse(\n '<font color=\"red\">Error: %s; please report!</font>' %\n err.__class__.__name__)", "def user_popup(request):\n try:\n return _user_popup(request)\n except Exception as err:\n logging.exception('Exception in user_popup processing:')\n # Return HttpResponse because the JS part expects a 200 status code.\n return HttpHtmlResponse(\n '<font color=\"red\">Error: %s; please report!</font>' %\n err.__class__.__name__)", "def display_confirm(self, text, password):\n return self.display_prompt(text) == password", "def confirm(message):\n if not sys.stdout.isatty():\n return False\n reply = BaseCommand.input(\"\\n{message} [Y/N]:\".format(message=message))\n return reply and reply[0].lower() == 'y'", "def acceptClicked(self):\n if len(self.commentEdit.toPlainText()) > 0:\n self.accept()", "def exec(self) -> bool:\n return bool(self._widget._mgui_exec())", "def popupNewPassword():\n dialog = gtk.Dialog(parent=gui,\n flags=gtk.DIALOG_MODAL | gtk.DIALOG_DESTROY_WITH_PARENT,\n buttons=(gtk.STOCK_CANCEL, gtk.RESPONSE_REJECT,\n gtk.STOCK_OK, gtk.RESPONSE_ACCEPT))\n label = gtk.Label(_(\"Enter new password:\"))\n label.set_alignment(0.0, 0.5)\n dialog.vbox.pack_start(label)\n label.show()\n entry = gtk.Entry(max=20)\n entry.set_visibility(False)\n dialog.vbox.pack_start(entry)\n entry.show()\n label2 = gtk.Label(_(\"Reenter new password:\"))\n label2.set_alignment(0.0, 0.5)\n dialog.vbox.pack_start(label2)\n label2.show()\n entry2 = gtk.Entry(max=20)\n entry2.set_visibility(False)\n dialog.vbox.pack_start(entry2)\n entry2.show()\n val = None\n while (dialog.run() == gtk.RESPONSE_ACCEPT):\n v = entry.get_text()\n if (v == entry2.get_text()):\n val = v\n break\n error(_(\"The passwords do not match.\"))\n dialog.destroy()\n return val", "def should_show_check_button(self):\r\n submitted_without_reset = (self.is_submitted() and self.rerandomize == \"always\")\r\n\r\n # If the problem is closed (past due / too many attempts)\r\n # then we do NOT show the \"check\" button\r\n # Also, do not show the \"check\" button if we're waiting\r\n # for the user to reset a randomized problem\r\n if self.closed() or submitted_without_reset:\r\n return False\r\n else:\r\n return True", "def ShowPopUp(self):\n self.popup = True\n t = Timer(1.0, self.RemovePopUp)\n t.start() # after 1 second, the pop up will be removed", "def AskYesNo(question, title=''):\n\n # build the message dialogue\n dial = wx.MessageDialog(None, question, title,\n wx.YES_NO|wx.NO_DEFAULT|wx.ICON_QUESTION)\n\n # run it and get the answer/event\n ret = dial.ShowModal()\n\n if (ret == wx.ID_YES): # process user answer\n return 
True\n else:\n return False", "def askYes(parent,message,title='',default=True):\r\n style = wx.YES_NO|wx.ICON_EXCLAMATION|((wx.NO_DEFAULT,wx.YES_DEFAULT)[default])\r\n return askStyled(parent,message,title,style)", "def okButton(self):\n \n self.answer=\"ok\"\n self.top.destroy()", "def ask_user(self, timeout: int = 180, **options: Any) -> Result:\n\n self.add_submit_buttons([\"Submit\", \"Close\"], \"Submit\")\n return self.run_dialog(**options, timeout=timeout)", "def closing_plugin(self, cancelable=False):\n return True" ]
[ "0.6642623", "0.6464855", "0.63802284", "0.62683916", "0.6240332", "0.6218975", "0.6172441", "0.61592615", "0.6059267", "0.5996099", "0.5937343", "0.5933337", "0.5915153", "0.5883959", "0.5883959", "0.5883959", "0.58716357", "0.58673215", "0.5851311", "0.5851311", "0.5847941", "0.5827843", "0.5816396", "0.57947785", "0.5788484", "0.5782045", "0.576139", "0.576139", "0.576139", "0.576139", "0.57424664", "0.5718718", "0.5718128", "0.57165617", "0.5709916", "0.56735855", "0.5659799", "0.5658127", "0.56579936", "0.563469", "0.5624667", "0.5620566", "0.5615693", "0.560615", "0.5600947", "0.55855805", "0.55797476", "0.5563111", "0.5552068", "0.55479515", "0.55435103", "0.5541662", "0.5528812", "0.551605", "0.5514528", "0.5504488", "0.548875", "0.547627", "0.54683506", "0.5465102", "0.5460333", "0.54481274", "0.5436161", "0.54153675", "0.540124", "0.53988516", "0.5393821", "0.53918314", "0.53855884", "0.5376851", "0.5374543", "0.53551483", "0.5342573", "0.5311641", "0.5290086", "0.5289206", "0.5284675", "0.52742314", "0.5269508", "0.5263441", "0.5262741", "0.52615607", "0.5239481", "0.5237952", "0.5221161", "0.52164054", "0.52138627", "0.52138627", "0.52028286", "0.5202819", "0.5195786", "0.5193149", "0.51881176", "0.51844174", "0.5176933", "0.5172283", "0.516948", "0.5164945", "0.5147777", "0.5143416" ]
0.79470927
0
Fill the heavy metal unit labels with the selected unit.
def set_hm_unit_display(self):
    units = str(self.entries['units'].combobox.currentText())
    self.ui.is_unitL1.setText(units)
    self.ui.is_unitL2.setText(units)
    self.ui.is_unitL3.setText(units)
    self.ui.is_unitL4.setText(units)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def unit_label(self, unit_label):\n\n self._unit_label = unit_label", "def unit_label(self, unit_label):\n\n self._unit_label = unit_label", "def unit_label(self, unit_label):\n\n self._unit_label = unit_label", "def update_units(self):\n unit_var_value = self.view.vars['unit'].get()\n if unit_var_value == 'm3ph':\n self.minran_u_label.config(text='m³/h')\n self.maxran_u_label.config(text='m³/h')\n self.points_tview.heading('vflow', text='Przepływ [m³/h]', anchor=tk.CENTER)\n elif unit_var_value == 'lps':\n self.minran_u_label.config(text='l/s')\n self.maxran_u_label.config(text='l/s')\n self.points_tview.heading('vflow', text='Przepływ [l/s]', anchor=tk.CENTER)\n self.view.vars['pump_eff_min'].convert_unit(unit_var_value)\n self.view.vars['pump_eff_max'].convert_unit(unit_var_value)\n self.view.vars['pump_characteristic'].convert_unit(unit_var_value)", "def assign_unit(self):\n self.units = {}\n for unit in RADIAL_UNITS:\n if unit.REPR == \"2th_deg\":\n self.units[unit] = self.tth_deg\n elif unit.REPR == \"2th_rad\":\n self.units[unit] = self.tth_rad\n elif unit.REPR == \"q_nm^-1\":\n self.units[unit] = self.q_nm\n elif unit.REPR == \"q_A^-1\":\n self.units[unit] = self.q_A\n elif unit.REPR == \"r_mm\":\n self.units[unit] = self.r_mm\n else:\n logger.warning(\"Unit unknown to GUI %s\" % unit)", "def set_unit(self,unit):\n self.unit = unit", "def unitUpdate(self):\n newText = self.unitGroup.unitString()\n cursorPos = len(newText) - self.text().length() + self.cursorPosition()\n if cursorPos < 0: # cursor set to same distance from right end\n cursorPos = 0\n self.blockSignals(True)\n self.setText(newText)\n self.setCursorPosition(cursorPos)\n self.blockSignals(False)\n self.emit(QtCore.SIGNAL('unitChanged')) # update numEdit", "def set_units(self, units):\n self.units = units", "def setDataUnit(self, dataUnit):\n\t\tself.urmaswin.setDataUnit(dataUnit)", "def update_units(self):\r\n self.units_index = self.UnitsComboBox.currentIndex()\r\n self.cmd = None\r\n if self.connected:\r\n self.cmd = self.unit_switch.get(self.units_index, None)\r\n self.I_source.write(self.cmd)\r\n self.update_header_string()", "def set_unit(self, length='cm'):\n if length == 'cm':\n self.DUMMY = 1.0\n elif length == 'mm':\n self.DUMMY = 0.1\n elif length == 'm':\n self.DUMMY = 0.0", "def UpdateLabel(self) -> _n_6_t_0:", "def FillSquad(self):\n unitName = \"\"\n if isinstance(self.squad, squad.Squad):\n unitName = list(self.squad.additional_units.keys())[0]\n while self.squad.current_size < self.squad.max_size:\n self.squad.addUnit(unitName)\n self.addButton\n self.exportButton\n self.pointLabel['text'] = self.squad.point_cost\n self.sizeLabel['text'] = self.squad.current_size\n r=6\n\n if isinstance(self.squad, squad.Squad):\n for u in self.squad.units:\n Label(self.__mainWindow, text=u.name, font=__item_format__).grid(row=r, column=0)\n Label(self.__mainWindow, text=u.weapon_skill.__str__(), font=__item_format__).grid(row=r, column=1)\n Label(self.__mainWindow, text=u.ballistics_skill.__str__(), font=__item_format__).grid(row=r, column=2)\n Label(self.__mainWindow, text=u.strength.__str__(), font=__item_format__).grid(row=r, column=3)\n Label(self.__mainWindow, text=u.toughness.__str__(), font=__item_format__).grid(row=r, column=4)\n Label(self.__mainWindow, text=u.wounds.__str__(), font=__item_format__).grid(row=r, column=5)\n Label(self.__mainWindow, text=u.initiative, font=__item_format__).grid(row=r, column=6)\n Label(self.__mainWindow, text=u.melee_attacks.__str__(), font=__item_format__).grid(row=r, 
column=7)\n Label(self.__mainWindow, text=u.leadership.__str__(), font=__item_format__).grid(row=r, column=8)\n Label(self.__mainWindow, text=u.armor_save.__str__(), font=__item_format__).grid(row=r, column=9)\n Label(self.__mainWindow, text=u.invuln_save.__str__(), font=__item_format__).grid(row=r, column=10)\n r += 1\n\n else:\n for i in range(self.squad.current_size):\n Label(self.__mainWindow, text=self.squad.squad_name, font=__item_format__).grid(row=r, column=0)\n Label(self.__mainWindow, text=self.squad.ballistics_skill.__str__(), font=__item_format__).grid(row=r, column=1)\n Label(self.__mainWindow, text=self.squad.front_armor.__str__(), font=__item_format__).grid(row=r, column=2)\n Label(self.__mainWindow, text=self.squad.side_armor.__str__(), font=__item_format__).grid(row=r, column=3)\n Label(self.__mainWindow, text=self.squad.rear_armor.__str__(), font=__item_format__).grid(row=r, column=4)\n Label(self.__mainWindow, text=self.squad.hull_points, font=__item_format__).grid(row=r, column=5)\n r += 1\n \n self.addButton['state']='normal'\n if self.squad.current_size == self.squad.max_size:\n self.addButton['state']='disabled'\n if isinstance(self.squad, squad.Squad):\n self.wepSpin.grid(row=r, column=1, columnspan=4)\n self.weaponAdd.grid(row=r, column=5)\n r += 1", "def AddUnit(self):\n unitName = \"\"\n if isinstance(self.squad, squad.Squad):\n unitName = list(self.squad.additional_units.keys())[0]\n self.squad.addUnit(unitName)\n self.addButton\n self.exportButton\n self.pointLabel['text'] = self.squad.point_cost\n self.sizeLabel['text'] = self.squad.current_size\n r=6\n if isinstance(self.squad, squad.Squad):\n for u in self.squad.units:\n Label(self.__mainWindow, text=u.name, font=__item_format__).grid(row=r, column=0)\n Label(self.__mainWindow, text=u.weapon_skill.__str__(), font=__item_format__).grid(row=r, column=1)\n Label(self.__mainWindow, text=u.ballistics_skill.__str__(), font=__item_format__).grid(row=r, column=2)\n Label(self.__mainWindow, text=u.strength.__str__(), font=__item_format__).grid(row=r, column=3)\n Label(self.__mainWindow, text=u.toughness.__str__(), font=__item_format__).grid(row=r, column=4)\n Label(self.__mainWindow, text=u.wounds.__str__(), font=__item_format__).grid(row=r, column=5)\n Label(self.__mainWindow, text=u.initiative, font=__item_format__).grid(row=r, column=6)\n Label(self.__mainWindow, text=u.melee_attacks.__str__(), font=__item_format__).grid(row=r, column=7)\n Label(self.__mainWindow, text=u.leadership.__str__(), font=__item_format__).grid(row=r, column=8)\n Label(self.__mainWindow, text=u.armor_save.__str__(), font=__item_format__).grid(row=r, column=9)\n Label(self.__mainWindow, text=u.invuln_save.__str__(), font=__item_format__).grid(row=r, column=10)\n r += 1\n\n else:\n for i in range(self.squad.current_size):\n Label(self.__mainWindow, text=self.squad.squad_name, font=__item_format__).grid(row=r, column=0)\n Label(self.__mainWindow, text=self.squad.ballistics_skill.__str__(), font=__item_format__).grid(row=r, column=1)\n Label(self.__mainWindow, text=self.squad.front_armor.__str__(), font=__item_format__).grid(row=r, column=2)\n Label(self.__mainWindow, text=self.squad.side_armor.__str__(), font=__item_format__).grid(row=r, column=3)\n Label(self.__mainWindow, text=self.squad.rear_armor.__str__(), font=__item_format__).grid(row=r, column=4)\n Label(self.__mainWindow, text=self.squad.hull_points, font=__item_format__).grid(row=r, column=5)\n r += 1\n \n self.addButton['state']='normal'\n if self.squad.current_size == 
self.squad.max_size:\n self.addButton['state']='disabled'\n if isinstance(self.squad, squad.Squad):\n self.wepSpin.grid(row=r, column=1, columnspan=4)\n self.weaponAdd.grid(row=r, column=5)\n r += 1", "def updateCurrentUnit(self):\n self.unitGroup.updateCurrentUnit(unicode(self.text()),\n self.cursorPosition())\n self.emit(QtCore.SIGNAL('currentChanged')) # update listView", "def load_unit(self, unit_id):", "def unit(self,unit_str,unit_scale):\n self.units[unit_str] = unit_scale\n return self", "def _units_chosen(self):\r\n sender = self.sender()\r\n\r\n # get current state/values\r\n mode = self._mode\r\n height = self.height_field.value()\r\n weight = self.weight_field.value()\r\n\r\n # update widgets\r\n if sender == self.imperial_button:\r\n self.height_units_label.setText('in')\r\n self.weight_units_label.setText('lb')\r\n self._mode = 'imperial'\r\n else:\r\n self.height_units_label.setText('cm')\r\n self.weight_units_label.setText('kg')\r\n self._mode = 'metric'\r\n\r\n # convert values\r\n if mode == 'metric' and self._mode == 'imperial':\r\n self._height = height / 2.54\r\n self._weight = weight / 0.454\r\n elif mode == 'imperial' and self._mode == 'metric':\r\n self._height = height * 2.54\r\n self._weight = weight * 0.454\r\n\r\n # update data widgets\r\n self.height_field.setValue(self._height)\r\n self.weight_field.setValue(self._weight)", "def __init__(self, lunit=\"nm\"):\n super().__init__(lunit)", "def reset_units(shared, *args):\n shared.config.remove_section('units')\n shared.config.add_section('units')\n \n return", "def units(self, units):\n\n self._units = units", "def units(self, units):\n\n self._units = units", "def units(self, units):\n\n self._units = units", "def set_labels(self):\n\n if 1 <= self.selected_data <= 2:\n self.plot_select.setLabel(\"left\", \"P (kPa)\")\n self.plot_select.setLabel(\"bottom\", \"t\", \"s\")\n self.plot_zoom.setLabel(\"left\", \"P (kPa)\")\n self.plot_zoom.setLabel(\"bottom\", \"t\", \"s\")\n\n elif self.selected_data == 3:\n self.plot_select.setLabel(\"left\", \"ext\", \"\")\n self.plot_select.setLabel(\"bottom\", \"t\", \"s\")\n self.plot_zoom.setLabel(\"left\", \"ext\", \"\")\n self.plot_zoom.setLabel(\"bottom\", \"t\", \"s\")\n\n elif self.selected_data == 4:\n self.plot_select.setLabel(\"left\", \"U\", \"V\")\n self.plot_select.setLabel(\"bottom\", \"t\", \"s\")\n self.plot_zoom.setLabel(\"left\", \"U\", \"V\")\n self.plot_zoom.setLabel(\"bottom\", \"t\", \"s\")\n\n # self.plot_simulate.setLabel(\"left\", \"ext\", \"\")\n # self.plot_simulate.setLabel(\"bottom\", \"t\", \"s\")\n\n self.plot_distribution.setLabel(\"left\", \"N ×10¹⁰ (#/m³)\")\n self.plot_distribution.setLabel(\"bottom\", \"d_p\", \"m\")\n self.plot_distribution.showGrid(y=True)\n\n self.plot_rotatometer.setLabel(\"left\", \"N ×10¹⁰ (#/m³)\")\n self.plot_rotatometer.setLabel(\"bottom\", \"laimennusvirtaus\")\n self.plot_rotatometer.showGrid(y=True)", "def set_default_unit(unit):\n self.default_unit = unit", "def useUnits():", "def set_W0_unit(self, value):\n if self.lf_W0.text() != \"\":\n self.set_W0() # Update for deg if needed and call comp_output\n # Notify the machine GUI that the machine has changed\n self.saveNeeded.emit()", "def setDistanceUnits(self, units: Unit) -> None:\n self.units = ...", "def autolabel(rects):", "def addLabels(t):\n if not t.label:\n t.label = \"\".join([choice(\"abcdefghijklmnopqrstuvwxyz\") for i in range(4)])\n for r,w in t.children:\n addLabels(r)", "def label_for(self, *pp, unit=True, description=True):\n if len(pp) > 1 and 
np.all([re.match(r\"k\\d+l\", p) for p in pp]):\n label = \"$k_nl$\"\n if unit:\n label += \" / $m^{-n}$\"\n return label\n return super().label_for(*pp, unit=unit, description=description)", "def fill_tank(self):\r\n self.fuel_level = self.fuel_capacity", "def drawLabels(self):\r\n if self.sensors == None or self.sensors == []:\r\n return\r\n col = self.app.getSensorCol(self.sensors[self.sensor_ids[0]])\r\n self.c.create_text(30,20,text=self.sensors[self.sensor_ids[0]],fill=col,anchor=tk.NW)\r\n if len(self.sensor_ids) == 2:\r\n col = self.app.getSensorCol(self.sensors[self.sensor_ids[1]])\r\n self.c.create_text(30,40,text=self.sensors[self.sensor_ids[1]],fill=col,anchor=tk.NW)", "def set_status(self):\r\n string = \"%9.3f%s/%9.3f%s\"\r\n unit1 = unit2 = \"b\"\r\n used = self.usedBytes.get()\r\n total = self.totalBytes.get()\r\n if used > total:\r\n self.label.config(fg=\"red\")\r\n else:\r\n self.label.config(fg=\"black\")\r\n if used > 999999:\r\n unit1 = \"Mb\"\r\n used /= 1000000.0\r\n elif used > 999:\r\n unit1 = \"Kb\"\r\n used /= 1000.0\r\n if total > 999999:\r\n unit2 = \"Mb\"\r\n total /= 1000000.0\r\n elif total > 999:\r\n unit2 = \"Kb\"\r\n total /= 1000.0\r\n self.textStatus.set(string % (used, unit1, total, unit2))", "def updateLabels(self):\n # Intensity range\n self.minIntensityLabel.setText(\"Intensity: \"+str(self.ABsettings[\"intensity_range\"][0]).rjust(3))\n self.labelMaxInt.setText(str(self.ABsettings[\"intensity_range\"][1]).ljust(3))\n # Z range\n self.minZLabel.setText(\"Z range: \"+str(self.ABsettings[\"zrange\"][0]+1).rjust(2))\n self.labelMaxZ.setText(str(self.ABsettings[\"zrange\"][1]+1).ljust(2))", "def test_change_units(self):\n s = State(\"water\", T=Q_(100, \"degC\"), p=Q_(1.0, \"atm\"), units=\"EE\")\n assert s.units == \"EE\"\n s.units = \"SI\"\n assert s.units == \"SI\"\n assert s.cv.units == \"kilojoule / kelvin / kilogram\"\n assert s.cp.units == \"kilojoule / kelvin / kilogram\"\n assert s.s.units == \"kilojoule / kelvin / kilogram\"\n assert s.h.units == \"kilojoule / kilogram\"\n assert s.T.units == \"degree_Celsius\"\n assert s.u.units == \"kilojoule / kilogram\"\n assert s.v.units == \"meter ** 3 / kilogram\"\n assert s.p.units == \"bar\"", "def add_default_units(self, u: dict):\n # TODO: Could look at replacing dict with defined arguments\n # This would be a big API change\n try:\n self._default_units.set_units(**u)\n except TypeError:\n raise TypeError(\n \"Unexpected argument for base quantities found when creating UnitSet. 
\"\n \"Please ensure that units are only defined for the seven base quantities.\"\n )", "def labels_x(x_unit, latex = True, verbose = 0): \n \n if verbose > 1:\n print(\"SpectraTools.Resources.UnitConversion.labels_x()\") \n \n if x_unit in nm_labels:\n return \"Wavelength (nm)\"\n elif x_unit in um_labels:\n if latex:\n return r\"Wavelength ($\\mu$m)\"\n else:\n return \"Wavelength (micron)\"\n elif x_unit in cm_labels:\n if latex:\n return r\"Energy (cm$^{-1}$)\"\n else:\n return \"Energy (cm-1)\"\n elif x_unit in ev_labels:\n return \"Energy (eV)\" \n else:\n return x_unit", "def food_selected(self, arg):\n\t\tfood = fooditemdao.retrieve_food(self.selected_food.get())\n\t\tself.lbl_unit.config(text=food.info['unit'])", "def populateWithComplexLabels(self):\n # setting righe e colonne\n self.table.setRowCount(0)\n self.table.setColumnCount(0)\n rows = self.table.rowCount()\n cols = self.table.columnCount()\n if rows != 6:\n self.table.setRowCount(6)\n if cols != 7:\n self.table.setColumnCount(7)\n for row in range(6):\n for col in range(7):\n itemWidget = ComplexLabel(self.table)\n self.table.setCellWidget(row, col, itemWidget)\n self.setComplexLabels(self.indexMonth)\n self.formatHeaderNames()", "def setLabels(self):\n #productive\n profprint()\n self.option = {0:'Ba',\n 1:'Bb',\n 2:'Bc',\n 3:'Bd',\n 4:'Be',\n 5:'Bf',\n 6:'Bg',\n 7:'Bh',\n 8:'Bi',\n 9:'Bj',\n 10:'Bk',\n 11:'Bl',\n 12:'Ca',\n 13:'Cb',\n 14:'Cc',\n 15:'Cd',\n 16:'Ce',\n 17:'Cf',\n 18:'Cg',\n 19:'Ch',\n 20:'Ci',\n 21:'Cj',\n 22:'Ck',\n 23:'Cl',\n 24:'Cm',\n 25:'Cn',\n 26:'Co',\n 27:'Cp',\n 28:'Cq',\n 29:'Cr',\n 30:'Da',\n 31:'Db',\n 32:'Dc',\n 33:'Dd',\n 34:'De',\n 35:'Df',\n 36:'Dg',\n 37:'Dh',\n 38:'Di',\n 39:'Dj',\n 40:'Ea',\n 41:'Eb',\n 42:'Ec',\n 43:'Ed',\n 44:'Ee',\n 45:'Ef',\n 46:'Eg',\n 47:'Eh',\n 48:'Aa',\n 49:'Ab',\n 50:'Ac',\n 51:'Ad',\n 52:'Ae',\n 53:'Af',\n 54:'Iu', \n 55:'Fa',\n 56:'Fb',\n 57:'Fc',\n 58:'Fd',\n 59:'Fe',\n 60:'Ff',\n 61:'Fg',\n 62:'Fh',\n 63:'--'}\n\n return self.option", "def setLabels(self):\r\n # productive\r\n profprint()\r\n self.option = {0:'Ba',\r\n 1:'Bb',\r\n 2:'Bc',\r\n 3:'Bd',\r\n 4:'Be',\r\n 5:'Bf',\r\n 6:'Bg',\r\n 7:'Bh',\r\n 8:'Bi',\r\n 9:'Bj',\r\n 10:'Bk',\r\n 11:'Bl',\r\n 12:'Ca',\r\n 13:'Cb',\r\n 14:'Cc',\r\n 15:'Cd',\r\n 16:'Ce',\r\n 17:'Cf',\r\n 18:'Cg',\r\n 19:'Ch',\r\n 20:'Ci',\r\n 21:'Cj',\r\n 22:'Ck',\r\n 23:'Cl',\r\n 24:'Cm',\r\n 25:'Cn',\r\n 26:'Co',\r\n 27:'Cp',\r\n 28:'Cq',\r\n 29:'Cr',\r\n 30:'Da',\r\n 31:'Db',\r\n 32:'Dc',\r\n 33:'Dd',\r\n 34:'De',\r\n 35:'Df',\r\n 36:'Dg',\r\n 37:'Dh',\r\n 38:'Di',\r\n 39:'Dj',\r\n 40:'Ea',\r\n 41:'Eb',\r\n 42:'Ec',\r\n 43:'Ed',\r\n 44:'Ee',\r\n 45:'Ef',\r\n 46:'Eg',\r\n 47:'Eh',\r\n 48:'Aa',\r\n 49:'Ab',\r\n 50:'Ac',\r\n 51:'Ad',\r\n 52:'Ae',\r\n 53:'Af',\r\n 54:'Iu',\r\n 55:'Fa',\r\n 56:'Fb',\r\n 57:'Fc',\r\n 58:'Fd',\r\n 59:'Fe',\r\n 60:'Ff',\r\n 61:'Fg',\r\n 62:'Fh',\r\n 63:'--'}\r\n\r\n return self.option", "def change_distance_units(self, event):\n metricState = self.metricUnitRadioBtn.GetValue()\n imperialState = self.imperialUnitRadioBtn.GetValue()\n\n if self.distance_txtBox.GetValue() != \"\":\n last_value = float(self.distance_txtBox.GetValue())\n\n last_unitComboboxValue = self.distanceUnitCombobox.GetValue()\n\n if imperialState == True:\n self.distanceUnitCombobox.Clear()\n self.distanceUnitCombobox.AppendItems(self.imperials)\n self.distanceUnitCombobox.SetValue(self.imperials[0])\n elif metricState == True:\n self.distanceUnitCombobox.Clear()\n self.distanceUnitCombobox.AppendItems(self.metrics)\n 
self.distanceUnitCombobox.SetValue(self.metrics[0])\n\n current_unitComboboxValue = self.distanceUnitCombobox.GetValue()\n\n if self.distance_txtBox.GetValue() != \"\":\n if (last_unitComboboxValue == \"cm\" \n and current_unitComboboxValue == \"in\"):\n self.distance_txtBox.SetValue(str((last_value)/2.54))\n elif (last_unitComboboxValue == \"cm\" \n and current_unitComboboxValue == \"ft\"):\n self.distance_txtBox.SetValue(str((last_value)* 0.032808))\n elif (last_unitComboboxValue == \"m\" \n and current_unitComboboxValue == \"in\"):\n self.distance_txtBox.SetValue(str((last_value)* 39.370))\n elif (last_unitComboboxValue == \"m\" \n and current_unitComboboxValue == \"ft\"):\n self.distance_txtBox.SetValue(str((last_value)/0.3048))\n elif (last_unitComboboxValue == \"in\" \n and current_unitComboboxValue == \"cm\"):\n self.distance_txtBox.SetValue(str((last_value)*2.54))\n elif (last_unitComboboxValue == \"in\"\n and current_unitComboboxValue == \"m\"):\n self.distance_txtBox.SetValue(str((last_value)/39.370))\n elif (last_unitComboboxValue == \"ft\"\n and current_unitComboboxValue == \"cm\"):\n self.distance_txtBox.SetValue(str((last_value)/0.032808))\n elif (last_unitComboboxValue == \"ft\"\n and current_unitComboboxValue == \"m\"):\n self.distance_txtBox.SetValue(str((last_value)/3.2808))", "def updateGroup(self):\n if unicode(self.text()).replace(' ', '') \\\n != self.unitGroup.unitString().replace(' ', ''):\n self.unitGroup.update(unicode(self.text()), self.cursorPosition())\n self.emit(QtCore.SIGNAL('currentChanged')) # update listView\n self.unitUpdate() # replace text with formatted text", "def multi_unit(self, multi_unit):\n\n self._multi_unit = multi_unit", "def actualizeHardwarelabel (self, data):\n if data.has_key(StatusMsg.label_state):\n self.hardwarelabel.setText(self.stateDecoder.get(data.get(StatusMsg.label_state)))", "def initWidgets(self):\n self.loctext.setText(\"{0:g}\".format(self.loc))\n self.scaletext.setText(\"{0:g}\".format(self.scale))", "def __update_labels(self):\n\n self.__active_buses_stringvar.set(str(self.__bus_controller.buses_count))\n self.__active_lines_stringvar.set(str(len(self.__bus_controller.bus_dict)))\n self.__number_of_people_stringvar.set(str(self.__telegram_controller.people_count))\n self.__session_time_stringvar.set(self.session_time)\n\n messages =self.__bus_controller.bus_messages\n for n in range(0, BusController.MAX_MESSAGES_TO_DISPLAY):\n self.__free_text_stringvars_dict[n].set(messages[n])", "def setDataUnits(self, dataunits):\n\t\tself.dataUnits = dataunits\n\t\t\n\t\tx, y, z = self.dataUnits[0].getDimensions()\n\t\tself.dims = (x, y, z)\n\t\tself.newDimX.SetValue(\"%d\" % x)\n\t\tself.newDimY.SetValue(\"%d\" % y)\n\t\tself.newDimZ.SetValue(\"%d\" % z)\n\t\tself.dimsLbl.SetLabel(self.currDimText % (x, y, z))\n\t\tself.onUpdateDims(None)\n\t\tself.onSetToHalfSize(None)", "def set_base_image_labels(driver, user_disk, img_name, branch, target):\n\n dashes = [i for i, c in enumerate(img_name) if c=='-']\n cf_version = img_name[dashes[0]+1:dashes[3]]\n build_id = img_name[dashes[-1]+1:]\n\n driver.ex_set_volume_labels(user_disk,\n {'cf_version': cf_version, 'branch': branch,\n 'target': target, 'build_id': build_id})", "def drawlabels(t, t1):\r\n t.fd(250)\r\n t.pd()\r\n t.write(\"Life\", font=(\"Arial\", 10, \"bold\"))\r\n t.pu()\r\n t.back(12)\r\n t.pd()\r\n t.write(\"Exp.\", font=(\"Arial\", 10, \"bold\"))\r\n t.pu()\r\n t.back(238)\r\n t.right(90)\r\n t.fd(80)\r\n t1.pu()\r\n t1.back(50)\r\n t1.rt(90)\r\n t1.fd(250)\r\n t1.pd()\r\n 
t1.write(\"Year\", font=(\"Arial\", 10, \"bold\"))\r\n t1.pu()\r\n t1.back(250)\r\n t1.left(90)\r\n t1.fd(50)", "def unit(self) -> str:", "def __init__(self,units=None):\n self.__units = units", "def test_default_units(self):\n s = State(\"water\", T=Q_(100, \"degC\"), p=Q_(1.0, \"atm\"))\n assert s.units is None\n set_default_units(\"SI\")\n s2 = State(\"water\", T=Q_(100, \"degC\"), p=Q_(1.0, \"atm\"))\n assert s2.units == \"SI\"\n set_default_units(\"EE\")\n s3 = State(\"water\", T=Q_(100, \"degC\"), p=Q_(1.0, \"atm\"))\n assert s3.units == \"EE\"\n set_default_units(None)", "def __init__ (self, size, name):\n\n self.size = size\n self.name = name\n self.units = [1 for x in range(size)]", "def load_unitsm(self):\n self.unit_file = self.path+'units.m'\n self.unit_dic = self.load_m(self.unit_file) #actual reading routine\n self.psix=self.unit_dic['psi_x']\n self.eq_x_r = self.unit_dic['eq_x_r']\n self.eq_x_z = self.unit_dic['eq_x_z']\n self.eq_axis_r = self.unit_dic['eq_axis_r']\n self.eq_axis_z = self.unit_dic['eq_axis_z']\n self.eq_axis_b = self.unit_dic['eq_axis_b']\n self.sml_dt = self.unit_dic['sml_dt']\n self.sml_wedge_n = self.unit_dic['sml_wedge_n']\n self.diag_1d_period = self.unit_dic['diag_1d_period']", "def unit_array(self, values):\n self._data_array.values = values\n self._units = self._data_array.attrs['units'] = str(values.units)", "def si_mass_units(kg_unit):\r\n if (\r\n kg_unit.scale.name != 'kilogram' and \r\n kg_unit.scale.symbol != 'kg'\r\n ):\r\n raise RuntimeError(\r\n \"conventional name required, got {0.name} and{0.symbol}\".format(\r\n kg_unit.scale\r\n )\r\n )\r\n \r\n register = kg_unit.register\r\n \r\n gram = register.unit( \r\n proportional_unit(kg_unit,\r\n 'gram',\r\n 'g', \r\n 1.0 / 1000.0,\r\n )\r\n )\r\n \r\n for p_i in metric_prefixes:\r\n if p_i.value != 1E3: \r\n register.unit( \r\n proportional_unit(kg_unit,\r\n p_i.name+'gram',\r\n p_i.symbol+'g', \r\n p_i.value / 1000.0\r\n )\r\n )", "def fill_tank(self):\r\n self.fuel_level = self.fuel_capacity\r\n print(\"Fuel tank is full\")", "def create_gen_labels(master: Widget) -> None:\r\n\r\n gen_label = Label(master, text='Gen:', font=self.FONT_NORMAL, bg=self.MAIN_BG)\r\n gen_label.pack(side=LEFT)\r\n self.gen_number = Label(master, text=0, font=self.FONT_NORMAL, bg=self.MAIN_BG)\r\n self.gen_number.pack(side=LEFT)", "def updateMeter(self, name1, name2, op):\r\n mini = 0\r\n maxi = 100\r\n pos = (self.var.get() - mini) / (maxi - mini)\r\n self.updateMeterLine(pos * 0.6 + 0.2)", "def setLabel2(*args):", "def convert_volume(self, event):\n try:\n #Compare other unit to one unit(cubic decimeters)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"acre foot\": 1233481.837548, \"barrels\": 158.987295, \"bushels(UK)\": 36.36872, \"bushels(US)\": 35.23907, \"centiliters\": 0.01, \"cubic centimeters\": 0.001, \"cubic decameters\": 1000000.0, \"cubic decimeters\": 1.0, \"cubic feet\": 28.316847, \"cubic inches\": 0.016387, \"cubic kilometers\": 1000000000000.0, \"cubic meters\": 1000.0, \"cubic mile\": 4168181825000.0, \"cubic millimeters\": 1e-06, \"cubic yards\": 764.554858, \"cups\": 0.236588, \"deciliters\": 0.1, \"dram\": 0.003697, \"dram(imperial)\": 0.003552, \"fluid ounces(US)\": 0.029574, \"fluid ounces(imperial)\": 0.028413, \"gallons(US,dry)\": 4.404884, \"gallons(US,liquid)\": 3.785412, \"gallons(imperial)\": 4.54609, \"gill(US)\": 0.118294, \"gill(imperial)\": 0.142065, \"liters\": 1.0, \"liters(1901-1964)\": 1.000028, \"microliters\": 1e-06, 
\"milliliters\": 0.001, \"nanoliters\": 1e-09, \"picoliters\": 1e-12, \"pints(US,dry)\": 0.55061, \"pints(US,liquid)\": 0.473176, \"pints(imperial)\": 0.568261, \"quarts(UK,dry)\": 1.101221, \"quarts(US,liquid)\": 0.946353, \"quarts(imperial)\": 1.136523, \"table spoons\": 0.014787, \"tea spoons\": 0.004929}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def set_current_units(units=None):\n manager = Manager() \n if units is not None:\n # set units using a supplied dictionary\n for utype in units:\n if utype in manager.allowed_utypes:\n un = units[utype]\n # handle the identity of \"frequency\" and \"energy\"\n if utype==\"frequency\":\n utype=\"energy\"\n un = units[\"frequency\"]\n \n manager.set_current_units(utype,un)\n else:\n raise Exception(\"Unknown units type %s\" % utype)\n\n else:\n # reset units to the default\n for utype in manager.internal_units:\n if utype in manager.allowed_utypes:\n manager.set_current_units(utype,manager.internal_units[utype])\n else:\n raise Exception(\"Unknown units type %s\" % utype)", "def extract_specs(self):\n vDeflection_unit = \"lcd-info.{}.conversion-set.conversion.force.scaling.unit.unit\".format(\n self.channel_numbers[\"vDeflection\"])\n self.units[\"vDeflection\"] = self.general[vDeflection_unit]\n\n height_unit = \"lcd-info.{}.conversion-set.conversion.nominal.scaling.unit.unit\".format(\n self.channel_numbers[\"height\"])\n self.units[\"height\"] = self.general[height_unit]", "def reset_total():\n total_disks.fill((0, 0, 0, 0))\n total_disks.blit(gamefont.create_label(font_renderer, \"Total Disks {}\".format(len(disk_sprites))), (0, 0))", "def fillCurveLE(self):\n\t\tsel = mn.ls( sl = True, dag = True, ni = True, typ = 'nurbsCurve' )\n\t\tself.curve_le.setText( sel[0].name )", "def setLabel(*args):", "def setLabel(*args):", "def setLabel(*args):", "def setLabel(*args):", "def setLabel(*args):", "def fill_tank(self):\n self.fuel_level = self.fuel_capacity\n print(\"Fuel tank is full.\")", "def fill_tank(self):\n self.fuel_level = self.fuel_capacity\n print(\"Fuel tank is full.\")", "def set_unit(self, subfield, new_unit):\n raise exceptions.UnitError(f\"Can not change the unit of a text field\")", "def _init_after_assignment(self):\n self.labels = { 'axes': [self.locus], \\\n 'elements': [self.pop._get_axis_elements(self.locus)] }\n self._init_labels(self.labels)", "def makeLabel(self):\n\n self.setIndexNames()\n\n if self.isInCore():\n self.getFirstChar()\n else:\n # stick with what we have. 
(default:ExCore)\n return\n self.label = self.firstChar + \"{0:03d}\".format(self.i2)\n if self.axial is not None:\n # add axial letter\n self.label = self.label + AXIAL_CHARS[self.axial]", "def __init__(self, name, unit_set):\n self.name = name\n self.unit_set = unit_set\n\n self.unit_name = units_of_quantities[self.name]\n\n unit_expr = sm.sympify(self.unit_name)\n unit_expr = unit_expr.subs(derived_units)\n\n self.symbolic_value = sm.sympify(str(unit_expr))\n atoms = self.symbolic_value.atoms(sm.Symbol)\n self.def_units = [Unit(atom.name) for atom in atoms]\n\n self.def_names, self.def_units = self._get_dicts(self.def_units)\n self.names, self.units = self._get_dicts(self.unit_set)\n\n self.def_coef = float(self.symbolic_value.subs(self.def_names))\n\n coef_dict = {}\n for key, val in six.iteritems(self.def_units):\n coef_dict[val.name] = self.units[key].coef\n self.coef_dict = coef_dict\n\n self.raw_coef = float(self.symbolic_value.subs(self.coef_dict))\n self.coef = self.raw_coef / self.def_coef", "def labelledCube(self, dim=None, sample=None):\n if dim is None:\n dim = self.D\n if sample is None:\n sample = range(1, int(self.poolSize)+1)\n \n all_labels = list(it.product(*(range(self.slices),) * dim))\n self.sample_labels = set(random.sample(all_labels, k= len(sample)))\n labelled_sample = {label : sample for label, sample in zip(self.sample_labels, sample)}\n self.text[\"labelledSamples\"] = labelled_sample\n return labelled_sample", "def BaseLabel(self, *args):\n return _XCAFDoc.XCAFDoc_MaterialTool_BaseLabel(self, *args)", "def update_unit_rect(self, unit):\n\t\tx, y = unit.tile_x, unit.tile_y\n\t\tscreen_x, screen_y = x*SIZE, y*SIZE\n\t\tunit.rect.x = screen_x\n\t\tunit.rect.y = screen_y", "async def on_unit_created(self, unit: Unit):", "def autolabel(rects):\n\t for rect in rects:\n\t\theight = rect.get_height()\n\t\tax.text(rect.get_x() + rect.get_width()/2., 1.01*height,\n\t\t '%d' % int(height),\n\t\t ha='center', va='bottom')", "def update_selection(self):\n\n # clear all boxes\n self.clear_boxes()\n self.draw_figure(self.s)\n\n # update temperature list\n if self.Data[self.s]['T_or_MW'] == \"T\":\n self.temperatures = np.array(self.Data[self.s]['t_Arai']) - 273.\n else:\n self.temperatures = np.array(self.Data[self.s]['t_Arai'])\n\n self.T_list = [\"%.0f\" % T for T in self.temperatures]\n self.tmin_box.SetItems(self.T_list)\n self.tmax_box.SetItems(self.T_list)\n self.tmin_box.SetValue(\"\")\n self.tmax_box.SetValue(\"\")\n self.Blab_window.SetValue(\n \"%.0f\" % (float(self.Data[self.s]['pars']['lab_dc_field']) * 1e6))\n if \"saved\" in self.Data[self.s]['pars']:\n self.pars = self.Data[self.s]['pars']\n self.update_GUI_with_new_interpretation()\n self.Add_text(self.s)\n self.write_sample_box()", "def labels_y(y_unit, latex = True, verbose = 0): \n\n if verbose > 1:\n print(\"SpectraTools.Resources.UnitConversion.labels_y()\") \n \n if y_unit in absorption_labels:\n return \"Absorption (OD)\"\n elif y_unit in milli_absorption_labels:\n return \"Absorption (mOD)\" \n elif y_unit in transmission_1_labels:\n return \"Transmission\"\n elif y_unit in transmission_pct_labels:\n if latex:\n return r\"Transmission (\\%)\"\n else:\n return \"Transmission (%)\"\n else:\n return y_unit", "def setUnits(self, *args):\n return _libsbml.Species_setUnits(self, *args)", "def map(self, mapunit):\n\n #The number of bands to measure the LF for\n if len(mapunit['luminosity'].shape)>1:\n self.nbands = mapunit['luminosity'].shape[1]\n else:\n mapunit['luminosity'] = 
np.atleast_2d(mapunit['luminosity']).T\n self.nbands = 1\n\n #If only measuring for centrals, get the appropriate\n #rows of the mapunit\n\n mu = {}\n if self.central_only:\n delete_after_map = True\n for k in mapunit.keys():\n mu[k] = mapunit[k][mapunit['central']==1]\n else:\n delete_after_map = False\n mu = mapunit\n\n #Want to count galaxies in bins of luminosity for\n #self.nbands different bands in self.nzbins\n #redshift bins\n if self.lumcounts is None:\n self.lumcounts = np.zeros((self.njack, len(self.magbins)-1,\n self.nbands, self.nzbins))\n\n #Assume redshifts are provided, and that the\n #mapunit is sorted in terms of them\n \n if self.lightcone:\n for i, z in enumerate(self.zbins[:-1]):\n zlidx = mu['redshift'].searchsorted(self.zbins[i])\n zhidx = mu['redshift'].searchsorted(self.zbins[i+1])\n\n #Count galaxies in bins of luminosity\n for j in range(self.nbands):\n if not self.CMASS:\n c, e = np.histogram(mu['luminosity'][zlidx:zhidx,j],\n bins=self.magbins)\n else:\n cidx = self.selectCMASS(mu['appmag'][zlidx:zhidx])\n c, e = np.histogram(mu['luminosity'][zlidx:zhidx,j][cidx],\n bins=self.magbins)\n \n self.lumcounts[self.jcount,:,j,i] += c\n else:\n for j in range(self.nbands):\n if not self.CMASS:\n c, e = np.histogram(mu['luminosity'][:,j],\n bins=self.magbins)\n else:\n cidx = self.selectCMASS(mu['appmag'][:])\n c, e = np.histogram(mu['luminosity'][:,j][cidx],\n bins=self.magbins)\n \n self.lumcounts[self.jcount,:,j,0] += c\n\n if delete_after_map:\n True", "def convert_units(self):\n for prod in (\"ier\", \"ier_inc_rain\"):\n self.data[prod].data[:] /= 1e6", "def convert_mass(self, event):\n try:\n #Compare other unit to one unit(kilograms)\n current_value, current_unit = float(\"0\" + str(self.v.get())), self.dropdown.get()\n unit_comp = {\"Earth masses\": 5.97219e+24, \"Solar masses\": 1.9890000000000002e+30, \"carats\": 0.0002, \"cental\": 45.359237, \"decagrams\": 0.01, \"femtograms\": 1e-18, \"grains\": 6.479891000000001e-05, \"grams\": 0.001, \"hectograms\": 0.1, \"hundredweights\": 50.802345, \"kilograms\": 1.0, \"kilotonnes\": 1000000.0, \"megatonnes\": 1000000000.0, \"micrograms\": 1e-09, \"milligrams\": 1e-06, \"nanograms\": 1e-12, \"ounces(US & UK)\": 0.02835, \"ounces(precious metals)\": 0.031103, \"picograms\": 1e-15, \"pounds(US & UK)\": 0.453592, \"pounds(precious metals)\": 0.373242, \"slugs\": 14.593903, \"stones\": 6.350293, \"tonnes(metric)\": 1000.0, \"tons(UK)\": 1016.046909, \"tons(US)\": 907.18474}\n value_comp, printer = current_value * unit_comp[current_unit], \"\"\n unit_list = sorted(unit_comp.keys())\n unit_list.remove(current_unit)\n for unit in unit_list:\n printer += \"To %s \" % unit + \" \" * (max([len(i) for i in unit_list]) - len(unit)) + str(value_comp / unit_comp[unit]) + [\"\", \"\\n\"][unit_list[-1] != unit]\n except ValueError: #In case user enter the other type of value, not Int or Float\n printer = \"Value is invalid.\"\n self.print_text(printer)", "def fill_up(self):\n self.fuel = self.gas_tank_size", "def __init__(self, units):\n super(PintAxisInfo, self).__init__(label='{:P}'.format(units))", "def giveup():\n for matrix in xrange(4):\n display.set_raw64(LED8x8ICONS['UNKNOWN'],matrix)\n print \"Error occured.\"\n sys.exit(1)", "def setName(self, *args):\n return _libsbml.UnitDefinition_setName(self, *args)", "def UpgradeWeapon(self):\n label = self.wepSpin.get()\n for index in range(min(self.squad.current_size, self.unitToWeap[label][1])):\n upgradedUnit = next(x for x in self.squad.units if x.name == 
self.unitToWeap[label][3])\n upgradedUnit.armRangedWeapon(weapon.ranged_weapons[self.unitToWeap[label][0]])\n self.squad.point_cost += self.unitToWeap[label][2] \n self.pointLabel['text'] = self.squad.point_cost", "def update(self, **kwargs):\n self.ids.Label1.text = str(distance) + ' kilometers'", "def set_label(self, mp, dn):\n for p in self.partition_list:\n if p[0] == mp:\n p[3] = dn", "def rec_default(self):\n self.pcdi_triggers.setText('(50,50)')\n self.pcdi_type.setText('LUCY')\n self.pcdi_iter.setText('20')\n self.pcdi_normalize.setText('true')\n self.pcdi_roi.setText('(16, 16, 16)')", "def to(self, new_unit, **kwargs):\n new_unit = u.Unit(new_unit)\n return self * (self.unit.to(new_unit, **kwargs) * new_unit / self.unit)", "def _init_after_assignment(self):\n self.labels = { 'axes': [self.locus1, self.locus2, self.locus3], \\\n 'elements': [self.pop._get_axis_elements(self.locus1), self.pop._get_axis_elements(self.locus2), self.pop._get_axis_elements(self.locus3)] }\n self._init_labels(self.labels)", "def test_reneaming_old_default_labels_to_new_fixed_labels():\n from dtscalibration import DataStore\n\n cable_len = 100.0\n nt = 3\n time = np.arange(nt)\n x = np.linspace(0.0, cable_len, 8)\n ts_cold = np.ones(nt) * 4.0 + np.cos(time) * 4\n ts_warm = np.ones(nt) * 20.0 + -np.sin(time) * 4\n\n C_p = 1324 # 1/2 * E0 * v * K_+/lam_+^4\n eta_pf = np.cos(time) / 10 + 1 # eta_+ (gain factor forward channel)\n eta_pb = np.sin(time) / 10 + 1 # eta_- (gain factor backward channel)\n C_m = 5000.0\n eta_mf = np.cos(time + np.pi / 8) / 10 + 1\n eta_mb = np.sin(time + np.pi / 8) / 10 + 1\n dalpha_r = 0.005284\n dalpha_m = 0.004961\n dalpha_p = 0.005607\n gamma = 482.6\n\n temp_real_kelvin = np.zeros((len(x), nt)) + 273.15\n temp_real_kelvin[x < 0.2 * cable_len] += ts_cold[None]\n temp_real_kelvin[x > 0.85 * cable_len] += ts_warm[None]\n temp_real_celsius = temp_real_kelvin - 273.15\n\n st = (\n eta_pf[None]\n * C_p\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_p * x[:, None])\n * np.exp(gamma / temp_real_kelvin)\n / (np.exp(gamma / temp_real_kelvin) - 1)\n )\n ast = (\n eta_mf[None]\n * C_m\n * np.exp(-dalpha_r * x[:, None])\n * np.exp(-dalpha_m * x[:, None])\n / (np.exp(gamma / temp_real_kelvin) - 1)\n )\n rst = (\n eta_pb[None]\n * C_p\n * np.exp(-dalpha_r * (-x[:, None] + cable_len))\n * np.exp(-dalpha_p * (-x[:, None] + cable_len))\n * np.exp(gamma / temp_real_kelvin)\n / (np.exp(gamma / temp_real_kelvin) - 1)\n )\n rast = (\n eta_mb[None]\n * C_m\n * np.exp(-dalpha_r * (-x[:, None] + cable_len))\n * np.exp(-dalpha_m * (-x[:, None] + cable_len))\n / (np.exp(gamma / temp_real_kelvin) - 1)\n )\n\n c_f = np.log(eta_mf * C_m / (eta_pf * C_p))\n c_b = np.log(eta_mb * C_m / (eta_pb * C_p))\n\n dalpha = dalpha_p - dalpha_m # \\Delta\\alpha\n alpha_int = cable_len * dalpha\n\n df = c_f # reference section starts at first x-index\n db = c_b + alpha_int\n i_fw = np.log(st / ast)\n i_bw = np.log(rst / rast)\n\n E_real = (i_bw - i_fw) / 2 + (db - df) / 2\n\n ds = DataStore(\n {\n \"ST\": ([\"x\", \"time\"], st),\n \"AST\": ([\"x\", \"time\"], ast),\n \"REV-ST\": ([\"x\", \"time\"], rst),\n \"REV-AST\": ([\"x\", \"time\"], rast),\n \"userAcquisitionTimeFW\": ([\"time\"], np.ones(nt)),\n \"userAcquisitionTimeBW\": ([\"time\"], np.ones(nt)),\n \"cold\": ([\"time\"], ts_cold),\n \"warm\": ([\"time\"], ts_warm),\n },\n coords={\"x\": x, \"time\": time},\n attrs={\"isDoubleEnded\": \"1\"},\n )\n ds = ds.rename_labels()\n\n sections = {\n \"cold\": [slice(0.0, 0.09 * cable_len)],\n \"warm\": [slice(0.9 
* cable_len, cable_len)],\n }\n\n real_ans2 = np.concatenate(([gamma], df, db, E_real[:, 0]))\n\n ds.calibration_double_ended(\n sections=sections,\n st_var=1.5,\n ast_var=1.5,\n rst_var=1.0,\n rast_var=1.0,\n method=\"wls\",\n solver=\"sparse\",\n fix_gamma=(gamma, 0.0),\n )\n\n assert_almost_equal_verbose(df, ds.df.values, decimal=14)\n assert_almost_equal_verbose(db, ds.db.values, decimal=13)\n assert_almost_equal_verbose(\n x * (dalpha_p - dalpha_m), ds.alpha.values - ds.alpha.values[0], decimal=13\n )\n assert np.all(np.abs(real_ans2 - ds.p_val.values) < 1e-10)\n assert_almost_equal_verbose(temp_real_celsius, ds.tmpf.values, decimal=10)\n assert_almost_equal_verbose(temp_real_celsius, ds.tmpb.values, decimal=10)\n assert_almost_equal_verbose(temp_real_celsius, ds.tmpw.values, decimal=10)\n pass" ]
[ "0.65493584", "0.65493584", "0.65493584", "0.61209", "0.5876205", "0.5848301", "0.5719211", "0.56192005", "0.56159955", "0.5609573", "0.55953413", "0.55090725", "0.5500092", "0.54777503", "0.54361576", "0.5392763", "0.5386514", "0.53529835", "0.5319486", "0.53029513", "0.5297845", "0.5297845", "0.5297845", "0.52888274", "0.52446663", "0.52168554", "0.5202094", "0.5187809", "0.51671207", "0.5160897", "0.51603407", "0.51434666", "0.51356816", "0.51293296", "0.5075076", "0.5074127", "0.50699604", "0.50349504", "0.50107706", "0.4996898", "0.49916345", "0.49828547", "0.49730834", "0.49606645", "0.49580404", "0.49379665", "0.4937309", "0.49348137", "0.4922665", "0.49204504", "0.4917921", "0.49140173", "0.49092308", "0.4906485", "0.48991272", "0.4894631", "0.4891032", "0.48877084", "0.48824906", "0.48802", "0.4857377", "0.48551378", "0.48460278", "0.48406526", "0.48404992", "0.4839734", "0.48337793", "0.48125955", "0.48125955", "0.48125955", "0.48125955", "0.48125955", "0.4810913", "0.4810913", "0.48083395", "0.47962332", "0.47956994", "0.47883046", "0.4786887", "0.47862378", "0.47851685", "0.47811836", "0.47764152", "0.47731182", "0.4773018", "0.47650376", "0.47641805", "0.47636753", "0.4761539", "0.4759366", "0.4757058", "0.47564897", "0.4753754", "0.47532347", "0.4750402", "0.4750094", "0.47484675", "0.47461632", "0.474369", "0.47407305" ]
0.6863121
0
Seek to the given time.
def seek_to(self, ms):
    self.proxy.seek_to(ms)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def seek(self, time):\n command = 'seek ' + str(time)\n self.run_command(command)", "def seek(self, time: int):\n self._select_interface(self._rc_seek, self._http_seek, time)", "def _seek(self, time_offset):\n if (time.time() - (self.time_start + self.time_offset)) < 0.1:\n log.info('Seek recived within 100ms of start - Assuming this is a bounceback from test_audio - applying automatic time mutator of {0}s'.format(time_offset))\n self.time_mutator = time_offset\n self.time_start = time.time() - time_offset\n log.info('seek {0}'.format(time_offset))", "def seek(self, offset):\n if isinstance(offset, Time):\n offset = offset-self.time0\n elif isinstance(offset, str):\n offset = Time(offset, scale='utc') - self.time0\n\n try:\n offset = offset.to(self.dtsample.unit)\n except AttributeError:\n pass\n except u.UnitsError:\n offset = int(offset.to(u.byte).value)\n else:\n offset = (offset/self.dtsample).to(u.dimensionless_unscaled)\n offset = int(round(offset) * self.recordsize)\n self._seek(offset)", "def seek_to_start_time(self):\n return 0", "def set_time(self, value: float):\n if value < 0:\n value = 0\n\n self.player.seek(value)", "def seek(self, offset):\n spotifyconnect.Error.maybe_raise(lib.SpPlaybackSeek(offset))", "def go_to(self, time):\n half_dur = self.half_duration\n self.set_interval((time - half_dur, time + half_dur))", "def seek(self, position: int, whence: int = 0) -> None:\n raise NotImplementedError(\n 'Seek operation is not supported by this object: %r' % self\n ) # pragma: no cover", "def at_time(self, time):\n return self._collection.at_time(time)", "async def seek(self, pos: int):\n pos = max(pos, 0) # Prevent seeking before start of track\n await self._bot.lavalink.ws.send(op='seek', guildId=self.guild_id, position=pos)", "def testSeek(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_TSK, location='/a_directory/another_file',\n inode=self._IDENTIFIER_ANOTHER_FILE, parent=self._bde_path_spec)\n file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)\n\n self._TestSeek(file_object)", "def testSeek(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_TSK, location='/a_directory/another_file',\n inode=self._IDENTIFIER_ANOTHER_FILE, parent=self._bde_path_spec)\n file_object = tsk_file_io.TSKFile(self._resolver_context, path_spec)\n\n self._TestSeek(file_object)", "def seek(self, offset):\n spotify.Error.maybe_raise(\n lib.sp_session_player_seek(self._session._sp_session, offset))", "def seek(self, offset, relativeTo):\n self.oFile.seek(offset, relativeTo)", "def time(self, offset=None):\n if offset is None:\n offset = self.offset\n if offset % self.recordsize != 0:\n warnings.warn(\"Offset for which time is requested is not \"\n \"integer multiple of record size.\")\n return self.time0 + self.tell(offset, u.day)", "def set_imeastime(self, time):\n self.itime = time", "def seek(self, seek_value, video_display_name=None):\r\n seek_time = self._parse_time_str(seek_value)\r\n seek_selector = self.get_element_selector(video_display_name, ' .video')\r\n js_code = \"$('{seek_selector}').data('video-player-state').videoPlayer.onSlideSeek({{time: {seek_time}}})\".format(\r\n seek_selector=seek_selector, seek_time=seek_time)\r\n self.browser.execute_script(js_code)", "def seek_to_start_time(self):\n offset_ranges = self.consumer.get_offset_range()\n\n # Kafka uses milliseconds\n offsets = self.consumer.offset_for_time(self.start_time)\n\n for i, (lowest, highest) in enumerate(offset_ranges):\n 
if offsets[i] == -1:\n logging.warning(\n \"Could not find corresponding offset for start time, so set \"\n \"position to latest message\"\n )\n offsets[i] = highest\n\n if offsets[i] == lowest:\n # We've gone back as far as we can.\n raise TooOldTimeRequestedException(\n \"Cannot find start time in the data, either the supplied \"\n \"time is too old or there is no data available\"\n ) # pragma: no mutate\n\n self.consumer.seek_by_offsets(offsets)\n\n return offsets", "def target_position(self, time):\n pass", "def target_position(self, time):\n pass", "def seek(self, *args) -> \"int\":\n return _ida_fpro.qfile_t_seek(self, *args)", "def timeline_widget_seek_click_slot(self, t):\n # self.valkkafs_manager.timeCallback__(t) # DEBUGGING\n # print(\"PlaybackController: user clicked seek to: %i == %s\" % (t, formatMstimestamp(t))) # DEBUGGING\n self.valkkafs_manager.smartSeek(t)", "def advance_to(self, timestamp: float):\n now = self.__original_time()\n if timestamp < now:\n raise ValueError(\"cannot retreat time reference: \"\n \"target {} < now {}\"\n .format(timestamp, now))\n self.__delta = timestamp - now", "def update(self, time):\n raise NotImplementedError", "def update(self, time):\n raise NotImplementedError", "def set_search_time(self, play_time):\n self.get(COMMAND_UIC, 'SetSearchTime', [('playtime', int(play_time))])", "def seek(self, offset, whence=io.SEEK_SET):\n if whence == io.SEEK_SET:\n self._offset = offset\n elif whence == io.SEEK_CUR:\n self._offset += offset\n elif whence == io.SEEK_END:\n self._offset = self.end_of_file + offset\n return self._offset", "def object_at(self, time):\n for event in self._timeline: \n if time >= event.start_time and time <= event.end_time:\n return event.obj\n return self._timeline[-1].obj", "def set_time(self, time):\n self._time = time", "def seek(self, offset, from_what=0):\n if from_what == 0: # From the begining\n if offset >= self.tell():\n self.seek(offset - self.tell(), from_what=1)\n else:\n raise NotImplementedError(\"Can't seek backwards\")\n elif from_what == 1: # From the cursor position\n if offset < 0:\n raise NotImplementedError(\"Can't seek backwards\")\n else:\n self.read(offset)\n else:\n raise NotImplementedError(\"Can't seek from there\")\n return self.tell()", "def approximate_position(self, at_time: int) -> BasePosition:\n pass", "def seek_track(self, position_ms, device=None, **kwargs):\n if not isinstance(position_ms, int):\n logger.warning(\"position_ms must be an integer\")\n return None\n\n return self._put(\n API.SEEK.value,\n position_ms=position_ms,\n device_id=device,\n check_202=True,\n **kwargs,\n )", "def time(self, time):\n\n self._time = time", "def time(self, time):\n\n self._time = time", "def time(self, time):\n\n self._time = time", "def time(self, time):\n\n self._time = time", "def time(self, time):\n\n self._time = time", "def advancePosition(self,time):\n velocity = self.getVelocity()\n return self.x + time*velocity", "def jump_to(self):\n\n jt = dialog.JumpTo(self.timeFormat)\n\n if jt.exec_():\n if self.timeFormat == HHMMSS:\n newTime = int(time2seconds(jt.te.time().toString(HHMMSSZZZ)) * 1000)\n else:\n newTime = int(jt.te.value() * 1000)\n\n if self.playerType == VLC:\n if self.playMode == FFMPEG:\n frameDuration = Decimal(1000 / list(self.fps.values())[0])\n currentFrame = round(newTime / frameDuration)\n self.FFmpegGlobalFrame = currentFrame\n\n if self.second_player():\n currentFrame2 = round(newTime / frameDuration)\n self.FFmpegGlobalFrame2 = currentFrame2\n\n if self.FFmpegGlobalFrame > 
0:\n self.FFmpegGlobalFrame -= 1\n if self.second_player() and self.FFmpegGlobalFrame2 > 0:\n self.FFmpegGlobalFrame2 -= 1\n self.ffmpegTimerOut()\n\n else: # play mode VLC\n\n if self.media_list.count() == 1:\n\n if newTime < self.mediaplayer.get_length():\n self.mediaplayer.set_time(newTime)\n if self.simultaneousMedia:\n self.mediaplayer2.set_time(int(self.mediaplayer.get_time() -\n self.pj[OBSERVATIONS][self.observationId]\n [TIME_OFFSET_SECOND_PLAYER] * 1000))\n\n else:\n QMessageBox.warning(self, programName,\n \"The indicated position is behind the end of media ({})\".\n format(seconds2time(self.mediaplayer.get_length() / 1000)))\n\n elif self.media_list.count() > 1:\n\n if newTime < sum(self.duration):\n\n # remember if player paused (go previous will start playing)\n flagPaused = self.mediaListPlayer.get_state() == vlc.State.Paused\n\n tot = 0\n for idx, d in enumerate(self.duration):\n if newTime >= tot and newTime < tot + d:\n self.mediaListPlayer.play_item_at_index(idx)\n\n # wait until media is played\n while True:\n if self.mediaListPlayer.get_state() in [vlc.State.Playing, vlc.State.Ended]:\n break\n\n if flagPaused:\n self.mediaListPlayer.pause()\n\n self.mediaplayer.set_time(newTime -\n sum(self.duration[0: self.media_list.index_of_item(\n self.mediaplayer.get_media())]))\n\n break\n tot += d\n else:\n QMessageBox.warning(self, programName,\n \"The indicated position is behind the total media duration ({})\".format(\n seconds2time(sum(self.duration) / 1000)))\n\n self.timer_out()\n self.timer_spectro_out()\n # self.timer_plot_data_out()", "def get(self, filename, from_time, to_time):\n raise NotImplementedError()", "def testSeek(self):\n self._TestSeek(self._tsk_partition_path_spec)", "def testSeek(self):\n self._TestSeek(self._tsk_partition_path_spec)", "def time(self, time: float) -> None:\n self._time = time", "def seek(self,event):\r\n if self.app.controlLock.locked():\r\n return\r\n self.app.controlLock.acquire()\r\n x = event.x\r\n scalex,_ = self.getScale()\r\n scalex_secs = [scalex[0]/self.samplerate,scalex[1]/self.samplerate]# Get x scale in seconds\r\n seekTo = (x/self.w) * (scalex_secs[1]-scalex_secs[0]) + scalex_secs[0]# Transform pixel coordinates to represented time\r\n self.app.videoPlayer.pause()\r\n self.app.videoPlayer.seek(seekTo-self.app.dataOffset)\r\n self.app.videoPlayer.pause()# Restart audio to sync\r\n self.update(self.app.videoPlayer.startTimestamp)\r\n self.draw()\r\n self.app.videoPlayer.play()\r\n self.app.controlLock.release()", "def advance(self, dt):\n self.workTill(self.currentTime + dt)", "def jump_in_video(self, jump_time):\n self.pause_video()\n current_time = self.driver.find_element_by_xpath('//UIAApplication[1]/UIAWindow[1]/UIAStaticText[1]')\n current_time_text = current_time.text\n total_time = self.driver.find_element_by_xpath('//UIAApplication[1]/UIAWindow[1]/UIAStaticText[2]')\n total_time_text = total_time.text\n\n # total_time = minutes*60 + seconds\n total_time_text = float(total_time_text[-5:-3])*60 + float(total_time_text[-2:])\n\n seek_pct = jump_time / total_time_text\n\n seek_bar = self._find_element(class_name='UIASlider')\n seek_bar_size = seek_bar.size['width'] - current_time.size['width'] - total_time.size['width']\n seek_bar_percentage = float(float(str(seek_bar.get_attribute(\"value\"))[:-1]) / 100)\n\n # width * seek_pct is how far over in the bar to tap\n seek_bar_end_x = seek_bar.location['x'] + (seek_bar.size['width'] * seek_pct) - total_time.size['width']\n\n # this is just the vertical middle of the seek 
bar\n seek_bar_end_y = seek_bar.location['y'] + seek_bar.size['height']/2\n\n seek_bar_start_x = seek_bar.location['x'] + current_time.size['width'] + (seek_bar_size * seek_bar_percentage)\n\n while seek_bar_start_x < seek_bar_end_x:\n self.swipe(seek_bar_start_x, seek_bar_end_y, seek_bar_end_x, seek_bar_end_y, 2000)\n seek_bar_start_x += 15\n print seek_bar_start_x\n\n self.unpause_video()", "def seek(self, offset, location=0):\n if (0 <= location <= len(self._tiff)) and (0 <= location+offset <= len(self._tiff)):\n self._offset = location+offset", "def seek(self, val):\n if self.p:\n self.p.set_position(val/100.0 + self.p.get_position())", "async def async_set_position_updated_at(self, time):\n self._position_updated_at = time", "def advance(self, time):\n raise \"use method advance of class ReactorNet\"\n #return _cantera.reactor_advance(self.__reactor_id, time)", "def setTimepoint(self, tp):\n\t\tpass", "def seek(self, loc):\n assert loc == 0\n\n # rewind progress bar\n if self.progressbar:\n self.progressbar.update(-self._fp.tell())\n\n self._fp.seek(loc)", "def seek(self,t):\r\n if (t > self.vid_len) or (t < 0):# If seeking to beyond end of video\r\n frame = ImageTk.PhotoImage(Image.fromarray(np.array([[0]*self.w]*self.h)))# Set frame to a black image of same proportions\r\n self.player.config(image=frame)\r\n self.player.image = frame\r\n self.root.update_idletasks()\r\n self.startTimestamp = time.time() - t\r\n if self.hasAudio:\r\n mixer.music.play(start=t,loops=0)\r\n self.updateDataplayers()\r\n # If already playing, skip calling the stream method, or if no video data loaded\r\n if self.isPlaying() or self.isEmpty():\r\n return\r\n self.state = VideoPlayer.State.PLAYING\r\n self.root.after(0,self.stream)", "def seek(self, offset, whence=io.SEEK_SET):\n if offset != 0 and whence == io.SEEK_SET:\n # logging.debug('IterStream: trying to seek to offset {0}.'\n # .format(offset))\n if offset > self.curr_pos:\n self.readinto(bytearray(offset - self.curr_pos))\n elif offset == self.curr_pos:\n pass\n else: # need to re-create iterable\n self.reset()\n self.readinto(bytearray(offset))\n if self.curr_pos != offset:\n # logging.debug('IterStream: curr_pos {0} != offset {1}!'\n # .format(self.curr_pos, offset))\n raise RuntimeError('programming error in IterStream.tell!')\n return self.curr_pos\n elif whence == io.SEEK_END: # seek to end\n # logging.debug('IterStream: seek to end')\n if self.size is None:\n # logging.debug('IterStream: trying to seek to end but size '\n # 'unknown --> raise IOError')\n raise IOError('size unknown, cannot seek to end')\n self.at_end = True # fake jumping to the end\n self.iterable = None # cannot safely be used any more\n self.leftover = None\n return self.size\n elif whence == io.SEEK_SET: # seek to start\n # logging.debug('IterStream: seek to start')\n self.reset()\n return 0\n elif whence == io.SEEK_CUR: # e.g. called by tell()\n # logging.debug('IterStream: seek to curr pos')\n if self.at_end:\n return self.size\n return self.curr_pos\n elif whence not in (io.SEEK_SET, io.SEEK_CUR, io.SEEK_END):\n # logging.debug('Illegal 2nd argument to seek(): {0}'\n # .format(whence))\n raise IOError('Illegal 2nd argument to seek(): {0}'.format(whence))\n else:\n # logging.debug('not implemented: {0}, {1}'.format(offset, whence))\n raise NotImplementedError('seek only partially implemented. 
'\n 'Cannot yet seek to {0} from {1}'\n .format(offset, whence))", "def set_time(self, time):\n with self.loopback_guard('time'):\n self.widget().setTime(time)", "def seek(self, position):\n if self.player:\n value = position / self._length\n self.player.set_position(value)", "def _setCursorLocOnTimeLabel(self, waveform, t):\n self.tm.setTime(t)", "def getPosition(self, time: float, view: Optional[Str] = ...) -> CVec3:\n ...", "async def async_media_seek(self, position):\n if not self._slave_mode:\n _LOGGER.debug(\"Seek. Device: %s, DUR: %s POS: %\", self.name, self._duration, position)\n if self._duration > 0 and position >= 0 and position <= self._duration:\n value = await self.async_call_linkplay_httpapi(\"setPlayerCmd:seek:{0}\".format(str(position)), None)\n self._position_updated_at = utcnow()\n self._idletime_updated_at = self._position_updated_at\n if value != \"OK\":\n _LOGGER.warning(\"Failed to seek. Device: %s, Got response: %s\", self.entity_id, value)\n else:\n await self._master.async_media_seek(position)", "def update_time(self, offset):\n offset = float(offset[1:])\n self.diff_since_last = offset - self.time_offset\n self.time_since_last += self.diff_since_last\n self.time_since_last_events += self.diff_since_last\n self.time_offset = offset", "def time(self, time):\n # type: (int) -> None\n\n if time is not None:\n if not isinstance(time, int):\n raise TypeError(\"Invalid type for `time`, type has to be `int`\")\n\n self._time = time", "def seeked(self, seeked):\n # type: (int) -> None\n\n if seeked is not None:\n if not isinstance(seeked, int):\n raise TypeError(\"Invalid type for `seeked`, type has to be `int`\")\n\n self._seeked = seeked", "def now(self):\n return self._startTime + self.timeToOffset(self.currentTime, self._timeScale)", "def read_timed(self, buf: AnyWritableBuf, timer: Timer | int, /) -> None:", "def time_so_far(self, easy_read=False):\n so_far = time.time() - self.start_time\n if easy_read:\n return seconds_to_hour_min_sec(so_far)\n return so_far", "def advance_time(self, set_to=None, increment_by=None):\n self._time_condition.acquire()\n if set_to is not None:\n self._time = set_to\n else:\n self._time += increment_by\n self._time_condition.notifyAll()\n self._time_condition.release()", "def change_time(self, new_time):\r\n self.when = new_time", "def set_time(self, set_time):\n\n self._set_time = set_time", "def at(self, t, tol=None):\r\n return self.data[..., self.time.index_at(t)]", "def set_current_time(self, ttime):\n if not isinstance(ttime, Time):\n raise TypeError\n try:\n localtime = ttime.local_repr().split()\n timeSetCmd = 'date -s ' + localtime[3]\n #XXX: here seems a dirty quick way (os.system).\n os.system(timeSetCmd)\n yield WaitDBus(self.rtc.SetCurrentTime, int(ttime.value) )\n except Exception, ex:\n logger.exception(\"Exception : %s\", ex)\n raise", "def media_seek(self, position: float) -> None:\n media_controller = self._media_controller()\n media_controller.seek(position)", "def get_scan_by_time(self, time):\n scan_ids = tuple(self.index)\n lo = 0\n hi = len(scan_ids)\n while hi != lo:\n mid = (hi + lo) // 2\n sid = scan_ids[mid]\n sid = sid.decode('utf-8')\n scan = self.get_scan_by_id(sid)\n if not self._validate(scan):\n sid = scan_ids[mid - 1]\n scan = self.get_scan_by_id(sid)\n if not self._validate(scan):\n sid = scan_ids[mid - 2]\n scan = self.get_scan_by_id(sid)\n\n scan_time = scan.scan_time\n if scan_time == time:\n return scan\n elif (hi - lo) == 1:\n return scan\n elif scan_time > time:\n hi = mid\n else:\n lo = mid\n 
if hi == 0 and not self._use_index:\n raise TypeError(\"This method requires the index. Please pass `use_index=True` during initialization\")", "def _jump_to_spike(self, delta=+1):\n spike_times = self.get_spike_times()\n if spike_times is not None and len(spike_times):\n ind = np.searchsorted(spike_times, self.time)\n n = len(spike_times)\n self.go_to(spike_times[(ind + delta) % n])", "def seek(self, loc):\n assert loc == 0\n\n # rewind progress bar\n if self.progressbar:\n self.progressbar.update(-self._tell)\n\n self._fp_left.seek(loc)\n self._fp_right.seek(loc)\n self._tell = loc\n self._buf = Buffer()", "def get_next_known_start_time(self, current_time):\n raise NotImplementedError()", "async def seek(self, offset):\n\n await self.VoiceClient.http.setQueueSource(self.tag, {\"start_position\": offset})\n\n return self", "def get(self, target_time=None):\n if target_time is None:\n target_time = Servo.ctime()\n size = len(self.history)\n idx = -1\n with History.lock:\n while idx >= -size:\n data = self.history[idx]\n timestamp, position = data[0], data[1:]\n if timestamp <= target_time:\n return data\n elif idx - 1 > -size:\n prev_timestamp = self.history[idx - 1]\n if prev_timestamp <= target_time:\n return self.history[idx - 1] # It's better to interpolate\n else:\n idx -= 1\n continue\n else:\n return self.history[-size]", "def set_umeastime(self, time):\n self.utime = time", "def setTime(self,time):\n self.time = time", "def update(self, time=None):\n if self.realtime:\n return\n if time is None: # clock in externally-clocked mode, need valid time\n return\n self._time = time", "def _seek(self, offset):\n assert offset % self.recordsize == 0\n file_number, file_offset = divmod(offset,\n self.filesize - self.header_size)\n self.open(file_number)\n self.fh_raw.seek(file_offset + self.header_size)\n self.offset = offset", "def find_nearest_time(self, time):\n\n idx = np.searchsorted(self.times, time, side=\"left\")\n if idx > 0 and (idx == len(self.times) or math.fabs(time - self.times[idx-1]) < math.fabs(time - self.times[idx])):\n return self.times[idx-1]\n else:\n return self.times[idx]", "def _getIndexAtTime(self, startTime: float) -> int:\n return round(startTime * self.frameRate * self.sampleWidth)", "def seek(self, index: int, /) -> str:\n self.index = index\n return self.current", "def call_at(self, behavior, time):\n raise NotImplementedError()", "def at(self, t):\n return self.start + (self.end - self.start) * t", "def target_position(self, time):\n return self.target(time, self.positions, self.dt, self.num_way)", "def target_position(self, time):\n return self.target(time, self.positions, self.dt, self.num_way)", "def target_position(self, time):\n path, path_time = self.get_current_path(time)\n return path.target_position(path_time)", "def pass_time(self, t):\n cont = time.time() + t\n while time.time() < cont:\n time.sleep(0)", "def getPositionKeyTime(self, index, keyIndex, view) -> float:\n ...", "def advance(self, time):\n return _cantera.reactornet_advance(self.__reactornet_id, time)", "def seek_file_area(self, offset, whence=0):\n # if offset > (self.length - self.file_area_byte_offset):\n # raise ValueError(\"Cannot seek past end of volume.\")\n offset += self.file_area_byte_offset + self.offset\n self.infile.seek(offset, whence)", "def currentTime(*args, update: bool=True, q=True, query=True, e=True, edit=True,\n **kwargs)->Union[time, Any]:\n pass", "def sleep_until(self, time):\n raise NotImplementedError()", "def set_time(self, sec):\n self.set_timed(round(sec * 
10.0))", "def opened_at(self, datetime: datetime) -> None:", "def set_time(self, value: float):\n raise NotImplementedError()", "def seek(self, virtual_offset):\n\n # Do this inline to avoid a function call,\n # start_offset, within_block = split_virtual_offset(virtual_offset)\n start_offset = virtual_offset >> 16\n within_block = virtual_offset ^ (start_offset << 16)\n if start_offset != self._block_start_offset:\n # Don't need to load the block if already there\n # (this avoids a function call since _load_block would do nothing)\n self._load_block(start_offset)\n assert start_offset == self._block_start_offset\n if within_block > len(self._buffer):\n if not (within_block == 0 and len(self._buffer) == 0):\n raise ValueError(\"Within offset %i but block size only %i\"\n % (within_block, len(self._buffer)))\n self._within_block_offset = within_block\n return virtual_offset" ]
[ "0.7862459", "0.73034114", "0.69887996", "0.6565575", "0.64587533", "0.6248209", "0.5942373", "0.58812505", "0.5810669", "0.5806339", "0.5791968", "0.577806", "0.577806", "0.57382905", "0.5731552", "0.57055503", "0.56437117", "0.5611064", "0.55878776", "0.5579256", "0.5579256", "0.5577683", "0.55120885", "0.5478364", "0.54778165", "0.54778165", "0.54606485", "0.53774863", "0.5362387", "0.5358607", "0.535797", "0.53319466", "0.5299165", "0.5296941", "0.5296941", "0.5296941", "0.5296941", "0.5296941", "0.5284541", "0.5254472", "0.52540636", "0.520336", "0.520336", "0.51911616", "0.51851743", "0.51587665", "0.51519567", "0.5148521", "0.51413727", "0.5133921", "0.51213855", "0.51209617", "0.5112459", "0.51077515", "0.5100875", "0.50953275", "0.5093931", "0.50787205", "0.5063948", "0.50502217", "0.50473106", "0.503405", "0.50334907", "0.50334185", "0.5004736", "0.50013584", "0.49969262", "0.49865097", "0.49716094", "0.4951741", "0.49498513", "0.49408987", "0.49390376", "0.49386626", "0.4920194", "0.49158132", "0.49111384", "0.49015626", "0.48903757", "0.48842564", "0.48797634", "0.4859965", "0.48566008", "0.4844881", "0.484123", "0.4840083", "0.48371813", "0.4834772", "0.4834772", "0.48305088", "0.48178864", "0.48148176", "0.48128873", "0.48061022", "0.48016608", "0.4767888", "0.47665226", "0.4758131", "0.47476155", "0.47467232" ]
0.65188617
4
Create the task on the server
def create(self, server):
    if len(self.geometries) == 0:
        raise Exception('no geometries')
    return server.post(
        'task_admin',
        self.as_payload(),
        replacements={
            'slug': self.__challenge__.slug,
            'identifier': self.identifier})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def create_task():", "def create_task(self, name, value):\n pass", "def create_task():\n client = RequestManager()\n client.set_method(\"POST\")\n client.set_endpoint(\"/projects/{0}/stories/{1}/tasks\".format(STORED_ID['project_id'], STORED_ID['story_id']))\n name = \"\".join(choices(string.ascii_letters, k=6))\n body = {\"description\": name}\n client.set_body(json.dumps(body))\n response = client.execute_request()\n try:\n STORED_ID['task_id'] = response.json()['id']\n except KeyError:\n LOGGER.info(response.json())", "def new_task():\n req = request.json\n if 'cmd' in req:\n id = mongo.db.tasks.insert({\n 'cmd' : req['cmd'],\n 'status' : 'Not started'\n })\n\n response = {'id' : str(id)}\n return response", "def task():", "def make_task(self):\n return Task()", "def create(self, task_model):\n raise NotImplementedError()", "def create(self):\n\n # Validate Inputs\n create_dict = {\n \"model_id\": self.model.id,\n }\n\n try:\n # Create Task\n self.spinner.start()\n task_obj = self.dal.task.create(Task(create_dict))\n finally:\n self.spinner.stop()\n return task_obj", "def fusion_api_create_task(self, body, api=None, headers=None):\n return self.task.create(body, api, headers)", "def create_task(request):\n\n db_task = None\n params = request.POST.dict()\n params['owner'] = request.user\n slogger.glob.info(\"create task with params = {}\".format(params))\n try:\n db_task = task.create_empty(params)\n target_paths = []\n source_paths = []\n upload_dir = db_task.get_upload_dirname()\n share_root = settings.SHARE_ROOT\n if params['storage'] == 'share':\n data_list = request.POST.getlist('data')\n data_list.sort(key=len)\n for share_path in data_list:\n relpath = os.path.normpath(share_path).lstrip('/')\n if '..' in relpath.split(os.path.sep):\n raise Exception('Permission denied')\n abspath = os.path.abspath(os.path.join(share_root, relpath))\n if os.path.commonprefix([share_root, abspath]) != share_root:\n raise Exception('Bad file path on share: ' + abspath)\n source_paths.append(abspath)\n target_paths.append(os.path.join(upload_dir, relpath))\n else:\n data_list = request.FILES.getlist('data')\n\n if len(data_list) > settings.LOCAL_LOAD_MAX_FILES_COUNT:\n raise Exception('Too many files. Please use download via share')\n common_size = 0\n for f in data_list:\n common_size += f.size\n if common_size > settings.LOCAL_LOAD_MAX_FILES_SIZE:\n raise Exception('Too many size. 
Please use download via share')\n\n for data_file in data_list:\n source_paths.append(data_file.name)\n path = os.path.join(upload_dir, data_file.name)\n target_paths.append(path)\n with open(path, 'wb') as upload_file:\n for chunk in data_file.chunks():\n upload_file.write(chunk)\n\n params['SOURCE_PATHS'] = source_paths\n params['TARGET_PATHS'] = target_paths\n\n task.create(db_task.id, params)\n\n return JsonResponse({'tid': db_task.id})\n except Exception as exc:\n slogger.glob.error(\"cannot create task {}\".format(params['task_name']), exc_info=True)\n db_task.delete()\n return HttpResponseBadRequest(str(exc))\n\n return JsonResponse({'tid': db_task.id})", "def create_task(self, coro):\n task = self.loop.create_task(coro)\n return task", "def create_task(self, name, task_info=None):\n task = TaskInst(name=name).save()\n has_task_param = {\n 'super_role': SUPER_ROLE.OWNER,\n 'acceptance': ACCEPTANCE.ACCEPT\n }\n self.tasks.connect(task, has_task_param)\n start = StepInst(name='Start', node_type=NODE_TYPE.START, pos_x=-START_END_OFFSET).save()\n end = StepInst(name='End', node_type=NODE_TYPE.END, pos_x=START_END_OFFSET).save()\n task.steps.connect(start)\n task.steps.connect(end)\n task.update(task_info)\n return task", "def generate_tasks(self, task):", "def add_task(self,verbose = False):\n t = testrun()\n t.url = self.url\n t.runnable = self\n t.script = self.script\n t.location = self.location\n t.save()\n print \"Adding %s\" %(t)\n t.submit_to_wpt()", "def create_task(self, name, date, isComplete):\n user = User.objects.create(username='userdemo')\n user.set_password('calnote24')\n user.save()\n Task.objects.create(task=name, dueDate=date, isComplete=isComplete, user_id=user.id)", "def post(self):\n try:\n req = api.payload\n result = create_task(\n get_db(),\n req[\"task\"],\n date.fromisoformat(req[\"due_by\"]),\n Status[req[\"status\"]],\n )\n return task_to_dict(result), 201\n except ValueError:\n api.abort(422, \"Invalid request parameters\")", "def create_task(self, coro):\n return self.hass.loop.create_task(self.run_coro(coro))", "def _create_task(self, body, *, task_cls=Task):\n return task_cls(self, body)", "def create_task(self, ticket, user):\n return Task.objects.create_task('test', 'low', ticket, user, user)", "def test_anonymous_01_newtask(self):\r\n # Del previous TaskRuns\r\n self.create()\r\n self.del_task_runs()\r\n\r\n res = self.app.get('api/app/1/newtask')\r\n print res.data\r\n data = json.loads(res.data)\r\n assert data['info'], data", "def create_task(self, unused_parent, task, **kwargs):\n self.uri = task.get('app_engine_http_request').get('relative_uri')\n self.body = task.get('app_engine_http_request').get('body')\n logging.info('Task uri: %r', self.uri)\n logging.info('Task body: %r', self.body)\n return 'fake task'", "def run(self):\n report_info = self.api_client.create_task(self.host_id,\n CompatibilityReport.Spec(self.targetRelease))\n print(\"Compatibility Report API Task ID : \", report_info.get_task_id())", "def create_task(text):\n new_task = Tasks(task_text=text) \n new_task.save()", "def tasks_create(self, name, labels, bug, resource_type, resources, image_quality, frame_filter, **kwargs):\n url = self.api.tasks\n data = {'name': name,\n 'labels': labels,\n 'bug_tracker': bug,\n 'image_quality': image_quality,\n 'frame_filter': frame_filter\n }\n response = self.session.post(url, json=data)\n response.raise_for_status()\n response_json = response.json()\n log.info('Created task ID: {id} NAME: {name}'.format(**response_json))\n 
log.info(str(response.json()))\n self.tasks_data(response_json['id'], resource_type, resources)", "def new_task(self):\n print \"Create a new task.\"\n\n # Collect new task info from user\n description = raw_input(\"Enter task (140 characters max) > \")\n due_date = raw_input(\"Enter due date as 'year-mm-dd' (optional). > \")\n tags = raw_input(\n \"Enter tags for the task (comma separated) (optional). > \")\n tag_list = [tag.strip() for tag in tags.split(',')]\n try:\n new_task = doto.Task(self.user, description, due_date, tag_list)\n except (NameError, ValueError) as e:\n # On error, print and return.\n print \"Task not created. Error: \", e\n raw_input(\"Press Enter to continue.\")\n return\n self.current_collection.add(new_task)\n return", "def post(self):\n params = json.loads(self.request.body.decode())\n gid = params.get('gid')\n\n if gid: # apply an action on a specified task\n action = params.get('action')\n if action == 'pause':\n self.write(self._rpc.aria2.pause(self._token, gid))\n elif action == 'resume':\n self.write(self._rpc.aria2.unpause(self._token, gid))\n else: # invalid action\n self.send_error(400)\n\n else: # create a task\n url = params.get('url')\n self.write(self._rpc.aria2.addUri(\n self._token, [url], {'dir': self._download_file_dir}))", "def task():\n pass", "def task():\n pass", "def task(self):", "def task(self):", "def Task(self):\n return self.create_task_cls()", "def create():", "def create():", "def task_create(context, values, session=None):\n\n values = values.copy()\n session = session or get_session()\n with session.begin():\n task_info_values = _pop_task_info_values(values)\n\n task_ref = models.Task()\n _task_update(context, task_ref, values, session=session)\n\n _task_info_create(context,\n task_ref.id,\n task_info_values,\n session=session)\n\n return task_get(context, task_ref.id, session)", "def task(self, name):\n pass", "def SetUp(self):\n parser = argparse.ArgumentParser()\n parser.add_argument('--task-hash')\n args, _ = parser.parse_known_args()\n\n self.task = self.CreateNewTask(\n isolated_hash=args.task_hash,\n dimensions={'os': 'Ubuntu-14.04'},\n idle_timeout_secs=90, connection_timeout_secs=90,\n verbosity=logging.DEBUG)\n self.task.Create()\n self.task.WaitForConnection()", "def spawn(self, taskdef: TaskDefinition) -> RemoteTask:\n raise NotImplementedError()", "def create_task(ip, fingerprint, cfm_file, root, cookie):\n\n\tbase = \"http://{0}:{1}\".format(ip, fingerprint.port)\n\turi = '/CFIDE/administrator/scheduler/scheduleedit.cfm'\n\n\tif fingerprint.version in ['5.0']:\n\t\tdata = {\n\t\t\t\"taskNameOrig\" : \"\",\n\t\t\t\"TaskName\" : cfm_file,\n\t\t\t\"StartDate\" : \"01/01/2020\",\n\t\t\t\"EndDate\" : \"\",\n\t\t\t\"ScheduleType\" : \"Once\",\n\t\t\t\"StartTimeOnce\" : \"13:24:05\",\n\t\t\t\"Interval\" : \"Daily\",\n\t\t\t\"StartTimeDWM\" : \"\",\n\t\t\t\"customInterval\" : \"0\",\n\t\t\t\"CustomStartTime\" : \"\",\n\t\t\t\"CustomEndTime\" : \"\",\n\t\t\t\"Operation\" : \"HTTPRequest\",\n\t\t\t\"Port\" : state.external_port,\n\t\t\t\"ScheduledURL\" : \"http://{0}/{1}\".format(utility.local_address(), cfm_file),\n\t\t\t\"Username\" : \"\",\n\t\t\t\"Password\" : \"\",\n\t\t\t\"RequestTimeout\" : \"10\",\n\t\t\t\"ProxyServer\" : \"\",\n\t\t\t\"HttpProxyPort\" : \"23\",\n\t\t\t\"Publish\" : \"1\",\n\t\t\t\"filePath\" : root,\n\t\t\t\"File\" : cfm_file.replace('cfml', 'cfm'),\n\t\t\t\"adminsubmit\" : \"Submit+Changes\"\n\t\t}\n\n\telse:\n\t\tdata = {\n\t\t\t\"TaskName\" : cfm_file,\n\t\t\t\"Start_Date\" : \"Jan 2, 
2020\",\n\t\t\t\"End_Date\" : \"\",\n\t\t\t\"ScheduleType\" : \"Once\",\n\t\t\t\"StartTimeOnce\" : \"13:24:50\",\n\t\t\t\"Interval\" : \"Daily\",\n\t\t\t\"StartTimeDWM\" : \"\",\n\t\t\t\"customInterval_hour\" : \"0\",\n\t\t\t\"customInterval_min\" : \"0\",\n\t\t\t\"customInterval_sec\" : \"0\",\n\t\t\t\"CustomStartTime\" : \"\",\n\t\t\t\"CustomEndTime\" : \"\",\n\t\t\t\"Operation\" : \"HTTPRequest\",\n\t\t\t\"ScheduledURL\" : \"http://{0}:{1}/{2}\".format(utility.local_address(), \n\t\t\t\t\t\t\t\t\t\t\tstate.external_port, cfm_file),\n\t\t\t\"Username\" : \"\",\n\t\t\t\"Password\" : \"\",\n\t\t\t\"Request_Time_out\" : \"\",\n\t\t\t\"proxy_server\" : \"\",\n\t\t\t\"http_proxy_port\" : \"\",\n\t\t\t\"publish\" : \"1\",\n\t\t\t\"publish_file\" : root + \"\\\\\" + cfm_file,\n\t\t\t\"adminsubmit\" : \"Submit\",\n\t\t\t\"taskNameOrig\" : \"\"\n\n\t\t}\n\n\tresponse = utility.requests_post(base+uri, data=data, cookies=cookie)\n\tif response.status_code is 200:\n\n\t\treturn True", "def add_task():\n # get values from user\n responses = accept_inputs([\"Task label\", \"Short task description\", \"Parent task label\"])\n # insert into db\n query_no_results(\"insert into task values(?, ?, ?)\",\n [responses[\"Task label\"], responses[\"Short task description\"], responses[\"Parent task label\"]])\n print(\"New task created\")", "def tasks():", "def create_task(option):\n if option == 'suite':\n form = _get_task_suite_form()\n elif option == 'log':\n form = _get_task_log_form()\n else:\n flash('Invalid request!', 'danger')\n return redirect(url_for('view_index'))\n\n if not form.validate_on_submit():\n flash('Invalid submission!', 'warning')\n return view_index(form)\n else:\n try:\n # Create new task with user and file and submit it to DB\n user = User.query.filter_by(name=session['user_name']).first()\n software = Software.query.filter_by(name=form.software.data).first()\n\n if option == 'suite':\n files = File.query.filter(File.path.startswith(form.file.data)).filter_by(stored=True).all()\n\n # Filter out subfolders\n files = [file for file in files if os.path.split(file.path)[0] == form.file.data]\n\n if not files:\n raise FileNotFoundError(2, 'No files found in suite.')\n\n else:\n files = [File.query.filter_by(name=form.file.data, stored=True).first()]\n\n # Create and insert new task\n new_task = Task(status_id=1, user=user, software=software)\n [new_task.files.append(file) for file in files]\n\n db.session.add(new_task)\n db.session.commit()\n\n # Start new process\n app.config['OPS_PIPE_PARENT'][new_task.id], app.config['OPS_PIPE_CHILD'][new_task.id] = Pipe(duplex=False)\n app.config['OPS_PROCESS'][new_task.id] = Worker([file.path for file in new_task.files],\n new_task.software.path,\n new_task.id,\n get_log_path(new_task),\n app.config['OPS_LOCK'],\n app.config['OPS_PIPE_CHILD'][new_task.id])\n app.config['OPS_PROCESS'][new_task.id].start()\n app.logger.info('%s created new task %i.' 
% (user.name, new_task.id))\n flash('Task(s) created!', 'success')\n\n except Exception as xcpt:\n db.session.rollback()\n app.logger.exception('%s raised with file %s: \\n' + str(xcpt), session['user_name'], form.file)\n flash(str(xcpt), 'danger')\n return redirect(url_for('view_index'))", "def create_task(self, name, target, config=None, comment=\"\"):\n\n if not config:\n config = \"Full and fast\"\n\n request = \"\"\"<create_task>\n <name>%s</name>\n <comment>%s</comment>\n <config id=\"%s\"/>\n <target id=\"%s\"/>\n </create_task>\"\"\" % (name, comment, config, target)\n\n return self.make_xml_request(request, xml_result=True).get(\"id\")", "def create(self):\n return self.start()", "def task():\n\n\tprint('Example task executed.')", "def task_gen(self):\n pass", "def create_tasks(self):\n self.create_passport_task()\n\n self.create_visa_task()\n\n self.create_vaccines_task()\n self.create_malaria_task()\n\n self.create_weather_task()\n self.create_flight_needs_task()\n self.create_banking_task()\n\n self.create_insurance_task()\n\n self.create_systematic_tasks() # 3 tasks\n\n if self.trip.return_date_time is None or\\\n self.trip.return_date_time - self.trip.arrival_date_time > timedelta(days=14):\n\n self.create_long_travel_task()\n\n for task in self.tasks:\n task.auto = True\n\n return self.tasks", "def task(ctx, config):\n pass", "def newTask(name, description, assigner, id=None, priority=None, submitter_email=None, whose=None):\n if whose:\n user_id = jutdaapi.find_user(whose)\n if not user_id:\n raise ValueError('bad whose assignment: '+str(whose))\n #title = name + ' for: '+assigner.title()\n # that was the old scheme\n title = '('+assigner.title()+') '+name\n\n if priority != None:\n #priority = (int(priority) + 2) / 2\n priority = int(priority)\n RA_queue = 3\n #if assigner != 'no one':\n # description += '<tasktrackermeta assigner=\"'+assigner+'\"/>'\n if isinstance(id, str):\n description += '<tasktrackermeta id=\"'+id+'\"/>'\n ticket_id = jutdaapi.create_ticket(RA_queue, title, description,\n priority=priority, submitter_email=submitter_email)\n # Is there a race condition here? 
In this kind of database\n # I would assume not.\n time.sleep(1)\n ticket = jutdaapi.get_detailed_ticket(ticket_id)\n t = ticketToTask(ticket)\n return t", "def test_user_01_newtask(self):\r\n # Del previous TaskRuns\r\n self.create()\r\n self.del_task_runs()\r\n\r\n # Register\r\n self.register()\r\n self.signin()\r\n res = self.app.get('api/app/1/newtask')\r\n data = json.loads(res.data)\r\n assert data['info'], data\r\n self.signout()", "def create_task(self, task_state, task_xml):\r\n\r\n tag_name = self.get_tag_name(task_xml)\r\n children = self.child_modules()\r\n task_descriptor = children['descriptors'][tag_name](self.system)\r\n task_parsed_xml = task_descriptor.definition_from_xml(etree.fromstring(task_xml), self.system)\r\n task = children['modules'][tag_name](\r\n self.system,\r\n self.location,\r\n task_parsed_xml,\r\n task_descriptor,\r\n self.static_data,\r\n instance_state=task_state,\r\n )\r\n return task", "def create(self, task_id, **options):\r\n return self(task_id, **options)", "def run(self, host=None):\n host = self.getFogHost(host)\n num = str(self.getHostNumber(host))\n url = self.baseURL+'host/'+num+'/task'\n try:\n requests.post(url, headers=self.header, json={\"taskTypeID\": 2})\n except Exception:\n sys.exit(1)", "def post(self, request, format=None):\n feedback = {\n 'permission': True\n }\n try:\n post_data = request.data\n serializer = task_serializer.InstantTaskSerializer(data=post_data, group=self.get_group())\n if serializer.is_valid():\n task = serializer.save()\n feedback['data'] = {\n 'code': 200,\n 'message': 'Instant task creation successfully!',\n 'info': {\n 'task_id': task.pk\n }\n }\n else:\n logger.info('Instant task parameters is not available: {}'.format(serializer.format_errors()))\n feedback['data'] = ErrorCode.parameter_invalid('instant_task_creation',\n reason=serializer.format_errors())\n except natrix_exception.NatrixBaseException as e:\n feedback['data'] = ErrorCode.sp_code_bug('Create instant has a bug: {}'.format(e.get_log()))\n logger.error(e.get_log())\n except Exception as e:\n natrix_exception.natrix_traceback()\n feedback['data'] = ErrorCode.sp_db_fault(str(e))\n\n return JsonResponse(data=feedback)", "def _add_task(self):\n server: Optional[Server] = None\n for i in range(len(self.servers)):\n if self.servers[i].has_space:\n server = self.servers[i]\n break\n if server:\n server.add_user(ttask=self.ttask)\n else:\n server = Server(umax=self.umax)\n server.add_user(ttask=self.ttask)\n self.servers.append(server)", "def octopus_task(self, msg, args):\r\n self.tasks.send_task_by_id(msg, args)", "def createNewTasks(_id):\n job = mongo.db.jobs.find_one({'_id': _id})\n tasks = job.get('data').get('tasks')\n for task in tasks:\n data = {\n 'name': task.get('name'),\n 'datetime': now(),\n 'status': 'ready',\n 'owner': job.get('owner'),\n 'priority': job.get('priority'),\n 'is_active': True,\n 'slave': None,\n 'last_activity': now(),\n 'started_on': None,\n 'finished_on': None,\n 'paused_on': None,\n 'logs': [],\n 'ctid': None,\n 'target_info': {},\n 'cancelled_on': None,\n 'progress': 0,\n 'job': job.get('_id'),\n 'proccess':\n {\n 'command': getRenderCommand(job.get('category')),\n 'cwd': task.get('cwd'),\n 'filepath': task.get('filepath'),\n 'target': task.get('target'),\n }\n }\n newTask = mongo.db.tasks.insert(data)\n ctid = addTaskToQueue(newTask)\n #updateTaskInfo(str(task['_id']['$oid']), status='ready', ctid=str(ctid))\n job['status'] = 'ready'\n mongo.db.jobs.update({'_id': _id}, job)\n\n return", "def create_task(self, 
task_body, req_context):\n design_ref = task_body.get('design_ref', None)\n node_filter = task_body.get('node_filter', None)\n action = task_body.get('action', None)\n\n if design_ref is None or action is None:\n raise errors.InvalidFormat(\n 'Task creation requires fields design_ref, action')\n\n task = self.orchestrator.create_task(design_ref=design_ref,\n action=action,\n node_filter=node_filter,\n context=req_context)\n\n task.set_status(hd_fields.TaskStatus.Queued)\n task.save()\n return task", "def new_task(data):\n rabbit_host = os.getenv('RABBIT_HOST', 'localhost')\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(rabbit_host)\n )\n channel = connection.channel()\n channel.basic_publish(\n exchange='',\n routing_key='task_queue',\n body=json.dumps(data),\n properties=pika.BasicProperties(\n delivery_mode=2, # make message persistent\n )\n )\n connection.close()", "def task_start_parsing():\n add_task(url_for(\"task_queue_users\"))\n add_task(url_for(\"task_clean_tmp_files\"))\n return OK_RESPONSE", "def create_task(*args, **kwargs):\n loop = asyncio.get_event_loop()\n return wrapped_create_task(loop.create_task, None, args, kwargs)", "def task_prepare_nodes(self, req, resp, json_data):\n action = json_data.get('action', None)\n\n if action != 'prepare_nodes':\n self.error(\n req.context,\n \"Task body ended up in wrong handler: action %s in task_prepare_nodes\"\n % action)\n self.return_error(resp,\n falcon.HTTP_500,\n message=\"Error\",\n retry=False)\n\n try:\n task = self.create_task(json_data, req.context)\n resp.text = json.dumps(task.to_dict())\n resp.append_header('Location',\n \"/api/v1.0/tasks/%s\" % str(task.task_id))\n resp.status = falcon.HTTP_201\n except errors.InvalidFormat as ex:\n self.error(req.context, ex.msg)\n self.return_error(resp,\n falcon.HTTP_400,\n message=ex.msg,\n retry=False)", "def run(self):\n if self.type_task == \"Api-request\":\n self.config = ConfigApiRequestTask(**self.dynamic_configs)\n self.task = ApiRequestTask(\n priority=0, # fixed priority\n config=self.config\n )\n elif self.type_task == 'Db':\n self.config = ConfigDbTask(self.dynamic_configs)\n self.task = DbTask(\n priority=0,\n config=self.config\n )\n elif self.type_task == 'File':\n self.config = ConfigFileTask(self.dynamic_configs)\n self.task = FileTask(\n priority=0,\n config=self.config\n )\n \n try:\n self.result = self.task.execute()\n except Exception as e:\n self.errors = str(e)\n self.logger.error(f'Error executing task: {self.errors}')\n return False\n \n res = self.save_into_db()\n return res", "def run_task(self) -> Task:", "def create_weather_task(self):\n try:\n climate = Climate.objects.get(country=self.a_country)\n self.tasks.append(self.trip.tasks.create(\n title=f\"Check climate in {self.a_country.name}\",\n comments=climate.description,\n category=TaskCategory.objects.get(name=\"Others\"),\n deadline=self.trip.departure_date_time - timedelta(days=3)\n ))\n except (Climate.DoesNotExist, Country.DoesNotExist):\n self.tasks.append(self.trip.tasks.create(\n title=\"Check meteo\",\n comments=\"Check weather conditions in \" + self.a_country.name +\\\n \" and prepare appropriate clothing\",\n category=TaskCategory.objects.get(name=\"Others\"),\n deadline=self.trip.departure_date_time - timedelta(days=3)\n ))", "def __init__(self):\n Task.__init__(self)", "def create_task(self, name, value):\n with self.db_lock:\n return self.rcon.hset(self.task_key, name, value)", "def setUp(self):\n self.t = Task()\n self.t(\"add one\")", "async def 
create_job(response: Response,\n request: Request,\n job: Job = Body(\n ...,\n example={\n \"id_video\": \"bbb_0.mp4\",\n \"bitrate\": 7000,\n \"speed\": \"ultrafast\",\n },\n )\n ): \n \n\n # get an ID and return to client\n id_job = mngr.getID()\n logger.debug(\"got id_job %s\" %id_job)\n resp = [\"http:/\"]\n resp.append(request.headers['host'])\n resp.append(id_job)\n response.headers[\"Location\"] = \"/\".join(resp)\n\n # create the task\n mngr.newJob(id_job, \n job.id_video, \n job.bitrate, \n job.speed)\n\n return id_job", "def create_task_description (r, msg) :\n\n task_descr = troy.TaskDescription ()\n task_descr.tag = \"%s\" % r\n task_descr.executable = '/bin/echo'\n task_descr.arguments = ['Hello', msg, r, '!']\n task_descr.working_directory = \"%(home)s/troy_demo\"\n\n return task_descr", "def new_task(self, appid):\r\n tasks = []\r\n for i in range(0, 10):\r\n tasks.append(Task(app_id=appid, state='0', info={}))\r\n db.session.add_all(tasks)\r\n db.session.commit()", "def _service_task(self):\n pass", "def test_task_creation(self):\n Task.objects.filter(status=Task.Status.AWAITING_PROCESSING).delete()\n\n project = self.projects['test_human_and_machine']\n self.assertEqual(Task.objects.filter(project=project).count(),\n 0)\n create_subsequent_tasks(project)\n\n # Human Task was created\n self.assertEqual(Task.objects.filter(project=project).count(),\n 1)\n\n human_step = self.workflow_steps['test_workflow_2']['step4']\n task = Task.objects.get(step=human_step, project=project)\n data = {'submit_key1': 'submit_val1'}\n assign_task(self.workers[0].id, task.id)\n\n # user 0 submits a task\n response = self._submit_assignment(self.clients[0], task.id, data=data)\n self.assertEqual(response.status_code, 200)\n\n # Machine Task was created\n self.assertEqual(Task.objects.filter(project=project).count(),\n 2)\n machine_step = self.workflow_steps['test_workflow_2']['simple_machine']\n machine_task_assignment = (\n TaskAssignment.objects\n .filter(task__step=machine_step,\n task__project=project)[0])\n\n self.assertEqual(machine_task_assignment.status,\n TaskAssignment.Status.SUBMITTED)\n\n self.assertEqual(machine_task_assignment.in_progress_task_data,\n {'json': 'simple'})\n\n self.assertEqual(machine_task_assignment.task.status,\n Task.Status.COMPLETE)", "def create():\n config = request.data\n return add_scheduling_block(config)", "def create(self, validated_data):\n return Task.objects.create(**validated_data)", "def submit_task(self, op_data):\n\n task_path = op_data['file_path']\n t = Task()\n t.task_id = '0'\n t.task_status = Global.get_status_separating()\n parser = xml.sax.make_parser()\n parser.setFeature(xml.sax.handler.feature_namespaces, 0)\n parser.setContentHandler(t)\n parser.parse(task_path)\n self.__task_set[t.task_id] = t\n self.__task_queue.put(t)\n logging.info(\"submitted task %s\\n\" % t.task_name)", "def create_task(event):\n manager = event.workbench.get_plugin('exopy.tasks')\n dialog = BuilderView(manager=manager,\n parent=event.parameters.get('parent_ui'),\n future_parent=event.parameters.get('future_parent'))\n result = dialog.exec_()\n if result:\n return dialog.config.build_task()\n else:\n return None", "def create_tasks(\n self,\n feature_ids: List,\n features: List,\n ) -> None:\n for i, feature_id in enumerate(feature_ids):\n task = Task(self, feature_id, features[i])\n self.tasks.append(task)\n self.numberOfTasks = len(self.tasks)", "def create_archive_task(self, args=None):\r\n input_args_dict = args\r\n 
input_args_dict.update({\"archive_filename\": (args.get('archive_filename')\r\n + '-' + str(calendar.timegm(time.gmtime())))\r\n })\r\n input_args_dict.update({\"filterby\": []})\r\n input_args_dict.update({\"ids\": \"\"})\r\n\r\n result = {\"Task\": \"CreateArchiveTask\", \"Status\": \"Started\", \"Error\": \"NoError\", \"JobID\": \"\",\r\n \"Start\": args.get('start'), \"End\": args.get('end'), \"P2Vurl\": \"\",\r\n \"FileName\": args['archive_filename']}\r\n\r\n datasource = self.hostname + \":\" + input_args_dict['archive_filename']\r\n\r\n start_time_in_ms = str(int(input_args_dict['start']) * 1000)\r\n end_time_in_ms = str(int(input_args_dict['end']) * 1000)\r\n\r\n p2v_url = f'{self.applianceurl}/vision2/pivotintovision/?datasources={datasource}' \\\r\n f'&title={result[\"FileName\"]}&start={start_time_in_ms}&end={end_time_in_ms}' \\\r\n f'&tools=trafficOverTime_by_app%2Cconversations_by_ipaddress'\r\n\r\n # Endace Filter order\r\n if input_args_dict['ip']:\r\n input_args_dict['filterby'].append(0)\r\n p2v_url = p2v_url + \"&ip=\" + input_args_dict['ip']\r\n if input_args_dict['src_host_list']:\r\n input_args_dict['filterby'].append(1)\r\n src_ip = ''\r\n for ip in input_args_dict['src_host_list']:\r\n src_ip = src_ip + \",\" + ip\r\n src_ip = src_ip[1:]\r\n p2v_url = p2v_url + \"&sip=\" + src_ip\r\n if input_args_dict['dest_host_list']:\r\n input_args_dict['filterby'].append(2)\r\n dest_ip = ''\r\n for ip in input_args_dict['dest_host_list']:\r\n dest_ip = dest_ip + \",\" + ip\r\n dest_ip = dest_ip[1:]\r\n p2v_url = p2v_url + \"&dip=\" + dest_ip\r\n if input_args_dict['src_port_list']:\r\n input_args_dict['filterby'].append(3)\r\n port = ''\r\n for sport in input_args_dict['src_port_list']:\r\n port = port + \",\" + sport\r\n port = port[1:]\r\n p2v_url = p2v_url + \"&sport=\" + port\r\n if input_args_dict['dest_port_list']:\r\n input_args_dict['filterby'].append(4)\r\n port = ''\r\n for dport in input_args_dict['dest_port_list']:\r\n port = port + \",\" + dport\r\n port = port[1:]\r\n p2v_url = p2v_url + \"&dport=\" + port\r\n if input_args_dict['protocol']:\r\n input_args_dict['filterby'].append(5)\r\n if input_args_dict['port']:\r\n input_args_dict['filterby'].append(6)\r\n p2v_url = p2v_url + \"&port=\" + input_args_dict['port']\r\n\r\n evid = EndaceVisionData(input_args_dict)\r\n with EndaceWebSession(app_url=self.applianceurl, username=self.username, password=self.password,\r\n cert_verify=self.cert_verify) as sess:\r\n # Extract list of rotationfiles datasources and exclude previously archived files\r\n rotfile_ids = []\r\n\r\n api = EndaceVisionAPIAdapter(sess)\r\n path = \"datasources\"\r\n rd = api.get(path)\r\n try:\r\n response = rd.json()\r\n except json.decoder.JSONDecodeError:\r\n raise Exception(f\"JsonDecodeError - path {path}\")\r\n else:\r\n if rd.status_code == 200:\r\n payload = response.get(\"payload\")\r\n for rotfile in payload:\r\n if rotfile[\"type\"] == \"rotation_file_v2\":\r\n rotfile_ids.append(rotfile[\"id\"])\r\n\r\n input_args_dict['ids'] = rotfile_ids\r\n\r\n path = \"archive/\"\r\n rp = api.post(path, json=evid.build_archive_data())\r\n if rp.status_code == 200:\r\n try:\r\n response = rp.json()\r\n except json.decoder.JSONDecodeError:\r\n raise Exception(f\"JsonDecodeError - path {path}\")\r\n else:\r\n meta = response.get(\"meta\", {})\r\n payload = response.get(\"payload\")\r\n if meta:\r\n meta_error = meta.get(\"error\")\r\n if meta_error is not None:\r\n if meta_error is not False:\r\n result['Status'] = \"Failed\"\r\n result['Error'] = 
str(meta_error)\r\n else:\r\n if payload is not None:\r\n result['JobID'] = payload\r\n result['P2Vurl'] = f'[Endace PivotToVision URL]({p2v_url})'\r\n else:\r\n result['Status'] = \"Failed\"\r\n result['Error'] = f\"ServerError - empty payload data from {path}\"\r\n else:\r\n result['Status'] = \"Failed\"\r\n result['Error'] = f\"ServerError - empty meta data from {path}\"\r\n else:\r\n result['Status'] = \"Failed\"\r\n result['Error'] = f\"HTTP {rd.status_code} to /{path}\"\r\n else:\r\n result['Status'] = \"Failed\"\r\n result['Error'] = f\"HTTP {rd.status_code} to /{path}\"\r\n\r\n if result['Status'] == 'Failed':\r\n self.handle_error_notifications(result['Error'])\r\n return result", "def create():\n pass", "def task_prepare_site(self, req, resp, json_data):\n action = json_data.get('action', None)\n\n if action != 'prepare_site':\n self.error(\n req.context,\n \"Task body ended up in wrong handler: action %s in task_prepare_site\"\n % action)\n self.return_error(resp,\n falcon.HTTP_500,\n message=\"Error\",\n retry=False)\n\n try:\n task = self.create_task(json_data, req.context)\n resp.text = json.dumps(task.to_dict())\n resp.append_header('Location',\n \"/api/v1.0/tasks/%s\" % str(task.task_id))\n resp.status = falcon.HTTP_201\n except errors.InvalidFormat as ex:\n self.error(req.context, ex.msg)\n self.return_error(resp,\n falcon.HTTP_400,\n message=ex.msg,\n retry=False)", "def create_task(author, title, text, **kwargs):\n mc = MathContent(text=text)\n mc.save()\n task = Task(author=author, name=title, content=mc, **kwargs)\n task.save()\n return task", "def create(self):\n ...", "def create(task):\n tname = task.get(\"tname\")\n # user cannot create task without name\n\n # Does the new task have a name? If no we can't insert it.\n # Can we insert it?\n if tname is not None:\n\n # Create a person instance using the schema and the passed in person\n schema = TaskListSchema()\n print(task)\n new_task = schema.load(task, session=db.session).data\n\n # Add the person to the database\n db.session.add(new_task)\n db.session.commit()\n\n # Serialize and return the newly created person in the response\n data = schema.dump(new_task).data\n\n return data, 201\n\n # Otherwise, nope, person exists already\n else:\n abort(409, \"Task needs a name\".format(tname=tname),)", "def Add(self, user, args):\n\n # Example tasks:\n # {'description':'', 'project-file':'C:/autoexec.bat', 'date-time':'12:12:12', 'force':'0', 'time-limit':'0'}\n # {'description':'', 'project-file':'C:/autoexec.bat', 'date-time':'Wednesday 12:12:12', 'force':'0', 'time-limit':'0'}\n # {'description':'', 'project-file':'C:/autoexec.bat', 'date-time':'2012-12-12 12:12:12', 'force':'1', 'time-limit':'0'}\n\n # If argument is a string\n if type(args) == type(str()):\n task = urlparse.parse_qs(args)\n # If argument is a valid dict\n elif type(args) == type(dict()):\n task = args\n else:\n msg = 'Add task: Invalid type of argument for add task: `{0}` !'.format(type(args))\n log.error(msg)\n return '*ERROR* ' + msg\n\n # if not self.conn:\n # print('Cannot add task! Central Engine connection not available !')\n # return False\n # elif self.conn.get_user_variable(user, 'status') == False:\n # print('Cannot add task! 
Invalid username `{0}` !'.format(user))\n # return False\n\n descrip = task.get('description')\n proj_file = task.get('project-file')\n proj_dt = task.get('date-time')\n proj_force = task.get('force')\n time_limit = task.get('time-limit')\n\n if not os.path.isfile(proj_file):\n msg = 'Add task: Invalid file path `{0}` !'.format(proj_file)\n log.error(msg)\n return '*ERROR* ' + msg\n\n dt, proj_type = _fix_date(proj_dt)\n if not dt: return False\n\n # Duplicate dates?\n if proj_dt in [v['date-time'] for v in self.tasks.values()]:\n msg = 'Add task: Duplicate date-time: `{0}` !'.format(proj_dt)\n log.error(msg)\n return '*ERROR* ' + msg\n\n # If force is not valid, reset it. By default, force is enabled.\n if proj_force != '0':\n proj_force = '1'\n\n try:\n time_limit = int(time_limit)\n except:\n log.error('Add task: Invalid Time-limit number: `{0}` ! Will default to ZERO.'.format(time_limit))\n time_limit = 0\n if time_limit < 0:\n time_limit = 0\n\n # This can only be executed by 1 thread at a time,\n # so there will never be 2 threads that create tasks at the same time\n with self.acc_lock:\n\n created_time = datetime.now().strftime('%Y-%m-%d %H:%M:%S.%f')\n\n task_fixed = {\n 'user' : user,\n 'description' : descrip,\n 'project-file': proj_file,\n 'date-time' : proj_dt,\n 'force' : proj_force,\n 'time-limit' : time_limit,\n 'proj-type' : proj_type\n }\n\n self.tasks[created_time] = task_fixed\n\n log.debug('Created {proj-type} task for user {user} :: File `{project-file}`, activation date '\n '`{date-time}`, force `{force}`, time limit `{time-limit}`.\\n'.format(**task_fixed))\n\n self._save()\n\n return created_time", "async def create_task(self, coro: Callable, callback=None, **kwargs) -> Future:\n # get stuff we'll need to fake scheduler call\n sched_data = {\n \"id\": uuid.uuid4().hex,\n \"name\": self.name,\n \"objectid\": self.AD.app_management.objects[self.name][\"id\"],\n \"type\": \"scheduler\",\n \"function\": callback,\n \"pin_app\": await self.get_app_pin(),\n \"pin_thread\": await self.get_pin_thread(),\n }\n\n def callback_inner(f):\n try:\n # @todo : use our own callback type instead of borrowing\n # from scheduler\n kwargs[\"result\"] = f.result()\n sched_data[\"kwargs\"] = kwargs\n self.create_task(self.AD.threading.dispatch_worker(self.name, sched_data))\n\n # callback(f.result(), kwargs)\n except asyncio.CancelledError:\n pass\n\n f = asyncio.create_task(coro)\n if callback is not None:\n self.logger.debug(\"Adding add_done_callback for future %s for %s\", f, self.name)\n f.add_done_callback(callback_inner)\n\n self.AD.futures.add_future(self.name, f)\n return f", "def create(self):\n\n pass", "def create_additional_tasks(testcase):\n # No need to create progression task. 
It is automatically created by the cron\n # handler.\n task_creation.create_impact_task_if_needed(testcase)\n task_creation.create_regression_task_if_needed(testcase)\n task_creation.create_symbolize_task_if_needed(testcase)\n task_creation.create_variant_tasks_if_needed(testcase)", "def create(self):", "def setup_task(self, *args, **kwargs):\n pass", "def create_malaria_task(self):\n if not self.a_country is self.d_country and self.a_country.malaria_presence:\n self.tasks.append(self.trip.tasks.create(\n title=\"Protection against mosquitoes\",\n comments=\"Insect repellent, insecticide-treated bednet and pre-treating clothing\",\n category=TaskCategory.objects.get(name=\"Health\"),\n deadline=self.trip.departure_date_time - timedelta(days=3)\n ))", "def createTasks():\n tickets = jutdaapi.get_tickets(queues=[3]) # this works better (still not\n # perfect) if list results is set to 1000 in jutda user settings\n tasks = []\n for ticket in tickets:\n tasks.append(ticketToTask(ticket))\n return tasks", "def __init__(self, task_type, task):\n self.task = task\n self.task_type = task_type", "async def create_upload_file( background_tasks: BackgroundTasks, file: UploadFile = File(...), db : Session = Depends(get_db)):\n background_tasks.add_task(process_acti, file)\n return {\"status\": \"success\"}", "def create_master_random_taskname(self, pom, Nworkers, user_name=None, user_password='Tester', user_org='Musketeer', task_name='Test', random_taskname=True):\r\n self.pom = pom\r\n self.Nworkers = Nworkers\r\n config = 'cloud'\r\n if random_taskname:\r\n rword = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(5))\r\n version = '_' + rword\r\n else:\r\n version = '_v2'\r\n task_name += version\r\n self.task_name = task_name\r\n user_password += version\r\n \r\n if user_name is None:\r\n user_name = 'ma' + version\r\n\r\n fflapi.create_user(user_name, user_password, user_org, self.credentials_filename)\r\n ffl.Factory.register(config, fflapi.Context, fflapi.User, fflapi.Aggregator, fflapi.Participant)\r\n context_master = ffl.Factory.context(config, self.credentials_filename, user_name, user_password, encoder = serializer.Base64Serializer)\r\n \r\n # Create task\r\n task_definition = {\"task_name\": task_name,\r\n \"owner\": user_name, \r\n \"quorum\": self.Nworkers, \r\n \"POM\": self.pom,\r\n \"model_type\": \"None\", \r\n }\r\n \r\n ffl_user_master = ffl.Factory.user(context_master)\r\n with ffl_user_master:\r\n try:\r\n result = ffl_user_master.create_task(task_name, ffl.Topology.star, task_definition)\r\n except Exception as err:\r\n print(str(err).split(':')[1])\r\n\r\n '''\r\n with ffl_user_master:\r\n try:\r\n ffl_user_master.create_user(user_name, user_password, user_org)\r\n except Exception as err:\r\n print(str(err).split(':')[1])\r\n\r\n context_master = ffl.Factory.context(config, self.credentials_filename, user_name, user_password, encoder = serializer.Base64Serializer)\r\n ffl_user_master = ffl.Factory.user(context_master)\r\n '''\r\n\r\n # We write to disk the name of the task, to be read by the workers. 
In the real system, \r\n # the task_name must be communicated by other means.\r\n with open('current_taskname.txt', 'w') as f:\r\n f.write(task_name)\r\n\r\n self.aggregator = ffl.Factory.aggregator(context_master, task_name=task_name)\r\n #return context_master, task_name\r\n return self.aggregator", "def create_flight_needs_task(self):\n duration = self.trip.arrival_date_time - self.trip.departure_date_time\n if duration > timedelta(hours=2):\n self.tasks.append(self.trip.tasks.create(\n title=\"Flight Must Have !\",\n comments=\"It's a long flight ! Don't forget your earplugs and your sleep mask.\",\n category=TaskCategory.objects.get(name=\"Others\"),\n deadline=self.trip.departure_date_time - timedelta(days=1)\n ))\n else:\n self.tasks.append(self.trip.tasks.create(\n title=\"Flight Must Have !\",\n comments=\"Take some food and some drinks for your flight\",\n category=TaskCategory.objects.get(name=\"Others\"),\n deadline=self.trip.departure_date_time - timedelta(days=1)\n ))", "def save(self):\n if self.connection.task(self.name):\n raise ValueError(\"Task already exists\")\n\n self.connection.create_task(self.name, self.__data)", "def __init__(self,target, name = \"\", prio = 10, period = 0, time2run = 0):\n Task.taskid += 1\n self.tid = Task.taskid # Task ID\n self.target = target # create coroutine from given generator\n self.params = None # Value to send/receive\n self.prio = prio\n if name == \"\":\n self.name = \"task_%d\" % self.tid\n else:\n self.name = name\n self.period = period # zero: run now\n # negative: run once\n # positive: run at interval\n self.time2run = time.ticks_ms();\n if time2run>0:\n self.time2run += time2run\n else: \n self.time2run += period\n log.debug(\"Created task %s %d \", self.name,self.tid)\n self.target.send(None)", "def post(self):\n\n from jinjamator.task.celery import run_jinjamator_task\n from jinjamator.daemon.database import db\n\n relative_task_path = request.endpoint.replace(\n \"api.\", \"\"\n )\n data = request.get_json()\n job_id = str(uuid.uuid4())\n user_id = g._user[\"id\"]\n\n job = run_jinjamator_task.apply_async(\n [\n relative_task_path,\n data,\n data.get(\"output_plugin\", \"console\"),\n user_id,\n ],\n task_id=job_id,\n created_by_user_id=user_id,\n )\n\n db_job = list(\n db.session.query(DB_Job).filter(\n DB_Job.task_id == job.id\n )\n )\n db_job = db_job and db_job[0]\n if not db_job:\n db_job = DB_Job(job.id)\n db_job.status = \"SCHEDULED\"\n db_job.configuration = data\n db_job.jinjamator_task = relative_task_path\n db_job.created_by_user_id = user_id\n db.session.add(db_job)\n db.session.flush()\n db.session.commit()\n\n return jsonify({\"job_id\": job.id})", "def create(self):\n pass", "def create(self):\n pass" ]
[ "0.8675896", "0.75931495", "0.7256835", "0.71343005", "0.7059578", "0.70504224", "0.70264506", "0.7024704", "0.69823843", "0.69811654", "0.695801", "0.69416815", "0.68903977", "0.68586636", "0.6837721", "0.6830297", "0.6803687", "0.67863655", "0.67849886", "0.6761924", "0.67410105", "0.6721234", "0.672069", "0.6717473", "0.66953176", "0.6659768", "0.6621724", "0.6621724", "0.6619026", "0.6619026", "0.6613936", "0.66030127", "0.66030127", "0.65953696", "0.65793073", "0.65757835", "0.6572582", "0.65488154", "0.6511043", "0.651101", "0.65034354", "0.6501048", "0.6497039", "0.6485694", "0.6472883", "0.6460683", "0.6455778", "0.6445231", "0.6444475", "0.6419872", "0.64181066", "0.6412722", "0.64075565", "0.63951534", "0.63911843", "0.63900596", "0.6370082", "0.63595027", "0.6356199", "0.6354191", "0.63489616", "0.632444", "0.6318324", "0.6287889", "0.62666947", "0.62639034", "0.62611675", "0.62525296", "0.62438595", "0.62427884", "0.62427545", "0.62314034", "0.62302566", "0.6221987", "0.6221022", "0.62183183", "0.6209463", "0.618544", "0.6184217", "0.61723745", "0.6168904", "0.6155791", "0.61459464", "0.6140123", "0.613451", "0.61283463", "0.61106175", "0.61086327", "0.6105372", "0.610533", "0.610381", "0.61035603", "0.6100463", "0.6097595", "0.6096712", "0.6091173", "0.6088435", "0.6080306", "0.6057885", "0.6057885" ]
0.62833476
64
Update existing task on the server
def update(self, server): return server.put( 'task_admin', self.as_payload(), replacements={ 'slug': self.__challenge__.slug, 'identifier': self.identifier})
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def update_task(self, task):\n create = task.id == 0\n\n xml = self._serialise_task(task)\n\n method = ['PUT','POST'][create]\n\n if create:\n url = \"%s/tasks?%s\" % \\\n (self._get_base_url(), self._get_url_params())\n else:\n url = \"%s/tasks/%s?%s\" % \\\n (self._get_base_url(), task.id, self._get_url_params())\n\n headers = { \"Accept\":\"application/xml\",\n \"Content-Type\":\"application/xml\" }\n self.__conn.request(method, url, xml, headers) \n response = self.__conn.getresponse()\n\n data = response.read()\n\n if not response.status == 200:\n raise Exception(\"Could not update/create task.\"\\\n \" Response was [%s]: %s\" % (response.status, data))\n\n return self._parse_task(ET.fromstring(data))", "def update_task(self, name, fields):\n pass", "def update_task(self):\n row_id = self.get_valid_id('update')\n\n if row_id == -1:\n return\n\n task_title = self.display.ask_user_title()\n task_description = self.display.ask_user_description()\n task_due = self.display.ask_user_due()\n task_finished = self.display.ask_user_finished()\n\n # Call the db function to update data\n self.db_link.update_task(row_id, task_title, task_description, task_due, task_finished)\n self.display.print_success('\\nTask successfully updated.\\n')\n self.print_tasks()", "def put(self, guid):\n key = db.Key.from_path('Task', int(guid))\n task = db.get(key)\n if task != None:\n # cache current values before updates\n taskName = task.name\n taskType = task.type\n taskPriority = task.priority\n taskStatus = task.developmentStatus\n taskValidation = task.validation\n taskSubmitterId = task.submitterId\n taskAssigneeId = task.assigneeId\n taskEffort = task.effort\n taskProjectId = task.projectId\n taskDescription = task.description\n # collect the json from the request\n task_json = simplejson.loads(self.request.body)\n # if the user is a guest the project must be unallocated\n wantsNotifications = {\"true\": True, \"false\": False}.get(self.request.params['notify'].lower())\n currentUserId = self.request.params['UUID']\n cukey = db.Key.from_path('User', int(currentUserId))\n user = db.get(cukey)\n if str(user.role) != '_Guest' or (task_json.has_key('projectId') == False or task_json['projectId'] == None):\n # update the project record\n task = helpers.apply_json_to_model_instance(task, task_json)\n # save the updated data\n task.put()\n # Push notification email on the queue if we need to notify\n if notification.should_notify(currentUserId,task,\"updateTask\",wantsNotifications):\n taskqueue.add(url='/mailer', params={'taskId': int(guid), 'currentUUID': self.request.params['UUID'], 'action': \"updateTask\", 'name': taskName, 'type': taskType, 'priority': taskPriority, 'status': taskStatus, 'validation': taskValidation, 'submitterId': taskSubmitterId, 'assigneeId': taskAssigneeId, 'effort': taskEffort, 'projectId': taskProjectId, 'description': taskDescription})\n # return the same record...\n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(simplejson.dumps(task_json))\n else:\n self.response.set_status(401, \"Not Authorized\")\n else:\n self.response.set_status(404, \"Task not found\")", "def update_task(request, tid):\n try:\n slogger.task[tid].info(\"update task request\")\n labels = request.POST['labels']\n task.update(tid, labels)\n except Exception as e:\n slogger.task[tid].error(\"cannot update task\", exc_info=True)\n return HttpResponseBadRequest(str(e))\n\n return HttpResponse()", "def update(task_name, task):\n tasks.update(\n {'name': task_name}, {\"$set\": 
{'name': task.name, 'description': task.description, 'status': task.status}})", "def commit(self):\n data = self._to_json()\n resp = self._connection._put(get_url('task update', uuid=self._uuid), json=data)\n self._auto_update = self._last_auto_update_state\n if resp.status_code == 404:\n raise MissingTaskException(resp.json()['message'])\n\n raise_on_error(resp)", "def update(task_id, task):\n # Get the task requested from the db into session\n update_task = TaskList.query.filter(TaskList.task_id == task_id).one_or_none()\n\n # Did we find the task?\n if update_task is not None: \n\n # turn the passed in task into a db object\n schema = TaskListSchema()\n update = schema.load(task, session=db.session).data\n print(update)\n\n # Set the id to the task we want to update\n update.task_id = update_task.task_id\n\n # merge the new object into the old and commit it to the db\n db.session.merge(update)\n db.session.commit()\n\n # return updated task in the response\n data = schema.dump(update_task).data\n\n return data, 200\n # otherwise, nope, that's an error\n else:\n abort(\n 404, \"Task {task_id} not found\".format(task_id=task_id),\n )", "def update_task(project_id,task_id):\n data = request.get_json()\n project = Project.query.filter_by(id=project_id).first()\n if not project:\n return {\n 'success': False,\n 'message': f\"No project with the specified id {project_id} found.\",\n }\n\n else:\n permission = has_project_permission(project, g.user)\n old_task = Task.query.filter_by(id=task_id)\n if not old_task:\n abort(404, f'There is no task with ID of {task_id}.')\n\n if old_task:\n db_session.delete(old_task)\n db_session.commit()\n name = data['name']\n project_id = data['project_id']\n description = data['description']\n completion_status = data['completion_status']\n created_date = data['created_date']\n deadline_date = data['deadline_date']\n new_task = Task(\n name=name, description=description, completion_status=completion_status,\n created_date = created_date, deadline_date = deadline_date, project_id=project_id, created_by=g.user)\n db_session.add(new_task)\n db_session.commit()\n return {\n 'success': True,\n 'result': task_schema.dump(new_task),\n 'message': \"Successfully Updated the Task.\",\n }", "def put(self, id):\n req = api.payload\n try:\n result = update_task(\n get_db(),\n id,\n req[\"task\"],\n date.fromisoformat(req[\"due_by\"]),\n Status[req[\"status\"]],\n )\n return task_to_dict(result), 201\n except ValueError:\n api.abort(422, \"Invalid Status\")", "def update_task(self, name, fields):\n task = self.task(name)\n if not task:\n return False\n\n try:\n data = json.loads(task)\n data.update(fields)\n task = json.dumps(data)\n except ValueError:\n return False\n\n return self.create_task(name, task)", "def update_resources(self):\n\n self.update(True)\n resp = self._connection._patch(\n get_url('task update', uuid=self._uuid))\n\n if resp.status_code == 404:\n raise MissingTaskException(resp.json()['message'])\n raise_on_error(resp)\n\n self.update(True)", "def task_update(context, task_id, values, session=None):\n\n session = session or get_session()\n\n with session.begin():\n task_info_values = _pop_task_info_values(values)\n\n task_ref = _task_get(context, task_id, session)\n _drop_protected_attrs(models.Task, values)\n\n values['updated_at'] = timeutils.utcnow()\n\n _task_update(context, task_ref, values, session)\n\n if task_info_values:\n _task_info_update(context,\n task_id,\n task_info_values,\n session)\n\n return task_get(context, task_id, session)", "def 
updateTask(task):\n # First check to see if task exists\n detailed_ticket = jutdaapi.get_detailed_ticket(task._ticket_id)\n if not detailed_ticket:\n print 'task does not exist yet'\n return False\n # If so, check that things have actually changed (diff edited and orig)\n database_task = ticketToTask(detailed_ticket)\n if task._orig == task:\n return 'no changes to make'\n return True\n # If so, check that no one else has made changes (diff orig and database)\n if not database_task == task._orig:\n print 'task has changed in database; refresh task!'\n return False\n #priority = (task.priority + 2) / 2\n priority = task.priority\n if task.assigner not in ['no one', 'Unassigned']:\n title = '('+task.assigner.title()+') '+task.name\n #if task.name[-1] == ' ':\n # title = task.name + 'for: '+task.assigner.title()\n #else:\n # title = task.name + ' for: '+task.assigner.title()\n else:\n title = task.name\n description = task.description\n #if task.assigner != 'no one':\n # description += '<tasktrackermeta assigner=\"'+task.assigner+'\"/>'\n if 't' not in task.id:\n description += '<tasktrackermeta id=\"'+task.id+'\"/>'\n return jutdaapi.edit_ticket(task._ticket_id, title=title, queue=None, submitter_email=None,\n description=description, priority=priority)", "def update(self, task_model):\n raise NotImplementedError()", "def _update_task(self, st_task):\n\n # Get the estimated hours or None\n rv = self._update_trac_ticket(st_task)\n est = rv['est_hours']\n if est:\n try:\n est = float(est) * 60 * 60\n except:\n est = None\n\n # Update the store\n db_id = self.db.update_task(src_id = st_task.id,\n name = st_task.name,\n tags = ','.join(st_task.tags),\n owner = self.users.get_trac_user(st_task.owner),\n created = st_task.created_at,\n updated = st_task.updated_at,\n time_worked = st_task.hours * 60 * 60,\n time_estimated = est,\n completed = (None,st_task.completed_on)\\\n [st_task.complete])\n\n return db_id", "def post(self):\n task = self.params.task\n task.completed = not task.completed\n task.put()\n render_json(self, obj=task.as_json())", "def _update():\n\tquery = myTaskSession.query(WorkToolkitDB.db.Task)\n\n\tIDStr = myOpt.id\n\tIDs = re.split('\\s*,\\s*', IDStr)\n\n\tif len(IDs) == 0:\n\t\tprint('ERR: no add task input')\n\t\treturn 1\n\n\t#set default finsih_status if not given\n\tif not myOpt.f:\n\t\tmyOpt.f = 1\n\n\tfor ID in IDs:\n\t\tquery.filter(WorkToolkitDB.db.Task.id == ID).update({WorkToolkitDB.db.Task.finish_status: myOpt.f})\n\n\t\tif myOpt.vt:\n\t\t\tquery.filter(WorkToolkitDB.db.Task.id == ID).update({WorkToolkitDB.db.Task.version_time: myOpt.vt})\n\n\t#commit\n\tmyTaskSession.commit()\n\n\t\"\"\"\n\t#ERR: not given itsm id for update \n\tif not myOpt.id:\n\t\tprint('Error: no itsm id given for update finish_status to 1')\n\t\treturn 1\n\t#set default finsih_status if not given\n\tif not myOpt.f:\n\t\tmyOpt.f = 1\n\n\t\n\tquery.filter(WorkToolkitDB.db.Task.id == myOpt.id).update({'finish_status': myOpt.f})\n\tmyTaskSession.commit()\n\n\t\n\tdata = query.filter(WorkToolkitDB.db.Task.id == myOpt.id).all()\n\tfor record in data:\n\t\t\t#record_arr = record.to_array()\n\t\t\tpt.add_row(record.to_array())\n\n\tprint(pt)\n\t\"\"\"\n\n\treturn 0", "def db_update_task(task):\n sql = ('''\n UPDATE {}\n SET title = ? ,\n done = ? 
,\n done_date = ?\n WHERE id = ?\n '''.format(TABLE_NAME))\n cur = get_db()\n cur.execute(sql, task)\n cur.commit()", "def _task_update(context, task_ref, values, session=None):\n if 'deleted' not in values:\n values[\"deleted\"] = False\n task_ref.update(values)\n task_ref.save(session=session)\n return task_ref", "def _update(self, task):\n raise NotImplementedError(\"Subclasses should implement this!\")", "def put(self, dnzo_user, task):\n from google.appengine.ext import db\n from tasks_data.tasks import update_task_with_params, save_task\n \n task_was_archived = task.archived\n\n try:\n # hack to allow form-encoded PUT bodies to be accessed by self.request.get()\n self.request.method = \"POST\"\n update_task_with_params(dnzo_user, task, self.request)\n \n except AssertionError, strerror:\n self.bad_request(strerror.message)\n return\n \n finally:\n self.request.method = \"PUT\"\n \n archived_status_changed = task_was_archived != task.archived\n if archived_status_changed and not task.archived:\n from tasks_data.tasks import task_list_can_add_task\n if not task_list_can_add_task(task.task_list, task):\n self.bad_request(\"Can not unarchive task, too many active tasks in the list.\")\n return\n \n save_task(dnzo_user, task, archived_status_changed)\n # reload task\n task = db.get(task.key())\n \n self.json_response(task=task.to_dict())", "def update_task_info(self, url, path):\n try:\n db_connect = pymysql.connect(**self._taskdb_config)\n with db_connect.cursor() as cursor:\n task_id = self.get_task_id(url)\n cursor.execute(\"UPDATE mv SET localpath = '%s' WHERE taskid = '%s'\" % (path, task_id))\n db_connect.commit()\n except:\n db_connect.rollback()\n print(\"Fail updating task information, url=%s, path=%s\" % (url, path))\n else:\n print(\"Success updating task information, url=%s, path=%s\" % (url, path))\n finally:\n db_connect.close()", "def edit_task():\n # get task label from user\n responses = accept_inputs([\"Task label\"])\n label = responses[\"Task label\"]\n # check for existence of task\n results = query_with_results(\"select * from task where label = ?\", [label])\n if len(results) == 0:\n print(\"No task found with label '%s'.\" % label)\n return\n # the task exists, so ask the user for the new description\n responses = accept_inputs([\"New description\"])\n # update db\n query_no_results(\"update task set description = ? 
where label = ?\", [responses[\"New description\"], label])\n print(\"Task with label '%s' updated.\" % label)", "def test_update_task(self):\n rv = TEST_CLIENT.patch(\n \"/tasks/foo\",\n json={\n \"name\": \"foo 2\",\n },\n )\n result = rv.json()\n expected = {\n \"message\": \"The specified task does not exist\",\n \"code\": \"TaskNotFound\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 404)", "def update_task_by_id(task_id):\n try:\n updated_task = get_task_from_request_form(request)\n tasks = mongo.db.tasks\n\n result = tasks.update_one(\n {\"_id\": ObjectId(task_id)},\n {\n \"$set\": {\n \"title\": updated_task['title'],\n \"reference\": updated_task['reference'],\n \"description\": updated_task['description'],\n \"status\": updated_task['status'],\n \"visible\": updated_task['visible']\n }\n })\n return json_util.dumps(get_task_by_id(task_id))\n except:\n abort(400)", "def edit_task(id, values):\n cursor = conn.cursor()\n cursor.execute(\"UPDATE tasks SET task_name = %s, priority_of_task = %s, category = %s, is_done = %s ,deadline=%s WHERE id = %s;\",\n (values[0], values[1], values[2], values[3],values[4], id))\n\n conn.commit()\n print(\"Number of records updated:\", cursor.rowcount)", "def update_task(\n self,\n task_id: str,\n task_name: Optional[str] = None,\n project_id: Optional[str] = None,\n ) -> None:\n if len(self.find_task_runs(task_id=task_id)) != 0:\n raise MephistoDBException(\n \"Cannot edit a task that has already been run, for risk of data corruption.\"\n )\n if task_name in [\"\"]:\n raise MephistoDBException(f'Invalid task name \"{task_name}')\n with self.table_access_condition, self._get_connection() as conn:\n c = conn.cursor()\n try:\n if task_name is not None:\n c.execute(\n \"\"\"\n UPDATE tasks\n SET task_name = ?\n WHERE task_id = ?;\n \"\"\",\n (task_name, int(task_id)),\n )\n if project_id is not None:\n c.execute(\n \"\"\"\n UPDATE tasks\n SET project_id = ?\n WHERE task_id = ?;\n \"\"\",\n (int(project_id), int(task_id)),\n )\n except sqlite3.IntegrityError as e:\n if is_key_failure(e):\n raise EntryDoesNotExistException(e)\n elif is_unique_failure(e):\n raise EntryAlreadyExistsException(\n f\"Task name {task_name} is already in use\"\n )\n raise MephistoDBException(e)", "def _update_tasks(self, tasks):\n\n self._print('Updating tasks {} with {} ...'.format(self._tasks, tasks))\n\n self._tasks.update(tasks)", "def tasks_update(cls, app):\r\n\r\n try:\r\n tasks_info = {}\r\n tasks = app.db.session.query(SpiderTask).all()\r\n except Exception as err:\r\n print(err)\r\n else:\r\n for task in tasks:\r\n tasks_info[task.id] = task.to_json()\r\n return tasks_info\r\n finally:\r\n app.db.session.close()", "def update(self, flushcache=False):\n if self._uuid is None:\n return\n\n now = time.time()\n if (now - self._last_cache) < self._update_cache_time and not flushcache:\n return\n\n resp = self._connection._get(\n get_url('task update', uuid=self._uuid))\n if resp.status_code == 404:\n raise MissingTaskException(resp.json()['message'])\n\n raise_on_error(resp)\n self._update(resp.json())\n self._last_cache = time.time()\n self._is_summary = False", "def test_update(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n\n self.my_task.title = 'foo'\n key = self.task_storage.update(self.my_task)\n new_task = self.task_storage.find(key)\n\n self.assertEqual(self.my_task, new_task)", "def update(task_id):\n data = request.get_json()\n try:\n if \"status\" in data:\n db_helper.update_status_entry(task_id)\n result = 
{'success': True, 'response': 'Status Updated'}\n elif \"first_name\" in data:\n db_helper.update_belt_entry(task_id, data)\n result = {'success': True, 'response': 'Task Updated'}\n else:\n result = {'success': True, 'response': 'Nothing Updated'}\n except:\n result = {'success': False, 'response': 'Something went wrong'}\n\n return jsonify(result)", "def test_update_task_exists(self):\n task_id = util.MOCK_UUID_4\n\n rv = TEST_CLIENT.patch(\n f\"/tasks/{task_id}\",\n json={\n \"name\": \"task-5\",\n },\n )\n result = rv.json()\n expected = {\n \"message\": \"a task with that name already exists\",\n \"code\": \"TaskNameExists\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 400)", "def edit_task(task_id, form):\n db.task.replace_one(\n {\n '_id': ObjectId(task_id)\n },\n {\n 'title': form.title.data,\n 'description': form.description.data,\n 'status': form.status.data,\n 'priority': form.priority.data,\n 'date_added': datetime.datetime.now().timestamp()\n }\n )", "def edit_task(id):\n\n if not id:\n raise InvalidAPIUsage(\"id is required\")\n\n content = get_content_or_400(request)\n\n collection = get_db_collection()\n\n task = get_task_or_404(collection, id)\n\n collection.update_one({\"_id\": task[\"_id\"]}, {\"$set\": {\"content\": content}})\n\n response = jsonify()\n response.status_code = 200\n return response", "def _task_info_update(context, task_id, values, session=None):\n session = session or get_session()\n task_info_ref = _task_info_get(context, task_id, session=session)\n if task_info_ref:\n task_info_ref.update(values)\n task_info_ref.save(session=session)\n return _task_info_format(task_info_ref)", "def update_task(request):\n task_id = request.POST.get('task_id', 0)\n modal_description = request.POST.get('description')\n modal_due_date = request.POST.get('dueDate')\n modal_name = request.POST.get('name')\n modal_priority = request.POST.get('priority')\n\n try:\n search_task = task_models.Task.query.filter(task_models.Task.id == task_id).first()\n except NoResultFound:\n return HttpResponse(simplejson.dumps({'success': False}))\n\n search_task.update(user=request.user, lastModifiedBy=request.user.id, lastModified=str(datetime.utcnow()),\n description=modal_description, dueDate=modal_due_date, name=modal_name, priority=modal_priority)\n\n return JsonResponse({\n 'lastModifiedBy': request.user.id,\n 'lastModified': str(datetime.utcnow())\n })", "def fusion_api_update_task(self, body, uri, api=None, headers=None):\n return self.task.update(body, uri, api, headers)", "def taskdetail_update(td_id, values):\n return IMPL.taskdetail_update(td_id, values)", "def assign_task(user_name, task_name, work_server_ip):\r\n\r\n database_handler.update_records(\"current_tasks\",\r\n {\"server_ip\": work_server_ip, \"Task_status\": TaskStatusNames.in_progress.value},\r\n condition=\"Task_name=$? 
and user_name=$?\", code_args=[task_name, user_name])", "def _update_task(cls, workbook, task, state, task_output):\n task_spec = workbook.tasks.get(task[\"name\"])\n task_runtime_context = task[\"task_runtime_context\"]\n\n # Compute the outbound_context, state and exec_flow_context.\n outbound_context = data_flow.get_outbound_context(task, task_output)\n state, task_runtime_context = retry.get_task_runtime(\n task_spec, state, outbound_context, task_runtime_context)\n\n # Update the task.\n update_values = {\"state\": state,\n \"output\": task_output,\n \"task_runtime_context\": task_runtime_context}\n task = db_api.task_update(task[\"id\"], update_values)\n\n return task, outbound_context", "def save(self):\n if self.connection.task(self.name):\n raise ValueError(\"Task already exists\")\n\n self.connection.create_task(self.name, self.__data)", "def edit_task(self,tid, **kwargs):\n self.task_controller.edit(tid, **kwargs)", "def Change(self, key, args):\n\n if not key in self.tasks:\n msg = 'Change task: Invalid task key `{0}` !'.format(key)\n log.error(msg)\n return '*ERROR* ' + msg\n\n # If argument is a string\n if type(args) == type(str()):\n task = urlparse.parse_qs(args)\n # If argument is a valid dict\n elif type(args) == type(dict()):\n task = args\n else:\n msg = 'Change task: Invalid type of argument for add task: `{0}` !'.format(type(args))\n log.error(msg)\n return '*ERROR* ' + msg\n\n descrip = task.get('description')\n proj_file = task.get('project-file')\n proj_dt = task.get('date-time')\n proj_force = task.get('force')\n time_limit = task.get('time-limit')\n\n # If user wants to change project path\n if proj_file:\n if not os.path.isfile(proj_file):\n msg = 'Change task: Invalid file path `{0}` !'.format(proj_file)\n log.error(msg)\n return '*ERROR* ' + msg\n\n # If user wants to change Date-Time\n if proj_dt:\n dt, proj_type = _fix_date(proj_dt)\n if not dt: return False\n\n # If user wants to change Force\n if proj_force:\n if proj_force != '0':\n proj_force = '1'\n\n # If user wants to change time limit\n if time_limit:\n try:\n time_limit = int(time_limit)\n except:\n log.error('Change task: Invalid Time-limit number: `{0}` ! 
Will default to ZERO.'.format(time_limit))\n time_limit = 0\n if time_limit < 0:\n time_limit = 0\n\n # Preparing updated task\n task_fixed = {}\n\n if descrip:\n if descrip != self.tasks[key]['description']:\n task_fixed['description'] = descrip\n\n if proj_file:\n task_fixed['project-file'] = proj_file\n\n if proj_dt:\n task_fixed['date-time'] = proj_dt\n task_fixed['proj-type'] = proj_type\n\n if proj_force:\n if proj_force != self.tasks[key]['force']:\n task_fixed['force'] = proj_force\n\n if time_limit is not None:\n if time_limit != self.tasks[key]['time-limit']:\n task_fixed['time-limit'] = time_limit\n\n # This can only be executed by 1 thread at a time,\n # so there will never be 2 threads that create tasks at the same time\n with self.acc_lock:\n\n self.tasks[key].update(task_fixed)\n\n log.debug('Updated task {0} :: File `{project-file}`, activation date `{date-time}`,'\n ' type `{proj-type}`, force `{force}`, time limit `{time-limit}`.\\n'.format(key, **self.tasks[key]))\n\n self._save()\n\n return True", "def task_element(request, task_id):\n try:\n task = Task.objects.get(id=task_id)\n except Task.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == \"GET\":\n serializer = TaskSerializer(task)\n return Response(serializer.data)\n\n elif request.method == \"PUT\":\n data = json.loads(request.body)\n\n status = data.get(\"status\", \"\")\n task.status = status\n\n try:\n assignee = User.objects.get(username=data.get(\"assignee\", \"\"))\n task.assignee = assignee\n except:\n pass\n \n task.save()\n return JsonResponse({\"message\": \"Task updated successfully\"}, status=204)\n\n elif request.method == \"DELETE\":\n task.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)", "def task_update(self):\n try:\n self.task_stop()\n except:\n pass\n self.update()\n self.task_start()", "def task_update(request, id=None):\n instance = get_object_or_404(Todo, id=id)\n print(instance)\n print(instance)\n form = TaskForm(request.POST or None, instance=instance)\n if form.is_valid():\n instance = form.save(commit=False)\n instance.save()\n \n return redirect('lists:alllist')\n\n context = {\n \"desription\": instance.description,\n \"instance\": instance,\n \"form\":form,\n }\n return render(request, \"lists/update_task.html\", context)", "def command(task_id, message, time, project, category, links):\n task = storage.get_by_id(task_id)\n\n if not task:\n click.echo(f\"Task {task_id} not found.\")\n sys.exit(1)\n\n new_values = {\n 'message': message,\n 'time': time,\n 'project': project,\n 'category': category,\n 'links': links,\n }\n\n fields_changed = task.edit(new_values)\n\n if not fields_changed:\n click.echo(f\"No changes made to the task {task.id}.\")\n sys.exit(1)\n\n storage.save(task)\n\n fields_name = [field_name for field_name, *_ in fields_changed]\n click.echo(\n f\"The task {task_id} was edited with success. 
\"\n f\"Fields changed: {fields_name}\"\n )", "def need_update(self, task: Union[Task, Path]) -> bool:\n if isinstance(task, Path):\n return not task.exists()\n if task.name not in self._database:\n return True\n task_time = self._database.get(task.name)\n return task.need_rerun(task_time)", "def task_changed(self, fields):\n update = {}\n for field in fields:\n update[field] = self.__data[field]\n\n self.connection.update_task(self.name, update)", "def _update_object(self, taskrun):\r\n # validate the task and app for that taskrun are ok\r\n task = Task.query.get(taskrun.task_id)\r\n if task is None: # pragma: no cover\r\n raise Forbidden('Invalid task_id')\r\n if (task.app_id != taskrun.app_id):\r\n raise Forbidden('Invalid app_id')\r\n\r\n # Add the user info so it cannot post again the same taskrun\r\n if current_user.is_anonymous():\r\n taskrun.user_ip = request.remote_addr\r\n else:\r\n taskrun.user = current_user", "def update_task_run(self, task_run_id: str, is_completed: bool):\n with self.table_access_condition, self._get_connection() as conn:\n c = conn.cursor()\n try:\n c.execute(\n \"\"\"\n UPDATE task_runs\n SET is_completed = ?\n WHERE task_run_id = ?;\n \"\"\",\n (is_completed, int(task_run_id)),\n )\n except sqlite3.IntegrityError as e:\n if is_key_failure(e):\n raise EntryDoesNotExistException(e)\n raise MephistoDBException(e)", "def resurrectTask(task_id, ignoreStarted = False):\n \n [task] = Hydra_rendertask.fetch(\"where id = '%d'\" % task_id)\n if (\n task.status == 'K' or task.status == 'F' or \n (task.status == 'S' and ignoreStarted == True)\n ):\n task.status = 'R'\n task.host = None\n task.startTime = None\n task.endTime = None\n else:\n return True\n\n with transaction() as t:\n task.update(t)\n \n return False", "def update(self, name, task, filename=None, categories=None):\n assert name, \"Must input a valid dataset name.\"\n assert task, \"Must input a valid task name.\"\n self._assert_dataset_exists_in_cache(name)\n self._assert_task_exists_in_dataset_in_cache(name, task)\n\n self._update_task_filename(name, task, filename)\n self._update_task_categories(name, task, categories)\n\n self._update_cache_data()", "def update_task(pid):\n process = app.config['OPS_PROCESS'].get(pid)\n parent = app.config['OPS_PIPE_PARENT'].get(pid)\n\n if not parent:\n return False\n\n try:\n if parent.poll() or process.is_alive():\n if parent.poll():\n status_id = parent.recv()\n\n task = Task.query.filter_by(id=pid).first()\n if task.status_id != status_id:\n task.status_id = status_id\n db.session.commit()\n else:\n raise EOFError()\n return True\n except (OSError, EOFError, BrokenPipeError):\n pass\n # Close parent pipe and delete pipes from dict\n parent.close()\n del app.config['OPS_PIPE_PARENT'][pid], app.config['OPS_PIPE_CHILD'][pid]\n return False", "def put(self, task):\n self.async_vis.get_indices_ls.append(task.id)\n self.model.put(task)", "def update_put():\n try:\n update.launcher.start_async()\n except update.launcher.AlreadyInProgressError:\n # If an update is already in progress, treat it as success.\n pass\n except update.launcher.Error as e:\n return json_response.error(str(e)), 200\n return json_response.success()", "def _retrieve(cls, connection, uuid):\n resp = connection._get(get_url('task update', uuid=uuid))\n if resp.status_code == 404:\n raise MissingTaskException(resp.json()['message'])\n raise_on_error(resp)\n return Task.from_json(connection, resp.json())", "def __update_task(self, tasks, **extra_args):\n for task in tasks:\n assert isinstance(\n 
task, Task), \"Core.update_job_state: passed an argument\" \\\n \" which is not a `Task` instance.\"\n task.update_state()", "def test_update_email_task(self):\n\n user = fake_clients.FakeUser(\n name=\"[email protected]\", password=\"123\", email=\"[email protected]\"\n )\n\n setup_identity_cache(users=[user])\n\n url = \"/v1/actions/UpdateEmail\"\n headers = {\n \"project_name\": \"test_project\",\n \"project_id\": \"test_project_id\",\n \"roles\": \"project_admin,member,project_mod\",\n \"username\": \"[email protected]\",\n \"user_id\": user.id,\n \"authenticated\": True,\n }\n\n data = {\"new_email\": \"[email protected]\"}\n response = self.client.post(url, data, format=\"json\", headers=headers)\n\n self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)\n self.assertEqual(response.json(), {\"notes\": [\"task created\"]})\n\n new_token = Token.objects.all()[0]\n url = \"/v1/tokens/\" + new_token.token\n\n data = {\"confirm\": True}\n response = self.client.post(url, data, format=\"json\")\n self.assertEqual(response.status_code, status.HTTP_200_OK)\n self.assertEqual(user.name, \"[email protected]\")", "def _update_all_tasks(self) -> None:\n for task in self.tasks:\n task.update()", "def patch(self, id):\n try:\n task = update_status(get_db(), id, Status[api.payload[\"status\"]])\n if not task:\n api.abort(404, \"Invalid Task\")\n return task_to_dict(task)\n except ValueError:\n api.abort(422, \"Invalid Status\")", "def put(self):\n try:\n save_schedules_to_file(request.json['payload'])\n return 'Celery Beat schedules updated.'\n except Exception:\n logging.exception('Failed to update Celery Beat schedules!')\n raise", "def update_status(request):\n task_id = request.POST.get('task_id', 0)\n new_status = request.POST.get('new_status', 0)\n\n search_task = task_models.Task.query.filter(task_models.Task.id == task_id).first()\n if not search_task:\n return HttpResponse(simplejson.dumps({'success': False}))\n\n search_task.update(user=request.user, status=new_status, lastModifiedBy=request.user.id,\n lastModified=str(datetime.utcnow()))\n\n return JsonResponse({\n 'status': new_status,\n 'lastModifiedBy': request.user.id,\n 'lastModified': str(datetime.utcnow())\n })", "async def touch(self, task_id, increment):\n args = (task_id, increment)\n res = await self.conn.call(self.__funcs['touch'], args)\n return self._create_task(res.body)", "def run(self):\n modify_tasks = filter(self._task_filter, acm.FAelTask.Select(''))\n print([task.Name() for task in modify_tasks])\n for task in modify_tasks:\n #new_task = task.Clone()\n self._update(task)\n try:\n task.Commit()\n except:\n print('Skipping: Task already exists')", "def set_task(self, state, task_id=None):\n assert self._cluster\n task = self._get_task(task_id)\n if task is not False:\n self._set_task(state, task)\n return True\n return False", "def test_taskrun_update(self):\r\n admin = UserFactory.create()\r\n owner = UserFactory.create()\r\n non_owner = UserFactory.create()\r\n app = AppFactory.create(owner=owner)\r\n task = TaskFactory.create(app=app)\r\n anonymous_taskrun = AnonymousTaskRunFactory.create(task=task, info='my task result')\r\n user_taskrun = TaskRunFactory.create(task=task, user=owner, info='my task result')\r\n\r\n task_run = dict(app_id=app.id, task_id=task.id, info='another result')\r\n datajson = json.dumps(task_run)\r\n\r\n # anonymous user\r\n # No one can update anonymous TaskRuns\r\n url = '/api/taskrun/%s' % anonymous_taskrun.id\r\n res = self.app.put(url, data=datajson)\r\n assert 
anonymous_taskrun, anonymous_taskrun\r\n assert_equal(anonymous_taskrun.user, None)\r\n error_msg = 'Should not be allowed to update'\r\n assert_equal(res.status, '401 UNAUTHORIZED', error_msg)\r\n\r\n # real user but not allowed as not owner!\r\n url = '/api/taskrun/%s?api_key=%s' % (user_taskrun.id, non_owner.api_key)\r\n res = self.app.put(url, data=datajson)\r\n error_msg = 'Should not be able to update TaskRuns of others'\r\n assert_equal(res.status, '403 FORBIDDEN', error_msg)\r\n\r\n # real user\r\n url = '/api/taskrun/%s?api_key=%s' % (user_taskrun.id, owner.api_key)\r\n out = self.app.get(url, follow_redirects=True)\r\n task = json.loads(out.data)\r\n datajson = json.loads(datajson)\r\n datajson['link'] = task['link']\r\n datajson['links'] = task['links']\r\n datajson = json.dumps(datajson)\r\n url = '/api/taskrun/%s?api_key=%s' % (user_taskrun.id, owner.api_key)\r\n res = self.app.put(url, data=datajson)\r\n out = json.loads(res.data)\r\n assert_equal(res.status, '403 FORBIDDEN', res.data)\r\n\r\n # PUT with not JSON data\r\n res = self.app.put(url, data=task_run)\r\n err = json.loads(res.data)\r\n assert res.status_code == 403, err\r\n assert err['status'] == 'failed', err\r\n assert err['target'] == 'taskrun', err\r\n assert err['action'] == 'PUT', err\r\n assert err['exception_cls'] == 'Forbidden', err\r\n\r\n # PUT with not allowed args\r\n res = self.app.put(url + \"&foo=bar\", data=json.dumps(task_run))\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status'] == 'failed', err\r\n assert err['target'] == 'taskrun', err\r\n assert err['action'] == 'PUT', err\r\n assert err['exception_cls'] == 'AttributeError', err\r\n\r\n # PUT with fake data\r\n task_run['wrongfield'] = 13\r\n res = self.app.put(url, data=json.dumps(task_run))\r\n err = json.loads(res.data)\r\n assert res.status_code == 403, err\r\n assert err['status'] == 'failed', err\r\n assert err['target'] == 'taskrun', err\r\n assert err['action'] == 'PUT', err\r\n assert err['exception_cls'] == 'Forbidden', err\r\n task_run.pop('wrongfield')\r\n\r\n # root user\r\n url = '/api/taskrun/%s?api_key=%s' % (user_taskrun.id, admin.api_key)\r\n res = self.app.put(url, data=datajson)\r\n assert_equal(res.status, '403 FORBIDDEN', res.data)", "def edit_event_task(self):\n self.edit_event()", "def update_task_status(project_id, task_id):\n completion_status = request.get_json()['completion_status']\n\n project = Project.query.filter_by(id=project_id).first()\n if not project:\n return {\n 'success': False,\n 'message': f\"No project with the specified id {project_id} found.\",\n }\n \n else:\n permission = has_project_permission(project, g.user)\n task = Task.query.filter_by(id=task_id).first()\n if not task:\n abort(404, f'There is no task with ID of {task_id}.')\n if task:\n task.completion_status = completion_status\n db_session.add(task)\n db_session.commit()\n return {\n 'success': True,\n 'result': task_schema.dump(task),\n 'message': f\"Successfully Updated the Completion Status of {task.name}.\"\n }", "def test_task_update(self):\r\n admin = UserFactory.create()\r\n user = UserFactory.create()\r\n non_owner = UserFactory.create()\r\n app = AppFactory.create(owner=user)\r\n task = TaskFactory.create(app=app)\r\n root_task = TaskFactory.create(app=app)\r\n data = {'state': '1'}\r\n datajson = json.dumps(data)\r\n root_data = {'state': '4'}\r\n root_datajson = json.dumps(root_data)\r\n\r\n ## anonymous\r\n res = self.app.put('/api/task/%s' % task.id, data=data)\r\n 
assert_equal(res.status, '401 UNAUTHORIZED', res.status)\r\n ### real user but not allowed as not owner!\r\n url = '/api/task/%s?api_key=%s' % (task.id, non_owner.api_key)\r\n res = self.app.put(url, data=datajson)\r\n assert_equal(res.status, '403 FORBIDDEN', res.status)\r\n\r\n ### real user\r\n url = '/api/task/%s?api_key=%s' % (task.id, user.api_key)\r\n res = self.app.put(url, data=datajson)\r\n out = json.loads(res.data)\r\n assert_equal(res.status, '200 OK', res.data)\r\n assert_equal(task.state, data['state'])\r\n assert task.id == out['id'], out\r\n\r\n ### root\r\n res = self.app.put('/api/task/%s?api_key=%s' % (root_task.id, admin.api_key),\r\n data=root_datajson)\r\n assert_equal(res.status, '200 OK', res.data)\r\n assert_equal(root_task.state, root_data['state'])\r\n\r\n # PUT with not JSON data\r\n res = self.app.put(url, data=data)\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status'] == 'failed', err\r\n assert err['target'] == 'task', err\r\n assert err['action'] == 'PUT', err\r\n assert err['exception_cls'] == 'ValueError', err\r\n\r\n # PUT with not allowed args\r\n res = self.app.put(url + \"&foo=bar\", data=json.dumps(data))\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status'] == 'failed', err\r\n assert err['target'] == 'task', err\r\n assert err['action'] == 'PUT', err\r\n assert err['exception_cls'] == 'AttributeError', err\r\n\r\n # PUT with fake data\r\n data['wrongfield'] = 13\r\n res = self.app.put(url, data=json.dumps(data))\r\n err = json.loads(res.data)\r\n assert res.status_code == 415, err\r\n assert err['status'] == 'failed', err\r\n assert err['target'] == 'task', err\r\n assert err['action'] == 'PUT', err\r\n assert err['exception_cls'] == 'TypeError', err", "def update(self, *args, **kwargs):\n\n print(\"\\nIn MOCK ALGO OBSERVER....\")\n\n if 'remaining_tasks' in kwargs:\n\n remaining_tasks = len(kwargs['remaining_tasks'])\n\n print(\"\\tThere are {} remaining tasks\".format(remaining_tasks))\n print(\"\\tIs {} less than {}? {}\".format(remaining_tasks, min_tasks, (remaining_tasks < min_tasks)))\n\n # If we don't have the minimum number of hits out...\n if remaining_tasks < min_tasks:\n print(\"\\tRefilling queue with {} new task(s)\".format(min_tasks - remaining_tasks))\n # Fill up the tasks again\n for t in range(min_tasks - remaining_tasks):\n new_task = make_rand_task()\n tasks.append(new_task)\n\n actAMT.init_tasks(tasks, hit_type_init_file)\n del tasks[:]\n\n if 'completed_task' in kwargs:\n add_to_db(kwargs['completed_task'])", "def instant(self):\n if self._uuid is None:\n return\n\n resp = self._connection._post(get_url('task instant', uuid=self._uuid),\n json=None)\n\n if resp.status_code == 404:\n raise MissingTaskException(resp.json()['message'])\n raise_on_error(resp)\n\n self.update(True)", "def edit_task_name(entry):\n entry.task_name = get_task_name()\n entry.save()\n input(\"Edit successful. 
\")\n return entry", "def test_update_no_note(self):\n self.my_task.notes = None\n self.my_task.key = self.task_storage.add(self.my_task)\n\n self.my_task.title = 'foo'\n key = self.task_storage.update(self.my_task)\n new_task = self.task_storage.find(key)\n\n self.assertEqual(self.my_task, new_task)", "def put(self, task):\n self.put_id += 1\n self.task_queue.put(task)", "def updateData(conn, task):\n # updateData(create_connection(), (20000.200, \"BTCUSDT\"))\n sql = ''' UPDATE criptomonedas\n SET price = ?\n WHERE symbol = ?'''\n cur = conn.cursor()\n cur.execute(sql, task)\n conn.commit()", "def record_task_update(job_name, task_id, version, task_info, cjrdb_conn, print_progress=False):\n if print_progress:\n print(\"Update Job...\")\n db_ses_obj = cjrdb_conn.get_db_session()\n\n qury_rslt = db_ses_obj.query(CJRTaskInfo).filter(CJRTaskInfo.JobName == job_name). \\\n filter(CJRTaskInfo.TaskID == task_id). \\\n filter(CJRTaskInfo.Version == version).one_or_none()\n\n if qury_rslt is not None:\n if qury_rslt.TaskCompleted:\n db_ses_obj.close()\n raise Exception(\"The task '{} - {} v{}' has already been finished - check inputs.\". \\\n format(job_name, task_id, version))\n\n update_time = datetime.datetime.now()\n task_updates_info = qury_rslt.TaskUpdates\n if task_updates_info is None:\n lcl_task_updates_info = dict()\n else:\n lcl_task_updates_info = copy.deepcopy(task_updates_info)\n lcl_task_updates_info[update_time.isoformat()] = task_info\n qury_rslt.TaskUpdates = lcl_task_updates_info\n else:\n db_ses_obj.close()\n raise Exception(\"The task '{} - {} v{}' could not be found - check inputs.\". \\\n format(job_name, task_id, version))\n\n db_ses_obj.commit()\n db_ses_obj.close()", "def fire(cls, task_id):\n TaskModel.objects.filter(id=task_id).update(fire=True)", "def update(conn, sql):\n # sql = ''' UPDATE tasks\n # SET priority = ? ,\n # begin_date = ? ,\n # end_date = ?\n # WHERE id = ?'''\n cur = conn.cursor()\n cur.execute(conn, sql)\n conn.commit()", "def run_main_task(entry_id, task_fcn, action_name):\r\n\r\n # get the InstructorTask to be updated. 
If this fails, then let the exception return to Celery.\r\n # There's no point in catching it here.\r\n entry = InstructorTask.objects.get(pk=entry_id)\r\n\r\n # get inputs to use in this task from the entry:\r\n task_id = entry.task_id\r\n course_id = entry.course_id\r\n task_input = json.loads(entry.task_input)\r\n\r\n # construct log message:\r\n fmt = u'task \"{task_id}\": course \"{course_id}\" input \"{task_input}\"'\r\n task_info_string = fmt.format(task_id=task_id, course_id=course_id, task_input=task_input)\r\n\r\n TASK_LOG.info('Starting update (nothing %s yet): %s', action_name, task_info_string)\r\n\r\n # Check that the task_id submitted in the InstructorTask matches the current task\r\n # that is running.\r\n request_task_id = _get_current_task().request.id\r\n if task_id != request_task_id:\r\n fmt = u'Requested task did not match actual task \"{actual_id}\": {task_info}'\r\n message = fmt.format(actual_id=request_task_id, task_info=task_info_string)\r\n TASK_LOG.error(message)\r\n raise ValueError(message)\r\n\r\n # Now do the work:\r\n with dog_stats_api.timer('instructor_tasks.time.overall', tags=['action:{name}'.format(name=action_name)]):\r\n task_progress = task_fcn(entry_id, course_id, task_input, action_name)\r\n\r\n # Release any queries that the connection has been hanging onto:\r\n reset_queries()\r\n\r\n # log and exit, returning task_progress info as task result:\r\n TASK_LOG.info('Finishing %s: final: %s', task_info_string, task_progress)\r\n return task_progress", "def test_update_no_match(self):\n self.my_task.key = self.task_storage.add(self.my_task)\n\n self.task_storage.delete(self.my_task.key)\n\n self.my_task.title = 'foo'\n\n self.key = self.task_storage.update(self.my_task)\n\n self.assertIsNone(self.key)", "def update(self, request, pk=None):\n child_task = self.get_object()\n serializer = ChildTaskSerializer(data=request.data)\n parent_task_id = serializer.data['parent_task_id']\n if serializer.is_valid():\n # Request passed validation.\n response = super(ChildTaskViewSet, self).update(request, pk)\n\n # Because the child task may have been updated, we now check to see whether\n if self.check_siblings_completed(parent_task_id=child_task.parent_task_id):\n # If no incomplete tasks remain, update the parent task as complete.\n completed_datetime = datetime.now()\n ParentTask.objects.filter(id__exact=parent_task_id).update(task_completed_date=completed_datetime)\n\n return response\n else:\n # The request did not pass validation; return a 400 header.\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def put(self, todo_id):\n todo = self.get_todo_by_user_id(todo_id)\n data = self.reqparse.parse_args()\n for key, value in data.items():\n if value is not None:\n setattr(todo, key, value)\n # if is_done True set completed_date as now\n if data.get('is_done'):\n todo.completed_date = datetime.utcnow()\n todo.save()\n return todo, 200", "def setTask(self, value):\n return self._call_java(\"setTask\", value)", "def _update(self, json_task):\n self._name = json_task['name']\n self._shortname = json_task.get('shortname')\n self._profile = json_task['profile']\n self._pooluuid = json_task.get('pooluuid')\n self._instancecount = json_task.get('instanceCount')\n self._advanced_range = json_task.get('advancedRanges')\n\n if 'resourceDisks' in json_task and json_task['resourceDisks']:\n self._resource_objects_ids = json_task['resourceDisks']\n self._resource_type = Disk\n elif 'resourceBuckets' in json_task and json_task['resourceBuckets']:\n 
self._resource_objects_ids = json_task['resourceBuckets']\n self._resource_type = Bucket\n\n if len(self._resource_objects_ids) != \\\n len(self._resource_objects):\n del self._resource_objects[:]\n\n if 'resultDisk' in json_task and json_task['resultDisk']:\n self._result_object_id = json_task['resultDisk']\n self._result_type = Disk\n elif 'resultBucket' in json_task and json_task['resultBucket']:\n self._result_object_id = json_task['resultBucket']\n self._result_type = Bucket\n\n if 'status' in json_task:\n self._status = json_task['status']\n self._creation_date = _util.parse_datetime(json_task['creationDate'])\n if 'errors' in json_task:\n self._errors = [Error(d) for d in json_task['errors']]\n else:\n self._errors = []\n\n if 'constants' in json_task:\n for constant in json_task['constants']:\n self.constants[constant.get('key')] = constant.get('value')\n\n self._uuid = json_task['uuid']\n self._state = json_task['state']\n self._tags = json_task.get('tags', None)\n if 'resultsCount' in json_task:\n if self._rescount < json_task['resultsCount']:\n self._dirty = True\n self._rescount = json_task['resultsCount']\n\n if 'resultsBlacklist' in json_task:\n self._results_blacklist = json_task['resultsBlacklist']\n if 'resultsWhitelist' in json_task:\n self._results_whitelist = json_task['resultsWhitelist']\n if 'snapshotWhitelist' in json_task:\n self._snapshot_whitelist = json_task['snapshotWhitelist']\n if 'snapshotBlacklist' in json_task:\n self._snapshot_blacklist = json_task['snapshotBlacklist']\n\n if 'completedInstances' in json_task:\n self._completed_instances = [CompletedInstance(x) for x in json_task['completedInstances']]\n else:\n self._completed_instances = []", "def set_status_(self, task: Task):\n tic = time.time()\n self._database[task.name] = tic\n self.save()\n return self", "def update():\n return 'update api in put'", "def task_update_stats(request):\n tasks = json.loads(request.POST.get('tasks'))\n date_str = request.POST.get('date')\n cursor = ndb.Cursor(urlsafe=request.POST.get('cursor'))\n countdown = 15\n if not tasks:\n msg = 'Nothing to execute!?'\n logging.warning(msg)\n out = HttpTextResponse(msg)\n else:\n # Dispatch the task to execute.\n task = tasks.pop(0)\n logging.info('Running %s.', task)\n if task.count('-') == 2:\n out, cursor = update_daily_stats(\n cursor, datetime.datetime.strptime(task, DATE_FORMAT))\n elif task == 'monthly':\n # The only reason day is used is in case a task queue spills over the next\n # day.\n day = datetime.datetime.strptime(date_str, DATE_FORMAT)\n out, cursor = update_monthly_stats(cursor, day)\n elif task == '30':\n yesterday = (\n datetime.datetime.strptime(date_str, DATE_FORMAT)\n - datetime.timedelta(days=1)).date()\n out, cursor = update_rolling_stats(cursor, yesterday)\n else:\n msg = 'Unknown task %s, ignoring.' % task\n cursor = ''\n logging.error(msg)\n out = HttpTextResponse(msg)\n\n if cursor:\n # Not done yet!\n tasks.insert(0, task)\n countdown = 0\n\n if out.status_code == 200 and tasks:\n logging.info('%d tasks to go!\\n%s', len(tasks), ', '.join(tasks))\n # Space out the task queue execution by 15s to reduce the risk of\n # datastore inconsistency to get in the way, since no transaction is used.\n # This means to process a full month, it'll include 31*15s = 7:45 minutes\n # delay. 
15s is not a lot but we are in an hurry!\n taskqueue.add(\n url=reverse(task_update_stats),\n params={\n 'tasks': json.dumps(tasks),\n 'date': date_str,\n 'cursor': cursor.urlsafe() if cursor else ''},\n queue_name='update-stats',\n countdown=countdown)\n return out", "def handle_task(self, request):\n \"\"\"\n @api {get} /tasks/:id Get a task\n @apiName GetTask\n @apiGroup Tasks\n @apiVersion 1.0.0\n\n @apiDescription Returns the configuration of a task.\n\n @apiParam {String} :id Task ID.\n\n @apiSuccess {String} name Name.\n @apiSuccess {String} description Description.\n @apiSuccess {String[]} tags Tags.\n @apiSuccess {Boolean} enabled Task is enabled.\n @apiSuccess {String} mode Task mode (\"any\" or \"all\").\n @apiSuccess {String[]} pools Pools on which the task should run.\n @apiSuccess {Object[]} schedules Schedules at which the task should run.\n @apiSuccess {String} command Command to run.\n @apiSuccess {String} workdir Working directory.\n @apiSuccess {String} user User which the task will be run.\n @apiSuccess {String} group Group which the task will be run.\n @apiSuccess {Object} env Environment variables to set.\n @apiSuccess {String} mailreport If the mailer plugin is enabled, condition to send a report (\"error\", \"stdout\", \"stderr\", \"output\", \"always\").\n @apiSuccess {String[]} mailto If the mailer plugin is enabled, email addresses to send the reports to.\n\n @apiSuccessExample {json} Example response:\n {\n \"name\": \"My task\",\n \"description\": \"Task description\",\n \"tags\": [\"tasg1\", \"tag2\"],\n \"enabled\": true,\n \"mode\": \"all\",\n \"pools\": [\"web\"],\n \"schedules\": [\n {\"minute\": [\"*/1\"]}\n ],\n \"command\": \"/bin/true\",\n \"workdir\": \"/tmp/\",\n \"user\": \"www-data\",\n \"group\": \"www-data\",\n \"env\": {\n \"MYENVVAR\": \"myvalue\"\n },\n \"mailreport\": \"output\",\n \"mailto\": [\"[email protected]\"]\n }\n \"\"\"\n \"\"\"\n @api {put} /task/:id Update a task\n @apiName PutTask\n @apiGroup Tasks\n @apiVersion 1.0.0\n\n @apiDescription Update a task. 
Can also be used to create a task with a specific ID.\n\n @apiParam {String} :id Task ID.\n\n @apiParam {String} name Name.\n @apiParam {String} description Description.\n @apiParam {String[]} tags Tags.\n @apiParam {Boolean} enabled Task is enabled.\n @apiParam {String} mode Task mode (\"any\" or \"all\").\n @apiParam {String[]} pools Pools on which the task should run.\n @apiParam {Object[]} schedules Schedules at which the task should run.\n @apiParam {String} command Command to run.\n @apiParam {String} workdir Working directory.\n @apiParam {String} user User which the task will be run.\n @apiParam {String} group Group which the task will be run.\n @apiParam {Object} env Environment variables to set.\n @apiParam {String} mailreport If the mailer plugin is enabled, condition to send a report (\"error\", \"stdout\", \"stderr\", \"output\", \"always\").\n @apiParam {String[]} mailto If the mailer plugin is enabled, email addresses to send the reports to.\n\n @apiParamExample {json} Example parameters:\n {\n \"name\": \"My task\",\n \"description\": \"Task description\",\n \"tags\": [\"tasg1\", \"tag2\"],\n \"enabled\": true,\n \"mode\": \"all\",\n \"pools\": [\"web\"],\n \"schedules\": [\n {\"minute\": [\"*/1\"]}\n ],\n \"command\": \"/bin/true\",\n \"workdir\": \"/tmp/\",\n \"user\": \"www-data\",\n \"group\": \"www-data\",\n \"env\": {\n \"MYENVVAR\": \"myvalue\"\n },\n \"mailreport\": \"output\",\n \"mailto\": [\"[email protected]\"]\n }\n\n @apiSuccess {Boolean} updated The task has been updated.\n @apiSuccess {String} id ID of the task.\n\n @apiSuccessExample {json} Example response:\n {\n \"updated\": true,\n \"id\": \"021b2092ef4111e481a852540064e600\"\n }\n \"\"\"\n \"\"\"\n @api {delete} /task/:id Delete a task\n @apiName DeleteTask\n @apiGroup Tasks\n @apiVersion 1.0.0\n\n @apiDescription Delete a task.\n\n @apiParam {String} :id Task ID.\n\n @apiSuccess {Boolean} deleted The task has been deleted.\n @apiSuccess {String} id ID of the task.\n\n @apiSuccessExample {json} Example response:\n {\n \"deleted\": true,\n \"id\": \"021b2092ef4111e481a852540064e600\"\n }\n \"\"\"\n \"\"\"\n @api {execute} /task/:id Execute a task\n @apiName ExecuteTask\n @apiGroup Tasks\n @apiVersion 1.1.0\n\n @apiDescription Execute a task.\n\n @apiParam {String} :id Task ID.\n @apiParam {String} :target Target for task execution (\"local\" to execute on the local node, otherwise execute on the nodes on which the task is configured to run).\n @apiParam {Boolean} :force Force the execution even if the concurrency limit is reached.\n\n @apiSuccess {Boolean} Executed The task has been executed.\n @apiSuccess {String} id ID of the task.\n\n @apiSuccessExample {json} Example response:\n {\n \"deleted\": true,\n \"id\": \"021b2092ef4111e481a852540064e600\"\n }\n \"\"\"\n\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n\n match = re.match('/tasks/([0-9a-z]+)', request.uri_path)\n task = match.group(1)\n\n tasks = self.cluster.config.get('tasks')\n\n if request.method == \"GET\":\n if task in tasks:\n return HTTPReply(code = 200, body = json.dumps(tasks[task]), headers = headers)\n else:\n return HTTPReply(code = 404, headers = {'Access-Control-Allow-Origin': '*'})\n\n elif request.method == \"PUT\":\n new = json.loads(request.body)\n if task in tasks:\n old = tasks[task]\n else:\n old = None\n\n tasks[task] = new\n self.cluster.config.set('tasks', tasks)\n\n if old:\n code = 200\n body = json.dumps({\"id\": task, \"updated\": True})\n 
get_plugin_registry().call_hook('TaskUpdated', task, old, new)\n else:\n code = 201\n body = json.dumps({\"id\": task, \"created\": True})\n get_plugin_registry().call_hook('TaskCreated', task, new)\n\n return HTTPReply(code = code, body = body, headers = headers)\n\n elif request.method == \"DELETE\":\n if task in tasks:\n old = tasks[task]\n del tasks[task]\n self.cluster.config.set('tasks', tasks)\n\n get_plugin_registry().call_hook('TaskDeleted', task, old)\n\n return HTTPReply(code = 200, body = json.dumps({\"id\": task, \"deleted\": True}), headers = headers)\n else:\n return HTTPReply(code = 404, headers = {'Access-Control-Allow-Origin': '*'})\n\n if request.method == \"EXECUTE\":\n try:\n if 'target' in request.args and request.args['target'] == 'local':\n self.manager.execute_task(task)\n else:\n self.cluster.scheduler.run_task(task, ignore_concurrency = 'force' in request.args)\n\n return HTTPReply(code = 200, body = json.dumps({\"id\": task, \"executed\": True}), headers = headers)\n except ExecutionDisabled:\n return HTTPReply(code = 503, body = json.dumps({\"id\": task, \"executed\": False}), headers = headers)", "def __dedasReplace(self, index, newTask, time, tmpTaskQueue, tmpTaskExInfoDict, tmpRscTimeTable):\r\n \r\n tmpTask = tmpTaskQueue[index]\r\n tmpPath = self._netGraph.getShortestPath(tmpTask.getAccessPoint(), tmpTask.getDispatchedServer())\r\n tmpTaskExInfo = tmpTaskExInfoDict[tmpTask.getKey()]\r\n print(\"--------------In scheduler, replacing %s with %s.\" % (tmpTask.getKey(), newTask.getKey()))\r\n\r\n # Only replace task which passes the same link or has the same dispatched server.\r\n if not self.__isJoint(newTask, tmpTask):\r\n print(\"In scheduler, replacing %s with %s failed, they don't joint.\" % (tmpTask.getKey(), newTask.getKey()))\r\n return self._currentACT, self._currentDS\r\n \r\n \r\n # Get info of new task from netGraph and taskExInfoDict\r\n newPath = self._netGraph.getShortestPath(newTask.getAccessPoint(), newTask.getDispatchedServer())\r\n try:\r\n remData = tmpTaskExInfoDict[newTask.getKey()].getRemDataSize()\r\n remComTime = tmpTaskExInfoDict[newTask.getKey()].getRemComTime()\r\n except KeyError:\r\n # If newTask's ex info does not exist, then create it.\r\n print(\"In scheduler, create task ex info for %s.\" % (newTask.getKey()))\r\n tmpTaskExInfoDict[newTask.getKey()] = task_ex_info.TaskExInfo(newTask, newPath.getPathLength())\r\n remData = tmpTaskExInfoDict[newTask.getKey()].getRemDataSize()\r\n remComTime = tmpTaskExInfoDict[newTask.getKey()].getRemComTime()\r\n \r\n print(\"%s has remData:%d, remComTime:%d\" % (newTask.getKey(), remData, remComTime))\r\n print(\"%s has remData:%d, remComTime:%d\" % (tmpTask.getKey(), tmpTaskExInfo.getRemDataSize(), tmpTaskExInfo.getRemComTime()))\r\n\r\n # Only when newTask's is smaller than the replaced one, replacing will happen.\r\n if tmpTaskExInfo.getRemDataSize() >= remData and \\\r\n tmpTaskExInfo.getRemComTime() >= remComTime:\r\n # Recall remained resources of replaced task and wipe record about it.\r\n tmpRscTimeTable.recallRsc(time, tmpTask, tmpPath)\r\n tmpTaskExInfo.cancelScheduleFromNow(time)\r\n\r\n # Put newTask into tmpTaskQueue\r\n tmpTaskQueue[index] = newTask\r\n ts = \"\"\r\n for t in tmpTaskQueue:\r\n ts = ts + \".\" + t.getKey()\r\n print(\"In scheduler, tmpTaskQueue is: %s\" % (ts))\r\n\r\n startTime = time-1\r\n endTime = 0\r\n\r\n # First, allocate bandwidth resource and record it.\r\n while remData > 0:\r\n startTime, endTime, ban = tmpRscTimeTable.allocateLinkSlot(newTask, 
startTime + 1, newPath)\r\n if ban == 0:\r\n break\r\n remData = remData - ban\r\n tmpTaskExInfoDict[newTask.getKey()].addTransInfo(ban, startTime)\r\n # Then allocate computation resource and record it\r\n while remComTime > 0:\r\n startTime, endTime = tmpRscTimeTable.allocateServerSlot(newTask, endTime, tmpTask.getDispatchedServer())\r\n remComTime = remComTime - 1\r\n tmpTaskExInfoDict[newTask.getKey()].addComInfo(startTime)\r\n tmpTaskExInfoDict[newTask.getKey()].setExpectedComTime(endTime - time)\r\n if not tmpTaskExInfoDict[newTask.getKey()].deadlineIsSatisfied():\r\n print(\"In scheduler, %s is replaced into taskQueue, but it still can't be satisfied.\" % (newTask.getKey()))\r\n tmpRscTimeTable.printStatus()\r\n\r\n # Discard newTask\r\n tmpTaskExInfoDict[newTask.getKey()].setExpectedComTime(parameters.NUM_FLOAT_INFINITY)\r\n \r\n # return self._currentACT, self._currentDS\r\n return self._currentACT, self._currentDS\r\n else:\r\n print(\"In scheduler, replacing succeed, replace %s with %s.\" % (tmpTask.getKey(), newTask.getKey()))\r\n CTList = []\r\n for t in tmpTaskQueue:\r\n CTList.append(tmpTaskExInfoDict[t.getKey()].getExpectedComTime())\r\n print(\"In scheduler, replace ACT:%d, DS:%d.\" %( sum(CTList)/len(CTList), self._currentDS))\r\n taskCTDict = {}\r\n for i in range(len(CTList)):\r\n taskCTDict[tmpTaskQueue[i].getKey()] = CTList[i]\r\n print(\"In schedule, tasks' completion time is:\",taskCTDict)\r\n tmpRscTimeTable.printStatus()\r\n\r\n # Discard tmpTask\r\n tmpTaskExInfo.setExpectedComTime(parameters.NUM_FLOAT_INFINITY)\r\n\r\n return sum(CTList)/len(tmpTaskQueue), self._currentDS\r\n else:\r\n print(\"In scheduler, replacing %s with %s failed, %s is not smaller than %s.\" % (tmpTask.getKey(), newTask.getKey(), newTask.getKey(), tmpTask.getKey()))\r\n return self._currentACT, self._currentDS", "def put(self, request, format=None):\n logger.info('run status put method: {}'.format(request))\n feedback = {\n 'permission': True\n }\n try:\n task_id = request.data.get('task_id', None)\n if task_id is None:\n feedback['data'] = ErrorCode.parameter_missing('task_id')\n raise natrix_exception.ParameterMissingException(parameter='task_id')\n\n try:\n uuid.UUID(hex=task_id)\n except ValueError:\n feedback['data'] = ErrorCode.parameter_invalid('task_id', reason=u'must be a UUID')\n raise natrix_exception.ParameterInvalidException(parameter='task_id')\n try:\n\n task = Task.objects.get(id=task_id, time_type='instant')\n\n if task.status:\n res = command_dispatcher.get_task_data(task.id)\n task.status = False\n task.result_snapshot = json.dumps(res)\n task.save()\n message = u'Turn off task successfully!'\n else:\n message = u'The task is already turned off!'\n res = json.loads(task.result_snapshot)\n\n success = len(res.get('success'))\n wrong = len(res.get('error'))\n response_count = success + wrong\n\n feedback['data'] = {\n 'code': 200,\n 'message': message,\n 'info': {\n 'finished': not task.status,\n 'total': task.terminal_count,\n 'responses': response_count,\n 'success': success,\n 'wrong': wrong\n }\n }\n except Task.DoesNotExist:\n feedback['data'] = ErrorCode.parameter_invalid(\n 'task_id', reason=u'Can not retrieve Instant Task: {}'.format(task_id))\n raise natrix_exception.ParameterInvalidException(parameter='task_id')\n\n except natrix_exception.NatrixBaseException as e:\n logger.error(e.get_log())\n\n return JsonResponse(data=feedback)", "def task_changed(old_task, diff, now_task):", "def put_task(self, task_id, is_activated=False, T_zone=(0,None)):\n if task_id is 
None:\n print('ERROR: task_id cannot be \"None\".')\n return False\n\n if task_id in self.task_dict:\n # The task already exist, overwrite it\n print(\"WARN: The task <%s> already exists in this agent <%d>. Skip this requirement on put_task().\" % (str(task_id), self.agent_id))\n return False\n else:\n \"\"\"\n if self.is_period_intersected(T_zone):\n # That's OK, the agent interseted with itself will not hurt anything!\n pass\n \"\"\"\n self.task_dict[task_id] = TASK(task_id, is_activated, T_zone)\n # print(\"INFO: A task <%s> is put into the agent <%d>\" % (str(task_id), self.agent_id))\n # Update the activation state\n self._update_activation_state()\n return True", "def update_tasks():\n response = \"\"\"\\\n<form action=\"\" method=post>\n<p>Category: <input type=text name=category></p>\n<p>Priority: <input type=text name=priority></p>\n<p>Description: <input type=text name=description></p>\n<p><input type=submit value=Add></p>\n</form>\n\n\n<table border=\"1\" cellpadding=\"3\">\n <tbody>\n <tr>\n <th>Category</th>\n <th>Priority</th>\n <th>Description</th>\n </tr>\n\"\"\"\n\n db_conn = DBConnection()\n db_conn.get_conn()\n\n #db_conn.query_db('delete from tasks')\n if request.method == 'POST':\n category = request.form['category']\n priority = request.form['priority']\n description = request.form['description']\n db_conn.add_task(category, priority, description)\n return redirect('/task')\n #return redirect(url_for('task')) # method name\n\n for single_task in db_conn.query_db('select * from tasks'):\n if single_task['category'] or single_task['priority'] or single_task['description']:\n response += \"<tr><td>%s</td>\" % (single_task['category'])\n response += \"<td>%s</td>\" % (single_task['priority'])\n response += \"<td>%s</td></tr>\" % (single_task['description'])\n response += \"</tbody></table>\"\n\n db_conn.close_conn()\n return response", "def edit_task(request):\n data = {\"success\": False}\n try:\n title = request.POST.get(\"title\")\n col = request.POST.get(\"col\")\n value = request.POST.get(\"value\")\n qs = Todo.objects.get(title=title)\n res = False\n if col == \"title\":\n res = qs.update_title(title=value, user=request.user)\n elif col == \"description\":\n res = qs.update_description(description=value, user=request.user)\n elif col == \"status\":\n res = qs.update_status(status=value, user=request.user)\n if res:\n data[\"success\"] = True\n data[\"message\"] = \"%s updated successfully\" % col\n else:\n data[\"message\"] = \"Failed to update %s\" % col\n except Exception as ex:\n data[\"message\"] = \"Failed to update %s\" % [ex]\n finally:\n return JsonResponse(data)", "def update_fields(task):\n task['totalLength'] = int(task['totalLength'])\n task['completedLength'] = int(task['completedLength'])\n task['downloadSpeed'] = int(task['downloadSpeed'])\n task['eta'] = calculate_eta(task)\n\n if task['files']:\n # there might be multiple files for BT tasks, but we don't support BT\n path = task['files'][0]['path']\n if path:\n filename = os.path.relpath(path, task['dir'])\n task['filename'] = filename\n # the following fields are not needed and should not be exposed\n task.pop('files')\n task.pop('dir')\n\n return task", "def update(self,\n task_id,\n workspace=None,\n command=None,\n command_list=None,\n interactive=False):\n if not task_id:\n raise RequiredArgumentMissing(\n __(\"error\", \"controller.task.delete.arg\", \"id\"))\n if command_list:\n command = \" \".join(command_list)\n elif command:\n command_list = shlex.split(command)\n\n validate(\n 
\"update_task\", {\n \"workspace\": workspace,\n \"command\": command,\n \"command_list\": command_list,\n \"interactive\": interactive\n })\n update_task_input_dict = {'id': task_id}\n\n if workspace is not None:\n update_task_input_dict['workspace'] = workspace\n if command is not None:\n update_task_input_dict['command'] = command\n if command_list is not None:\n update_task_input_dict['command_list'] = command_list\n if interactive:\n update_task_input_dict['interactive'] = interactive\n\n return self.dal.task.update(update_task_input_dict)", "def set_task1(self, task_id):\n self._set_task(self.controller.CURRENT, task_id)" ]
[ "0.8158114", "0.8152909", "0.76661485", "0.7663534", "0.7616725", "0.7550435", "0.7534657", "0.7525693", "0.74830467", "0.7400476", "0.73755723", "0.7304954", "0.7256344", "0.7224427", "0.7218338", "0.7215386", "0.7203241", "0.718519", "0.7181675", "0.7085396", "0.70829463", "0.7059045", "0.70074606", "0.6994229", "0.6988213", "0.69633526", "0.6920967", "0.6900903", "0.6892587", "0.6861459", "0.6858707", "0.6834846", "0.6799104", "0.6774652", "0.6733662", "0.6722333", "0.66674715", "0.66610193", "0.6625257", "0.66106737", "0.65573627", "0.6533484", "0.6497933", "0.64777017", "0.64594746", "0.64560354", "0.64491814", "0.6424032", "0.64206624", "0.64109546", "0.6391757", "0.63845956", "0.6381322", "0.6372485", "0.6344569", "0.6333953", "0.6329195", "0.6319634", "0.6287345", "0.62654823", "0.62635905", "0.62571585", "0.6233482", "0.62318", "0.6222305", "0.6219002", "0.62168705", "0.61958355", "0.6193218", "0.6156197", "0.61417377", "0.6138799", "0.6130386", "0.61231434", "0.6123013", "0.61132884", "0.6113261", "0.60788596", "0.6075814", "0.606111", "0.60352284", "0.603415", "0.60157204", "0.5992625", "0.5975894", "0.5963973", "0.594115", "0.5940449", "0.5927959", "0.5920151", "0.5918848", "0.5912625", "0.59100795", "0.5908282", "0.59065425", "0.5894498", "0.5865116", "0.5859048", "0.58581865", "0.5843792" ]
0.6991943
24
Check if a task exists on the server
def exists(self, server):
    try:
        server.get(
            'task',
            replacements={
                'slug': self.__challenge__.slug,
                'identifier': self.identifier})
    except Exception:
        return False
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __check_if_task_exists(self, server_id):\n if server_id in self.__migrating_tasks.keys():\n return True\n return False", "def exists_task(self, task):\n assert task, \"Must input a valid task name.\"\n return any(self.get_by_task(task))", "def isTasksExists(request):\n task_status = {}\n task_result = 0\n flag = None\n for task in request.data['tasks']:\n task_obj = Tafv2Task.objects.filter(script=task)\n if task_obj:\n task_status[task] = \"Task Exists.\"\n else:\n task_result += 1\n task_status[task] = \"Task doesn't Exists.\"\n if task_result > 0:\n flag = False\n else:\n flag = True\n\n return {'taskResult': flag, 'taskStatus': task_status}", "def is_registered(task_name):\n if tasks.find({'name': task_name}).count() > 0:\n return True\n else:\n return False", "def exists(self, task_identifier: str, timeout: int) -> bool:\n raise NotImplementedError", "def exists(taskname):\n with open(todofile, 'r') as todo:\n tasks = todo.readlines()\n for task in tasks:\n try:\n task = json.loads(task)\n if taskname == task['name']:\n return True\n except json.decoder.JSONDecodeError:\n return False\n return False", "def is_task(self, task_id, tasks):\r\n for t in tasks:\r\n if t.id == task_id:\r\n return True\r\n return False", "def exists(self, task, name=None):\n assert task, \"Must input a valid task name.\"\n if name is not None:\n return self._is_task_in_dataset(name, task)\n else:\n return self._is_task_in_any_dataset(task)", "def status_check(task_id):\n logger.info(f\"Checking task status for {task_id}\")\n task = Task.objects.get(kf_id=task_id)\n task.status_check()", "def check_task(request, tid):\n try:\n slogger.glob.info(\"check task #{}\".format(tid))\n response = task.check(tid)\n except Exception as e:\n slogger.glob.error(\"cannot check task #{}\".format(tid), exc_info=True)\n return HttpResponseBadRequest(str(e))\n\n return JsonResponse(response)", "def __contains__(self, task):\n return task in self._tasks", "def is_task_stagnant(task):", "def _CheckIfHuntTaskWasAssigned(self, client_id, hunt_id):\n for _ in aff4.FACTORY.Stat(\n [client_id.Add(\"flows/%s:hunt\" %\n rdfvalue.RDFURN(hunt_id).Basename())],\n token=self.token):\n return True\n\n return False", "def check_repeated_task(self, task):\n task_status = task in self.tasks_asked\n\n # append if never asked\n if task_status == False:\n self.tasks_asked.append(task)\n\n return task_status", "def is_task_in_schedule(self, tid: str) -> bool:\n return tid in self.__tasks", "def __contains__(self, name):\n return name in self._tasks", "def test_update_task_exists(self):\n task_id = util.MOCK_UUID_4\n\n rv = TEST_CLIENT.patch(\n f\"/tasks/{task_id}\",\n json={\n \"name\": \"task-5\",\n },\n )\n result = rv.json()\n expected = {\n \"message\": \"a task with that name already exists\",\n \"code\": \"TaskNameExists\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 400)", "def check_task_id(id):\n\n\t# Open connection and execute SQL to get a task\n\ttry:\n\t\tdb, cursor = connect()\n\t\t\n\t\tcursor.execute(\"\"\"SELECT id FROM tasks \n\t\t\t\t\t\tWHERE id=%s\"\"\" % id)\n\n\t\ttask = cursor.fetchone()\n\n\t# Get error messages\n\texcept catch_error(), e:\n\t\tprint \"Error %d: %s\" % (e.args[0],e.args[1])\n\n\t# Close connection\n\tfinally:\n\t\tif db:\n\t\t\tdb.close()\n\n\treturn task", "def need_update(self, task: Union[Task, Path]) -> bool:\n if isinstance(task, Path):\n return not task.exists()\n if task.name not in self._database:\n return True\n task_time = 
self._database.get(task.name)\n return task.need_rerun(task_time)", "def is_task_taken(new_task, tasks):\n task_ids = [t.task_data.get('task_id') for t in tasks]\n new_task_id = new_task.get('task_id')\n if new_task_id is None:\n return False\n taken = new_task_id in task_ids\n if taken:\n logger.info('Task {} is already taken'.format(new_task_id))\n return taken", "def check(request,task_name):\n try:\n todo = getattr(tasks,task_name,None)\n except KeyError:\n return JsonResponse(\n {'error':'This {} is not a known task'.format(taskname)})\n \n parameters = todo().settings.get.keys()\n \n try:\n kwargs = {par:request.GET[par] for par in parameters}\n except KeyError:\n return JsonResponse(\n {'error':'Missing parameter: please provide {}'.format(parameters)})\n\n action = todo(**kwargs)\n if not action.settings.valid:\n return JsonResponse(\n {'error':'Invalid settings: {}'.format(action.settings.errors)})\n\n \n action.set_result()\n \n a = action.result\n add_to_project(action.result.hash,project)\n\n return JsonResponse(action.description)", "def is_task_done(self, task_name):\n logging.info(f\"Checking if '{task_name}' is done\")\n if task_name in self._container:\n res = self._container[task_name].running\n logging.info(f\"Result: {res}\")\n return res\n logging.info(f\"Could not find task: {task_name}\")\n raise TaskNotFoundException(f\"Could not find task: {task_name}\")", "def _check_task(self, task: Task) -> bool:\n try:\n extents = list(fiemap(task.path, sync=task.frequency > 1))\n except OSError:\n self.logger.error('Error#%d %s', task.id, task.path, exc_info=True)\n return False\n\n if not extents:\n return False\n\n planner = Planner(self.planner_params, extents)\n clusters = planner.result()\n\n if not clusters:\n return False\n\n task.extents = extents\n task.clusters = clusters\n\n return True", "def _get_task(self, task_id):\n if not task_id:\n return None\n task = objects.Transaction.get_by_uid(task_id, fail_if_not_found=False)\n if task and task.cluster_id == self.cluster.id:\n return task\n return False", "def exists(self, task_identifier: str, timeout: int) -> bool:\n session = self.result_session()\n with self.session_cleanup(session):\n lock = session.query(Lock)\\\n .filter(Lock.task_identifier == task_identifier).first() # pylint: disable=no-member\n if not lock:\n return False\n difference = datetime.utcnow() - lock.created\n if difference < timedelta(seconds=timeout):\n return True\n\n return False", "def is_event_service_task(jeditaskid):\n eventservice = False\n\n query = {'jeditaskid': jeditaskid}\n task = list(JediTasks.objects.filter(**query).values('eventservice'))\n if len(task) > 0 and 'eventservice' in task[0] and task[0]['eventservice'] is not None and task[0]['eventservice'] == 1:\n eventservice = True\n\n return eventservice", "def exists(path):\n r = requests.head(path)\n # print(r.status_code)\n return r.status_code == requests.codes.ok", "def _is_python_task(task, pidstr):\n if str(task.pid) != pidstr:\n return False\n else:\n return True", "def task_is_failure(task):\n\n if task and task.state == 'FAILURE':\n return True\n return False", "def _check_host_existence(self, hostname: str) -> bool:\n with self.lock:\n hosts = self.hosts.all()\n for host in hosts:\n if host['hostname'] == hostname:\n return True\n return False", "def is_task_runnable(self, task: Task) -> bool:\n if any([self._task_status_dict[_] == self.FAIL for _ in task.dependencies]):\n self._task_status_dict[task] = self.FAIL\n return False\n ret = self._task_status_dict[task] == 
self.TODO and reduce(\n lambda a, b: a and b,\n [self._task_status_dict[_] == self.SUCCESS for _ in task.dependencies],\n True\n )\n return ret", "def test_get_task_success(self):\n task_id = util.MOCK_UUID_1\n\n rv = TEST_CLIENT.get(f\"/tasks/{task_id}\")\n result = rv.json()\n\n expected = util.MOCK_TASK_1\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)", "async def check_scheduled_events_exists(self) -> bool:\n\n mycursor, _ = await the_database()\n await mycursor.execute(\"SHOW TABLE STATUS LIKE 'ScheduledEvents'\")\n exists = await mycursor.fetchone()\n await mycursor.close()\n if exists:\n return True\n else:\n return False", "async def exists(self, payload: TPayload) -> bool:", "def test_get_task(self):\n resp = self.app.get('/api/2/inf/esrs',\n headers={'X-Auth': self.token})\n\n task_id = resp.json['content']['task-id']\n expected = 'asdf-asdf-asdf'\n\n self.assertEqual(task_id, expected)", "def handle_task_running(self, request):\n \"\"\"\n @api {get} /task/:id/running Check if a task is running\n @apiName IsTaskRunning\n @apiGroup Tasks\n @apiVersion 1.1.0\n\n @apiParam {String} :id Task ID.\n\n @apiSuccess {Boolean} running The task is running.\n @apiSuccess {String} id ID of the task.\n\n @apiSuccessExample {json} Example response:\n {\n \"running\": true,\n \"id\": \"021b2092ef4111e481a852540064e600\"\n }\n \"\"\"\n\n match = re.match('/tasks/([0-9a-z]+)/running', request.uri_path)\n task = match.group(1)\n\n running = self.cluster.is_task_running(task)\n\n headers = {\n 'Content-Type': 'application/javascript',\n 'Access-Control-Allow-Origin': '*'\n }\n body = json.dumps({\"id\": task, \"running\": running})\n\n return HTTPReply(code = 200, body = body, headers = headers)", "def test_get_task_not_found(self):\n task_id = \"foo\"\n\n rv = TEST_CLIENT.get(f\"/tasks/{task_id}\")\n result = rv.json()\n\n expected = {\n \"message\": \"The specified task does not exist\",\n \"code\": \"TaskNotFound\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 404)", "def has_available(self):\n now = time()\n # We have cached False response\n if self.available_timestamp is not None and now < self.available_timestamp:\n return False\n\n # Get oldestTask from queue stats\n exc = None\n for _repeat in range(6):\n try:\n count = self.handle.count()\n break\n except IOError as e:\n sleep(_repeat * 2 + 1)\n exc = e\n else:\n if exc is not None:\n raise exc\n return False\n # There is at least one availabe task\n if int(count) > 0:\n return True\n # No available task, cache this response for 5 minutes\n self.available_timestamp = now + 300 # 5 minutes\n return False", "def can_run(self, task: \"TaskView\") -> Union[bool, str]:\n return True", "def health_check(task_service_id):\n logger.info(f\"Checking task service status for {task_service_id}\")\n task_service = TaskService.objects.get(kf_id=task_service_id)\n task_service.refresh_from_db()\n task_service.health_check()", "def test_resource_exists(self):\r\n\t\tself.assertTrue(self._configuration_.resources().has_key(\"AddWordTaskRepeat\") and self._configuration_.resources().has_key(\"RemoveWordTaskRepeat\"))", "async def get_task_status(task_id: TaskId):", "def test_create_monitoring_task_does_not_exist(self):\n project_id = util.MOCK_UUID_1\n deployment_id = util.MOCK_UUID_1\n task_id = \"unk\"\n\n rv = TEST_CLIENT.post(\n f\"/projects/{project_id}/deployments/{deployment_id}/monitorings\",\n json={\n \"taskId\": task_id,\n },\n )\n result = rv.json()\n expected = {\n \"message\": 
\"The specified task does not exist\",\n \"code\": \"InvalidTaskId\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 400)", "def findTaskInList(task, taskList):\n found = False\n for t in taskList:\n if t.tBegin == task.tBegin and t.tEnd == task.tEnd and t.batchSize == task.batchSize \\\n and t.order == task.order and t.machine == task.machine and t.processingUnit == task.processingUnit \\\n and t.operation == task.operation:\n found = True\n return found\n return found", "def _add_task_action(self, task):\n if not task.is_alive():\n return", "def _check_queryinfo_existence(self, hostname: str, job: str) -> bool:\n with self.lock:\n hosts = self.host_query_info.all()\n for host in hosts:\n if host['hostname'] == hostname and host['job'] == job:\n return True\n return False", "def checkRely(self, task):\n if not isinstance(task, dict):\n return False\n keys = task.get(\"rely\")\n #is empty or crontab, explain upstream is true\n if not keys or task.get(\"task_type\") == \"crontab\":\n return True\n\n keyl = []\n for k, v in keys.items():\n keyl.append(k)\n\n date = task.get(\"task_day\")\n if not date:\n date = self.date\n\n mkeys = [{\"task_key\": k} for k in keyl]\n tlist = {}\n for doc in self.mgdb.task_history.find({\"$or\": mkeys, \"task_day\": date}):\n tlist[doc.get(\"task_key\")] = doc\n\n if not tlist or len(tlist) != len(mkeys):\n #when debug, always return true.\n if self.config.get(\"is_debug\"):\n return True\n else:\n return False\n for c, d in tlist.iteritems():\n if d.get(\"status\") != \"finished\":\n return False\n\n return True", "def test_22_get_specific_completed_task_anonymous(self):\r\n\r\n #model.rebuild_db()\r\n with self.flask_app.app_context():\r\n self.create()\r\n app = db.session.query(App).first()\r\n task = db.session.query(Task)\\\r\n .filter(App.id == app.id)\\\r\n .first()\r\n\r\n for i in range(10):\r\n task_run = TaskRun(app_id=app.id, task_id=task.id,\r\n user_ip=\"127.0.0.1\", info={'answer': 1})\r\n db.session.add(task_run)\r\n db.session.commit()\r\n\r\n ntask = Task(id=task.id, state='completed')\r\n\r\n assert ntask not in db.session\r\n db.session.merge(ntask)\r\n db.session.commit()\r\n\r\n res = self.app.get('app/%s/task/%s' % (app.short_name, task.id),\r\n follow_redirects=True)\r\n msg = 'You have already participated in this task'\r\n assert msg in res.data, res.data\r\n assert 'Try with another one' in res.data, res.data", "def requireTask(self, name):\n t = self.getTask(name)\n if t is None:\n raise Exception(\"Task %s not found in service\" % name)\n return t", "async def _exists(self, key):\n with await self._connect() as redis:\n exists = await redis.exists(key)\n return True if exists > 0 else False", "def dumb_task():\n return True", "def task_status():\n pass", "def _task_is_running(course_id, task_type, task_key):\r\n running_tasks = InstructorTask.objects.filter(\r\n course_id=course_id, task_type=task_type, task_key=task_key\r\n )\r\n # exclude states that are \"ready\" (i.e. not \"running\", e.g. 
failure, success, revoked):\r\n for state in READY_STATES:\r\n running_tasks = running_tasks.exclude(task_state=state)\r\n return len(running_tasks) > 0", "def test_check_existing_enqueues_tasks(self):\n collection = handlers_endpoints_v1.DigestCollection(\n namespace=handlers_endpoints_v1.Namespace())\n collection.items.append(\n generate_digest(collection.namespace.namespace, 'some content'))\n key = model.get_entry_key(\n collection.namespace.namespace, collection.items[0].digest)\n\n # guarantee that one digest already exists in the datastore\n model.new_content_entry(key).put()\n self.call_api('preupload', self.message_to_dict(collection), 200)\n\n # find enqueued tasks\n self.assertEqual(1, self.execute_tasks())", "def validate_alias(self, arg):\n # Check if task exists\n alias = helpers.parse_task_alias(arg)\n task = self.db.get_task_by_alias(alias['task'], alias['project'])\n if not task:\n raise ValueError(u\"*** Error: The task '{}#{}' has not been \"\n \"found.\".format(alias['task'],\n alias['project']).encode('utf8'))\n return task", "def is_runnable(self):\n \n if len(target_tasks) < 1: \n return False\n # check task names?\n \n if self.run_folder is None or \\\n not os.path.exists(self.run_folder) or \\\n not os.path.exists(os.path.join(self.run_folder, self.run_id, 'SampleSheet.csv')):\n return False\n \n return True", "def celery_available():\n try:\n res = check_celery.apply_async()\n return \"OK\" == res.get(timeout=2)\n except:\n return False", "def _exists_remote(self, host):\n # This file gets written after cloudinit is done\n # path = '/var/lib/cloud/instance/boot-finished'\n path = '/home/ubuntu/SETUP_COMPLETE'\n t = 0\n sleep_len = 10\n while True:\n status = subprocess.call(\n ['ssh', '-oStrictHostKeyChecking=no', '-i', '/home/ubuntu/.ssh/id_rsa', 'ubuntu@'+host, 'test -f {}'.format(pipes.quote(path))])\n if status == 0:\n return True\n else:\n return False", "async def exists(self, tag_name):\n try:\n if await self.get_id(tag_name):\n return True\n except RtbDoesntExists:\n return False", "async def access(cls, entry: \"TaskEntry\"):\n return True", "def task_succeed(json_raw):\n rally_report = json.loads(json_raw)\n tasks = rally_report.get('tasks')\n if tasks:\n for task in tasks:\n if task.get('status') != 'finished' or \\\n task.get('pass_sla') is not True:\n return False\n else:\n return False\n return True", "def exists(self, name):\n return self.endpoint.exists(name)", "def exists(path):\n return get_instance(path).exists(path)", "def test_search_not_found(self):\n self.task_storage.add(self.my_task)\n search_task = task.Task(title='title1', notes='note1')\n task_search_list = self.task_storage.search(search_task)\n\n self.assertEqual(task_search_list, None)", "def exists_remote(host, path):\n command = \"test -e \" + pipes.quote(path) + \" && echo 0 || echo 1\"\n (stdoutstring, stderrstring) = execute_ssh_command(host, port, USER, PASSWORD, None, None, command)\n\n for status in stdoutstring:\n if re.search('0', status):\n return True\n if re.search('1', status):\n return False", "def check_for_assemble_file(task_file):\n if not os.path.exists(task_file):\n print_failure_msg(\"{} file is missing\".format(task_file))\n exit(127)\n return True", "def isFromTaskQueue(request=None):\n request = request or webapp2.get_request()\n # As stated in the doc (https://developers.google.com/appengine/docs/python/taskqueue/overview-push#Task_Request_Headers)\n # These headers are set internally by Google App Engine.\n # If your request handler finds any of these 
headers, it can trust that the request is a Task Queue request.\n # If any of the above headers are present in an external user request to your App, they are stripped.\n # The exception being requests from logged in administrators of the application, who are allowed to set the headers for testing purposes.\n return bool(request.headers.get('X-Appengine-TaskName'))", "def exists(self):\n if self.host.exists(self.remote_path):\n print 'Yes, config exists already.'\n return True\n else:\n print 'Config doesn\\'t exist yet'\n return False", "def exist(self):", "def app_has_tasks(\n client: MarathonClient,\n app_id: str,\n expected_tasks: int,\n exact_matches_only: bool = False,\n) -> bool:\n app_id = \"/%s\" % app_id\n try:\n tasks = client.list_tasks(app_id=app_id)\n except NotFoundError:\n print(\"no app with id %s found\" % app_id)\n raise\n print(\"app %s has %d of %d expected tasks\" % (app_id, len(tasks), expected_tasks))\n if exact_matches_only:\n return len(tasks) == expected_tasks\n else:\n return len(tasks) >= expected_tasks", "def test_delete_task(self):\n check = False\n r = main.List.connection()\n main.List.add(r, \"ToDo\", 1, \"Buy apples\", 2, \"20.05.2015\")\n main.List.delete(r, \"ToDo\", 1)\n task = main.List.pull_from_redis(r, \"ToDo\", False)\n for key in task.iterkeys():\n if key == \"1\":\n check = True\n self.assertFalse(check, \"Deleting task failed.\")", "def exists(self):\n return True", "def exists(self):\n return True", "def test_46_tasks_exists(self, mock):\r\n self.register()\r\n self.new_application()\r\n res = self.app.get('/app/sampleapp/tasks/', follow_redirects=True)\r\n assert \"Edit the task presenter\" in res.data, \\\r\n \"Task Presenter Editor should be an option\"\r\n\r\n app = db.session.query(App).first()\r\n app.hidden = 1\r\n db.session.add(app)\r\n db.session.commit()\r\n # As owner\r\n res = self.app.get('/app/sampleapp/tasks/', follow_redirects=True)\r\n assert res.status_code == 200, res.status_code\r\n assert \"Edit the task presenter\" in res.data, \\\r\n \"Task Presenter Editor should be an option\"\r\n self.signout()\r\n # As anonymous\r\n res = self.app.get('/app/sampleapp/tasks/', follow_redirects=True)\r\n assert res.status_code == 403, res.status_code\r\n\r\n with self.flask_app.app_context():\r\n self.create()\r\n\r\n # As another user, but not owner\r\n self.signin(email=Fixtures.email_addr2, password=Fixtures.password)\r\n res = self.app.get('/app/sampleapp/tasks/', follow_redirects=True)\r\n assert res.status_code == 403, res.status_code\r\n self.signout()", "def test_contain_tasks(self):\n dag = self.dagbag.get_dag(self.dag_id)\n tasks = dag.tasks\n task_ids = list(map(lambda task: task.task_id, tasks))\n self.assertIn(\"extract\", task_ids)", "def schedule_exist(self, schedule_name):\r\n schedule = self.find(\"schedules\", schedule_name, attribute=\"name\")\r\n if schedule is not None:\r\n return True\r\n else:\r\n return False", "def ResourceExists(self, name):\n pass", "def check_pool_exist(pool_name: str) -> bool:\n if not pool_name:\n return False\n return os.path.exists(constant.work_dir + \"/pool/\" + pool_name)", "def is_new_task(self):\n return self.date_created >= timezone.now() - datetime.timedelta(days=1)", "def is_running(self):\n return self._task.running()", "def is_running(self):\n return self._task.running()", "def exists(self, url):\n return (self.base_path / url).exists()", "def test_search(self):\n self.task_storage.add(self.my_task)\n search_task = task.Task(title='title', notes='note')\n task_search_list = 
self.task_storage.search(search_task)\n\n self.assertTrue(self.my_task in task_search_list)", "def is_task_complete(self):\n if self._task is None:\n raise UserWarning(\"No task registered\")\n\n return self._task.is_complete()", "def test_allowed_if_in_task(self):\n\n @task_only\n def view(request):\n return HttpResponse(\"Hello\")\n\n request = self.factory.get(\"/\")\n request.META[_TASK_NAME_HEADER] = \"test\"\n with sleuth.fake(\"djangae.environment.is_in_task\", True):\n response = view(request)\n\n self.assertEqual(response.status_code, 200)", "def checkjob(self, taskid):\n\t\tfrom subprocess import Popen,PIPE\n\t\timport os\n\n\t\ttry:\n\t\t\tp = self.qstatoutput\n\t\texcept:\n\t\t\t#command = [ 'qstat','-j',id ]\n\t\t\tcommand = [ 'qstat','-u',os.getenv(\"USER\"),'-g','d' ]\n\t\t\tp = Popen( command ,stdout=PIPE,stderr=PIPE ).communicate()\n\t\t\tself.qstatoutput = p\n\n\t\tisincluster = False\n\t\ttaskstatus = {}\n\t\tfor line in p[0].split(\"\\n\"):\n\t\t\tif not str(self.jobsid) in line:\n\t\t\t\tcontinue\n\t\t\tparseline = line.split()\n\t\t\tstatus= parseline[4]\n\t\t\ttry:\n\t\t\t\ttask = int(parseline[9])\n\t\t\texcept IndexError:\n\t\t\t\t# Implies it is still waiting\n\t\t\t\ttask = int(parseline[8])\n\t\t\ttaskstatus[task] = status\n\t\t\tisincluster = True\n\n\t\tif not isincluster:\n\t\t\t# Checking if the outputfiles are there\n\t\t\tif not os.path.exists(self.outputfiles[taskid]):\n\t\t\t\tmessage = \"\\033[1;31mclustermanager.checkjob: Something went wrong in the cluster:\\033[1;m\"\n\t\t\t\tmessage += \"The task '\"+str(taskid)+\"' of the job '\"+str(self.jobsid)\n\t\t\t\tmessage += \"' is already finish but there is no output root file '\"\n\t\t\t\tmessage += self.outputfiles[taskid]+\"'\\n\"\n\t\t\t\tmessage += \"Check the cluster outputs file\"\n\t\t\t\traise message\n\n\t\t\t# Gathering the file outputs in order to add\n\t\t\tself.taskstatus[\"Done\"].append(taskid)\n\t\t\treturn self.outputfiles[taskid]\n\n\t\t# Still in cluster\n\t\t#statustaskdict = dict( [ (status,[]) for status in taskstatus.values() ] )\n\t\tfor task,status in taskstatus.iteritems():\n\t\t\tif status == \"r\" or status == \"t\":\n\t\t\t\tself.taskstatus[\"r\"].append(task)\n\t\t\telif status == \"qw\":\n\t\t\t\tself.taskstatus[\"qw\"].append(task)\n\t\t\telse:\n\t\t\t\tself.taskstatus[\"Undefined\"].append(task)", "def is_exist(self, status_code):\n if status_code == 200:\n return True\n return False", "def exists(self) -> bool:\n try:\n result = self.get()\n except KeyError:\n return False\n return True", "def exists(self, path):", "def tasks_are_available(tasks):\n task_not_finished_not_scheduled_count = len(tasks)\n for task in tasks:\n if task.getisTaskFinished():\n continue\n if task.getisTaskScheduled():\n continue\n else:\n task_not_finished_not_scheduled_count -= 1\n if task_not_finished_not_scheduled_count < len(tasks):\n return True\n else:\n return False", "def perform_existence_check(set_name_to_cache: bool):\n Thread(target=_perform_existence_check, args=(set_name_to_cache,)).start()", "async def _exists(self, key):\n return await self.client.append(key, b'')", "def check_project_exists(self, project):\n session = self.session_factory()\n exists = session.query(PipelineRun).filter_by(project=project).first()\n session.close()\n if exists:\n return True\n return False", "def is_task(self):\n from .tasks import Task\n return isinstance(self, Task)", "def exists(self):\r\n try:\r\n self.refresh()\r\n except:\r\n return False\r\n return True", "def 
test_05d_get_nonexistant_app_task(self):\r\n res = self.app.get('/app/noapp/task', follow_redirects=True)\r\n assert res.status == '404 NOT FOUND', res.status\r\n # Pagination\r\n res = self.app.get('/app/noapp/task/25', follow_redirects=True)\r\n assert res.status == '404 NOT FOUND', res.status", "def _url_exists(self, url):\n return url_exists(url)", "def has_TaskSet(self, desired_metadata):\n return bool(self._resolve_TaskSet(desired_metadata)) or self.fallback.has_TaskSet(desired_metadata)", "def file_exists(host, fqpath):\n command = \"ls -ld %s\" % fqpath\n rcode, _, rerr = g.run(host, command)\n if rcode == 0:\n return True\n\n g.log.error('File does not exist: %s', rerr)\n return False", "def server_exists(client, server_url):\n data = {\"server_url\": server_url}\n return client._creoson_post(\"windchill\", \"server_exists\", data, \"exists\")" ]
[ "0.77683973", "0.7662436", "0.74845606", "0.73977894", "0.7334475", "0.68549156", "0.68336856", "0.6772", "0.67269367", "0.6711562", "0.67005736", "0.6666161", "0.6534565", "0.64875007", "0.6391127", "0.6377171", "0.6374866", "0.63482434", "0.63294256", "0.6309257", "0.6295972", "0.6288763", "0.62789893", "0.6259014", "0.62515867", "0.6174665", "0.6157906", "0.61175084", "0.6103796", "0.60824", "0.60667384", "0.60659564", "0.6057953", "0.60389674", "0.603059", "0.5991359", "0.5981879", "0.5973841", "0.5969998", "0.5969438", "0.5967509", "0.59617317", "0.59250504", "0.5911943", "0.5910353", "0.59071785", "0.5905562", "0.58795303", "0.5878718", "0.5873959", "0.5870406", "0.5865857", "0.58620864", "0.5856149", "0.5847692", "0.58445984", "0.58323556", "0.5830451", "0.5828532", "0.58254284", "0.5802751", "0.580111", "0.57972777", "0.5776431", "0.57713753", "0.57612145", "0.5758521", "0.5758257", "0.57340413", "0.5706692", "0.5699955", "0.56984365", "0.56984365", "0.5698303", "0.5687326", "0.5678531", "0.5677315", "0.56581914", "0.56525683", "0.5649584", "0.5649584", "0.56488335", "0.56449395", "0.5644492", "0.5643679", "0.5643446", "0.56345755", "0.5631211", "0.56288254", "0.561365", "0.56072474", "0.5598797", "0.55967706", "0.5596659", "0.5587879", "0.55856305", "0.558332", "0.55723387", "0.5566507", "0.55641735" ]
0.8097337
0
Retrieve a task from the server
def from_server(cls, server, slug, identifier):
    task = server.get(
        'task',
        replacements={
            'slug': slug,
            'identifier': identifier})
    return cls(**task)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_task(self):\n\n url='{url}/task'.format(url=config.SERVER_URL)\n\n try:\n res=request.urlopen(url,timeout=10).read()\n res=str(res,encoding='utf8')\n except Exception as e:\n check_server() # sleep until server is available\n try:\n res=request.urlopen(url,timeout=10).read()\n res=str(res,encoding='utf8')\n except:\n err_str='error: client -> get_task : ' \\\n 'unable to connect to server, exit process'\n info_manager(err_str,type='KEY')\n os._exit(0)\n\n if 'no task' in res: # if server have no task uid ,return 'no task uid'\n err_str= 'error: client -> get_task : ' \\\n 'unable to get task, exit process'\n info_manager(err_str,type='KEY')\n os._exit(0)\n\n try: # try to parse task str\n res=res.split(',')\n self.task_uid=res[0]\n self.task_type=res[1]\n except:\n err_str='error: client -> get_task : ' \\\n 'unable to split task str,exit process'\n info_manager(err_str,type='KEY')\n os._exit(0)", "def getTask():\n\tcontent = requests.get(MANAGER_URL+\"task\", params={\"apiKey\": API_KEY}).text\n\tif content == \"null\":\n\t\treturn None\n\telse:\n\t\treturn json.loads(content)", "def get_task(self, task_id):\n res = self.conn.cursor().execute(\"SELECT * FROM tasks WHERE id=?\", (task_id,))\n return res.fetchone()", "def get(self, id):\n task = get_task(get_db(), id)\n if not task:\n api.abort(404, f\"Invalid task with id: {id}\")\n return task_to_dict(task)", "def get_task(id):\n\n if not id:\n raise InvalidAPIUsage(\"id is required\")\n\n collection = get_db_collection()\n\n task = get_task_or_404(collection, id)\n\n response = jsonify(content=task['content'])\n response.status_code = 200\n return response", "def get_task(self, id):\n raise NotImplementedError()", "async def get_task_result(task_id: TaskId):", "def get(self) -> Task: # pragma: no cover\n raise NotImplementedError", "def get(self, guid):\n key = db.Key.from_path('Task', int(guid))\n task = db.get(key)\n if not task == None:\n guid = \"%s\" % task.key().id_or_name()\n task_json = { \"id\": \"%s\" % guid, \"name\": task.name,\n \"priority\": task.priority, \"effort\": task.effort,\n \"projectId\": task.projectId,\n \"submitterId\": task.submitterId, \"assigneeId\": task.assigneeId,\n \"type\": task.type, \"developmentStatus\": task.developmentStatus,\n \"validation\": task.validation, \"description\": task.description,\n \"createdAt\": task.createdAt,\n \"updatedAt\": task.updatedAt }\n \n self.response.headers['Content-Type'] = 'application/json'\n self.response.out.write(simplejson.dumps(task_json))\n else:\n self.response.set_status(404, \"Task not found\")", "def get(self, id):\n\n return self.client.get(\"external-task/{0}\".format(id))", "def get(self, guid):\n results = j.sal.fs.find(self._root, '*_%s' % guid)\n if len(results) <= 0:\n raise TaskNotFoundError(\"task %s not found\" % guid)\n if len(results) > 1:\n raise RuntimeError(\"found 2 tasks with same guid, this should not happen\")\n return self._deserialize_task(j.sal.fs.readFile(results[0]))", "def get(self):\n url = \"http://twitter.com/statuses/public_timeline.json\"\n task = taskqueue.Task(\n url='/tasks/fetch',\n params={'url': url}\n )\n task.add('fetch')", "def get(self):\n gid = self.get_query_argument('gid', None)\n\n if gid: # get a specified task\n self.write(update_fields(\n self._rpc.aria2.tellStatus(self._token, gid, TASK_FIELDS)))\n\n else: # get all tasks\n active_tasks = self._rpc.aria2.tellActive(self._token, TASK_FIELDS)\n waiting_tasks = self._rpc.aria2.tellWaiting(\n self._token, -1, 100, TASK_FIELDS)\n stopped_tasks = 
self._rpc.aria2.tellStopped(\n self._token, -1, 100, TASK_FIELDS)\n all_tasks = [\n update_fields(task) for task in\n itertools.chain(active_tasks, waiting_tasks, stopped_tasks)\n ]\n self.write({'tasks': all_tasks})", "def get_task(self, name):\n res = Task()\n self.GetTask(name, res)\n return res", "def task(self, name):\n with self.db_lock:\n return self.rcon.hget(self.task_key, name)", "def get(self, task_id):\n try:\n return self.dal.task.get_by_id(task_id)\n except EntityNotFound:\n raise DoesNotExist()", "def retrieve_task(self, task_id):\n r = requests.get('/'.join([self.base_url, self.ENDPOINT_TASK_STATUS,\n str(task_id)]))\n return r.json()", "def get(self, project_id, task_id):\n try:\n task = backend.get(Task, {'project.pk': request.project.pk, 'pk': task_id},\n only=self.export_fields, include=('project',), raw=True)\n except Task.DoesNotExist:\n return {'message': \"unknown task\"}, 404\n return {'task': self.export(task)}, 200", "def get_task(self, task_id: str) -> Mapping[str, Any]:\n return self.__get_one_by_id(\"tasks\", \"task_id\", task_id)", "def get_task(self):\n return self.queue.get()", "def fusion_api_get_task(self, param='', uri=None, api=None, headers=None):\n if uri is not None:\n # update fully qualified URL to relative URI\n uri = re.sub('^https://\\d*.\\d*.\\d*.\\d*', '', uri)\n return self.task.get(uri=uri, api=api, headers=headers, param=param)", "def get_task(task_id):\n return db.task.find_one({'_id': ObjectId(task_id)})", "def get_task_by_id(id):\n\n\t# Open connection and execute SQL to get a task\n\ttry:\n\t\tdb, cursor = connect()\n\t\t\n\t\tcursor.execute(\"\"\"SELECT * FROM tasks \n\t\t\t\t\t\tWHERE id=%s\"\"\" % id)\n\n\t\ttask = cursor.fetchone()\n\n\t# Get error messages\n\texcept catch_error(), e:\n\t\tprint \"Error %d: %s\" % (e.args[0],e.args[1])\n\n\t# Close connection\n\tfinally:\n\t\tif db:\n\t\t\tdb.close()\n\n\treturn task", "def view_task(self, task_id):\n api_url = self.server_url + self.METHOD_VIEW_TASK + str(task_id)\n\n request = Request(api_url)\n\n log.info(\"Request to \" + api_url)\n try:\n response = request.get()\n except HTTPError, e:\n log.error(\"Error in view_task: \" + str(e))\n raise CuckooError(str(e))\n except ConnectionError, e:\n log.error(\"Error in view_task: \" + str(e))\n raise CuckooError(str(e))\n\n log.info(\"Response: \" + str(response))\n\n return response", "def db_get_task(task_id):\n sql = \"SELECT * FROM {} WHERE id=?\".format(TABLE_NAME)\n return db_query(sql, (task_id,), True)", "def _retrieve(cls, connection, uuid):\n resp = connection._get(get_url('task update', uuid=uuid))\n if resp.status_code == 404:\n raise MissingTaskException(resp.json()['message'])\n raise_on_error(resp)\n return Task.from_json(connection, resp.json())", "def get_task(self, key: str) -> Task:\n raise NotImplementedError", "def get(self, id):\n result_task = AsyncResult(id = id, app = backapp)\n state = result_task.state\n\n # tasks finished so result exists\n if state == states.SUCCESS:\n return { 'id': result_task.task_id, 'status': state, 'result': result_task.get(timeout=1.0)}, 200\n # task still pending or unknown - so result do not exists\n elif state == states.PENDING:\n return { 'id': result_task.task_id, 'status': state }, 404\n # task started but result do not exists yet\n elif state == states.STARTED:\n return { 'id': result_task.task_id, 'status': state }, 404\n else:\n return error(result_task)", "def test_get_task(self):\n resp = self.app.get('/api/2/inf/esrs',\n headers={'X-Auth': self.token})\n\n task_id = 
resp.json['content']['task-id']\n expected = 'asdf-asdf-asdf'\n\n self.assertEqual(task_id, expected)", "def _task_get(context, task_id, session=None, force_show_deleted=False):\n session = session or get_session()\n query = session.query(models.Task).options(\n sa_orm.joinedload(models.Task.info)\n ).filter_by(id=task_id)\n\n if not force_show_deleted and not context.can_see_deleted:\n query = query.filter_by(deleted=False)\n try:\n task_ref = query.one()\n except sa_orm.exc.NoResultFound:\n LOG.debug(\"No task found with ID %s\", task_id)\n raise exception.TaskNotFound(task_id=task_id)\n\n # Make sure the task is visible\n if not _is_task_visible(context, task_ref):\n msg = \"Forbidding request, task %s is not visible\" % task_id\n LOG.debug(msg)\n raise exception.Forbidden(msg)\n\n return task_ref", "def on_get(self, req, resp, task_id):\n task_result = AsyncResult(task_id)\n result = {'status': task_result.status, 'result': task_result.result}\n resp.status = falcon.HTTP_200\n resp.body = json.dumps(result)", "def get_task(id):\n\n task = mycelery.AsyncResult(id)\n\n if task and task.info:\n return jsonify({\n 'id': task.id,\n 'name': task.info['name'],\n 'total': task.info['total'],\n 'current': task.info['current'],\n 'complete': task.info['complete'],\n 'errors': task.info['errors'],\n 'errors_count': task.info['errors_count'],\n 'status': task.info['status']\n })\n\n return 'Não existem dados', 400", "def get_task(self, id):\n\n collection = self._get_collection()\n\n item = collection.find_one({\"_id\": ObjectId(id)})\n\n if item:\n return _mongo_item_to_task(item)\n else:\n return None", "def get_task(task_id):\n try:\n return Task.objects.get(id=task_id)\n except ObjectDoesNotExist:\n raise ObjectDoesNotFound(\n 'There is no task with id={}.'.format(task_id))", "def getNodeTaskByUPID(self,node,upid):\n data = self.connect('get','nodes/%s/tasks/%s' % (node,upid),None)\n return data", "def get(self, subresource, **kwargs):\n return getattr(RESTTask, subresource)(self, **kwargs)", "def get_task_by_tid(self, tid):\n return self.task_controller.get_task(tid)", "def on_get(self, req, resp, task_id):\n task = celery_app.AsyncResult(task_id)\n\n resp.body = json.dumps(\n {'status': task.status, 'result': str(task.result)})\n resp.status = falcon.HTTP_200", "def _get_task(self, task_id):\n if not task_id:\n return None\n task = objects.Transaction.get_by_uid(task_id, fail_if_not_found=False)\n if task and task.cluster_id == self.cluster.id:\n return task\n return False", "def on_get(self, req, resp, task_id):\n try:\n builddata = req.get_param_as_bool('builddata')\n subtask_errors = req.get_param_as_bool('subtaskerrors')\n try:\n layers = int(req.params.get('layers', '0'))\n except Exception:\n layers = 0\n\n first_task = self.get_task(req, resp, task_id, builddata)\n\n if first_task is None:\n self.info(req.context, \"Task %s does not exist\" % task_id)\n self.return_error(resp,\n falcon.HTTP_404,\n message=\"Task %s does not exist\" % task_id,\n retry=False)\n else:\n # If layers is passed in then it returns a dict of tasks instead of the task dict.\n if layers:\n resp_data, errors = self.handle_layers(\n req, resp, task_id, builddata, subtask_errors, layers,\n first_task)\n # Includes subtask_errors if the query param 'subtaskerrors' is passed in as true.\n if (subtask_errors):\n resp_data['subtask_errors'] = errors\n else:\n resp_data = first_task\n # Includes subtask_errors if the query param 'subtaskerrors' is passed in as true.\n if (subtask_errors):\n _, errors = 
self.handle_layers(req, resp, task_id,\n False, subtask_errors,\n 1, first_task)\n resp_data['subtask_errors'] = errors\n\n resp.text = json.dumps(resp_data)\n resp.status = falcon.HTTP_200\n except Exception as ex:\n self.error(req.context, \"Unknown error: %s\" % (str(ex)))\n self.return_error(resp,\n falcon.HTTP_500,\n message=\"Unknown error\",\n retry=False)", "def task_get(context, task_id, session=None, force_show_deleted=False):\n task_ref = _task_get(context, task_id, session=session,\n force_show_deleted=force_show_deleted)\n return _task_format(task_ref, task_ref.info)", "def __getitem__(self, txid: int) -> asyncio.Task:\n return self._tasks[txid]", "def get(self, name, task):\n assert name, \"Must input a valid dataset name.\"\n assert task, \"Must input a valid task name.\"\n self._assert_dataset_exists_in_cache(name)\n self._assert_task_exists_in_dataset_in_cache(name, task)\n return self.manager.data[\"dataset\"][name][\"tasks\"][task]", "def get(self, task_id=None):\n if task_id:\n item = self.find(task_id)\n self.queue.remove(item)\n else:\n item = self.queue.get()\n return item", "def get_task(self,\n task_label=None,\n notebook_cell_text=None,\n print_return=True):\n\n self._print('Getting task {} ...'.format(task_label))\n\n if task_label:\n task = {task_label: self._tasks[task_label]}\n\n elif notebook_cell_text:\n task = self._load_task_from_notebook_cell(notebook_cell_text)\n\n else:\n raise ValueError(\n 'Get an existing task by querying for its ID or register a '\n 'task from a notebook cell.')\n\n if print_return: # For communicating with JavaScript\n print(dumps(task))\n return task", "def __get_task(self, task_id):\r\n if task_id not in self.__tasks:\r\n self.__tasks[task_id] = Task(task_id)\r\n return self.__tasks[task_id]", "def __get_task(self, task_id):\r\n if task_id not in self.__tasks:\r\n self.__tasks[task_id] = Task(task_id)\r\n return self.__tasks[task_id]", "async def task_detail(request, job_id=None, task_name=None):\n jobs = dagobah._serialize().get('jobs', {})\n job = [job for job in jobs if str(job['job_id']) == job_id][0]\n return template('task_detail.html',\n job=job,\n task_name=task_name,\n task=[task for task in job['tasks']\n if task['name'] == task_name][0])", "def test_get_task_success(self):\n task_id = util.MOCK_UUID_1\n\n rv = TEST_CLIENT.get(f\"/tasks/{task_id}\")\n result = rv.json()\n\n expected = util.MOCK_TASK_1\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)", "def task(self):\n return self._task", "def task(self):\n return self._task", "def task(self):\n return self._task", "def task(self):\n return self._task", "def task(self):\n return self._task", "def get_task(self, u_name):\n raise NotImplementedError()", "def get_tasks(self, task_id=None):\n # Recover all config from OpenVAS\n if task_id:\n return self.make_xml_request('<get_tasks id=\"%s\"/>' % name, xml_result=True)\n else:\n return self.make_xml_request(\"<get_tasks />\", xml_result=True)", "def get(self, id):\n result_task = AsyncResult(id = id, app = backapp)\n state = result_task.state\n\n if state == states.STARTED:\n return { 'id':result_task.task_id, 'status': state }, 200\n # task still pending or unknown\n elif state == states.PENDING:\n return { 'id':result_task.task_id, 'status': state }, 200\n elif state == states.SUCCESS:\n return { 'id':result_task.task_id, 'status': state }, 303, {'Location': api.url_for(MathJobResult,id=result_task.task_id)}\n else:\n return error(result_task)", "def get(self, request):\n feedback = {\n 
'permission': True\n }\n\n try:\n task_id = request.GET.get('task_id', None)\n if task_id is None:\n feedback['data'] = ErrorCode.parameter_missing('task_id')\n raise natrix_exception.ParameterMissingException(parameter='task_id')\n try:\n uuid.UUID(hex=task_id)\n except ValueError:\n feedback['data'] = ErrorCode.parameter_invalid('task_id', reason=u'must be a UUID')\n raise natrix_exception.ParameterInvalidException(parameter='task_id')\n try:\n task = Task.objects.get(id=task_id, time_type='instant')\n # response_count = success + wrong\n res = command_dispatcher.get_task_data(task.id)\n success = len(res.get('success'))\n wrong = len(res.get('error'))\n response_count = success + wrong\n\n time_delta = timezone.now() - task.create_time\n\n if task.status and ( response_count == task.terminal_count or time_delta.seconds > 120):\n task.status = False\n task.result_snapshot = json.dumps(res)\n task.save()\n\n feedback['data'] = {\n 'code': 200,\n 'message': 'Instant Task Status',\n 'info': {\n 'finished': not task.status,\n 'total': task.terminal_count,\n 'responses': response_count,\n 'success': success,\n 'wrong': wrong\n }\n }\n\n except Task.DoesNotExist:\n feedback['data'] = ErrorCode.parameter_invalid(\n 'task_id', reason=u'Can not retrieve Instant Task: {}'.format(task_id))\n raise natrix_exception.ParameterInvalidException(parameter='task_id')\n\n except natrix_exception.NatrixBaseException as e:\n logger.error(e.get_log())\n\n return JsonResponse(data=feedback)", "def get(self, task_id, session=None):\n try:\n task = session.query(db.StatusTask).filter(db.StatusTask.id == task_id).one()\n except NoResultFound:\n raise NotFoundError('task status with id %d not found' % task_id)\n\n args = tasks_parser.parse_args()\n include_execution = args.get('include_execution')\n\n st_task = task.to_dict()\n if include_execution:\n execution = task.executions.order_by(db.TaskExecution.start.desc()).first()\n st_task['last_execution'] = execution.to_dict() if execution else {}\n return jsonify(st_task)", "def _get_task(self, task):\n try:\n return TASKS[task]\n except KeyError:\n raise ValueError(\"task %s \"\n \"is not supported. 
\" % task)", "def get_task(self, key_task):\n task = None\n scanned_tasks = []\n\n with open(self.path_to_task_file, 'r') as file:\n for line in file:\n current_task = Task()\n current_task.load(line)\n\n if current_task.key == key_task:\n task = current_task\n else:\n scanned_tasks.append(line)\n\n self.check_time(task)\n self.save_scanned_tasks(scanned_tasks) # return unsuccessful tasks in file\n return task", "def taskdetail_get(td_id):\n return IMPL.taskdetail_get(td_id)", "def __call__(self, task):\n self.put(task)\n return self.get()", "async def get(self):\n\n pass", "def get_task(self, id=None, name=None):\n query = \"SELECT * FROM tangerine WHERE \"\n if id: query += \"id='\"+str(id)+\"'\"\n elif name: query += \"name='\"+name+\"' AND parent_job IS NULL\"\n else: return None\n \n cur = self.conn.cursor()\n cur.execute(query + \";\")\n self.conn.commit()\n task = cur.fetchone()\n \n if task:\n return Task(self.columns, task);\n else:\n return None", "def show(id, json):\n\n kargs={'host': c.cfg['host'], \"api_version\": c.cfg['api_version'], \"url_path\": \"/tasks\"}\n task = estask.Task(kargs)\n try:\n dict_resp= task.show(id)\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))\n\n if dict_resp == None:\n click.echo(\"fail to get task list\")\n sys.exit(1)\n\n if json:\n print(jsn.dumps(dict_resp, sort_keys=True, indent=4))\n return\n\n try:\n task.print_show(dict_resp)\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))", "def get(self, dnzo_user):\n from tasks_data.models import Task\n \n updated_since = self.request.get('updated_since', None)\n if updated_since:\n from util.human_time import parse_datetime\n updated_since = parse_datetime(updated_since)\n if not updated_since:\n self.bad_request(\"Could not parse supplied date.\")\n return\n \n task_list_key = self.request.get('task_list', None) \n task_list = None\n if task_list_key:\n from tasks_data.task_lists import get_task_list\n task_list = get_task_list(dnzo_user, task_list_key)\n if not task_list:\n self.bad_request(\"Could not find task_list with key '%s'.\" % task_list_key)\n return\n \n if not (task_list or updated_since):\n self.bad_request(\"Must supply task_list or updated_since.\")\n return\n\n from tasks_data.tasks import get_tasks\n tasks = get_tasks(dnzo_user, updated_since=updated_since, task_list=task_list)\n \n data = { 'tasks': map(lambda t: t.to_dict(), tasks) }\n \n self.json_response(**data)", "def get_task_by_id(task_id):\n result = mongo.db.tasks.find({\"_id\": ObjectId(task_id)})\n return json_util.dumps(result)", "def get(self, controller, data, *args, **kwargs): \n task_manager = controller.get_task_manager()\n res = task_manager.get_all_tasks(details=True)\n resp = {\n u'task-instances':res,\n u'count':len(res)\n } \n return resp", "def url(self):\n endpoint = 'taskinfo?taskID=%d' % self.id\n return posixpath.join(self.connection.weburl, endpoint)", "async def get(self):\n raise NotImplementedError()", "def get(self):\n try:\n task = self.async_vis.get()\n except (queue.Empty, IndexError):\n raise IndexError(\"Results are not available yet.\")\n\n return task", "def get_task_by_id(self, task_id):\n return self._gdb_interface.get_task_by_id(task_id)", "def _task_info_get(context, task_id, session=None):\n session = session or get_session()\n query = session.query(models.TaskInfo)\n query = query.filter_by(task_id=task_id)\n try:\n task_info_ref = query.one()\n except sa_orm.exc.NoResultFound:\n LOG.debug(\"TaskInfo was not found for task with id %(task_id)s\",\n {'task_id': task_id})\n 
task_info_ref = None\n\n return task_info_ref", "def get(self, request):\n feedback = {\n 'permission': True\n }\n try:\n task_id = request.GET.get('task_id', None)\n if task_id is None:\n feedback['data'] = ErrorCode.parameter_missing('task_id')\n raise natrix_exception.ParameterMissingException(parameter='task_id')\n try:\n uuid.UUID(hex=task_id)\n task = Task.objects.get(id=task_id, time_type='instant')\n\n serializer = task_serializer.InstantTaskSerializer(instance=task)\n feedback['data'] = {\n 'code': 200,\n 'message': u'Instant Task Info!',\n 'info': serializer.data\n }\n except ValueError:\n feedback['data'] = ErrorCode.parameter_invalid('task_id', reason=u'must be a UUID')\n raise natrix_exception.ParameterInvalidException(parameter='task_id')\n except Task.DoesNotExist:\n feedback['data'] = ErrorCode.parameter_invalid(\n 'task_id', reason=u'Can not retrieve Instant Task: {}'.format(task_id))\n raise natrix_exception.ParameterInvalidException(parameter='task_id')\n except natrix_exception.NatrixBaseException as e:\n logger.error(e.get_log())\n feedback['data'] = ErrorCode.sp_code_bug('Serializer error: {}'.format(e.get_log()))\n except Exception as e:\n logger.error(e)\n feedback['data'] = ErrorCode.sp_code_bug('Unknow error: {}'.format(e))\n\n except natrix_exception.NatrixBaseException as e:\n logger.info(e.get_log())\n\n return JsonResponse(data=feedback)", "def get_task_from_id(task_id, no_exit=False):\n\n try:\n return Task.from_id(task_id)\n except Exception as e:\n logger.warning(snakesay(str(e)))\n if not no_exit:\n sys.exit(1)", "def get_task(*, dag_id: str, task_id: str) -> APIResponse:\n dag: DAG = get_airflow_app().dag_bag.get_dag(dag_id)\n if not dag:\n raise NotFound(\"DAG not found\")\n\n try:\n task = dag.get_task(task_id=task_id)\n except TaskNotFound:\n raise NotFound(\"Task not found\")\n return task_schema.dump(task)", "def get_by_name(task_name):\n return tasks.find_one({'name': task_name})", "def on_get(self, req, resp):\n try:\n task_model_list = self.state_manager.get_tasks()\n task_list = [x.to_dict() for x in task_model_list]\n resp.text = json.dumps(task_list)\n resp.status = falcon.HTTP_200\n except Exception as ex:\n self.error(\n req.context,\n \"Unknown error: %s\\n%s\" % (str(ex), traceback.format_exc()))\n self.return_error(resp,\n falcon.HTTP_500,\n message=\"Unknown error\",\n retry=False)", "def get_task_by_id(self, task_id):\n for task in self.tasks:\n if task.id == task_id:\n logger.debug(\"Returning task with ID '%s': '%s'\", task_id, task.to_xml_string())\n return task\n raise ValueError(\"A step task with the ID {} can not be found.\".format(task_id))", "def test_get(self):\n task_types = [1, 2]\n\n for task_type in task_types:\n self.john_gamer.tasks.start(task_type)\n\n self.client.force_login(self.john)\n resp = self.client.get(self.URL)\n\n self.assertListEqual(\n resp.json(),\n ['Type: 1, time left: 42s', 'Type: 2, time left: 42s'],\n \"Gamer can't get list of task via API!\"\n )", "def task(self) -> base_model.BaseTask:\n return self._task", "def get_tasks(self, *args, **kwargs):\n tasks_endpoint = furl(self.ENDPOINT) / self.id / \"tasks\"\n return self._client.list(Task, endpoint=tasks_endpoint.url, *args, **kwargs)", "def list(ctx, id, json):\n\n kargs={'host': c.cfg['host'], \"api_version\": c.cfg['api_version'], \"url_path\": \"/tasks\"}\n if id != None:\n return ctx.invoke(show, id=id, json=json)\n\n task = estask.Task(kargs)\n try:\n dict_resp= task.list()\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))\n\n if 
dict_resp == None:\n click.echo(\"Fail: error response\")\n sys.exit(1)\n\n if json:\n print(jsn.dumps(dict_resp, sort_keys=True, indent=4))\n return\n try:\n task.print_list(dict_resp)\n except Exception as e:\n sys.exit(\"Fail: %s\" %str(e))", "def get_tasks(id):\n url = 'https://jsonplaceholder.typicode.com/'\n tasks = requests.get(url + 'todos', params={'userId': id}).json()\n return tasks", "def get(self):\n try:\n return self.url_queue.get(timeout=self.timeout)\n except Exception as e:\n print(e)\n return None", "def get_task_by_id(self, id):\n task_table = Table('task', self.metadata, autoload=True)\n try:\n parent_task = self.session.query(task_table).filter(task_table.c.id==id).one()\n task = parent_task._asdict()\n return task\n except Exception as e:\n logger.info(f\"Error retrieving task {id}: {e}\")\n return False", "async def get_task_status(task_id: TaskId):", "def run_task(self) -> Task:", "def retrieve(credentials, task_arn, num_retries=30, interval=1, verbose=False):\n try:\n awsbraket_session = AWSBraket()\n if verbose:\n print(\"- Authenticating...\")\n if credentials is not None:\n print(f\"AWS credentials: {credentials['AWS_ACCESS_KEY_ID']}, {credentials['AWS_SECRET_KEY']}\")\n awsbraket_session.authenticate(credentials=credentials)\n res = awsbraket_session.get_result(task_arn, num_retries=num_retries, interval=interval, verbose=verbose)\n return res\n except botocore.exceptions.ClientError as error:\n error_code = error.response['Error']['Code']\n if error_code == 'ResourceNotFoundException':\n print(\"- Unable to locate the job with Arn \", task_arn)\n print(error, error_code)\n raise", "def get(self, **kwargs):\n clone = self.filter(**kwargs)\n num = len(clone)\n if num == 1:\n return clone._result_cache[0]\n if not num:\n raise Task.DoesNotExist(\n 'Task matching query does not exist. '\n 'Lookup parameters were {0}'.format(kwargs))\n raise ValueError(\n 'get() returned more than one Task -- it returned {0}! 
'\n 'Lookup parameters were {1}'.format(num, kwargs))", "def task(self, name):\n if name not in self._tasks:\n raise TaskNotFoundError\n\n return self._tasks[name]", "def find(self, task_id):\n for task_obj in self._queue:\n if task_obj.id.startswith(task_id):\n return task_obj\n\n raise LookupError(\"No such task in dorm: '{}'\".format(task_id))", "async def peek(self, task_id):\n\n args = (task_id,)\n res = await self.conn.call(self.__funcs['peek'], args)\n return self._create_task(res.body)", "def get(self):\n\n return task_service.get_tasks()", "def tasks_rpc():\n # First check that this is a legitimate request from the coordinator\n authenticate_coordinator()\n action, task_id, release_id = validate_action(request.get_json(force=True))\n # Call into action\n return ROUTES[action](task_id, release_id)", "def check_task_id(id):\n\n\t# Open connection and execute SQL to get a task\n\ttry:\n\t\tdb, cursor = connect()\n\t\t\n\t\tcursor.execute(\"\"\"SELECT id FROM tasks \n\t\t\t\t\t\tWHERE id=%s\"\"\" % id)\n\n\t\ttask = cursor.fetchone()\n\n\t# Get error messages\n\texcept catch_error(), e:\n\t\tprint \"Error %d: %s\" % (e.args[0],e.args[1])\n\n\t# Close connection\n\tfinally:\n\t\tif db:\n\t\t\tdb.close()\n\n\treturn task", "def get(self, dnzo_user, task_list):\n self.json_response(task_list=task_list.to_dict())", "def command(task_id, tail, wip, limit):\n if task_id:\n task = storage.get_by_id(task_id)\n\n if not task:\n click.echo(f\"Task {task_id} not found.\")\n sys.exit(1)\n\n tasks = [task]\n else:\n tasks = storage.all(limit=limit, reverse=tail, wip=wip)\n\n print_header()\n for task in tasks:\n show_task(task)", "def get(self, task_name):\n try:\n return self._registry[task_name]\n except KeyError:\n raise RuntimeError('Task {} is not registered'.format(task_name))", "def task_detail(request, pk):\n try:\n task = Task.objects.get(pk=pk)\n except Task.DoesNotExist:\n return Response(status=status.HTTP_404_NOT_FOUND)\n\n if request.method == 'GET':\n serializer = TaskSerializer(task)\n return Response(serializer.data)\n\n elif request.method == 'PUT':\n serializer = TaskSerializer(task, data=request.DATA)\n if serializer.is_valid():\n serializer.save()\n # returning the serializer data after saving it to the database\n return Response(serializer.data)\n\n else:\n # there were some validation errors with the data\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n elif request.method == 'DELETE':\n # recall we already have the task present\n task.delete()\n return Response(status=status.HTTP_204_NO_CONTENT)" ]
[ "0.79089105", "0.77541333", "0.7590145", "0.73984325", "0.7325193", "0.73121446", "0.72696733", "0.7252774", "0.7249014", "0.72433484", "0.7215614", "0.7204756", "0.71764857", "0.7148466", "0.7100719", "0.7094087", "0.70633936", "0.7023238", "0.69586605", "0.6954439", "0.6948575", "0.69371295", "0.69323504", "0.6899589", "0.68850595", "0.6856096", "0.6846297", "0.6843231", "0.68242836", "0.67986196", "0.6792396", "0.6772723", "0.6738251", "0.6734039", "0.6731918", "0.6683079", "0.6616057", "0.6604916", "0.660352", "0.6597641", "0.659267", "0.6584005", "0.65818715", "0.65728575", "0.6571994", "0.65602756", "0.65602756", "0.6558526", "0.65528166", "0.65317744", "0.65317744", "0.65317744", "0.65317744", "0.65317744", "0.65013665", "0.64927423", "0.64835525", "0.6461509", "0.6447385", "0.64382887", "0.64328665", "0.64308006", "0.64003223", "0.6389436", "0.6382611", "0.63760406", "0.63619626", "0.62818646", "0.6280852", "0.62700474", "0.62690157", "0.6252922", "0.6232083", "0.62279844", "0.622451", "0.6224421", "0.62216157", "0.6214332", "0.6213212", "0.6201362", "0.61728984", "0.6161654", "0.61489195", "0.6133661", "0.6132197", "0.6131398", "0.61241674", "0.61110413", "0.60882103", "0.60869426", "0.60822666", "0.60654694", "0.60622", "0.60511935", "0.6049321", "0.6045801", "0.60337085", "0.6020683", "0.6012959", "0.5984451", "0.5983601" ]
0.0
-1
Create a task from JSON
def from_payload(cls, payload): return cls(**payload)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def from_json(json):\n\n if \"description\" not in json or \"tags\" not in json or \"files\" not in json:\n raise InvalidJSONException(\"Task must have a description, a list of tags and a list of files\")\n\n if type(json[\"description\"]) != str:\n raise InvalidJSONException(\"Description must be a string\")\n\n if type(json[\"tags\"]) != list:\n raise InvalidJSONException(\"Tags must be a list of Tags\")\n\n for i, tag in enumerate(json[\"tags\"]):\n json[\"tags\"][i] = Tag.from_json(tag)\n\n if type(json[\"files\"]) != list:\n raise InvalidJSONException(\"Files must be a list of Files\")\n\n for i, file in enumerate(json[\"files\"]):\n json[\"files\"][i] = File.from_json(file)\n\n return Task(json[\"description\"], json[\"tags\"], json[\"files\"])", "def from_json(cls, connection, json_task, is_summary=False):\n if 'instanceCount' in json_task:\n instancecount_or_range = json_task['instanceCount']\n else:\n instancecount_or_range = json_task['advancedRanges']\n new_task = cls(connection,\n json_task['name'],\n json_task.get('profile') or json_task.get('pooluuid'),\n instancecount_or_range)\n new_task._update(json_task)\n new_task._is_summary = is_summary\n return new_task", "def FromJson(as_json):\n if _is_string(as_json):\n #we assume it's either a json string or a dictionary\n as_json = json.loads(as_json)\n return Task.FromDictionary(as_json)", "def from_json(cls, json_data: Mapping[str, Any]) -> 'UploadTask':\n start_index: int = json_data.get('start_index', 0)\n batch_size: int = json_data.get('batch_size', 0)\n timestamp: str = json_data.get('timestamp', '0')\n return cls(\n start_index=start_index, batch_size=batch_size, timestamp=timestamp)", "def load_and_process_json(self, format_fn: Formatter):\n required_keys = [\n \"canary\",\n \"name\",\n \"description\",\n \"keywords\",\n ]\n subtask_keys = [\n \"metrics\",\n \"examples\",\n \"preferred_score\",\n ]\n\n if isinstance(self.task_data, str):\n json_file = self.task_data\n json_dir = os.path.split(os.path.abspath(os.path.expanduser(json_file)))[0]\n\n with tf.io.gfile.GFile(self.task_data, \"rb\") as file:\n self.task_data = json.load(file)\n\n if \"examples\" not in self.task_data:\n subtasks = tf.io.gfile.glob(os.path.join(json_dir, \"*\", \"task.json\"))\n if not subtasks:\n raise ValueError(\n f\"task {json_file} must have either examples or subtasks\"\n )\n\n self.task_data[\"subtasks\"] = []\n for t in subtasks:\n with tf.io.gfile.GFile(t, \"r\") as f:\n self.task_data[\"subtasks\"].append(json.load(f))\n\n elif isinstance(self.task_data, dict):\n self.task_data = _sanitize_task_data(self.task_data)\n else:\n raise ValueError(\"Either json path or dict has to be provided.\")\n\n if \"name\" not in self.task_data:\n raise ValueError(\"task must have a name\")\n\n self.absolute_normalization = self.task_data.get(\n \"absolute_normalization\", False\n )\n\n # unicode causing errors in some environments\n task_name = self.task_data[\"name\"].encode(\"ascii\", errors=\"replace\").decode()\n\n if self.parent_task:\n self.name = f\"{self.parent_task.name}:{task_name}\"\n else:\n self.name = task_name\n\n for key in required_keys:\n if key not in self.task_data.keys():\n raise ValueError(f\"{self.name}: Task needs a \" + key + \" field.\")\n\n subtask_max_examples = None\n\n if self.max_examples:\n num_subtasks = len(self.task_data.get(\"subtasks\", []))\n if num_subtasks:\n subtask_max_examples = self.max_examples // num_subtasks\n if subtask_max_examples < 1:\n raise ValueError(\n f\"for task {self.name}: \"\n 
f\"max_examples ({self.max_examples}) must be >= number \"\n f\"of subtasks ({num_subtasks})\"\n )\n\n self.subtasks = [\n JsonTask(\n task_data=t,\n shot_list=self.shot_list,\n verbose=self.verbose,\n format_fn=self.format_fn,\n parent_task=self,\n max_examples=subtask_max_examples,\n )\n for t in self.task_data.get(\"subtasks\", [])\n ]\n\n subtask_names = set()\n for t in self.subtasks:\n if t.name in subtask_names:\n raise ValueError(f\"in {self.name} subtask name {t.name} is duplicated\")\n subtask_names.add(t.name)\n\n if self.subtasks: # this is a container task\n subtask_details = [t.get_task_details() for t in self.subtasks]\n self.task_data[\"max_input_length\"] = max(\n [d.max_input_length_per_query for d in subtask_details]\n )\n self.task_data[\"max_queries\"] = sum(\n [d.max_queries for d in subtask_details]\n )\n return\n\n for key in subtask_keys:\n if key not in self.task_data.keys():\n raise ValueError(f\"{self.name}: Task needs a \" + key + \" field.\")\n\n self.metrics = self.task_data[\"metrics\"]\n self.generative_metrics = list(set(self.metrics) & set(GENERATIVE_METRICS))\n self.multiple_choice_metrics = list(\n set(self.metrics) & set(MULTIPLE_CHOICE_METRICS)\n )\n\n input_prefix = self.task_data.get(\"example_input_prefix\", \"\\nQ: \")\n output_prefix = self.task_data.get(\"example_output_prefix\", \"\\nA: \")\n choice_prefix = self.task_data.get(\"choice_prefix\", \"\\n choice: \")\n append_choices_to_input = self.task_data.get(\"append_choices_to_input\", True)\n self.few_shot_example_separator = self.task_data.get(\n \"few_shot_example_separator\", \"\\n\"\n )\n\n for metric in self.metrics:\n\n if metric not in GENERATIVE_METRICS + MULTIPLE_CHOICE_METRICS:\n raise ValueError(f\"Metric {metric} not supported.\")\n\n # remove duplicates, keeping example order the same\n example_set = set()\n distinct_examples = []\n for x in self.task_data[\"examples\"]:\n try:\n example_string = json.dumps(x)\n except TypeError as e:\n print(f\"example_string failure: {x}\")\n raise e\n\n if example_string in example_set:\n continue\n else:\n distinct_examples.append(x)\n example_set.add(example_string)\n\n num_examples = len(self.task_data[\"examples\"])\n num_distinct = len(distinct_examples)\n num_duplicates = num_examples - num_distinct\n if num_duplicates:\n print(\n f\"warning: {self.name} has {num_duplicates} duplicate examples \"\n f\"out of {num_examples}\"\n )\n\n max_shots = max(self.shot_list)\n if num_distinct < (max_shots + 1):\n raise ValueError(\n f\"insufficient distinct examples ({num_distinct}) for {max_shots} shots\"\n )\n\n self.task_data[\"examples\"] = distinct_examples\n\n for sample in self.task_data[\"examples\"]:\n example_keys = sample.keys()\n if \"input\" not in example_keys or not (\n \"target\" in example_keys or \"target_scores\" in example_keys\n ):\n raise ValueError(\"Examples missing a target or input field.\")\n if self.multiple_choice_metrics and \"target_scores\" not in example_keys:\n raise ValueError(\n \"Some sample does not have a target_scores field, required for multiple choice metric.\"\n )\n if self.generative_metrics and \"target\" not in example_keys:\n raise ValueError(\n \"Some sample does not have a target field, required for text-to-text metric.\"\n )\n\n self._ds = [\n format_fn(\n sample,\n input_prefix=input_prefix,\n output_prefix=output_prefix,\n choice_prefix=choice_prefix,\n rng=self.rng,\n append_choices_to_input=append_choices_to_input,\n )\n for sample in self.task_data[\"examples\"]\n ]\n\n 
self.task_data[\"max_input_length\"] = max(\n [len(re.findall(r\"\\w+\", sample[\"input\"])) for sample in self._ds]\n ) * max(self.shot_list)\n\n self.preferred_score = self.task_data[\"preferred_score\"]\n self.low_score = SCORE_RANGES[self.preferred_score][0]\n\n if self.preferred_score == \"multiple_choice_grade\":\n num_examples, random_score = 0, 0\n for sample in self.task_data[\"examples\"]:\n target_scores = sample[\"target_scores\"].values()\n random_score += sum(target_scores) / len(target_scores)\n num_examples += 1\n self.low_score = random_score / num_examples\n\n self.high_score = SCORE_RANGES[self.preferred_score][1]\n self.task_prefix = str(self.task_data.get(\"task_prefix\", \"\"))\n self.output_regex, self.stop_string = None, None\n if \"output_regex\" in self.task_data:\n self.output_regex = self.task_data[\"output_regex\"]\n if \"stop_string\" in self.task_data:\n self.stop_string = self.task_data[\"stop_string\"]\n\n if self.output_regex is None and self.stop_string is None:\n self.output_regex = _DEFAULT_REGEX\n\n for metric in self.metrics:\n if metric not in GENERATIVE_METRICS + MULTIPLE_CHOICE_METRICS:\n tf.logging.info(f\"Metric {metric} not supported. Will be ignored.\")\n\n is_present = lambda val: 1 if val else 0\n self.task_data[\"max_queries\"] = len(self.task_data[\"examples\"]) * (\n is_present(self.generative_metrics)\n + is_present(self.multiple_choice_metrics)\n )\n\n return", "def create_task():", "def create_task():\n client = RequestManager()\n client.set_method(\"POST\")\n client.set_endpoint(\"/projects/{0}/stories/{1}/tasks\".format(STORED_ID['project_id'], STORED_ID['story_id']))\n name = \"\".join(choices(string.ascii_letters, k=6))\n body = {\"description\": name}\n client.set_body(json.dumps(body))\n response = client.execute_request()\n try:\n STORED_ID['task_id'] = response.json()['id']\n except KeyError:\n LOGGER.info(response.json())", "def from_dict(cls, dikt) -> 'Task':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, dikt) -> 'Tasks':\n return util.deserialize_model(dikt, cls)", "def build(arg_dict):\n\n task_item = Task()\n\n try:\n task_item.key = arg_dict['key']\n except KeyError:\n task_item.key = None\n\n try:\n task_item.title = arg_dict['title']\n except KeyError:\n task_item.title = None\n\n try:\n task_item.notes = arg_dict['notes']\n except KeyError:\n task_item.notes = None\n\n return task_item", "def FromJson(as_json):\n if _is_string(as_json):\n #we assume it's either a json string or a dictionary\n parsed = json.loads(as_json)\n else:\n parsed = as_json\n\n #labels\n if \"labels\" in parsed:\n parsed[\"labels\"] = set(parsed[\"labels\"]) # it comes as list in JSON\n\n if \"tasks\" in parsed:\n json_tasks = parsed[\"tasks\"]\n parsed[\"tasks\"] = []\n for json_task in json_tasks:\n the_task = Task.FromDictionary(json_task)\n parsed[\"tasks\"].append(the_task)\n\n story = Story()\n story.__dict__ = parsed\n return story", "def new_task():\n req = request.json\n if 'cmd' in req:\n id = mongo.db.tasks.insert({\n 'cmd' : req['cmd'],\n 'status' : 'Not started'\n })\n\n response = {'id' : str(id)}\n return response", "def load(cls, from_file):\n json_str = gitrepo.read_task(from_file)\n task_dict = json.loads(json_str)\n return cls(**task_dict)", "def create(self):\n\n # Validate Inputs\n create_dict = {\n \"model_id\": self.model.id,\n }\n\n try:\n # Create Task\n self.spinner.start()\n task_obj = self.dal.task.create(Task(create_dict))\n finally:\n self.spinner.stop()\n return task_obj", "def 
fusion_api_create_task(self, body, api=None, headers=None):\n return self.task.create(body, api, headers)", "def create(self, validated_data):\n return Task.objects.create(**validated_data)", "def test_case_2(self):\n with open(f'{TEST_DATA_DIR}/r1.json') as file:\n data = json.load(file)\n self.assertIsInstance(data, dict)\n\n task_1 = Task.new(data=data)\n\n self.assertIsInstance(task_1, Task)\n self.assertEqual(task_1.template_type, 'ios_base_node')\n self.assertIsInstance(task_1._schema, Schema)\n self.assertTrue(\".j2\" in task_1._template_file_name)\n self.assertFalse(task_1.is_complete)", "def post(self):\n try:\n req = api.payload\n result = create_task(\n get_db(),\n req[\"task\"],\n date.fromisoformat(req[\"due_by\"]),\n Status[req[\"status\"]],\n )\n return task_to_dict(result), 201\n except ValueError:\n api.abort(422, \"Invalid request parameters\")", "def from_dict(cls, dikt) -> 'TaskModel':\n return util.deserialize_model(dikt, cls)", "def _create_task(self, body, *, task_cls=Task):\n return task_cls(self, body)", "def create_task(text):\n new_task = Tasks(task_text=text) \n new_task.save()", "def load(cls, from_file):\n with open(from_file) as infile:\n task_list = json.loads(infile.read())\n\n queue_obj = cls()\n for task_id in task_list:\n queue_obj.put(TaskInfo.from_id(task_id))\n\n return queue_obj", "def create_task(self, name, value):\n pass", "def tasks_create(self, name, labels, bug, resource_type, resources, image_quality, frame_filter, **kwargs):\n url = self.api.tasks\n data = {'name': name,\n 'labels': labels,\n 'bug_tracker': bug,\n 'image_quality': image_quality,\n 'frame_filter': frame_filter\n }\n response = self.session.post(url, json=data)\n response.raise_for_status()\n response_json = response.json()\n log.info('Created task ID: {id} NAME: {name}'.format(**response_json))\n log.info(str(response.json()))\n self.tasks_data(response_json['id'], resource_type, resources)", "def task_prepare_nodes(self, req, resp, json_data):\n action = json_data.get('action', None)\n\n if action != 'prepare_nodes':\n self.error(\n req.context,\n \"Task body ended up in wrong handler: action %s in task_prepare_nodes\"\n % action)\n self.return_error(resp,\n falcon.HTTP_500,\n message=\"Error\",\n retry=False)\n\n try:\n task = self.create_task(json_data, req.context)\n resp.text = json.dumps(task.to_dict())\n resp.append_header('Location',\n \"/api/v1.0/tasks/%s\" % str(task.task_id))\n resp.status = falcon.HTTP_201\n except errors.InvalidFormat as ex:\n self.error(req.context, ex.msg)\n self.return_error(resp,\n falcon.HTTP_400,\n message=ex.msg,\n retry=False)", "def test_anonymous_01_newtask(self):\r\n # Del previous TaskRuns\r\n self.create()\r\n self.del_task_runs()\r\n\r\n res = self.app.get('api/app/1/newtask')\r\n print res.data\r\n data = json.loads(res.data)\r\n assert data['info'], data", "def get_task_from_request_form(request):\n json_data = request.get_json()\n # Required fields\n if \"title\" not in json_data:\n raise ValueError(\"Required field is missing\")\n if \"reference\" not in json_data:\n raise ValueError(\"Required field is missing\")\n if \"status\" not in json_data:\n raise ValueError(\"Required field is missing\")\n\n task_from_request = {\n 'title': json_data['title'],\n 'reference': json_data['reference'],\n 'description': json_data['description'],\n 'timeWorked': [],\n 'status': json_data['status'],\n 'visible': \"visible\" in json_data\n }\n\n return task_from_request", "def generate_tasks(self, task):", "def _mongo_item_to_task(item):\n 
return Task(\n id=str(item[\"_id\"]),\n task=item[\"task\"],\n args=item[\"args\"],\n kwargs=item[\"kwargs\"],\n wait=item[\"wait\"],\n recurring=item[\"recurring\"],\n when=item[\"when\"],\n )", "def unserialize(\n data: Union[str, bytes], backend: Optional[\"KartonBackend\"] = None\n ) -> \"Task\":\n if not isinstance(data, str):\n data = data.decode(\"utf8\")\n\n task_data = json.loads(data)\n\n task = Task(task_data[\"headers\"])\n task.uid = task_data[\"uid\"]\n task.root_uid = task_data[\"root_uid\"]\n task.parent_uid = task_data[\"parent_uid\"]\n # Compatibility with <= 3.x.x (get)\n task.orig_uid = task_data.get(\"orig_uid\", None)\n task.status = TaskState(task_data[\"status\"])\n # Compatibility with <= 3.x.x (get)\n task.error = task_data.get(\"error\")\n # Compatibility with <= 2.x.x (get)\n task.priority = (\n TaskPriority(task_data.get(\"priority\"))\n if \"priority\" in task_data\n else TaskPriority.NORMAL\n )\n task.last_update = task_data.get(\"last_update\", None)\n task.payload = task_data[\"payload\"]\n task.payload_persistent = task_data[\"payload_persistent\"]\n task.unserialize_resources(backend)\n return task", "def from_json(cls, node):\n\n story = Story()\n story.story_id = _parse_int(node, 'id')\n story.name = _parse_text(node, 'name')\n story.owned_by = _parse_text(node, 'owned_by')\n story.story_type = _parse_text(node, 'story_type')\n story.state = _parse_text(node, 'current_state')\n story.description = _parse_text(node, 'description')\n story.estimate = _parse_int(node, 'estimate')\n story.labels = _parse_array(node, 'labels')\n story.url = _parse_text(node, 'url')\n story.project_id = _parse_int(node, 'project_id')\n\n note_nodes = node.get('notes')\n if note_nodes is not None:\n for note_node in note_nodes:\n note_id = _parse_int(note_node, 'id')\n text = _parse_text(note_node, 'text')\n author = _parse_text(note_node, 'author')\n story.notes.append(Note(note_id, text, author))\n\n attachment_nodes = node.get('attachments')\n if attachment_nodes is not None:\n for attachment_node in attachment_nodes:\n attachment_id = _parse_int(attachment_node, 'id')\n description = _parse_text(attachment_node, 'text')\n url = _parse_text(attachment_node, 'url')\n story.attachments.append(Attachment(attachment_id,description,url))\n\n task_nodes = node.get('tasks')\n if task_nodes is not None:\n for task_node in task_nodes:\n task_id = _parse_int(task_node, 'id')\n description = _parse_text(task_node, 'description')\n complete = _parse_boolean(task_node, 'complete')\n story.tasks.append(Task(task_id, description, complete))\n\n\n\n return story", "def _update_tasks_from_jsons(self):\n\n tasks = {}\n\n for fn in listdir(SIMPLI_JSON_DIR):\n fp = join(SIMPLI_JSON_DIR, fn)\n\n self._print('Loading task-specifying JSON {} ...'.format(fp))\n\n with open(fp) as f:\n tasks_json = loads(reset_encoding(f.read()))\n\n # Load library path, which is common for all tasks in this JSON\n library_path = tasks_json['library_path']\n\n # Load each task\n for t in tasks_json['tasks']:\n\n # Split function path into library_name and function_name\n function_path = t['function_path']\n if '.' 
in function_path:\n split = function_path.split('.')\n library_name = '.'.join(split[:-1])\n function_name = split[-1]\n else:\n raise ValueError(\n 'function_path must be like: \\'file.function\\'.')\n\n # Task label is this task's UID; so no duplicates are allowed\n label = t.get('label',\n '{} (no task label)'.format(function_name))\n\n tasks[label] = {\n 'library_path':\n library_path,\n 'library_name':\n library_name,\n 'function_name':\n function_name,\n 'description':\n t.get('description', 'No description.'),\n 'required_args':\n self._process_args(t.get('required_args', [])),\n 'default_args':\n self._process_args(t.get('default_args', [])),\n 'optional_args':\n self._process_args(t.get('optional_args', [])),\n 'returns':\n self._process_returns(t.get('returns', [])),\n }\n\n self._print('\\tLoaded {}: {}.'.format(label, tasks[label]))\n\n self._update_tasks(tasks)\n return tasks", "def create(self, task_model):\n raise NotImplementedError()", "def msg_to_task(msg):\n if not isinstance(msg, dict):\n return None\n t = Task()\n t.args = msg[MessageBuilder.FIELD_DATA]\n t.isFault = msg[MessageBuilder.FIELD_ISF]\n t.seqNum = msg[MessageBuilder.FIELD_SEQNUM]\n t.timestamp = msg[MessageBuilder.FIELD_TIME]\n t.duration = msg[MessageBuilder.FIELD_DUR]\n t.cores = msg[MessageBuilder.FIELD_CORES] if MessageBuilder.FIELD_CORES in msg else None\n return t", "def create_from_data(\n self,\n spec: models.ITaskWriteRequest,\n resources: Sequence[StrPath],\n *,\n resource_type: ResourceType = ResourceType.LOCAL,\n data_params: Optional[Dict[str, Any]] = None,\n annotation_path: str = \"\",\n annotation_format: str = \"CVAT XML 1.1\",\n status_check_period: int = None,\n dataset_repository_url: str = \"\",\n use_lfs: bool = False,\n pbar: Optional[ProgressReporter] = None,\n ) -> Task:\n if getattr(spec, \"project_id\", None) and getattr(spec, \"labels\", None):\n raise exceptions.ApiValueError(\n \"Can't set labels to a task inside a project. \"\n \"Tasks inside a project use project's labels.\",\n [\"labels\"],\n )\n\n task = self.create(spec=spec)\n self._client.logger.info(\"Created task ID: %s NAME: %s\", task.id, task.name)\n\n task.upload_data(\n resource_type=resource_type,\n resources=resources,\n pbar=pbar,\n params=data_params,\n wait_for_completion=True,\n status_check_period=status_check_period,\n )\n\n if annotation_path:\n task.import_annotations(annotation_format, annotation_path, pbar=pbar)\n\n if dataset_repository_url:\n git.create_git_repo(\n self._client,\n task_id=task.id,\n repo_url=dataset_repository_url,\n status_check_period=status_check_period,\n use_lfs=use_lfs,\n )\n\n task.fetch()\n\n return task", "def load(cls, from_file):\n with open(from_file) as infile:\n task_list = json.loads(infile.read())\n\n stack = cls()\n for task_id in task_list:\n stack.push(TaskInfo.from_id(task_id))\n\n return stack", "def __init__(self, task_type, task):\n self.task = task\n self.task_type = task_type", "def create(task):\n tname = task.get(\"tname\")\n # user cannot create task without name\n\n # Does the new task have a name? 
If no we can't insert it.\n # Can we insert it?\n if tname is not None:\n\n # Create a person instance using the schema and the passed in person\n schema = TaskListSchema()\n print(task)\n new_task = schema.load(task, session=db.session).data\n\n # Add the person to the database\n db.session.add(new_task)\n db.session.commit()\n\n # Serialize and return the newly created person in the response\n data = schema.dump(new_task).data\n\n return data, 201\n\n # Otherwise, nope, person exists already\n else:\n abort(409, \"Task needs a name\".format(tname=tname),)", "def _update(self, json_task):\n self._name = json_task['name']\n self._shortname = json_task.get('shortname')\n self._profile = json_task['profile']\n self._pooluuid = json_task.get('pooluuid')\n self._instancecount = json_task.get('instanceCount')\n self._advanced_range = json_task.get('advancedRanges')\n\n if 'resourceDisks' in json_task and json_task['resourceDisks']:\n self._resource_objects_ids = json_task['resourceDisks']\n self._resource_type = Disk\n elif 'resourceBuckets' in json_task and json_task['resourceBuckets']:\n self._resource_objects_ids = json_task['resourceBuckets']\n self._resource_type = Bucket\n\n if len(self._resource_objects_ids) != \\\n len(self._resource_objects):\n del self._resource_objects[:]\n\n if 'resultDisk' in json_task and json_task['resultDisk']:\n self._result_object_id = json_task['resultDisk']\n self._result_type = Disk\n elif 'resultBucket' in json_task and json_task['resultBucket']:\n self._result_object_id = json_task['resultBucket']\n self._result_type = Bucket\n\n if 'status' in json_task:\n self._status = json_task['status']\n self._creation_date = _util.parse_datetime(json_task['creationDate'])\n if 'errors' in json_task:\n self._errors = [Error(d) for d in json_task['errors']]\n else:\n self._errors = []\n\n if 'constants' in json_task:\n for constant in json_task['constants']:\n self.constants[constant.get('key')] = constant.get('value')\n\n self._uuid = json_task['uuid']\n self._state = json_task['state']\n self._tags = json_task.get('tags', None)\n if 'resultsCount' in json_task:\n if self._rescount < json_task['resultsCount']:\n self._dirty = True\n self._rescount = json_task['resultsCount']\n\n if 'resultsBlacklist' in json_task:\n self._results_blacklist = json_task['resultsBlacklist']\n if 'resultsWhitelist' in json_task:\n self._results_whitelist = json_task['resultsWhitelist']\n if 'snapshotWhitelist' in json_task:\n self._snapshot_whitelist = json_task['snapshotWhitelist']\n if 'snapshotBlacklist' in json_task:\n self._snapshot_blacklist = json_task['snapshotBlacklist']\n\n if 'completedInstances' in json_task:\n self._completed_instances = [CompletedInstance(x) for x in json_task['completedInstances']]\n else:\n self._completed_instances = []", "def create_task(self, unused_parent, task, **kwargs):\n self.uri = task.get('app_engine_http_request').get('relative_uri')\n self.body = task.get('app_engine_http_request').get('body')\n logging.info('Task uri: %r', self.uri)\n logging.info('Task body: %r', self.body)\n return 'fake task'", "def _reconstruct_task(task_record, hints, requirements, inputs, outputs):\n rec = task_record[\"t\"]\n return Task(name=rec[\"name\"], base_command=rec[\"base_command\"], hints=hints,\n requirements=requirements, inputs=inputs, outputs=outputs, stdout=rec[\"stdout\"],\n stderr=rec[\"stderr\"], workflow_id=rec[\"workflow_id\"], task_id=rec[\"id\"])", "def insert_task():\n try:\n task = get_task_from_request_form(request)\n result = 
mongo.db.tasks.insert_one(task)\n return json_util.dumps(get_task_by_id(result.inserted_id))\n except Exception as err:\n abort(400)", "def createNewTasks(_id):\n job = mongo.db.jobs.find_one({'_id': _id})\n tasks = job.get('data').get('tasks')\n for task in tasks:\n data = {\n 'name': task.get('name'),\n 'datetime': now(),\n 'status': 'ready',\n 'owner': job.get('owner'),\n 'priority': job.get('priority'),\n 'is_active': True,\n 'slave': None,\n 'last_activity': now(),\n 'started_on': None,\n 'finished_on': None,\n 'paused_on': None,\n 'logs': [],\n 'ctid': None,\n 'target_info': {},\n 'cancelled_on': None,\n 'progress': 0,\n 'job': job.get('_id'),\n 'proccess':\n {\n 'command': getRenderCommand(job.get('category')),\n 'cwd': task.get('cwd'),\n 'filepath': task.get('filepath'),\n 'target': task.get('target'),\n }\n }\n newTask = mongo.db.tasks.insert(data)\n ctid = addTaskToQueue(newTask)\n #updateTaskInfo(str(task['_id']['$oid']), status='ready', ctid=str(ctid))\n job['status'] = 'ready'\n mongo.db.jobs.update({'_id': _id}, job)\n\n return", "def create_todo():\n payload = request.json\n\n todo = Todo(task=payload['task'], user_id=payload['user_id'])\n\n database.session.add(todo)\n database.session.commit()\n\n return jsonify(todo.to_dict()), 201", "def newtask(path):\n global testers\n import testmaker, yaml\n if not path.endswith('.yaml'):\n print('ignoring', path)\n return\n print('newtask',(path,))\n with open(path) as f: p = yaml.safe_load(f)\n t = testmaker.loadyaml(p)\n name = path.rsplit('/', 1)[-1].rsplit('.', 1)[0]\n testers[name] = t\n print(' -=> loaded new task', name, 'from', path)", "def derive_task(self, headers: Dict[str, Any]) -> \"Task\":\n new_task = Task(\n headers=headers,\n payload=self.payload,\n payload_persistent=self.payload_persistent,\n )\n return new_task", "def gen_task_item(self) -> Dict[str, Any]:\n return {}", "def new_task(self):\n print \"Create a new task.\"\n\n # Collect new task info from user\n description = raw_input(\"Enter task (140 characters max) > \")\n due_date = raw_input(\"Enter due date as 'year-mm-dd' (optional). > \")\n tags = raw_input(\n \"Enter tags for the task (comma separated) (optional). > \")\n tag_list = [tag.strip() for tag in tags.split(',')]\n try:\n new_task = doto.Task(self.user, description, due_date, tag_list)\n except (NameError, ValueError) as e:\n # On error, print and return.\n print \"Task not created. 
Error: \", e\n raw_input(\"Press Enter to continue.\")\n return\n self.current_collection.add(new_task)\n return", "def create_from_json(cls, config_json: str) -> 'ResolverOp':\n return cls.create(**json_utils.loads(config_json))", "def from_dict(cls, dikt) -> 'POSTExecution':\n return util.deserialize_model(dikt, cls)", "def from_dict(cls, d: dict):\n scen = None\n try:\n scen = DTScenario(d['name'])\n for t in d['tasks']:\n scen.addTask(dtTaskTypeDict['cls'][t['class']], t['parameters'])\n except KeyError:\n scen = None\n raise DTInternalError('DTScenario.fromDict()', 'Wrong dict format')\n return scen", "def post(self, request, format=None):\n feedback = {\n 'permission': True\n }\n try:\n post_data = request.data\n serializer = task_serializer.InstantTaskSerializer(data=post_data, group=self.get_group())\n if serializer.is_valid():\n task = serializer.save()\n feedback['data'] = {\n 'code': 200,\n 'message': 'Instant task creation successfully!',\n 'info': {\n 'task_id': task.pk\n }\n }\n else:\n logger.info('Instant task parameters is not available: {}'.format(serializer.format_errors()))\n feedback['data'] = ErrorCode.parameter_invalid('instant_task_creation',\n reason=serializer.format_errors())\n except natrix_exception.NatrixBaseException as e:\n feedback['data'] = ErrorCode.sp_code_bug('Create instant has a bug: {}'.format(e.get_log()))\n logger.error(e.get_log())\n except Exception as e:\n natrix_exception.natrix_traceback()\n feedback['data'] = ErrorCode.sp_db_fault(str(e))\n\n return JsonResponse(data=feedback)", "def from_dict(cls, dikt) -> 'LoanApplicationTasks':\n return util.deserialize_model(dikt, cls)", "def from_dict(self, d):\r\n options = dict(d)\r\n task_id = options['task_id']\r\n del options['task_id']\r\n return SubtaskStatus.create(task_id, **options)", "def add_task():\n # get values from user\n responses = accept_inputs([\"Task label\", \"Short task description\", \"Parent task label\"])\n # insert into db\n query_no_results(\"insert into task values(?, ?, ?)\",\n [responses[\"Task label\"], responses[\"Short task description\"], responses[\"Parent task label\"]])\n print(\"New task created\")", "def create_task(self, name, task_info=None):\n task = TaskInst(name=name).save()\n has_task_param = {\n 'super_role': SUPER_ROLE.OWNER,\n 'acceptance': ACCEPTANCE.ACCEPT\n }\n self.tasks.connect(task, has_task_param)\n start = StepInst(name='Start', node_type=NODE_TYPE.START, pos_x=-START_END_OFFSET).save()\n end = StepInst(name='End', node_type=NODE_TYPE.END, pos_x=START_END_OFFSET).save()\n task.steps.connect(start)\n task.steps.connect(end)\n task.update(task_info)\n return task", "def create_task(author, title, text, **kwargs):\n mc = MathContent(text=text)\n mc.save()\n task = Task(author=author, name=title, content=mc, **kwargs)\n task.save()\n return task", "def dict_to_task(entry):\n if not isinstance(entry, dict):\n return None\n t = Task()\n try:\n for a in vars(t):\n v_type = type(getattr(t, a))\n if entry[a] is not None:\n v = v_type(entry[a]) if v_type != bool else entry[a] == 'True'\n else:\n v = None\n setattr(t, a, v)\n return t\n except KeyError:\n return None", "def create_task(self, task_state, task_xml):\r\n\r\n tag_name = self.get_tag_name(task_xml)\r\n children = self.child_modules()\r\n task_descriptor = children['descriptors'][tag_name](self.system)\r\n task_parsed_xml = task_descriptor.definition_from_xml(etree.fromstring(task_xml), self.system)\r\n task = children['modules'][tag_name](\r\n self.system,\r\n self.location,\r\n 
task_parsed_xml,\r\n task_descriptor,\r\n self.static_data,\r\n instance_state=task_state,\r\n )\r\n return task", "def task_create(context, values, session=None):\n\n values = values.copy()\n session = session or get_session()\n with session.begin():\n task_info_values = _pop_task_info_values(values)\n\n task_ref = models.Task()\n _task_update(context, task_ref, values, session=session)\n\n _task_info_create(context,\n task_ref.id,\n task_info_values,\n session=session)\n\n return task_get(context, task_ref.id, session)", "def create(self, validated_data):\n task_data = validated_data\n title = task_data['title']\n desc = task_data['description']\n duration = task_data['duration']\n weight = task_data['weight']\n pay_type = task_data['pay_type']\n note = task_data['note']\n service = task_data['service']\n customer = task_data['customer']\n task_address = task_data['task_address']\n place = task_data['place']\n\n customer_address = validated_data.pop('customer_address')\n c_address = Address.objects.get_or_create(\n **customer_address\n )\n customer_address = c_address[0]\n\n delivery_cost = Money(task_data['delivery_cost'])\n product_cost = Money(task_data['product_cost'])\n if self.initial_data.get('currency'):\n delivery_cost.currency = self.initial_data.get('currency')\n product_cost.currency = self.initial_data.get('currency')\n\n task = Task.objects.create(\n description=desc,\n title=title,\n duration=duration,\n weight=weight,\n pay_type=pay_type,\n delivery_cost=delivery_cost,\n product_cost=product_cost,\n note=note,\n service=service,\n customer=customer,\n place=place,\n customer_address=customer_address,\n task_address=task_address\n )\n task_items = task_data[\"task_to_product\"]\n for items in task_items:\n s = TaskItem.objects.create(\n task=task,\n product=items['product'],\n quantity=items['quantity']\n )\n s.save()\n\n task.save()\n return task", "def test_create_task_with_name_success(\n self,\n mock_background_tasks\n ):\n task_name = \"task_with_arbitrary_name\"\n task_category = \"DEFAULT\"\n\n rv = TEST_CLIENT.post(\n TASK_ROUTE, json={\"name\": task_name, \"category\": task_category}\n )\n result = rv.json()\n\n expected = {\n \"arguments\": None,\n \"category\": \"DEFAULT\",\n \"commands\": None,\n \"cpuLimit\": models.task.TASK_DEFAULT_CPU_LIMIT,\n \"cpuRequest\": models.task.TASK_DEFAULT_CPU_REQUEST,\n \"createdAt\": mock.ANY,\n \"dataIn\": None,\n \"dataOut\": None,\n \"description\": None,\n \"docs\": None,\n \"hasNotebook\": True,\n \"image\": models.task.TASK_DEFAULT_EXPERIMENT_IMAGE,\n \"memoryLimit\": models.task.TASK_DEFAULT_MEMORY_LIMIT,\n \"memoryRequest\": models.task.TASK_DEFAULT_MEMORY_REQUEST,\n \"name\": task_name,\n \"parameters\": [],\n \"readinessProbeInitialDelaySeconds\": models.task.TASK_DEFAULT_READINESS_INITIAL_DELAY_SECONDS,\n \"tags\": [\"DEFAULT\"],\n \"updatedAt\": mock.ANY,\n \"uuid\": mock.ANY,\n }\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)", "def make_task(self):\n return Task()", "def create_task(project, queue, location, payload=None, in_seconds=None):\n # [START cloud_tasks_appengine_create_task]\n\n from google.cloud import tasks_v2\n from google.protobuf import timestamp_pb2\n import datetime\n import json\n\n # Create a client.\n client = tasks_v2.CloudTasksClient()\n\n # TODO(developer): Uncomment these lines and replace with your values.\n # project = 'my-project-id'\n # queue = 'my-appengine-queue'\n # location = 'us-central1'\n # payload = 'hello' or {'param': 'value'} for 
application/json\n # in_seconds = None\n\n # Construct the fully qualified queue name.\n parent = client.queue_path(project, location, queue)\n\n # Construct the request body.\n task = {\n \"app_engine_http_request\": { # Specify the type of request.\n \"http_method\": tasks_v2.HttpMethod.POST,\n \"relative_uri\": \"/example_task_handler\",\n }\n }\n if payload is not None:\n if isinstance(payload, dict):\n # Convert dict to JSON string\n payload = json.dumps(payload)\n # specify http content-type to application/json\n task[\"app_engine_http_request\"][\"headers\"] = {\n \"Content-type\": \"application/json\"\n }\n # The API expects a payload of type bytes.\n converted_payload = payload.encode()\n\n # Add the payload to the request.\n task[\"app_engine_http_request\"][\"body\"] = converted_payload\n\n if in_seconds is not None:\n # Convert \"seconds from now\" into an rfc3339 datetime string.\n d = datetime.datetime.now(tz=datetime.timezone.utc) + datetime.timedelta(\n seconds=in_seconds\n )\n\n # Create Timestamp protobuf.\n timestamp = timestamp_pb2.Timestamp()\n timestamp.FromDatetime(d)\n\n # Add the timestamp to the tasks.\n task[\"schedule_time\"] = timestamp\n\n # Use the client to build and send the task.\n response = client.create_task(parent=parent, task=task)\n\n print(f\"Created task {response.name}\")\n return response", "def create_tasks_set(self): # noqa\n task0 = self.client.post(f\"/api/v1/tasks/{self.todolist.todolist_id}\",\n json={'label': 'Task 0', 'status': 'active',\n 'priority': 'high', 'parent_id': None})\n task1 = self.client.post(f\"/api/v1/tasks/{self.todolist.todolist_id}\",\n json={'label': 'Task 1', 'status': 'active',\n 'priority': 'medium', 'parent_id': task0.json['task_id']})\n task2 = self.client.post(f\"/api/v1/tasks/{self.todolist.todolist_id}\",\n json={'label': 'Task 2', 'status': 'done',\n 'priority': 'veryhigh', 'parent_id': None})\n task3 = self.client.post(f\"/api/v1/tasks/{self.todolist.todolist_id}\",\n json={'label': 'Task 3', 'status': 'done',\n 'priority': 'high', 'parent_id': task1.json['task_id']})\n task4 = self.client.post(f\"/api/v1/tasks/{self.todolist.todolist_id}\",\n json={'label': 'Task 4', 'status': 'active',\n 'priority': 'medium', 'parent_id': task1.json['task_id']})\n self.client.post(f\"/api/v1/tasks/{self.todolist.todolist_id}\",\n json={'label': 'Task 5', 'status': 'active',\n 'priority': 'medium', 'parent_id': task1.json['task_id']})\n self.client.post(f\"/api/v1/tasks/{self.todolist.todolist_id}\",\n json={'label': 'Task 6', 'status': 'active',\n 'priority': 'high', 'parent_id': task0.json['task_id']})\n self.client.post(f\"/api/v1/tasks/{self.todolist.todolist_id}\",\n json={'label': 'Task 7', 'status': 'done',\n 'priority': 'medium', 'parent_id': task2.json['task_id']})\n self.client.post(f\"/api/v1/tasks/{self.todolist.todolist_id}\",\n json={'label': 'Task 8', 'status': 'done',\n 'priority': 'medium', 'parent_id': task3.json['task_id']})\n self.client.post(f\"/api/v1/tasks/{self.todolist.todolist_id}\",\n json={'label': 'Task 9', 'status': 'active',\n 'priority': 'medium', 'parent_id': task4.json['task_id']})\n return [task0.json, task1.json, task2.json]", "def create(cls, task_name, cfd_mesh):\n if task_name not in cls._available_tasks:\n raise KeyError(\"Invalid task name: %s\"%task_name)\n tcls = cls._available_tasks[task_name]\n obj = tcls(cfd_mesh)\n return obj", "def create(self, task_id, **options):\r\n return self(task_id, **options)", "def create(self, validated_data):\n board = validated_data.pop('board_id', 
None)\n\n group = validated_data.pop('group_id', None)\n\n task = Task.objects.create(\n board=board, **validated_data, group=group\n )\n return task", "def taskdetail_create(name, tsk, td_id=None):\n return IMPL.taskdetail_create(name, tsk, td_id)", "def create(profile, cluster, task_definition, started_by=None, count=None):\n client = boto3client.get(\"ecs\", profile)\n params = {}\n params[\"cluster\"] = cluster\n params[\"taskDefinition\"] = task_definition\n if started_by:\n params[\"startedBy\"] = started_by\n if count:\n params[\"count\"] = count\n return client.run_task(**params)", "def create(cls, course_id, task_type, task_key, task_input, requester):\r\n # create the task_id here, and pass it into celery:\r\n task_id = str(uuid4())\r\n\r\n json_task_input = json.dumps(task_input)\r\n\r\n # check length of task_input, and return an exception if it's too long:\r\n if len(json_task_input) > 255:\r\n fmt = 'Task input longer than 255: \"{input}\" for \"{task}\" of \"{course}\"'\r\n msg = fmt.format(input=json_task_input, task=task_type, course=course_id)\r\n raise ValueError(msg)\r\n\r\n # create the task, then save it:\r\n instructor_task = cls(\r\n course_id=course_id,\r\n task_type=task_type,\r\n task_id=task_id,\r\n task_key=task_key,\r\n task_input=json_task_input,\r\n task_state=QUEUING,\r\n requester=requester\r\n )\r\n instructor_task.save_now()\r\n\r\n return instructor_task", "def create_tier_from_file():\n parser = ArgumentParser(description=\"Tier JSON Descriptor\")\n if is_valid_file(parser,filename):\n f=open(filename,'r')\n json_object = json.load(f)\n\n new_tier = Tier()\n for value in json_object.values():\n for v in range(0,len(value)):\n new_tier.deployment=value[v]['deployment']['deploymentId']\n new_tier.description = value[v]['description']\n new_tier.name = value[v]['name']\n new_tier.budget = value[v]['budget']\n new_tier.minimum_servers = value[v]['minimumServers']\n new_tier.maximum_servers = value[v]['maximumServers']\n new_tier.breach_increment = value[v]['breachIncrement']\n new_tier.breach_period_in_minutes = value[v]['breachPeriodInMinutes']\n new_tier.cooldown_period_in_minutes = value[v]['cooldownPeriodInMinutes']\n new_tier.lower_cpu_threshold = value[v]['lowerCpuThreshold']\n new_tier.upper_cpu_threshold = value[v]['upperCpuThreshold']\n new_tier.lower_ram_threshold = value[v]['lowerRamThreshold']\n new_tier.upper_ram_threshold = value[v]['upperRamThreshold']\n #result=new_tier.create()\n #print new_tier.current_job", "def wmCreateObjectFromXML(self):\n inputtext = uiCommon.getAjaxArg(\"import_text\")\n inputtext = uiCommon.unpackJSON(inputtext)\n on_conflict = uiCommon.getAjaxArg(\"on_conflict\")\n\n # the trick here is to return enough information back to the client\n # to best interact with the user.\n\n # what types of things were in this backup? what are their new ids?\n items = []\n\n # parse it as a validation, and to find out what's in it.\n xd = None\n js = None\n try:\n xd = catocommon.ET.fromstring(inputtext)\n except catocommon.ET.ParseError:\n try:\n js = json.loads(inputtext)\n except:\n return json.dumps({\"error\": \"Data is not properly formatted XML or JSON.\"})\n\n if xd is not None:\n # so, what's in here? 
Tasks?\n\n # TASKS\n for xtask in xd.findall(\"task\"):\n uiCommon.log(\"Importing Task [%s]\" % xtask.get(\"name\", \"Unknown\"))\n t = task.Task()\n t.FromXML(catocommon.ET.tostring(xtask), on_conflict)\n\n # NOTE: possible TODO\n # passing a db connection to task.DBSave will allow rollback of a whole\n # batch of task additions.\n # if it's determined that's necessary here, just create a db connection here\n # and pass it in\n result, err = t.DBSave()\n if result:\n # add security log\n uiCommon.WriteObjectAddLog(catocommon.CatoObjectTypes.Task, t.ID, t.Name, \"Created by import.\")\n\n items.append({\"type\": \"task\", \"id\": t.ID, \"name\": t.Name})\n else:\n if err:\n items.append({\"type\": \"task\", \"id\": t.ID, \"name\": t.Name, \"info\": err})\n else:\n items.append({\"type\": \"task\", \"id\": t.ID, \"name\": t.Name, \"error\": \"Unable to create Task. No error available.\"})\n # otherwise it might have been JSON\n elif js is not None:\n # if js isn't a list, bail...\n if not isinstance(js, list):\n js = [js]\n\n for jstask in js:\n uiCommon.log(\"Importing Task [%s]\" % jstask.get(\"Name\", \"Unknown\"))\n t = task.Task()\n t.FromJSON(json.dumps(jstask), on_conflict)\n\n result, err = t.DBSave()\n if result:\n # add security log\n uiCommon.WriteObjectAddLog(catocommon.CatoObjectTypes.Task, t.ID, t.Name, \"Created by import.\")\n\n items.append({\"type\": \"task\", \"id\": t.ID, \"name\": t.Name})\n else:\n if err:\n items.append({\"type\": \"task\", \"id\": t.ID, \"name\": t.Name, \"info\": err})\n else:\n items.append({\"type\": \"task\", \"id\": t.ID, \"name\": t.Name, \"error\": \"Unable to create Task. No error available.\"})\n else:\n items.append({\"info\": \"Unable to create Task from backup JSON/XML.\"})\n\n # TODO: for loop for Assets will go here, same logic as above\n # ASSETS\n\n return json.dumps({\"items\": items})", "def test_case_3(self):\n with open(f'{TEST_DATA_DIR}/r1.json') as file:\n data = json.load(file)\n self.assertIsInstance(data, dict)\n\n task_1 = Task.new(data=data)\n self.assertTrue(task_1.validate())\n\n with self.assertRaises(GCGValidationError):\n task_2 = Task.new(data={'data': 'bad_data'})", "def from_json(cls, json_str: str) -> MissionStickerRequest:\n return cls.from_dict(json.loads(json_str))", "def create_item():\n\n data = request.get_json()\n title = data.get(\"title\", None)\n description = data.get(\"description\", None)\n due_date = data.get(\"due_date\", None)\n list_id = data.get(\"list_id\", None)\n\n if title is None or list_id is None:\n return abort(400, description=f\"List ID and title cannot be null!\")\n\n list_to_append = ToDoList.query.filter(ToDoList.id == list_id).first()\n\n if list_to_append is None:\n return abort(404, description=f\"List ID {list_id} does not exist!\")\n\n if due_date is not None:\n try:\n due_date = datetime.datetime.strptime(due_date, DATE_FORMAT)\n except ValueError:\n return abort(400, description=f\"Date format must be YYYY-MM-DD HH:MM\")\n\n new_item = Task(\n title=title,\n description=description,\n status=\"pending\",\n due_date=due_date,\n list_id=list_id,\n )\n db.session.add(new_item)\n db.session.commit()\n\n return make_response(json.dumps(new_item.serialize()))", "def test_create_task_without_name_success(\n self,\n mock_background_tasks\n ):\n task_category = \"DEFAULT\"\n\n rv = TEST_CLIENT.post(TASK_ROUTE, json={\"category\": task_category})\n result = rv.json()\n\n expected = {\n \"arguments\": None,\n \"category\": \"DEFAULT\",\n \"commands\": None,\n \"cpuLimit\": \"2000m\",\n 
\"cpuRequest\": \"100m\",\n \"createdAt\": mock.ANY,\n \"dataIn\": None,\n \"dataOut\": None,\n \"description\": None,\n \"docs\": None,\n \"hasNotebook\": True,\n \"image\": models.task.TASK_DEFAULT_EXPERIMENT_IMAGE,\n \"memoryLimit\": models.task.TASK_DEFAULT_MEMORY_LIMIT,\n \"memoryRequest\": models.task.TASK_DEFAULT_MEMORY_REQUEST,\n \"name\": \"Tarefa em branco - 1\",\n \"parameters\": [],\n \"readinessProbeInitialDelaySeconds\": models.task.TASK_DEFAULT_READINESS_INITIAL_DELAY_SECONDS,\n \"tags\": [\"DEFAULT\"],\n \"updatedAt\": mock.ANY,\n \"uuid\": mock.ANY,\n }\n self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)", "def newTask(name, description, assigner, id=None, priority=None, submitter_email=None, whose=None):\n if whose:\n user_id = jutdaapi.find_user(whose)\n if not user_id:\n raise ValueError('bad whose assignment: '+str(whose))\n #title = name + ' for: '+assigner.title()\n # that was the old scheme\n title = '('+assigner.title()+') '+name\n\n if priority != None:\n #priority = (int(priority) + 2) / 2\n priority = int(priority)\n RA_queue = 3\n #if assigner != 'no one':\n # description += '<tasktrackermeta assigner=\"'+assigner+'\"/>'\n if isinstance(id, str):\n description += '<tasktrackermeta id=\"'+id+'\"/>'\n ticket_id = jutdaapi.create_ticket(RA_queue, title, description,\n priority=priority, submitter_email=submitter_email)\n # Is there a race condition here? In this kind of database\n # I would assume not.\n time.sleep(1)\n ticket = jutdaapi.get_detailed_ticket(ticket_id)\n t = ticketToTask(ticket)\n return t", "def test_create_task_invalid_task_id_error(self):\n task_id = \"unk\"\n rv = TEST_CLIENT.post(\n TASK_ROUTE,\n json={\n \"copyFrom\": task_id,\n },\n )\n result = rv.json()\n\n expected = {\n \"message\": \"source task does not exist\",\n \"code\": \"InvalidTaskId\",\n }\n self.assertDictEqual(expected, result)\n self.assertEqual(rv.status_code, 400)", "def load(self, filepath=file):\n try:\n with open(filepath, \"r\", encoding=\"utf-8\") as task_file:\n tasks_json = json.load(task_file)\n self.tasks = [Task(task[\"name\"], task[\"priority\"], task[\"steps\"]) for task in tasks_json]\n self.sort()\n except FileNotFoundError:\n pass", "def makeTask(self, parsedCmd=None, args=None):\n if parsedCmd is not None:\n butler = parsedCmd.butler\n elif args is not None:\n dataRefList, kwargs = args\n butler = dataRefList[0].butlerSubset.butler\n else:\n raise RuntimeError(\"parsedCmd or args must be specified\")\n return self.TaskClass(config=self.config, log=self.log, butler=butler)", "def test_create_task_copy_from_success(\n self,\n mock_background_tasks\n ):\n task_id = util.MOCK_UUID_1\n\n rv = TEST_CLIENT.post(TASK_ROUTE, json={\"copyFrom\": task_id})\n result = rv.json()\n\n expected = {\n \"arguments\": None,\n \"category\": \"DEFAULT\",\n \"commands\": None,\n \"cpuLimit\": models.task.TASK_DEFAULT_CPU_LIMIT,\n \"cpuRequest\": models.task.TASK_DEFAULT_CPU_REQUEST,\n \"createdAt\": mock.ANY,\n \"dataIn\": None,\n \"dataOut\": None,\n \"description\": None,\n \"docs\": None,\n \"hasNotebook\": True,\n \"image\": models.task.TASK_DEFAULT_EXPERIMENT_IMAGE,\n \"memoryLimit\": models.task.TASK_DEFAULT_MEMORY_LIMIT,\n \"memoryRequest\": models.task.TASK_DEFAULT_MEMORY_REQUEST,\n \"name\": f\"{util.MOCK_TASK_NAME_1} - Cópia - 1\",\n \"parameters\": [],\n \"readinessProbeInitialDelaySeconds\": models.task.TASK_DEFAULT_READINESS_INITIAL_DELAY_SECONDS,\n \"tags\": [\"DEFAULT\"],\n \"updatedAt\": mock.ANY,\n \"uuid\": mock.ANY,\n }\n 
self.assertEqual(result, expected)\n self.assertEqual(rv.status_code, 200)", "def initialize_from_task(MODE=\"reviews\"):\n labels = []\n task_name = \"\"\n dataset_uuid = \"\"\n\n if MODE == \"reviews\":\n labels = {0: \"ASPECT\", 1:\"OPINION\"} # TODO: should be erased before deploy\n task_name = \"Restaurant review aspect/opinion extraction: Aspect or Opinion\"\n dataset_uuid = \"reviews\"\n elif MODE == \"hotel\":\n labels = {0: \"ASPECT\", 1:\"OPINION\"} # TODO: should be erased before deploy\n task_name = \"Hotel review aspect/opinion extraction: Aspect or Opinion\"\n dataset_uuid = \"hotel\"\n elif MODE == \"bc5cdr\":\n labels = {0: \"CHEMICAL\", 1:\"DISEASE\"} # TODO: should be erased before deploy\n task_name = \"Bio-med chemical/disease extraction: Chemical or Disease\"\n dataset_uuid = \"bc5cdr\"\n elif MODE == \"bc5cdr_example\":\n labels = {0: \"CHEMICAL\", 1:\"DISEASE\"} # TODO: should be erased before deploy\n task_name = \"Bio-med chemical/disease extraction: Chemical or Disease\"\n dataset_uuid = \"bc5cdr_example\"\n else:\n raise Error('MODE={} is not recognized.'.format(MODE))\n\n project = Project(name=task_name, dataset_uuid=dataset_uuid, labels=labels)\n #project.launch()\n return project", "async def create_task_run(\n self,\n task: \"TaskObject\",\n flow_run_id: UUID,\n dynamic_key: str,\n name: str = None,\n extra_tags: Iterable[str] = None,\n state: prefect.states.State = None,\n task_inputs: Dict[\n str,\n List[\n Union[\n TaskRunResult,\n Parameter,\n Constant,\n ]\n ],\n ] = None,\n ) -> TaskRun:\n tags = set(task.tags).union(extra_tags or [])\n\n if state is None:\n state = prefect.states.Pending()\n\n task_run_data = TaskRunCreate(\n name=name,\n flow_run_id=flow_run_id,\n task_key=task.task_key,\n dynamic_key=dynamic_key,\n tags=list(tags),\n task_version=task.version,\n empirical_policy=TaskRunPolicy(\n retries=task.retries,\n retry_delay=task.retry_delay_seconds,\n retry_jitter_factor=task.retry_jitter_factor,\n ),\n state=state.to_state_create(),\n task_inputs=task_inputs or {},\n )\n\n response = await self._client.post(\n \"/task_runs/\", json=task_run_data.dict(json_compatible=True)\n )\n return TaskRun.parse_obj(response.json())", "def add_task(self, raw_message):\n if not raw_message:\n raise ValueError(\"No message set\")\n\n data = {\n \"content\": {\n \"raw\": raw_message,\n }\n }\n\n return Task(self.post(\"tasks\", data), **self._new_session_args)", "def process_task(params):\n params['task'](params)", "def test_task(self, mocker):\n\n tid = 289466\n site = \"mysite\"\n json = self.generate_task_dictionary(tid, state=\"error\")\n url = (\n \"https://cloudapi.acquia.com/v1/\"\n \"sites/prod:{site}/tasks/{tid}.json\".format(tid=tid, site=site)\n )\n\n mocker.register_uri(\"GET\", url, json=json)\n\n task = self.client.site(site).task(tid)\n self.assertEqual(task[\"id\"], tid)\n self.assertEqual(task[\"state\"], \"error\")", "def __init__(self, file_format, location):\n self.file_format = file_format\n self.location = location\n Task.__init__(self)", "def __init__(self, file_format, location):\n self.file_format = file_format\n self.location = location\n Task.__init__(self)", "def create_task(self, task_body, req_context):\n design_ref = task_body.get('design_ref', None)\n node_filter = task_body.get('node_filter', None)\n action = task_body.get('action', None)\n\n if design_ref is None or action is None:\n raise errors.InvalidFormat(\n 'Task creation requires fields design_ref, action')\n\n task = 
self.orchestrator.create_task(design_ref=design_ref,\n action=action,\n node_filter=node_filter,\n context=req_context)\n\n task.set_status(hd_fields.TaskStatus.Queued)\n task.save()\n return task", "def make_task(self, data, **kwargs):\n return QlikMetric(name=data.get('name'), task_type=data.get('task_type'),\n source_system=data.get('source_system'),\n source_subsystem=data.get('source_subsystem'),\n app_id=data.get('app_id'),\n dimensions=data.get('dimensions'),\n measures=data.get('measures'),\n selections=data.get('selections'),\n yaml_file=data.get('yaml_file'), env=data.get('env'),\n thread_name=data.get('thread_name'), color=data.get('color')\n )", "def from_json_file(cls, json_file):\n with tf.io.gfile.GFile(json_file, \"r\") as reader:\n text = reader.read()\n return cls(**json.loads(text))", "def from_json(json):\n\n if \"targetType\" not in json or \"target\" not in json or \"action\" not in json or \"location\" not in json:\n raise InvalidJSONException(\"Tag must have a type, a value, an action and a location\")\n\n if type(json[\"targetType\"]) != int:\n raise InvalidJSONException(\"Tag targetType must be an int\")\n\n if type(json[\"target\"]) != str:\n raise InvalidJSONException(\"Tag target must be a string\")\n\n if type(json[\"action\"]) != str:\n raise InvalidJSONException(\"Tag action must be a string\")\n\n if type(json[\"location\"]) != int:\n raise InvalidJSONException(\"Tag location must be an int\")\n\n is_completed = False\n if \"isCompleted\" in json and type(json[\"isCompleted\"]) == bool:\n is_completed = json[\"isCompleted\"]\n\n return Tag(json[\"targetType\"], json[\"target\"], json[\"action\"], json[\"location\"], is_completed)", "def POST_task(self, task_data):\n\t\tif not self.room_id:\n\t\t\tself.POST_room()\n\t\trv = self.POST_data('/api/room/' + self.room_id + '/task', data=task_data)\n\t\tself.assertEqual(rv.status_code, 200)\n\t\treturn json.loads(rv.data)['_id']", "def Task(self):\n return self.create_task_cls()", "def post(self, dnzo_user):\n from google.appengine.ext import db\n from tasks_data.models import Task\n from tasks_data.tasks import update_task_with_params, save_task, task_list_can_add_task\n from tasks_data.task_lists import get_task_list\n \n task = Task(parent=dnzo_user)\n \n task_list = self.request.get('task_list', None) \n task_list = task_list and get_task_list(dnzo_user, task_list)\n if task_list:\n task.task_list = task_list\n else:\n self.bad_request(\"Could not find the specified task list.\")\n return\n \n update_task_with_params(dnzo_user, task, self.request)\n \n if not task_list_can_add_task(task_list, task):\n self.bad_request(\"Can not add task, too many active tasks in the list.\")\n return\n \n save_task(dnzo_user, task)\n\n if not task.is_saved():\n self.bad_request(\"Could not add the new task!\")\n return\n\n # reload task \n task = db.get(task.key())\n \n self.json_response(task=task.to_dict())", "def new_task(data):\n rabbit_host = os.getenv('RABBIT_HOST', 'localhost')\n connection = pika.BlockingConnection(\n pika.ConnectionParameters(rabbit_host)\n )\n channel = connection.channel()\n channel.basic_publish(\n exchange='',\n routing_key='task_queue',\n body=json.dumps(data),\n properties=pika.BasicProperties(\n delivery_mode=2, # make message persistent\n )\n )\n connection.close()", "def from_id(cls, task_id):\n return cls.load(TaskInfo._filename(task_id))", "def create_tasks(\n self,\n feature_ids: List,\n features: List,\n ) -> None:\n for i, feature_id in enumerate(feature_ids):\n task = Task(self, 
feature_id, features[i])\n self.tasks.append(task)\n self.numberOfTasks = len(self.tasks)", "def create_task(self, name, target, config=None, comment=\"\"):\n\n if not config:\n config = \"Full and fast\"\n\n request = \"\"\"<create_task>\n <name>%s</name>\n <comment>%s</comment>\n <config id=\"%s\"/>\n <target id=\"%s\"/>\n </create_task>\"\"\" % (name, comment, config, target)\n\n return self.make_xml_request(request, xml_result=True).get(\"id\")", "def task_trigger(self, args):\n h, tmp = tempfile.mkstemp(\n dir=self._tmpdir, prefix='trigger_raw', suffix='.json')\n os.close(h)\n cmd = [\n '-user',\n 'joe@localhost',\n '-d',\n 'pool=default',\n '-dump-json',\n tmp,\n ]\n cmd.extend(args)\n assert not self._run_swarming('trigger',\n cmd), 'Failed to trigger a task. cmd=%s' % cmd\n with open(tmp, 'rb') as f:\n data = json.load(f)\n task_id = data['tasks'][0]['task_id']\n logging.debug('task_id = %s', task_id)\n return task_id" ]
[ "0.8003312", "0.7318135", "0.7193144", "0.7155828", "0.7070071", "0.70066273", "0.6857383", "0.6823616", "0.6789037", "0.66410047", "0.6638342", "0.65754247", "0.6503764", "0.6498993", "0.6407582", "0.6403449", "0.63990456", "0.6354787", "0.6262863", "0.62520087", "0.6243814", "0.6156025", "0.61262095", "0.6119474", "0.6115382", "0.60650986", "0.605556", "0.6047874", "0.60460585", "0.6044337", "0.60415035", "0.6016453", "0.599282", "0.598516", "0.59458256", "0.5933879", "0.59225357", "0.58927554", "0.58912486", "0.58799", "0.583551", "0.5830323", "0.58224607", "0.5820628", "0.58160794", "0.58103925", "0.57917655", "0.57878524", "0.5787669", "0.57873017", "0.5775989", "0.5773398", "0.57605004", "0.57600313", "0.57544476", "0.57253414", "0.5725075", "0.5701272", "0.5692633", "0.56924754", "0.56902945", "0.56794167", "0.5647887", "0.5641886", "0.56414783", "0.5632069", "0.56218374", "0.5620066", "0.56148064", "0.5607584", "0.559918", "0.55893165", "0.5582544", "0.5578976", "0.5561344", "0.5560972", "0.5549281", "0.5540171", "0.5538454", "0.5533342", "0.55288047", "0.55286086", "0.5524146", "0.5499537", "0.54974926", "0.54964507", "0.5493493", "0.54888314", "0.54888314", "0.54833126", "0.5481053", "0.5477925", "0.5477658", "0.5471453", "0.5470752", "0.54696417", "0.54571617", "0.545021", "0.5448276", "0.5446649", "0.54414326" ]
0.0
-1
Initializes a new MuJoCo environment.
def __init__(self, model_path: str, frame_skip: int, camera_settings: Optional[Dict] = None, ): self._seed() if not os.path.isfile(model_path): raise IOError( '[MujocoEnv]: Model path does not exist: {}'.format(model_path)) self.frame_skip = frame_skip self.sim_robot = MujocoSimRobot( model_path, camera_settings=camera_settings) self.sim = self.sim_robot.sim self.model = self.sim_robot.model self.data = self.sim_robot.data self.metadata = { 'render.modes': ['human', 'rgb_array', 'depth_array'], 'video.frames_per_second': int(np.round(1.0 / self.dt)) } self.mujoco_render_frames = False self.init_qpos = self.data.qpos.ravel().copy() self.init_qvel = self.data.qvel.ravel().copy() observation, _reward, done, _info = self.step(np.zeros(self.model.nu)) assert not done bounds = self.model.actuator_ctrlrange.copy() act_upper = bounds[:, 1] act_lower = bounds[:, 0] # Define the action and observation spaces. # HACK: MJRL is still using gym 0.9.x so we can't provide a dtype. try: self.action_space = spaces.Box( act_lower, act_upper, dtype=np.float32) if isinstance(observation, collections.Mapping): self.observation_space = spaces.Dict({ k: spaces.Box(-np.inf, np.inf, shape=v.shape, dtype=np.float32) for k, v in observation.items()}) else: self.obs_dim = np.sum([o.size for o in observation]) if type(observation) is tuple else observation.size self.observation_space = spaces.Box( -np.inf, np.inf, observation.shape, dtype=np.float32) except TypeError: # Fallback case for gym 0.9.x self.action_space = spaces.Box(act_lower, act_upper) assert not isinstance(observation, collections.Mapping), 'gym 0.9.x does not support dictionary observation.' self.obs_dim = np.sum([o.size for o in observation]) if type(observation) is tuple else observation.size self.observation_space = spaces.Box( -np.inf, np.inf, observation.shape)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, env):\n gym.Wrapper.__init__(self, env)", "def __init__(self, env):\n gym.Wrapper.__init__(self, env)", "def __init__(self): \n\t\n\t # get the environment\n\t\tself.env = env()", "def initialize():\n environment = Environment()\n environment.setup()", "def __init__( self ):\n self._env = None\n self._steps = None\n\n self._initialize( )", "def InitEnvironment(self):\r\n\t\t\r\n\t\t# Turn antialiasing on\r\n\t\trender.setAntialias(AntialiasAttrib.MMultisample,1)\r\n\t\t\r\n\t\t# load the falcon model\r\n\t\tfalcon = loader.loadModel(\"Content/falcon/falcon.bam\")\r\n\t\tfalcon.setScale(30)\r\n\t\tfalcon.setPos(0, 0, 28.5)\r\n\t\tfalcon.reparentTo(render)", "def init():\n env = Environment(5, 5, 20, [10, 20, 10, 5])\n return env", "def init_environment(self):\n # import outside of cell so we don't get a traceback\n from sage import all_cmdline\n from sage.repl.user_globals import initialize_globals\n initialize_globals(all_cmdline, self.shell.user_ns)\n self.run_init()", "def init_env(self, env_name):\n self.env = gym.make(env_name)\n self.env.render()\n\n self.state_space = self.env.observation_space.n\n self.action_space = self.env.action_space.n\n\n self.q_table = np.zeros((self.state_space, self.action_space))", "def initialize_trainer(self):\n self.initialize_matrices()\n self.initialize_model()\n self.initialize_optimizers()\n return self", "def initialise(self, args, environ):", "def __init__(self, env):\r\n gym.Wrapper.__init__(self, env)\r\n self.lives = 0\r\n self.was_real_done = True", "def initialize(self, context):\n self.initialized = True\n properties = context.system_properties\n # Contains the url parameter passed to the load request\n model_dir = properties.get(\"model_dir\") \n gpu_id = properties.get(\"gpu_id\")\n\n # Load Gluonts Model\n self.mx_model = self.load_model(model_dir)", "def __init__(self, env, system=None):\n self._env = env\n self._system = system if system is not None else {}", "def __init__(self, comm):\n _hypre.HypreAME_swiginit(self, _hypre.new_HypreAME(comm))", "def __init__(self, env):\n gym.Wrapper.__init__(self, env)\n self.lives = 0\n self.was_real_done = True", "def __init__(self, env):\n gym.Wrapper.__init__(self, env)\n self.lives = 0\n self.was_real_done = True", "def __init__(self, env):\n gym.Wrapper.__init__(self, env)\n self.lives = 0\n self.was_real_done = True", "def __init__(self, env):\n gym.Wrapper.__init__(self, env)\n self.lives = 0\n self.was_real_done = True", "def __init__(self):\n self._ll = LowLevelLibs()\n self._lib = self._ll.foundation", "def __init__(self, logging=True):\n self.matrix_creator = MatrixCreator()\n self.matrix_computer = MatrixComputer()\n self.equation_parser = EquationParser()\n self.balancing_validator = BalancingValidator(logging=logging)\n self.logger = Logger(active=logging)", "def __init__(self, *argv, **kwargs):\n self.refs = {}\n self.ref0s = {}\n self.defect_refs = {}\n\n self.initialize(*argv, **kwargs)", "def initialisation(self):\n self.create_variables()\n self.create_placeholders()\n self.build_model()\n self.reset_lr(None, True)\n self.build_loss()\n self.initialised = True", "def __init__(self, env, action_repeat=1):\n super().__init__(env)\n if self.env.mujoco_robot.name == \"sawyer\":\n from robosuite.controllers import SawyerIKController\n\n self.controller = SawyerIKController(\n bullet_data_path=os.path.join(robosuite.models.assets_root, \"bullet_data\"),\n robot_jpos_getter=self._robot_jpos_getter,\n )\n elif self.env.mujoco_robot.name == \"baxter\":\n from 
robosuite.controllers import BaxterIKController\n\n self.controller = BaxterIKController(\n bullet_data_path=os.path.join(robosuite.models.assets_root, \"bullet_data\"),\n robot_jpos_getter=self._robot_jpos_getter,\n )\n else:\n raise Exception(\n \"Only Sawyer and Baxter robot environments are supported for IK \"\n \"control currently.\"\n )\n\n self.action_repeat = action_repeat", "def setUpEnv(self):\n \n robot = Robot('atrv')\n\n pose = Sensor('pose')\n robot.append(pose)\n pose.configure_mw('yarp')\n\n motion = Actuator('v_omega')\n robot.append(motion)\n motion.configure_mw('yarp')\n \n env = Environment('indoors-1/indoor-1')\n env.configure_service('socket')", "def __init__(self, env):\r\n gym.Wrapper.__init__(self, env)\r\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE'\r\n assert len(env.unwrapped.get_action_meanings()) >= 3", "def __init__(self):\n # Variables that we give through the constructor.\n # None in this case\n\n # Internal Vars\n # TODO[done] add controler Hint: $ rosservice call /jetbot_0/controller_manager/list_controllers\n self.controllers_list = ['jetbot_joint_state_controller',\n 'jetbot_velocity_controller'\n ]\n # TODO[done] add namespace Hint: $ rostopic list | grep controller\n self.robot_name_space = \"jetbot_0\"\n\n # We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv\n super(JetbotRobotEnv, self).__init__(controllers_list=self.controllers_list,\n robot_name_space=self.robot_name_space,\n reset_controls=True)\n\n\n\n \"\"\"\n To check any topic we need to have the simulations running, we need to do two things:\n 1) Unpause the simulation: without that th stream of data doesnt flow. This is for simulations\n that are pause for whatever the reason\n 2) If the simulation was running already for some reason, we need to reset the controlers.\n This has to do with the fact that some plugins with tf, dont understand the reset of the simulation\n and need to be reseted to work properly.\n \"\"\"\n self.gazebo.unpauseSim()\n self.controllers_object.reset_controllers()\n self._check_all_sensors_ready()\n\n # We Start all the ROS related Subscribers and publishers\n # TODO[done] add subscriber publisher\n rospy.Subscriber(\"/jetbot_0/joint_states\", JointState, self._joints_callback)\n rospy.Subscriber(\"/jetbot_0/jetbot_velocity_controller/odom\", Odometry, self._odom_callback)\n\n self._vel_pub = rospy.Publisher('/jetbot_0/jetbot_velocity_controller/cmd_vel',\n Twist, queue_size=6) # ??? 
queue size\n\n self._check_publishers_connection()\n \n self.gazebo.pauseSim()", "def init():", "def init(self):\n # Initialize runtime and MDK:\n self.runtime = fakeRuntime()\n self.runtime.getEnvVarsService().set(\"DATAWIRE_TOKEN\", \"somevalue\")\n self.runtime.dependencies.registerService(\"failurepolicy_factory\",\n RecordingFailurePolicyFactory())\n self.mdk = MDKImpl(self.runtime)\n self.mdk.start()\n self.disco = self.mdk._disco\n # Create a session:\n self.session = self.mdk.session()", "def __init__(self, env):\n gym.Wrapper.__init__(self, env)\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE'\n assert len(env.unwrapped.get_action_meanings()) >= 3", "def __init__(self, env):\n gym.Wrapper.__init__(self, env)\n assert env.unwrapped.get_action_meanings()[1] == 'FIRE'\n assert len(env.unwrapped.get_action_meanings()) >= 3", "def __init__(self):\n self.conf = None\n self.section = None\n self._engine = None\n self._session = None\n self.base_model = declarative_base()", "def __init__(self, dataset, model, quality_method, n_horizon = 10):\n LalEnv.__init__(self, dataset, model, quality_method)\n self.n_horizon = n_horizon", "def init():\n rino.initialize.initialize()", "def initialize(self):\n pass # pragma: no cover", "def init(self) -> None:\n ...", "def __init__(self,\n env=None,\n model=None,\n num_steps=None,\n gamma=0.99):\n self.env = env\n self.model = model\n self.num_steps = num_steps\n self.gamma = gamma\n self.obs, self.infos = self.env.reset()\n self.dones = np.zeros(self.env.num_envs).astype(np.float32)", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def initialize(self):\n pass", "def __init__(self):\n self.ju = ju.JSONUtil()\n self.apiC = api.API()", "def initialize(self):\n\t\tpass", "def initialize(self) -> None:\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def __init__(self):\n self._initialized = False\n self.init()", "def __init__(self):\n self._initialized = False\n self.init()", "def __init__(self, xml_name, recompile_cpp=False, rendering=True):\n if recompile_cpp:\n self._update_wrapper()\n\n if sys.platform.startswith('darwin'):\n cdll_path = os.path.join(os.path.dirname(__file__), \"libsimenv.dylib\")\n elif sys.platform.startswith('linux'):\n cdll_path = os.path.join(os.path.dirname(__file__), \"libsimenv.so\")\n elif sys.platform.startswith('win32'):\n cdll_path = os.path.join(os.path.dirname(__file__), \"libsimenv.dll\")\n else:\n raise EnvironmentError(\"Unknown operating system found.\")\n\n model_path = os.path.join(pathlib.Path(__file__).parent, \"mujoco_model/\", xml_name).encode('utf-8')\n self.rendering = rendering\n\n # C++ control engine.\n self.wrapper = ctypes.CDLL(cdll_path)\n self.instance = self.wrapper.get_instance(ctypes.c_char_p(model_path), ctypes.c_bool(rendering))\n\n # Indices of the object bodies.\n self.obstacle_body_index = self.get_body_index(\"obstacle\")\n self.agent_body_index = self.get_body_index(\"agent\")\n\n # Indices of the joints.\n self.obstacle_jnt_index = self.get_jnt_index(\"slider:obstacle\")\n self.agent_jnt_x_index = self.get_jnt_index(\"slider:agent-obstacle_x\")\n self.agent_jnt_y_index = self.get_jnt_index(\"slider:agent-y\")\n\n # Initial positions from the configuration.\n self.obstacle_pos = self.get_body_ini_pos(self.obstacle_body_index)\n self.agent_pos = self.get_body_ini_pos(self.agent_body_index)", "def initialize(self):\n 
self._setup_simulation_from_parameters()\n if \"orrb\" in self.constants.observation_providers:\n self._reset()\n self._goal = self._next_goal()\n self.update_goal_info()\n\n self.observer = self._build_observer()", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def init(self):\n pass", "def __init__(self):\n \n rospy.logdebug(\"Start CATVehicle_ENV INIT...\")\n \n self.controllers_list = []\n self.publishers_array = []\n self.robot_name_space = \"\"\n self.reset_controls = False\n\n \n \n # We launch the init function of the Parent Class robot_gazebo_env.RobotGazeboEnv\n super(CATVehicleEnv, self).__init__(controllers_list=self.controllers_list,\n robot_name_space=self.robot_name_space,\n reset_controls=False,\n start_init_physics_parameters=False,\n reset_world_or_sim=\"WORLD\")\n \n self.gazebo.unpauseSim()\n self._check_all_sensors_ready()\n \n self._cmd_vel_pub = rospy.Publisher('/catvehicle/cmd_vel', Twist, queue_size=10)\n \n rospy.Subscriber(\"/catvehicle/distanceEstimatorSteeringBased/dist\", Float64, self._distsb_callback)\n rospy.Subscriber(\"/catvehicle/distanceEstimatorSteeringBased/angle\", Float64, self._anglesb_callback)\n rospy.Subscriber(\"/catvehicle/distanceEstimator/dist\", Float32, self._dist_callback)\n rospy.Subscriber(\"/catvehicle/distanceEstimator/angle\", Float32, self._angle_callback)\n rospy.Subscriber(\"/catvehicle/odom\", Odometry, self._odom_callback)\n \n self._check_publishers_connection()\n self.gazebo.pauseSim()\n \n rospy.logdebug(\"Finished TurtleBot2Env INIT...\")", "def __init__(self, env):\n super().__init__(env)", "def __init__(self, env):\n super().__init__(env)", "def __init__(self, env):\n super().__init__(env)", "def __init__(self, env):\n self.env = env\n #self.gator = CycleGator()\n self.gator = GroupGator()", "def _createModuleObj(self):\n ModuleInitialCondition.__init__(self)", "def __init__(self):\n cwd = os.path.join(os.path.dirname(__file__), config.vosk_model_dir)\n self.model = Model(cwd)\n logger.info(f'Loaded speech recognition model from {cwd}')", "def __init__(self, env):\n gym.Wrapper.__init__(self, env)\n # 1st, we will have 2 attributes: self.lives and self.was_real_done\n self.lives = 0\n self.was_real_done = True", "def _initialize(self):\n self.send_init_command()", "def initialize(self, *args, **kwargs):\n self.initialized = True", "def init(self):\n self._frame_idx = 0\n if self.initialization is not None:\n del self.initialization\n self.initialization = None\n\n if self.config.initialization is not None:\n self.initialization = INITIALIZATION.load(self.config.initialization, **self.__kwargs)\n self.initialization.init()\n\n if self.preprocessing is not None:\n del self.preprocessing\n self.preprocessing = None\n\n if self.config.preprocessing is not None:\n self.preprocessing = Preprocessing(self.config.preprocessing, **self.__kwargs)\n\n if self.odometry is None:\n assert self.config.odometry is not None\n self.odometry = ODOMETRY.load(self.config.odometry, **self.__kwargs)\n\n assert self.odometry is not None\n self.odometry.init()\n if self.loop_closure is None and self.config.loop_closure is not None:\n self.loop_closure = LOOP_CLOSURE.load(self.config.loop_closure, **self.__kwargs)\n if self.loop_closure is not None:\n self.loop_closure.init()\n if self.config.backend is not None:\n self.backend = BACKEND.load(self.config.backend, **self.__kwargs)\n if self.backend is not None:\n 
self.backend.init()\n else:\n logging.warning(\"[SLAMAlgorithm]Defined a Loop Closure Algorithm Without a Backend\")", "def initialize(self):\r\n pass", "def initialize(self):\r\n pass", "def init():\n pass", "def make_mujoco_env(env_id, seed):\n rank = MPI.COMM_WORLD.Get_rank()\n set_global_seeds(seed + 10000 * rank)\n env = gym.make(env_id)\n env = Monitor(env, os.path.join(logger.get_dir(), str(rank)))\n env.seed(seed)\n return env", "def init(self) -> None:", "def initialize(self):\n self.population.initialize()\n self.cache.initialize()\n if self.storage:\n self.storage.initialize()", "def __init__(self, **kwargs):\n super(ProjectionMatrix, self).__init__(**kwargs) \n LOG.debug(str(kwargs))\n\n #OpenMEEG attributes\n self.om_head = None\n self.om_sources = None\n self.om_sensors = None\n self.om_head2sensor = None\n\n self.om_inverse_head = None\n self.om_source_matrix = None\n self.om_source2sensor = None #For MEG, not used for EEG", "def initialise():\n _initialiseGlobals()\n for pop in AnadPartOfPerspectiveDb.Iterator():\n _addToKnowledge(pop)\n return", "def __init__(self, env, k):\n\n gym.Wrapper.__init__(self, env)\n self.k = k\n self.frames = deque([], maxlen=self.k)\n shp = env.observation_space.shape\n self.observation_space = spaces.Box(low=0, high=1, shape=(shp[0], shp[1], shp[2] * self.k), dtype=np.float32)", "def __init__(self):\r\n\r\n self.Helpers = Helpers(\"Movidius\")\r\n self.confs = self.Helpers.confs\r\n\r\n self.classes = []\r\n self.ncsGraph = None\r\n self.ncsDevice = None\r\n self.reqsize = None\r\n\r\n self.mean = 128\r\n self.std = 1 / 128\r\n\r\n #mvnc.SetGlobalOption(mvnc.GlobalOption.LOG_LEVEL, 2)\r\n\r\n self.Helpers.logger.info(\"Movidius class initialization complete.\")", "def Init(self):\n RobotMap.Init()\n from commands import *\n from subsystems import *\n#@autogenerated_code(\"constructors\", \" \")\n#parse(\"${exporter-path}core/robot-constructors.py\")\n#end\n # This MUST be here. If the OI creates Commands (which it very likely\n # will), constructing it during the construction of CommandBase (from\n # which commands extend), subsystems are not guaranteed to be\n # yet. Thus, their requires() statements may grab null pointers. Bad\n # news. 
Don't move it.\n self.oi = OI()\n\n # instantiate the command used for the autonomous period", "def init(self):\n # IMPORTANT: create a new gob database model entry for this object\n self.gobify()", "def __init__(self):\n self._setup()\n # Encryption/decryption cipher handler\n self.__cipher = self.__get_cipher()\n # Setup the engine for the sqlite database\n self._engine = create_engine(self.db_uri)\n # Configure the SQLAlchemy metadata\n self._metadata = MetaData()\n self._metadata.bind = self._engine\n self._load_db()\n # Configure the auto-mapping base model\n self._base = automap_base(metadata=self._metadata)\n self._base.prepare()\n # Setup a session generator for database connections\n self._session = sessionmaker(bind=self._engine)", "def __init__(self):\n self._ll = LowLevelLibs()\n self._lib = self._ll.pythia", "def init(self, autoInitialize=True, updateRepositories=True, scriptEnvironment=None):\n\n self.__scriptEnvironment = scriptEnvironment\n self.__updateRepositories = updateRepositories\n\n if autoInitialize and Config.findConfig(\"jasyproject\"):\n\n Console.info(\"Initializing session...\")\n Console.indent()\n\n try:\n self.addProject(Project.getProjectFromPath(\".\", self))\n\n except UserError as err:\n Console.outdent(True)\n Console.error(err)\n raise UserError(\"Critical: Could not initialize session!\")\n\n self.getVirtualProject()\n\n Console.debug(\"Active projects (%s):\", len(self.__projects))\n Console.indent()\n\n for project in self.__projects:\n if project.version:\n Console.debug(\"%s @ %s\", Console.colorize(project.getName(), \"bold\"), Console.colorize(project.version, \"magenta\"))\n else:\n Console.debug(Console.colorize(project.getName(), \"bold\"))\n\n Console.outdent()\n Console.outdent()", "def __init__(self):\n self.emotion_map = {0: \"anger\", 1: \"fear\", 2 : \"joy\", 3: \"sadness\"}\n self.tokenizer = AutoTokenizer.from_pretrained(\"MilaNLProc/feel-it-italian-emotion\")\n self.model = AutoModelForSequenceClassification.from_pretrained(\"MilaNLProc/feel-it-italian-emotion\")\n self.model.eval()\n self.device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')", "def _real_initialize(self):\n pass", "def initialize(self, model):\n pass", "def init_gym(env_name):\n env = gym.make(env_name)\n obs_dim = env.observation_space.shape[0]\n act_dim = env.action_space.shape[0]\n\n return env, obs_dim, act_dim", "def init_gym(env_name):\n env = gym.make(env_name)\n obs_dim = env.observation_space.shape[0]\n act_dim = env.action_space.shape[0]\n\n return env, obs_dim, act_dim", "def initialize(self, cwrap):\n pass", "def __init__(self):\n self.model_description: Dict[str, Any] = get_model_description()\n self.model_name: str = self.model_description['name']\n self.model_version: str = self.model_description['version']\n\n # Make sure we do not have a trailing slash to muck up processing later.\n self.event_dir: Optional[str] = None\n self.zone_name: Optional[str] = None\n self.fault_time: Optional[str] = None\n\n self.example: Example = None\n self.validator: ExampleValidator = ExampleValidator()\n self.common_features_df: pd.DataFrame = None\n\n self.cavity_onnx_session: rt.InferenceSession = rt.InferenceSession(os.path.join(os.path.dirname(__file__),\n 'model_files',\n 'cavity_model.onnx'))\n self.fault_onnx_session: rt.InferenceSession = rt.InferenceSession(os.path.join(os.path.dirname(__file__),\n 'model_files',\n 'fault_model.onnx'))", "def do_init(self):\n\n pass", "def init_vars(self):\n\n load_dotenv()\n self.smart_cube = 
True if os.environ.get(\"SMART_CUBE\") == \"True\" else False\n self.gen_parsed_to_cubedb = True if os.environ.get(\"GEN_PARSED_TO_CUBEDB\") == \"True\" else False\n self.name_of_solve = os.environ.get(\"NAME_OF_SOLVE\")\n self.time_solve = os.environ.get(\"TIME_SOLVE\")\n self.comms_unparsed_bool = True if os.environ.get(\"COMMS_UNPARSED\") == \"True\" else False\n self.gen_with_move_count = True if os.environ.get(\"GEN_WITH_MOVE_COUNT\") == \"True\" else False\n self.diff_to_solved_state = float(os.environ.get(\"DIFF_BETWEEN_ALGS\"))\n self.parse_to_lp = True if os.environ.get(\"PARSE_TO_LETTER_PAIR\") == \"True\" else False\n self.gen_with_moves = True if os.environ.get(\"GEN_WITH_MOVE_COUNT\") == \"True\" else False\n self.buffer_ed = self.get_buffer_ed(os.environ.get(\"EDGES_BUFFER\"))\n self.buffer_cor = self.get_buffer_cor(os.environ.get(\"CORNER_BUFFER\"))\n self.path_to_lp = os.environ.get(\"PATH_LETTER_PAIR_FILE\")\n self.dict_lp = self.load_letter_pairs_dict()", "def _env_setup(self, initial_qpos):\n raise NotImplementedError()", "def __init__(self, model_info, alg_config, **kwargs):\n import_config(globals(), alg_config)\n super().__init__(\n alg_name=kwargs.get(\"name\") or \"muzero\",\n model_info=model_info[\"actor\"],\n alg_config=alg_config,\n )\n # self.buff = ReplayBuffer(BUFFER_SIZE)\n self.buff = PrioritizedReplayBuffer(BUFFER_SIZE, alpha=1)\n self.discount = GAMMA\n self.unroll_step = UNROLL_STEP\n self.td_step = TD_STEP\n self.async_flag = False", "def __init__(self):\n self._ll = LowLevelLibs()\n self._lib = self._ll.ratchet", "def __init__(self):\n self._ll = LowLevelLibs()\n self._lib = self._ll.ratchet", "def __init__(self, **kwargs):\n # Variables that we give through the constructor.\n # namespace\n self.n = kwargs['n']\n self.robots = [Robot(i, kwargs['displacement_xyz']) for i in range(self.n)]\n self.controllers_list = [\n 'joint_state_controller',\n 'joint1_B_controller',\n 'joint1_F_controller',\n 'joint1_L_controller',\n 'joint1_R_controller',\n 'joint2_B_controller',\n 'joint2_F_controller',\n 'joint2_L_controller',\n 'joint2_R_controller',\n 'joint3_B_controller',\n 'joint3_F_controller',\n 'joint3_L_controller',\n 'joint3_R_controller',\n 'joint4_B_controller',\n 'joint4_F_controller',\n 'joint4_L_controller',\n 'joint4_R_controller'\n ]\n for r in self.robots:\n for n in self.controllers_list[1:]:\n r.publisher_list.append(\n rospy.Publisher(r.ns + '/' + n + '/command', Float64, queue_size=1))\n\n self.all_controllers_list = []\n for r in self.robots:\n for c in self.controllers_list:\n self.all_controllers_list.append(r.ns + '/' + c)\n reset_controls_bool = True\n super(CrawlerRobotEnv, self).__init__( n=self.n, robot_name_spaces=['crawler_'+str(i) for i in range(self.n)],\n controllers_list=self.controllers_list,\n reset_controls=reset_controls_bool)\n rospy.logdebug(\"END init CrawlerRobotEnv\")" ]
[ "0.65531933", "0.65531933", "0.6540199", "0.6425967", "0.6125439", "0.60250366", "0.5981867", "0.59326357", "0.58881015", "0.5858973", "0.5796893", "0.5779154", "0.5775181", "0.57644004", "0.5762341", "0.5751086", "0.5751086", "0.5721603", "0.5721603", "0.567534", "0.56694514", "0.56384397", "0.56218386", "0.560622", "0.55893415", "0.55849636", "0.5580358", "0.5579635", "0.5572662", "0.55659443", "0.55659443", "0.55576867", "0.55545104", "0.5543828", "0.5529505", "0.55265504", "0.55259746", "0.5522179", "0.5522179", "0.5522179", "0.5522179", "0.5522179", "0.5517572", "0.5509991", "0.5508322", "0.55076206", "0.55076206", "0.55076206", "0.5502226", "0.5502226", "0.5502169", "0.55014235", "0.54939497", "0.54939497", "0.54939497", "0.54939497", "0.54939497", "0.54939497", "0.54939497", "0.54939497", "0.5480197", "0.5477058", "0.5477058", "0.5477058", "0.5474017", "0.5472136", "0.5465172", "0.5454309", "0.5449488", "0.54477555", "0.54434335", "0.5438086", "0.5438086", "0.5427522", "0.54266137", "0.5423323", "0.5421923", "0.54213774", "0.5416905", "0.5415806", "0.5414639", "0.54004496", "0.54001427", "0.53999716", "0.5399416", "0.53891426", "0.53812397", "0.5380106", "0.53783405", "0.5375811", "0.5375811", "0.53747207", "0.53726256", "0.5372305", "0.5371499", "0.5363632", "0.5352467", "0.5349705", "0.5349705", "0.5342539" ]
0.5511559
43
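The row above pairs the query "Initializes a new MuJoCo environment." with a base-class constructor (named MujocoEnv, per the error string inside it) that loads the model, caches init_qpos/init_qvel, and derives the action and observation spaces from actuator_ctrlrange and a probe step. As a reading aid only, and not part of the dataset row, a minimal hypothetical subclass and driver loop might look like the sketch below; the class name, the XML path, and the toy reward are assumptions, and only attributes visible in the constructor (sim, data, frame_skip, action_space) are used.

import numpy as np

class ReacherEnv(MujocoEnv):  # hypothetical subclass of the base environment above
    def __init__(self):
        # frame_skip=4: each call to step() advances the MuJoCo simulation four sub-steps.
        super().__init__(model_path='assets/reacher.xml', frame_skip=4)

    def step(self, action):
        self.sim.data.ctrl[:] = action               # write actuator commands
        for _ in range(self.frame_skip):             # advance the physics frame_skip times
            self.sim.step()
        obs = np.concatenate([self.data.qpos.ravel(), self.data.qvel.ravel()])
        reward = -float(np.linalg.norm(self.data.qpos))  # toy reward, for illustration only
        return obs, reward, False, {}

env = ReacherEnv()
obs, reward, done, info = env.step(env.action_space.sample())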
Reset the robot degrees of freedom (qpos and qvel). Implement this in each subclass.
def reset_model(self): raise NotImplementedError
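As a reading aid only, and not part of the dataset row, a typical concrete override of this hook in the usual gym/MuJoCo style is sketched below. It assumes the base class provides set_state, np_random, and _get_obs helpers; init_qpos and init_qvel are the buffers cached by the constructor shown in the previous row.

def reset_model(self):
    # Perturb the cached initial state slightly so each episode starts from a fresh configuration.
    qpos = self.init_qpos + self.np_random.uniform(low=-0.01, high=0.01, size=self.model.nq)
    qvel = self.init_qvel + self.np_random.uniform(low=-0.01, high=0.01, size=self.model.nv)
    self.set_state(qpos, qvel)   # assumed base-class helper that writes qpos/qvel into the simulation
    return self._get_obs()       # assumed observation helper defined by the subclass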
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def reset(self):\n self._position = TwoDV(0.0, 0.0)\n self._orient = TNavigator.START_ORIENTATION[self._mode]", "def specific_reset(self) -> None:\n self.agent.specific_reset() # reset joints\n new_pos = self.agent.init_xyz\n new_pos[:2] = np.random.uniform(-0.01, 0.01, 2)\n self.agent.set_position(new_pos)\n self.old_potential = self.calculate_task_potential()", "def reset(self):\n self.position = self.initial_position\n self.velocity = [0, 0, 0]", "def reset(self):\n self.velocity_controller.reset()\n self.yaw_filter.reset()", "def _reset_(self):\n print(\"Resetting\")\n\n self._q_target_, x_target = self._pick_random_angles_()\n np.copyto(self._x_target_, x_target)\n if self._target_type == 'position':\n self._target_ = self._x_target_[self._end_effector_indices]\n elif self._target_type == 'angle':\n self._target_ = self._q_target_\n self._action_ = self._rand_obj_.uniform(self._action_low, self._action_high)\n self._cmd_prev_ = np.zeros(len(self._action_low)) # to be used with derivative control of velocity\n if self._reset_type != 'none':\n if self._reset_type == 'random':\n reset_angles, _ = self._pick_random_angles_()\n elif self._reset_type == 'zero':\n reset_angles = self._q_ref[self._joint_indices]\n self._reset_arm(reset_angles)\n\n rand_state_array_type, rand_state_array_size, rand_state_array = utils.get_random_state_array(\n self._rand_obj_.get_state()\n )\n np.copyto(self._shared_rstate_array_, np.frombuffer(rand_state_array, dtype=rand_state_array_type))\n\n print(\"Reset done\")", "def reset(\n self,\n base_position: Optional[Tuple[float]] = None,\n base_orientation_quaternion: Optional[Tuple[float]] = None,\n joint_angles: Optional[Union[Dict[Text, float], Tuple[float]]] = None,\n ):\n self._robot_state = None\n self._last_action = None\n\n # self._get_state() will receive a new state proto from Pupper. We also\n # call the self.receive_observation() to update the internal varialbes.\n self._get_state()\n # self.receive_observation()\n\n\n joint_angles = [0, 0.6, -1.2] * 4\n super().reset(base_position, base_orientation_quaternion, joint_angles)\n\n # Receive another state at the end of the reset sequence. 
Though it is\n # probably not necessary.\n self._get_state()\n self._step_counter = 0\n self._reset_time = self._clock()", "def reset(self):\n\n self.speed = self.getRandomVelocity()\n self.setX(Configuration.windowWidth / 2)\n self.setY(Configuration.windowHeight / 2)", "def reset(self):\n self.position = np.zeros(self.ndegres)\n self.velocity = np.zeros(self.ndegres)\n self.state = np.zeros(2*self.ndegres)\n self.flag = 0\n self.h_ref = np.array([self.ref for _ in range(self.horizon)])\n self.action = np.zeros(self.ACTION_DIM) \n self.h_action = np.zeros(self.ACTION_DIM*self.horizon)", "def reset(self):\n self.vrp = np.matrix([0.5, 0.5, 1])\n self.vpn = np.matrix([0, 0, -1])\n self.vup = np.matrix([0, 1, 0])\n self.u = np.matrix([-1, 0, 0])\n self.extent = [1., 1., 1.]\n self.screen = [400., 400.]\n self.offset = [20., 20.]", "def reset(self):\n p.resetSimulation()\n p.setPhysicsEngineParameter(numSolverIterations=150)\n p.setTimeStep(self._time_step)\n p.setGravity(0, 0, -9.8)\n\n # load plane\n p.loadURDF(os.path.join(pybullet_data.getDataPath(), \"plane.urdf\"), [0, 0, 0])\n # load robot\n self._darwin = DarwinopEnv()\n\n # Let the world run for a bit\n for _ in range(20):\n p.stepSimulation()", "def ResetPos(self):\n for idx in range(self.unFixJL):\n self._p.resetJointState(self.uid, idx,\n self.InitInfo[\"JPos\"][idx],\n self.InitInfo[\"JVel\"][idx])", "def reset(self):\n self.x = int(constants.SCREEN_WIDTH/2)\n self.y = int(constants.SCREEN_HEIGHT/2)\n self.DX = self.getRandSpeed()\n self.DY = self.getRandSpeed()", "def reset(self):\r\n self.x = self.initX\r\n self.y = self.initY\r\n self.dir= self.initDir", "def _reset(self): # We are using a virtual function defined in the gym infrastructure.\n self.gazebo.unpauseSim()\n \"\"\"\n why we need to unpauseSim because resetting controllers and for checking the sensors, we need the simulation\n to be running because otherwise we don't have any sensory data and we don't have access to the controller reset\n functions services they won't work and tell you to hit play. 
=> it is very important.\n \"\"\"\n self.controllers_object.reset_controllers()\n self.check_all_sensors_ready()\n self.set_init_pose()\n #initialized robot\n self.gazebo.pauseSim()\n self.gazebo.resetSim()\n self.gazebo.unpauseSim()\n self.controllers_object.reset_controllers()\n self.check_all_sensors_ready()\n self.gazebo.pauseSim()\n self.init_env_variables()\n obs = self._get_obs()\n simplified_obs = self.convert_obs_to_state(obs)\n\n return simplified_obs", "def reset_state(self):\n self.y = np.copy(self.start)\n self.dy = np.zeros(self.n_dmps)\n self.ddy = self.ay * (self.by * (self.goal - self.y) - self.dy) + self.force[0]\n self.timestep = 0", "def reset(self,):\n \n self.i = 0\n self.pi = 1.0\n self.si = 0.0\n self.pi_min = float(\"inf\")\n self.si_min = float(\"inf\")", "def reset(self):\n log.debug(\"RESET\")\n self.ref_pos_x = -1\n self.ref_pos_y = -1\n self.ref_pos_z = -1\n self.pos_x = -1\n self.pos_y = -1\n self.pos_z = -1\n self.yaw = 0\n self.throw_ongoing = False", "def reset(self):\r\n # reset Wheel encoders\r\n self.start_time = time.time()\r\n [left_start, right_start] = self.Roomba.Query(43, 44)\r\n self.Motion.reset(left_start, right_start)\r\n # reset bumper\r\n self.bumper.reset()\r\n\r\n #reset grid world data\r\n self.action=[0.0,0.0]\r\n self.grid_state= [0,0,0]\r\n self.real_state = [0.0, 0.0, 0.0]\r\n self.trans_model = None\r\n pass", "def reset(self):\r\n # TODO: have reset flag such that it forces all the bottom changes\r\n self.pwm_freq = self._default[\"pwm_freq\"]\r\n self.gate_logic = self._default[\"gate_logic\"]\r\n self.max_pwm = self._default[\"max_pwm\"]\r\n self.lase_on_power_up = self._default[\"lase_on_power_up\"]\r\n\r\n self.mode = self._default[\"mode\"]\r\n self.lase = self._default[\"lase\"]\r\n self.percent = self._default[\"percent\"] # in percent\r", "def reset_world(self):\n print(\"Resetting world\")\n\n if self.real:\n angles = self.real_arm.convertToList(self.real_arm.joint_angles())\n else:\n angles = self.orig_joint_angles\n self.plan()\n self.robot.arm.SetJointValues(angles)\n self.execute()\n self.execution_robot.arm.SetJointValues(angles)\n for bx, b in enumerate(self.pddl_blocks):\n b.set_base_link_pose(self.orig_block_poses[bx])\n print(\"Done\")", "def reset(self):\n self.m = normalize(self.m0)\n self.t = 0.0", "def reset(self):\n self.rst.value(0) # RST on\n self.sleep_us(100) # reset impulse has to be >100 ns and <100 ms\n self.rst.value(1) # RST off\n # Defaults after reset:\n self.power = self.POWER_DOWN\n self.addressing = self.ADDRESSING_HORIZ\n self.instr = self.INSTR_BASIC\n self.display_mode = self.DISPLAY_BLANK\n self.temp_coeff = self.TEMP_COEFF_0\n self.bias = self.BIAS_1_11\n self.voltage = 3060", "def reset_rotor(self):\n self._rng_offset = 0\n self._rot_offset = 0", "def reset(self):\n self.resetPos()\n self.vx, self.vy = 0, 0\n self.accel, self.dangle = 0, 0\n self.crashed = False\n self.timeDriving, self.score, self.checkpoint, self.laps = 0, 0, 0, 0\n self.targetCheckpointPos = self.maze.checkpoints[0].getMidInt()\n self.inputColour = [sensor_colours[0] for i in range(self.dimensions[0])]\n self.scan = np.array([0 for i in range(self.dimensions[0])])\n self.cost = [0 for i in range(6)]\n #Extrapos for CTS LOS\n self.extrapos = []", "def setAllZero(self):\n self.robot.set_joint([0,0,0,0,0])\n self.robot.save_config()", "def reset(self):\n error_estop = \"\"\"\\\nE-Stop is ASSERTED. 
Disengage E-Stop and then reset the robot.\n\"\"\"\n error_nonfatal = \"\"\"Non-fatal Robot Error on reset.\nRobot reset cleared stopped state and robot can be enabled, but a non-fatal\nerror persists. Check diagnostics or rethink.log for more info.\n\"\"\"\n error_env = \"\"\"Failed to reset robot.\nPlease verify that the ROS_IP or ROS_HOSTNAME environment variables are set\nand resolvable. For more information please visit:\nhttp://sdk.rethinkrobotics.com/wiki/RSDK_Shell#Initialize\n\"\"\"\n is_reset = lambda: (self._state.enabled == False and\n self._state.stopped == False and\n self._state.error == False and\n self._state.estop_button == 0 and\n self._state.estop_source == 0)\n pub = rospy.Publisher('robot/set_super_reset', Empty, queue_size=10)\n\n if (self._state.stopped and\n self._state.estop_button == AssemblyState.ESTOP_BUTTON_PRESSED):\n rospy.logfatal(error_estop)\n raise IOError(errno.EREMOTEIO, \"Failed to Reset: E-Stop Engaged\")\n\n rospy.loginfo(\"Resetting robot...\")\n try:\n baxter_dataflow.wait_for(\n test=is_reset,\n timeout=3.0,\n timeout_msg=error_env,\n body=pub.publish\n )\n except OSError as e:\n if e.errno == errno.ETIMEDOUT:\n if self._state.error == True and self._state.stopped == False:\n rospy.logwarn(error_nonfatal)\n return False\n raise", "def reset(self):\n self.world.reset()\n self.ref_state = self.ref_root_state\n # self.continue_from_now_by_phase(random() if self.rsi else 0.)\n self.skel.set_positions(self.ref_state.angles)\n # self.skel.set_positions(self.ref_motion.get_q(self.phase_frame))\n # dq = self.ref_motion.get_dq_dart(self.phase_frame)\n # self.skel.set_velocities(dq)\n self.skel.set_velocities(np.zeros(self.skel.ndofs))\n\n return self.state()", "def reset(self):\n self.integral = 0.0\n self.previous_error = 0.0", "def reset(self):\n self.x = random.uniform(-0.05, 0.05)\n self.x_dot = random.uniform(-0.05, 0.05)\n self.theta = random.uniform(-0.05, 0.05)\n self.theta_dot = random.uniform(-0.05, 0.05)", "def reset_state(self):\n self.y = self.y0.copy()\n self.dy = jnp.zeros(self.n_dmps)\n self.ddy = jnp.zeros(self.n_dmps)\n self.cs.reset_state()", "def reset(self):\n urdf=os.path.join(os.environ[\"YUMI_PUSH_MODELS\"],\"robot_hand.urdf\")\n self._model = self._world.add_model(\n model_path=urdf,\n position=[-10.0, -10.0, 0.0],\n orientation=[0.0, 0.0, 0.0, 1.0],\n is_robot=True)\n self._model.set_dynamics(mass=self._config.get(\"act_mass\", 10.0),\n lateralFriction=0,spinningFriction=10,rollingFriction=10,\n linearDamping=0,angularDamping=0)", "def reset(self):\n self.t = 0\n # two outputs: the thrusters, u_r and u_l and stop neuron\n self.action = [0.0, 0.0, 0.0]\n # x, vx, y, vy, theta, omega\n # self.state = [2.0, 0.0, 2.0, 0.0, 0.0, 0.0]\n self.state = self.start_cnd\n x, vx, y, vy, theta, omega = self.state\n# print x, self.state\n self.init_distance = self.getDistance()\n \n self.solver = ode(self.dX)\n self.solver.set_integrator('dopri5') \n self.solver.set_initial_value(self.state, self.t)", "def reset(self):\n self.robot_path_ind = 0\n self.goal_path_ind = None\n self.global_plan = Path()", "def reset(self):\n self._turtle.clear()\n self._turtle.setposition((0,0)) \n self._turtle.shape('turtle')\n self.color = 'red'\n self.heading = 180\n self.speed = 0", "def reset(self):\n self.t = 0.0\n self.last_t = None\n self.current_y = np.copy(self.start_y)\n self.current_yd = np.copy(self.start_yd)", "def reset(self, # type: ignore[override]\n *,\n seed: Optional[int] = None,\n options: Optional[Dict[str, Any]] = None,\n ) -> 
Tuple[DataNested, InfoType]:\n # Reset the seed if requested\n if seed is not None:\n self._initialize_seed(seed)\n\n # Stop the simulator\n self.simulator.stop()\n\n # Remove external forces, if any\n self.simulator.remove_all_forces()\n\n # Make sure the environment is properly setup\n self._setup()\n\n # Make sure the low-level engine has not changed,\n # otherwise some proxies would be corrupted.\n if self.engine is not self.simulator.engine:\n raise RuntimeError(\n \"Changing unexpectedly the memory address of the low-level \"\n \"jiminy engine is an undefined behavior.\")\n\n # Re-initialize some shared memories.\n # It is necessary because the robot may have changed.\n self.sensors_data = OrderedDict(self.robot.sensors_data)\n\n # Enforce the low-level controller.\n # The robot may have changed, for example it could be randomly\n # generated, which would corrupt the old controller. As a result, it is\n # necessary to either instantiate a new low-level controller and to\n # re-initialize the existing one by calling `controller.initialize`\n # method BEFORE calling `reset` method because doing otherwise would\n # cause a segfault.\n mock_controller = jiminy.ControllerFunctor()\n mock_controller.initialize(self.robot)\n self.simulator.set_controller(mock_controller)\n\n # Reset the simulator.\n # Do NOT remove all forces since it has already been done before, and\n # because it would make it impossible to register forces in `_setup`.\n self.simulator.reset(remove_all_forces=False)\n\n # Reset some internal buffers\n self.num_steps = 0\n self._num_steps_beyond_terminate = None\n\n # Create a new log file\n if self.debug:\n fd, self.log_path = tempfile.mkstemp(suffix=\".data\")\n os.close(fd)\n\n # Extract the observer/controller update period.\n # The controller update period is used by default for the observer if\n # it was not specify by the user in `_setup`.\n engine_options = self.simulator.engine.get_options()\n self.control_dt = float(\n engine_options['stepper']['controllerUpdatePeriod'])\n if self.observe_dt < 0.0:\n self.observe_dt = self.control_dt\n\n # Run the reset hook if any.\n # Note that the reset hook must be called after `_setup` because it\n # expects that the robot is not going to change anymore at this point.\n # Similarly, the observer and controller update periods must be set.\n reset_hook: Optional[Callable[[], JiminyEnvInterface]] = (\n options or {}).get(\"reset_hook\")\n env: JiminyEnvInterface = self\n if reset_hook is not None:\n assert callable(reset_hook)\n env_derived = reset_hook() or self\n assert env_derived.unwrapped is self\n env = env_derived\n self._env_derived = env\n\n # Instantiate the actual controller\n controller = jiminy.ControllerFunctor(env._controller_handle)\n controller.initialize(self.robot)\n self.simulator.set_controller(controller)\n\n # Configure the maximum number of steps\n self.max_steps = int(self.simulation_duration_max // self.step_dt)\n\n # Register user-specified variables to the telemetry\n for header, value in self._registered_variables.values():\n register_variables(controller, header, value)\n\n # Sample the initial state and reset the low-level engine\n qpos, qvel = self._sample_state()\n if not jiminy.is_position_valid(\n self.simulator.pinocchio_model, qpos):\n raise RuntimeError(\n \"The initial state provided by `_sample_state` is \"\n \"inconsistent with the dimension or types of joints of the \"\n \"model.\")\n\n # Start the engine\n self.simulator.start(\n qpos, qvel, None, 
self.simulator.use_theoretical_model)\n\n # Initialize shared buffers\n self._initialize_buffers()\n\n # Update shared buffers\n self._refresh_buffers()\n\n # Initialize the observation\n env._observer_handle(\n self.stepper_state.t,\n self.system_state.q,\n self.system_state.v,\n self.robot.sensors_data)\n\n # Initialize specialized most-derived observation clipping operator\n self._get_clipped_env_observation = build_clip(\n env.observation, env.observation_space)\n\n # Make sure the state is valid, otherwise there `refresh_observation`\n # and `_initialize_observation_space` are probably inconsistent.\n try:\n obs: ObsT = cast(ObsT, self._get_clipped_env_observation())\n except (TypeError, ValueError) as e:\n raise RuntimeError(\n \"The observation computed by `refresh_observation` is \"\n \"inconsistent with the observation space defined by \"\n \"`_initialize_observation_space` at initialization.\") from e\n\n # Make sure there is no 'nan' value in observation\n for value in tree.flatten(obs):\n if np.isnan(value).any():\n raise RuntimeError(\n f\"'nan' value found in observation ({obs}). Something \"\n \"went wrong with `refresh_observation` method.\")\n\n # The simulation cannot be done before doing a single step.\n if any(self.has_terminated()):\n raise RuntimeError(\n \"The simulation has already terminated at `reset`. Check the \"\n \"implementation of `has_terminated` if overloaded.\")\n\n # Reset cumulative reward\n self.total_reward = 0.0\n\n # Note that the viewer must be reset if available, otherwise it would\n # keep using the old robot model for display, which must be avoided.\n if self.simulator.is_viewer_available:\n self.simulator.viewer._setup(self.robot)\n if self.simulator.viewer.has_gui():\n self.simulator.viewer.refresh()\n\n return obs, deepcopy(self._info)", "def reset(self):\n self._proportional = 0\n self._integral = 0\n self._derivative = 0\n\n self._last_time = self._current_time()\n self._last_output = None\n self._last_input = None", "def reset_grad(self):\n self.g_optimizer.zero_grad()\n self.d_optimizer.zero_grad()\n self.dr_optimizer.zero_grad()", "def reset (self):\n log.debug(\"RESET\")\n self.ref_pos_x = -1\n self.ref_pos_y = -1\n self.ref_pos_z = -1\n self.pos_x = -1\n self.pos_y = -1\n self.pos_z = -1\n self.yaw = 0\n self.tracking = False\n self.distance_mode = False\n self.keep_distance = None\n self.hand_ctrl = False\n self.timestamp_hand_ctrl = None\n self.head_hand_x_ref = None\n self.head_hand_x_dist = None\n self.palm_landing = False\n self.palm_landing_approach = False\n self.yaw_to_consume = 0\n self.timestamp_keep_distance = time.time()\n self.wait_before_tracking = None\n self.timestamp_take_picture = None\n self.throw_ongoing = False\n self.scheduled_takeoff = None\n\n # When in trackin mode, but no body is detected in current frame,\n # we make the drone rotate in the hope to find some body\n # The rotation is done in the same direction as the last rotation done\n self.body_in_prev_frame = False\n self.timestamp_no_body = time.time()\n self.last_rotation_is_cw = True", "def reset(self):\n Simulation.reset(self)", "def reset(self):\r\n\t\t# Performs Player superclass reset.\r\n\t\tsuper().reset()\r\n\r\n\t\t# Resets the Q value estimate to the default initial_Q for all actions.\r\n\t\tself.player_Q = np.ones_like(self.player_selected_actions, dtype=float) * self.initial_Q", "def reset(self):\r\n\t\t# Performs Player superclass reset.\r\n\t\tsuper().reset()\r\n\r\n\t\t# Resets the Q value estimate to the default initial_Q for all 
actions.\r\n\t\tself.player_Q = np.ones_like(self.player_selected_actions, dtype=float) * self.initial_Q", "def reset(self):\n self.test = 0\n self.pos = 0", "def reset(self):\n self.test = 0\n self.pos = 0", "def reset(self):\n self.x_pos1 = 0\n self.x_pos2 = self.x_pos1 + self.width\n self.y_pos = self.offset_y\n self.velocity = self.origin_velocity", "def reset_pose():\n rospy.wait_for_service('/drl/set_model_state')\n try:\n reset_pose_proxy = rospy.ServiceProxy(\n '/drl/set_model_state', ResetPosition)\n reset_pose_proxy(True)\n except rospy.ServiceException, ex:\n print \"Service call reset_pose failed: %s\" % ex", "def reset(self):\n self.steps = 0\n self.state = 0\n self.trajectory = []", "def _reset_wheel(self):\n [j.reset_dynamic_object() for j in self.wheels]\n\n p = [[-pi / 4, 0, 0], [pi / 4, 0, pi], [-pi / 4, 0, 0], [pi / 4, 0, pi]]\n\n for i in range(self.num_wheels):\n self.joints_slipping[i].set_position([0, 0, 0],\n relative_to=self.joints[i],\n reset_dynamics=False)\n self.joints_slipping[i].set_orientation(p[i],\n relative_to=self.joints[i],\n reset_dynamics=False)\n self.wheels[i].set_position([0, 0, 0], relative_to=self.joints[i],\n reset_dynamics=False)\n self.wheels[i].set_orientation([0, 0, 0],\n relative_to=self.joints[i],\n reset_dynamics=False)", "def specific_reset(self) -> None:\n self.old_velocity = 0.\n self.agent.specific_reset()\n max_dist_to_origin = 4.\n min_dist_to_origin = 2\n\n agent_pos = np.random.uniform(-max_dist_to_origin, max_dist_to_origin, 2)\n positioning_done = False\n while not positioning_done:\n agent_pos = np.random.uniform(-max_dist_to_origin,\n max_dist_to_origin, 2)\n if min_dist_to_origin <= np.linalg.norm(agent_pos) <= max_dist_to_origin:\n positioning_done = True\n\n # adjust the height of agent\n agent_pos = np.concatenate((agent_pos[:2], [self.agent.init_xyz[2]]))\n self.agent.set_position(agent_pos)\n\n # set agent orientation in forward run direction\n y = angle2pos(self.agent.get_position(), np.zeros(3)) + np.pi / 2\n y += self.agent.init_rpy[2]\n quaternion = self.bc.getQuaternionFromEuler([0, 0, y])\n self.agent.set_orientation(quaternion)", "def reset(self):\n self.params.resetParams()", "def reset(self):\n self.mol.RHF(doPrint=False)\n self.dipole = []\n self.angmom = []\n self.Energy = []\n self.shape = []", "def reset(self):\n p.resetBasePositionAndOrientation(self.pybullet_id, self.initial_position, (0., 0., 0., 1.))", "def reset(self):\n p.resetBasePositionAndOrientation(self.pybullet_id, self.initial_position, (0., 0., 0., 1.))", "def reset(self):\n p.resetBasePositionAndOrientation(self.pybullet_id, self.initial_position, (0., 0., 0., 1.))", "def reset(self):\n self.control_counter = 0\n self.last_position_error = np.zeros(3)\n self.integral_position_error = np.zeros(3)\n self.last_attitude_error = np.zeros(3)\n self.integral_attitude_error = np.zeros(3)", "def reset(self):\n self.noise = [0.] 
* 6\n self.state = [0.0] * 9\n self.q = [0.0, 0.0, 0.0, 1.0]\n self.terminal = False\n self.steps = 0\n # Return current state and error\n return self.observation, self.error", "def reset_movement(self):\n self.direction = [0, 0]", "def reset(self):\r\n self._p = self._p_init\r\n self._r = self._r_init\r\n self._v = self._v_init\r\n self._w = self._w_init\r\n self._a = self._a_init\r\n self._alpha = self._alpha_init", "def reset(self):\n self.acc_loss = 0\n self.norm_term = 0", "def reset(self) -> VecEnvObs:\n raise NotImplementedError()", "def reset_grad(self):\n self.g_optimizer.zero_grad()\n self.d_optimizer.zero_grad()", "def reset_grad(self):\n self.g_optimizer.zero_grad()\n self.d_optimizer.zero_grad()", "def reset_grad(self):\n self.g_optimizer.zero_grad()\n self.d_optimizer.zero_grad()", "def reset(self):\n self.F = 0\n self.M = 0\n self.w = np.zeros(self.n)\n self.z = np.zeros(self.n)", "def reset(self) -> None:\n self._successful = False\n self._measurement_covariance = None\n self._measurements = None\n self._base_frame_directions = None\n self._temperatures = None\n self._postfit_covariance = None\n self._postfit_residuals = None\n self._jacobian = None\n self._a_priori_state_covariance = None", "def reset(self):\n self.reset_image_estimate()\n self.init_m_aux()\n self.reset_hessian_and_bias()\n self.reset_adadelta_variables()", "def reset_position(self):\n import interface\n\n print(\"Start restet position...\")\n\n sign = lambda x: int(x > 0) - int(x < 0) # Renvoi le signe de x (-1, 0, 1).\n fact_speed = 0.7 # On divise les vitesses.\n\n eps_angle = np.pi*20/180 # Tolerance angulaire. (en radian)\n eps_pos = 50 # Tolerance sur le carre centre autour du point d'arrive (en pxl).\n x0, y0 = 320, 230 # Point a atteindre.(en pxl)\n\n self.position, self.orientation = interface.get_position()\n\n # Calcul de l'angle entre barycentre de la voiture et point de depart.\n def get_alpha():\n \"\"\"\n Recupere l'angle entre l'axe horizontal et le vecteur position de la voiture.\n \"\"\"\n norm = np.sqrt((self.position[1] - y0)**2 + (self.position[0] - x0)**2)\n if norm:\n return np.arccos((self.position[0] - x0)/norm) * (1 - 2*(self.position[1] > y0))\n return 0\n\n control_angle = lambda a: (a+np.pi)%(2*np.pi) - np.pi\n\n # alpha : orientation souhaitee de la voiture pour retourner au point de depart (comprise entre -pi et +pi)\n\n # As long as we are not in the direction of the center, the car rotates on itself\n print(\"angle de la voiture en degre:\", self.orientation*180/np.pi)\n print(\"angle qui reste a faire:\", control_angle(np.pi - get_alpha() + self.orientation))\n print(\"\\tOrientation vers la cible....\")\n fact_bis = fact_speed\n while abs(control_angle(np.pi - get_alpha() + self.orientation)) > eps_angle:\n # while True:\n fact_bis *= 1.01\n # interface.move_wheel(\"l\", -0.4)\n # interface.move_wheel(\"r\", 0.4)\n interface.move_wheel(\"l\", -fact_bis*control_angle(np.pi + get_alpha() - self.orientation)/np.pi)\n interface.move_wheel(\"r\", fact_bis*control_angle(np.pi + get_alpha() - self.orientation)/np.pi)\n self.position, self.orientation = interface.get_position()\n print(\"Orientation: \", control_angle(np.pi - get_alpha() + self.orientation),\n \"position actuelle: \", self.position, self.orientation)\n # print(\"fact speed : \", fact_bis)\n # As long as we are not at the center, the car goes straight\n interface.move_wheel(\"\", 0)\n\n input(\"suite\")\n\n print(\"\\tavancer vers la cible\")\n while abs(x0 - self.position[0]) > eps_pos or abs(y0 - 
self.position[1]) > eps_pos:\n # print(abs(x0 - self.position[0]), abs(y0 - self.position[1]))\n print(\"Avancer vers la cible - distance\", 0.5*(np.sqrt((self.position[1] - y0)**2 + (self.position[0] - x0)**2) / norm))\n interface.move_wheel(\"\", (0.5*(np.sqrt((self.position[1] - y0)**2 + (self.position[0] - x0)**2) / norm)))\n self.position, self.orientation = interface.get_position()\n print(\"Avancer vers la cible - position : \", self.position, self.orientation)\n\n # As long as the the car is not facing the chosen direction, it rotates on itself\n interface.move_wheel(\"\", 0)\n print(\"\\torientation finale\")\n while abs(np.pi/2 - self.orientation) > eps_angle:\n print(\"Orientation finale - Angle : \", abs(np.pi/2 - self.orientation))\n interface.move_wheel(\"l\", -fact_speed*(0.5+0.5*(abs(abs(self.orientation)-np.pi/2))/np.pi))\n interface.move_wheel(\"r\", fact_speed*(0.5+0.5*(abs(abs(self.orientation)-np.pi/2))/np.pi))\n self.position, self.orientation = interface.get_position()\n\n interface.move_wheel(\"\", 0)\n print(\"\\tterminated\")", "def reset_parameters(self):\n self.lstm.reset_parameters()", "def reset(self):\n self._robot.reset()\n self._world.reset()\n self.goal = self.sample_goal()\n initial_observation = self.get_observation().observation\n return initial_observation", "def specific_reset(self) -> None:\n\n # set agent and goal positions\n self.agent.specific_reset()\n agent_pos = self.agent.init_xyz\n agent_pos[:2] = self.world.generate_random_xyz_position()[:2]\n goal_pos = agent_pos\n while np.linalg.norm(agent_pos[:2]-goal_pos[:2]) < self.world.body_min_distance:\n goal_pos = self.world.generate_random_xyz_position()\n # adjust the height of agent\n # agent_pos = np.concatenate((agent_pos[:2], [self.agent.init_xyz[2]]))\n self.agent.set_position(agent_pos)\n self.goal.set_position(goal_pos)\n self.old_dist = self.get_xy_distance()\n\n # set agent orientation towards goal\n yaw = angle2pos(self.agent.get_position(), self.goal.get_position())\n yaw = self.agent.init_rpy[2] + yaw\n # apply random orientation to agent.\n yaw += np.random.uniform(-np.pi, np.pi)\n quaternion = self.bc.getQuaternionFromEuler([0, 0, yaw])\n self.agent.set_orientation(quaternion)\n\n # reset obstacle positions\n if len(self.obstacles) > 0:\n obs_init_pos = env_utils.generate_obstacles_init_pos(\n num_obstacles=len(self.obstacles),\n agent_pos=self.agent.get_position(),\n goal_pos=self.goal.get_position(),\n world=self.world,\n min_allowed_distance=self.world.body_min_distance,\n agent_obstacle_distance=self.agent_obstacle_distance\n )\n for i in range(len(self.obstacles)):\n self.obstacles[i].set_position(obs_init_pos[i])", "def reset(self):\n super().reset()\n self.m_n = 1\n self.m_num_errors = 0\n self.m_d = 0\n self.m_lastd = 0\n self.m_mean = 0.0\n self.m_std_temp = 0.0\n self.m_m2s_max = 0.0\n self.estimation = 0.0", "def reset(self):\n newPerm = randomUtils.randomPermutation(self.xArray.tolist(),self)\n self.pot = np.asarray(newPerm)", "def reset(self):\n self.num_frames = 0\n self.nbin = self.nfft // 2 + 1\n\n if self.D==1:\n self.fft_in_buffer[:] = 0.\n self.X[:] = 0.\n self.y_p[:] = 0.\n else:\n self.fft_in_buffer[:,:] = 0.\n self.X[:,:] = 0.\n self.y_p[:,:] = 0.\n\n self.dft = DFT(nfft=self.nfft,D=self.D,\n analysis_window=self.analysis_window,\n synthesis_window=self.synthesis_window,\n transform=self.transform)", "def reset(self):\n raise NotImplementedError('Abstract method \"reset\" must be '\n 'specialised!')", "def reset(self):\n self._previous_v = 0\n self._previous_m 
= 0\n self._previous_shape = 0", "def clear(self):\r\n self.SetPoint = 0.0\r\n\r\n self.PTerm = 0.0\r\n self.ITerm = 0.0\r\n self.DTerm = 0.0\r\n self.last_error = 0.0\r\n\r\n # Windup Guard\r\n self.int_error = 0.0\r\n self.windup_guard = 20.0\r\n\r\n self.output = 0.0", "def reset(self):\n self.solver = None", "def reset(self):\n\t\t\n\t\t# The measured information, from the shape measurement on the observed image\n\t\tself.mes_x = 0.0\n\t\tself.mes_y = 0.0\n\t\tself.mes_a = 0.0\n\t\tself.mes_b = 0.0\n\t\tself.mes_theta = 0.0 # Sextractor : from -90 to 90 deg\n\t\tself.mes_fwhm = 0.0\n\t\tself.mes_flux = 0.0\n\t\tself.mes_fluxerr = 0.0\n\t\tself.mes_flux_max = 0.0\n\n\t\tself.mes_sky = 0.0\n\t\tself.mes_sig = 0.0", "def reset(self, lens=None, is_3d=True, scale=1.0):\r\n if lens != None:\r\n view = _LookAtMatrix(self.at, self.start_eye, [0, 1, 0])\r\n projection = _ProjectionMatrix(lens[0], lens[1], lens[2] / scale, lens[3])\r\n self.model_view = dot(view, projection)\r\n elif not is_3d:\r\n view = _LookAtMatrix(self.at, self.start_eye, [0, 1, 0])\r\n projection = _OrthographicMatrix(scale=scale)\r\n self.model_view = dot(view, projection)\r\n # TODO some way of resetting to original matrix\r\n self.mtrx = copy(self.model_view)\r\n self.rtn = [0.0, 0.0, 0.0]\r\n self.was_moved = True", "def reset_q_table(self):\n for i in range(len(self.qtable)):\n for j in range(len(self.qtable[0])):\n self.qtable[i][j] = 0\n self.epsilon = 1 # Reset espilon as well.", "def reset_world():\n __switch_ctrl.call(start_controllers=[],\n stop_controllers=[\"hand_position_trajectory_controller\", \"arm_position_trajectory_controller\", \"joint_state_controller\"],\n strictness=SwitchControllerRequest.BEST_EFFORT)\n __pause_physics.call()\n\n joint_names = ['j1', 'j2', 'j3', 'j4', 'j5', 'flange','H1_F1J1', 'H1_F1J2',\n 'H1_F1J3', 'H1_F2J1', 'H1_F2J2', 'H1_F2J3','H1_F3J1', 'H1_F3J2', 'H1_F3J3']\n joint_positions = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0] \n\n __set_model.call(model_name=\"denso\",\n urdf_param_name=\"robot_description\",\n joint_names=joint_names,\n joint_positions=joint_positions)\n\n timer = Timer(0.0, __start_ctrl)\n timer.start()\n\n time.sleep(0.1)\n __unpause_physics.call()\n\n #__reset_world.call()\n spawn_extras()", "def reset(self):\n self.epsilon = self.start", "def reset(self):\n TNavigator.reset(self)\n TPen._reset(self)\n self._clear()\n self._drawturtle()\n self._update()", "def clear(self):\n self.SetPoint = 0.0\n\n self.PTerm = 0.0\n self.ITerm = 0.0\n self.DTerm = 0.0\n self.last_error = 0.0\n\n # Windup Guard\n self.int_error = 0.0\n self.windup_guard = 20.0\n\n self.output = 0.0", "def clear(self):\n self.SetPoint = 0.0\n\n self.PTerm = 0.0\n self.ITerm = 0.0\n self.DTerm = 0.0\n self.last_error = 0.0\n\n # Windup Guard\n self.int_error = 0.0\n self.windup_guard = 20.0\n\n self.output = 0.0", "def clear(self):\n self.SetPoint = 0.0\n\n self.PTerm = 0.0\n self.ITerm = 0.0\n self.DTerm = 0.0\n self.last_error = 0.0\n\n # Windup Guard\n self.int_error = 0.0\n self.windup_guard = 20.0\n\n self.output = 0.0", "def reset(self):\n self.x_prev = np.zeros_like(self.mu)", "def resetRigPose(self):\n\n # get the network node\n networkNode = self.returnNetworkNode\n\n # remove the rigPose attribute on the networkNode\n cmds.deleteAttr(networkNode, at=\"rigPose\")\n\n # recreate rig pose node with defaults\n self.getReferencePose(\"rigPose\")\n\n # set slider\n self.overallSlider.setValue(0)\n self.overallSlider.setValue(100)", "def reset(self):\r\n 
self.state = copy.copy(self.mu)", "def reset(self):\n self.num_steps = 0\n self.world_state = self.action = None", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)", "def reset(self):\n self.state = copy.copy(self.mu)" ]
[ "0.6537879", "0.6535229", "0.64604867", "0.64147246", "0.6386366", "0.6382191", "0.6351365", "0.62435204", "0.623713", "0.6203848", "0.6164331", "0.61403173", "0.612164", "0.61177164", "0.6117006", "0.60989344", "0.6085719", "0.6066662", "0.60641676", "0.60370797", "0.60288024", "0.6020595", "0.59831166", "0.59458846", "0.59455335", "0.5945177", "0.5937484", "0.5937377", "0.59263575", "0.59239495", "0.59098524", "0.5885081", "0.5882263", "0.5851015", "0.58425784", "0.5835618", "0.58354604", "0.58335555", "0.58289325", "0.58287144", "0.5825072", "0.5825072", "0.57876974", "0.57876974", "0.5784658", "0.57806224", "0.57794946", "0.5778529", "0.5773628", "0.576928", "0.57643485", "0.57595855", "0.57595855", "0.57595855", "0.5746779", "0.57369316", "0.5733393", "0.5728711", "0.57246226", "0.5713875", "0.5707804", "0.5707804", "0.5707804", "0.570269", "0.57026565", "0.5682239", "0.5678431", "0.56765944", "0.56708467", "0.56660026", "0.5664419", "0.5664353", "0.5662216", "0.5634432", "0.562578", "0.561767", "0.5615963", "0.56002474", "0.55980134", "0.5596296", "0.5595491", "0.5587185", "0.55864155", "0.5574512", "0.5574512", "0.5574512", "0.55596024", "0.55554897", "0.55374527", "0.55315125", "0.5530398", "0.5530398", "0.5530398", "0.5530398", "0.5530398", "0.5530398", "0.5530398", "0.5530398", "0.5530398", "0.5530398", "0.5530398" ]
0.0
-1
Backwards compatibility with MJRL.
def mj_render(self): self.render(mode='human')
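For context on this record: mj_render is a one-line compatibility shim so that MJRL-era callers that invoke env.mj_render() keep working against gym-style environments, where rendering goes through render(mode=...). A minimal, self-contained sketch of the pattern (the class and its render body are illustrative assumptions, not taken from this dataset):

class CompatEnv:
    # Toy stand-in for a gym-style environment.
    def render(self, mode='human'):
        # Normal gym-style rendering entry point.
        print('rendering in %s mode' % mode)

    def mj_render(self):
        # Backwards compatibility with MJRL: old callers use env.mj_render(),
        # which simply forwards to the gym-style render().
        self.render(mode='human')

env = CompatEnv()
env.mj_render()   # behaves exactly like env.render(mode='human')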
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __call__(self) -> None:", "def moi(self):\n\n pass", "def __call__(self):\n raise NotImplementedError", "def __call__(self):\n\t\treturn", "def use(self):", "def support(self):", "def lro(self) -> global___Snippet.Lro:", "def regular(self):", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def __call__():", "def __call__(self):\n pass", "def __call__(self):\n pass", "def rest(self):\n\t\tpass", "def express(self):\n raise NotImplementedError", "def __int__(self):\n pass", "def __call__(self):\n raise NotImplementedError()", "def CL(self):", "def DM(self):", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __init__(self):\n raise NotImplementedError", "def __call__(object):", "def result(self):", "def result(self):", "def __init__():", "def __call__( self ):\n pass", "def mechanism(self):", "def LDLT(self):\n\t\tpass", "def _init(self):", "def __init__(self, ):\n super(MayaRefobjInterface, self).__init__()", "def __upgrade(self):", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __init__(__self__):\n pass", "def __call__(self):\r\n raise NotImplementedError('override me')", "def __init__(self):\r\n\t\tpass", "def __call__(self):", "def __call__(self):", "def _regr_basic():", "def __init__(self):\n raise NotImplementedError()", "def init(self):", "def init(self):", "def exo2():", "def scheme(self):", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__(self):\n\t\tpass", "def __init__ (self) :", "def call(self):", "def mezclar_bolsa(self):", "def basic(self):\n pass", "def get(self):\n raise NotImplementedError", "def implement(self):\n\t#@DEBUG remove comments", "def init(self) -> None:", "def target(self):", "def LocalReplaySupport(self): # real signature unknown; restored from __doc__\n pass", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def initialize(self):", "def __init__(object):", "def object(self):", "def logic(self):\r\n raise NotImplementedError", "def function(self):\n raise NotImplementedError", "def degibber(self):", "def result(self):\n raise NotImplementedError('Must be implemented in subclasses.')", "def _create_impl(self):", "def _init(self):\n raise NotImplementedError", "def __init__(self) -> None:", "def __init__(self) -> None:", "def _build_impl(self):", "def _to_be_wrapped(self) -> None:", "def __init__(self):\n raise NotImplementedError(\"This class cannot be instantiated!\")", "def __post_init__(self) -> 'None':", "def parse(self):", "def protocol(self):\n ...", "def prim_method(self):", "def prim_method(self):", "def __init__ (self):\n pass" ]
[ "0.62065375", "0.59999824", "0.59971726", "0.5977466", "0.5968731", "0.59595406", "0.58379644", "0.5770757", "0.5765837", "0.5765837", "0.5765837", "0.5765837", "0.5765837", "0.57506377", "0.57506377", "0.570763", "0.5704474", "0.5680616", "0.5670139", "0.5652243", "0.56391144", "0.5630054", "0.5630054", "0.5630054", "0.5630054", "0.5625588", "0.55791646", "0.55791646", "0.5568128", "0.55565", "0.55249304", "0.5477745", "0.54731804", "0.54703516", "0.54682314", "0.5455801", "0.5455801", "0.5455801", "0.5455801", "0.5455801", "0.5455801", "0.5455801", "0.5455801", "0.5455801", "0.5455801", "0.5428523", "0.5410492", "0.5399632", "0.5399632", "0.5392773", "0.53923887", "0.53800744", "0.53800744", "0.53757983", "0.5336519", "0.5331377", "0.5331377", "0.5331377", "0.5331377", "0.5331377", "0.5331377", "0.5331377", "0.5331377", "0.5331377", "0.5331377", "0.5331377", "0.5331377", "0.5331377", "0.5331377", "0.5322222", "0.52992314", "0.5295724", "0.5293197", "0.52799255", "0.5261485", "0.52522385", "0.5245271", "0.52446836", "0.5243065", "0.5243065", "0.5243065", "0.5243065", "0.52292615", "0.5221057", "0.52189296", "0.52145106", "0.52128226", "0.52123857", "0.52044517", "0.520321", "0.52020156", "0.52020156", "0.5194851", "0.51894134", "0.51880926", "0.51803577", "0.5179646", "0.5175688", "0.51745915", "0.51745915", "0.517173" ]
0.0
-1
Loads class labels at 'path'
def load_class_ck(path): fp = open(path, "r",encoding="utf-8") names = fp.read().split("\n")[:-1] return names
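A side note on the positive document above: it follows the common one-class-name-per-line label file convention, and the [:-1] slice drops the empty string that str.split("\n") produces when the file ends with a newline. A small, self-contained sketch of the same idea (the file name and class names are made up for illustration):

from pathlib import Path

# Write a tiny label file: one class name per line, with a trailing newline.
Path('classes_ck.txt').write_text('anger\ncontempt\ndisgust\n', encoding='utf-8')

with open('classes_ck.txt', 'r', encoding='utf-8') as fp:
    names = fp.read().split('\n')[:-1]   # drop the empty entry after the final newline

print(names)   # ['anger', 'contempt', 'disgust']

When a trailing newline is not guaranteed, read().splitlines() avoids the manual slice.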
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def load_labels(self, pathLabel):\n self.pathLabel = pathLabel\n self.labelList = os.listdir(pathLabel)", "def load_label(path: str) -> dict:\n if not os.path.exists(path):\n print(f\"Warning, try to load non-exist label {path}\")\n return None\n return np.load(path, allow_pickle=True).tolist()", "def load_labels(path, kmer=True, rg=True, clip=True, rna=True, go=True):\n\n labels = dict()\n if go: labels[\"X_GO\"] = gzip.open(os.path.join(path,\n \"matrix_GeneOntology.tab.gz\")).readline().split(\"\\t\")\n if kmer: labels[\"X_KMER\"] = gzip.open(os.path.join(path,\n \"matrix_RNAkmers.tab.gz\")).readline().split(\"\\t\")\n if rg: labels[\"X_RG\"] = gzip.open(os.path.join(path,\n \"matrix_RegionType.tab.gz\")).readline().split(\"\\t\")\n if clip: labels[\"X_CLIP\"] = gzip.open(os.path.join(path,\n \"matrix_Cobinding.tab.gz\")).readline().split(\"\\t\")\n if rna: labels[\"X_RNA\"] = gzip.open(os.path.join(path,\n \"matrix_RNAfold.tab.gz\")).readline().split(\"\\t\")\n return labels", "def _load_labels(self, label_path: str) -> List[str]:\n with open(label_path, 'r') as f:\n return [line.strip() for _, line in enumerate(f.readlines())]", "def load_labels(path):\n with open(path, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n labels = {}\n for row_number, content in enumerate(lines):\n pair = re.split(r'[:\\s]+', content.strip(), maxsplit=1)\n if len(pair) == 2 and pair[0].strip().isdigit():\n labels[int(pair[0])] = pair[1].strip()\n else:\n labels[row_number] = pair[0].strip()\n return labels", "def load_labels(path):\n with open(path, \"r\", encoding=\"utf-8\") as f:\n lines = f.readlines()\n labels = {}\n for row_number, content in enumerate(lines):\n pair = re.split(r\"[:\\s]+\", content.strip(), maxsplit=1)\n if len(pair) == 2 and pair[0].strip().isdigit():\n labels[int(pair[0])] = pair[1].strip()\n else:\n labels[row_number] = pair[0].strip()\n # print(labels)\n return labels", "def load(path):\n pass", "def load(self, path=None):\n if not path:\n path = \"classifiers.txt\"\n self.classifiers = list(filter(None, open(path).read().splitlines()))", "def load(cls, labpath: str) -> None:\n raise NotImplementedError", "def load_from_path(self, paths, label_key='labels'):\n data = []\n labels = []\n for path in paths:\n with tf.io.gfile.GFile(path, 'rb') as f:\n d = {\n k.decode('utf8'): v\n for k, v in cPickle.load(f, encoding='bytes').items()\n }\n data.append(d['data'])\n labels.append(d[label_key])\n data = np.concatenate(data, axis=0)\n data = data.reshape((data.shape[0], 3, 32, 32))\n labels = np.concatenate(labels, axis=0)\n labels = np.reshape(labels, (len(labels), 1))\n\n if tf.keras.backend.image_data_format() == 'channels_last':\n data = data.transpose(0, 2, 3, 1)\n\n return data, labels", "def _label_loader(self, prefix):\n return self._base_loader(prefix, 'labels')", "def load(self, path):\n pass", "def load(self, path):\n pass", "def load_classes(path):\n fp = open(path, \"r\")\n names = fp.read().split(\"\\n\")[:-1]\n # -1까지 하는 이유 마지막에 공백이 있다.\n print(\"Load Class Nums : \",len(names))\n return names", "def load_labels():\n filename = os.path.join(config['inference']['model_dir'], 'output_labels.txt')\n global labels\n labels = [line.rstrip() for line in tf.gfile.FastGFile(filename)]", "def load_labels(path):\n with open(path, 'r', encoding='utf-8') as f:\n lines = f.readlines()\n labels = []\n for row_number, content in enumerate(lines):\n pair = re.split(r'[:\\s]+', content.strip(), maxsplit=1)\n #if len(pair) == 2 and pair[0].strip().isdigit():\n 
labels.append(np.array([int(pair[0].strip()),pair[1].strip()]))\n #else:\n # labels.append(pair[0].strip())\n return np.array(labels)", "def load(self, path: str):\n pass", "def load_label(path):\r\n anno_path=path\r\n root = ET.parse(anno_path).getroot()\r\n size = root.find('size')\r\n width = float(size.find('width').text)\r\n height = float(size.find('height').text)\r\n label = []\r\n for obj in root.iter('object'):\r\n try:\r\n difficult = int(obj.find('difficult').text)\r\n except ValueError:\r\n difficult = 0\r\n cls_name = obj.find('name').text.strip().lower()\r\n xml_box = obj.find('bndbox')\r\n xmin = (float(xml_box.find('xmin').text) - 1)\r\n ymin = (float(xml_box.find('ymin').text) - 1)\r\n xmax = (float(xml_box.find('xmax').text) - 1)\r\n ymax = (float(xml_box.find('ymax').text) - 1)\r\n print(xmin,ymin,xmax,ymax)", "def load_annotations(path):\n annotations = joblib.load(path);\n return annotations", "def load_classes(path):\n with open(path, 'r') as f:\n names = f.read().split(\"\\n\")\n # Filter removes empty strings (such as last line)\n return list(filter(None, names))", "def loadLabelMap(self):\n print(\">>> load Label Map: {}\".format(self.PATH_TO_LABELS))\n self.label_map = self.label_map_util.load_labelmap(\n self.PATH_TO_LABELS)\n self.categories = self.label_map_util.convert_label_map_to_categories(\n self.label_map,\n max_num_classes=self.NUM_CLASSES,\n use_display_name=True)\n\n self.category_index = self.label_map_util.create_category_index(\n self.categories)\n # create a list filled with 0\n self.categoryNames = [0 for x in range(self.NUM_CLASSES)]\n for cc in self.categories:\n self.categoryNames[int(cc[\"id\"]) - 1] = cc[\"name\"]", "def __init__(self, path, type = 'mrk') :\n stim = np.loadtxt(path, skiprows = 1, usecols = (0,1), dtype = np.dtype(int))\n labels = np.loadtxt(path, skiprows = 1, usecols = 2, dtype = np.dtype(str))\n\n self.dic = dict.fromkeys(labels)\n for key, _ in self.dic.items() : self.dic[key] = []\n for k in range(len(stim)) :\n self.dic[labels[k]].append(stim[k, :])\n return None", "def load_labels(label_path):\r\n\r\n with open(label_path, \"r\") as f:\r\n\r\n lines = f.readlines()\r\n \r\n label = {}\r\n index = []\r\n for i, line in enumerate(lines):\r\n sp = line.split()\r\n label[sp[0]] = [int(sp[1]),int(sp[2]),int(sp[3])]\r\n index.append([int(sp[3]),int(sp[2]),int(sp[1])])\r\n\r\n return label, index", "def load_labels(source_dir, label_pattern):\r\n\r\n logging.info(\"Loading labels from %s with pattern %s\"\r\n % (source_dir, label_pattern))\r\n label_files = glob(path.join(source_dir, label_pattern))\r\n if len(label_files) == 0:\r\n raise ValueError(\"No label files found with pattern %s\"\r\n % label_pattern)\r\n if len(label_files) > 1:\r\n raise ValueError(\"Only one label file supported ATM.\")\r\n labels = np.load(label_files[0]).flatten()\r\n logging.info(\"Label loading complete. 
Shape is %r\" % (labels.shape,))\r\n return labels", "def load_labels(path, encoding='utf-8'):\r\n with open(path, 'r', encoding=encoding) as f:\r\n lines = f.readlines()\r\n if not lines:\r\n return {}\r\n\r\n if lines[0].split(' ', maxsplit=1)[0].isdigit():\r\n pairs = [line.split(' ', maxsplit=1) for line in lines]\r\n return {int(index): label.strip() for index, label in pairs}\r\n else:\r\n return {index: line.strip() for index, line in enumerate(lines)}", "def load_label(self, pr):\n return", "def load_labels(filename):\n return [line.rstrip() for line in tf.gfile.GFile(filename)]", "def load_labels(filename):\n return [line.rstrip() for line in tf.gfile.GFile(filename)]", "def load_labels(filename):\n return [line.rstrip() for line in tf.gfile.GFile(filename)]", "def load(self, label_lookup_path, uid_lookup_path):\n if not tf.gfile.Exists(uid_lookup_path):\n \ttf.logging.fatal('File does not exist %s', uid_lookup_path)\n if not tf.gfile.Exists(label_lookup_path):\n \ttf.logging.fatal('File does not exist %s', label_lookup_path)\n\n # Loads mapping from string UID to human-readable string\n proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()\n uid_to_human = {}\n p = re.compile(r'[n\\d]*[ \\S,]*')\n for line in proto_as_ascii_lines:\n\t\tparsed_items = p.findall(line)\n\t\tuid = parsed_items[0]\n\t\thuman_string = parsed_items[2]\n\t\tuid_to_human[uid] = human_string\n\n # Loads mapping from string UID to integer node ID.\n node_id_to_uid = {}\n proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()\n for line in proto_as_ascii:\n \tif line.startswith(' target_class:'):\n \t\ttarget_class = int(line.split(': ')[1])\n \tif line.startswith(' target_class_string:'):\n \t\ttarget_class_string = line.split(': ')[1]\n \t\tnode_id_to_uid[target_class] = target_class_string[1:-2]\n\n # Loads the final mapping of integer node ID to human-readable string\n node_id_to_name = {}\n for key, val in node_id_to_uid.items():\n\t\tif val not in uid_to_human:\n\t\t\ttf.logging.fatal('Failed to locate: %s', val)\n\t\tname = uid_to_human[val]\n\t\tnode_id_to_name[key] = name\n\n return node_id_to_name", "def load(self, path):\n # Open tarfile\n tar = tarfile.open(mode=\"r:gz\", fileobj=open(path, \"rb\"))\n\n # Iterate over every member\n for filename in tar.getnames():\n if filename == \"model.h5\":\n self.k_model = load_model(tar.extractfile(filename))\n if filename == \"model.w2v\":\n self.w2v_model = pickle.loads(tar.extractfile(filename).read())\n if filename == \"tokenizer.pkl\":\n self.tokenizer = pickle.loads(tar.extractfile(filename).read())\n if filename == \"label_encoder.pkl\":\n self.label_encoder = pickle.loads(tar.extractfile(filename).read())\n if filename == \"params.pkl\":\n params = pickle.loads(tar.extractfile(filename).read())\n for k, v in params.items():\n self.__setattr__(k, v)", "def load_labels(labels_dir, trial_name):\n labels_path = labels_dir + trial_name + \".txt\"\n raw_labels_data = np.genfromtxt(labels_path, dtype=np.int,\n converters=LABELS_CONVERTERS,\n usecols=LABELS_USECOLS)\n #print(\"rawlabelsdata: \", raw_labels_data)\n #print(get_first_frame(labels_path))\n frames = np.arange(get_first_frame(labels_path), get_last_frame(labels_path)+1, dtype=np.int)\n #print(\"frames: \", frames)\n #print(frames.shape)\n #labels = np.zeros(frames.shape, dtype=np.int)\n labels1 = []\n #print(labels)\n for start, end, label in raw_labels_data:\n #mask = (frames >= start) & (frames <= end)\n #print(start)\n #print(end)\n i = start\n while(i<end):\n if(i%6 == 
0):\n labels1.append(label)\n i = i+1\n\n #labels[mask] = label\n #print(\"labels[mask]: \",labels[mask])\n labels1 = np.array(labels1)\n #print(labels1)\n labels_data = labels1.reshape(-1,1)\n #print(labels1.shape)\n #print(\"labels: \", labels_data)\n \n return labels_data", "def load_metadata(self, path):\n self.paths = []\n self.annotations = []\n\n with open(path, \"r\") as f:\n for line in f:\n line = line.strip().split(\" \")\n \n rgb_path = line[0]\n\n if len(line) > 1:\n bounding_boxes = np.array([list(map(int, box.split(','))) for box in line[1:]])\n else:\n bounding_boxes = []\n \n self.annotations.append({\n \"rgb_path\": rgb_path, \n \"bounding_boxes\": bounding_boxes,\n })", "def load_labels(self, labels):\n self.labels = pd.DataFrame(labels, index=[\"label\"]).T", "def load_imgsLabels(self, image_paths):\n \n# label = image_paths[-1]\n \n images = self.load_images(image_paths)\n \n images = self.resize_images(images)\n \n images_list = self.greyscale_images(images)\n\n return images_list", "def _load_classes(self):\n\t\t# load class names (name -> label)\n\t\tcategories = self.coco.loadCats(self.coco.getCatIds())\n\t\tcategories.sort(key=lambda x: x['id'])\n\n\t\tself.classes \t\t\t\t= {}\n\t\tself.coco_labels \t\t\t= {}\n\t\tself.coco_labels_inverse \t= {}\n\t\tfor c in categories:\n\t\t\tself.coco_labels[len(self.classes)] = c['id']\n\t\t\tself.coco_labels_inverse[c['id']] = len(self.classes)\n\t\t\tself.classes[c['name']] = len(self.classes)\n\t\tself.labels = {}\n\t\tfor key, value in self.classes.items():\n\t\t\tself.labels[value] = key\n\n\t\tprint(self.coco_labels)\n\t\tprint(self.coco_labels_inverse)\n\t\tprint(self.classes)\n\t\tprint(self.labels)", "def loadgraph(self, path):\n\n raise NotImplementedError", "def load_paths_and_labels(self,classes):\n\t\tim_paths , im_labels = [], [] \n\n\t\tfor image_type in classes:\n\t\t\tmypath = self.data_path + self.dataset + '/' + image_type\n\t\t\tonlyfiles = [ f for f in listdir(mypath) if isfile(join(mypath,f)) ]\n\t\t\tclass_support = 0\n\t\t\tfor file_name in onlyfiles:\n\t\t\t\t#print file_name\n\t\t\t\tif file_name != '.DS_Store':\n\t\t\t\t\tim_path = mypath = self.data_path + self.dataset + '/' + image_type + '/' + file_name\n\t\t\t\t\tim_paths.append(im_path)\n\t\t\t\t\tim_labels.append(image_type)\n\t\t\t\tclass_support += 1\n\t\t\t\tif self.support_per_class != None and class_support == self.support_per_class:\n\t\t\t\t\tbreak\n\n\t\tcombined = zip(im_paths, im_labels)\n\t\trandom.shuffle(combined)\n\t\t\n\t\tim_paths[:], im_labels[:] = zip(*combined)\n\n\t\treturn im_paths,im_labels", "def load(dirpath):\n\n batch = Pickler.load(join(dirpath, 'batch.pkl'))\n\n # load annotator\n if exists(join(dirpath, 'annotation.json')):\n annotator = Annotation.load(dirpath)\n batch.annotator = annotator\n\n return batch", "def load(self, label_lookup_path, uid_lookup_path):\n if not tf.gfile.Exists(uid_lookup_path):\n tf.logging.fatal('File does not exist %s', uid_lookup_path)\n if not tf.gfile.Exists(label_lookup_path):\n tf.logging.fatal('File does not exist %s', label_lookup_path)\n\n # Loads mapping from string UID to human-readable string\n proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()\n uid_to_human = {}\n p = re.compile(r'[n\\d]*[ \\S,]*')\n for line in proto_as_ascii_lines:\n parsed_items = p.findall(line)\n uid = parsed_items[0]\n human_string = parsed_items[2]\n uid_to_human[uid] = human_string\n\n # Loads mapping from string UID to integer node ID.\n node_id_to_uid = {}\n proto_as_ascii = 
tf.gfile.GFile(label_lookup_path).readlines()\n for line in proto_as_ascii:\n if line.startswith(' target_class:'):\n target_class = int(line.split(': ')[1])\n if line.startswith(' target_class_string:'):\n target_class_string = line.split(': ')[1]\n node_id_to_uid[target_class] = target_class_string[1:-2]\n\n # Loads the final mapping of integer node ID to human-readable string\n node_id_to_name = {}\n for key, val in node_id_to_uid.items():\n if val not in uid_to_human:\n tf.logging.fatal('Failed to locate: %s', val)\n name = uid_to_human[val]\n node_id_to_name[key] = name\n\n return node_id_to_name", "def load(self, label_lookup_path, uid_lookup_path):\n if not tf.gfile.Exists(uid_lookup_path):\n tf.logging.fatal('File does not exist %s', uid_lookup_path)\n if not tf.gfile.Exists(label_lookup_path):\n tf.logging.fatal('File does not exist %s', label_lookup_path)\n\n # Loads mapping from string UID to human-readable string\n proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()\n uid_to_human = {}\n p = re.compile(r'[n\\d]*[ \\S,]*')\n for line in proto_as_ascii_lines:\n parsed_items = p.findall(line)\n uid = parsed_items[0]\n human_string = parsed_items[2]\n uid_to_human[uid] = human_string\n\n # Loads mapping from string UID to integer node ID.\n node_id_to_uid = {}\n proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()\n for line in proto_as_ascii:\n if line.startswith(' target_class:'):\n target_class = int(line.split(': ')[1])\n if line.startswith(' target_class_string:'):\n target_class_string = line.split(': ')[1]\n node_id_to_uid[target_class] = target_class_string[1:-2]\n\n # Loads the final mapping of integer node ID to human-readable string\n node_id_to_name = {}\n for key, val in node_id_to_uid.items():\n if val not in uid_to_human:\n tf.logging.fatal('Failed to locate: %s', val)\n name = uid_to_human[val]\n node_id_to_name[key] = name\n\n return node_id_to_name", "def load(self, label_lookup_path, uid_lookup_path):\n if not tf.gfile.Exists(uid_lookup_path):\n tf.logging.fatal('File does not exist %s', uid_lookup_path)\n if not tf.gfile.Exists(label_lookup_path):\n tf.logging.fatal('File does not exist %s', label_lookup_path)\n\n # Loads mapping from string UID to human-readable string\n proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()\n uid_to_human = {}\n p = re.compile(r'[n\\d]*[ \\S,]*')\n for line in proto_as_ascii_lines:\n parsed_items = p.findall(line)\n uid = parsed_items[0]\n human_string = parsed_items[2]\n uid_to_human[uid] = human_string\n\n # Loads mapping from string UID to integer node ID.\n node_id_to_uid = {}\n proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()\n for line in proto_as_ascii:\n if line.startswith(' target_class:'):\n target_class = int(line.split(': ')[1])\n if line.startswith(' target_class_string:'):\n target_class_string = line.split(': ')[1]\n node_id_to_uid[target_class] = target_class_string[1:-2]\n\n # Loads the final mapping of integer node ID to human-readable string\n node_id_to_name = {}\n for key, val in node_id_to_uid.items():\n if val not in uid_to_human:\n tf.logging.fatal('Failed to locate: %s', val)\n name = uid_to_human[val]\n node_id_to_name[key] = name\n\n return node_id_to_name", "def load(self, label_lookup_path, uid_lookup_path):\n if not tf.gfile.Exists(uid_lookup_path):\n tf.logging.fatal('File does not exist %s', uid_lookup_path)\n if not tf.gfile.Exists(label_lookup_path):\n tf.logging.fatal('File does not exist %s', label_lookup_path)\n\n # Loads mapping from 
string UID to human-readable string\n proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()\n uid_to_human = {}\n p = re.compile(r'[n\\d]*[ \\S,]*')\n for line in proto_as_ascii_lines:\n parsed_items = p.findall(line)\n uid = parsed_items[0]\n human_string = parsed_items[2]\n uid_to_human[uid] = human_string\n\n # Loads mapping from string UID to integer node ID.\n node_id_to_uid = {}\n proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()\n for line in proto_as_ascii:\n if line.startswith(' target_class:'):\n target_class = int(line.split(': ')[1])\n if line.startswith(' target_class_string:'):\n target_class_string = line.split(': ')[1]\n node_id_to_uid[target_class] = target_class_string[1:-2]\n\n # Loads the final mapping of integer node ID to human-readable string\n node_id_to_name = {}\n for key, val in node_id_to_uid.items():\n if val not in uid_to_human:\n tf.logging.fatal('Failed to locate: %s', val)\n name = uid_to_human[val]\n node_id_to_name[key] = name\n\n return node_id_to_name", "def detect_labels(path):\n client = vision.ImageAnnotatorClient()\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n image = vision.types.Image(content=content)\n response = client.label_detection(image=image)\n labels = response.label_annotations\n print('Labels:')\n return response", "def generate_labels(path_to_classes: str, path_to_dataset: str):\n\n print('Generating the labels...')\n\n path_to_labels = os.path.join(path_to_dataset, 'labels')\n\n if not os.path.isdir(path_to_labels):\n print('Creating labels folder at {}...'.format(path_to_labels))\n os.makedirs(path_to_labels)\n\n path_to_csv = os.path.join(path_to_labels, 'class_name_to_number.csv')\n path_to_txt = os.path.join(path_to_labels, 'labels.txt')\n\n # Read the list of characters into a dataframe\n classes = pd.read_csv(path_to_classes)\n\n # Write the class-label mapping to csv file\n write_class_label_map(classes, path_to_csv)\n\n # Write the labels to txt file\n write_labels_txt(pd.DataFrame(classes['Unicode']), path_to_txt)", "def load(self, path):\n load_model(path, self)", "def load(self, path):\n checkpoint = torch.load(path, map_location=torch.device(\"cpu\"))\n self.load_state_dict(checkpoint[\"state_dict\"])\n self.on_epoch_start(checkpoint[\"epoch\"])\n self.logger.info(\"Loaded controller network from %s\", path)", "def load_label(path_file):\n if '.csv' not in path_file:\n raise FileNotFoundError('Only CSV format is supported currently')\n\n t0 = time()\n df = pd.DataFrame()\n\n with open(path_file, 'r') as f:\n # TODO: Implement the logic once the format is finalised\n pass\n\n logging.info('Loading label data with {} rows from {} takes {} secs'.format(df.shape[0],\n path_file, time() - t0))\n return df", "def detect_labels(path):\n client = vision.ImageAnnotatorClient()\n\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n response = client.label_detection(image=image)\n labels = response.label_annotations\n #print('Labels:')\n\n #for label in labels:\n # print(label.description)\n return labels", "def load(self, label_lookup_path, uid_lookup_path):\n if not tf.gfile.Exists(uid_lookup_path):\n tf.logging.fatal('File does not exist %s', uid_lookup_path)\n if not tf.gfile.Exists(label_lookup_path):\n tf.logging.fatal('File does not exist %s', label_lookup_path)\n\n # Loads mapping from string UID to human-readable string\n proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()\n uid_to_human 
= {}\n p = re.compile(r'[n\\d]*[ \\S,]*')\n for line in proto_as_ascii_lines:\n parsed_items = p.findall(line)\n uid = parsed_items[0]\n human_string = parsed_items[2]\n uid_to_human[uid] = human_string\n\n # Loads mapping from string UID to integer node ID.\n node_id_to_uid = {}\n proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()\n for line in proto_as_ascii:\n if line.startswith(' target_class:'):\n target_class = int(line.split(': ')[1])\n if line.startswith(' target_class_string:'):\n target_class_string = line.split(': ')[1]\n node_id_to_uid[target_class] = target_class_string[1:-2]\n\n # Loads the final mapping of integer node ID to human-readable string\n node_id_to_name = {}\n for key, val in node_id_to_uid.items():\n if val not in uid_to_human:\n tf.logging.fatal('Failed to locate: %s', val)\n name = uid_to_human[val]\n node_id_to_name[key] = name\n\n return node_id_to_name", "def load(self, label_lookup_path, uid_lookup_path):\n if not tf.gfile.Exists(uid_lookup_path):\n tf.logging.fatal('File does not exist %s', uid_lookup_path)\n if not tf.gfile.Exists(label_lookup_path):\n tf.logging.fatal('File does not exist %s', label_lookup_path)\n\n # Loads mapping from string UID to human-readable string\n proto_as_ascii_lines = tf.gfile.GFile(uid_lookup_path).readlines()\n uid_to_human = {}\n p = re.compile(r'[n\\d]*[ \\S,]*')\n for line in proto_as_ascii_lines:\n parsed_items = p.findall(line)\n uid = parsed_items[0]\n human_string = parsed_items[2]\n uid_to_human[uid] = human_string\n\n # Loads mapping from string UID to integer node ID.\n node_id_to_uid = {}\n proto_as_ascii = tf.gfile.GFile(label_lookup_path).readlines()\n for line in proto_as_ascii:\n if line.startswith(' target_class:'):\n target_class = int(line.split(': ')[1])\n if line.startswith(' target_class_string:'):\n target_class_string = line.split(': ')[1]\n node_id_to_uid[target_class] = target_class_string[1:-2]\n\n # Loads the final mapping of integer node ID to human-readable string\n node_id_to_name = {}\n for key, val in node_id_to_uid.items():\n if val not in uid_to_human:\n tf.logging.fatal('Failed to locate: %s', val)\n name = uid_to_human[val]\n node_id_to_name[key] = name\n\n return node_id_to_name", "def load_labels_index_map(self, file_path):\n with open(file_path) as handle:\n self._labels_2_index = json.loads(handle.read())\n self._index_2_labels = {i: label.lower() for label, i in self._labels_2_index.items()}\n self._labels_dim = len(self._labels_2_index)", "def load(self, path):\n states = torch.load(path, map_location=lambda cpu, _: cpu)\n return states", "def detect_labels(path):\n from google.cloud import vision\n client = vision.ImageAnnotatorClient()\n\n # [START vision_python_migration_label_detection]\n with io.open(path, 'rb') as image_file:\n content = image_file.read()\n\n image = vision.types.Image(content=content)\n\n response = client.label_detection(image=image)\n labels = response.label_annotations\n ss=labels[0].description \n ss.split('/')[0]\n os.system(\"./ILOVEAPPLE/sort {} {}\".format(ss, path))\n # [END vision_python_migration_label_detection]", "def load(path: str, config_cls):\n\n return cfg.load(path, config_cls)", "def load_labels(db_dir, patient_id, flatten=True, unzipped=False):\n if unzipped:\n flat_labels = np.load(os.path.join(db_dir, '{:05d}_batched_lbls.npz'.format(patient_id)), allow_pickle=True)\n return flat_labels\n else:\n raw_labels = load_pkl(os.path.join(db_dir, '{:05d}_batched_lbls.pkl.gz'.format(patient_id)))\n if flatten:\n flat_labels = 
flatten_raw_labels(raw_labels)\n return flat_labels\n else:\n return raw_labels", "def load_from_path(path):\n module, attr = path.rsplit('.', 1)\n mod = importlib.import_module(module)\n return getattr(mod, attr)", "def load(self, label_lookup_path, uid_lookup_path):\n if not gfile.Exists(uid_lookup_path):\n tf.logging.fatal('File does not exist %s', uid_lookup_path)\n if not gfile.Exists(label_lookup_path):\n tf.logging.fatal('File does not exist %s', label_lookup_path)\n\n # Loads mapping from string UID to human-readable string\n proto_as_ascii_lines = gfile.GFile(uid_lookup_path).readlines()\n uid_to_human = {}\n p = re.compile(r'[n\\d]*[ \\S,]*')\n for line in proto_as_ascii_lines:\n parsed_items = p.findall(line)\n uid = parsed_items[0]\n human_string = parsed_items[2]\n uid_to_human[uid] = human_string\n\n # Loads mapping from string UID to integer node ID.\n node_id_to_uid = {}\n proto_as_ascii = gfile.GFile(label_lookup_path).readlines()\n for line in proto_as_ascii:\n if line.startswith(' target_class:'):\n target_class = int(line.split(': ')[1])\n if line.startswith(' target_class_string:'):\n target_class_string = line.split(': ')[1]\n node_id_to_uid[target_class] = target_class_string[1:-2]\n\n # Loads the final mapping of integer node ID to human-readable string\n node_id_to_name = {}\n for key, val in node_id_to_uid.iteritems():\n if val not in uid_to_human:\n tf.logging.fatal('Failed to locate: %s', val)\n name = uid_to_human[val]\n node_id_to_name[key] = name\n\n return node_id_to_name", "def load(self, path):\n actor_state_dict = load(path + '_actor.pkl')\n critic_state_dict = load(path + '_critic.pkl')\n self.actor_net.load_state_dict(actor_state_dict)\n self.critic_net.load_state_dict(critic_state_dict)", "def load(self, filename):\n self.classifiers = []\n for i in range(0, self.category_level):\n clf = joblib.load(filename + '.level_%d' % (i + 1))\n self.classifiers.append(clf)", "def load_mnist(path, kind='train'):\n\tlabels_path = os.path.join(path,'%s-labels.idx1-ubyte'%kind)\n\timages_path = os.path.join(path,'%s-images.idx3-ubyte'%kind)\n\t\n\twith open(labels_path, 'rb') as lbpath:\n\t\tmagic, n = struct.unpack('>II', lbpath.read(8))\n\t\tlabels = np.fromfile(lbpath, dtype=np.uint8)\n\t\t\n\twith open(images_path, 'rb') as imgpath:\n\t\tmagic, num, row, cols = struct.unpack('>IIII', imgpath.read(16))\n\t\timages = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels), 784)\n\t\n\treturn images, labels", "def loadLabeled(self):\n\n maxNumChannels = self._maxNumChannels # 4\n\n baseFilePath, ext = os.path.splitext(self.path)\n baseFilePath = baseFilePath.replace('_ch1', '')\n baseFilePath = baseFilePath.replace('_ch2', '')\n\n # load mask\n #labeledPath = dvMaskPath + '_mask.tif'\n #labeledData = tifffile.imread(labeledPath)\n\n maskFromLabelGreaterThan = 0\n\n # load labeled\n for channelIdx in range(maxNumChannels):\n channelNumber = channelIdx + 1 # for _ch1, _ch2, ...\n stackListIdx = maxNumChannels + channelIdx # for index into self._stackList\n\n chStr = '_ch' + str(channelNumber)\n labeledPath = baseFilePath + chStr + '_labeled.tif'\n maskPath = baseFilePath + chStr + '_mask.tif'\n\n # if we find _labeeled.tif, load and make a mask\n # o.w. 
if we find _mask.tif then load that\n if os.path.isfile(maskPath):\n print(' bStack.loadLabeled() loading _mask.tif channelNumber:', channelNumber, 'maskPath:', maskPath)\n maskData = tifffile.imread(maskPath)\n self._stackList[stackListIdx] = maskData\n elif os.path.isfile(labeledPath):\n print(' bStack.loadLabeled() loading channelNumber:', channelNumber, 'labeledPath:', labeledPath)\n labeledData = tifffile.imread(labeledPath)\n self._stackList[stackListIdx] = labeledData > maskFromLabelGreaterThan\n else:\n # did not find _mask or _labeled file\n pass\n\n # erode _mask by 1 (before skel) as skel was getting mized up with z-collisions\n #self._dvMask = bimpy.util.morphology.binary_erosion(self._dvMask, iterations=2)\n\n # bVascularTracing.loadDeepVess() uses mask to make skel", "def loadFile(self, path):\n print(\"loading \\'{}\\',\".format(path.split('/')[-1]), end = \" \")\n with open(path, \"r\") as file_content:\n list_of_lines = file_content.readlines() # get all lines of level representation\n\n # remove '\\n' if a line has '\\n' at the end\n for i in range(len(list_of_lines)):\n if (list_of_lines[i][-1] == \"\\n\"):\n list_of_lines[i] = list_of_lines[i][:-1] # remove '\\n'\n\n # calculate dimensions of the level tensor\n width = len(list_of_lines[0])\n height = len(list_of_lines)\n depth = len(self.tile_reprs)\n level_tensor = np.zeros((height, width, depth)) # this tensor represent level\n\n # traverse the entire level space to populate the tensor\n for row in range(height):\n for col in range(width):\n char = list_of_lines[row][col]\n # if this character does not exist in json file, it is all zero\n try:\n level_tensor[row, col, self.tile_reprs.index(char)] = 1\n except:\n pass\n\n\n self.loaded_files.append(path.split('/')[-1].split('.')[0])\n self.loaded_data.append(level_tensor) # store this loaded level\n print(\"success.\")", "def _labels(path):\r\n with gzip.open(path) as f:\r\n # First 8 bytes are magic_number, n_labels\r\n integer_labels = np.frombuffer(f.read(), 'B', offset=8)\r\n\r\n def _onehot(integer_labels):\r\n \"\"\"Return matrix whose rows are onehot encodings of integers.\"\"\"\r\n n_rows = len(integer_labels)\r\n n_cols = integer_labels.max() + 1\r\n onehot = np.zeros((n_rows, n_cols), dtype='uint8')\r\n onehot[np.arange(n_rows), integer_labels] = 1\r\n return onehot\r\n\r\n return _onehot(integer_labels)", "def load_classes(self):\n\t\t\t# Load class names (name -> label).\n\t\t\tcategories = self.coco.loadCats(self.coco.getCatIds())\n\t\t\tcategories.sort(key=lambda x: x['id'])\n\n\t\t\tself.classes = {}\n\t\t\tself.coco_labels = {}\n\t\t\tself.coco_labels_inverse = {}\n\t\t\tfor c in categories:\n\t\t\t\tself.coco_labels[len(self.classes)] = c['id']\n\t\t\t\tself.coco_labels_inverse[c['id']] = len(self.classes)\n\t\t\t\tself.classes[c['name']] = len(self.classes)\n\n\t\t\t# Also load the reverse (label -> name).\n\t\t\tself.labels = {}\n\t\t\tfor key, value in self.classes.items():\n\t\t\t\tself.labels[value] = key", "def load_data(path):\n\n\t# Create a list of all files ending in .jpg\n\tim_list = list_images(path, '.jpg')\n\n\t# Create labels\n\tlabels = [int(im_name.split('/')[-1][0]) for im_name in im_list]\n\tfeatures = []\n\n\t# Create features from the images\n\t# TOD.O: iterate over images paths\n\tfor im_path in im_list:\n\t\t# TOD.O: load image as a gray level image\n\t\tim = np.array(Image.open(im_path).convert('L'))\n\t\t# TOD.O: process the image to remove borders and resize\n\t\tim = process_image(im)\n\t\t# TOD.O: append extracted features 
to the a list\n\t\tfeatures.append(extract_features(im))\n\n\t# TOD.O: return features, and labels\n\treturn features, labels", "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,'{}-labels-idx1-ubyte'.format(kind))\n images_path = os.path.join(path,'{}-images-idx3-ubyte'.format(kind))\n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8).reshape(n)\n\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack('>IIII',\n imgpath.read(16))\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape((num,1,rows,cols))\n print(kind)\n print(\"label num:\",n)\n print(\"image num:\",num)\n print(\"image rows:\",rows)\n print(\"image cols:\",cols)\n images = images/255\n return images, labels", "def load(cls, path: utils.URLPath):\n config = load_somclassifier_config(path / \"config.json\")\n model = keras.models.load_model(str(path / \"model.h5\"))\n binarizer = io_functions.load_joblib(path / \"binarizer.joblib\")\n\n data_ids = {\n \"validation\": io_functions.load_json(path / \"ids_validate.json\"),\n \"train\": io_functions.load_json(path / \"ids_train.json\"),\n }\n return cls(config, binarizer=binarizer, model=model, data_ids=data_ids, modeldir=path)", "def load_mnist(path, kind = 'train'):\n label_path = os.path.join(path, '%s-labels-idx1-ubyte' % kind)\n images_path = os.path.join(path, '%s-images-idx3-ubyte' % kind)\n\n\n with open(label_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II', lbpath.read(8))\n\n labels = np.fromfile(lbpath, dtype= np.uint8)\n\n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack('>IIII', imgpath.read(16))\n\n images = np.fromfile(imgpath, dtype=np.uint8).reshape(len(labels),784)\n\n\n return images, labels", "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte' % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte' % kind)\n \n with open(labels_path, 'rb') as lbpath:\n magic, n = struct.unpack('>II',\n lbpath.read(8))\n labels = np.fromfile(lbpath,\n dtype=np.uint8)\n \n with open(images_path, 'rb') as imgpath:\n magic, num, rows, cols = struct.unpack(\">IIII\",\n imgpath.read(16))\n images = np.fromfile(imgpath,\n dtype=np.uint8).reshape(len(labels), 784)\n \n return images, labels", "def read_label_map(path):\n with tf.io.gfile.GFile(path) as f:\n if path.endswith('.json'):\n return json.load(f)\n else:\n label_map = {}\n empty_line_encountered = False\n for tag in f:\n tag = tag.strip()\n if tag:\n label_map[tag] = len(label_map)\n else:\n if empty_line_encountered:\n raise ValueError(\n 'There should be no empty lines in the middle of the label map '\n 'file.'\n )\n empty_line_encountered = True\n return label_map", "def load(self):\n logger.debug(f\"Reading {self.path.name}\")\n self.label = int(Data.fromLabel(self.path.parent.name))\n self.image = skimg.data.imread(self.path)", "def load_path(path: str) -> List[object]:\n if not os.path.isdir(path):\n raise ValueError(\"{} is not a directory\".format(path))\n\n objs: List[object] = list()\n for file_name in os.listdir(path):\n if os.path.splitext(file_name)[1].lower() in [\".yaml\", \".yml\"]:\n objs = objs + load_file(os.path.join(path, file_name))\n return objs", "def load_mnist(path, kind='train'):\n\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte.gz'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte.gz'\n % kind)\n\n with gzip.open(labels_path, 
'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,\n offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,\n offset=16).reshape(len(labels), 784)\n\n return images, labels", "def load_barcodes(self, path):\n self._barcodes = pickle.load(open(path, 'rb'))", "def load(self):\n self.classifier = joblib.load(\"data/models/repeatsfinder/repeatsfinder.joblib\")", "def load(path):\n \n with codecs.open(path, 'r', **rparams) as f:\n print ' > loading... {}'.format(path)\n if '.json' in path:\n obj = json.load(f, object_hook=json_numpy_obj_hook)\n elif '.pkl' in path:\n obj = pickle.load(file=f)\n else:\n # check the file referenced is sensible\n obj_id = [k for k in flocs.keys() if k in path]\n if obj_id is None or len(obj_id) != 1: raise ValueError(\n '{} not found in the path: \\n {}'.format(flocs.keys(), path))\n return obj", "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,\n '%s-labels-idx1-ubyte.gz'\n % kind)\n images_path = os.path.join(path,\n '%s-images-idx3-ubyte.gz'\n % kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,\n offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,\n offset=16).reshape(len(labels), 784)\n\n return images, labels", "def loadFromPath(self, path):\n\t\tself.root = TreeNode(' ');\n\t\tnode = TreeNode()\n\t\tf = codecs.open(path, 'r', 'utf-8')\n\t\tfor line in f:\n\t\t\tline = removeAccentsInString(line)\n\t\t\tnode.addString(line.rstrip())\n\t\tf.close()\n\t\tself.root.addChild(node)", "def load_model(self, path):\n pass", "def load_from_disk(self, file_name = \"vehicle_classifier.pkl\"):\n self.classifier.load_from_disk(file_name)", "def get_imagenet_classnames():\r\n return np.loadtxt(open(path_data+'/ilsvrc_2012_labels.txt'), dtype=object, delimiter='\\n')", "def _load_unlabeled(self, path):\n signal, info = wfdb.rdsamp(path)\n self.fs = 250\n self.lead_match = ['anonymous1', 'anonymous2']\n self.raw_data = np.transpose(np.array([signal]), (2, 0, 1))\n self.symbol = []\n self.coords = []\n self.label_name = None\n self._generate_beatlabel_from_estimation()", "def parse_labelfile(path):\n with open(path, \"r\") as FILE:\n lines = FILE.readlines()\n\n\n labels = {x.split(\":\")[0]: x.split(\":\")[1] for x in lines[1:]}\n\n for key in labels:\n labels[key] = np.array(labels[key].split(\",\")).astype(\"uint8\")\n\n return labels", "def load(self):\n self.classifier = joblib.load(\n \"data/models/badlymappedfinder/badlymappedfinder.joblib\"\n )", "def load_labels(label_file):\n\n label = []\n proto_as_ascii_lines = tf.io.gfile.GFile(label_file).readlines()\n for l in proto_as_ascii_lines:\n label.append(l.rstrip())\n return label", "def import_class(path):\n components = path.split(\".\")\n module = components[:-1]\n module = \".\".join(module)\n # __import__ needs a native str() on py2\n mod = __import__(module, fromlist=[str(components[-1])])\n return getattr(mod, str(components[-1]))", "def assign_labels(basename, data_folder=Path(\"/data\"), verbose=False):\n urls_path = data_folder / \"graphs\" / basename / (basename + \".urls\")\n assert urls_path.exists(), \"Urls file not found!\"\n # check if labels dict already existing\n labels_path = data_folder / \"models\" / basename / (\"labels.json\")\n if labels_path.exists():\n print(\"Labels json already existing.\")\n else:\n print(\"Building labels json..\")\n # count number of lines in 
file\n num_lines = sum(1 for line in urls_path.open())\n labels_array = [0] * num_lines\n with urls_path.open() as f:\n clusters_count = Counter()\n labels = dict()\n class_index = 0\n for pos, line in enumerate(tqdm(f, total=num_lines)):\n # extract the TLD\n complete_domain = tldextract.extract(line).suffix\n # we only need the country domain now\n domain = complete_domain.split(\".\")[-1]\n # if domain unseen add it to class indices\n if domain not in labels:\n class_index += 1\n labels[domain] = class_index\n # assign label and add it to array\n y = labels[domain]\n labels_array[pos] = y\n clusters_count[domain] += 1\n labels_data = dict()\n # labels_data['labels'] = labels # do we really need this?\n labels_data['labels'] = {int(v): k for k, v in labels.items()}\n labels_data['count'] = clusters_count\n labels_data['array'] = labels_array\n if verbose:\n print(\"Found following labels:\")\n print(labels)\n with open(labels_path, 'w', encoding='utf-8') as outfile:\n json.dump(labels_data, outfile, ensure_ascii=False, indent=4)\n return labels_path", "def load_for_sklearn(self):\n\n labels = [] # string labels\n examples = [] # examples as strings\n\n # document number -> label mapping\n doc2label = n2b2.map_patients_to_labels(\n self.xml_dir,\n self.category)\n\n for f in os.listdir(self.cui_dir):\n doc_id = f.split('.')[0]\n file_path = os.path.join(self.cui_dir, f)\n file_as_string = open(file_path).read()\n\n string_label = doc2label[doc_id]\n int_label = LABEL2INT[string_label]\n labels.append(int_label)\n examples.append(file_as_string)\n\n return examples, labels", "def load_label_map(location=\"configs/label_map.txt\"):\n ret = dict()\n num_class = 0\n with open(location) as f:\n for line in f:\n line = line.strip('\\n')\n index, relation = line.split(' ')\n ret[relation] = int(index)\n ret[int(index)] = relation\n num_class += 1\n return ret", "def read_labels(labels_path):\n with open(labels_path, 'r') as file:\n data = file.read()\n data = data.split()\n data = np.array(data)\n data = np.reshape(data, (-1, 2))\n return data", "def register_data(path, prefix='yeast_cells_'):\n assert (\n os.path.exists(f'{path}/labels.umsgpack') or\n os.path.exists(f'{path}/labels.json')), (\n \"Labels not found, ensure either labels.umsgpack or labels.json \"\n f\"exists at {path}.\")\n\n if os.path.exists(f'{path}/labels.umsgpack'):\n with open(f'{path}/labels.umsgpack', 'rb') as f:\n labels = umsgpack.unpack(f, encoding = \"utf-8\")\n else:\n with open(f'{path}/labels.json', 'r') as f:\n labels = json.load(f)\n\n labels = validate_labels(labels, path)\n\n DatasetCatalog.clear()\n for label in labels:\n DatasetCatalog.register(f\"{prefix}{label}\", lambda label_=label: labels[label_])\n MetadataCatalog.get(f\"{prefix}{label}\").set(thing_classes=[\"yeast_cell\"])\n\n # yeast_cells_metadata = MetadataCatalog.get(f\"{prefix}train\")\n return labels", "def import_classifier(name):\n classinput=open(name,'rb')\n main_class=load(classinput)\n classinput.close()\n return main_class", "def from_path(cls, path: str) -> Any:", "def get_label_vocab(*paths: str) -> Dict[str, int]:\n label_set = set()\n for path in paths:\n with open(path) as r:\n for line in r:\n instance = json.loads(line)\n for annotation in instance['annotations']:\n label_set.update(annotation['labels'])\n return {label: idx for idx, label in enumerate(label_set)}", "def load(self, path):\n self.load_state_dict(torch.load(path))", "def load(self, path):\n self.load_state_dict(torch.load(path))", "def load(path, reset=False):\n 
pass", "def load_mnist(path, kind='train'):\n labels_path = os.path.join(path,'%s-labels-idx1-ubyte.gz'% kind)\n\n images_path = os.path.join(path,'%s-images-idx3-ubyte.gz'% kind)\n\n with gzip.open(labels_path, 'rb') as lbpath:\n labels = np.frombuffer(lbpath.read(), dtype=np.uint8,offset=8)\n\n with gzip.open(images_path, 'rb') as imgpath:\n images = np.frombuffer(imgpath.read(), dtype=np.uint8,offset=16).reshape(len(labels), 784)\n\n print(\"Dataset Loaded\")\n \n return images, labels", "def get_label(img_path):\n img_name = img_path.stem\n label_name = img_name + \".txt\"\n label_path = img_path.parent / label_name\n with open(label_path) as f:\n label = json.load(f)\n return label" ]
[ "0.76263887", "0.70822763", "0.6987063", "0.6917422", "0.6886614", "0.684222", "0.6831526", "0.67475176", "0.67435664", "0.6732817", "0.66995424", "0.6691952", "0.6691952", "0.6657494", "0.66031843", "0.6593739", "0.65641505", "0.649268", "0.6491007", "0.6329085", "0.6310958", "0.62040716", "0.6195257", "0.6176045", "0.6159681", "0.615561", "0.60863787", "0.60863787", "0.60802424", "0.6074264", "0.6072343", "0.6055371", "0.60351944", "0.6022415", "0.60118747", "0.60086006", "0.5991989", "0.5974673", "0.59714437", "0.59601474", "0.59601474", "0.59601474", "0.59601474", "0.59574604", "0.5955211", "0.59459317", "0.59446937", "0.5939886", "0.5888781", "0.58814657", "0.58814657", "0.5872904", "0.5867503", "0.5856334", "0.58509684", "0.58435345", "0.5842537", "0.58416665", "0.5816708", "0.5814217", "0.5804993", "0.5795989", "0.57939136", "0.5786235", "0.5783623", "0.57819605", "0.57681656", "0.57625043", "0.5755776", "0.5752126", "0.5751605", "0.57511705", "0.5727883", "0.57258415", "0.57158303", "0.5713633", "0.57077914", "0.570755", "0.57060915", "0.570589", "0.5705773", "0.5702764", "0.5700796", "0.56973845", "0.5695384", "0.5694897", "0.5691705", "0.5683666", "0.56793076", "0.5678911", "0.5675982", "0.5674945", "0.5668301", "0.566683", "0.56638664", "0.5648267", "0.5648267", "0.5643754", "0.5641398", "0.56260914" ]
0.6649412
14
The function reads the antenna positions (N_ant antennas) from the file given.
def from_antenna_config(filename, z, nu=None): antll = np.loadtxt(filename) Re = 6.371e6 # in m pp = np.pi/180 if not nu: nu = c2t.z_to_nu(z) # MHz antxyz = np.zeros((antll.shape[0],3)) # in m antxyz[:,0] = Re*np.cos(antll[:,1]*pp)*np.cos(antll[:,0]*pp) antxyz[:,1] = Re*np.cos(antll[:,1]*pp)*np.sin(antll[:,0]*pp) antxyz[:,2] = Re*np.sin(antll[:,1]*pp) del pp, antll N_ant = antxyz.shape[0] Nbase = np.zeros((N_ant*(N_ant-1)/2,3)) pair_comb = itertools.combinations(xrange(N_ant), 2) pair_comb = list(pair_comb) lam = c_light/(nu*1e6)/1e2 # in m for i in xrange(Nbase.shape[0]): ii,jj = pair_comb[i] ux = (antxyz[ii,0]-antxyz[jj,0])/lam uy = (antxyz[ii,1]-antxyz[jj,1])/lam uz = (antxyz[ii,2]-antxyz[jj,2])/lam Nbase[i,:] = ux,uy,uz return Nbase, N_ant
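One caution for anyone reusing the snippet above: it is written for Python 2 (xrange, and N_ant*(N_ant-1)/2 passed directly as an array dimension, which becomes a float under Python 3) and depends on an external c2t/c_light setup. The core step, building a baseline vector in wavelength units for every antenna pair, can be sketched in self-contained Python 3 as follows (the toy positions and wavelength are illustrative):

import itertools
import numpy as np

antxyz = np.array([[0.0, 0.0, 0.0],     # toy antenna positions in metres
                   [50.0, 0.0, 0.0],
                   [0.0, 80.0, 0.0]])
lam = 2.0                               # observing wavelength in metres

pairs = list(itertools.combinations(range(len(antxyz)), 2))
Nbase = np.zeros((len(pairs), 3))       # N_ant*(N_ant-1)//2 rows, one (u, v, w) each
for k, (i, j) in enumerate(pairs):
    Nbase[k] = (antxyz[i] - antxyz[j]) / lam   # baseline expressed in wavelengths

print(Nbase.shape)   # (3, 3) for the 3 toy antennas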
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def read_positions():\n return np.genfromtxt(\"POSITIONS.OUT\").transpose()", "def readInput(in_file_name):\n in_file = open(in_file_name, 'r')\n positions = []\n samples = []\n M = []; P = [];\n MC = []; PC = [];\n while True:\n line = in_file.readline()\n if not line: break\n if line[0] == '#': continue #skip comment\n line = line.rstrip('\\n').split('\\t')\n \n #genomic positions and allele support in plasma samples\n positions.append(int(line[0]))\n samples.append(tuple(map(int, line[1:5])))\n \n #maternal and paternal alleles\n M.append(tuple(line[5:7]))\n MC.append(tuple(map(float, line[7:9])))\n \n P.append(tuple(line[9:11]))\n PC.append(tuple(map(float, line[11:13]))) \n \n in_file.close()\n return positions, samples, M, P, MC, PC", "def readPositions(positionFilePath):\n\n if not os.path.exists(positionFilePath):\n print 'File ' + positionFilePath + ' is missing!'\n return []\n\n pointList = []\n\n #TODO: Read this from the file?\n MEAN_MOON_RADIUS = 1737400\n\n isLolaFile = False\n isPcAlignErrorFile = False\n f = open(positionFilePath, 'r')\n i = 0\n for line in f:\n # On first line check if this is a LOLA RDR file\n if (i == 0):\n if (line.find('Coordinated_Universal_Time') == 0):\n isLolaFile = True\n print 'Detected LOLA RDR file'\n continue # Skip this header line\n if (line.find('radius (meters)') > 0):\n isPcAlignErrorFile = True\n print 'Detected pc_align error file'\n continue # Skip this header line\n\n if isLolaFile: # Pick out the correct fields\n\n strings = line.split(',')\n pointList.append(float(strings[1])) # lon\n pointList.append(float(strings[2])) # lat\n pointList.append(float(strings[3])*1000 - MEAN_MOON_RADIUS) # alt\n \n elif isPcAlignErrorFile: # pc_align error file\n\n strings = line.split(',')\n pointList.append(float(strings[0])) # lon\n pointList.append(float(strings[1])) # lat\n pointList.append(float(strings[2]) - MEAN_MOON_RADIUS) # alt\n\n else: # Default handling\n if line.find('#') < 0: # Skip lines containing the comment symbol\n strings = line.split(',')\n #print strings\n pointList.append(float(strings[1])) # lon\n pointList.append(float(strings[0])) # lat\n pointList.append(float(strings[2])) # alt\n i = i + 1\n f.close()\n\n #print pointList\n return pointList", "def read_conll_pos_file(path):\n sents = []\n with open(path, \"r\") as f:\n curr = []\n for line in f:\n line = line.strip()\n if line == \"\":\n sents.append(curr)\n curr = []\n else:\n tokens = line.strip().split(\"\\t\")\n curr.append((tokens[1], tokens[3]))\n return sents", "def read_forces(filename):\n f=open(filename,\"r\")\n castep_forces = f.readlines()\n f.close() \n nruter = []\n for index, line in enumerate(castep_forces):\n if 'Total number of ions in cell' in line:\n n_atoms = int(line.split()[7])\n if 'Cartesian components (eV/A)' in line:\n starting_line = index + 4\n for i in range(n_atoms):\n f = starting_line + i\n nruter.append([float(castep_forces[f].split()[m]) for m in range(3,6)]) \n nruter=np.array(nruter,dtype=np.double)\n return nruter", "def read_coordinate_file(file):\n with open(file, 'r') as file1:\n coords = []\n\n for line in file1:\n line = line.strip('{} \\n')\n (a, b) = line.split(\",\")\n ''' \n x and y are expressed as latitude and longitude. 
These are converted with the Mercator projection (from Computer assignment 1)\n into x and y coordinates.\n '''\n coord = [(float(b)*m.pi/180), (m.log((m.tan(m.pi/4+m.pi*float(a)/360))))]\n coords.append(coord)\n return np.array(coords)", "def read_file(self, filename):\n with open(filename, 'r') as file:\n for line in file:\n l = line.strip()\n\n if l == ST_POS0:\n self._state = ST_POS0\n elif l == ST_TRNS:\n self._state = ST_TRNS\n elif l == ST_POS1:\n self._state = ST_POS1\n else:\n self._parse_line(l)\n self._state = None", "def read_xyz(filename):\n #print('Reading geom from:'),filename\n atoms = []\n coordinates = []\n\t\n xyz = open(filename)\n n_atoms = int(xyz.readline())\n title = xyz.readline()\n for line in xyz:\n\tif len(line.strip()) == 0:\n\t\tpass\n\t\tbreak\t\n\tatom,x,y,z = line.split()\n\tatoms.append(atom)\n\tcoordinates.append([float(x), float(y), float(z)])\n xyz.close()\n coordinates = [[w * angtobh for w in ww] for ww in coordinates] #ang to bh\n\n if n_atoms != len(coordinates):\n \tprint('Number of atoms in xyz file doesnt equal to the number of lines.')\n\tsys.exit(1)\n \n return atoms, coordinates", "def readAttributesFile(self, filepath):\n raw_data = np.genfromtxt(filepath, skip_header=1, delimiter=\",\", filling_values=0, dtype=None)\n data = [list(item)[1:] for item in raw_data]\n\n self.attributeMatrix = np.asmatrix(data)\n n = self.attributeMatrix.shape[1]\n self.location = self.attributeMatrix[:, 0:2]\n self.location = self.location.astype('float')\n self.pop = self.attributeMatrix[:, 2:n].astype('int')\n # self.pop[np.where(self.pop < 0)[0], np.where(self.pop < 0)[1]] = 0\n self.n_group = n-2\n self.n_location = self.attributeMatrix.shape[0]\n self.pop_sum = np.sum(self.pop, axis=1)\n self.tract_id = np.asarray([x[0] for x in raw_data]).astype(str)\n self.tract_id = self.tract_id.reshape((self.n_location, 1))\n\n return self.attributeMatrix", "def load_annos(self, anno_path):\n\n if os.path.exists(anno_path) is False or os.path.isfile(anno_path) is False or anno_path.endswith('txt') is False:\n print(\"Wrong path: not exist or not a txt file: %s\" % anno_path)\n return None, None\n\n list_file_id, list_anno_id = [], []\n list_x, list_y, list_w, list_h = [], [], [], []\n list_blur, list_expr, list_illum, list_occ, list_pose, list_inval = [], [], [], [], [], []\n anno_id = 0\n\n list_id = []\n list_filename = []\n file_id = 0\n\n num_annos_total = 0\n\n with open(anno_path) as afile:\n line = \"begin\"\n while line != \"\":\n line = afile.readline()\n\n if line.rstrip().endswith('jpg'): # it is a file\n file_name = line.strip()\n list_id.append(file_id)\n list_filename.append(file_name)\n\n num_annos = int(afile.readline().strip())\n\n for i in range(num_annos):\n px, py, pw, ph, blur, expr, illum, inval, occ, pose = afile.readline().strip().split(' ')\n px, py, pw, ph = int(px), int(py), int(pw), int(ph)\n\n if pw == 0 or ph == 0: # ignore invalid faces (0 width or height)\n continue\n\n if pw < 0:\n px = px+pw\n pw = abs(pw)\n if ph < 0:\n py = py+ph\n ph = abs(ph)\n\n list_file_id.append(file_id)\n list_anno_id.append(anno_id)\n list_x.append(px)\n list_y.append(py)\n list_w.append(pw)\n list_h.append(ph)\n list_blur.append(int(blur))\n list_expr.append(int(expr))\n list_illum.append(int(illum))\n list_occ.append(int(occ))\n list_pose.append(int(pose))\n list_inval.append(int(inval))\n anno_id = anno_id + 1\n\n file_id = file_id + 1\n num_annos_total += num_annos\n\n files = {'id': np.array(list_id), 'filename': list_filename }\n annos = {'file_id': 
np.array(list_file_id), 'anno_id': np.array(list_anno_id), \\\n 'x': np.array(list_x), 'y': np.array(list_y), \\\n 'w': np.array(list_w), 'h': np.array(list_h), \\\n 'blur': np.array(list_blur), 'expression': np.array(list_expr), \\\n 'illumination': np.array(list_illum), 'occlusion': np.array(list_occ), \\\n 'pose': np.array(list_pose), 'invalid': np.array(list_inval) }\n\n assert (len(list_id) == len(list_filename)), \\\n \"file_id and filename lists should have the same length\"\n\n self._num_annos = num_annos_total\n self._num_images = file_id\n\n return files, annos", "def read_alignment(file):\n alignments = list()\n with open(file, 'r') as f:\n for line in f:\n line_lst = line.strip().split()\n align_lst = list()\n for pair in line_lst:\n src_idx, tgt_idx = pair.split('-')\n align_lst.append((int(src_idx),int(tgt_idx)))\n # print(align_lst)\n alignments.append(align_lst)\n return alignments", "def atmparamread(filename):\n f = open(filename, 'r')\n f.readline()\n line = f.readline()\n #Td = float(line.split()[0])\n #Pd = float(line.split()[1])\n #Mc = float(line.split()[2])\n #rc = float(line.split()[3])\n n = int(line.split()[0])\n f.readline()\n atm = 0*numpy.ndarray(shape=(n, ncol), dtype=float)\n S = 0*numpy.ndarray(shape=(n), dtype=float)\n for i in range(n):\n line = f.readline()\n S[i] = float(line.split()[0])\n for j in range(ncol ):\n atm[i, j] = float(line.split()[j+1])\n f.close()\n return atm, S", "def readAngles(fileName: str) -> List[float]:\n outList = list()\n with open(fileName, 'r') as fileIn:\n for line in fileIn:\n val = float(line)\n outList.append(val)\n return outList", "def eeg_readavr(file):\t\n\tf=open(file,'r')\t\n\tfirstline = f.readline() # ntpts TSB info etc\n\tstr = string.split(firstline)\n\tntpts = int(str[1])\n\tnchan = int(str[11])\n\ttsb = float(str[3])\n\tdi = float(str[5])\t\n\ttim = np.arange(tsb,ntpts*di+tsb,di)\n\tsecondline = f.readline()\n\tchnam = string.split(secondline)\n\teeg = np.zeros([nchan,ntpts])\t\t\n\tfor i in range(0,nchan):\n\t\ttestline = f.readline()\n\t\ttestline = testline.strip().split()\t\t\n\t\teeg[i,:]=np.array(map(float,testline))\n\t\t\n\tf.close()\n\treturn eeg,tim,nchan,ntpts", "def _get_position_data(file):\n return pd.read_csv(file)", "def read_file(self):\n\n\t\twith open(self.filename, 'r') as f:\n\t\t\tfor line in f:\n\t\t\t\tif len(line)>1:\n\t\t\t\t\tlenght_value,array_values = line.split(';')\n\t\t\t\t\tlist_values = [int(x) for x in array_values.split(',')]\n\t\t\t\t\tprint self.get_arraysurdit(list_values)", "def read_points(from_file):\n points = []\n with open(from_file) as fp: \n for line in fp.readlines(): \n feats = line.strip().split()\n points.append((int(feats[0]), int(feats[1])))\n\n return points", "def read_log_attitude(ac_id, filename):\n f = open(filename, 'r')\n pattern = re.compile(\"(\\S+) \"+ac_id+\" ATTITUDE (\\S+) (\\S+) (\\S+)\")\n list_meas = []\n while True:\n line = f.readline().strip()\n if line == '':\n break\n m = re.match(pattern, line)\n if m:\n list_meas.append([float(m.group(1)), float(m.group(2)), float(m.group(3)), float(m.group(4))])\n return np.array(list_meas)", "def _read_antti_stations(station_file):\n if station_file.split('.')[-1] == 'gz':\n ff = gzip.open(station_file, 'r')\n else:\n ff = open(station_file, 'r')\n\n sIO = io.BytesIO(ff.read().encode())\n ff.close()\n\n # extract and convert single line with observatory IDs\n obsList = []\n llList = []\n incList = []\n nObs = 0\n nLL = 0\n nInc = 0\n for line in sIO:\n if re.search(b\"^%\", line):\n # skip comments\n 
continue\n\n if re.search(br\"^\\s*$\", line):\n # skip blank lines\n continue\n\n # first line of consequence should be a list of quoted strings holding\n # observatory IDs for observatories considered in this solution; convert\n # to a list of strings\n if len(obsList) == 0:\n obsList = re.sub(b'\\'', b'', line).split()\n nObs = len(obsList)\n continue\n\n # assume next nobs lines read are observatory locations\n if nLL < nObs:\n llList.append([float(elem) for elem in line.decode().split()])\n nLL = nLL+1\n continue\n\n # assume next nobs lines read are observatory inclusion (boolean) lists\n if nInc < nObs:\n #incList.append(line.strip())\n incList.append([int(elem) for elem in line.decode().strip()])\n nInc = nInc+1\n continue\n\n # close sIO\n sIO.close()\n\n if len(llList) > 2:\n obsLat, obsLon, obsRad = list(zip(*llList))\n elif len(llList) == 2:\n obsLat, obsLon = list(zip(*llList))\n obsRad = np.ones(obsLat.shape)\n else:\n raise Exception('Requires (at least) latitude and longitude')\n\n obsInc = list(zip(*incList))\n\n return (np.array(obsLat), np.array(obsLon), np.array(obsRad),\n np.array(obsInc), np.array(obsList))", "def read_file(file):\n if opts.input_type == 'fits':\n data = fileio.read_fits(file)\n else:\n data = fileio.read_ascii(file)\n c_id = data[0,:]\n g_num = np.array(range(len(c_id)), dtype = 'int')\n g_id = data[3,:]\n g_ra = np.array(data[4,:], dtype = 'float')\n g_dec = np.array(data[5,:], dtype = 'float')\n g_z = np.array(data[6,:], dtype = 'float')\n return c_id, g_num, g_id, g_ra, g_dec, g_z", "def atmprofileread(filename):\n f = open(filename, 'r')\n line1 = f.readline()\n Nst = int(line1.split()[-1])\n line = f.readline()\n Np = int(line.split()[1])\n atm = 0*numpy.ndarray(shape=(Nst, Np, 5), dtype=float)\n S = 0*numpy.ndarray(shape=(Nst), dtype=float)\n f = open(filename, 'r')\n f.readline()\n for i in range(Nst):\n line = f.readline()\n S[i] = float(line.split()[0])\n for j in range(Np):\n line = f.readline()\n for k in range(numpy.shape(atm)[-1]):\n atm[i, j, k] = float(line.split()[k])\n f.close()\n return atm, S", "def _read_netgen(self, file):\n\n with open(file, 'r') as f:\n\n aid = 0 # current arc ID\n\n # Read the file line-by-line\n for line in f:\n\n # Decide what to do based on the line prefix\n\n # Comment line\n if line[0] == 'c':\n # Skip\n continue\n\n # Problem info\n elif line[0] == 'p':\n # p sense #nodes #arcs #int int_type #defenses #attacks\n # We always assume that the sense is minimization\n\n ls = line.split()\n if ls[5] == 'n':\n self.parent_type = 0\n self.def_limit = int(ls[6])\n self.att_limit = int(ls[7])\n\n # Initialize all nodes as transshipment (in case the NETGEN\n # file lists only nonzero supply values)\n self.nodes = [_Node(i, 0.0) for i in range(int(ls[2]))]\n\n # Node\n elif line[0] == 'n':\n # n ID supply\n\n # All nodes have already been defined, so update existing\n # supply values\n\n ls = line.split()\n self.nodes[int(ls[1])-1].supply = float(ls[2])\n\n # Arc\n elif line[0] == 'a':\n # a tail head LB UB cost\n\n ls = line.split()\n tail = self.nodes[int(ls[1])-1]\n head = self.nodes[int(ls[2])-1]\n if (int(ls[2]) == 0) and (self.parent_type == 0):\n head = None\n\n self.arcs.append(_Arc(aid, tail, head, float(ls[4]),\n float(ls[5])))\n aid += 1\n\n # Interdependency\n elif line[0] == 'i':\n # i parent child\n\n ### We assume for now that arcs are parents.\n\n ls = line.split()\n self.int.append((self.arcs[int(ls[1])-1],\n self.arcs[int(ls[2])-1]))\n\n # Defensible arc\n elif line[0] == 'd':\n # d arc\n\n ls = 
line.split()\n self.def_arcs.append(self.arcs[int(ls[1])-1])\n\n # All defensible arcs are assumed to be destructible\n self.att_arcs.append(self.arcs[int(ls[1])-1])\n\n # Destructible arc\n elif line[0] == 'r':\n # r arc\n\n ls = line.split()\n self.att_arcs.append(self.arcs[int(ls[1])-1])\n\n # If no defensible or destructible arcs were listed, we assume that\n # all arcs are available\n\n if len(self.def_arcs) == 0:\n self.def_arcs[:] = self.arcs[:]\n\n if len(self.att_arcs) == 0:\n self.att_arcs[:] = self.def_arcs[:]", "def read_xyz(self, filename):\n # first line contains number of atoms\n self.numatom = int(filename.readline().split()[0])\n # second line contains a comment\n self.comment = filename.readline()[:-3]\n # rest of the lines contain coordinates structured Element X Y Z\n string = \"Element X Y Z \\n\" + filename.read()\n self.contents = pd.read_table(StringIO(string), sep=r'\\s+')", "def _read_antti_location(location_file):\n # NOTE: genfromtxt() doesn't work with gzipped files as it should, so we\n # unzip the file ourself, and use io.BytesIO to fake out genfromtext()\n if location_file.split('.')[-1] == 'gz':\n ff = gzip.open(location_file, 'r')\n else:\n ff = open(location_file, 'r')\n\n sIO = io.BytesIO(ff.read().encode())\n ff.close()\n\n # read LatLon array (with optional labels...\n # either all have labels, or none, else genfromtxt() chokes)\n lll = list(zip(*np.atleast_1d(np.genfromtxt(\n sIO, comments=\"%\", dtype=None,\n names=['latReal','lonReal','radReal','labelString']\n ))))\n\n # handles older style(s) with no radius and/or labels\n if len(lll) > 3:\n lat, lon, rad = np.array(lll[0:3])\n label = np.array(lll[3])\n elif len(lll) > 2:\n lat, lon, rad = np.array(lll[0:3])\n if isinstance(rad[0], (str, bytes)):\n label = rad\n rad = np.ones(lat.shape)\n else:\n label = np.tile('', lat.shape)\n elif len(lll) == 2:\n lat, lon = np.array(lll[0:2])\n rad = np.ones(lat.shape)\n label = np.tile('', lat.shape)\n else:\n raise Exception('Requires (at least) latitude and longitude')\n\n return lat, lon, rad, label", "def readFastaFile(filename):", "def readAllfromFile(self):\n with open(self._fname, 'r') as f:\n lines = f.readlines()\n readList = []\n for line in lines:\n line = line.strip()\n if len(line) > 1:\n gra = self._readGrafromLine(line)\n readList.append(gra)\n f.close()\n return readList", "def read_file(infile_name):\n chr_list = [0]*13 \n for i in range(len(chr_list)):\n chr_list[i] = [] \n infile = open(infile_name)\n for line in infile:\n if line.startswith('SL2.40'):\n chr = int(line.strip().split()[0][-2:])\n loci = int(line.strip().split()[1])\n chr_list[chr] += [loci]\n else:\n pass\n infile.close()\n return chr_list", "def read_body(filepath, **kwargs):\n with open(filepath, 'r') as infile:\n coords = numpy.loadtxt(infile, unpack=True, **kwargs)\n return coords", "def get_locations(loc_fn):\n with open(loc_fn, 'rb') as fin:\n coordinate_string = fin.readline()\n coordinates = coordinate_string.split(',')\n coordinates = [float(c.strip()) for c in coordinates]\n return coordinates", "def readogle(filename, **kw):\n \n # 2008-12-21 18:53 IJC: Created\n\n f = open(filename, 'r')\n raw = f.readlines()\n f.close()\n\n nstars = len(raw)\n\n raw2 = array([line.split() for line in raw])\n ra = raw2[:,1]\n dec = raw2[:,2]\n xref = raw2[:,3]\n yref = raw2[:,4]\n vmag = raw2[:,5]\n imag = raw2[:,7]\n \n xref = [map(float, [x]) for x in xref]\n yref = [map(float, [y]) for y in yref]\n vmag = [map(float, [v]) for v in vmag]\n imag = [map(float, [i]) for i in 
imag]\n\n return (ra, dec, xref, yref, vmag, imag)", "def read_file(name):\n file = open(name,'r')\n data = file.readlines()\n \n n = int(data[0].split()[0])\n m = int(data[0].split()[1])\n \n A = np.zeros((n,n,n+1))\n inf = 9999\n \n for i in range(n):\n for j in range(n):\n if i==j:\n A[i,j,0] = 0\n else:\n A[i,j,0] = inf\n for index, line in enumerate(data[1:]):\n item = line.split()\n A[int(item[0]) -1 ,int(item[1])-1,0] = int(item[2])\n \n return A", "def read_abinit(filename='abinit.in'):\n\n from ase import Atoms, units\n\n if isinstance(filename, str):\n f = open(filename)\n else: # Assume it's a file-like object\n f = filename\n\n lines = f.readlines()\n if type(filename) == str:\n f.close()\n\n full_file = ''\n for line in lines:\n if '#' in line:\n meat, comment = line.split('#')\n else:\n meat = line\n full_file = full_file + meat + ' '\n\n full_file.strip()\n tokens = full_file.lower().split()\n\n # note that the file can not be scanned sequentially\n\n index = tokens.index(\"acell\")\n unit = 1.0\n if(tokens[index+4].lower()[:3] != 'ang'):\n unit = units.Bohr\n acell = [unit*float(tokens[index+1]),\n unit*float(tokens[index+2]),\n unit*float(tokens[index+3])]\n\n index = tokens.index(\"natom\")\n natom = int(tokens[index+1])\n\n index = tokens.index(\"ntypat\")\n ntypat = int(tokens[index+1])\n\n index = tokens.index(\"typat\")\n typat = []\n for i in range(natom):\n typat.append(int(tokens[index+1+i]))\n\n index = tokens.index(\"znucl\")\n znucl = []\n for i in range(ntypat):\n znucl.append(int(tokens[index+1+i]))\n\n index = tokens.index(\"rprim\")\n rprim = []\n for i in range(3):\n rprim.append([acell[i]*float(tokens[index+3*i+1]),\n acell[i]*float(tokens[index+3*i+2]),\n acell[i]*float(tokens[index+3*i+3])])\n\n # create a list with the atomic numbers\n numbers = []\n for i in range(natom):\n ii = typat[i] - 1\n numbers.append(znucl[ii])\n\n # now the positions of the atoms\n if \"xred\" in tokens:\n index = tokens.index(\"xred\")\n xred = []\n for i in range(natom):\n xred.append([float(tokens[index+3*i+1]),\n float(tokens[index+3*i+2]),\n float(tokens[index+3*i+3])])\n atoms = Atoms(cell=rprim, scaled_positions=xred, numbers=numbers,\n pbc=True)\n else:\n if \"xcart\" in tokens:\n index = tokens.index(\"xcart\")\n unit = units.Bohr\n elif \"xangst\" in tokens:\n unit = 1.0\n index = tokens.index(\"xangst\")\n else:\n raise IOError(\n \"No xred, xcart, or xangs keyword in abinit input file\")\n\n xangs = []\n for i in range(natom):\n xangs.append([unit*float(tokens[index+3*i+1]),\n unit*float(tokens[index+3*i+2]),\n unit*float(tokens[index+3*i+3])])\n atoms = Atoms(cell=rprim, positions=xangs, numbers=numbers, pbc=True)\n \n try:\n i = tokens.index('nsppol')\n except ValueError:\n nsppol = None\n else:\n nsppol = int(tokens[i + 1])\n\n if nsppol == 2:\n index = tokens.index('spinat')\n magmoms = [float(tokens[index + 3 * i + 3]) for i in range(natom)]\n atoms.set_initial_magnetic_moments(magmoms)\n\n return atoms", "def ReadTrajectory(trajFile):\n trajectory=[]\n with open(trajFile, \"r\") as tF:\n line = tF.readline()\n while line is not \"\":\n #first line is number of atoms\n N = int(line.strip())\n tF.readline().strip() # second line is a comment that we throw away\n\n q = []\n for i in range(N):\n line = tF.readline().strip().split(\" \")\n for c in line[1:]:\n if c is not \"\":\n q.append(float(c))\n trajectory.append(np.array(q))\n\n line = tF.readline()\n\n return trajectory, N", "def readGPS(self, inFile):\n self.GPS.readFromShapeFile(inFile)\n self.gps_points 
= self.GPS.getGPSPoints()", "def __loadFromFile(self):\n try:\n f=open(self.__fileR, \"r\")\n line =f.readline().strip()\n rez=[]\n while line!=\"\":\n attrs=line.split(\",\")\n rt=Rent(attrs[0], attrs[1], attrs[2], attrs[3])\n rez.append(rt)\n line=f.readline().strip()\n f.close()\n return rez\n #the file cannot be reached\n except IOError:\n return None", "def read_groups_particles(filename):\n \n f = open(filename,'r')\n\n Ntot = fromstring(f.read(4),int32)[0]\n Pos\t = fromstring(f.read(3*4*Ntot),float32)\n Pos.shape = (Ntot,3)\n f.close()\n \n return Pos", "def get_number_of_atoms(self):\n natoms = 0\n\n with open(self.path, 'r') as f:\n line = f.readline()\n while line != '' and natoms == 0:\n # Automatically determine the number of atoms\n if 'CARTESIAN COORDINATES (ANGSTROEM)' in line and natoms == 0:\n for i in range(2):\n line = f.readline()\n\n while '---------------------------------' not in line:\n natoms += 1\n line = f.readline()\n if not line.strip():\n f.close()\n return natoms\n line = f.readline()", "def load_n3d_coords(file_path): \n \n import core.nuc_io as io\n\n seq_pos_dict = {}\n coords_dict = {} \n \n with io.open_file(file_path) as file_obj:\n chromo = None\n \n for line in file_obj:\n \n data = line.split()\n n_items = len(data)\n \n if not n_items:\n continue\n \n elif data[0] == '#':\n continue\n \n elif n_items == 3:\n chromo, n_coords, n_models = data\n \n #if chromo.lower()[:3] == 'chr':\n # chromo = chromo[3:]\n \n if chromo in coords_dict:\n raise Exception('Duplicate chromosome \"%s\" records in file %s' % (chromo, file_path))\n \n n_coords = int(n_coords)\n n_models = int(n_models)\n \n chromo_seq_pos = np.empty(n_coords, int)\n chromo_coords = np.empty((n_models, n_coords, 3), float)\n \n coords_dict[chromo] = chromo_coords\n seq_pos_dict[chromo] = chromo_seq_pos\n \n check = (n_models * 3) + 1\n i = 0\n \n elif not chromo:\n raise Exception('Missing chromosome record in file %s' % file_path)\n \n elif n_items != check:\n msg = 'Data size in file %s does not match Position + Models * Positions * 3'\n raise Exception(msg % file_path)\n \n else:\n chromo_seq_pos[i] = int(data[0])\n \n coord = [float(x) for x in data[1:]]\n coord = np.array(coord).reshape(n_models, 3)\n chromo_coords[:,i] = coord\n i += 1\n \n return seq_pos_dict, coords_dict", "def read_stations(filename):\n\n sta_x = []\n sta_y = []\n station_file = open(filename)\n\n for station in station_file:\n x, y, _ = station.split()\n sta_x.append(float(x))\n sta_y.append(float(y))\n\n station_file.close()\n\n return sta_x, sta_y", "def load_coords(self):\n file = open(self.coords_file, 'r')\n return file.readlines()", "def read_xyz(filename, freq):\n\n\n#xyz file\n\n Atoms = []\n Coordinates = []\n\n xyz = open(filename)\n frame = 0\n while True:\n\n n_atoms = xyz.readline()\n\n if n_atoms == '':\n break\n else:\n n_atoms = int(n_atoms)\n title = xyz.readline()\n\n if frame%freq==0:\n atoms, coordinates = read_frame(xyz, n_atoms)\n Coordinates.append(coordinates)\n Atoms.append(atoms)\n\n else:\n read_frame(xyz, n_atoms)\n frame+=1\n\n return Atoms, Coordinates", "def read(self, filename):\n f = open(filename, 'r')\n m = f.readline()\n n = f.readline()\n lst = []\n for line in f.readlines():\n lst.append(int(line))\n f.closed\n self.__init__(int(m), int(n), lst)", "def read_locations():\n r = open(\"resources/files/locations.txt\", \"r\", newline=\"\\n\")\n locations = r.read().split(\"\\n\")\n return locations", "def read_pts_file(filename):\n lines = open(filename).read().splitlines()\n if 
int(lines[1:2][0].split('n_points:')[-1]) != 68:\n print ('No 68-landmark format founded')\n return None\n lines = lines[3:71]\n\n landmarks = []\n for l in lines:\n coords = l.split()\n landmarks.append([float(coords[0]), float(coords[1])])\n return landmarks", "def reader(filename,only_length=False):\n print(\"Counting lines in file %s\"%filename)\n total_lines=0\n for n,line in enumerate(open(filename,\"r\")):\n total_lines+=1\n \n if only_length:\n return total_lines\n \n X,Y,Z,W,J=[np.zeros(total_lines) for _ in range(5)]\n \n for n, line in enumerate(open(filename, 'r')):\n if n%1000000==0:\n print(\"Reading line %d of %d from file %s\" %(n,total_lines,filename))\n split_line=np.array(line.split(\" \"), dtype=float) \n X[n]=split_line[0];\n Y[n]=split_line[1];\n Z[n]=split_line[2];\n W[n]=split_line[3];\n J[n]=int(split_line[4]);\n return X,Y,Z,W,J", "def read_file(path_to_file):\n 8", "def parse_FASTA(file):\r\n\tstate = 0\r\n\tdna_list = []\r\n\tfor line in file:\r\n\t\tline = line.strip()\r\n\t\tif state == 0:\r\n\t\t\tif line[0] == '>':\r\n\t\t\t\tadd_new_DNA(dna_list, line)\r\n\t\t\t\tstate = 1\r\n\t\t\telif line == '':\r\n\t\t\t\tcontinue\r\n\t\t\telse:\r\n\t\t\t\traise Exception()\r\n\t\telif state == 1:\r\n\t\t\tadd_line_to_DNA(dna_list[-1], line)\r\n\t\t\tstate = 2\r\n\t\telif state == 2:\r\n\t\t\tif line[0] == '>':\r\n\t\t\t\tadd_new_DNA(dna_list, line)\r\n\t\t\t\tstate = 1\r\n\t\t\telse:\r\n\t\t\t\tadd_line_to_DNA(dna_list[-1], line)\r\n\t\telse:\r\n\t\t\traise Exception()\r\n\tfile.seek(0)\r\n\treturn dna_list", "def read_data(self, fname, pts_shape=(12,3), retAll=False):\n import csv\n import copy\n angs = {}\n if sys.version < '3.0':\n access_mode = 'rb'\n else:\n access_mode = 'r'\n with open(fname, access_mode) as f:\n fr = csv.reader(f, skipinitialspace=True)\n _pts = np.zeros(pts_shape) #size depends on theta positions\n _apos = {}\n _runs = {}\n _bpos = 0\n _ptx = False\n _rnx = 0\n _angx = 0\n _frun = True\n for irow, row in enumerate(fr):\n if row[0][0] == '#': continue\n try:\n _frun = True\n ang = int(row[1])\n run = int(row[2])\n pos = float(row[3])\n pt = int(row[4])\n if _ptx:\n #flush points\n _apos[str(_bpos)] = _pts\n _ptx = False\n _pts = np.zeros(pts_shape)\n _pts[pt] = list(map(float, row[-3:]))\n if pt == (pts_shape[0]-1):\n _bpos = copy.deepcopy(pos)\n _ptx = True\n if run != _rnx:\n #flush positions\n _runs[_rnx] = copy.deepcopy(_apos)\n _rnx = copy.deepcopy(run)\n _apos = {}\n _frun = False\n if ang != _angx:\n #flush runs\n if _frun:\n _runs[_rnx] = copy.deepcopy(_apos)\n _apos = {}\n angs[_angx] = copy.deepcopy(_runs)\n _angx = copy.deepcopy(ang)\n _rnx = 0\n _runs = {}\n except:\n print('ERROR reading line {0}: {1}'.format(irow+1, row))\n break\n #final flush all\n _apos[str(_bpos)] = _pts\n _runs[_rnx] = copy.deepcopy(_apos)\n angs[_angx] = copy.deepcopy(_runs)\n if retAll:\n return angs\n else:\n self.dats = angs", "def read_pts(filename):\n lines = open(filename).read().splitlines()\n lines = lines[3:71]\n\n landmarks = []\n ibug_index = 1 # count from 1 to 68 for all ibug landmarks\n for l in lines:\n coords = l.split()\n landmarks.append(eos.core.Landmark(str(ibug_index), [float(coords[0]), float(coords[1])]))\n ibug_index = ibug_index + 1\n\n return landmarks", "def readAMBERCrd(self, phys, filename):\r\n\r\n\tf = open(filename, 'r')\r\n\tdata = f.read()\r\n\t# Keep going with this!!!\r\n numbers = data.split(' ')\r\n while (numbers.count('') != 0):\r\n numbers.remove('')\r\n \r\n phys.posvec.resize(int(numbers[0].replace('\\n', '')))\r\n 
for i in range(1, len(numbers), 3):\r\n if (numbers[i].find('\\n') != -1):\r\n numbers[i].replace('\\n', '')\r\n phys.positions[i-1] = numbers[i]\r\n phys.positions[i] = numbers[i+1]\r\n phys.positions[i+1] = numbers[i+2]", "def _read_arf(file):\n with fits.open(file) as hdul:\n data = hdul[1].data\n\n return data['energ_lo'], data['energ_hi'], data['specresp']", "def read_ann_file(fileid, ann_dir):\n ann_file = \"%s/%s.ann\"%(ann_dir,fileid)\n with codecs.open(ann_file, 'r', 'utf-8') as f:\n data = f.read()\n rows = data.split('\\n')\n entities = {}\n ent_count = 0\n relations = {}\n #annotations = []\n for row in rows:\n cols = row.split(\"\\t\")\n ann_id = cols[0]\n if(u\"#\" in cols[0]):\n tmp = cols[1].split()[1:],\" \",cols[2]\n annotations.append(tmp)\n elif(len(cols)==3 and u\"T\" in cols[0]):\n # is an entity\n ent_count += 1\n ent_type = cols[1].split()[0]\n ranges = cols[1].replace(\"%s\"%ent_type,\"\")\n if \";\" in ranges:\n ranges = [{\"start\":int(r.split()[0]),\"end\":int(r.split()[1])} for r in ranges.split(';')]\n else:\n ranges = [{\"start\":int(ranges.split()[0]),\"end\":int(ranges.split()[1])}]\n entities[cols[0]] = {\"ann_id\":ann_id\n ,\"entity_type\": ent_type\n ,\"positions\": ranges\n ,\"surface\":cols[2]\n ,\"continuation\":False}\n elif(len(cols)>=2 and u\"R\" in cols[0]):\n rel_type, arg1, arg2 = cols[1].split()\n relations[cols[0]] = {\"ann_id\":ann_id\n ,\"arguments\":(arg1.split(\":\")[1], arg2.split(\":\")[1])\n ,\"relation_type\":rel_type}\n else:\n if(len(cols)>1):\n if(cols[1].split()[0]==\"Continuation\"):\n continued_entity_id = cols[1].split()[1]\n #print cols[1].split()[0],continued_entity_id\n entities[continued_entity_id][\"continuation\"] = True\n return entities, relations", "def anglor(infile, sequence):\n return np.loadtxt(infile, usecols=1).clip(min=-180, max=180).reshape((1, -1, 1))", "def read_file(self, filename=None):\n print(f'reading file')\n\n if filename is None:\n filename = self.model_file\n\n with open(filename, 'r') as f:\n # count number of lines\n npts_file = sum([1 for line in f])\n\n # go back to start and read second line in file to get number of variables\n f.seek(0)\n f.readline()\n l = f.readline()\n nvars_file = int(l.split(' ')[-1])\n\n # subtract header rows\n npts_file -= (nvars_file + 2)\n\n print(f'{nvars_file} variables found in the initial model file')\n print(f'{npts_file} points found in the initial model file')\n\n var_idx_map = {}\n\n # read in the names of the variables\n for i in range(nvars_file):\n var_name_file = f.readline().strip()\n if var_name_file.lower() == 'n':\n var_name_file = 'neut'\n elif var_name_file == 'p':\n var_name_file = 'prot'\n\n # create map of file indices to model indices\n try:\n var_idx_map[self.idx[var_name_file]] = i+1\n except KeyError:\n pass\n\n base_r = np.zeros(npts_file)\n base_state = np.zeros((npts_file, self.nvar))\n\n # read in model data\n for i, line in enumerate(f):\n variables = [float(v) for v in line.split(' ')]\n\n base_r[i] = variables[2]\n\n for j in range(self.nvar):\n if j in var_idx_map:\n base_state[i, j] = variables[var_idx_map[j]]\n\n return npts_file, base_r, base_state", "def import_from_pos(fh):\n elem = None\n while True:\n l = fh.readline()\n if not l: break\n if 'nwfc1' in l and 'nwfc2' in l:\n w = l.split()\n nwfc1, nwfc2 = int(w[0]), int(w[1])\n # nwfc2 is assumed to be one - only one l value\n if 'lwfc1' in l:\n w = l.split('!')[0].split()\n lwfc1 = [int(_) for _ in w]\n if 'lwfc2' in l:\n lwfc2 = int(l.split()[0])\n if 'nonzero elements' in l:\n 
n = int(l.split()[0])\n elem = []\n l = fh.readline()\n c = 0\n while l and c < n:\n w = l.split()\n if len(w) in {5, 10}: # 5-col is for old pos format and 10-col is the enriched format by yfliang\n # (l,m) in lwfc1, m in lwfc2 (only one), i = (x=1,y=2,z=3)\n # m ranges from -l to l\n # elem = < h_c | r_i | beta_lm > (Core-level wavefunctions always proceed. )\n elem.append([int(_) for _ in w[ : 3]] + [float(w[3]) + 1j * float(w[4])]) \n l = fh.readline()\n c += 1\n return lwfc1, lwfc2, elem", "def readLocations():\n locationsRead = []\n\n # Parallel reading from address_file and locations_file\n with open(\"Files/PublicPlaces.txt\", 'r', encoding='utf8') as f:\n for line in f:\n if line == \"\\n\":\n continue\n details = line.split(\",\")\n address = []\n for detail in details:\n address.append(detail.rstrip('\\n').rstrip().lstrip())\n locationsRead.append(address)\n f.close()\n return locationsRead", "def read_mesa(self, filename=None):\n\n if filename is None:\n filename = self.model_file\n\n with open(filename, 'r') as f:\n # count number of lines\n npts_file = sum([1 for line in f])\n\n # go back to start and read first line in file to get number of parameters\n f.seek(0)\n l = f.readline()\n nparams_file = int(l.split(' ')[-1])\n\n # skip lines 2-4\n for i in range(3):\n f.readline()\n\n # the fifth line will give us the number of variables\n l = f.readline()\n nvars_file = int(l.split(' ')[-1])\n\n # subtract header rows\n npts_file -= 6\n\n print(f'{nvars_file} variables found in the initial model file')\n print(f'{npts_file} points found in the initial model file')\n\n var_idx_map = {}\n logR_idx = -1\n\n # read in the names of the variables\n for i in range(nvars_file):\n var_name_file = f.readline().strip()\n if var_name_file.lower() == 'n':\n var_name_file = 'neut'\n elif var_name_file == 'p':\n var_name_file = 'prot'\n\n if var_name_file == 'logR':\n logR_idx = i\n continue\n\n # create map of file indices to model indices\n try:\n var_idx_map[self.idx[var_name_file]] = i\n except KeyError:\n var_idx_map[self.idx['spec'] - 1 + network_module.network_species_index(var_name_file.lower())] = i\n\n base_r = np.zeros(npts_file)\n base_state = np.zeros((npts_file, self.nvar))\n\n # read in model data\n for i, line in enumerate(f):\n variables = [float(v) for v in line.split(' ')]\n\n # need to reverse the inputs file here\n\n n = npts_file - i - 1\n\n base_r[n] = R_solar * 10**variables[logR_idx]\n\n for j in range(self.nvar):\n if j in var_idx_map:\n base_state[n, j] = variables[var_idx_map[j]]\n\n return npts_file, base_r, base_state", "def read_from_file(self, filename):\n with open(filename, 'r') as f:\n for line in f.read().splitlines():\n name, neighbours, r_table = line.split('!')\n\n self.add_new(name)\n if neighbours:\n for neighbour in neighbours.split(';'):\n try:\n self.add_neighbours(name, neighbour)\n except Exception as e:\n\n pass\n if r_table:\n for network in r_table.split(';'):\n net_name, distance = network.split(':')\n\n distance = int(distance)\n self.add_network(name, net_name, distance)", "def read_coordinate_file(file_path, start_row=1, end_row=None):\n\n # Read file\n with open(file_path, newline='') as csvfile:\n spamreader = csv.reader(csvfile, delimiter=',', quotechar='|')\n\n x = []\n y = []\n z = []\n\n for row in spamreader:\n x.append(row[0])\n y.append(row[1])\n z.append(row[2])\n\n if end_row is None:\n end_row = len(x) - 1\n\n x = [float(l) for l in x[start_row:end_row]]\n y = [float(l) for l in y[start_row:end_row]]\n z = [float(l) for l in 
z[start_row:end_row]]\n\n return [[x[i], y[i], z[i]] for i in range(len(x))]", "def readFile(fname):\n\n fromto = []\n cols = []\n with open(fname , 'r') as f:\n cols = f.readline().split(\",\")[0:4] # Headline\n for line in f:\n tm, frm, to, am = line.split(\",\")[0:4]\n frm = int(frm.lstrip())\n to = int(to.lstrip())\n fromto.append((frm,to))\n return cols, fromto", "def _read_file_for_magnets(sequence_file):\n LOG.debug(\" Reading File\")\n length_constants = {}\n magnet_strings = {}\n with open(sequence_file, 'r') as f_seq:\n for line in f_seq:\n var_and_value = _find_element_length(line)\n if var_and_value is not None:\n length_constants[var_and_value[0]] = var_and_value[1]\n else:\n var_and_value = _find_magnet_strength(line)\n if var_and_value is not None:\n magnet_strings[var_and_value[0]] = var_and_value[1]\n return magnet_strings, length_constants", "def readin():\r\n nodes = np.loadtxt('Vnodes.txt', ndmin=2)\r\n mats = np.loadtxt('Vmater.txt', ndmin=2)\r\n elements = np.loadtxt('Veles.txt', ndmin=2)\r\n loads = np.loadtxt('Vloads.txt', ndmin=2)\r\n return nodes, mats, elements, loads", "def read_in_file():\n\t# Declare variables\n\treads = []\n\n\t# Get command line arguments\n\targuments = sys.argv\n\targuments_length = len(arguments)\n\n\t# Read file is the first argument\n\tread_file_name = arguments[1]\n\n\t# Process read file \n\tread_file = open(read_file_name, 'r')\n\tfor line in read_file:\n\t\tread_info = line.split()\n\t\tread_string = read_info[2].replace('\\'', '')\n\t\tnew_read = GenerativeRead(read_string, [], read_info[5], read_info[3], None, [], read_info[0], read_info[1], read_info[4]) \n\t\treads.append(new_read)\n\tread_file.close()\n\n\t# Repeat regions file in the second argument\n\trepeat_file_name = arguments[2]\n\n\t# Process repeat file\n\trepeat_file = open(repeat_file_name, 'r')\n\talignments = [[]]\n\talignment_index = -1\n\tprevious_line = ''\n\n\n\tfor line in repeat_file:\n\t\talignment_info = line.split()\n\n\t\t# This consists of a tuple of alignment string, alignment start position and alignment chromosome\n\t\t#new_align = alignment_info[2], alignment_info[4], alignment_info[3]\n\n\t\tnew_align = Alignment(alignment_info[2], None, alignment_info[4], alignment_info[3])\n\n\t\tif previous_line != alignment_info[0]:\n\t\t\t# It is not a repeat\n\t\t\talignment_index = alignment_index + 1\n\t\t\talignments.append([])\n\t\t\tprevious_line = alignment_info[0]\n\n\t\talignments[alignment_index].append(new_align)\n\n\trepeat_file.close()\n\n\t# Associate each read with the other alignments\n\tfor read in reads:\n\t\t# Find the other alignments\n\t\tpos = read.get_position()\n\t\tfound = False\n\t\tfound_index = -1\n\n\t\tfor a_index, alignment_lists in enumerate(alignments):\n\t\t\t# find matching alignments\n\t\t\t# TODO: Don't add alignment already have\n\t\t\t# TODO: Make functional with filter\n\t\t\tfor align in alignment_lists:\n\t\t\t\tif align.get_position() == pos:\n\t\t\t\t\tfound = True\n\t\t\t\t\tfound_index = a_index\n\t\t\t\t\tbreak\n\n\t\t\tif found is True:\n\t\t\t\tbreak\n\n\t\tif found is True:\n\t\t\tfor new_align in alignments[found_index]:\n\t\t\t\tread.add_alignment(new_align)\n\t\t\t\n\n\n\t# SNP files are the remaining ones\n\tsnp_file_names = [arguments[file_id] for file_id in range(3, arguments_length) ]\n\n\t# Process SNP files\n\tfor file_name in snp_file_names:\n\t\tsnp_file = open(file_name, 'r')\n\n\t\tfor line in snp_file:\n\t\t\tsnp_info = line.split()\n\t\t\tsnps = snp_info[3].split('/')\n\t\t\tsnp_pos = 
int(float(snp_info[2]))\n\n\t\t\t# Ignore alleles that are longer than one base\n\n\t\t\t\n\t\t\tif all(len(x) < 2 for x in snps):\n\n\t\t\t\t# Iterate through reads and determine whether or not it contains this SNP\n\t\t\t\tpos_low = snp_pos - 49\n\t\t\t\n\n\t\t\t\tfor read in reads:\n\t\t\t\t\tpositions = read.get_alignment_positions()\n\n\t\t\t\t\tfor p_index, p in enumerate(positions):\n\t\t\t\t\t\tp = int(float(p))\n\t\t\t\t\t\tif p >= pos_low and p <= snp_pos:\n\t\t\t\t\t\t\t# Get index of snp\n\t\t\t\t\t\t\toffset = snp_pos - p\n\t\t\t\t\t\t\tcalls = [0, 0, 0, 0]\n\t\t\t\t\t\t\tfor snp in snps:\n\t\t\t\t\t\t\t\tcall_index = get_base_num(snp)\n\t\t\t\t\t\t\t\tcalls[call_index] = 1\n\n\t\t\t\t\t\t\t# Add the SNP to the read\n\t\t\t\t\t\t\tread.add_snp(p_index, offset, calls)\n\t\t\t\t\t\t\t\n\t\tsnp_file.close()\n\treturn reads", "def __read():\n f = file(constellation_data_path)\n constellations = []\n for line in f:\n tokens = line.split()\n if not tokens: continue\n hip_numbers = [int(t) for t in tokens[2:]]\n element = tokens[0], zip(hip_numbers[::2], hip_numbers[1::2])\n constellations.append(element)\n f.close()\n return constellations", "def readGenos(self,genofile):\n self.gen = np.zeros((len(self.ped),len(self.mark)))\n self.gen[:] = np.nan\n marklist = None\n with open(genofile,'r') as fin:\n for line in fin:\n if line.startswith('#'):\n if not marklist: marklist = line.strip('#').strip().split()\n continue\n l = line.strip().split()\n if len(l) < 1: continue\n try: irow = self.ped[l[self.nc]]['rank']\n except KeyError:\n continue\n for i,mark in enumerate(self.marklist):\n if mark not in self.mark: continue\n icol = self.mark[mark]['rank']\n if self.ia == 1:\n a = l[i+self.ic]\n elif self.ia == 2:\n a = self.tbase012(l[i+self.ic],mark)\n elif self.ia == 3:\n a = self.tbase012(l[i*2+self.ic]+l[i*2+1+self.ic],mark)\n if a not in ['0','1','2']: a = np.nan\n else: a = int(a)\n self.gen[irow,icol] = a", "def read_from_file(self,fn):\n fh = open(fn,\"r\")\n labels = []\n xyz = []\n sizes = []\n colors = []\n for line in fh.readlines():\n try:\n if not line.startswith(\"#\"):\n label,x,y,z,size,r,g,b = line.split(\",\")\n labels.append(label)\n xyz.append([x,y,z])\n sizes.append(size)\n colors.append((float(r),float(g),float(b)))\n except IOError, ioe:\n print \"IOError:\", ioe\n self._labels = np.array(labels)\n self._xyz = np.array(xyz).astype(\"f\")\n self._sizes = np.array(sizes).astype(\"f\")\n self._colors = np.array(colors)", "def read_off(filename):\n with open(filename) as f:\n # first line [0] has only the word OFF\n lines = f.readlines()\n if lines[0].find('OFF') < 0:\n print('not an OFF file')\n return None, None\n # second line [1] has counts for ....\n counts = lines[2].split()\n vertex_count = int(counts[0])\n vox_count = int(counts[1])\n # then follows vertices from lines[3] to lines[3+vertex_count]\n vertices = np.asarray([float(s) for s in lines[3].split()])\n\n for line in lines[4:3 + vertex_count]:\n vertices = np.vstack(\n (\n vertices,\n np.asarray([float(s) for s in line.split()])\n )\n )\n # now extract the centers lines[2+vertex_count] to lines(-1)\n centers = np.asarray([float(s)\n for s in lines[3 + vertex_count].split()])\n for line in lines[3 + vertex_count + 1:3 + vertex_count + vox_count]:\n if len(line) > 0:\n centers = np.vstack(\n (\n centers,\n np.asarray([float(s) for s in line.split()])\n )\n )\n return vertices, centers", "def read_legos(filename):\n \n all_legos = []\n for line in open(filename):\n line = line.strip(\"\\n\")\n lego_info = 
line.split(\",\")\n lego_type = lego_info[0].strip()\n lego_count = int(lego_info[1])\n for i in range(lego_count):\n all_legos.append(lego_type)\n return all_legos", "def read(self, FN, natoms=None, return_title=False, \\\n multiplier=None, trajectory=False):\n if not os.path.isfile(FN):\n raise Exception('Coordinate file %s does not exist!' % FN)\n if FN.endswith('.gz'):\n import gzip\n F = gzip.open(FN, 'r')\n else:\n F = open(FN, 'r')\n dat = F.read().strip().split('\\n')\n F.close()\n\n title = dat.pop(0) # Title\n\n if len(dat[0].split()) > 1:\n # VMD format (does not specify number of atoms)\n crd = []\n for line in dat:\n crd = crd + [float(x) for x in line.split()]\n crd = np.resize(crd, (len(crd) / 3, 3))\n else:\n # AMBER format\n file_natoms = int(dat.pop(0)) # Number of atoms\n if (natoms is not None) and (file_natoms != natoms):\n print \"Incorrect number of atoms in crd file\"\n return np.array([])\n\n if trajectory:\n w = 8 # For mdcrd\n else:\n w = 12 # For inpcrd\n crd = []\n for line in dat:\n crd = crd + [float(line[x:x + w]) for x in range(0, len(line), w)]\n crd = np.resize(crd, (len(crd) / 3, 3))\n\n if multiplier is not None:\n crd = multiplier * crd\n if (natoms is not None):\n crd = np.vsplit(crd, crd.shape[0] / natoms)\n print \" read %d configurations from %s\" % (len(crd), FN)\n\n if return_title:\n return (crd, title)\n else:\n return crd", "def cfdReadPointsFile(self):\r\n\r\n with open(self.pointsFile,\"r\") as fpid:\r\n \r\n print('Reading points file ...')\r\n points_x=[]\r\n points_y=[]\r\n points_z=[]\r\n \r\n for linecount, tline in enumerate(fpid):\r\n \r\n if not io.cfdSkipEmptyLines(tline):\r\n continue\r\n \r\n if not io.cfdSkipMacroComments(tline):\r\n continue\r\n \r\n if \"FoamFile\" in tline:\r\n dictionary=io.cfdReadCfdDictionary(fpid)\r\n continue\r\n \r\n if len(tline.split()) ==1:\r\n if \"(\" in tline:\r\n continue\r\n if \")\" in tline:\r\n continue\r\n else:\r\n self.numberOfNodes = int(tline.split()[0])\r\n continue\r\n \r\n tline=tline.replace(\"(\",\"\")\r\n tline=tline.replace(\")\",\"\")\r\n tline=tline.split()\r\n \r\n points_x.append(float(tline[0]))\r\n points_y.append(float(tline[1]))\r\n points_z.append(float(tline[2]))\r\n \r\n ## (array) with the mesh point coordinates \r\n self.nodeCentroids = np.array((points_x, points_y, points_z), dtype=float).transpose()", "def readevtfile(infile, skip_header=True):\n\n\tdata = []\n\n\twith open(infile, 'r') as f:\n\t\tif skip_header:\n\t\t\tf.readline()\n\t\tfor line in f:\n\t\t\tdata.append(line.split())\n\n\tangs = np.asarray(data, dtype=float)\n\n\treturn angs", "def parse_data_file(self, file_name: str) -> List[Tuple[str, int]]:\n with open(file_name, \"r\") as f:\n data_list = []\n for line in f.readlines():\n path, target = line.split()\n if not os.path.isabs(path):\n path = os.path.join(self.root, path)\n target = int(target)\n data_list.append((path, target))\n return data_list", "def readGR3File(inputFilename):\n print 'Reading ' + inputFilename + ' ...'\n infile = open(inputFilename, 'r')\n description = infile.readline().strip() # remove leading/trailing whitespace\n tmpStr = infile.readline()\n nTriangles, nNodes = (int(s) for s in tmpStr.split())\n print ' nTriangles={0:d} nNodes={1:d}'.format(nTriangles, nNodes)\n\n # nodes\n nodeArray = readNodeBlock(infile, nNodes)\n nodenum = np.array(nodeArray[:, 0].flatten(), dtype=int)\n nodexyz = np.zeros((nNodes, 3))\n nodexyz[:, :2] = nodeArray[:, 1:3]\n nodalValues = nodeArray[:, 3]\n\n print ' Nodal values min={0:g} 
max={1:g}'.format(min(nodalValues), max(nodalValues))\n\n # triangular elements\n triArray = readElemBlock(infile, nTriangles)\n\n trinum = triArray[:, 0].flatten()\n tritype = triArray[0, 1]\n trinodes = triArray[:, -3:] - 1 # three last columns, 0-based indexing\n #triangles = meshElements(trinodes,trinum,tritype)\n\n x = nodexyz[:, 0]\n y = nodexyz[:, 1]\n\n tmpStr = infile.readline()\n boundaries = []\n if len(tmpStr) > 0:\n # boundary information, if not end of file\n nOpenBndSegments = int(tmpStr.split()[0])\n nOpenBndNodesTot = int(infile.readline().split()[0])\n print ' nOpenBndSegments={0:d} nOpenBndNodesTot={1:d}'.format(nOpenBndSegments, nOpenBndNodesTot)\n for iBnd in range(nOpenBndSegments):\n bndHeader = infile.readline().split()\n nBndNodes = int(bndHeader[0])\n tag = bndHeader[-1]\n if tag.isdigit():\n tag = 'open' + tag\n print ' open bnd {0:d} {1:s}: {2:d} nodes'.format(iBnd + 1, tag, nBndNodes)\n tmpList = []\n for iN in range(nBndNodes):\n tmpList.append(int(infile.readline()))\n nodes = np.array(tmpList, dtype=int) - 1\n boundaries.append(meshContainer.meshBoundary('open', tag, nodes))\n nLandBndSegments = int(infile.readline().split()[0])\n nLandBndNodesTot = int(infile.readline().split()[0])\n landBndTags = range(\n nOpenBndSegments + 1,\n nOpenBndSegments + nLandBndSegments + 1)\n print ' nLandBndSegments={0:d} nLandBndNodesTot={1:d}'.format(nLandBndSegments, nLandBndNodesTot)\n for iBnd in range(nLandBndSegments):\n bndHeader = infile.readline().split()\n nBndNodes = int(bndHeader[0])\n try:\n landType = int(bndHeader[1])\n except:\n print \"\"\"Land boundary type missing in gr3 file. Add 0/1 (land/island) after number of nodes in each land boudary, e.g.\n 1002 = Total number of closed boundary nodes\n 501 0 = Number of nodes in closed boundary 1\"\"\"\n raise Exception(\n 'Could not parse land boundary type (0/1 - land/island)\\n')\n landType = 'island' if landType == 1 else 'land'\n tag = landType + bndHeader[-1]\n print ' land bnd {0:d} {1:s}: {2:d} nodes'.format(iBnd + 1, tag, nBndNodes)\n tmpList = []\n for iN in range(nBndNodes):\n tmpList.append(int(infile.readline()))\n #tmpList = fromfile(infile,dtype=int,count=nBndNodes,sep=' ')\n nodes = np.array(tmpList, dtype=int) - 1\n boundaries.append(meshContainer.meshBoundary(landType, tag, nodes))\n\n infile.close()\n\n # for better interpolation, round coordinates to 1e-4\n nDig = 4\n x = np.round(x, nDig)\n y = np.round(y, nDig)\n\n return x, y, nodalValues, trinodes, boundaries, description", "def getPosition(fname, pos):\n count = 0\n infile = open(fname, \"r\")\n n_line = infile.readline()\n temp = []\n for line in infile:\n words = line.split(',')\n temp.append(Player(words[0], int(words[1]), int(words[2]), int(words[3]),\n int(words[4]), float(words[5]), pos, 1))\n count += 1\n infile.close()\n return temp, count", "def readNodalValues(inputFilename):\n print 'Reading ' + inputFilename + ' ...'\n infile = open(inputFilename, 'r')\n description = infile.readline().strip() # remove leading/trailing whitespace\n tmpStr = infile.readline()\n nTriangles, nNodes = (int(s) for s in tmpStr.split())\n print ' nTriangles={0:d} nNodes={1:d}'.format(nTriangles, nNodes)\n\n # nodes\n nodeArray = readNodeBlock(infile, nNodes)\n nodenum = np.array(nodeArray[:, 0].flatten(), dtype=int)\n nodexyz = np.zeros((nNodes, 3))\n nodexyz[:, :2] = nodeArray[:, 1:3]\n nodalValues = nodeArray[:, 3]\n\n return nodexyz[:, 0], nodexyz[:, 1], nodalValues", "def read_pattern_file(file_path: str) -> np.ndarray:\n\n # Check if the example 
file exists\n if not os.path.isfile(file_path):\n return None\n\n rows = 0\n cols = 0\n with open(file_path) as f:\n for i, l in enumerate(f):\n if l[0] != \"!\":\n rows += 1\n if len(l) > cols:\n cols = len(l) - 1 # Exclude the end of line char from the column count\n\n grid = np.zeros((rows, cols), dtype=np.uint8)\n\n skip_rows = 0\n with open(file_path) as f:\n for j, line in enumerate(f):\n for k, c in enumerate(line):\n if c == \"!\" and k == 0:\n skip_rows += 1\n break\n elif c == \"O\":\n grid[j - skip_rows, k] = 1\n\n return grid", "def _get_file_positions(self,filename):\n if os.path.exists(self._ahfBasename + 'fpos'):\n f = util.open_(self._ahfBasename + 'fpos')\n for i in range(self._nhalos):\n self._halos[i+1].properties['fstart'] = int(f.readline())\n f.close()\n else:\n f = util.open_(filename)\n for h in xrange(self._nhalos):\n if len((f.readline().split())) == 1:\n f.readline()\n self._halos[h+1].properties['fstart'] = f.tell()\n for i in xrange(self._halos[h+1].properties['npart']):\n f.readline()\n f.close()", "def Read_MapGen(self, filename, stats = 0,AllLines=0):\n with open(filename,'rt') as file_:\n data = [s.strip() for s in file_]\n\n Shorelines = []\n segment = []\n for line in data:\n if line:\n if line == \"# -b\": #New segment beginning\n if segment: Shorelines.append(N.array(segment))\n segment = []\n else:\n segment.append([float(e) for e in line.split()])\n if segment: Shorelines.append(N.array(segment))\n\n if stats:\n NumSegments = len(Shorelines)\n NumPoints = 0\n for segment in Shorelines:\n NumPoints = NumPoints + len(segment)\n AvgPoints = NumPoints / NumSegments\n print(\"Number of Segments: \", NumSegments)\n print(\"Average Number of Points per segment: \", AvgPoints)\n if AllLines:\n Lines = []\n for segment in Shorelines:\n Lines.append(segment[0])\n for point in segment[1:-1]:\n Lines.append(point)\n Lines.append(point)\n Lines.append(segment[-1])\n return Lines\n else:\n return Shorelines", "def read_file(self,filename):\n\n f = open(filename,'r')\n lines = f.readlines()\n f.close()\n\n sequences = [l.strip() for l in lines if l.strip() != \"\"]\n\n self.load_sequences(sequences)", "def _read(self, file_name):\n f = open(file_name)\n lines = f.readlines()\n begin = 0\n end = 0\n while end < len(lines):\n op = ''\n for l in lines[begin:]:\n end += 1\n op = l.split()[0]\n if op in operations:\n self.operations.append(op)\n break\n if op == '=push':\n nfa = Automaton(lines[begin:end - 1])\n self.aut_to_push.append(nfa)\n begin = end\n f.close()", "def read_file(self,file_name):\r\n data = np.genfromtxt(file_name)\r\n return data;", "def readAnnotation(path, gui):\n file = open(path, 'r')\n file.readline()\n line = file.readline()\n data = {}\n counter = 0\n gui.write_to_output(\"\\n\")\n while line != '':\n counter += 1\n if counter % 10000 == 0:\n gui.write_to_output(\"Done reading \" + str(counter) + \" annotation entries\\n\", overwrite=True)\n columns = line.split()\n name = columns[-4]\n start = int(columns[4])\n end = int(columns[5])\n cds_start = int(columns[6])\n cds_end = int(columns[7])\n strand = columns[3]\n # data.append(Gene(name, reads, np.array([start, end]).astype(np.int), strand, chrm, np.array([cds_start, cds_end])))\n line = file.readline()\n if name in data.keys():\n data[name] = np.vstack((data[name], np.array([start, end, cds_start, cds_end])))\n else:\n data[name] = np.array([[start, end, cds_start, cds_end]])\n # return list(sorted(data, key=lambda x: x.getName()))\n gui.write_to_output(\"Done reading \" + str(counter) + \" 
annotation entries\\n\", overwrite=True)\n return data", "def parse(self, filehandle):\n l = filehandle.readline()\n if l.split()[0] != '##maf':\n return\n else:\n self.setpar(l.split()[1:])\n\n l=filehandle.readline()\n while l:\n la = l.split()\n## print la\n if(len(la)==0 or la[0]=='#'):\n## print \"skipping\"\n 1\n elif(la[0]=='a'):\n## print \"reading alignment\"\n self.readalign(la[1:], filehandle)\n else:\n## print \"end of records\"\n return\n\n l=filehandle.readline()", "def read_file(infile_name):\n chr_list = [0]*13 \n for i in range(len(chr_list)):\n chr_list[i] = [] \n infile = open(infile_name)\n for line in infile:\n if line.startswith('SL2.40'):\n info = line.strip().split()\n chr = int(info[0][-2:])\n chr_list[chr].append(map(int,info[1:3])+[[info[-1]]])\n else:\n pass\n infile.close()\n return chr_list", "def read_file_agsm(self,filename):\n\n narr,larr,farr,iarr,nn,exceed_freqlim = \\\n aims_fortran.read_file_agsm(filename,config.npositive,config.agsm_cutoff, \\\n config.cutoff*self.cutoff)\n self.modes = np.array(zip(narr[0:nn],larr[0:nn],farr[0:nn],iarr[0:nn]),dtype=modetype)\n\n return exceed_freqlim", "def read_from(self, filename):\n self.x, self.y = np.loadtxt(filename, unpack=True, usecols=(0, 1))", "def read_log_gps(ac_id, filename):\n f = open(filename, 'r')\n pattern = re.compile(\"(\\S+) \"+ac_id+\" GPS (\\S+) (\\S+) (\\S+) (\\S+) (\\S+) (\\S+) (\\S+) (\\S+) (\\S+) (\\S+) (\\S+)\")\n list_meas = []\n while True:\n line = f.readline().strip()\n if line == '':\n break\n m = re.match(pattern, line)\n if m:\n list_meas.append([float(m.group(1)), float(m.group(2)), float(m.group(3)), float(m.group(4)), float(m.group(5)), float(m.group(6)), \n float(m.group(7)), float(m.group(8)), float(m.group(9)), float(m.group(10)), float(m.group(11)),float(m.group(12))])\n return np.array(list_meas)", "def __read_pond_file(self, pondfile):\r\n self.currents = []\r\n with open(pondfile, 'r') as infile:\r\n reader = csv.reader(infile)\r\n start_end = [int(v) for v in next(reader)]\r\n self.start_state = tuple(start_end[:2])\r\n self.end_state = tuple(start_end[2:])\r\n for row in reader:\r\n self.currents.append(row)\r\n self.currents = self.currents[::-1]", "def getCoords(file):\n global demag\n name = file.split('.')[0]\n name = name.split('_')\n x = int(name[2])//demag\n y = int(name[3])//demag\n return(int(x),int(y))", "def read_from_grid(filename):\n\n x=[]\n y=[]\n z=[]\n\n fid=open(filename,'r')\n\n for point in fid:\n x.append(float(point.split()[0]))\n y.append(float(point.split()[1]))\n z.append(float(point.split()[2]))\n\n fid.close()\n\n return x, y, z", "def loadNodes(self, fname):\r\n with open(fname, \"r\") as fp:\r\n\r\n # Read in the header\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n if item[0] == \"SPECGRID\":\r\n self.SPECGRID = np.array(fp.readline().split()[0:3], dtype=int)\r\n if item[0] == \"COORDSYS\":\r\n self.COORDSYS = fp.readline().split()\r\n if item[0] == \"COORD\":\r\n break\r\n\r\n # Read in the coordinates\r\n self.coords = []\r\n for line in fp:\r\n if line.split()[-1] != \"/\":\r\n item = line.split()\r\n for c in item:\r\n if '*' in c:\r\n cc = c.split('*')\r\n for i in range(int(cc[0])):\r\n self.coords.append(cc[-1])\r\n else:\r\n self.coords.append(c)\r\n else:\r\n if len(line.split()) > 1:\r\n item = line.split()\r\n for i in range(len(item) - 1):\r\n cc = item[i]\r\n if '*' in cc:\r\n ccc = cc.split('*')\r\n for j in range(int(ccc[0])):\r\n self.coords.append(ccc[-1])\r\n else:\r\n self.coords.append(c)\r\n 
break\r\n else:\r\n break\r\n\r\n # Read in ZCORN\r\n self.zcorn = []\r\n i = 0\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n if item[0] == \"ZCORN\":\r\n for line in fp:\r\n if line.split():\r\n if line.split()[-1] != \"/\":\r\n self.zcorn += line.split()\r\n else:\r\n self.zcorn += line.split()[0:-1]\r\n break\r\n if len(self.zcorn) > 0:\r\n break\r\n\r\n # Read in (in)active cells\r\n self.active = []\r\n for line in fp:\r\n item = line.split()\r\n if len(item) > 0:\r\n if item[0] == \"ACTNUM\":\r\n for line in fp:\r\n if line.split():\r\n if line.split()[-1] != \"/\":\r\n c = line.split()\r\n if '*' in c:\r\n cc = c.split('*')\r\n for i in range(float(cc[0])):\r\n self.active += cc[-1]\r\n else:\r\n self.active += c\r\n else:\r\n self.active += line.split()[0:-1]\r\n break\r\n\r\n self.coords = np.array(self.coords, dtype=float)\r\n print(self.coords)\r\n\r\n # In Petrel...\r\n self.ne = self.SPECGRID[0] # x i\r\n self.nn = self.SPECGRID[1] # y j\r\n self.nz = self.SPECGRID[2] # z k\r\n\r\n # build grid\r\n self.buildGrid(plot=False)\r\n self.buildActiveCells(plot=False)\r\n self.buildZGrid(plot=False)\r\n # self.calculateVolumes(plot=False)\r\n #\r\n # Convert to VTK\r\n self.GridType = \"vtkStructuredGrid\"\r\n self.Grid = vtk.vtkStructuredGrid()\r\n self.Grid.SetDimensions(self.ne+1, self.nn+1, self.nz+1)\r\n vtk_points = vtk.vtkPoints()\r\n ve = 1.\r\n\r\n for iz in range(self.nz):\r\n if iz == 0:\r\n for iy in range(self.nn+1):\r\n for ix in range(self.ne+1):\r\n vtk_points.InsertNextPoint( self.X0[ix,iy], \\\r\n self.Y0[ix,iy], \\\r\n ve * self.ZZT[iz][ix,iy] )\r\n for iy in range(self.nn+1):\r\n for ix in range(self.ne+1):\r\n vtk_points.InsertNextPoint( self.X0[ix,iy], \\\r\n self.Y0[ix,iy], \\\r\n ve * self.ZZB[iz][ix,iy] )\r\n self.Grid.SetPoints(vtk_points)\r\n\r\n # Add in active cells\r\n ac = vtk.vtkIntArray()\r\n ac.SetName( \"ActiveCells\" )\r\n for iac in self.ActiveCells.flatten( order='F' ):\r\n ac.InsertNextTuple1( iac )\r\n self.Grid.GetCellData().AddArray(ac)", "def read_data(path):\n fnames = sorted([f for f in glob.glob(os.path.join(path, 'pos', '*.txt'))])\n data = [(1, open(f).readlines()[0]) for f in sorted(fnames)]\n fnames = sorted([f for f in glob.glob(os.path.join(path, 'neg', '*.txt'))])\n data += [(0, open(f).readlines()[0]) for f in sorted(fnames)]\n data = sorted(data, key=lambda x: x[1])\n return np.array([d[1] for d in data]), np.array([d[0] for d in data])", "def read_data(path):\n fnames = sorted([f for f in glob.glob(os.path.join(path, 'pos', '*.txt'))])\n data = [(1, open(f).readlines()[0]) for f in sorted(fnames)]\n fnames = sorted([f for f in glob.glob(os.path.join(path, 'neg', '*.txt'))])\n data += [(0, open(f).readlines()[0]) for f in sorted(fnames)]\n data = sorted(data, key=lambda x: x[1])\n return np.array([d[1] for d in data]), np.array([d[0] for d in data])", "def read_data(filename):\n f = open(filename, \"r\")\n line = f.readline()\n t, n, m, s, population = line.split()\n line = f.readline()\n board = []\n paths = []\n i = 0\n while line:\n if i < int(n):\n board.append([int(x) for x in line if x != '\\n'])\n else:\n paths.append(line if '\\n' not in line else line[:len(line) - 2])\n line = f.readline()\n i += 1\n return int(t), int(n), int(m), int(s), int(population), paths, np.array(board)", "def read(self, filename):\n with RavenFileReader(filename) as f:\n line = f.nexttag()\n while line:\n # Begin data type checks\n if self.cleantag(line) == 'Gauge':\n self.read_metgauge(line, f)\n elif 
self.cleantag(line) == 'ObservationData':\n self.read_obsgauge(line, f)\n # Next line\n line = f.nexttag()", "def Read_Points_From_File(self, fileName):\n\n try:\n fp = open(fileName, 'r')\n\n origSys = pyproj.Proj(init=\"epsg:4326\")\n newSys = pyproj.Proj(init=\"epsg:2436\")\n\n for line, content in enumerate(fp):\n if line > 5:\n lineField = content.replace('\\n', '').split(',')\n lat = float(lineField[0])\n lon = float(lineField[1])\n #DEBUG ONLY\n #print 'lat: %f; lon: %f' % (lat, lon)\n\n x, y = pyproj.transform(origSys, newSys, lon, lat)\n # DEBUG ONLY\n #print 'x: %f; y: %f' % (x, y)\n alt = float(lineField[3])\n date = lineField[5]\n time = lineField[6]\n temp = (x, y, alt, date, time)\n self.__traectory_list.append(temp)\n\n print 'Complete Reading Trajectories.'\n\n fp.close()\n # Catch the error if the Input/Output related error found\n except IOError:\n print 'The file could not be read!'\n self.__traectory_list = []", "def readAD(self):\n\n fname = self.ad_file\n print \"reading ad file \", fname, \" curdir = \", os.getcwd()\n try:\n fh = open(fname,'r')\n self.lines_ad = fh.readlines()\n fh.close()\n except:\n sys.stdout.write (\"Error opening {:}\\n\".format(fname))\n return 0\n\n for i in range(len(self.lines_ad)):\n ln = self.lines_ad[i].split() \n if (len(ln) >1):\n if (ln[1] == \"NumFoil\"):\n self.nSeg = int(ln[0])\n break\n if (ln[1] == \"WindFile\" and self.wind_file == None):\n self.wind_file = ln[0][1:-1]\n self.af_dict = {}\n self.af_dict['polar_idx'] = [0]*self.nSeg\n self.af_dict['polar_files'] = [0]*self.nSeg\n print \"ln, nSeg, i\", ln, self.nSeg, i\n for j in range(self.nSeg):\n lnidx = i+1+j\n ln = self.lines_ad[lnidx].split()\n afpath = fix_path(ln[0].strip().strip(\"\\\"\").strip(\"\\'\"))\n ln[0] = \"\\\"%s\\\"\" % afpath\n self.lines_ad[lnidx] = unsplit(ln)\n self.af_dict['polar_idx'][j] = j+1\n self.af_dict['polar_files'][j] = afpath", "def readAnnotations(f):\n lbf = \"../labels/\" + f[: f.rfind('.')] + \".txt\"\n b = []\n with open(lbf, \"r\") as fh:\n for l in fh:\n p = l.strip().split()\n b.append( (p[0], int(p[1]), int(p[2]), int(p[3]), int(p[4])) )\n\n return b", "def read_traj(ncfiles,indkeep=0):\n\n data = nc.Dataset(ncfiles)\n \n xyz = data.variables['coordinates']\n \n xyzn = Quantity(xyz[indkeep:-1], angstroms) \n \n lens = data.variables['cell_lengths']\n lensn = Quantity(lens[indkeep:-1], angstroms)\n\n angs = data.variables['cell_angles']\n angsn = Quantity(angs[indkeep:-1], degrees)\n\n return data, xyzn, lensn, angsn", "def read_input_pizza(filename):\n lines = open(filename).readlines()\n M, N = [int(val) for val in lines[0].split()]\n available = np.array([int(n) for n in lines[1].split()])\n return M, N, available" ]
[ "0.6724647", "0.6426691", "0.6014739", "0.59488297", "0.59237134", "0.5912095", "0.5865451", "0.5829912", "0.58188653", "0.58121365", "0.58050466", "0.5768979", "0.5765536", "0.5717999", "0.5706781", "0.56984544", "0.5671126", "0.565585", "0.5651771", "0.56221503", "0.5617779", "0.5608225", "0.5600888", "0.55948097", "0.55832714", "0.55720544", "0.5571854", "0.5564828", "0.5542798", "0.5541321", "0.5535555", "0.5534671", "0.5517541", "0.5515015", "0.55034524", "0.5500143", "0.5486868", "0.54722106", "0.54712576", "0.54706144", "0.54687464", "0.54648775", "0.54634327", "0.546229", "0.5454354", "0.5448213", "0.5444187", "0.54419833", "0.54355097", "0.5429446", "0.54260767", "0.54174066", "0.5416365", "0.54151446", "0.5412994", "0.54129606", "0.53984344", "0.5390193", "0.53880847", "0.53822", "0.5382193", "0.5375133", "0.53662694", "0.536491", "0.53635997", "0.53379655", "0.53379244", "0.53351474", "0.5325175", "0.5323847", "0.53220016", "0.5318742", "0.53179455", "0.531576", "0.5313315", "0.5312088", "0.5311794", "0.5311327", "0.53075147", "0.5297576", "0.5292595", "0.5286155", "0.52801764", "0.5268758", "0.52630156", "0.5261112", "0.5257043", "0.52536386", "0.52513856", "0.52510834", "0.5241059", "0.5235775", "0.5235775", "0.5235338", "0.5234959", "0.5228441", "0.52272725", "0.52248967", "0.5219232", "0.52192175" ]
0.56716186
16
The rotation of the earth over the observation times changes the part of the sky measured by each antenna.
def earth_rotation_effect(Nbase, slice_num, int_time, declination=30.): p = np.pi/180. delta = p*declination k = slice_num HA =-15.0*p*(k-1)*int_time/(3600.0) - np.pi/180.0*90.0 + np.pi/180.0*360.0 new_Nbase = np.zeros(Nbase.shape) new_Nbase[:,0] = np.sin(HA)*Nbase[:,0] + np.cos(HA)*Nbase[:,1] new_Nbase[:,1] = -1.0*np.sin(delta)*np.cos(HA)*Nbase[:,0] + np.sin(delta)*np.sin(HA)*Nbase[:,1] + np.cos(delta)*Nbase[:,2] new_Nbase[:,2] = np.cos(delta)*np.cos(HA)*Nbase[:,0] - np.cos(delta)*np.sin(HA)*Nbase[:,1] + np.sin(delta)*Nbase[:,2] return new_Nbase
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def rotate_orbit(self):\n try:\n ang = self.orbit_speed * self.time_scale / self.refresh_rate\n self.obj.rotate(angle=ang, axis=vector(0, 1, 0), origin=self.star.obj.pos)\n self.sum_ang += ang\n except ZeroDivisionError:\n print(\"ERROR: REFRESH_RATE is 0\")\n except (AttributeError, TypeError):\n print(\"ERROR: wrong arguments type while initializing!!\")", "def test_calc_vector_rotation(time_location, moon_time_location, telescope_frame):\n if telescope_frame == \"itrs\":\n time, telescope_location = time_location\n else:\n time, telescope_location = moon_time_location\n\n source = SkyModel(\n name=\"Test\",\n ra=Longitude(12.0 * units.hr),\n dec=Latitude(-30.0 * units.deg),\n frame=\"icrs\",\n stokes=[1.0, 0.0, 0.0, 0.0] * units.Jy,\n spectral_type=\"flat\",\n )\n source.update_positions(time, telescope_location)\n\n coherency_rotation = np.squeeze(source._calc_coherency_rotation())\n\n assert np.isclose(np.linalg.det(coherency_rotation), 1)", "def test_calc_basis_rotation_matrix(time_location, moon_time_location, telescope_frame):\n\n if telescope_frame == \"itrs\":\n time, telescope_location = time_location\n else:\n time, telescope_location = moon_time_location\n\n source = SkyModel(\n name=\"Test\",\n skycoord=SkyCoord(\n Longitude(12.0 * units.hr), Latitude(-30.0 * units.deg), frame=\"icrs\"\n ),\n stokes=[1.0, 0.0, 0.0, 0.0] * units.Jy,\n spectral_type=\"flat\",\n )\n source.update_positions(time, telescope_location)\n\n basis_rot_matrix = source._calc_average_rotation_matrix()\n\n assert np.allclose(np.matmul(basis_rot_matrix, basis_rot_matrix.T), np.eye(3))\n assert np.allclose(np.matmul(basis_rot_matrix.T, basis_rot_matrix), np.eye(3))", "def test_pol_rotator(time_location, spectral_type, unpolarized, below_horizon):\n time, telescope_location = time_location\n\n Nsrcs = 50\n ras = Longitude(np.linspace(0, 24, Nsrcs) * units.hr)\n decs = Latitude(np.linspace(-90, 90, Nsrcs) * units.deg)\n names = np.arange(Nsrcs).astype(\"str\")\n if unpolarized:\n fluxes = np.array([[[1.0, 0.0, 0.0, 0.0]]] * Nsrcs).T * units.Jy\n else:\n fluxes = np.array([[[5.5, 0.7, 0.3, 0.0]]] * Nsrcs).T * units.Jy\n\n # Make the last source non-polarized\n fluxes[..., -1] = [[1.0], [0], [0], [0]] * units.Jy\n\n extra = {}\n # Add frequencies if \"full\" freq:\n if spectral_type == \"full\":\n Nfreqs = 10\n freq_array = np.linspace(100e6, 110e6, Nfreqs) * units.Hz\n fluxes = fluxes.repeat(Nfreqs, axis=1)\n extra = {\"freq_array\": freq_array}\n\n assert isinstance(fluxes, Quantity)\n source = SkyModel(\n name=names,\n ra=ras,\n dec=decs,\n frame=\"icrs\",\n stokes=fluxes,\n spectral_type=spectral_type,\n **extra,\n )\n\n if unpolarized:\n assert source._n_polarized == 0\n else:\n assert source._n_polarized == Nsrcs - 1\n\n source.update_positions(time, telescope_location)\n\n # Check the default of inds for _calc_rotation_matrix()\n rots1 = source._calc_rotation_matrix()\n inds = np.array([25, 45, 16])\n rots2 = source._calc_rotation_matrix(inds)\n assert np.allclose(rots1[..., inds], rots2)\n\n # Unset the horizon mask and confirm that all rotation matrices are calculated.\n if below_horizon:\n source.above_horizon = np.full(source.Ncomponents, False, dtype=bool)\n warn_msg = \"\"\n warn_type = None\n else:\n source.above_horizon = None\n warn_msg = \"Horizon cutoff undefined\"\n warn_type = UserWarning\n\n with uvtest.check_warnings(warn_type, match=warn_msg):\n local_coherency = source.coherency_calc()\n\n if below_horizon:\n assert local_coherency.size == 0\n else:\n assert local_coherency.unit == 
units.Jy\n # Check that all polarized sources are rotated.\n if unpolarized:\n assert units.quantity.allclose(local_coherency, source.frame_coherency)\n else:\n assert not np.all(\n units.quantity.isclose(\n local_coherency[..., :-1], source.frame_coherency[..., :-1]\n )\n )\n assert units.quantity.allclose(\n local_coherency[..., -1], source.frame_coherency[..., -1]\n )", "def solar_model():\n \n latitude, longitude, timezone, elevation = location_input()\n year, time = time_input()\n\n lat_r = latitude/180*np.pi\n lon_r = longitude/180*np.pi \n n = 0\n for i in range(1900,year):\n if i%4 == 0:\n n += 366\n else:\n n+=365\n JulD = n + time + 2415018.5 - (timezone)/24\n LT = time - int(time)\n JC = (JulD - 2451545) / 36525\n x = 46.815 + JC * (0.00059 - JC * 0.001813)\n M_OE = 23 + (26 + (21.448 - JC * x) / 60) / 60\n EEO = 0.016708634 - JC * (0.000042037 + 0.0000001267 * JC)\n GMAS = 357.52911 + JC * (35999.05029 - 0.0001537 * JC)\n GMAS_r = m.radians(GMAS)\n GMLS = (280.46646 + JC * (36000.76983 + JC * 0.0003032))%360\n GMLS_r = m.radians(GMLS)\n Obliq_C = M_OE + 0.00256 * np.cos((125.04 - 1934.136 * JC) / 180 * np.pi)\n Obliq_C_r = m.radians(Obliq_C)\n SEC = np.sin(GMAS_r) * (1.914602 - JC * (0.004817 + 0.000014 * JC)) + np.sin(2 * GMAS_r) * (0.019993 - 0.000101 * JC) + np.sin(3 * GMAS_r) * 0.000289\n STL = GMLS + SEC\n SAL = STL - 0.00569 - 0.00478 * np.sin((125.04 - 1934.136 * JC) / 180 * np.pi)\n SAL_r = m.radians(SAL)\n sin_Delta = np.sin(Obliq_C_r) * np.sin(SAL_r)\n Delta_r = np.arcsin(sin_Delta) #in radians \n Var_y = np.tan((Obliq_C / 2) / 180 * np.pi) * np.tan((Obliq_C / 2) / 180 * np.pi)\n EOT_prime = Var_y * np.sin(2 * GMLS_r) - 2 * EEO * np.sin(GMAS_r) + 4 * EEO * Var_y * np.sin(GMAS_r) * np.cos(2 * GMLS_r) - 0.5 * Var_y * Var_y * np.sin(4 * GMLS_r) - 1.25 * EEO * EEO * np.sin(2 * GMAS_r)\n EOT = 4 * EOT_prime / np.pi * 180 \n TST = (LT * 1440 + EOT + 4 * longitude - 60 * timezone)%1440\n if TST / 4 < 0:\n Omega = TST/4+180\n else:\n Omega = TST/4 - 180 \n Omega_r = m.radians(Omega)\n \n cos_Zenith = np.sin(lat_r) * np.sin(Delta_r) + np.cos(lat_r) * np.cos(Delta_r) * np.cos(Omega_r)\n Zenith_r = np.arccos(cos_Zenith) #in radians\n Aprime_r = np.arccos((np.sin(lat_r) * np.cos(Zenith_r) - np.sin(Delta_r)) / (np.cos(lat_r) * np.sin(Zenith_r)))\n Aprime = Aprime_r / np.pi * 180\n if Omega > 0:\n Azimuth = (Aprime + 180) % 360 #in degrees\n else:\n Azimuth = (540 - Aprime) % 360 #in degrees \n Azimuth_r = Azimuth / 180 * np.pi\n Elev_angle = (np.pi)/2 - Zenith_r\n\n \n # calculate incidence angle\n # Beta is equal to angle of tilted surface to horizontal (in radians)\n Beta = 45 # in degrees\n Beta_r = m.radians(Beta)\n \n cos_incidence = np.sin(Delta_r)* np.sin(lat_r) * np.cos(Beta_r) - np.sin(Delta_r) * np.cos(lat_r) * np.sin(Beta_r) * np.cos(Azimuth_r) + np.cos(Delta_r) * np.cos(lat_r) * np.cos(Beta_r) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(lat_r) * np.sin(Beta_r) * np.cos(Azimuth_r) * np.cos(Omega_r) + np.cos(Delta_r) * np.sin(Beta_r) * np.sin(Azimuth_r) * np.sin(Omega_r) \n incidence_ang_r = np.arccos(cos_incidence)\n \n return Delta_r, lat_r, Omega_r, Zenith_r, Azimuth_r, Elev_angle", "def era(self):\n # earth rotation angle using Universal Time\n J = self.MJD - 51544.5\n fraction = np.mod(J, self.turn)\n theta = np.mod(0.7790572732640 + 0.00273781191135448*J, self.turn)\n return self.turndeg*np.mod(theta + fraction, self.turn)", "def _solar_time(date, lon, lat):\n # IDL is computing time_t as hours and fractional minutes\n time_t = ee.Date(date).get('hour').add(\n 
ee.Date(date).get('minute').divide(60))\n\n # This will return the hour floating point value\n # time_t = ee.Date(date).get('hour').add(ee.Date(date).getFraction('hour'))\n\n # CGM - DOY and hour could be images in order to make these expressions\n julian = _to_jd(date)\n\n # Sunrise time\n julian_ = time_t.divide(24.0).add(julian)\n j_cen = julian_.add(0.5 - 2451545.0).divide(36525.0)\n # CGM - Does the mod happen before or after the multiply\n lon_sun = j_cen.multiply(0.0003032).add(36000.76983) \\\n .multiply(j_cen).mod(360.0).add(280.46646).subtract(360)\n an_sun = j_cen.multiply(-0.0001537).add(35999.05029) \\\n .multiply(j_cen).add(357.52911)\n ecc = j_cen.multiply(0.0000001267).add(0.000042037) \\\n .multiply(j_cen).multiply(-1).add(0.016708634)\n ob_ecl = j_cen.multiply(-0.001813).add(0.00059) \\\n .multiply(j_cen).add(46.815) \\\n .multiply(j_cen).multiply(-1).add(21.448) \\\n .divide(60.0).add(26).divide(60).add(23)\n ob_corr = j_cen.multiply(-1934.136).add(125.04).multiply(deg2rad).cos() \\\n .multiply(0.00256).add(ob_ecl)\n var_y = ob_corr.divide(2.0).multiply(deg2rad).tan().multiply(\n ob_corr.divide(2.0).multiply(deg2rad).tan())\n eq_t = (\n lon_sun.multiply(2.0).multiply(deg2rad).sin().multiply(var_y)\n .subtract(an_sun.multiply(deg2rad).sin().multiply(ecc).multiply(2.0))\n .add(an_sun.multiply(deg2rad).sin()\n .multiply(lon_sun.multiply(2.0).multiply(deg2rad).cos())\n .multiply(var_y).multiply(ecc).multiply(4.0))\n .subtract(lon_sun.multiply(4.0).multiply(deg2rad).sin()\n .multiply(var_y).multiply(var_y).multiply(0.5))\n .subtract(an_sun.multiply(2.0).multiply(deg2rad).sin()\n .multiply(ecc).multiply(ecc).multiply(1.25))\n .multiply(4.0).multiply(rad2deg))\n sun_eq = (\n an_sun.multiply(deg2rad).sin().multiply(\n j_cen.multiply(0.000014).add(0.004817)\n .multiply(j_cen).multiply(-1).add(1.914602))\n .add(an_sun.multiply(2.0).multiply(deg2rad).sin()\n .multiply(j_cen.multiply(-0.000101).add(0.019993)))\n .add(an_sun.multiply(3.0).multiply(deg2rad).sin().multiply(0.000289)))\n sun_true = sun_eq.add(lon_sun)\n sun_app = j_cen.multiply(-1934.136).add(125.04).multiply(deg2rad).sin() \\\n .multiply(-0.00478).subtract(0.00569).add(sun_true)\n\n # CGM - Intentionally not converting back to degrees\n d = ob_corr.multiply(deg2rad).sin() \\\n .multiply(sun_app.multiply(deg2rad).sin()) \\\n .asin()\n\n # CGM - Functions below are lat/lon dependent and can be written as\n # ee.Image expressions\n # CGM - d is still in radians, not converting\n ha_t = lat.expression(\n 'acos((cos(90.833 * pi / 180) / (cos(lat) * cos(d))) - tan(lat) * tan(d))'\n ' * (180 / pi)',\n {'lat': lat, 'd': d, 'pi': math.pi})\n\n # print('\\n{:10s} {:.12f}'.format('julian_', julian_.getInfo()))\n # print('{:10s} {:.12f}'.format('time_t', time_t.getInfo()))\n # print('{:10s} {:.12f}'.format('j_cen', j_cen.getInfo()))\n # print('{:10s} {:.12f}'.format('lon_sun', lon_sun.getInfo()))\n # print('{:10s} {:.12f}'.format('an_sun', an_sun.getInfo()))\n # print('{:10s} {:.12f}'.format('ecc', ecc.getInfo()))\n # print('{:10s} {:.12f}'.format('ob_ecl', ob_ecl.getInfo()))\n # print('{:10s} {:.12f}'.format('ob_corr', ob_corr.getInfo()))\n # print('{:10s} {:.12f}'.format('var_y', var_y.getInfo()))\n # print('{:10s} {:.12f}'.format('eq_t', eq_t.getInfo()))\n # print('{:10s} {:.12f}'.format('sun_eq', sun_eq.getInfo()))\n # print('{:10s} {:.12f}'.format('sun_true', sun_true.getInfo()))\n # print('{:10s} {:.12f}'.format('sun_app', sun_app.getInfo()))\n # print('{:10s} {:.12f}'.format('d', d.getInfo()))\n\n return d, eq_t, ha_t", 
"def sky(seed=425, th=150, old=False):\n \n # impact parameters\n M = 3e7*u.Msun\n B = 19.95*u.kpc\n #B = 20.08*u.kpc\n V = 190*u.km/u.s\n phi = coord.Angle(0*u.deg)\n th = 150\n theta = coord.Angle(th*u.deg)\n Tenc = 0.01*u.Gyr\n T = 0.5*u.Gyr\n dt = 0.05*u.Myr\n rs = 0*u.pc\n \n old_label = ''\n np.random.seed(seed)\n observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 60*u.deg, 'galcen_coord': coord.SkyCoord(ra=300*u.deg, dec=-90*u.deg, frame='icrs')}\n vobs = {'vcirc': 220*u.km/u.s, 'vlsr': [0, 0, 0]*u.km/u.s}\n wangle = 180*u.deg\n \n if old:\n old_label = '_old_up'\n observer = {'z_sun': -2000.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 50*u.deg, 'galcen_coord': coord.SkyCoord(ra=300*u.deg, dec=-90*u.deg, frame='icrs')}\n vobs = {'vcirc': 220*u.km/u.s, 'vlsr': [0,0,0]*u.km/u.s}\n \n # impact parameters\n M = 3e7*u.Msun\n B = 20.06*u.kpc\n V = 190*u.km/u.s\n phi = coord.Angle(0*u.deg)\n th = 155\n theta = coord.Angle(th*u.deg)\n Tenc = 0.01*u.Gyr\n T = 0.55*u.Gyr\n dt = 0.05*u.Myr\n #dt = 1*u.Myr\n rs = 0*u.pc\n \n # potential parameters\n potential = 3\n Vh = 220*u.km/u.s\n q = 1*u.Unit(1)\n rhalo = 20*u.pc\n par_pot = np.array([Vh.si.value, q.value, rhalo.si.value])\n \n # setup tube\n Nstar = 1400\n wx = 30*u.kpc\n wy = 0*u.pc\n wz = 0*u.pc\n sx = 0*u.km/u.s\n \n xphi = np.linspace(-0.3*np.pi,0.3*np.pi, Nstar)\n xphi0 = np.linspace(-0.1*np.pi, 0.1*np.pi, 1000)\n xphi1 = np.linspace(-0.28*np.pi, -0.1*np.pi, 200)\n xphi2 = np.linspace(0.1*np.pi, 0.32*np.pi, 200)\n xphi = np.concatenate([xphi1, xphi0, xphi2])\n \n xr = 20*u.kpc + np.random.randn(Nstar)*0.0*u.kpc\n x = np.sin(xphi) * xr\n y = np.cos(xphi) * xr\n z = x * 0\n vx = -np.cos(xphi) * Vh# * 0.94\n vy = np.sin(xphi) * Vh #* 0.97\n vz = vx * 0\n # closest to impact\n ienc = np.argmin(np.abs(x))\n \n # generate stream model\n potential_perturb = 1\n par_perturb = np.array([M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal = coord.Galactocentric(stream['x'], **observer)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, stream['v'], **vobs)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # unperturbed stream\n par_perturb = np.array([0*M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream0 = {}\n stream0['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream0['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal0 = coord.Galactocentric(stream0['x'], **observer)\n xeq0 = xgal0.transform_to(coord.ICRS)\n veq0_ = gc.vgal_to_hel(xeq0, stream0['v'], **vobs)\n veq0 = [None] * 3\n veq0[0] = veq0_[0].to(u.mas/u.yr)\n veq0[1] = veq0_[1].to(u.mas/u.yr)\n veq0[2] = veq0_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n R = find_greatcircle(xeq0.ra.deg[::10], xeq0.dec.deg[::10])\n xi0, eta0 = myutils.rotate_angles(xeq0.ra, xeq0.dec, R)\n xi0 = coord.Angle(xi0*u.deg)\n \n # place 
gap at xi~0\n xioff = xi0[ienc]\n xi0 -= xioff\n \n xi, eta = myutils.rotate_angles(xeq.ra, xeq.dec, R)\n xi = coord.Angle(xi*u.deg)\n xi -= xioff\n \n vlabel = ['$\\mu_{\\\\alpha_\\star}$ [mas yr$^{-1}$]','$\\mu_{\\delta}$ [mas yr$^{-1}$]', '$V_r$ [km s$^{-1}$]']\n ylims = [[-0.5, 0.5], [-0.5, 0.5], [-25,25]]\n color = '0.35'\n ms = 4\n \n # plotting\n plt.close()\n fig, ax = plt.subplots(5,1,figsize=(12,12), sharex=True)\n \n plt.sca(ax[0])\n g = Table(fits.getdata('/home/ana/projects/GD1-DR2/output/gd1_members.fits'))\n plt.scatter(g['phi1']+40, g['phi2'], s=g['pmem']*2, c=g['pmem'], cmap=mpl.cm.binary, vmin=0.5, vmax=1.1)\n \n plt.xlim(-45,45)\n plt.ylim(-10,10)\n plt.gca().set_aspect('equal')\n plt.ylabel('$\\phi_1$ [deg]')\n \n plt.sca(ax[1])\n plt.plot(xi.wrap_at(wangle), eta, 'o', mec='none', color=color, ms=ms)\n \n plt.ylabel('$\\phi_1$ [deg]')\n plt.ylim(-10,10)\n plt.gca().set_aspect('equal')\n \n xeqs = [xeq.ra, xeq.dec, xeq.distance.to(u.kpc)]\n for i in range(3):\n plt.sca(ax[i+2])\n \n # interpolate expected kinematics from an unperturbed stream\n vexp = np.interp(xi.wrap_at(wangle), xi0.wrap_at(wangle), veq0[i].value) * veq0[i].unit\n plt.plot(xi.wrap_at(wangle), veq[i]-vexp, 'o', mec='none', color=color, ms=ms)\n \n plt.ylabel('$\\Delta$ {}'.format(vlabel[i]))\n plt.ylim(*ylims[i])\n\n plt.xlabel('$\\phi_2$ [deg]')\n \n plt.tight_layout()\n plt.savefig('../plots/spur_morphology_sky{}.png'.format(old_label))", "def sky_observed(seed=425, th=150, old=False):\n \n # impact parameters\n M = 3e7*u.Msun\n #M = 6e7*u.Msun\n B = 19.95*u.kpc\n V = 190*u.km/u.s\n phi = coord.Angle(0*u.deg)\n theta = coord.Angle(th*u.deg)\n Tenc = 0.01*u.Gyr\n T = 0.5*u.Gyr\n dt = 0.05*u.Myr\n rs = 0*u.pc\n \n old_label = ''\n \n if old:\n old_label = '_old'\n \n # impact parameters\n M = 5e7*u.Msun\n B = 19.8*u.kpc\n V = 210*u.km/u.s\n phi = coord.Angle(0*u.deg)\n th = 150\n theta = coord.Angle(th*u.deg)\n Tenc = 0.05*u.Gyr\n T = 2*u.Gyr\n dt = 0.1*u.Myr\n #dt = 1*u.Myr\n rs = 0*u.pc\n \n # potential parameters\n potential = 3\n Vh = 220*u.km/u.s\n q = 1*u.Unit(1)\n rhalo = 20*u.pc\n par_pot = np.array([Vh.si.value, q.value, rhalo.si.value])\n \n # setup tube\n Nstar = 1400\n wx = 30*u.kpc\n wy = 0*u.pc\n wz = 0*u.pc\n sx = 0*u.km/u.s\n \n np.random.seed(seed)\n observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': 60*u.deg, 'galcen_coord': coord.SkyCoord(ra=300*u.deg, dec=-90*u.deg, frame='icrs')}\n alt_observer = {'z_sun': 27.*u.pc, 'galcen_distance': 8.3*u.kpc, 'roll': -45*u.deg, 'galcen_coord': coord.SkyCoord(ra=300*u.deg, dec=-90*u.deg, frame='icrs')}\n vobs = {'vcirc': 220*u.km/u.s, 'vlsr': [0, 0, 0]*u.km/u.s}\n wangle = 180*u.deg\n \n xphi = np.linspace(-0.3*np.pi,0.3*np.pi, Nstar)\n xphi0 = np.linspace(-0.1*np.pi, 0.1*np.pi, 1000)\n xphi1 = np.linspace(-0.28*np.pi, -0.1*np.pi, 200)\n xphi2 = np.linspace(0.1*np.pi, 0.32*np.pi, 200)\n xphi = np.concatenate([xphi1, xphi0, xphi2])\n \n xr = 20*u.kpc + np.random.randn(Nstar)*0.0*u.kpc\n x = np.sin(xphi) * xr\n y = np.cos(xphi) * xr\n z = x * 0\n vx = -np.cos(xphi) * Vh\n vy = np.sin(xphi) * Vh\n vz = vx * 0\n # closest to impact\n ienc = np.argmin(np.abs(x))\n \n # generate stream model\n potential_perturb = 1\n par_perturb = np.array([M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream = {}\n 
stream['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal = coord.Galactocentric(stream['x'], **observer)\n xeq = xgal.transform_to(coord.ICRS)\n veq_ = gc.vgal_to_hel(xeq, stream['v'], **vobs)\n veq = [None] * 3\n veq[0] = veq_[0].to(u.mas/u.yr)\n veq[1] = veq_[1].to(u.mas/u.yr)\n veq[2] = veq_[2].to(u.km/u.s)\n \n # alternative sky coordinates\n xgal_alt = coord.Galactocentric(stream['x'], **alt_observer)\n xeq_alt = xgal_alt.transform_to(coord.ICRS)\n veq_alt_ = gc.vgal_to_hel(xeq_alt, stream['v'], **vobs)\n veq_alt = [None] * 3\n veq_alt[0] = veq_alt_[0].to(u.mas/u.yr)\n veq_alt[1] = veq_alt_[1].to(u.mas/u.yr)\n veq_alt[2] = veq_alt_[2].to(u.km/u.s)\n \n # unperturbed stream\n par_perturb = np.array([0*M.si.value, 0., 0., 0.])\n x1, x2, x3, v1, v2, v3 = interact.interact(par_perturb, B.si.value, phi.rad, V.si.value, theta.rad, Tenc.si.value, T.si.value, dt.si.value, par_pot, potential, potential_perturb, x.si.value, y.si.value, z.si.value, vx.si.value, vy.si.value, vz.si.value)\n stream0 = {}\n stream0['x'] = (np.array([x1, x2, x3])*u.m).to(u.pc)\n stream0['v'] = (np.array([v1, v2, v3])*u.m/u.s).to(u.km/u.s)\n \n # sky coordinates\n xgal0 = coord.Galactocentric(stream0['x'], **observer)\n xeq0 = xgal0.transform_to(coord.ICRS)\n veq0_ = gc.vgal_to_hel(xeq0, stream0['v'], **vobs)\n veq0 = [None] * 3\n veq0[0] = veq0_[0].to(u.mas/u.yr)\n veq0[1] = veq0_[1].to(u.mas/u.yr)\n veq0[2] = veq0_[2].to(u.km/u.s)\n \n # alternative sky coordinates\n xgal0_alt = coord.Galactocentric(stream0['x'], **alt_observer)\n xeq0_alt = xgal0_alt.transform_to(coord.ICRS)\n veq0_alt_ = gc.vgal_to_hel(xeq0_alt, stream0['v'], **vobs)\n veq0_alt = [None] * 3\n veq0_alt[0] = veq0_alt_[0].to(u.mas/u.yr)\n veq0_alt[1] = veq0_alt_[1].to(u.mas/u.yr)\n veq0_alt[2] = veq0_alt_[2].to(u.km/u.s)\n \n # rotate to native coordinate system\n R = find_greatcircle(xeq0.ra.deg[::10], xeq0.dec.deg[::10])\n xi0, eta0 = myutils.rotate_angles(xeq0.ra, xeq0.dec, R)\n xi0 = coord.Angle(xi0*u.deg)\n \n # place gap at xi~0\n xioff = xi0[ienc]\n xi0 -= xioff\n \n xi, eta = myutils.rotate_angles(xeq.ra, xeq.dec, R)\n xi = coord.Angle(xi*u.deg)\n xi -= xioff\n \n # alternative observer\n R_alt = find_greatcircle(xeq0_alt.ra.deg[::10], xeq0_alt.dec.deg[::10])\n xi0_alt, eta0_alt = myutils.rotate_angles(xeq0_alt.ra, xeq0_alt.dec, R_alt)\n xi0_alt = coord.Angle(xi0_alt*u.deg)\n \n # place gap at xi~0\n xioff_alt = xi0_alt[ienc]\n xi0_alt -= xioff_alt\n \n xi_alt, eta_alt = myutils.rotate_angles(xeq_alt.ra, xeq_alt.dec, R_alt)\n xi_alt = coord.Angle(xi_alt*u.deg)\n xi_alt -= xioff_alt\n \n\n # observed gd1\n g = Table(fits.getdata('/home/ana/projects/GD1-DR2/output/gd1_members.fits'))\n \n vlabel = ['$\\mu_{\\\\alpha_\\star}$ [mas yr$^{-1}$]','$\\mu_{\\delta}$ [mas yr$^{-1}$]', '$V_r$ [km s$^{-1}$]']\n ylims = [[-1.5, 1.5], [-1.5, 1.5], [-30,30]]\n color = '0.35'\n ms = 4\n alpha = 0.7\n \n # plotting\n plt.close()\n fig, ax = plt.subplots(2,4,figsize=(17,8), sharex=True, sharey='col')\n \n plt.sca(ax[0][0])\n plt.plot(xi.wrap_at(wangle), eta, '.', mec='none', color=color, ms=ms, label='Simulated GD-1')\n \n #plt.xlabel('$\\phi_1$ [deg]')\n plt.ylabel('$\\phi_2$ [deg]')\n plt.xlim(-20,20)\n plt.ylim(-5,5)\n \n plt.sca(ax[1][0])\n plt.plot(xi_alt.wrap_at(wangle), eta_alt, '.', mec='none', color=color, ms=ms, label='Simulated GD-1')\n \n plt.xlabel('$\\phi_1$ [deg]')\n plt.ylabel('$\\phi_2$ [deg]')\n plt.xlim(-20,20)\n plt.ylim(-5,5)\n \n xeqs = [xeq.ra, 
xeq.dec, xeq.distance.to(u.kpc)]\n dv = []\n dv_alt = []\n for i in range(3):\n plt.sca(ax[0][i+1])\n \n # interpolate expected kinematics from an unperturbed stream\n vexp = np.interp(xi.wrap_at(wangle), xi0.wrap_at(wangle), veq0[i].value) * veq0[i].unit\n dv += [veq[i]-vexp]\n plt.plot(xi.wrap_at(wangle), dv[i], '.', mec='none', color=color, ms=ms)\n \n plt.ylabel('$\\Delta$ {}'.format(vlabel[i]))\n plt.ylim(*ylims[i])\n \n plt.sca(ax[1][i+1])\n # interpolate expected kinematics from an unperturbed stream\n vexp_alt = np.interp(xi_alt.wrap_at(wangle), xi0_alt.wrap_at(wangle), veq0_alt[i].value) * veq0_alt[i].unit\n dv_alt += [veq_alt[i]-vexp_alt]\n plt.plot(xi_alt.wrap_at(wangle), dv_alt[i], '.', mec='none', color=color, ms=ms)\n \n plt.ylabel('$\\Delta$ {}'.format(vlabel[i]))\n plt.ylim(*ylims[i])\n plt.xlabel('$\\phi_1$ [deg]')\n \n # find closest model star to the gd-1 stars\n Ngd1 = len(g)\n p = np.array([g['phi1']+40, g['phi2']])\n q = np.array([xi.wrap_at(wangle).to(u.deg).value, eta])\n idmin = np.empty(Ngd1, dtype='int')\n \n for i in range(Ngd1):\n dist = np.sqrt((p[0,i]-q[0])**2 + (p[1,i]-q[1])**2)\n idmin[i] = np.argmin(dist)\n\n # mask stream, mask spur\n onstream_mask = ((g['phi1']<-30.5) & (g['phi1']>-35.5) & (g['phi2']>-0.2) & (g['phi2']<0.2))\n spur_mask = ((g['phi1']<-30.5) & (g['phi1']>-35.5) & (g['phi2']>1) & (g['phi2']<1.4))\n all_mask = np.ones(Ngd1, dtype='bool')\n \n # plot scaled data uncertainties on model pm drawn from a corresponding obs uncertainty\n np.random.seed(seed+1)\n fgaia = np.sqrt(2/5)\n print(2/5, fgaia)\n phi1 = xi[idmin].wrap_at(wangle).to(u.deg).value\n phi2 = eta[idmin]\n pmra = dv[0][idmin] + g['pmra_error']*u.mas/u.yr*np.random.randn(Ngd1) * fgaia\n pmdec = dv[1][idmin] + g['pmdec_error']*u.mas/u.yr*np.random.randn(Ngd1) * fgaia\n \n colors = ['tab:red', 'tab:blue', '0.4']\n labels = ['Stream', 'Spur']\n labels = ['Gaia DR4', '']\n \n for e, mask in enumerate([onstream_mask, spur_mask]):\n plt.sca(ax[0][0])\n plt.plot(phi1[mask], phi2[mask], 'o', color=colors[e], mec='none', alpha=alpha, label=labels[e])\n \n plt.sca(ax[0][1])\n plt.errorbar(phi1[mask], pmra[mask].value, yerr=g['pmra_error'][mask]*fgaia, fmt='o', color=colors[e], mec='none', alpha=alpha)\n \n plt.sca(ax[0][2])\n plt.errorbar(phi1[mask], pmdec[mask].value, yerr=g['pmdec_error'][mask]*fgaia, fmt='o', color=colors[e], mec='none', alpha=alpha)\n \n print(np.sqrt(np.sum(g['pmra_error'][mask]**2))/np.sum(mask))\n print(np.sqrt(np.sum(g['pmdec_error'][mask]**2))/np.sum(mask))\n\n Nfield = 2\n p2 = np.array([np.array([-32.77,-32.77])+40, [1.167,0]])\n q = np.array([xi.wrap_at(wangle).to(u.deg).value, eta])\n idmin2 = np.empty(Nfield, dtype='int')\n \n for i in range(Nfield):\n dist = np.sqrt((p2[0,i]-q[0])**2 + (p2[1,i]-q[1])**2)\n idmin2[i] = np.argmin(dist)\n \n pmerr = np.array([0.0848, 0.0685])\n \n np.random.seed(seed+2)\n phi1 = xi[idmin2].wrap_at(wangle).to(u.deg).value\n phi2 = eta[idmin2]\n pmra = dv[0][idmin2].value + pmerr*np.random.randn(Nfield)\n pmdec = dv[1][idmin2].value + pmerr*np.random.randn(Nfield)\n \n plt.sca(ax[0][0])\n plt.errorbar(phi1, phi2, color='k', fmt='o', label='HST')\n \n plt.sca(ax[0][1])\n plt.errorbar(phi1, pmra, yerr=pmerr, color='k', fmt='o')\n \n plt.sca(ax[0][2])\n plt.errorbar(phi1, pmdec, yerr=pmerr, color='k', fmt='o')\n \n \n ##############\n # alt observer\n \n # find closest model star to the gd-1 stars\n Ngd1 = len(g)\n p = np.array([g['phi1']+40, g['phi2']])\n q = np.array([xi_alt.wrap_at(wangle).to(u.deg).value, eta_alt])\n idmin = 
np.empty(Ngd1, dtype='int')\n \n for i in range(Ngd1):\n dist = np.sqrt((p[0,i]-q[0])**2 + (p[1,i]-q[1])**2)\n idmin[i] = np.argmin(dist)\n\n # mask stream, mask spur\n onstream_mask = ((g['phi1']<-30.5) & (g['phi1']>-35.5) & (g['phi2']>-0.2) & (g['phi2']<0.2))\n spur_mask = ((g['phi1']<-30.5) & (g['phi1']>-35.5) & (g['phi2']>1) & (g['phi2']<1.4))\n all_mask = np.ones(Ngd1, dtype='bool')\n \n # plot scaled data uncertainties on model pm drawn from a corresponding obs uncertainty\n #np.random.seed(seed+3)\n phi1 = xi_alt[idmin].wrap_at(wangle).to(u.deg).value\n phi2 = eta_alt[idmin]\n pmra = dv_alt[0][idmin] + g['pmra_error']*u.mas/u.yr*np.random.randn(Ngd1) * fgaia\n pmdec = dv_alt[1][idmin] + g['pmdec_error']*u.mas/u.yr*np.random.randn(Ngd1) * fgaia\n \n colors = ['tab:red', 'tab:blue', '0.4']\n labels = ['Gaia DR4', '']\n \n for e, mask in enumerate([onstream_mask, spur_mask]):\n plt.sca(ax[1][0])\n plt.plot(phi1[mask], phi2[mask], 'o', color=colors[e], mec='none', alpha=alpha, label=labels[e])\n \n plt.sca(ax[1][1])\n plt.errorbar(phi1[mask], pmra[mask].value, yerr=g['pmra_error'][mask]*fgaia, fmt='o', color=colors[e], mec='none', alpha=alpha)\n \n plt.sca(ax[1][2])\n plt.errorbar(phi1[mask], pmdec[mask].value, yerr=g['pmdec_error'][mask]*fgaia, fmt='o', color=colors[e], mec='none', alpha=alpha)\n \n Nfield = 2\n p2 = np.array([np.array([-32.77,-32.77])+40, [1.167,0]])\n q = np.array([xi_alt.wrap_at(wangle).to(u.deg).value, eta_alt])\n idmin2 = np.empty(Nfield, dtype='int')\n \n for i in range(Nfield):\n dist = np.sqrt((p2[0,i]-q[0])**2 + (p2[1,i]-q[1])**2)\n idmin2[i] = np.argmin(dist)\n \n pmerr = np.array([0.11, 0.08])\n \n np.random.seed(seed+6)\n phi1 = xi_alt[idmin2].wrap_at(wangle).to(u.deg).value\n phi2 = eta_alt[idmin2]\n pmra = dv_alt[0][idmin2].value + pmerr*np.random.randn(Nfield)\n pmdec = dv_alt[1][idmin2].value + pmerr*np.random.randn(Nfield)\n \n plt.sca(ax[1][0])\n plt.errorbar(phi1, phi2, color='k', fmt='o', label='HST')\n \n plt.sca(ax[1][1])\n plt.errorbar(phi1, pmra, yerr=pmerr, color='k', fmt='o')\n \n plt.sca(ax[1][2])\n plt.errorbar(phi1, pmdec, yerr=pmerr, color='k', fmt='o')\n \n \n plt.sca(ax[0][0])\n plt.text(0.1,0.85, '$\\\\theta_{roll}$ = 60$^\\circ$', fontsize='small', transform=plt.gca().transAxes)\n\n plt.sca(ax[1][0])\n plt.text(0.1,0.85, '$\\\\theta_{roll}$ = -45$^\\circ$', fontsize='small', transform=plt.gca().transAxes)\n plt.legend(fontsize='small', loc=3, handlelength=0.2)\n \n plt.suptitle('Expected astrometric performance', fontsize='medium')\n plt.tight_layout(rect=[0,0,1,0.94])\n plt.savefig('../plots/astrometric_performance.png')", "def omega_sun_snodgrass90(lat):\n return differential_rotation(lat, 14.71, -2.39, -1.78)", "def _altaz_rotation(self, jd):\n R_lon = rot_z(- self.longitude.radians - jd.gast * TAU / 24.0)\n return einsum('ij...,jk...,kl...->il...', self.R_lat, R_lon, jd.M)", "def differential_rotation(lat, A, B, C):\n \n lat_deg = lat * np.pi/180.\n return A + B * np.sin(lat_deg)**2 + C * np.sin(lat_deg)**4", "def _make(self):\n\t\tself.scene.camera = self.camera\n\t\tself.camera.rotation_euler[0] = np.radians(np.random.randint(40, 100) +\n\t\t np.random.random())\n\t\tself.camera.rotation_euler[2] = np.radians(np.random.randint(0, 360) +\n\t\t np.random.random())\n\t\tprint([np.degrees(x) for x in self.camera.rotation_euler])", "def earth_tide(theta, lamda, gtime):\n\n global dsz, dcz, dsl, dcl, ssz, scz, ssl, scl, dpar, sdist # bpos common block\n global h, k, l # love common block\n h = [0.6114, 0.2891, 0.175]\n k = [0.304, 
0.09421, 0.043]\n l = [0.0832, 0.0145, 0.0103]\n\n global azt, azs # azimut common block\n global etmut # tdiff common block\n global moon # sunny common block\n moon = 0\n # hardwire these - you can only send it ONE droptime\n deltat = 1\n NPT = 1\n\n temp_time = num2date(gtime)\n\n YY = temp_time.year\n MO = temp_time.month\n DD = temp_time.day\n HH = temp_time.hour\n MM = temp_time.minute\n SS = temp_time.second\n # Initialize variables\n irl = 1\n iflag = 0\n ntotl = 1\n iget = [0, 0, 0, 0, 0, 0, 0] # ' !!!\n ispc = [0, 0, 0, 0] # ' !!!\n ntw = [1, 0, 0] # ' !!!\n ioptn = 't'\n ielement = 0\n # \tdata statements for input and output unit numbers (on terminal I/O)\n inun = 5\n ioun = 6\n nptpb = 6\n\n yr1 = YY - 1900\n day1 = date2num(datetime(YY, MO, DD))\n # \tfind times in hours from 0 hr, 1 jan 1900\n # matlab:\n ts = (\n SS / 3600\n + MM / 60\n + HH\n + 24 * (day1 - 1)\n + 8760 * yr1\n + 24 * np.fix((yr1 - 1) / 4)\n )\n # python:\n dj = date_to_julian_day(datetime(YY, MO, DD))\n djref = date_to_julian_day(datetime(1899, 12, 31, 0, 0, 0))\n delta_dj = (\n dj - djref\n ) # difference in days from current date (0hr) to 0hr, 1 jan 1900\n delta_djhr = float(delta_dj) * 24.0 + HH - 12.0 + MM / 60.0 + SS / 3600.0\n te = ts + (NPT - 1) * deltat / 3600\n d = deltat / 3600\n # terms=(te-ts)/d + 1\n terms = NPT\n\n # done asking questions - begin execution\n i = 1\n tt = ts\n sph(theta, lamda, 0)\n etmut = 41.184 + yr1 - 70\n # matlab:\n # t = (tt+12 + (etmut/3600))/876600\n t = (delta_djhr + etmut / 3600) / 876600\n # t is ephemeris time in julian centuries from 12 hr 0 jan 1900\n ephem(t)\n\n # calculate normalized gravity tides\n [grav, tilt, strain, gdc] = elastd(ntw)\n\n gravtide = 1.0e5 * grav\n # convert m/s² to mgal: 1m/s² = 100 gal = 100 000 mgal\n\n iflag = 1\n\n iterms = np.fix(terms)\n i = 1\n return gravtide", "def galaxy():\n rot_ang = 1\n pol_ang = 1\n\n\n time_array = [datetime.datetime(2017, 5, 25, 2, 0),\n datetime.datetime(2017, 5, 26, 7, 0),\n #~ datetime.datetime(2017, 5, 28, 1, 0),\n #~ datetime.datetime(2017, 5, 30, 8, 0),\n datetime.datetime(2017, 6, 4, 2, 0)]\n\n lfdic = {1:{'name':'LI', 'lat':[26,33,19.676], 'long':[97,26,31.174], 't_offset':6.496132851851852},\n 2:{'name':'LII', 'lat':[34,4,43.497], 'long':[107,37,5.819], 't_offset':7.174552203703703},\n 3:{'name':'LIII', 'lat':[38,25,59.0], 'long':[79,50,23.0], 't_offset':5.322648148148148},\n 4:{'name':'LIV', 'lat':[34,12,3.0], 'long':[118,10,18.0], 't_offset':7.87811111111111}}\n lfs = lfdic[4]\n long_radians = (lfs['long'][0] + lfs['long'][1]/60.0 + lfs['long'][2]/3600.0)*np.pi/180.0\n\n LoFASM = station(lfs['name'],lfs['lat'],lfs['long'],FOV_color='b',\n time='',frequency=20.0,one_ring='inner',\n rot_angle=rot_ang,pol_angle=pol_ang)\n innerNS_FOV = 0.61975795698554226 #LoFASM.lofasm.Omega()\n inner_conversion_NS = np.divide((np.power(np.divide(3.0*1.0e8,45.0e6),2)),(innerNS_FOV))\n\n print('Stage 1/2 Done.')\n\n powe = np.multiply(LoFASM.calculate_gpowervslstarray(time_array),inner_conversion_NS)\n power = 10*np.log10(np.array(powe))\n print('Stage 2/2 Done.')\n\n return power", "def _orientation(location, time='now'):\n obstime = parse_time(time)\n\n # Define the frame where its Z axis is aligned with local zenith\n local_frame = AltAz(obstime=obstime, location=location)\n\n return _sun_north_angle_to_z(local_frame)", "def vect_from_lspe_to_earth (self, vector, time):\n\n position_coord = self.lspe_coordinates (time)\n # The following code has been optimized:\n # position_vector = coord_to_pointing 
(position_coord)\n # angle = np.arccos (np.dot (self.spin_axis_lspe (time), position_vector))\n # and is therefore a one-line assignment: \n angle = np.pi * 0.5 - position_coord[0]\n rot_axis = np.array ([-np.sin (position_coord[1]),\n np.cos (position_coord[1]),\n 0])\n\n return rotate (vector, angle, rot_axis)", "def spin_axis_earth (self, time):\n\n return self.vect_from_lspe_to_earth (self.spin_axis_lspe (time),\n time)", "def solar_azimuth(self, dateandtime, latitude, longitude):\n \n if latitude > 89.8:\n latitude = 89.8\n \n if latitude < -89.8:\n latitude = -89.8\n \n zone = -dateandtime.utcoffset().seconds / 3600.0\n utc_datetime = dateandtime.astimezone(pytz.utc)\n timenow = utc_datetime.hour + (utc_datetime.minute / 60.0) + (utc_datetime.second / 3600)\n\n JD = self._julianday(dateandtime.day, dateandtime.month, dateandtime.year)\n t = self._jday_to_jcentury(JD + timenow / 24.0)\n theta = self._sun_declination(t)\n Etime = self._eq_of_time(t)\n \n eqtime = Etime\n solarDec = theta # in degrees\n \n solarTimeFix = eqtime - (4.0 * longitude) + (60 * zone)\n trueSolarTime = dateandtime.hour * 60.0 + dateandtime.minute + dateandtime.second / 60.0 + solarTimeFix\n # in minutes\n \n while trueSolarTime > 1440:\n trueSolarTime = trueSolarTime - 1440\n \n hourangle = trueSolarTime / 4.0 - 180.0\n # Thanks to Louis Schwarzmayr for the next line:\n if hourangle < -180:\n hourangle = hourangle + 360.0\n \n harad = radians(hourangle)\n \n csz = sin(radians(latitude)) * sin(radians(solarDec)) + \\\n cos(radians(latitude)) * cos(radians(solarDec)) * cos(harad)\n \n if csz > 1.0:\n csz = 1.0\n elif csz < -1.0:\n csz = -1.0\n \n zenith = degrees(acos(csz))\n \n azDenom = (cos(radians(latitude)) * sin(radians(zenith)))\n \n if (abs(azDenom) > 0.001):\n azRad = ((sin(radians(latitude)) * cos(radians(zenith))) - sin(radians(solarDec))) / azDenom\n \n if abs(azRad) > 1.0:\n if azRad < 0:\n azRad = -1.0\n else:\n azRad = 1.0\n \n azimuth = 180.0 - degrees(acos(azRad))\n \n if hourangle > 0.0:\n azimuth = -azimuth\n else:\n if latitude > 0.0:\n azimuth = 180.0\n else:\n azimuth = 0#\n \n if azimuth < 0.0:\n azimuth = azimuth + 360.0\n \n return azimuth", "def set_earth(inclination, phases):\n cosi, sini = np.cos(inclination), np.sin(inclination)\n cosp = np.cos(2*np.pi*phases)\n sinp = np.sin(2*np.pi*phases)\n return CartesianRepresentation(sini*cosp, -sini*sinp, cosi)", "def return_obs_RA_DEC():\n return SkyCoord('03h 32m 30s', '10d 00m 24s')", "def test_v23_to_sky():\n ra_ref = 165 # in deg\n dec_ref = 54 # in deg\n v2_ref = -503.654472 / 3600 # in deg\n v3_ref = -318.742464 / 3600 # in deg\n r0 = 37 # in deg\n\n v2 = 210 # in deg\n v3 = -75 # in deg\n expected_ra_dec = (107.12810484789563, -35.97940247128502) # in deg\n angles = [v2_ref, -v3_ref, r0, dec_ref, -ra_ref]\n axes = \"zyxyz\"\n\n rot = RotationSequence3D(angles, axes_order=axes)\n v2s = SphericalToCartesian() | rot | CartesianToSpherical()\n radec = v2s(v2, v3)\n assert_allclose(radec, expected_ra_dec, atol=1e-10)", "def solar_elevation(self, dateandtime, latitude, longitude):\n \n if latitude > 89.8:\n latitude = 89.8\n \n if latitude < -89.8:\n latitude = -89.8\n\n zone = -dateandtime.utcoffset().seconds / 3600.0\n utc_datetime = dateandtime.astimezone(pytz.utc)\n timenow = utc_datetime.hour + (utc_datetime.minute / 60.0) + (utc_datetime.second / 3600)\n \n JD = self._julianday(dateandtime.day, dateandtime.month, dateandtime.year)\n t = self._jday_to_jcentury(JD + timenow / 24.0)\n theta = self._sun_declination(t)\n Etime = 
self._eq_of_time(t)\n \n eqtime = Etime\n solarDec = theta # in degrees\n \n solarTimeFix = eqtime - (4.0 * longitude) + (60 * zone)\n trueSolarTime = dateandtime.hour * 60.0 + dateandtime.minute + dateandtime.second / 60.0 + solarTimeFix\n # in minutes\n \n while trueSolarTime > 1440:\n trueSolarTime = trueSolarTime - 1440\n \n hourangle = trueSolarTime / 4.0 - 180.0\n # Thanks to Louis Schwarzmayr for the next line:\n if hourangle < -180:\n hourangle = hourangle + 360.0\n \n harad = radians(hourangle)\n \n csz = sin(radians(latitude)) * sin(radians(solarDec)) + \\\n cos(radians(latitude)) * cos(radians(solarDec)) * cos(harad)\n \n if csz > 1.0:\n csz = 1.0\n elif csz < -1.0:\n csz = -1.0\n \n zenith = degrees(acos(csz))\n \n azDenom = (cos(radians(latitude)) * sin(radians(zenith)))\n \n if (abs(azDenom) > 0.001):\n azRad = ((sin(radians(latitude)) * cos(radians(zenith))) - sin(radians(solarDec))) / azDenom\n \n if abs(azRad) > 1.0:\n if azRad < 0:\n azRad = -1.0\n else:\n azRad = 1.0\n \n azimuth = 180.0 - degrees(acos(azRad))\n \n if hourangle > 0.0:\n azimuth = -azimuth\n else:\n if latitude > 0.0:\n azimuth = 180.0\n else:\n azimuth = 0\n \n if azimuth < 0.0:\n azimuth = azimuth + 360.0\n \n exoatmElevation = 90.0 - zenith\n\n if exoatmElevation > 85.0:\n refractionCorrection = 0.0\n else:\n te = tan(radians(exoatmElevation))\n if exoatmElevation > 5.0:\n refractionCorrection = 58.1 / te - 0.07 / (te * te * te) + 0.000086 / (te * te * te * te * te)\n elif exoatmElevation > -0.575:\n step1 = (-12.79 + exoatmElevation * 0.711)\n step2 = (103.4 + exoatmElevation * (step1))\n step3 = (-518.2 + exoatmElevation * (step2))\n refractionCorrection = 1735.0 + exoatmElevation * (step3)\n else:\n refractionCorrection = -20.774 / te\n \n refractionCorrection = refractionCorrection / 3600.0\n \n solarzen = zenith - refractionCorrection\n \n solarelevation = 90.0 - solarzen\n \n return solarelevation", "def getRotationTrajectory(self) -> SO3Trajectory:\n return SO3Trajectory(self.times,[m[:9] for m in self.milestones])", "def rotationDetermination(self):\n \n for index, row in enumerate(self.magdata):\n if index > 11 and index < (len(self.magdata) - 12):\n br1 = [row[0] for row in self.magdata[(index-12):(index-2)]]\n bt1 = [row[1] for row in self.magdata[(index-12):(index-2)]]\n bn1 = [row[2] for row in self.magdata[(index-12):(index-2)]]\n b1 = np.matrix((np.mean(br1), np.mean(bt1), np.mean(bn1)))\n\n br2 = [row[0] for row in self.magdata[(index+2):(index+12)]]\n bt2 = [row[1] for row in self.magdata[(index+2):(index+12)]]\n bn2 = [row[2] for row in self.magdata[(index+2):(index+12)]]\n b2 = np.matrix((np.mean(br2), np.mean(bt2), np.mean(bn2)))\n\n theta = np.arccos(np.dot(b1,b2.T)/(np.linalg.norm(b1)*np.linalg.norm(b2)))*180/np.pi\n\n self.detections.rotations.append(theta[0,0])\n self.detections.rotationTimeTags.append(self.timestamps[index])\n \n\n## self.b1 = b1\n## self.b2 = b2\n self.detections.rotationBoundary=[]\n if len(self.detections.rotations) != 0:\n \n for index, theta in enumerate(self.detections.rotations):\n if index > 0:\n if theta > 30 and self.detections.rotations[index-1] < 30:\n self.detections.rotationBoundary.append(self.detections.rotationTimeTags[index])\n if index < len(self.detections.rotations)-1:\n if theta > 30 and self.detections.rotations[index+1] < 30:\n self.detections.rotationBoundary.append(self.detections.rotationTimeTags[index])", "def ROT(self):\n # The maximum update amount for these element\n # no dt since YawRate is already mult by dt\n YawRate_DELTA = 
(self.YawRate_LIMITS[1] -\n self.YawRate_LIMITS[0]) / (2.0)\n\n # Add either positive or negative or zero delta for each\n # NOTE: 'High' is open bracket ) so the max is 1\n YawRate_DIRECTION = np.random.randint(-1, 2, 1)[0]\n\n # Now, modify modifiable params AND CLIP\n self.YawRate += YawRate_DIRECTION * YawRate_DELTA\n self.YawRate = np.clip(self.YawRate, self.YawRate_LIMITS[0],\n self.YawRate_LIMITS[1])", "def update(self, time):\n\n delta_J2000 = self.time - constant.J2000_DATE\n n_days_J2000 = delta_J2000.days + delta_J2000.seconds/86400\n\n mean_lon_sun = 280.460 + 0.9856474*n_days_J2000\n mean_lon_sun %= 360.0\n mean_lon_sun *= constant.DEG_TO_RAD\n\n mean_anomaly_sun = 357.528 + 0.9856003*n_days_J2000\n mean_anomaly_sun %= 360.0\n mean_anomaly_sun *= constant.DEG_TO_RAD\n\n ecliptic_lon_sun = ( mean_lon_sun/constant.DEG_TO_RAD +\n 1.915*math.sin(mean_anomaly_sun) +\n 0.020*math.sin(2.0*mean_anomaly_sun) )\n ecliptic_lon_sun *= constant.DEG_TO_RAD\n\n dist_earth_to_sun = (1.00014 -\n 0.01671*math.cos(mean_anomaly_sun) -\n 0.00014*math.cos(2.0*mean_anomaly_sun) )\n dist_earth_to_sun *= constant.AU_TO_KM\n\n obliquity_ecliptic = 23.439 - 0.0000004*n_days_J2000\n obliquity_ecliptic *= constant.DEG_TO_RAD\n\n x_J2000_sun = math.cos(ecliptic_lon_sun)\n y_J2000_sun = math.cos(obliquity_ecliptic)*math.sin(ecliptic_lon_sun)\n z_J2000_sun = math.sin(obliquity_ecliptic)*math.sin(ecliptic_lon_sun)\n\n self.direction = vt.Vector([x_J2000_sun, y_J2000_sun, z_J2000_sun])\n self.distance = dist_earth_to_sun\n self.time = time", "def equation_of_time(cls, date):\n offset = cls.sine(cls.mean_position(date, cls.ANOMALISTIC_YEAR))\n equation_sun = (offset * angle(57, 18, 0) * (14/360 - (abs(offset) / 1080)))\n return ((cls.daily_motion(date) / 360) * (equation_sun / 360) * cls.SIDEREAL_YEAR)", "def get_rot_dt(self) -> WAQuaternion:\n pass", "def timerCallback(self,evprent):\n self._odom_list.waitForTransform('map', 'base_footprint', rospy.Time(0), rospy.Duration(1.0))\n (position, orientation) = self._odom_list.lookupTransform('map','base_footprint', rospy.Time(0)) #finds the position and oriention of two objects relative to each other (hint: this returns arrays, while Pose uses lists)\n self._current.position.x = position[0]\n self._current.position.y = position[1]\n\n self._current.orientation.x = orientation[0]\n self._current.orientation.y = orientation[1]\n self._current.orientation.z = orientation[2]\n self._current.orientation.w = orientation[3]\n q = [self._current.orientation.x,\n self._current.orientation.y,\n self._current.orientation.z,\n self._current.orientation.w] # quaternion nonsense\n\n (roll, pitch, yaw) = euler_from_quaternion(q)", "def pointing_dir_earth (self, time):\n\n return self.vect_from_lspe_to_earth (self.pointing_dir_lspe (time),\n time)", "def test_get_egovehicle_yaw_cam() -> None:\n sample_log_dir = _TEST_DATA_ROOT / \"sensor_dataset_logs\" / \"test_log\"\n\n # clockwise around the top of the car, in degrees.\n expected_ego_yaw_cam_deg_dict = {\n \"ring_rear_left\": 153.2,\n \"ring_side_left\": 99.4,\n \"ring_front_left\": 44.7,\n \"ring_front_center\": 0.4,\n \"ring_front_right\": -44.9,\n \"ring_side_right\": -98.9,\n \"ring_rear_right\": -152.9,\n }\n\n for cam_enum in list(RingCameras):\n cam_name = cam_enum.value\n pinhole_camera = PinholeCamera.from_feather(\n log_dir=sample_log_dir, cam_name=cam_name\n )\n\n ego_yaw_cam_deg = np.rad2deg(pinhole_camera.egovehicle_yaw_cam_rad)\n assert np.isclose(\n ego_yaw_cam_deg, expected_ego_yaw_cam_deg_dict[cam_name], 
atol=0.1\n )\n\n np.rad2deg(pinhole_camera.fov_theta_rad)", "def Rotation_EQD_HOR(time, observer):\n sinlat = math.sin(math.radians(observer.latitude))\n coslat = math.cos(math.radians(observer.latitude))\n sinlon = math.sin(math.radians(observer.longitude))\n coslon = math.cos(math.radians(observer.longitude))\n uze = [coslat * coslon, coslat * sinlon, sinlat]\n une = [-sinlat * coslon, -sinlat * sinlon, coslat]\n uwe = [sinlon, -coslon, 0.0]\n spin_angle = -15.0 * _sidereal_time(time)\n uz = _spin(spin_angle, uze)\n un = _spin(spin_angle, une)\n uw = _spin(spin_angle, uwe)\n return RotationMatrix([\n [un[0], uw[0], uz[0]],\n [un[1], uw[1], uz[1]],\n [un[2], uw[2], uz[2]],\n ])", "def rotate_ray_center(gph_data,print_results=False,print_plot=False,gph_number='No Info'):\n import numpy\n import matplotlib.pyplot as plt\n from obspy.signal.polarization import particle_motion_odr\n from obspy.signal.rotate import rotate_zne_lqt\n \n rotated_gph_data=gph_data.copy()\n time=numpy.linspace(gph_data[0].stats.starttime.second, gph_data[0].stats.endtime.second,gph_data[0].stats.npts)\n \n # Calculating angles for non-null arrays\n if ((numpy.sum(gph_data[0].data) !=0) and (numpy.sum(gph_data[1].data)!=0) and (numpy.sum(gph_data[2].data)!=0)):\n odr_azimuth, odr_incidence, odr_azi_error, odr_inc_error=particle_motion_odr(gph_data)\n\n # Correcting angles\n inclination=90-odr_incidence\n if odr_azimuth <180:\n ba=odr_azimuth+180\n else:\n ba=odr_azimuth-180\n\n # Print results for debugging\n if print_results==True:\n print('Gph_number: '+str(gph_number)+'. BA: ',str(round(ba,2)),'. Inclination: ', str(round(inclination,2)))\n \n # Signal Rotation\n rotated_gph_data[0].data, rotated_gph_data[1].data, rotated_gph_data[2].data=rotate_zne_lqt(gph_data[0].data, gph_data[1].data, gph_data[2].data, ba=ba, inc=inclination)\n else:\n print('Null gph signal identified')\n \n if print_plot==True:\n plt.figure(1,figsize=(8,5))\n \n plt.subplot(3,2,1)\n plt.title('Original data- Gph '+str(gph_number))\n plt.ylabel('Z', weight='bold')\n plt.plot(time,gph_data[0].data,lw=0.5,c='r')\n \n plt.subplot(3,2,3)\n plt.ylabel('h1', weight='bold')\n plt.plot(time,gph_data[1].data,lw=0.5,c='g')\n \n plt.subplot(3,2,5)\n plt.ylabel('h2', weight='bold')\n plt.plot(time,gph_data[2].data,lw=0.5,c='b')\n \n plt.subplot(3,2,2)\n plt.title('Rotated. BA: '+str(round(ba,2))+'. 
Inc: '+str(round(inclination,2)))\n plt.ylabel('L', weight='bold')\n plt.plot(time,rotated_gph_data[0].data,c='k',lw=0.5)\n \n plt.subplot(3,2,4)\n plt.ylabel('Q', weight='bold')\n plt.plot(time,rotated_gph_data[1].data,lw=0.5,c='k')\n \n plt.subplot(3,2,6)\n plt.ylabel('T', weight='bold')\n plt.plot(time,rotated_gph_data[2].data,lw=0.5,c='k')\n plt.tight_layout()\n \n plt.show()\n plt.close()\n \n return(rotated_gph_data)", "def euler_timestep_rotation(sphere_positions, sphere_rotations, new_sphere_positions, new_sphere_rotations, Oa_out, timestep):\r\n\r\n for i in range(sphere_positions.shape[0]):\r\n R0 = sphere_positions[i]\r\n O = (Oa_out[i][0] ** 2 + Oa_out[i][1] ** 2 + Oa_out[i][2] ** 2) ** 0.5\r\n\r\n ''' To rotate from basis (x,y,z) to (X,Y,Z), where x,y,z,X,Y,Z are unit vectors,\r\n you just need to multiply by the matrix\r\n ( X_x Y_x Z_x )\r\n ( X_y Y_y Z_y ),\r\n ( X_z Y_z Z_z )\r\n where X_x means the x-component of X.\r\n Our Z is Omega = o_spheres[i], so we need to make it into a complete basis.\r\n To do that we pick a unit vector different to Omega (either zhat or xhat depending on Omega)\r\n and use (Omega x zhat, Omega x (Omega x zhat), zhat) as our basis (X,Y,Z).\r\n That's it! [Only took me three days...]\r\n '''\r\n\r\n if np.array_equal(Oa_out[i], [0, 0, 0]):\r\n rot_matrix = np.identity(3)\r\n else:\r\n Otest = (abs(Oa_out[i] / O)).astype('float')\r\n perp1 = [0, 0, 1] if np.allclose(Otest, [1, 0, 0]) else [1, 0, 0]\r\n rot_matrix = np.array([np.cross(Oa_out[i], perp1) / O, np.cross(Oa_out[i], np.cross(Oa_out[i], perp1)) / O ** 2, Oa_out[i] / O]).transpose()\r\n\r\n for j in range(2):\r\n ''' rb0 is the position (\"r\") of the endpoint of the pointy rotation vector in the\r\n external (x,y,z) frame (\"b\") at the beginning of this process (\"0\") '''\r\n rb0 = sphere_rotations[i, j]\r\n\r\n ''' rbdashdash0_xyz is the position of the same endpoint in the frame of the rotating sphere (\"b''\"),\r\n\t\t\t\t\t\twhich we set to have the z-axis=Omega axis. It's in Cartesian coordinates. 
'''\r\n rbdashdash0_xyz = np.dot(linalg.inv(rot_matrix), (rb0 - R0))\r\n x0 = rbdashdash0_xyz[0]\r\n y0 = rbdashdash0_xyz[1]\r\n z0 = rbdashdash0_xyz[2]\r\n\r\n r0 = (x0 ** 2 + y0 ** 2 + z0 ** 2) ** 0.5\r\n t0 = np.arccos(z0 / r0)\r\n p0 = 0 if (x0 == 0 and y0 == 0) else np.arctan2(y0, x0)\r\n r = r0\r\n t = t0\r\n p = euler_timestep(p0, O, timestep)\r\n\r\n x = r * np.sin(t) * np.cos(p)\r\n y = r * np.sin(t) * np.sin(p)\r\n z = r * np.cos(t)\r\n rbdashdash_xyz = np.array([x, y, z])\r\n R = new_sphere_positions[i]\r\n rb = R + np.dot(rot_matrix, rbdashdash_xyz)\r\n new_sphere_rotations[i, j] = rb\r\n return new_sphere_rotations", "def _P(time='now'):\n obstime = parse_time(time)\n\n # Define the frame where its Z axis is aligned with geocentric north\n geocentric = PrecessedGeocentric(equinox=obstime, obstime=obstime)\n\n return _sun_north_angle_to_z(geocentric)", "def _sun_north_angle_to_z(frame):\n # Find the Sun center in HGS at the frame's observation time(s)\n sun_center_repr = SphericalRepresentation(0*u.deg, 0*u.deg, 0*u.km)\n # The representation is repeated for as many times as are in obstime prior to transformation\n sun_center = SkyCoord(sun_center_repr._apply('repeat', frame.obstime.size),\n frame=HGS, obstime=frame.obstime)\n\n # Find the Sun north in HGS at the frame's observation time(s)\n # Only a rough value of the solar radius is needed here because, after the cross product,\n # only the direction from the Sun center to the Sun north pole matters\n sun_north_repr = SphericalRepresentation(0*u.deg, 90*u.deg, 690000*u.km)\n # The representation is repeated for as many times as are in obstime prior to transformation\n sun_north = SkyCoord(sun_north_repr._apply('repeat', frame.obstime.size),\n frame=HGS, obstime=frame.obstime)\n\n # Find the Sun center and Sun north in the frame's coordinate system\n sky_normal = sun_center.transform_to(frame).data.to_cartesian()\n sun_north = sun_north.transform_to(frame).data.to_cartesian()\n\n # Use cross products to obtain the sky projections of the two vectors (rotated by 90 deg)\n sun_north_in_sky = sun_north.cross(sky_normal)\n z_in_sky = CartesianRepresentation(0, 0, 1).cross(sky_normal)\n\n # Normalize directional vectors\n sky_normal /= sky_normal.norm()\n sun_north_in_sky /= sun_north_in_sky.norm()\n z_in_sky /= z_in_sky.norm()\n\n # Calculate the signed angle between the two projected vectors\n cos_theta = sun_north_in_sky.dot(z_in_sky)\n sin_theta = sun_north_in_sky.cross(z_in_sky).dot(sky_normal)\n angle = np.arctan2(sin_theta, cos_theta).to('deg')\n\n # If there is only one time, this function's output should be scalar rather than array\n if angle.size == 1:\n angle = angle[0]\n\n return Angle(angle)", "def test_polarized_source_visibilities(time_location):\n time0, array_location = time_location\n\n ha_off = 1 / 6.0\n ha_delta = 0.1\n time_offsets = np.arange(-ha_off, ha_off + ha_delta, ha_delta)\n zero_indx = np.argmin(np.abs(time_offsets))\n # make sure we get a true zenith time\n time_offsets[zero_indx] = 0.0\n times = time0 + time_offsets * units.hr\n ntimes = times.size\n\n zenith = SkyCoord(\n alt=90.0 * units.deg,\n az=0 * units.deg,\n frame=\"altaz\",\n obstime=time0,\n location=array_location,\n )\n zenith_icrs = zenith.transform_to(\"icrs\")\n\n src_astropy = SkyCoord(\n ra=zenith_icrs.ra, dec=zenith_icrs.dec, obstime=times, location=array_location\n )\n src_astropy_altaz = src_astropy.transform_to(\"altaz\")\n assert np.isclose(src_astropy_altaz.alt.rad[zero_indx], np.pi / 2)\n\n stokes_radec = [1, -0.2, 0.3, 0.1] * 
units.Jy\n\n decoff = 0.0 * units.arcmin # -0.17 * units.arcsec\n raoff = 0.0 * units.arcsec\n\n source = SkyModel(\n name=\"icrs_zen\",\n ra=Longitude(zenith_icrs.ra + raoff),\n dec=Latitude(zenith_icrs.dec + decoff),\n frame=\"icrs\",\n stokes=stokes_radec,\n spectral_type=\"flat\",\n )\n\n coherency_matrix_local = np.zeros([2, 2, ntimes], dtype=\"complex128\") * units.Jy\n alts = np.zeros(ntimes)\n azs = np.zeros(ntimes)\n for ti, time in enumerate(times):\n source.update_positions(time, telescope_location=array_location)\n alt, az = source.alt_az\n assert alt == src_astropy_altaz[ti].alt.radian\n assert az == src_astropy_altaz[ti].az.radian\n alts[ti] = alt[0]\n azs[ti] = az[0]\n\n coherency_tmp = source.coherency_calc().squeeze()\n coherency_matrix_local[:, :, ti] = coherency_tmp\n\n zas = np.pi / 2.0 - alts\n Jbeam = analytic_beam_jones(zas, azs)\n coherency_instr_local = np.einsum(\n \"ab...,bc...,dc...->ad...\", Jbeam, coherency_matrix_local, np.conj(Jbeam)\n )\n\n expected_instr_local = (\n np.array(\n [\n [\n [\n 0.60572311 - 1.08420217e-19j,\n 0.60250361 + 5.42106496e-20j,\n 0.5999734 + 0.00000000e00j,\n 0.59400581 + 0.00000000e00j,\n 0.58875092 + 0.00000000e00j,\n ],\n [\n 0.14530468 + 4.99646383e-02j,\n 0.14818987 + 4.99943414e-02j,\n 0.15001773 + 5.00000000e-02j,\n 0.15342311 + 4.99773672e-02j,\n 0.15574023 + 4.99307016e-02j,\n ],\n ],\n [\n [\n 0.14530468 - 4.99646383e-02j,\n 0.14818987 - 4.99943414e-02j,\n 0.15001773 - 5.00000000e-02j,\n 0.15342311 - 4.99773672e-02j,\n 0.15574023 - 4.99307016e-02j,\n ],\n [\n 0.39342384 - 1.08420217e-19j,\n 0.39736029 + 2.71045133e-20j,\n 0.4000266 + 0.00000000e00j,\n 0.40545359 + 0.00000000e00j,\n 0.40960028 + 0.00000000e00j,\n ],\n ],\n ]\n )\n * units.Jy\n )\n\n assert units.quantity.allclose(coherency_instr_local, expected_instr_local)", "def moonlongitude(time):\n B0 = 481267.8809\n C0 = 218.3162\n # fmt: off\n A = np.array([62888.e-4, 12740.e-4, 6583.e-4, 2136.e-4, 1851.e-4, \\\n 1144.e-4, 588.e-4, 571.e-4, 533.e-4, 458.e-4, 409.e-4, \\\n 347.e-4, 304.e-4, 154.e-4, 125.e-4, 110.e-4, 107.e-4, \\\n 100.e-4, 85.e-4, 79.e-4, 68.e-4, 52.e-4, 50.e-4, 40.e-4, \\\n 40.e-4, 40.e-4, 38.e-4, 37.e-4, 28.e-4, 27.e-4, 26.e-4, \\\n 24.e-4, 23.e-4, 22.e-4, 21.e-4, 21.e-4, 21.e-4, 18.e-4, \\\n 16.e-4, 12.e-4, 11.e-4, 9.e-4, 8.e-4, 7.e-4, 7.e-4, \\\n 7.e-4, 7.e-4, 6.e-4, 6.e-4, 5.e-4, 5.e-4, 5.e-4, \\\n 4.e-4, 4.e-4, 3.e-4, 3.e-4, 3.e-4, 3.e-4, 3.e-4, \\\n 3.e-4, 3.e-4])\n B = np.array([477198.868, 413335.35, 890534.22, 954397.74, \\\n 35999.05, 966404.0, 63863.5, 377336.3, \\\n 1367733.1, 854535.2, 441199.8, 445267.1, \\\n 513197.9, 75870, 1443603, 489205, 1303870, \\\n 1431597, 826671, 449334, 926533, 31932, \\\n 481266, 1331734, 1844932, 133, 1781068, \\\n 541062, 1934, 918399, 1379739, 99863, \\\n 922466, 818536, 990397, 71998, 341337, \\\n 401329, 1856938, 1267871, 1920802, 858602, \\\n 1403732, 790672, 405201, 485333, 27864, \\\n 111869, 2258267, 1908795, 1745069, 509131, \\\n 39871, 12006, 958465, 381404, 349472, \\\n 1808933, 549197, 4067, 2322131.])\n C = np.array([44.963, 10.74, 145.70, 179.93, 87.53, 276.5, \\\n 124.2, 13.2, 280.7, 148.2, 47.4, 27.9, 222.5, \\\n 41, 52, 142, 246, 315, 111, 188, \\\n 323, 107, 205, 283, 56, 29, 21, \\\n 259, 145, 182, 17, 122, 163, 151, \\\n 357, 85, 16, 274, 152, 249, 186, \\\n 129, 98, 114, 50, 186, 127, 38, \\\n 156, 90, 24, 242, 223, 187, 340, \\\n 354, 337, 58, 220, 70, 191])\n # fmt: on\n RAD = 0.0174532925199433\n tempb = (B * time + C) * RAD\n amp = A * np.cos(tempb)\n moonlon = np.sum(amp)\n moonlon 
= (moonlon + B0 * time + C0) * RAD\n return moonlon", "def test_polarized_source_smooth_visibilities(\n time_location, moon_time_location, telescope_frame\n):\n if telescope_frame == \"itrs\":\n time0, array_location = time_location\n altaz_frame = \"altaz\"\n skycoordobj = SkyCoord\n else:\n pytest.importorskip(\"lunarsky\")\n from lunarsky import SkyCoord as LunarSkyCoord\n\n time0, array_location = moon_time_location\n altaz_frame = \"lunartopo\"\n skycoordobj = LunarSkyCoord\n\n ha_off = 1\n ha_delta = 0.01\n time_offsets = np.arange(-ha_off, ha_off + ha_delta, ha_delta)\n zero_indx = np.argmin(np.abs(time_offsets))\n # make sure we get a true zenith time\n time_offsets[zero_indx] = 0.0\n times = time0 + time_offsets * units.hr\n ntimes = times.size\n\n zenith = skycoordobj(\n alt=90.0 * units.deg,\n az=0 * units.deg,\n frame=altaz_frame,\n obstime=time0,\n location=array_location,\n )\n zenith_icrs = zenith.transform_to(\"icrs\")\n\n src_astropy = skycoordobj(\n ra=zenith_icrs.ra, dec=zenith_icrs.dec, obstime=times, location=array_location\n )\n src_astropy_altaz = src_astropy.transform_to(altaz_frame)\n assert np.isclose(src_astropy_altaz.alt.rad[zero_indx], np.pi / 2)\n\n stokes_radec = [1, -0.2, 0.3, 0.1] * units.Jy\n\n source = SkyModel(\n name=\"icrs_zen\", skycoord=zenith_icrs, stokes=stokes_radec, spectral_type=\"flat\"\n )\n\n coherency_matrix_local = np.zeros([2, 2, ntimes], dtype=\"complex128\") * units.Jy\n alts = np.zeros(ntimes)\n azs = np.zeros(ntimes)\n for ti, time in enumerate(times):\n source.update_positions(time, telescope_location=array_location)\n alt, az = source.alt_az\n assert alt == src_astropy_altaz[ti].alt.radian\n assert az == src_astropy_altaz[ti].az.radian\n alts[ti] = alt[0]\n azs[ti] = az[0]\n\n coherency_tmp = source.coherency_calc().squeeze()\n coherency_matrix_local[:, :, ti] = coherency_tmp\n\n zas = np.pi / 2.0 - alts\n Jbeam = analytic_beam_jones(zas, azs)\n coherency_instr_local = np.einsum(\n \"ab...,bc...,dc...->ad...\", Jbeam, coherency_matrix_local, np.conj(Jbeam)\n )\n\n # test that all the instrumental coherencies are smooth\n t_diff_sec = np.diff(times.jd) * 24 * 3600\n for pol_i in [0, 1]:\n for pol_j in [0, 1]:\n real_coherency = coherency_instr_local[pol_i, pol_j, :].real.value\n real_derivative = np.diff(real_coherency) / t_diff_sec\n real_derivative_diff = np.diff(real_derivative)\n assert np.max(np.abs(real_derivative_diff)) < 1e-6\n imag_coherency = coherency_instr_local[pol_i, pol_j, :].imag.value\n imag_derivative = np.diff(imag_coherency) / t_diff_sec\n imag_derivative_diff = np.diff(imag_derivative)\n assert np.max(np.abs(imag_derivative_diff)) < 1e-6\n\n # test that the stokes coherencies are smooth\n stokes_instr_local = skyutils.coherency_to_stokes(coherency_instr_local)\n for pol_i in range(4):\n real_stokes = stokes_instr_local[pol_i, :].real.value\n real_derivative = np.diff(real_stokes) / t_diff_sec\n real_derivative_diff = np.diff(real_derivative)\n assert np.max(np.abs(real_derivative_diff)) < 1e-6\n imag_stokes = stokes_instr_local[pol_i, :].imag.value\n assert np.all(imag_stokes == 0)", "def calculation_time_analysis():\n\tfrom . 
import spectra as sp\n\tp_dict = {'Bfield':700,'rb85frac':1,'Btheta':88*np.pi/180,'Bphi':0*np.pi/180,'lcell':75e-3,'T':84,'Dline':'D2','Elem':'Cs'}\n\tchiL,chiR,chiZ = sp.calc_chi([-3500],p_dict)\n\t\n\tfor angle in [0, np.pi/32, np.pi/16, np.pi/8, np.pi/4, np.pi/2]:\n\t\tprint(('Angle (degrees): ',angle*180/np.pi))\n\t\tRotMat, n1, n2 = solve_diel(chiL,chiR,chiZ,angle)", "def compute_rotation(self):\n if self.predictions[self.iteration][0] == 90.0 or self.predictions[self.iteration][0] == 270.0:\n self.rotation = 20\n self.initial_adjust = True\n return\n\n if self.iteration == 0 or (self.iteration == 1 and self.initial_adjust):\n self.rotation = rotate.get_90_deg_rotation(self.predictions[self.iteration])\n elif self.iteration == 1 or (self.iteration == 2 and self.initial_adjust):\n self.rotation = rotate.get_45_deg_rotation(self.predictions, self.current_position)\n elif self.iteration >= 2 or (self.iteration > 2 and self.initial_adjust):\n self.rotation = rotate.get_fine_rotation(self.iteration)", "def solid_body_rotation(grid, time=0., amplitude=15.):\n u = amplitude * np.cos(grid.phi2)\n v = np.zeros_like(u)\n return State.from_wind(grid, time, u, v)", "def test_calc_rotation(self):\n t = AioBaseTurtle()\n t.speed(speed=2)\n orient, steps, delta = t._calc_rotation(120)\n self.assertEqual(steps, 21)\n self.assertAlmostEqual(delta, 120.0 / 21.0)\n self.assertAlmostEqual(orient[0], math.cos(math.radians(120)))\n self.assertAlmostEqual(orient[1], math.sin(math.radians(120)))", "def dynamics(x,Earth):\r\n\r\n # precompute a few terms to reduce number of operations\r\n r = norm(x[0:3])\r\n Re_r_sqr = 1.5*Earth.J2*(Earth.R/r)**2\r\n five_z_sqr = 5*x[2]**2/(r**2)\r\n\r\n # two body and J2 acceleration together\r\n accel = (-Earth.mu/(r**3))*np.array([x[0]*(1 - Re_r_sqr*(five_z_sqr - 1)),\r\n x[1]*(1 - Re_r_sqr*(five_z_sqr - 1)),\r\n x[2]*(1 - Re_r_sqr*(five_z_sqr - 3))])\r\n\r\n return np.array([x[3],x[4],x[5],accel[0],accel[1],accel[2]])", "def Rotation_EQD_EQJ(time):\n nut = _nutation_rot(time, _PrecessDir.Into2000)\n prec = _precession_rot(time, _PrecessDir.Into2000)\n return CombineRotation(nut, prec)", "def __init__(self, markers):\n self.markers = markers\n self.last_time = None # Used to keep track of time between measurements \n self.Q_t = np.eye(2)\n self.R_t = np.eye(3)\n # YOUR CODE HERE", "def azizen(self):\n # x0,y0 array pixel coordinates relative to cx,cy\n# ndy0,ndx0=img.shape\n ndy0=self.ndy0\n ndx0=self.ndx0\n x0,y0=np.meshgrid(np.linspace(0,ndx0-1,ndx0)-self.cx,np.linspace(0,ndy0-1,ndy0)-self.cy)\n r0=np.sqrt(x0**2+y0**2)/self.pr0 # fractional radial distance from 0,0\n# self.roi=np.s_[ystart:ystart+self.ny0,xstart:xstart+self.nx0]\n # why not model the zenith angle dependence with polynomial directly\n # rather than linear interpolation between roots.\n roots=np.zeros(51)\n rr=np.arange(51)/100.0\n for i,ref in enumerate(rr):\n roots[i]=np.real(np.roots([self.c3,0,self.c2,0,self.c1,-ref])[-1])\n theta0 = np.interp(r0/2,rr,roots)\n \n phi0 = np.arctan2(x0,y0) - self.rot ####phi (i.e., azimuth) is reckoned with -pi corresponding to north, increasing clockwise, NOTE: pysolar use sub-standard definition\n phi0 = phi0%(2*np.pi)\n\n #####correction for the tilt of the camera\n k=np.array((np.sin(self.azm),np.cos(self.azm),0))\n a=np.array([np.sin(theta0)*np.cos(phi0),np.sin(theta0)*np.sin(phi0),np.cos(theta0)]); \n a = np.transpose(a,[1,2,0])\n b=np.cos(self.beta)*a + np.sin(self.beta)*np.cross(k,a,axisb=2) \\\n + 
np.reshape(np.outer(np.dot(a,k),k),(self.ndy0,self.ndx0,3))*(1-np.cos(self.beta))\n theta0=np.arctan(np.sqrt(b[:,:,0]**2+b[:,:,1]**2)/b[:,:,2])\n phi0=np.arctan2(b[:,:,1],b[:,:,0])%(2*np.pi)\n# max_theta *= deg2rad \n# valid0 = (theta0<max_theta) & (theta0>0); \n# theta0[valid0]=np.nan;\n self.theta0,self.phi0=theta0,phi0", "def Rotation_EQJ_EQD(time):\n prec = _precession_rot(time, _PrecessDir.From2000)\n nut = _nutation_rot(time, _PrecessDir.From2000)\n return CombineRotation(prec, nut)", "def zodiac(cls, tee):\n return quotient(float(cls.solar_longitude(tee)), 30) + 1", "def rotateZ(self, theta):\n rot_m = numpy.array([[numpy.sin(theta), numpy.cos(theta)], \\\n [numpy.cos(theta), -numpy.sin(theta)]])\n for sec in self.all:\n for i in range(int(nrn.n3d())):\n xy = numpy.dot([nrn.x3d(i), nrn.y3d(i)], rot_m)\n nrn.pt3dchange(i, float(xy[0]), float(xy[1]), nrn.z3d(i), \\\n nrn.diam3d(i))", "def set_RotationsInTiltSeries(self, TiltSeries_):\n kk = 0\n for Proj in TiltSeries_.Projections:\n Proj.rotInPlane = self.rotInPlane[kk]\n kk = kk + 1", "def rotate_arcs(self):\n\n if self.arc_direction:\n self.thick_arc_start_angle -= 5\n self.thick_arc_end_angle -= 5\n\n self.thin_arc_start_angle += 5\n self.thin_arc_end_angle += 5\n else:\n self.thick_arc_start_angle += 5\n self.thick_arc_end_angle += 5\n\n self.thin_arc_start_angle -= 5\n self.thin_arc_end_angle -= 5", "def get_RotationsFromTiltSeries(self, TiltSeries_):\n # initialize alignment \n self.rotInPlane = len(TiltSeries_.Projections) * [0.]\n kk = 0\n for Proj in TiltSeries_.Projections:\n self.rotInPlane[kk] = Proj.rotInPlane\n kk = kk + 1\n return self.rotInPlane", "def get_rot_dtdt(self) -> WAQuaternion:\n pass", "def rotate(self):\n pass", "def setCameraRotation3D(ang):\n dislin.vup3d(ang)", "def coriolis(self, lat):\n return 2. 
* self.omega * np.sin(np.deg2rad(lat))", "def fun_azimuth(self):\n\n energy_kev = self.energy_kev.get()\n hkl = self.hkl_magnetic.get()\n hkl = hkl.replace(',', ' ') # remove commas\n hkl = hkl.replace('(', '').replace(')', '') # remove brackets\n hkl = hkl.replace('[', '').replace(']', '') # remove brackets\n hkl = np.fromstring(hkl, sep=' ')\n\n azi = self.azim_zero.get()\n azi = azi.replace(',', ' ') # remove commas\n azi = azi.replace('(', '').replace(')', '') # remove brackets\n azi = azi.replace('[', '').replace(']', '') # remove brackets\n azi = np.fromstring(azi, sep=' ')\n\n pol = self.polval.get()\n if pol == u'\\u03c3-\\u03c3':\n pol = 's-s'\n elif pol == u'\\u03c3-\\u03c0':\n pol = 's-p'\n elif pol == u'\\u03c0-\\u03c3':\n pol = 'p-s'\n else:\n pol = 'p-p'\n\n F0 = self.resF0.get()\n F1 = self.resF1.get()\n F2 = self.resF2.get()\n\n isres = self.isres.get()\n if isres:\n # Resonant scattering\n self.xtl.Plot.simulate_azimuth_resonant(\n hkl,\n energy_kev=energy_kev,\n azim_zero=azi,\n polarisation=pol,\n F0=F0, F1=F1, F2=F2)\n plt.show()\n else:\n # Non-Resonant scattering\n self.xtl.Plot.simulate_azimuth_nonresonant(\n hkl,\n energy_kev=energy_kev,\n azim_zero=azi,\n polarisation=pol)\n plt.show()", "def get_earth(time='now'):\n earth = get_body_heliographic_stonyhurst('earth', time=time)\n\n # Explicitly set the longitude to 0\n earth = SkyCoord(0*u.deg, earth.lat, earth.radius, frame=earth)\n\n return earth", "def earth_relative_winds(u, v, sinalpha, cosalpha):\n u_rot = u * cosalpha - v * sinalpha\n v_rot = v * cosalpha + u * sinalpha\n return u_rot, v_rot", "def st(self):\n # sidereal time polynomial coefficients in arcseconds\n sidereal_time = np.array([0.014506, 4612.156534, 1.3915817, -4.4e-7,\n -2.9956e-05, -3.68e-08])\n ST = self.polynomial_sum(sidereal_time, self.T)\n # get earth rotation angle and convert to arcseconds\n return np.mod(ST + self.era*self.deg2asec, self.turnasec)/self.turnasec", "def thetas (self,lag):\n tanthetas = [0,0,0]\n for k in range(3,self.n):\n tantheta_k = (self.radii[k-3] - self.radii[k-1])/((self.v2+self.v1)/self.v2* lag[k-1] + (self.v1/self.v2-1)*lag[k])\n tanthetas.append(tantheta_k)\n return np.arctan(tanthetas) # maybe no need for arctan with such small angles", "def theta_v_time():\n pass", "def solar_angles(df, lat, lon, alt=0):\n\n jd = pd.Timestamp(df).to_julian_date()\n\n # offset (2451543.5)\n d_offset = pd.Timestamp('1999-12-31 00:00:00').to_julian_date()\n\n d = jd - d_offset\n\n\n # Keplerian elements for the sun (geocentric)\n w = 282.9404 + 4.70935E-5 * d # longitude of perihelion [degrees]\n a = 1.0 # mean distance [AU]\n e = 0.016709 - 1.151E-9 * d # eccentricity [-]\n M = np.mod(356.0470 + 0.9856002585 * d, 360.0) # mean anomaly [degrees]\n L = w + M # Sun's mean longitude [degrees]\n oblecl = 23.4393 - 3.563E-7 * d # Sun's obliquity of the eliptic [degrees]\n\n # Auxiliary angle [degrees]\n E = M + (180.0 / np.pi) * e * np.sin(np.deg2rad(M)) * (1.0 + e * np.cos(np.deg2rad(M)))\n\n # Rectangular coordinates in the plane of the ecliptic (x-axis toward perihelion)\n x = np.cos(np.deg2rad(E)) - e\n y = np.sin(np.deg2rad(E)) * np.sqrt(1 - (e ** 2))\n\n # Distance (r) and true anomaly (v)\n r = np.sqrt((x ** 2) + (y ** 2))\n v = np.rad2deg(np.arctan2(y, x))\n\n # Longitude of the sun\n lon_sun = v + w\n\n # Ecliptic rectangular coordinates\n xeclip = r * np.cos(np.deg2rad(lon_sun))\n yeclip = r * np.sin(np.deg2rad(lon_sun))\n zeclip = 0.0\n\n # Rotate coordinates to equatorial rectangular coordinates\n xequat = xeclip\n yequat 
= yeclip * np.cos(np.deg2rad(oblecl)) + zeclip * np.sin(np.deg2rad(oblecl))\n zequat = yeclip * np.sin(np.deg2rad(23.4406)) + zeclip * np.cos(np.deg2rad(oblecl))\n\n # Convert equatorial rectangular coordinates to right-ascension (RA) and declination\n r = np.sqrt(xequat ** 2 + yequat ** 2 + zequat ** 2) - (alt / 149598000.0)\n RA = np.rad2deg(np.arctan2(yequat, xequat))\n delta = np.rad2deg(np.arcsin(zequat / r))\n\n # Calculate local siderial time\n uth = df.hour + (df.minute / 60.0) + (df.second / 3600.0)\n gmst0 = np.mod(L + 180.0, 360.0) / 15.0\n sidtime = gmst0 + uth + (lon / 15.0)\n\n # Replace RA with hour-angle (HA)\n HA = sidtime * 15.0 - RA\n\n # Convert to rectangular coordinates\n x = np.cos(np.deg2rad(HA)) * np.cos(np.deg2rad(delta))\n y = np.sin(np.deg2rad(HA)) * np.cos(np.deg2rad(delta))\n z = np.sin(np.deg2rad(delta))\n\n # Rotate along an axis going East-West\n xhor = x * np.cos(np.deg2rad(90.0 - lat)) - z * np.sin(np.deg2rad(90.0 - lat))\n yhor = y\n zhor = x * np.sin(np.deg2rad(90.0 - lat)) + z * np.cos(np.deg2rad(90.0 - lat))\n\n # Find azimuthal and elevation angles\n azimuthal = np.rad2deg(np.arctan2(yhor, xhor)) + 180.0\n elevation = np.rad2deg(np.arcsin(zhor))\n\n zenith = 90.0 - elevation\n\n return np.column_stack((zenith, elevation, azimuthal))", "def sidereal_zodiac(tee):\n return quotient(int(sidereal_solar_longitude(tee)), 30) + 1", "def jupiter_system_angles(epoch):\n # Calculate solar coordinates\n O, beta, R = Sun.geometric_geocentric_position(epoch)\n\n # Compute distance Earth - Jupiter (DELTA) by iteration (start value:\n # DELTA = 5 AU)\n DELTA_old = -1.0\n DELTA = 5.0\n x = 0.0\n y = 0.0\n z = 0.0\n tau = 0.0\n iterations = 0\n\n while DELTA != DELTA_old and iterations < 5:\n # Calculate light-time delay\n tau = 0.0057755183 * DELTA\n\n l, b, r = Jupiter.geometric_heliocentric_position(epoch - tau)\n\n x = r * cos(b.rad()) * cos(l.rad()) + R * cos(O.rad())\n y = r * cos(b.rad()) * sin(l.rad()) + R * sin(O.rad())\n z = r * sin(b.rad()) + R * sin(beta.rad())\n\n DELTA_old = DELTA\n DELTA = sqrt(x ** 2 + y ** 2 + z ** 2)\n iterations += 1\n\n # t is time since JDE 2433000.5 - light time (tau)\n t = epoch.jde() - 2443000.5 - tau\n\n # Longitude of the node of the equator of Jupiter on the ecliptic\n psi = 316.5182 - 0.00000208 * t\n\n # Calculate precession since epoch B1950.0\n T_0 = (epoch.jde() - 2433282.423) / 36525\n\n # Precession in longitude from the epoch B1950.0 in deg\n P = 1.3966626 * T_0 + 0.0003088 * (T_0 ** 2)\n\n psi_corrected = psi + P\n\n # Calculate longitude of ascending node (\n # OMEGA_ascending_node_jupiter) and inclination on\n # the plane of the ecliptic (i_ecliptic_jupiter) in deg\n JC_jupiter_angles = (epoch.jde() - tau - 2451545) / 36525\n\n OMEGA_ascending_node_jupiter = 100.464407 + 1.0209774 * \\\n JC_jupiter_angles + 0.00040315 * (\n JC_jupiter_angles ** 2) + \\\n 0.000000404 * (\n JC_jupiter_angles ** 3)\n\n return psi_corrected, OMEGA_ascending_node_jupiter", "def __init__ (self,\n elevation = 45.0,\n start_angle = 0.0,\n latitude = 78.0,\n longitude = 0.0,\n rot_per_min = 1,\n rev_days = 15):\n\n self.elevation = np.radians (elevation) # Units: [rad]\n self.start_angle = np.radians (start_angle) # Units: [rad]\n\n self.base_lat = np.radians (latitude) # Units: [rad]\n self.base_long = np.radians (longitude) # Units: [rad]\n\n self.omega_rot = 2 * np.pi * rot_per_min / 60 # Units: [rad/s]\n self.rev_days = rev_days # Units: [day]", "def compute_RotMats(a, e, t):\n assert len(a)==len(e)==len(t)\n M = len(a)\n\n # camera 
intrinsic matrix\n Rz = np.zeros((M, 3, 3), dtype=np.float32)\n Rx = np.zeros((M, 3, 3), dtype=np.float32)\n Rz2 = np.zeros((M, 3, 3), dtype=np.float32)\n # C = np.zeros((M, 1, 3), dtype=np.float32)\n # initial \"1\" positions.\n Rz [:, 2, 2] = 1\n Rx [:, 0, 0] = 1\n Rz2[:, 2, 2] = 1\n #\n R = np.zeros((M, 3, 3), dtype=np.float32)\n\n # convert to radius\n a = a * pi / 180.\n e = e * pi / 180.\n t = t * pi / 180.\n\n # update a, e, t\n a = -a\n e = pi/2.+e\n t = -t\n #\n sin_a, cos_a = np.sin(a), np.cos(a)\n sin_e, cos_e = np.sin(e), np.cos(e)\n sin_t, cos_t = np.sin(t), np.cos(t)\n\n # ===========================\n # rotation matrix\n # ===========================\n \"\"\"\n # [Transposed]\n Rz = np.matrix( [[ cos(a), sin(a), 0 ], # model rotate by a\n [ -sin(a), cos(a), 0 ],\n [ 0, 0, 1 ]] )\n # [Transposed]\n Rx = np.matrix( [[ 1, 0, 0 ], # model rotate by e\n [ 0, cos(e), sin(e) ],\n [ 0, -sin(e), cos(e) ]] )\n # [Transposed]\n Rz2= np.matrix( [[ cos(t), sin(t), 0 ], # camera rotate by t (in-plane rotation)\n [-sin(t), cos(t), 0 ],\n [ 0, 0, 1 ]] )\n R = Rz2*Rx*Rz\n \"\"\"\n\n # Original matrix (None-transposed.)\n # No need to set back to zero?\n Rz[:, 0, 0], Rz[:, 0, 1] = cos_a, -sin_a\n Rz[:, 1, 0], Rz[:, 1, 1] = sin_a, cos_a\n #\n Rx[:, 1, 1], Rx[:, 1, 2] = cos_e, -sin_e\n Rx[:, 2, 1], Rx[:, 2, 2] = sin_e, cos_e\n #\n Rz2[:, 0, 0], Rz2[:, 0, 1] = cos_t, -sin_t\n Rz2[:, 1, 0], Rz2[:, 1, 1] = sin_t, cos_t\n # R = Rz2*Rx*Rz\n R[:] = np.einsum(\"nij,njk,nkl->nil\", Rz2, Rx, Rz)\n\n # Return the original matrix without transpose!\n return R", "def undo_mercator_project(x,y):\n lon = y*np.pi\n ex = np.exp(4*np.pi*x)\n lat = np.arcsin((ex - 1)/(ex +1 ))\n lon = lon*360/2/np.pi\n lat = lat*360 /2/np.pi\n return lon, lat", "def rad_field_initial_condition(self):\n\n # revert in viewing direct\n angle, _ = f.convert_direction(self.receiver_elevation, self.receiver_azimuth)\n # Looking at the sky\n if angle < 90:\n I_init = (\n self.sun_intensity\n * f.delta_func(self.sun_elevation - self.receiver_elevation)\n * f.delta_func(self.sun_azimuth - self.receiver_azimuth)\n )\n\n # Looking at the ground\n elif angle > 90:\n I_ground = RT_model_1D.calc_direct_beam_intensity(self, 0)\n\n I_lambert = (\n I_ground\n * self.ground_albedo\n * np.cos(np.deg2rad((self.sun_elevation + 180) % 360))\n )\n\n I_specular = (\n I_ground\n * self.ground_albedo\n * f.delta_func(self.sun_elevation + self.receiver_elevation - 180)\n * f.delta_func(self.sun_azimuth - self.receiver_azimuth)\n )\n\n I_init = (\n 1 - self.reflection_type\n ) * I_lambert + self.reflection_type * I_specular\n\n else:\n I_init = np.empty(self.stokes_dim)\n I_init.fill(np.nan)\n\n return I_init", "def rotated(self):\n return self.pol_lat != 90.", "def test_rotation_angle(self):\n\n self.test_shape.azimuth_placement_angle = [45, 135, 225, 315]\n test_volume = self.test_shape.volume()\n self.test_shape.rotation_angle = 180\n assert self.test_shape.volume() == pytest.approx(test_volume * 0.5)", "def photometric_calibration():\n pass", "def _calc_average_rotation_matrix(self):\n # unit vectors to be transformed by astropy\n x_c = np.array([1.0, 0, 0])\n y_c = np.array([0, 1.0, 0])\n z_c = np.array([0, 0, 1.0])\n\n if isinstance(self.telescope_location, EarthLocation):\n axes_icrs = SkyCoord(\n x=x_c,\n y=y_c,\n z=z_c,\n obstime=self.time,\n location=self.telescope_location,\n frame=\"icrs\",\n representation_type=\"cartesian\",\n )\n axes_altaz = axes_icrs.transform_to(\"altaz\")\n else:\n axes_icrs = LunarSkyCoord(\n x=x_c,\n y=y_c,\n z=z_c,\n 
obstime=self.time,\n location=self.telescope_location,\n frame=\"icrs\",\n representation_type=\"cartesian\",\n )\n axes_altaz = axes_icrs.transform_to(\"lunartopo\")\n\n axes_altaz.representation_type = \"cartesian\"\n\n # This transformation matrix is generally not orthogonal to better than 10^-7,\n # so let's fix that.\n\n R_screwy = axes_altaz.cartesian.xyz\n R_really_orthogonal, _ = ortho_procr(R_screwy, np.eye(3))\n\n # Note the transpose, to be consistent with calculation in sct\n R_really_orthogonal = np.array(R_really_orthogonal).T\n\n return R_really_orthogonal", "def skycoord(self):\n return SkyCoord(self['raj'], self['decj'], unit=(uu.hour, uu.degree))", "def time_calibration(input_file):\n original_path = os.getcwd()\n save_path = input_file['save_path']\n #change to save data reduction directory\n os.chdir(save_path)\n print '\\n Reading the list of images ....\\n'\n planet = input_file['exoplanet'] #set exoplanet name\n images = sorted(glob.glob('AB'+planet+'*.fits'))\n print images\n #include de RA,DEC and epoch of the exoplanet\n RA,DEC,epoch = input_file['RA'],input_file['DEC'],input_file['epoch']\n #obtain ST JD using iraf task and introduce in the header\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n if int(split(hdr['UT'],':')[0]) < int(hdr['timezone']):\n new_date = use.yesterday(hdr['date-obs'])\n #print images[i], new_date\n else:\n new_date = hdr['date-obs']\n year,month,day = split(new_date,'-')\n iraf.asttimes(year=year,month=month,day=day,time=hdr['loctime'],obs=input_file['observatory'])\n JD = iraf.asttimes.jd #obtain julian date\n LMST = iraf.asttimes.lmst #obtain the sideral time\n LMST = use.sexagesimal_format(LMST) #convert sideral time in sexagesimal format\n iraf.hedit(images[i],'ST',LMST,add='yes',verify='no',show='no',update='yes') #create the ST keyword in the header\n iraf.ccdhedit(images[i],'LMST',LMST,type='string') #include the mean sideral time in the header\n iraf.ccdhedit(images[i],'JD',JD,type='string') #include de julian date in the header\n #include RA, and DEC of the object in your header\n iraf.ccdhedit(images[i],\"RA\",RA,type=\"string\") #include right ascention in the header\n iraf.ccdhedit(images[i],\"DEC\",DEC,type=\"string\") #include declination in the header\n iraf.ccdhedit(images[i],\"epoch\",epoch,type=\"string\") #include epoch in the header\n # use.update_progress((i+1.)/len(images))\n print '\\n Setting airmass ....\\n'\n for i in range(len(images)):\n print '# ',images[i]\n #iraf.hedit(images[i],'airmass',airmass,add='yes')\n #iraf.hedit(images[i],'HJD',HJD,add='yes')\n iraf.setairmass.observatory = input_file['observatory']\n iraf.setairmass(images[i])\n iraf.setjd.time = 'ut'\n iraf.setjd(images[i])\n print '\\n.... 
done.\\n'\n #export information\n hjd, jd, airmass, st = [],[],[],[]\n for i in range(len(images)):\n hdr = fits.getheader(images[i])\n hjd.append(hdr['HJD'])\n jd.append(hdr['JD'])\n airmass.append(hdr['airmass'])\n st.append(hdr['st'])\n #saving the data\n data = DataFrame([list(hjd),list(jd),list(st),list(airmass)]).T\n data.columns = ['HJD','JD','ST','Airmass']\n data.to_csv('results_iraf_calibrations.csv')\n #change to workings directory\n os.chdir(original_path)\n return", "def _rotate_winds(rpn_hr):\n coords = {\"lon\": rpn_hr.nav_lon, \"lat\": rpn_hr.nav_lat}\n u_out, v_out = viz_tools.rotate_vel_bybearing(\n rpn_hr.UU, rpn_hr.VV, coords, origin=\"grid\"\n )\n\n return u_out, v_out", "def computerotmat(self, ra, dec, pa): \n\n #Rotating from equatorial J2000 to camera frame\n #PA is the angle from the north axis to the instrument y axis, measured towards east.\n #Note: We are rotating the coordinate frame axes and not points, so the angles of\n # rotation passed to the function rotationmatrix are negatives.\n rotmat0=self.rotationmatrix(3,-ra-np.pi/2.0)\n rotmat1=self.rotationmatrix(1,dec-np.pi/2.0)\n rotmat2=self.rotationmatrix(3,pa)\n rotmat=np.dot(rotmat1,rotmat0)\n rotmat=np.dot(rotmat2,rotmat)\n quat=sp.m2q(rotmat)\n return (quat)", "def randomize_trajectory(self):\n self.angle = randint(-360, 360)\n self.speed = randint(1, 5)/2.5", "def to_cart(self, t):\n # mean motion:\n n = np.sqrt(self.GM / self.a / self.a / self.a) * 86400.0 # [rad/day]\n # mean anomaly at t:\n M = n * (t - self.t0) + self.M0\n # print(np.fmod(M, 2*np.pi))\n # solve Kepler equation, get eccentric anomaly:\n E = self.kepler(self.e, M)\n cosE = np.cos(E)\n sinE = np.sin(E)\n # get true anomaly and distance from focus:\n sinv = np.sqrt(1.0 - self.e ** 2) * sinE / (1.0 - self.e * cosE)\n cosv = (cosE - self.e) / (1.0 - self.e * cosE)\n r = self.a * (1.0 - self.e ** 2) / (1.0 + self.e * cosv)\n # r = self.a*(1 - self.e*cosE)\n #\n sinw = np.sin(self.w)\n cosw = np.cos(self.w)\n sinu = sinw * cosv + cosw * sinv\n cosu = cosw * cosv - sinw * sinv\n # position\n cosNode = np.cos(self.Node)\n sinNode = np.sin(self.Node)\n cosi = np.cos(self.i)\n sini = np.sin(self.i)\n x = r * (cosu * cosNode - sinu * sinNode * cosi)\n y = r * (cosu * sinNode + sinu * cosNode * cosi)\n z = r * sinu * sini\n # velocity\n p = self.a * (1.0 - self.e ** 2)\n V_1 = np.sqrt(self.GM / p) * self.e * sinv\n V_2 = np.sqrt(self.GM / p) * (1.0 + self.e * cosv)\n vx = x * V_1 / r + (-sinu * cosNode - cosu * sinNode * cosi) * V_2\n vy = y * V_1 / r + (-sinu * sinNode + cosu * cosNode * cosi) * V_2\n vz = z * V_1 / r + cosu * sini * V_2\n\n state = np.array([x, y, z, vx, vy, vz])\n state = np.reshape(np.asarray(state), (3, 2), 'F')\n\n return state", "def rotate_telescope(gearfile=None, parametername=None, valueZ=None, valueAngle=None): \n\n tree = xml.etree.ElementTree.parse(gearfile)\n root = tree.getroot() \n\n # Read out positions of telescope before rotation\n xDistances=[]\n yDistances=[]\n zPositions=[]\n IDs=[]\n\n # angle in radians\n valueAngle = math.radians(valueAngle)\n\n # loop over ladders to find the z positions, IDs\n for detectors in root.findall('detectors'): \n for detector in detectors.findall('detector'):\n for layers in detector.findall('layers'):\n for layer in layers.findall('layer'):\n for ladder in layer.findall('ladder'):\n xDistances.append(float(ladder.get('positionX')))\n yDistances.append(float(ladder.get('positionY')))\n zPositions.append(float(ladder.get('positionZ')))\n IDs.append(ladder.get('ID'))\n\n # 
Calculate the distance between the rotaion axis position valueZ and the z positions of the sensors\n zDistances=[]\n for zPosition in zPositions:\n zDistances.append(zPosition-valueZ)\n\n planenumber=0\n\n # Calculate the positions of the sensor center\n if(parametername == 'positionX'):\n set_globalparameter(gearfile=gearfile, parametername='alpha', value=valueAngle)\n for zDistance in zDistances:\n\n # Calculate position after rotation in XZ plane\n z=math.cos(valueAngle)*zDistance-math.sin(valueAngle)*xDistances[planenumber]\n x=math.sin(valueAngle)*zDistance+math.cos(valueAngle)*xDistances[planenumber]\n\n # Set the calculated parameters in the gear file\n set_parameter(gearfile=gearfile, sensorID=IDs[planenumber], parametername='positionX', value=x)\n set_parameter(gearfile=gearfile, sensorID=IDs[planenumber], parametername='positionZ', value=z+valueZ)\n\n planenumber=planenumber+1\n \n \n # Calculate the positions of the sensor center \n elif(parametername == 'positionY'):\n set_globalparameter(gearfile=gearfile, parametername='beta', value=valueAngle)\n for zDistance in zDistances:\n\n # Calculate position after rotation in YZ plane\n z=math.cos(valueAngle)*zDistance-math.sin(valueAngle)*xDistances[planenumber]\n y=math.sin(valueAngle)*zDistance+math.cos(valueAngle)*xDistances[planenumber]\n\n # Set the calculated parameters in the gear file\n set_parameter(gearfile=gearfile, sensorID=IDs[planenumber], parametername='positionY', value=y)\n set_parameter(gearfile=gearfile, sensorID=IDs[planenumber], parametername='positionZ', value=z+valueZ)\n\n planenumber=planenumber+1\n else:\n print('Rotation plane not well defined!')", "def Rotation_EQJ_ECL():\n # ob = mean obliquity of the J2000 ecliptic = 0.40909260059599012 radians.\n c = 0.9174821430670688 # cos(ob)\n s = 0.3977769691083922 # sin(ob)\n return RotationMatrix([\n [ 1, 0, 0],\n [ 0, +c, -s],\n [ 0, +s, +c]\n ])", "def get_rotationalAngularPosition(self, t): # returns [rad]\n angle = self.theta0 + self.rotationalAngularVelocity * t # angular position [rad]\n return angle", "def _get_obs(self):\n pos1, orn1 = p.getBasePositionAndOrientation(self.car1)\n pos2, orn2 = p.getBasePositionAndOrientation(self.car2)\n theta1 = p.getEulerFromQuaternion(orn1)[2]\n theta2 = p.getEulerFromQuaternion(orn2)[2]\n x1 = pos1[0]\n y1 = pos1[1]\n x2 = pos2[0]\n y2 = pos2[1]\n dis = np.sqrt((x1-x2)**2 + (y1-y2)**2)\n vec_dis1 = np.array([x2-x1, y2-y1])\n vec_dis2 = np.array([x1-x2, y1-y2])\n wall1 = min([abs(self.max_dist_x-x1),abs(-self.max_dist_x-x1),abs(self.max_dist_y-y1),abs(-self.max_dist_y-y1)])\n wall2 = min([abs(self.max_dist_x-x2),abs(-self.max_dist_x-x2),abs(self.max_dist_y-y2),abs(-self.max_dist_y-y2)])\n xp1 = x1 + math.sin(theta1)\n yp1 = y1 - math.cos(theta1)\n vec1 = np.array([xp1 - x1, yp1 - y1])\n vec1_len = np.sqrt((x1-xp1)**2 + (y1-yp1)**2)\n cross1 = np.cross(vec1, vec_dis1)\n dot1 = np.dot(vec1, vec_dis1)\n angle1 = math.asin(cross1/(dis*vec1_len))\n if(dot1<0 and cross1<0):\n angle1 = -(np.pi + angle1)\n if(dot1<0 and cross1>0):\n angle1 = np.pi - angle1\n xp2 = x2 + math.sin(theta2)\n yp2 = y2 - math.cos(theta2)\n vec2 = np.array([xp2 - x2, yp2 - y2])\n vec2_len = np.sqrt((x2-xp2)**2 + (y2-yp2)**2)\n cross2 = np.cross(vec2, vec_dis2)\n dot2 = np.dot(vec2, vec_dis2)\n angle2 = math.asin(cross2/(dis*vec2_len))\n if(dot2<0 and cross2<0):\n angle2 = -(np.pi + angle2)\n if(dot2<0 and cross2>0):\n angle2 = np.pi - angle2 \n return np.array([[wall1,dis,angle1],[wall2,dis,angle2]])", "def _inst2earth(adcpo, reverse=False, 
rotate_vars=None, force=False):\n\n if reverse:\n # The transpose of the rotation matrix gives the inverse\n # rotation, so we simply reverse the order of the einsum:\n sumstr = 'jik,j...k->i...k'\n cs_now = 'earth'\n cs_new = 'inst'\n else:\n sumstr = 'ijk,j...k->i...k'\n cs_now = 'inst'\n cs_new = 'earth'\n\n # if ADCP is upside down\n if adcpo.orientation == 'down':\n down = True\n else: # orientation = 'up' or 'AHRS'\n down = False\n\n rotate_vars = _check_rotate_vars(adcpo, rotate_vars)\n\n cs = adcpo.coord_sys.lower()\n if not force:\n if cs == cs_new:\n print(\"Data is already in the '%s' coordinate system\" % cs_new)\n return\n elif cs != cs_now:\n raise ValueError(\n \"Data must be in the '%s' frame when using this function\" %\n cs_now)\n\n if 'orientmat' in adcpo:\n omat = adcpo['orientmat']\n else:\n omat = _euler2orient(adcpo['time'], adcpo['heading'].values, adcpo['pitch'].values,\n adcpo['roll'].values)\n\n # Take the transpose of the orientation to get the inst->earth rotation\n # matrix.\n rmat = np.rollaxis(omat.data, 1)\n\n _dcheck = rotb._check_rotmat_det(rmat)\n if not _dcheck.all():\n warnings.warn(\"Invalid orientation matrix (determinant != 1) at indices: {}. \"\n \"If rotated, data at these indices will be erroneous.\"\n .format(np.nonzero(~_dcheck)[0]), UserWarning)\n\n # The dictionary of rotation matrices for different sized arrays.\n rmd = {3: rmat, }\n\n # The 4-row rotation matrix assume that rows 0,1 are u,v,\n # and 2,3 are independent estimates of w.\n tmp = rmd[4] = np.zeros((4, 4, rmat.shape[-1]), dtype=np.float64)\n tmp[:3, :3] = rmat\n # Copy row 2 to 3\n tmp[3, :2] = rmat[2, :2]\n tmp[3, 3] = rmat[2, 2]\n # Extend rows 0,1\n tmp[0, 2:] = rmat[0, 2] / 2\n tmp[1, 2:] = rmat[1, 2] / 2\n\n if reverse:\n # 3-element inverse handled by sumstr definition (transpose)\n rmd[4] = np.moveaxis(inv(np.moveaxis(rmd[4], -1, 0)), 0, -1)\n\n for nm in rotate_vars:\n dat = adcpo[nm].values\n n = dat.shape[0]\n # Nortek documents sign change for upside-down instruments (equiv to adding 180 deg to roll)\n if down:\n # This is equivalent to adding 180 degrees to roll axis in _calc_omat()\n sign = np.array([1, -1, -1, -1], ndmin=dat.ndim).T\n signIMU = np.array([1, -1, -1], ndmin=dat.ndim).T\n if not reverse:\n if n == 3:\n dat = np.einsum(sumstr, rmd[3], signIMU*dat)\n elif n == 4:\n dat = np.einsum('ijk,j...k->i...k', rmd[4], sign*dat)\n else:\n raise Exception(\"The entry {} is not a vector, it cannot\"\n \"be rotated.\".format(nm))\n\n elif reverse:\n if n == 3:\n dat = signIMU*np.einsum(sumstr, rmd[3], dat)\n elif n == 4:\n dat = sign*np.einsum('ijk,j...k->i...k', rmd[4], dat)\n else:\n raise Exception(\"The entry {} is not a vector, it cannot\"\n \"be rotated.\".format(nm))\n\n else: # 'up' and AHRS\n if n == 3:\n dat = np.einsum(sumstr, rmd[3], dat)\n elif n == 4:\n dat = np.einsum('ijk,j...k->i...k', rmd[4], dat)\n else:\n raise Exception(\"The entry {} is not a vector, it cannot\"\n \"be rotated.\".format(nm))\n adcpo[nm].values = dat.copy()\n\n adcpo = rotb._set_coords(adcpo, cs_new)\n\n return adcpo", "def get_road_rotation(self):\r\n if self.container is not None:\r\n rot = self.container.get_road_rotation()\r\n rot2 = self.rotation\r\n if rot2 is None:\r\n rot2 = rot\r\n return rot2\r\n \r\n rot = self.track.get_road_rotation()\r\n rot2 = self.rotation\r\n if rot2 is None:\r\n rot2 = rot\r\n return rot2", "def plot_orbit(self) :\r\n self.x_vals = np.zeros((self.n_bodies,len(self.hist)))\r\n self.y_vals = np.zeros((self.n_bodies,len(self.hist)))\r\n self.z_vals 
= np.zeros((self.n_bodies,len(self.hist))) \r\n self.r_vals = np.zeros((self.n_bodies,len(self.hist)))\r\n \r\n for i in range(self.n_bodies) : \r\n ioff = i*6\r\n for j in range(len(self.hist)) : \r\n self.x_vals[i][j] = self.hist[j][ioff]\r\n self.y_vals[i][j] = self.hist[j][ioff+1]\r\n self.z_vals[i][j] = self.hist[j][ioff+2]\r\n self.r_vals[i][j] = np.sqrt((self.x_vals[i][j]**2) + (self.y_vals[i][j]**2) + (self.z_vals[i][j]**2))", "def getOblateXRotMatrix(aStar1, aStar2):\n aStarDir = aStar2 - a1\n aStarmid = aStar1 + 0.5 * aStarDir\n kath = np.sqrt((aStarDir[0] * aStarDir[0] + aStarDir[1] * aStarDir[1]) / 4.0)\n phi = np.arctan( abs( (aStarDir[2]/2) / kath) )\n octantAStar2 = octant(aStar2)\n if octantAStar2 in [1, 2, 7, 8]: #\n phi = -phi\n print \"phi =\" , np.rad2deg(phi)\n RotX = np.matrix( [ [ 1.0, 0.0 , 0.0 ],\n [ 0.0, np.cos(phi), np.sin(phi)],\n [ 0.0, -np.sin(phi), np.cos(phi)]\n ])\n return np.asarray( RotX )", "def _get_obs(self):\n obs = super()._get_obs()\n gripper_rot = rotations.mat2euler(\n self.sim.data.get_site_xmat('robot0:grip'))\n obs['observation'] = np.concatenate(\n [obs['observation'], gripper_rot.ravel()])\n return obs", "def rot_elements_at_epoch(self, epoch):\n T = (epoch.tdb - constants.J2000).to(\"day\").value / 36525\n d = (epoch.tdb - constants.J2000).to(\"day\").value\n return self._rot_elements_at_epoch(T, d)", "def from_earth_years(planet, earth_years):\n return earth_years * year_diff_dict[planet]", "def oceansim(sun_az,sun_zen,cam_head,cam_elev=0,m2=1.33,npart=1.08,mu=3.483, debug=True):\n\n #Water surface norm\n n = np.array([0,0,1])\n m1 = 1.0\n #vector from sun:\n ki = -np.asarray([np.sin(sun_az)*np.sin(sun_zen),\n np.cos(sun_az)*np.sin(sun_zen),\n np.cos(sun_zen)])\n xi = norm_cross(n,ki)\n #transmitted sunlight\n #tx, ty are the transmission amplitude coefficients in the xt, yt directions\n kt,tx,ty = Fresnel.transmission(ki,n,m1,m2)\n xt = xi\n #vector to camera\n kc = -np.asarray([np.sin(cam_head)*np.cos(cam_elev),\n np.cos(cam_head)*np.cos(cam_elev),\n np.sin(cam_elev)])*np.linalg.norm(kt)\n xc = norm_cross(n, kc) #right\n yc = norm_cross(kc, xc) #up\n #vectors for scattering\n ys = norm_cross(kt, kc) # y-axis of scattering event\n xst = norm_cross(ys, kt) # x-axis of scattering event relative to transmitted sunlight\n xsc = norm_cross(ys, kc) # x-axis of scattering event relative to camera\n #Mueller matrices\n # transmission through water surface:\n mm1 = Mueller.polarizer(tx,ty)\n # rotate to scattering plane\n mm2 = Mrotv(kt,xt,xst)\n # scatter\n th_s = vector_angle(kt,kc)\n #mm3 = Mocean(rad2deg(th_s)) #using Empirical ocean scattering\n mm3 = Mueller.rayleigh_norm(th_s) #normalized Rayleigh scattering matrix\n #b = Scattering.bsf_fournier(npart,mu)\n b = Scattering.vspf_fournier(th_s,npart,mu)\n # transform to camera's horizontal and up vectors\n mm4 = Mxform(xsc,ys, xc,yc)\n #Combined: mm4 . (b*mm3) . mm2 . 
mm1\n m = mm4.dot(b*mm3.dot(mm2.dot(mm1)))\n #stokes vector\n s = m.dot([1,0,0,0])\n if debug:\n return s,m,(ki,xi),(kt,xt,xst),(kc,xc,xsc),(mm1,mm2,mm3,b,mm4)\n else:\n return s,m", "def animate_tangent_angles_equal(self):\n\n # \\theta_i and \\theta_r\n self.play(FadeIn(self.tex_derive_ti_tr[0]), FadeIn(self.tex_derive_ti_tr[2]))\n self.wait(self.wait_time)\n\n # mirror reflection\n self.play(FadeIn(self.text_mirror), FadeIn(self.tex_derive_ti_tr[1]))\n self.wait(self.wait_time)\n\n self.play(FadeIn(self.tex_derive_ti_tr[3]), FadeIn(self.tex_derive_ti_tr[4]))\n self.wait(self.wait_time)\n\n # tangent\n tex_derive_tan_tin_tan_tr_work = copy.deepcopy(self.tex_derive_tan_tin_tan_tr)\n theta_work_1 = copy.deepcopy(self.theta_0)\n theta_work_1.move_to(self.tex_derive_tan_tin_tan_tr[2].get_center())\n theta_work_2 = copy.deepcopy(self.theta_0)\n theta_work_2.move_to(self.tex_derive_tan_tin_tan_tr[6].get_center())\n theta_i_org = copy.deepcopy(tex_derive_tan_tin_tan_tr_work[2])\n theta_r_org = copy.deepcopy(tex_derive_tan_tin_tan_tr_work[6])\n\n # Show 90 - theta_i , 90 - theta_r\n self.play(FadeIn(tex_derive_tan_tin_tan_tr_work[0:2]),\n FadeIn(tex_derive_tan_tin_tan_tr_work[4:6]),\n # Make final memory destination of ReplacementTransform\n # tex_derive_tan_tin_tan_tr_work, thus here we start with\n # the copies (theta_i_org and theta_r_org).\n FadeIn(theta_i_org),\n FadeIn(theta_r_org))\n self.wait(self.wait_time)\n\n # transform to theta_i, theta_r = theta\n self.play(ReplacementTransform(theta_i_org, theta_work_1),\n ReplacementTransform(theta_r_org, theta_work_2))\n self.wait(self.wait_time)\n\n # transform back to theta_i, theta_r\n self.play(ReplacementTransform(theta_work_1, tex_derive_tan_tin_tan_tr_work[2]),\n ReplacementTransform(theta_work_2, tex_derive_tan_tin_tan_tr_work[6]))\n self.wait(self.wait_time)\n\n # show = thera'\n self.play(FadeIn(tex_derive_tan_tin_tan_tr_work[3]),\n FadeIn(tex_derive_tan_tin_tan_tr_work[7:9]))\n self.wait(self.wait_time)\n\n # Show equal anges: theta_0\n theta_i_equal = copy.deepcopy(self.tex_derive_ti_tr[4])\n theta_r_equal = copy.deepcopy(self.tex_derive_ti_tr[4])\n self.add(theta_i_equal, theta_r_equal)\n self.play(ApplyMethod(theta_i_equal.move_to, self.tex_theta_in.get_center()),\n FadeOut(self.tex_theta_in),\n ApplyMethod(theta_r_equal.move_to, self.tex_theta_ref.get_center()),\n FadeOut(self.tex_theta_ref))\n self.wait(self.wait_time)\n\n # Show equal anges: theta_0'\n theta_i_tan_equal = copy.deepcopy(self.tex_derive_tan_tin_tan_tr[8])\n theta_r_tan_equal = copy.deepcopy(self.tex_derive_tan_tin_tan_tr[8])\n self.add(theta_i_tan_equal, theta_r_tan_equal)\n self.play(ApplyMethod(theta_i_tan_equal.move_to, self.tex_theta_in_tan. 
get_center()),\n FadeOut(self.tex_theta_in_tan),\n ApplyMethod(theta_r_tan_equal.move_to, self.tex_theta_ref_tan.get_center()),\n FadeOut(self.tex_theta_ref_tan))\n self.wait(self.wait_time)\n\n self.play(FadeOut(self.text_mirror),\n FadeOut(self.tex_derive_ti_tr),\n FadeOut(tex_derive_tan_tin_tan_tr_work))\n self.wait(self.wait_time)", "def target_acceleration(self, time):\n x_a = -self.w**2*self.r*sin(self.w*time)\n y_a = -self.w**2*self.r*cos(self.w*time)\n z_a = 0\n # raise NotImplementedError\n return np.array([x_a,y_a,z_a])", "def rotate(co):\n \n ow = co.owner\n\n # get the suffix of the human to reference the right objects\n suffix = ow.name[-4:] if ow.name[-4] == \".\" else \"\"\n \n keyboard = co.sensors['All_Keys']\n scene = logic.getCurrentScene()\n pos = scene.objects['POS_EMPTY' + suffix]\n human_pos = scene.objects['Human' + suffix]\n active_camera = scene.active_camera\n \n # if the human is external, do nothing\n if human_pos.get('External_Robot_Tag') or human_pos['disable_keyboard_control']:\n return\n \n if human_pos['move_cameraFP'] and active_camera.name != ('Human_Camera'+suffix):\n return\n \n keylist = keyboard.events\n\n k = [] #initiate a list with all currently pressed keys\n for key in keylist:\n if key[1] == logic.KX_INPUT_ACTIVE:\n k.append(key[0]) # add all pressed keys to a list - as ASCII CODES\n\n pos.worldPosition = ow.worldPosition\n\n # Get active camera\n scene = logic.getCurrentScene()\n active_camera = scene.active_camera\n \n if ow['Manipulate']:\n ow.worldOrientation = pos.worldOrientation\n # lock camera to head in Manipulation Mode\n else:\n if FORWARDS in k and not(LEFT in k or RIGHT in k): \n if active_camera.name == (\"Human_Camera\"+suffix):\n applyrotate(pos.worldOrientation, ow)\n else:\n applyrotate(human_pos.worldOrientation, ow) \n elif LEFT in k and not(FORWARDS in k or BACKWARDS in k):\n if active_camera.name == (\"Human_Camera\"+suffix):\n applyrotate(pos.worldOrientation *\n Matrix.Rotation(math.pi / 2, 3, 'Z'), ow)\n else: \n applyrotate(human_pos.worldOrientation *\n Matrix.Rotation(math.pi / 2, 3, 'Z'), ow)\n # turn around 90 deg\n elif RIGHT in k and not(FORWARDS in k or BACKWARDS in k):\n if active_camera.name == (\"Human_Camera\"+suffix):\n applyrotate(pos.worldOrientation *\n Matrix.Rotation(math.pi * 3/2, 3, 'Z'), ow)\n else:\n applyrotate(human_pos.worldOrientation * Matrix.Rotation(math.pi * 3/2, 3, 'Z'), ow)\n # turn around 270 deg\n elif LEFT in k and FORWARDS in k:\n if active_camera.name == (\"Human_Camera\"+suffix):\n applyrotate(pos.worldOrientation *\n Matrix.Rotation(math.pi / 4, 3, 'Z'), ow)\n else: \n applyrotate(human_pos.worldOrientation *\n Matrix.Rotation(math.pi / 4, 3, 'Z'), ow)\n # turn around 45 deg\n elif RIGHT in k and FORWARDS in k:\n if active_camera.name == (\"Human_Camera\"+suffix):\n applyrotate(pos.worldOrientation *\n Matrix.Rotation(math.pi * 7 / 4, 3, 'Z'), ow)\n else:\n applyrotate(human_pos.worldOrientation *\n Matrix.Rotation(math.pi * 7 / 4, 3, 'Z'), ow)\n # turn around 315 deg\n elif BACKWARDS in k and not(LEFT in k or RIGHT in k):\n if active_camera.name == (\"Human_Camera\"+suffix):\n applyrotate(pos.worldOrientation * Matrix.Rotation(math.pi, 3, 'Z'), ow)\n # turn around 180 deg if in game-mode\n elif LEFT in k and BACKWARDS in k: \n if active_camera.name == (\"Human_Camera\"+suffix):\n applyrotate(pos.worldOrientation * Matrix.Rotation(math.pi * 3/4, 3, 'Z'), ow)\n else:\n applyrotate(human_pos.worldOrientation * Matrix.Rotation(math.pi / 4, 3, 'Z'), ow)\n # turn around 135 deg if in 
game-mode, else turn 45 deg\n elif RIGHT in k and BACKWARDS in k:\n if active_camera.name == (\"Human_Camera\"+suffix):\n applyrotate(pos.worldOrientation * Matrix.Rotation(math.pi * 5/4, 3, 'Z'), ow)\n else:\n applyrotate(human_pos.worldOrientation * Matrix.Rotation(math.pi * 7 / 4, 3, 'Z'), ow)\n # turn around 225 deg if in game mode, else turn 315 deg.", "def rotation(self):\n\t\treturn self.piv.a.rotate.v", "def sim_altitude(a):\n a.takeoff()\n for i in range(20):\n b = a.refresh()\n a.altitude.set_sensor_altitude_current(a.altitude.get_altitude_current() + 3)\n print a.altitude.get_altitudes(), b\n a.land()\n a.motor.set_speed(2000.0)\n for i in range(20):\n b = a.refresh()\n a.altitude.set_sensor_altitude_current(a.altitude.get_altitude_current() - 3)\n print a.altitude.get_altitudes(), b", "def delaz(eqlat, eqlon, stlat, stlon, flag):\n\n if flag==0: # convert geographic degrees to geocentric radians\n eqlat, eqlon = coortr(eqlat,eqlon,flag)\n stlat, stlon = coortr(stlat,stlon,flag) \n\n eqcolat = m.pi/2-eqlat\n stcolat = m.pi/2-stlat\n\n cos_eq = m.cos(eqcolat)\n sin_eq = m.sin(eqcolat)\n cos_st = m.cos(stcolat)\n sin_st = m.sin(stcolat)\n cos_eqst = m.cos(stlon-eqlon)\n sin_eqst = m.sin(stlon-eqlon)\n\n cos_delta = cos_eq * cos_st + sin_eq * sin_st * cos_eqst\n sin_delta = m.sqrt(1-cos_delta * cos_delta)\n delta = m.atan2(sin_delta,cos_delta)\n # if sin(delta)=0, set sin(delta)=eps=10**-16\n eps = 3.e-7\n sin_delta = sin_delta + (sin_delta==0)*eps\n\n # index is zero if expression is false, 1 if true; \n # if false, leave unchanged, if true azeqst=pi-azeqst\n # this puts azeqst into the correct quadrant\n azeqst = m.asin(sin_st*sin_eqst/sin_delta)\n index = (sin_eq*cos_st - cos_eq*sin_st*cos_eqst < 0)\n azeqst = azeqst + index*(m.pi-2*azeqst)\n azeqst = azeqst + (azeqst<0)*2*m.pi\n\n azsteq = m.asin(-sin_eq*sin_eqst/sin_delta)\n index = (cos_eq*sin_st - sin_eq*cos_st*cos_eqst < 0)\n azsteq = azsteq + index*(m.pi-2*azsteq)\n azsteq = azsteq + (azsteq<0)*2*m.pi\n\n # convert to degrees\n delta = delta*180/m.pi\n azeqst = azeqst*180/m.pi\n azsteq = azsteq*180/m.pi\n\n return delta, azeqst, azsteq", "def coords_on_spherical_earth(self):\n self.create_3d_coord_on_sphere(on_sphere=True)\n self.df_attributes['coord_x_earth'] = 6371.009 * self.df_attributes['coord_x']\n self.df_attributes['coord_y_earth'] = 6371.009 * self.df_attributes['coord_y']\n self.df_attributes['coord_z_earth'] = 6371.009 * self.df_attributes['coord_z']" ]
[ "0.6289923", "0.62318367", "0.6067944", "0.60519284", "0.59558403", "0.59422225", "0.5937349", "0.5936226", "0.59156203", "0.5899485", "0.5844154", "0.5770054", "0.576213", "0.5754233", "0.57514757", "0.5723362", "0.5705267", "0.56774634", "0.5661177", "0.5657915", "0.56561375", "0.5627605", "0.5617953", "0.56025684", "0.55915827", "0.5587309", "0.5566315", "0.5557793", "0.55455256", "0.5536868", "0.5526699", "0.5517656", "0.5514458", "0.55095893", "0.5506296", "0.5500514", "0.54972225", "0.5480813", "0.5446413", "0.5442823", "0.5428242", "0.5420151", "0.54097414", "0.5408694", "0.5400029", "0.5381978", "0.53622943", "0.536164", "0.53586143", "0.53576", "0.5355457", "0.5352439", "0.5345229", "0.5344156", "0.53372896", "0.53365904", "0.53305405", "0.53299916", "0.53262985", "0.53251636", "0.53209645", "0.5317883", "0.52967554", "0.52768916", "0.5265993", "0.52613986", "0.52543104", "0.52306324", "0.5226278", "0.522539", "0.5220434", "0.5218297", "0.5206535", "0.5203583", "0.5196593", "0.518861", "0.5186055", "0.5180084", "0.51748616", "0.516823", "0.5167744", "0.516354", "0.51617736", "0.51600385", "0.51578844", "0.51577526", "0.51499265", "0.5149692", "0.5149632", "0.51445985", "0.51442105", "0.51437557", "0.51426566", "0.5135984", "0.5131412", "0.5125905", "0.51203084", "0.51115835", "0.51092273", "0.5104676" ]
0.6019258
4
The radio telescopes observe the sky for 'total_int_time' hours each day. The signal is recorded every 'int_time' seconds.
def daily_observation(z, ncells, filename, total_int_time=4., int_time=10., boxsize=None, declination=30.):
    Nbase, N_ant = from_antenna_config(filename, z)
    uv_map0      = get_uv_coverage(Nbase, z, ncells, boxsize=boxsize)
    uv_map       = np.zeros(uv_map0.shape)
    tot_num_obs  = int(3600.*total_int_time/int_time)
    for i in xrange(tot_num_obs-1):
        new_Nbase = earth_rotation_effect(Nbase, i+1, int_time, declination=declination)
        uv_map1   = get_uv_coverage(new_Nbase, z, ncells, boxsize=boxsize)
        uv_map   += uv_map1
        print i
    uv_map = (uv_map+uv_map1)/tot_num_obs
    return uv_map, N_ant
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "async def set_integration_time(self, int_time: int):\n return await self.hw_device.integration_time(int_time)", "def setIntegrationTime(self,t_int):\n \n acc_len = self._adcClock*1e6*t_int/(1024.0) \n if acc_len > 65536:\n raise(\"Integration time is too long:\",t_int)\n self._t_int = t_int\n \n #acc_len = 2048 # hardwire for now to known working condition\n period = acc_len*16384\n \n self.regwrite(\"cs/vacc/acc_len\",acc_len-1)\n self.regwrite(\"period1\",period-2)\n self._write_info({'IntegrationTime': t_int})", "def setIntegrationTime(self,t_int):\n acc_len = np.round((self._adcClock/4)*1e6*t_int/(8192.0))\n period = acc_len*8192\n if acc_len >= 2**16-1:\n raise Exception(\"Requested integration length results in acc_len setting %d > 65535. Reduce requested integration time.\" % (acc_len-1,))\n t_int_actual = acc_len*8192/((self._adcClock/4)*1e6)\n self._t_int = t_int_actual\n self.regwrite(\"cfgspec/vacc/acc_len\",acc_len-1)\n self.regwrite(\"period\",period-2)\n self._write_info({'PeriodRegister': (period-2),\n 'AccLenRegister': (acc_len-1),\n 'IntegrationTime': t_int_actual})\n self.sync()", "def measure_t_interval(self):\n self.write(\"MEAS:TINT? (@1),(@2)\")", "def logging_time(self, cur_res_val=0):\n self.fixed_val = self.new_val\n self.minutes_val += 1\n \n if cur_res_val:\n if self.cur_hour == 23:\n self.time_counter[str(0)] = 0\n else:\n self.time_counter[str(self.cur_hour+1)] = 0\n if cur_res_val < 30:\n self.time_counter[str(self.time_hour)] = self.minutes_val\n self.minutes_val = 0\n self.new_hour_flag = False\n elif cur_res_val >= 30:\n if self.time_hour - self.cur_hour:\n self.time_counter[str(self.cur_hour)] = self.minutes_val\n self.minutes_val = 0\n self.new_hour_flag = False\n print(self.time_counter)", "def end_time(self, t):\r\n # Increase temperature while silent.\r\n if np.count_nonzero(self.next_note) == 0:\r\n self.silent_time += 1\r\n if self.silent_time >= NOTES_PER_BAR:\r\n self.temperature += 0.1\r\n else:\r\n self.silent_time = 0\r\n self.temperature = self.default_temp\r\n \r\n self.notes_memory.append(self.next_note)\r\n # Consistent with dataset representation\r\n self.beat_memory.append(compute_beat(t, NOTES_PER_BAR))\r\n self.results.append(self.next_note)\r\n # Reset next note\r\n self.next_note = np.zeros((NUM_NOTES, NOTE_UNITS))\r\n return self.results[-1]", "def handle_air_counts(self, this_start, this_end):\n aq_data_set = []\n average_data = []\n while time.time() < this_end:\n text = self.aq_port.read(32)\n buffer = [ord(c) for c in text]\n if buffer[0] == 66:\n summation = sum(buffer[0:30])\n checkbyte = (buffer[30]<<8)+buffer[31]\n if summation == checkbyte:\n current_second_data = []\n buf = buffer[1:32]\n current_second_data.append(datetime.datetime.now())\n for n in range(1,4):\n current_second_data.append(repr(((buf[(2*n)+1]<<8) + buf[(2*n)+2])))\n for n in range(1,7):\n current_second_data.append(repr(((buf[(2*n)+13]<<8) + buf[(2*n)+14])))\n aq_data_set.append(current_second_data)\n for c in range(len(self.variables)):\n c_data = []\n for i in range(len(aq_data_set)):\n c_data.append(aq_data_set[i][c+1])\n c_data_int = list(map(int, c_data))\n avg_c = sum(c_data_int)/len(c_data_int)\n average_data.append(avg_c)\n\n self.data_handler.main(\n self.datalog, average_data, this_start, this_end)", "def increment_hourly_total(self, unique, property_id=None, value=1):\n key = (self.user_name, self.bucket_name, \"hourly_event\", self.shard)\n property_id = property_id or _32_BYTE_FILLER\n column_id = \"\".join([\n self.id,\n 
property_id[0:16],\n pack_hour(),\n property_id[16:32]])\n increment_counter(key, column_id=column_id, value=value)\n if unique:\n key = (\n self.user_name, \n self.bucket_name, \n \"hourly_unique_event\", \n self.shard)\n increment_counter(key, column_id=column_id)", "def timing( self, input, band, temp, nsubint, nsubfreq, jump, saveDir, saveFile, verbose, exciseRFI ):\n\n timingObject = Timing( temp, input, band, nsubint, nsubfreq, jump, saveDir, saveFile, verbose, exciseRFI )", "def evaluate(self, time) -> float:\n ...", "def alarm(self, interval, call):", "def hourly(self, start_time: str = \"now\", end_time: Optional[str] = None,\n fields: List[str] = list()) -> dict:\n end_time = end_time or str(pendulum.parse(start_time).add(hours=108))\n query = {\n \"start_time\": start_time,\n \"end_time\": end_time,\n \"fields\": fields or self.fields\n }\n return self.call(\"weather/forecast/hourly\", query)", "def encode(self, signal, signal_end_time, delta_t):\n\n self.__dict__.update(self.params.__dict__)\n\n spikes = SpikeTimes(self.n_channels)\n if isinstance(signal, src.signals.Signal) or isinstance(\n signal, src.signals.signalCollection\n ):\n sampled = signal.sample(np.arange(0, signal_end_time, delta_t))\n else:\n sampled = signal\n time = np.arange(0, signal_end_time, delta_t)\n\n weighted_biased_integral = (\n (\n self.mixing_matrix.dot(np.cumsum(np.atleast_2d(sampled), 1) * delta_t)\n + np.outer(self._b, time)\n ).T\n / np.array(self._kappa)\n + np.array(self._integrator_init)\n + self._delta\n )\n moduloed_integral_quotient = np.floor_divide(\n weighted_biased_integral, 2 * np.array(self._delta)\n ).T\n\n for ch in range(self.n_channels):\n unique, unique_indices = np.unique(\n moduloed_integral_quotient[ch, :], return_index=True\n )\n start_index = bisect.bisect_right(unique, 0)\n unique = unique[start_index:]\n unique_indices = unique_indices[start_index:]\n if (np.diff(unique_indices) < 0).any() or (\n np.diff(moduloed_integral_quotient) > 1\n ).any():\n print(delta_t)\n raise ValueError(\"Your delta_t is too large\")\n spikes.add(ch, time[unique_indices].tolist())\n return spikes", "def calcExpTime(self, background: SpectralQty, signal: SpectralQty, obstruction: float, snr: u.Quantity) -> u.s:\n # Calculate the signal and background temperatures\n t_signal, t_background = self.calcTemperatures(background, signal, obstruction)\n line_ind = np.where(t_signal.wl == self.__lambda_line)[0][0]\n t_sys = t_background + 2 * self.__receiver_temp + t_signal\n # Calculate the noise bandwidth\n delta_nu = t_signal.wl.to(u.Hz, equivalencies=u.spectral()) / (t_signal.wl / self.__common_conf.wl_delta() + 1)\n exp_time = []\n for snr_ in snr if snr.size > 1 else [snr]:\n # Calculate the RMS background temperature\n t_rms = t_signal / snr_\n # Calculate the exposure time\n if self.__n_on is None:\n exp_time_ = ((2 * t_sys * self.__kappa / t_rms) ** 2 / delta_nu)\n else:\n exp_time_ = ((t_sys * self.__kappa / t_rms) ** 2 *\n (1 + 1 / np.sqrt(self.__n_on)) / delta_nu)\n exp_time_ = SpectralQty(exp_time_.wl, exp_time_.qty.decompose())\n exp_time.append(exp_time_.qty[line_ind])\n # Print details\n self.__printDetails(t_sys.qty[line_ind], delta_nu[line_ind], t_rms.qty[line_ind], t_signal.qty[line_ind],\n \"SNR=%.2f: \" % snr_.value)\n self.__output(t_signal, t_background, t_rms, \"snr_%.2f\" % snr_.value, exp_time=exp_time_)\n return u.Quantity(exp_time) if len(exp_time) > 1 else u.Quantity(exp_time[0])", "def in_time(self):\n if self._in_time is None:\n self._in_time = np.fft.irfft(self._in_freq, 
n=self._times.size)\n return self._in_time", "def signal_rsi(self):\n pass", "def update_rain_temp(self, day_of_week, departure_time_seconds):\n\n current_time = t.time()\n today = datetime.today().weekday()\n\n if (departure_time_seconds < (current_time + 3600) \\\n and day_of_week == today):\n\n self.temp = self.current_temperature\n self.rain = self.current_rainfall\n\n elif (day_of_week == today):\n for i in range(24):\n if (departure_time_seconds > self.weather_forecast_json \\\n [\"hourly\"][\"data\"][i][\"time\"] and departure_time_seconds \\\n < self.weather_forecast_json[\"hourly\"][\"data\"][i + 1][\"time\"]):\n\n self.temp = self.weather_forecast_json \\\n ['hourly']['data'][i]['temperature']\n\n self.rain = self.weather_forecast_json['hourly'] \\\n ['data'][i]['precipIntensity']\n break\n else:\n continue\n else:\n day_difference = int((departure_time_seconds - current_time) / 86400)\n\n self.temp = (self.weather_forecast_json['daily']['data'] \\\n [day_difference]['temperatureMax'] + \\\n self.weather_forecast_json['daily']['data'] \\\n [day_difference]['temperatureMin']) / 2\n\n self.rain = self.weather_forecast_json['daily'] \\\n ['data'][day_difference]['precipIntensity']", "def on_scale_total_time_button_clicked(self):\n if self.total_time_setting is None:\n print(\"Need to input time\")\n return\n times = self.qr_polytraj.times.copy()\n\n times = times/np.sum(times)*self.total_time_setting\n\n self.qr_polytraj.update_times(np.arange(np.size(times)),times,defer=False)#self.defer)\n\n self.update_path_markers()\n acc_wp = self.get_accel_at_waypoints(\"main\")\n self.interactive_marker_worker.make_controls(self.qr_polytraj.waypoints)\n self.interactive_marker_worker.update_controls(self.qr_polytraj.waypoints,acc_wp = acc_wp)", "def overheads(NPT, DIT, NDIT):\n ov = 360. + 120. + NPT*NDIT*(DIT + 80. 
+ 15.)\n print 'Telescope time in h = ', ov/3600.", "def getDuration(self):\n #return np.sum(self.subintinfo['TSUBINT']) #This is constant.\n return np.sum(self.getSubintinfo('TSUBINT')) #This is constant.", "def averageTime(self):\n \n pass", "def update_signal(self,current_time):\r\n time = (current_time+self.offset)%self.cycle\r\n \r\n for ph_id,group in self.lane_groups.items():\r\n \r\n ph = self.phases[ph_id]\r\n \r\n if not (ph.start<=time<ph.end):\r\n # when the light is red, the section cannot generate demand\r\n for sec in group:\r\n sec.demand=0", "def __init__(self, date_time, diastolic):\n Encounter.__init__(self, date_time)\n self.__diastolic = diastolic", "def record(self, time, increment):\n raise NotImplementedError(\"Abstract method not implemented.\")", "def get_reltriggertimes(self):\n return np.array(self.trtimes)-self.soundstarttime", "def _ims_res_timing(self) -> None:\n self._ims_res_timer.start(500)", "def time_function(t):\n\n omega = np.pi\n return np.sin(omega * t) + np.sin(10 * omega * t) + np.sin(20 * omega * t)", "def time(self):\n return sum(self._interval) * .5", "def hourly_table(self):\n htable = [0 for i in range(24)]\n for i in range(self.dataset.shape[0]):\n stime = time.localtime(np.int32(self.dataset[i][2]))\n evtime = stime[3]\n htable[evtime] += 1\n return htable", "def time_day_update_func(self, time, day, ride_duration):\n day = (day + ((time + ride_duration) // t)) % d\n time = (time + ride_duration) % t\n return time, day", "def get_hourly(self):\n pass", "def time_interval_sub(self, time_step, nsteps):\n world.subtime = TimeAxis(0.0, int(nsteps), float(time_step))\n print(\"Setting subtime\")", "def run_at_time(self, input_dict):\n lead_seq = util.get_lead_sequence(self.config, input_dict)\n for lead in lead_seq:\n self.clear()\n input_dict['lead_hours'] = lead\n self.config.set('config', 'CURRENT_LEAD_TIME', lead)\n os.environ['METPLUS_CURRENT_LEAD_TIME'] = str(lead)\n time_info = time_util.ti_calculate(input_dict)\n self.run_at_time_once(time_info)", "def signal_time(self, signal_time):\n\n self._signal_time = signal_time", "def caltrack_hourly(\n meter_data,\n temperature_data,\n blackout_start_date,\n blackout_end_date,\n degc: bool = False,\n):\n\n baseline_meter_data, baseline_warnings = get_baseline_data(\n meter_data,\n start=blackout_start_date - relativedelta(years=1),\n end=blackout_start_date,\n max_days=None,\n )\n\n # create a design matrix for occupancy and segmentation\n preliminary_design_matrix = create_caltrack_hourly_preliminary_design_matrix(\n baseline_meter_data, temperature_data, degc\n )\n\n # build 12 monthly models - each step from now on operates on each segment\n segmentation = segment_time_series(\n preliminary_design_matrix.index, \"three_month_weighted\"\n )\n\n # assign an occupancy status to each hour of the week (0-167)\n occupancy_lookup = estimate_hour_of_week_occupancy(\n preliminary_design_matrix, segmentation=segmentation\n )\n\n # assign temperatures to bins\n (\n occupied_temperature_bins,\n unoccupied_temperature_bins,\n ) = fit_temperature_bins(\n preliminary_design_matrix,\n segmentation=segmentation,\n occupancy_lookup=occupancy_lookup,\n )\n\n # build a design matrix for each monthly segment\n segmented_design_matrices = create_caltrack_hourly_segmented_design_matrices(\n preliminary_design_matrix,\n segmentation,\n occupancy_lookup,\n occupied_temperature_bins,\n unoccupied_temperature_bins,\n )\n\n # build a CalTRACK hourly model\n baseline_model = fit_caltrack_hourly_model(\n 
segmented_design_matrices,\n occupancy_lookup,\n occupied_temperature_bins,\n unoccupied_temperature_bins,\n )\n # get a year of reporting period data\n reporting_meter_data, warnings = get_reporting_data(\n meter_data, start=blackout_end_date, max_days=365\n )\n\n # compute metered savings for the year of the reporting period we've selected\n metered_savings_dataframe, error_bands = metered_savings(\n baseline_model,\n reporting_meter_data,\n temperature_data,\n with_disaggregated=True,\n degc=degc,\n )\n\n return metered_savings_dataframe", "def on_timer(context, data_type, data):\n pass", "def gauge_int_timeseries(resource_type, resource_labels, metric_type,\n metric_labels, value):\n series = monitoring_v3.TimeSeries()\n series.metric.type = metric_type\n series.metric.labels.update(metric_labels)\n series.resource.type = resource_type\n series.resource.labels.update(resource_labels)\n series.metric_kind = 'GAUGE'\n now = time.time()\n seconds = int(now)\n nanos = int((now - seconds) * 10**9)\n interval = monitoring_v3.TimeInterval(\n {'end_time': {\n 'seconds': seconds,\n 'nanos': nanos\n }})\n point = monitoring_v3.Point({\n 'interval':\n interval,\n 'value':\n monitoring_v3.TypedValue(int64_value=value)\n })\n series.points = [point]\n return series", "def arming_time(self, time):\n self.write(\":FREQ:ARM:STAR:SOUR IMM\")\n self.write(\":FREQ:ARM:STOP:SOUR TIM\")\n self.write((\":FREQ:ARM:STOP:TIM %.1f\" % time).lstrip('0'))", "def time(self):\r\n time = datetime.datetime.now().strftime(\"%I:%M:%S\")\r\n self.speak(\"the current time is\")\r\n self.speak(time)", "def qinteg(integTime=10, reps=6, \n gap=0.0, science=True,\n subarray=DEFAULT) :\n multiSubarray('integrate', subarray, integTime, reps, gap, science)", "def time_steps(Tin, ints):\n from numpy import linspace\n \n t_steps=linspace(0, Tin, int(ints*Tin))\n dt=(len(t_steps)/Tin)**-1\n \n return t_steps, dt", "def sundial_time(cls, tee):\n date = Clock.fixed_from_moment(tee)\n time = mod(tee, 1)\n q = ifloor(4 * time)\n if q == 0:\n a = cls.sunset(date - 1)\n b = cls.sunrise(date)\n t = Clock.days_from_hours(-6)\n elif q == 3:\n a = cls.sunset(date)\n b = cls.sunrise(date + 1)\n t = Clock.days_from_hours(18)\n else:\n a = cls.sunrise(date)\n b = cls.sunset(date)\n t = Clock.days_from_hours(6)\n return a + (2 * (b - a) * (time - t))", "def risetime_calc(self):\n\n # given the transmitter's 20%-80% risetime, and assuming a\n # Gaussian impulse response, calculate the 10%-90% risetime\n # cell G3\n\n #self.tx_1090_rise = 1.518*self.tx_2080_rise #Fix 1 : Formula not same as in Cell T7\n self.tx_1090_rise = 329*1000/self.tx_2080_rise\n \n # calculate the effective risetimes for the fiber channel, given\n # the bandwidths calculated in the previous section, assuming\n # a Gaussian impulse response model\n self.cd_1090_rise = 0.48E6 / self.bw_cd\n self.md_1090_rise = 0.48E6 / self.bw_md\n\n # calculate the risetime for the link receiver, given its\n # bandwidth and assuming a single pole impulse response\n # Cell T7\n self.rx_1090_rise = 0.329E6/self.rx_bw\n\n # calculate the risetime for the test receiver used for transmitter\n # eye displays, given its bandwidth and assuming a single pole\n # response\n self.rx_txeye_1090_rise = 0.329E6 / self.txeye_rx_bw\n\n # calculate Te from column H and Tc from column I\n tr_tx_2 = self.tx_1090_rise**2*self.l_1\n tr_rx_2 = self.rx_1090_rise**2*self.l_1\n tr_cd_2 = np.square(self.cd_1090_rise)\n tr_md_2 = np.square(self.md_1090_rise)\n self.te = np.sqrt(tr_cd_2 + tr_md_2 + tr_tx_2) # column 
H\n \n self.tc = np.sqrt(tr_cd_2 + tr_md_2 + tr_tx_2 + tr_rx_2) # column I\n \n\n # end of GbE10..risetime_calc", "def time_window_frequency(self, t0, tend):\n eigs = self.time_window_eigs(t0, tend)\n return np.log(eigs).imag/(2*np.pi*self.original_time['dt'])", "def timestep(self, simsystem, osc, obs):\n pass", "def track(self, t0 = 0., show=0):\n\n reltime = self.reltime\n chans = self.chans\n tint = self.inttime\n\n # calculate pulse time and duration\n pulset = t0\n pulsedt = self.pulsewidth[0] # dtime in seconds. just take one channel, since there is no freq dep\n\n timebin = []\n chanbin = []\n\n ontime = n.where(((pulset + pulsedt) >= reltime - tint/2.) & (pulset <= reltime + tint/2.))\n for ch in xrange(len(chans)):\n timebin = n.concatenate((timebin, ontime[0]))\n chanbin = n.concatenate((chanbin, (ch * n.ones(len(ontime[0]), dtype='int'))))\n\n track = (list(timebin), list(chanbin))\n\n if show:\n p.plot(track[0], track[1], 'w*')\n\n return track", "def show_time(self):\n hour = str(datetime.datetime.now().strftime(\"%H\"))\n minute = str(datetime.datetime.now().strftime(\"%M\"))\n\n hour1 = int(hour[0])\n hour2 = int(hour[1])\n minute1 = int(minute[0])\n minute2 = int(minute[1])\n\n self.light_number(self.numbers[hour1], [0, 5])\n self.light_number(self.numbers[hour2], [0, 0])\n self.light_number(self.numbers[minute1], [5, 5])\n self.light_number(self.numbers[minute2], [5, 0])", "def report(inclusion_cutoff=0.1, total_time=False):\n master = MasterTimer.getMasterTimer()\n\n table = [\n \"{:60s} {:^20} {:^20} {:^20} {:^8} {}\\t\".format(\n \"TIMER REPORTS\",\n \"SINCE LAST (s)\",\n \"CUMULATIVE (s)\",\n \"AVERAGE (s)\",\n \"PAUSES\",\n \"ACTIVE\",\n )\n ]\n\n for timer in sorted(master.timers.values(), key=lambda x: x.time):\n if total_time:\n time_ratio = timer.time / master.time()\n else:\n time_ratio = timer.timeSinceReport / master.time()\n if time_ratio < inclusion_cutoff:\n continue\n table.append(str(timer))\n\n return \"\\n\".join(table)", "def time(self):\n return signal_base_get_time(self.obj)", "def time_to_sample(self, time):\n return time * self.freq", "def realtime(self):", "def REAL_TIME_ADVANCE(dt):", "def integrate(integTime=10, reps=6, ants=0, antwait=-2, tmo=500, \n gap=0.0, science=True,\n subarray=DEFAULT) :\n if type(reps) != int:\n m = \"integrate(integTime, reps): reps must be an integer\"\n commandlog(m)\n raise Exception, m\n if reps <= 0:\n m = \"Infinite integration not allowed\"\n commandlog(m)\n raise Exception, m\n if antwait != NONE:\n c1 = \"Waiting for antennas to acquire source\"\n c2 = \"Antennas acquired\"\n wait(TRACK, ants, tmo, antwait, precomment=c1, postcomment=c2,\n subarray=subarray)\n\n cblist = makeCorrBandList(0)\n # If band is offline, don't wait for it.\n if ( len( cblist ) != 0 ) :\n c1 = \"Waiting for previous astroband commands to complete...\"\n c2 = \"Correlator band configuration complete\"\n wait(CORR, cblist, 10, ALL, precomment=c1, postcomment=c2, \n subarray=subarray)\n rtdComment(\"Integrating...\", subarray=subarray) \n multiSubarray('integrate', subarray, integTime, reps, gap, science)\n rtn = wait(INTEG, postcomment=\"Integration complete\" ,subarray=subarray)\n #print \"Integration complete, itime=\", integTime\n return rtn", "def calcSNR(self, background: SpectralQty, signal: SpectralQty, obstruction: float,\n exp_time: u.Quantity) -> u.dimensionless_unscaled:\n # Calculate the signal and background temperatures\n t_signal, t_background = self.calcTemperatures(background, signal, obstruction)\n line_ind = 
np.where(t_signal.wl == self.__lambda_line)[0][0]\n t_sys = t_background + 2 * self.__receiver_temp + t_signal\n # Calculate the noise bandwidth\n delta_nu = t_signal.wl.to(u.Hz, equivalencies=u.spectral()) / (t_signal.wl / self.__common_conf.wl_delta() + 1)\n snr = []\n for exp_time_ in exp_time if exp_time.size > 1 else [exp_time]:\n # Calculate the RMS background temperature\n if self.__n_on is None:\n t_rms = 2 * t_sys * self.__kappa / np.sqrt(exp_time_ * delta_nu)\n else:\n t_rms = t_sys * self.__kappa * np.sqrt(1 + 1 / np.sqrt(self.__n_on)) / np.sqrt(exp_time_ * delta_nu)\n # Calculate the SNR\n snr_ = t_signal / t_rms\n snr.append(snr_.qty[line_ind])\n # Print details\n self.__printDetails(t_sys.qty[line_ind], delta_nu[line_ind], t_rms.qty[line_ind], t_signal.qty[line_ind],\n \"t_exp=%.2f s: \" % exp_time_.value)\n self.__output(t_signal, t_background, t_rms, \"texp_%.2f\" % exp_time_.value, snr=snr_)\n return u.Quantity(snr) if len(snr) > 1 else u.Quantity(snr[0])", "def updateInt(data):\n emit('timestamp', data)", "def test_scenario(timestep_per_pi, int_method):\n\n #determine BC and IC\n x0 = 0.0 #init pos\n v0 = 1.0 #init vel\n t0 = 0.0 #start-time\n tn = 4.0*np.pi #end-time\n tau = timestep_per_pi*np.pi #timesteps\n n = (tn-t0)/tau + 1 #number of timesteps\n \n time = np.linspace(t0, tn, n) #time-array\n\n #acceleration of point particle with k=m=1\n acc1 = lambda x,v,t: -1.0*x #function must take three arguments!\n\n pos, vel, time = integrate_time(func=acc1,\n init=(x0,v0),\n timearray=time,\n method=int_method)\n\n #analytical solutions\n pos_an = np.sin(time)\n vel_an = np.cos(time)\n\n return time, pos, pos_an, vel, vel_an", "def time_interval_prop(self, time_step, nsteps):\n world.time = TimeAxis(0.0, int(nsteps), float(time_step))\n print(\"Setting time\")", "def _time0(self, int_max=None):\n\n if int_max == None: return np.array(range(self.skip, self.frames - 1))\n return np.linspace(\n self.skip, self.frames - 1, int(int_max), endpoint=False, dtype=int)", "def __iadd__(self, *args, **kwargs):\n return _uhd_swig.time_spec_t___iadd__(self, *args, **kwargs)", "def time_in_msk():\n str_time = datetime.now(MSK).strftime('%H:%M:%S')\n with open(FILE, \"a+\") as f:\n f.write(str_time + \"\\n\")\n return str_time, 200", "def compute_iactT(data):\n\t# get only the timestamps\n\ttime = data[0]\n\tprevT = time[0]\n\n\ttotalT = len(time)*0.1\n\n\treturn totalT", "def time_interval( self ):\n begin = self.begin; end = self.end\n if end - begin < 600*self.hour_switch:\n return 600\n if end - begin < 86400*self.day_switch:\n return 3600\n elif end - begin < 86400*7*self.week_switch:\n return 86400\n else:\n return 86400*7", "def setIntegrationTime(self, time = 1.0):\n setI1DisplayIntegrationTime(time)", "def Run_abio(HatmT,time):\n\tHatm = HatmT[len(HatmT)-1]\n \n\tHatm0 = Hatm\n \n\tt_temp = 0\n \n\twhile t_temp < time:\n \n\t\tEsc = -13.1e-4*(Hatm)/(ntot) \n \n\t\tdHatm = S * (Esc + Volc)\n \n\t\tvec = np.array([Hatm])\n\t\tder_vec = np.array([dHatm])\n \n\t\tdt = min(abs(vec[np.where(der_vec != 0)[0]]/der_vec[np.where(der_vec != 0)[0]]/1000))\n\t\t#dt = tmax/10000000\n\t\tHatm = max(1e-40,Hatm + dHatm*dt)\n\n\t\tt_temp = t_temp + dt\n \n\t\tHatmT.append(Hatm)\n \n\treturn(HatmT)", "def update_rec_timer(self, time_s):\n self._stop_section.ids.rec_time_lbl.text = format_time_str(int(round(time_s)))", "def calculate_interface_util(device_int, time_now):\n device_int[\"current_counter\"] = int(device_int[\"current_counter\"])\n device_int[\"previous_counter\"] = 
int(device_int[\"previous_counter\"])\n\n if device_int[\"current_counter\"] == device_int[\"previous_counter\"]:\n # print \"no traffic on interface {} on {}\".format(device_int[\"name\"], device_ip)\n return False, device_int\n device_int[\"update_time\"] = time_now\n device_int[\"seconds_since\"] = (time_now - device_int[\"previous_update\"]).seconds\n device_int[\"bits_out\"] = (device_int[\"current_counter\"] * 8) - (device_int[\"previous_counter\"] * 8)\n max_int = 9223372036854775807\n if device_int[\"bits_out\"] < 0:\n device_int[\"bits_out\"] = (max_int - device_int[\"previous_counter\"]) + device_int[\"current_counter\"]\n device_int[\"bits_per_sec\"] = device_int[\"bits_out\"] / device_int[\"seconds_since\"]\n device_int[\"util_percentage\"] = float(device_int[\"bits_per_sec\"]) * 100 / float(device_int[\"speed\"])\n device_int[\"util_percentage\"] = round(device_int[\"util_percentage\"], 3)\n return True, device_int", "def rainfall_event(self):\n\n # assign local variables\n datatype = 'strds'\n increment = str(self.rain_interval)+' minutes'\n raster = 'raster'\n iterations = int(self.rain_duration)/int(self.rain_interval)\n rain_excess = 'rain_excess'\n net_difference = 'net_difference'\n\n # create raster space time datasets\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.elevation_timeseries,\n title=self.elevation_title,\n description=self.elevation_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.depth_timeseries,\n title=self.depth_title,\n description=self.depth_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.erdep_timeseries,\n title=self.erdep_title,\n description=self.erdep_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.flux_timeseries,\n title=self.flux_title,\n description=self.flux_description,\n overwrite=True)\n gscript.run_command(\n 't.create',\n type=datatype,\n temporaltype=self.temporaltype,\n output=self.difference_timeseries,\n title=self.difference_title,\n description=self.difference_description,\n overwrite=True)\n\n # register the initial digital elevation model\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=self.elevation,\n start=self.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # create evolution object\n evol = Evolution(elevation=self.elevation,\n precipitation=self.precipitation,\n start=self.start,\n rain_intensity=self.rain_intensity,\n rain_interval=self.rain_interval,\n rain_duration=self.rain_duration,\n walkers=self.walkers,\n runoff=self.runoff,\n mannings=self.mannings,\n detachment=self.detachment,\n transport=self.transport,\n shearstress=self.shearstress,\n density=self.density,\n mass=self.mass,\n grav_diffusion=self.grav_diffusion,\n erdepmin=self.erdepmin,\n erdepmax=self.erdepmax,\n k_factor=self.k_factor,\n c_factor=self.c_factor,\n m=self.m,\n n=self.n,\n threads=self.threads,\n fill_depressions=self.fill_depressions)\n\n # determine mode and run model\n if self.mode == 'simwe_mode':\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n 
map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # run the landscape evolution model\n # as a series of rainfall intervals in a rainfall event\n i = 1\n while i < iterations:\n\n # update the elevation\n evol.elevation = evolved_elevation\n print evol.elevation\n\n # update time\n evol.start = time\n print evol.start\n\n # derive excess water (mm/hr) from rainfall rate (mm/hr)\n # plus the depth (m) per rainfall interval (min)\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_excess}\"\n \"={rain_intensity}\"\n \"+{depth}\"\n \"/1000.\"\n \"/{rain_interval}\"\n \"*60.\".format(\n rain_excess=rain_excess,\n rain_intensity=self.rain_intensity,\n depth=depth,\n rain_interval=self.rain_interval),\n overwrite=True)\n\n # update excess rainfall\n rain_intensity = 'rain_intensity'\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{rain_intensity} = {rain_excess}\".format(\n rain_intensity='rain_intensity',\n rain_excess=rain_excess),\n overwrite=True)\n evol.rain_intensity = rain_intensity\n\n # determine mode and run model\n if self.mode == \"simwe_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.erosion_deposition()\n # remove relative timestamps\n # from r.sim.water and r.sim.sediment\n gscript.run_command(\n 'r.timestamp',\n map=depth,\n date='none')\n gscript.run_command(\n 'r.timestamp',\n map=erosion_deposition,\n date='none')\n\n elif self.mode == \"usped_mode\":\n (evolved_elevation, time, depth, erosion_deposition,\n difference) = evol.usped()\n\n elif self.mode == \"rusle_mode\":\n (evolved_elevation, time, depth, sediment_flux,\n difference) = evol.rusle()\n\n else:\n raise RuntimeError(\n '{mode} mode does not exist').format(mode=self.mode)\n\n # register the evolved maps\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.elevation_timeseries,\n maps=evolved_elevation,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.depth_timeseries,\n maps=depth,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n 
input=self.erdep_timeseries,\n maps=erosion_deposition,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n except (NameError, CalledModuleError):\n pass\n try:\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.flux_timeseries,\n maps=sediment_flux,\n start=evol.start,\n increment=increment,\n flags='i', overwrite=True)\n except (NameError, CalledModuleError):\n pass\n gscript.run_command(\n 't.register',\n type=raster,\n input=self.difference_timeseries,\n maps=difference,\n start=evol.start,\n increment=increment,\n flags='i',\n overwrite=True)\n\n # remove temporary maps\n gscript.run_command(\n 'g.remove',\n type='raster',\n name=['rain_excess'],\n flags='f')\n\n i = i+1\n\n # compute net elevation change\n gscript.run_command(\n 'r.mapcalc',\n expression=\"{net_difference}\"\n \"={evolved_elevation}-{elevation}\".format(\n net_difference=net_difference,\n elevation=self.elevation,\n evolved_elevation=evol.elevation),\n overwrite=True)\n gscript.write_command(\n 'r.colors',\n map=net_difference,\n rules='-',\n stdin=difference_colors)", "def doit(ts, hours):\n # Start at 1 AM\n ts = ts.replace(minute=0, second=0, microsecond=0)\n now = ts - datetime.timedelta(hours=hours-1)\n interval = datetime.timedelta(hours=1)\n ets = datetime.datetime.utcnow()\n ets = ets.replace(tzinfo=pytz.timezone(\"UTC\"))\n total = None\n while now < ets:\n gmt = now.astimezone(pytz.timezone(\"UTC\"))\n for prefix in ['GaugeCorr', 'RadarOnly']:\n gribfn = gmt.strftime((\"/mnt/a4/data/%Y/%m/%d/mrms/ncep/\" +\n prefix + \"_QPE_01H/\" +\n prefix + \"_QPE_01H_00.00_%Y%m%d-%H%M00\"\n \".grib2.gz\"))\n if os.path.isfile(gribfn):\n break\n if not os.path.isfile(gribfn):\n print(\"q3_Xhour.py MISSING %s\" % (gribfn,))\n now += interval\n continue\n fp = gzip.GzipFile(gribfn, 'rb')\n (tmpfp, tmpfn) = tempfile.mkstemp()\n tmpfp = open(tmpfn, 'wb')\n tmpfp.write(fp.read())\n tmpfp.close()\n grbs = pygrib.open(tmpfn)\n grb = grbs[1]\n os.unlink(tmpfn)\n # careful here, how we deal with the two missing values!\n if total is None:\n total = grb['values']\n else:\n maxgrid = np.maximum(grb['values'], total)\n total = np.where(np.logical_and(grb['values'] >= 0,\n total >= 0),\n grb['values'] + total, maxgrid)\n now += interval\n\n if total is None:\n print(\"q3_Xhour.py no data ts: %s hours: %s\" % (ts, hours))\n return\n\n # Scale factor is 10\n routes = \"c\"\n if ts.minute == 0:\n routes = \"ac\"\n pqstr = \"plot %s %s iowa_q2_%sh.png q2/iowa_q2_%sh_%s00.png png\" % (\n routes, ts.strftime(\"%Y%m%d%H%M\"), hours, hours,\n ts.strftime(\"%H\"))\n\n lts = ts.astimezone(pytz.timezone(\"America/Chicago\"))\n subtitle = 'Total up to %s' % (lts.strftime(\"%d %B %Y %I:%M %p %Z\"),)\n m = MapPlot(title=(\"NCEP MRMS Q3 (RADAR Only) %s Hour \"\n \"Precipitation [inch]\") % (hours,),\n subtitle=subtitle)\n\n clevs = np.arange(0, 0.2, 0.05)\n clevs = np.append(clevs, np.arange(0.2, 1.0, 0.1))\n clevs = np.append(clevs, np.arange(1.0, 5.0, 0.25))\n clevs = np.append(clevs, np.arange(5.0, 10.0, 1.0))\n clevs[0] = 0.01\n\n m.contourf(mrms.XAXIS, mrms.YAXIS, np.flipud(total) / 24.5, clevs)\n m.drawcounties()\n m.postprocess(pqstr=pqstr)", "def record(self, time, increment):\n\n if time < self._initialTime:\n return\n\n if self._lastObsValue > self._max:\n self._max = self._lastObsValue\n if time == self._initialTime:\n self._min = self._lastObsValue\n elif self._lastObsValue < self._min:\n self._min = self._lastObsValue\n\n self._n += 1\n self._area += self._lastObsValue * (time - 
self._lastObsTime)\n self._areaSquared += (self._lastObsValue ** 2) * (time - self._lastObsTime)\n self._lastObsTime = time\n self._lastObsValue += increment", "def rates_of_onset_and_decline(mhw, tt_start, tt_end, tt_peak, mhw_relSeas, temp, clim):\n ## For START of event\n if tt_start > 0:\n mhw_relSeas_start = 0.5 * (\n mhw_relSeas[0] + temp[tt_start - 1] - clim[\"seas\"][tt_start - 1]\n )\n mhw[\"rate_onset\"].append(\n (mhw_relSeas[tt_peak] - mhw_relSeas_start) / (tt_peak + 0.5)\n )\n else: # MHW starts at beginning of time series\n if (\n tt_peak == 0\n ): # Peak is also at begining of time series, assume onset time = 1 day\n mhw[\"rate_onset\"].append((mhw_relSeas[tt_peak] - mhw_relSeas[0]) / 1.0)\n else:\n mhw[\"rate_onset\"].append((mhw_relSeas[tt_peak] - mhw_relSeas[0]) / tt_peak)\n\n ## For END of event\n if tt_end < T - 1:\n mhw_relSeas_end = 0.5 * (\n mhw_relSeas[-1] + temp[tt_end + 1] - clim[\"seas\"][tt_end + 1]\n )\n mhw[\"rate_decline\"].append(\n (mhw_relSeas[tt_peak] - mhw_relSeas_end)\n / (tt_end - tt_start - tt_peak + 0.5)\n )\n else: # MHW finishes at end of time series\n if (\n tt_peak == T - 1\n ): # Peak is also at end of time series, assume decline time = 1 day\n mhw[\"rate_decline\"].append((mhw_relSeas[tt_peak] - mhw_relSeas[-1]) / 1.0)\n else:\n mhw[\"rate_decline\"].append(\n (mhw_relSeas[tt_peak] - mhw_relSeas[-1]) / (tt_end - tt_start - tt_peak)\n )\n\n return mhw", "def radiation_measurement_analysis():\n import pint\n ureg = pint.UnitRegistry()\n\n mrem_h = ureg.parse_units('mrem') / ureg.hour\n m = ureg.parse_units('meters')\n s = ureg.parse_units('seconds')\n\n # Measurements of background radiation\n bg_dist = ureg.parse_expression('10 m') # estimate of how far away we are wrt background\n background_rows = [\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=0.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.022 * mrem_h, capture_time=0.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=4.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.021 * mrem_h, capture_time=5.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=11.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=16.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.024 * mrem_h, capture_time=20.0 * s),\n ]\n\n # Measurements of sample radiation\n esp_dist = ureg.parse_expression('1 inch').to(m) / 2 # estimate of how far we are from the sample when very close\n dist0_rows = [\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=0.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.061 * mrem_h, capture_time=3.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=5.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.059 * mrem_h, capture_time=9.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=10.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.059 * mrem_h, capture_time=11.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.057 * mrem_h, capture_time=12.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.058 * mrem_h, capture_time=13.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.059 * mrem_h, capture_time=14.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=15.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.061 * mrem_h, capture_time=16.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.062 * mrem_h, capture_time=18.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.062 * mrem_h, capture_time=18.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.064 * mrem_h, capture_time=20.0 * 
s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=22.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.066 * mrem_h, capture_time=23.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=24.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.063 * mrem_h, capture_time=25.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=26.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.064 * mrem_h, capture_time=27.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.064 * mrem_h, capture_time=27.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=28.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.063 * mrem_h, capture_time=30.0 * s),\n ]\n\n dist0_v2_rows = [\n dict(vid=3, distance=esp_dist, rad=0.012 * mrem_h, capture_time=0.0 * s),\n dict(vid=3, distance=esp_dist, rad=0.011 * mrem_h, capture_time=1.0 * s),\n dict(vid=3, distance=esp_dist, rad=0.013 * mrem_h, capture_time=8.0 * s),\n dict(vid=3, distance=esp_dist, rad=0.013 * mrem_h, capture_time=9.0 * s),\n ]\n\n close_rows = [\n dict(vid=4, distance=0.5 * m, rad=0.013 * mrem_h, capture_time=0.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.014 * mrem_h, capture_time=5.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.012 * mrem_h, capture_time=7.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.011 * mrem_h, capture_time=15.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.012 * mrem_h, capture_time=16.0 * s),\n ]\n\n mid_rows = [\n dict(vid=5, distance=1.0 * m, rad=0.014 * mrem_h, capture_time=0.0 * s),\n dict(vid=5, distance=1.0 * m, rad=0.015 * mrem_h, capture_time=5.0 * s),\n dict(vid=5, distance=1.0 * m, rad=0.013 * mrem_h, capture_time=10.0 * s),\n ]\n\n far_rows = [\n dict(vid=6, distance=2.0 * m, rad=0.023 * mrem_h, capture_time=0.0 * s),\n dict(vid=6, distance=2.0 * m, rad=0.025 * mrem_h, capture_time=0.1 * s),\n ]\n\n # guess_dist = ureg.parse_expression('0.3 m') # estimate of how far away we are wrt background\n # guess_rows = [\n # dict(vid=9, distance=guess_dist, rad=0.030 * mrem_h, capture_time=0.0 * s),\n # dict(vid=9, distance=guess_dist, rad=0.041 * mrem_h, capture_time=2.0 * s),\n # dict(vid=9, distance=guess_dist, rad=0.051 * mrem_h, capture_time=3.0 * s),\n # ]\n\n rows = dist0_rows + background_rows + dist0_v2_rows + close_rows + mid_rows + far_rows\n # rows += guess_rows\n\n import pandas as pd\n import numpy as np\n table = pd.DataFrame(rows)\n\n # Ensure comparable units\n units = {\n 'rad': mrem_h,\n 'distance': m,\n 'capture_time': s,\n }\n for key, unit in units.items():\n table[key] = table[key].apply(lambda c: c.to(unit).m)\n table['rad'] = table['rad'].astype(float)\n table['distance'] = table['distance'].astype(float)\n\n # Weight each measurement based on the amount of time the measurement was\n # sustained in the video.\n average_rad_rows = []\n for vid, group in table.groupby('vid'):\n from statsmodels.stats.weightstats import DescrStatsW\n weights = (-1 * group['capture_time'].diff(periods=-1).fillna(0)) / group['capture_time'].iloc[-1]\n table.loc[group.index, 'weight'] = weights\n values = group['rad']\n weighted_stats = DescrStatsW(values, weights=weights, ddof=0)\n dists = group['distance'].unique()\n assert len(dists) == 1\n average_rad_rows.append({\n 'vid': vid,\n 'distance': dists[0],\n 'rad_mean': weighted_stats.mean,\n 'rad_std': weighted_stats.std,\n })\n stats_table = pd.DataFrame(average_rad_rows)\n\n bg_row = stats_table.loc[stats_table['distance'].argmax()]\n fg_row = stats_table.loc[stats_table['distance'].argmin()]\n\n # -------------------\n 
ADD_DUMMY_VALUES = 0\n if ADD_DUMMY_VALUES:\n # Hack: because we don't have enough samples we can fudge the value\n # knowning that the value should be the background radiation in the\n # limit.\n\n dummy_measurements = []\n extra_support = 1\n for idx in range(3, 3 + extra_support):\n dummy_row = {\n 'vid': -idx,\n 'distance': bg_row['distance'] + idx,\n 'rad_mean': bg_row['rad_mean'],\n 'rad_std': 0.01,\n }\n dummy_measurements.append(dummy_row)\n\n # also add an extra value close to the sample\n rad_bg = bg_row['rad_mean']\n rad_above_bg = fg_row['rad_mean'] - rad_bg\n dummy_row = {\n 'vid': -1,\n 'distance': fg_row['distance'] / 2,\n 'rad_mean': rad_bg + (rad_above_bg * 4),\n 'rad_std': 0.5,\n }\n dummy_measurements.append(dummy_row)\n\n # dummy_row = {\n # 'vid': -2,\n # 'distance': fg_row['distance'] / 4,\n # 'rad_mean': rad_bg + (rad_above_bg * 16),\n # }\n # dummy_measurements.append(dummy_row)\n\n dummy_stats = pd.DataFrame(dummy_measurements)\n dummy_stats['weight'] = 0.5\n stats_table['weight'] = 1.0\n stats_table2 = pd.concat([stats_table, dummy_stats]).reset_index(drop=True).sort_values('distance')\n else:\n stats_table2 = stats_table\n # -------------------\n\n import scipy\n scipy.optimize.curve_fit\n\n # Because we know the radiation should follow an inverse square law wrt to\n # distance, we can fit a polynomial of degree 2 (parabola) to interpolate /\n # extrapolate the **inverse** values.\n x = stats_table2['distance'].values\n y = stats_table2['rad_mean'].values\n s = stats_table2['rad_std'].values\n\n # Model the squared falloff directly\n def invsquare(x, a, b):\n return a * (1 / (0.01 + x ** 2)) + b\n # bg_row['rad_mean']\n # Use curve_fit to constrain the first coefficient to be zero\n try:\n coef = scipy.optimize.curve_fit(invsquare, x, y, sigma=s, method='trf')[0]\n except Exception as ex:\n coef = None\n print(f'ex={ex}')\n\n # Also fit one to the raw weighted points as a sanity check\n # inv_poly2 = Polynomial.fit(table['distance'], 1 / table['rad'], w=table['weight'], deg=2)\n\n import kwplot\n sns = kwplot.autosns()\n plt = kwplot.autoplt()\n # ax = sns.boxplot(data=table, x='distance', y='rad', width=0.1)\n\n # Add in points to show each observation\n ax = sns.relplot(x=\"distance\", y=\"rad\", data=table, size=4, color=\".3\",\n linewidth=0, alpha=0.5, palette='deep')\n\n ax = plt.gca()\n ax.set_xlabel('distance from sample ({})'.format(str(units['distance'])))\n ax.set_ylabel('radiation dosage ({})'.format(str(units['rad'])))\n\n max_meters = 10\n\n extrap_x = np.linspace(0, max_meters, 1000)\n if coef is not None:\n extrap_y1 = invsquare(extrap_x, *coef)\n # extrap_y2 = 1 / inv_poly2(extrap_x)\n ax.plot(stats_table2['distance'].values, stats_table2['rad_mean'].values, 'rx')\n ax.plot(stats_table['distance'].values, stats_table['rad_mean'].values, 'bo')\n ax.plot(extrap_x, extrap_y1, '--')\n ax.set_ylim(0.001, 0.1)\n ax.set_yscale('log')\n # ax.plot(extrap_x, extrap_y2, '--')", "def analytic(self):\r\n data = self.input.data\r\n sampling_rate = self.input.sampling_rate\r\n\r\n a_signal =\\\r\n ts.TimeSeries(data=np.zeros(self.freqs.shape + data.shape,\r\n dtype='D'), sampling_rate=sampling_rate)\r\n if self.freqs.ndim == 0:\r\n w = self.wavelet(self.freqs, self.sd,\r\n sampling_rate=sampling_rate, ns=5,\r\n normed='area')\r\n\r\n # nd = (w.shape[0] - 1) / 2\r\n a_signal.data[...] 
= (np.convolve(data, np.real(w), mode='same') +\r\n 1j * np.convolve(data, np.imag(w), mode='same'))\r\n else:\r\n for i, (f, sd) in enumerate(zip(self.freqs, self.sd)):\r\n w = self.wavelet(f, sd, sampling_rate=sampling_rate,\r\n ns=5, normed='area')\r\n\r\n # nd = (w.shape[0] - 1) / 2\r\n a_signal.data[i, ...] = (\r\n np.convolve(data, np.real(w), mode='same') +\r\n 1j * np.convolve(data, np.imag(w), mode='same'))\r\n\r\n return a_signal", "def handle_sound_int(sid, timeslot, sd):\n sd[ds.color_map[sid]] = sd[ds.color_map[sid]] + [timeslot]", "def find_ts(uh_t):\n input_interval = uh_t[1]-uh_t[0]\n log.debug('Input Timestep = %i seconds' % input_interval)\n return input_interval", "def calculate_timestamp(self):\n return ((self.calculate_record_number() - 1) * SAMPLE_RATE) + \\\n self.time_on", "def get_integration_times( self ):\n if self.inst is not None:\n low = 0.01/ self.inst.line_freq\n else:\n low = 0.01/ 50\n \n high = 1\n \n return ( low, high )", "def returnGlobalTimer(self):\n self.globalTime = (time.time() - self.globalStartRef) + self.addedTime #Reports time in minutes, addedTime is for population reboot.\n return self.globalTime/ 60.0", "def timer_handler():\r\n \r\n global elapsed_time\r\n elapsed_time += 1", "def generate_singlesine(time = 0, samples_nb = 1000, rep_frequency = 10 , pulse_frequency = 50, amplitude = 1 , edge = 1, phase_offset = 0, noise = 0):\r\n\r\n\tif edge not in [0,1]:\r\n\t\tprint(colorama.Back.RED + colorama.Style.BRIGHT + \"ERROR: invalid phase (either 0 for a rising or a 1 for a falling edge) , exit.\"+ colorama.Style.NORMAL + colorama.Back.RESET)\r\n\t\t# Return code for error (empty input file):\r\n\t\tsys.exit(10)\r\n\r\n\r\n\t#Creating empty lists for t and y\r\n\tt = np.zeros(samples_nb)\r\n\r\n\tif noise == 0:\r\n\t\ty = np.zeros(samples_nb)\r\n\telse:\r\n\t\ty = np.random.normal(0, noise, samples_nb)\r\n\r\n\t#Determining the interval limits of t\r\n\tt_limit =1/float(rep_frequency*2)\r\n\r\n\t#Updating the t interval\r\n\tt = np.arange(-samples_nb/2,samples_nb/2)/float(samples_nb*rep_frequency) + 1/float(samples_nb*rep_frequency)\r\n\r\n\r\n\t#calculating the time_shift\r\n\t#delta_t = phase_offset/(2*np.pi*pulse_frequency)\r\n\tdelta_t = phase_offset/(2*np.pi*rep_frequency)\r\n\r\n\t#Setting the pulse amplitude\r\n\ta_pulse = amplitude\r\n\tif edge == 1:\r\n\t\ta_pulse *= -1\r\n\r\n\t#Calculating the pulse limits\r\n\tp_limit = 1/float(2*pulse_frequency)\r\n\tp_interval = list ([-p_limit,p_limit])\r\n\r\n\r\n\tfor n in range (0,len(t)) :\r\n\t\tif (t[n] + delta_t) > p_interval[0] and (t[n] + delta_t) <= p_interval[1]:\r\n\t\t\ty[n] += a_pulse * np.sin(2*np.pi*pulse_frequency*(t[n]+delta_t))\r\n\r\n\r\n\r\n\t#plt.plot(t,y)\r\n\t#plt.show()\r\n\r\n\tresult = {}\r\n\tresult ['time'] = time\r\n\tresult ['t'] = t\r\n\tresult ['y'] = y\r\n\r\n\treturn result", "def increment_time(self, **kwargs):\n \n #Pull all optional keyword arguements\n if 'timerange' in kwargs:\n timerange = kwargs.pop('timerange')\n else:\n timerange = 7\n \n if 'display' in kwargs:\n displayflag = kwargs.pop('display')\n else:\n displayflag = 1\n \n if 'auto' in kwargs:\n autoflag = kwargs.pop('auto')\n else:\n autoflag = 0\n \n if 'triggered' in kwargs:\n triggered_rules = kwargs.pop('triggered')\n else:\n triggered_rules = []\n \n #Run simulation one day at a time until specified end point is reached\n count = range(0,timerange)\n for i in count:\n \n \n #Increment one day if at least one infected person remains. 
If not, end the simulation\n if self.SD_Map.IPop.value() > 1:\n time = self.timeSeries[-1]\n self.timeSeries.append(time+1)\n self.SD_Map.update_all(self.timestep(), len(self.timeSeries)-2)\n else:\n print('Done!')\n \n #Update the time display\n self.timev.set(self.timeSeries[-1])\n \n #Add any triggered rules to the rule log display\n if triggered_rules != []:\n day_text = self.translate('Day')+' ' + str(self.timeSeries[-1]) \n rule_text = '; ' + self.translate('Rules') + ': ' + str(triggered_rules)[1:-1]\n log_text = day_text + rule_text\n self.list_info_boxes['Log'].insert(tk.END, log_text)\n \n #If appropriate, update all of the graphs\n if displayflag == 1:\n if self.arrangment == ['Map', 'Graph']:\n index = 2\n invertflag = 1\n else:\n index = 0\n invertflag = 0\n \n #Select all of the graphs\n canvaslist = []\n for entrylist in self.graph_canvas_list:\n for entry in entrylist:\n canvaslist.append(entry)\n\n #For each graph, delete it and replace it with an update graph\n for canvas in canvaslist:\n if index < 2:\n col = 0\n inputindex = index\n self.figures[index].clear()\n plt.close(self.figures[index])\n else:\n col = 1\n inputindex = index - 2\n if invertflag:\n self.figures[inputindex].clear()\n plt.close(self.figures[inputindex])\n else:\n self.figures[index].clear()\n plt.close(self.figures[index])\n \n #Make new graph\n framename = canvas.get_tk_widget().master\n canvas.get_tk_widget().destroy()\n graph = self.translate(self.graph_setting_list[col][inputindex].get(),\n input_language=self.language,\n output_language='english')\n canvas,fig = self.make_graph(framename, graph,\n gridpos = inputindex*2+1)\n self.graph_canvas_list[col][inputindex]=canvas\n \n #Update figures list\n if invertflag:\n self.figures[inputindex] = fig\n else:\n self.figures[index] = fig\n index += 1", "def integrate(self, t):", "def queriesInEachHour(self):\n hours = 0\n\n #prints out each element (with number of DB Queries) of array\n while hours < 24:\n print (hours,'to',hours+1, ' : ', self.arrayOfTimes[hours])\n hours += 1", "def calculateQueriesPerHour(self):\n\n self.totalTime = (self.lastTime - self.firstTime) #deltatime\n print ('Total Time Difference:', self.totalTime)\n print ('Total Number of Queries:', self.numberOfQueries)\n\n #divides number of queries by number of seconds\n self.queriesPerSecond = float(self.numberOfQueries)/(self.totalTime.total_seconds())\n\n #converts queries/sec to queries/hour\n self.queriesPerHour = self.queriesPerSecond*3600\n\n print ('Total Queries per Hour:', self.queriesPerHour, 'queries/hour \\n')\n\n return self.queriesPerHour", "async def subscribe(self, payload):\n\n time = payload['inline_params']\n\n if not time:\n await self.__call__(payload)\n return\n\n result = self.sdk.scheduler.find(payload['chat'])\n if result and result['hour'] == time:\n await self.sdk.send_text_to_chat(\n payload[\"chat\"],\n \"Вы уже подписаны на ежедневный дайджест в {}:00\".format(time)\n )\n else:\n payload['command'] = 'today'\n self.sdk.scheduler.remove(payload['chat'])\n self.sdk.scheduler.add(\n CommandStatistics(self.sdk).stats,\n chat_id=str(payload['chat']),\n hour=time,\n args=[payload]\n )\n await self.sdk.send_text_to_chat(\n payload[\"chat\"],\n \"Вы успешно подписались на ежедневный дайджест в {}:00\".format(time)\n )", "def setIntegrationTime(self, timeInMs):\n self.sendCommand(cmdBytes = b'\\x02',\n payloadBytes = pack('<H',int(timeInMs)))", "def setIntegrationTime(self, timeInMs):\n self.sendCommand(cmdBytes = b'\\x02',\n payloadBytes = 
pack('<L',int(timeInMs*self.timeScale)))", "def tick(self):\n if self.start > 0:\n self.time -= 1\n else:\n self.time += 1\n if self.time < 0:\n self.timer.stop()\n if self.signal_params is None:\n self.time_out.emit()\n else:\n self.time_out[object].emit(self.signal_params)\n else:\n self.showInterval()", "def timed_recipes():\n time = request.args.get('time', 0, type=int) #raw input from HTML page\n global time_global\n time_global = time #sets global time to inputted time, for use in search function\n return jsonify(cooktime=time_global) #returns a confirmation of the input tiime", "def twr(begin_ts, ts, current_ts):\n\n begin_diff = ts - begin_ts\n diff = current_ts - begin_ts\n if diff == 0:\n normalized = 1\n else:\n normalized = Decimal(begin_diff) / Decimal(diff)\n twr = 1 / (1 + Decimal.exp(Decimal(-12) * normalized + Decimal(2) + ((1 - Metrics.TIME_RANGE) * 10)))\n return twr", "def get_time(self) -> int:\n t = str(self.eval(\"pyb.RTC().datetime()\").encode(\"utf-8\"))[1:-1].split(\", \")\n return int(t[4]) * 3600 + int(t[5]) * 60 + int(t[6])", "def main():\n # Create a new instance of a high pass filter, using the default constructor\n hpf = GRT.HighPassFilter()\n\n # Set the cutoff frequency of the filter to 2.0Hz\n hpf.setCutoffFrequency(2, 1.0 / 1000.0)\n\n # Create some variables to help generate the signal data\n num_seconds = 6 # The number of seconds of data we want to generate\n t = 0 # This keeps track of the time\n t_step = 1.0 / 1000.0 # This is how much the time will be updated at each iteration in the for loop\n\n # Add the freq rates\n # The first value is the time in seconds and the second value is the frequency that should be set at that time\n freq_rates = {0: 0.1, 1: 0.5, 2: 1, 3: 2, 4: 4, 5: 8, 6: 16}\n\n # Generate the signal and filter the data\n for i in range(num_seconds * 1000):\n # Check to see if we should update the freq rate to the next value\n # Set the new frequency value\n freq = [v for (k, v) in freq_rates.items() if k > (i / 1000)][0]\n\n # Generate the signal\n signal = math.sin(t * math.tau * freq)\n\n # Filter the signal\n filtered_value = hpf.filter(signal)\n\n # Print the signal and the filtered data\n print(\"%.3f %.3f %.3f\" % (freq, signal, filtered_value))\n\n # Update the t\n t += t_step\n\n # Save the HighPassFilter settings to a file\n hpf.save(\"HighPassFilterSettings.grt\")\n\n # We can then load the settings later if needed\n hpf.load(\"HighPassFilterSettings.grt\")", "def set_total_time(self, total_time):\n self._total_time = total_time", "def time_slot(self):\n pass", "def lookback_time(self, z):\n\n # Calculate the integrand.\n def f(z1):\n return 1.0 / (self.H(z1) * (1 + z1))\n\n return _intf_0_z(f, z) / self._unit_time", "def clock_callback(data):\n global current_second\n current_second = data.clock.secs", "def dt_estimate_scanlines(self, sensor=\"vnir\"):\n dtdelta = datetime.timedelta(seconds=8, milliseconds=849)\n scanlines = {\"vnir\": 4200, \"swir\": 2100, \"tir\": 700}\n\n return np.linspace(-0.5, 0.5, scanlines[sensor]) * dtdelta + self.datetime", "def calculate():\n con = mdb.connect(constants.sql_.IP, constants.sql_.USER, constants.sql_.PASS,\n constants.sql_.DB)\n# dicti = {}\n liste = mdb_get_table(constants.sql_tables.cron.name)\n# with con:\n# cur = con.cursor()\n# sql = 'SELECT * FROM '+constants.sql_tables.cron.name\n# cur.execute(sql)\n# results = cur.fetchall()\n# field_names = [i[0] for i in cur.description]\n# j = 0\n# for row in results:\n# for i in range(0, len(row)):\n# dicti[field_names[i]] = row[i]\n# 
liste.append(dicti)\n# dicti = {}\n# j = j + 1\n# con.close\n time = localtime()\n HOME.date = strftime(\"%Y-%m-%d 00:00:00\", time)\n # check for daylight saving\n if getattr(localtime(), 'tm_isdst') > 0:\n delta = 2\n else:\n delta = 1\n sunrise = ((HOME.next_rising(ephem.Sun())).datetime() +\n datetime.timedelta(hours=delta, minutes=0, seconds=0))\n sunset = ((HOME.next_setting(ephem.Sun())).datetime() +\n datetime.timedelta(hours=delta, minutes=0, seconds=0))\n for eintrag in liste:\n dynamic = False\n for setting in eintrag:\n if setting == \"Sonne\" and str(eintrag.get(\"Sonne\")) <> \"None\":\n dynamic = True\n if str(eintrag.get(\"Sonne\")) == \"rise\":\n time = sunrise.replace(second=0)\n else:\n time = sunset.replace(second=0)\n elif setting == \"Rohtime\" and str(eintrag.get(\"Rohtime\")) <> \"None\":\n dynamic = True\n time = eintrag.get(\"Rohtime\")\n for setting in eintrag:\n if setting == \"offset\" and str(eintrag.get(\"offset\")) <> \"None\":\n time = time + datetime.timedelta(hours=0, minutes=int(eintrag.get(\"offset\")),\n seconds=0)\n if setting == \"Zufall\" and str(eintrag.get(\"Zufall\")) <> \"None\":\n time = (time +\n datetime.timedelta(hours=0,\n minutes=random.randrange(int(eintrag.get(\"Zufall\"))),\n seconds=0))\n if dynamic:\n with con:\n #time = time - datetime.timedelta(seconds=int(str(time)[6:]))\n cur = con.cursor()\n sql = ('UPDATE %s SET Time = \"%s\" WHERE Id = \"%s\"'\n % (constants.sql_tables.cron.name, str(time), str(eintrag.get(\"Id\"))))\n cur.execute(sql)\n con.close\n return True", "def set_als_integration_time(self, wtime):\n if not (2.78 <= wtime <= 712):\n raise ValueError(\"The integration time must be in range [2.78 - 712] millis.\")\n\n value = 256 - int(wtime / 2.78)\n self.write_byte_data(value, APDS_9960.ALS_ATIME_REG_ADDRESS)", "def write_time_millis_int(self, dt: time) -> None:\n self.write_int(int(time_object_to_micros(dt) / 1000))", "def main():\n current_time = datetime.datetime.now()\n is_night = False\n\n while True:\n sleep(HOUR_DURATION)\n current_time += datetime.timedelta(hours=HOUR_DURATION)\n light_changed = False\n\n if (current_time.hour >= NIGHT_STARTS or current_time.hour < DAY_STARTS) and not is_night:\n is_night = True\n light_changed = True\n elif DAY_STARTS <= current_time.hour < NIGHT_STARTS and is_night:\n is_night = False\n light_changed = True\n\n if light_changed:\n if is_night:\n write_file_and_screen(\"Se ha hecho de noche\", \"horas.txt\")\n else:\n write_file_and_screen(\"Se ha hecho de dia\", \"horas.txt\")\n\n write_file_and_screen(\"La hora actual es: {}\".format(current_time), \"horas.txt\")", "def post_time(self, amt):\n amtOfTime = amt + 1\n Publisher().sendMessage(\"update\", amtOfTime)" ]
[ "0.5954411", "0.5676104", "0.55020225", "0.52751565", "0.49494395", "0.49394286", "0.4906087", "0.4901808", "0.48571083", "0.48567733", "0.4848392", "0.48170355", "0.48030984", "0.47985277", "0.47943923", "0.4758018", "0.47353464", "0.47334984", "0.472768", "0.47261432", "0.47198805", "0.47147274", "0.46909198", "0.46778172", "0.46755457", "0.46734017", "0.46678668", "0.46600062", "0.46512067", "0.46509477", "0.46452746", "0.46416885", "0.4634023", "0.46309504", "0.45961705", "0.4594608", "0.45848092", "0.4562015", "0.45460424", "0.45426643", "0.4541049", "0.45378003", "0.4535978", "0.45326445", "0.45324406", "0.45309585", "0.45293078", "0.45290393", "0.4527821", "0.45191157", "0.451001", "0.4504263", "0.44976494", "0.44938758", "0.44929066", "0.4492256", "0.44909433", "0.44892594", "0.44885582", "0.4476264", "0.44715104", "0.4469779", "0.44696927", "0.44592226", "0.4453923", "0.44522455", "0.44496712", "0.44489056", "0.44410133", "0.4436967", "0.44335973", "0.44268396", "0.4426382", "0.44255266", "0.44230342", "0.4422988", "0.44220275", "0.44205162", "0.44180802", "0.44046348", "0.44038135", "0.43999872", "0.43964213", "0.43909624", "0.43909317", "0.4388085", "0.43875894", "0.4385177", "0.4378976", "0.43788636", "0.4372872", "0.43726194", "0.43721607", "0.43697643", "0.43674183", "0.43586954", "0.4358614", "0.43582487", "0.43582326", "0.43574804", "0.4356769" ]
0.0
-1
It calculates the uv_map for the uv coverage.
def get_uv_coverage(Nbase, z, ncells, boxsize=None):
    if not boxsize:
        boxsize = c2t.conv.LB
    uv_map = np.zeros((ncells,ncells))
    theta_max = c2t.conv.LB/c2t.z_to_cdist(z)
    for p in xrange(Nbase.shape[0]):
        i,j,k = np.round(Nbase[p,0]*theta_max),np.round(Nbase[p,1]*theta_max),np.round(Nbase[p,2]*theta_max)
        if np.abs(i)<ncells:
            if np.abs(j)<ncells:
                uv_map[int(i),int(j)] += 1
    return uv_map
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def uvmap(self, p):\n pass", "def uvmap(self, p):\n # bottom left corner of the plane\n p00 = self.position - (self.sx * self.n0) / 2 - (self.sy * self.n1) / 2\n dif_vector = p - p00\n u = np.dot(dif_vector, self.n0) / self.sx\n v = np.dot(dif_vector, self.n1) / self.sy\n return u, v", "def polyUVCoverage(*args, uvRange: List[float, float, float, float]=None,\n **kwargs)->List[float]:\n pass", "def uvmap(self, p):\n # local_v is the unit vector that goes in the direction from the center\n # of the sphere to the position p\n local_v = (p - self.position) / self.radius\n n0, n1, n2 = self.get_orientation()\n x = np.dot(n0, local_v)\n y = np.dot(n1, local_v)\n z = np.dot(n2, local_v)\n # phi = np.arccos(z)\n # v = phi / np.pi\n # theta = np.arccos((y / np.sin(phi)).round(4))\n # if x < 0:\n # theta = 2 * np.pi - theta\n # u = theta / (2 * np.pi)\n u = 0.5 + np.arctan2(z, x) / (2 * np.pi)\n v = 0.5 - np.arcsin(y) / np.pi\n v = 1 - v\n return u, v", "def map(self):\n map_rupture(self)", "def map(self, mapunit):\n\n #The number of bands to measure the LF for\n if len(mapunit['luminosity'].shape)>1:\n self.nbands = mapunit['luminosity'].shape[1]\n else:\n mapunit['luminosity'] = np.atleast_2d(mapunit['luminosity']).T\n self.nbands = 1\n\n #If only measuring for centrals, get the appropriate\n #rows of the mapunit\n\n mu = {}\n if self.central_only:\n delete_after_map = True\n for k in mapunit.keys():\n mu[k] = mapunit[k][mapunit['central']==1]\n else:\n delete_after_map = False\n mu = mapunit\n\n #Want to count galaxies in bins of luminosity for\n #self.nbands different bands in self.nzbins\n #redshift bins\n if self.lumcounts is None:\n self.lumcounts = np.zeros((self.njack, len(self.magbins)-1,\n self.nbands, self.nzbins))\n\n #Assume redshifts are provided, and that the\n #mapunit is sorted in terms of them\n \n if self.lightcone:\n for i, z in enumerate(self.zbins[:-1]):\n zlidx = mu['redshift'].searchsorted(self.zbins[i])\n zhidx = mu['redshift'].searchsorted(self.zbins[i+1])\n\n #Count galaxies in bins of luminosity\n for j in range(self.nbands):\n if not self.CMASS:\n c, e = np.histogram(mu['luminosity'][zlidx:zhidx,j],\n bins=self.magbins)\n else:\n cidx = self.selectCMASS(mu['appmag'][zlidx:zhidx])\n c, e = np.histogram(mu['luminosity'][zlidx:zhidx,j][cidx],\n bins=self.magbins)\n \n self.lumcounts[self.jcount,:,j,i] += c\n else:\n for j in range(self.nbands):\n if not self.CMASS:\n c, e = np.histogram(mu['luminosity'][:,j],\n bins=self.magbins)\n else:\n cidx = self.selectCMASS(mu['appmag'][:])\n c, e = np.histogram(mu['luminosity'][:,j][cidx],\n bins=self.magbins)\n \n self.lumcounts[self.jcount,:,j,0] += c\n\n if delete_after_map:\n True", "def reportNormalizedRawDataMap(self):\n copy_map = defaultdict(list)\n for para in self.block_map:\n offset = self.offset_map[para]\n for i in xrange(len(self.block_map[para]) - 1):\n start, var, block = self.block_map[para][i]\n span = self.block_map[para][i + 1][0] - start\n if var is not None:\n copy_map[para].append([start + offset, span, block.adjustedCount / len(block.getVariables())])\n prevVar = block.adjustedCount / len(block.getVariables())\n else:\n copy_map[para].append([start + offset, span, prevVar])\n finalStart, finalVar, finalBlock = self.block_map[para][-1]\n finalSpan = self.G.sizes[para] - finalStart\n if finalVar is not None:\n copy_map[para].append([finalStart + offset, finalSpan, block.adjustedCount / len(block.getVariables())])\n else:\n copy_map[para].append([finalStart + offset, finalSpan, prevVar])\n return copy_map", 
"def uvgrid(self):\n if self.baselines_type != \"grid_centres\":\n ugrid = np.linspace(-self.uv_max, self.uv_max, self.n_uv + 1) # +1 because these are bin edges.\n return (ugrid[1:] + ugrid[:-1]) / 2\n else:\n # return the uv\n return self.baselines", "def _get_addr_coverage(self):\n logger.info('Generating translation block coverage information')\n\n tb_files = get_tb_files(self.project_path('s2e-last'))\n tb_coverage_files = aggregate_tb_files(tb_files)\n ret = {}\n\n # Get the number of times each address was executed by S2E\n for module_path, coverage in tb_coverage_files.iteritems():\n addr_counts = {}\n\n for start_addr, _, size in coverage:\n # TODO: it's better to use an interval map instead\n for byte in xrange(0, size):\n addr = start_addr + byte\n # The coverage files we get do not indicate how many times an bb has been\n # executed. It's more of an approximation of how many times\n # the block was translated. To avoid confusion, always set execution\n # count to 1.\n addr_counts[addr] = 1\n\n ret[module_path] = addr_counts\n\n return ret", "def update_cnt_map(self,s):\r\n cnts = []\r\n num_grid = self.cnt_map.shape[0]*self.cnt_map.shape[1]\r\n old_coverage =num_grid- self.cnt_map.flatten().tolist().count(0)\r\n for sj in s:\r\n grid_s = self.get_gridState(sj)\r\n self.cnt_map[grid_s[0], grid_s[1]] += 1\r\n cnts.append(self.cnt_map[grid_s[0], grid_s[1]])\r\n\r\n self.map_coverage = num_grid - self.cnt_map.flatten().tolist().count(0)\r\n print(\"Coverage:\",self.map_coverage)\r\n print(\"Change of coverage:\",self.map_coverage-old_coverage)\r\n\r\n return cnts", "def _compute_maps_data(self, feature1, feature2, samples):\n # TODO Refactor:\n\n # Reshape the data as ndimensional array. But account for the lower and upper bins.\n coverage_data = np.zeros(shape=(feature1.num_cells, feature2.num_cells), dtype=int)\n misbehaviour_data = np.zeros(shape=(feature1.num_cells, feature2.num_cells), dtype=int)\n\n coverage_outer_data = np.zeros(shape=(feature1.num_cells + 2, feature2.num_cells + 2), dtype=int)\n misbehaviour_outer_data = np.zeros(shape=(feature1.num_cells + 2, feature2.num_cells + 2), dtype=int)\n\n for sample in samples:\n\n # Coordinates reason in terms of bins 1, 2, 3, while data is 0-indexed\n x_coord = feature1.get_coordinate_for(sample, is_outer_map=False) - 1\n y_coord = feature2.get_coordinate_for(sample, is_outer_map=False) - 1\n\n # Increment the coverage cell\n coverage_data[x_coord, y_coord] += 1\n\n # Increment the misbehaviour cell\n if sample.is_misbehavior():\n misbehaviour_data[x_coord, y_coord] += 1\n\n # Outer Maps\n x_coord = feature1.get_coordinate_for(sample, is_outer_map=True) - 1\n y_coord = feature2.get_coordinate_for(sample, is_outer_map=True) - 1\n\n # Increment the coverage cell\n coverage_outer_data[x_coord, y_coord] += 1\n\n # Increment the misbehaviour cell\n if sample.is_misbehavior():\n misbehaviour_outer_data[x_coord, y_coord] += 1\n\n return coverage_data, misbehaviour_data, coverage_outer_data, misbehaviour_outer_data", "def hashdict(self):\n return {\n 'pix': super(rmap, self).hashdict(),\n 'map': hashlib.sha1(self.map.view(np.uint8)).hexdigest()\n }", "def map():", "def test_basic(self):\n scale_factor = 1.0\n expected = self.cube_uv_down.data.copy()\n result = calculate_uv_index(self.cube_uv_down, scale_factor)\n self.assertArrayEqual(result.data, expected)", "def _origin_map(self) -> Dict[Type[BaseSpriteLoader], chunk_map_type]:", "def map(self):\r\n pass", "def uv_bounds(self):\n umin, umax, vmin, vmax = 
breptools_UVBounds(self.topods_shape())\n bounds = Box(np.array([umin, vmin]))\n bounds.encompass_point(np.array([umax, vmax]))\n return bounds", "def _gtBinmap(self):\n if os.path.isfile(self.outbinmap) and (not self.clobber):\n print(\"\\t=== '{}' already exists ===\".format(self.outbinmap))\n return\n else:\n if not os.path.isfile(self.outmktime):\n self._gtMktime()\n\n # Image width must be comprised within the acceptance cone\n imWidth = int( np.floor(self.rad* 2**(0.5)) ) # deg\n imWipix = int(imWidth / self.binsz)\n\n # Coordinate system\n if self.csys == 'GAL':\n center_icrs = SkyCoord(ra=self.ra*u.degree, dec=self.dec*u.degree, frame='icrs')\n self.ra = center_icrs.galactic.l.deg\n self.dec = center_icrs.galactic.b.deg\n\n os.popen(\"gtbin evfile={} scfile=none outfile={} algorithm=CMAP emin={}\\\n emax={} nxpix={} nypix={} binsz={} coordsys={} xref={} yref={} axisrot=0\\\n proj=AIT\".format(self.outmktime, self.outbinmap, self.emin, self.emax,\n imWipix, imWipix, self.binsz, self.csys, self.ra, self.dec))\n\n if self.csys == 'GAL':\n self.ra = center_icrs.ra.deg\n self.dec = center_icrs.dec.deg\n return", "def GetOutTextureCoord(self):\n ...", "def draw_uv_info(context, layout):\n obj = context.object\n\n def warning_no_uvmap(_layout):\n _layout.label(\"No UV map\", icon=icons.WARNING)\n\n if obj and obj.data:\n if obj.type in {\"CURVE\", \"SURFACE\", \"FONT\"}:\n if not obj.data.use_uv_as_generated:\n row = layout.row()\n warning_no_uvmap(row)\n row.prop(obj.data, \"use_uv_as_generated\", toggle=True, text=\"Enable UV\")\n elif obj.type == \"MESH\":\n if len(obj.data.uv_textures) > 1:\n box = layout.box()\n box.label(\"LuxCore only supports one UV map\", icon=icons.INFO)\n active_uv = find_active_uv(obj.data.uv_textures)\n box.label('Active: \"%s\"' % active_uv.name, icon=\"GROUP_UVS\")\n elif len(obj.data.uv_textures) == 0:\n row = layout.row()\n warning_no_uvmap(row)\n row.operator(\"mesh.uv_texture_add\")\n else:\n warning_no_uvmap(layout)", "def get_uv_positions(data, image_size, target_grid, up_vector, right_vector, tile_xy, verts, vtx_center):\n\n return get_uv_pos_size(data, image_size, target_grid, tile_xy,\n target_grid.grid[0], target_grid.grid[1],\n up_vector, right_vector,\n verts, vtx_center)", "def test_get_voltage_maps(self):\n pass", "def get_uvd(self):\n return {\"u\": self.__u,\n \"v\": self.__v,\n \"d\": self.__d}", "def get_map(self):\n return self.get_raw_ys()", "def _do_mapping(self):\n pass", "def getUVIndex(self):\n\t\tval = grovepi.analogRead(self.uv_sensor)\n\t\tillumination_intensity = val*307\n\t\tuv_index = illumination_intensity/float(200)\n\t\treturn uv_index", "def conclusion_summary_map(self):\n pass", "def end_cast(self):\r\n #draw the actual map\r\n self.emap.draw(shader=self.mshader, camera=self.camera)\r\n super(ShadowCaster, self)._end()\r\n # set third texture to this ShadowCaster texture\r\n texs = self.emap.buf[0].textures\r\n if len(texs) == 2:\r\n texs.append(self)\r\n else:\r\n texs[2] = self\r\n # change background back to blue\r\n opengles.glClearColor(ctypes.c_float(0.4), ctypes.c_float(0.8), \r\n ctypes.c_float(0.8), ctypes.c_float(1.0))\r\n # work out left, top, right, bottom for shader\r\n self.emap.unif[48] = 0.5 * (1.0 + self.scaleu) # left [16][0]\r\n self.emap.unif[49] = 0.5 * (1.0 + self.scalev) # top [16][1]\r\n self.emap.unif[51] = 1.0 - self.emap.unif[48] # right [17][0]\r\n self.emap.unif[52] = 1.0 - self.emap.unif[49] # bottom [17][1]\r\n \r\n du = float(self.location[0] / self.emap.width)\r\n dv = 
float(self.location[2] / self.emap.depth)\r\n self.emap.unif[48] -= self.scaleu * (du if self.emap.unif[50] == 1.0 else dv)\r\n self.emap.unif[49] += self.scalev * (dv if self.emap.unif[50] == 1.0 else du)\r\n self.emap.unif[51] -= self.scaleu * (du if self.emap.unif[50] == 1.0 else dv)\r\n self.emap.unif[52] += self.scalev * (dv if self.emap.unif[50] == 1.0 else du)", "def _standard_mapping(self):\n mapping_raw = scipy.io.loadmat(join(self.dataset_dir, 'scripts/mapping.mat'))\n self.camvidMap = mapping_raw['camvidMap'] * 255\n self.cityscapesMap = mapping_raw['cityscapesMap'] * 255", "def applyMapping(self):\n pass", "def coverage(self):\n _min_value = (0,0)\n _max_value = (0,0)\n \n if self.objects:\n _min_value=(min(self.objects.values(), key=lambda o: o[0])[0], \\\n min(self.objects.values(), key=lambda o: o[1])[1])\n \n _max_value=(max(self.objects.values(), key=lambda o: o[0])[0],\\\n max(self.objects.values(), key=lambda o: o[1])[1])\n\n self.width = _max_value[0]-_min_value[0]\n self.height = _max_value[1]-_min_value[1]\n \n return (_min_value, _max_value)", "def test_post_voltage_maps(self):\n pass", "def return_map(self):\n idx1 = self.y[1]>np.pi\n idx2 = self.y[1]<np.pi\n \n if np.sum(idx1) > 0:\n self.y[1][idx1] = self.y[1][idx1]-np.floor((self.y[1][idx1]+np.pi)/(2*np.pi))*2*np.pi\n if np.sum(idx2) > 0:\n self.y[1][idx2] = self.y[1][idx2]-np.ceil((self.y[1][idx2]-np.pi)/(2*np.pi))*2*np.pi", "def to_world(self, uv):\n return self._projective_transform(self.A, uv)", "def scale_uv(self):\n self.u = [i * self.scale * self.scaleratio for i in self.u]\n self.v = [i * self.scale for i in self.v]", "def complete_mapping(self):\r\n\r\n self._reset_map()\r\n #position_prey = self.prey.position\r\n #self.complete_map[position_prey[1], position_prey[0]] = 1.0\r\n position_body = [part.position for part in self.body]\r\n\r\n for position in position_body:\r\n self.complete_map[position[1], position[0]] = 1\r\n\r\n return self.complete_map", "def compute_filecoverage(self):\n result = dict()\n for filename, fns in self.point_symbol_info.items():\n file_points = []\n for fn, points in fns.items():\n file_points.extend(points.keys())\n covered_points = self.covered_points & set(file_points)\n result[filename] = int(math.ceil(\n len(covered_points) * 100 / len(file_points)))\n return result", "def _update_farness_map(self,ind):", "def _createMap(self):\n width = self.map_size[0] * self.chunk_size\n height = self.map_size[1] * self.chunk_size\n map_array = np.zeros((height, width), dtype=float)\n chunks = {}\n clist = []\n for i in range(0, self.map_size[0]*self.map_size[1]):\n chunks[i+1] = Chunk(self)\n chunk_array = np.asarray(list(chunks.keys()))\n chunk_array.resize(self.map_size[0], self.map_size[1])\n return map_array, chunk_array, chunks", "def get_coverage(self):\n coverage = np.zeros(self.Set.Shape, dtype=np.int8)\n for ig in self.Set:\n igram = self.load_ma(ig)\n coverage[~igram.mask] += 1\n\n return coverage", "def uv_map_face(context, up_vector, right_vector, tile_xy, origin_xy, face_index, mesh, tile_size=(1, 1)):\n if mesh is None:\n return None, None\n\n scene = context.scene\n obj = context.object\n data = scene.sprytile_data\n\n grid_id = obj.sprytile_gridid\n target_grid = sprytile_utils.get_grid(context, grid_id)\n\n uv_layer = mesh.loops.layers.uv.verify()\n\n if face_index >= len(mesh.faces):\n return None, None\n\n target_img = sprytile_utils.get_grid_texture(obj, target_grid)\n if target_img is None:\n return None, None\n\n face = mesh.faces[face_index]\n if face.hide:\n 
return None, None\n\n vert_origin = context.object.matrix_world @ face.calc_center_bounds()\n verts = []\n for loop in face.loops:\n vert = loop.vert\n verts.append(context.object.matrix_world @ vert.co)\n\n tile_start = [tile_xy[0], tile_xy[1]]\n if tile_size[0] > 1 or tile_size[1] > 1:\n tile_start[0] -= tile_size[0]\n tile_start[1] -= tile_size[1]\n\n size_x = tile_size[0] * target_grid.grid[0]\n size_y = tile_size[1] * target_grid.grid[1]\n\n uv_verts = get_uv_pos_size(data, target_img.size,\n target_grid, tile_start,\n size_x, size_y,\n up_vector, right_vector,\n verts, vert_origin)\n\n if uv_verts is None:\n return None, None\n\n apply_uvs(context, face, uv_verts,\n target_grid, mesh, data,\n target_img, tile_xy,\n origin_xy=origin_xy,\n uv_layer=uv_layer)\n\n return face.index, target_grid", "def get_file_mod_stats_for_upstream_refs(file_name, mod_stats_map):\n with open(file_name) as f:\n lines = f.readlines()\n upstream_ref = None\n upstream_start_line = None\n for line_number, line in enumerate(lines):\n if REGION_START_TAG in line:\n tag, ref_name = _extract_tag_and_ref_name_from_line(line, False)\n if REGION_UPSTREAM_TAG in tag:\n upstream_ref = ref_name\n upstream_start_line = line_number\n elif REGION_END_TAG in line and upstream_ref:\n mod_stats = mod_stats_map[upstream_ref]\n mod_stats.mod_count += 1\n mod_stats.line_count += line_number - upstream_start_line - 1\n upstream_ref = None\n upstream_start_line = None", "def geo_transform(self):\n pass", "def test_get_voltage_map_item(self):\n pass", "def calculate_offset_mapping(self):\n self.quad_offset_mapping = {\n 'forward': direction_arr[self.direction_mod_offset % len(self.direction_arr)],\n 'right': direction_arr[(self.direction_mod_offset + 1) % len(self.direction_arr)],\n 'backward': direction_arr[(self.direction_mod_offset + 2) % len(self.direction_arr)],\n 'left': direction_arr[(self.direction_mod_offset + 3) % len(self.direction_arr)],\n }", "def test_map_overview_accuracy(self):\n params = [10000, 5, 10, 15]\n height = 100\n width = 200\n world_map = gen.generate_map(height=height, width=width, params=params)\n image = img.get_map_overview(world_map)\n pixels = image.load()\n for x in range(width):\n for y in range(height):\n color = tuple(img.get_color(world_map[x][y]))\n self.assertEqual(pixels[x, y], color)", "def uv_tcoords(u_resolution, v_resolution, pd):\n u0 = 1.0\n v0 = 0.0\n du = 1.0 / (u_resolution - 1)\n dv = 1.0 / (v_resolution - 1)\n num_pts = pd.GetNumberOfPoints()\n t_coords = vtk.vtkFloatArray()\n t_coords.SetNumberOfComponents(2)\n t_coords.SetNumberOfTuples(num_pts)\n t_coords.SetName('Texture Coordinates')\n pt_id = 0\n u = u0\n for i in range(0, u_resolution):\n v = v0\n for j in range(0, v_resolution):\n tc = [u, v]\n t_coords.SetTuple(pt_id, tc)\n v += dv\n pt_id += 1\n u -= du\n pd.GetPointData().SetTCoords(t_coords)\n return pd", "def uv_at_xy(self, x, y, x0, y0, s0):\n dx, dy = self.distance(x0, y0, x, y)\n #print 'dx, dy:', dx, dy\n rr2 = (dx**2 + dy**2)**-1\n u = - s0 * dy * r_twopi * rr2\n v = s0 * dx * r_twopi * rr2\n #print 'u, v', u, v\n return u, v", "def compute_map(self):\n number_of_orders = 0\n orders = []\n for i, line in enumerate(self.__grid):\n for j, column in enumerate(line):\n if self.__grid[i][j][\"humans\"] != 0:\n number_of_orders += 1\n orders.append(i)\n orders.append(j)\n orders.append(self.__grid[i][j][\"humans\"])\n orders.append(0)\n orders.append(0)\n if self.__grid[i][j][\"vampires\"] != 0:\n number_of_orders += 1\n orders.append(i)\n orders.append(j)\n 
orders.append(0)\n orders.append(self.__grid[i][j][\"vampires\"])\n orders.append(0)\n if self.__grid[i][j][\"werewolves\"] != 0:\n number_of_orders += 1\n orders.append(i)\n orders.append(j)\n orders.append(0)\n orders.append(0)\n orders.append(self.__grid[i][j][\"werewolves\"])\n return number_of_orders, orders", "def dft_map(input_map, input_uv):\n m, n = input_map.shape\n size = m * n\n vis = np.zeros(input_uv.shape[1], dtype=complex)\n\n x = Visibility.generate_xy(m, 1)\n y = Visibility.generate_xy(n, 1)\n\n x, y = np.meshgrid(x, y)\n x = x.reshape(size)\n y = y.reshape(size)\n\n for i in range(size):\n vis[i] = np.sum(\n input_map.reshape(size) * np.exp(\n -2j * np.pi * (input_uv[0, i] * x + input_uv[1, i] * y)))\n\n return vis", "def totals_map():\n totals_map = [*map(sum,poke_stats)]\n\n return(totals_map)", "def turbine_map(self):\n return self.flow_field.turbine_map", "def find_coverage(self, zoom):\n # Find a group of adjacent available tiles at this zoom level\n rows = self.mbtiles_cursor.execute('''SELECT tile_column, tile_row FROM tiles WHERE zoom_level=? ORDER BY tile_column, tile_row;''', (zoom,))\n tile = rows.fetchone()\n xmin, ymin = tile\n tile_prev = tile\n while tile and tile[0] - tile_prev[0] <= 1:\n # adjacent, go on\n tile_prev = tile\n tile = rows.fetchone()\n xmax, ymax = tile_prev\n # Transform (xmin, ymin) (xmax, ymax) to pixels\n tile_size = self.tilesize\n bottomleft = (xmin * tile_size, (ymax + 1) * tile_size)\n topright = ((xmax + 1) * tile_size, ymin * tile_size)\n # Convert center to (lon, lat)\n mercator = GlobalMercator(self.tms_osm,tile_size,[zoom])\n return mercator.unproject_pixels(bottomleft, zoom) + mercator.unproject_pixels(topright, zoom)", "def prepare_map(self):\n for y, row in enumerate(self.contents):\n for x, tile in enumerate(row):\n bm = self.get_tile(tile)\n self.image[\n y * TILE_SIZE : (y + 1) * TILE_SIZE,\n x * TILE_SIZE : (x + 1) * TILE_SIZE,\n ] = bm", "def population_results_map():\n start_time = time()\n fig= Figure(figsize=(60,52), frameon=True, tight_layout=True)\n ax = fig.add_subplot(1,1,1, axisbg='#EEEEEE')\n ax.grid(color='white', linestyle='solid')\n rstyle(ax)\n\n queryset = Unit.objects.all()\n # It might be faster to request a flat value list and then construct new tuples based on that\n latlong = [(u.latitude, u.longitude, \n u.unitstats.cumulative_infected, \n u.unitstats.cumulative_vaccinated,\n u.unitstats.cumulative_destroyed,\n u.unitstats.cumulative_zone_focus, \n u.initial_size,\n ) if hasattr(u, \"unitstats\") else\n (u.latitude, u.longitude, -1, -1, -1, -1, u.initial_size)\n for u in queryset]\n total_iterations = float(len(list_of_iterations()))\n latitude, longitude, infected, vaccinated, destroyed, zone_focus, herd_size = zip(*latlong)\n zone_blues, red_infected, green_vaccinated = define_color_mappings()\n \n graph_zones(ax, latitude, longitude, total_iterations, zone_blues, zone_focus)\n graph_states(ax, latitude, longitude, total_iterations, infected, vaccinated, destroyed)\n \n neutral_longitude = [entry[1] for entry in latlong if not any(x > 0 for x in (entry[2], entry[3], entry[4]))]\n neutral_latitude = [entry[0] for entry in latlong if not any(x > 0 for x in (entry[2], entry[3], entry[4]))]\n # to ensure zero occurrences has a different color\n uninvolved = ax.scatter(neutral_longitude,\n neutral_latitude,\n marker='s',\n s=[min(max(0.25, size / 100), 1000) for size in herd_size],\n color=(0.2, 0.2, 0.2, 1.0),\n zorder=1000)\n Results.graphing.crop_to_fit_map(ax)\n print(\"Population Map took %i 
seconds\" % int(time() - start_time))\n return fig", "def edge_mapping(self):\n ...", "def _uv_to_xy(self, uv: np.ndarray) -> np.ndarray:\n xy = (uv - (self.imgsz * 0.5 + self.c)) * (1 / self.f)\n xy = self._undistort(xy)\n return xy", "def map(z):\n pass", "def data_mapping(image, statistics, method):\n if method == \"normalize\":\n image = transforms.Normalize( statistics[\"mapped_mean\"] , \n statistics[\"mapped_std\"] )(image) \n elif method == \"map_zero_one\":\n for ch in range(image.shape[0]):\n a = statistics[\"lower_bound\"][ch]\n b = statistics[\"upper_bound\"][ch]\n image[ch,:,:] = map_zero_one(image[ch,:,:], a, b)\n elif method == \"map_minus_one_to_one\":\n for ch in range(image.shape[0]):\n a = statistics[\"lower_bound\"][ch]\n b = statistics[\"upper_bound\"][ch]\n image[ch,:,:] = map_minus_one_to_one(image[ch,:,:], a, b)\n else:\n raise Exception('Wrong mapping function')\n return image", "def create_single_map(self,tod,x,y,x0,y0):\n maps = {'map':np.zeros((self.Nx*self.Ny)),\n 'cov':np.zeros((self.Nx*self.Ny))}\n\n\n pixels,xp,yp,r_x, r_y = self.get_pixel_positions(x,y,x0,y0,0,invertx=True)\n mask = np.ones(pixels.size,dtype=int)\n\n mask[(pixels == -1) | np.isnan(tod) | np.isinf(tod)] = 0\n rms = stats.AutoRMS(tod)\n weights = {'map':tod.astype(np.float64)/rms**2,\n 'cov':np.ones(tod.size)/rms**2}\n for k in maps.keys():\n binFuncs.binValues(maps[k],\n pixels,\n weights=weights[k],mask=mask)\n maps[k] = np.reshape(maps[k],(self.Ny,self.Nx))\n return maps", "def testStepBuildStatsMap(self):\n self._StringToMapHelper(data_types.StepBuildStatsMap, data_types.BuildStats)", "def map(self):\n return self.map_digis(self.group)", "def synthetic_gen(self):\r\n logging.debug('generating synthetic map...')\r\n data = self.realData\r\n unit = Params.unitGrid\r\n x_min = np.floor(Params.LOW[0] / unit) * unit\r\n x_max = np.ceil(Params.HIGH[0] / unit) * unit\r\n y_min = np.floor(Params.LOW[1] / unit) * unit\r\n y_max = np.ceil(Params.HIGH[1] / unit) * unit\r\n\r\n x_CELL = int(np.rint((x_max - x_min) / unit))\r\n y_CELL = int(np.rint((y_max - y_min) / unit))\r\n\r\n self.root.n_box = np.array([[x_min, y_min], [x_max, y_max]])\r\n\r\n self.mapp = np.zeros((x_CELL, y_CELL)) - 1 # ## initialize every cell with -1\r\n for i in range(Params.NDATA): # ## populate the map\r\n point = data[:, i]\r\n cell_x = int(np.floor((point[0] - x_min) / unit))\r\n cell_y = int(np.floor((point[1] - y_min) / unit))\r\n if self.mapp[cell_x, cell_y] != -1:\r\n self.mapp[cell_x, cell_y] += 1\r\n else:\r\n self.mapp[cell_x, cell_y] = 1\r\n\r\n for i in range(x_CELL): # ## perturb the counts\r\n for j in range(y_CELL):\r\n if self.mapp[i, j] != -1:\r\n self.mapp[i, j] += np.rint(self.differ.getNoise(1, 0.5 * self.param.Eps))\r\n else:\r\n self.mapp[i, j] = np.rint(self.differ.getNoise(1, 0.5 * self.param.Eps))\r\n # if noisy count is negative, ignore the noise and generate no points\r\n if self.mapp[i, j] < 0:\r\n self.mapp[i, j] = 0", "def components_map(self):\r\n raise NotImplementedError", "def process_data(self):\n\n # direct and opposite mappings for items\n idpool = itertools.count(start=1)\n FVMap = collections.namedtuple('FVMap', ['dir', 'opp'])\n self.fvmap = FVMap(dir={}, opp={})\n\n # mapping features to ids\n for i in range(len(self.names) - 1):\n feats = sorted(self.feats[i])\n\n # try to rangify this feature\n if self.intvs and len(feats) > len(self.intvs) + 1:\n feats = self.rangify(feats, i)\n self.feats[i] = set(feats)\n\n if len(feats) != 2:\n for l in feats:\n self.fvmap.dir[(self.names[i], 
l)] = next(idpool)\n else:\n var = next(idpool)\n self.fvmap.dir[(self.names[i], feats[0])] = var\n self.fvmap.dir[(self.names[i], feats[1])] = -var\n\n # use ranges for updating samples\n if self.vimap:\n for i, s in enumerate(self.samps):\n self.samps[i] = [self.vimap[j][v] if j in self.vimap and v != '' else v for j, v in enumerate(s)]\n\n # recomputing the weights\n counter = collections.Counter()\n for s, w in zip(self.samps, self.wghts):\n counter[tuple(s)] += w\n\n self.samps = []\n self.wghts = []\n for s, w in six.iteritems(counter):\n self.samps.append(list(s))\n self.wghts.append(w)\n\n # all labels are marked with distinct ids\n for l in sorted(self.feats[-1]):\n self.fvmap.dir[(self.names[-1], l)] = next(idpool)\n\n # opposite mapping\n for key, val in six.iteritems(self.fvmap.dir):\n self.fvmap.opp[val] = key\n\n # encoding samples and filtering out features with only 1 value\n for i in range(len(self.samps)):\n self.samps[i] = [self.fvmap.dir[(self.names[j], self.samps[i][j])] for j in range(len(self.samps[i])) if self.samps[i][j] and len(self.feats[j]) > 1]\n\n # determining feature variables (excluding class variables)\n for v, pair in six.iteritems(self.fvmap.opp):\n if pair[0] == self.names[-1]:\n self.fvars = v - 1\n break", "def rasterize(self, uv: np.ndarray, values: np.ndarray) -> np.ndarray:\n mask = self.inframe(uv)\n a = np.full((self.imgsz[1], self.imgsz[0]), np.nan)\n helpers.rasterize_points(\n uv[mask, 1].astype(int), uv[mask, 0].astype(int), values[mask], a=a\n )\n return a", "def __init__(self, counts, hpx):\n super(HpxMap, self).__init__(counts)\n self._hpx = hpx\n self._wcs2d = None\n self._hpx2wcs = None", "def test_unit_conversion(self):\n self.cube_uv_down.convert_units(\"kW m-2\")\n scale_factor = 1.0\n expected = np.full_like(\n self.cube_uv_down.data, dtype=np.float32, fill_value=0.1\n )\n result = calculate_uv_index(self.cube_uv_down, scale_factor)\n self.assertArrayEqual(result.data, expected)", "def map( self ) :\n\n self.readMap( )\n\n return( self.__map )", "def target_mapping(self):\n\n map_list = []\n self.bin_tracking_array = self.seg_analyzer.bin_tracking_array\n self.log.info(\"Spawning {0} jobs to begin building Target_Bed_Map_Array for permutation analysis.\"\n .format(self.args.Spawn))\n\n p = pathos.multiprocessing.Pool(int(self.args.Spawn))\n for lst in p.starmap(self.sub_target_mapping,\n zip(itertools.repeat(self.bin_tracking_array), itertools.repeat(self.target_bed_array),\n itertools.repeat(self.args), self.seg_analyzer.chrom_list)):\n\n map_list.extend(lst)\n\n map_list.sort(key=lambda x: x[0])\n\n if eval(self.args.Map_File):\n self.log.info(\"Writing Map File\")\n file_data = \"\"\n map_file = open(\"{0}{1}_{2}_mapfile.txt\"\n .format(self.args.Working_Folder, self.args.Job_Name, self.args.Cell_Name), 'w')\n map_file.write(\"Chrom\\tstart\\tstop\\trefBinID\\ttargetBinID\\ttargetCount\\n\")\n\n for row in sorted(map_list, key=itemgetter(0)):\n\n coord_start = int(self.bin_tracking_array[self.bin_tracking_array[:, 0] == row[0]][0, 2])\n coord_stop = int(self.bin_tracking_array[self.bin_tracking_array[:, 0] == row[0]][0, 3])\n chrom = self.bin_tracking_array[self.bin_tracking_array[:, 0] == row[0]][0, 1].decode()\n r_count = len(row[1])\n file_data += (\"{0}\\t{1}\\t{2}\\t{3}\\t{4}\\t{5}\\n\"\n .format(chrom, coord_start, coord_stop, row[0], row[1], r_count))\n\n map_file.write(file_data)\n map_file.close()\n self.log.info(\"Map File Written\")\n\n self.log.info(\"Target_Bed_Map_Array built.\")\n return numpy.array(map_list, 
dtype='object')", "def prepare_map(self):\n for y_coord, row in enumerate(self.contents):\n for x_coord, tile in enumerate(row):\n bit_map = self.get_tile_bitmap(tile)\n self.image[y_coord * TILE_SIZE:(y_coord+1) * TILE_SIZE,\n x_coord * TILE_SIZE:(x_coord+1) * TILE_SIZE] = bit_map", "def undistort_rectify_map(self):\n return cv.initUndistortRectifyMap(self._k, self._dist, np.eye(3), self._k, self.frame_size[::-1], cv.CV_16SC2)", "def _create_color_map(self):\n unique_labels = np.unique(self.out_labels)\n color_map = {}\n for unique_label in unique_labels:\n color_map[unique_label] = self._random_color()\n\n return color_map", "def _xy_to_uv(self, xy: np.ndarray) -> np.ndarray:\n xy = self._distort(xy)\n uv = xy * self.f + (self.imgsz / 2 + self.c)\n return uv", "def build_r_map(input_file: str, output_file: str, threshold: float):\n\n DataSiPM = db.DataSiPMsim_only('petalo', 0) # full body PET\n DataSiPM_idx = DataSiPM.set_index('SensorID')\n\n try:\n sns_response = pd.read_hdf(input_file, 'MC/sns_response')\n except ValueError:\n print(f'File {input_file} not found')\n exit()\n except OSError:\n print(f'File {input_file} not found')\n exit()\n except KeyError:\n print(f'No object named MC/sns_response in file {input_file}')\n exit()\n print(f'Analyzing file {input_file}')\n\n sel_df = rf.find_SiPMs_over_threshold(sns_response, threshold)\n\n particles = pd.read_hdf(input_file, 'MC/particles')\n hits = pd.read_hdf(input_file, 'MC/hits')\n events = particles.event_id.unique()\n\n true_r1, true_r2 = [], []\n var_phi1, var_phi2 = [], []\n var_z1, var_z2 = [], []\n\n touched_sipms1, touched_sipms2 = [], []\n\n for evt in events:\n\n ### Select photoelectric events only\n evt_parts = particles[particles.event_id == evt]\n evt_hits = hits [hits .event_id == evt]\n select, true_pos = mcf.select_photoelectric(evt_parts, evt_hits)\n if not select: continue\n\n sns_resp = sel_df[sel_df.event_id == evt]\n if len(sns_resp) == 0: continue\n\n _, _, pos1, pos2, q1, q2 = rf.assign_sipms_to_gammas(sns_resp, true_pos, DataSiPM_idx)\n\n if len(pos1) > 0:\n pos_phi = rf.from_cartesian_to_cyl(np.array(pos1))[:,1]\n _, var_phi = rf.phi_mean_var(pos_phi, q1)\n\n pos_z = np.array(pos1)[:,2]\n mean_z = np.average(pos_z, weights=q1)\n var_z = np.average((pos_z-mean_z)**2, weights=q1)\n r = np.sqrt(true_pos[0][0]**2 + true_pos[0][1]**2)\n\n var_phi1 .append(var_phi)\n var_z1 .append(var_z)\n touched_sipms1.append(len(pos1))\n true_r1 .append(r)\n\n else:\n var_phi1 .append(1.e9)\n var_z1 .append(1.e9)\n touched_sipms1.append(1.e9)\n true_r1 .append(1.e9)\n\n if len(pos2) > 0:\n pos_phi = rf.from_cartesian_to_cyl(np.array(pos2))[:,1]\n _, var_phi = rf.phi_mean_var(pos_phi, q2)\n\n pos_z = np.array(pos2)[:,2]\n mean_z = np.average(pos_z, weights=q2)\n var_z = np.average((pos_z-mean_z)**2, weights=q2)\n r = np.sqrt(true_pos[1][0]**2 + true_pos[1][1]**2)\n\n var_phi2 .append(var_phi)\n var_z2 .append(var_z)\n touched_sipms2.append(len(pos2))\n true_r2 .append(r)\n\n else:\n var_phi2 .append(1.e9)\n var_z2 .append(1.e9)\n touched_sipms2.append(1.e9)\n true_r2 .append(1.e9)\n\n a_true_r1 = np.array(true_r1)\n a_true_r2 = np.array(true_r2)\n a_var_phi1 = np.array(var_phi1)\n a_var_phi2 = np.array(var_phi2)\n a_var_z1 = np.array(var_z1)\n a_var_z2 = np.array(var_z2)\n\n a_touched_sipms1 = np.array(touched_sipms1)\n a_touched_sipms2 = np.array(touched_sipms2)\n\n\n np.savez(output_file, a_true_r1=a_true_r1, a_true_r2=a_true_r2, a_var_phi1=a_var_phi1, a_var_phi2=a_var_phi2, a_var_z1=a_var_z1, a_var_z2=a_var_z2, 
a_touched_sipms1=a_touched_sipms1, a_touched_sipms2=a_touched_sipms2)", "def calculate_MAP(self):\n testing_images = open('./digitdata/testimages', 'r')\n with testing_images as ti:\n data = list(csv.reader(ti))\n data = [i for i in data if i]\n count = 0\n #loop through all the test images\n for j in range(0,1000):\n classification_dict = {0:0,1:0,2:0,3:0,4:0,5:0,6:0,7:0,8:0,9:0} \n for l in range(0,28):\n coord = count + l\n for w in range(0,28):\n if data[coord][0][w] == \"+\":\n #iterate through each class. z is the class [0-9]\n for z in range(0,10):\n classification_dict[z] += math.log(self.class_probabilities[z][l][w][0]) \n elif data[coord][0][w] == \"#\":\n for z in range(0,10):\n classification_dict[z] += math.log(self.class_probabilities[z][l][w][1])\n elif data[coord][0][w] == \" \":\n for z in range(0,10):\n classification_dict[z] += math.log(self.class_probabilities[z][l][w][2])\n count += 28\n self.solutions.append(max(classification_dict, key=classification_dict.get))", "def apply_uvs(mesh, bsp_verts):\n\n mesh.uv_textures.new(\"UVs\")\n bm = bmesh.new()\n bm.from_mesh(mesh)\n\n if hasattr(bm.faces, \"ensure_lookup_table\"): \n bm.faces.ensure_lookup_table()\n\n uv_layer = bm.loops.layers.uv[0]\n\n for face_idx, current_face in enumerate(bm.faces):\n current_face.loops[0][uv_layer].uv = bsp_verts[current_face.loops[0].vert.index][1]\n current_face.loops[1][uv_layer].uv = bsp_verts[current_face.loops[1].vert.index][1]\n current_face.loops[2][uv_layer].uv = bsp_verts[current_face.loops[2].vert.index][1]\n \n bm.to_mesh(mesh)", "def init_coverage(self, coverage_data, frag_dist=None):\n\n # TODO this section is also quite slow and will need further investigation\n # If we're only creating a vcf, skip some expensive initialization related to coverage depth\n if not self.only_vcf:\n (self.window_size, gc_scalars, target_cov_vals) = coverage_data\n gc_cov_vals = [[]] * self.ploidy\n tr_cov_vals = [[]] * self.ploidy\n avg_out = []\n self.coverage_distribution = []\n for i in range(self.ploidy):\n # Combined a couple of lines for this. 
I'm trying to divorce these calculations from the cigar creation\n if len(self.sequences[i]) - self.read_len > 0:\n max_coord = len(self.sequences[i]) - self.read_len\n else:\n max_coord = len(self.sequences[i])\n\n # compute gc-bias\n j = 0\n while j + self.window_size < len(self.sequences[i]):\n gc_c = self.sequences[i][j:j + self.window_size].count('G') + \\\n self.sequences[i][j:j + self.window_size].count('C')\n gc_cov_vals[i].extend([gc_scalars[gc_c]] * self.window_size)\n j += self.window_size\n gc_c = self.sequences[i][-self.window_size:].count('G') + \\\n self.sequences[i][-self.window_size:].count('C')\n gc_cov_vals[i].extend([gc_scalars[gc_c]] * (len(self.sequences[i]) - len(gc_cov_vals[i])))\n\n # Targeted values\n tr_cov_vals[i].append(target_cov_vals[0])\n\n prev_val = self.fm_pos[i][0]\n for j in range(1, max_coord):\n if self.fm_pos[i][j] is None:\n tr_cov_vals[i].append(target_cov_vals[prev_val])\n elif self.fm_span[i][j] - self.fm_pos[i][j] <= 1:\n tr_cov_vals[i].append(target_cov_vals[prev_val])\n else:\n tr_cov_vals[i].append(sum(target_cov_vals[self.fm_pos[i][j]:self.fm_span[i][j]]) / float(\n self.fm_span[i][j] - self.fm_pos[i][j]))\n prev_val = self.fm_pos[i][j]\n # Debug statement\n # print(f'({i, j}), {self.all_cigar[i][j]}, {self.fm_pos[i][j]}, {self.fm_span[i][j]}')\n\n # shift by half of read length\n if len(tr_cov_vals[i]) > int(self.read_len / 2.):\n tr_cov_vals[i] = [0.0] * int(self.read_len // 2) + tr_cov_vals[i][:-int(self.read_len / 2.)]\n # fill in missing indices\n tr_cov_vals[i].extend([0.0] * (len(self.sequences[i]) - len(tr_cov_vals[i])))\n\n #\n coverage_vector = np.cumsum([tr_cov_vals[i][nnn] *\n gc_cov_vals[i][nnn] for nnn in range(len(tr_cov_vals[i]))])\n coverage_vals = []\n # TODO if max_coord is <=0, this is a problem\n for j in range(0, max_coord):\n coverage_vals.append(coverage_vector[j + self.read_len] - coverage_vector[j])\n # Below is Zach's attempt to fix this. 
The commented out line is the original\n # avg_out.append(np.mean(coverage_vals) / float(self.read_len))\n avg_out.append(np.mean(coverage_vals)/float(min([self.read_len, max_coord])))\n # Debug statement\n # print(f'{avg_out}, {np.mean(avg_out)}')\n\n if frag_dist is None:\n # Debug statement\n # print(f'++++, {max_coord}, {len(self.sequences[i])}, '\n # f'{len(self.all_cigar[i])}, {len(coverage_vals)}')\n self.coverage_distribution.append(DiscreteDistribution(coverage_vals, range(len(coverage_vals))))\n\n # fragment length nightmare\n else:\n current_thresh = 0.\n index_list = [0]\n for j in range(len(frag_dist.cum_prob)):\n if frag_dist.cum_prob[j] >= current_thresh + COV_FRAGLEN_PERCENTILE / 100.0:\n current_thresh = frag_dist.cum_prob[j]\n index_list.append(j)\n flq = [frag_dist.values[nnn] for nnn in index_list]\n if frag_dist.values[-1] not in flq:\n flq.append(frag_dist.values[-1])\n flq.append(LARGE_NUMBER)\n\n self.fraglen_ind_map = {}\n for j in frag_dist.values:\n b_ind = bisect.bisect(flq, j)\n if abs(flq[b_ind - 1] - j) <= abs(flq[b_ind] - j):\n self.fraglen_ind_map[j] = flq[b_ind - 1]\n else:\n self.fraglen_ind_map[j] = flq[b_ind]\n\n self.coverage_distribution.append({})\n for flv in sorted(list(set(self.fraglen_ind_map.values()))):\n buffer_val = self.read_len\n for j in frag_dist.values:\n if self.fraglen_ind_map[j] == flv and j > buffer_val:\n buffer_val = j\n max_coord = min([len(self.sequences[i]) - buffer_val - 1,\n len(self.all_cigar[i]) - buffer_val + self.read_len - 2])\n # print 'BEFORE:', len(self.sequences[i])-buffer_val\n # print 'AFTER: ', len(self.all_cigar[i])-buffer_val+self.read_len-2\n # print 'AFTER2:', max_coord\n coverage_vals = []\n for j in range(0, max_coord):\n coverage_vals.append(\n coverage_vector[j + self.read_len] - coverage_vector[j] + coverage_vector[j + flv] -\n coverage_vector[\n j + flv - self.read_len])\n\n # EXPERIMENTAL\n # quantized_cov_vals = quantize_list(coverage_vals)\n # self.coverage_distribution[i][flv] = \\\n # DiscreteDistribution([n[2] for n in quantized_cov_vals],\n # [(n[0], n[1]) for n in quantized_cov_vals])\n\n # TESTING\n # import matplotlib.pyplot as mpl\n # print len(coverage_vals),'-->',len(quantized_cov_vals)\n # mpl.figure(0)\n # mpl.plot(range(len(coverage_vals)), coverage_vals)\n # for qcv in quantized_cov_vals:\n # mpl.plot([qcv[0], qcv[1]+1], [qcv[2],qcv[2]], 'r')\n # mpl.show()\n # sys.exit(1)\n\n self.coverage_distribution[i][flv] = DiscreteDistribution(coverage_vals,\n range(len(coverage_vals)))\n\n return np.mean(avg_out)", "def __call__(self, map_in: np.ndarray) -> np.ndarray:\n return hp.map2alm(maps=map_in, lmax=self.n_max, use_weights=self.use_weights, verbose=self.verbose)", "def __get_map_offsets(self):\n map = self.map.copy()\n map_up = np.zeros((self.h + 1, self.w), np.uint8) # create 4-neighbor connectivity comparision\n map_down = np.zeros((self.h + 1, self.w), np.uint8)\n map_right = np.zeros((self.h, self.w + 1), np.uint8)\n map_left = np.zeros((self.h, self.w + 1), np.uint8)\n map_up[1:, :] = map # paste mask onto it, 1 shifted\n map_down[:-1, :] = map\n map_right[:, :-1] = map\n map_left[:, 1:] = map\n map_up = np.delete(map_up, -1, 0) # delete the extra row/column\n map_down = np.delete(map_down, 0, 0)\n map_right = np.delete(map_right, 0, 1)\n map_left = np.delete(map_left, -1, 1)\n map_up[0, :] = 1 # set new cells (after the shift) to 1(walls) to eliminate false-positives\n map_down[-1, :] = 1\n map_right[:, -1] = 1\n map_left[:, 0] = 1\n return map_up, map_right, map_down, map_left", "def 
MAP(self):\n return self.__map", "def makeValMap(self,value = 'readcount'):\n self.valMap = np.zeros(len(self))\n self.valMap = self.valMap-1\n myTmp = []\n for x in range(0,len(self)):\n myTmp.append([])\n for i in self.children:\n for j in range(i.start,i.end+1):\n myTmp[j-self.start].append(i.__dict__[value])\n for nt in range(0,len(myTmp)):\n if len(myTmp[nt])>0:\n self.valMap[nt]=sum(myTmp[nt])/len(myTmp[nt])", "def test_metric_map_values(self):\n url = reverse(\"metrics\")\n client = APIClient()\n\n params = {\"source_type\": Provider.PROVIDER_OCP}\n url = url + \"?\" + urlencode(params, quote_via=quote_plus) + \"&limit=11\"\n response = client.get(url, **self.headers).data[\"data\"]\n self.assertEqual(len(COST_MODEL_METRIC_MAP), len(response))\n for metric in COST_MODEL_METRIC_MAP:\n self.assertIsNotNone(metric.get(\"source_type\"))\n self.assertIsNotNone(metric.get(\"metric\"))\n self.assertIsNotNone(metric.get(\"label_metric\"))\n self.assertIsNotNone(metric.get(\"label_measurement_unit\"))\n self.assertIsNotNone(metric.get(\"default_cost_type\"))", "def GetInTextureCoord(self):\n ...", "def _build_memorymap(self):\n\t\tmemorymap = {}\n\t\ttotalsize = 0\n\t\tbaserva = self.liststream64.DirectoryData.BaseRva\n\t\tmmdscrptr64 = self.liststream64.DirectoryData.MINIDUMP_MEMORY_DESCRIPTOR64\n\t\tnumberofmemoryranges = self.liststream64.DirectoryData.NumberOfMemoryRanges\n\t\tfor i in range(numberofmemoryranges):\n\t\t\tmemorymap[mmdscrptr64[i].StartOfMemoryRange] = ((baserva + totalsize),mmdscrptr64[i].DataSize)\n\t\t\ttotalsize += mmdscrptr64[i].DataSize\n\t\treturn memorymap", "def map_value(self) -> global___Expression.MapValue:", "def velocity_map(self, output='test'):\n self.figure = figure(figsize=(10,3))\n self.axes = self.figure.gca() \n xWindowLim = (self.analyst.windowSize[0], self.analyst.windowSize[1])\n yWindowLim = (self.analyst.windowSize[2], self.analyst.windowSize[3])\n \n # Generate contours for velocity magnitude \n xGrid = linspace(\\\n xWindowLim[0]*self.millimetersPerPixel, \n xWindowLim[1]*self.millimetersPerPixel, self.nbins)\n yGrid = linspace(\\\n yWindowLim[0]*self.millimetersPerPixel, \n yWindowLim[1]*self.millimetersPerPixel, self.nbins)\n magVelGrid = griddata(self.xs, self.ys, self.magVel, xGrid, yGrid) \n # csf = self.axes.contourf(xGrid, yGrid, magVelGrid, range(2,26,2), cmap=myColorMap)\n csf = self.axes.contourf(xGrid, yGrid, magVelGrid, cmap=myColorMap)\n cbar = self.figure.colorbar(csf) \n cbar.set_label(\"Velocity magnitude, px/s\")\n \n # Generate arrow plot\n # q = self.axes.quiver(self.xs, self.ys, self.us, self.vs,\n # angles = 'xy', scale_units='xy', scale=2, pivot = 'mid')\n # self.axes.quiverkey(q, 0.9, 1.0, 10, \"10 px/frame\", coordinates='axes') \n \n # Save figure \n self.axes.set_aspect('equal')\n self.axes.set_xlim(*xWindowLim)\n self.axes.set_ylim(*yWindowLim)\n self.figure.savefig(output + '_velocity_map.pdf')", "def uvregister(self,v):\n return self.get('patchmesh.uvvertices').intern(v)", "def uv(vec):\n return vec / sqrt(dot(vec, vec))", "def yuv(self):\n r, g, b = self.rgb\n y = 0.299 * r + 0.587 * g + 0.114 * b\n return (\n y,\n 0.492 * (b - y),\n 0.877 * (r - y),\n )", "def uvregister(self,v):\n return self.get('mesh.uvvertices').intern(v)", "def generateNeighborMap(self):\n A=[]\n for key,value in self._ts_dict.iteritems():\n A.append(np.array([i.replace(\"#\",\" \")\n .split()[0:4] for i in value.index])\n .astype(float))\n\n B=np.array(A[0]).reshape(len(A[0]),4)\n print (B[:,0]+B[:,1])/2\n A=[]\n for key,value in 
self._ts_dict.iteritems():\n A.append(value.sum(axis=1).values)\n print A", "def get_map_size(level):\n if level < 5:\n return 5, 5\n if level < 70:\n return 10, 10\n if level < 150:\n return 25, 25\n return 50, 50", "def positions(self, tileID, numSamples):", "def get_uv_value(self):\n return float(int(self.data[2]) / 10)", "def get_uv(u, v):\n uv = np.zeros((2, 2))\n uv[0][0] = u[0]\n uv[1][0] = u[1]\n uv[0][1] = v[0]\n uv[1][1] = v[1]\n return uv", "def GenerateMapAffinity(img,nb_vertex,pointsInterest,objects_centroid,scale):\n\n # Apply the downscale right now, so the vectors are correct. \n img_affinity = Image.new(img.mode, (int(img.size[0]/scale),int(img.size[1]/scale)), \"black\")\n # Create the empty tensors\n totensor = transforms.Compose([transforms.ToTensor()])\n\n affinities = []\n for i_points in range(nb_vertex):\n affinities.append(torch.zeros(2,int(img.size[1]/scale),int(img.size[0]/scale)))\n \n for i_pointsImage in range(len(pointsInterest)): \n pointsImage = pointsInterest[i_pointsImage]\n center = objects_centroid[i_pointsImage]\n for i_points in range(nb_vertex):\n point = pointsImage[i_points]\n affinity_pair, img_affinity = getAfinityCenter(int(img.size[0]/scale),\n int(img.size[1]/scale),\n tuple((np.array(pointsImage[i_points])/scale).tolist()),\n tuple((np.array(center)/scale).tolist()), \n img_affinity = img_affinity, radius=1)\n\n affinities[i_points] = (affinities[i_points] + affinity_pair)/2\n\n\n # Normalizing\n v = affinities[i_points].numpy() \n \n xvec = v[0]\n yvec = v[1]\n\n norms = np.sqrt(xvec * xvec + yvec * yvec)\n nonzero = norms > 0\n\n xvec[nonzero]/=norms[nonzero]\n yvec[nonzero]/=norms[nonzero]\n\n affinities[i_points] = torch.from_numpy(np.concatenate([[xvec],[yvec]]))\n affinities = torch.cat(affinities,0)\n\n return affinities", "def nominal_map(options):\n pass", "def test_put_voltage_map_item(self):\n pass", "def _get_map_pixel_size(self, width_page_m, height_page_m):\n return (int(m2px(width_page_m, self._resolution)),\n int(m2px(height_page_m, self._resolution)))" ]
[ "0.7532845", "0.6147069", "0.59362704", "0.59166545", "0.59061474", "0.5892625", "0.55217695", "0.5503293", "0.54990137", "0.5495749", "0.54743665", "0.5441941", "0.54397714", "0.54297936", "0.5418282", "0.53442216", "0.5343465", "0.5342541", "0.5336867", "0.5311103", "0.52925897", "0.52845997", "0.5262771", "0.52393043", "0.5234201", "0.52256036", "0.52246994", "0.51905626", "0.51774305", "0.516973", "0.5166104", "0.5152955", "0.5117829", "0.51077724", "0.5103383", "0.51023513", "0.50951296", "0.50790846", "0.5071805", "0.50672567", "0.5044311", "0.50048053", "0.49995387", "0.49995384", "0.4993516", "0.49863684", "0.49700427", "0.49659675", "0.49658054", "0.4963565", "0.49577844", "0.49565336", "0.49548647", "0.49514735", "0.49499956", "0.4922769", "0.49092853", "0.4906095", "0.4884021", "0.488335", "0.48832324", "0.48783547", "0.48734903", "0.4869733", "0.48622572", "0.48620316", "0.4861935", "0.4859816", "0.48486722", "0.48476326", "0.48446092", "0.48421302", "0.4840518", "0.48391914", "0.48277414", "0.48267627", "0.4825658", "0.48213094", "0.48181346", "0.48089683", "0.48070362", "0.48057", "0.47928366", "0.47907984", "0.4784854", "0.4783389", "0.47817037", "0.47806194", "0.47705814", "0.47697788", "0.47648674", "0.47635192", "0.47633293", "0.4762944", "0.47556368", "0.47535244", "0.47533008", "0.4738108", "0.47359255", "0.47317147" ]
0.6111474
2
It calculates the rms of the noise added by the interferometers of SKA.
def kanan_noise_image_ska(z, uv_map, depth_mhz, obs_time, N_ant_ska=564.): nuso = 1420.0/(1.0 + z) delnu = depth_mhz*1e3 # in kHz effective_baseline = np.sum(uv_map) T_sys_atnu300MHz= 60.0 #K T_sys = T_sys_atnu300MHz*(300.0/nuso)**2.55 ant_radius_ska = 35./2. #in m A_ant_ska = np.pi*ant_radius_ska*ant_radius_ska sigma = np.sqrt(2.0)*KB_SI*(T_sys/A_ant_ska)/np.sqrt((depth_mhz*1e6)*(obs_time*3600.0))/janskytowatt*1e3/np.sqrt(N_ant_ska*N_ant_ska/2.0) ## in mJy rms_noi = np.sqrt(2.0)*KB_SI/janskytowatt/1e3/600. *(T_sys/100.0)*(100.0/A_ant_ska)* np.sqrt(1000.0/delnu)*np.sqrt(100.0/obs_time)*1e3 sigma *= 1e3 #in muJy rms_noi *= 1e3 print 'Expected: rms in image in muJy per beam for full =', sigma print 'Effective baseline =', sigma*np.sqrt(N_ant_ska*N_ant_ska/2.0)/np.sqrt(effective_baseline), 'm' print 'Calculated: rms in the visibility =', rms_noi, 'muJy' return sigma, rms_noi
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def calculate_desired_noise_rms(clean_rms, snr):\n a = float(snr) / 20\n noise_rms = clean_rms / (10 ** a)\n return noise_rms", "def noiseReduction(self):\n pass", "def __mix_with_snr(self, sig_spk, sig_noise, need_snr):\n\n # Calc SNR\n pow_sp = np.sum((sig_spk) ** 2) / float(len(sig_spk))\n pow_noise = np.sum((sig_noise) ** 2) / float(len(sig_noise))\n actual_snr = 10 * np.log10(pow_sp / (pow_noise + self.eps))\n alfa = pow(10.0, (actual_snr - need_snr) / 20.0)\n sig_noise = sig_noise * alfa\n\n return sig_spk, sig_noise", "def __mix_with_snr(self, sig_spk, sig_noise, need_snr):\n\n # Calc SNR\n pow_sp = np.sum((sig_spk) ** 2) / float(len(sig_spk))\n pow_noise = np.sum((sig_noise) ** 2) / float(len(sig_noise))\n actual_snr = 10 * np.log10(pow_sp / (pow_noise + self.eps))\n alfa = pow(10.0, (actual_snr - need_snr) / 20.0)\n sig_noise = sig_noise * alfa\n\n return sig_spk, sig_noise", "def noise(self, freq: int, /) -> None:", "def noisePreset() :\n s.noisePreset()", "def get_noise(self):\n\n n = self.qubic.get_noise().ravel()\n n = np.r_[n, self.planck.get_noise().ravel()]\n\n return n", "def get_estimated_noise(self):\n raise NotImplementedError('Abstract Method.')", "def add_noise(self, snr, unit=None):\n return self.from_time(self.fs, noisify(self.in_time, snr, unit=unit))", "def snr(mag=20, itime=1., read=24.5, sky=8.43, npix=24., zero=26.44, dark=0.0):\n # 2009-02-20 14:40 IJC: Initiated\n \n star = itime * 10**(0.4*(zero-mag))\n noise = npix * (itime*(sky+dark)+read**2)\n\n return star * (star+noise)**-0.5", "def _calc(self):\r\n tot_sum: float = 0 # ? total sum of the noise values\r\n max_amp: float = 0 # ? keep the sum in [0,1]\r\n amp: float = 1.0 # ? amplitude of each noise value\r\n freq: float = 1.0 # ? frequency for getting the detailed noise\r\n\r\n # for each octave we twice the frequency and multiply the amplitude \r\n # by persistance to get the detailed noise value\r\n # to keep the final sum value in the range [0, 1] we keep track of the \r\n # max amplitude (sum of all the amplitudes)\r\n for octave in range(self.octaves):\r\n noise_obj = PerlinNoise(self.inp_x*freq, self.inp_y*freq, self.inp_z*freq)\r\n # ? 
multiply the noise value by the amplitude\r\n tot_sum += noise_obj.val() * amp\r\n max_amp += amp\r\n\r\n amp *= self.persist\r\n freq *= 2.0 # double the freq each iteration\r\n\r\n # value is in the range [0,1]\r\n self.value = tot_sum / max_amp", "def noiseAtten(atten) :\n s.noiseAtten(atten)", "def snr(self):\n\n return self.signal.astype(numpy.float32) / self.noise.astype(numpy.float32)", "def __call__(self, wav):\n beg_i = 0\n end_i = wav.shape[0]\n sel_noise = self.load_noise(self.sample_noise())\n if len(sel_noise) < len(wav):\n # pad noise\n P = len(wav) - len(sel_noise)\n sel_noise = np.pad(sel_noise, (0, P))\n # mode='reflect').view(-1).data.numpy()\n T = end_i - beg_i\n # TODO: not pre-loading noises from files?\n if len(sel_noise) > T:\n n_beg_i = np.random.randint(0, len(sel_noise) - T)\n else:\n n_beg_i = 0\n noise = sel_noise[n_beg_i:n_beg_i + T]\n # randomly sample the SNR level\n snr = random.choice(self.snr_levels)\n K, Ex, En = self.compute_SNR_K(wav, noise, snr)\n scaled_noise = K * noise\n if En > 0:\n noisy_wav = wav + scaled_noise\n noisy_wav = self.norm_energy(noisy_wav, Ex)\n else:\n noisy_wav = wav\n return noisy_wav", "def pink_noise():\n global curr_tick\n octave = octave_lookup[curr_tick]\n curr_noise[octave] = int(white_noise() / (5-octave))\n curr_tick += 1\n if curr_tick >= len(octave_lookup):\n curr_tick = 0\n return sum(curr_noise)", "def calculate_SNR(snid: int, photo_data: pd.DataFrame, \n head_data: pd.DataFrame, code_zenodo: int, \n snana_file_index: int, code_snana: int):\n \n types_names = {90: 'Ia', 62: 'Ibc', 42: 'II', 67: '91bg', 52: 'Iax',\n 64:'KN', 95: 'SLSN', 994: 'PISN', 992: 'ILOT', \n 993: 'CaRT', 15: 'TDE', 88: 'AGN', 92: 'RRL', \n 65: 'M-dw', 16: 'EB', 53: 'Mira', 991: 'BMicroL',\n 6: 'MicroL'}\n \n # LSST filters\n filters = [b'u ', b'g ', b'r ', b'i ', b'z ', b'Y ']\n\n flag_id_photo = photo_data['SNID'] == snid\n\n flux = photo_data[flag_id_photo]['FLUXCAL'].values\n fluxerr = photo_data[flag_id_photo]['FLUXCALERR'].values\n\n SNR_all = flux/fluxerr\n \n indx = np.random.choice(range(flux.shape[0]))\n\n flag_id_head = head_data['SNID'].values == snid\n redshift = head_data['SIM_REDSHIFT_CMB'].values[flag_id_head][0]\n \n # store values\n line = [snid, snana_file_index, code_zenodo, code_snana, \n types_names[code_zenodo], redshift]\n \n for fil in filters: \n line.append(head_data['SIM_PEAKMAG_' + str(fil)[2]].values[flag_id_head][0])\n \n # calculate SNR statistics \n for f in [np.mean, max, np.std]: \n for fil in filters: \n \n flag_fil = photo_data[flag_id_photo]['FLT'] == fil\n neg_flag = flux > -100\n flag2 = np.logical_and(flag_fil, neg_flag)\n \n if sum(flag2) > 0:\n SNR_fil = SNR_all[flag2] \n line.append(f(SNR_fil))\n \n if len(line) == 30:\n return line\n else:\n return []", "def addNoise(pure,snr):\r\n watts = pure**2\r\n # Calculate signal power and convert to dB \r\n sig_avg_watts = np.mean(watts)\r\n sig_avg_db = 10 * np.log10(sig_avg_watts)\r\n # Calculate noise according to [2] then convert to watts\r\n noise_avg_db = sig_avg_db - snr\r\n noise_avg_watts = 10 ** (noise_avg_db / 10)\r\n # Generate an sample of white noise\r\n mean_noise = 0\r\n noise = np.random.normal(mean_noise, np.sqrt(noise_avg_watts), len(watts))\r\n \r\n return pure+noise", "def add_noise(self, data):", "def estimate_noise_std(r_data):\n\n s_noise = 0.2 * np.max(abs(r_data))\n return s_noise", "def computeSNR(self,doppMatchLow,doppMatchHigh,windowWidth):\n # print(f'SNR params: low {doppMatchLow} high {doppMatchHigh} width {windowWidth}')\n 
doppMatchLow_FFT_idx = self.doppCyperSymNorm[doppMatchLow]\n doppMatchHigh_FFT_idx = self.doppCyperSymNorm[doppMatchHigh]\n # print(f'SNR {doppMatchLow_FFT_idx} {doppMatchHigh_FFT_idx}')\n noiseIdxLow_FFT_idx = (doppMatchLow_FFT_idx + int(self.Nfft//2)) % self.Nfft\n noiseIdxHigh_FFT_idx = (doppMatchHigh_FFT_idx + int(self.Nfft//2)) % self.Nfft\n \n t = time.time()\n cuda.Context.synchronize()\n\n if doppMatchLow_FFT_idx > doppMatchHigh_FFT_idx: # the signal is around zero Hz IF\n sigPwr = np.mean(np.concatenate((np.abs(self.GPU_bufSignalFreq_cpu_handle[doppMatchLow_FFT_idx-windowWidth:]),np.abs(self.GPU_bufSignalFreq_cpu_handle[:doppMatchHigh_FFT_idx+windowWidth]))))\n else:\n sigPwr = np.mean(np.abs(self.GPU_bufSignalFreq_cpu_handle[doppMatchLow_FFT_idx-windowWidth:doppMatchHigh_FFT_idx+windowWidth]))\n\n if noiseIdxLow_FFT_idx > noiseIdxHigh_FFT_idx: # the signal is around zero Hz IF\n noisePwr = np.mean(np.concatenate((np.abs(self.GPU_bufSignalFreq_cpu_handle[noiseIdxLow_FFT_idx-windowWidth:]),np.abs(self.GPU_bufSignalFreq_cpu_handle[:noiseIdxHigh_FFT_idx+windowWidth]))))\n else:\n noisePwr = np.mean(np.abs(self.GPU_bufSignalFreq_cpu_handle[noiseIdxLow_FFT_idx-windowWidth:noiseIdxHigh_FFT_idx+windowWidth]))\n \n SNR = 20*np.log10(sigPwr/noisePwr - 1)\n # print(f'SNR {SNR:.1f} sigPwr {sigPwr:.6f} noisePwr {noisePwr:.6f} dopp idx: {doppMatchLow_FFT_idx} {doppMatchHigh_FFT_idx} noise idx : {noiseIdxLow_FFT_idx} {noiseIdxHigh_FFT_idx}')\n\n # log.error(f'time SNR {(time.time()-t)*1000:.3f} ms')\n return SNR", "def calculateSNR(self):\n pass", "def SNR(self, flux_sky, n_pix_star, flux_star, gain, ron):\n SNR = (gain*flux_star/sqrt(gain*flux_star + n_pix_star*gain*flux_sky + n_pix_star*ron**2)) \n return SNR", "def noise(self):\n # Extract parameters\n pzs = self.params[0]\n ngals = np.array([pz.gals_per_steradian for pz in pzs])\n return 1.0 / ngals", "def add_noise(self,fing):\r\n\t\tfingerprint = copy.deepcopy(fing)\r\n\t\tl = len(fingerprint)\r\n\t\tnp.random.seed()\t\r\n\t\tif self.noise_type == 'SNR':\r\n\t\t\tnoise = np.random.normal(0, 1, l)\r\n\t\t\tsignal_Power = np.linalg.norm(fingerprint)\r\n\t\t\tnoise_Power = np.linalg.norm(noise)\r\n\t\t\tcst = signal_Power / (noise_Power * self.noise_level)\r\n\t\t\tnoise = noise * cst\r\n\t\telif self.noise_type == 'Standard':\r\n\t\t\tnoise = np.random.normal(0, self.noise_level, l)\r\n\t\tfingerprint += noise\r\n\t\tif self.trparas.normalization == 'Noisy_input':\r\n\t\t\treturn fingerprint / np.linalg.norm(fingerprint)\r\n\t\telse:\r\n\t\t\treturn fingerprint", "def snr(signal, noise, impl):\n if np.abs(np.asarray(noise)).sum() != 0:\n ave1 = np.sum(signal) / signal.size\n ave2 = np.sum(noise) / noise.size\n s_power = np.sqrt(np.sum((signal - ave1) * (signal - ave1)))\n n_power = np.sqrt(np.sum((noise - ave2) * (noise - ave2)))\n if impl == 'general':\n return s_power / n_power\n elif impl == 'dB':\n return 10.0 * np.log10(s_power / n_power)\n else:\n raise ValueError('unknown `impl` {}'.format(impl))\n else:\n return float('inf')", "def rms(a):\n\treturn np.sqrt(np.sum(np.power(a,2))/len(a))", "def white_noise():\n return random.randint(-32767, 32767)", "def calculate_psnr(img0, img1, data_range=None):\n psnr = skm.peak_signal_noise_ratio(img0, img1, data_range=data_range) \n return psnr", "def compute_noise_levels(tr, cfg):\n from obspy.signal.trigger import classic_sta_lta\n tr_snr = tr.copy()\n tr_snr.filter(\"bandpass\", freqmin=cfg.sig_noise.SNR_FREQ[0],\n freqmax=cfg.sig_noise.SNR_FREQ[1])\n wa = 
int(cfg.sig_noise.SNR_WIN[1]*tr.stats.sampling_rate)\n wb = int(cfg.sig_noise.SNR_WIN[0]*tr.stats.sampling_rate)\n # Prevent failing due to en(data) < nlta error\n if len(tr_snr.data) < wa or len(tr_snr.data) < wb:\n noise_level = 100.0\n return noise_level\n snr = classic_sta_lta(tr_snr.data, wa, wb)\n snr_smooth = do_smooth(snr, cfg.sig_noise.SNR_SMOOTH_WIN,\n tr.stats.sampling_rate)\n thresh_snr = np.nanmax(snr_smooth) * 0.4\n A = (snr_smooth - thresh_snr)\n A = A[np.where(A > 0)]\n if len(snr_smooth[wb:-wa]) == 0: # In case zerodivision error\n noise_level = 9999.9\n return noise_level\n noise_level = (len(A) / len(snr_smooth[wb:-wa])) * 100\n return noise_level", "def noise(self):\r\n if self.buffer_offset + self.frames_per_buffer - 1 > self.x_max:\r\n #relleno con ceros al final si es necesario\r\n xs = np.arange(self.buffer_offset, self.x_max)\r\n tmp = np.random.random_sample(len(xs)) #ruido\r\n out = np.append(tmp, np.zeros(self.frames_per_buffer-len(tmp)))\r\n else:\r\n xs = np.arange(self.buffer_offset,\r\n self.buffer_offset + self.frames_per_buffer)\r\n out = np.random.random_sample(len(xs))\r\n self.buffer_offset += self.frames_per_buffer\r\n return out", "def noise(self):\n return self._noise", "def at_rSNR(h5):\n ses = h5['SES'][:]['ses'].copy()\n ses.sort()\n h5.attrs['clipSNR'] = np.mean(ses[:-3]) / h5.attrs['noise'] *np.sqrt(ses.size)\n x = np.median(ses) \n h5.attrs['medSNR'] = np.median(ses) / h5.attrs['noise'] *np.sqrt(ses.size)", "def brown_noise():\n # TODO: try different values of BROWN_FACTOR\n # ... just seems to make it noisier or quieter - no change in freq\n global brown_val\n if brown_val > 32767:\n brown_val = brown_val - abs(white_noise()) / BROWN_FACTOR\n elif brown_val < -32767:\n brown_val = brown_val + abs(white_noise()) / BROWN_FACTOR\n else:\n brown_val = brown_val + white_noise() / BROWN_FACTOR\n return int(brown_val)", "def evaluate_scaled_noise_rms(freq_hz, num_times, bandwidth_hz=100e3, eta=1.0,\n obs_length_h=1000.0, num_antennas=256):\n k_B = 1.38064852e-23\n t_acc = (obs_length_h * 3600.0) / num_times\n a_eff, t_sys = element_area_and_temperature(freq_hz)\n\n # Single receiver polarisation SEFD.\n sefd = (2.0 * k_B * t_sys * eta) / (a_eff * num_antennas)\n sigma_pq = (sefd * 1e26) / (2.0 * bandwidth_hz * t_acc)**0.5\n # Stokes-I noise is from two receiver polarisations so scale by 1 / sqrt(2)\n sigma_pq /= 2**0.5\n return sigma_pq", "def test_self_consistency_noise(self):\n # test with SNR = 100\n SNR = self.p_gt[0] / 9\n noisy_data = self.data + SNR * RNG.normal(size=self.data.shape)\n popt, pcov = sine_fit(noisy_data, self.periods)\n assert_allclose(*fixed_signs(self.p_gt, popt), 5e-1)", "def calcSNR(self, background: SpectralQty, signal: SpectralQty, obstruction: float,\n exp_time: u.Quantity) -> u.dimensionless_unscaled:\n # Calculate the signal and background temperatures\n t_signal, t_background = self.calcTemperatures(background, signal, obstruction)\n line_ind = np.where(t_signal.wl == self.__lambda_line)[0][0]\n t_sys = t_background + 2 * self.__receiver_temp + t_signal\n # Calculate the noise bandwidth\n delta_nu = t_signal.wl.to(u.Hz, equivalencies=u.spectral()) / (t_signal.wl / self.__common_conf.wl_delta() + 1)\n snr = []\n for exp_time_ in exp_time if exp_time.size > 1 else [exp_time]:\n # Calculate the RMS background temperature\n if self.__n_on is None:\n t_rms = 2 * t_sys * self.__kappa / np.sqrt(exp_time_ * delta_nu)\n else:\n t_rms = t_sys * self.__kappa * np.sqrt(1 + 1 / np.sqrt(self.__n_on)) / np.sqrt(exp_time_ * 
delta_nu)\n # Calculate the SNR\n snr_ = t_signal / t_rms\n snr.append(snr_.qty[line_ind])\n # Print details\n self.__printDetails(t_sys.qty[line_ind], delta_nu[line_ind], t_rms.qty[line_ind], t_signal.qty[line_ind],\n \"t_exp=%.2f s: \" % exp_time_.value)\n self.__output(t_signal, t_background, t_rms, \"texp_%.2f\" % exp_time_.value, snr=snr_)\n return u.Quantity(snr) if len(snr) > 1 else u.Quantity(snr[0])", "def add_noise(spectrum,rms):\n noise = np.random.randn(spectrum.data.shape[0])*rms\n noisy_data = spectrum.data + noise\n noisy_spec = pyspeckit.Spectrum(xarr=spectrum.xarr,data=noisy_data)\n return noisy_spec", "def calc_noise(self, name):\n noise = self._noise_objs[name]\n cals = [self._cal_objs[cal].calc() for cal in self._noise_cals[name]]\n data = noise.calc_trace(cals)\n if isinstance(data, dict):\n return data['Total'][0]\n else:\n return data[0]", "def _dmsmear(self, psr):\n return 8.3E6 * psr.dm * self.bw_chan / math.pow(self.freq, 3.0)", "def add_noise_batch(self,fingerprints):\r\n\t\tif self.trparas.complex:\r\n\t\t\tn,l,r = fingerprints.shape\r\n\t\t\tnp.random.seed()\r\n\r\n\t\t\t# noise_level = 0.002\r\n\t\t\tnoise_level = self.noise_level\r\n\t\t\t# add noise to real and imag part seperately with different noise level\r\n\t\t\tnoise_real = np.random.normal(0, noise_level, (n, l))\r\n\t\t\tnoise_imag = np.random.normal(0, noise_level, (n, l))\r\n\t\t\tnoise = np.stack([noise_real,noise_imag],axis=2) # n,l,r 4000,666,2\r\n\t\t\tfingerprints += noise\r\n\t\t\treturn fingerprints\r\n\r\n\t\telse:\t\r\n\t\t\tn,l = fingerprints.shape\r\n\t\t\tnp.random.seed()\r\n\t\t\tif self.noise_type == 'SNR':\r\n\t\t\t\tnoise = np.random.normal(0, 1, (n,l))\r\n\t\t\t\tsignal_Power = np.linalg.norm(fingerprints, axis=1)\r\n\t\t\t\tnoise_Power = np.linalg.norm(noise,axis=1)\r\n\t\t\t\tcst = signal_Power / (noise_Power * self.noise_level)\r\n\t\t\t\tnoise = noise * np.tile(cst.reshape(-1,1),(1,l))\r\n\t\t\telif self.noise_type == 'Standard':\r\n\t\t\t\tnoise = np.random.normal(0, self.noise_level, (n,l))\r\n\t\t\tfingerprints += noise\r\n\t\t\tif self.trparas.normalization == 'Noisy_input':\r\n\t\t\t\treturn fingerprints / np.tile(np.linalg.norm(fingerprints,axis=1).reshape(-1,1), (1,l))\r\n\t\t\telse:\r\n\t\t\t\treturn fingerprints", "def compute_SNR(x, fs):\n segments, cough_mask = segment_cough(x,fs)\n RMS_signal = 0 if len(x[cough_mask])==0 else np.sqrt(np.mean(np.square(x[cough_mask])))\n RMS_noise = np.sqrt(np.mean(np.square(x[~cough_mask])))\n SNR = 0 if (RMS_signal==0 or np.isnan(RMS_noise)) else 20*np.log10(RMS_signal/RMS_noise)\n return SNR", "def noise(self):\n # Extract parameters\n pzs = self.params[0]\n # retrieve number of galaxies in each bins\n ngals = np.array([pz.gals_per_steradian for pz in pzs])\n if isinstance(self.config[\"sigma_e\"], list):\n sigma_e = np.array([s for s in self.config[\"sigma_e\"]])\n else:\n sigma_e = self.config[\"sigma_e\"]\n return sigma_e ** 2 / ngals", "def rmsi(self, tmin=None, tmax=None):\n res = self.ml.noise(tmin=tmin, tmax=tmax)\n N = res.size\n return np.sqrt(sum(res ** 2) / N)", "def snr_f(self, image):\n image_ps = self.pow_spec(image)\n noise_level = numpy.sum(self.rim*image_ps)/numpy.sum(self.rim)\n return numpy.sqrt(image_ps[int(self.size/2), int(self.size/2)]/noise_level)", "def _snr_preprocessing(self):\n if self.flux is None or self.fluxerr is None:\n return np.ones(len(self.stamps), dtype=bool)\n\n snrs = self.flux.astype(float) / self.fluxerr.astype(float)\n return snrs > self.snr_threshold", "def addNoise 
(image,noise_type=\"gauss\",var = .01):\n row,col,ch= image.shape\n if noise_type == \"gauss\": \n mean = 0.0\n #var = 0.001\n sigma = var**0.5\n gauss = np.array(image.shape)\n gauss = np.random.normal(mean,sigma,(row,col,ch))\n gauss = gauss.reshape(row,col,ch)\n #print(gauss)\n noisy = image + gauss*255\n return noisy.astype('uint8')\n elif noise_type == \"s&p\":\n s_vs_p = 0.5\n amount = 0.09\n out = image\n # Generate Salt '1' noise\n num_salt = np.ceil(amount * image.size * s_vs_p)\n coords = [np.random.randint(0, i - 1, int(num_salt))\n for i in image.shape]\n out[coords] = 255\n # Generate Pepper '0' noise\n num_pepper = np.ceil(amount* image.size * (1. - s_vs_p))\n coords = [np.random.randint(0, i - 1, int(num_pepper))\n for i in image.shape]\n out[coords] = 0\n return out\n elif noise_type == \"poisson\":\n vals = len(np.unique(image))\n vals = 2 ** np.ceil(np.log2(vals))\n noisy = np.random.poisson(image * vals) / float(vals)\n return noisy\n elif noise_type ==\"speckle\":\n gauss = np.random.randn(row,col,ch)\n gauss = gauss.reshape(row,col,ch) \n noisy = image + image * gauss\n return noisy\n else:\n return image", "def test_best_result(origianl_waveform):\n origianl_waveform = origianl_waveform.flatten()\n recovery_waveform = []\n audio_length = len(origianl_waveform)\n noise = np.random.random_sample((audio_length,))\n noise_list = [x / 100 for x in noise]\n noise_count = 0\n \n for n in origianl_waveform:\n difference = n - noise_list[noise_count]\n recovery_waveform.append(difference)\n noise_count += 1\n \n return np.asarray(recovery_waveform)", "def noise(self) -> Sequence:\n\n return self._noise", "def add_white_noise(rates, numreg):\n rtemp = rates.copy().getA()\n sdrates = np.sqrt(rtemp * (1 - rtemp) / numreg) + 1e-10\n noise = np.random.normal(0, sdrates)\n rtemp += noise\n return np.matrix(rtemp)", "def fun_cnoise_Stim(self, t_stim = 10*s, sexp = 0, cutf = 0, do_csd = 1, t_qual = 0, freq_used = np.array([]), K_mat_old = np.array([]), inh_factor = [1], onf = None, equi = 0):\n self.barrier() # wait for other nodes\n \n filename = str(self.pickle_prefix) + \"_results_pop_cnoise.p\"\n filepath = self.data_dir + \"/\" + filename\n \n if self.id == 0: print \"- filepath:\", filepath \n \n if self.do_run or (os.path.isfile(filepath) is False):\n\n tstart = 0; \n fs = 1 / self.dt # sampling rate \n fmax = fs / 2 # maximum frequency (nyquist)\n \n t_noise = arange(tstart, t_stim, self.dt) # create stimulus time vector, make sure stimulus is even!!!\n\n #print self.syn_ex_dist\n #print self.syn_inh_dist\n #exit()\n \n if (self.syn_ex_dist == []):\n for nt in range(self.n_celltypes): # loop over all cells\n #print \"nt\", nt\n if hasattr(self.cells[nt][0], 'input_vec'):\n self.syn_ex_dist.append([1] * len(self.cells[nt][0].input_vec)) # default ex for all by default!!!\n else: \n self.syn_ex_dist.append([1] * self.n_syn_ex[nt]) # default ex for all by default!!!\n \n #print self.syn_ex_dist\n \n if (self.syn_ex_dist[0] == []):\n nemax = 1\n else:\n nemax = max([item for sublist in self.syn_ex_dist for item in sublist])\n \n if (self.syn_inh_dist == []): # and (any(self.n_syn_inh) > 0)\n for nt in range(self.n_celltypes): # loop over all cells\n self.syn_inh_dist.append([0] * self.n_syn_inh[nt]) # default no inh for all by default!!!\n \n #print self.syn_inh_dist\n #exit()\n \n if (self.syn_inh_dist[0] == []):\n nimax = 0\n else:\n nimax = max([item for sublist in self.syn_inh_dist for item in sublist]) \n \n #print \"self.syn_inh_dist, self.syn_ex_dist\", self.syn_inh_dist, 
self.syn_ex_dist\n \n n_noise = max([nemax,nimax]) # number of noise sources\n #print n_noise,nemax,nimax\n # create reproduceable input\n noise_data = []\n\n for nj in range(n_noise):\n \n if self.id == 0: # make sure all have the same signal !!!\n if len(freq_used) == 0: \n noise_data0 = create_colnoise(t_noise, sexp, cutf, self.seed+nj, onf = onf)\n else:\n noise_data0, _, _, _ = create_multisines(t_noise, freq_used) # create multi sine signal\n else:\n noise_data0 = np.empty(len(t_noise), dtype=np.float64)\n\n noise_data0 = self.broadcast(noise_data0, fast = True) \n \n noise_data.append(noise_data0)\n noise_data0 = [] \n \n noise_data_points = len(noise_data[0]) \n\n # Create signal weight vector inh_factor if it is not fully given\n if len(noise_data) > len(inh_factor):\n inh_factor = [inh_factor[0]] * len(noise_data) \n print \"inh_factor:\", inh_factor\n\n #if equi:\n #pass\n # tstop = t_stim\n \n if max(self.n_syn_ex) == 0: # this means current input\n \n self.set_IStim() # sets amp\n \n if self.fluct_s != []:\n if self.fluct_s[self.a_celltype[0]] > 0:\n if self.id == 0: print \"- adding i fluct\"\n self.connect_fluct()\n \n for i, m in enumerate(self.method_interpol):\n if \"syn\" in m: self.method_interpol[i] = \"syn \" + str(self.syn_tau1/ms) + \"/\" + str(self.syn_tau2/ms) + \"ms\"\n if \"bin\" in m: self.method_interpol[i] = \"bin \" + str(self.bin_width/ms) + \"ms\"\n \n stimulus = []\n for nj in range(len(noise_data)):\n stimulus0, t, t_startstop = construct_Stimulus(noise_data[nj], fs, self.amp[self.a_celltype[0]], ihold = 0, delay_baseline = self.delay_baseline) # , tail_points = 0\n stimulus.append(stimulus0)\n tstop = t[-1]\n \n self.set_IPlay2(stimulus, t)\n if self.id == 0: print \"- starting colored noise transfer function estimation! with amp = \" + str(np.round(self.amp[self.a_celltype[0]],4)) + \", ihold = \" + str(np.round(self.ihold[self.a_celltype[0]],4)) + \", ihold_sigma = \" + str(np.round(self.ihold_sigma,4)) + \", dt = \" + str(self.dt) + \" => maximum frequency = \" + str(fmax) + \"\\r\" \n \n else:\n\n self.give_freq = False\n ihold = self.set_i(self.ihold) # just sets amp, ihold should not change! \n\n if 'gsyn_in' not in self.method_interpol: \n pass\n else:\n self.g_syn_ex = [1]*len(self.N)\n \n \n if ((self.fluct_g_e0 != []) or (self.fluct_g_i0 != [])):\n if ((self.fluct_g_e0[self.a_celltype[0]] > 0) or (self.fluct_g_i0[self.a_celltype[0]] > 0)):\n if self.id == 0: print \"- adding g fluct\"\n self.connect_gfluct(E_i=-65)\n \n stimulus = []\n for nj in range(len(noise_data)):\n stimulus0, t, t_startstop = construct_Stimulus(noise_data[nj], fs, amp=1, ihold = 0, tail_points = 0, delay_baseline = self.delay_baseline) # self.amp\n stimulus.append(stimulus0)\n \n noise_data = [] \n tstop = t[-1]\n \n if self.N[self.a_celltype[0]] > 1:\n self.set_IStim(ihold = [0]*self.n_celltypes, ihold_sigma = [0]*self.n_celltypes, random_start = True, tstart_offset = 1)\n if self.id == 0: print \"- add random start\"\n \n #print \"Enter Synplay()\"\n self.set_SynPlay(stimulus, t, t_startstop = t_startstop) \n #print \"Exit Synplay()\"\n\n if self.id == 0: print \"- starting colored noise transfer function estimation with synaptic input! 
with amp = \" + str(np.round(self.amp,4)) + \", ihold = \" + str(np.round(self.ihold,4)) + \", ihold_sigma = \" + str(np.round(self.ihold_sigma,4)) + \", dt = \" + str(self.dt) + \" => maximum frequency = \" + str(fmax) + \"\\r\" \n \n amp_vec = []\n mag_vec = [] \n pha_vec = []\n freq_used = []\n ca = []\n SNR_mat = []\n VAFf_mat = []\n Qual_mat = []\n CF_mat = [] \n VAF_mat = []\n stim = []\n stim_re_mat = []\n resp_mat = []\n current_re = []\n ihold1 = []\n tk = []\n K_mat = []\n gsyn_in = []\n fmean = []\n fmax = [] \n fmstd = [] \n fcvm = [] \n fmeanA = []\n fmaxA = [] \n fmstdA = [] \n fcvmA = [] \n t_all_vec_input_sorted = []\n id_all_vec_input_sorted = []\n \n if (self.id == 0) and (max(self.n_syn_ex) > 0):\n print range(self.n_celltypes), np.shape(self.t_all_vec_input)\n for l in range(self.n_celltypes): \n ie = argsort(self.t_all_vec_input[l]) \n t_all_vec_input_sorted.append( self.t_all_vec_input[l][ie] )\n id_all_vec_input_sorted.append( self.id_all_vec_input[l][ie].astype(int) )\n \n #if (self.id == 0): \n # print self.g_syn_ex\n # print np.array(self.g_syn_ex)>= 0\n \n #print \"g_syn_ex:\",self.g_syn_ex\n if np.array(np.array(self.g_syn_ex)>= 0).any():\n \n if hasattr(self.cells[self.a_celltype[0]][0], 'get_states') and equi:\n print \"- Equilibrate!\"\n self.run(tstop, do_loadstate = False)\n m = md5.new()\n cell_exe_new = self.cell_exe[0]\n m.update(cell_exe_new)\n filename = './states_' + self.celltype[0] + '_' + m.hexdigest() + '_Population.b'\n self.cells[self.a_celltype[0]][0].get_states(filename)\n else:\n self.run(tstop, do_loadstate = False)\n \n i_startstop = []\n \n results = self.get(t_startstop, i_startstop) \n time = results.get('time')\n current = results.get('current') \n voltage = results.get('voltage') \n fmean = results.get('fmean') \n gsyn = results.get('gsyn') \n freq_times = results.get('freq_times')\n spike_freq = results.get('spike_freq')\n t_all_vec_vec = results.get('t_all_vec_vec')\n id_all_vec_vec = results.get('id_all_vec_vec')\n gsyns = results.get('gsyns')\n gsyn_in = results.get('gsyn_in')\n \n fmax = results.get('fmax')\n fmstd = results.get('fmstd')\n fcvm = results.get('fcvm')\n \n fmeanA = results.get('fmeanA') \n fmaxA = results.get('fmaxA')\n fmstdA = results.get('fmstdA')\n fcvmA = results.get('fcvmA')\n \n fbaseA = results.get('fbaseA') \n fbase = results.get('fbase')\n fbstdA = results.get('fbstdA')\n \n \n else: # do not run, analyse input!!!\n \n time = t\n voltage = []\n for l in range(self.n_celltypes): \n voltage.append(np.zeros(len(t)))\n current = []\n \n freq_times = []\n spike_freq = []\n gsyn = []\n gsyn_in = []\n \n t_all_vec_vec = []\n id_all_vec_vec = []\n \n fmean = []\n fmax = []\n fmstd = []\n fcvm = []\n fstdm = []\n \n fmeanA = []\n fmaxA = []\n fmstdA = []\n fcvmA = []\n fbaseA = []\n fbase = []\n fbstdA = []\n \n if self.id == 0:\n \n current = self.n_train_ex\n \n #t_all_vec = self.t_all_vec_input\n #id_all_vec = self.id_all_vec_input\n\n #ie = argsort(t_all_vec) \n #t_all_vec_vec.append( t_all_vec[ie] )\n #id_all_vec_vec.append( id_all_vec[ie].astype(int) )\n \n t_all_vec_vec = t_all_vec_input_sorted\n id_all_vec_vec = id_all_vec_input_sorted\n \n freq_times = arange(0, tstop, self.bin_width)\n spike_freq = np.zeros(len(freq_times))\n \n for j in self.a_celltype:\n \n [num_spikes, _] = neuronpy.util.spiketrain.get_histogram(t_all_vec_vec[j], bins = freq_times)\n\n if self.tau2_ex[0] > 0:\n spike_freq = np.concatenate((zeros(1),num_spikes)) \n print \"NOSYN TEST: start convolution with Ksyn\"\n Ksyn = 
syn_kernel(arange(0,10*self.tau2_ex[0],self.bin_width), self.tau1_ex[0], self.tau2_ex[0]) \n Ksyn = np.concatenate((zeros(len(Ksyn)-1),Ksyn))\n spike_freq = np.convolve(Ksyn, spike_freq, mode='same')\n print \"NOSYN TEST: convolution finished\"\n else:\n\n if isinstance(self.factor_celltype[j], ( int, long ) ):\n f = self.factor_celltype[j] \n else:\n f = self.factor_celltype[j][0] \n \n spike_freq = spike_freq + f * np.concatenate((zeros(1),num_spikes)) / self.bin_width\n\n fmean.append(self.fmean_input)\n fmax.append(self.fmax_input) \n fmstd.append(self.fmstd_input) \n fcvm.append(self.fcvm_input) \n fstdm.append(self.fstdm_input)\n\n if self.no_fmean == True:\n fmean.append(ihold)\n \n #plt.figure('spike_freq') \n #plt.plot(freq_times, spike_freq)\n #plt.savefig(\"./figs/Pub/Spike_freq_\" + str(self.pickle_prefix) + \".pdf\", dpi = 300, transparent=True) # save it \n #plt.clf()\n \n fmeanA = fmean[0]\n fmaxA = fmax[0]\n fmstdA = fmstd [0] \n fcvmA = fcvm[0]\n fstdmA = fstdm[0]\n \n \n if self.id == 0: \n \n if any([i<0 for i in inh_factor]):\n \n p0 = []\n inhf_idx = []\n for i, inhf in enumerate(inh_factor):\n if inhf < 0: \n p0.append(0) \n inhf_idx.append(i)\n \n plsq = fmin(self.residuals_compute_Transfer, p0, args=(stimulus, spike_freq, freq_times, t, noise_data_points, gsyn, gsyn_in, do_csd, t_qual, K_mat_old, t_startstop, inh_factor))\n p = plsq\n \n ip = 0\n for i in inhf_idx:\n inh_factor[i] = p[ip]\n ip += 1\n \n\n print \"Final inh_factor: \", inh_factor\n \n \n results = self.compute_Transfer(stimulus, spike_freq = spike_freq, freq_times = freq_times, \n t = t, noise_data_points = noise_data_points, gsyn = gsyn, gsyn_in = gsyn_in, \n do_csd = do_csd, t_qual = t_qual, K_mat_old = K_mat_old, t_startstop = t_startstop, inh_factor=inh_factor)\n \n mag_vec, pha_vec, ca, freq, freq_used, fmean_all = results.get('mag_mat'), results.get('pha_mat'), results.get('ca_mat'), results.get('freq'), results.get('freq_used'), results.get('fmean') \n SNR_mat, VAFf_mat, Qual_mat, CF_mat, VAF_mat = results.get('SNR_mat'), results.get('VAFf_mat'), results.get('Qual_mat'), results.get('CF_mat'), results.get('VAF_mat') \n stim, resp_mat, stim_re_mat, tk, K_mat = results.get('stim'), results.get('resp_mat'), results.get('stim_re_mat'), results.get('tk'), results.get('K_mat') \n \n \n self.barrier() # wait for other nodes\n \n \n if self.id == 0:\n \n if t_qual > 0:\n #print t_startstop[0], t_startstop[0]/self.dt, (t_startstop[0]+t_qual)/self.dt\n current_re = current[int(t_startstop[0]/self.dt):int((t_startstop[0]+t_qual)/self.dt)]\n current_re = current_re[int(len(K_mat[self.a_celltype[0]])):int(len(current_re))-int(len(K_mat[self.a_celltype[0]]))]\n \n if len(self.i_holdrs) > 0:\n ihold1 = self.i_holdrs[self.a_celltype[0]][0]\n else:\n ihold1 = []\n \n for l in range(len(self.method_interpol)): # unwrap \n pha_vec[l,:] = unwrap(pha_vec[l,:] * (pi / 180)) * (180 / pi) # unwrap for smooth phase\n \n # only return fraction of actual signal, it is too long!!! 
\n if time[-1] > self.tmax: \n imax = -1*int(self.tmax/self.dt)\n time = time[imax:]; current = current[imax:]; gsyn = gsyn[imax:]; gsyn_in = gsyn_in[imax:]\n for n in range(self.n_celltypes): \n voltage[n] = voltage[n][imax:]\n \n if freq_times != []: \n if freq_times[-1] > self.tmax:\n imax2 = where(freq_times > self.tmax)[0][0] # for spike frequency \n freq_times = freq_times[0:imax2]; spike_freq = spike_freq[0:imax2] \n \n bvec = [\"_syn\" in st for st in self.method_interpol]\n if np.any(bvec):\n # normalize synaptic integration with others \n mag_vec[1,:]= mag_vec[0,0]*mag_vec[1,:]/mag_vec[1,0] \n \n if self.id == 0: print \"start pickle\"\n \n results = {'freq_used':freq_used, 'amp':amp_vec,'mag':mag_vec,'pha':pha_vec,'ca':ca,'voltage':voltage,'tk':tk,'K_mat':K_mat, 'ihold1': ihold1, 't_startstop':t_startstop, #'stimulus':stimulus,\n 'current':current,'t1':time,'freq_times':freq_times,'spike_freq':spike_freq, 'stim':stim, 'stim_re_mat':stim_re_mat, 'resp_mat':resp_mat, 'current_re':current_re, 'gsyn_in':gsyn_in, 'fmeanA':fmeanA, 'fmaxA':fmaxA, 'fmstdA':fmstdA, 'fcvmA':fcvmA, 'fbaseA':fbaseA, 'fbase':fbase, 'fbstdA':fbstdA,\n 'fmean':fmean,'method_interpol':self.method_interpol, 'SNR':SNR_mat, 'VAF':VAFf_mat, 'Qual':Qual_mat, 'CF':CF_mat, 'VAFs':VAF_mat, 'fmax':fmax, 'fmstd':fmstd, 'fcvm':fcvm, 'inh_factor':inh_factor, 't_all_vec_vec':t_all_vec_vec, 'id_all_vec_vec':id_all_vec_vec} \n \n if self.id == 0:\n if self.dumpsave == 1:\n pickle.dump( results, gzip.GzipFile( filepath, \"wb\" ) )\n print \"pickle done\" \n \n \n if self.plot_train:\n \n for a in self.a_celltype:\n\n #i_start = mlab.find(t_all_vec_vec[a] >= 0)[0]\n #i_stop = mlab.find(t_all_vec_vec[a] >= 5)[0]\n \n #t_all_cut = t_all_vec_vec[a][i_start:i_stop]\n #id_all_cut = id_all_vec_vec[a][i_start:i_stop]\n \n t_all_cut = t_all_vec_vec[a]\n id_all_cut = id_all_vec_vec[a]\n \n f_start_in = mlab.find(t_all_cut >= 0) \n f_stop_in = mlab.find(t_all_cut <= 10) \n \n f_start = f_start_in[0] \n f_stop = f_stop_in[-1]+1 \n use_spikes = t_all_cut[f_start:f_stop]\n use_id = id_all_cut[f_start:f_stop]\n \n plt.figure('results_train') \n ax99 = plt.subplot(1,1,1)\n ax99.plot(use_spikes,use_id,'|', ms=2)\n plt.text(0.5, 1.1, r'CF=' + str(round(fmean,1)) + ',fmax=' + str(round(fmax,1)) + ',fmstd=' + str(round(fmstd,1)), transform=ax99.transAxes, fontsize=10, va='center', ha='center')\n plt.savefig(\"./figs/Pub/Train_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, transparent=True) # save it \n \n plt.clf()\n \n if len(t_all_cut) > 0:\n \n tbin = 100*ms\n tb = np.arange(0,t[-1],tbin)\n [all_rate, _] = neuronpy.util.spiketrain.get_histogram(t_all_cut, bins = tb)\n all_rate = np.concatenate((np.zeros(1),all_rate)) / self.N[a] / tbin\n \n plt.figure('results_train2') \n plt.plot(tb,all_rate)\n plt.savefig(\"./figs/Pub/PSTH_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, transparent=True) # save it \n plt.clf()\n \n plt.figure('results_noise') \n plt.plot(time,current)\n plt.savefig(\"./figs/Pub/Noise_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, transparent=True) # save it \n plt.clf()\n \n \n if self.plot_input:\n \n if len(t_all_vec_input_sorted[0]) > 0:\n \n i_start = mlab.find(t_all_vec_input_sorted[0] >= 0)[0]\n i_stop = mlab.find(t_all_vec_input_sorted[0] >= 5)[0]\n \n t_all_cut = t_all_vec_input_sorted[0][i_start:i_stop]\n id_all_cut = id_all_vec_input_sorted[0][i_start:i_stop]\n 
\n plt.figure('results_input') \n ax99 = plt.subplot(1,1,1)\n ax99.plot(t_all_cut,id_all_cut,'|', ms=2)\n plt.text(0.5, 1.1, r'fmean=' + str(round(self.fmean_input,1)) + ',fmax=' + str(round(self.fmax_input,1)) + ',fmstd=' + str(round(self.fmstd_input,1)) + ',fcvm=' + str(round(self.fcvm_input,1)) + ',fstdm=' + str(round(self.fstdm_input,1)), transform=ax99.transAxes, fontsize=10, va='center', ha='center')\n plt.savefig(\"./figs/Pub/Input_\" + str(self.pickle_prefix) + \"_N\" + str(self.N[self.a_celltype[0]]) + \".pdf\", dpi = 300, transparent=True) # save it \n plt.clf()\n \n\n else:\n \n if self.id == 0:\n results = pickle.load( gzip.GzipFile( filepath, \"rb\" ) )\n \n #print results\n #print {key:np.shape(value) for key,value in results.iteritems()}\n \n if self.minimal_dir: # save only info needed for plot\n \n print {key:np.shape(value) for key,value in results.iteritems()}\n \n if \"Fig6_pop_transfer_grc_syngr_nsyn4_cn_a1_noisesynlow_inhlow_adjfinh_varih_N100_CFo6.0_results_pop_cnoise.p\" in filename:\n results['ca'] = [] \n results['resp_mat'] = []\n results['stim'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['freq_times'] = []\n results['spike_freq'] = []\n results['stim_re_mat'] = []\n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = [] \n results['gsyn_in'] = []\n \n elif (\"Fig8_pop_transfer_none_synno_cn_cutf30_a1_noisesynlow_ih20_varih_N100_CFo-1_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_none_synno_cn_cutf30_a10_noisesynlow_ih20_varih_N100_CFo-1_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N100_CFo9.0_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a10_noisesynlow_inhlow_adjfinh_varih_varinhn_N100_is0.14_CFo9.0_results_pop_cnoise.p\" in filename) \\\n :\n\n results['ca'] = [] \n results['resp_mat'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['voltage'] = [] \n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['t1'] = []\n results['gsyn_in'] = []\n \n elif (\"Fig8_pop_transfer_none_synno_cn_cutf30_a1_noisesynlow_ih20_varih_N50_twopop_CFo-1_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_none_synno_cn_cutf30_a10_noisesynlow_ih20_varih_N50_twopop_CFo-1_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N50_twopop_CFo9.0_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf30_a10_noisesynlow_inhlow_adjfinh_varih_varinhn_N50_is0.14_twopop_CFo9.0_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf5_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N100_CFo14.0_results_pop_cnoise.p\" in filename) \\\n or (\"Fig8_pop_transfer_grc_syngr_nsyn4_cn_cutf5_a1_noisesynlow_inhlow_adjfinh_varih_varinhn_N50_twopop_CFo14.0_results_pop_cnoise.p\" in filename) \\\n :\n \n results['ca'] = [] \n results['resp_mat'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['voltage'] = [] \n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['t1'] = []\n results['gsyn_in'] = []\n results['freq_times'] = []\n results['spike_freq'] = []\n \n elif (\"Fig4_pop_transfer_grc_cn_addn100_N[100]_CF[40]_amod[1]_results_pop_cnoise.p\" in 
filename) \\\n or (\"Fig4_pop_transfer_grc_cn_addn1_N[100]_CF[40]_amod[1]_results_pop_cnoise.p\" in filename) \\\n or (\"Fig4b_pop_transfer_grc_lowcf_cn_twopop_N[50, 50]_CF[0.0055, 0.0055]_amod[None, None]_results_pop_cnoise.p\" in filename) \\\n or (\"Fig4b_pop_transfer_grc_lowcf_cn_N[100]_CF[0.0055]_amod[None]_results_pop_cnoise.p\" in filename) \\\n or (\"Fig4b_pop_transfer_grc_lowcf_slownoise_cn_twopop_N[50, 50]_CF[0.0051, 0.0051]_amod[None, None]_results_pop_cnoise.p\" in filename) \\\n or (\"Fig4b_pop_transfer_grc_lowcf_slownoise_cn_N[100]_CF[0.0051]_amod[None]_results_pop_cnoise.p\" in filename) \\\n :\n \n results['ca'] = [] \n results['resp_mat'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['voltage'] = [] \n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['t1'] = []\n results['gsyn_in'] = []\n results['freq_times'] = []\n results['spike_freq'] = []\n \n elif (\"Fig2_pop_transfer_\" in filename) \\\n :\n \n results['ca'] = [] \n results['resp_mat'] = []\n results['current'] = []\n results['t1'] = []\n results['voltage'] = [] \n results['freq_times'] = []\n results['spike_freq'] = []\n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['gsyn_in'] = []\n \n else:\n results['ca'] = [] \n results['resp_mat'] = []\n results['stim'] = []\n results['current'] = []\n results['tk'] = []\n results['K_mat'] = []\n results['t1'] = []\n results['voltage'] = [] \n results['freq_times'] = []\n results['spike_freq'] = []\n results['stim_re_mat'] = []\n results['current_re'] = []\n results['t_all_vec_vec'] = []\n results['id_all_vec_vec'] = []\n results['gsyn_in'] = []\n\n print {key:np.shape(value) for key,value in results.iteritems()}\n\n pickle.dump( results, gzip.GzipFile( self.minimal_dir + \"/\" + filename, \"wb\" ) ) \n \n else:\n results = {'freq_used':[], 'amp':[],'mag':[],'pha':[],'ca':[],'voltage':[], 'tk':[],'K_mat':[], 'ihold1':[], 't_startstop':[], #'stimulus':[],\n 'current':[],'t1':[],'freq_times':[],'spike_freq':[], 'stim':[], 'stim_re_mat':[], 'current_re':[], 'gsyn_in':[], 'fmeanA':[], 'fmaxA':[], 'fmstdA':[], 'fcvmA':[], 'fbaseA':[], 'fbase':[], 'fbstdA':[],\n 'fmean':[],'method_interpol':self.method_interpol, 'SNR':[], 'VAF':[], 'Qual':[], 'CF':[], 'VAFs':[], 'fmax':[], 'fmstd':[], 'fcvm':[], 'inh_factor':[], 't_all_vec_vec':[], 'id_all_vec_vec':[]} \n \n if self.id == 0: \n\n if self.plot_train: \n\n for a in self.a_celltype:\n \n t1 = results.get('t1') \n voltage = results.get('voltage') \n fmean = results.get('fmean') \n fmax = results.get('fmax') \n fmstd = results.get('fmstd') \n \n \n if results.has_key('t_all_vec_vec'):\n \n if len(results['t_all_vec_vec']) > 0: \n t_all_vec_vec = results.get('t_all_vec_vec') \n id_all_vec_vec = results.get('id_all_vec_vec') \n \n t_all_cut = t_all_vec_vec[a]\n id_all_cut = id_all_vec_vec[a]\n \n f_start_in = mlab.find(t_all_cut >= 0) \n f_stop_in = mlab.find(t_all_cut <= 10) \n \n f_start = f_start_in[0] \n f_stop = f_stop_in[-1]+1 \n use_spikes = t_all_cut[f_start:f_stop]\n use_id = id_all_cut[f_start:f_stop]\n \n plt.figure('results_train') \n ax97 = plt.subplot(1,1,1)\n ax97.plot(use_spikes,use_id,'|', ms=6)\n plt.text(0.5, 1.1, r'CF=' + str(round(fmean,1)) + ',fmax=' + str(round(fmax,1)) + ',fmstd=' + str(round(fmstd,1)), transform=ax97.transAxes, fontsize=10, va='center', ha='center')\n plt.savefig(\"./figs/Pub/Train_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, 
transparent=True) # save it \n\n \n plt.figure('results_voltage') \n ax99 = plt.subplot(2,1,1)\n ax99.plot(t1,voltage[a])\n \n t_noise = arange(0, t_stim, self.dt)\n noise_data = create_colnoise(t_noise, sexp, cutf, 50, onf = onf)\n stimulus, t, t_startstop = construct_Stimulus(noise_data, 1/self.dt, amp=1, ihold = 0, tail_points = 0, delay_baseline = self.delay_baseline) \n ax98 = plt.subplot(2,1,2)\n ax98.plot(t[0:10/self.dt],stimulus[0:10/self.dt],color='k')\n \n plt.text(0.5, 1.1, r'CF=' + str(round(fmean,1)) + ',fmax=' + str(round(fmax,1)) + ',fmstd=' + str(round(fmstd,1)), transform=ax99.transAxes, fontsize=10, va='center', ha='center')\n plt.savefig(\"./figs/Pub/Voltage_\" + str(self.pickle_prefix) + \"_cell\" + str(a) + \"_N\" + str(self.N[a]) + \".pdf\", dpi = 300, transparent=True) # save it \n plt.show()\n plt.clf()\n \n if (self.id == 0) and (do_csd == 1):\n Qual = results.get('Qual') \n for i, ii in enumerate(self.method_interpol):\n print \"\\n[QUAL:] Interpol:\", ii, \"SNR0:\", Qual[i,0,0], \"SNR_cutff:\", Qual[i,0,1], \"SNR_mean:\", Qual[i,0,2], \"\\n VAF0:\", Qual[i,1,0], \"VAF_cutff:\", Qual[i,1,1], \"VAF_mean:\", Qual[i,1,2], \"\\n CF(subtracted):\", Qual[i,2,0], \"VAF(subtracted):\", Qual[i,2,1] \n \n VAF = results.get('VAF')\n freq_used = results.get('freq_used') \n iend = mlab.find(freq_used >= self.xmax)[0] \n print 'm(VAF)=' + str(np.mean(VAF[1][0,0:iend])) \n \n self.barrier() # wait for other nodes\n \n return results", "def add_noise_at_snr(channel_in, snr):\n\n rms_channel = np.sqrt(np.mean(channel_in ** 2.0))\n noise_std = rms_channel / np.sqrt(10.0 ** (snr/10.0))\n\n return channel_in + np.random.normal(size=channel_in.shape, scale=noise_std)", "def estimate_snr(img_ft, fxs, fys, fmax, filter_size=5):\n\n kernel = np.ones((filter_size, filter_size))\n kernel = kernel / np.sum(kernel)\n sig_real = scipy.ndimage.filters.convolve(img_ft.real, kernel)\n sig_imag = scipy.ndimage.filters.convolve(img_ft.imag, kernel)\n\n noise_power = get_noise_power(img_ft, fxs, fys, fmax)\n # have to subtract this to remove bias. This comes from fact pixel noise always correlated with itself\n sig_power = sig_real**2 + sig_imag**2 - noise_power / kernel.size\n # this is the expect standard deviation. Set anything less than 2 of these to zero...\n sd = noise_power / kernel.size\n sig_power[sig_power < 2*sd] = 1e-12\n\n # snr estimate\n snr = sig_power / noise_power\n\n return snr", "def _check_for_noise(self) -> None:\n safety_stop = 5\n while self._has_noise() and safety_stop > 0:\n self.filter(size=3)\n safety_stop -= 1", "def noise(self, stddev):\n #add noise to weights\n pass", "def sim_noise_data(det, rd_noise=[5,5,5,5], u_pink=[1,1,1,1], c_pink=3,\n acn=0, same_scan_direction=False, reverse_scan_direction=False,\n pow_spec_corr=None, corr_scales=None, fcorr_lim=[1,10],\n ref_ratio=0.8, **kwargs):\n \n from pynrc.reduce.calib import fit_corr_powspec, broken_pink_powspec\n import time\n\n nchan = det.nout\n nx = det.xpix\n ny = det.ypix\n chsize = det.chsize\n\n # Number of total frames up the ramp (including drops)\n ma = det.multiaccum\n nd1 = ma.nd1\n nd2 = ma.nd2\n nf = ma.nf\n ngroup = ma.ngroup\n nz = nd1 + ngroup*nf + (ngroup-1)*nd2\n\n nroh = det._line_overhead\n nfoh = det._extra_lines\n \n result = np.zeros([nz,ny,nx])\n \n # Make white read noise. 
This is the same for all pixels.\n if rd_noise is not None:\n # We want rd_noise to be an array or list\n if isinstance(rd_noise, (np.ndarray,list)):\n temp = np.asarray(rd_noise)\n if temp.size != nchan:\n _log.error('Number of elements in rd_noise not equal to n_out')\n return\n else: # Single value as opposed to an array or list\n rd_noise = np.ones(nchan) * rd_noise\n \n w = det.ref_info\n rr = ref_ratio #reference_pixel_noise_ratio \n \n if np.any(rd_noise):\n _log.info('Generating read noise...')\n\n # Go frame-by-frame\n for z in np.arange(nz):\n here = np.zeros((ny,nx))\n\n # First assume no ref pixels and just add in random noise\n for ch in np.arange(nchan):\n x1 = ch * chsize\n x2 = x1 + chsize\n here[:,x1:x2] = np.random.normal(scale=rd_noise[ch], size=(ny,chsize))\n\n # If there are reference pixels, overwrite with appropriate noise values\n # Noisy reference pixels for each side of detector\n rd_ref = rr * np.mean(rd_noise)\n if w[0] > 0: # lower\n here[:w[0],:] = np.random.normal(scale=rd_ref, size=(w[0],nx))\n if w[1] > 0: # upper\n here[-w[1]:,:] = np.random.normal(scale=rd_ref, size=(w[1],nx))\n if w[2] > 0: # left\n here[:,:w[2]] = np.random.normal(scale=rd_ref, size=(ny,w[2]))\n if w[3] > 0: # right\n here[:,-w[3]:] = np.random.normal(scale=rd_ref, size=(ny,w[3]))\n\n # Add the noise in to the result\n result[z,:,:] += here\n\n \n # Finish if no 1/f noise specified\n if (c_pink is None) and (u_pink is None) and (acn is None):\n return result\n \n #################################\n # 1/f noise\n \n ch_poh = chsize + nroh\n ny_poh = ny + nfoh\n\n # Compute the number of time steps per integration, per output\n nstep_frame = ch_poh * ny_poh\n nstep = nstep_frame * nz\n # Pad nsteps to a power of 2, which is much faster\n nstep2 = int(2**np.ceil(np.log2(nstep)))\n \n f2 = np.fft.rfftfreq(2*nstep2)\n f2[0] = f2[1] # First element should not be 0\n alpha = -1\n p_filter2 = np.sqrt(f2**alpha)\n p_filter2[0] = 0.\n \n # Add correlated pink noise.\n if (c_pink is not None) and (c_pink > 0):\n _log.info('Adding correlated pink noise...')\n\n if corr_scales is not None:\n scales = np.array(corr_scales)\n fcut1, fcut2 = np.array(fcorr_lim) / det._pixel_rate\n pf = broken_pink_powspec(f2, scales, fcut1=fcut1, fcut2=fcut2, alpha=alpha)\n pf[0] = 0\n elif pow_spec_corr is not None:\n n_ifft = 2 * (len(pow_spec_corr) - 1)\n freq_corr = np.fft.rfftfreq(n_ifft, d=1/det._pixel_rate)\n freq_corr[0] = freq_corr[1]\n # Fit power spectrum and remake for f2\n scales = fit_corr_powspec(freq_corr, pow_spec_corr, **kwargs)\n fcut1, fcut2 = np.array(fcorr_lim) / det._pixel_rate\n pf = broken_pink_powspec(f2, scales, fcut1=fcut1, fcut2=fcut2, alpha=alpha)\n pf[0] = 0\n else:\n pf = p_filter2\n\n tt = c_pink * pink_noise(nstep, pow_spec=pf)\n tt = tt.reshape([nz, ny_poh, ch_poh])[:,0:ny,0:chsize]\n _log.debug(' Corr Pink Noise (input, output): {:.2f}, {:.2f}'\n .format(c_pink, np.std(tt)))\n\n for ch in np.arange(nchan):\n x1 = ch*chsize\n x2 = x1 + chsize\n \n if same_scan_direction:\n flip = True if reverse_scan_direction else False\n elif np.mod(ch,2)==0:\n flip = True if reverse_scan_direction else False\n else:\n flip = False if reverse_scan_direction else True\n\n if flip: \n result[:,:,x1:x2] += tt[:,:,::-1]\n else:\n result[:,:,x1:x2] += tt\n del tt\n\n # Add uncorrelated pink noise. 
Because this pink noise is stationary and\n # different for each output, we don't need to flip it (but why not?)\n if u_pink is not None:\n # We want u_pink to be an array or list\n if isinstance(u_pink, (np.ndarray,list)):\n temp = np.asarray(u_pink)\n if temp.size != nchan:\n _log.error('Number of elements in u_pink not equal to n_out')\n return\n else: # Single value as opposed to an array or list\n u_pink = np.ones(nchan) * u_pink\n\n # Only do the rest if any values are not 0\n if np.any(u_pink):\n _log.info('Adding uncorrelated pink noise...')\n \n for ch in range(nchan):\n x1 = ch*chsize\n x2 = x1 + chsize\n\n tt = u_pink[ch] * pink_noise(nstep, pow_spec=p_filter2)\n tt = tt.reshape([nz, ny_poh, ch_poh])[:,0:ny,0:chsize]\n _log.debug(' Ch{} Pink Noise (input, output): {:.2f}, {:.2f}'\n .format(ch, u_pink[ch], np.std(tt)))\n\n if same_scan_direction:\n flip = True if reverse_scan_direction else False\n elif np.mod(ch,2)==0:\n flip = True if reverse_scan_direction else False\n else:\n flip = False if reverse_scan_direction else True\n\n if flip: \n result[:,:,x1:x2] += tt[:,:,::-1]\n else:\n result[:,:,x1:x2] += tt\n\n del tt\n\n # Add ACN\n if (acn is not None) and (acn>0):\n _log.info('Adding ACN noise...')\n\n facn = np.fft.rfftfreq(nstep2)\n facn[0] = facn[1] # First element should not be 0\n alpha = -2\n pf_acn = np.sqrt(facn**alpha)\n pf_acn[0] = 0.\n\n for ch in np.arange(nchan):\n x1 = ch*chsize\n x2 = x1 + chsize\n\n # Generate new pink noise for each even and odd vector.\n a = acn * pink_noise(int(nstep/2), pow_spec=pf_acn)\n b = acn * pink_noise(int(nstep/2), pow_spec=pf_acn)\n _log.debug(' Ch{} ACN Noise (input, [outa, outb]): {:.2f}, [{:.2f}, {:.2f}]'\n .format(ch, acn, np.std(a), np.std(b)))\n\n # Reformat into an image.\n tt = np.reshape(np.transpose(np.vstack((a, b))),\n (nz, ny_poh, ch_poh))[:, 0:ny, 0:chsize]\n\n if same_scan_direction:\n flip = True if reverse_scan_direction else False\n elif np.mod(ch,2)==0:\n flip = True if reverse_scan_direction else False\n else:\n flip = False if reverse_scan_direction else True\n\n if flip: \n result[:,:,x1:x2] += tt[:,:,::-1]\n else:\n result[:,:,x1:x2] += tt\n\n del tt\n\n return result", "def sample(self,noise,s):\n rhs=self.prior.sqrtM*noise\n self.prior.Msolver.solve(s,rhs)", "def get_estimated_noise(self):\n return self.gp_core.noise_var", "def estimate_noiseperbl(data):\n\n # define noise per baseline for data seen by detect_bispectra or image\n datamean = data.mean(axis=2).imag # use imaginary part to estimate noise without calibrated, on-axis signal\n noiseperbl = datamean.std() # measure single noise for input to detect_bispectra\n logger.debug('Measured noise per baseline of {0:.3f}'.format(noiseperbl))\n return noiseperbl", "def test_noise(self, lang):\n\n lang_id = self.params.lang2id[lang]\n sent1, len1 = self.get_batch('encdec', lang, None)\n sent1 = sent1.transpose_(0, 1)\n print(sent1.shape)\n print(\"sent1 before noise is \")\n print(sent1)\n print(\"len1 before noise is \")\n print(len1)\n\n sent1, len1 = self.add_noise(sent1, len1, lang_id)\n\n print('sent1 after noise for ' + lang + ' is')\n print(sent1)\n print('len1 for ' + lang + \" is \")\n print(len1)", "def SCAnoise(det=None, scaid=None, params=None, caldir=None, file_out=None, \n dark=True, bias=True, out_ADU=False, verbose=False, use_fftw=False, ncores=None,\n **kwargs):\n\n # Extensive testing on both Python 2 & 3 shows that 4 cores is optimal for FFTW\n # Beyond four cores, the speed improvement is small. 
Those other processors are\n # are better used elsewhere.\n if use_fftw and (ncores is None): ncores = 4\n\n if det is None:\n wind_mode = params.pop('wind_mode', 'FULL')\n xpix = params.pop('xpix', 2048)\n ypix = params.pop('ypix', 2048)\n x0 = params.pop('x0', 0)\n y0 = params.pop('y0', 0)\n det = DetectorOps(scaid, wind_mode, xpix, ypix, x0, y0, params)\n else:\n scaid = det.scaid\n\n\n # Line and frame overheads\n nroh = det._line_overhead\n nfoh = det._extra_lines\n nfoh_pix = det._frame_overhead_pix\n\n # How many total frames (incl. dropped and all) per ramp?\n # Exclude last set of nd2 and nd3 (drops that add nothing)\n ma = det.multiaccum\n naxis3 = ma.nd1 + ma.ngroup*ma.nf + (ma.ngroup-1)*ma.nd2\n\n # Set bias and dark files\n sca_str = np.str(scaid)\n if caldir is None:\n caldir = conf.PYNRC_PATH + 'sca_images/'\n bias_file = caldir + 'SUPER_BIAS_'+sca_str+'.FITS' if bias else None\n dark_file = caldir + 'SUPER_DARK_'+sca_str+'.FITS' if dark else None\n\n # Instantiate a noise generator object\n ng_h2rg = ng.HXRGNoise(naxis1=det.xpix, naxis2=det.ypix, naxis3=naxis3, \n n_out=det.nout, nroh=nroh, nfoh=nfoh, nfoh_pix=nfoh_pix,\n dark_file=dark_file, bias_file=bias_file,\n wind_mode=det.wind_mode, x0=det.x0, y0=det.y0,\n use_fftw=use_fftw, ncores=ncores, verbose=verbose)\n \n\n # Lists of each SCA and their corresponding noise info\n sca_arr = range(481,491)\n\n # These come from measured dark ramps acquired during ISIM CV3 at GSFC\n # Gain values (e/ADU). Everything else will be in measured ADU units\n gn_arr = [2.07, 2.01, 2.16, 2.01, 1.83, \n 2.00, 2.42, 1.93, 2.30, 1.85]\n\n # Noise Values (ADU)\n ktc_arr = [18.5, 15.9, 15.2, 16.9, 20.0, \n 19.2, 16.1, 19.1, 19.0, 20.0]\n ron_arr = [[4.8,4.9,5.0,5.3], [4.4,4.4,4.4,4.2], [4.8,4.0,4.1,4.0], [4.5,4.3,4.4,4.4],\n [4.2,4.0,4.5,5.4],\n [5.1,5.1,5.0,5.1], [4.6,4.3,4.5,4.2], [5.1,5.6,4.6,4.9], [4.4,4.5,4.3,4.0],\n [4.5,4.3,4.6,4.8]]\n # Pink Noise Values (ADU)\n cp_arr = [ 2.0, 2.5, 1.9, 2.5, 2.1,\n 2.5, 2.5, 3.2, 3.0, 2.5]\n up_arr = [[0.9,0.9,0.9,0.9], [0.9,1.0,0.9,1.0], [0.8,0.9,0.8,0.8], [0.8,0.9,0.9,0.8],\n [1.0,1.3,1.0,1.1],\n [1.0,0.9,1.0,1.0], [0.9,0.9,1.1,1.0], [1.0,1.0,1.0,0.9], [1.1,1.1,0.8,0.9],\n [1.1,1.1,1.0,1.0]]\n \n \n # Offset Values (ADU)\n bias_avg_arr = [5900, 5400, 6400, 6150, 11650, \n 7300, 7500, 6700, 7500, 11500]\n bias_sig_arr = [20.0, 20.0, 30.0, 11.0, 50.0, \n 20.0, 20.0, 20.0, 20.0, 20.0]\n ch_off_arr = [[1700, 530, -375, -2370], [-150, 570, -500, 350], [-530, 315, 460, -200],\n [480, 775, 1040, -2280], [560, 100, -440, -330],\n [105, -29, 550, -735], [315, 425, -110, -590], [918, -270, 400, -1240],\n [-100, 500, 300, -950], [-35, -160, 125, -175]]\n f2f_corr_arr = [14.0, 13.8, 27.0, 14.0, 26.0,\n 14.7, 11.5, 18.4, 14.9, 14.8]\n f2f_ucorr_arr= [[18.4,11.1,10.8,9.5], [7.0,7.3,7.3,7.1], [6.9,7.3,7.3,7.5],\n [6.9,7.3,6.5,6.7], [16.6,14.8,13.5,14.2],\n [7.2,7.5,6.9,7.0], [7.2,7.6,7.5,7.4], [7.9,6.8,6.9,7.0],\n [7.6,8.6,7.5,7.4], [13.3,14.3,14.1,15.1]]\n aco_a_arr = [[770, 440, 890, 140], [800, 410, 840, 800], [210,680,730,885],\n [595, 642, 634, 745], [-95,660,575,410],\n [220, 600, 680, 665], [930,1112, 613, 150], [395, 340, 820, 304],\n [112, 958, 690, 907], [495, 313, 392, 855]]\n ref_inst_arr = [1.0, 1.5, 1.0, 1.3, 1.0, \n 1.0, 1.0, 1.0, 2.2, 1.0]\n\n\n # SCA Index\n ind = sca_arr.index(scaid)\n\n # Convert everything to e-\n gn = gn_arr[ind]\n # Noise Values\n ktc_noise= gn * ktc_arr[ind] * 1.15 # kTC noise in electrons\n rd_noise = gn * np.array(ron_arr[ind]) * 0.93 # White read noise per integration\n # 
Pink Noise\n c_pink = gn * cp_arr[ind] * 1.6 # Correlated pink noise\n u_pink = gn * np.array(up_arr[ind]) * 1.4 # Uncorrelated\n ref_rat = 0.9 # Ratio of reference pixel noise to that of reg pixels\n\n # Offset Values\n bias_off_avg = gn * bias_avg_arr[ind] + 110 # On average, integrations start here in electrons\n bias_off_sig = gn * bias_sig_arr[ind] # bias_off_avg has some variation. This is its std dev.\n bias_amp = gn * 1.0 # A multiplicative factor to multiply bias_image. 1.0 for NIRCam.\n\n # Offset of each channel relative to bias_off_avg.\n ch_off = gn * np.array(ch_off_arr[ind]) + 110\n # Random frame-to-frame reference offsets due to PA reset\n ref_f2f_corr = gn * f2f_corr_arr[ind] * 0.95\n ref_f2f_ucorr = gn * np.array(f2f_ucorr_arr[ind]) * 1.15 # per-amp\n # Relative offsets of altnernating columns\n aco_a = gn * np.array(aco_a_arr[ind])\n aco_b = -1 * aco_a\n #Reference Instability\n ref_inst = gn * ref_inst_arr[ind]\n\n # If only one output (window mode) then select first elements of each array\n if det.nout == 1:\n rd_noise = rd_noise[0]\n u_pink = u_pink[0]\n ch_off = ch_off[0]\n ref_f2f_ucorr = ref_f2f_ucorr[0]\n aco_a = aco_a[0]; aco_b = aco_b[0]\n\n # Run noise generator\n hdu = ng_h2rg.mknoise(None, gain=gn, rd_noise=rd_noise, c_pink=c_pink, u_pink=u_pink, \n reference_pixel_noise_ratio=ref_rat, ktc_noise=ktc_noise,\n bias_off_avg=bias_off_avg, bias_off_sig=bias_off_sig, bias_amp=bias_amp,\n ch_off=ch_off, ref_f2f_corr=ref_f2f_corr, ref_f2f_ucorr=ref_f2f_ucorr, \n aco_a=aco_a, aco_b=aco_b, ref_inst=ref_inst, out_ADU=out_ADU)\n\n hdu.header = nrc_header(det)#, header=hdu.header)\n hdu.header['UNITS'] = 'ADU' if out_ADU else 'e-'\n\n # Write the result to a FITS file\n if file_out is not None:\n now = datetime.datetime.now().isoformat()[:-7]\n hdu.header['DATE'] = now #datetime.datetime.now().isoformat()[:-7]\n if file_out.lower()[-5:] == '.fits':\n file_out = file_out[:-5]\n if file_out[-1:] == '_':\n file_out = file_out[:-1]\n\n# \t\tfile_now = now\n# \t\tfile_now = file_now.replace(':', 'h', 1)\n# \t\tfile_now = file_now.replace(':', 'm', 1)\n# \t\tfile_out = file_out + '_' + file_now + '.fits'\n file_out = file_out + '.fits'\n\n hdu.header['FILENAME'] = os.path.split(file_out)[1]\n hdu.writeto(file_out, clobber='True')\n\n return hdu", "def calculate_ramp_noise(inps):\n observed_file = inps.Obs_file[0]\n mask_file = inps.mask[0]\n ramp_type = inps.surface_type\n ramp_file = inps.ramp_file[0]\n ramp_noise_file = inps.output[0]\n model_file = inps.Model_file[0]\n\n scp_args = [observed_file, model_file, '-o', ramp_noise_file, '--ramp', '-s', ramp_type, '-m', mask_file, '--ramp_file', ramp_file, '--outdir', inps.outdir[0]]\n scp_args = mu.seperate_str_byspace(scp_args)\n\n print('subtract_h5.py', scp_args)\n print('ramp_noise file is: %s' % ramp_noise_file)\n print('estimated ramp file is: %s' % ramp_file)\n mimtpy.subtract_h5.main(scp_args.split())\n\n return ramp_file, ramp_noise_file", "def get_noise_thresholds(size_of_class=45, fakes='./data/CASIA1_fakes', originals='./data/CASIA1_originals', \n fakes_ela='./data/CASIA1_fakes_ela'):\n fakes_list = os.listdir(fakes)\n\n fakes = load_fakes(fakes_list, fakes, originals)\n\n noises = []\n for i, item in enumerate(fakes):\n image = cv2.imread(os.path.join(fakes_ela, item.path.split('\\\\')[-1]))\n image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n \n image = cv2.inRange(image, np.array([0,0,0]), np.array([180,255,60]))\n image = cv2.bitwise_not(image)\n noises.append(estimate_noise(image))\n\n fakes = np.array(fakes)\n 
noises = np.array(noises)\n idxs = noises.argsort()\n sorted_by_noise = fakes[idxs]\n\n for i, item in enumerate(sorted(noises)):\n if (i+1) % size_of_class == 0:\n print(\"####\", i+1, item)\n else:\n print(i+1, item)", "def noiseon(delay=2.0, reference=False, subarray=DEFAULT) :\n multiSubarray('noiseSource', subarray, True, reference)\n multiSubarray('rfPower', subarray, False)\n sleep(delay) # Temporary - to allow for delay in correlator", "def sky_noise_weighting(file_name, sky_file_name):\n cs_data = spectra_analysis(file_name, sky_file_name)\n cube_data = cs_data['gd_shifted']\n sn_data = cs_data['sky_noise']\n wl_soln = wavelength_solution(file_name)\n\n sn_data_min = np.min(sn_data)\n in_wt = 1 / (sn_data - sn_data_min + 1)\n\n sky_regns = np.zeros((len(in_wt),2)) # storing regions of potential sky noise\n for i in range(len(in_wt)): \n data_acl = cube_data[i]\n data_sky = sn_data[i]\n data_prb = in_wt[i]\n \n if ( 0.00 <= np.abs(data_prb) <= 1.00 ):\n sky_regns[i][0] = data_prb\n sky_regns[i][1] = data_sky\n\n # finding max peak in the sky-noise data and fitting a Gaussian to that\n # x-axis data\n x_range = np.linspace(wl_soln['begin'], wl_soln['end'], wl_soln['steps'])\n\n # Finding peaks with PeakUtils\n sky_peaks = peakutils.indexes(sn_data, thres=300, thres_abs=True)\n sky_peaks_x = peakutils.interpolate(x_range, sn_data, sky_peaks)\n\n if (sky_peaks_x.size != 0):\n sky_peak = sky_peaks_x[0]\n sky_peak_index = find_nearest(sky_peak, x_range)\n else:\n sky_peak = 6000\n sky_peak_index = 0\n\n sky_peak_loc = x_range[sky_peak_index]\n\n sky_peak_range = [sky_peak-100, sky_peak+100]\n sky_peak_range_loc = [find_nearest(x_range, x) for x in sky_peak_range]\n\n sky_rng_x = x_range[sky_peak_range_loc[0]:sky_peak_range_loc[1]]\n sky_rng_y = sn_data[sky_peak_range_loc[0]:sky_peak_range_loc[1]]\n\n sky_gauss_params = Parameters()\n sky_gauss_params.add('c', value=0)\n sky_gauss_params.add('i1', value=np.max(sky_rng_y), min=0.0)\n sky_gauss_params.add('mu', value=sky_peak_loc)\n sky_gauss_params.add('sigma1', value=3)\n\n sky_gauss_model = Model(sn_gauss)\n sky_gauss_rslt = sky_gauss_model.fit(sky_rng_y, x=sky_rng_x, \n params=sky_gauss_params)\n sky_gauss_best = sky_gauss_rslt.best_values\n\n sky_sigma = sky_gauss_best['sigma1']\n\n return {'inverse_sky': in_wt, 'sky_regions': sky_regns, 'sky_sigma': sky_sigma}", "def flatNoiseCGH():\n #Get data\n wdir = '/home/rallured/Dropbox/AXRO/Metrology/NoiseStudy/FlatMeasurements/'\n d1,dx1 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas1.fits')\n d2,dx2 = met.read4DFits(wdir+'161205_RefFlat_Avg8_Meas2.fits')\n p1,px1 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas1.fits')\n p2,px2 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestPitch_Meas2.fits')\n p3,px3 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas1.fits')\n p4,px4 = met.read4DFits(wdir+'161205_RefFlat_ParrotingTestRoll_Meas2.fits')\n\n #Construct baseline power spectra\n f1,pow1 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1)\n f2,pow2 = fourier.meanPSD(d1-d2,win=np.hanning,dx=dx1,axis=1)\n \n #Construct parroted power spectra\n f3,pow3 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx1)\n f4,pow4 = fourier.meanPSD(p1-p2,win=np.hanning,dx=dx2,axis=1)\n f5,pow5 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx1)\n f6,pow6 = fourier.meanPSD(p3-p4,win=np.hanning,dx=dx2,axis=1)\n\n #Plot\n plt.loglog(f1,pow1/f1[0],label='Axial Baseline')\n plt.loglog(f2,pow2/f2[0],label='Azimuthal Baseline')\n plt.loglog(f3,pow3/f3[0],label='Pitch Axial')\n 
plt.loglog(f4,pow4/f4[0],label='Pitch Azimuthal')\n plt.loglog(f5,pow5/f5[0],label='Roll Axial')\n plt.loglog(f6,pow6/f6[0],label='Roll Azimuthal')\n plt.title('Residual Fringe Repeatability Impact')\n plt.xlabel('Frequency (1/mm)')\n plt.ylabel('Power ($\\mu$m$^2$ mm)')\n plt.grid()\n plt.legend(loc='lower left')\n\n return f1,pow1", "def noise(self, xs, ys):\n raise NotImplementedError", "def timing_recover(algo,sig,lam=20,h=8,tries=3):\n\n\t#temporary save\n\tS=0\n\tfor i in range(tries):\n\t\t(time,Sig)=denoise(algo,sig,lam,h);\n\t\tS+=time;\n\treturn S/tries", "def _calculate_snr_spread(self):\n\n dmSpacing, percentage = 100, 0\n while percentage < 0.5: \n x = np.linspace(self.centerDm - dmSpacing, self.centerDm + dmSpacing, 500)\n y = np.array([self.effective_snr(self.effective_width(self.pulseWidth, self.centerDm - dm_val, self.bandwidth, self.freq), self.pulseWidth * 20) for dm_val in x])\n y = (y / (np.max(y) * 1.0)) if np.max(y) > 0 else y\n percentage = np.size(np.where(y > 0)) / 1000.0\n dmSpacing = dmSpacing*0.6\n \n return x, y", "def noise_generator(self, power=None, SNR=None, size=None):\r\n alpha = self.db2power(SNR)\r\n sigma = np.sqrt(power / alpha) # 计算噪声标准差\r\n # 产生噪声\r\n noise_data = np.sqrt(0.5) * (np.random.normal(0, sigma, size=size) + np.random.normal(0, sigma, size=size) * 1j)\r\n noise_data = noise_data.astype(np.complex64)\r\n return noise_data", "def treat(self):\r\n if self.noiseS > 0:\r\n self.evaluations = min((self.evaluations * self.alphaevals, self.maxevals))\r\n return self.alphasigma\r\n else:\r\n self.evaluations = max((self.evaluations * self.alphaevalsdown, self.minevals))\r\n return 1.0", "def cal_sdri(src_ref, src_est, mix):\r\n src_anchor = np.stack([mix, mix], axis=0)\r\n sdr, sir, sar, popt = bss_eval_sources(src_ref, src_est)\r\n sdr0, sir0, sar0, popt0 = bss_eval_sources(src_ref, src_anchor)\r\n avg_sdri = ((sdr[0]-sdr0[0]) + (sdr[1]-sdr0[1])) / 2\r\n return avg_sdri", "def get_snr(image_data, b_var, hlr):\n img = galsim.Image(image_data)\n try:\n new_params = galsim.hsm.HSMParams(max_amoment=5.0e15,\n max_mom2_iter=20000,\n convergence_threshold=1.e-5)\n res = galsim.hsm.FindAdaptiveMom(img, hsmparams=new_params,\n guess_sig=hlr * 2.5)\n aperture_noise = float(np.sqrt(b_var * 2. * np.pi * (res.moments_sigma**2)))\n sn_ellip_gauss = res.moments_amp / aperture_noise\n print 'RES', res.moments_amp, res.moments_sigma\n print 'SNR', sn_ellip_gauss\n except:\n print 'SNR manually set'\n sn_ellip_gauss = -10.\n print 'SNR', sn_ellip_gauss\n return sn_ellip_gauss", "def SNR(X, k, n):\n signal, NSA = X # signal = mean signal intensity / SD\n return k * signal * (NSA ** n)", "def looptcs(self): \n while self.itr < 1: \n #self.genRandomNoise() #developing\n self.tcs(lpf=1)\n #self.itr +=1 ", "def compute_snr(origianl_waveform, target_waveform):\n return 10. * np.log10(np.sqrt(np.sum(origianl_waveform**2)) / np.sqrt(\n np.sum((origianl_waveform - target_waveform)**2)))", "def ComputeSNR(self):\n for epi in self.entry_map['epi']:\n epifile = self.info[epi]['imgfile_final'] + self.info[epi]['suffix']\n prefix = self.info[epi]['imgfile_final'] + '_snr'\n if not os.path.exists('%s_snr.png' % prefix):\n if self.verbose:\n print 'TemporalSnr(epifile=%s, prefix=%s)' % \\\n (epifile, prefix)\n try:\n TemporalSnr(epifile=epifile, prefix=prefix)()\n except:\n print(\"Error computing temporal SNR\")", "def get_snr(self, smoo=0, skips=50, plt_PS=False, plt_SNR=False):\n\n # print np.abs(self.ds.freq - 100.) 
# the difference between all the freqs and d\n # print self.set_width(100., factor=1) # the env width at d\n # print self.ds.power[np.abs(self.ds.freq - 100.) < self.set_width(100., factor=1)] # all the power values inside of the envelope around d\n # print np.median(self.ds.power[np.abs(self.ds.freq - 100.) < self.set_width(100., factor=1)]) # the median power in the envelope around d\n med = [np.median(self.ds.power[np.abs(self.ds.freq - d) < self.set_width(d, factor=1)]) for d in self.ds.freq[::skips]]\n\n # interpolate between skipped freqs in self.ds.freqs using the moving median\n f = interpolate.interp1d(self.ds.freq[::skips], med, bounds_error=False)\n self.bkg = f(self.ds.freq)\n self.snr = self.ds.power / self.bkg\n self.snr[:skips] = 1.0\n self.snr[-skips:] = 1.0\n if smoo > 1:\n self.snr = nd.filters.uniform_filter1d(self.snr, int(smoo[0]))\n\n if plt_PS: self.plot_ps()\n if plt_SNR: self.plot_snr()", "def calculate_snr_from_reflectivity(\n radar, refl_field=None, snr_field=None, toa=25000.):\n if refl_field is None:\n refl_field = get_field_name('reflectivity')\n if snr_field is None:\n snr_field = get_field_name('signal_to_noise_ratio')\n\n range_grid = np.meshgrid(radar.range['data'],\n np.ma.ones(radar.time['data'].shape))[0] + 1.0\n\n # remove range scale.. This is basically the radar constant scaled dBm\n pseudo_power = (radar.fields[refl_field]['data'] -\n 20.0*np.log10(range_grid / 1000.0))\n\n # Noise floor estimate\n # 25km.. should be no scatterers, not even planes, this high\n # we could get undone by AP though.. also sun\n rg, azg = np.meshgrid(radar.range['data'], radar.azimuth['data'])\n rg, eleg = np.meshgrid(radar.range['data'], radar.elevation['data'])\n x, y, z = antenna_to_cartesian(rg / 1000.0, azg, eleg) # XXX: need to fix\n\n points_above = np.where(z > toa)\n noise_floor_estimate = pseudo_power[points_above].mean()\n\n snr_dict = get_metadata(snr_field)\n snr_dict['data'] = pseudo_power - noise_floor_estimate\n return snr_dict", "def denoise(self):\n\n #make sure the data has a len dividible by 2^2\n self.len_swt = self.len\n while not (self.len_swt/4).is_integer():\n self.len_swt -= 1\n\n inp = self.input_nobase[:self.len_swt]\n self.wave = pywt.Wavelet(self.wave_type)\n nLevel = pywt.swt_max_level(self.len_swt)\n self.coeffs = pywt.swt(inp,self.wave,level=2)\n\n print(\" \\t Denoise STW coefficients \\t %1.2f %1.2f\" %(self.TK,self.TT))\n (cA2, cD2), (cA1, cD1) = self.coeffs\n\n # rolling kurtosis\n k2 = self._rolling_kts(cD2,self.nwin)\n k1 = self._rolling_kts(cD1,self.nwin)\n\n # thresholding\n cD2[k2<self.TK] = 0\n cD1[k1<self.TK] = 0\n\n cA2[k2<self.TK] = 0\n cA1[k1<self.TK] = 0\n\n # universal threshold\n sigma_roll_1 = mad(cD1[cD1!=0])*np.ones(self.len_swt)\n uthresh_roll_1 = self.TT * sigma_roll_1 * np.sqrt(2*np.log(self.len_swt))\n cD1[abs(cD1)<uthresh_roll_1] = 0\n\n # universal threshold\n sigma_roll_2 = mad(cD2[cD2!=0])*np.ones(self.len_swt)\n uthresh_roll_2 = self.TT * sigma_roll_2 * np.sqrt(2*np.log(self.len_swt))\n cD2[abs(cD2)<uthresh_roll_2] = 0\n\n # final threshold\n cA1[cD1 == 0] = 0\n cA2[cD2 == 0] = 0\n self.denoised_coeffs = [(cA1,cD1),(cA2,cD2)]\n\n # denoise the data\n #self.input_denoised = self._iswt(self.denoised_coeffs,self.wave)\n self.input_denoised = pywt.iswt(self.denoised_coeffs,self.wave)", "def synthesize(base, overlay, snr):\n assert -5 <= snr < 50\n noise_pre_scale = 1 - snr / 50\n if snr > 0:\n overlay = overlay * noise_pre_scale\n\n len_speech = base.shape[0]\n len_noise = overlay.shape[0]\n assert len_noise > 
len_speech\n\n start_point_noise = random.randint(0, len_noise - len_speech)\n overlay = overlay[start_point_noise: start_point_noise + len_speech]\n\n rms_overlay = rms(overlay)\n rms_base = rms(base)\n\n db_overlay = 20 * np.log10(rms_overlay + 1e-8)\n db_base = 20 * np.log10(rms_base)\n\n snr_origin = db_base - db_overlay\n db_adjust = snr - snr_origin\n scale_adjust = np.power(10, db_adjust / 20)\n\n output = overlay + base * scale_adjust\n return output", "def _sa():\n The.math.seed = 200\n The.sa.verbose=True\n The.optimize.era=50\n The.optimize.epsilon=0\n The.optimize.repeats=1\n The.optimize.format = \"%4.0f\"\n The.optimize.width = 25\n sa(Schaffer)", "def _get_noise(self, shape, dtype=None):", "def add_noise_m(self, data):\n return self.range_to_m(self.add_noise(self.m_to_range(data)))", "def white_noise(template, rms_uKarcmin_T, rms_uKarcmin_pol=None):\n\n noise = template.copy()\n rad_to_arcmin = 60 * 180 / np.pi\n if noise.pixel == \"HEALPIX\":\n nside = noise.nside\n pixArea = hp.pixelfunc.nside2pixarea(nside) * rad_to_arcmin ** 2\n if noise.pixel == \"CAR\":\n pixArea = noise.data.pixsizemap() * rad_to_arcmin ** 2\n if noise.ncomp == 1:\n if noise.pixel == \"HEALPIX\":\n size = len(noise.data)\n noise.data = np.random.randn(size) * rms_uKarcmin_T / np.sqrt(pixArea)\n if noise.pixel == \"CAR\":\n size = noise.data.shape\n noise.data = np.random.randn(size[0], size[1]) * rms_uKarcmin_T / np.sqrt(pixArea)\n if noise.ncomp == 3:\n if rms_uKarcmin_pol is None:\n rms_uKarcmin_pol = rms_uKarcmin_T * np.sqrt(2)\n if noise.pixel == \"HEALPIX\":\n size = len(noise.data[0])\n noise.data[0] = np.random.randn(size) * rms_uKarcmin_T / np.sqrt(pixArea)\n noise.data[1] = np.random.randn(size) * rms_uKarcmin_pol / np.sqrt(pixArea)\n noise.data[2] = np.random.randn(size) * rms_uKarcmin_pol / np.sqrt(pixArea)\n if noise.pixel == \"CAR\":\n size = noise.data[0].shape\n noise.data[0] = np.random.randn(size[0], size[1]) * rms_uKarcmin_T / np.sqrt(pixArea)\n noise.data[1] = np.random.randn(size[0], size[1]) * rms_uKarcmin_pol / np.sqrt(pixArea)\n noise.data[2] = np.random.randn(size[0], size[1]) * rms_uKarcmin_pol / np.sqrt(pixArea)\n\n return noise", "def silence_intervals(file_path,file_name):\r\n nsil_start_time=[]\r\n nsil_end_time=[]\r\n sil_start_time=[]\r\n sil_end_time=[]\r\n #read file \r\n audio, sample_rate = librosa.load(os.path.join(file_path,file_name))\r\n \r\n #silence extraction using librosa\r\n nsil_intv=librosa.effects.split(audio, top_db=30).astype('float32') / sample_rate\r\n \r\n #silence extraction using pyAudioanalysis\r\n # [Fs, x] = aIO.readAudioFile(os.path.join(file_path,file_name))\r\n # nsil_intv = np.array(aS.silenceRemoval(x, Fs, 0.020, 0.020, smoothWindow = 0.7, Weight = 0.3, plot = False))\r\n # print \"non-sil segments=\"+str(nsil_intv)\r\n\r\n #silence detection using webrtcvad (voice activity detection)\r\n #nsil_intv=np.array(vad_webrtcvad(file_path,file_name))\r\n\r\n\r\n dur=librosa.get_duration(y=audio, sr=sample_rate)\r\n print nsil_intv\r\n print dur\r\n print sample_rate\r\n curr_sil_start=0.0\r\n curr_sil_end=0.0\r\n for i in range(nsil_intv.shape[0]):\r\n nsil_start_time.append(nsil_intv[i][0])\r\n #sil_start_time=list(np.array(sil_start_time)/sample_rate)\r\n\r\n nsil_end_time.append(nsil_intv[i][1])\r\n #sil_end_time=list(np.array(sil_end_time)/sample_rate)\r\n\r\n for i in range(len(nsil_start_time)):\r\n curr_sil_end=nsil_start_time[i]\r\n sil_start_time.append(str(curr_sil_start))\r\n sil_end_time.append(str(curr_sil_end))\r\n 
curr_sil_start=nsil_end_time[i]\r\n\r\n print sil_start_time\r\n print sil_end_time\r\n return sil_start_time,sil_end_time", "def MC(a_in, b, c, S_0, I_0, R_0, N, T, vitality=False, seasonal=False, vaccine=False):\n\n if seasonal:\n\n a0 = a_in #average transmission rate\n A = 4 #max.deviation from a0\n omega = 0.5 #frequency of oscillation\n a = A*np.cos(omega*0) + a0\n else:\n a = a_in\n\n # Size of time step\n dt = np.min([4/(a*N), 1/(b*N), 1/(c*N)])\n\n # Nr of time steps\n N_time = int(T/dt)\n\n # Set up empty arrys\n S = np.zeros(N_time)\n I = np.zeros_like(S)\n R = np.zeros_like(S)\n t = np.zeros_like(S)\n\n #initalize arrays\n S[0] = S_0\n I[0] = I_0\n R[0] = R_0\n t[0] = 0\n\n # time loop\n for i in range(N_time - 1):\n\n if seasonal:\n a0 = a_in\n A = 4\n omega = 0.5\n a = A*np.cos(omega*t[i]) + a0\n else:\n a = a_in\n\n S[i+1] = S[i]\n I[i+1] = I[i]\n R[i+1] = R[i]\n\n rdm = np.random.random() #random number SIRS-transitions\n\n # S to I\n r_SI = rdm #np.random.random()\n if r_SI < (a*S[i]*I[i]*dt/N):\n S[i+1] -= 1\n I[i+1] += 1\n\n # I to R\n r_IR = rdm #np.random.random()\n if r_IR < (b*I[i]*dt):\n I[i+1] -= 1\n R[i+1] += 1\n\n # R to S\n r_RS = rdm #np.random.random()\n if r_RS < (c*R[i]*dt):\n R[i+1] -= 1\n S[i+1] += 1\n\n if vitality:\n\n rdm1 = np.random.random() #random number vital dynamics\n\n #death rate d in general population S, I and R\n r_dS = rdm1 #np.random.random()\n if r_dS < (d*S[i]*dt): #d*S*dt:probability of 1 individ. dying in S category\n S[i+1] -= 1\n\n #r_dI = rdm #np.random.random()\n r_dI = rdm1 #np.random.random()\n if r_dS < (d*I[i]*dt):\n I[i+1] -= 1\n\n #r_dR = rdm #np.random.random()\n r_dR = rdm1 #np.random.random()\n if r_dR < (d*R[i]*dt):\n R[i+1] -= 1\n\n #death rate dI for infected population I\n r_dII = rdm1 #np.random.random()\n if r_dII < (dI*I[i]*dt):\n I[i+1] -= 1\n\n #birth rate e for general population S, I and R\n r_eS = rdm1 #np.random.random()\n if r_eS < (e*S[i]*dt): #e*S*dt:probability of 1 individ. born in S category\n S[i+1] += 1\n\n r_eI = rdm1 #np.random.random()\n if r_eS < (e*I[i]*dt):\n I[i+1] += 1\n\n r_eR = rdm1 #np.random.random()\n if r_eR < (e*R[i]*dt):\n R[i+1] += 1\n\n if vaccine:\n tv = T/2\n if t[i] >= tv:\n r_v = rdm #np.random.random()\n if r_v < (f*S[i]*dt): #f*S*dt:probability of 1 individ. in S getting a vaccine\n S[i+1] -= 1\n R[i+1] += 1\n\n t[i+1] = t[i] + dt\n\n return S, I, R, t, f", "def detect_bispectra(self, sigma=5., tol=1.3, Q=0, show=0, save=0):\n\n try:\n ba = self.bispectra\n except AttributeError:\n print 'Need to make bispectra first.'\n return\n\n# ntr = lambda num: num*(num-1)*(num-2)/6 # assuming all triples are present\n ntr = lambda num: len(self.triples) # assume only good triples are present and use array size as input for noise estimate\n\n # using s=S/Q\n mu = lambda s: 1. # for bispectra formed from visibilities\n sigbQ3 = lambda s: n.sqrt((1 + 3*mu(s)**2) + 3*(1 + mu(s)**2)*s**2 + 3*s**4) # from kulkarni 1989, normalized by Q**3, also rogers et al 1995\n s = lambda basnr, nants: (2.*basnr/n.sqrt(ntr(nants)))**(1/3.) # see rogers et al. 1995 for factor of 2\n\n # measure SNR_bl==Q from sigma clipped times with normal mean and std of bispectra. 
put into time,dm order\n bamean = ba.real.mean(axis=2).transpose()\n bastd = ba.real.std(axis=2).transpose()\n\n bameanstd = []\n for dmind in xrange(len(self.dmarr)):\n (meanmin,meanmax) = sigma_clip(bamean[:, dmind]) # remove rfi to estimate noise-like parts\n (stdmin,stdmax) = sigma_clip(bastd[:, dmind])\n clipped = n.where((bamean[:, dmind] > meanmin) & (bamean[:, dmind] < meanmax) & (bastd[:, dmind] > stdmin) & (bastd[:, dmind] < stdmax) & (bamean[:, dmind] != 0.0))[0] # remove rfi and zeros\n bameanstd.append(ba[dmind][clipped].real.mean(axis=1).std())\n\n bameanstd = n.array(bameanstd)\n basnr = bamean/bameanstd # = S**3/(Q**3 / n.sqrt(n_tr)) = s**3 * n.sqrt(n_tr)\n if Q:\n print 'Using given Q =', Q\n else:\n Q = ((bameanstd/2.)*n.sqrt(ntr(self.nants)))**(1/3.)\n # Q = n.median( bastd[clipped]**(1/3.) ) # alternate for Q\n print 'Estimating noise per baseline from data. Q (per DM) =', Q\n self.Q = Q\n\n # detect\n cands = n.where( (bastd/Q**3 < tol*sigbQ3(s(basnr, self.nants))) & (basnr > sigma) ) # get compact sources with high snr\n\n # plot snrb lc and expected snr vs. sigb relation\n if show or save:\n for dmbin in xrange(len(self.dmarr)):\n cands_dm = cands[0][n.where(cands[1] == dmbin)[0]] # find candidates for this dmbin\n p.figure(range(len(self.dmarr)).index(dmbin)+1)\n ax = p.axes()\n p.subplot(211)\n p.title(str(self.nskip/self.nbl) + ' nskip, ' + str(dmbin) + ' dmbin, ' + str(len(cands_dm))+' candidates', transform = ax.transAxes)\n p.plot(basnr[:,dmbin], 'b.')\n if len(cands_dm) > 0:\n p.plot(cands_dm, basnr[cands_dm,dmbin], 'r*')\n p.ylim(-2*basnr[cands_dm,dmbin].max(),2*basnr[cands_dm,dmbin].max())\n p.xlabel('Integration',fontsize=12,fontweight=\"bold\")\n p.ylabel('SNR_b',fontsize=12,fontweight=\"bold\")\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_position(('outward', 20))\n ax.spines['left'].set_position(('outward', 30))\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n p.subplot(212)\n p.plot(bastd[:,dmbin]/Q[dmbin]**3, basnr[:,dmbin], 'b.')\n\n # plot reference theory lines\n smax = s(basnr[:,dmbin].max(), self.nants)\n sarr = smax*n.arange(0,101)/100.\n p.plot(sigbQ3(sarr), 1/2.*sarr**3*n.sqrt(ntr(self.nants)), 'k')\n p.plot(tol*sigbQ3(sarr), 1/2.*sarr**3*n.sqrt(ntr(self.nants)), 'k--')\n p.plot(bastd[cands_dm,dmbin]/Q[dmbin]**3, basnr[cands_dm,dmbin], 'r*')\n ax.spines['top'].set_visible(False)\n ax.spines['right'].set_visible(False)\n ax.spines['bottom'].set_position(('outward', 20))\n ax.spines['left'].set_position(('outward', 30))\n ax.yaxis.set_ticks_position('left')\n ax.xaxis.set_ticks_position('bottom')\n\n if len(cands_dm) > 0:\n p.axis([0, tol*sigbQ3(s(basnr[cands_dm,dmbin].max(), self.nants)), -0.5*basnr[cands_dm,dmbin].max(), 1.1*basnr[cands_dm,dmbin].max()])\n\n # show spectral modulation next to each point\n for candint in cands_dm:\n sm = n.single(round(self.specmod(dmbin,candint),1))\n p.text(bastd[candint,dmbin]/Q[dmbin]**3, basnr[candint,dmbin], str(sm), horizontalalignment='right', verticalalignment='bottom')\n p.xlabel('sigma_b/Q^3',fontsize=12,fontweight=\"bold\")\n p.ylabel('SNR_b',fontsize=12,fontweight=\"bold\")\n if save:\n if save == 1:\n savename = self.file.split('.')[:-1]\n savename.append(str(self.nskip/self.nbl) + '_' + str(dmbin) + '_bisp.png')\n savename = string.join(savename,'.')\n elif isinstance(save, types.StringType):\n savename = save\n print 'Saving file as ', savename\n p.savefig(self.pathout+savename)\n\n return basnr[cands], 
bastd[cands], zip(cands[0],cands[1])", "def add_signal_to_noise(self):\n\n # noise\n noise = lal.CreateREAL8TimeSeries('blah', self.epoch, 0,\n self.td_noise.delta_t, lal.StrainUnit, \n int(self.td_noise.duration / self.td_noise.delta_t))\n noise.data.data = self.td_noise.data\n\n # signal\n signal = lal.CreateREAL8TimeSeries('blah',\n self.ext_params.geocent_peak_time, 0, self.td_signal.delta_t,\n lal.StrainUnit, int(self.td_signal.duration /\n self.td_signal.delta_t))\n signal.data.data = self.td_signal.data\n\n win = lal.CreateTukeyREAL8Window(len(signal.data.data),0.1)\n win.data.data[len(signal.data.data):] = 1.0\n #signal.data.data *= win.data.data\n\n # --- Scale to a target snr\n print '---'\n if self.target_snr is not None:\n\n tmp_sig = pycbc.types.TimeSeries(signal.data.data,\n delta_t=self.td_signal.delta_t)\n\n current_snr = pycbc.filter.sigma(tmp_sig, psd=self.psd,\n low_frequency_cutoff=self.f_low,\n high_frequency_cutoff=0.5/self.delta_t)\n\n signal.data.data *= self.target_snr / current_snr\n # ----\n\n # sum\n noise_plus_signal = lal.AddREAL8TimeSeries(noise, signal)\n\n self.td_response = \\\n pycbc.types.timeseries.TimeSeries(\\\n initial_array=np.copy(noise_plus_signal.data.data),\n delta_t=noise_plus_signal.deltaT,\n epoch=noise_plus_signal.epoch)\n\n # Finally, zero-pad the signal vector to have the same length as the actual data\n # vector\n no_noise = lal.CreateREAL8TimeSeries('blah', self.epoch, 0,\n self.td_noise.delta_t, lal.StrainUnit, \n int(self.td_noise.duration / self.td_noise.delta_t))\n\n no_noise.data.data = np.zeros(\\\n int(self.td_noise.duration / self.td_noise.delta_t))\n\n signal = lal.AddREAL8TimeSeries(no_noise, signal)\n\n self.td_signal = \\\n pycbc.types.timeseries.TimeSeries(initial_array=np.copy(signal.data.data),\n delta_t=signal.deltaT, epoch=noise_plus_signal.epoch)\n\n del noise, signal, noise_plus_signal", "def _calculate_measurement_error(self): \n \n # Calculate Hartmann Spot\n # FIXME what are factor_1, factor_2 ???\n factor_1, factor_2 = 206265*5.89e-7, 206265*6.5e-7\n term1, term2 = factor_1/self.actuator_spacing, factor_2/self.r0\n hartmann_spot = np.max([term1, term2])\n \n # Calculate SNR \n n_pix=4 # FIXME spreadsheet says not to change this idk why?\n sample_time = 1/(10*self.controller_frequency)\n brightness = (8.9e5)*10**((0-self.guide_star_mag)/2.5)\n n_photons = brightness*sample_time*((100*self.actuator_spacing)**2)\n snr = n_photons/np.sqrt(n_photons + n_pix*(self.sigma_readnoise)**2)\n\n # Calculate noise propagator \n degrees_of_freedom = np.round((np.pi/4) * (self.telescope_diameter/self.actuator_spacing)**2)\n factor_1, factor_2 = 0.0536, 0.0795 # FIXME WHAT THE HECK IS THIS\n if self.aperture == 'circular':\n factor_1, factor_2 = 0.0068, 0.0796\n noise_propagator = np.sqrt(2*(factor_1 + factor_2*np.log(degrees_of_freedom)))\n\n # Calculate close loop averaging\n controller_over_frame = 1/10\n close_loop_averaging = np.sqrt(2*controller_over_frame)*np.arctan(1/(2*controller_over_frame))\n sigma_measurement = noise_propagator * close_loop_averaging * (self.actuator_spacing*1e9) * (hartmann_spot/snr*4.84814e-6)\n self.sigma_measurement = sigma_measurement # in nm", "def calculate_mixture_features(data_type):\n workspace = config.workspace\n data_dir = config.data_dir\n speech_dir = os.path.join(data_dir,'{}_speech'.format(data_type))\n noise_dir = os.path.join(data_dir,'{}_noise'.format(data_type)) \n \n fs = config.sample_rate\n \n if data_type == 'train':\n snr = config.Tr_SNR\n elif data_type == 'test':\n snr = 
config.Te_SNR \n else:\n raise Exception(\"data_type must be train | test!\")\n \n \n # Open mixture csv. \n mixture_csv_path = os.path.join(workspace, \"mixture_csvs\", \"%s.csv\" % data_type)\n with open(mixture_csv_path, 'r') as f:\n reader = csv.reader(f, delimiter='\\t')\n lis = list(reader)\n \n t1 = time.time()\n cnt = 0\n for i1 in range(1, len(lis)):\n [speech_na, noise_na, noise_onset, noise_offset] = lis[i1]\n noise_onset = int(noise_onset)\n noise_offset = int(noise_offset)\n \n # Read speech audio. \n speech_path = os.path.join(speech_dir, speech_na)\n (speech_audio, _) = read_audio(speech_path, target_fs=fs)\n \n # Read noise audio. \n noise_path = os.path.join(noise_dir, noise_na)\n (noise_audio, _) = read_audio(noise_path, target_fs=fs)\n \n # Repeat noise to the same length as speech. \n if len(noise_audio) < len(speech_audio):\n n_repeat = int(np.ceil(float(len(speech_audio)) / float(len(noise_audio))))\n noise_audio_ex = np.tile(noise_audio, n_repeat)\n noise_audio = noise_audio_ex[0 : len(speech_audio)]\n # Truncate noise to the same length as speech. \n else:\n noise_audio = noise_audio[noise_onset : noise_offset]\n \n # Scale speech to given snr. \n scaler = get_amplitude_scaling_factor(speech_audio, noise_audio, snr=snr)\n speech_audio *= scaler\n \n # Get normalized mixture, speech, noise. \n (mixed_audio, speech_audio, noise_audio, alpha) = additive_mixing(speech_audio, noise_audio)\n\n # Write out mixed audio. \n out_bare_na = os.path.join(\"%s.%s\" % \n (os.path.splitext(speech_na)[0], os.path.splitext(noise_na)[0]))\n out_audio_path = os.path.join(workspace, \"mixed_audios\", \"spectrogram\", \n data_type, \"%ddb\" % int(snr), \"%s.wav\" % out_bare_na)\n create_folder(os.path.dirname(out_audio_path))\n write_audio(out_audio_path, mixed_audio, fs)\n\n # Extract spectrogram. \n mixed_complx_x = calc_sp(mixed_audio, mode='complex')\n speech_x = calc_sp(speech_audio, mode='magnitude')\n noise_x = calc_sp(noise_audio, mode='magnitude')\n\n # Write out features. \n out_feat_path = os.path.join(workspace, \"features\", \"spectrogram\", \n data_type, \"%ddb\" % int(snr), \"%s.p\" % out_bare_na)\n create_folder(os.path.dirname(out_feat_path))\n data = [mixed_complx_x, speech_x, noise_x, alpha, out_bare_na]\n pickle.dump(data, open(out_feat_path, 'wb'), protocol=pickle.HIGHEST_PROTOCOL)\n \n # Print. 
\n if cnt % 100 == 0:\n print(cnt)\n \n cnt += 1\n\n print(\"Extracting feature time: %s\" % (time.time() - t1))", "def prob_3_6(self):\n \n ###### START CODE HERE ######\n\n\n ###### END CODE HERE ######\n pass\n \n ###### return addNoiseImg ######", "def _add_random_noise_and_flatten(x):\n # Random noise path indexes and random snr levels\n rand_noise = [\n (noise_type,\n tf.random.uniform([], 0, tf.size(type2paths[noise_type]), tf.int32),\n tf.random.uniform([], snr_low, snr_high, tf.float32))\n for noise_type, snr_low, snr_high in snr_list]\n # Select random noise signals by drawn indexes and read contents from files\n rand_noise = [\n (audio_features.read_wav(type2paths[noise_type][rand_index]), snr)\n for noise_type, rand_index, snr in rand_noise]\n\n # Assert sample rates\n # TODO maybe add inline resampling of noise signals so they match the speech sr\n for (noise, sample_rate), snr in rand_noise:\n tf.debugging.assert_equal(sample_rate, x[\"sample_rate\"], message=\"Invalid noise signals are being used, all noise signals must have same sample rate as speech signals that are being augmented\")\n\n # Fix noise signal length to match x[\"signal\"] by repeating the noise signal if it is too short and then slicing it\n rand_noise = [\n # How many multiples of `noise` fits in x[\"signal\"]\n (tf.cast(tf.size(x[\"signal\"]) / tf.size(noise), tf.int32), noise, snr)\n for (noise, _), snr in rand_noise]\n rand_noise = [\n # Repeat noise and slice\n (tf.tile(noise, [1 + noise_length_ratio])[:tf.size(x[\"signal\"])], snr)\n for noise_length_ratio, noise, snr in rand_noise]\n\n # Mix x[\"signal\"] and chosen noise signals\n mixed_signals = [audio_features.snr_mixer(x[\"signal\"], noise, snr)[2] for noise, snr in rand_noise]\n # Create new utterance ids that contain the mixed noise type and SNR level\n new_ids = [\n tf.strings.join((\n \"augmented\",\n x[\"id\"],\n noise_type,\n tf.strings.join((\"snr\", tf.strings.as_string(snr, precision=2)))),\n separator=\"-\")\n for (noise_type, _, _), (_, snr) in zip(snr_list, rand_noise)]\n\n # Create new elements from the mixed signals and return as dataset\n return (tf.data.Dataset\n .zip((tf.data.Dataset.from_tensor_slices(new_ids),\n tf.data.Dataset.from_tensor_slices(mixed_signals),\n tf.data.Dataset.from_tensors(x).repeat(len(mixed_signals))))\n .map(_update_element_meta))", "def get_noise_ceil(self):\n return self.noise_ceiling", "def _get_f50_worker(self, ra, dec, wave, sncut, \n direct_sigmas = False, linewidth = None):\n\n logger = logging.getLogger(name=\"ShotSensitivity\")\n\n try:\n [x for x in ra]\n except TypeError:\n ra = array([ra]) \n dec = array([dec]) \n wave = array([wave]) \n\n coords = SkyCoord(ra=ra, dec=dec, unit=\"deg\")\n wave_rect = self.extractor.get_wave()\n pixsize_aa = wave_rect[1] - wave_rect[0]\n\n # This will give 999 once the noise is scaled suitably\n badval = 999*1e17/pixsize_aa\n\n # Size of window in wave elements\n filter_len = 2*self.wavenpix + 1\n\n if type(wave) != type(None):\n wave_passed = True\n else:\n wave_passed = False\n convolution_filter = ones(filter_len) \n mask = True*ones(len(coords), dtype=int)\n \n noise = []\n \n info_results = self.extractor.get_fiberinfo_for_coords(\n coords,\n radius=self.rad,\n ffsky=self.ffsky,\n return_fiber_info=True,\n fiber_lower_limit=2, \n verbose=False\n )\n\n id_, aseps, aifux, aifuy, axc, ayc, ara, adec, adata, aerror, afmask, afiberid, \\\n amultiframe = info_results\n \n \n I = None\n fac = None\n norm_all = []\n amp = [] \n nan_fib_mask = []\n\n for i, c in 
enumerate(coords):\n \n sel = (id_ == i)\n\n if type(wave) != type(None):\n logger.debug(\"Running on source {:f} {:f} {:f}\".format(ra[i], dec[i], wave[i]))\n else:\n logger.debug(\"Running on position {:f} {:f}\".format(ra[i], dec[i]))\n\n logger.debug(\"Found {:d} fibers\".format(sum(sel)))\n\n if sum(sel) > 0:\n \n # fiber properties \n xc = axc[sel][0]\n yc = ayc[sel][0]\n ifux = aifux[sel]\n ifuy = aifuy[sel]\n data = adata[sel]\n error = aerror[sel]\n fmask = afmask[sel]\n fiberid = afiberid[sel]\n multiframe = amultiframe[sel]\n seps = aseps[sel]\n\n # Flag the zero elements as bad\n fmask[(abs(data) < 1e-30) | (abs(error) < 1e-30)] = False\n\n iclosest = argmin(seps)\n\n amp.append(fiberid[iclosest])\n\n if len(self.bad_amps) > 0:\n amp_flag = amp_flag_from_fiberid(fiberid[iclosest], \n self.bad_amps)\n else:\n amp_flag = True\n \n # XXX Could be faster - reloads the file every run\n meteor_flag = meteor_flag_from_coords(c, self.shotid)\n\n if not (amp_flag and meteor_flag):\n logger.debug(\"The data here are bad, position is masked\")\n if wave_passed:\n noise.append(badval)\n norm_all.append(1.0)\n # value doesn't matter as in amp flag\n nan_fib_mask.append(True)\n continue\n else:\n mask[i] = False\n \n weights, I, fac = self.extractor.build_weights(xc, yc, ifux, ifuy, self.moffat, \n I=I, fac=fac, return_I_fac = True)\n \n # (See Greg Zeimann's Remedy code)\n # normalized in the fiber direction\n norm = sum(weights, axis=0) \n weights = weights/norm\n\n\n result = self.extractor.get_spectrum(data, error, fmask, weights,\n remove_low_weights = False,\n sclean_bad = self.sclean_bad,\n return_scleaned_mask = True)\n \n spectrum_aper, spectrum_aper_error, scleaned = [res for res in result] \n \n if wave_passed:\n \n index = where(wave_rect >= wave[i])[0][0]\n ilo = index - self.wavenpix\n ihi = index + self.wavenpix + 1\n\n # If lower index less than zero, truncate\n if ilo < 0:\n ilo = 0\n \n if ihi < 0:\n ihi = 0\n \n # Output lots of information for very detailed debugging\n if logger.getEffectiveLevel() == logging.DEBUG: \n logger.debug(\"Table of fibers:\")\n logger.debug(\"# fiberid wave_index ifux ifuy weight noise mask\")\n for fibidx, fid in enumerate(fiberid):\n for wi, (tw, tnoise, tmask) in enumerate(zip((weights*norm)[fibidx, ilo:ihi], \n error[fibidx, ilo:ihi], fmask[fibidx, ilo:ihi]), \n ilo): \n logger.debug(\"{:s} {:d} {:f} {:f} {:f} {:f} {:s}\".format(fid, wi, ifux[fibidx], \n ifuy[fibidx], tw, tnoise, \n str(tmask)))\n\n\n # Mask source if bad values within the central 3 wavebins\n nan_fib = bad_central_mask(weights*norm, logical_not(fmask), \n index) \n nan_fib_mask.append(nan_fib)\n\n # Account for NaN and masked spectral bins\n bad = isnan(spectrum_aper_error[ilo:ihi])\n goodfrac = 1.0 - sum(bad)/len(bad)\n\n\n if all(isnan(spectrum_aper_error[ilo:ihi])):\n sum_sq = badval\n else:\n sum_sq = \\\n sqrt(nansum(square(spectrum_aper_error[ilo:ihi])/goodfrac))\n\n norm_all.append(mean(norm[ilo:ihi]))\n noise.append(sum_sq)\n else:\n logger.debug(\"Convolving with window to get flux limits versus wave\")\n\n\n # Use astropy convolution so NaNs are ignored\n convolved_variance = convolve(square(spectrum_aper_error),\n convolution_filter, \n normalize_kernel=False)\n std = sqrt(convolved_variance)\n\n # Also need to convolve aperture corrections to get\n # a total apcor across the wavelength window\n convolved_norm = convolve(norm,\n convolution_filter, \n normalize_kernel=True)\n\n # To get mean account for the edges in\n # the convolution\n for iend in 
range(self.wavenpix):\n edge_fac = filter_len/(filter_len + iend - self.wavenpix)\n convolved_norm[iend] *= edge_fac\n convolved_norm[-iend - 1] *= edge_fac\n\n\n # Mask wavelengths with too many bad pixels\n # equivalent to nan_fib in the wave != None mode\n wunorm = weights*norm\n for index in range(len(convolved_variance)):\n if not bad_central_mask(wunorm, logical_not(fmask), index):\n std[index] = badval\n\n noise.append(std)\n norm_all.append(convolved_norm)\n \n else:\n if wave_passed:\n noise.append(badval)\n norm_all.append(1.0)\n amp.append(\"000\")\n nan_fib_mask.append(True)\n else:\n noise.append(badval*ones(len(wave_rect)))\n norm_all.append(ones(len(wave_rect)))\n amp.append(\"000\")\n mask[i] = False\n\n\n \n # Apply the galaxy mask \n gal_mask = ones(len(coords), dtype=int)\n for gal_region in self.gal_regions:\n dummy_wcs = create_dummy_wcs(gal_region.center,\n imsize=2*gal_region.height)\n # zero if near galaxy\n gal_mask = gal_mask & invert(gal_region.contains(coords, dummy_wcs))\n\n noise = array(noise)\n snoise = pixsize_aa*1e-17*noise\n\n if wave_passed:\n\n bad = (gal_mask < 0.5) | (snoise > 998) | isnan(snoise) | invert(nan_fib_mask)\n\n normnoise = snoise/norm_all\n\n if not direct_sigmas:\n normnoise = self.f50_from_noise(normnoise, wave, sncut,\n linewidth = linewidth)\n\n \n normnoise[bad] = 999.\n\n return normnoise, amp, norm_all\n\n else:\n mask[gal_mask < 0.5] = False\n \n if self.badshot:\n mask[:] = False\n\n bad = (snoise > 998) | logical_not(isfinite(snoise))\n normnoise = snoise/norm_all\n\n if not direct_sigmas:\n normnoise = self.f50_from_noise(normnoise, wave, sncut,\n linewidth = linewidth)\n\n normnoise[bad] = 999\n\n return normnoise, mask, amp, norm_all", "def rms(signal, **kwargs):\n return np.sqrt(np.sum(signal**2))", "def radiation_measurement_analysis():\n import pint\n ureg = pint.UnitRegistry()\n\n mrem_h = ureg.parse_units('mrem') / ureg.hour\n m = ureg.parse_units('meters')\n s = ureg.parse_units('seconds')\n\n # Measurements of background radiation\n bg_dist = ureg.parse_expression('10 m') # estimate of how far away we are wrt background\n background_rows = [\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=0.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.022 * mrem_h, capture_time=0.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=4.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.021 * mrem_h, capture_time=5.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=11.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.023 * mrem_h, capture_time=16.0 * s),\n dict(vid=1, distance=bg_dist, rad=0.024 * mrem_h, capture_time=20.0 * s),\n ]\n\n # Measurements of sample radiation\n esp_dist = ureg.parse_expression('1 inch').to(m) / 2 # estimate of how far we are from the sample when very close\n dist0_rows = [\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=0.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.061 * mrem_h, capture_time=3.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=5.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.059 * mrem_h, capture_time=9.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=10.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.059 * mrem_h, capture_time=11.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.057 * mrem_h, capture_time=12.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.058 * mrem_h, capture_time=13.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.059 * mrem_h, capture_time=14.0 * s),\n 
dict(vid=2, distance=esp_dist, rad=0.060 * mrem_h, capture_time=15.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.061 * mrem_h, capture_time=16.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.062 * mrem_h, capture_time=18.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.062 * mrem_h, capture_time=18.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.064 * mrem_h, capture_time=20.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=22.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.066 * mrem_h, capture_time=23.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=24.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.063 * mrem_h, capture_time=25.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=26.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.064 * mrem_h, capture_time=27.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.064 * mrem_h, capture_time=27.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.065 * mrem_h, capture_time=28.0 * s),\n dict(vid=2, distance=esp_dist, rad=0.063 * mrem_h, capture_time=30.0 * s),\n ]\n\n dist0_v2_rows = [\n dict(vid=3, distance=esp_dist, rad=0.012 * mrem_h, capture_time=0.0 * s),\n dict(vid=3, distance=esp_dist, rad=0.011 * mrem_h, capture_time=1.0 * s),\n dict(vid=3, distance=esp_dist, rad=0.013 * mrem_h, capture_time=8.0 * s),\n dict(vid=3, distance=esp_dist, rad=0.013 * mrem_h, capture_time=9.0 * s),\n ]\n\n close_rows = [\n dict(vid=4, distance=0.5 * m, rad=0.013 * mrem_h, capture_time=0.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.014 * mrem_h, capture_time=5.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.012 * mrem_h, capture_time=7.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.011 * mrem_h, capture_time=15.0 * s),\n dict(vid=4, distance=0.5 * m, rad=0.012 * mrem_h, capture_time=16.0 * s),\n ]\n\n mid_rows = [\n dict(vid=5, distance=1.0 * m, rad=0.014 * mrem_h, capture_time=0.0 * s),\n dict(vid=5, distance=1.0 * m, rad=0.015 * mrem_h, capture_time=5.0 * s),\n dict(vid=5, distance=1.0 * m, rad=0.013 * mrem_h, capture_time=10.0 * s),\n ]\n\n far_rows = [\n dict(vid=6, distance=2.0 * m, rad=0.023 * mrem_h, capture_time=0.0 * s),\n dict(vid=6, distance=2.0 * m, rad=0.025 * mrem_h, capture_time=0.1 * s),\n ]\n\n # guess_dist = ureg.parse_expression('0.3 m') # estimate of how far away we are wrt background\n # guess_rows = [\n # dict(vid=9, distance=guess_dist, rad=0.030 * mrem_h, capture_time=0.0 * s),\n # dict(vid=9, distance=guess_dist, rad=0.041 * mrem_h, capture_time=2.0 * s),\n # dict(vid=9, distance=guess_dist, rad=0.051 * mrem_h, capture_time=3.0 * s),\n # ]\n\n rows = dist0_rows + background_rows + dist0_v2_rows + close_rows + mid_rows + far_rows\n # rows += guess_rows\n\n import pandas as pd\n import numpy as np\n table = pd.DataFrame(rows)\n\n # Ensure comparable units\n units = {\n 'rad': mrem_h,\n 'distance': m,\n 'capture_time': s,\n }\n for key, unit in units.items():\n table[key] = table[key].apply(lambda c: c.to(unit).m)\n table['rad'] = table['rad'].astype(float)\n table['distance'] = table['distance'].astype(float)\n\n # Weight each measurement based on the amount of time the measurement was\n # sustained in the video.\n average_rad_rows = []\n for vid, group in table.groupby('vid'):\n from statsmodels.stats.weightstats import DescrStatsW\n weights = (-1 * group['capture_time'].diff(periods=-1).fillna(0)) / group['capture_time'].iloc[-1]\n table.loc[group.index, 'weight'] = weights\n values = group['rad']\n weighted_stats = DescrStatsW(values, weights=weights, ddof=0)\n dists = 
group['distance'].unique()\n assert len(dists) == 1\n average_rad_rows.append({\n 'vid': vid,\n 'distance': dists[0],\n 'rad_mean': weighted_stats.mean,\n 'rad_std': weighted_stats.std,\n })\n stats_table = pd.DataFrame(average_rad_rows)\n\n bg_row = stats_table.loc[stats_table['distance'].argmax()]\n fg_row = stats_table.loc[stats_table['distance'].argmin()]\n\n # -------------------\n ADD_DUMMY_VALUES = 0\n if ADD_DUMMY_VALUES:\n # Hack: because we don't have enough samples we can fudge the value\n # knowning that the value should be the background radiation in the\n # limit.\n\n dummy_measurements = []\n extra_support = 1\n for idx in range(3, 3 + extra_support):\n dummy_row = {\n 'vid': -idx,\n 'distance': bg_row['distance'] + idx,\n 'rad_mean': bg_row['rad_mean'],\n 'rad_std': 0.01,\n }\n dummy_measurements.append(dummy_row)\n\n # also add an extra value close to the sample\n rad_bg = bg_row['rad_mean']\n rad_above_bg = fg_row['rad_mean'] - rad_bg\n dummy_row = {\n 'vid': -1,\n 'distance': fg_row['distance'] / 2,\n 'rad_mean': rad_bg + (rad_above_bg * 4),\n 'rad_std': 0.5,\n }\n dummy_measurements.append(dummy_row)\n\n # dummy_row = {\n # 'vid': -2,\n # 'distance': fg_row['distance'] / 4,\n # 'rad_mean': rad_bg + (rad_above_bg * 16),\n # }\n # dummy_measurements.append(dummy_row)\n\n dummy_stats = pd.DataFrame(dummy_measurements)\n dummy_stats['weight'] = 0.5\n stats_table['weight'] = 1.0\n stats_table2 = pd.concat([stats_table, dummy_stats]).reset_index(drop=True).sort_values('distance')\n else:\n stats_table2 = stats_table\n # -------------------\n\n import scipy\n scipy.optimize.curve_fit\n\n # Because we know the radiation should follow an inverse square law wrt to\n # distance, we can fit a polynomial of degree 2 (parabola) to interpolate /\n # extrapolate the **inverse** values.\n x = stats_table2['distance'].values\n y = stats_table2['rad_mean'].values\n s = stats_table2['rad_std'].values\n\n # Model the squared falloff directly\n def invsquare(x, a, b):\n return a * (1 / (0.01 + x ** 2)) + b\n # bg_row['rad_mean']\n # Use curve_fit to constrain the first coefficient to be zero\n try:\n coef = scipy.optimize.curve_fit(invsquare, x, y, sigma=s, method='trf')[0]\n except Exception as ex:\n coef = None\n print(f'ex={ex}')\n\n # Also fit one to the raw weighted points as a sanity check\n # inv_poly2 = Polynomial.fit(table['distance'], 1 / table['rad'], w=table['weight'], deg=2)\n\n import kwplot\n sns = kwplot.autosns()\n plt = kwplot.autoplt()\n # ax = sns.boxplot(data=table, x='distance', y='rad', width=0.1)\n\n # Add in points to show each observation\n ax = sns.relplot(x=\"distance\", y=\"rad\", data=table, size=4, color=\".3\",\n linewidth=0, alpha=0.5, palette='deep')\n\n ax = plt.gca()\n ax.set_xlabel('distance from sample ({})'.format(str(units['distance'])))\n ax.set_ylabel('radiation dosage ({})'.format(str(units['rad'])))\n\n max_meters = 10\n\n extrap_x = np.linspace(0, max_meters, 1000)\n if coef is not None:\n extrap_y1 = invsquare(extrap_x, *coef)\n # extrap_y2 = 1 / inv_poly2(extrap_x)\n ax.plot(stats_table2['distance'].values, stats_table2['rad_mean'].values, 'rx')\n ax.plot(stats_table['distance'].values, stats_table['rad_mean'].values, 'bo')\n ax.plot(extrap_x, extrap_y1, '--')\n ax.set_ylim(0.001, 0.1)\n ax.set_yscale('log')\n # ax.plot(extrap_x, extrap_y2, '--')", "def make_fakeSN_spectrum(snfile, galfile, params, outfile, err=False, signal_noise=20.0, z=0.0, wave_range=[3842, 8195]):\n snt=loadtext(snfile)\n galt=loadsdss(fits.open(galfile))\n 
minsalt=list(snt.wavelength).index(3840)\n maxsalt=list(snt.wavelength).index(8200)\n snt.wavelength=snt.wavelength[minsalt:maxsalt+1]\n snt.flux=snt.flux[minsalt:maxsalt+1]\n igal=np.interp(snt.wavelength, galt.wavelength, galt.flux)\n \n #foflux=fake observed flux\n foflux=(snt.flux*params[0] + igal*params[1]) \n \n if err:\n foflux = signal_noise**2 * foflux\n foflux = np.random.poisson(foflux)\n fofluxerr=foflux**0.5\n \n# if err:\n# foflux = foflux + np.random.normal(0.0, 0.1, foflux.shape)\n# fofluxerr=0.1*foflux\n \n if not err:\n fofluxerr=0.1*foflux\n \n fout=open(outfile, 'w')\n for i in range(len(snt.wavelength)):\n fout.write('%f %f %f\\n' % (snt.wavelength[i], foflux[i], fofluxerr[i]))\n fout.close()\n \n return fout", "def get_read_noise(self):\n\n read_noise_adu = self.ccd.read_noise / self.ccd.gain\n return numpy.random.normal(scale=read_noise_adu, size=self.image.shape)", "def main():\n\n\n ## Groups showing similar noise profile\n #grp1 = [ 1, 4, 5, 8, 9 ]\n #grp2 = [ 18, 19, 22, 23, 30, 31 ]\n grp1 = [ 0, 1, 6, 7, 4, 5 ]\n grp2 = [ 12, 13, 16, 17, 18, 19 ]\n #grp3 = [ 18, 19, 22, 23, 26, 27 ]\n with tb.open_file(sys.argv[1], 'r') as dataF:\n\n npm = len(dataF.root.Sensors.DataPMT)#len(dataF.root.RD.pmtrwf[0])\n nevt = len(dataF.root.RD.pmtrwf)\n\n ## Filter definition\n fSample = 40E6\n freqLPF = 100E3\n freqLPFd = 2*freqLPF / fSample\n b, a = signal.butter(1, freqLPFd, 'low', analog=False)\n ##\n fig, axes = plt.subplots(nrows=3, ncols=2, figsize=(20,6))\n #fig.tight_layout()\n fig.show()\n wf_len = len(dataF.root.RD.pmtrwf[0][0])\n if len(sys.argv) > 3:\n wf_len = wf_len/2+1 \n elif len(sys.argv) == 3:\n g1_first = np.zeros(wf_len, np.float64)\n g2_first = np.zeros(wf_len, np.float64)\n g3_first = np.zeros(wf_len, np.float64)\n mean_first = np.zeros(wf_len, np.float64)\n ##\n for ievt in range(nevt):\n ## clear the axies\n for ax in axes.flatten():\n ax.cla()\n plt_frq = np.zeros(wf_len, np.float64)\n fwf_mean = np.zeros(wf_len, np.float64)\n wf_mean = np.zeros(wf_len, np.float64) # No filter\n g1_mean = np.zeros(wf_len, np.float64)\n g2_mean = np.zeros(wf_len, np.float64)\n g3_mean = np.zeros(wf_len, np.float64)\n for ipm in range(npm):\n\n sg = getWF(dataF, ipm, ievt)\n sg = sg - np.mean(sg)\n\n sgf = signal.lfilter(b, a, sg)\n ## remove mean again just in case\n sgf = sgf - np.mean(sgf)\n #sgf = sg\n\n pmID = getPMid(dataF, ipm)\n\n if len(sys.argv) == 3:\n axes[0][0].plot(sgf, label='pmt '+str(pmID))\n fwf_mean += sgf/npm\n wf_mean += sg/npm\n if pmID in grp1:\n g1_mean += sgf/len(grp1)\n elif pmID in grp2:\n g2_mean += sgf/len(grp2)\n elif pmID in grp3:\n g3_mean += sgf/len(grp3)\n else:\n ft = np.fft.rfft(sgf)\n freq = np.fft.rfftfreq(len(sgf), d=25E-9)\n if ipm == 0:\n plt_frq = freq\n if sys.argv[2] == 'mag':\n ft_mag = np.absolute(ft)\n axes[0][0].plot(freq, ft_mag, label='pmt '+str(pmID))\n fwf_mean += ft_mag/npm\n if pmID in grp1:\n g1_mean += ft_mag/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_mag/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_mag/len(grp3)\n elif sys.argv[2] == 'phase':\n ft_pha = np.angle(ft)\n axes[0][0].plot(freq, ft_pha, label='pmt '+str(pmID))\n fwf_mean += ft_pha/npm\n if pmID in grp1:\n g1_mean += ft_pha/len(grp1)\n elif pmID in grp2:\n g2_mean += ft_pha/len(grp2)\n elif pmID in grp3:\n g3_mean += ft_pha/len(grp3)\n \n \n ## The axes not set\n if len(sys.argv) == 3:\n axes[0][1].plot(g1_mean)\n axes[0][1].set_title('Group 1 mean waveform')\n axes[1][0].plot(g2_mean)\n axes[1][0].set_title('Group 2 mean waveform')\n 
axes[1][1].plot(g3_mean)\n axes[1][1].set_title('Group 3 mean waveform')\n axes[2][0].plot(fwf_mean)\n axes[2][0].set_title('Mean waveform')\n if ievt == 0:\n g1_first = g1_mean\n g2_first = g2_mean\n g3_first = g3_mean\n mean_first = fwf_mean\n else:\n axes[0][1].plot(g1_first)\n axes[1][0].plot(g2_first)\n axes[1][1].plot(g3_first)\n axes[2][0].plot(mean_first)\n axes[2][1].plot(wf_mean)\n axes[2][1].set_title('Mean waveform and corrected')\n axes[2][1].plot(wf_mean-fwf_mean)\n axes[2][1].set_xlim(0, 1000)\n else:\n axes[0][0].set_xlim(0,50000)\n axes[0][1].plot(plt_frq, g1_mean)\n axes[0][1].set_title('Group 1 mean '+sys.argv[2])\n axes[0][1].set_xlim(0,50000)\n axes[1][0].plot(plt_frq, g2_mean)\n axes[1][0].set_title('Group 2 mean '+sys.argv[2])\n axes[1][0].set_xlim(0,50000)\n axes[1][1].plot(plt_frq, g3_mean)\n axes[1][1].set_title('Group 3 mean '+sys.argv[2])\n axes[1][1].set_xlim(0,50000)\n axes[2][0].plot(plt_frq, fwf_mean)\n axes[2][0].set_title('Mean '+sys.argv[2])\n axes[2][0].set_xlim(0,50000)\n plt.draw()\n #fig.legend(loc=0)\n catcher = input(\"next plot?\")\n if catcher == 'q':\n exit()\n plt.cla()", "def add_noise(spectra: np.ndarray, maxLevel: float = 0.1, seed: int = 42) -> np.ndarray:\n np.random.seed(seed)\n spectra = spectra.copy()\n spectra[:, 1:] *= (1-maxLevel/2) + np.random.rand(spectra.shape[0], spectra.shape[1]-1) * maxLevel\n return spectra" ]
[ "0.7203031", "0.7082178", "0.6568871", "0.6568871", "0.6404555", "0.6361032", "0.6209166", "0.6132606", "0.61150664", "0.60341376", "0.6025671", "0.60177475", "0.6004052", "0.6003546", "0.5993322", "0.5989995", "0.5975633", "0.5961862", "0.59155107", "0.59027416", "0.5901672", "0.58871067", "0.58691186", "0.5850281", "0.5842846", "0.57875764", "0.57849103", "0.57823867", "0.57727456", "0.5767724", "0.5766124", "0.5724113", "0.57230306", "0.56999296", "0.5699298", "0.5691258", "0.56877583", "0.5687667", "0.56822526", "0.56721985", "0.56697637", "0.56475294", "0.5647118", "0.560071", "0.5600009", "0.55823946", "0.5567856", "0.55507755", "0.55407447", "0.5535791", "0.5534082", "0.55261457", "0.5522515", "0.5521617", "0.5519674", "0.55136824", "0.550682", "0.550008", "0.5499535", "0.54984796", "0.54981786", "0.5490727", "0.5487853", "0.548238", "0.5481259", "0.5473278", "0.54705065", "0.5464342", "0.54527575", "0.545205", "0.54483205", "0.5447695", "0.5436472", "0.5434894", "0.541199", "0.54104817", "0.54103404", "0.54075354", "0.5406068", "0.5401426", "0.54014117", "0.53967273", "0.5393508", "0.5388509", "0.5386131", "0.5385086", "0.5382764", "0.53772444", "0.53713775", "0.5362769", "0.53599936", "0.5359533", "0.5347565", "0.5347296", "0.5346872", "0.5346657", "0.5344091", "0.5340528", "0.53389794", "0.53349596" ]
0.59576935
18
In the initialization of the blank canvas, the number of rows and columns can be defined; finally, the canvas is placed into the parent frame.
def __init__(self, parent, rows=1, columns=1, set_grid_propagate=False, *args, **kwargs):\n super().__init__(parent, highlightthickness=0, *args, **kwargs)\n self.grid_propagate(set_grid_propagate)\n self.set_num_of_rows(rows)\n self.set_num_of_columns(columns)\n self.grid(sticky="NESW")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup_canvas(self):\n # create frame to contain canvas\n self.world_container = tk.Frame(self,\n width = self.world_size[1],\n height = self.world_size[0])\n self.world_container.grid(row = 1, column = 0, sticky = tk.W+tk.N)\n\n # create canvas\n self.canvas = tk.Canvas(\n self.world_container,\n width = self.world_size[1],\n height = self.world_size[0],\n borderwidth = 1,\n highlightthickness = 0)\n self.canvas.grid(row = 0, column = 0, sticky = tk.W)\n self.canvas.bind('<Button-1>', self.click_cell)", "def _configure_canvas(event):\n if self.internal_frame.winfo_reqwidth() != self.canvas.winfo_width():\n## print \"frame\",self.internal_frame.winfo_reqwidth()\n## print \"canvas\",self.canvas.winfo_width()\n # update the inner frame's width to fill the canvas\n## self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())\n self.canvas.config(width=self.internal_frame.winfo_reqwidth())\n if self.internal_frame.winfo_reqheight() != self.canvas.winfo_height():\n # update the inner frame's width to fill the canvas\n## self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())\n self.canvas.config(height=self.internal_frame.winfo_reqheight())", "def __init__(self, frame, width, height):\n \n self.canvas = Tkinter.Canvas(frame, width = int(width), \n height = int(height))\n self.canvas.pack(side = CANVAS[\"POSITION\"])\n self.canvas.configure(background = check_color(CANVAS[\"BACKGROUND_COLOR\"]))", "def __init__(self, width, height, pixelsPerCell = 10, title = \"Ants\"):\n from tkinter import Tk, Canvas, Toplevel\n self.width = width\n self.height = height\n self.color = [\"white\", \"black\", \"red\", \"yellow\", \"blue\", \"green\", \"purple\", \"pink\", \"cyan\", \"turquoise\", \"gray\"]\n self.board = [[0 for x in range(self.width)] for y in range(self.height)]\n self.box = [[0 for x in range(self.width)] for y in range(self.height)]\n self.pixelsPerCell = pixelsPerCell\n self.title = title\n self.app = Tk()\n self.app.withdraw()\n self.win = Toplevel()\n self.win.wm_title(title)\n self.canvas = Canvas(self.win,\n width=(self.width * pixelsPerCell),\n height=(self.height * pixelsPerCell))\n self.canvas.pack(side = 'bottom', expand = \"yes\", anchor = \"n\",\n fill = 'both')\n self.win.winfo_toplevel().protocol('WM_DELETE_WINDOW',self.close)\n #self.canvas.bind(\"<Configure>\", self.changeSize)\n self.draw()", "def create_canvas(self):\n self.canvas = tk.Canvas(\n self,\n bd=-2,\n height=self.controller.GAME_HEIGHT,\n width=self.controller.GAME_WIDTH)\n self.canvas.pack(expand=tk.YES, fill=tk.BOTH)", "def create_canvas(self):\n self.canvas = tk.Canvas(\n self,\n bd=-2,\n height=self.controller.GAME_HEIGHT,\n width=self.controller.GAME_WIDTH)\n self.canvas.pack(expand=tk.YES, fill=tk.BOTH)", "def create_canvas(self):\n self.canvas = tk.Canvas(\n self,\n bd=-2,\n height=self.controller.GAME_HEIGHT,\n width=self.controller.GAME_WIDTH)\n self.canvas.pack(expand=tk.YES, fill=tk.BOTH)", "def init_canvas_frame(self, max_width=4000, max_height=4000):\n self.frames[\"canvas\"] = Frame(\n master=self.window, width=400, height=400)\n self.canvas = Canvas(\n master=self.frames[\"canvas\"],\n scrollregion=(0, 0, max_width, max_height),\n bg=\"white\")\n h_scrl_bar = Scrollbar(self.frames[\"canvas\"], orient=HORIZONTAL)\n h_scrl_bar.pack(side=BOTTOM, fill=X)\n h_scrl_bar.config(command=self.canvas.xview)\n v_scrl_bar = Scrollbar(self.frames[\"canvas\"], orient=VERTICAL)\n v_scrl_bar.pack(side=RIGHT, fill=Y)\n v_scrl_bar.config(command=self.canvas.yview)\n 
self.canvas.config(\n xscrollcommand=h_scrl_bar.set,\n yscrollcommand=v_scrl_bar.set)\n self.canvas.pack(side=LEFT, expand=True, fill=BOTH)\n self.frames[\"canvas\"].pack(\n anchor=\"nw\", side=LEFT, expand=True, fill=BOTH)\n\n self.canvas.bind(\"<ButtonPress-1>\", self.move_start)\n self.canvas.bind(\"<B1-Motion>\", self.move_move)\n self.canvas.bind(\"<Button-4>\", self.linux_zoomer_plus)\n self.canvas.bind(\"<Button-5>\", self.linux_zoomer_minus)\n # windows scroll\n self.canvas.bind(\"<MouseWheel>\", self.windows_zoomer)", "def configure_canvas(self):\r\n self.window.update_idletasks() # this updates window size\r\n\r\n border = 10\r\n self.canvas.config(\r\n width=self.window.winfo_reqwidth() + border,\r\n height=min(350, self.window.winfo_reqheight() + border,))\r\n self.canvas.configure(scrollregion=(\r\n 0, 0,\r\n self.window.winfo_reqwidth() + border,\r\n self.window.winfo_reqheight() + border))", "def init_board(self) -> None:\n\t\tself.canvas.create_rectangle(0, 0, self.canvas_width, self.canvas_height, fill=self.color_background)\n\t\tfor x in range(0, self.canvas_width, self.canvas_width//self.board_size):\n\t\t\tself.canvas.create_line(x, 0, x, self.canvas_width, fill=self.color_tile_border)\n\n\t\tfor y in range(0, self.canvas_width+1, self.canvas_height//self.board_size):\n\t\t\tself.canvas.create_line(0, y, self.canvas_height, y, fill=self.color_tile_border)\n\n\t\tself.text_area.delete('0.1', '2.1')", "def draw_canvas(self):\n\n self.canvas = Canvas(self)\n self.scrollbar = ttk.Scrollbar(self, orient= VERTICAL,\n command=self.canvas.yview) \n self.canvas.configure(yscrollcommand=self.scrollbar.set)\n \n # make sure to add scrollbar before adding the canvas\n self.scrollbar.pack(side=RIGHT, fill=Y)\n self.canvas.pack(side=TOP, fill=BOTH, expand=1, padx=20, pady=20)\n \n # adding a frame to hold all the widgets, ttk Frame doesn't support\n # background config option \n self.frame = Frame(self.canvas) \n self.canvas.create_window(0,0,window=self.frame, anchor='nw')", "def _configure_interior(event):\n # update the scrollbars to match the size of the inner frame\n size = (self.internal_frame.winfo_reqwidth(), self.internal_frame.winfo_reqheight())\n self.canvas.config(scrollregion=\"0 0 %s %s\" % size)\n if self.internal_frame.winfo_reqwidth() != self.canvas.winfo_width():\n # update the canvas's width to fit the inner frame\n self.canvas.config(width=self.internal_frame.winfo_reqwidth())\n if self.internal_frame.winfo_reqheight() != self.canvas.winfo_height():\n # update the canvas's width to fit the inner frame\n self.canvas.config(height=self.internal_frame.winfo_reqheight())", "def __createWidgets(self):\n # Widget canvas, used to draw rubik's cube\n self.cv = Canvas(self.master)\n self.cv['bg'] = 'white' # Background color\n self.cv['height'] = '440' # Height of canvas\n self.cv['width'] = '560' # Width of canvas\n self.cv.place(x=0, y=0)\n self.__drawCube()", "def initGame(self, parent):\n\n\t\tself.resetBoard()\n\n\t\tself.parent = parent\n\t\twidth = self.boardSize * self.squareSize\n\t\theight = self.boardSize * self.squareSize\n\t\ttk.Frame.__init__(self, self.parent)\n\n\t\tself.canvas = tk.Canvas(self, borderwidth=0, highlightthickness=0, width=width, height=height)\n\t\tself.canvas.pack()\n\t\tself.canvas.bind('<Button-1>', self.click)\n\t\tself.parent.title(\"TicTacToe\")\n\n\t\tfor row in range(self.boardSize):\n\t\t\tfor col in range(self.boardSize):\n\t\t\t\tx1 = (col * self.squareSize)\n\t\t\t\ty1 = (row * self.squareSize)\n\t\t\t\tx2 = x1 + 
self.squareSize\n\t\t\t\ty2 = y1 + self.squareSize\n\t\t\t\tself.canvas.create_rectangle(x1, y1, x2, y2, outline=\"black\", tags='square')", "def __init__(self, parent, *args, **kwargs):\n tk.LabelFrame.__init__(self, parent, *args, **kwargs)\n self.canvas = MainCanvas(self, bg=\"orange\")\n self.canvas.pack(side='top', fill='both', expand=True)", "def initialize(self, frame):\n self.grid_size = 5\n\n Label(frame, text=\"Grid Size:\").grid(row=0)\n\n self.e1 = Scale(frame, from_=self.grid_size, to=25, orient=HORIZONTAL)\n self.e1.grid(row=0, column=1)\n\n return self.e1", "def create_board_canvas(master: Widget) -> None:\r\n\r\n self.canvas = Canvas(master, bg='black')\r\n self.canvas.bind('<Configure>', self.on_canvas_resize)\r\n self.canvas.bind(\"<B1-Motion>\", self.on_canvas_click)\r\n self.canvas.bind(\"<Button-1>\", self.on_canvas_click)\r\n self.canvas.bind(\"<ButtonRelease-1>\", self.on_canvas_mouse_release)\r\n self.canvas.pack(fill=BOTH, expand = TRUE)", "def __init__(self):\n self.master = Tk()\n self.master.title(\"Brick Breaker\")\n self.master.geometry(\"800x600\")\n self.master.minsize(800, 600)\n self.master.iconbitmap(\"data/wall.ico\")\n self.master.config(background=\"lightblue\")\n self.frame = Frame(self.master, bg='lightblue')\n self.littleFrame = Frame(self.frame, bg='lightblue')\n\n # creation des composants\n self.create_title()\n self.create_play_button()\n self.create_quit_button()\n\n # empaquetage\n self.littleFrame.pack(expand=YES, pady=100)\n self.frame.pack(expand=YES)", "def _initialize_widgets(self):\n self.outer_board = [[Frame(self.root, bd = self.FRAME_BORDER_WIDTH, \n relief = self.FRAME_RELIEF) \n for _ in range(self.BOARD_DIM)] \n for _ in range(self.BOARD_DIM)]\n self.inner_boards = [[self._generate_inner_board(r, c) \n for c in range(self.BOARD_DIM)]\n for r in range(self.BOARD_DIM)]", "def setUI(self):\n self.parent.title(\"Handwritten digits classification\")\n self.pack(fill=BOTH, expand=1)\n self.columnconfigure(6,weight=1)\n self.rowconfigure(2, weight=1)\n self.canv = Canvas(self, bg=\"white\")\n self.canv.grid(row=2, column=0, columnspan=7,\n padx=5, pady=5,\n sticky=E + W + S + N)\n self.canv.bind(\"<B1-Motion>\",\n self.draw)\n\t\t\t\n\t\t\t\n #size_lab = Label(self, text=\"Classificator: \")\n #size_lab.grid(row=0, column=0, padx=5)\n predict_btn = Button(self, text=\"Predict\", width=10, command=lambda: self.predict())\n predict_btn.grid(row=0, column=0)\n delete_btn = Button(self, text=\"Clear\", width=10, command=lambda: self.canv.delete(\"all\"))\n delete_btn.grid(row=1, column=0, sticky=W)", "def grid(self, **kw):\n self.__imframe.grid(**kw) # place CanvasImage widget on the grid\n self.__imframe.grid(sticky='nswe') # make frame container sticky\n self.__imframe.rowconfigure(0, weight=1) # make canvas expandable\n self.__imframe.columnconfigure(0, weight=1)", "def __init__(self, canvas, base, frame):\n Connector.__init__(self, canvas, frame, base)", "def initialize_plots(self):\r\n #============================Draw circuit canvas=================================#\r\n # Draw Canvas with hardcoded width 600 and adjustable height to circuit input\r\n ckt_max_x = 600\r\n ckt_max_y = (ckt_max_x*(self.rows))/self.cols\r\n scale_x = round(ckt_max_x / self.cols)\r\n scale_y = round(ckt_max_y / self.rows)\r\n self.canvasCirkt = tk.Canvas(self.master,width=ckt_max_x+scale_x,height=(ckt_max_y*2)+int(scale_y))\r\n self.canvasCirkt.grid(row=1,column=1,columnspan=4)\r\n\r\n # Draw border\r\n self.canvasCirkt.create_rectangle(1, 1, (ckt_max_x+2)/2, 
(ckt_max_y*2)+int(scale_y))\r\n self.canvasCirkt.create_rectangle(((ckt_max_x+2)/2)+scale_x, 1, ckt_max_x+scale_x, (ckt_max_y*2)+int(scale_y))\r\n \r\n # Draw cell rows and columns in two groups\r\n blockIndex=0\r\n for cut in range(int(scale_y), int(ckt_max_y*2), int(scale_y)*2):\r\n for cut2 in range(1, int(ckt_max_x), int(scale_x)):\r\n if (cut2>ckt_max_x/2):\r\n cut2+=scale_x\r\n # Coordinates for top and bottom points of rectangle\r\n points = (cut2, cut, cut2+scale_x-1, cut+scale_y)\r\n blockObj = partitionGUI.Block(self.canvasCirkt,points,blockIndex,self.rows,self.cols)\r\n blockIndex+=1\r\n if (cut2>ckt_max_x/2):\r\n self.blocksB.append(blockObj)\r\n else:\r\n self.blocksA.append(blockObj)\r\n \r\n \r\n #===================================Draw Plots================================#\r\n # Draw Figure for 2 subplots (Connections Graph and Cost Function) \r\n self.figure, self.axes = plt.subplots(2, facecolor=\"white\")\r\n self.figure.set_figwidth(4)\r\n self.axGraph = self.axes[0]\r\n self.axCost = self.axes[1]\r\n \r\n # Initial condition for connection Graph\r\n self.axGraph.set_visible(False)\r\n \r\n # Select Cost Plot as current Axis. Get lines to use for plot updates\r\n plt.sca(self.axCost) \r\n self.lines, = self.axCost.plot([],[])\r\n self.axCost.set_xlabel(\"Time\")\r\n self.axCost.set_title(\"Cost\")\r\n\r\n # Draw Cost function Plot\r\n self.canvasPlot = FigureCanvasTkAgg(self.figure, master=self.master)\r\n self.canvasPlot.get_tk_widget().grid(row=1,column=0)\r\n \r\n # Draw Tool Bar\r\n self.toolbarFrame = tk.Frame(self.master)\r\n self.toolbarFrame.grid(row=2,column=0,columnspan=3,sticky=\"W\")\r\n self.toolbarPlot = NavigationToolbar2TkAgg(self.canvasPlot,self.toolbarFrame)", "def create_board(self):\n canvas = tk.Canvas(master=self.panel_mid, width=530, height=550)\n canvas.configure(scrollregion=(self.offset_x, self.offset_y, 20, 20))\n\n # x1 y1 x2 y2\n for i in range(8):\n y = i * self.width\n for j in range(8):\n x = j * self.width\n if ((j + 1) % 2) == 0:\n if ((i + 1) % 2) == 0:\n canvas.create_rectangle(x, y, x + self.width, y + self.width,\n outline=\"#808080\", fill=\"#fff\") # biela\n else:\n canvas.create_rectangle(x, y, x + self.width, y + self.width,\n outline=\"#808080\", fill=\"#999\") # cierna\n else:\n if ((i + 1) % 2) == 1:\n canvas.create_rectangle(x, y, x + self.width, y + self.width,\n outline=\"#808080\", fill=\"#fff\") # biela\n else:\n canvas.create_rectangle(x, y, x + self.width, y + self.width,\n outline=\"#808080\", fill=\"#999\") # cierna\n\n return canvas", "def config_frames(self):\n self.root.grid_rowconfigure(1, weight=1)\n self.root.grid_columnconfigure(1, weight=1)\n\n self.top_frame = tkinter.Frame(self.root, pady=1)\n self.top_frame.grid(row=0, columnspan=2, sticky='nsew')", "def __init__(self, parent, controller):\n super().__init__(parent, width=\"10m\")\n\n self.parent = parent\n self.controller = controller\n\n self.grid_propagate(0)\n\n self.rowconfigure(0, weight=1)\n self.rowconfigure(1, weight=20)\n self.rowconfigure(2, weight=20)\n self.columnconfigure(0, weight=1)\n self.columnconfigure(1, weight=1)\n\n if (self.controller.game.color_queried and\n self.controller.game.in_turn == 0):\n self.create_title()\n self.create_buttons()", "def __init__(self):\n self.window = Tk()\n self.window.title(\"Brick Breaker\")\n self.window.attributes(\"-fullscreen\", True)\n self.window.iconbitmap(\"data/wall.ico\")\n self.window.config(background=\"light blue\")\n\n # initialization des composants\n self.frame = Frame(self.window, 
bg='light blue')\n self.littleFrame = Frame(self.frame, bg='light blue')\n self.littleFrame_bis = LabelFrame(self.frame, bg='light blue', text=\"USER NAME\")\n\n # creation des composants\n self.create_title()\n self.create_subtitle()\n self.create_play_button()\n self.create_quit_button()\n\n # empaquetage\n self.littleFrame_bis.pack(expand=YES, pady=30)\n self.littleFrame.pack(expand=YES, pady=50)\n self.frame.pack(expand=YES, fill=BOTH, pady=200)", "def __init__(self, *args, **kwargs):\r\n\r\n # ============================================= INITIALISATION ========================================\r\n\r\n if args==():\r\n self.master = Tk();\r\n self.isFrame = False #Permet l'importation de la fenetre en tant que frame\r\n self.master.title(\"PAP screen id %s\" %(random.randint(0, 1000)))\r\n else: #ou en tant que standalone.\r\n self.isFrame = True\r\n Frame.__init__(self, *args, **kwargs)\r\n self.frame = self\r\n self.master = args[0]\r\n print(\"hello\", self.frame)\r\n\r\n # =============================================== BINDING =============================================\r\n\r\n # Ce Bind est primordial afin de pouvoir centrer continuelement l'image\r\n # et ne pas étendre le canvas vers un côté précis.\r\n\r\n self.master.bind(\"<Configure>\", self.__reconfig__)\r\n\r\n\r\n # =============================================== CANVAS ==============================================\r\n\r\n self.canvas = Canvas(self.getRoot(), bg=CANVAS_BACKGROUND)\r\n self.canvas.grid(sticky = W+E+N+S)\r\n\r\n self.getRoot().grid_rowconfigure(0, weight=1)\r\n self.getRoot().grid_columnconfigure(0, weight=1)\r\n\r\n # =============================================== FULLSCREEN ==============================================\r\n\r\n self.is_fullscreen = False\r\n self.master.bind(\"<F11>\", self.toogle_fullscreen)\r\n # ================================================ ID =================================================\r\n\r\n Screen.__count__ += 1\r\n self.id = \"Screen_\"+str(Screen.__count__)", "def __init__(self, parent, maze_width, maze_height, scale=20):\n self.parent = parent\n self.parent.title(\"Maze Exploration Visualization\")\n\n self.maze_width = maze_width\n self.maze_height = maze_height\n self.scale = scale\n\n # Compute actual width and height\n self.width = maze_width * scale\n self.height = maze_height * scale\n\n # Store tkinter object\n self.frame = tkinter.Frame(self.parent,\n width=self.width,\n height=self.height,\n highlightthickness=1,\n highlightbackground=\"black\")\n self.canvas = tkinter.Canvas(self.frame,\n width=self.width, \n height=self.height)\n self.canvas.pack(expand=False)\n self.frame.pack(expand=False)\n\n # Initialize look of grid\n self.draw_gray_grid()\n\n self.person = None\n self.draw_person(self.maze_width // 2, self.maze_height // 2)", "def setupFrame(self, frame_width, frame_height):\n x, y = 0.0, 0.4\n self.x0 = int(frame_width*x)\n self.y0 = int(frame_height*y)\n self.width = 260\n self.height = 260", "def __init__(self, master, grid_size, board_width=600, *args, **kwargs):\n\n super().__init__(master)\n self._master = master\n self._grid_size = grid_size\n self._board_width = board_width\n self._board = None\n\n self.config(height=board_width, width=board_width)", "def build(self):\n with self.set_master(sticky=\"nsew\", row_weights=[1], column_weights=[0, 1], auto_columns=0):\n self.build_category_canvas()\n with self.set_master(sticky=\"nsew\", row_weights=[0, 1, 0], column_weights=[1, 1]):\n self.build_previous_range_button(row=0, column=0)\n 
self.build_hidden_fields_checkbutton(row=0, column=1)\n with self.set_master(sticky=\"nsew\", row=1, column=0, row_weights=[1], column_weights=[1]):\n self.build_entry_frame()\n with self.set_master(sticky=\"nsew\", row=1, column=1, row_weights=[1], column_weights=[1]):\n self.build_field_frame()\n self.build_next_range_button(row=2, column=0)", "def set_canvas_size(self, width_npix, height_npix):\n\n self.variables.canvas_width = width_npix\n self.variables.canvas_height = height_npix\n if self.variables.canvas_image_object is not None:\n self.variables.canvas_image_object.canvas_nx = width_npix\n self.variables.canvas_image_object.canvas_ny = height_npix\n self.config(width=width_npix, height=height_npix)", "def __init__(self,master,**kw):\n Frame.__init__(self,master,**kw)\n \n self.canvas=Canvas(self,scrollregion=(0,0,500,500))#,width=300,height=300,scrollregion=(0,0,500,500))\n self.internal_frame=Frame(self.canvas)\n self.hbar=Scrollbar(self,orient=HORIZONTAL)\n self.vbar=Scrollbar(self,orient=VERTICAL)\n\n interior_id=self.canvas.create_window((0,0),window=self.internal_frame,anchor=\"nw\")\n\n \n self.hbar.pack(side=BOTTOM,fill=X)\n self.hbar.config(command=self.canvas.xview)\n \n \n self.vbar.pack(side=RIGHT,fill=Y)\n self.vbar.config(command=self.canvas.yview)\n \n## self.canvas.config(width=300,height=300)\n self.canvas.config(xscrollcommand=self.hbar.set, yscrollcommand=self.vbar.set)\n self.canvas.bind_all(\"<MouseWheel>\",lambda x:self.on_mouse_wheel(x,self.canvas))\n self.canvas.pack(side=LEFT,expand=True,fill=BOTH)\n\n def _configure_interior(event):\n \"\"\"\n Figures out how big the interior frame needs to be\n \"\"\"\n # update the scrollbars to match the size of the inner frame\n size = (self.internal_frame.winfo_reqwidth(), self.internal_frame.winfo_reqheight())\n self.canvas.config(scrollregion=\"0 0 %s %s\" % size)\n if self.internal_frame.winfo_reqwidth() != self.canvas.winfo_width():\n # update the canvas's width to fit the inner frame\n self.canvas.config(width=self.internal_frame.winfo_reqwidth())\n if self.internal_frame.winfo_reqheight() != self.canvas.winfo_height():\n # update the canvas's width to fit the inner frame\n self.canvas.config(height=self.internal_frame.winfo_reqheight())\n self.internal_frame.bind('<Configure>', _configure_interior)\n\n def _configure_canvas(event):\n \"\"\"\n Figures out how bid the interior canvas needs to be\n \"\"\"\n if self.internal_frame.winfo_reqwidth() != self.canvas.winfo_width():\n## print \"frame\",self.internal_frame.winfo_reqwidth()\n## print \"canvas\",self.canvas.winfo_width()\n # update the inner frame's width to fill the canvas\n## self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())\n self.canvas.config(width=self.internal_frame.winfo_reqwidth())\n if self.internal_frame.winfo_reqheight() != self.canvas.winfo_height():\n # update the inner frame's width to fill the canvas\n## self.canvas.itemconfigure(interior_id, width=self.canvas.winfo_width())\n self.canvas.config(height=self.internal_frame.winfo_reqheight())\n self.canvas.bind('<Configure>', _configure_canvas)", "def load(self, width=DEFAULT_PUZZLE_WIDTH, height=DEFAULT_PUZZLE_HEIGHT):\n self.frame.grid_configure(row=1, column=0, padx=PAD, pady=0)\n # Crossword clue\n self.clue_label.config(**settings.get(\"style:clue\"))\n self.clue_label.grid(row=0, sticky=tk.W)\n # Game timer\n self.time_label.config(**settings.get(\"style:time\"))\n self.time_label.grid(row=0, padx=TINY_PAD+1, sticky=tk.E)\n # Game canvas\n canvas_width = 
settings.get(\"board:cell-size\")*width + CANVAS_SPARE\n canvas_height = settings.get(\"board:cell-size\")*height + CANVAS_SPARE\n border_fill = settings.get(\"style:border:fill\")\n self.canvas.config(width=canvas_width, height=canvas_height, highlightthickness=0)\n self.canvas.grid(row=1, pady=PAD, padx=(PAD-CANVAS_PAD, 0))\n self.canvas.create_rectangle(0, 0, canvas_width-CANVAS_SPARE, canvas_height-CANVAS_SPARE, outline=border_fill)", "def setup(self):\n\n # push the frame for the toplevel window\n self.lumpy.pushfr(self.tl)\n self.lumpy.col([0,1])\n\n # the frame at the top contains buttons\n self.lumpy.row([0,0,1], bg='white')\n self.lumpy.bu(text='Close', command=self.close)\n self.lumpy.bu(text='Print to file:', command=self.printfile)\n self.en = self.lumpy.en(width=10, text='lumpy.ps')\n self.en.bind('<Return>', self.printfile)\n self.la = self.lumpy.la(width=40)\n self.lumpy.endrow()\n\n # the grid contains the canvas and scrollbars\n self.lumpy.gr(2)\n \n self.ca_width = 1000\n self.ca_height = 500\n self.canvas = self.ca(self.ca_width, self.ca_height, bg='white')\n\n yb = self.lumpy.sb(command=self.canvas.yview, sticky=N+S)\n xb = self.lumpy.sb(command=self.canvas.xview, orient=HORIZONTAL,\n sticky=E+W)\n self.canvas.configure(xscrollcommand=xb.set, yscrollcommand=yb.set,\n scrollregion=(0, 0, 800, 800))\n \n self.lumpy.endgr()\n self.lumpy.endcol()\n self.lumpy.popfr()\n\n # measure some sample letters to get the text height\n # and set the scale factor for the canvas accordingly\n self.canvas.clear_transforms()\n bbox = self.canvas.measure(['bdfhklgjpqy'])\n self.unit = 1.0 * bbox.height()\n transform = ScaleTransform([self.unit, self.unit])\n self.canvas.add_transform(transform)", "def setUpFrame(self):\n #adds labels to the Board\n self.mineLabel = tk.Label(self, text=\"Mines: \"+str(self.numMines))\n self.mineLabel.grid(row=0, column=0, sticky=\"W\", columnspan=int((self.cols-2)/2))\n self.smileButton = tk.Label(self, image=self.images[1])\n self.smileButton.grid(row=0, column=int((self.cols-2)/2), sticky=\"WE\", columnspan=2)\n self.flagLabel = tk.Label(self, text=\"Flags: \"+str(self.numFlags))\n self.flagLabel.grid(row=0, column=int((self.cols-2)/2)+2, sticky=\"E\", columnspan=int((self.cols-1)/2))\n\n #left click listeners on smileButton\n self.smileButton.bind('<ButtonPress-1>', lambda event, num=0: self.changeSmile(num))\n self.smileButton.bind('<ButtonRelease-1>', self.replay)", "def __init__(self):\n self.root = Tk()\n self.root.title(\"Brick Breaker\")\n self.root.geometry(\"800x600\")\n self.root.maxsize(800, 600)\n self.root.minsize(800, 600)\n self.root.iconbitmap(\"data/wall.ico\")\n self.root.config(background=\"#000000\")\n self.score = 0\n self.life = 3\n self.canevas = Canvas(self.root, bg='light blue', highlightthickness=0)\n self.paddle = Paddle(self)\n self.ball = Ball(self)\n self.brick = Brick(self)\n self.create_score()\n self.window = Window\n self.end = False\n self.canevas.pack(fill=BOTH, expand=YES)", "def __init__(self,master, grid_size, board_width=600, *args, **kwargs):\n super().__init__(master,grid_size, board_width=600, *args, **kwargs)", "def __init__(self):\n super().__init__()\n # self.left =\n self.csp = CSP([500, 200, 100], 500)\n self.ga = Trainer([500, 200, 100], 4, 5, 500)\n self.master.title(\"Yamb\")\n Style().configure(\"TButton\", padding=(10, 10, 10, 10),\n font='serif 20')\n\n # left columns\n self.columnconfigure(0, pad=3) # header (1 to 6, min, max, straight ...)\n self.columnconfigure(1, pad=3) # top2bot\n 
self.columnconfigure(2, pad=3) # free\n self.columnconfigure(3, pad=3) # bot2top\n self.columnconfigure(4, pad=3) # first\n self.columnconfigure(5, pad=3) # top&bot\n self.columnconfigure(6, pad=3) # middle\n\n self.columnconfigure(7, pad=10) # padding\n\n # right columns\n self.columnconfigure(8, pad=3) # header (1 to 6, min, max, straight ...)\n self.columnconfigure(9, pad=3) # top2bot\n self.columnconfigure(10, pad=3) # free\n self.columnconfigure(11, pad=3) # bot2top\n self.columnconfigure(12, pad=3) # first\n self.columnconfigure(13, pad=3) # top&bot\n self.columnconfigure(14, pad=3) # middle\n\n # rows\n self.rowconfigure(0, pad=3) # headers (top2bot, free, bot2top, first, top&bot, middle)\n self.rowconfigure(1, pad=3)\n self.rowconfigure(2, pad=3)\n self.rowconfigure(3, pad=3)\n self.rowconfigure(4, pad=3)\n self.rowconfigure(5, pad=3)\n self.rowconfigure(6, pad=3)\n self.rowconfigure(7, pad=3) # min\n self.rowconfigure(8, pad=3) # max\n self.rowconfigure(9, pad=3) # straight\n self.rowconfigure(10, pad=3) # threes\n self.rowconfigure(11, pad=3) # full\n self.rowconfigure(12, pad=3) # fours\n self.rowconfigure(13, pad=3) # yamb\n\n self.rowconfigure(14, pad=3) # score\n\n\n # init headers\n Label(self, text=\"∇\", borderwidth=3, relief=\"solid\").grid(row=0, column=1)\n Label(self, text=\"∇Δ\", borderwidth=3, relief=\"solid\").grid(row=0, column=2)\n Label(self, text=\"Δ\", borderwidth=3, relief=\"solid\").grid(row=0, column=3)\n Label(self, text=\"R\", borderwidth=3, relief=\"solid\").grid(row=0, column=4)\n Label(self, text=\"↕\", borderwidth=3, relief=\"solid\").grid(row=0, column=5)\n Label(self, text=\"↔\", borderwidth=3, relief=\"solid\").grid(row=0, column=6)\n\n Label(self, text=\" \", borderwidth=3, relief=\"flat\").grid(row=0, column=7)\n\n Label(self, text=\"∇\", borderwidth=3, relief=\"solid\").grid(row=0, column=9)\n Label(self, text=\"∇Δ\", borderwidth=3, relief=\"solid\").grid(row=0, column=10)\n Label(self, text=\"Δ\", borderwidth=3, relief=\"solid\").grid(row=0, column=11)\n Label(self, text=\"R\", borderwidth=3, relief=\"solid\").grid(row=0, column=12)\n Label(self, text=\"↕\", borderwidth=3, relief=\"solid\").grid(row=0, column=13)\n Label(self, text=\"↔\", borderwidth=3, relief=\"solid\").grid(row=0, column=14)\n\n Label(self, text=\"1\", borderwidth=3, relief=\"solid\").grid(row=1, column=0)\n Label(self, text=\"2\", borderwidth=3, relief=\"solid\").grid(row=2, column=0)\n Label(self, text=\"3\", borderwidth=3, relief=\"solid\").grid(row=3, column=0)\n Label(self, text=\"4\", borderwidth=3, relief=\"solid\").grid(row=4, column=0)\n Label(self, text=\"5\", borderwidth=3, relief=\"solid\").grid(row=5, column=0)\n Label(self, text=\"6\", borderwidth=3, relief=\"solid\").grid(row=6, column=0)\n Label(self, text=\"min\", borderwidth=3, relief=\"solid\").grid(row=7, column=0)\n Label(self, text=\"max\", borderwidth=3, relief=\"solid\").grid(row=8, column=0)\n Label(self, text=\"kenta\", borderwidth=3, relief=\"solid\").grid(row=9, column=0)\n Label(self, text=\"triling\", borderwidth=3, relief=\"solid\").grid(row=10, column=0)\n Label(self, text=\"ful\", borderwidth=3, relief=\"solid\").grid(row=11, column=0)\n Label(self, text=\"kare\", borderwidth=3, relief=\"solid\").grid(row=12, column=0)\n Label(self, text=\"yamb\", borderwidth=3, relief=\"solid\").grid(row=13, column=0)\n\n Label(self, text=\"Score: \", borderwidth=3, relief=\"solid\").grid(row=14, column=0)\n\n Label(self, text=\"1\", borderwidth=3, relief=\"solid\").grid(row=1, column=8)\n Label(self, 
text=\"2\", borderwidth=3, relief=\"solid\").grid(row=2, column=8)\n Label(self, text=\"3\", borderwidth=3, relief=\"solid\").grid(row=3, column=8)\n Label(self, text=\"4\", borderwidth=3, relief=\"solid\").grid(row=4, column=8)\n Label(self, text=\"5\", borderwidth=3, relief=\"solid\").grid(row=5, column=8)\n Label(self, text=\"6\", borderwidth=3, relief=\"solid\").grid(row=6, column=8)\n Label(self, text=\"min\", borderwidth=3, relief=\"solid\").grid(row=7, column=8)\n Label(self, text=\"max\", borderwidth=3, relief=\"solid\").grid(row=8, column=8)\n Label(self, text=\"kenta\", borderwidth=3, relief=\"solid\").grid(row=9, column=8)\n Label(self, text=\"triling\", borderwidth=3, relief=\"solid\").grid(row=10, column=8)\n Label(self, text=\"ful\", borderwidth=3, relief=\"solid\").grid(row=11, column=8)\n Label(self, text=\"kare\", borderwidth=3, relief=\"solid\").grid(row=12, column=8)\n Label(self, text=\"yamb\", borderwidth=3, relief=\"solid\").grid(row=13, column=8)\n\n Label(self, text=\"Score: \", borderwidth=3, relief=\"solid\").grid(row=14, column=8)\n\n\n\n # init zeroes\n for column_index in range(1, 15):\n if column_index == 8 or column_index == 7: # header\n continue\n for row_index in range(1, 14):\n Label(self, text=\"0\", borderwidth=3, relief=\"flat\").grid(row=row_index, column=column_index)\n self.pack()", "def __init__(self, is_debug):\n super(Board, self).__init__(BOARD_SIZE, 6.5, is_debug=is_debug)\n self.outline = pygame.Rect(GRID_SIZE+5, GRID_SIZE+5, DRAW_BOARD_SIZE[0]-GRID_SIZE*2, DRAW_BOARD_SIZE[1]-GRID_SIZE*2)\n self.draw_board()", "def _init_frame(self : \"animation\",\n init_frame : \"matplotlib.figure.Figure\",\n init_ax : \"matplotlib.axes._subplots.AxesSubplot\"\n ):\n self._cframe = init_frame.canvas.copy_from_bbox(init_ax.bbox)", "def __init__(self):\n Frame.__init__(self)\n self.master.title(\"Canvas Demo\")\n self.grid()\n\n # create a canvas and place in this frame\n self.canvas = Canvas(self, width = 200, height = 100, \n bg = \"white\")\n self.canvas.grid(row = 0, column = 0)\n\n # Place buttons in a frame\n frame = Frame(self)\n frame.grid(row = 1, column = 0)\n rectangle = Button(frame, text = \"Rectangle\", \n command = self.displayRect)\n oval = Button(frame, text = \"Oval\", \n command = self.displayOval)\n arc = Button(frame, text = \"Arc\", \n command = self.displayArc)\n polygon = Button(frame, text = \"Polygon\", \n command = self.displayPolygon)\n line = Button(frame, text = \"Line\", \n command = self.displayLine)\n string = Button(frame, text = \"String\", \n command = self.displayString)\n clear = Button(frame, text = \"Clear\", \n command = self.clearCanvas)\n\n rectangle.grid(row = 0, column = 0)\n oval.grid(row = 0, column = 1)\n arc.grid(row = 0, column = 2)\n polygon.grid(row = 0, column = 3)\n line.grid(row = 0, column = 4)\n string.grid(row = 0, column = 5)\n clear.grid(row = 0, column = 6)", "def __init__(self, grid_cols: int = 8, **attrs: Any) -> None:\n\n super().__init__(**attrs)\n\n self.grid_cols = grid_cols\n self.forced_width = self.grid_cols * 4 - 1 + self.sidelength\n self.width = self.forced_width\n\n self._layer_functions = [foreground, background]\n\n self.layer = 0", "def initialize(self):\n\n super(RectTab,self).initialize()\n # special tkinter variables that will be changed with the scales\n self.width = tk.IntVar()\n self.height = tk.IntVar()\n\n # make width scale\n self.widthScale = tk.Scale(self, from_=1, to=5, orient=tk.HORIZONTAL,\n label='Width', resolution=1, variable=self.width,\n command=self.updateSize)\n 
self.widthScale.grid(column=2, row=6, columnspan=1, sticky='W' + 'E')\n self.widthScale.set(2)\n\n # make height scale\n self.heightScale = tk.Scale(self, from_=1, to=5, orient=tk.HORIZONTAL,\n label='Height', resolution=1, variable=self.height,\n command=self.updateSize)\n self.heightScale.grid(column=2, row=7, columnspan=1, sticky='W' + 'E')\n self.heightScale.set(2)", "def draw_board(self) -> None:\n # -> establishment of new dimensions for the canvas :\n side_size = self.side_size\n wide, high = side_size * self.n_col, side_size * self.n_row\n self.can.configure(width=wide, height=high)\n # Layout of the grid:\n self.can.delete(tkinter.ALL) # erasing of the past Layouts\n s = side_size\n for _ in range(self.n_row - 1): # horizontal lines\n self.can.create_line(0, s, wide, s, fill=\"white\")\n s += side_size\n s = side_size\n for _ in range(self.n_col - 1): # vertical lines\n self.can.create_line(s, 0, s, high, fill=\"white\")\n s += side_size\n # Layout of all the pawns,\n # white or black according to the state of the game :\n for row in range(self.n_row):\n for col in range(self.n_col):\n x1 = col * side_size + 3 # size of pawns =\n x2 = (col + 1) * side_size - 3 # size of the case - 10\n y1 = row * side_size + 3 #\n y2 = (row + 1) * side_size - 3\n color = self.color(row, col)\n self.can.create_oval(x1, y1, x2, y2, outline=\"grey\",\n width=1, fill=color)", "def __init__(self, *args, **kwargs):\r\n\r\n tk.Tk.__init__(self, *args, **kwargs)\r\n\r\n self.title(TITLE)\r\n self.geometry(f\"{WIDTH}x{HEIGHT}\")\r\n self.config(background=\"pale turquoise\")\r\n\r\n self.scroll_frame = VerticalScrolledFrame(self)\r\n self.scroll_frame.grid(column=1, row=3)\r\n\r\n self.place_widgets()", "def __init__(self, target=None, height=0, width=0):\n\t\ttkinter.Canvas.__init__(self, target, height=height, width=width)\n\t\tself.Track_Record = Track.Track_Record()\n\t\tself.draw_points() #draw points on canvas\n\t\tself.draw_canvas() #draw grids on canvas", "def __init__(self, master=None):\n super().__init__(master)\n self.masterframe = Frame(self.master)\n self.masterframe.pack()\n self.grid = Grid(3)\n self.canvasSize = 100\n self.mX = 0\n self.mY = 0\n self.working = True\n # True is Player 1 and False is Player 2\n self.turn = True\n self.createCanvas()", "def addCanvases(self):\n self.canvasGrid = Grid(self.matrix.getHeight(),\n self.matrix.getWidth())\n for row in range(self.matrix.getHeight()):\n for column in range(self.matrix.getWidth()):\n canvas = MatrixCanvas(self, 20, 20,\n self.matrix[row][column])\n self.canvasGrid[row][column] = canvas\n self.matrixPanel.addCanvas(canvas = canvas,\n row = row,\n column = column)", "def init_frame(self):\n self._exit_button.grid(row=0, column=2, sticky=tk.W)\n self._clear_button.grid(row=0, column=0, sticky=tk.E)\n # self._copy_button.grid(row=0, column=1, sticky=(tk.W, tk.W))\n return None", "def __createLayout(self):\r\n self.__createCanvas()\r\n self.__createButton()\r\n self.__createInputFunction()\r\n self.__createLimits()\r\n self.__styleLayout()", "def __init__(self, size, class_to_use, master, row, column, report=None):\n try:\n master.master.geometry(size)\n except AttributeError:\n pass\n self.window = class_to_use(master=master, borderwidth=0, relief=tk.GROOVE)\n self.window.grid(row=row, column=column, padx=10, pady=20)", "def __init__(self):\n super().__init__()\n self.geometry('{}x{}'.format(425, 185))\n self.title('PAD Tracker')\n self.frame = Frame(self)\n self.populateFields()\n self.frame.pack()", "def __init__(self, row=4, col=4, 
initial=2):\n self.grid = Grid(row, col, initial)", "def __init__(self, canvas_size: Tuple[int, int, int] = DEFAULT_CANVAS_SIZE):\n\n if not hasattr(self, 'logger'):\n self.logger = get_logger(__class__.__name__)\n\n self.blank_canvas: ndarray = np.ones(canvas_size)\n self.empty_array: ndarray = np.zeros(canvas_size)\n self.empty: Image = Image.fromarray(self.empty_array.astype(np.uint8))\n\n self.COLRS: Colours = Colours(blank_canvas=self.blank_canvas, empty_array=self.empty_array)\n self.SPRITES: Sprites = Sprites(empty=self.empty)\n\n self.sprite_mappers = SpriteMapperContainer(\n sprite_mappers=[\n SpriteMapperVegetation(fallback_sprite=self.empty)\n ]\n )\n\n # Crop values\n # self.crop = [(0,0,300,225), (300,0,600,225), (600,0,900,225)]", "def create_widgets(self):\n self.pack(fill=tk.BOTH, expand=True)\n self.top_frame = tk.Frame(self)\n self.top_frame.pack(fill=tk.X, expand=False)\n\n # Create obstacle button\n self.create_obstacle_button = tk.Button(\n self.top_frame,\n text=self.OBSTACLE_CREATION_INACTIVE_LABEL,\n command=self._toggle_creation_mode_cb\n )\n self.create_obstacle_button.pack(side=tk.LEFT)\n\n # Load button\n self.load_button = tk.Button(\n self.top_frame,\n text=self.LOAD_BUTTON_LABEL,\n command=self._load_button_cb\n )\n self.load_button.pack(side=tk.LEFT)\n\n # Export button\n export_button = tk.Button(\n self.top_frame,\n text=self.EXPORT_BUTTON_LABEL,\n command=self._export_button_cb\n )\n export_button.pack(side=tk.RIGHT)\n\n # Main canvas\n self.canvas = tk.Canvas(self, background='white')\n self.canvas.config(width=self.CANVAS_WIDTH, height=self.CANVAS_HEIGHT)\n self.canvas.bind('<ButtonRelease-1>', self._draw_line)\n self.canvas.pack(fill=tk.BOTH, expand=True)\n self.canvas.focus_set()", "def __init__(self, parent: View):\n super().__init__(parent)\n # Crossword clue\n self.clue = tk.StringVar(self.root)\n self.clue_label = tk.Label(self.frame, textvariable=self.clue)\n # Game timer\n self.time = tk.StringVar(self.root)\n self.time_label = tk.Label(self.frame, textvariable=self.time)\n # Game canvas\n self.canvas = tk.Canvas(self.frame)\n # Cells\n self.cells = None\n # Load\n self.load()", "def set_ui(self):\r\n\r\n self.canvas = tk.Canvas(self)\r\n self.canvas.pack()\r\n\r\n self.entry = ttk.Entry(self.canvas, justify=\"center\", font=(\"Calibri\", 12))\r\n\r\n self.grid = Grid(self.canvas)", "def make_grid(self):\n length = self.size / 8\n # draw horizontal lines\n for y in range(0, self.size, length):\n self.window.create_line(0, y, self.size, y, fill = \"blue\")\n \n # draw vertical lines\n for x in range(0, self.size, length):\n self.window.create_line(x, 0, x, self.size, fill = \"blue\")\n\n # draw the axes red\n self.window.create_line(\n 0,\n self.size / 2,\n self.size, \n self.size / 2, \n fill = \"red\"\n )\n self.window.create_line(\n self.size / 2, 0,\n self.size / 2, \n self.size, \n fill = \"red\"\n )\n print(\"Grid Made.\")", "def __init__(self, master, width, height):\n super().__init__(master)\n self._width = width\n self._height = height\n # build a black background frame in master\n self._background = tk.Frame(master, bg='black', width=width, height=height)\n self._background.pack(side=tk.BOTTOM, fill=tk.X, expand=1)\n # build a score bar in background frame\n self._score_bar = tk.Label(self._background)\n self._score_bar.pack(side=tk.BOTTOM, fill=tk.X)\n # build a health canvas in background frame\n self._health_bar = tk.Canvas(self._background, height=20, bd=0)\n self._health_bar.pack(side=tk.BOTTOM, anchor=tk.W)", "def __init__ (self, 
cols = 6, rows = 7, requiredToWin = 4):\r\n\t\tself.cols = cols\r\n\t\tself.rows = rows\r\n\t\tself.win = requiredToWin\r\n\t\tself.board = [[NONE] * rows for _ in range(cols)]", "def __init__(self, data_frame, parent=None):\n if data_frame is None:\n self.generate_data()\n else:\n self.data = data_frame\n self.fig = Figure()\n # TODO change to dynamic ylim\n self.axes = self.fig.add_subplot(111, ylim=(0.0, 200.0))\n self.axes.hold(False)\n FigureCanvas.__init__(self, self.fig)\n self.plot(data_frame)", "def __init__( self, width = 128, height = 128, *args, **kwargs ):\n ### ZIH - works with old Tk, not with ttk\n # part of this might be because ttk doesn't use fg/bg?\n #ttk.Frame.__init__( self, *args, **kwargs )\n tk.Tk.__init__( self, *args, **kwargs )\n self.config( padx = 0, pady = 0 )\n self.canvas = tk.Canvas(\n self,\n width = width,\n height = height,\n bg = 'black'\n )\n self.canvas.pack( ipadx = 0, ipady = 0, padx = 0, pady = 0 )\n self.raster = tk.PhotoImage( width = width, height = height )\n self.canvas.create_image(\n ### ZIH - why do i need this 2 pixel offset to position the\n # image? there also seems to be a superfluous 2 pixel padding\n # around the canvas\n #( ( ( width >> 1 ) + 2 ), ( ( height >> 1 ) + 2 ) ),\n ( 2, 2 ),\n anchor = tk.NW,\n image = self.raster,\n #state = 'normal' ### see if tk.NORMAL works\n state = tk.NORMAL\n )", "def setUpGUI(self):\n WHITE = '#ffffff'\n # Set up the GUI so that we can paint the fractal image on the screen\n canvas = Canvas(self.window, width=self.width, height=self.height, bg=WHITE)\n canvas.pack()\n canvas.create_image((self.width/2, self.height/2), image=self.img, state=\"normal\")", "def __init__(self, parent):\r\n GLCanvas.__init__(self, parent,-1)\r\n self.init = 0\r\n\r\n\r\n self.Bind(wx.EVT_PAINT, self.OnPaint)\r\n self.Bind(wx.EVT_SIZE, self.OnSize)\r\n return", "def makeGraph(self):\n self.graphFrame = Frame(height=400, width=400, bd=10, bg='black')\n self.graphFrame.grid(row=1, column=0)", "def setup():\r\n #this happens just once\r\n size(width, height) #instead of create_canvas\r", "def __init__(self, canvas):\r\n\r\n # Initialize attributes\r\n self.canvas = canvas\r\n self.fig = canvas.fig\r\n self.units = None\r\n self.cb = None\r\n self.cb_bt = None\r\n self.cb_gga = None\r\n self.cb_vtg = None\r\n self.bt = None\r\n self.gga = None\r\n self.vtg = None\r\n self.hover_connection = None\r\n self.annot = None", "def __init__(self):\n super(GraphVisualizer, self).__init__()\n\n self._layout = QGridLayout()\n self.setLayout(self._layout)\n\n self._next_column = 0\n\n self._columns = []", "def __init__(self, width=7, height=6):\n self.width = width\n self.height = height\n self.board = self.createBoard()", "def init_new_board(self) -> None:\r\n\r\n TkState.enable(self.settings_menu.winfo_children())\r\n TkState.enable(self.edit_menu.winfo_children())\r\n TkState.enable([self.play_button, self.step_button])\r\n TkState.disable([self.reset_button])\r\n\r\n self.gen_number.config(text = 0)\r\n self.speed_scale.set(self.INITIAL_TIME_PER_GEN)\r\n self.zoom_scale.set(self.INITIAL_ZOOM)\r\n\r\n self.animator.board = self.anim_board\r\n self.painter.board = self.anim_board\r\n self.painter.adjust_to_canvas()", "def __init___(self, x, y, width, height):\n super(GRect, self).__init__()\n frameWidth = width\n frameHeight = height\n setLocation(x, y)", "def __init__(self):\n EasyFrame.__init__(self, title=\"Bouncy\")\n\n # Label and field for the initial height\n self.addLabel(text=\"Initial Height\",\n row=0, column=0)\n 
self.heightField = self.addFloatField(value=0.0,\n row=0, column=1)\n\n # Label and field for the bounciness index\n self.addLabel(text=\"Bounciness Index\",\n row=1, column=0)\n self.indexField = self.addFloatField(value=0.0,\n row=1, column=1)\n\n # Label and field for the number of bounces\n self.addLabel(text=\"Number of bounces\",\n row=2, column=0)\n self.bouncesField = self.addIntegerField(value=0,\n row=2, column=1)\n\n # The command button\n self.addButton(text=\"Compute distance\",\n row=3, column=0, columnspan=2,\n command=self.computeDistance)\n\n # Label and field for the distance traveled\n self.addLabel(text=\"Distance traveled\",\n row=4, column=0)\n self.distanceField = self.addFloatField(value=0,\n row=4, column=1)", "def __init__(self, tk, n=3):\n # things related to the number of cells\n self.n = n\n self.nDigits = n * n\n self.numCells = self.nDigits * self.nDigits\n size = self.boxSize * (self.nDigits +1)\n self.size = size\n #get a list of the 'legal' digits\n digList = self.digits[self.nDigits]\n self.digList = list(digList[1:len(digList)])\n\n self.gString = '{0}x{1}'.format(size+self.extraWidth,size)\n\n # set up the graphics\n self.tk = tk\n tk.title('Sudoku - Phil Martel')\n tk.geometry(self.gString)\n super(SudokuGame,self).__init__(tk)\n\n # set up the cells. Everything is on a canvas\n self.can = Canvas(tk, height=self.nDigits*self.boxSize+self.yOff,\n width=self.nDigits*self.boxSize+self.xOff+\n self.extraWidth, bg='light gray')\n self.can.grid(row=1,column=1)\n\n #draw outline\n for x in range(0,self.nDigits+1):\n if x % 3 == 0:\n wid = 3\n else:\n wid = 1\n s = self.boxSize # aliases\n yo = self.yOff\n xo = self.xOff\n xyMax = self.size -s\n \n self.can.create_line(0,x*s+yo,xyMax+xo,x*s+yo,fill='black',width=wid)\n self.can.create_line(x*s+xo,0,x*s+xo,xyMax+yo,fill='black',width=wid)\n\n #generate the cells. 
Each cell will have a entry widget attached\n # to the canvas\n for k in range(self.numCells):\n ( r, c) = divmod(k, self.nDigits)\n rr = r // self.n\n cc = c // self.n\n b = rr * self.n + cc\n # this checks that r,c, and b are good\n #print(k,r,c,b)\n self.cell.append(Cell(r,c,b,self.can,self))\n # add the solver\n self.solver = SudokuSolver(self)\n # add a menu\n self.menu = SudokuMenu(self)\n #add buttons\n self.restartButton = Button(tk,command = self.restart, text='Restart')\n self.can.create_window(xyMax+10,10,window=self.restartButton,\n anchor=NW) \n self.undoButton = Button(tk,command = self.undo, text='Undo')\n self.can.create_window(xyMax+10,s+10,window=self.undoButton,\n anchor=NW) \n self.checkButton = Button(tk,command = self.checkGame, text='Check')\n self.can.create_window(xyMax+10,2*s+10,window=self.checkButton,\n anchor=NW) \n self.optButton = Button(tk,command = self.printOptions, text='Options?')\n self.can.create_window(xyMax+10,3*s+10,window=self.optButton,\n anchor=NW) \n self.solveButton = Button(tk,command = self.solver.solve, text='Solve')\n self.can.create_window(xyMax+10,4*s+10,window=self.solveButton,\n anchor=NW) \n self.guessButton = Button(tk,command = self.solver.guessingSolve,\n text='Solve with guessing')\n self.can.create_window(xyMax+10,5*s+10,window=self.guessButton,\n anchor=NW) \n #clear board\n #self.clear()\n\n #set up exit actions\n self.top = self.can.winfo_toplevel()\n self.top.protocol(\"WM_DELETE_WINDOW\", self.__SaveOnClosing)\n pass", "def build_frames(self):\n self.cntrl_frame = tk.PanedWindow(self.root)\n self.cntrl_frame.pack(side = tk.TOP, padx = 1, pady = 1, fill = tk.Y)\n self.info_frame_1 = tk.PanedWindow(self.root)\n self.info_frame_1.pack(side = tk.TOP, padx = 1, pady = 2, fill = tk.Y)", "def configure_grid(self):\r\n\r\n for r in range(3):\r\n self.rowconfigure(r, weight=1)\r\n for c in range(3):\r\n self.columnconfigure(c, weight=1)", "def __createCanvas(self):\r\n # create a canvas and pass a figure to it\r\n self.figure = plt.figure()\r\n self.canvas = FigureCanvas(self.figure)\r\n\r\n # create an axis\r\n self.canvas.axes = self.figure.add_subplot(1, 1, 1) # 1X1 grid, 1st subplot\r\n self.canvas.axes.set_title(\"Plot\")\r\n\r\n # create Navigation widget and pass a Canvas widget and the parent\r\n self.toolbar = NavigationToolbar(self.canvas, self)", "def make_graph(self, frame, obname, **kwargs):\n \n #Generate the figure\n fig = self.make_fig(obname)\n \n #Identify the location to place the figure\n if 'gridpos' in kwargs:\n newrow = kwargs.pop('gridpos')\n else:\n newrow = frame.grid_size()[1] \n \n #Generate a frame specifically for the figure (this prevents resizing when the figure is updated)\n canvas_frame = tk.Frame(frame) #, width=self.screenwidth*0.13, height=self.screenheight*0.2778)\n canvas_frame.grid(column=0, row=newrow+1, columnspan=2)\n \n #Generate a canvas and place the figure in it\n canvas = FigureCanvasTkAgg(fig, master=canvas_frame) # A tk.DrawingArea.\n canvas.draw()\n canvas.get_tk_widget().grid(column=0, row=0)\n\n return canvas, fig", "def _create_canvas(self, parent):\n # matplotlib commands to create a canvas\n frame = QtGui.QWidget()\n mpl_canvas = FigureCanvas(self.value)\n mpl_canvas.setParent(frame)\n#\t\tmpl_toolbar = NavigationToolbar2QT(mpl_canvas,frame)\n\n vbox = QtGui.QVBoxLayout()\n vbox.addWidget(mpl_canvas)\n#\t\tvbox.addWidget(mpl_toolbar)\n frame.setLayout(vbox)\n return frame", "def create_grid(self, main_frame: tk.LabelFrame) -> None:\n for square_row in range(3):\n for 
square_column in range(3):\n square = tk.Frame(main_frame, highlightbackground='black', highlightcolor='red',\n highlightthickness=1, width=120, heigh=120, padx=0)\n square.grid(row=square_row, column=square_column)\n self.create_cells_and_entries(square, square_row)\n return None", "def __init__(self, frame):\n self.frame = frame\n self._configure()", "def __init__(self, min_height=600, min_width=600):\n self.window = Tk()\n # set minimum size to which the window can be reduced\n self.window.minsize(min_width, min_height)\n self.canvas = None\n self.frames = {\n \"parameters\": None,\n \"canvas\": None\n }\n self.menubar = {\n \"menubar\": None,\n \"helpmenu\": None,\n \"filemenu\": None,\n \"editmenu\": None\n }\n self.combo_box = {\n \"class\": None,\n \"variable\": None\n }\n self.init_canvas_frame()\n self.init_parameters_frame()\n # self.init_menu_bar()\n self.classes = {\n \"parameters\": Parameters(self),\n \"fractal\": FastFractal(self)\n }\n self.init_parameter_combobox()", "def __init__(self):\n self.rows = [18, 23, 24, 25]\n self.cols = [17, 27, 22]\n self.keypad = [\n [\"1\", \"2\", \"3\"],\n [\"4\", \"5\", \"6\"],\n [\"7\", \"8\", \"9\"],\n [\"*\", \"0\", \"#\"]\n ]\n self.setup()", "def __init__( self, a_parent_frame,\n a_title,\n a_title_color,\n button_width = 10,\n button_height = 2 ):\n a_frame = Tk.Frame( a_parent_frame,\n # bg =\"red\",\n bg = \"gray\", )\n\n a_frame.rowconfigure( 0, weight= 1 )\n a_frame.rowconfigure( 1, weight= 1 )\n\n a_frame.columnconfigure( 0, weight= 1 )\n #master.columnconfigure( 1, weight= 1 )\n self.frame = a_frame\n p_frame = a_frame\n\n a_frame = Tk.Frame( p_frame, bg = a_title_color, )\n # padx = 2, pady = 2, relief= Tk.GROOVE, )\n a_frame.grid( row = 0, column = 0 ,sticky = Tk.E + Tk.W )\n self.top_inner_frame = a_frame\n\n a_label = Tk.Label( a_frame,\n text = a_title,\n bg = a_title_color , )\n # relief = RAISED, )\n a_label.grid( row = 0, column = 0, )\n # columnspan = 1, sticky = Tk.W + Tk.E )\n\n a_frame = Tk.Frame( p_frame, )\n # bg = \"blue\", ) # use neutral color or the title color\n # padx = 2, pady = 2, relief= Tk.GROOVE, )\n a_frame.grid( row = 1, column = 0,sticky = Tk.E + Tk.W )\n self.bottom_inner_frame = a_frame\n\n self.button_width = button_width\n self.button_height = button_height\n self.button_row = 0\n self.button_column = 0", "def create(self):\n self.panel = pg.rect.Rect(self.position, self.dimensions)", "def init_parameters_frame(self, height=400, width=200):\n self.frames[\"parameters\"] = Frame(\n master=self.window,\n width=width,\n height=height,\n bg=\"pink\")\n self.frames[\"parameters\"].pack(\n anchor=\"ne\",\n side=RIGHT,\n expand=False,\n fill=BOTH)", "def __init__(self, numCells=32):\n\n Gtk.Window.__init__(self)\n self.set_title(\"Braille Monitor\")\n\n grid = Gtk.Grid()\n self.add(grid)\n\n self.cells = []\n for i in range(numCells):\n cell = BrlCell(i)\n grid.attach(cell, i, 0, 1, 1)\n self.cells.append(cell)\n\n self.set_resizable(False)\n self.set_property(\"accept-focus\", False)\n self.set_skip_taskbar_hint(True)\n self.set_skip_pager_hint(True)", "def __init__(self,*args, **kwargs):\n tk.Tk.__init__(self, *args, **kwargs)\n self.winfo_toplevel().title(\"ElogQP\")\n self.container = tk.Frame(self)\n self.container.pack(side=\"top\", fill=\"both\", expand=True)\n self.container.grid_rowconfigure(0, weight=1)\n self.container.grid_columnconfigure(0, weight=1)\n self.activeFrames = []\n for F in (Frames.frame_start.frame_start, Frames.frame_modules.frame_modules, 
Frames.frame_showError.frame_showError):\n self.createFrame(F, F.__name__)\n \n self.showFrame(\"frame_start\")", "def make_canvas(width, height, title):\n top = tkinter.Tk()\n top.minsize(width=width, height=height)\n top.title(title)\n canvas = tkinter.Canvas(top, width=width + 1, height=height + 1)\n # canvas.pack()\n canvas.place()\n return canvas", "def Define_Frame(self):\n self.frame=Frame(self.master, relief=GROOVE, bd=4)\n self.frame.grid(row=0,column=1,rowspan=2,columnspan=2)\n frame_title = Label(self.frame,text=\"Stage Control\",relief=RAISED,bd=2,width=24, bg=\"light yellow\",font=(\"Times\", 16))\n frame_title.grid(row=0, column=1)\n self.encoder_text = [] # These hold the stage position as read by the encoders\n self.coo_ent = [] # These hold the coordinate entry values\n but = []\n encoder_display = []\n for i in range(3):\n self.coo_ent.append(Entry(self.frame, justify=\"center\", width=12))\n but.append(Button(self.frame, text=\"Move %s (relative)\"%self.POS_NAME[i], width=12,command=lambda axis=i:self.GUI_move(axis)))\n self.encoder_text.append(StringVar())\n encoder_display.append(Label(self.frame,textvariable=self.encoder_text[i],relief=SUNKEN,bd=1, width=20))\n self.coo_ent[i].grid(row=i+1,column=0)\n self.coo_ent[i].focus_set()\n but[i].grid(row=i+1,column=1)\n encoder_display[i].grid(row=i+1,column=2)\n self.encoder_text[i].set(\"%8s microns\"%str(self.read_pos[i]))\n zero_encoders_button = Button(self.frame, text=\"Re-Initialize Encoders\", width=20, command=self.GUI_ReInitialize_Encoders)\n zero_encoders_button.grid(row=5,column=1)\n return", "def buildmainframe(self):\n self.mainframewidgets=[]\n for x in range(3):\n thislabel = Label(self.mainframe, text=str(x))\n thislabel.grid()\n self.mainframewidgets.append(thislabel)", "def options(self):\n opt = self.main_window.toplevel()\n cur_l = tkinter.Scale(opt, length=200, label=\"Number of lines:\",\n orient=tkinter.HORIZONTAL, from_=1, to=12,\n command=self.update_nb_rows)\n cur_l.set(self.game.n_row) # initial position of the cursor\n cur_l.pack()\n cur_h = tkinter.Scale(opt, length=200, label=\"Number of columns:\",\n orient=tkinter.HORIZONTAL, from_=1, to=12,\n command=self.update_nb_cols)\n cur_h.set(self.game.n_col)\n cur_h.pack()", "def __init__(self, cell_size, nrows, ncols, **kwds):\n #\n # Python 3 update\n #\n super().__init__(**kwds)\n self.cell_size = cell_size\n w, h = cell_size\n d = 2 * self.margin\n self.size = (w * ncols + d, h * nrows + d)\n self.cell_size = cell_size", "def __init__(self, dat, frame, box_size, centre,\n label=False, **kwargs):\n\n super().__init__(dat, frame, box_size, centre) # initialise superclass\n\n self.label = label # write labels\n\n self.draw()", "def _create_canvas(self, parent):\n # The panel lets us add additional controls.\n panel = wx.Panel(parent, -1, style=wx.CLIP_CHILDREN)\n sizer = wx.BoxSizer(wx.VERTICAL)\n panel.SetSizer(sizer)\n # matplotlib commands to create a canvas\n mpl_control = FigureCanvas(panel, -1, self.value)\n sizer.Add(mpl_control, 1, wx.LEFT | wx.TOP | wx.GROW)\n toolbar = NToolbar(mpl_control)\n sizer.Add(toolbar, 0, wx.EXPAND)\n self.value.canvas.SetMinSize((10,10))\n return panel", "def init_plot(self, master):\n b = Figure(figsize=(8, 6), dpi=100)\n ac = b.add_subplot(111)\n ac.plot(10, 10)\n ac.set_title('Current tour plot')\n ac.set_xlabel('X axis coordinates')\n ac.set_ylabel('Y axis coordinates')\n ac.grid(True)\n canvas = FigureCanvasTkAgg(b, master)\n canvas.draw()\n canvas.get_tk_widget().grid(row=1, column=1, sticky=W)", "def 
__init__(self, master=None, margin=30):\r\n Frame.__init__(self, master, padx=margin, pady=margin)\r\n self.grid()\r\n self.widgets()\r\n self.behavior()", "def grid_init(self):\n # draw.line(surface, color, start_pos, end_pos, width/thickness)\n pygame.draw.line(\n self.game_screen,\n GameData.line_color,\n (0, GameData.square_size),\n (GameData.screen_dim, GameData.square_size),\n GameData.line_width\n )\n # # 2 horizontal\n pygame.draw.line(\n self.game_screen,\n GameData.line_color,\n (0, 2 * GameData.square_size),\n (GameData.screen_dim,2 * GameData.square_size),\n GameData.line_width\n )\n\n # # 1 vertical\n pygame.draw.line(\n self.game_screen,\n GameData.line_color,\n (GameData.square_size, 0),\n (GameData.square_size, GameData.screen_dim),\n GameData.line_width\n )\n # # 2 vertical\n pygame.draw.line(\n self.game_screen,\n GameData.line_color,\n (2 * GameData.square_size, 0),\n (2 * GameData.square_size, GameData.screen_dim),\n GameData.line_width)", "def Data_Frame( self ):\r\n #Create pane\r\n p = self.pane_widget.add( \"Data\", min = 0.1, max = 0.9)\r\n frame_sequence = Frame( p )\r\n #xscroll at the top\r\n self.xscroll = Scrollbar( frame_sequence, orient = HORIZONTAL )\r\n self.xscroll.pack(side = TOP, fill = X )\r\n #create the canvas where the data will be displayed\r\n self.canvas_two = Canvas( frame_sequence )\r\n #Make sure these values are consistent with self.canvas_one in Tree_Frame\r\n self.canvas_two.pack( side = TOP, fill = BOTH, expand = 1 )\r\n self.xscroll.config( command = self.canvas_two.xview )\r\n self.canvas_two.config( xscrollcommand = self.xscroll.set )\r\n frame_sequence.pack(side=LEFT, fill = BOTH)" ]
[ "0.7390701", "0.7325227", "0.7284718", "0.7152517", "0.7130581", "0.7130581", "0.7130581", "0.70996016", "0.6822051", "0.6757563", "0.67494226", "0.66495013", "0.66449696", "0.6612262", "0.66097903", "0.65810424", "0.6549738", "0.6528398", "0.6521471", "0.64550036", "0.6443862", "0.64325196", "0.64139813", "0.6409501", "0.63732016", "0.63435876", "0.6323557", "0.63142186", "0.63116074", "0.6305019", "0.6272632", "0.6263857", "0.6263023", "0.62588924", "0.6258185", "0.6245084", "0.6238522", "0.6237115", "0.6228507", "0.6224982", "0.6219855", "0.62168574", "0.6212588", "0.6205178", "0.62005043", "0.61993176", "0.61895144", "0.6181671", "0.6179933", "0.6174353", "0.61659527", "0.61296785", "0.61258", "0.6090103", "0.6079541", "0.6074275", "0.6063817", "0.6051531", "0.6050692", "0.60496956", "0.60441303", "0.6041221", "0.603697", "0.60354805", "0.60341704", "0.60260975", "0.60210747", "0.60186183", "0.60153073", "0.60060745", "0.59928304", "0.5985022", "0.59824926", "0.59742194", "0.59731185", "0.5968127", "0.5960575", "0.59526867", "0.5951346", "0.5950409", "0.59413415", "0.59254605", "0.5916592", "0.59030396", "0.5899873", "0.5893766", "0.5892104", "0.58910525", "0.58905655", "0.5888053", "0.58874464", "0.58788484", "0.58724785", "0.58650994", "0.5858722", "0.5856287", "0.5854053", "0.58529216", "0.5852193", "0.5846372" ]
0.6334319
26
Approximate the 95% confidence interval for Student's T distribution. Given the degrees of freedom, returns an approximation to the 95% confidence interval for the Student's T distribution.
def tdist95conf_level(df):
    df = int(round(df))
    highest_table_df = len(_T_DIST_95_CONF_LEVELS)
    if df >= 200:
        return 1.960
    if df >= 100:
        return 1.984
    if df >= 80:
        return 1.990
    if df >= 60:
        return 2.000
    if df >= 50:
        return 2.009
    if df >= 40:
        return 2.021
    if df >= highest_table_df:
        return _T_DIST_95_CONF_LEVELS[highest_table_df - 1]
    return _T_DIST_95_CONF_LEVELS[df]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def confidence_at_95tpr(self):\r\n\r\n return self.confidence_at_tpr(0.95)", "def t_confidence_Interval_Difference_Of_Means(xSamples, ySamples, confidence):\n try:\n if len(xSamples) >= 30 or len(ySamples) >= 30:\n raise sampleSizeError(\"Should use normal distribution instead. m or n > 30.\")\n \n if confidence > 1:\n confidence = confidence / 100.0\n print(f\"Converting confidence interval to {confidence}\")\n\n elif type(confidence) != int or type(confidence) != float:\n raise ValueError(\"Confidence Interval must be a numeric value\")\n \n # Find mean and variance for both sample distributions\n n = len(xSamples) \n xBar = sample_mean(xSamples)\n xSampStd = sample_variance(xSamples) ** .5\n \n m = len(ySamples)\n yBar = sample_mean(ySamples)\n ySampStd = sample_variance(ySamples) ** .5\n \n # Find t at alpha/2 and the new distribution's sample size - 2\n # Calculate the sample pooling standard deviation\n tAlpha = (1 + confidence) / 2.0\n t = scipy.stats.t.ppf(tAlpha, (m + n - 2)) \n spsd = ((((n - 1)* (xSampStd**2)) + ((m - 1) * (ySampStd**2)))/(m + n - 2)) ** .5 \n \n # Find the lower and upper bound \n # (X-Y) (+/-) t((spsd * (((1/m)+(1/n)) **.5))\n lowerBound = (xBar - yBar) - t * (spsd * (((1/m)+(1/n)) **.5))\n upperBound = (xBar - yBar) + t * (spsd * (((1/m)+(1/n)) **.5))\n \n return lowerBound, upperBound\n \n except sampleSizeError as inst:\n print(inst.args[0])\n \n except ValueError as inst:\n print(inst.args[0])", "def confidence_at_99tpr(self):\r\n\r\n return self.confidence_at_tpr(0.99)", "def confidence_at_98tpr(self):\r\n\r\n return self.confidence_at_tpr(0.98)", "def find_confidence(self, t, df):\n t_table = self.t_table\n nearest_df = round(find_nearest(t_table.index, df), 0)\n nearest_t = round(find_nearest(t_table.loc[nearest_df], t), 6)\n for col in list(t_table):\n if nearest_t == round(t_table[col][nearest_df], 6):\n # Subtract from one to get confidence, divide by two to get\n # single section on positive side of distribution.\n confidence = (1.0 - float(col)) / 2.0\n return confidence", "def confidence_at_995tpr(self):\r\n\r\n return self.confidence_at_tpr(0.995)", "def test_small_round_numbers_95_percent(self):\r\n upper_bound = 10\r\n lower_bound = 2\r\n n_value = 20\r\n ci_percent = 95\r\n expected_result = 9.12680\r\n result = ci_to_sd(upper_bound, lower_bound, ci_percent, n_value)\r\n\r\n self.assertAlmostEqual(expected_result, result, 4)", "def confidence(s, p):\r\n\r\n if p == 1:\r\n return (-oo, oo)\r\n\r\n assert p <= 1\r\n\r\n # In terms of n*sigma, we have n = sqrt(2)*ierf(p). 
The inverse\r\n # error function is not yet implemented in SymPy but can easily be\r\n # computed numerically\r\n\r\n from sympy.numerics import Float, secant, evalf\r\n from sympy.numerics.functions2 import erf\r\n p = evalf(p)\r\n # calculate y = ierf(p) by solving erf(y) - p = 0\r\n y = secant(lambda y: erf(y) - p, 0)\r\n t = Real(str(evalf(s.sigma) * Float(2)**0.5 * y))\r\n mu = s.mu.evalf()\r\n return (mu-t, mu+t)", "def student_t_approx(optimize=True, plot=True):\r\n real_std = 0.1\r\n #Start a function, any function\r\n X = np.linspace(0.0, np.pi*2, 100)[:, None]\r\n Y = np.sin(X) + np.random.randn(*X.shape)*real_std\r\n Y = Y/Y.max()\r\n Yc = Y.copy()\r\n\r\n X_full = np.linspace(0.0, np.pi*2, 500)[:, None]\r\n Y_full = np.sin(X_full)\r\n Y_full = Y_full/Y_full.max()\r\n\r\n #Slightly noisy data\r\n Yc[75:80] += 1\r\n\r\n #Very noisy data\r\n #Yc[10] += 100\r\n #Yc[25] += 10\r\n #Yc[23] += 10\r\n #Yc[26] += 1000\r\n #Yc[24] += 10\r\n #Yc = Yc/Yc.max()\r\n\r\n #Add student t random noise to datapoints\r\n deg_free = 5\r\n print \"Real noise: \", real_std\r\n initial_var_guess = 0.5\r\n edited_real_sd = initial_var_guess\r\n\r\n # Kernel object\r\n kernel1 = GPy.kern.rbf(X.shape[1]) + GPy.kern.white(X.shape[1])\r\n kernel2 = kernel1.copy()\r\n kernel3 = kernel1.copy()\r\n kernel4 = kernel1.copy()\r\n\r\n #Gaussian GP model on clean data\r\n m1 = GPy.models.GPRegression(X, Y.copy(), kernel=kernel1)\r\n # optimize\r\n m1.ensure_default_constraints()\r\n m1.constrain_fixed('white', 1e-5)\r\n m1.randomize()\r\n\r\n #Gaussian GP model on corrupt data\r\n m2 = GPy.models.GPRegression(X, Yc.copy(), kernel=kernel2)\r\n m2.ensure_default_constraints()\r\n m2.constrain_fixed('white', 1e-5)\r\n m2.randomize()\r\n\r\n #Student t GP model on clean data\r\n t_distribution = GPy.likelihoods.noise_model_constructors.student_t(deg_free=deg_free, sigma2=edited_real_sd)\r\n stu_t_likelihood = GPy.likelihoods.Laplace(Y.copy(), t_distribution)\r\n m3 = GPy.models.GPRegression(X, Y.copy(), kernel3, likelihood=stu_t_likelihood)\r\n m3.ensure_default_constraints()\r\n m3.constrain_bounded('t_noise', 1e-6, 10.)\r\n m3.constrain_fixed('white', 1e-5)\r\n m3.randomize()\r\n\r\n #Student t GP model on corrupt data\r\n t_distribution = GPy.likelihoods.noise_model_constructors.student_t(deg_free=deg_free, sigma2=edited_real_sd)\r\n corrupt_stu_t_likelihood = GPy.likelihoods.Laplace(Yc.copy(), t_distribution)\r\n m4 = GPy.models.GPRegression(X, Yc.copy(), kernel4, likelihood=corrupt_stu_t_likelihood)\r\n m4.ensure_default_constraints()\r\n m4.constrain_bounded('t_noise', 1e-6, 10.)\r\n m4.constrain_fixed('white', 1e-5)\r\n m4.randomize()\r\n\r\n if optimize:\r\n optimizer='scg'\r\n print \"Clean Gaussian\"\r\n m1.optimize(optimizer, messages=1)\r\n print \"Corrupt Gaussian\"\r\n m2.optimize(optimizer, messages=1)\r\n print \"Clean student t\"\r\n m3.optimize(optimizer, messages=1)\r\n print \"Corrupt student t\"\r\n m4.optimize(optimizer, messages=1)\r\n\r\n if plot:\r\n plt.figure(1)\r\n plt.suptitle('Gaussian likelihood')\r\n ax = plt.subplot(211)\r\n m1.plot(ax=ax)\r\n plt.plot(X_full, Y_full)\r\n plt.ylim(-1.5, 1.5)\r\n plt.title('Gaussian clean')\r\n\r\n ax = plt.subplot(212)\r\n m2.plot(ax=ax)\r\n plt.plot(X_full, Y_full)\r\n plt.ylim(-1.5, 1.5)\r\n plt.title('Gaussian corrupt')\r\n\r\n plt.figure(2)\r\n plt.suptitle('Student-t likelihood')\r\n ax = plt.subplot(211)\r\n m3.plot(ax=ax)\r\n plt.plot(X_full, Y_full)\r\n plt.ylim(-1.5, 1.5)\r\n plt.title('Student-t rasm clean')\r\n\r\n ax = plt.subplot(212)\r\n 
m4.plot(ax=ax)\r\n plt.plot(X_full, Y_full)\r\n plt.ylim(-1.5, 1.5)\r\n plt.title('Student-t rasm corrupt')\r\n\r\n return m1, m2, m3, m4", "def confidence(samples, confidence_level):\n mean = scipy.mean(samples)\n sdev = scipy.std(samples)\n n = len(samples)\n df = n - 1\n t = distributions.t.ppf((1+confidence_level)/2.0, df)\n interval = (interval_low, interval_high) = ( mean - t * sdev / math.sqrt(n) , mean + t * sdev / math.sqrt(n) )\n interval_size = interval_high - interval_low\n interval_percentage = interval_size / mean * 100.0\n return (interval, mean, sdev, interval_percentage)", "def confidence_intervals(data):\r\n\r\n x_bar = np.nanmean(data) # Mean value\r\n s = np.nanstd(data) # Standard deviation\r\n n = len(data) # Sample size\r\n\r\n lo_conf = x_bar - (1.96 * (s / np.sqrt(n))) # Lower bound of confidence interval\r\n hi_conf = x_bar + (1.96 * (s / np.sqrt(n))) # Upper bound of confidence interval\r\n\r\n conf_range = hi_conf - lo_conf # Size of the 95% confidence interval\r\n\r\n return lo_conf, hi_conf, conf_range", "def tpr_at_95tpr(self):\r\n\r\n return self.tpr_at_confidence(self.confidence_at_tpr(0.95))", "def mean_confidence_interval(data, confidence=0.95):\n\n a = 1.0 * np.array(data)\n n = len(a)\n m, se = np.mean(a), scipy.stats.sem(a)\n h = se * scipy.stats.t.ppf((1 + confidence) / 2., n-1)\n return m, m-h, m+h", "def __call__(self, **kwargs):\n stddev = self.predictive_distribution.stddev(**kwargs)\n mean = self.predictive_distribution.mean(**kwargs)\n return normal_upper_confidence_bound(\n mean, stddev, exploration=self.exploration)", "def find_t(self, df, confidence=0.95):\n t_table = self.t_table\n nearest_confidence = round(find_nearest(list(t_table), 1.0-confidence), 4)\n nearest_df = round(find_nearest(t_table.index, df), 0)\n t_score = round(t_table[str(nearest_confidence)][nearest_df], 4)\n\n return t_score", "def confidence(s, p):\r\n p = Basic.sympify(p)\r\n assert p <= 1\r\n\r\n d = (s.b-s.a)*p / 2\r\n return (s.mean - d, s.mean + d)", "def confidenceInterval(start,end,confidence):\n\n\tmean = 0.5*(end+start)\n\tstddev = getStdDev(0.5*(end-start), confidence)\n\n\treturn (mean,stddev)", "def test_error_at_95tpr(self):\r\n\r\n return self.test_error_at_confidence(self.confidence_at_tpr(0.95))", "def mean_confidence_interval(data, confidence=0.95):\n a = 1.0 * np.array(data)\n n = len(a)\n m, se = np.mean(a), stats.sem(a)\n h = se * stats.t._ppf((1 + confidence) /2., n - 1)\n return m, m - h, m + h", "def _tconfint_generic(mean, std_mean, dof, alpha, alternative):\n\n if alternative in [\"two-sided\", \"2-sided\", \"2s\"]:\n tcrit = stats.t.ppf(1 - alpha / 2.0, dof)\n lower = mean - tcrit * std_mean\n upper = mean + tcrit * std_mean\n elif alternative in [\"larger\", \"l\"]:\n tcrit = stats.t.ppf(alpha, dof)\n lower = mean + tcrit * std_mean\n upper = np.inf\n elif alternative in [\"smaller\", \"s\"]:\n tcrit = stats.t.ppf(1 - alpha, dof)\n lower = -np.inf\n upper = mean + tcrit * std_mean\n else:\n raise ValueError(\"invalid alternative\")\n\n return lower, upper", "def confidence_at_tpr(self, tpr):\r\n\r\n assert self.validation_confidences is not None\r\n assert tpr > 0\r\n\r\n # true positives are correctly classified examples\r\n if self.sorted_correct_validation_confidences is None:\r\n correct_validation_confidences = self.validation_confidences[numpy.logical_not(self.validation_errors)]\r\n self.sorted_correct_validation_confidences = numpy.sort(numpy.copy(correct_validation_confidences))\r\n # rounding is a hack see tests\r\n cutoff = 
math.floor(self.sorted_correct_validation_confidences.shape[0] * round((1 - tpr), 2))\r\n assert cutoff >= 0\r\n assert cutoff < self.sorted_correct_validation_confidences.shape[0]\r\n return self.sorted_correct_validation_confidences[cutoff]", "def expected_improvement(ymin, mu, sig):\n p_imp = norm.cdf((ymin-mu)/sig)\n p_ymin = norm.pdf((ymin-mu)/sig)\n ei = (ymin-mu)*p_imp + sig*p_ymin\n return ei", "def confidence(self) -> float:\n return float(self.class_scores[self.class_num])", "def fpr_at_95tpr(self):\r\n\r\n return self.fpr_at_confidence(self.confidence_at_tpr(0.95))", "def validation_tpr_at_95tpr(self):\r\n\r\n return self.validation_tpr_at_confidence(self.confidence_at_tpr(0.95))", "def confidenceInterval(model, N = 30):\n predicted_accuracies = [0]*N\n predicted_roc = [0]*N\n for i in tqdm(range(N)):\n X_train, X_test, y_train, y_test = train_test_split(X, y_binary, random_state=i)\n scaler = StandardScaler()\n X_train = scaler.fit_transform(X_train)\n X_test = scaler.transform(X_test)\n model = model.fit(X_train, y_train)\n predicted_accuracies[i] = accuracy_score(model.predict(X_test), y_test)\n predicted_roc[i] = roc_auc_score(model.predict(X_test), y_test)\n r = np.mean(predicted_roc)\n m = np.mean(predicted_accuracies)\n\n variance_roc = np.var(predicted_roc)\n variance_acc = np.var(predicted_accuracies)\n sd_acc = np.sqrt(variance_acc)\n sd_roc = np.sqrt(variance_roc)\n CI_acc = 2*sd_acc\n CI_roc = 2*sd_roc\n return m, CI_acc, r, CI_roc", "def fisher_confidence_intervals(test_stat, n, confidence_level=.95):\r\n # compute confidence intervals using fishers z transform\r\n z_crit = abs(ndtri((1 - confidence_level) / 2.))\r\n ci_low, ci_high = None, None\r\n if n > 3:\r\n try:\r\n ci_low = tanh(arctanh(test_stat) - (z_crit / sqrt(n - 3)))\r\n ci_high = tanh(arctanh(test_stat) + (z_crit / sqrt(n - 3)))\r\n except (ZeroDivisionError, FloatingPointError):\r\n # r or rho was presumably 1 or -1. 
Match what R does in this case.\r\n # feel like nan should be returned here given that we can't make\r\n # the calculation\r\n ci_low, ci_high = test_stat, test_stat\r\n return ci_low, ci_high", "def compute_confidence_interval(data):\n a = 1.0 * np.array(data)\n m = np.mean(a)\n std = np.std(a)\n pm = 1.96 * (std / np.sqrt(len(a)))\n return m, pm", "def normal_upper_confidence_bound(mean, stddev, exploration=0.01):\n dtype = dtype_util.common_dtype([mean, stddev])\n mean = tf.convert_to_tensor(mean, dtype=dtype)\n stddev = tf.convert_to_tensor(stddev, dtype=dtype)\n return mean + exploration * stddev", "def test_small_round_numbers_90_percent(self):\r\n upper_bound = 10\r\n lower_bound = 2\r\n n_value = 20\r\n ci_percent = 90\r\n expected_result = 10.874494\r\n result = ci_to_sd(upper_bound, lower_bound, ci_percent, n_value)\r\n\r\n self.assertAlmostEqual(expected_result, result, 4)", "def calculate_probability(value, mean, stdev):\n exponent = math.exp(- pow(value - mean, 2) / (2 * pow(stdev, 2)))\n\n return exponent / (stdev * pow(math.pi * 2, .5))", "def calc_indttest_90(varx,vary):\n print('\\n>>> Using calc_ttest function!')\n \n ### Import modules\n import numpy as np\n import scipy.stats as sts\n \n ### 2-independent sample t-test\n stat,pvalue = sts.ttest_ind(varx,vary,nan_policy='omit')\n \n ### Significant at 90% confidence level\n pvalue[np.where(pvalue >= 0.1)] = np.nan\n pvalue[np.where(pvalue < 0.1)] = 1.\n pvalue[np.isnan(pvalue)] = 0.\n \n print('*Completed: Finished calc_ttest function!')\n return stat,pvalue", "def conf_interval_two_means(datae,dataf,conf):\n \n # Dataset E\n data_e = 1.0*np.array(datae)\n n_e = data_e.shape[0]*data_e.shape[1]\n mean_e = np.array(data_e).mean()\n var_e = np.array(data_e).var(ddof=1)\n df_e = n_e-1\n \n # Dataset F\n data_f = 1.0*np.array(dataf)\n n_f = dataf.shape[0]*dataf.shape[1]\n mean_f = np.array(data_f).mean()\n var_f = np.array(data_f).var(ddof=1)\n df_f = n_f-1\n \n # Sp,t calculated for lower/upper bounds \n Sp = np.sqrt((((df_e*var_e) + (df_f*var_f))/(df_e+df_f)))\n t = abs(scs.t.ppf(((1-conf)/2), (df_e+df_f)))\n lower = (mean_e-mean_f)-(Sp*t*np.sqrt(1/n_e+1/n_f))\n upper = (mean_e-mean_f)+(Sp*t*np.sqrt(1/n_e+1/n_f))\n \n return lower,upper", "def mod_pert_random(low, likely, high, confidence=4, samples=30):\n # Check minimum & maximum confidence levels to allow:\n confidence = min(8, confidence)\n confidence = max(2, confidence)\n\n mean = (low + confidence * likely + high) / (confidence + 2)\n\n a = (mean - low) / (high - low) * (confidence + 2)\n b = ((confidence + 1) * high - low - confidence * likely) / (high - low)\n\n beta = np.random.beta(a, b, samples)\n beta = beta * (high - low) + low\n return beta", "def stdProbabilityNorm(self):\n return 0.5", "def confidence_interval(data, control_label=None, *args, **kwargs):\n def fn(control, test):\n c_means = CompareMeans(DescrStatsW(test), DescrStatsW(control))\n if _is_proportion(control, test):\n return c_means.zconfint_diff()\n else:\n return c_means.tconfint_diff()\n\n return _apply(data, fn, control_label)", "def tpr_at_confidence(self, threshold):\r\n\r\n return numpy.sum(self.test_confidences[numpy.logical_not(self.test_errors)] >= threshold) / float(numpy.sum(numpy.logical_not(self.test_errors)))", "def conf(self, success, total):\n try:\n sp = success / total\n conf = binom_conf_interval(success, total, interval='jeffreys')\n uperr = conf[1] - sp # 1 sigma confidence above mean\n loerr = sp - conf[0] # 1 sigma confidence below mean\n return sp, uperr, loerr, 
0.5*(uperr+loerr)\n except ValueError as e:\n return 0, 0, 0, 0", "def generate_confidence(self):\n conf_score = np.random.normal(self.speech_conf_mean,\n self.speech_conf_std)\n conf_score = round(conf_score, 2)\n conf_score = max(conf_score, 0.0) # >= 0.\n conf_score = min(conf_score, 1.0) # <= 1.\n return conf_score", "def ci_mean_std_unknown(array, conf_level=95):\n # calculate significance level\n alpha = np.round((1 - conf_level / 100), 2)\n # mean of the sample\n mean = np.mean(array)\n # standard deviation\n std = np.std(array)\n # size of the sample\n n = len(array)\n # degrees of freedom\n df = n - 1\n # calculate the standard error\n std_error = std / np.sqrt(n)\n # find the t critical value\n t_star = np.round(stats.t.ppf(1 - alpha / 2, df), 3)\n # margin of error\n margin_of_error = np.round(t_star * std_error, 2)\n # calculate the lower and upper confidence bounds\n lcb = np.round(mean - margin_of_error, 2)\n ucb = np.round(mean + margin_of_error, 2)\n\n print(\"Margin Of Error: {}\".format(margin_of_error))\n print(\n \"{}% Confidence Interval for Population Mean: ({},{})\".format(\n conf_level, lcb, ucb\n )\n )", "def ci_mean_std_known(array, std, conf_level=95):\n # calculate significance level\n alpha = np.round((1 - conf_level / 100), 2)\n mean = np.mean(array)\n n = len(array)\n # calculate standard error\n std_error = std / np.sqrt(n)\n # find z critical value\n z_star = np.round(stats.norm.ppf(1 - alpha / 2), 3)\n # margin of error\n margin_of_error = np.round(z_star * std_error, 2)\n\n # calculate the lower and upper confidence bounds\n lcb = np.round(mean - margin_of_error, 2)\n ucb = np.round(mean + margin_of_error, 2)\n\n print(\"Margin Of Error: {}\".format(margin_of_error))\n print(\n \"{}% Confidence Interval for Population Mean: ({}, {})\".format(\n conf_level, lcb, ucb\n )\n )", "def estimates_conf(self):\n return self._est_L, self._est_R", "def test_small_round_numbers_98_percent(self):\r\n upper_bound = 10\r\n lower_bound = 2\r\n n_value = 20\r\n ci_percent = 98\r\n expected_result = 7.67748\r\n result = ci_to_sd(upper_bound, lower_bound, ci_percent, n_value)\r\n\r\n self.assertAlmostEqual(expected_result, result, 4)", "def test_conf_interval_normal_method(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n # ``quantile_estimation_method = \"normal_fit\"``\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"normal_fit\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n assert pred_df[\"y_quantile_summary\"].values[5] == (289.9, 290.25, 292.54, 292.9), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.14, -4.88, -3.24, -2.98), (\n \"quantiles are incorrect\")", "def lower_confidence_bound(beta: torch.Tensor, mean: torch.Tensor, standard_deviation: torch.Tensor) -> torch.Tensor:\n return mean - beta * standard_deviation", "def get_conf_interval_from_sample(n, mean, sigma, alpha = 0.95) :\n df = n-1\n scale = sigma / np.sqrt(n)\n return stats.t.interval(alpha=alpha, 
df=df, loc=mean, scale=scale)", "def bootstrap_ci(x, n=300, ci=0.95):\n\n low_per = 100 * (1 - ci) / 2\n high_per = 100 * ci + low_per\n x = removena_numpy(x)\n if not len(x):\n return (np.nan, np.nan)\n bootstrap_samples = choice(a=x, size=(\n len(x), n), replace = True).mean(axis = 0)\n return np.percentile(bootstrap_samples, [low_per, high_per])", "def confidence_interval(self):\r\n coh_var = np.zeros((self.input.data.shape[0],\r\n self.input.data.shape[0],\r\n self._L), 'd')\r\n for i in range(self.input.data.shape[0]):\r\n for j in range(i):\r\n if i != j:\r\n coh_var[i, j] = tsu.jackknifed_coh_variance(\r\n self.spectra[i],\r\n self.spectra[j],\r\n self.eigs,\r\n adaptive=self._adaptive\r\n )\r\n\r\n idx = triu_indices(self.input.data.shape[0], 1)\r\n coh_var[idx[0], idx[1], ...] = coh_var[idx[1], idx[0], ...].conj()\r\n\r\n coh_mat_xform = tsu.normalize_coherence(self.coherence,\r\n 2 * self.df - 2)\r\n\r\n lb = coh_mat_xform + dist.t.ppf(self.alpha / 2,\r\n self.df - 1) * np.sqrt(coh_var)\r\n ub = coh_mat_xform + dist.t.ppf(1 - self.alpha / 2,\r\n self.df - 1) * np.sqrt(coh_var)\r\n\r\n # convert this measure with the normalizing function\r\n tsu.normal_coherence_to_unit(lb, 2 * self.df - 2, lb)\r\n tsu.normal_coherence_to_unit(ub, 2 * self.df - 2, ub)\r\n\r\n return ub - lb", "def bootstrap_confidence_interval(\n arr, ci=0.95, n_bootstraps=2000, stat_fun=\"mean\", random_state=None\n):\n if stat_fun == \"mean\":\n\n def stat_fun(x):\n return x.mean(axis=0)\n\n elif stat_fun == \"median\":\n\n def stat_fun(x):\n return np.median(x, axis=0)\n\n elif not callable(stat_fun):\n raise ValueError(\"stat_fun must be 'mean', 'median' or callable.\")\n n_trials = arr.shape[0]\n indices = np.arange(n_trials, dtype=int) # BCA would be cool to have too\n rng = check_random_state(random_state)\n boot_indices = rng.choice(indices, replace=True, size=(n_bootstraps, len(indices)))\n stat = np.array([stat_fun(arr[inds]) for inds in boot_indices])\n ci = (((1 - ci) / 2) * 100, ((1 - ((1 - ci) / 2))) * 100)\n ci_low, ci_up = np.percentile(stat, ci, axis=0)\n return np.array([ci_low, ci_up])", "def test_small_round_numbers_99_percent(self):\r\n upper_bound = 10\r\n lower_bound = 2\r\n n_value = 20\r\n ci_percent = 99\r\n expected_result = 6.94700\r\n result = ci_to_sd(upper_bound, lower_bound, ci_percent, n_value)\r\n\r\n self.assertAlmostEqual(expected_result, result, 4)", "def upper_confidence(self, X):\n x = np.asarray(X).reshape(1, -1)\n mu, sigma = self.gpr.predict(x, return_std=True)\n\n return mu - self.beta * sigma", "def get_confidence_interval(\n num_people,\n num_iter=1000000,\n percentile=2.576,\n num_days=365,\n):\n mean = 0.0\n variance = 0.0 # not exactly\n for i in range(1, num_iter + 1):\n x = [randint(1, num_days) for person in range(num_people)]\n x.sort()\n is_consecutive = any(p + 1 == q for (p, q) in zip(x[:-1], x[1:], strict=True))\n is_a_loop = x[0] + num_days - 1 == x[-1]\n is_positive = int(is_consecutive or is_a_loop)\n delta = is_positive - mean\n mean += delta / float(i)\n variance += delta * (is_positive - mean)\n sd = sqrt(variance / float(num_iter - 1))\n lower_bound = mean - percentile * sd / sqrt(num_iter)\n upper_bound = mean + percentile * sd / sqrt(num_iter)\n print(\n \"Number of people: {}\\tLower bound: {:2.5%}\\tUpper bound: {:2.5%}\".format(\n num_people,\n lower_bound,\n upper_bound,\n ),\n )\n return lower_bound, upper_bound", "def create_interval_NEW(confidence, samples=False, n_samples=False, sample_mean=False, sd=False, true_std=False, is_prob=False, 
is_normal=False, side=\"both\"):\n h = 0\n\n if samples:\n n_samples = len(samples)\n sample_mean = float(np.mean(samples))\n sd = float(np.std(samples))\n std_err = float(st.sem(samples))\n else:\n if sd is False and n_samples < 30:\n raise Exception(\"confidence intervals\", \"Missing standard deviation to estimate mean with less than 30 samples.\")\n else:\n std_err = sd / math.sqrt(n_samples)\n\n if side == \"both\":\n alpha = (1 - confidence) / 2\n z = st.norm.ppf(1 - (1 - confidence) / 2)\n t = st.t.ppf((1 + confidence) / 2, n_samples - 1)\n else:\n alpha = (1 - confidence)\n z = st.norm.ppf(1 - (1 - confidence))\n t = st.t.ppf((1 + confidence), n_samples - 1)\n\n if is_prob: ## CI for probabilities\n if sample_mean == 0: ## Rule of three\n return Interval(0, 3/n_samples)\n elif sample_mean == 1: ## Rule of three\n return Interval(1 - 3/n_samples, 1)\n elif n_samples >= 30: ## Binomial proportion confidence interval: Normal/Gaussian distribution of the proportion: https://machinelearningmastery.com/confidence-intervals-for-machine-learning/\n h = z * math.sqrt((sample_mean * (1 - sample_mean)) / n_samples)\n elif n_samples < 30:\n interval = st.bayes_mvs(samples, confidence)[0][1] ## 0 is the mean, 1 is the interval estimate\n return Interval(interval[0], interval[1])\n ## h = t * math.sqrt((mean * (1 - mean)) / n_samples) ## TODO, check this\n else: ## CI for usual values\n if (n_samples >= 30 or is_normal) and true_std is not False: ## use Normal Distribution\n h = z * true_std / math.sqrt(n_samples)\n elif is_normal: ## use Student distribution\n # h = t * s / math.sqrt(n_samples)\n h = t * std_err\n else:\n interval = st.bayes_mvs(samples, confidence)[0][1] ## 0 is the mean, 1 is the interval estimate\n return Interval(interval[0], interval[1])\n\n h = float(h)\n if side == \"both\":\n return Interval(sample_mean - h, sample_mean + h)\n elif side == \"right\":\n if is_prob:\n return Interval(0, sample_mean + h)\n else:\n return Interval(float('-inf'), sample_mean + h)\n else:\n if is_prob:\n return Interval(sample_mean - h, 1)\n else:\n return Interval(sample_mean - h, float('inf'))", "def std_cdf(x):\n return 0.5 + 0.5 * tt.erf(x / tt.sqrt(2.))", "def ci_diff_mean_std_unknown(array1, array2, conf_level=95):\n # calculate significance level\n alpha = np.round((1 - conf_level / 100), 2)\n\n # means of samples\n mean1 = np.mean(array1)\n mean2 = np.mean(array2)\n\n # standard deviation fo samples\n std1 = np.std(array1)\n std2 = np.std(array2)\n\n # size of the samples\n n1 = len(array1)\n n2 = len(array2)\n\n # difference of the two means\n diff_mean = mean1 - mean2\n\n # degrees of freddom\n deg_fred = deg_fred_two_means(std1, std2, n1, n2)\n\n # find the t critical value\n t_star = np.round(stats.t.ppf(1 - alpha / 2, deg_fred), 3)\n\n # margin of error\n margin_of_error = t_star * np.sqrt((std1 ** 2 / n1) + (std2 ** 2 / n2))\n\n # upper and lower confidence bounds\n lcb = np.round(diff_mean - margin_of_error, 2)\n ucb = np.round(diff_mean + margin_of_error, 2)\n\n print(\n \"{}% Confidence Interval for difference of two population means: ({},{})\".format(\n conf_level, lcb, ucb\n )\n )", "def expected_improvement(f_min, mu, sigma):\n # log-scaling might not be the best idea here, especially\n # if people use negative values to maximize output\n # v = (np.log(f_min) - mu) / sigma\n v = (f_min - mu) / sigma\n return (f_min * norm.cdf(v)\n - (np.exp(0.5 * sigma ** 2 + mu)\n * norm.cdf(v - sigma)))", "def delta(tval, tp_confidences, fp_confidences, num_samples):\n tp_percentage = 
\\\n np.sum([1 for x in tp_confidences if x > tval]) / num_samples\n if fp_confidences:\n fp_percentage = np.sum([1 for x in fp_confidences if x > tval]) / \\\n len(fp_confidences)\n else:\n fp_percentage = 0\n optimal_tp = len(tp_confidences) / num_samples\n delta_value = (tp_percentage - optimal_tp) ** 2 + fp_percentage ** 2\n return delta_value, tp_percentage, fp_percentage", "def divergence(self, t, s):\n left = np.mean(np.log(self.dists[(t, s)] / self.dists[(t, t)]))\n right = np.mean(np.log(self.dists[(s, t)] / self.dists[(s, s)]))\n return 0.5 * (left + right)", "def ciw_95_normal (list1):\r\n sd = std(list1)\r\n return 1.96*sd/sqrt(len(list1))", "def test_confidence_intervals(self):\n # Taken from a T-Test table\n\n # Two Tailed\n p, ci = _p_value_and_confidence_intervals(2.228, 10, 'two')\n\n self.assertAlmostEqual(p, .05, delta = .001)\n self.assertTrue(ci.shape == (2, ))\n np.testing.assert_allclose(ci, [-2.228, 2.228], atol=.001)\n\n # Left One-Tailed\n p, ci = _p_value_and_confidence_intervals(1.895, 7, 'left')\n\n self.assertAlmostEqual(p, .05, delta = .001)\n self.assertTrue(ci.shape == (2, ))\n self.assertTrue(np.isinf(ci[0]))\n np.testing.assert_allclose(ci, [-np.inf, 1.895], atol=.001)\n\n # Right One-Tailed\n p, ci = _p_value_and_confidence_intervals(1.761, 14, 'right')\n\n self.assertAlmostEqual(1-p, .05, delta = .001)\n self.assertTrue(ci.shape == (2, ))\n self.assertTrue(np.isinf(ci[1])) \n np.testing.assert_allclose(ci, [-1.761, np.inf], atol=.001)", "def calculate_ci(data, ci_level=0.99):\n\n # remove NaNs\n ys = data.dropna().values\n\n # calculate CI\n n = len(ys)\n std_err = sem(ys)\n h = std_err * t.ppf((1 + ci_level) / 2, n - 1)\n\n return h", "def _lower_confidence_bound(self, NA: int, N: int, alpha: float) -> float:\n return proportion_confint(NA, N, alpha=2 * alpha, method=\"beta\")[0]", "def confidence(self) -> float:\n return self._confidence", "def outcome_cdf_alt1(self,T=None,y=None):\n mu=self.mu\n sigma2=self.sigma2\n A=self.b-self.a\n x=0-self.a\n y=y-self.a\n gamma=mu/sigma2\n n=1\n s=0.0\n lambda_1=((math.pi/A)**2)*sigma2/2+(mu**2/sigma2)/2\n t0=math.exp(-lambda_1*T-x*gamma+y*gamma)\n while True:\n lambda_n=((n*math.pi/A)**2)*sigma2/2+(mu**2/sigma2)/2\n t1=math.exp(-(lambda_n-lambda_1)*T)\n t3=U(n,gamma,A,y)\n t4=math.sin(n*math.pi*x/A)\n s+=t1*t3*t4\n if abs(t0*t1*t3)<=1e-9:\n break\n n+=1\n if gamma*A>30: # avoid numerical overflow\n pre=math.exp(-2*gamma*x)\n elif abs(gamma*A)<1e-8: # avoid division by zero\n pre=(A-x)/A\n else:\n pre=(1-math.exp(2*gamma*(A-x)))/(1-math.exp(2*gamma*A))\n return pre+t0*s", "def _ci(arr, ci=0.95, method=\"bootstrap\", n_bootstraps=2000, random_state=None):\n if method == \"bootstrap\":\n return bootstrap_confidence_interval(\n arr, ci=ci, n_bootstraps=n_bootstraps, random_state=random_state\n )\n else:\n from .parametric import _parametric_ci\n\n return _parametric_ci(arr, ci=ci)", "def calc_invest(prob):\n if prob == 1.0:\n prob = 0.999\n elif prob == 0.0:\n prob = 0.001\n\n signal = (prob - (1.0 / Consts.NUM_OF_CLASSES)) / ((prob * (1.0 - prob)) ** 0.5)\n\n res = (2 * norm.cdf(signal) - 1)\n\n return res", "def relative_change_stdev(mean1, mean2, std1, std2):\n mean1, mean2 = float(mean1), float(mean2)\n quotient = mean2 / mean1\n first = std1 / mean1\n second = std2 / mean2\n std = quotient * math.sqrt(first * first + second * second)\n return (quotient - 1) * 100, std * 100", "def confidence_coefficient( confidence_level, dimensions=1 ):\n return np.sqrt(chi2.ppf(confidence_level, df=dimensions))", "def 
find_chi2(self, df, confidence=0.95):\n chi2_table = self.chi2_table\n nearest_confidence = round(find_nearest(list(chi2_table), 1.0-confidence), 4)\n nearest_df = round(find_nearest(chi2_table.index, df), 0)\n chi2 = round(chi2_table[str(nearest_confidence)][nearest_df], 4)\n return chi2", "def test_conf_interval_normal_method_no_conditionals(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n # ``quantile_estimation_method = \"normal_fit\"``;\n # with no ``conditional_cols``\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=None,\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"normal_fit\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n assert list(pred_df.columns) == [\"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n assert pred_df[\"y_quantile_summary\"].values[5] == (290.05, 290.37, 292.42, 292.74), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.41, -5.08, -3.04, -2.72), (\n \"quantiles are incorrect\")", "def ST99(self,dc,nu):\n if len(self.bias_par.keys()) == 0:\n q = 0.707\n p = 0.3\n else:\n q = self.bias_par['q']\n p = self.bias_par['p']\n return 1. + (q*nu**2-1.)/dc + (2.*p/dc)/(1.+(q*nu**2)**p)", "def _error(self, Y, T):\n err = np.mean((Y - T)**2)\n return err", "def _ei_acq(x):\n mu, sigma = gp.eval(x, uncert_form='std')\n Z = (mu - curr_best) / sigma\n return (mu - curr_best)*normal_distro.cdf(Z) + sigma*normal_distro.pdf(Z)", "def ttest_mean(self, value=0, alternative=\"two-sided\"):\n # TODO: check direction with R, smaller=less, larger=greater\n tstat = (self.mean - value) / self.std_mean\n dof = self.sum_weights - 1\n # TODO: use outsourced\n if alternative == \"two-sided\":\n pvalue = stats.t.sf(np.abs(tstat), dof) * 2\n elif alternative == \"larger\":\n pvalue = stats.t.sf(tstat, dof)\n elif alternative == \"smaller\":\n pvalue = stats.t.cdf(tstat, dof)\n else:\n raise ValueError(\"alternative not recognized\")\n\n return tstat, pvalue, dof", "def get_ci(self, ci_percent, test_type='t-test'):\n prop_cut = (1 - ci_percent) / 2\n if test_type == 'bootstrap':\n perf = self.evaluations\n while len(perf.shape) > 2:\n perf = np.nanmean(perf, axis=-1)\n framed_evals = np.concatenate(\n (np.tile(np.array(([-np.inf], [np.inf])),\n (1, self.n_model)),\n perf),\n axis=0)\n ci = [np.quantile(framed_evals, prop_cut, axis=0),\n np.quantile(framed_evals, 1 - prop_cut, axis=0)]\n else:\n tdist = scipy.stats.t\n std_eval = self.get_sem()\n means = self.get_means()\n ci = [means + std_eval * tdist.ppf(prop_cut, self.dof),\n means - std_eval * tdist.ppf(prop_cut, self.dof)]\n return ci", "def mse(gt, pred):\n return np.mean((gt - pred) ** 2)", "def mse(gt, pred):\n return np.mean((gt - pred) ** 2)", "def test_conf_interval_ecdf_method(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n\n # ``quantile_estimation_method = \"ecdf\"``\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"ecdf\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n 
min_admissible_value=None,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n pred_df[ERR_STD_COL] = round(pred_df[ERR_STD_COL], 2)\n assert pred_df[\"y_quantile_summary\"].values[5] == (289.32, 289.38, 291.3, 291.34), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (-5.63, -5.56, -4.13, -4.08), (\n \"quantiles are incorrect\")\n expected_stds = [0.29, 0.42, 0.42, 0.42, 0.42, 0.58, 0.58, 0.58, 0.58, 0.58,\n 0.58, 0.42]\n assert list(pred_df[ERR_STD_COL].values) == expected_stds", "def example2(N, x):\n\n\tX = np.random.randn(N)\n\tI_estm = np.mean([0 if s>=x else 1 for s in X])\n\tprint(\"simulation estimate:\", I_estm)\n\tprint(\"true value: \", norm.cdf(x))", "def std_cdf(x):\n return 0.5 + 0.5 * pt.erf(x / pt.sqrt(2.0))", "def estimate_ate(self, x: np.array, t: np.array, y: np.array) -> float:\n self.fit(x, t, y)\n ite = self.predict_ite(x)\n return float(np.mean(ite))", "def ci2se(ci):\n\n ci = sorted(ci)\n\n return (ci[1] - ci[0]) / (2 * 1.96)", "def get_estimate(self):\n if not self.has_samplers():\n self.draw_samplers()\n \n v = np.percentile(self.samplers, [16, 50, 84])\n return v[1], v[2]-v[1], v[1]-v[0]", "def stationary_distribution_sensitivity(T, j):\n\n n = len(T)\n\n lEV = numpy.ones(n)\n rEV = stationary_distribution(T)\n eVal = 1.0\n\n T = numpy.transpose(T)\n\n vecA = numpy.zeros(n)\n vecA[j] = 1.0\n\n matA = T - eVal * numpy.identity(n)\n # normalize s.t. sum is one using rEV which is constant\n matA = numpy.concatenate((matA, [lEV]))\n\n phi = numpy.linalg.lstsq(numpy.transpose(matA), vecA)\n phi = numpy.delete(phi[0], -1)\n\n sensitivity = -numpy.outer(rEV, phi) + numpy.dot(phi, rEV) * numpy.outer(rEV, lEV)\n\n return sensitivity", "def recommend_T(self):\n max_eigvalue = torch.max(torch.linalg.eigvalsh(self.Wc))\n T = (self.settings[\"tgtStd\"] ** 2) * \\\n (self.vars['bowl_strength'] - max_eigvalue)\n return T", "def gaussian(T, Y, X, t, y, x, sigma, sigma_t=1):\n const_value = np.sqrt(2 * np.pi * sigma) ** 3\n norm = np.exp(\n -(\n ((X - x) ** 2) / (2 * sigma ** 2)\n + ((Y - y) ** 2) / (2 * sigma ** 2)\n + ((T - t) ** 2) / (2 * sigma_t ** 2)\n )\n )\n return norm / const_value", "def TST_ME_DK(X, Y, T, X_org, Y_org, T_org, alpha, sigma, sigma0, epsilon, flag_debug = False):\r\n J = T.shape[0]\r\n s = compute_ME_stat(X, Y, T, X_org, Y_org, T_org, sigma, sigma0, epsilon)\r\n pvalue = stats.chi2.sf(s.item(), J)\r\n if pvalue<alpha:\r\n h = 1\r\n else:\r\n h = 0\r\n if flag_debug:\r\n pdb.set_trace()\r\n return h, pvalue, s", "def plot_confidence_interval_for_data (model, X):\n preds = np.stack([t.predict(X) for t in model.estimators_], axis=1)\n preds_ds = pd.DataFrame()\n preds_ds['mean'] = preds.mean(axis=1)\n preds_ds['std'] = preds.std(axis=1)\n\n fig = plt.figure(figsize=(15,6))\n my_xticks = ['datapoint ' + str(i+1) for i in list(preds_ds.index)]\n plt.errorbar(x = preds_ds.index, y=preds_ds['mean'], yerr=preds_ds['std'], \n fmt='o', color='blue', ecolor='lightblue', capsize=3)\n plt.title('Confidence Interval for the predicted value')\n plt.xticks(preds_ds.index, my_xticks)\n for i in list(preds_ds.index):\n m, std = round(preds_ds['mean'][i],1), round(preds_ds['std'][i],2)\n s=f' pred={m} \\n std dev= {std}'\n plt.text(x = i, 
y=preds_ds['mean'][i], s=s ) \n plt.show()", "def nmse(gt, pred):\n return np.linalg.norm(gt - pred) ** 2 / np.linalg.norm(gt) ** 2", "def nmse(gt, pred):\n return np.linalg.norm(gt - pred) ** 2 / np.linalg.norm(gt) ** 2", "def prob(E1, E2, t):\n return 1 if E1 > E2 else np.exp((E1 - E2) / t)", "def student_t(cls, *marginals):\n return ((marginals[NGRAM] -\n _product(marginals[UNIGRAMS]) /\n float(marginals[TOTAL] ** (cls._n - 1))) /\n (marginals[NGRAM] + _SMALL) ** .5)", "def standardise(tx, mean=None, std=None):\n if((mean is None) and (std is None)):\n mean = np.mean(tx, axis=0)\n std = np.std(tx, axis=0)\n tx = tx - mean\n tx = tx / std\n return tx, mean, std", "def tconfint_mean(self, alpha=0.05, alternative=\"two-sided\"):\n # TODO: add asymmetric\n dof = self.sum_weights - 1\n ci = _tconfint_generic(\n self.mean, self.std_mean, dof, alpha, alternative\n )\n return ci", "def confidence(self):\n\n choices = self.choices\n\n # Get the chi-squared between the top two choices, if more than two choices exist\n if len(choices) >= 2:\n csq = chi_squared(*choices)\n confident = is_confident(csq, len(choices)) if len(choices) <= 10 else None\n else:\n csq = None\n confident = False\n\n return (csq, confident)", "def ci_diff_mean_std_known(array1, array2, std1, std2, conf_level=95):\n # calculate significance level\n alpha = np.round((1 - conf_level / 100), 2)\n\n # means of samples\n mean1 = np.mean(array1)\n mean2 = np.mean(array2)\n\n # size of the samples\n n1 = len(array1)\n n2 = len(array2)\n\n # difference of the two means\n diff_mean = mean1 - mean2\n\n # the z critical value\n z_star = np.round(stats.norm.ppf(1 - alpha / 2), 3)\n\n # margin of error\n margin_of_error = z_star * np.sqrt((std1 ** 2 / n1) + (std2 ** 2 / n2))\n\n # upper and lower confidence bounds\n lcb = np.round(diff_mean - margin_of_error, 2)\n ucb = np.round(diff_mean + margin_of_error, 2)\n\n print(\n \"{}% Confidence Interval for difference of two population means: ({},{})\".format(\n conf_level, lcb, ucb\n )\n )", "def find_confidence(self, chi2, df):\n chi2_table = self.chi2_table\n nearest_df = round(find_nearest(chi2_table.index, df), 0)\n nearest_chi2 = round(find_nearest(chi2_table.loc[nearest_df], chi2), 6)\n for col in list(chi2_table):\n if nearest_chi2 == round(chi2_table[col][nearest_df], 6):\n # Subtract from one to get confidence.\n confidence = (1.0 - float(col))\n return confidence", "def test_conf_interval_normal_method_with_bounds(data):\n df = data[\"df\"]\n new_df = data[\"new_df\"]\n # ``quantile_estimation_method = \"normal_fit\"``\n # with enforced lower limit (``min_admissible_value``)\n ci_model = conf_interval(\n df=df,\n value_col=\"y\",\n residual_col=\"residual\",\n conditional_cols=[\"x\"],\n quantiles=[0.005, 0.025, 0.975, 0.995],\n quantile_estimation_method=\"normal_fit\",\n sample_size_thresh=5,\n small_sample_size_method=\"std_quantiles\",\n small_sample_size_quantile=0.95,\n min_admissible_value=290.0,\n max_admissible_value=None)\n\n pred_df = predict_ci(\n new_df,\n ci_model)\n assert list(pred_df.columns) == [\"x\", \"y_quantile_summary\", ERR_STD_COL], (\n \"pred_df does not have the expected column names\")\n pred_df[\"y_quantile_summary\"] = pred_df[\"y_quantile_summary\"].apply(\n lambda x: tuple(round(e, 2) for e in x))\n assert pred_df[\"y_quantile_summary\"].values[5] == (290.0, 290.25, 292.54, 292.9), (\n \"quantiles are incorrect\")\n assert pred_df[\"y_quantile_summary\"].values[11] == (290.0, 290.0, 290.0, 290.0), (\n \"quantiles are incorrect\")", "def mean(self) -> 
float:\n points = np.concatenate(\n [\n [self.t_min],\n -np.logspace(-5, -1, 5)[::-1],\n np.logspace(-5, -1, 5),\n [self.t_max],\n ]\n )\n\n mean = 0.0\n for left, right in zip(points[:-1], points[1:]):\n integral, _ = integrate.quad(self.cdf, left, right, limit=500)\n mean += right * self.cdf(right) - left * self.cdf(left) - integral\n\n return mean", "def objective5EvalFunction(indivdual, test_data, truth_data, name=None):\r\n test_data = np.array(test_data).flatten()\r\n truth_data = np.array(truth_data).flatten()\r\n\r\n differences = abs(truth_data - test_data)\r\n percents = differences/truth_data\r\n return abs(np.mean(percents))" ]
[ "0.7058479", "0.6177722", "0.60270077", "0.601931", "0.58983326", "0.58310044", "0.57847005", "0.5780554", "0.5745698", "0.5712912", "0.5690209", "0.56681216", "0.56415606", "0.561999", "0.56135404", "0.56095594", "0.55160135", "0.54769456", "0.547189", "0.5418613", "0.5408574", "0.54020834", "0.53946763", "0.53661555", "0.5351085", "0.5350882", "0.5335048", "0.53155524", "0.52814585", "0.52402985", "0.5228487", "0.5176773", "0.5175243", "0.5153928", "0.5152537", "0.51418376", "0.51403004", "0.51357406", "0.5129243", "0.5128515", "0.5120254", "0.51084805", "0.50954217", "0.50782293", "0.5068805", "0.5065182", "0.5043127", "0.50178504", "0.50026715", "0.4998843", "0.4996242", "0.49936947", "0.49919158", "0.49883416", "0.49856654", "0.4982774", "0.4979691", "0.4977499", "0.49716604", "0.496926", "0.4958002", "0.4950587", "0.49389604", "0.49235433", "0.4918161", "0.49168813", "0.4896639", "0.48962846", "0.48698407", "0.48586044", "0.48569164", "0.48476082", "0.48353362", "0.48309565", "0.48297387", "0.48269036", "0.48269036", "0.48236793", "0.48076695", "0.47992626", "0.47894642", "0.47803548", "0.47748977", "0.47723815", "0.4766578", "0.47584566", "0.47522733", "0.4751068", "0.474591", "0.474591", "0.47404167", "0.47341537", "0.4726294", "0.4725713", "0.47217846", "0.47215068", "0.4721458", "0.47194958", "0.4710643", "0.47102848" ]
0.53884614
23
Find the pooled sample variance for two samples.
def pooled_sample_variance(sample1, sample2):
    deg_freedom = len(sample1) + len(sample2) - 2
    mean1 = statistics.mean(sample1)
    squares1 = ((x - mean1) ** 2 for x in sample1)
    mean2 = statistics.mean(sample2)
    squares2 = ((x - mean2) ** 2 for x in sample2)
    return (math.fsum(squares1) + math.fsum(squares2)) / float(deg_freedom)
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def variance(self):\r\n\t\t_mean = sum(self.sample)/len(self.sample)\r\n\t\treturn sum(map(lambda x: (x - _mean)**2, self.sample))/(len(self.sample) - 1)", "def variance( values, sample=False ):\n mean_val = mean_value( values )\n n_val = len( values ) -1 if sample else len( values )\n return sum( [ j**2 for j in [ i - mean_val for i in values ] ] ) / n_val", "def calc_variances(ds):\n if ds.size <= 1:\n print 'Fail: not enough items for calculation %d' % ds.size\n return 0,1\n obs_var = ((ds.storage - ds.storage.sum()/ds.size)**2).sum()/(ds.size-1)\n rep_var = ds.var.sum()/ds.size\n return obs_var,rep_var", "def variance(self, sample=True):\n distance_squared = list(map(lambda x: (x - sum(self.data)/self.size)**2, self.data))\n\n if sample == True:\n variance = sum(distance_squared)/(self.size - 1)\n if sample == False: \n variance = sum(distance_squared)/(self.size)\n return variance", "def std_meandiff_pooledvar(self):\n # this uses ``_var`` to use ddof=0 for formula\n\n d1 = self.d1\n d2 = self.d2\n # could make var_pooled into attribute\n var_pooled = (\n (d1.sumsquares + d2.sumsquares)\n /\n # (d1.nobs - d1.ddof + d2.nobs - d2.ddof))\n (d1.nobs - 1 + d2.nobs - 1)\n )\n return np.sqrt(var_pooled * (1.0 / d1.nobs + 1.0 / d2.nobs))", "def _variance(mean_variance, samples):\n mean = mean_variance[0] / samples\n variance = mean_variance[1]\n variance /= samples\n variance -= mean * mean\n return variance", "def test_two_pop_unknown_var_ind(data1_: tuple, data2_: tuple):\n x_bar = cls.get_mean(data1_)\n y_bar = cls.get_mean(data2_)\n var_pool = cls.get_var_pool(data1_, data2_)\n n_x = cls.get_n(data1_)\n n_y = cls.get_n(data2_)\n return (x_bar - y_bar) / sqrt(var_pool / n_x + var_pool / n_y)", "def compute_variance(\n self,\n parameters: NDArray,\n resids: NDArray,\n sigma2: NDArray,\n backcast: Union[float, NDArray],\n var_bounds: NDArray,\n ) -> NDArray:", "def test_variance(self):\n self.assertEqual(variance(list1, sample=False), np.var(list1))\n self.assertEqual(variance(list1), np.var(list1, ddof=1))", "def variance(self):\n observations_raw = input(\"Observations: \").split()\n observations = [int(elem) for elem in observations_raw]\n observations_squared = sum([num**2 for num in observations])\n aggregate_squared = sum(observations)**2\n n = len(observations)\n mean = sum(observations)/n\n variance = (observations_squared - (aggregate_squared/n))/(n-1)\n print(f\"Variance is: {variance}\")\n return variance, mean", "def variance(self):\n return 1 / self.count() * sum((number-self.average())**2 for number in self.numbers)", "def variance(L, is_sample=0):\n\tm = mean(L)\n\treturn sum((x-m)**2 for x in L) / (len(L) - is_sample)", "def get_var_pool(cls, data1: tuple, data2: tuple) -> float:\n cls._data_validation(data1)\n cls._data_validation(data2)\n n1 = cls.get_n(data1)\n var1 = cls.get_var(data1)\n n2 = cls.get_n(data2)\n var2 = cls.get_var(data2)\n return ((n1 - 1) * var1 + (n2 - 1) * var2) / (n1 + n2 - 2)", "def variance(self, mean=None):\n raise NotImplementedError", "def test_two_pop_known_var_ind(data1_: tuple, data2_: tuple):\n x_bar = cls.get_mean(data1_)\n y_bar = cls.get_mean(data2_)\n var_x = cls.get_var(data1_, is_population=True)\n var_y = cls.get_var(data2_, is_population=True)\n n_x = cls.get_n(data1_)\n n_y = cls.get_n(data2_)\n return (x_bar - y_bar) / sqrt(var_x / n_x + var_y / n_y)", "def sample_variance(self, x_dict={}):\n raise NotImplementedError()", "def variance(dataset):\n avg = sum(dataset)/len(dataset)\n v = 0.0\n for data in dataset:\n v += (data - avg) 
* (data - avg)\n v = v / len(dataset)\n return v", "def _variance(self, features):\n return np.mean(np.var(features.reshape((features.shape[0], -1)), axis=1))", "def variance_scorer(x, y):\n scores = [np.var(column) for column in x.T]\n return scores, np.array([np.NaN]*len(scores))", "def sampleVariance(numlist):\n\tsum1 = sum2 = 0.0\n\tn = 0.0\n\tfor x in numlist:\n\t\tassert isinstance(x, int) or isinstance(x, float)\n\t\tsum1 += x\n\t\tsum2 += x * x\n\t\tn += 1.0\n\tif n < 2.0:\n\t\treturn 0.0\n\tvar = (1.0/(n+1.0))*(sum2 - (1/n)*sum1*sum1)\n\tif var < 0.0: # Due to numerical problems only!\n\t\tvar = 0.0\n\treturn var", "def variance(data):\n differences = data - np.mean(data)\n diff_sq = differences ** 2\n variance = np.mean(diff_sq)\n\n return variance", "def variance(data):\n differences = data - np.mean(data)\n diff_sq = differences ** 2\n variance = np.mean(diff_sq)\n\n return variance", "def variance(values, weights=None, axis=0):\n \n average = np.average(values, weights=weights, axis=axis)\n variance = np.average((values-average)**2, weights=weights, axis=axis)\n return variance", "def variance(data, xbar=None):\n if iter(data) is data:\n data = list(data)\n data_len = len(data)\n if data_len < 2:\n raise StatisticsError('variance requires at least two data points')\n return _ss(data, xbar) / (data_len - 1)", "def _compute_covariance(self, lc1, lc2):\n return np.cov(lc1.counts, lc2.counts)[0][1]", "def explained_variance_score(y_true, y_pred, *, sample_weight=..., multioutput=...):\n ...", "def variance(self):\n sum_sqdif = 0 # initialize sum of squared differences\n # Calculate sum of squared differences\n for site in self.sites:\n sqdif = (site.siteZmArea - self.meanZmArea()) ** 2\n sum_sqdif = sqdif + sum_sqdif \n # Standard Deviation\n stddev = ((1 / ( float(self.ni) - 1 )) * sum_sqdif ) ** 0.5\n # Variance\n var = stddev ** 2\n return var", "def _variance(self,gp):\r\n return self.variance", "def get_population_variance(self):\n\t\treturn self.variables.get('population_variance')", "def variance(numbers, mean):\n variance = 0 # We will add to this value in a loop\n N = len(numbers)\n \n for i in numbers:\n\n # Operations follow typical BEDMAS\n variance += ((i - mean) * (i - mean))/N\n \n return variance", "def variance(data, m=None):\n n, ss = _SS(data, m)\n if n < 2:\n raise ValueError('sample variance or standard deviation'\n ' requires at least two data points')\n return ss/(n-1)", "def variance(self):\n return self.properties.get('variance')", "def _compute_variance_of_points(self, points_to_sample):\n\n\n\n _hyperparameters = self._covariance.get_hyperparameters()\n del_idx = [idx +1 for idx in self.idx]\n covariance = SquareExponential(numpy.delete(_hyperparameters, del_idx, 0))\n var_star = numpy.empty((points_to_sample.shape[0],points_to_sample.shape[0]))\n marginal = self._get_variance_aij_marginal()\n for i, point_one in enumerate(points_to_sample):\n for j, point_two in enumerate(points_to_sample):\n tmp_point_two = numpy.delete(point_two, self.idx, 0)\n tmp_point_one = numpy.delete(point_one, self.idx, 0)\n var_star[i,j] = covariance.covariance(tmp_point_one, tmp_point_two) * marginal\n\n K_star = self._build_integrated_covariance_maxtrix_variance(\n self._covariance,\n self._points_sampled,\n points_to_sample,\n )\n K_star_K_C_Inv_K_star = numpy.dot(numpy.dot(K_star.T, self._K_C), K_star)\n tmp = var_star - K_star_K_C_Inv_K_star *self._get_variance_bij_marginal()\n return tmp * self._get_average()", "def variance(operator, state):\n return 
(expectation(operator**2, state) - expectation(operator, state)**2)", "def variance(x):\r\n n = len(x)\r\n deviations = dev_mean(x)\r\n return sum_of_squares(deviations) / (n-1)", "def get_var(cls, data: tuple or list, is_population=False) -> float:\n cls._data_validation(data)\n mean = cls.get_mean(data)\n variance = float()\n n = cls.get_n(data)\n for each_item in data:\n variance += (each_item - mean) ** 2\n # Checks whether is a population or sample\n if is_population:\n variance = variance / n\n else:\n variance = variance / (n - 1)\n return float(variance)", "def variance(self):\n raise RuntimeError(\"Needs to be implemented in base class\")", "def pstdev(data):\n n = len(data)\n if n < 2:\n raise ValueError('variance requires at least two data points')\n ss = _ss(data)\n pvar = ss/n # the population variance\n return pvar**0.5", "def pstdev(data):\n n = len(data)\n if n < 2:\n raise ValueError('variance requires at least two data points')\n ss = _ss(data)\n pvar = ss/n # the population variance\n return pvar**0.5", "def pstdev(data):\n n = len(data)\n if n < 2:\n raise ValueError('variance requires at least two data points')\n ss = _ss(data)\n pvar = ss/n # the population variance\n return pvar**0.5", "def pstdev(data):\n n = len(data)\n if n < 2:\n raise ValueError('variance requires at least two data points')\n ss = _ss(data)\n pvar = ss/n # the population variance\n return pvar**0.5", "def pooled_standard_deviation(input_variances):\r\n # compute and return pooled standard deviation\r\n return sqrt(mean(square([float(i) for i in input_variances])))", "def variance(data, finite_sample_correction=True, **kwargs):\n return Component(\n \"Variance\",\n arguments={\n 'data': Component.of(data)\n },\n options={\n 'finite_sample_correction': finite_sample_correction\n },\n constraints=kwargs)", "def variance(x):\n n = len(x)\n deviations = de_mean(x)\n return sum_of_squares(deviations) / (n - 1)", "def variance(x):\n n = len(x)\n deviations = de_mean(x)\n return sum_of_squares(deviations) / (n - 1)", "def variance(x):\n n = len(x)\n deviations = de_mean(x)\n return sum_of_squares(deviations) / (n - 1)", "def calculate_variance(n, p):\n return p * (1 - p) / n", "def explained_variance(ypred,y):\n assert y.ndim == 1 and ypred.ndim == 1\n vary = np.var(y)\n return np.nan if vary==0 else 1 - np.var(y-ypred)/vary", "def update_mean_variance(self, n_past, mu, var, X, sample_weight=None):\n if X.shape[0] == 0:\n return mu, var\n\n # Compute (potentially weighted) mean and variance of new datapoints\n if sample_weight is not None:\n n_new = float(sample_weight.sum())\n new_mu = np.average(X, axis=0, weights=sample_weight / n_new)\n new_var = np.average((X - new_mu) ** 2, axis=0,\n weights=sample_weight / n_new)\n else:\n n_new = X.shape[0]\n new_var = np.var(X, axis=0)\n new_mu = np.mean(X, axis=0)\n\n if n_past == 0:\n return new_mu, new_var\n\n n_total = float(n_past + n_new)\n\n # Combine mean of old and new data, taking into consideration\n # (weighted) number of observations\n total_mu = (n_new * new_mu + n_past * mu) / n_total\n\n # Combine variance of old and new data, taking into consideration\n # (weighted) number of observations. 
This is achieved by combining\n # the sum-of-squared-differences (ssd)\n old_ssd = n_past * var\n new_ssd = n_new * new_var\n total_ssd = (old_ssd + new_ssd +\n (n_past / float(n_new * n_total)) *\n (n_new * mu - n_new * new_mu) ** 2)\n total_var = total_ssd / n_total\n\n return total_mu, total_var", "def GetVarianceOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterID2_GetVarianceOutput(self, *args)", "def variance(y, w):\n # w = clip_normalize(w)\n # Compute the expectance (d_y, n_q)\n y_q_exp = np.dot(y.T, w)\n\n # Compute the expectance of squares (d_y, n_q)\n y_q_exp_sq = np.dot((y ** 2).T, w)\n\n # Compute the variance (d_y, n_q)\n return y_q_exp_sq - (y_q_exp ** 2)", "def _compute_variance(params):\n batch_grad = self._fetch_batch_grad(params, aggregate=True)\n grad = self._fetch_grad(params, aggregate=True)\n batch_size = batch_grad.size(0)\n\n if self._use_double:\n batch_grad = batch_grad.double()\n grad = grad.double()\n\n return (1 / (batch_size - 1)) * ((batch_size * batch_grad - grad) ** 2).sum(\n 0\n )", "def dp_variance(data, lower=None, upper=None, mechanism=\"Automatic\", privacy_usage=None, finite_sample_correction=True, **kwargs):\n return Component(\n \"DPVariance\",\n arguments={\n 'data': Component.of(data),\n 'lower': Component.of(lower),\n 'upper': Component.of(upper)\n },\n options={\n 'mechanism': mechanism,\n 'privacy_usage': serialize_privacy_usage(privacy_usage),\n 'finite_sample_correction': finite_sample_correction\n },\n constraints=kwargs)", "def GetVarianceOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIF2_GetVarianceOutput(self, *args)", "def variance(y):\n \n # YOUR CODE HERE\n if len(y) == 0:\n return 0.\n \n return np.var(y)", "def pstdev(data):\n n = len(data)\n c = mean(data)\n ss = sum((x-c)**2 for x in data)\n if n < 2:\n raise ValueError('variance requires at least two data points')\n pvar = ss/n # the population variance\n return round(pvar**0.5, 1)", "def test_profiled_mean_and_variance(self):\n\n def mean(df):\n total = 0\n for item in df:\n total += item\n return total / len(df)\n\n def var(df):\n var = 0\n mean_df = mean(df)\n for item in df:\n var += (item - mean_df) ** 2\n return var / (len(df) - 1)\n\n def batch_variance(mean_a, var_a, count_a, mean_b, var_b, count_b):\n delta = mean_b - mean_a\n m_a = var_a * (count_a - 1)\n m_b = var_b * (count_b - 1)\n M2 = m_a + m_b + delta ** 2 * count_a * count_b / (\n count_a + count_b)\n return M2 / (count_a + count_b - 1)\n\n data = np.linspace(-5, 5, 11).tolist()\n df1 = pd.Series(data)\n\n data = np.linspace(-3, 2, 11).tolist()\n df2 = pd.Series(data)\n\n data = np.full((10,), 1)\n df3 = pd.Series(data)\n\n num_profiler = FloatColumn(df1.name)\n num_profiler.update(df1.apply(str))\n\n self.assertEqual(mean(df1), num_profiler.mean)\n self.assertEqual(var(df1), num_profiler.variance)\n self.assertEqual(np.sqrt(var(df1)), num_profiler.stddev)\n\n variance = batch_variance(\n mean_a=num_profiler.mean, var_a=num_profiler.variance,\n count_a=num_profiler.match_count,\n mean_b=mean(df2), var_b=var(df2), count_b=df2.count()\n )\n num_profiler.update(df2.apply(str))\n df = pd.concat([df1, df2])\n self.assertEqual(mean(df), num_profiler.mean)\n self.assertEqual(variance, num_profiler.variance)\n self.assertEqual(np.sqrt(variance), num_profiler.stddev)\n\n variance = batch_variance(\n mean_a=num_profiler.mean, var_a=num_profiler.variance,\n count_a=num_profiler.match_count,\n mean_b=mean(df3), var_b=var(df3), count_b=df3.count()\n 
)\n num_profiler.update(df3.apply(str))\n\n df = pd.concat([df1, df2, df3])\n self.assertEqual(mean(df), num_profiler.mean)\n self.assertEqual(variance, num_profiler.variance)\n self.assertEqual(np.sqrt(variance), num_profiler.stddev)", "def variance(self):\n return (math.exp(self.sigma ** 2) - 1.0) \\\n * math.exp(2.0 * self.mu + self.sigma ** 2)", "def f_test_var(data1,data2):\n var1, var2 = np.var(data1,ddof = 1),np.var(data2,ddof = 1)\t# compute variance\n df1, df2, = len(data1) - 1, len(data2) - 1\t\t# compute degrees of freedom\n if var1 > var2:\n\tprob = 2. * f.cdf(var1/var2,df1,df2)\n else:\n\tprob = 2. * f.cdf(var2/var1,df2,df1)\n if prob > 1.:\n\treturn 2. - prob\n else:\n\treturn prob", "def variance(x):\n \"\"\" note - why n-1?: since we are likely looking at a sample, x_bar is only an\n estimate of the actual mean, which means that on average (x_i - x_bar) ** 2\n is an underestimate of x_i's squared deviation from the mean, which is why\n we divide by n-1 instead of n (see bit.ly/lL2EapI)\"\"\"\n n = len(x)\n deviations = deviations_from_mean(x)\n return sum_of_squares(deviations) / (n - 1)", "def explained_variance(returns, values):\n exp_var = 1 - torch.var(returns - values) / torch.var(returns)\n return exp_var.item()", "def GetVarianceOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUL2_GetVarianceOutput(self, *args)", "def get_mean_and_variance(self):\n self._set_statistics()\n return self.statistics_object.get_mean(), self.statistics_object.get_variance()", "def explained_variance(ypred, y):\n assert y.ndim == 1 and ypred.ndim == 1\n vary = np.var(y)\n return np.nan if vary == 0 else 1 - np.var(y-ypred)/vary", "def _effect_size(_sample_a, _sample_b):\n mean_a = np.mean(_sample_a)\n mean_b = np.mean(_sample_b)\n std_a = np.std(_sample_a)\n std_b = np.std(_sample_b)\n std_pooled = np.sqrt((np.square(std_a) + np.square(std_b)) / 2)\n print('Effect size: {}'.format((mean_a - mean_b) / std_pooled))", "def var(x, axis=None):\r\n x = asarray(x)\r\n # figure out sample size along the axis\r\n if axis is None:\r\n n = x.size\r\n else:\r\n n = x.shape[axis]\r\n # compute the sum of squares from the mean(s)\r\n sample_SS = sum(x ** 2, axis) - sum(x, axis) ** 2 / n\r\n return sample_SS / (n - 1)", "def variance(xs: List[float]) -> float:\n assert len(xs) >= 2, \"variance requires at least two elements\"\n\n n = len(xs)\n deviations = de_mean(xs)\n return sum_of_squares(deviations) / (n - 1)", "def variance(xs: List[float]) -> float:\n assert len(xs) >= 2, \"variance requires at least two elements\"\n\n n = len(xs)\n deviations = de_mean(xs)\n return sum_of_squares(deviations) / (n - 1)", "def _derive_variance_(self):\n # Pure Photon Noise\n self._properties[\"var\"] = np.sqrt(self.rawdata*self.exposuretime) / self.exposuretime", "def test_calculate_variance_covariance(self):\n\n _var_covar = calculate_variance_covariance(22, 620.0, 0.4239, 0.6142)\n self.assertAlmostEqual(_var_covar[0][0], 0.1351777)\n self.assertAlmostEqual(_var_covar[0][1], -0.04660735)\n self.assertAlmostEqual(_var_covar[1][0], -0.04660735)\n self.assertAlmostEqual(_var_covar[1][1], 0.01710296)\n self.assertEqual(_var_covar[0][1], _var_covar[1][0])", "def variance(self):\n if self.dirty:\n self._finalize()\n return self.vvar", "def variance(self):\n return self.sigma", "def conditional_variance(self, gp):\n raise NotImplementedError", "def _variance(self, sums, squares):\n total_count = tf.slice(sums, [0, 0], [-1, 1])\n e_x = sums / total_count\n e_x2 = squares / 
total_count\n\n return tf.reduce_sum(e_x2 - tf.square(e_x), 1)", "def dif_std_for_region(var1,var2,mask):\r\n\t\t\r\n\t\tdif = np.nansum(np.nansum(np.multiply(mask,np.nanmean(var1,axis=0) - np.nanmean(var2,axis=0)),axis=1),axis=0)\r\n\t\tvar1_domain_mean = np.nansum(np.nansum(np.multiply(mask,var1),axis=2),axis=1)\r\n\t\tvar2_domain_mean = np.nansum(np.nansum(np.multiply(mask,var2),axis=2),axis=1)\r\n\t\tstd = np.std(var1_domain_mean - var2_domain_mean);print std\r\n\t\tp25 = np.abs(dif - np.nanpercentile(var1_domain_mean - var2_domain_mean,25,axis=0))/1.25\r\n\t\tp75 = np.abs(np.nanpercentile(var1_domain_mean - var2_domain_mean,75,axis=0) - dif)/1.25\r\n\t\t# print dif, p25,p75\r\n\t\treturn dif,p25,p75", "def getVariance(self):\n return self.__variance", "def gaussian_process_pointwise_variance(kernel, pred_samples, train_samples,\n nugget = 0):\n K_train = kernel(train_samples.T)\n # add small number to diagonal to ensure covariance matrix is\n # positive definite\n ntrain_samples = train_samples.shape[1]\n K_train[np.arange(ntrain_samples), np.arange(ntrain_samples)] += nugget\n k_pred = kernel(train_samples.T, pred_samples.T)\n L = np.linalg.cholesky(K_train)\n tmp = solve_triangular(L, k_pred, lower=True)\n variance = kernel.diag(pred_samples.T) - np.sum(tmp*tmp, axis=0)\n return variance", "def variance(num_energies, num_samples):\n fixed_header = (\n 1*8 # SSID\n + 4*8 # SCET Coarse time\n + 2*8 # SCET Fine time\n + 2*8 # Integration time\n + 1*8 # Samples per variance\n + 4*8 # Detector mask\n + 4*8 # Energy mask\n + 4 # Spare\n + 12 # Pixel mask\n + 1 # Spare\n + 1 # Comp Schema variance S\n + 3 # Comp Schema variance K\n + 3 # Comp Schema variance M\n + 2*8 # Number of data points\n )\n\n variable = (\n num_samples*1*8. # Number data points\n )\n\n return fixed_header, variable", "def variance_selection(X):\n try:\n selector = VarianceThreshold()\n X = selector.fit_transform(X)\n return X\n\n except ValueError:\n return 0", "def GetVariance(self, label: 'short') -> \"double\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF2ISS2_GetVariance(self, label)", "def calculate_covariance(column1: pd.Series, column2: pd.Series) -> np.float64:\n\n cov = column1.cov(column2)\n return cov", "def _variance(self,gp):\r\n return self.gp_link.transf(gp)**2", "def variance(df, cols, dummy_col, generated_feature_name, params=None): \n group_cols = cols[:-1]\n calc_col = cols[-1]\n group = df[cols].groupby(by=group_cols)[[calc_col]].var().reset_index().rename(index=str, columns={calc_col: generated_feature_name}).fillna(0)\n dtype = {x: df[x].dtype for x in group_cols if x in df.columns.values}\n dtype[generated_feature_name] = utils.set_type(group[generated_feature_name], 'float')\n _df = df.merge(group.astype(dtype), on=group_cols, how='left')\n r = _df[[generated_feature_name]].copy()\n del dtype, _df, group\n gc.collect()\n module_logger.debug('feature generated: {}'.format(generated_feature_name))\n return r", "def GetVarianceOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUC2_GetVarianceOutput(self, *args)", "def tscore(sample1, sample2):\n if len(sample1) != len(sample2):\n raise ValueError(\"different number of values\")\n error = pooled_sample_variance(sample1, sample2) / len(sample1)\n diff = statistics.mean(sample1) - statistics.mean(sample2)\n return diff / math.sqrt(error * 2)", "def binomVariance(n, p):\r\n variance_of_Y = p * (1- p)\r\n variance_of_X = n * variance_of_Y \r\n print(\"variance of X = \", 
variance_of_X)\r\n return variance_of_X", "def GetVarianceOutput(self, *args):\n return _itkStatisticsImageFilterPython.itkStatisticsImageFilterIUS2_GetVarianceOutput(self, *args)", "def variance(self):\n return self.k * self.theta ** 2", "def var(self):\n return self._reduce_for_stat_function(F.variance, only_numeric=True)", "def variance(self):\n clean, total = self._prepare_for_stats()\n if not total:\n return None\n\n mean = self.mean()\n weighted_central_moment = sum(\n count * (value - mean) ** 2 for value, count in clean.items()\n )\n return weighted_central_moment / total", "def test_5_scalar_variance_1step(self):\n print(\"test 5 comparing variances\")\n\n means, vars, cl_probs = EM_step(\n self.X_h, self.means_h, self.dispersions_h, self.cluster_probabilities_h\n )\n\n self.assertEqual(means.shape[0], 2)\n\n print(vars[0], vars[1])", "def GetVariance(self, label: 'unsigned short') -> \"double\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterIF2IUS2_GetVariance(self, label)", "def test_coeffvar(self):\n self.assertEqual(coeffvar(list1, sample=False), np.std(list1) /\n np.mean(list1))\n self.assertEqual(coeffvar(list1), np.std(list1, ddof=1) /\n np.mean(list1))", "def variance(X, C):\r\n if ((type(X) is not np.ndarray or X.ndim != 2 or\r\n type(C) is not np.ndarray or C.ndim != 2)):\r\n return None\r\n try:\r\n return (np.square(np.apply_along_axis(np.subtract, 1, X, C))\r\n .sum(axis=2).min(axis=1).sum())\r\n except Exception:\r\n return None", "def B2_variance(oldlist, newlist):\n NL, NR = mps_null_spaces(oldlist)\n AL, C, AR = newlist\n AC = ct.rightmult(AL, C)\n L = ct.XopL(AC, B=NL)\n R = ct.XopR(AR, B=NR)\n B2_tensor = L @ R.T\n B2 = norm(B2_tensor)\n return B2", "def get_population_variance(iterable):\n mean = get_mean(iterable)\n squares_of_differences = [(value - mean) ** 2 for value in iterable]\n return get_mean(squares_of_differences)", "def get_var(df=songs_df):\n n_years = len(years)\n n_songs = len(songs_df['page'])\n variances = np.zeros((n_songs, n_songs))\n annual_diffs = np.zeros((n_songs, n_songs, n_years))\n\n # Figure out how to just get upper/lower triangle rather than populating w dups\n for s1 in range(n_songs):\n for s2 in range(n_songs):\n s1_ranks = songs_df['ranks'][s1]\n s2_ranks = songs_df['ranks'][s2]\n\n # Set up an offset/normalizer so that we're just looking at\n # functional form, not call totals. Maybe do this as a frac instead.\n offset = s1_ranks[0] - s2_ranks[0]\n\n annual_difference = [s1_ranks[year] - s2_ranks[year] - offset for year in range(n_years)]\n variance = sum( (annual_difference - np.mean(annual_difference))**2)/float(n_years)\n\n variances[s1][s2] = variance\n annual_diffs[s1][s2] = annual_difference\n\n\n mask = np.zeros_like(variances)\n mask[np.triu_indices_from(mask)] = True\n corr_matrix=variances.corr()\n\n sns.heatmap(variances, mask=mask) #, vmin=510, vmax=530)\n plt.show()\n return variances", "def GetVariance(self, label: 'short') -> \"double\":\n return _itkLabelStatisticsImageFilterPython.itkLabelStatisticsImageFilterISS2ISS2_GetVariance(self, label)", "def variance(l):\n m = mean(l)\n dif = 0\n for x in l:\n dif += (m-x)**2\n return dif/len(l)", "def sample_mean_var_unbiased(x):\n n = len(x)\n assert(n > 0)\n if n == 1:\n return x[0], float('Inf')\n mean, v = sample_mean_var_ml(x)\n var = v*n/(n-1)\n return mean, var" ]
[ "0.69535667", "0.68496686", "0.67732096", "0.6738547", "0.67180645", "0.65582496", "0.6516183", "0.64949", "0.6486372", "0.6415831", "0.6393349", "0.639173", "0.63470465", "0.63140863", "0.6311792", "0.6310086", "0.6282926", "0.62156796", "0.62076354", "0.61959153", "0.60444593", "0.60444593", "0.60425395", "0.60338295", "0.6019867", "0.5990985", "0.5990547", "0.59890926", "0.5965683", "0.5951495", "0.592959", "0.5916442", "0.5886782", "0.5848198", "0.5826464", "0.5816589", "0.58161813", "0.58110595", "0.58110595", "0.58110595", "0.58110595", "0.5797695", "0.5794474", "0.57816786", "0.57816786", "0.57816786", "0.5755851", "0.575067", "0.57448655", "0.5737364", "0.56960547", "0.5695897", "0.5691046", "0.5689722", "0.5686878", "0.56854373", "0.5682788", "0.5671539", "0.56597596", "0.56564623", "0.56530493", "0.5652693", "0.5638171", "0.5636918", "0.5633506", "0.56263566", "0.5626086", "0.5626086", "0.5619353", "0.5612713", "0.56100667", "0.56035733", "0.56025815", "0.5598112", "0.5584035", "0.55778533", "0.5572299", "0.5566461", "0.55632055", "0.5562592", "0.5554201", "0.5550047", "0.5548731", "0.5545337", "0.55379665", "0.55348366", "0.55325675", "0.55294764", "0.5527714", "0.5526384", "0.552545", "0.5523943", "0.55121845", "0.549667", "0.5495384", "0.5489946", "0.54898936", "0.5489242", "0.5484684", "0.547359" ]
0.80888563
0
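A minimal, self-contained usage sketch of the pooled_sample_variance record above (editor-added illustration, not a dataset field; it assumes the standard-library math and statistics imports that the original module would provide, and the sample values are invented for the example):

import math
import statistics

def pooled_sample_variance(sample1, sample2):
    # Pooled variance: combined sum of squared deviations over the
    # combined degrees of freedom (n1 + n2 - 2).
    deg_freedom = len(sample1) + len(sample2) - 2
    mean1 = statistics.mean(sample1)
    squares1 = ((x - mean1) ** 2 for x in sample1)
    mean2 = statistics.mean(sample2)
    squares2 = ((x - mean2) ** 2 for x in sample2)
    return (math.fsum(squares1) + math.fsum(squares2)) / float(deg_freedom)

# Two samples of three points each, both with sum of squares 0.32,
# so the pooled variance is (0.32 + 0.32) / 4 = 0.16.
print(pooled_sample_variance([2.1, 2.5, 2.9], [3.0, 3.4, 3.8]))  # 0.16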
Calculate a ttest score for the difference between two samples.
def tscore(sample1, sample2):
    if len(sample1) != len(sample2):
        raise ValueError("different number of values")
    error = pooled_sample_variance(sample1, sample2) / len(sample1)
    diff = statistics.mean(sample1) - statistics.mean(sample2)
    return diff / math.sqrt(error * 2)
{
  "objective": {
    "self": [],
    "paired": [],
    "triplet": [
      [
        "query",
        "document",
        "negatives"
      ]
    ]
  }
}
[ "def ttest(array1, array2):\n diff = np.mean(array1) - np.mean(array2)\n if diff < c.cart_p60:\n return c.low_score\n if array1.size <= 1 or array2.size <= 1:\n return min(diff, c.single_item_cart_max)\n return 1 - ttest_ind(array1, array2, equal_var=False).pvalue\n # return diff", "def _t_test(_sample_a, _sample_b):\n res = stats.ttest_ind(_sample_a, _sample_b, axis=0, equal_var=equal_var, nan_policy='propagate')\n print('Independent t-test\\nt-statistic: {}\\np-value: {}'.format(res[0], res[1]))\n print('-' * 10)", "def calculate_t_test(mean1, mean2, var1, var2, n1, n2, alpha):\n # Two Sample T Test (M0 == M1) (Two Tails)\n t = (mean1 - mean2) / sqrt((var1 / n1) + (var2 / n2)) # t statistic calculation for two sample\n df = n1 + n2 - 2 # degree of freedom for two sample t - set\n pval = 1 - stats.t.sf(np.abs(t), df) * 2 # two-sided pvalue = Prob(abs(t)>tt) # p - value\n cv = stats.t.ppf(1 - (alpha / 2), df)\n standart_error = cv * sqrt((var1 / n1) + (var2 / n2))\n confidence_intervals = [abs(mean1 - mean2) - standart_error, abs(mean1 - mean2) + standart_error, standart_error]\n acception = 'HO REJECTED!' if pval < (alpha / 2) else 'HO ACCEPTED!' # left tail\n acception = 'HO REJECTED!' if pval > 1 - (alpha / 2) else 'HO ACCEPTED!' # right tail\n return pval, confidence_intervals, acception", "def eeg_twosample_ttest(array1,array2):\t\n\tfrom scipy.stats import ttest_rel\n\ts1 = array1.shape\n\tp = np.zeros(s1[1])\n\tt = np.zeros(s1[1])\n\tfor i in range(s1[1]):\n\t\ttval,pval = ttest_rel(array1[:,i],array2[:,i])\n\t\tp[i]=pval\n\t\tt[i]=tval\n\t\t\n\treturn t,p", "def TestStatistic(self, data):\n group1, group2 = data\n test_stat = abs(group1.mean() - group2.mean())\n return test_stat", "def ttest_ind_corrected(performance_a, performance_b, k=10, r=10):\n df = k * r - 1\n\n x = performance_a - performance_b\n m = np.mean(x)\n\n sigma_2 = np.var(x, ddof=1)\n denom = np.sqrt((1 / k * r + 1 / (k - 1)) * sigma_2)\n\n with np.errstate(divide='ignore', invalid='ignore'):\n t = np.divide(m, denom)\n\n prob = stats.t.sf(np.abs(t), df) * 2\n\n return t, prob", "def t_test1(data1,data2):\n if not isinstance(data1,np.ndarray):\n\tdata1 = np.array(data1)\n if not isinstance(data2,np.ndarray):\n\tdata2 = np.array(data2)\n\n N1, N2 = len(data1), len(data2)\n mean1, mean2 = np.mean(data1), np.mean(data2)\n # Eq. 14.2.1\n sD = np.sqrt( (np.sum( (data1 - np.ones(N1) * mean1) ** 2.) + np.sum( (data2 - np.ones(N2) * mean2) ** 2.)) / (N1 + N2 - 2.) * (1./N1 + 1./N2))\n T = (mean1 - mean2) / sD\n return t.cdf(T, N1 + N2 - 2),T,N1 + N2 - 2", "def t_test2(data1,data2):\n N1, N2 = len(data1), len(data2)\n mean1, mean2 = np.mean(data1), np.mean(data2)\n var1, var2= np.var(data1,ddof = 1), np.var(data2,ddof = 1)\n\n T = (mean1 - mean2) / np.sqrt(var1/N1 + var2/N2)\t# Eq. 14.2.3\n df = (var1/N1 + var2/N2)**2. 
/ ( (var1/N1)**2./(N1 - 1) + (var2/N2)**2./(N2 - 1))\n return t.cdf(T, df), T, df", "def t_tests(self):\n se = self.se()\n t = self._coef / se\n p = 2 * stats.distributions.t.sf(np.abs(t), self._rdf)\n return (t, p)", "def ttest_review(sample_1, sample_2, alpha=.05):\n\n result = stats.ttest_ind(sample_1, sample_2)\n crit_val, p_val = result\n \n ## Creating interpretation based on p-value results.\n\n if p_val < .05:\n print(f'The feature is statistically significant with a p-value of {p_val}.')\n\n else:\n print(f'The feature is not statistically significant with a p-value of {p_val}.')\n \n return p_val", "def run_welchs_ttest(stat1, stat2, alpha, faster):\n m1 = stat1[MEAN]\n m2 = stat2[MEAN]\n\n s1 = stat1[STDDEV]\n s2 = stat2[STDDEV]\n\n n1 = stat1[ROUNDS]\n n2 = stat2[ROUNDS]\n\n df1 = n1 - 1 # degree of freedom of stat1\n df2 = n2 - 1 # degree of freedom of stat2\n\n sample_v1 = s1**2 / n1 # biased estimated sample variance of stat1\n sample_v2 = s2**2 / n2 # biased estimated sample variance of stat2\n\n biased_variance = np.sqrt(sample_v1 + sample_v2)\n # degree of freedom\n df = (sample_v1 + sample_v2) ** 2 / (\n sample_v1**2 / (df1) + sample_v2**2 / (df2)\n )\n\n mean_delta = m1 - m2\n t_stat = mean_delta / biased_variance\n\n if faster:\n # Null hypothesis is stat1 >= stat2.\n # Alternative hypothesis is stat1 < stat2.\n p_value = t.cdf(t_stat, df)\n\n # Compute one sided confidence interval (-inf, x)\n upper_bound = mean_delta + t.ppf(1.0 - alpha, df) * biased_variance\n upper_bound = format(upper_bound, \".5f\")\n lower_bound = \"-inf\"\n else:\n # Null hypothesis is stat1 <= stat2.\n # Alternative hypothesis is stat1 > stat2.\n p_value = 1.0 - t.cdf(t_stat, df)\n\n # Compute one sided confidence interval (x, inf)\n upper_bound = \"inf\"\n lower_bound = mean_delta + t.ppf(alpha, df) * biased_variance\n lower_bound = format(lower_bound, \".5f\")\n\n return TTestResult(\n p_value=p_value,\n t_stat=t_stat,\n lower_bound=lower_bound,\n upper_bound=upper_bound,\n mean_delta=format(mean_delta, \".5f\"),\n )", "def lttest_rel (a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a'):\r\n if len(a)<>len(b):\r\n raise ValueError, 'Unequal length lists in ttest_rel.'\r\n x1 = mean(a)\r\n x2 = mean(b)\r\n v1 = var(a)\r\n v2 = var(b)\r\n n = len(a)\r\n cov = 0\r\n for i in range(len(a)):\r\n cov = cov + (a[i]-x1) * (b[i]-x2)\r\n df = n-1\r\n cov = cov / float(df)\r\n sd = math.sqrt((v1+v2 - 2.0*cov)/float(n))\r\n t = (x1-x2)/sd\r\n prob = betai(0.5*df,0.5,df/(df+t*t))\r\n\r\n if printit <> 0:\r\n statname = 'Related samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n,x1,v1,min(a),max(a),\r\n name2,n,x2,v2,min(b),max(b),\r\n statname,t,prob)\r\n return t, prob", "def test_model_outcome(predicted, actual, planned):\n if not isinstance(predicted, pd.DataFrame):\n predicted = pd.DataFrame(predicted, columns=[\"PREDICTED_TRIP_DURATION\"])\n if not isinstance(actual, pd.DataFrame):\n actual = pd.DataFrame(actual, columns=[\"ACTUAL_TRIP_DURATION\"])\n if not isinstance(planned, pd.DataFrame):\n planned = pd.DataFrame(planned, columns=[\"PLANNED_TRIP_DURATION\"])\n # Initialise the combined dataframe\n combined = pd.concat([predicted, actual, planned], axis=1)\n # Calculate the actual delay\n actual_delay = combined[\"PLANNED_TRIP_DURATION\"] - combined[\"ACTUAL_TRIP_DURATION\"]\n # Calculate the predicted delay\n predicted_delay = combined[\"PLANNED_TRIP_DURATION\"] - combined[\"PREDICTED_TRIP_DURATION\"]\n # Calculate the difference in delay\n delay_diff = actual_delay - 
predicted_delay\n # Combine the delays into a single dataframe\n combined_delay = pd.concat([pd.DataFrame(actual_delay, columns=['Actual_Delay']),\n pd.DataFrame(predicted_delay, columns=['Predicted_Delay']),\n pd.DataFrame(delay_diff, columns=['Difference_In_Delay'])], axis=1)\n # Obtain the index of the max and min values of the actual, predicted and difference delays\n actual_max_index = combined_delay[\"Actual_Delay\"].argmax()\n actual_min_index = combined_delay[\"Actual_Delay\"].argmin()\n predicted_max_index = combined_delay[\"Predicted_Delay\"].argmax()\n predicted_min_index = combined_delay[\"Predicted_Delay\"].argmin()\n delay_diff_max_index = combined_delay[\"Difference_In_Delay\"].argmax()\n delay_diff_min_index = combined_delay[\"Difference_In_Delay\"].argmin()\n # Get the Mean Absolute Error\n MAE = metrics.mean_absolute_error(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"])\n # Get the R2 Score\n R2 = metrics.r2_score(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"])\n # Get the Root Mean Squared Error\n RMSE = metrics.mean_squared_error(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"],\n squared=False)\n # Get the Median Absolute Error\n MEDAE = metrics.median_absolute_error(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"])\n # Get the Mean Squared Error Log Value\n MSLE = metrics.mean_squared_log_error(combined[\"ACTUAL_TRIP_DURATION\"], combined[\"PREDICTED_TRIP_DURATION\"])\n # Build Dictionary\n pass_val = {\"combined\": combined,\n \"combined_delay\": combined_delay,\n \"actual_max_index\": actual_max_index,\n \"actual_min_index\": actual_min_index,\n \"predicted_max_index\": predicted_max_index,\n \"predicted_min_index\": predicted_min_index,\n \"delay_diff_max_index\": delay_diff_max_index,\n \"delay_diff_min_index\": delay_diff_min_index,\n \"MAE\": MAE,\n \"R2\": R2,\n \"MEDAE\": MEDAE,\n \"RMSE\": RMSE,\n \"MSLE\": MSLE}\n # Return Dictionary\n return pass_val", "def score(self, X_test, y_test):\n correct = []\n for one in X_test:\n correct.append(self.predict(one))\n try:\n return sum(0 if correct[i] != y_test[i] else 1 for i in range(len(X_test))) / len(\n X_test\n )\n except ZeroDivisionError:\n pass", "def t_test(sample1, sample2, paired=False, alpha=0.05,\n alternative='two-sided', correction='auto', r=0.707,\n show_graph=True, **kwargs):\n confidence = 1 - alpha\n df_result = pg.ttest(\n sample1,\n sample2,\n paired=paired,\n confidence=confidence,\n alternative=alternative,\n correction=correction,\n r=r\n )\n if show_graph:\n if paired:\n difference = [x - y for x, y in zip(sample1, sample2)]\n Visualization.histogram(difference, **kwargs)\n else:\n Visualization.density_plot(sample1, sample2,\n fig_size=(5, 4), **kwargs)\n return HypothesisTester.define_hypothesis(df_result, 'mean',\n alternative, paired,\n alpha).T", "def baseline_score(self,t0,t1):\n return len(set(t0) & set(t1))/len(set(t0).union(set(t1)))", "def t_test(result, reference):\n \n # Check that result and reference are 1D and that they have the same length\n \n print('\\nChecking that result and reference are 1D and that they have the same length\\n')\n \n if (len(result.shape) == 1) and (len(reference.shape) == 1):\n \n if len(result) == len(reference):\n \n print('Performing t test\\n')\n \n t_stat, p_value = scipy.stats.ttest_ind(result, reference)\n \n print('t test completed successfully!\\n')\n \n print('t statistic: {} // p value: {}'.format(t_stat, p_value))\n \n return t_stat, 
p_value\n \n else:\n \n print('Result and reference vectors do not have the same length. Please input them so that they have the same length')\n \n else:\n \n print('Result or reference vectors are not 1D. Please reformat them to be 1D')", "def score(self, X_test, y_test):\r\n counter = 0\r\n sr = self.predict(X_test)\r\n for i in range(len(y_test)):\r\n if sr[i] == y_test[i]:\r\n counter += 1\r\n return counter / len(y_test)\r\n pass", "def TestStatistic(self, data):\n group1, group2 = data\n n1, n2 = len(group1), len(group2)\n pred1 = [i/n1 for i in range(n1, 0, -1)] \n pred2 = [i/n2 for i in range(n2, 0, -1)] \n test_stat = abs(\n roc_auc_score(group1, pred1) \n - roc_auc_score(group2, pred2)\n )\n return test_stat", "def score(self, test_data):\n\n\t\tpass", "def attest_rel (a,b,dimension=None,printit=0,name1='Samp1',name2='Samp2',writemode='a'):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n b = N.ravel(b)\r\n dimension = 0\r\n if len(a)<>len(b):\r\n raise ValueError, 'Unequal length arrays.'\r\n x1 = amean(a,dimension)\r\n x2 = amean(b,dimension)\r\n v1 = avar(a,dimension)\r\n v2 = avar(b,dimension)\r\n n = a.shape[dimension]\r\n df = float(n-1)\r\n d = (a-b).astype('d')\r\n\r\n denom = N.sqrt((n*N.add.reduce(d*d,dimension) - N.add.reduce(d,dimension)**2) /df)\r\n zerodivproblem = N.equal(denom,0)\r\n denom = N.where(zerodivproblem,1,denom) # avoid zero-division in 1st place\r\n t = N.add.reduce(d,dimension) / denom # N-D COMPUTATION HERE!!!!!!\r\n t = N.where(zerodivproblem,1.0,t) # replace NaN/wrong t-values with 1.0\r\n probs = abetai(0.5*df,0.5,float(df)/(df+t*t))\r\n if type(t) == N.ndarray:\r\n probs = N.reshape(probs,t.shape)\r\n if probs.shape == (1,):\r\n probs = probs[0]\r\n\r\n if printit <> 0:\r\n statname = 'Related samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n,x1,v1,N.minimum.reduce(N.ravel(a)),\r\n N.maximum.reduce(N.ravel(a)),\r\n name2,n,x2,v2,N.minimum.reduce(N.ravel(b)),\r\n N.maximum.reduce(N.ravel(b)),\r\n statname,t,probs)\r\n return\r\n return t, probs", "def ttest_two_sided(arr1, arr2, alpha=0.05, verbose=False):\n res = stats.ttest_ind(arr1, arr2)\n if res[1] <= alpha:\n if verbose: print(\n f'P-value = {round(res[1], 3)}, i.e. at alpha={alpha} the samples are significantly DIFFERENT')\n is_significant = True\n else:\n if verbose: print(f'P-value = {round(res[1], 3)}, i.e. 
at alpha={alpha} the samples are from the SAME set')\n is_significant = False\n return res[1], is_significant", "def elapsed_time_for_test(self, test_class, test_name, end_time):\n if test_class is None or test_name is None:\n return -2.0\n\n test_key = \"{}.{}\".format(test_class, test_name)\n if test_key not in self.start_time_by_test:\n return -1.0\n else:\n start_time = self.start_time_by_test[test_key]\n del self.start_time_by_test[test_key]\n return end_time - start_time", "def league_ttest(df_league_one: pd.DataFrame, df_league_two: pd.DataFrame, parameter: str, alpha: float, ):\n assert isinstance(df_league_one, pd.DataFrame), 'df_league_one needs to be a pandas dataframe.'\n assert isinstance(df_league_two, pd.DataFrame), 'df_league_two needs to be a pandas dataframe.'\n assert isinstance(alpha, float), 'alpha needs to be a float.'\n\n\n df_league_one_mean = df_league_one.mean()\n n = len(df_league_one['club'])\n df = n-1\n t_critical = stats.t.ppf(1-alpha, df)\n leagues_ttest = stats.ttest_1samp(a= df_league_two[f'{parameter}'], popmean= df_league_one_mean)\n t_value = leagues_ttest[0]\n p_value = leagues_ttest[1]\n\n stats_values = {}\n\n stats_values['p_value'] = round(list(p_value)[0], 4)\n\n if stats_values['p_value'] < alpha:\n return ('Enough evidence to reject null hypothesis')\n elif stats_values['p_value'] > alpha:\n return ('Not enough evidence to reject null hypothesis')", "def t_test(dataType):\n\n\t# read the data\n\tparser = ExperimentUtils()\n\tdata = parser.parse_data(dataType)\n\n\tN = len(data.keys()) # number participants\n\n\t# - for trial 1 and trial 2:\n\t# \tL2 norm over each timestep, then sum all the values together\n\t# - average over two trials for each participant \n\ttask_avgs = {}\n\n\t# participant ID can take values 0 - 9\n\tfor ID in data.keys():\n\t\tfor task in data[ID]:\n\t\t\t# dont include the familiarization task (task can take values 1,2,3)\n\t\t\tif task != 0:\n\t\t\t\tif task not in task_avgs:\n\t\t\t\t\ttask_avgs[task] = {}\n\t\t\t\t\ttask_avgs[task][\"A\"] = np.array([0.0]*N)\n\t\t\t\t\ttask_avgs[task][\"B\"] = np.array([0.0]*N)\n\n\t\t\t\ttrialAsum = [0.0,0.0]\n\t\t\t\ttrialBsum = [0.0,0.0]\n\t\t\t\t# trial can take values 1 or 2\n\t\t\t\tfor trial in data[ID][task]:\n\t\t\t\t\t# only compute metrics over data, not timestamps\n\t\t\t\t\tAdata = data[ID][task][trial]['A'][1:8]\n\t\t\t\t\tBdata = data[ID][task][trial]['B'][1:8]\n\t\t\t\n\t\t\t\t\t#print str(ID)+str(task)+str(trial)+\"A\"\n\t\t\t\t\t#print \"Adata: \" + str(Adata)\n\t\t\t\t\t#print str(ID)+str(task)+str(trial)+\"B\"\n\t\t\t\t\t#print \"Bdata: \" + str(Bdata)\n\n\t\t\t\t\t(h, w) = np.shape(Adata)\n\t\t\t\t\tfor i in range(w):\n\t\t\t\t\t\ttrialAsum[trial-1] += np.linalg.norm(Adata[:,i])\n\t\t\t\t\t(h, w) = np.shape(Bdata)\n\t\t\t\t\tfor i in range(w):\n\t\t\t\t\t\ttrialBsum[trial-1] += np.linalg.norm(Bdata[:,i])\n\t\t\t\tavg_methodA = (trialAsum[0]+trialAsum[1])/2.0\n\t\t\t\tavg_methodB = (trialBsum[0]+trialBsum[1])/2.0\n\n\t\t\t\ttask_avgs[task][\"A\"][ID] = avg_methodA\n\t\t\t\ttask_avgs[task][\"B\"][ID] = avg_methodB\n\n\t# comput independent two-sample t-test \n\t# NOTE: we can assume that the two sample sizes are the same, and \n\t#\t\tthat the two distributions have the same variance\n\tfor task in range(1,4):\n\t\ttaskA = task_avgs[task][\"A\"]\n\t\ttaskB = task_avgs[task][\"B\"]\n\n\t\tmeanA = np.mean(taskA)\n\t\tmeanB = np.mean(taskB)\n\t\tprint \"meanA: \" + str(meanA)\n\t\tprint \"meanB: \" + str(meanB)\n\t\tdiff = meanA - meanB\n\t\tprint \"diff: \" + 
str(diff)\n\n\t\t(statistic, pvalue) = stats.ttest_ind(a=taskA, b=taskB, equal_var=True)\n\n\t\tprint \"\\n\"\n\t\tprint \"task\"+str(task)+\" statistic: \" + str(statistic)\n\t\tprint \"task\"+str(task)+\" pvalue: \" + str(pvalue)", "def calculate_td_error(self, old_state, new_state, reward):\n for state in [old_state, new_state]:\n if state not in self.expected:\n self.expected[state] = 0\n self.delta = reward + self.gamma*self.expected[new_state] - self.expected[old_state]\n return self.delta", "def _compare_pre_post_sampling(X_train, y_train, X_new, y_new):\n train_data_info = _basic_data_info(X_train, y_train)\n new_data_info = _basic_data_info(X_new, y_new)\n\n print(\"\\nNum samples increased from {} to {} samples\\n\".format(train_data_info[\"Num_samples\"], new_data_info[\"Num_samples\"]))\n\n # Create pandas Dataframe\n df = pd.DataFrame(np.nan, index = train_data_info['classes'], columns = ['og_dist', 'og_prop', 'new_dist', 'new_prop'])\n df.iloc[:, 0] = train_data_info[\"counts\"]\n df.iloc[:, 1] = train_data_info[\"percs\"]\n df.iloc[:, 2] = new_data_info[\"counts\"]\n df.iloc[:, 3] = new_data_info[\"percs\"]\n\n df.index.name = \"classes\"\n\n # Difference in distributions\n print(\"Count comparison is as follows: \\n\", df)", "def lttest_ind (a, b, printit=0, name1='Samp1', name2='Samp2', writemode='a'):\r\n x1 = mean(a)\r\n x2 = mean(b)\r\n v1 = stdev(a)**2\r\n v2 = stdev(b)**2\r\n n1 = len(a)\r\n n2 = len(b)\r\n df = n1+n2-2\r\n svar = ((n1-1)*v1+(n2-1)*v2)/float(df)\r\n t = (x1-x2)/math.sqrt(svar*(1.0/n1 + 1.0/n2))\r\n prob = betai(0.5*df,0.5,df/(df+t*t))\r\n\r\n if printit <> 0:\r\n statname = 'Independent samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n1,x1,v1,min(a),max(a),\r\n name2,n2,x2,v2,min(b),max(b),\r\n statname,t,prob)\r\n return t,prob", "def ttest(\n data, dataLabel=None, paired=False, decimals=4,\n textline=False, units=None\n ):\n\n # test calling values\n if data is None or not isinstance(data, dict) or len(data.keys()) != 2:\n raise ValueError('RSTATS.ttest: data must be a dictionary'\n + ' with at exactly 2 keys'\n + '\\nUse KW (anova) for more than 2 groups')\n\n k = list(data.keys())\n g = {}\n n = {}\n gmean = {}\n gstd = {}\n\n g[1] = data[k[0]]\n g[2] = data[k[1]]\n n[1] = len(g[1])\n n[2] = len(g[2])\n # (w1, p1) = Stats.shapiro(g1, a=None, reta=False)\n # (w2, p2) = Stats.shapiro(g2, a=None, reta=False)\n # Tb, pb = Stats.bartlett(g1, g2) # do bartletss for equal variance\n equalVar = False\n\n if paired:\n print (len(g[1]), len(g[2]))\n (t, p) = Stats.ttest_rel(g[1], g[2])\n else:\n (t, p) = Stats.ttest_ind(g[1], g[2], equal_var=equalVar)\n gmean[1] = np.mean(g[1])\n gstd[1] = np.std(g[1], ddof=1)\n gmean[2] = np.mean(g[2])\n gstd[2] = np.std(g[2], ddof=1)\n # df = (tstd[k]**2/tN[k] + dstd[k]**2/dN[k])**2 / (( (tstd[k]**2 /\n # tN[k])**2 / (tN[k] - 1) ) + ( (dstd[k]**2 / dN[k])**2 / (tN[k] - 1) ) )\n df = ((gstd[1]**2/n[1] + gstd[2]**2/n[2])**2\n / (((gstd[1]**2 / n[1])**2 / (n[1] - 1)\n + ((gstd[2]**2 / n[2])**2 / (n[1] - 1))))\n )\n if dataLabel is not None:\n testtype = 'Independent'\n if paired:\n testtype = 'Paired'\n n = max([len(l) for l in k])\n print ('\\n%s\\n %s T-test, Welch correction' % (dataLabel, testtype))\n # if p1 < 0.05 and p2 < 0.05:\n # print(u' Both data sets appear normally distributed: Shapiro-Wilk Group 1 p = {:6.3f}, Group2 p = {:6.3f}'.format(p1, p2))\n # else:\n # print(u' ****At least one Data set is NOT normally distributed****\\n Shapiro-Wilk Group 1 p = {:6.3f}, Group2 p = {:6.3f}'.format(p1, 
p2))\n # print (u' (performing test anyway, as requested)')\n # if equalVar:\n # print(u' Variances are equivalent (Bartletts test, p = {:.3f})'.format(pb))\n # else:\n # print(u' Variances are unequal (Bartletts test, p = {:.3f}); not assuming equal variances'.format(pb))\n print(u' {:s}={:8.{pc}f} (SD {:.{pc}f}, N = {:d})'.\n format(k[0].rjust(n), gmean[1], gstd[1],\n len(g[1]), pc=decimals))\n print(u' {:s}={:8.{pc}f} (SD {:.{pc}f}, N = {:d})'.\n format(k[1].rjust(n), gmean[2], gstd[2],\n len(g[2]), pc=decimals))\n print(u' t({:6.2f})={:8.4f} p={:8.6f}\\n'.\n format(df, float(t), float(p)))\n # generate one line of text suitable for pasting into a paper\n if textline:\n if units is not None:\n units = ' ' + units\n else:\n units = ''\n fmtstring = u'{:s}: {:.{pc}f} (SD {:.{pc}f}, N={:d}){:s}; '\n print(u'(', end='')\n for s in range(1, 3):\n print(fmtstring.format(\n k[s-1], gmean[s], gstd[s], len(g[s]), units, \n pc=decimals), end='')\n print(u't{:.2f}={:.3f}, p={:s})\\n'.format(df, float(t), pformat(p)))\n\n return(df, float(t), float(p))", "def test_score_2():\n\n tpot_obj = TPOTClassifier()\n tpot_obj._pbar = tqdm(total=1, disable=True)\n known_score = 0.986318199045 # Assumes use of the TPOT balanced_accuracy function\n\n # Reify pipeline with known score\n tpot_obj._optimized_pipeline = creator.Individual.\\\n from_string('RandomForestClassifier(input_matrix)', tpot_obj._pset)\n tpot_obj._fitted_pipeline = tpot_obj._toolbox.compile(expr=tpot_obj._optimized_pipeline)\n tpot_obj._fitted_pipeline.fit(training_features, training_classes)\n\n # Get score from TPOT\n score = tpot_obj.score(testing_features, testing_classes)\n\n # http://stackoverflow.com/questions/5595425/\n def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):\n return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)\n\n assert isclose(known_score, score)", "def test_TRt(self):\n\n test_value = self.portfolio._get_total_portfolio[\n self.test_row_number]\n\n test_total = self.portfolio._df_total.iloc(axis=0)[\n self.test_row_number].values\n test_weights = self.weights.iloc(axis=0)[\n self.test_row_number].values\n calculated_value = sum(self.list_multiplication(test_total,\n test_weights))\n self.assertAlmostEqual(test_value, calculated_value)", "def calculate_score(result):\n sample1=result['Sample1']\n sample2=result['Sample2']\n string1=paragraph_to_list(sample1)\n string2=paragraph_to_list(sample2)\n \n return round( strings_similarity(string1, string2), 2)\n #method_dict=strings_count_compare(string1, string2)/ max(len(string1), len(string2))\n #return round(0.5*(method_difflab+method_dict), 2)", "def test_score_text2(self):\n\t\t#import pdb; pdb.set_trace()\n\t\ttest = sentiment.LibraryRun(self.text3, self.lib)\n\t\tmatches = test.find_phrase_matches(self.tokens_generator3)[0]\n\t\tobj_ut, _ = test.score_text(matches, end_threshold=0.5)\n\t\tself.assertEqual(obj_ut, -1.25)", "def TVD(p1, p2):\n assert p1.shape == p2.shape\n return 0.5 * np.sum(np.absolute(np.subtract(p1, p2)))", "def _compare_results(y_pred, y_pred_sampled, y_true):\n scores_og = _compute_scores(y_pred, y_true)\n scores_samp = _compute_scores(y_pred_sampled, y_true)\n\n # Aggreggate both results\n result_comp = pd.concat({\"Og\": scores_og, \"samp\": scores_samp}, axis = 1)\n\n return result_comp", "def ttest_2samp(x1, x2, alpha=0.05, paired=False, is_bernoulli=False, two_sided=True, return_tuple=False):\n x = np.asarray(x1)\n y = np.asarray(x2)\n\n # Define test degrees of freedom\n if two_sided:\n quant_order = 1 - (alpha / 2)\n h0 = 'X1_bar 
= X2_bar'\n h1 = 'X1_bar != X2_bar'\n else:\n quant_order = 1 - alpha\n h0 = 'X1 <= X2'\n h1 = 'X1 > X2'\n\n # Sample sizes\n n1, n2 = len(x), len(y)\n\n if paired:\n # If samples are paired, we perform a 1-sample student test\n # We compare if the difference is different from 0.\n mean1, mean2 = x.mean(), y.mean()\n d = x - y\n t, cv, p = ttest(d, alpha=alpha, return_tuple=True)\n df = len(d)\n else:\n # Else samples are independent\n # Compute means\n mean1, mean2 = x.mean(), y.mean()\n # Compute standard deviations\n if is_bernoulli:\n s1 = mean1 * (1 - mean1)\n s2 = mean2 * (1 - mean2)\n else:\n s1 = desc.var(x)\n s2 = desc.var(y)\n # Compute grouped variance\n sd = np.sqrt(((n1 - 1) * s1 + (n2 - 1) * s2) / (n1 + n2 - 2))\n # Degrees of freedom\n df = n1 + n2 - 2\n # Calculate the t statistic\n t = (mean1 - mean2) / sd\n\n # calculate the critical value\n cv = scp.t.ppf(quant_order, df)\n # calculate the p-value\n if (n1 > 30) & (n2 > 30):\n p = 2.0 * (1.0 - scp.norm.cdf(math.fabs(t)))\n else:\n p = 2.0 * (1.0 - scp.t.cdf(math.fabs(t), df=df))\n\n extra = f\" * E(X1) = {round(mean1, 3)} and E(X2) = {round(mean2, 3)} \\n\"\n extra += \" * Performed test for paired samples \\n\" if paired else ''\n extra += \" * Large sample sizes, t ~ N(0, 1) from CLT\" if (n1 > 30) & (n2 > 30) else ' * Small sample sizes, assumed t ~ T(n-1)'\n\n _summ = test_summary(df=df, critical_value=cv, t_value=t,\n p_value=p,\n title='Two Samples Student test',\n h0=h0, h1=h1,\n alpha=alpha,\n extra=extra)\n\n if return_tuple:\n return t, cv, p\n else:\n return _summ", "def p_value(set1, set2):\n\ts, p = stats.ttest_ind(set1, set2)\n\treturn p", "def test_score():\n\n tpot_obj = TPOTClassifier()\n\n try:\n tpot_obj.score(testing_features, testing_classes)\n assert False # Should be unreachable\n except ValueError:\n pass", "def ttest_ind(\n x1,\n x2,\n alternative=\"two-sided\",\n usevar=\"pooled\",\n weights=(None, None),\n value=0,\n):\n cm = CompareMeans(\n DescrStatsW(x1, weights=weights[0], ddof=0),\n DescrStatsW(x2, weights=weights[1], ddof=0),\n )\n tstat, pval, dof = cm.ttest_ind(\n alternative=alternative, usevar=usevar, value=value\n )\n\n return tstat, pval, dof", "def test(self,dataset):\n outputs = self.use(dataset)\n \n costs = np.ones((len(outputs),1))\n # Compute classification error\n for xy,pred,cost in zip(dataset,outputs,costs):\n x,y = xy\n if y == pred[0]:\n cost[0] = 0\n\n return outputs,costs", "def mw_test(n1, n2):\r\n # find smaller sample, defined historically as n2. modify the names so we\r\n # don't risk modifying data outside the scope of the function.\r\n if len(n2) > len(n1):\r\n sn1, sn2 = array(n2), array(n1)\r\n else:\r\n sn1, sn2 = array(n1), array(n2)\r\n # sum the ranks of s2 by using the searchsorted magic. the logic is that we\r\n # use a sorted copy of the data from both groups (n1 and n2) to figure out\r\n # at what index we would insert the values from sample 2. by assessing the\r\n # difference between the index that value x would be inserted in if we were\r\n # doing left insertion versus right insertion, we can tell how many values\r\n # are tied with x. this allows us to calculate the average ranks easily.\r\n data = sorted(hstack([sn1, sn2]))\r\n ssl = searchsorted(data, sn2, 'left')\r\n ssr = searchsorted(data, sn2, 'right')\r\n sum_sn2_ranks = ((ssl + ssr + 1) / 2.).sum()\r\n ln1, ln2 = sn1.size, sn2.size\r\n C = (ln1 * ln2) + (ln2 * (ln2 + 1) / 2.) 
- sum_sn2_ranks\r\n U = max(C, ln1 * ln2 - C)\r\n # now we calculate the pvalue using the normal approximation and the two\r\n # tailed test. our formula corrects for ties, because in the case where\r\n # there are no ties, the forumla on the bottom of pg 429=the formula on the\r\n # bottom of pg 430.\r\n numerator = (U - ln1 * ln2 / 2.)\r\n # follwing three lines give the T value in the formula on page 430. same\r\n # logic as above; we calculate the left and right indices of the unique\r\n # values for all combined data from both samples, then calculate ti**3-ti\r\n # for each value.\r\n ux = unique(data)\r\n uxl = searchsorted(data, ux, 'left')\r\n uxr = searchsorted(data, ux, 'right')\r\n T = _corr_kw(uxr - uxl).sum()\r\n denominator = sqrt(((ln1 * ln2) / float((ln1 + ln2) * (ln1 + ln2 - 1))) * (((ln1 + ln2) ** 3\r\n - (ln1 + ln2) - T) / 12.))\r\n if denominator == 0:\r\n # Warning: probability of U can't be calculated by mw_test\r\n # because all ranks of data were tied. Returning nan as pvalue.\r\n return U, nan\r\n else:\r\n pval = zprob(numerator / float(denominator))\r\n return U, pval", "def _tau_score(Y_true, Y_pred, sample_weight=None):\n (n_samples, n_classes) = Y_true.shape\n scores = np.zeros(n_samples)\n\n for sample in range(n_samples):\n for f_class in range(n_classes - 1):\n for s_class in range(f_class + 1, n_classes):\n a = Y_true[sample, f_class] - Y_true[sample, s_class]\n b = Y_pred[sample, f_class] - Y_pred[sample, s_class]\n scores[sample] += np.sign(a * b)\n\n scores[sample] *= 2 / (n_classes * (n_classes-1))\n\n return np.average(a=scores, weights=sample_weight)", "def test_score_3():\n\n tpot_obj = TPOTRegressor(scoring='neg_mean_squared_error')\n tpot_obj._pbar = tqdm(total=1, disable=True)\n known_score = 8.9673743407873712 # Assumes use of mse\n # Reify pipeline with known score\n tpot_obj._optimized_pipeline = creator.Individual.\\\n from_string('ExtraTreesRegressor(GradientBoostingRegressor(input_matrix, 100.0, 0.11), 0.17999999999999999)', tpot_obj._pset)\n tpot_obj._fitted_pipeline = tpot_obj._toolbox.compile(expr=tpot_obj._optimized_pipeline)\n tpot_obj._fitted_pipeline.fit(training_features_r, training_classes_r)\n\n # Get score from TPOT\n score = tpot_obj.score(testing_features_r, testing_classes_r)\n\n # http://stackoverflow.com/questions/5595425/\n def isclose(a, b, rel_tol=1e-09, abs_tol=0.0):\n return abs(a - b) <= max(rel_tol * max(abs(a), abs(b)), abs_tol)\n\n assert isclose(known_score, score)", "def delta(tval, tp_confidences, fp_confidences, num_samples):\n tp_percentage = \\\n np.sum([1 for x in tp_confidences if x > tval]) / num_samples\n if fp_confidences:\n fp_percentage = np.sum([1 for x in fp_confidences if x > tval]) / \\\n len(fp_confidences)\n else:\n fp_percentage = 0\n optimal_tp = len(tp_confidences) / num_samples\n delta_value = (tp_percentage - optimal_tp) ** 2 + fp_percentage ** 2\n return delta_value, tp_percentage, fp_percentage", "def get_tp_score(val_loss, best_val_loss, num_classes=10):\n random_loss = math.log(num_classes)\n\n return (random_loss - val_loss) / (random_loss - best_val_loss)", "def calculate_statistics(true_pos, false_pos, true_neg, false_neg, y_predict, Y_test):\n precision = float(true_pos) / (true_pos + false_pos)\n recall = float(true_pos) / (true_pos + false_neg)\n F1 = float(2 * precision * recall) / (precision + recall)\n #Get Mean Squared Error\n MSE = mean_squared_error(y_predict.flatten(), Y_test.flatten())\n\n return precision, recall, F1, MSE", "def test_paired_difference_analyses(self):\r\n 
actual = paired_difference_analyses(\r\n self.personal_ids_to_state_values1,\r\n ['firmicutes-abundance',\r\n 'bacteroidetes-abundance'],\r\n ['Pre', 'Post'],\r\n output_dir=self.test_out,\r\n ymin=0.0,\r\n ymax=1.0)\r\n self.assertTrue(exists(join(self.test_out,\r\n 'paired_difference_comparisons.txt')))\r\n self.assertTrue(\r\n exists(join(self.test_out, 'firmicutes-abundance.pdf')))\r\n self.assertTrue(\r\n exists(join(self.test_out, 'bacteroidetes-abundance.pdf')))\r\n # three output paths returned\r\n self.assertEqual(len(actual[0]), 5)\r\n # expected t values returned, they should be less than (firmicutes) or greater (bacteroidetes) than 2 \r\n self.assertLess(abs(actual[1]['firmicutes-abundance'][4]), 2)\r\n self.assertLess(2, abs(actual[1]['bacteroidetes-abundance'][4]))", "def calculate_td_error(self, old_state, new_state, reward):\n\n output = self.net(self.state_tensor_convert(old_state))\n target = self.gamma * self.net(self.state_tensor_convert(new_state)) + reward\n self.loss = self.net.loss(output,target)\n return float(target-output)", "def get_failure_prob(self, t1, t2):\n return (self.get_failure_cdf(t2) / (1 - self.get_failure_cdf(t1)))", "def dt_train_test(dt, xTrain, yTrain, xTest, yTest):\n # train the model\n dt.train(xTrain, yTrain['label'])\n # predict the training dataset\n yHatTrain = dt.predict(xTrain)\n trainAcc = accuracy_score(yTrain['label'], yHatTrain)\n # predict the test dataset\n yHatTest = dt.predict(xTest)\n testAcc = accuracy_score(yTest['label'], yHatTest)\n return trainAcc, testAcc", "def score_method(pairs_true, pairs_test):\n \n set_true = {tuple(e) for e in pairs_true}\n set_test = {tuple(e) for e in pairs_test}\n true_pos, false_pos, false_neg = confusion_stats(set_true, set_test)\n \n total = true_pos + false_pos + false_neg\n true_pos_rate = true_pos / total\n false_pos_rate = false_pos / total\n false_neg_rate = false_neg / total\n \n return true_pos_rate, false_pos_rate, false_neg_rate", "def evaluate(self, test_X, test_Y):\n correct = 0\n total = 0.0\n\n for i, ((word_indices, word_char_indices), gold_tag_indices) in enumerate(zip(test_X, test_Y)):\n\n output = self.predict(word_indices, word_char_indices)\n predicted_tag_indices = [np.argmax(o.value()) for o in output] \n\n correct += sum([1 for (predicted, gold) in zip(predicted_tag_indices, gold_tag_indices) if predicted == gold])\n total += len(gold_tag_indices)\n\n return correct, total", "def test_t_two_sample_switch(self):\r\n sample = array([4.02, 3.88, 3.34, 3.87, 3.18])\r\n x = array([3.02])\r\n self.assertFloatEqual(t_two_sample(x, sample), (-1.5637254, 0.1929248))\r\n self.assertFloatEqual(t_two_sample(sample, x), (1.5637254, 0.1929248))\r\n\r\n # can't do the test if both samples have single item\r\n self.assertEqual(t_two_sample(x, x), (None, None))\r\n\r\n # Test special case if t=0.\r\n self.assertFloatEqual(t_two_sample([2], [1, 2, 3]), (0.0, 1.0))\r\n self.assertFloatEqual(t_two_sample([1, 2, 3], [2]), (0.0, 1.0))", "def test_ddiff_v2(self):\n print \"\\n\"\n for d in ddiff_v2(a, b): print d\n self.assertEqual(d, \"+FUN\")", "def report_result(force_a_before, force_b_before, force_a_after, force_b_after):\n damage_a = 0.0\n damage_b = 0.0\n ################################# YOUR CODE HERE #################################\n damage_a = calculate_training_cost(force_a_before) - calculate_training_cost(force_a_after)\n damage_b = calculate_training_cost(force_b_before) - calculate_training_cost(force_b_after)\n 
##################################################################################\n return damage_a, damage_b", "def score(self, X_test: List[str], y_test: List[str]) -> int:\n predictions_count = 0\n right_predictions_count = 0\n\n for i in range(len(X_test)):\n label = self.predict(X_test[i].split())\n predictions_count += 1\n right_predictions_count += 1 if label == y_test[i] else 0\n\n return right_predictions_count / predictions_count", "def test_TPt(self):\n\n test_value = self.portfolio.calculate_total_performance(\n *self.boarder)[self.test_row_number]\n calculated_value = self.manual_cumprod(\n self.portfolio._get_total_portfolio)\n self.assertAlmostEqual(test_value, calculated_value)", "def timings(samples):\n groups = samples.groupby(axis=1, level=0)\n return groups.apply(lambda group: group.iloc[:, 1] - group.iloc[:, 0])", "def diffStats(name1, vals1, name2, vals2):\n from Stats import Stats\n label = name2 + ' - ' + name1\n diff = vals2 - vals1\n return Stats().label(label).addm(diff)", "def score(stripe1, stripe2):\n scr = 0\n count = 0\n for p1, p2 in zip(stripe1, stripe2):\n r = abs(p1[0] - p2[0])\n g = abs(p1[1] - p2[1])\n b = abs(p1[2] - p2[2])\n scr += r + g + b\n return scr", "def ks_test(df1, df2):\n p_val_list = []\n stat_list = []\n for element in df1.columns:\n res = stats.ks_2samp(df1[element], df2[element])\n p_val_list.append(res[1])\n stat_list.append(res[0])\n n = np.argmax(stat_list)\n p_val = p_val_list[n]\n stat = stat_list[n]\n return p_val, stat, n, p_val_list, stat_list", "def _t_test_results(self):\n t, df, p = self.api.m.math_utils.welchs_t_test(\n self.lkgr.values, self.fkbr.values)\n lines = [\n 'LKGR values: %r' % self.lkgr.values,\n 'FKBR values: %r' % self.fkbr.values,\n 't-statistic: %r' % t,\n 'deg. of freedom: %r' % df,\n 'p-value: %r' % p,\n 'Confidence score: %r' % (100 * (1 - p))\n ]\n return '\\n'.join(lines)", "def score(self, y_true, y_pred):\r\n pass", "def _testScoreGeneric(testcase, sigma=0.2, num_repl=3):\n # Fit to training data \n testcase.mclf.fit(testcase.dfs_train, testcase.ser)\n # Score on training data\n result1 = testcase.mclf.score(testcase.dfs_train[0], testcase.ser)\n # Score on test data\n result2 = testcase.mclf.score(testcase.df_test, testcase.ser)\n #\n testcase.assertGreater(result1.abs, result2.abs)", "def get_score(self,sentence_1, sentence_2):\n\t return self.DELTA * self.semantic_similarity(sentence_1, sentence_2, True) + (1.0 - self.DELTA) * self.word_order_similarity(sentence_1, sentence_2)", "def evaluate(y_test, y_hat):\n score = np.sum(y_test==y_hat)/len(y_test)\n return score", "def test_tau_score(sample_weight):\n np.testing.assert_almost_equal(\n tau_score(Y_true, Y_pred, sample_weight),\n _tau_score(Y_true, Y_pred, sample_weight))", "def ttest(x):\n from ..group.onesample import stat\n t = stat(x.T, id='student', axis=0)\n return np.squeeze(t)", "def calculate_difference(mark1, mark2):\n\n return mark1 - mark2", "def difference(first, second, rf, rs, years=(1980, 2000),smooth=1, corpus='bok'):\n try:\n a_first = nb_ngram(first, years=years, smooth=smooth, corpus=corpus)\n a_second = nb_ngram(second, years=years, smooth=smooth, corpus=corpus)\n a = a_first.join(a_second) \n b_first = nb_ngram(rf, years=years, smooth=smooth, corpus=corpus)\n b_second = nb_ngram(rs, years=years, smooth=smooth, corpus=corpus)\n if rf == rs:\n b_second.columns = [rs + '2']\n b = b_first.join(b_second)\n s_a = a.mean()\n s_b = b.mean()\n f1 = s_a[a.columns[0]]/s_a[a.columns[1]]\n f2 = s_b[b.columns[0]]/s_b[b.columns[1]]\n res = 
f1/f2\n except:\n res = 'Mangler noen data - har bare for: ' + ', '.join([x for x in a.columns.append(b.columns)])\n return res", "def get_data_diff(cls, data1: tuple, data2: tuple) -> tuple:\n cls._data_validation(data1)\n cls._data_validation(data2)\n data1_n = StatMe.get_n(data1)\n data2_n = StatMe.get_n(data2)\n if data1_n != data2_n:\n raise ValueError(f\"Samples are not of equal length.\\n\"\n f\"Items in 'data1' = {data1_n}\\n\"\n f\"Items in 'data2' = {data2_n}\")\n else:\n return_list = list()\n for i in range(data1_n):\n x1 = data1[i]\n x2 = data2[i]\n return_list.append(x1 - x2)\n return tuple(return_list)", "def trame_distance(t1, t2):\n return np.linalg.norm(t1 - t2)", "def mk_test(input_data):\r\n\r\n\ttrend, h, p, z, Tau, s, var_s, slope, intercept = mk.original_test(input_data)\r\n\r\n\treturn trend, h, p, z, Tau, s, var_s, slope, intercept", "def evaluate(self, training_scores, original_test_scores, imitation_test_scores):\n\n #finding a threshold: third to smallest training score\n sorted_scores = np.sort(training_scores)\n threshold = sorted_scores[2]\n\n #computing the number of errors\n errors = len(np.where(original_test_scores < threshold)[0])\n errors += len(np.where(imitation_test_scores > threshold)[0])\n\n #computing the local accuracy\n accuracy = 1 - errors/(len(original_test_scores)+len(imitation_test_scores))\n return accuracy, threshold", "def test(self):\n y_list = []\n y_hat_list = []\n for ex_dict in ut.TEST_LIST:\n y_list.append(ex_dict[1])\n y_hat_list.append(self.predict(ex_dict[0]))\n acc = ut.compute_accuracy(y_hat_list, y_list)\n return y_hat_list, acc", "def eval_perf_total(model, X_train, y_train, X_test, y_test):\n\n y_hat_train = model.predict(X_train)\n y_hat_test = model.predict(X_test)\n \n train_mae = metrics.mean_absolute_error(y_train, y_hat_train)\n train_mse = metrics.mean_squared_error(y_train, y_hat_train)\n train_rmse = np.sqrt(metrics.mean_squared_error(y_train, y_hat_train))\n train_r = metrics.r2_score(y_train, y_hat_train)\n\n print('Evaluating Performance on Training Data:\\n')\n print(f' Train Mean Absolute Error: {train_mae:,.2f}')\n print(f' Train Mean Squared Error: {train_mse:,.2f}\\n')\n print(f'Train Root Mean Squared Error: {train_rmse:,.2f}')\n print(f'Train R-Square Value: {round(train_r,2)}')\n\n print('\\n'+'---'*25+'\\n')\n\n test_mae = metrics.mean_absolute_error(y_test, y_hat_test)\n test_mse = metrics.mean_squared_error(y_test, y_hat_test)\n test_rmse = np.sqrt(metrics.mean_squared_error(y_test, y_hat_test))\n test_r = metrics.r2_score(y_test, y_hat_test)\n\n print('Evaluating Performance on Testing Data:\\n')\n print(f' Test Mean Absolute Error: {test_mae:,.2f}')\n print(f' Test Mean Squared Error: {test_mse:,.2f}\\n')\n print(f'Test Root Mean Squared Error: {test_rmse:,.2f}')\n print(f'Test R-Square Value: {round(test_r,2)}')", "def estimate(self, states):\n scores = [state.get_score() for state in states]\n return np.array([score[0] - score[1] for score in scores])", "def compare(sampl_predict, sampl_real):\n difference = 0\n for i in range(len(sampl_predict)):\n if sampl_predict[i] != sampl_real[i]:\n difference += 1\n\n return difference", "def score(sentence, test):\n total = 0\n wrong = []\n i = 0\n while i < len(sentence):\n if sentence[i] == test[i]:\n total += 1\n else:\n # keep track of pos in list\n wrong.append(i)\n i += 1\n percent = float(total)/len(test)*100\n return percent, wrong", "def test(self, inputs, labels):\n n = inputs.shape[0]\n\n error = 0.0\n for idx in range(n):\n result = 
self.forward(inputs[idx:idx+1, :])\n error += abs(result - labels[idx:idx+1, :])\n\n print(\"error: \", error)\n error /= n\n print('accuracy: %.2f' % ((1 - error)*100) + '%')\n print('')", "def test_t_two_sample(self):\r\n I = array([7.2, 7.1, 9.1, 7.2, 7.3, 7.2, 7.5])\r\n II = array([8.8, 7.5, 7.7, 7.6, 7.4, 6.7, 7.2])\r\n self.assertFloatEqual(t_two_sample(I, II), (-0.1184, 0.45385 * 2),\r\n 0.001)", "def test_ddiff_v1(self):\n print \"\\n\"\n for d in ddiff_v1(a, b): print d\n self.assertEqual(d, \"+FUN\")", "def seconds_elapsed(date1, date2):\n return (date1 - date2).total_seconds()", "def delong_roc_test(ground_truth, predictions_one, predictions_two):\n order, label_1_count = compute_ground_truth_statistics(ground_truth)\n predictions_sorted_transposed = np.vstack((predictions_one, predictions_two))[:, order]\n aucs, delongcov = fastDeLong(predictions_sorted_transposed, label_1_count)\n return calc_pvalue(aucs, delongcov)", "def distance(self, first_tape, second_tape):\n pairs = zip(first_tape, second_tape)\n return math.sqrt(abs(sum(map((lambda n: self.subsq(*n)), pairs))))", "def test_t_paired_specific_difference(self):\r\n x, y = self.x, self.y\r\n # difference is 0.2, so test should be non-significant if 0.2 passed\r\n self.failIf(t_paired(y, x, exp_diff=0.2)[0] > 1e-10)\r\n # same, except that reversing list order reverses sign of difference\r\n self.failIf(t_paired(x, y, exp_diff=-0.2)[0] > 1e-10)\r\n # check that there's no significant difference from the true mean\r\n self.assertFloatEqual(\r\n t_paired(y, x, exp_diff=0.2)[1], 1, 1e-4)", "def assertTPsEqual(self, tp1, tp2):\n self.assertEqual(tp1, tp2, tp1.diff(tp2))\n self.assertTrue(fdrutilities.tpDiff2(tp1, tp2, 1, False))", "def score(self, test_data):\n\n\t\tins, outs = self._split_inputs_outputs(test_data)\n\t\treturn self.model.score(ins, outs)", "def calc_difference_from_reference(inputs, outputs, verbose=True):\n\n # Get a list of reference input/output files\n filename_ref_inputs = glob.glob(lal_cuda.full_path_datafile(\"inputs.dat*\"))\n filename_ref_outputs = [\n filename_ref_input_i.replace(\n \"inputs.dat\",\n \"outputs.dat\") for filename_ref_input_i in filename_ref_inputs]\n\n # Look to see if the given inputs are in the stored reference inputs\n filename_ref_output = None\n for filename_ref_input_i, filename_ref_output_i in zip(filename_ref_inputs, filename_ref_outputs):\n inputs_i = inputs.read(filename_ref_input_i)\n\n # Check to see if this set of inputs matches the set that has been passed\n if(inputs_i == inputs):\n inputs_ref = inputs_i\n filename_ref_output = filename_ref_output_i\n break\n\n # Perform check if a match has been found\n if(not filename_ref_output):\n lal_cuda.log.warning(\n \"Checking could not be performed: reference data set with given inputs (%s) not found.\" %\n (inputs))\n else:\n if(verbose):\n lal_cuda.log.open('Performing test...')\n\n # Read reference dataset's outputs\n outputs_ref = outputs.read(filename_ref_output)\n\n # Compute statistics of difference from test reference\n hpval_real_diff_avg = 0.\n hpval_imag_diff_avg = 0.\n hcval_real_diff_avg = 0.\n hcval_imag_diff_avg = 0.\n hpval_real_diff_max = 0.\n hpval_imag_diff_max = 0.\n hcval_real_diff_max = 0.\n hcval_imag_diff_max = 0.\n for (hp_i, hc_i, hp_ref_i, hc_ref_i) in zip(outputs.hp, outputs.hc, outputs_ref.hp, outputs_ref.hc):\n hpval_real_diff_i = calc_frac_diff(hp_i.real, hp_ref_i.real)\n hpval_imag_diff_i = calc_frac_diff(hp_i.imag, hp_ref_i.imag)\n hcval_real_diff_i = calc_frac_diff(hc_i.real, hc_ref_i.real)\n 
hcval_imag_diff_i = calc_frac_diff(hc_i.imag, hc_ref_i.imag)\n hpval_real_diff_avg += hpval_real_diff_i\n hpval_imag_diff_avg += hpval_imag_diff_i\n hcval_real_diff_avg += hcval_real_diff_i\n hcval_imag_diff_avg += hcval_imag_diff_i\n hpval_real_diff_max = max([hpval_real_diff_max, hpval_real_diff_i])\n hpval_imag_diff_max = max([hpval_imag_diff_max, hpval_imag_diff_i])\n hcval_real_diff_max = max([hcval_real_diff_max, hcval_real_diff_i])\n hcval_imag_diff_max = max([hcval_imag_diff_max, hcval_imag_diff_i])\n hpval_real_diff_avg /= float(len(outputs.hp))\n hpval_imag_diff_avg /= float(len(outputs.hp))\n hcval_real_diff_avg /= float(len(outputs.hc))\n hcval_imag_diff_avg /= float(len(outputs.hc))\n\n # Report results\n if(verbose):\n lal_cuda.log.comment(' Average/maximum real(hp) fractional difference: %.2e/%.2e' %\n (hpval_real_diff_avg, hpval_real_diff_max))\n lal_cuda.log.comment(' Average/maximum imag(hp) fractional difference: %.2e/%.2e' %\n (hpval_imag_diff_avg, hpval_imag_diff_max))\n lal_cuda.log.comment(' Average/maximum real(hc) fractional difference: %.2e/%.2e' %\n (hcval_real_diff_avg, hcval_real_diff_max))\n lal_cuda.log.comment(' Average/maximum imag(hc) fractional difference: %.2e/%.2e' %\n (hcval_imag_diff_avg, hcval_imag_diff_max))\n lal_cuda.log.close(\"Done.\")\n\n return {\n 'hpval_real_diff_avg': hpval_real_diff_avg,\n 'hpval_real_diff_max': hpval_real_diff_max,\n 'hpval_imag_diff_avg': hpval_imag_diff_avg,\n 'hpval_imag_diff_max': hpval_imag_diff_max,\n 'hcval_real_diff_avg': hcval_real_diff_avg,\n 'hcval_real_diff_max': hcval_real_diff_max,\n 'hcval_imag_diff_avg': hcval_imag_diff_avg,\n 'hcval_imag_diff_max': hcval_imag_diff_max}", "def perf_measure(y_actual, y_hat):\n TP = 0\n FP = 0\n TN = 0\n FN = 0\n\n for i in range(len(y_hat)): \n if y_actual[i]==y_hat[i]==1:\n TP += 1\n if y_hat[i]==1 and y_actual[i]!=y_hat[i]:\n FP += 1\n if y_actual[i]==y_hat[i]==0:\n TN += 1\n if y_hat[i]==0 and y_actual[i]!=y_hat[i]:\n FN += 1\n\n score = {\n \"TP\": TP,\n \"FP\": FP,\n \"TN\": TN,\n \"FN\": FN\n }\n\n return score", "def test_mc_t_two_sample(self):\r\n # Verified against R's t.test() and Deducer::perm.t.test().\r\n\r\n # With numpy array as input.\r\n exp = (-0.11858541225631833, 0.90756579317867436)\r\n I = array([7.2, 7.1, 9.1, 7.2, 7.3, 7.2, 7.5])\r\n II = array([8.8, 7.5, 7.7, 7.6, 7.4, 6.7, 7.2])\r\n obs = mc_t_two_sample(I, II)\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertEqual(len(obs[2]), 999)\r\n self.assertCorrectPValue(0.8, 0.9, mc_t_two_sample, [I, II],\r\n p_val_idx=3)\r\n\r\n # With python list as input.\r\n exp = (-0.11858541225631833, 0.90756579317867436)\r\n I = [7.2, 7.1, 9.1, 7.2, 7.3, 7.2, 7.5]\r\n II = [8.8, 7.5, 7.7, 7.6, 7.4, 6.7, 7.2]\r\n obs = mc_t_two_sample(I, II)\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertEqual(len(obs[2]), 999)\r\n self.assertCorrectPValue(0.8, 0.9, mc_t_two_sample, [I, II],\r\n p_val_idx=3)\r\n\r\n exp = (-0.11858541225631833, 0.45378289658933718)\r\n obs = mc_t_two_sample(I, II, tails='low')\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertEqual(len(obs[2]), 999)\r\n self.assertCorrectPValue(0.4, 0.47, mc_t_two_sample, [I, II],\r\n {'tails': 'low'}, p_val_idx=3)\r\n\r\n exp = (-0.11858541225631833, 0.54621710341066287)\r\n obs = mc_t_two_sample(I, II, tails='high', permutations=99)\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertEqual(len(obs[2]), 99)\r\n self.assertCorrectPValue(0.4, 0.62, mc_t_two_sample, [I, II],\r\n {'tails': 'high', 'permutations': 99}, p_val_idx=3)\r\n\r\n exp = 
(-2.8855783649036986, 0.99315596652421401)\r\n obs = mc_t_two_sample(I, II, tails='high', permutations=99, exp_diff=1)\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertEqual(len(obs[2]), 99)\r\n self.assertCorrectPValue(0.55, 0.99, mc_t_two_sample, [I, II],\r\n {'tails': 'high', 'permutations': 99, 'exp_diff': 1}, p_val_idx=3)", "def compare_predictions():\n validation_labels = np.array(pd.read_csv(val_true_labels_dir + dataset_version + 'validation_labels.csv', index_col=0))\n validation_labels = np.reshape(validation_labels, (-1))\n\n diff_between_files = []\n also1s = []\n also2s = []\n for filename1 in os.listdir(val_predictions_dir):\n if filename1.endswith(\".csv\"):\n for filename2 in os.listdir(val_predictions_dir):\n if filename2.endswith(\".csv\"):\n if filename1 < filename2:\n wrong1 = 0\n wrong2 = 0\n diff_between = 0\n also1 = 0\n also2 = 0\n diff_corr1 = 0\n diff_corr2 = 0\n f1 = np.array(pd.read_csv(val_predictions_dir + filename1, index_col=0))\n f1 = np.reshape(f1, (-1))\n f2 = np.array(pd.read_csv(val_predictions_dir + filename2, index_col=0))\n f2 = np.reshape(f2, (-1))\n for line in range(f1.shape[0]):\n if f1[line] != validation_labels[line]:\n wrong1 += 1\n if f2[line] != validation_labels[line]:\n wrong2 += 1\n if f1[line] != f2[line]:\n diff_between += 1\n if f1[line] == validation_labels[line]:\n diff_corr1 += 1\n if f2[line] == validation_labels[line]:\n diff_corr2 += 1\n if f1[line] != validation_labels[line]:\n if f2[line] != validation_labels[line]:\n also2 += 1\n if f2[line] != validation_labels[line]:\n if f1[line] != validation_labels[line]:\n also1 += 1\n\n diff_between_files.append(diff_between)\n print(filename1)\n print('Wrongly predicted by 1: ' + str(100 * wrong1 / f1.shape[0]) + '%')\n print(filename2)\n print('Wrongly predicted by 2: ' + str(100 * wrong2 / f1.shape[0]) + '%')\n print()\n print('Differences between files: ' + str(100 * diff_between / f1.shape[0]) + '%')\n print(f'\\t of which correct by 1 {100 * diff_corr1 / diff_between}%, by 2 {100 * diff_corr2 / diff_between}%')\n also1s.append(also1 / wrong2)\n also2s.append(also2 / wrong1)\n print('Wrongly predicted by other among wrong ones: ' + str(100 * also2 / wrong1) + '%, ' + str(\n 100 * also1 / wrong2) + '%\\n\\n\\n')\n\n print('Max, min and avg differences between files:')\n print(str(100 * max(diff_between_files) / validation_labels.shape[0]) + '%')\n print(str(100 * min(diff_between_files) / validation_labels.shape[0]) + '%')\n print(str(100 * np.mean(diff_between_files) / validation_labels.shape[0]) + '%')\n\n print('\\nWrongly predicted by first that were also wrongly predicted by second:')\n print('Max: ' + str(100 * max(also2s)) + '%')\n print('Min: ' + str(100 * min(also2s)) + '%')\n print('Avg: ' + str(100 * np.mean(also2s)) + '%')\n\n print('\\nWrongly predicted by second that were also wrongly predicted by first:')\n print('Max: ' + str(100 * max(also1s)) + '%')\n print('Min: ' + str(100 * min(also1s)) + '%')\n print('Avg: ' + str(100 * np.mean(also1s)) + '%')", "def _compute_ter_score_from_statistics(num_edits: Tensor, tgt_length: Tensor) ->Tensor:\n if tgt_length > 0 and num_edits > 0:\n score = num_edits / tgt_length\n elif tgt_length == 0 and num_edits > 0:\n score = tensor(1.0)\n else:\n score = tensor(0.0)\n return score", "def ttest():\n # open test results and perform regression analysis\n alphas = []\n betas = []\n iterations = {}\n with open(f\"Results/conclusion2.csv\") as f:\n csv_reader = csv.reader(f, delimiter=',')\n\n for run in csv_reader:\n max, max_i = 
get_max_run(run)\n if int(run[0]) not in iterations:\n iterations[int(run[0])] = {100 - int(run[1])-1: int(max)}\n else:\n iterations[int(run[0])][100 - int(run[1])-1] = int(max)\n\n for iteration in iterations:\n mono_levels = list(iterations[iteration].keys())\n pop_sizes = [iterations[iteration][i] for i in mono_levels]\n\n regress_result = regress(pop_sizes, mono_levels)\n alphas += [regress_result[1]]\n betas += [regress_result[0]]\n\n # plot scatter and regression line\n avg_alpha = sum(alphas)/len(alphas)\n avg_beta = sum(betas)/len(betas)\n stddev_beta = np.std(betas)\n vis.scatter_mono(iterations, avg_alpha, avg_beta)\n\n # perform t-test\n ttest_result = stats.ttest_ind(betas, [0 for i in betas], equal_var=True)\n t_stat = ttest_result[0]\n p_value = ttest_result[1]\n print(f'Results from t-test:')\n print(f'Avg beta: {avg_beta}, stddev beta: {stddev_beta}.')\n print(f't-stat: {t_stat}, p-value: {p_value}.')", "def real_result(self, other):\r\n self_in_game_skill = np.random.normal(self.skill,self.var)\r\n other_in_game_skill = np.random.normal(other.skill,other.var)\r\n if self_in_game_skill > other_in_game_skill:\r\n return 1\r\n else:\r\n return 0", "def absolute_difference(new_data, old_data):\n diff = 0\n assert len(new_data) == len(old_data)\n for new, old in zip(new_data, old_data):\n diff += np.sum(np.abs(new-old))\n return diff", "def calculate_test_error(result, test_label, test_sad):\n result = np.round(result).astype(int)\n nn_cost = np.mean(np.abs(test_label - result), axis=(1, 2, 3))\n\n # calculate switchable filter loss\n switch_cost = np.stack([nn_cost, test_sad])\n switch_cost = np.min(switch_cost, axis=0)\n\n return np.mean(nn_cost), np.mean(test_sad), np.mean(switch_cost)", "def accuracy(output1, output2):\n pred1 = output1\n pred2 = output2\n correct = torch.gt(pred1, pred2)\n return float(correct.sum())/correct.size(0)", "def compare_measurements(self, measurement1: Measurement, measurement2: Measurement) -> int:\n\n score_gain = 0.0\n for objective, weight in self._metric_weights.items():\n metric1 = measurement1.get_value_of_metric(tag=objective)\n metric2 = measurement2.get_value_of_metric(tag=objective)\n metric_diff = metric1 - metric2\n score_gain += weight * (metric_diff.value() / metric1.value())\n\n if score_gain > COMPARISON_SCORE_THRESHOLD:\n return 1\n elif score_gain < -COMPARISON_SCORE_THRESHOLD:\n return -1\n return 0", "def RPS(y_true, y_pred) -> float:\n output = 0.\n data_num = len(y_true)\n for i in range(data_num):\n times = len(y_true[i]) - 1 \n cumulative_sum = 0.\n score = 0.\n for time in range(times):\n cumulative_sum += y_true[i,time] - y_pred[i,time]\n score += cumulative_sum ** 2\n score /= times\n output += score\n \n output /= data_num\n return output" ]
[ "0.75622785", "0.6732271", "0.67147094", "0.67101526", "0.66661763", "0.64419353", "0.6416758", "0.64082766", "0.63968855", "0.6324951", "0.6282402", "0.6253952", "0.6186208", "0.61227715", "0.6105249", "0.60951906", "0.5997991", "0.59836954", "0.59664416", "0.5946647", "0.5934196", "0.59319234", "0.5923768", "0.5921423", "0.5902078", "0.58966976", "0.5885101", "0.5866355", "0.58485174", "0.58318895", "0.582768", "0.5815727", "0.58041936", "0.57987785", "0.5762548", "0.57474005", "0.57210344", "0.57144487", "0.56860846", "0.5651847", "0.56480896", "0.56405187", "0.5635259", "0.56347066", "0.56271046", "0.56252646", "0.5620798", "0.5620224", "0.56191456", "0.56160027", "0.5608956", "0.56043464", "0.5594804", "0.55783516", "0.5578319", "0.55775565", "0.5572447", "0.5566971", "0.55603766", "0.5549465", "0.55415237", "0.5539925", "0.5537972", "0.55274665", "0.5514851", "0.5500605", "0.54903215", "0.54861027", "0.54787153", "0.5476598", "0.54745907", "0.5466376", "0.5448777", "0.54476357", "0.5445646", "0.54453677", "0.5440288", "0.5440018", "0.54380953", "0.5433852", "0.54336846", "0.54251254", "0.541894", "0.5412928", "0.54028636", "0.53988624", "0.53966177", "0.53879356", "0.5387057", "0.53864527", "0.538412", "0.537976", "0.53764915", "0.53760463", "0.53729963", "0.5372909", "0.53722894", "0.5371259", "0.5354187", "0.5349182" ]
0.7724763
0
Determine whether two samples differ significantly. This uses a Student's two-sample, two-tailed t-test with alpha=0.95.
def is_significant(sample1, sample2):\n    deg_freedom = len(sample1) + len(sample2) - 2\n    critical_value = tdist95conf_level(deg_freedom)\n    t_score = tscore(sample1, sample2)\n    return (abs(t_score) >= critical_value, t_score)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ttest_review(sample_1, sample_2, alpha=.05):\n\n result = stats.ttest_ind(sample_1, sample_2)\n crit_val, p_val = result\n \n ## Creating interpretation based on p-value results.\n\n if p_val < .05:\n print(f'The feature is statistically significant with a p-value of {p_val}.')\n\n else:\n print(f'The feature is not statistically significant with a p-value of {p_val}.')\n \n return p_val", "def tscore(sample1, sample2):\n if len(sample1) != len(sample2):\n raise ValueError(\"different number of values\")\n error = pooled_sample_variance(sample1, sample2) / len(sample1)\n diff = statistics.mean(sample1) - statistics.mean(sample2)\n return diff / math.sqrt(error * 2)", "def run_welchs_ttest(stat1, stat2, alpha, faster):\n m1 = stat1[MEAN]\n m2 = stat2[MEAN]\n\n s1 = stat1[STDDEV]\n s2 = stat2[STDDEV]\n\n n1 = stat1[ROUNDS]\n n2 = stat2[ROUNDS]\n\n df1 = n1 - 1 # degree of freedom of stat1\n df2 = n2 - 1 # degree of freedom of stat2\n\n sample_v1 = s1**2 / n1 # biased estimated sample variance of stat1\n sample_v2 = s2**2 / n2 # biased estimated sample variance of stat2\n\n biased_variance = np.sqrt(sample_v1 + sample_v2)\n # degree of freedom\n df = (sample_v1 + sample_v2) ** 2 / (\n sample_v1**2 / (df1) + sample_v2**2 / (df2)\n )\n\n mean_delta = m1 - m2\n t_stat = mean_delta / biased_variance\n\n if faster:\n # Null hypothesis is stat1 >= stat2.\n # Alternative hypothesis is stat1 < stat2.\n p_value = t.cdf(t_stat, df)\n\n # Compute one sided confidence interval (-inf, x)\n upper_bound = mean_delta + t.ppf(1.0 - alpha, df) * biased_variance\n upper_bound = format(upper_bound, \".5f\")\n lower_bound = \"-inf\"\n else:\n # Null hypothesis is stat1 <= stat2.\n # Alternative hypothesis is stat1 > stat2.\n p_value = 1.0 - t.cdf(t_stat, df)\n\n # Compute one sided confidence interval (x, inf)\n upper_bound = \"inf\"\n lower_bound = mean_delta + t.ppf(alpha, df) * biased_variance\n lower_bound = format(lower_bound, \".5f\")\n\n return TTestResult(\n p_value=p_value,\n t_stat=t_stat,\n lower_bound=lower_bound,\n upper_bound=upper_bound,\n mean_delta=format(mean_delta, \".5f\"),\n )", "def _t_test(_sample_a, _sample_b):\n res = stats.ttest_ind(_sample_a, _sample_b, axis=0, equal_var=equal_var, nan_policy='propagate')\n print('Independent t-test\\nt-statistic: {}\\np-value: {}'.format(res[0], res[1]))\n print('-' * 10)", "def ttest_two_sided(arr1, arr2, alpha=0.05, verbose=False):\n res = stats.ttest_ind(arr1, arr2)\n if res[1] <= alpha:\n if verbose: print(\n f'P-value = {round(res[1], 3)}, i.e. at alpha={alpha} the samples are significantly DIFFERENT')\n is_significant = True\n else:\n if verbose: print(f'P-value = {round(res[1], 3)}, i.e. at alpha={alpha} the samples are from the SAME set')\n is_significant = False\n return res[1], is_significant", "def calculate_t_test(mean1, mean2, var1, var2, n1, n2, alpha):\n # Two Sample T Test (M0 == M1) (Two Tails)\n t = (mean1 - mean2) / sqrt((var1 / n1) + (var2 / n2)) # t statistic calculation for two sample\n df = n1 + n2 - 2 # degree of freedom for two sample t - set\n pval = 1 - stats.t.sf(np.abs(t), df) * 2 # two-sided pvalue = Prob(abs(t)>tt) # p - value\n cv = stats.t.ppf(1 - (alpha / 2), df)\n standart_error = cv * sqrt((var1 / n1) + (var2 / n2))\n confidence_intervals = [abs(mean1 - mean2) - standart_error, abs(mean1 - mean2) + standart_error, standart_error]\n acception = 'HO REJECTED!' if pval < (alpha / 2) else 'HO ACCEPTED!' # left tail\n acception = 'HO REJECTED!' if pval > 1 - (alpha / 2) else 'HO ACCEPTED!' 
# right tail\n return pval, confidence_intervals, acception", "def eeg_twosample_ttest(array1,array2):\t\n\tfrom scipy.stats import ttest_rel\n\ts1 = array1.shape\n\tp = np.zeros(s1[1])\n\tt = np.zeros(s1[1])\n\tfor i in range(s1[1]):\n\t\ttval,pval = ttest_rel(array1[:,i],array2[:,i])\n\t\tp[i]=pval\n\t\tt[i]=tval\n\t\t\n\treturn t,p", "def test_t_two_sample_switch(self):\r\n sample = array([4.02, 3.88, 3.34, 3.87, 3.18])\r\n x = array([3.02])\r\n self.assertFloatEqual(t_two_sample(x, sample), (-1.5637254, 0.1929248))\r\n self.assertFloatEqual(t_two_sample(sample, x), (1.5637254, 0.1929248))\r\n\r\n # can't do the test if both samples have single item\r\n self.assertEqual(t_two_sample(x, x), (None, None))\r\n\r\n # Test special case if t=0.\r\n self.assertFloatEqual(t_two_sample([2], [1, 2, 3]), (0.0, 1.0))\r\n self.assertFloatEqual(t_two_sample([1, 2, 3], [2]), (0.0, 1.0))", "def test_t_two_sample(self):\r\n I = array([7.2, 7.1, 9.1, 7.2, 7.3, 7.2, 7.5])\r\n II = array([8.8, 7.5, 7.7, 7.6, 7.4, 6.7, 7.2])\r\n self.assertFloatEqual(t_two_sample(I, II), (-0.1184, 0.45385 * 2),\r\n 0.001)", "def test_t_paired_specific_difference(self):\r\n x, y = self.x, self.y\r\n # difference is 0.2, so test should be non-significant if 0.2 passed\r\n self.failIf(t_paired(y, x, exp_diff=0.2)[0] > 1e-10)\r\n # same, except that reversing list order reverses sign of difference\r\n self.failIf(t_paired(x, y, exp_diff=-0.2)[0] > 1e-10)\r\n # check that there's no significant difference from the true mean\r\n self.assertFloatEqual(\r\n t_paired(y, x, exp_diff=0.2)[1], 1, 1e-4)", "def test_onesample_two_tailed(self):\n rng = np.random.default_rng(13489132474)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(-5, 2, 100)\n\n ttest = one_sample_ttest(data1, -5)\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)", "def report_ttest_2sample(null_hypothesis, sample1, sample2, paired, alpha=0.05):\n\n if paired:\n t_value, p_value = stats.ttest_rel(sample1, sample2)\n else:\n t_value, p_value = stats.ttest_ind(sample1, sample2)\n print('Test for null hypothesis \"{}\".'.format(null_hypothesis))\n print('Sample 1 mean: {}, Sample 1 SD: {}'.format(np.mean(sample1), np.std(sample1)))\n print('Sample 2 mean: {}, Sample 2 SD: {}'.format(np.mean(sample2), np.std(sample2)))\n print('t({})={}, p={}.'.format(len(sample1)-1, t_value, p_value))\n if p_value < alpha:\n print('Reject null hypothesis.\\n')\n else:\n print('Fail to reject null hypothesis.\\n')", "def ttest_2samp(x1, x2, alpha=0.05, paired=False, is_bernoulli=False, two_sided=True, return_tuple=False):\n x = np.asarray(x1)\n y = np.asarray(x2)\n\n # Define test degrees of freedom\n if two_sided:\n quant_order = 1 - (alpha / 2)\n h0 = 'X1_bar = X2_bar'\n h1 = 'X1_bar != X2_bar'\n else:\n quant_order = 1 - alpha\n h0 = 'X1 <= X2'\n h1 = 'X1 > X2'\n\n # Sample sizes\n n1, n2 = len(x), len(y)\n\n if paired:\n # If samples are paired, we perform a 1-sample student test\n # We compare if the difference is different from 0.\n mean1, mean2 = x.mean(), y.mean()\n d = x - y\n t, cv, p = ttest(d, alpha=alpha, return_tuple=True)\n df = len(d)\n else:\n # Else samples are independent\n # Compute means\n mean1, mean2 = x.mean(), y.mean()\n # Compute standard deviations\n if is_bernoulli:\n s1 = mean1 * (1 - mean1)\n s2 = mean2 * (1 - mean2)\n else:\n s1 = desc.var(x)\n s2 = desc.var(y)\n # Compute grouped variance\n sd = np.sqrt(((n1 - 1) * s1 + (n2 - 1) * s2) / (n1 + n2 - 2))\n # Degrees of freedom\n 
df = n1 + n2 - 2\n # Calculate the t statistic\n t = (mean1 - mean2) / sd\n\n # calculate the critical value\n cv = scp.t.ppf(quant_order, df)\n # calculate the p-value\n if (n1 > 30) & (n2 > 30):\n p = 2.0 * (1.0 - scp.norm.cdf(math.fabs(t)))\n else:\n p = 2.0 * (1.0 - scp.t.cdf(math.fabs(t), df=df))\n\n extra = f\" * E(X1) = {round(mean1, 3)} and E(X2) = {round(mean2, 3)} \\n\"\n extra += \" * Performed test for paired samples \\n\" if paired else ''\n extra += \" * Large sample sizes, t ~ N(0, 1) from CLT\" if (n1 > 30) & (n2 > 30) else ' * Small sample sizes, assumed t ~ T(n-1)'\n\n _summ = test_summary(df=df, critical_value=cv, t_value=t,\n p_value=p,\n title='Two Samples Student test',\n h0=h0, h1=h1,\n alpha=alpha,\n extra=extra)\n\n if return_tuple:\n return t, cv, p\n else:\n return _summ", "def t_test(sample1, sample2, paired=False, alpha=0.05,\n alternative='two-sided', correction='auto', r=0.707,\n show_graph=True, **kwargs):\n confidence = 1 - alpha\n df_result = pg.ttest(\n sample1,\n sample2,\n paired=paired,\n confidence=confidence,\n alternative=alternative,\n correction=correction,\n r=r\n )\n if show_graph:\n if paired:\n difference = [x - y for x, y in zip(sample1, sample2)]\n Visualization.histogram(difference, **kwargs)\n else:\n Visualization.density_plot(sample1, sample2,\n fig_size=(5, 4), **kwargs)\n return HypothesisTester.define_hypothesis(df_result, 'mean',\n alternative, paired,\n alpha).T", "def lttest_ind (a, b, printit=0, name1='Samp1', name2='Samp2', writemode='a'):\r\n x1 = mean(a)\r\n x2 = mean(b)\r\n v1 = stdev(a)**2\r\n v2 = stdev(b)**2\r\n n1 = len(a)\r\n n2 = len(b)\r\n df = n1+n2-2\r\n svar = ((n1-1)*v1+(n2-1)*v2)/float(df)\r\n t = (x1-x2)/math.sqrt(svar*(1.0/n1 + 1.0/n2))\r\n prob = betai(0.5*df,0.5,df/(df+t*t))\r\n\r\n if printit <> 0:\r\n statname = 'Independent samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n1,x1,v1,min(a),max(a),\r\n name2,n2,x2,v2,min(b),max(b),\r\n statname,t,prob)\r\n return t,prob", "def significantly_different(\n self, list_a, list_b,\n significance_level=SIGNIFICANCE_LEVEL): # pragma: no cover\n step_result = self.api.m.python(\n 'Checking sample difference',\n self.api.resource('significantly_different.py'),\n [json.dumps(list_a), json.dumps(list_b), str(significance_level)],\n stdout=self.api.m.json.output())\n results = step_result.stdout\n if results is None:\n assert self.dummy_builds\n return True\n significantly_different = results['significantly_different']\n step_result.presentation.logs[str(significantly_different)] = [\n 'See json.output for details']\n return significantly_different", "def ttest(array1, array2):\n diff = np.mean(array1) - np.mean(array2)\n if diff < c.cart_p60:\n return c.low_score\n if array1.size <= 1 or array2.size <= 1:\n return min(diff, c.single_item_cart_max)\n return 1 - ttest_ind(array1, array2, equal_var=False).pvalue\n # return diff", "def _compare_pre_post_sampling(X_train, y_train, X_new, y_new):\n train_data_info = _basic_data_info(X_train, y_train)\n new_data_info = _basic_data_info(X_new, y_new)\n\n print(\"\\nNum samples increased from {} to {} samples\\n\".format(train_data_info[\"Num_samples\"], new_data_info[\"Num_samples\"]))\n\n # Create pandas Dataframe\n df = pd.DataFrame(np.nan, index = train_data_info['classes'], columns = ['og_dist', 'og_prop', 'new_dist', 'new_prop'])\n df.iloc[:, 0] = train_data_info[\"counts\"]\n df.iloc[:, 1] = train_data_info[\"percs\"]\n df.iloc[:, 2] = new_data_info[\"counts\"]\n df.iloc[:, 3] = 
new_data_info[\"percs\"]\n\n df.index.name = \"classes\"\n\n # Difference in distributions\n print(\"Count comparison is as follows: \\n\", df)", "def TestStatistic(self, data):\n group1, group2 = data\n test_stat = abs(group1.mean() - group2.mean())\n return test_stat", "def lttest_rel (a,b,printit=0,name1='Sample1',name2='Sample2',writemode='a'):\r\n if len(a)<>len(b):\r\n raise ValueError, 'Unequal length lists in ttest_rel.'\r\n x1 = mean(a)\r\n x2 = mean(b)\r\n v1 = var(a)\r\n v2 = var(b)\r\n n = len(a)\r\n cov = 0\r\n for i in range(len(a)):\r\n cov = cov + (a[i]-x1) * (b[i]-x2)\r\n df = n-1\r\n cov = cov / float(df)\r\n sd = math.sqrt((v1+v2 - 2.0*cov)/float(n))\r\n t = (x1-x2)/sd\r\n prob = betai(0.5*df,0.5,df/(df+t*t))\r\n\r\n if printit <> 0:\r\n statname = 'Related samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n,x1,v1,min(a),max(a),\r\n name2,n,x2,v2,min(b),max(b),\r\n statname,t,prob)\r\n return t, prob", "def test_t_two_sample_no_variance(self):\r\n # By default should return (None, None) to mimic R's t.test.\r\n x = array([1, 1., 1])\r\n y = array([0, 0, 0.0])\r\n self.assertEqual(t_two_sample(x, x), (nan, nan))\r\n self.assertEqual(t_two_sample(x, y), (nan, nan))\r\n\r\n # Test none_on_zero_variance=False on various tail types. We use\r\n # self.assertEqual instead of self.assertFloatEqual because the latter\r\n # sees inf and -inf as being equal.\r\n\r\n # Two tailed: a < b\r\n self.assertEqual(t_two_sample(y, x, none_on_zero_variance=False),\r\n (float('-inf'), 0.0))\r\n\r\n # Two tailed: a > b\r\n self.assertEqual(t_two_sample(x, y, none_on_zero_variance=False),\r\n (float('inf'), 0.0))\r\n\r\n # One-tailed 'high': a < b\r\n self.assertEqual(t_two_sample(y, x, tails='high',\r\n none_on_zero_variance=False),\r\n (float('-inf'), 1.0))\r\n\r\n # One-tailed 'high': a > b\r\n self.assertEqual(t_two_sample(x, y, tails='high',\r\n none_on_zero_variance=False),\r\n (float('inf'), 0.0))\r\n\r\n # One-tailed 'low': a < b\r\n self.assertEqual(t_two_sample(y, x, tails='low',\r\n none_on_zero_variance=False),\r\n (float('-inf'), 0.0))\r\n\r\n # One-tailed 'low': a > b\r\n self.assertEqual(t_two_sample(x, y, tails='low',\r\n none_on_zero_variance=False),\r\n (float('inf'), 1.0))\r\n\r\n # Should still receive (nan, nan) if the lists have no variance and\r\n # have the same single value.\r\n self.assertEqual(t_two_sample(x, x, none_on_zero_variance=False),\r\n (nan, nan))\r\n self.assertEqual(t_two_sample(x, [1, 1], none_on_zero_variance=False),\r\n (nan, nan))", "def compare(sampl_predict, sampl_real):\n difference = 0\n for i in range(len(sampl_predict)):\n if sampl_predict[i] != sampl_real[i]:\n difference += 1\n\n return difference", "def test_mc_t_two_sample_no_mc(self):\r\n x = array([1, 1, 1])\r\n y = array([0, 0, 0])\r\n self.assertEqual(mc_t_two_sample(x, x), (nan, nan, [], nan))", "def test_mc_t_two_sample_single_obs_sample(self):\r\n sample = array([4.02, 3.88, 3.34, 3.87, 3.18])\r\n x = array([3.02])\r\n exp = (-1.5637254, 0.1929248)\r\n obs = mc_t_two_sample(x, sample)\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertFloatEqual(len(obs[2]), 999)\r\n self.assertIsProb(obs[3])\r\n\r\n exp = (1.5637254, 0.1929248)\r\n obs = mc_t_two_sample(sample, x)\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertFloatEqual(len(obs[2]), 999)\r\n self.assertIsProb(obs[3])\r\n\r\n # Test the case where we can have no variance in the permuted lists.\r\n x = array([1, 1, 2])\r\n y = array([1])\r\n exp = (0.5, 0.666666666667)\r\n obs = mc_t_two_sample(x, y)\r\n 
self.assertFloatEqual(obs[:2], exp)\r\n self.assertFloatEqual(len(obs[2]), 999)\r\n self.assertIsProb(obs[3])", "def test_unequal_variance_two_tailed(self):\n rng = np.random.default_rng(135481321)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(10, 2, 200)\n data2 = rng.normal(10, 2, 200)\n\n ttest = unequal_variance_ttest(data1, data2)\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)", "def test_mc_t_two_sample_unbalanced_obs(self):\r\n # Verified against R's t.test() and Deducer::perm.t.test().\r\n exp = (-0.10302479888889175, 0.91979753020527177)\r\n I = array([7.2, 7.1, 9.1, 7.2, 7.3, 7.2])\r\n II = array([8.8, 7.5, 7.7, 7.6, 7.4, 6.7, 7.2])\r\n obs = mc_t_two_sample(I, II)\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertEqual(len(obs[2]), 999)\r\n self.assertCorrectPValue(0.8, 0.9, mc_t_two_sample, [I, II],\r\n p_val_idx=3)", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[-1])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[-1])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[-1])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def test_f_two_sample(self):\r\n\r\n # The expected values in this test are obtained through R.\r\n # In R the F test is var.test(x,y) different alternative hypotheses\r\n # can be specified (two sided, less, or greater).\r\n # The vectors are random samples from a particular normal distribution\r\n #(mean and sd specified).\r\n\r\n # a: 50 elem, mean=0 sd=1\r\n a = [-0.70701689, -1.24788845, -1.65516470, 0.10443876, -0.48526915,\r\n -0.71820656, -1.02603596, 0.03975982, -2.23404324, -0.21509363,\r\n 0.08438468, -0.01970062, -0.67907971, -0.89853667, 1.11137131,\r\n 0.05960496, -1.51172084, -0.79733957, -1.60040659, 0.80530639,\r\n -0.81715836, -0.69233474, 0.95750665, 0.99576429, -1.61340216,\r\n -0.43572590, -1.50862327, 0.92847551, -0.68382338, -1.12523522,\r\n -0.09147488, 0.66756023, -0.87277588, -1.36539039, -0.11748707,\r\n -1.63632578, -0.31343078, -0.28176086, 0.33854483, -0.51785630,\r\n 2.25360559, -0.80761191, 1.18983499, 0.57080342, -1.44601700,\r\n -0.53906955, -0.01975266, -1.37147915, -0.31537616, 0.26877544]\r\n\r\n # b: 50 elem, mean=0, sd=1.2\r\n b = [\r\n 0.081418743, 0.276571612, -\r\n 1.864316504, 0.675213612, -0.769202643,\r\n 0.140372825, -1.426250184, 0.058617884, -\r\n 0.819287409, -0.007701916,\r\n -0.782722020, -\r\n 0.285891593, 0.661980419, 0.383225191, 0.622444946,\r\n -0.192446150, 0.297150571, 0.408896059, -\r\n 0.167359383, -0.552381362,\r\n 0.982168338, 1.439730446, 1.967616101, -\r\n 0.579607307, 1.095590943,\r\n 0.240591302, -1.566937143, -\r\n 0.199091349, -1.232983905, 0.362378169,\r\n 1.166061081, -0.604676222, -\r\n 0.536560206, -0.303117595, 1.519222792,\r\n -0.319146503, 2.206220810, -\r\n 0.566351124, -0.720397392, -0.452001377,\r\n 0.250890097, 0.320685395, -\r\n 1.014632725, -3.010346273, -1.703955054,\r\n 0.592587381, -1.237451255, 0.172243366, -0.452641122, -0.982148581]\r\n\r\n # c: 60 elem, mean=5, sd=1\r\n c = [4.654329, 5.242129, 6.272640, 5.781779, 4.391241, 3.800752,\r\n 4.559463, 4.318922, 3.243020, 5.121280, 4.126385, 5.541131,\r\n 4.777480, 5.646913, 6.972584, 3.817172, 6.128700, 4.731467,\r\n 6.762068, 5.082983, 5.298511, 
5.491125, 4.532369, 4.265552,\r\n 5.697317, 5.509730, 2.935704, 4.507456, 3.786794, 5.548383,\r\n 3.674487, 5.536556, 5.297847, 2.439642, 4.759836, 5.114649,\r\n 5.986774, 4.517485, 4.579208, 4.579374, 2.502890, 5.190955,\r\n 5.983194, 6.766645, 4.905079, 4.214273, 3.950364, 6.262393,\r\n 8.122084, 6.330007, 4.767943, 5.194029, 3.503136, 6.039079,\r\n 4.485647, 6.116235, 6.302268, 3.596693, 5.743316, 6.860152]\r\n\r\n # d: 30 elem, mean=0, sd =0.05\r\n d = [\r\n 0.104517366, 0.023039678, 0.005579091, 0.052928250, 0.020724823,\r\n -0.060823243, -0.019000890, -\r\n 0.064133996, -0.016321594, -0.008898334,\r\n -0.027626992, -0.051946186, 0.085269587, -\r\n 0.031190678, 0.065172938,\r\n -0.054628573, 0.019257306, -\r\n 0.032427056, -0.058767356, 0.030927400,\r\n 0.052247357, -\r\n 0.042954937, 0.031842104, 0.094130522, -0.024828465,\r\n 0.011320453, -0.016195062, 0.015631245, -0.050335598, -0.031658335]\r\n\r\n a, b, c, d = map(array, [a, b, c, d])\r\n self.assertEqual(map(len, [a, b, c, d]), [50, 50, 60, 30])\r\n\r\n # allowed error. This big, because results from R\r\n # are rounded at 4 decimals\r\n error = 1e-4\r\n\r\n self.assertFloatEqual(f_two_sample(a, a), (49, 49, 1, 1), eps=error)\r\n self.assertFloatEqual(f_two_sample(a, b), (49, 49, 0.8575, 0.5925),\r\n eps=error)\r\n self.assertFloatEqual(f_two_sample(b, a), (49, 49, 1.1662, 0.5925),\r\n eps=error)\r\n self.assertFloatEqual(f_two_sample(a, b, tails='low'),\r\n (49, 49, 0.8575, 0.2963), eps=error)\r\n self.assertFloatEqual(f_two_sample(a, b, tails='high'),\r\n (49, 49, 0.8575, 0.7037), eps=error)\r\n self.assertFloatEqual(f_two_sample(a, c),\r\n (49, 59, 0.6587, 0.1345), eps=error)\r\n # p value very small, so first check df's and F value\r\n self.assertFloatEqualAbs(f_two_sample(d, a, tails='low')[0:3],\r\n (29, 49, 0.0028), eps=error)\r\n assert f_two_sample(d, a, tails='low')[3] < 2.2e-16 # p value\r", "def test_mc_t_two_sample(self):\r\n # Verified against R's t.test() and Deducer::perm.t.test().\r\n\r\n # With numpy array as input.\r\n exp = (-0.11858541225631833, 0.90756579317867436)\r\n I = array([7.2, 7.1, 9.1, 7.2, 7.3, 7.2, 7.5])\r\n II = array([8.8, 7.5, 7.7, 7.6, 7.4, 6.7, 7.2])\r\n obs = mc_t_two_sample(I, II)\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertEqual(len(obs[2]), 999)\r\n self.assertCorrectPValue(0.8, 0.9, mc_t_two_sample, [I, II],\r\n p_val_idx=3)\r\n\r\n # With python list as input.\r\n exp = (-0.11858541225631833, 0.90756579317867436)\r\n I = [7.2, 7.1, 9.1, 7.2, 7.3, 7.2, 7.5]\r\n II = [8.8, 7.5, 7.7, 7.6, 7.4, 6.7, 7.2]\r\n obs = mc_t_two_sample(I, II)\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertEqual(len(obs[2]), 999)\r\n self.assertCorrectPValue(0.8, 0.9, mc_t_two_sample, [I, II],\r\n p_val_idx=3)\r\n\r\n exp = (-0.11858541225631833, 0.45378289658933718)\r\n obs = mc_t_two_sample(I, II, tails='low')\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertEqual(len(obs[2]), 999)\r\n self.assertCorrectPValue(0.4, 0.47, mc_t_two_sample, [I, II],\r\n {'tails': 'low'}, p_val_idx=3)\r\n\r\n exp = (-0.11858541225631833, 0.54621710341066287)\r\n obs = mc_t_two_sample(I, II, tails='high', permutations=99)\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertEqual(len(obs[2]), 99)\r\n self.assertCorrectPValue(0.4, 0.62, mc_t_two_sample, [I, II],\r\n {'tails': 'high', 'permutations': 99}, p_val_idx=3)\r\n\r\n exp = (-2.8855783649036986, 0.99315596652421401)\r\n obs = mc_t_two_sample(I, II, tails='high', permutations=99, exp_diff=1)\r\n self.assertFloatEqual(obs[:2], exp)\r\n 
self.assertEqual(len(obs[2]), 99)\r\n self.assertCorrectPValue(0.55, 0.99, mc_t_two_sample, [I, II],\r\n {'tails': 'high', 'permutations': 99, 'exp_diff': 1}, p_val_idx=3)", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[[-1, -10]])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[[-1, -10]])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def test_equivalence_to_filter_output(self):\n error = np.linalg.norm(self.postmean - self.mean[[-1, -10]])/np.linalg.norm(self.mean[-1])\n self.assertLess(error, 1e-12)", "def test_repeated_two_tailed(self):\n rng = np.random.default_rng(6464584234)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(10, 2, 100)\n data2 = data1 + rng.normal(0, .02, 100)\n\n ttest = repeated_ttest(data1, data2)\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)", "def test_sad_similarity_measure_values():\n \n patch1 = torch.tensor([1.3, 4.5, 7.2, 0.2, -0.6])\n patch2 = torch.tensor([0.2, 4.4, 7.6, 0.1, 1.3])\n\n sad = sad_similarity_measure(patch1, patch2)\n\n assert np.isclose(sad, 3.6, atol=1e-2)", "def test_twodstats():\n if __name__ == '__main__':\n logger = piff.config.setup_logger(2)\n else:\n logger = None\n\n model = piff.Gaussian(fastfit=True)\n interp = piff.Polynomial(order=1) # should find that order=1 is better\n # create background model\n stars, true_model = generate_starlist(100)\n psf = piff.SimplePSF(model, interp)\n psf.fit(stars, None, None)\n stars = psf.stars # These have the right fit parameters\n\n # check the coeffs of sigma and g2, which are actually linear fits\n # skip g1 since it is actually a 2d parabola\n # factor of 0.263 is to account for going from pixel xy to wcs uv\n np.testing.assert_almost_equal(psf.interp.coeffs[0].flatten(),\n np.array([0.4, 0, 1. 
/ (0.263 * 2048), 0]), decimal=4)\n np.testing.assert_almost_equal(psf.interp.coeffs[2].flatten(),\n np.array([-0.1 * 1000 / 2048, 0, 0.1 / (0.263 * 2048), 0]),\n decimal=4)\n\n stats = piff.TwoDHistStats(nbins_u=5, nbins_v=5) # implicitly np.median\n stats.compute(psf, stars, logger=logger)\n # check the twodhists\n # get the average value in the bin\n u_i = 3\n v_i = 3\n icen = stats.twodhists['u'][v_i, u_i] / 0.263\n jcen = stats.twodhists['v'][v_i, u_i] / 0.263\n print('icen = ',icen)\n print('jcen = ',jcen)\n icenter = 1000\n jcenter = 2000\n # the average value in the bin should match up with the model for the average coordinates\n sigma, g1, g2 = psf_model(icen, jcen, icenter, jcenter)\n gsq = g1**2 + g2**2\n T = 2*sigma**2 * (1+gsq)/(1-gsq)\n T_average = stats.twodhists['T'][v_i, u_i]\n g1_average = stats.twodhists['g1'][v_i, u_i]\n g2_average = stats.twodhists['g2'][v_i, u_i]\n # assert equal to 4th decimal\n print('T, g1, g2 = ',[T,g1,g2])\n print('av T, g1, g2 = ',[T_average,g1_average,g2_average])\n np.testing.assert_almost_equal([T, g1, g2], [T_average, g1_average, g2_average],\n decimal=2)\n\n # Test the plotting and writing\n twodstats_file = os.path.join('output','twodstats.pdf')\n stats.write(twodstats_file)\n\n with np.testing.assert_raises(ValueError):\n stats.write() # If not given in constructor, must give file name here.\n\n # repeat for whisker\n stats = piff.WhiskerStats(nbins_u=21, nbins_v=21, reducing_function='np.mean')\n stats.compute(psf, stars)\n # Test the plotting and writing\n whisker_file = os.path.join('output','whiskerstats.pdf')\n stats.write(whisker_file)\n with np.testing.assert_raises(ValueError):\n stats.write()\n\n # With large number of bins, many will have no objects. This is ok.\n # Also, can use other np functions like max, std, instead to get different stats\n # Not sure when these would be useful, but they are allowed.\n # And, check usage where file_name is given in init.\n twodstats_file2 = os.path.join('output','twodstats.pdf')\n stats2 = piff.TwoDHistStats(nbins_u=50, nbins_v=50, reducing_function='np.std',\n file_name=twodstats_file2)\n with np.testing.assert_raises(RuntimeError):\n stats2.write() # Cannot write before compute\n stats2.compute(psf, stars, logger=logger)\n stats2.write()\n\n whisker_file2 = os.path.join('output','whiskerstats.pdf')\n stats2 = piff.WhiskerStats(nbins_u=100, nbins_v=100, reducing_function='np.max',\n file_name=whisker_file2)\n with np.testing.assert_raises(RuntimeError):\n stats2.write() # Cannot write before compute\n stats2.compute(psf, stars)\n stats2.write()", "def test_statistic(self):\n for seed in range(5):\n\n random_state = np.random.RandomState(seed)\n\n for i in range(4, self.test_max_size + 1):\n arr1 = random_state.rand(i, 1)\n arr2 = random_state.rand(i, 1)\n\n stat = dcor_internals._distance_correlation_sqr_naive(\n arr1, arr2)\n stat_fast = dcor_internals._distance_correlation_sqr_fast(\n arr1, arr2)\n\n self.assertAlmostEqual(stat, stat_fast)", "def compare(self):\n samples = self.data[-2:]\n if len(samples) != 2:\n return\n\n timestamp_a, data_a = samples[0]\n timestamp_b, data_b = samples[1]\n LOG.debug(\"%s comparing sample from %s to %s\", self, timestamp_a, timestamp_b)\n changes = dict_compare(data_a, data_b)\n for key in changes:\n OUTPUT.info(\"%s:%s: %s -> %s\", self, key, get_value(data_a, key), get_value(data_b, key))", "def compare_samples(populations,parametric=False):\n from scipy.stats import mannwhitneyu, ttest_ind, f_oneway, kruskal, ranksums\n from 
statsmodels.stats.multicomp import pairwise_tukeyhsd\n populations = [np.array(pop) for pop in populations] #obscure line to take out missing values\n populations = [pop[~np.isnan(pop)] for pop in populations]\n\n if len(populations) == 2:\n if parametric:\n stat, p_value = ttest_ind(*populations)\n print(\"P-value t-test: {0:2.10f}\".format(p_value))\n else:\n stat, p_value1 = mannwhitneyu(*populations)\n print(\"P-value MWW: {0:2.10f}\".format(p_value))\n stat, p_value2 = ranksums(*populations)\n print(\"P-value Ranksum: {0:2.10f}\".format(p_value))\n \n if len(populations) > 2:\n if parametric:\n stat, p_value = f_oneway(*populations)\n print(\"P-value anova: {0:2.10f}\".format(p_value))\n else:\n stat, p_value = kruskal(*populations) \n print(\"P-value kruskal: {0:2.10f}\".format(p_value))\n \n if p_value < 0.05:\n flatten_pop = []\n label_pop = []\n for i,pop in enumerate(populations):\n flatten_pop += list(pop)\n label_pop += [\"pop{0}\".format(i)]*len(pop)\n \n res2 = pairwise_tukeyhsd(np.asarray(flatten_pop),label_pop)\n print(\"Printing pair comparisons using Tukey HSD\")\n print(res2)\n res2.plot_simultaneous(comparison_name=None,xlabel='diffs',ylabel='grups')\n \n print((\"Means: \" + \", {}\"*len(populations)).format(*[np.mean(_) for _ in populations]))\n print((\"STDs: \" + \", {}\"*len(populations)).format(*[np.std(_) for _ in populations]))\n \n \n return p_value", "def test_t_one_sample(self):\r\n x = array(range(-5, 5))\r\n y = array(range(-1, 10))\r\n self.assertFloatEqualAbs(t_one_sample(x), (-0.5222, 0.6141), 1e-4)\r\n self.assertFloatEqualAbs(t_one_sample(y), (4, 0.002518), 1e-4)\r\n # do some one-tailed tests as well\r\n self.assertFloatEqualAbs(\r\n t_one_sample(y, tails='low'), (4, 0.9987), 1e-4)\r\n self.assertFloatEqualAbs(\r\n t_one_sample(y, tails='high'), (4, 0.001259), 1e-4)", "def test_one_sample_right_tailed(self):\n rng = np.random.default_rng(615419864354)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(12.2, 1, 100)\n\n ttest = one_sample_ttest(data1, 12.2, 'right')\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)", "def _compare_results(y_pred, y_pred_sampled, y_true):\n scores_og = _compute_scores(y_pred, y_true)\n scores_samp = _compute_scores(y_pred_sampled, y_true)\n\n # Aggreggate both results\n result_comp = pd.concat({\"Og\": scores_og, \"samp\": scores_samp}, axis = 1)\n\n return result_comp", "def ks_test(a,b):\n a,b = np.asarray(a),np.asarray(b)\n if len(a) != len(a):\n raise ValueError(\"a and b must have the same size\")\n \n return stats.ks_2samp(a,b)", "def test_noise_equiv_bandwidth():\n win = windows.blackmanharris(2000)\n assert np.isclose(2, 1.0 / utils.noise_equivalent_bandwidth(win), rtol=1e-2)", "def t_test1(data1,data2):\n if not isinstance(data1,np.ndarray):\n\tdata1 = np.array(data1)\n if not isinstance(data2,np.ndarray):\n\tdata2 = np.array(data2)\n\n N1, N2 = len(data1), len(data2)\n mean1, mean2 = np.mean(data1), np.mean(data2)\n # Eq. 14.2.1\n sD = np.sqrt( (np.sum( (data1 - np.ones(N1) * mean1) ** 2.) + np.sum( (data2 - np.ones(N2) * mean2) ** 2.)) / (N1 + N2 - 2.) * (1./N1 + 1./N2))\n T = (mean1 - mean2) / sD\n return t.cdf(T, N1 + N2 - 2),T,N1 + N2 - 2", "def mw_test(n1, n2):\r\n # find smaller sample, defined historically as n2. 
modify the names so we\r\n # don't risk modifying data outside the scope of the function.\r\n if len(n2) > len(n1):\r\n sn1, sn2 = array(n2), array(n1)\r\n else:\r\n sn1, sn2 = array(n1), array(n2)\r\n # sum the ranks of s2 by using the searchsorted magic. the logic is that we\r\n # use a sorted copy of the data from both groups (n1 and n2) to figure out\r\n # at what index we would insert the values from sample 2. by assessing the\r\n # difference between the index that value x would be inserted in if we were\r\n # doing left insertion versus right insertion, we can tell how many values\r\n # are tied with x. this allows us to calculate the average ranks easily.\r\n data = sorted(hstack([sn1, sn2]))\r\n ssl = searchsorted(data, sn2, 'left')\r\n ssr = searchsorted(data, sn2, 'right')\r\n sum_sn2_ranks = ((ssl + ssr + 1) / 2.).sum()\r\n ln1, ln2 = sn1.size, sn2.size\r\n C = (ln1 * ln2) + (ln2 * (ln2 + 1) / 2.) - sum_sn2_ranks\r\n U = max(C, ln1 * ln2 - C)\r\n # now we calculate the pvalue using the normal approximation and the two\r\n # tailed test. our formula corrects for ties, because in the case where\r\n # there are no ties, the forumla on the bottom of pg 429=the formula on the\r\n # bottom of pg 430.\r\n numerator = (U - ln1 * ln2 / 2.)\r\n # follwing three lines give the T value in the formula on page 430. same\r\n # logic as above; we calculate the left and right indices of the unique\r\n # values for all combined data from both samples, then calculate ti**3-ti\r\n # for each value.\r\n ux = unique(data)\r\n uxl = searchsorted(data, ux, 'left')\r\n uxr = searchsorted(data, ux, 'right')\r\n T = _corr_kw(uxr - uxl).sum()\r\n denominator = sqrt(((ln1 * ln2) / float((ln1 + ln2) * (ln1 + ln2 - 1))) * (((ln1 + ln2) ** 3\r\n - (ln1 + ln2) - T) / 12.))\r\n if denominator == 0:\r\n # Warning: probability of U can't be calculated by mw_test\r\n # because all ranks of data were tied. 
Returning nan as pvalue.\r\n return U, nan\r\n else:\r\n pval = zprob(numerator / float(denominator))\r\n return U, pval", "def test_mc_t_two_sample_no_permuted_variance(self):\r\n # Verified against R's t.test() and Deducer::perm.t.test().\r\n x = array([1, 1, 2])\r\n y = array([2, 2, 1])\r\n\r\n exp = (-0.70710678118654791, 0.51851851851851838)\r\n obs = mc_t_two_sample(x, y, permutations=10000)\r\n\r\n self.assertFloatEqual(obs[:2], exp)\r\n self.assertEqual(len(obs[2]), 10000)\r\n self.assertCorrectPValue(0.97, 1.0, mc_t_two_sample, [x, y],\r\n {'permutations': 10000}, p_val_idx=3)", "def test_t_paired_2tailed(self):\r\n x, y = self.x, self.y\r\n # check value of t and the probability for 2-tailed\r\n self.assertFloatEqual(t_paired(y, x)[0], 19.7203, 1e-4)\r\n self.assertFloatEqual(t_paired(y, x)[1], 1.301439e-11, 1e-4)", "def attest_rel (a,b,dimension=None,printit=0,name1='Samp1',name2='Samp2',writemode='a'):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n b = N.ravel(b)\r\n dimension = 0\r\n if len(a)<>len(b):\r\n raise ValueError, 'Unequal length arrays.'\r\n x1 = amean(a,dimension)\r\n x2 = amean(b,dimension)\r\n v1 = avar(a,dimension)\r\n v2 = avar(b,dimension)\r\n n = a.shape[dimension]\r\n df = float(n-1)\r\n d = (a-b).astype('d')\r\n\r\n denom = N.sqrt((n*N.add.reduce(d*d,dimension) - N.add.reduce(d,dimension)**2) /df)\r\n zerodivproblem = N.equal(denom,0)\r\n denom = N.where(zerodivproblem,1,denom) # avoid zero-division in 1st place\r\n t = N.add.reduce(d,dimension) / denom # N-D COMPUTATION HERE!!!!!!\r\n t = N.where(zerodivproblem,1.0,t) # replace NaN/wrong t-values with 1.0\r\n probs = abetai(0.5*df,0.5,float(df)/(df+t*t))\r\n if type(t) == N.ndarray:\r\n probs = N.reshape(probs,t.shape)\r\n if probs.shape == (1,):\r\n probs = probs[0]\r\n\r\n if printit <> 0:\r\n statname = 'Related samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n,x1,v1,N.minimum.reduce(N.ravel(a)),\r\n N.maximum.reduce(N.ravel(a)),\r\n name2,n,x2,v2,N.minimum.reduce(N.ravel(b)),\r\n N.maximum.reduce(N.ravel(b)),\r\n statname,t,probs)\r\n return\r\n return t, probs", "def testAlphaTwoSamplesMatchANormalDistribution(self):\n num_samples = 16384\n scale = 1.7\n rng = random.PRNGKey(0)\n samples = self._distribution.draw_samples(rng, 2 * jnp.ones(num_samples),\n scale * jnp.ones(num_samples))\n # Perform the Kolmogorov-Smirnov test against a normal distribution.\n ks_statistic = scipy.stats.kstest(samples, 'norm', (0., scale)).statistic\n self.assertLess(ks_statistic, 0.01)", "def _compare(self,esnA,esnB,should_be):\n X,y,Xp = self.task\n test = self.assertTrue if should_be==\"same\" else self.assertFalse\n test(np.all(np.equal(esnA.W, esnB.W)))\n test(np.all(np.equal(esnA.W_in, esnB.W_in)))\n test(np.all(np.equal(esnA.W_feedb, esnB.W_feedb)))\n test(np.all(np.equal(esnA.fit(X,y), esnB.fit(X,y))))\n test(np.all(np.equal(esnA.W_out, esnB.W_out)))\n test(np.all(np.equal(esnA.predict(Xp), esnB.predict(Xp))))", "def t_two_sample(x, y, tails=2):\n assert tails in (1,2), \"invalid: tails must be 1 or 2, found %s\"%str(tails)\n x, y = np.asarray(x), np.asarray(y)\n nx, ny = x.size, y.size\n df = nx + ny - 2\n s_xy = np.sqrt(((nx - 1)*x.var() + (ny - 1)*y.var()) / df)\n t_obs = (x.mean() - y.mean()) / (s_xy * np.sqrt(1./nx + 1./ny))\n p_value = tails * st.t.sf(abs(t_obs), df)\n return TtestResults(t_obs, p_value)", "def ttest_ind(\n x1,\n x2,\n alternative=\"two-sided\",\n usevar=\"pooled\",\n weights=(None, None),\n value=0,\n):\n cm = CompareMeans(\n DescrStatsW(x1, weights=weights[0], 
ddof=0),\n DescrStatsW(x2, weights=weights[1], ddof=0),\n )\n tstat, pval, dof = cm.ttest_ind(\n alternative=alternative, usevar=usevar, value=value\n )\n\n return tstat, pval, dof", "def ttest_ind_corrected(performance_a, performance_b, k=10, r=10):\n df = k * r - 1\n\n x = performance_a - performance_b\n m = np.mean(x)\n\n sigma_2 = np.var(x, ddof=1)\n denom = np.sqrt((1 / k * r + 1 / (k - 1)) * sigma_2)\n\n with np.errstate(divide='ignore', invalid='ignore'):\n t = np.divide(m, denom)\n\n prob = stats.t.sf(np.abs(t), df) * 2\n\n return t, prob", "def discrete_one_samp_ks(distribution1: np.array, distribution2: np.array, num_samples: int) -> Tuple[float, bool]:\n cutoff = 1.36 / math.sqrt(num_samples)\n ecdf1 = np.array([sum(distribution1[:i + 1]) for i in range(len(distribution1))])\n ecdf2 = np.array([sum(distribution2[:i + 1]) for i in range(len(distribution2))])\n max_diff = np.absolute(ecdf1 - ecdf2).max()\n return max_diff, max_diff < cutoff", "def comparison_test():\n for pose in SE2.interesting_points():\n se2 = se2_from_SE2(pose)\n SE2a = SE2_from_se2_slow(se2)\n SE2b = SE2_from_se2(se2)\n # printm('pose', pose, 'se2', se2)\n # printm('SE2a', SE2a, 'SE2b', SE2b)\n SE2.assert_close(SE2a, pose)\n # print('SE2a = pose Their distance is %f' % d)\n SE2.assert_close(SE2b, pose)\n # print('SE2b = pose Their distance is %f' % d)\n assert_allclose(SE2a, SE2b, atol=1e-8, err_msg=\"SE2a != SE2b\")\n assert_allclose(SE2a, pose, atol=1e-8, err_msg=\"SE2a != pose\")\n assert_allclose(SE2b, pose, atol=1e-8, err_msg=\"SE2b != pose\")", "def t_tests(self):\n se = self.se()\n t = self._coef / se\n p = 2 * stats.distributions.t.sf(np.abs(t), self._rdf)\n return (t, p)", "def comparison_test_2():\n for pose in SE2.interesting_points():\n se2a = se2_from_SE2(pose)\n se2b = se2_from_SE2_slow(pose)\n # printm('pose', pose, 'se2a', se2a, 'se2b', se2b)\n assert_allclose(se2a, se2b, atol=1e-8)", "def test_paired_difference_analyses(self):\r\n actual = paired_difference_analyses(\r\n self.personal_ids_to_state_values1,\r\n ['firmicutes-abundance',\r\n 'bacteroidetes-abundance'],\r\n ['Pre', 'Post'],\r\n output_dir=self.test_out,\r\n ymin=0.0,\r\n ymax=1.0)\r\n self.assertTrue(exists(join(self.test_out,\r\n 'paired_difference_comparisons.txt')))\r\n self.assertTrue(\r\n exists(join(self.test_out, 'firmicutes-abundance.pdf')))\r\n self.assertTrue(\r\n exists(join(self.test_out, 'bacteroidetes-abundance.pdf')))\r\n # three output paths returned\r\n self.assertEqual(len(actual[0]), 5)\r\n # expected t values returned, they should be less than (firmicutes) or greater (bacteroidetes) than 2 \r\n self.assertLess(abs(actual[1]['firmicutes-abundance'][4]), 2)\r\n self.assertLess(2, abs(actual[1]['bacteroidetes-abundance'][4]))", "def test_two_round_data_points(self):\r\n values = [2, 3]\r\n expect_mean_result = 2.5\r\n expected_sd_result = .5\r\n result = multipoint_mean_sd(values)\r\n\r\n self.assertEqual(expect_mean_result, result['mean_result'])\r\n self.assertEqual(expected_sd_result, result['sd_result'])", "def apaired(x,y):\r\n samples = ''\r\n while samples not in ['i','r','I','R','c','C']:\r\n print '\\nIndependent or related samples, or correlation (i,r,c): ',\r\n samples = raw_input()\r\n\r\n if samples in ['i','I','r','R']:\r\n print '\\nComparing variances ...',\r\n# USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112\r\n r = obrientransform(x,y)\r\n f,p = F_oneway(pstats.colex(r,0),pstats.colex(r,1))\r\n if p<0.05:\r\n vartype='unequal, p='+str(round(p,4))\r\n else:\r\n 
vartype='equal'\r\n print vartype\r\n if samples in ['i','I']:\r\n if vartype[0]=='e':\r\n t,p = ttest_ind(x,y,None,0)\r\n print '\\nIndependent samples t-test: ', round(t,4),round(p,4)\r\n else:\r\n if len(x)>20 or len(y)>20:\r\n z,p = ranksums(x,y)\r\n print '\\nRank Sums test (NONparametric, n>20): ', round(z,4),round(p,4)\r\n else:\r\n u,p = mannwhitneyu(x,y)\r\n print '\\nMann-Whitney U-test (NONparametric, ns<20): ', round(u,4),round(p,4)\r\n\r\n else: # RELATED SAMPLES\r\n if vartype[0]=='e':\r\n t,p = ttest_rel(x,y,0)\r\n print '\\nRelated samples t-test: ', round(t,4),round(p,4)\r\n else:\r\n t,p = ranksums(x,y)\r\n print '\\nWilcoxon T-test (NONparametric): ', round(t,4),round(p,4)\r\n else: # CORRELATION ANALYSIS\r\n corrtype = ''\r\n while corrtype not in ['c','C','r','R','d','D']:\r\n print '\\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ',\r\n corrtype = raw_input()\r\n if corrtype in ['c','C']:\r\n m,b,r,p,see = linregress(x,y)\r\n print '\\nLinear regression for continuous variables ...'\r\n lol = [['Slope','Intercept','r','Prob','SEestimate'],[round(m,4),round(b,4),round(r,4),round(p,4),round(see,4)]]\r\n pstats.printcc(lol)\r\n elif corrtype in ['r','R']:\r\n r,p = spearmanr(x,y)\r\n print '\\nCorrelation for ranked variables ...'\r\n print \"Spearman's r: \",round(r,4),round(p,4)\r\n else: # DICHOTOMOUS\r\n r,p = pointbiserialr(x,y)\r\n print '\\nAssuming x contains a dichotomous variable ...'\r\n print 'Point Biserial r: ',round(r,4),round(p,4)\r\n print '\\n\\n'\r\n return None", "def ttest(x):\n from ..group.onesample import stat\n t = stat(x.T, id='student', axis=0)\n return np.squeeze(t)", "def error(self, in_sample=True):\n if in_sample:\n error = 0.0\n for i, point in enumerate(self.X):\n if self.Y[i] != self.rbf_classify(point):\n error += 1\n return error / 100\n else:\n error = 0.0\n for i, point in enumerate(self.test_X):\n if self.test_Y[i] != self.rbf_classify(point):\n error += 1\n return error / 10000", "def test_tau_score(sample_weight):\n np.testing.assert_almost_equal(\n tau_score(Y_true, Y_pred, sample_weight),\n _tau_score(Y_true, Y_pred, sample_weight))", "def ks_test(df1, df2):\n p_val_list = []\n stat_list = []\n for element in df1.columns:\n res = stats.ks_2samp(df1[element], df2[element])\n p_val_list.append(res[1])\n stat_list.append(res[0])\n n = np.argmax(stat_list)\n p_val = p_val_list[n]\n stat = stat_list[n]\n return p_val, stat, n, p_val_list, stat_list", "def t_test2(data1,data2):\n N1, N2 = len(data1), len(data2)\n mean1, mean2 = np.mean(data1), np.mean(data2)\n var1, var2= np.var(data1,ddof = 1), np.var(data2,ddof = 1)\n\n T = (mean1 - mean2) / np.sqrt(var1/N1 + var2/N2)\t# Eq. 14.2.3\n df = (var1/N1 + var2/N2)**2. 
/ ( (var1/N1)**2./(N1 - 1) + (var2/N2)**2./(N2 - 1))\n return t.cdf(T, df), T, df", "def test_mixed():\n # assert the distribution of the samples is close to the distribution of the data\n # using a kstest for continuous + a cstest for categorical.", "def test_ssd_similarity_measure_values():\n \n patch1 = torch.tensor([1.3, 4.5, 7.2, 0.2, -0.6])\n patch2 = torch.tensor([0.2, 4.4, 7.6, 0.1, 1.3])\n\n ssd = ssd_similarity_measure(patch1, patch2)\n assert np.isclose(ssd, 5.0, atol=1e-2)", "def assertSamplesEqualUpToTimestamp(self, a, b, msg=None):\n\n self.assertEqual(a.metric, b.metric, msg or\n 'Samples %s and %s have different metrics' % (a, b))\n if isinstance(a.value, float) and isinstance(b.value, float):\n self.assertAlmostEqual(\n a.value, b.value, msg=msg or\n 'Samples %s and %s have different values' % (a, b))\n else:\n self.assertEqual(\n a.value, b.value, msg or\n 'Samples %s and %s have different values' % (a, b))\n self.assertEqual(a.unit, b.unit, msg or\n 'Samples %s and %s have different units' % (a, b))\n self.assertDictEqual(a.metadata, b.metadata, msg or\n 'Samples %s and %s have different metadata' % (a, b))\n # Deliberately don't compare the timestamp fields of the samples.", "def ttest(\n data, dataLabel=None, paired=False, decimals=4,\n textline=False, units=None\n ):\n\n # test calling values\n if data is None or not isinstance(data, dict) or len(data.keys()) != 2:\n raise ValueError('RSTATS.ttest: data must be a dictionary'\n + ' with at exactly 2 keys'\n + '\\nUse KW (anova) for more than 2 groups')\n\n k = list(data.keys())\n g = {}\n n = {}\n gmean = {}\n gstd = {}\n\n g[1] = data[k[0]]\n g[2] = data[k[1]]\n n[1] = len(g[1])\n n[2] = len(g[2])\n # (w1, p1) = Stats.shapiro(g1, a=None, reta=False)\n # (w2, p2) = Stats.shapiro(g2, a=None, reta=False)\n # Tb, pb = Stats.bartlett(g1, g2) # do bartletss for equal variance\n equalVar = False\n\n if paired:\n print (len(g[1]), len(g[2]))\n (t, p) = Stats.ttest_rel(g[1], g[2])\n else:\n (t, p) = Stats.ttest_ind(g[1], g[2], equal_var=equalVar)\n gmean[1] = np.mean(g[1])\n gstd[1] = np.std(g[1], ddof=1)\n gmean[2] = np.mean(g[2])\n gstd[2] = np.std(g[2], ddof=1)\n # df = (tstd[k]**2/tN[k] + dstd[k]**2/dN[k])**2 / (( (tstd[k]**2 /\n # tN[k])**2 / (tN[k] - 1) ) + ( (dstd[k]**2 / dN[k])**2 / (tN[k] - 1) ) )\n df = ((gstd[1]**2/n[1] + gstd[2]**2/n[2])**2\n / (((gstd[1]**2 / n[1])**2 / (n[1] - 1)\n + ((gstd[2]**2 / n[2])**2 / (n[1] - 1))))\n )\n if dataLabel is not None:\n testtype = 'Independent'\n if paired:\n testtype = 'Paired'\n n = max([len(l) for l in k])\n print ('\\n%s\\n %s T-test, Welch correction' % (dataLabel, testtype))\n # if p1 < 0.05 and p2 < 0.05:\n # print(u' Both data sets appear normally distributed: Shapiro-Wilk Group 1 p = {:6.3f}, Group2 p = {:6.3f}'.format(p1, p2))\n # else:\n # print(u' ****At least one Data set is NOT normally distributed****\\n Shapiro-Wilk Group 1 p = {:6.3f}, Group2 p = {:6.3f}'.format(p1, p2))\n # print (u' (performing test anyway, as requested)')\n # if equalVar:\n # print(u' Variances are equivalent (Bartletts test, p = {:.3f})'.format(pb))\n # else:\n # print(u' Variances are unequal (Bartletts test, p = {:.3f}); not assuming equal variances'.format(pb))\n print(u' {:s}={:8.{pc}f} (SD {:.{pc}f}, N = {:d})'.\n format(k[0].rjust(n), gmean[1], gstd[1],\n len(g[1]), pc=decimals))\n print(u' {:s}={:8.{pc}f} (SD {:.{pc}f}, N = {:d})'.\n format(k[1].rjust(n), gmean[2], gstd[2],\n len(g[2]), pc=decimals))\n print(u' t({:6.2f})={:8.4f} p={:8.6f}\\n'.\n format(df, float(t), float(p)))\n # 
generate one line of text suitable for pasting into a paper\n if textline:\n if units is not None:\n units = ' ' + units\n else:\n units = ''\n fmtstring = u'{:s}: {:.{pc}f} (SD {:.{pc}f}, N={:d}){:s}; '\n print(u'(', end='')\n for s in range(1, 3):\n print(fmtstring.format(\n k[s-1], gmean[s], gstd[s], len(g[s]), units, \n pc=decimals), end='')\n print(u't{:.2f}={:.3f}, p={:s})\\n'.format(df, float(t), pformat(p)))\n\n return(df, float(t), float(p))", "def experiment(ww_train, rw_train, ww_test, rw_test):\n # First train the data on the training set\n\n ww_centroid = compute_centroid(ww_train)\n rw_centroid = compute_centroid(rw_train)\n correct_count = 0\n count = 0\n for row in ww_test:\n if euclidean_distance(row, ww_centroid) <= euclidean_distance(row, rw_centroid):\n correct_count+=1\n count+=1\n for row in rw_test:\n if euclidean_distance(row, rw_centroid) <= euclidean_distance(row, ww_centroid):\n correct_count+=1\n count+=1\n accuracy = correct_count/count\n result = \"{} total guesses, {} correct guesses, accuracy is {}\".format(count, correct_count, accuracy)\n print(result)\n return accuracy", "def attest_ind (a, b, dimension=None, printit=0, name1='Samp1', name2='Samp2',writemode='a'):\r\n if dimension == None:\r\n a = N.ravel(a)\r\n b = N.ravel(b)\r\n dimension = 0\r\n x1 = amean(a,dimension)\r\n x2 = amean(b,dimension)\r\n v1 = avar(a,dimension)\r\n v2 = avar(b,dimension)\r\n n1 = a.shape[dimension]\r\n n2 = b.shape[dimension]\r\n df = n1+n2-2\r\n svar = ((n1-1)*v1+(n2-1)*v2) / float(df)\r\n zerodivproblem = N.equal(svar,0)\r\n svar = N.where(zerodivproblem,1,svar) # avoid zero-division in 1st place\r\n t = (x1-x2)/N.sqrt(svar*(1.0/n1 + 1.0/n2)) # N-D COMPUTATION HERE!!!!!!\r\n t = N.where(zerodivproblem,1.0,t) # replace NaN/wrong t-values with 1.0\r\n probs = abetai(0.5*df,0.5,float(df)/(df+t*t))\r\n\r\n if type(t) == N.ndarray:\r\n probs = N.reshape(probs,t.shape)\r\n if probs.shape == (1,):\r\n probs = probs[0]\r\n \r\n if printit <> 0:\r\n if type(t) == N.ndarray:\r\n t = t[0]\r\n if type(probs) == N.ndarray:\r\n probs = probs[0]\r\n statname = 'Independent samples T-test.'\r\n outputpairedstats(printit,writemode,\r\n name1,n1,x1,v1,N.minimum.reduce(N.ravel(a)),\r\n N.maximum.reduce(N.ravel(a)),\r\n name2,n2,x2,v2,N.minimum.reduce(N.ravel(b)),\r\n N.maximum.reduce(N.ravel(b)),\r\n statname,t,probs)\r\n return\r\n return t, probs", "def ks_test(timeseries):\r\n\r\n hour_ago = time() - 3600\r\n ten_minutes_ago = time() - 600\r\n reference = scipy.array([x[1] for x in timeseries if x[0] >= hour_ago and x[0] < ten_minutes_ago])\r\n probe = scipy.array([x[1] for x in timeseries if x[0] >= ten_minutes_ago])\r\n\r\n if reference.size < 20 or probe.size < 20:\r\n return False\r\n\r\n ks_d,ks_p_value = scipy.stats.ks_2samp(reference, probe)\r\n\r\n if ks_p_value < 0.05 and ks_d > 0.5:\r\n adf = sm.tsa.stattools.adfuller(reference, 10)\r\n if adf[1] < 0.05:\r\n return True\r\n\r\n return False", "def compare_averages(ave_stats):\n pass", "def test_mc_t_two_sample_no_perms(self):\r\n exp = (-0.11858541225631833, 0.90756579317867436, [], nan)\r\n I = array([7.2, 7.1, 9.1, 7.2, 7.3, 7.2, 7.5])\r\n II = array([8.8, 7.5, 7.7, 7.6, 7.4, 6.7, 7.2])\r\n obs = mc_t_two_sample(I, II, permutations=0)\r\n self.assertFloatEqual(obs, exp)", "def ttest(x, mu=0, alpha=0.05, is_bernoulli=False, two_sided=True, return_tuple=False):\n\n # Define test degrees of freedom\n if two_sided:\n quant_order = 1 - (alpha / 2)\n h0 = f'X_bar = {mu}'\n h1 = f'X_bar != {mu}'\n else:\n quant_order = 1 - alpha\n 
h0 = f'X_bar <= {mu}'\n h1 = f'X_bar > {mu}'\n\n # Input vector as array\n x = np.asarray(x)\n # Sample size\n n = len(x)\n\n # Empirical mean\n x_bar = x.mean()\n # s estimator (variance)\n if is_bernoulli:\n s2 = x_bar * (1 - x_bar)\n else:\n s2 = desc.var(x)\n\n # Degrees of freedom\n df = n - 1\n\n # T statistic\n t = (x_bar - mu) / (math.sqrt(s2 / n))\n if two_sided:\n t = math.fabs(t)\n # p and critical values\n p = 2.0 * (1.0 - scp.t.cdf(t, df=df))\n\n if n > 30:\n cv = scp.norm.ppf(quant_order)\n else:\n cv = scp.t.ppf(quant_order, df=df)\n\n _summ = test_summary(df=df, critical_value=cv, t_value=t,\n p_value=p,\n title='One Sample Student test',\n h0=h0, h1=h1,\n alpha=alpha)\n\n if return_tuple:\n return t, cv, p\n else:\n return _summ", "def two_tailed_t_test(samples: np.ndarray, H0: float):\n empirical_mean = np.mean(samples, axis=0)\n number_samples = samples.shape[0]\n standard_error = np.std(samples, ddof=1, axis=0) / np.sqrt(number_samples)\n t_value = (empirical_mean - H0) / standard_error\n p_value = 2.0 * (1.0 - t(df=number_samples - 1).cdf(np.abs(t_value)))\n return t_value, p_value", "def test_sample(self):\n dist = self.many_samples([0, 0, 0, 1])\n self.assertEquals(3, dist.argMax())\n\n dist = self.many_samples([1, 0, 0, 0, 0])\n self.assertEquals(0, dist.argMax())\n\n dist = self.many_samples([0.5, 0, 0, 0.25, 0.25])\n self.assertAlmostEquals(dist[0], 0.5, delta=0.01)\n self.assertAlmostEquals(dist[3], 0.25, delta=0.01)\n self.assertAlmostEquals(dist[4], 0.25, delta=0.01)\n self.assertEquals(dist[1], 0)\n self.assertEquals(dist[2], 0)\n\n with self.assertRaises(AssertionError):\n diffp.sample([0.5, 0.5, 0.01])", "def test_new_log_diff():\n assert get_clip(audlist, log, 1) != get_clip(audio['NTF'], log, 1)", "def test_properlyAveraged(self):\n r0 = self.singleReader\n r1 = DetectorReader(DET_FILES['bwr1'])\n r1.read()\n for detName in self.sampler.detectors:\n expectedTallies, expectedErrors = (_getExpectedAverages(\n r0.detectors[detName], r1.detectors[detName]))\n uniq = self.sampler.detectors[detName]\n assert_allclose(uniq.tallies, expectedTallies, err_msg='tallies',\n **TOLERANCES['tallies'])\n assert_allclose(uniq.errors, expectedErrors, err_msg='errrors',\n **TOLERANCES['errors'])", "def compare():\n from sklearn import datasets\n from sklearn import linear_model\n from sklearn.metrics import mean_squared_error\n\n boston_data = datasets.load_boston()\n X, y = boston_data.data, boston_data.target\n linreg = linear_model.LinearRegression()\n lr_model = linreg.fit(X ,y)\n lr_mse = mean_squared_error(lr_model.predict(X), y)\n print 'Linear regression:', lr_mse\n\n ridge = linear_model.Ridge()\n ridge_model = ridge.fit(X, y)\n ridge_mse = mean_squared_error(ridge.predict(X), y)\n print 'Ridge regression:', ridge_mse\n\n lasso = linear_model.Lasso()\n lasso_model = lasso.fit(X, y)\n lasso_mse = mean_squared_error(lasso_model.predict(X), y)\n print 'Lasso regression:', lasso_mse", "def test_onesample_left_tailed(self):\n rng = np.random.default_rng(9876138761251)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(15, 1, 100)\n\n ttest = one_sample_ttest(data1, 15, 'left')\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)", "def test_analyze_time_series_std():\n\n statistics = analyze_time_series(np.ones(10))\n\n assert statistics.n_total_points == 10\n assert statistics.n_uncorrelated_points == 1\n assert np.isclose(statistics.statistical_inefficiency, 10.0)\n assert 
statistics.equilibration_index == 0", "def lpaired(x,y):\r\n samples = ''\r\n while samples not in ['i','r','I','R','c','C']:\r\n print '\\nIndependent or related samples, or correlation (i,r,c): ',\r\n samples = raw_input()\r\n\r\n if samples in ['i','I','r','R']:\r\n print '\\nComparing variances ...',\r\n# USE O'BRIEN'S TEST FOR HOMOGENEITY OF VARIANCE, Maxwell & delaney, p.112\r\n r = obrientransform(x,y)\r\n f,p = F_oneway(pstats.colex(r,0),pstats.colex(r,1))\r\n if p<0.05:\r\n vartype='unequal, p='+str(round(p,4))\r\n else:\r\n vartype='equal'\r\n print vartype\r\n if samples in ['i','I']:\r\n if vartype[0]=='e':\r\n t,p = ttest_ind(x,y,0)\r\n print '\\nIndependent samples t-test: ', round(t,4),round(p,4)\r\n else:\r\n if len(x)>20 or len(y)>20:\r\n z,p = ranksums(x,y)\r\n print '\\nRank Sums test (NONparametric, n>20): ', round(z,4),round(p,4)\r\n else:\r\n u,p = mannwhitneyu(x,y)\r\n print '\\nMann-Whitney U-test (NONparametric, ns<20): ', round(u,4),round(p,4)\r\n\r\n else: # RELATED SAMPLES\r\n if vartype[0]=='e':\r\n t,p = ttest_rel(x,y,0)\r\n print '\\nRelated samples t-test: ', round(t,4),round(p,4)\r\n else:\r\n t,p = ranksums(x,y)\r\n print '\\nWilcoxon T-test (NONparametric): ', round(t,4),round(p,4)\r\n else: # CORRELATION ANALYSIS\r\n corrtype = ''\r\n while corrtype not in ['c','C','r','R','d','D']:\r\n print '\\nIs the data Continuous, Ranked, or Dichotomous (c,r,d): ',\r\n corrtype = raw_input()\r\n if corrtype in ['c','C']:\r\n m,b,r,p,see = linregress(x,y)\r\n print '\\nLinear regression for continuous variables ...'\r\n lol = [['Slope','Intercept','r','Prob','SEestimate'],[round(m,4),round(b,4),round(r,4),round(p,4),round(see,4)]]\r\n pstats.printcc(lol)\r\n elif corrtype in ['r','R']:\r\n r,p = spearmanr(x,y)\r\n print '\\nCorrelation for ranked variables ...'\r\n print \"Spearman's r: \",round(r,4),round(p,4)\r\n else: # DICHOTOMOUS\r\n r,p = pointbiserialr(x,y)\r\n print '\\nAssuming x contains a dichotomous variable ...'\r\n print 'Point Biserial r: ',round(r,4),round(p,4)\r\n print '\\n\\n'\r\n return None", "def test_two_unsampled_arms(self):\n self._test_two_unsampled_arms()", "def test_same_distances(self):\n \n\t\tm1 = models.vgg11(weights='VGG11_Weights.IMAGENET1K_V1')\n\t\tm2 = models.vgg11(weights='VGG11_Weights.IMAGENET1K_V1')\n\t\tavg_dW, avg_db, distances = self.watcher.distances(m1, m2)\n\t\t\n\t\tactual_mean_distance = avg_dW\n\t\texpected_mean_distance = 0.0\t \n\t\tself.assertEqual(actual_mean_distance,expected_mean_distance)\n\t\t\n\t\tactual_mean_distance = avg_db\n\t\texpected_mean_distance = 0.0\t \n\t\tself.assertEqual(actual_mean_distance,expected_mean_distance)\n\t\t\n\t\tprint(distances)", "def test_unequal_variance_right_tailed(self):\n rng = np.random.default_rng(887943278)\n\n passed = 0\n n_iter = 500\n for _ in range(n_iter):\n data1 = rng.normal(2, 1, 100)\n data2 = rng.normal(2, 1, 100)\n\n ttest = unequal_variance_ttest(data1, data2, 'right')\n\n if ttest['P_value'] < .05:\n passed +=1\n\n self.assertAlmostEqual(passed / n_iter, .05, delta=.01)", "def perform_wilcoxon_validation(series1, series2):\n differences, sorted_diffs = ExperimentUtil._calculate_differences(series1, series2)\n sorted_diffs.sort()\n position_diffs = ExperimentUtil._calculate_position_differences(differences, sorted_diffs)\n\n for index, score in enumerate(differences):\n if score < 0:\n position_diffs[index] = position_diffs[index] * -1\n\n sum_positive, sum_negative = ExperimentUtil._calculate_positive_negative_sum(position_diffs)\n T = min(sum_positive, 
sum_negative)\n # TODO: Se o tamanho de n for maior que 30, seria preciso usar a tabela T-Student\n if len(position_diffs) <= 30:\n # TODO: Com o valor de T, precisamos ver qual o valor critico e elaborar melhor a resposta no relatorio\n return T < ExperimentUtil.wilcox_table[len(position_diffs)]", "def get_y_times_diffs(self,sw):\n # Process the phrase scores first.\n # - for each phrase, interpolate across the ttables using the current weights\n # - sum the log probs across phrase pairs to get a score for each hypothesis\n # - take the weighted sum of these scores, to give a phrase feature total\n # for each hyp\n\n # Memoise\n if self.cached_sw == None or \\\n np.sum(np.abs(self.cached_sw['other'] - sw['other'])) != 0 or \\\n np.sum(np.abs(self.cached_sw['phrase'] - sw['phrase'])) != 0 or \\\n np.sum(np.abs(self.cached_sw['interp'] - sw['interp'])) != 0:\n\n # do the interpolation\n iw = sw['interp']\n interpolated = self.get_interpolated_phrase_probs(iw)\n # Use traditional python as not sure how to vectorise. This goes through\n # each hypothesis, logs the probability, applies the feature weights, then sums\n self.cached_y_times_diffs = np.zeros(len(interpolated))\n # Take the difference between the hypotheses\n for i,sample in enumerate(interpolated):\n self.cached_y_times_diffs[i] = \\\n np.sum(sw['phrase']* np.log(sample[0])) - \\\n np.sum(sw['phrase']* np.log(sample[1]))\n #print self.fvs, sw['other']\n #print sw['other'], self.fvs\n self.cached_y_times_diffs += np.sum(sw['other'] * self.fvs, axis=1) # add other scores\n self.cached_y_times_diffs *= self.y\n self.cached_sw = sw\n return self.cached_y_times_diffs", "def t_two_sample(a, b, tails=None, exp_diff=0, none_on_zero_variance=True):\r\n if tails is not None and tails != 'high' and tails != 'low':\r\n raise ValueError(\"Invalid tail type '%s'. 
Must be either None, \"\r\n \"'high', or 'low'.\" % tails)\r\n\r\n try:\r\n # see if we need to back off to the single-observation for single-item\r\n # groups\r\n n1 = len(a)\r\n if n1 < 2:\r\n return t_one_observation(sum(a), b, tails, exp_diff,\r\n none_on_zero_variance=none_on_zero_variance)\r\n\r\n n2 = len(b)\r\n if n2 < 2:\r\n t, prob = t_one_observation(sum(b), a, reverse_tails(tails),\r\n exp_diff, none_on_zero_variance=none_on_zero_variance)\r\n\r\n # Negate the t-statistic because we swapped the order of the inputs\r\n # in the t_one_observation call, as well as tails.\r\n if t != 0:\r\n t = -t\r\n\r\n return (t, prob)\r\n\r\n # otherwise, calculate things properly\r\n x1 = mean(a)\r\n x2 = mean(b)\r\n var1 = var(a)\r\n var2 = var(b)\r\n\r\n if var1 == 0 and var2 == 0:\r\n # Both lists do not vary.\r\n if x1 == x2 or none_on_zero_variance:\r\n result = (nan, nan)\r\n else:\r\n result = _t_test_no_variance(x1, x2, tails)\r\n else:\r\n # At least one list varies.\r\n df = n1 + n2 - 2\r\n svar = ((n1 - 1) * var1 + (n2 - 1) * var2) / df\r\n t = (x1 - x2 - exp_diff) / sqrt(svar * (1 / n1 + 1 / n2))\r\n\r\n if isnan(t) or isinf(t):\r\n result = (nan, nan)\r\n else:\r\n prob = t_tailed_prob(t, df, tails)\r\n result = (t, prob)\r\n except (ZeroDivisionError, ValueError, AttributeError, TypeError,\r\n FloatingPointError) as e:\r\n # invalidate if the sample sizes are wrong, the values aren't numeric or\r\n # aren't present, etc.\r\n result = (nan, nan)\r\n\r\n return result", "def binary_stats(y_true, y_pred, normalize=True, sample_weight=None):\n hamming_list = []\n precision_list = []\n recall_list = []\n f1_list = []\n for i in range(y_true.shape[0]):\n set_true = set( np.where(y_true[i])[0] )\n set_pred = set( np.where(y_pred[i])[0] )\n intersection = len(set_true.intersection(set_pred))\n if len(set_true) == 0 and len(set_pred) == 0:\n hamming = 1\n precision = 1\n recall = 1\n f1 = 1\n elif len(set_pred) == 0 or len(set_pred) == 0:\n hamming = intersection/float( len(set_true.union(set_pred)) )\n precision = 0.0\n recall = 0.0\n f1 = 0.0\n else:\n hamming = intersection/float( len(set_true.union(set_pred)) )\n precision = intersection/float(len(set_pred))\n recall = intersection/float(len(set_true))\n if precision + recall == 0.0:\n f1 = 0.0\n else:\n f1 = 2.0*(precision*recall) / (precision + recall)\n\n hamming_list.append(hamming)\n precision_list.append(precision)\n recall_list.append(recall)\n f1_list.append(f1)\n\n return np.mean(hamming_list), np.mean(precision_list), np.mean(recall_list), np.mean(f1_list)", "def test_u_statistic(self):\n for seed in range(5):\n\n random_state = np.random.RandomState(seed)\n\n for i in range(4, self.test_max_size + 1):\n arr1 = random_state.rand(i, 1)\n arr2 = random_state.rand(i, 1)\n\n u_stat = dcor_internals._u_distance_correlation_sqr_naive(\n arr1, arr2)\n u_stat_fast = dcor_internals._u_distance_correlation_sqr_fast(\n arr1, arr2)\n\n self.assertAlmostEqual(u_stat, u_stat_fast)", "def t_confidence_Interval_Difference_Of_Means(xSamples, ySamples, confidence):\n try:\n if len(xSamples) >= 30 or len(ySamples) >= 30:\n raise sampleSizeError(\"Should use normal distribution instead. 
m or n > 30.\")\n \n if confidence > 1:\n confidence = confidence / 100.0\n print(f\"Converting confidence interval to {confidence}\")\n\n elif type(confidence) != int or type(confidence) != float:\n raise ValueError(\"Confidence Interval must be a numeric value\")\n \n # Find mean and variance for both sample distributions\n n = len(xSamples) \n xBar = sample_mean(xSamples)\n xSampStd = sample_variance(xSamples) ** .5\n \n m = len(ySamples)\n yBar = sample_mean(ySamples)\n ySampStd = sample_variance(ySamples) ** .5\n \n # Find t at alpha/2 and the new distribution's sample size - 2\n # Calculate the sample pooling standard deviation\n tAlpha = (1 + confidence) / 2.0\n t = scipy.stats.t.ppf(tAlpha, (m + n - 2)) \n spsd = ((((n - 1)* (xSampStd**2)) + ((m - 1) * (ySampStd**2)))/(m + n - 2)) ** .5 \n \n # Find the lower and upper bound \n # (X-Y) (+/-) t((spsd * (((1/m)+(1/n)) **.5))\n lowerBound = (xBar - yBar) - t * (spsd * (((1/m)+(1/n)) **.5))\n upperBound = (xBar - yBar) + t * (spsd * (((1/m)+(1/n)) **.5))\n \n return lowerBound, upperBound\n \n except sampleSizeError as inst:\n print(inst.args[0])\n \n except ValueError as inst:\n print(inst.args[0])", "def t_paired(a, b, tails=None, exp_diff=0):\r\n n = len(a)\r\n if n != len(b):\r\n raise ValueError('Unequal length lists in ttest_paired.')\r\n try:\r\n diffs = array(a) - array(b)\r\n return t_one_sample(diffs, popmean=exp_diff, tails=tails)\r\n except (ZeroDivisionError, ValueError, AttributeError, TypeError,\r\n FloatingPointError):\r\n return (nan, nan)", "def test_less_equal(self):\n utils.compare_tracing_methods(\n SimpleCompareOpsModule(\"lessEqual\"),\n torch.randn(3, 4, 5),\n torch.randn(3, 4, 5),\n fusible_ops={\"aten::le\"},\n )", "def test_ndiff(self):\n print \"\\n\"\n for d in ndiff(a, b): print d", "def t_test(result, reference):\n \n # Check that result and reference are 1D and that they have the same length\n \n print('\\nChecking that result and reference are 1D and that they have the same length\\n')\n \n if (len(result.shape) == 1) and (len(reference.shape) == 1):\n \n if len(result) == len(reference):\n \n print('Performing t test\\n')\n \n t_stat, p_value = scipy.stats.ttest_ind(result, reference)\n \n print('t test completed successfully!\\n')\n \n print('t statistic: {} // p value: {}'.format(t_stat, p_value))\n \n return t_stat, p_value\n \n else:\n \n print('Result and reference vectors do not have the same length. Please input them so that they have the same length')\n \n else:\n \n print('Result or reference vectors are not 1D. Please reformat them to be 1D')", "def compare_sums_ks(array1, array2):\n return stats.ks_2samp(array1, array2)", "def test_compare_different_expectations(self):\n\n pd_single = norm(0, 1)\n pd = []\n for i in range(0, 3):\n pd.append(pd_single)\n meas = [-1, 0, 1]\n meanCRIGN1, singleCRIGN1 = crign.crign(pd, meas)\n\n pd2 = []\n for i in range(0, 3):\n pd2.append(norm(i, 1))\n meas2 = [-1, 1, 3]\n\n meanCRIGN2, singleCRIGN2 = crign.crign(pd2, meas2)\n\n is_good = np.isclose(singleCRIGN1, singleCRIGN2).all()\n assert_true(is_good, msg=\"Relation of individual CRIGN values should return roughly the same value.\")" ]
[ "0.6815539", "0.6802423", "0.67800075", "0.67102456", "0.66998667", "0.6637655", "0.66326714", "0.6597813", "0.6578872", "0.6518927", "0.64484495", "0.64479405", "0.63393015", "0.6331243", "0.61787146", "0.6156587", "0.6153929", "0.61489004", "0.61462283", "0.61424893", "0.61002415", "0.6063822", "0.6023111", "0.6001927", "0.59954286", "0.59898496", "0.5981604", "0.5981604", "0.5981604", "0.59523964", "0.595151", "0.5934626", "0.5934626", "0.5934626", "0.5916602", "0.58794767", "0.58671194", "0.5863085", "0.58548874", "0.58515215", "0.58493", "0.5811041", "0.5807298", "0.5801103", "0.57911664", "0.57844317", "0.5769348", "0.5760049", "0.5751703", "0.574921", "0.5737285", "0.5730001", "0.5720306", "0.56988287", "0.5684903", "0.56742525", "0.56504226", "0.564686", "0.5645993", "0.5641611", "0.56357026", "0.5634199", "0.5629425", "0.56293774", "0.5607836", "0.5604346", "0.55979836", "0.5586519", "0.55851114", "0.55773735", "0.5575254", "0.55746394", "0.5556812", "0.5544942", "0.5536324", "0.55326223", "0.5528606", "0.55257696", "0.55217206", "0.55149114", "0.5513717", "0.5509388", "0.5464129", "0.54553646", "0.545195", "0.54487824", "0.54291725", "0.54267013", "0.5426312", "0.5424215", "0.54223555", "0.54209775", "0.5420226", "0.5416702", "0.54068196", "0.5394911", "0.5381112", "0.53723663", "0.5368849", "0.5360743" ]
0.6690653
5
Get status summary of a given transport node.
def get_aggregation_status(cls, client_obj, get_aggregation_status=None): attr_map = {'node_uuid': 'uuid', 'bfd_admin_down_count': 'admin_down_count', 'bfd_init_count': 'init_count', 'bfd_up_count': 'up_count', 'bfd_down_count': 'down_count'} node_id = client_obj.id_ # TODO(gangarm): Check if we can use a better name in product sdk for # param_1_id, which is essentially node id. client_class_obj = gettransportnodestatus.GetTransportNodeStatus( connection_object=client_obj.connection, param_1_id=node_id) status_schema_object = client_class_obj.read() status_schema_dict = status_schema_object.get_py_dict_from_object() mapped_dict = utilities.map_attributes(attr_map, status_schema_dict) result_dict = dict() result_dict['response'] = mapped_dict result_dict['response_data'] = dict() result_dict['response_data']['status_code'] = ( client_class_obj.last_calls_status_code) return result_dict
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getNodeStatus(self,node):\n data = self.connect('get','nodes/%s/status' % (node),None)\n return data", "def getNodeTaskStatusByUPID(self,node,upid):\n data = self.connect('get','nodes/%s/tasks/%s/status' % (node,upid),None)\n return data", "def status(self):\n url = API_PATH[\"node_status\"].format(tuneUuid=self._parentTune.uuid())\n rsp_json = self._parse(self._get(url))\n\n for status_obj in rsp_json:\n if status_obj[\"nodeUuid\"] == self.uuid():\n return self._new_instance(NodeStatus, status_obj, node=self)\n return None", "def bdev_nvme_get_transport_statistics(client):\n return client.call('bdev_nvme_get_transport_statistics')", "def test_get_node_status(self):\n pass", "def status(self, tx_digest):\n\n # for the moment get requests must use the hex encoded hash name\n tx_digest_hex = binascii.hexlify(base64.b64decode(tx_digest)).decode()\n\n url = 'http://{}:{}/api/status/tx/{}'.format(self.host, self.port, tx_digest_hex)\n\n response = self._session.get(url).json()\n return response.get('status')", "def peer_status(mnode):\n cmd = \"gluster peer status\"\n return g.run(mnode, cmd)", "def detailed_status(self) -> str:\n return pulumi.get(self, \"detailed_status\")", "def status(self):\n return self._call_txtrader_api('status', {})", "def detailed_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status\")", "def detailed_status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status\")", "def status(ctx):\n return show_network_status()", "def status(self):\n return {\n 'id': 'status',\n 'protocol_version': 'PV62',\n 'network': self.origin_node.network.name,\n 'td': self.origin_node.chain.head.header.difficulty,\n 'best_hash': self.origin_node.chain.head.header.hash,\n 'genesis_hash': self.origin_node.chain.genesis.header.hash,\n 'size': kB_to_MB(self._message_size['status'])\n }", "def test_get_node_status_batterystatus(self):\n pass", "def get_status(self):\n r = requests.get(self.base_url + '/status')\n return r.json()", "def getClusterStatus(self):\n data = self.connect('get','cluster/status', None)\n return data", "def get_statistics(self, task):\n url = 'http://' + task['host'] + ':5051/monitor/statistics.json'\n for task_stats in requests.get(url).json():\n if task_stats['executor_id'] == task['id']:\n return task_stats['statistics']", "def _get_nodes_stats(self, time):\n S = np.sum(self.status==0) * 100. / self.n_nodes\n I = np.sum(self.status==1) * 100. / self.n_nodes\n R = np.sum(self.status==2) * 100. 
/ self.n_nodes\n return S, I, R", "def status_str(self, spaced=False):\n if self.args.vverbose:\n ## Print profile of all nodes\n status = self.pool.status(string=True)\n\n elif self.args.verbose:\n ## Print profile of usable nodes\n status = self.pool.status(min_state=PLNodeState.usable, string=True)\n\n else:\n ## Print list of usable nodes\n attribute = \"name\" if self.args.names else \"addr\"\n nodes = self.pool._get(attribute, min_state=PLNodeState.usable)\n if len(nodes) > 0:\n status = \"\\n\".join(nodes)+\"\\n\"\n else:\n status = \"No usable node found.\\n\"\n\n return status", "def rtt_get_status(self):\n status = structs.JLinkRTTerminalStatus()\n res = self.rtt_control(enums.JLinkRTTCommand.GETSTAT, status)\n return status", "def status(self):\n r = requests.get('/'.join([self.base_url, self.ENDPOINT_STATUS]))\n return r.json()", "def status(self):\n return self._get(path='status')", "async def get_task_status(task_id: TaskId):", "def get_node_statistics(self):\n return self._network.manager.getNodeStatistics(\n self._network.home_id, self.node_id\n )", "def node_status(self) -> Optional['outputs.CSIPowerStoreStatusNodeStatus']:\n return pulumi.get(self, \"node_status\")", "def status(self) -> NodeStatus:\n return self._status", "def node_status(self) -> Optional['outputs.CSIVXFlexOSStatusNodeStatus']:\n return pulumi.get(self, \"node_status\")", "def getNodeStatus(self,status = 0):\n if status:\n self.node_status = status\n return self.node_status", "def get_statement_summary_status(self):\n statement_summary_status_element = self.wait().until(EC.visibility_of_element_located(self.statement_summary_status_locator), 'statement summary status locator not found before specified time out')\n return statement_summary_status_element.text", "def status(self, station=1):\n return self.statuslist()[station][2]", "def node_status(self) -> Optional['outputs.CSIUnityStatusNodeStatus']:\n return pulumi.get(self, \"node_status\")", "def getServiceStatus(self):\n return self.jsonRequest(\"/api/v1/getServiceStatus\", {\"apiKey\": self._apiKey})", "def summary(self):\n res = \", \".join(\n elem[\"summary\"] for elem in self.status[\"health\"][\"summary\"]\n )\n if res:\n return res\n elif self.detail:\n return self.detail[0]\n return \"\"", "def get_node_details(self, node):\n node_details = self.parser.find_server_by_ip(node.get('ip')) or \\\n self.parser.find_server_by_hostname(node.get('host'))\n\n return node_details", "async def get_status():", "def status(self):\n return self._data['status']", "def task_status(self) -> str:\n return self._task_status", "def compute_single_node_status(\n node_name_message_map: Dict[str, Node], node_name: str\n) -> \"StatusValue\":\n\n node = node_name_message_map[node_name]\n\n if (\n node.status != Status.STATUS_UNSPECIFIED\n ): # if the current node's status was already computed\n return node.status\n\n status_count_map: Dict[\"StatusValue\", int] = defaultdict(int)\n for child_name in node.child_names:\n status_count_map[\n compute_single_node_status(node_name_message_map, child_name)\n ] += 1\n\n try:\n for dependency in node.dependencies:\n status_count_map[\n compute_single_node_status(\n node_name_message_map, dependency.target_name\n )\n ] += 1\n except AttributeError:\n pass\n\n try:\n for sli in node.slis:\n status_count_map[compute_sli_status(sli)] += 1\n except AttributeError:\n pass\n\n node.status = compute_status_from_count_map(status_count_map)\n\n if (\n node.override_status != Status.STATUS_UNSPECIFIED\n ): # if the current node's status 
was manually overwritten\n # notice we place this at the end, since we still want to compute the node's status\n # to display in the dropdown menu (regardless of the override)\n return node.override_status\n\n return node.status", "def getVirtualStatus(self,node,vmid):\n data = self.connect('get','nodes/%s/qemu/%s/status/current' % (node,vmid),None)\n return data", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def status(self) -> str:\n return pulumi.get(self, \"status\")", "def node_statuses(self) -> pulumi.Output[Sequence['outputs.NodeBalancerConfigNodeStatus']]:\n return pulumi.get(self, \"node_statuses\")", "def GetStatus(self):\r\n return self.status", "def node_host_status(self, node):\n if node.is_online() or node.is_unreachable():\n return self.HOST_MONITORED\n else:\n return self.HOST_UNMONITORED", "def getContainerStatus(self,node,vmid):\n data = self.connect('get','nodes/%s/lxc/%s/status/current' % (node,vmid),None)\n return data", "def status_summary(self):\n base_query_set = super(PeeringSessionManager, self).get_queryset()\n summary = base_query_set.annotate(\n label=models.Case(\n models.When(provisioning_state=2, then=models.Case(\n models.When(admin_state=2, then=models.Case(\n models.When(operational_state=6,\n then=models.Value('Up')),\n default=models.Value('Down')\n )),\n default=models.Value('Admin Down')\n )),\n models.When(provisioning_state=1,\n then=models.Value('Provisioning')),\n default=models.Value('None'),\n output_field=models.CharField()\n )).values('label').annotate(value=models.Count('label'))\n return summary", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def get_status(self):\n return self.status", "def get_host_stats(self, refresh=False):\n return self.host_status", "def getStatus(self, request, context):\n \n statusDrone = str(self.vehicle.system_status).rpartition(':')[2]\n\t \n return droneconnect_pb2.Status(status = statusDrone)", "def get_peer_status(mnode):\n ret, out, _ = g.run(mnode, 
\"gluster peer status --xml\", log_level='DEBUG')\n if ret != 0:\n g.log.error(\"Failed to execute peer status command on node '%s'. \"\n \"Hence failed to parse the peer status.\", mnode)\n return None\n\n try:\n root = etree.XML(out)\n except etree.ParseError:\n g.log.error(\"Failed to parse the gluster peer status xml output.\")\n return None\n\n peer_status_list = []\n for peer in root.findall(\"peerStatus/peer\"):\n peer_dict = {}\n for element in peer.getchildren():\n if element.tag == \"hostnames\":\n hostnames_list = []\n for hostname in element.getchildren():\n hostnames_list.append(hostname.text)\n element.text = hostnames_list\n peer_dict[element.tag] = element.text\n peer_status_list.append(peer_dict)\n return peer_status_list", "def status(self):\n return self._query_status()['status']", "def fetch_status():\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((GEARMAND_HOST, GEARMAND_PORT))\n log_verbose('Connected to Gearmand at %s:%s' % (GEARMAND_HOST, GEARMAND_PORT))\n except socket.error, e:\n collectd.error('gearmand_info plugin: Error connecting to %s:%d - %r'\n % (GEARMAND_HOST, GEARMAND_PORT, e))\n return None\n fp = s.makefile('r')\n log_verbose('Sending info command')\n s.sendall('status\\r\\n')\n\n status = {}\n while True:\n data = fp.readline().strip()\n log_verbose('Received data: %r' % data)\n if not data or data == '.':\n break\n function, total, running, available_workers = data.split('\\t')\n status[function] = {\n 'total': total,\n 'running': running,\n 'available_workers': available_workers}\n\n s.close()\n return status", "def get_status(self):\n return self._status", "async def get_status(self) -> str:\n return await self.hw_device.status()", "def get_status(self):\n # find status\n # search in summary file first\n self.status = \"running\"\n status = self.search_summary(\"status\")\n if status:\n self.status = status.split()[1]\n # define running time\n # search in summary file first\n self.running_time = \"00:00:00\"\n running_time = self.search_summary(\"running-time\")\n if running_time:\n self.running_time = running_time.split()[1]\n # calculate running time\n else:\n now = datetime.datetime.now()\n elapsed_time = (now - self.ctime).seconds\n hours, remainder = divmod(elapsed_time, 3600)\n minutes, seconds = divmod(remainder, 60)\n self.running_time = (\n f\"{int(hours):02}:{int(minutes):02}:{int(seconds):02}\"\n )", "def detailed_status_message(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status_message\")", "def detailed_status_message(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"detailed_status_message\")", "def status(self):\n return status_dict[self._get_property_(self.STATUS).upper()]", "def get_node_status(self, node, time):\n try:\n if self.inf_time[node] > time:\n return 0\n elif self.rec_time[node] > time:\n return 1\n else:\n return 2\n except IndexError:\n raise ValueError('Invalid node `{}`'.format(node))", "def get_service_status(self):\n return self.service.status()", "def Status(self):\r\n\t\treturn self._get_attribute('status')", "def get_status(self):\n return self.msg", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def 
status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")", "def status(self) -> pulumi.Output[str]:\n return pulumi.get(self, \"status\")" ]
[ "0.7329152", "0.6466141", "0.6326778", "0.63179845", "0.6115346", "0.608754", "0.60856014", "0.6035681", "0.6000012", "0.59930164", "0.59930164", "0.5987499", "0.5987269", "0.5976228", "0.596927", "0.5969141", "0.5904546", "0.5899778", "0.5893478", "0.5890255", "0.58680993", "0.5855912", "0.5839369", "0.5828368", "0.58041847", "0.57981515", "0.57926434", "0.57631874", "0.57325095", "0.56889796", "0.56626624", "0.56311756", "0.56122154", "0.5610133", "0.5604132", "0.5600992", "0.5591853", "0.55876607", "0.5574926", "0.55722845", "0.55722845", "0.55722845", "0.55722845", "0.55722845", "0.55722845", "0.55722845", "0.55722845", "0.55722845", "0.55722845", "0.55722845", "0.55722845", "0.55722845", "0.55722845", "0.55722845", "0.55722845", "0.55722845", "0.55722845", "0.55722845", "0.55722845", "0.55722845", "0.55722845", "0.55722845", "0.55722845", "0.55722845", "0.5567334", "0.5564049", "0.55555373", "0.55525416", "0.55491203", "0.5541352", "0.5541352", "0.5541352", "0.55301934", "0.5506681", "0.54977787", "0.54967535", "0.5487936", "0.5486832", "0.5476404", "0.5472001", "0.545304", "0.545304", "0.5452733", "0.5445787", "0.54414505", "0.54403174", "0.5440088", "0.5432177", "0.5432177", "0.5432177", "0.5432177", "0.5432177", "0.5432177", "0.5432177", "0.5432177", "0.5432177", "0.5432177", "0.5432177", "0.5432177", "0.5432177", "0.5432177" ]
0.0
-1
return autosizable field names in idfobject
def autosize_fieldname(idfobject): # undocumented stuff in this code return [ fname for (fname, dct) in zip(idfobject.objls, idfobject["objidd"]) if "autosizable" in dct ]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def field_names(self):\n ...", "def objectFields(self):\n raise NotImplementedError", "def fields(self):", "def _fields_names(cls) -> List:\n return list(field.name for field in dataclasses.fields(cls))", "def fields(self):\n ...", "def get_field_names(self):\n return {rv[0] for rv in self.iter_fields()}", "def fields(cls):\n return cls._nameToValue", "def get_field_attr(name):\n # de variant met een repeating group (entiteit, dataitem) levert hier nog een probleem op.\n # is dat omdat er twee entiteiten in 1 scherm staan?\n fields = []\n opts = my.rectypes[name]._meta\n for x in opts.get_fields(): # fields:\n fldname = x.name\n fldtype = x.get_internal_type()\n if fldname == 'id' or fldtype in ('ForeignKey', 'ManyToManyField'):\n # if fldname == 'id' or any((x.many2one, x.many2many, x.one2many))\n continue\n try:\n length = x.max_length\n except AttributeError:\n length = -1\n fields.append((fldname, fldtype[:-5], length))\n return fields", "def fields(self):\r\n pass", "def field_names(self):\n return self.base_field_names() + list(self.data.keys())", "def get_field_names() -> Sequence[str]:\n raise NotImplementedError", "def db_fields(self):", "def list_fields(fc):\n return [f.name for f in arcpy.ListFields(fc)]", "def namespaced_fields(self):\n ...", "def fields(self):\r\n return self._by_name.iteritems()", "def get_default_field_names(self, declared_fields, model_info):\n return (\n list(declared_fields.keys()) +\n list(model_info.fields.keys())\n )", "def get_field_names(self, declared_fields, info):\n return self._requested_fields", "def get_fields(cls):\n return map(lambda x: getattr(cls, x), cls.get_field_names())", "def get_fields(self):\n field_list = []\n for field in self._meta.local_fields:\n if not field.primary_key:\n field_list.append([field.verbose_name.title(),\n self.__getattribute__(field.name),\n field.get_internal_type()])\n return field_list", "def f(self):\r\n return self.fields()", "def model_fields(self):\n converter = connections[self.db].introspection.identifier_converter\n model_fields = {}\n for field in self.model._meta.fields:\n name, column = field.get_attname_column()\n model_fields[converter(column)] = field\n return model_fields", "def fields(self):\n return [f[1] for f in sorted(self.dd.fields.items())]", "def field_names(self):\n\n entry_time_name = forms_builder.forms.models.FormEntry._meta.get_field('entry_time').verbose_name.title()\n document_title_name = Document._meta.get_field('name').verbose_name.title()\n document_url_name = Document._meta.get_field('url').verbose_name.title()\n\n form = self.form.all()[0]\n return ['user'] \\\n + [document_title_name, document_url_name] \\\n + [f.label\n for f in form.fields.all()] \\\n + [entry_time_name]", "def __fields(self):\n return [self.__class__.__dict__[f] for f in self.__class__._fields]", "def get_field_names(self):\n return self._keys", "def field_names(self):\n if not self._field_names:\n self._field_names.update(self.properties.keys())\n\n self._field_names = [attr for attr in self._field_names if not attr.startswith(\"_\")]\n\n return self._field_names", "def get_all_fields(self):\n fields = []\n for f in self._meta.fields:\n\n fname = f.name \n # resolve picklists/choices, with get_xyz_display() function\n get_choice = 'get_'+fname+'_display'\n if hasattr( self, get_choice):\n value = getattr( self, get_choice)()\n else:\n try :\n value = getattr(self, fname)\n except User.DoesNotExist:\n value = None\n\n # only display fields with values and skip some fields entirely\n if f.editable and 
value and f.name not in ('id', 'status', 'workshop', 'user', 'complete') :\n\n fields.append(\n {\n 'label':f.verbose_name, \n 'name':f.name, \n 'value':value,\n }\n )\n return fields", "def modelfields(entity) -> Dict[str, Field]:\n return entity.__modelfields__", "def get_field_names(cls):\n return cls._meta.get_field_names()", "def getFields(self):\n return sorted(self.schema.fields, key=lambda f: f.name)", "def field_names(self):\r\n return self._names", "def Fields(self):\n return self._fields", "def fieldNames(self):\n return self.__fieldNames", "def _field_names(self):\n return [self._sanitize_field_name(field_name)\n for field_name in self._all_fields]", "def getName(obj):", "def raw_fields(self):\n pass", "def get_fields():\n if not request.is_xhr:\n abort(403)\n fields = Field.query.all()\n result = {field.id:field.name for field in fields}\n return jsonify(result)", "def get_fields(ds):\n\n # Get layer\n layer = ds.GetLayer(0)\n # feature.GetFieldCount()\n layer_defn = layer.GetLayerDefn()\n field_names = [layer_defn.GetFieldDefn(i).GetName() for i in range(layer_defn.GetFieldCount())]\n\n return field_names", "def get_fieldnames(self):\n fieldnames = self._fields.keys()\n fieldnames.remove('time')\n fieldnames.remove('lon')\n fieldnames.remove('lat')\n return fieldnames", "def fields(self):\n return {k:getattr(self, k, None) for k in self.schema.fields}", "def inspect_model_fields(self, model: ModelRepresentation) -> None:\n c = model.count()\n title(f\"{model.name} ({c})\")\n print(model.fields_info())", "def get_colnames(self, model):\n return [\n field.column \n for field in model._meta.get_fields() \n if getattr(field, 'di_show', False)\n ]", "def _fields_as_string(self):\n return ', '.join([\n field.name for field in self.fields if field.required\n ] + [\n '[%s]' % field.name for field in self.fields if not field.required\n ])", "def all_fields(cls):\n return cls.__by_name.values()", "def get_readonly_fields(self, request, obj=None):\n return [field.name for field in self.model._meta.fields]", "def field_names(cls) -> tuple:\n return tuple((field.name for field in fields(cls)))", "def umm_fields(item):\n return scom.umm_fields(item)", "def _get_fields(self, table):\n fields = list()\n for column in table.columns:\n fields.append({'id': column.name, 'type': str(column.type)})\n return fields", "def _get_fields(self):\n if not self._cursor.description:\n return {}\n\n results = {}\n column = 0\n\n for des in self._cursor.description:\n fieldname = des[0]\n results[column] = fieldname\n column = column + 1\n\n return results", "def get_fields(self):\n fields = super(GeoModelSerializer, self).get_fields()\n # Set the geometry field name when it's undeclared.\n if not self.Meta.geom_field:\n for name, field in fields.items():\n if isinstance(field, GeometryField):\n self.Meta.geom_field = name\n break\n return fields", "def get_fields(self):\r\n return self.fields", "def show_fields(self,\r\n ef_temp=None):\r\n\r\n returnstr = EMPTYCHAR\r\n temp_dict = {}\r\n returnset = set()\r\n for k_temp in self.default_dict['field']:\r\n k_temp = str(k_temp)\r\n\r\n if self.default_dict['field'][k_temp] in temp_dict:\r\n temp_dict[self.default_dict['field'][k_temp]].add(k_temp)\r\n else:\r\n temp_dict[self.default_dict['field'][k_temp]] = {k_temp}\r\n\r\n for k_temp in temp_dict:\r\n returnstr += (k_temp+' : '\r\n +str(rangelist.range_find([Index(a_temp)\r\n for a_temp\r\n in temp_dict[k_temp]],reduce=True)).replace(SLASH,LONGDASH)+EOL)\r\n if ef_temp is None:\r\n\r\n return 
returnstr\r\n\r\n for f_temp in ef_temp:\r\n returnset = returnset.union(temp_dict[f_temp])\r\n return returnset", "def base_field_names(self):\n return self._base_field_names()", "def _get_field_name(self, instance):\n fields = getattr(instance, \"_fields\")\n return fields[self.id]", "def getVirtualFields(self):\n result = []\n objects_cls = self.getClass()\n if objects_cls:\n from cdb.platform.mom import entities, fields\n cls = entities.Class.ByKeys(objects_cls._getClassname())\n result = [f.field_name for f in cls.DDAllFields if isinstance(f, fields.DDVirtualField)]\n return result", "def namehack(field):\n if field.endswith((\"attribute\", \"views\")):\n return field + \"__name\"\n else:\n return field", "def getFields(iface):\n return getFieldsInOrder(iface)", "def get_field_names_for_model(self, model):\n return [field.name for field in model._meta.fields if field.name != \"id\" and not\n (field.get_internal_type() == \"DateTimeField\" and\n (field.auto_now is True or field.auto_now_add is True))]", "def get_fields(self):\n \n return self.metadata.keys()", "def _autoinc_fields(t, module):\n result = set()\n for (fname, f) in datamodel.sorted_fields(t):\n jfname = java.name(fname)\n method = java.CamelCase(jfname)\n (java_type, type_info, ftype) = datamodel.typeref(f, module)\n if ftype and 'autoinc' in syslx.patterns(ftype.attrs):\n result.add(fname)\n return result", "def get_fields_in_model(instance):\n assert isinstance(instance, Document)\n return instance._fields", "def fields(proto):\n return [x[0].name for x in proto.ListFields()]", "def get_api_fields(cls):\n return ['fqdn', 'ttl', 'description', 'views']", "def get_all_fields(context):\n\n schema = zope.component.getUtility(\n IDexterityFTI, name=context.portal_type).lookupSchema()\n fields = dict((fieldname, schema[fieldname]) for fieldname in schema)\n\n assignable = IBehaviorAssignable(context)\n for behavior in assignable.enumerateBehaviors():\n behavior_schema = behavior.interface\n fields.update((name, behavior_schema[name])\n for name in behavior_schema)\n\n return fields", "def get_fields(self):\n\n\t\treturn self.__fields", "def _all_data_fields(field):\n all_fields = PhotoTech.objects.all().values()\n return list(set([all_fields[x][field]\n for x in range(len(all_fields))]))", "def fields(self) -> List[Field]: # pragma: no cover\n pass", "def all_fields(item):\n return scom.all_fields(item)", "def define_fields(cls, dbmanager):\n\n # ATTN: UNFINISHED\n fieldlist = [\n # standard primary id number field\n mdbfield.DbfPrimaryId('id', {\n 'label': \"The primary key and id# for this group\"\n }),\n # globally unique resource reference\n mdbmixins.dbfmixin_gobselfreference(),\n ]\n\n return fieldlist", "def get_field_display_info(self, field_dict, field_name):\n raise NotImplementedError", "def properties(cls) -> str:\n with cls.prime_subfield.repr(\"int\"):\n irreducible_poly_str = str(cls._irreducible_poly)\n primitive_element_str = poly_to_str(integer_to_poly(int(cls.primitive_element), cls.characteristic))\n\n string = \"Galois Field:\"\n string += f\"\\n name: {cls.name}\"\n string += f\"\\n characteristic: {cls.characteristic}\"\n string += f\"\\n degree: {cls.degree}\"\n string += f\"\\n order: {cls.order}\"\n string += f\"\\n irreducible_poly: {irreducible_poly_str}\"\n string += f\"\\n is_primitive_poly: {cls.is_primitive_poly}\"\n string += f\"\\n primitive_element: {primitive_element_str}\"\n\n return string", "def undo_format_field_names(obj):\n if json_api_settings.FORMAT_FIELD_NAMES:\n return 
format_field_names(obj, \"underscore\")\n\n return obj", "def get_fields(self):\n return list(self.metadata.keys())", "def get_fields(self):\n \n fields = []\n for img in self.img_lst:\n fields += img.get_fields()\n \n fields = list(set(fields))\n \n return fields", "def names(self):\r\n return self.get_field(self.name_field)", "def _init_fields(self):\n if self._fields is None:\n M.mset('U', \"^\") # DBS Calls Require this\n f = self._fields = {}\n attrs = self.fieldnames = {}\n fieldid = \"0\"\n while 1:\n # Subscript 0 is field description, .1 is the title, 3 is help\n fieldid, info, title, fieldhelp = M.ddwalk(self._fileid, fieldid)\n #fieldid, info, title, fieldhelp = M.mexec(\n # \"\"\"set s0=$order(^DD(s2,s0)) Q:s0'=+s0 s s1=$G(^DD(s2,s0,0)),s3=$G(^DD(s2,s0,.1)),s4=$G(^DD(s2,s0,3))\"\"\",\n # M.INOUT(str(fieldid)), M.INOUT(\"\"), str(self._fileid), M.INOUT(\"\"), M.INOUT(\"\"))\n if fieldid == \"\" or fieldid[0] not in \"0123456789.\":\n break\n\n info = info.split(\"^\", 4) \n label = self._clean_label(info[0])\n try:\n ftype = info[1]\n except:\n ftype = None\n if ftype:\n finst = None\n for klass in FIELD_TYPES:\n if klass.isa(ftype):\n finst = f[fieldid] = klass(fieldid, label, info)\n finst.fileid = self.fileid\n finst.ownerdd = self\n attrs[label] = fieldid\n break\n if finst is None:\n print finst, \"FIELD [%s], spec [%s] was not identified\" % (label, ftype)\n continue\n finst.title = title\n finst.fieldhelp = fieldhelp\n else:\n assert finst, \"FIELD [%s] %s has no fieldspec\" % (label, info)\n\n return self._fields", "def id_field_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"id_field_names\")", "def id_field_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"id_field_names\")", "def id_field_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"id_field_names\")", "def id_field_names(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"id_field_names\")", "def as_field(identifier: str) -> str:\n return identifier.lower()", "def get_readonly_fields(self, request, obj):\n if obj:\n return ('isd_id',)\n return ()", "def __repr__(self):\n return str({f: self.get(f) for f in self._fields})", "def fieldhelp2(self, fieldid):\n txt = []\n dd_desc = M.Globals[\"^DD\"][self.fileid][fieldid][21]\n for k,v in dd_desc.keys_with_decendants():\n txt.append(dd_desc[k][0].value)\n return '\\n'.join(txt)", "def _get_fields(self):\n return self._fields", "def test_field_names(self):\n\n for mb_model in self.mb_model_list:\n mb_fields = mb_model._meta.fields\n db_cols = connection.introspection.get_table_description(\n self.cursor, mb_model._meta.db_table)\n\n for i in range(0, len(mb_model._meta.fields)):\n self.assertEqual(\n mb_fields[i].column,\n db_cols[i].name\n )", "def id_field_name(self):\n if self.is_root():\n return self.id_field.name\n else:\n return self.id_field", "def get_fields(self):\n\t\treturn self.__fields.copy()", "def __str__(self):\n return str([self.fields[col] for col in self.columns])", "def get_fields(data):\n return data['train'][data['train'].keys()[0]].attrs.keys()", "def _get_fields(self):\n standard_fields = self._get_standards()\n fields = {\n \"id\": \"coalesce(address, id)\",\n \"blockNumber\": \"blockNumber\",\n \"address\": \"address\",\n \"owner\": \"from\",\n \"bytecode\": \"code\"\n }\n fields.update(standard_fields)\n fields_string = \", \".join([\n \"{} AS {}\".format(field, 
alias)\n for alias, field in fields.items()\n ])\n return fields_string", "def get_fields_list(self):\n return self.description[\"fields\"][\"values\"].keys()", "def fields(self):\n _fields = {\n i: attrgetter(i) for i in ('pf_type', 'label',)\n }\n _fields['host'] = self.get_safely_instance_partial(Host, 'host')\n return _fields", "def get_loaded_field_names_cb(self, target, model, fields):\n names = [f.name for f in fields if not getattr(f, \"not_in_db\", False)]\n for field in fields:\n if getattr(field, \"not_in_db\", False):\n names += [f.name for f in field.fields]\n\n target[model] = set(names)", "def name_field(self):\r\n return 'name'", "def field_type(self):\n return \"\"", "def get_fieldlist(cls):\n return cls.fieldlist", "def get_fields(self, table_name):\n return self.get_table_meta(table_name)['fields']", "def _field_prefix(self):\n if self.layer_name == 'geninfo':\n return ''\n return self.layer_name + '.'", "def fields(self) -> Mapping[str, str]:\n return pulumi.get(self, \"fields\")" ]
[ "0.7096624", "0.7052038", "0.68935066", "0.66643006", "0.6553258", "0.6494789", "0.6485936", "0.6427995", "0.64223516", "0.63584894", "0.6328418", "0.63140696", "0.62944096", "0.62799215", "0.6263371", "0.62606794", "0.6165774", "0.6162808", "0.61518234", "0.611477", "0.6103165", "0.60989463", "0.60954493", "0.6091914", "0.606502", "0.6063161", "0.6052018", "0.6041478", "0.60404396", "0.5988789", "0.59862304", "0.59830904", "0.59774446", "0.5968202", "0.59401256", "0.5922599", "0.5921712", "0.5907362", "0.59049535", "0.5901101", "0.5893289", "0.58866894", "0.5884012", "0.5878338", "0.58637595", "0.58546245", "0.5842822", "0.5834063", "0.58326435", "0.5807791", "0.58033496", "0.5803071", "0.57862866", "0.57626367", "0.5758396", "0.57509965", "0.57493615", "0.5734577", "0.5734193", "0.5726508", "0.5719506", "0.57108694", "0.5707341", "0.5707045", "0.57009476", "0.56898606", "0.5687933", "0.5687137", "0.5680359", "0.56731904", "0.5673184", "0.5662813", "0.5661553", "0.5660002", "0.5658827", "0.5654978", "0.56526417", "0.56526417", "0.56526417", "0.56526417", "0.5647085", "0.5642449", "0.56396484", "0.5636533", "0.5629308", "0.5627344", "0.56243855", "0.56141895", "0.5609618", "0.56042165", "0.56017375", "0.55985", "0.55961394", "0.5595096", "0.55913866", "0.5588098", "0.5582615", "0.5580135", "0.556492", "0.5563847" ]
0.765945
0
Checks whether the given ISBN10 code is valid.

>>> isISBN10('9971502100')
True
>>> isISBN10('9971502108')
False
def isISBN10(code):

    # helper function for computing ISBN-10 check digit
    def check_digit(code):

        # compute check digit
        check = sum((i + 1) * int(code[i]) for i in range(9)) % 11

        # convert check digit into its string representation
        return 'X' if check == 10 else str(check)

    # check whether given code is a string
    if not isinstance(code, str):
        return False

    # check whether given code contains 10 characters
    if len(code) != 10:
        return False

    # check whether first nine characters of given code are digits
    if not code[:9].isdigit():
        return False

    # check the check digit
    return check_digit(code) == code[-1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isISBN(code):\n if not (\n isinstance(code, str) and # code must be a string\n len(code) == 10 and # code must contain 10 characters\n code[:9].isdigit() # first nine characters must be digits\n ):\n return False\n\n # check the check digit\n return checkdigit(code) == code[-1]", "def is_isbn_10(isbn10):\r\n isbn10 = re.sub(r'[^0-9X]', '', isbn10.replace('x', 'X'))\r\n if len(isbn10) != 10: return False\r\n return False if isbn_10_check_digit(isbn10[:-1]) != isbn10[-1] else True", "def isbn_10_check_structure(isbn10):\r\n return True if re.match(RE_ISBN10, isbn10) else False", "def is_valid_isbn(isbn):\n clean = clean_isbn(isbn)\n return clean[-1] == isbn_check_digit(clean[:-1])", "def is_valid(isbn: str) -> bool:\n no_dashes = isbn.replace('-', '')\n\n nums = []\n\n valid_num = False\n\n if no_dashes:\n for char in no_dashes:\n if char == 'X':\n nums.append(10)\n elif char != 'X' and char.isalpha() or len(no_dashes) < 10 or len(no_dashes) > 10:\n break\n elif 'X' in no_dashes and no_dashes[-1] != 'X':\n break\n else:\n nums.append(int(char))\n\n char = 0\n value = 0\n\n if nums and len(nums) == 10:\n for n in range(10, 0, -1):\n value += (n * nums[char])\n char += 1\n valid_num = (value % 11 == 0)\n\n return valid_num", "def isISBN(code, isbn13=True):\n\n return isISBN13(code) if isbn13 else isISBN10(code)", "def isISBN13(code):\n\n # helper function for computing ISBN-10 check digit\n def check_digit(code):\n\n # compute check digit\n check = sum((3 if i % 2 else 1) * int(code[i]) for i in range(12))\n\n # convert check digit into a single digit\n return str((10 - check) % 10)\n\n # check whether given code is a string\n if not isinstance(code, str):\n return False\n\n # check whether given code contains 10 characters\n if len(code) != 13:\n return False\n\n # check whether first nine characters of given code are digits\n if not code[:12].isdigit():\n return False\n\n # check the check digit\n return check_digit(code) == code[-1]", "def isbn_has_valid_check_digit(self, isbn):\n if not self.ISBN_RE.match(isbn):\n raise ValueError(str(isbn) + \" is no valid 13-digit ISBN!\")\n checksum = 0\n for index, digit in enumerate(isbn):\n if index % 2 == 0:\n checksum += int(digit)\n else:\n checksum += 3 * int(digit)\n return checksum % 10 == 0", "def is_isbn(val):\n if is_isbn10(val) or is_isbn13(val):\n if val[0:3] in [\"978\", \"979\"] or not is_ean13(val):\n return True\n return False", "def verify(isbn):\n\n isbn = isbn.replace(\"-\", \"\")\n if not verify_format(isbn):\n return False\n\n isbn_sum = 0\n for digit, i in zip(isbn, range(10, 0, -1)):\n if digit == \"X\":\n isbn_sum += 10 * i\n else:\n isbn_sum += int(digit) * i\n\n return isbn_sum % 11 == 0", "def verify_format(isbn):\n\n return len(isbn) == 10 and (isbn[-1] == \"X\" or isbn[-1].isdigit()) \\\n and all(digit.isdigit() for digit in isbn[:-1])", "def isbn():\n message = 'Informe um ISBN válido'\n def _isbn(form, field):\n if not is_isbn10(field.data) and not is_isbn13(field.data):\n raise ValidationError(message)\n return _isbn", "def areISBN(codes, isbn13=None):\n\n # initialize list of checks\n checks = []\n\n # construct list of checks\n for code in codes:\n\n if isinstance(code, str):\n\n if isbn13 is None:\n checks.append(isISBN(code, len(code) == 13))\n else:\n checks.append(isISBN(code, isbn13))\n\n else:\n\n checks.append(False)\n\n # return list of checks\n return checks", "def isbn_13_check_structure(isbn13):\r\n return True if re.match(RE_ISBN13, isbn13) else False", "def is_isbn_13(isbn13):\r\n isbn13 = 
re.sub(r'[^0-9X]', '', isbn13.replace('x', 'X'))\r\n if len(isbn13) != 13: return False\r\n if isbn13[0:3] not in ('978', '979'): return False\r\n return False if isbn_13_check_digit(isbn13[:-1]) != isbn13[-1] else True", "def verify(s):\n\t# Remove any spurious characters\n\ts = re.sub(r'[^0-9xX]', '', s).upper().strip()\n\n\tl = len(s)\n\n\tif l==10:\n\t\tif verify_10(s):\n\t\t\treturn s\n\telif l==13:\n\t\tif verify_13(s):\n\t\t\treturn s\n\n\t# It's not the right length to be an ISBN\n\treturn False", "def isbn_convert(isbn10):\r\n if not is_isbn_10(isbn10): return None\r\n return '978' + isbn10[:-1] + isbn_13_check_digit('978' + isbn10[:-1])", "def valid_book(self, info):\n self.cursor.execute(\"SELECT ISBN, title, price, stock FROM book WHERE ISBN=%s\", (info['ISBN'],))\n for book in self.cursor.fetchall():\n return True, float(book[2]), book[1], book[3]\n return False, 0, 0, 0", "def isbn_check_digit(isbn):\n return (11 - (sum(x * y for (x, y) in enumerate(reversed(isbn), start=2))\n % 11)) % 11", "def testBadFormatISBN(self): \n val = format_isbn(\"1234567843534594123\")\n self.assertFalse(val)", "def is_valid_issn(issn):\n try:\n return bool(validate_issn(issn))\n except (ValueError, TypeError):\n return False", "def isbn_10_check_digit(nine_digits):\r\n if len(nine_digits) != 9: return None\r\n try: int(nine_digits)\r\n except: return None\r\n remainder = int(sum((i + 2) * int(x) for i, x in enumerate(reversed(nine_digits))) % 11)\r\n if remainder == 0: tenth_digit = 0\r\n else: tenth_digit = 11 - remainder\r\n if tenth_digit == 10: tenth_digit = 'X'\r\n return str(tenth_digit)", "def test_book_isbn_length_must_be_ten(self):\n\n\t\twith self.client:\n\t\t\tadd_book = {\n\t\t\t\t'title': 'Hello Books',\n\t\t\t\t'isbn': '56987451'\n\t\t\t}\n\t\t\tlogin_data = self.login_test_user()\n\t\t\ttoken = login_data['auth_token']\n\t\t\tres = self.client.post(\n\t\t\t\tf'{URL_BOOKS}',\n\t\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\t\tcontent_type='application/json',\n\t\t\t\tdata=json.dumps(add_book)\n\t\t\t)\n\t\t\tres2 = json.loads(res.data.decode())\n\t\t\tself.assertIn('isbn length must be 10', str(res2))", "def test_book_isbn_must_only_be_numbers(self):\n\n\t\twith self.client:\n\t\t\tadd_book = {\n\t\t\t\t'title': 'Hello Books',\n\t\t\t\t'isbn': '56987451Ky'\n\t\t\t}\n\t\t\tlogin_data = self.login_test_user()\n\t\t\ttoken = login_data['auth_token']\n\t\t\tres = self.client.post(\n\t\t\t\tf'{URL_BOOKS}',\n\t\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\t\tcontent_type='application/json',\n\t\t\t\tdata=json.dumps(add_book)\n\t\t\t)\n\t\t\tres2 = json.loads(res.data.decode())\n\t\t\tself.assertIn('isbn must only include numbers', str(res2))\n\t\t\tself.assertEqual(res.status_code, 400)", "def is_issn(val):\n try:\n val = val.replace(\"-\", \"\").replace(\" \", \"\").upper()\n if len(val) != 8:\n return False\n r = sum([(8 - i) * (_convert_x_to_10(x)) for i, x in enumerate(val)])\n return not (r % 11)\n except ValueError:\n return False", "def testBadFormatISBNAgain(self): \n val = format_isbn(\"12345678\")\n self.assertFalse(val)", "def validate_isbn_format(isbn_code: str):\n format_valid = False\n isbn = list(isbn_code)\n msj = ''\n\n if len(isbn) == 13:\n\n isbn_numbers = []\n isbn_separator = []\n index = 0\n isbn_characters = []\n\n for character in isbn:\n\n if character in '0123456789':\n isbn_numbers.append(character)\n\n elif character not in '0123456789':\n isbn_characters.append(character)\n\n if character == '-':\n 
isbn_separator.append(character)\n\n if index > 0:\n if isbn[index - 1] not in '0123456789':\n msj = 'Se ingresaron dos separadores juntos'\n break\n else:\n msj = 'Se ingresó un caracter inválido'\n break\n\n index += 1\n\n if len(isbn_numbers) < 10:\n msj = 'Faltan dígitos'\n\n if len(isbn_separator) != 3:\n msj = 'No son 4 grupos de números.'\n\n if len(isbn_separator) < 3:\n diff = 3 - len(isbn_separator)\n msj += ' Faltan ' + str(diff) + ' separadores'\n else:\n diff = len(isbn_separator) - 3\n msj += ' Hay ' + str(diff) + ' separador sobrante'\n\n if msj == '':\n format_valid = True\n\n elif len(isbn) < 13:\n msj = 'Faltan caracteres'\n\n else:\n msj = 'Se excede la cantidad de carácteres'\n\n return format_valid, msj", "def test_and_normalize_isbn(self, isbn):\n ret = {\"valid\": False, \"input_value\": str(isbn)}\n stripped_isbn = isbn.strip()\n unsplit_isbn = stripped_isbn.replace(\"-\", \"\")\n split_on_input = False\n if self.ISBN_SPLIT_RE.match(stripped_isbn):\n if len(stripped_isbn) < 17:\n ret[\"error_type\"] = 1\n return ret\n elif len(stripped_isbn) > 17:\n ret[\"error_type\"] = 2\n return ret\n else:\n split_on_input = True\n if self.ISBN_RE.match(unsplit_isbn):\n split_isbn = self.split_isbn(unsplit_isbn)[\"value\"]\n if split_on_input and split_isbn != stripped_isbn:\n ret[\"error_type\"] = 3\n return ret\n ret[\"normalised\"] = split_isbn\n ret[\"valid\"] = True\n return ret\n ret[\"error_type\"] = 0\n return ret", "def is_id_valid(id_code: str) -> bool:\n if id_code.isdigit():\n if len(str(id_code)) == 11:\n id_code = str(id_code)\n gender_number = int(id_code[0:1])\n day = int(id_code[5:7])\n month = int(id_code[3:5])\n year = id_code[1:3]\n birth_number = id_code[7:10]\n if is_valid_gender_number(gender_number) \\\n and is_valid_year_number(int(year)) \\\n and is_valid_month_number(int(month)) \\\n and is_valid_day_number(gender_number, int(year), int(month), int(day)) \\\n and is_valid_birth_number(int(birth_number)) \\\n and is_valid_control_number(str(id_code)):\n return True\n return False\n return False\n return False", "def valid_ric(ticker, ric):\n split_ric = ric.split('.')\n ticker_ = split_ric[0]\n exchange = split_ric[1]\n database = helper.create_db()\n exchange_list = database.retrieve_column_as_list(\"exchanges\",\n \"exchange_code\")\n return ticker == ticker_ and exchange in exchange_list", "def validate_isbn_math_relation(isbn_code: str):\n isbn_code_valid = False\n isbn_only_numbers = []\n msj = ''\n\n for character in isbn_code:\n if character in '0123456789':\n char_parse_int = int(character)\n isbn_only_numbers.append(char_parse_int)\n else:\n pass\n\n pos = 10\n addition = 0\n for num in isbn_only_numbers:\n mult = pos * num\n addition += mult\n pos -= 1\n\n final_result = addition % 11\n\n if final_result == 0:\n isbn_code_valid = True\n\n if not isbn_code_valid:\n msj = 'No se cumple la relación matemática'\n\n return isbn_code_valid, msj", "def testFormatISBN(self): \n val = format_isbn(\"1234567894123\")\n self.assertEqual(val,\"123-4-567-89412-3\")", "def is_id_valid(id_code: str) -> bool:\n if is_valid_gender_number(int(id_code[0:1])):\n if is_valid_year_number(int(id_code[1:3])):\n if is_valid_month_number(int(id_code[3:5])):\n if is_valid_day_number(int(id_code[0:1]), int(id_code[1:3]), int(id_code[3:5]), int(id_code[5:7])):\n if is_valid_birth_number(int(float(id_code[7:10]))):\n if is_valid_control_number(id_code):\n return True\n else:\n return False\n else:\n return False\n\n else:\n return False\n else:\n return False\n else:\n 
return False", "def barcode_is_10xgenomics(s):\n return bool(re.match(r'^SI\\-[A-Z0-9]+\\-[A-Z0-9]+$',s))", "def valid_barcode(s):\n # implement this function!\n odd_digits = 0\n even_digits = 0\n result = 0\n for i in range(len(s) - 1):\n if i % 2 == 0:\n odd_digits += int(s[i])\n else:\n even_digits += int(s[i])\n result = (3 * odd_digits + even_digits) % 10\n if result != 0:\n result = 10 - result\n\n try:\n if int(s[-1]) == result and len(s) == 12:\n return True\n else:\n return False\n except IndexError:\n return False", "def split_isbn(self, isbn):\n ret_value = {\n 'success': False,\n 'value': None\n }\n split_isbn = \"\"\n remaining_isbn = isbn\n\n if not self.ISBN_RE.match(isbn):\n ret_value['value'] = '\"' + str(isbn) + '\" is no valid 13-digit ISBN!'\n return ret_value\n for ean in self.ean_elements:\n prefix = ean.find(\"Prefix\").text\n if remaining_isbn.startswith(prefix):\n split_isbn += prefix\n remaining_isbn = remaining_isbn[len(prefix):]\n rules = ean.find(\"Rules\")\n length = self._get_range_length_from_rules(remaining_isbn, rules)\n if length == 0:\n msg = ('Invalid ISBN: Remaining fragment \"{}\" for EAN prefix \"{}\" is inside a ' +\n 'range which is not marked for use yet')\n ret_value['value'] = msg.format(remaining_isbn, prefix)\n return ret_value\n group = remaining_isbn[:length]\n split_isbn += \"-\" + group\n remaining_isbn = remaining_isbn[length:]\n break\n else:\n msg = 'ISBN \"{}\" does not seem to have a valid prefix.'\n ret_value['value'] = msg.format(isbn)\n return ret_value\n for group in self.registration_groups:\n prefix = group.find(\"Prefix\").text\n if split_isbn == prefix:\n rules = group.find(\"Rules\")\n length = self._get_range_length_from_rules(remaining_isbn, rules)\n if length == 0:\n msg = ('Invalid ISBN: Remaining fragment \"{}\" for registration group \"{}\" is ' +\n 'inside a range which is not marked for use yet')\n ret_value['value'] = msg.format(remaining_isbn, split_isbn)\n return ret_value\n registrant = remaining_isbn[:length]\n split_isbn += \"-\" + registrant\n remaining_isbn = remaining_isbn[length:]\n check_digit = remaining_isbn[-1:]\n publication_number = remaining_isbn[:-1]\n split_isbn += \"-\" + publication_number + \"-\" + check_digit\n ret_value['success'] = True\n ret_value['value'] = split_isbn\n return ret_value\n else:\n msg = 'ISBN \"{}\" does not seem to have a valid registration group element.'\n ret_value['value'] = msg.format(isbn)\n return ret_value", "def is_isni(val):\n val = val.replace(\"-\", \"\").replace(\" \", \"\").upper()\n if len(val) != 16:\n return False\n try:\n r = 0\n for x in val[:-1]:\n r = (r + int(x)) * 2\n ck = (12 - r % 11) % 11\n return ck == _convert_x_to_10(val[-1])\n except ValueError:\n return False", "def check_for_integer(number):\r\n \r\n try:\r\n int(number) \r\n return True\r\n except ValueError:\r\n return False", "def test_search_by_ISBN(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n self.assertEqual(s1.search_by_ISBN(\"0123456789123\"), 0)\n s1.add_resource(b1)\n self.assertEqual(s1.search_by_ISBN(\"0123456789123\"), 1)", "def calc_check_digit_issn(issn):\n\n total = 0\n lissn = list(issn.replace('-', ''))\n\n for i, v in enumerate(lissn[:-1]):\n total = total + ((8-i) * int(v))\n\n remainder = total % 11\n\n if not remainder:\n check_digit = 0\n else:\n check_digit = 11 - remainder\n\n return 'X' if check_digit == 10 else str(check_digit)", "def barcode_is_valid(s):\n return (bool(re.match(r'^[ATGC]*$',s))\n 
or barcode_is_10xgenomics(s))", "def validate_identifier(identifier: str) -> bool:\n if identifier[:2] == 'NR':\n return True\n\n if len(identifier) < 9:\n return False\n\n try:\n d = int(identifier[-7:])\n if d == 0:\n return False\n except ValueError:\n return False\n # TODO This is not correct for entity types that are not Coops\n if identifier[:-7] not in ('CP', 'XCP', 'BC'):\n return False\n\n return True", "def verify(n):\n\n # Take the sum of all digits.\n sum_of_digits = sum(luhn_digits(n))\n\n # The number is valid iff the sum of digits modulo 10 is equal to 0\n return sum_of_digits % 10 == 0", "def intable(int_str, base=10):\n try:\n int(int_str, base)\n return True\n except:\n return False", "def isnumber(n):\r\n N = str(n)\r\n if N.isdigit():\r\n return True\r\n else:\r\n return False", "def is_int(n):\n try:\n int(n)\n return True\n except ValueError:\n return False", "def isInteger(number) :\n\n try:\n int(number)\n return True\n except ValueError:\n return False", "def check_card_number(self, card_number):\n database_cursor.execute(f\"SELECT number FROM card WHERE number = {card_number};\")\n result = database_cursor.fetchall()\n return result[0][0] == card_number if result else False", "def validate_bookid(self,book_id):\r\n if int(book_id) in [i.book_id for i in self.issued_books]:\r\n return True\r\n else:\r\n return False", "def isInteger(number) :\n\n try:\n int(number)\n return True \n except ValueError:\n return False", "def is_valid_birth_number(birth_number: int):\n if birth_number in range(1, 1000):\n return True\n else:\n return False", "def isValid(t_id):\n\tstr_id=str(t_id).strip()\n\treturn str_id.isdigit()", "def is_valid_birth_number(birth_number: int):\n if birth_number in range(1, 1000):\n return True\n return False", "def is_valid_integer(input_string):\n\n assert input_string is not None\n try:\n input_string = int(input_string)\n return True\n except ValueError:\n return False", "def validateFormat(barcode):\r\n validatesymbol = 0\r\n delimitedsymbol = 0\r\n if barcode[0] == '' or barcode[-1] == '':\r\n validatesymbol += 1\r\n for i in range(len(barcode)):\r\n try:\r\n int(barcode[i])\r\n except ValueError:\r\n if barcode[i] == '-':\r\n delimitedsymbol += 1\r\n else:\r\n validatesymbol += 1\r\n if delimitedsymbol == 0 and validatesymbol == 0:\r\n if len(barcode) == 12 or len(barcode) == 13:\r\n pass\r\n else:\r\n validatesymbol += 1\r\n if validatesymbol == 0:\r\n return True\r\n else:\r\n return False", "def check(card_number):\n if re.search(r'\\d{4} \\d{4} \\d{4} \\d{4}', card_number): \n if sum(int(c) for c in card_number.replace(\" \",\"\"))%10 == 0:\n return True", "def CheckZipCode(zipcode):\n # see if there are enough digits\n if (len(zipcode) >= 5):\n # check if numerical\n try:\n int(zipcode)\n return True\n except:\n return False\n else:\n return False", "def is_valid_control_number(id_code: str) -> bool:\n sum = 1 * int(id_code[:1]) + 2 * int(id_code[1:2]) + 3 * int(id_code[2:3]) + 4 * int(id_code[3:4]) + 5 * \\\n int(id_code[4:5]) + 6 * int(id_code[5:6]) + 7 * int(id_code[6:7]) + 8 * int(id_code[7:8]) + 9 *\\\n int(id_code[8:9]) + 1 * int(id_code[9:10])\n control_number = sum % 11\n if int(control_number) == int(id_code[10:11]):\n return True\n elif int(control_number) == 10:\n sum = 3 * int(id_code[:1]) + 4 * int(id_code[1:2]) + 5 * int(id_code[2:3]) + 6 * int(id_code[3:4]) + 7 * \\\n int(id_code[4:5]) + 8 * int(id_code[5:6]) + 9 * int(id_code[6:7]) + 1 * int(id_code[7:8]) + 2 * \\\n int(id_code[8:9]) + 3 * int(id_code[9:10])\n 
control_number = sum % 11\n if control_number == int(id_code[10:11]):\n return True\n elif control_number == 10:\n if int(id_code[10:11]) == 0:\n return True\n else:\n return False\n else:\n return False", "def testgetISBN(self):\r\n ebook1 = ElectronicResources()\r\n #ebook1.setListDevices([device1, device2])\r\n ebook1.setISBN(9780316485616)\r\n #ebook1.setEBookTitle('The Night Fire')\r\n #ebook1.setEBookAuthor('Harry Bosch')\r\n self.assertEqual(ebook1.getISBN(),9780316485616)", "def pintest(self, barcode, pin):\n u = self.dump(barcode)\n if 'ERRNUM' in u:\n return False\n return len(barcode) == 14 or pin == barcode[0] * 4", "def validate(input):\n regex = re.compile(r'(UL)?\\d{1,' + re.escape(str(barcode_digit_length)) + '}$', flags=re.IGNORECASE)\n if regex.match(input):\n is_valid = True\n else:\n is_valid = False\n return is_valid", "def input_validation(input_: str) -> bool:\n return fullmatch('[1-9]', input_) is not None", "def is_valid(self, doi):\n doi = self.normalise(doi, include_prefix=True)\n\n if doi is None or match(\"^doi:10\\\\..+/.+$\", doi) is None:\n return False\n else:\n if not doi in self._data or self._data[doi] is None:\n return self.__doi_exists(doi)\n return self._data[doi].get(\"valid\")", "def is_valid_control_number(id_code: str) -> bool:\n check_numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1]\n check_sum = 0\n for i in range(10):\n check_sum += int(id_code[i]) * check_numbers[i]\n check_sum = check_sum % 11\n if check_sum == 10:\n check_numbers = [3, 4, 5, 6, 7, 8, 9, 1, 2, 3]\n check_sum = 0\n for i in range(10):\n check_sum += int(id_code[i]) * check_numbers[i]\n check_sum = check_sum % 11\n if check_sum == int(id_code[-1]):\n return True\n return False", "def validate_NRIC(nric):\n\tif len(nric) != 9: # invalid length\n\t\treturn \"Invalid length (must be exactly 9 characters, was given %d.)\" % len(\n\t\t nric)\n\n\t# Constants\n\tNRIC_ID = nric[0]\n\tLAST_LETTER = nric[-1]\n\tNUMBERS = nric[1:-1]\n\n\tif not match(r'[STFG]', nric):\n\t\t# First letter is not S, T, F or G\n\t\treturn \"Invalid NRIC ID: %s\" % NRIC_ID\n\n\t# The NRIC first and last letters should be a letter, the middle should\n\t# be all numbers (7 numbers exactly)\n\tif match(r'[STFG][0-9]+[A-Z]', nric) is None:\n\t\treturn \"Invalid format: %s\" % nric\n\n\tchecksum = calculate_checksum(NRIC_ID, NUMBERS)\n\tlast_letter_value = checksum % 11\n\tif last_letter_value == get_value(LAST_LETTER, NRIC_ID):\n\t\treturn \"Okay.\"\n\telse:\n\t\treturn \"Invalid NRIC, last letter must be %s.\" % get_letter(\n\t\t last_letter_value, NRIC_ID)", "def is_luhn_valid(card_number):\n is_valid = luhn_checksum(card_number) == 0\n return is_valid", "def is_code_valid_checksum(processed_code):\n\n if processed_code.isnumeric():\n list_of_digits = [int(digit) for digit in processed_code]\n else:\n converted_digits = convert_code_to_decimal(processed_code)\n list_of_digits = [int(digit) for digit in converted_digits]\n\n return sum(list_of_digits) > 0 and get_calculated_checksum(list_of_digits) % 11 == 0", "def is_hashed_base58_valid(base58):\n try:\n a2b_hashed_base58(base58)\n except EncodingError:\n return False\n return True", "def isInt(s):\n try:\n int(s)\n return True\n except ValueError:\n return False", "def phone_number_validator(phone_number):\n if len(phone_number) != 10:\n return False\n if phone_number[0] == '0':\n return False\n try:\n int(phone_number)\n except ValueError:\n return False\n return True", "def is_valid(key):\n return key[0:2] == \"MR\" and key[2:].isdigit() and len(key) in [9, 10]", 
"def is_int_value(int_value):\n try:\n int(int_value)\n except ValueError:\n return False\n return True", "def checkBarcode(barcode):\r\n barcode = barcode.strip()\r\n if validateFormat(barcode) is False:\r\n return 'barcode not valid'\r\n else:\r\n barcode = barcode.replace('-','')\r\n if len(barcode) == 12:\r\n fullbarcode = barcode + str(findlastdigit(barcode))\r\n return fullbarcode\r\n elif len(barcode) == 13:\r\n if findlastdigit(barcode) == int(barcode[-1]):\r\n return 'Valid'\r\n else:\r\n return 'Invalid'", "def __is_int(self,string):\r\n try: \r\n int(string)\r\n return True\r\n except ValueError:\r\n return False", "def validate_SSN(SSN_test):\n\n is_valid_SSN = False\n\n # if user breaks format but enters 9 digits, SSN is counted as valid\n if len(SSN_test) == 9 and SSN_test.isdigit():\n is_valid_SSN = True\n\n\n # otherwise, if the length is not 11 characters, and there aren't at least 2 dashes, entry immediately fails\n elif len(SSN_test) != 11 or (SSN_test.count(\"-\") != 2):\n pass\n\n # if the dashes are in the wrong place, entry fails\n elif (SSN_test[3] != \"-\") and (SSN_test[6] != \"-\"):\n pass\n\n # dashes are correct, but all other characters must be numbers\n else:\n valid_SSN1 = (SSN_test[0 : 3]).isdigit()\n valid_SSN2 = (SSN_test[4 : 6]).isdigit()\n valid_SSN3 = (SSN_test[7 : ]).isdigit()\n if (valid_SSN1 and valid_SSN2 and valid_SSN3):\n is_valid_SSN = True\n else:\n is_valid_SSN = False\n\n return is_valid_SSN", "def is_valid_year_number(year_number: int) -> bool:\n if year_number in range(100):\n return True\n else:\n return False", "def _is_int(test_val):\n try:\n int(test_val)\n return True\n except ValueError:\n return False", "def is_number(s):\r\n try:\r\n int(s)\r\n return True\r\n except ValueError:\r\n return False", "def is_valid_bid(klass, bid):\n return bid and re.match(\"^\\d{3,5}$\", bid.strip())", "def is_valid(self):\n return phonenumbers.is_valid_number(self)", "def esCUITValida(cuit):\n # Convertimos el valor a una cadena\n cuit = str(cuit)\n # Aca removemos guiones, espacios y puntos para poder trabajar\n cuit = cuit.replace(\"-\", \"\") # Borramos los guiones\n cuit = cuit.replace(\" \", \"\") # Borramos los espacios\n cuit = cuit.replace(\".\", \"\") # Borramos los puntos\n # Si no tiene 11 caracteres lo descartamos\n if len(cuit) != 11:\n return False, cuit\n # Solo resta analizar si todos los caracteres son numeros\n if not cuit.isdigit():\n return False, cuit\n # Despues de estas validaciones podemos afirmar\n # que contamos con 11 numeros\n # Aca comienza la magia\n base = [5, 4, 3, 2, 7, 6, 5, 4, 3, 2]\n aux = 0\n for i in range(10):\n aux += int(cuit[i]) * base[i]\n aux = 11 - (aux % 11)\n if aux == 11:\n aux = 0\n elif aux == 10:\n aux = 9\n if int(cuit[10]) == aux:\n return True, cuit\n else:\n return False, cuit", "def isint(s):\n try:\n x = int(s)\n return True\n except:\n return False", "def check_number(self):\n digits = self.number\n _sum = 0\n alt = False\n ix = []\n for x in str(digits):\n ix.append(int(x))\n for d in reversed(ix):\n assert 0 <= d <= 9\n if alt:\n d *= 2\n if d > 9:\n d -= 9\n _sum += d\n alt = not alt\n return (_sum % 10) == 0", "def is_integer(x):\n try:\n int(x)\n return True\n except ValueError:\n return False", "def create_book(self, title, isbn):\n isbn_list = [book.get_isbn() for book in self.books.keys()]\n if isbn in isbn_list:\n print(\"ISBN {isbn} already exists. 
Please provide a unique ISBN.\".format(isbn=isbn))\n else:\n return Book(title, isbn)", "def getISBN(self):\n return self.bookISBN", "def validate_account_number(num, should_exist=True):\n if len(num) != 8:\n return False\n elif num[0] == '0':\n return False\n else:\n if should_exist:\n return account_number_exists(num)\n else:\n return not account_number_exists(num)", "def is_int(string:str) -> bool:\n try:\n int(string)\n return True\n except:\n return False", "def is_number(s):\n try:\n int(s)\n return True\n except ValueError:\n return False", "def check_code(item_code):\r\n # RA matches\r\n if re.match(r'^MCRNC[0-9]{4}\\.T$', item_code):\r\n return True\r\n\r\n if re.match(r'^RAN[0-9]{3,4}(\\.[0-9])?C?(\\.T)?$', item_code):\r\n return True\r\n\r\n if re.match(r'^RAS[0-9]{5}$', item_code):\r\n return True\r\n\r\n if re.match(r'^RNC[0-9]{4}\\.T$', item_code):\r\n return True\r\n\r\n if re.match(r'^RU[0-9]{5}(\\.T)?$', item_code):\r\n return True\r\n\r\n # Feature ID (RAN) matches\r\n if re.match(r'^RAN[0-9]{2,5}$', item_code):\r\n return True\r\n\r\n if re.match(r'^(?P<code>RAN[1,2](\\.[0-9]{3,4}))$', item_code):\r\n return True\r\n\r\n return False", "def siruta_is_valid(self, siruta):\n if type(siruta) != int:\n siruta = int(siruta)\n if siruta >= 10**6:\n return False\n weights = [1, 2, 3, 5, 7]\n checksum = 0\n checkdigit = siruta % 10\n index = 0\n while (index < 5):\n siruta = int(siruta / 10)\n left = (siruta % 10) * weights[index]\n checksum += sum(map(int, str(left))) # sum of digits of left\n index += 1\n checksum %= 10\n checksum = 11 - checksum\n checksum %= 10\n return checksum == checkdigit", "def is_valid_two_digit_char(code: str) -> bool:\n\n return 10 <= int(code) <= 26", "def is_in_stock(self, bookID):\n query = f\"\"\"SELECT quantity from {TABLE} where bookID = '{bookID}';\"\"\"\n self.cursor.execute(query)\n\n q = self.cursor.fetchone()\n\n if q[0] > 0:\n return True\n else:\n return False", "def is_valid(self):\n\n # get company id + filial id (first 12 digits)\n cnpj = self.cnpj[:12]\n \n # and following rules we stabilish some weight to multiply\n def weightlist(s=12):\n x = (list(range(2,10))*2)[:s]\n x.reverse()\n return x\n \n # while cnpj isn't complete\n while len(cnpj) < 14:\n\n # run trought numbers (x) mutiplying by weight (y) and then get\n # sum of rest of division by 11 as interger\n # (we have more than 9 digits so isn't simple as make calcs for CPF)\n r = int(sum([x*y for (x, y) in zip(cnpj, weightlist(len(cnpj)))]) % 11)\n\n # if digit is smaller than 2, turns 0\n if r < 2:\n f = 0\n else:\n f = 11 - r\n\n # append digit to cnpj\n cnpj.append(f)\n\n # if created number is same as original number, cnpj is valid\n return bool(cnpj == self.cnpj)", "def ni_number_check(number):\n ni_nuber = re.match(r\"^\\s*[a-zA-Z]{2}(?:\\s*\\d\\s*){6}[a-zA-Z]?\\s*$\", number)\n if ni_nuber:\n return True\n return False", "def book_id_check(book_num):\n try:\n global book_id\n book_id=int(book_num)\n if str(book_id) in valid_books.valid_book_ids:\n book_id_check.bID=\"Accepted\"\n global max_book_id\n max_book_id=len(valid_books.valid_book_ids)\n else:\n book_id_check.bID=\"Book ID not recognised\"\n except ValueError:\n book_id_check.bID=\"Book ID number not recognised\"\n return book_id_check.bID", "def is_number(c):\n return '0' <= c <= '9'", "def validatePhoneNumber(self):\n ## Declaring a Flag to control a while loop\n phone_number_ok = False\n ## While loop to have user retry their input if they enter incorrectly\n while not phone_number_ok:\n ## Asking for a 
phone number and checkig to see if it is 10 digits\n if self.phone_number.isdigit():\n if len(self.phone_number) == 10:\n phone_number_ok = True\n return True\n else:\n print(\"Please Enter a 10 digit phone number.\")\n return False\n \n else:\n print(\"You have enetered an invalid phone number. Please try again.\")\n return False", "def is_int(string):\n try:\n int(string)\n return True\n except ValueError:\n return False", "def __has_numbers(self, input_string):\n return bool(re.search(r'\\d', input_string))" ]
[ "0.82796675", "0.8265154", "0.7875872", "0.76414096", "0.7556362", "0.734542", "0.73319346", "0.72045577", "0.7138764", "0.7138109", "0.704493", "0.64504737", "0.6407236", "0.6328351", "0.63012135", "0.6227752", "0.6151876", "0.60880727", "0.60821617", "0.60790056", "0.6012908", "0.58996457", "0.585556", "0.5822463", "0.58159876", "0.58129185", "0.581137", "0.57795024", "0.5748956", "0.57382864", "0.5692514", "0.56570226", "0.5577275", "0.5574845", "0.5547365", "0.55198455", "0.5511975", "0.5511943", "0.5499345", "0.5489469", "0.54857653", "0.54417497", "0.5435852", "0.54170644", "0.5412326", "0.53471285", "0.53439194", "0.5338729", "0.53172165", "0.5279215", "0.5277962", "0.5277249", "0.5263795", "0.5246527", "0.5244099", "0.5236712", "0.52233016", "0.52135926", "0.52116823", "0.51804143", "0.5174245", "0.5174087", "0.51738065", "0.51717514", "0.5166531", "0.5162977", "0.5159363", "0.51460695", "0.5129307", "0.51269543", "0.50903654", "0.5079046", "0.50732", "0.50725734", "0.5067817", "0.5060259", "0.5056975", "0.5027129", "0.50240636", "0.5017469", "0.5014738", "0.49930835", "0.49873045", "0.49824518", "0.49768326", "0.49767223", "0.49755946", "0.4968461", "0.49624398", "0.4955072", "0.49509272", "0.49478984", "0.49454015", "0.49453244", "0.49418288", "0.49266568", "0.4919914", "0.49163935", "0.49144697", "0.49075526" ]
0.8820598
0
Checks whether the given ISBN13 code is valid.

>>> isISBN13('9789743159664')
True
>>> isISBN13('9787954527409')
False
>>> isISBN13('8799743159665')
False
def isISBN13(code):

    # helper function for computing ISBN-13 check digit
    def check_digit(code):

        # compute check digit
        check = sum((3 if i % 2 else 1) * int(code[i]) for i in range(12))

        # convert check digit into a single digit
        return str((10 - check) % 10)

    # check whether given code is a string
    if not isinstance(code, str):
        return False

    # check whether given code contains 13 characters
    if len(code) != 13:
        return False

    # check whether first twelve characters of given code are digits
    if not code[:12].isdigit():
        return False

    # check the check digit
    return check_digit(code) == code[-1]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_isbn_13(isbn13):\r\n isbn13 = re.sub(r'[^0-9X]', '', isbn13.replace('x', 'X'))\r\n if len(isbn13) != 13: return False\r\n if isbn13[0:3] not in ('978', '979'): return False\r\n return False if isbn_13_check_digit(isbn13[:-1]) != isbn13[-1] else True", "def isISBN(code, isbn13=True):\n\n return isISBN13(code) if isbn13 else isISBN10(code)", "def isbn_13_check_structure(isbn13):\r\n return True if re.match(RE_ISBN13, isbn13) else False", "def areISBN(codes, isbn13=None):\n\n # initialize list of checks\n checks = []\n\n # construct list of checks\n for code in codes:\n\n if isinstance(code, str):\n\n if isbn13 is None:\n checks.append(isISBN(code, len(code) == 13))\n else:\n checks.append(isISBN(code, isbn13))\n\n else:\n\n checks.append(False)\n\n # return list of checks\n return checks", "def isISBN(code):\n if not (\n isinstance(code, str) and # code must be a string\n len(code) == 10 and # code must contain 10 characters\n code[:9].isdigit() # first nine characters must be digits\n ):\n return False\n\n # check the check digit\n return checkdigit(code) == code[-1]", "def isbn_has_valid_check_digit(self, isbn):\n if not self.ISBN_RE.match(isbn):\n raise ValueError(str(isbn) + \" is no valid 13-digit ISBN!\")\n checksum = 0\n for index, digit in enumerate(isbn):\n if index % 2 == 0:\n checksum += int(digit)\n else:\n checksum += 3 * int(digit)\n return checksum % 10 == 0", "def is_valid_isbn(isbn):\n clean = clean_isbn(isbn)\n return clean[-1] == isbn_check_digit(clean[:-1])", "def isISBN10(code):\n\n # helper function for computing ISBN-10 check digit\n def check_digit(code):\n\n # compute check digit\n check = sum((i + 1) * int(code[i]) for i in range(9)) % 11\n\n # convert check digit into its string representation\n return 'X' if check == 10 else str(check)\n\n # check whether given code is a string\n if not isinstance(code, str):\n return False\n\n # check whether given code contains 10 characters\n if len(code) != 10:\n return False\n\n # check whether first nine characters of given code are digits\n if not code[:9].isdigit():\n return False\n\n # check the check digit\n return check_digit(code) == code[-1]", "def is_valid(isbn: str) -> bool:\n no_dashes = isbn.replace('-', '')\n\n nums = []\n\n valid_num = False\n\n if no_dashes:\n for char in no_dashes:\n if char == 'X':\n nums.append(10)\n elif char != 'X' and char.isalpha() or len(no_dashes) < 10 or len(no_dashes) > 10:\n break\n elif 'X' in no_dashes and no_dashes[-1] != 'X':\n break\n else:\n nums.append(int(char))\n\n char = 0\n value = 0\n\n if nums and len(nums) == 10:\n for n in range(10, 0, -1):\n value += (n * nums[char])\n char += 1\n valid_num = (value % 11 == 0)\n\n return valid_num", "def is_isbn(val):\n if is_isbn10(val) or is_isbn13(val):\n if val[0:3] in [\"978\", \"979\"] or not is_ean13(val):\n return True\n return False", "def verify_format(isbn):\n\n return len(isbn) == 10 and (isbn[-1] == \"X\" or isbn[-1].isdigit()) \\\n and all(digit.isdigit() for digit in isbn[:-1])", "def verify(s):\n\t# Remove any spurious characters\n\ts = re.sub(r'[^0-9xX]', '', s).upper().strip()\n\n\tl = len(s)\n\n\tif l==10:\n\t\tif verify_10(s):\n\t\t\treturn s\n\telif l==13:\n\t\tif verify_13(s):\n\t\t\treturn s\n\n\t# It's not the right length to be an ISBN\n\treturn False", "def verify(isbn):\n\n isbn = isbn.replace(\"-\", \"\")\n if not verify_format(isbn):\n return False\n\n isbn_sum = 0\n for digit, i in zip(isbn, range(10, 0, -1)):\n if digit == \"X\":\n isbn_sum += 10 * i\n else:\n isbn_sum += int(digit) * i\n\n 
return isbn_sum % 11 == 0", "def isbn13_convert(isbn13):\r\n if not is_isbn_13(isbn13): return None\r\n return isbn13[3:-1] + isbn_10_check_digit(isbn13[3:-1])", "def isbn_10_check_structure(isbn10):\r\n return True if re.match(RE_ISBN10, isbn10) else False", "def isbn():\n message = 'Informe um ISBN válido'\n def _isbn(form, field):\n if not is_isbn10(field.data) and not is_isbn13(field.data):\n raise ValidationError(message)\n return _isbn", "def validate_isbn_format(isbn_code: str):\n format_valid = False\n isbn = list(isbn_code)\n msj = ''\n\n if len(isbn) == 13:\n\n isbn_numbers = []\n isbn_separator = []\n index = 0\n isbn_characters = []\n\n for character in isbn:\n\n if character in '0123456789':\n isbn_numbers.append(character)\n\n elif character not in '0123456789':\n isbn_characters.append(character)\n\n if character == '-':\n isbn_separator.append(character)\n\n if index > 0:\n if isbn[index - 1] not in '0123456789':\n msj = 'Se ingresaron dos separadores juntos'\n break\n else:\n msj = 'Se ingresó un caracter inválido'\n break\n\n index += 1\n\n if len(isbn_numbers) < 10:\n msj = 'Faltan dígitos'\n\n if len(isbn_separator) != 3:\n msj = 'No son 4 grupos de números.'\n\n if len(isbn_separator) < 3:\n diff = 3 - len(isbn_separator)\n msj += ' Faltan ' + str(diff) + ' separadores'\n else:\n diff = len(isbn_separator) - 3\n msj += ' Hay ' + str(diff) + ' separador sobrante'\n\n if msj == '':\n format_valid = True\n\n elif len(isbn) < 13:\n msj = 'Faltan caracteres'\n\n else:\n msj = 'Se excede la cantidad de carácteres'\n\n return format_valid, msj", "def is_isbn_10(isbn10):\r\n isbn10 = re.sub(r'[^0-9X]', '', isbn10.replace('x', 'X'))\r\n if len(isbn10) != 10: return False\r\n return False if isbn_10_check_digit(isbn10[:-1]) != isbn10[-1] else True", "def validate_isbn_math_relation(isbn_code: str):\n isbn_code_valid = False\n isbn_only_numbers = []\n msj = ''\n\n for character in isbn_code:\n if character in '0123456789':\n char_parse_int = int(character)\n isbn_only_numbers.append(char_parse_int)\n else:\n pass\n\n pos = 10\n addition = 0\n for num in isbn_only_numbers:\n mult = pos * num\n addition += mult\n pos -= 1\n\n final_result = addition % 11\n\n if final_result == 0:\n isbn_code_valid = True\n\n if not isbn_code_valid:\n msj = 'No se cumple la relación matemática'\n\n return isbn_code_valid, msj", "def testBadFormatISBN(self): \n val = format_isbn(\"1234567843534594123\")\n self.assertFalse(val)", "def is_code_valid_checksum(processed_code):\n\n if processed_code.isnumeric():\n list_of_digits = [int(digit) for digit in processed_code]\n else:\n converted_digits = convert_code_to_decimal(processed_code)\n list_of_digits = [int(digit) for digit in converted_digits]\n\n return sum(list_of_digits) > 0 and get_calculated_checksum(list_of_digits) % 11 == 0", "def is_valid_issn(issn):\n try:\n return bool(validate_issn(issn))\n except (ValueError, TypeError):\n return False", "def test_book_isbn_must_only_be_numbers(self):\n\n\t\twith self.client:\n\t\t\tadd_book = {\n\t\t\t\t'title': 'Hello Books',\n\t\t\t\t'isbn': '56987451Ky'\n\t\t\t}\n\t\t\tlogin_data = self.login_test_user()\n\t\t\ttoken = login_data['auth_token']\n\t\t\tres = self.client.post(\n\t\t\t\tf'{URL_BOOKS}',\n\t\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\t\tcontent_type='application/json',\n\t\t\t\tdata=json.dumps(add_book)\n\t\t\t)\n\t\t\tres2 = json.loads(res.data.decode())\n\t\t\tself.assertIn('isbn must only include numbers', str(res2))\n\t\t\tself.assertEqual(res.status_code, 400)", "def 
is_ean13(val):\n if len(val) != 13:\n return False\n sequence = [1, 3]\n try:\n r = sum([int(x) * sequence[i % 2] for i, x in enumerate(val[:-1])])\n ck = (10 - r % 10) % 10\n return ck == int(val[-1])\n except ValueError:\n return False", "def valid_book(self, info):\n self.cursor.execute(\"SELECT ISBN, title, price, stock FROM book WHERE ISBN=%s\", (info['ISBN'],))\n for book in self.cursor.fetchall():\n return True, float(book[2]), book[1], book[3]\n return False, 0, 0, 0", "def validate_identifier(identifier: str) -> bool:\n if identifier[:2] == 'NR':\n return True\n\n if len(identifier) < 9:\n return False\n\n try:\n d = int(identifier[-7:])\n if d == 0:\n return False\n except ValueError:\n return False\n # TODO This is not correct for entity types that are not Coops\n if identifier[:-7] not in ('CP', 'XCP', 'BC'):\n return False\n\n return True", "def is_id_valid(id_code: str) -> bool:\n if is_valid_gender_number(int(id_code[0:1])):\n if is_valid_year_number(int(id_code[1:3])):\n if is_valid_month_number(int(id_code[3:5])):\n if is_valid_day_number(int(id_code[0:1]), int(id_code[1:3]), int(id_code[3:5]), int(id_code[5:7])):\n if is_valid_birth_number(int(float(id_code[7:10]))):\n if is_valid_control_number(id_code):\n return True\n else:\n return False\n else:\n return False\n\n else:\n return False\n else:\n return False\n else:\n return False", "def testBadFormatISBNAgain(self): \n val = format_isbn(\"12345678\")\n self.assertFalse(val)", "def is_id_valid(id_code: str) -> bool:\n if id_code.isdigit():\n if len(str(id_code)) == 11:\n id_code = str(id_code)\n gender_number = int(id_code[0:1])\n day = int(id_code[5:7])\n month = int(id_code[3:5])\n year = id_code[1:3]\n birth_number = id_code[7:10]\n if is_valid_gender_number(gender_number) \\\n and is_valid_year_number(int(year)) \\\n and is_valid_month_number(int(month)) \\\n and is_valid_day_number(gender_number, int(year), int(month), int(day)) \\\n and is_valid_birth_number(int(birth_number)) \\\n and is_valid_control_number(str(id_code)):\n return True\n return False\n return False\n return False", "def barcode_is_valid(s):\n return (bool(re.match(r'^[ATGC]*$',s))\n or barcode_is_10xgenomics(s))", "def check_code(item_code):\r\n # RA matches\r\n if re.match(r'^MCRNC[0-9]{4}\\.T$', item_code):\r\n return True\r\n\r\n if re.match(r'^RAN[0-9]{3,4}(\\.[0-9])?C?(\\.T)?$', item_code):\r\n return True\r\n\r\n if re.match(r'^RAS[0-9]{5}$', item_code):\r\n return True\r\n\r\n if re.match(r'^RNC[0-9]{4}\\.T$', item_code):\r\n return True\r\n\r\n if re.match(r'^RU[0-9]{5}(\\.T)?$', item_code):\r\n return True\r\n\r\n # Feature ID (RAN) matches\r\n if re.match(r'^RAN[0-9]{2,5}$', item_code):\r\n return True\r\n\r\n if re.match(r'^(?P<code>RAN[1,2](\\.[0-9]{3,4}))$', item_code):\r\n return True\r\n\r\n return False", "def is_valid(self):\n return phonenumbers.is_valid_number(self)", "def test_and_normalize_isbn(self, isbn):\n ret = {\"valid\": False, \"input_value\": str(isbn)}\n stripped_isbn = isbn.strip()\n unsplit_isbn = stripped_isbn.replace(\"-\", \"\")\n split_on_input = False\n if self.ISBN_SPLIT_RE.match(stripped_isbn):\n if len(stripped_isbn) < 17:\n ret[\"error_type\"] = 1\n return ret\n elif len(stripped_isbn) > 17:\n ret[\"error_type\"] = 2\n return ret\n else:\n split_on_input = True\n if self.ISBN_RE.match(unsplit_isbn):\n split_isbn = self.split_isbn(unsplit_isbn)[\"value\"]\n if split_on_input and split_isbn != stripped_isbn:\n ret[\"error_type\"] = 3\n return ret\n ret[\"normalised\"] = split_isbn\n ret[\"valid\"] = True\n 
return ret\n ret[\"error_type\"] = 0\n return ret", "def is_valid_month_number(month_number: int) -> bool:\n if month_number in range(13):\n return True\n else:\n return False", "def validate(input):\n regex = re.compile(r'(UL)?\\d{1,' + re.escape(str(barcode_digit_length)) + '}$', flags=re.IGNORECASE)\n if regex.match(input):\n is_valid = True\n else:\n is_valid = False\n return is_valid", "def isbn_check_digit(isbn):\n return (11 - (sum(x * y for (x, y) in enumerate(reversed(isbn), start=2))\n % 11)) % 11", "def split_isbn(self, isbn):\n ret_value = {\n 'success': False,\n 'value': None\n }\n split_isbn = \"\"\n remaining_isbn = isbn\n\n if not self.ISBN_RE.match(isbn):\n ret_value['value'] = '\"' + str(isbn) + '\" is no valid 13-digit ISBN!'\n return ret_value\n for ean in self.ean_elements:\n prefix = ean.find(\"Prefix\").text\n if remaining_isbn.startswith(prefix):\n split_isbn += prefix\n remaining_isbn = remaining_isbn[len(prefix):]\n rules = ean.find(\"Rules\")\n length = self._get_range_length_from_rules(remaining_isbn, rules)\n if length == 0:\n msg = ('Invalid ISBN: Remaining fragment \"{}\" for EAN prefix \"{}\" is inside a ' +\n 'range which is not marked for use yet')\n ret_value['value'] = msg.format(remaining_isbn, prefix)\n return ret_value\n group = remaining_isbn[:length]\n split_isbn += \"-\" + group\n remaining_isbn = remaining_isbn[length:]\n break\n else:\n msg = 'ISBN \"{}\" does not seem to have a valid prefix.'\n ret_value['value'] = msg.format(isbn)\n return ret_value\n for group in self.registration_groups:\n prefix = group.find(\"Prefix\").text\n if split_isbn == prefix:\n rules = group.find(\"Rules\")\n length = self._get_range_length_from_rules(remaining_isbn, rules)\n if length == 0:\n msg = ('Invalid ISBN: Remaining fragment \"{}\" for registration group \"{}\" is ' +\n 'inside a range which is not marked for use yet')\n ret_value['value'] = msg.format(remaining_isbn, split_isbn)\n return ret_value\n registrant = remaining_isbn[:length]\n split_isbn += \"-\" + registrant\n remaining_isbn = remaining_isbn[length:]\n check_digit = remaining_isbn[-1:]\n publication_number = remaining_isbn[:-1]\n split_isbn += \"-\" + publication_number + \"-\" + check_digit\n ret_value['success'] = True\n ret_value['value'] = split_isbn\n return ret_value\n else:\n msg = 'ISBN \"{}\" does not seem to have a valid registration group element.'\n ret_value['value'] = msg.format(isbn)\n return ret_value", "def valid_barcode(s):\n # implement this function!\n odd_digits = 0\n even_digits = 0\n result = 0\n for i in range(len(s) - 1):\n if i % 2 == 0:\n odd_digits += int(s[i])\n else:\n even_digits += int(s[i])\n result = (3 * odd_digits + even_digits) % 10\n if result != 0:\n result = 10 - result\n\n try:\n if int(s[-1]) == result and len(s) == 12:\n return True\n else:\n return False\n except IndexError:\n return False", "def is_isni(val):\n val = val.replace(\"-\", \"\").replace(\" \", \"\").upper()\n if len(val) != 16:\n return False\n try:\n r = 0\n for x in val[:-1]:\n r = (r + int(x)) * 2\n ck = (12 - r % 11) % 11\n return ck == _convert_x_to_10(val[-1])\n except ValueError:\n return False", "def validate_seq(sequence):\n sequence = sequence.strip()\n sequence = sequence.replace(\" \", \"\")\n sequence.upper()\n regex = re.compile('^[ACTGNRYSWKMBDHVEFILPQSXZ]*$', re.I)\n if regex.search(sequence) is not None:\n return True\n else:\n return False", "def CheckZipCode(zipcode):\n # see if there are enough digits\n if (len(zipcode) >= 5):\n # check if numerical\n try:\n 
int(zipcode)\n return True\n except:\n return False\n else:\n return False", "def validateFormat(barcode):\r\n validatesymbol = 0\r\n delimitedsymbol = 0\r\n if barcode[0] == '' or barcode[-1] == '':\r\n validatesymbol += 1\r\n for i in range(len(barcode)):\r\n try:\r\n int(barcode[i])\r\n except ValueError:\r\n if barcode[i] == '-':\r\n delimitedsymbol += 1\r\n else:\r\n validatesymbol += 1\r\n if delimitedsymbol == 0 and validatesymbol == 0:\r\n if len(barcode) == 12 or len(barcode) == 13:\r\n pass\r\n else:\r\n validatesymbol += 1\r\n if validatesymbol == 0:\r\n return True\r\n else:\r\n return False", "def is_valid_year_number(year_number: int) -> bool:\n if year_number in range(100):\n return True\n else:\n return False", "def seq_validator(sequence):\n\n # checks for ascii characters that should not appear in a fasta sequence\n seq_val = re.compile(r\"[.-@|\\s| -)|z-~|Z-`|EFIJLOPQX|efijlopqx+,]+\")\n\n if seq_val.search(sequence) is None:\n return True\n\n return False", "def isnumber(n):\r\n N = str(n)\r\n if N.isdigit():\r\n return True\r\n else:\r\n return False", "def isEncAddress(key):\n\tif re.search('^EAddr38[a-km-zA-HJ-NP-Z0-9]{56}$', key):\n\t\tif checkChecksum(key) is False:\n\t\t\treturn True, 'checksum'\n\t\treturn True, 'good'\n\telse:\n\t\treturn False, 'not valid'", "def is_issn(val):\n try:\n val = val.replace(\"-\", \"\").replace(\" \", \"\").upper()\n if len(val) != 8:\n return False\n r = sum([(8 - i) * (_convert_x_to_10(x)) for i, x in enumerate(val)])\n return not (r % 11)\n except ValueError:\n return False", "def is_valid(postal_code):\n return bool(re.match(UK_POST_CODE_REGEX, postal_code, re.VERBOSE)) if postal_code else False", "def testFormatISBN(self): \n val = format_isbn(\"1234567894123\")\n self.assertEqual(val,\"123-4-567-89412-3\")", "def is_valid_expiration_year(expiration_year: int) -> bool:\n return expiration_year.isnumeric() and 2020 <= int(expiration_year) <= 2030", "def is_valid(key):\n return key[0:2] == \"MR\" and key[2:].isdigit() and len(key) in [9, 10]", "def is_valid_issue_year(issue_year: int) -> bool:\n return issue_year.isnumeric() and 2010 <= int(issue_year) <= 2020", "def checkBarcode(barcode):\r\n barcode = barcode.strip()\r\n if validateFormat(barcode) is False:\r\n return 'barcode not valid'\r\n else:\r\n barcode = barcode.replace('-','')\r\n if len(barcode) == 12:\r\n fullbarcode = barcode + str(findlastdigit(barcode))\r\n return fullbarcode\r\n elif len(barcode) == 13:\r\n if findlastdigit(barcode) == int(barcode[-1]):\r\n return 'Valid'\r\n else:\r\n return 'Invalid'", "def check_ean(eancode):\n if not eancode:\n return True\n if len(eancode) <> 13:\n return False\n try:\n int(eancode)\n except:\n return False\n return ean_checksum(eancode) == int(eancode[-1])", "def verify(n):\n\n # Take the sum of all digits.\n sum_of_digits = sum(luhn_digits(n))\n\n # The number is valid iff the sum of digits modulo 10 is equal to 0\n return sum_of_digits % 10 == 0", "def test_book_isbn_length_must_be_ten(self):\n\n\t\twith self.client:\n\t\t\tadd_book = {\n\t\t\t\t'title': 'Hello Books',\n\t\t\t\t'isbn': '56987451'\n\t\t\t}\n\t\t\tlogin_data = self.login_test_user()\n\t\t\ttoken = login_data['auth_token']\n\t\t\tres = self.client.post(\n\t\t\t\tf'{URL_BOOKS}',\n\t\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\t\tcontent_type='application/json',\n\t\t\t\tdata=json.dumps(add_book)\n\t\t\t)\n\t\t\tres2 = json.loads(res.data.decode())\n\t\t\tself.assertIn('isbn length must be 10', str(res2))", "def 
is_valid_postal_code(postal_code):\n is_code_valid = False\n postcode_regex = re.compile(r'^\\d{2}-\\d{3}$')\n\n if postcode_regex.search(postal_code) is not None:\n is_code_valid = True\n\n return is_code_valid", "def input_validation(input_: str) -> bool:\n return fullmatch('[1-9]', input_) is not None", "def _is_doi(s: str) -> bool:\n # Thanks to Andrew Gilmartin\n # https://www.crossref.org/blog/dois-and-matching-regular-expressions/\n match = re.match(r\"^10.\\d{4,9}/[-._;()/:A-Z0-9]+$\", s, re.IGNORECASE)\n\n return match is not None and match.group() is not None", "def is_luhn_valid(card_number):\n is_valid = luhn_checksum(card_number) == 0\n return is_valid", "def ni_number_check(number):\n ni_nuber = re.match(r\"^\\s*[a-zA-Z]{2}(?:\\s*\\d\\s*){6}[a-zA-Z]?\\s*$\", number)\n if ni_nuber:\n return True\n return False", "def is_code_contain_multiple_bad_digits(processed_code):\n return True if list(processed_code).count(\"?\") > 1 else False", "def is_valid_hex(hex_code: str) -> bool:\n\n match = re.search(r'^#(?:[0-9a-fA-F]{3}){1,2}$', hex_code)\n\n if match:\n return True\n else:\n return False", "def barcode_is_10xgenomics(s):\n return bool(re.match(r'^SI\\-[A-Z0-9]+\\-[A-Z0-9]+$',s))", "def is_valid_doi(doi):\n\n try:\n req = requests.get('http://dx.doi.org/%s' % doi, timeout=2.5)\n except (Timeout, RequestException) as e:\n logger.error('Can not validate doi: ' + str(e))\n raise\n else:\n return req.status_code == 200", "def citationContainsDOI(citation):\n if citation.startswith(\"doi:\"):\n return True\n elif citation.startswith(\"@doi:\"):\n return True\n elif citation.startswith(\"[@doi\"):\n return True\n else:\n return False", "def validate_account_number(num, should_exist=True):\n if len(num) != 8:\n return False\n elif num[0] == '0':\n return False\n else:\n if should_exist:\n return account_number_exists(num)\n else:\n return not account_number_exists(num)", "def is_valid_year_number(year_number: int) -> bool:\n if 0 <= int(year_number) < 100:\n return True\n return False", "def check_ont_address_format(address):\n if len(address) != 34:\n return False\n\n for ch in address:\n if ch not in '123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz':\n return False\n\n return True", "def is_valid(self, doi):\n doi = self.normalise(doi, include_prefix=True)\n\n if doi is None or match(\"^doi:10\\\\..+/.+$\", doi) is None:\n return False\n else:\n if not doi in self._data or self._data[doi] is None:\n return self.__doi_exists(doi)\n return self._data[doi].get(\"valid\")", "def esCUITValida(cuit):\n # Convertimos el valor a una cadena\n cuit = str(cuit)\n # Aca removemos guiones, espacios y puntos para poder trabajar\n cuit = cuit.replace(\"-\", \"\") # Borramos los guiones\n cuit = cuit.replace(\" \", \"\") # Borramos los espacios\n cuit = cuit.replace(\".\", \"\") # Borramos los puntos\n # Si no tiene 11 caracteres lo descartamos\n if len(cuit) != 11:\n return False, cuit\n # Solo resta analizar si todos los caracteres son numeros\n if not cuit.isdigit():\n return False, cuit\n # Despues de estas validaciones podemos afirmar\n # que contamos con 11 numeros\n # Aca comienza la magia\n base = [5, 4, 3, 2, 7, 6, 5, 4, 3, 2]\n aux = 0\n for i in range(10):\n aux += int(cuit[i]) * base[i]\n aux = 11 - (aux % 11)\n if aux == 11:\n aux = 0\n elif aux == 10:\n aux = 9\n if int(cuit[10]) == aux:\n return True, cuit\n else:\n return False, cuit", "def validate_SSN(SSN_test):\n\n is_valid_SSN = False\n\n # if user breaks format but enters 9 digits, SSN is counted as valid\n if 
len(SSN_test) == 9 and SSN_test.isdigit():\n is_valid_SSN = True\n\n\n # otherwise, if the length is not 11 characters, and there aren't at least 2 dashes, entry immediately fails\n elif len(SSN_test) != 11 or (SSN_test.count(\"-\") != 2):\n pass\n\n # if the dashes are in the wrong place, entry fails\n elif (SSN_test[3] != \"-\") and (SSN_test[6] != \"-\"):\n pass\n\n # dashes are correct, but all other characters must be numbers\n else:\n valid_SSN1 = (SSN_test[0 : 3]).isdigit()\n valid_SSN2 = (SSN_test[4 : 6]).isdigit()\n valid_SSN3 = (SSN_test[7 : ]).isdigit()\n if (valid_SSN1 and valid_SSN2 and valid_SSN3):\n is_valid_SSN = True\n else:\n is_valid_SSN = False\n\n return is_valid_SSN", "def is_valid_two_digit_char(code: str) -> bool:\n\n return 10 <= int(code) <= 26", "def is_valid_bid(klass, bid):\n return bid and re.match(\"^\\d{3,5}$\", bid.strip())", "def is_valid_key(key, crypt_method):\n logger.info(f\"key: {key}, crypt_method: {crypt_method}\")\n if crypt_method == 'C':\n while type(key) is not int or key not in range(0, 95):\n try:\n key = Check.is_integer(key)[1]\n if key not in range(0, 95):\n raise ValueError\n except (TypeError, ValueError):\n print(\"You must enter an integer between 1 and 95!\")\n key = input(\"Enter an encryption key\\n>> \")\n elif crypt_method in ('M', 'P'):\n pass\n else:\n return False\n return True, key", "def isSequenceValid(sequence):\n if not sequence:\n return False\n allowed_chars = set('GCAU')\n return set(sequence).issubset(allowed_chars)", "def istele(number):\n if number[:3] == '140':\n return True\n return False", "def is_valid_control_number(id_code: str) -> bool:\n check_numbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 1]\n check_sum = 0\n for i in range(10):\n check_sum += int(id_code[i]) * check_numbers[i]\n check_sum = check_sum % 11\n if check_sum == 10:\n check_numbers = [3, 4, 5, 6, 7, 8, 9, 1, 2, 3]\n check_sum = 0\n for i in range(10):\n check_sum += int(id_code[i]) * check_numbers[i]\n check_sum = check_sum % 11\n if check_sum == int(id_code[-1]):\n return True\n return False", "def isNumber(string):\r\n for char in string:\r\n charNum = ord(char)\r\n if (charNum < 48 or charNum > 57):\r\n return False\r\n return True", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValidationError:\n return False", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValidationError:\n return False", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValidationError:\n return False", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValidationError:\n return False", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValidationError:\n return False", "def is_valid(number):\n try:\n return bool(validate(number))\n except ValidationError:\n return False", "def is_sms_valid(text=''):\n try:\n text.decode('ascii')\n except:\n return False\n if len(text) > 160:\n return False\n\n return True", "def is_valid_control_number(id_code: str) -> bool:\n sum = 1 * int(id_code[:1]) + 2 * int(id_code[1:2]) + 3 * int(id_code[2:3]) + 4 * int(id_code[3:4]) + 5 * \\\n int(id_code[4:5]) + 6 * int(id_code[5:6]) + 7 * int(id_code[6:7]) + 8 * int(id_code[7:8]) + 9 *\\\n int(id_code[8:9]) + 1 * int(id_code[9:10])\n control_number = sum % 11\n if int(control_number) == int(id_code[10:11]):\n return True\n elif int(control_number) == 10:\n sum = 3 * int(id_code[:1]) + 4 * int(id_code[1:2]) + 5 * int(id_code[2:3]) + 6 * int(id_code[3:4]) + 7 * \\\n int(id_code[4:5]) + 8 * 
int(id_code[5:6]) + 9 * int(id_code[6:7]) + 1 * int(id_code[7:8]) + 2 * \\\n int(id_code[8:9]) + 3 * int(id_code[9:10])\n control_number = sum % 11\n if control_number == int(id_code[10:11]):\n return True\n elif control_number == 10:\n if int(id_code[10:11]) == 0:\n return True\n else:\n return False\n else:\n return False", "def is_isbn_or_keyword(inputs):\n isbn_or_keyword='keyword'\n if len(inputs)==13 and inputs.isdigit():\n isbn_or_keyword='isbn'\n short_inputs=inputs.strip('-')\n if '-' in inputs and short_inputs.isdigit() and len(short_inputs)==10:\n isbn_or_keyword='isbn'\n return isbn_or_keyword", "def is_mci(code):\n assert isinstance(code, str)\n code_set = ('331.83', '294.9', 'G31.84', 'F09', '33183', '2949', 'G3184')\n return code.startswith(code_set)", "def isbn_convert(isbn10):\r\n if not is_isbn_10(isbn10): return None\r\n return '978' + isbn10[:-1] + isbn_13_check_digit('978' + isbn10[:-1])", "def is_code_has_unknown_digit(processed_code):\n return True if list(processed_code).count(\"?\") == 0 else False", "def test_valid_luhn(self):\n assert luhn_checksum(\"79927398713\") == 0", "def is_valid_year(year_number):\n\n if (type(year_number) == int) and (START_YEAR <= year_number <= FINAL_YEAR):\n return True\n\n return False", "def validate_license(key: str) -> bool:\r\n return bool(\r\n re.match(r'^PB-[A-Z0-9]{8}(?:-[A-Z0-9]{8}){3}$', key)\r\n )", "def valid(formula):\r\n\r\n try:\r\n return not re.search(r'\\b0[0-9]', formula) and eval((formula) is True\r\n #except ArithmeticError:\r\n #return False\r\n except:\r\n return False", "def is_valid_language_code(code):\n try:\n iso639.languages.get(part3=code)\n return True\n except KeyError:\n return False", "def test_issn_incorrect_length(self):\n issns = ['0307-15', '0307-15789', '03071758', '0307175']\n for num in issns:\n with self.subTest(num=num):\n with self.assertRaisesRegex(ValueError, num):\n star_barcode.construct_postscript(\n issn=num,\n bwipp_location=self.bwipp,\n sequence=21,\n week=46,\n header_line=''\n )", "def spki_req_is_valid(spki_req):\n try:\n netscape_spki_from_b64(spki_req)\n return True\n except Exception:\n return False", "def is_number(s):\r\n try:\r\n int(s)\r\n return True\r\n except ValueError:\r\n return False", "def _is_valid_code(self, code):\r\n return code in COUNTRY_CODES" ]
[ "0.8294502", "0.8156061", "0.7813687", "0.7593365", "0.7539352", "0.7096975", "0.70109516", "0.6998053", "0.6760535", "0.6702344", "0.6442007", "0.64231825", "0.6403078", "0.6313602", "0.6305555", "0.6260889", "0.6107894", "0.5969835", "0.5896946", "0.57390374", "0.56742215", "0.56619984", "0.5472034", "0.5456971", "0.54418916", "0.5413317", "0.5353483", "0.534153", "0.5326616", "0.5303506", "0.5217984", "0.5211934", "0.51902163", "0.51871204", "0.5084005", "0.5074594", "0.50719017", "0.50622916", "0.50617665", "0.5058008", "0.50500935", "0.5020174", "0.50175", "0.50040436", "0.4989212", "0.49833947", "0.49579582", "0.49529052", "0.4929644", "0.49093333", "0.49092564", "0.4906936", "0.4903256", "0.4901409", "0.4896878", "0.48937032", "0.4869668", "0.48673004", "0.48531428", "0.48336205", "0.48329458", "0.4825178", "0.48226863", "0.48027158", "0.47957835", "0.47878042", "0.47827426", "0.4766859", "0.4756467", "0.47523406", "0.4734664", "0.47339183", "0.47305772", "0.47181854", "0.4710365", "0.47096112", "0.46994412", "0.46975347", "0.46925443", "0.46924987", "0.46924987", "0.46924987", "0.46924987", "0.46924987", "0.46924987", "0.46814105", "0.46790826", "0.46769387", "0.46705437", "0.4666508", "0.46633697", "0.4661586", "0.46615326", "0.46581215", "0.4626793", "0.46239927", "0.4621767", "0.4616196", "0.45998105", "0.45958" ]
0.86895674
0
>>> isISBN('9789027439642', False)
False
>>> isISBN('9789027439642', True)
True
>>> isISBN('9789027439642')
True
>>> isISBN('080442957X')
False
>>> isISBN('080442957X', False)
True
def isISBN(code, isbn13=True):
    return isISBN13(code) if isbn13 else isISBN10(code)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_isbn(val):\n if is_isbn10(val) or is_isbn13(val):\n if val[0:3] in [\"978\", \"979\"] or not is_ean13(val):\n return True\n return False", "def isISBN(code):\n if not (\n isinstance(code, str) and # code must be a string\n len(code) == 10 and # code must contain 10 characters\n code[:9].isdigit() # first nine characters must be digits\n ):\n return False\n\n # check the check digit\n return checkdigit(code) == code[-1]", "def is_valid_isbn(isbn):\n clean = clean_isbn(isbn)\n return clean[-1] == isbn_check_digit(clean[:-1])", "def isbn_10_check_structure(isbn10):\r\n return True if re.match(RE_ISBN10, isbn10) else False", "def is_valid(isbn: str) -> bool:\n no_dashes = isbn.replace('-', '')\n\n nums = []\n\n valid_num = False\n\n if no_dashes:\n for char in no_dashes:\n if char == 'X':\n nums.append(10)\n elif char != 'X' and char.isalpha() or len(no_dashes) < 10 or len(no_dashes) > 10:\n break\n elif 'X' in no_dashes and no_dashes[-1] != 'X':\n break\n else:\n nums.append(int(char))\n\n char = 0\n value = 0\n\n if nums and len(nums) == 10:\n for n in range(10, 0, -1):\n value += (n * nums[char])\n char += 1\n valid_num = (value % 11 == 0)\n\n return valid_num", "def isISBN10(code):\n\n # helper function for computing ISBN-10 check digit\n def check_digit(code):\n\n # compute check digit\n check = sum((i + 1) * int(code[i]) for i in range(9)) % 11\n\n # convert check digit into its string representation\n return 'X' if check == 10 else str(check)\n\n # check whether given code is a string\n if not isinstance(code, str):\n return False\n\n # check whether given code contains 10 characters\n if len(code) != 10:\n return False\n\n # check whether first nine characters of given code are digits\n if not code[:9].isdigit():\n return False\n\n # check the check digit\n return check_digit(code) == code[-1]", "def verify(s):\n\t# Remove any spurious characters\n\ts = re.sub(r'[^0-9xX]', '', s).upper().strip()\n\n\tl = len(s)\n\n\tif l==10:\n\t\tif verify_10(s):\n\t\t\treturn s\n\telif l==13:\n\t\tif verify_13(s):\n\t\t\treturn s\n\n\t# It's not the right length to be an ISBN\n\treturn False", "def areISBN(codes, isbn13=None):\n\n # initialize list of checks\n checks = []\n\n # construct list of checks\n for code in codes:\n\n if isinstance(code, str):\n\n if isbn13 is None:\n checks.append(isISBN(code, len(code) == 13))\n else:\n checks.append(isISBN(code, isbn13))\n\n else:\n\n checks.append(False)\n\n # return list of checks\n return checks", "def isbn():\n message = 'Informe um ISBN válido'\n def _isbn(form, field):\n if not is_isbn10(field.data) and not is_isbn13(field.data):\n raise ValidationError(message)\n return _isbn", "def verify(isbn):\n\n isbn = isbn.replace(\"-\", \"\")\n if not verify_format(isbn):\n return False\n\n isbn_sum = 0\n for digit, i in zip(isbn, range(10, 0, -1)):\n if digit == \"X\":\n isbn_sum += 10 * i\n else:\n isbn_sum += int(digit) * i\n\n return isbn_sum % 11 == 0", "def testBadFormatISBN(self): \n val = format_isbn(\"1234567843534594123\")\n self.assertFalse(val)", "def isISBN13(code):\n\n # helper function for computing ISBN-10 check digit\n def check_digit(code):\n\n # compute check digit\n check = sum((3 if i % 2 else 1) * int(code[i]) for i in range(12))\n\n # convert check digit into a single digit\n return str((10 - check) % 10)\n\n # check whether given code is a string\n if not isinstance(code, str):\n return False\n\n # check whether given code contains 10 characters\n if len(code) != 13:\n return False\n\n # check whether first nine characters 
of given code are digits\n if not code[:12].isdigit():\n return False\n\n # check the check digit\n return check_digit(code) == code[-1]", "def verify_format(isbn):\n\n return len(isbn) == 10 and (isbn[-1] == \"X\" or isbn[-1].isdigit()) \\\n and all(digit.isdigit() for digit in isbn[:-1])", "def is_isbn_10(isbn10):\r\n isbn10 = re.sub(r'[^0-9X]', '', isbn10.replace('x', 'X'))\r\n if len(isbn10) != 10: return False\r\n return False if isbn_10_check_digit(isbn10[:-1]) != isbn10[-1] else True", "def isbn_13_check_structure(isbn13):\r\n return True if re.match(RE_ISBN13, isbn13) else False", "def testBadFormatISBNAgain(self): \n val = format_isbn(\"12345678\")\n self.assertFalse(val)", "def isbn_has_valid_check_digit(self, isbn):\n if not self.ISBN_RE.match(isbn):\n raise ValueError(str(isbn) + \" is no valid 13-digit ISBN!\")\n checksum = 0\n for index, digit in enumerate(isbn):\n if index % 2 == 0:\n checksum += int(digit)\n else:\n checksum += 3 * int(digit)\n return checksum % 10 == 0", "def is_isbn_13(isbn13):\r\n isbn13 = re.sub(r'[^0-9X]', '', isbn13.replace('x', 'X'))\r\n if len(isbn13) != 13: return False\r\n if isbn13[0:3] not in ('978', '979'): return False\r\n return False if isbn_13_check_digit(isbn13[:-1]) != isbn13[-1] else True", "def testFormatISBN(self): \n val = format_isbn(\"1234567894123\")\n self.assertEqual(val,\"123-4-567-89412-3\")", "def valid_book(self, info):\n self.cursor.execute(\"SELECT ISBN, title, price, stock FROM book WHERE ISBN=%s\", (info['ISBN'],))\n for book in self.cursor.fetchall():\n return True, float(book[2]), book[1], book[3]\n return False, 0, 0, 0", "def test_and_normalize_isbn(self, isbn):\n ret = {\"valid\": False, \"input_value\": str(isbn)}\n stripped_isbn = isbn.strip()\n unsplit_isbn = stripped_isbn.replace(\"-\", \"\")\n split_on_input = False\n if self.ISBN_SPLIT_RE.match(stripped_isbn):\n if len(stripped_isbn) < 17:\n ret[\"error_type\"] = 1\n return ret\n elif len(stripped_isbn) > 17:\n ret[\"error_type\"] = 2\n return ret\n else:\n split_on_input = True\n if self.ISBN_RE.match(unsplit_isbn):\n split_isbn = self.split_isbn(unsplit_isbn)[\"value\"]\n if split_on_input and split_isbn != stripped_isbn:\n ret[\"error_type\"] = 3\n return ret\n ret[\"normalised\"] = split_isbn\n ret[\"valid\"] = True\n return ret\n ret[\"error_type\"] = 0\n return ret", "def testgetISBN(self):\r\n ebook1 = ElectronicResources()\r\n #ebook1.setListDevices([device1, device2])\r\n ebook1.setISBN(9780316485616)\r\n #ebook1.setEBookTitle('The Night Fire')\r\n #ebook1.setEBookAuthor('Harry Bosch')\r\n self.assertEqual(ebook1.getISBN(),9780316485616)", "def isbn_check_digit(isbn):\n return (11 - (sum(x * y for (x, y) in enumerate(reversed(isbn), start=2))\n % 11)) % 11", "def isbn_lookup(isbn):\n base = \"https://www.googleapis.com/books/v1/volumes?q=isbn=\"\n# Unfortunately we can't use the superior \"with spam as eggs\" syntax here...\n search = urlopen(base + isbn + \"&prettyprint=false\")\n lines = search.read()\n search.close()\n for bool_pair in [(\"false\", \"False\"), (\"true\", \"True\")]:\n lines = lines.replace(*bool_pair)\n volume_info = literal_eval(lines)[\"items\"][0][\"volumeInfo\"]\n title = volume_info[\"title\"]\n authors = ', '.join(a for a in volume_info[\"authors\"])\n return \"Title:\\t\\t%s\\nAuthor(s):\\t%s\" % (title, authors)", "def is_isbn_or_keyword(inputs):\n isbn_or_keyword='keyword'\n if len(inputs)==13 and inputs.isdigit():\n isbn_or_keyword='isbn'\n short_inputs=inputs.strip('-')\n if '-' in inputs and short_inputs.isdigit() and 
len(short_inputs)==10:\n isbn_or_keyword='isbn'\n return isbn_or_keyword", "def getISBN(self):\n return self.bookISBN", "def test_search_by_ISBN(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n self.assertEqual(s1.search_by_ISBN(\"0123456789123\"), 0)\n s1.add_resource(b1)\n self.assertEqual(s1.search_by_ISBN(\"0123456789123\"), 1)", "def isbn_convert(isbn10):\r\n if not is_isbn_10(isbn10): return None\r\n return '978' + isbn10[:-1] + isbn_13_check_digit('978' + isbn10[:-1])", "def split_isbn(self, isbn):\n ret_value = {\n 'success': False,\n 'value': None\n }\n split_isbn = \"\"\n remaining_isbn = isbn\n\n if not self.ISBN_RE.match(isbn):\n ret_value['value'] = '\"' + str(isbn) + '\" is no valid 13-digit ISBN!'\n return ret_value\n for ean in self.ean_elements:\n prefix = ean.find(\"Prefix\").text\n if remaining_isbn.startswith(prefix):\n split_isbn += prefix\n remaining_isbn = remaining_isbn[len(prefix):]\n rules = ean.find(\"Rules\")\n length = self._get_range_length_from_rules(remaining_isbn, rules)\n if length == 0:\n msg = ('Invalid ISBN: Remaining fragment \"{}\" for EAN prefix \"{}\" is inside a ' +\n 'range which is not marked for use yet')\n ret_value['value'] = msg.format(remaining_isbn, prefix)\n return ret_value\n group = remaining_isbn[:length]\n split_isbn += \"-\" + group\n remaining_isbn = remaining_isbn[length:]\n break\n else:\n msg = 'ISBN \"{}\" does not seem to have a valid prefix.'\n ret_value['value'] = msg.format(isbn)\n return ret_value\n for group in self.registration_groups:\n prefix = group.find(\"Prefix\").text\n if split_isbn == prefix:\n rules = group.find(\"Rules\")\n length = self._get_range_length_from_rules(remaining_isbn, rules)\n if length == 0:\n msg = ('Invalid ISBN: Remaining fragment \"{}\" for registration group \"{}\" is ' +\n 'inside a range which is not marked for use yet')\n ret_value['value'] = msg.format(remaining_isbn, split_isbn)\n return ret_value\n registrant = remaining_isbn[:length]\n split_isbn += \"-\" + registrant\n remaining_isbn = remaining_isbn[length:]\n check_digit = remaining_isbn[-1:]\n publication_number = remaining_isbn[:-1]\n split_isbn += \"-\" + publication_number + \"-\" + check_digit\n ret_value['success'] = True\n ret_value['value'] = split_isbn\n return ret_value\n else:\n msg = 'ISBN \"{}\" does not seem to have a valid registration group element.'\n ret_value['value'] = msg.format(isbn)\n return ret_value", "def validate_isbn_format(isbn_code: str):\n format_valid = False\n isbn = list(isbn_code)\n msj = ''\n\n if len(isbn) == 13:\n\n isbn_numbers = []\n isbn_separator = []\n index = 0\n isbn_characters = []\n\n for character in isbn:\n\n if character in '0123456789':\n isbn_numbers.append(character)\n\n elif character not in '0123456789':\n isbn_characters.append(character)\n\n if character == '-':\n isbn_separator.append(character)\n\n if index > 0:\n if isbn[index - 1] not in '0123456789':\n msj = 'Se ingresaron dos separadores juntos'\n break\n else:\n msj = 'Se ingresó un caracter inválido'\n break\n\n index += 1\n\n if len(isbn_numbers) < 10:\n msj = 'Faltan dígitos'\n\n if len(isbn_separator) != 3:\n msj = 'No son 4 grupos de números.'\n\n if len(isbn_separator) < 3:\n diff = 3 - len(isbn_separator)\n msj += ' Faltan ' + str(diff) + ' separadores'\n else:\n diff = len(isbn_separator) - 3\n msj += ' Hay ' + str(diff) + ' separador sobrante'\n\n if msj == '':\n format_valid = True\n\n elif len(isbn) < 13:\n msj = 'Faltan caracteres'\n\n else:\n 
msj = 'Se excede la cantidad de carácteres'\n\n return format_valid, msj", "def create_book(self, title, isbn):\n isbn_list = [book.get_isbn() for book in self.books.keys()]\n if isbn in isbn_list:\n print(\"ISBN {isbn} already exists. Please provide a unique ISBN.\".format(isbn=isbn))\n else:\n return Book(title, isbn)", "def is_valid_issn(issn):\n try:\n return bool(validate_issn(issn))\n except (ValueError, TypeError):\n return False", "def searchbook(isbn):\r\n print(\"Searching for isbn \" + isbn + \" in googlebooks...\")\r\n result = _search_by_isbn(isbn)\r\n \r\n if result[\"totalItems\"] == 0:\r\n return None\r\n \r\n b = _item2book(result[\"items\"][0])\r\n return b", "def _is_doi(s: str) -> bool:\n # Thanks to Andrew Gilmartin\n # https://www.crossref.org/blog/dois-and-matching-regular-expressions/\n match = re.match(r\"^10.\\d{4,9}/[-._;()/:A-Z0-9]+$\", s, re.IGNORECASE)\n\n return match is not None and match.group() is not None", "def test_choose_book():\n assert choose_book(['string']) == True\n assert isinstance(choose_book(['string']), bool)\n assert callable(choose_book)", "def get_book_by_isbn(isbn):\n return Book.get_book(isbn)", "def test_book_isbn_must_only_be_numbers(self):\n\n\t\twith self.client:\n\t\t\tadd_book = {\n\t\t\t\t'title': 'Hello Books',\n\t\t\t\t'isbn': '56987451Ky'\n\t\t\t}\n\t\t\tlogin_data = self.login_test_user()\n\t\t\ttoken = login_data['auth_token']\n\t\t\tres = self.client.post(\n\t\t\t\tf'{URL_BOOKS}',\n\t\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\t\tcontent_type='application/json',\n\t\t\t\tdata=json.dumps(add_book)\n\t\t\t)\n\t\t\tres2 = json.loads(res.data.decode())\n\t\t\tself.assertIn('isbn must only include numbers', str(res2))\n\t\t\tself.assertEqual(res.status_code, 400)", "def isbn_prefix(isbn):\r\n if is_null(isbn): return ''\r\n if is_isbn_10(isbn): isbn = isbn_convert(isbn)\r\n if not is_isbn_13(isbn): return ''\r\n if isbn.startswith('979'):\r\n isbn = isbn[3:]\r\n try: return '979' + RE_PUB_PREFIX_979.search(isbn).group('pub')\r\n except: return '979' + isbn[3:5]\r\n elif isbn.startswith('978'):\r\n isbn = isbn[3:]\r\n try: return '978' + RE_PUB_PREFIX.search(isbn).group('pub')\r\n except: return ''\r\n return ''", "def validBookObject(bookObject):\n return (\"name\" in bookObject and\n \"price\" in bookObject and\n \"isbn\" in bookObject)", "def ISBN(self, default=None):\n return self.data.get('isbn', default)", "def create_novel(self, title, author, isbn):\n isbn_list = [book.get_isbn() for book in self.books.keys()]\n if isbn in isbn_list:\n print(\"ISBN {isbn} already exists. 
Please provide a unique ISBN.\".format(isbn=isbn))\n else:\n return Fiction(title, author, isbn)", "def citationContainsDOI(citation):\n if citation.startswith(\"doi:\"):\n return True\n elif citation.startswith(\"@doi:\"):\n return True\n elif citation.startswith(\"[@doi\"):\n return True\n else:\n return False", "def genesis_to_boolean(genesis_str):\n\n if genesis_str == 'Y':\n return True\n else:\n return False", "def normalize_isbn(val):\n if is_isbn10(val):\n val = isbnlib.to_isbn13(val)\n return isbnlib.mask(isbnlib.canonical(val))", "def str2bool(str):\n return int(str) != 0", "def is_id(string):\n regex = re.compile('[0-9a-f]{32}\\Z', re.I)\n if bool(regex.match(string)):\n return True\n\n return False", "def isnumber(n):\r\n N = str(n)\r\n if N.isdigit():\r\n return True\r\n else:\r\n return False", "def is_valid_bid(klass, bid):\n return bid and re.match(\"^\\d{3,5}$\", bid.strip())", "def is_doi(val):\n return doi_regexp.match(val)", "def validate_isbn_math_relation(isbn_code: str):\n isbn_code_valid = False\n isbn_only_numbers = []\n msj = ''\n\n for character in isbn_code:\n if character in '0123456789':\n char_parse_int = int(character)\n isbn_only_numbers.append(char_parse_int)\n else:\n pass\n\n pos = 10\n addition = 0\n for num in isbn_only_numbers:\n mult = pos * num\n addition += mult\n pos -= 1\n\n final_result = addition % 11\n\n if final_result == 0:\n isbn_code_valid = True\n\n if not isbn_code_valid:\n msj = 'No se cumple la relación matemática'\n\n return isbn_code_valid, msj", "def book_by_isbn(ISBN):\n data = {}\n for book in root.findall('Book'):\n for elem in book:\n isbn = book.find('ISBN').text\n if isbn == ISBN:\n data['id'] = book.attrib['id']\n data[elem.tag] = elem.text\n return data", "def is_issn(val):\n try:\n val = val.replace(\"-\", \"\").replace(\" \", \"\").upper()\n if len(val) != 8:\n return False\n r = sum([(8 - i) * (_convert_x_to_10(x)) for i, x in enumerate(val)])\n return not (r % 11)\n except ValueError:\n return False", "def is_isni(val):\n val = val.replace(\"-\", \"\").replace(\" \", \"\").upper()\n if len(val) != 16:\n return False\n try:\n r = 0\n for x in val[:-1]:\n r = (r + int(x)) * 2\n ck = (12 - r % 11) % 11\n return ck == _convert_x_to_10(val[-1])\n except ValueError:\n return False", "def auto_gen_isbn():\n isbn_number = []\n\n while isbn_number == []:\n\n for i in range(10):\n dig = random.randint(0, 9)\n isbn_number.append(dig)\n\n pos = 10\n addition = 0\n for num in isbn_number:\n mult = pos * num\n addition += mult\n pos -= 1\n\n final_result = addition % 11\n\n if final_result != 0:\n isbn_number = []\n\n else:\n break\n\n string = str()\n for num in isbn_number:\n car = str(num)\n string += car\n\n string = list(string)\n\n string = string[0] + string[1] + '-' + string[2] + string[3] \\\n + string[4] + string[5] + string[6] + '-' + string[7] \\\n + string[8] + '-' + string[9]\n\n return string", "def _is_ibis(name: str) -> bool:\n return name.startswith(DATA_NAME_PREFIX)", "def valid_barcode(s):\n # implement this function!\n odd_digits = 0\n even_digits = 0\n result = 0\n for i in range(len(s) - 1):\n if i % 2 == 0:\n odd_digits += int(s[i])\n else:\n even_digits += int(s[i])\n result = (3 * odd_digits + even_digits) % 10\n if result != 0:\n result = 10 - result\n\n try:\n if int(s[-1]) == result and len(s) == 12:\n return True\n else:\n return False\n except IndexError:\n return False", "def get_isbn(self):\n return self.isbn", "def is_mbid(mbid):\n try:\n mbid = uuid.UUID(mbid)\n good = True\n except ValueError as 
e:\n good = False\n except AttributeError:\n good = False\n\n return good", "def test_book_isbn_length_must_be_ten(self):\n\n\t\twith self.client:\n\t\t\tadd_book = {\n\t\t\t\t'title': 'Hello Books',\n\t\t\t\t'isbn': '56987451'\n\t\t\t}\n\t\t\tlogin_data = self.login_test_user()\n\t\t\ttoken = login_data['auth_token']\n\t\t\tres = self.client.post(\n\t\t\t\tf'{URL_BOOKS}',\n\t\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\t\tcontent_type='application/json',\n\t\t\t\tdata=json.dumps(add_book)\n\t\t\t)\n\t\t\tres2 = json.loads(res.data.decode())\n\t\t\tself.assertIn('isbn length must be 10', str(res2))", "def barcode_is_valid(s):\n return (bool(re.match(r'^[ATGC]*$',s))\n or barcode_is_10xgenomics(s))", "def str_bool(s):\n if not s:\n return False\n if type(s) != str:\n # It's not a string and it's not falsy, soooo....\n return True\n s = s.lower()\n if s in [\"false\", \"0\", \"no\", \"n\"]:\n return False\n return True", "def checker(item):\n return item in INSTOCK", "def isbool(s):\n if type(s) in [int,float,bool]:\n return True\n elif (type(s) != str):\n return False\n return s in ['True','False']", "def __contains__(self, item: str) -> bool:\n return item in self.stoi", "def validate_bookid(self,book_id):\r\n if int(book_id) in [i.book_id for i in self.issued_books]:\r\n return True\r\n else:\r\n return False", "def s2b(s):\n s = s.lower()\n return s == 'true' or s == 'yes' or s == 'y' or s == '1'", "def clean_isbn(isbn):\n digits = set(\"0123456789\")\n return [int(x if x in digits else 10) for x in isbn.translate(None, \" -\")]", "def parse_isbn_url(url):\n parts = urlparse.urlparse(url)\n query = urlparse.parse_qs(parts.query)\n if ISBN_QS_KEY in query and len(query[ISBN_QS_KEY]):\n return query[ISBN_QS_KEY][0]", "def is_palin(s):\r\n if s == \"\":\r\n return True\r\n if s[0] == s[-1]:\r\n return is_palin(s[1:-1])\r\n else:\r\n return False", "def isTrue(s):\n return s == \"Yes\"", "def is_emirp(n) -> bool:\r\n if not is_prime(n):\r\n return False\r\n if not is_palindromic_number(n):\r\n return is_prime(int(str(n)[::-1]))\r\n return False", "def is_number(s):\r\n try:\r\n int(s)\r\n return True\r\n except ValueError:\r\n return False", "def barcode_is_10xgenomics(s):\n return bool(re.match(r'^SI\\-[A-Z0-9]+\\-[A-Z0-9]+$',s))", "def is_pandigital(n):\n # Accept both str and int\n if isinstance(n, int):\n n = str(n)\n\n digits = len(n)\n if digits >= 10:\n return False\n string_n = str(n)\n for i in range(1, digits + 1):\n if str(i) not in string_n:\n return False\n return True", "def getBool(string):\n return (True)", "def is_number(s):\n try:\n int(s)\n return True\n except ValueError:\n return False", "def _isCardinalNumber(strWord):\n return NumberFormula.CARDINALNUMBERREGEX.match(strWord) != None", "def restock_book(self, isbn, quantity):\n self.cursor.execute(\"\"\"SELECT COUNT(*) FROM book WHERE ISBN=%s\"\"\", (isbn,))\n if self.cursor.fetchone()[0]:\n self.cursor.execute(\"\"\"UPDATE book set stock=stock+%s WHERE ISBN=%s\"\"\", (quantity, isbn))\n self.db.commit()\n return True\n return False", "def valid_ric(ticker, ric):\n split_ric = ric.split('.')\n ticker_ = split_ric[0]\n exchange = split_ric[1]\n database = helper.create_db()\n exchange_list = database.retrieve_column_as_list(\"exchanges\",\n \"exchange_code\")\n return ticker == ticker_ and exchange in exchange_list", "def test_choose_author():\n assert choose_author(['string']) == True\n assert isinstance(choose_author(['string']), bool)\n assert callable(choose_author)", "def isNumber(string):\r\n for char 
in string:\r\n charNum = ord(char)\r\n if (charNum < 48 or charNum > 57):\r\n return False\r\n return True", "def isString(x):\n if type(x) == str:\n return True\n else:\n return False", "def isindex(str):\n try:\n int(str)\n return True\n except ValueError:\n return False", "def to_isbn(ean):\n clean = clean_isbn(ean)\n isbn = clean[3:-1]\n isbn.append(isbn_check_digit(isbn))\n return ''.join(str(d) for d in isbn)", "def __is_int(self,string):\r\n try: \r\n int(string)\r\n return True\r\n except ValueError:\r\n return False", "def _is_binary(name):\n\n return name in unidata.unicode_binary or name in unidata.unicode_alias['binary']", "def isbimol(rxn_typ):\n return rxn_typ in BIMOL_REACTIONS", "def to_bool(s: str):\n if s.strip().lower() == \"y\":\n return True\n else:\n return False", "def string_is_course_id(string):\n for i in range(min(len(string), 3)):\n if string[i].isdigit():\n return True\n return False", "def hunt(s):\n\n\t# ISBN-13s\n\tfor regexp in [r'(?:[^0-9]|^)((?:[0-9]-*){12}[0-9X])(?:[^0-9X]|$)',\n\t\t\t\t r'(?:[^0-9]|^)((?:[0-9]-*){9}[0-9X])(?:[^0-9X]|$)']:\n\t\tfor match in re.finditer(regexp, s):\n\t\t\tcandidate = match.group(1)\n\t\t\tif verify(candidate):\n\t\t\t\tyield candidate.replace(\"-\",\"\")", "def str_to_bool(s):\n if len(s) > 0 and s[0] in \"yYtT1\":\n return True\n return False", "def _is_binary(s):\n\tif (type(s) == types.StringType\n\t\t\tand (s == conj or s == disj or s == impl or s == iff)):\n\t\treturn 1\n\treturn 0", "def isPubidChar(ch):\n ret = libxml2mod.xmlIsPubidChar(ch)\n return ret", "def isbn_10_check_digit(nine_digits):\r\n if len(nine_digits) != 9: return None\r\n try: int(nine_digits)\r\n except: return None\r\n remainder = int(sum((i + 2) * int(x) for i, x in enumerate(reversed(nine_digits))) % 11)\r\n if remainder == 0: tenth_digit = 0\r\n else: tenth_digit = 11 - remainder\r\n if tenth_digit == 10: tenth_digit = 'X'\r\n return str(tenth_digit)", "def isint(s):\n try:\n x = int(s)\n return True\n except:\n return False", "def _validate_bbg_id(x):\n return len(x) == 12 and x[:3] == 'BBG' and str.isalnum(x[3:11]) and sum(map(\n lambda u: u in ['A', 'E', 'I', 'O', 'U'], x[3:11])) == 0 and str.isdigit(x[11])", "def is_bool (self, phrase):\r\n \r\n return isinstance(phrase,bool)", "def str_to_bool(a):\n if a == 'True':\n return True\n else:\n return False", "def str2bool(s):\n if s == \"True\":\n return True\n elif s == \"False\":\n return False\n else:\n raise ValueError", "def __bool__(self):\n return _libsbml.string___bool__(self)" ]
[ "0.7865637", "0.7720685", "0.7428631", "0.7180673", "0.71515095", "0.7052291", "0.68789357", "0.6845327", "0.6841594", "0.6841162", "0.6837004", "0.6813746", "0.68048567", "0.675236", "0.66869426", "0.6474136", "0.64726096", "0.64508146", "0.640185", "0.6327851", "0.621259", "0.61057997", "0.59980005", "0.5991922", "0.5906314", "0.5899321", "0.58419645", "0.5833934", "0.58107376", "0.5805409", "0.57469815", "0.5745379", "0.56333774", "0.55335677", "0.54847115", "0.54216284", "0.54182714", "0.5392693", "0.53637564", "0.53588164", "0.5343726", "0.53405625", "0.5329685", "0.52739286", "0.5272128", "0.526793", "0.52599025", "0.5248177", "0.5217534", "0.5202601", "0.5200103", "0.51828927", "0.5157966", "0.51200354", "0.51186264", "0.5114698", "0.51047224", "0.5069171", "0.5067766", "0.5039378", "0.50361514", "0.5027232", "0.5026579", "0.5018908", "0.5017459", "0.5014719", "0.49936447", "0.4989891", "0.49786505", "0.4977651", "0.4969697", "0.49553707", "0.4944828", "0.49422082", "0.4938852", "0.49352127", "0.49212718", "0.49208042", "0.49181664", "0.4916068", "0.49120146", "0.4909485", "0.4903137", "0.49025753", "0.49003938", "0.4897691", "0.4889553", "0.4882746", "0.48776397", "0.48725533", "0.48673624", "0.48671034", "0.4866686", "0.4854969", "0.48542488", "0.48353013", "0.48344886", "0.4833253", "0.48306918", "0.48264983" ]
0.7591935
2
>>> codes = ['0012345678', '0012345679', '9971502100', '080442957X', 5, True, 'The Practice of Computing Using Python', '9789027439642', '5486948320146']
>>> areISBN(codes)
[False, True, True, True, False, False, False, True, False]
>>> areISBN(codes, True)
[False, False, False, False, False, False, False, True, False]
>>> areISBN(codes, False)
[False, True, True, True, False, False, False, False, False]
def areISBN(codes, isbn13=None):

    # initialize list of checks
    checks = []

    # construct list of checks
    for code in codes:

        if isinstance(code, str):

            if isbn13 is None:
                checks.append(isISBN(code, len(code) == 13))
            else:
                checks.append(isISBN(code, isbn13))

        else:

            checks.append(False)

    # return list of checks
    return checks
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_isbn(val):\n if is_isbn10(val) or is_isbn13(val):\n if val[0:3] in [\"978\", \"979\"] or not is_ean13(val):\n return True\n return False", "def isISBN(code, isbn13=True):\n\n return isISBN13(code) if isbn13 else isISBN10(code)", "def isISBN(code):\n if not (\n isinstance(code, str) and # code must be a string\n len(code) == 10 and # code must contain 10 characters\n code[:9].isdigit() # first nine characters must be digits\n ):\n return False\n\n # check the check digit\n return checkdigit(code) == code[-1]", "def isbn_10_check_structure(isbn10):\r\n return True if re.match(RE_ISBN10, isbn10) else False", "def isISBN10(code):\n\n # helper function for computing ISBN-10 check digit\n def check_digit(code):\n\n # compute check digit\n check = sum((i + 1) * int(code[i]) for i in range(9)) % 11\n\n # convert check digit into its string representation\n return 'X' if check == 10 else str(check)\n\n # check whether given code is a string\n if not isinstance(code, str):\n return False\n\n # check whether given code contains 10 characters\n if len(code) != 10:\n return False\n\n # check whether first nine characters of given code are digits\n if not code[:9].isdigit():\n return False\n\n # check the check digit\n return check_digit(code) == code[-1]", "def isISBN13(code):\n\n # helper function for computing ISBN-10 check digit\n def check_digit(code):\n\n # compute check digit\n check = sum((3 if i % 2 else 1) * int(code[i]) for i in range(12))\n\n # convert check digit into a single digit\n return str((10 - check) % 10)\n\n # check whether given code is a string\n if not isinstance(code, str):\n return False\n\n # check whether given code contains 10 characters\n if len(code) != 13:\n return False\n\n # check whether first nine characters of given code are digits\n if not code[:12].isdigit():\n return False\n\n # check the check digit\n return check_digit(code) == code[-1]", "def verify(isbn):\n\n isbn = isbn.replace(\"-\", \"\")\n if not verify_format(isbn):\n return False\n\n isbn_sum = 0\n for digit, i in zip(isbn, range(10, 0, -1)):\n if digit == \"X\":\n isbn_sum += 10 * i\n else:\n isbn_sum += int(digit) * i\n\n return isbn_sum % 11 == 0", "def is_valid_isbn(isbn):\n clean = clean_isbn(isbn)\n return clean[-1] == isbn_check_digit(clean[:-1])", "def isbn_13_check_structure(isbn13):\r\n return True if re.match(RE_ISBN13, isbn13) else False", "def is_valid(isbn: str) -> bool:\n no_dashes = isbn.replace('-', '')\n\n nums = []\n\n valid_num = False\n\n if no_dashes:\n for char in no_dashes:\n if char == 'X':\n nums.append(10)\n elif char != 'X' and char.isalpha() or len(no_dashes) < 10 or len(no_dashes) > 10:\n break\n elif 'X' in no_dashes and no_dashes[-1] != 'X':\n break\n else:\n nums.append(int(char))\n\n char = 0\n value = 0\n\n if nums and len(nums) == 10:\n for n in range(10, 0, -1):\n value += (n * nums[char])\n char += 1\n valid_num = (value % 11 == 0)\n\n return valid_num", "def verify_format(isbn):\n\n return len(isbn) == 10 and (isbn[-1] == \"X\" or isbn[-1].isdigit()) \\\n and all(digit.isdigit() for digit in isbn[:-1])", "def valid_book(self, info):\n self.cursor.execute(\"SELECT ISBN, title, price, stock FROM book WHERE ISBN=%s\", (info['ISBN'],))\n for book in self.cursor.fetchall():\n return True, float(book[2]), book[1], book[3]\n return False, 0, 0, 0", "def is_isbn_10(isbn10):\r\n isbn10 = re.sub(r'[^0-9X]', '', isbn10.replace('x', 'X'))\r\n if len(isbn10) != 10: return False\r\n return False if isbn_10_check_digit(isbn10[:-1]) != isbn10[-1] else True", "def 
is_isbn_13(isbn13):\r\n isbn13 = re.sub(r'[^0-9X]', '', isbn13.replace('x', 'X'))\r\n if len(isbn13) != 13: return False\r\n if isbn13[0:3] not in ('978', '979'): return False\r\n return False if isbn_13_check_digit(isbn13[:-1]) != isbn13[-1] else True", "def checker(product):\n for item in INSTOCK:\n if item == product:\n return True\n return False", "def isbn_has_valid_check_digit(self, isbn):\n if not self.ISBN_RE.match(isbn):\n raise ValueError(str(isbn) + \" is no valid 13-digit ISBN!\")\n checksum = 0\n for index, digit in enumerate(isbn):\n if index % 2 == 0:\n checksum += int(digit)\n else:\n checksum += 3 * int(digit)\n return checksum % 10 == 0", "def isbn():\n message = 'Informe um ISBN válido'\n def _isbn(form, field):\n if not is_isbn10(field.data) and not is_isbn13(field.data):\n raise ValidationError(message)\n return _isbn", "def checker(item):\n return item in INSTOCK", "def checkBINn(L, n):\n binaire = [0,1]\n if len(L)==n:\n for i in range(len(L)):\n if L[i] not in binaire:\n return False\n return True\n else: \n return False", "def isbn_check_digit(isbn):\n return (11 - (sum(x * y for (x, y) in enumerate(reversed(isbn), start=2))\n % 11)) % 11", "def find_book_dois_in_crossref(isbn_list):\n ret_value = {\n \"success\": False,\n \"dois\": []\n }\n if type(isbn_list) != type([]) or len(isbn_list) == 0:\n ret_value['error_msg'] = \"Parameter must be a non-empty list!\"\n return ret_value\n filter_list = [\"isbn:\" + isbn.strip() for isbn in isbn_list]\n filters = \",\".join(filter_list)\n api_url = \"https://api.crossref.org/works?filter=\"\n url = api_url + filters + \"&rows=500\"\n request = Request(url)\n request.add_header(\"User-Agent\", USER_AGENT)\n try:\n ret = urlopen(request)\n content = ret.read()\n data = json.loads(content)\n if data[\"message\"][\"total-results\"] == 0:\n ret_value[\"success\"] = True\n else:\n for item in data[\"message\"][\"items\"]:\n if item[\"type\"] in [\"monograph\", \"book\"] and item[\"DOI\"] not in ret_value[\"dois\"]:\n ret_value[\"dois\"].append(item[\"DOI\"])\n if len(ret_value[\"dois\"]) == 0:\n msg = \"No monograph/book DOI type found in Crossref ISBN search result ({})!\"\n raise ValueError(msg.format(url))\n else:\n ret_value[\"success\"] = True\n except HTTPError as httpe:\n ret_value['error_msg'] = \"HTTPError: {} - {}\".format(httpe.code, httpe.reason)\n except URLError as urle:\n ret_value['error_msg'] = \"URLError: {}\".format(urle.reason)\n except ValueError as ve:\n ret_value['error_msg'] = str(ve)\n return ret_value", "def validate_isbn_math_relation(isbn_code: str):\n isbn_code_valid = False\n isbn_only_numbers = []\n msj = ''\n\n for character in isbn_code:\n if character in '0123456789':\n char_parse_int = int(character)\n isbn_only_numbers.append(char_parse_int)\n else:\n pass\n\n pos = 10\n addition = 0\n for num in isbn_only_numbers:\n mult = pos * num\n addition += mult\n pos -= 1\n\n final_result = addition % 11\n\n if final_result == 0:\n isbn_code_valid = True\n\n if not isbn_code_valid:\n msj = 'No se cumple la relación matemática'\n\n return isbn_code_valid, msj", "def verify(s):\n\t# Remove any spurious characters\n\ts = re.sub(r'[^0-9xX]', '', s).upper().strip()\n\n\tl = len(s)\n\n\tif l==10:\n\t\tif verify_10(s):\n\t\t\treturn s\n\telif l==13:\n\t\tif verify_13(s):\n\t\t\treturn s\n\n\t# It's not the right length to be an ISBN\n\treturn False", "def validate_isbn_format(isbn_code: str):\n format_valid = False\n isbn = list(isbn_code)\n msj = ''\n\n if len(isbn) == 13:\n\n isbn_numbers = []\n 
isbn_separator = []\n index = 0\n isbn_characters = []\n\n for character in isbn:\n\n if character in '0123456789':\n isbn_numbers.append(character)\n\n elif character not in '0123456789':\n isbn_characters.append(character)\n\n if character == '-':\n isbn_separator.append(character)\n\n if index > 0:\n if isbn[index - 1] not in '0123456789':\n msj = 'Se ingresaron dos separadores juntos'\n break\n else:\n msj = 'Se ingresó un caracter inválido'\n break\n\n index += 1\n\n if len(isbn_numbers) < 10:\n msj = 'Faltan dígitos'\n\n if len(isbn_separator) != 3:\n msj = 'No son 4 grupos de números.'\n\n if len(isbn_separator) < 3:\n diff = 3 - len(isbn_separator)\n msj += ' Faltan ' + str(diff) + ' separadores'\n else:\n diff = len(isbn_separator) - 3\n msj += ' Hay ' + str(diff) + ' separador sobrante'\n\n if msj == '':\n format_valid = True\n\n elif len(isbn) < 13:\n msj = 'Faltan caracteres'\n\n else:\n msj = 'Se excede la cantidad de carácteres'\n\n return format_valid, msj", "def testBadFormatISBN(self): \n val = format_isbn(\"1234567843534594123\")\n self.assertFalse(val)", "def check_code(item_code):\r\n # RA matches\r\n if re.match(r'^MCRNC[0-9]{4}\\.T$', item_code):\r\n return True\r\n\r\n if re.match(r'^RAN[0-9]{3,4}(\\.[0-9])?C?(\\.T)?$', item_code):\r\n return True\r\n\r\n if re.match(r'^RAS[0-9]{5}$', item_code):\r\n return True\r\n\r\n if re.match(r'^RNC[0-9]{4}\\.T$', item_code):\r\n return True\r\n\r\n if re.match(r'^RU[0-9]{5}(\\.T)?$', item_code):\r\n return True\r\n\r\n # Feature ID (RAN) matches\r\n if re.match(r'^RAN[0-9]{2,5}$', item_code):\r\n return True\r\n\r\n if re.match(r'^(?P<code>RAN[1,2](\\.[0-9]{3,4}))$', item_code):\r\n return True\r\n\r\n return False", "def isbimol(rxn_typ):\n return rxn_typ in BIMOL_REACTIONS", "def checker(self, product):\n for item in self.instock:\n if item == product:\n return True\n return False", "def __contains__(self, code: str) -> bool:\n return code in self._all_codes_map", "def decode(code):\n def h(x):\n hs = []\n for i in range(len(code)):\n if code[i] != '0' and (code[i] == '?' 
or code[i] == x[i]):\n hs.append(True)\n else:\n hs.append(False)\n return all(hs)\n return h", "def check_completeness(ISM):\n for item in ISM:\n if item not in ['A', 'T', 'C', 'G', '-']:\n return False\n return True", "def validate_bookid(self,book_id):\r\n if int(book_id) in [i.book_id for i in self.issued_books]:\r\n return True\r\n else:\r\n return False", "def testgetISBN(self):\r\n ebook1 = ElectronicResources()\r\n #ebook1.setListDevices([device1, device2])\r\n ebook1.setISBN(9780316485616)\r\n #ebook1.setEBookTitle('The Night Fire')\r\n #ebook1.setEBookAuthor('Harry Bosch')\r\n self.assertEqual(ebook1.getISBN(),9780316485616)", "def test(types, _):\n return 'Date' in types and 'Postal Code' in types", "def containsBrackets(Code):\r\n \r\n for item in Code:\r\n if '(' in item or ')' in item:\r\n return True;\r\n return False;", "def isbn_lookup(isbn):\n base = \"https://www.googleapis.com/books/v1/volumes?q=isbn=\"\n# Unfortunately we can't use the superior \"with spam as eggs\" syntax here...\n search = urlopen(base + isbn + \"&prettyprint=false\")\n lines = search.read()\n search.close()\n for bool_pair in [(\"false\", \"False\"), (\"true\", \"True\")]:\n lines = lines.replace(*bool_pair)\n volume_info = literal_eval(lines)[\"items\"][0][\"volumeInfo\"]\n title = volume_info[\"title\"]\n authors = ', '.join(a for a in volume_info[\"authors\"])\n return \"Title:\\t\\t%s\\nAuthor(s):\\t%s\" % (title, authors)", "def testBadFormatISBNAgain(self): \n val = format_isbn(\"12345678\")\n self.assertFalse(val)", "def checkbandnumbers(bands, checkbands):\n for c in checkbands:\n if c not in bands:\n return False\n return True", "def test_search_by_ISBN(self):\n s1 = System()\n b1 = Books(\"1984\", \"George Orwell\", \"Harvill Secker\", \"1949\", \"0123456789123\")\n self.assertEqual(s1.search_by_ISBN(\"0123456789123\"), 0)\n s1.add_resource(b1)\n self.assertEqual(s1.search_by_ISBN(\"0123456789123\"), 1)", "def valid_ric(ticker, ric):\n split_ric = ric.split('.')\n ticker_ = split_ric[0]\n exchange = split_ric[1]\n database = helper.create_db()\n exchange_list = database.retrieve_column_as_list(\"exchanges\",\n \"exchange_code\")\n return ticker == ticker_ and exchange in exchange_list", "def ISBNs(self, default=[{}]):\n tmp = self.data.get('metadata', {}).get('isbns', default)\n return [HEP.ISBNObject(i) for i in tmp]", "def is_isin(value):\n return True", "def fn(p, s):\n ss = iter(s)\n return all(ch in ss for ch in p)", "def testFormatISBN(self): \n val = format_isbn(\"1234567894123\")\n self.assertEqual(val,\"123-4-567-89412-3\")", "def test_bool(bool_list):\n new_list = []\n for lst in bool_list:\n for item in lst:\n new_list.append(item)\n if True in new_list:\n return True\n else:\n return False", "def test_identifier_display_multiple_idents(self):\n Identifier(value='9788307018867', type='ISBN_13', book=self.book).save()\n Identifier(value='1234567891', type='ISBN_10', book=self.book).save()\n result = self.book.identifier_display\n expected = ['ISBN_13: 9788307018867', 'ISBN_10: 1234567891']\n\n self.assertEqual(len(result), len(expected))\n for ident in expected:\n assert ident in result", "def split_isbn(self, isbn):\n ret_value = {\n 'success': False,\n 'value': None\n }\n split_isbn = \"\"\n remaining_isbn = isbn\n\n if not self.ISBN_RE.match(isbn):\n ret_value['value'] = '\"' + str(isbn) + '\" is no valid 13-digit ISBN!'\n return ret_value\n for ean in self.ean_elements:\n prefix = ean.find(\"Prefix\").text\n if remaining_isbn.startswith(prefix):\n split_isbn += prefix\n 
remaining_isbn = remaining_isbn[len(prefix):]\n rules = ean.find(\"Rules\")\n length = self._get_range_length_from_rules(remaining_isbn, rules)\n if length == 0:\n msg = ('Invalid ISBN: Remaining fragment \"{}\" for EAN prefix \"{}\" is inside a ' +\n 'range which is not marked for use yet')\n ret_value['value'] = msg.format(remaining_isbn, prefix)\n return ret_value\n group = remaining_isbn[:length]\n split_isbn += \"-\" + group\n remaining_isbn = remaining_isbn[length:]\n break\n else:\n msg = 'ISBN \"{}\" does not seem to have a valid prefix.'\n ret_value['value'] = msg.format(isbn)\n return ret_value\n for group in self.registration_groups:\n prefix = group.find(\"Prefix\").text\n if split_isbn == prefix:\n rules = group.find(\"Rules\")\n length = self._get_range_length_from_rules(remaining_isbn, rules)\n if length == 0:\n msg = ('Invalid ISBN: Remaining fragment \"{}\" for registration group \"{}\" is ' +\n 'inside a range which is not marked for use yet')\n ret_value['value'] = msg.format(remaining_isbn, split_isbn)\n return ret_value\n registrant = remaining_isbn[:length]\n split_isbn += \"-\" + registrant\n remaining_isbn = remaining_isbn[length:]\n check_digit = remaining_isbn[-1:]\n publication_number = remaining_isbn[:-1]\n split_isbn += \"-\" + publication_number + \"-\" + check_digit\n ret_value['success'] = True\n ret_value['value'] = split_isbn\n return ret_value\n else:\n msg = 'ISBN \"{}\" does not seem to have a valid registration group element.'\n ret_value['value'] = msg.format(isbn)\n return ret_value", "def test_and_normalize_isbn(self, isbn):\n ret = {\"valid\": False, \"input_value\": str(isbn)}\n stripped_isbn = isbn.strip()\n unsplit_isbn = stripped_isbn.replace(\"-\", \"\")\n split_on_input = False\n if self.ISBN_SPLIT_RE.match(stripped_isbn):\n if len(stripped_isbn) < 17:\n ret[\"error_type\"] = 1\n return ret\n elif len(stripped_isbn) > 17:\n ret[\"error_type\"] = 2\n return ret\n else:\n split_on_input = True\n if self.ISBN_RE.match(unsplit_isbn):\n split_isbn = self.split_isbn(unsplit_isbn)[\"value\"]\n if split_on_input and split_isbn != stripped_isbn:\n ret[\"error_type\"] = 3\n return ret\n ret[\"normalised\"] = split_isbn\n ret[\"valid\"] = True\n return ret\n ret[\"error_type\"] = 0\n return ret", "def getISBN(self):\n return self.bookISBN", "def allIn(listA: Union[int, List[int]], listB: Union[int, List[int]]) -> bool:\n if isinstance(listA, int):\n listA = [listA]\n if isinstance(listB, int):\n return listB in listA\n else:\n for item in listB:\n if item not in listA:\n return False\n return True", "def verify_register(self, barcode: str):\n try:\n test = []\n self.db.cursor.execute(f'SELECT * FROM books where bar_code = {barcode}')\n for i in self.db.cursor.fetchall():\n test.append(i)\n except Exception as error:\n print(error)\n else:\n if len(test) >= 1:\n return True\n else:\n return False", "def check_other(seq_iter):\n\treturn any(filter(has_abba, seq_iter))", "def citationContainsDOI(citation):\n if citation.startswith(\"doi:\"):\n return True\n elif citation.startswith(\"@doi:\"):\n return True\n elif citation.startswith(\"[@doi\"):\n return True\n else:\n return False", "def contains(list_, filter_):\n for x in list_:\n if filter_(x):\n return True\n return False", "def is_sequence_of_int(items):\n return all(isinstance(item, int) for item in items)", "def valid_barcode(s):\n # implement this function!\n odd_digits = 0\n even_digits = 0\n result = 0\n for i in range(len(s) - 1):\n if i % 2 == 0:\n odd_digits += int(s[i])\n else:\n even_digits 
+= int(s[i])\n result = (3 * odd_digits + even_digits) % 10\n if result != 0:\n result = 10 - result\n\n try:\n if int(s[-1]) == result and len(s) == 12:\n return True\n else:\n return False\n except IndexError:\n return False", "def item_exists(item_id):\n return item_id in all_items", "def is_isbn_or_keyword(inputs):\n isbn_or_keyword='keyword'\n if len(inputs)==13 and inputs.isdigit():\n isbn_or_keyword='isbn'\n short_inputs=inputs.strip('-')\n if '-' in inputs and short_inputs.isdigit() and len(short_inputs)==10:\n isbn_or_keyword='isbn'\n return isbn_or_keyword", "def test_book_isbn_must_only_be_numbers(self):\n\n\t\twith self.client:\n\t\t\tadd_book = {\n\t\t\t\t'title': 'Hello Books',\n\t\t\t\t'isbn': '56987451Ky'\n\t\t\t}\n\t\t\tlogin_data = self.login_test_user()\n\t\t\ttoken = login_data['auth_token']\n\t\t\tres = self.client.post(\n\t\t\t\tf'{URL_BOOKS}',\n\t\t\t\theaders=dict(Authorization=f'Bearer {token}'),\n\t\t\t\tcontent_type='application/json',\n\t\t\t\tdata=json.dumps(add_book)\n\t\t\t)\n\t\t\tres2 = json.loads(res.data.decode())\n\t\t\tself.assertIn('isbn must only include numbers', str(res2))\n\t\t\tself.assertEqual(res.status_code, 400)", "def contains(self, *args):\n return _libsbml.IdList_contains(self, *args)", "def is_valid_deck(deck):\n \n flag = True\n test_deck = []\n for i in range(1, len(deck) + 1):\n test_deck.append(i)\n for value in deck:\n if value not in test_deck:\n flag = False\n return flag", "def is_in(elt, seq):\n\treturn any(x is elt for x in seq)", "def test_in_list(self):\n\n # get available ids\n ids = list(DQ(\"(b.id) Book b\").tuples())\n ids = [id[0] for id in ids]\n\n # take just three of them\n c = {\"ids\": ids[:3]}\n dq = DQ(\"(b.id, b.name) Book{b.id in '$(ids)'} b\")\n r = list(dq.context(c).dicts())\n\n # make sure we got three of them\n self.assertEqual(len(r), 3)", "def exist(self,list,a):\r\n\t\ti = 0\r\n\t\tfor elem in list:\r\n\t\t\tif (elem == a):\r\n\t\t\t\ti=i+1\r\n\t\tif (i>0):\r\n\t\t\treturn True\r\n\t\telse:\r\n\t\t\treturn False", "def _is_ibis(name: str) -> bool:\n return name.startswith(DATA_NAME_PREFIX)", "def test_contains_true(self):\n self.assertTrue('BarcodeSequence' in self.tester)\n self.assertTrue('barcodesequence' in self.tester)", "def clean_isbn(isbn):\n digits = set(\"0123456789\")\n return [int(x if x in digits else 10) for x in isbn.translate(None, \" -\")]", "def __contains__(self, i):\n return i in self._ar", "def is_pandigital(numbers, require_all_digits = True):\n if require_all_digits:\n \tmust_contain = set('123456789')\n else:\t\n \tmust_contain = set([str(digit) for digit in xrange(1, len(numbers) + 1)])\n\n result = set(numbers)\n return len(numbers) == len(result) and result == must_contain\n # count = collections.Counter()\n # count.update([x for num in numbers for x in str(num)])\n # return all(x == 1 for x in count.values()) and set(count.keys()) == must_contain", "def all(a: list[int], b: int) -> bool:\n i = 0\n if len(a) == 0:\n return False\n else:\n while i < len(a):\n if a[i] == b:\n i += 1\n else:\n return False\n return True", "def nonzero(list_):\n for a in list_:\n if a!=0:\n return True\n return False", "def is_even(bin_num):\n if list(bin_num).count('1') % 2 == 0:\n return True\n else:\n return False", "def needs_recoding(strings):\n for string in strings:\n for char in string:\n if 127 < ord(char) < 256:\n return True\n return False", "def is_in(elt, seq):\n return any(x is elt for x in seq)", "def is_entry_in_list(entry, a_list):\n for item in a_list:\n if entry['description'] == 
item['description']:\n return True\n return False", "def validBookObject(bookObject):\n return (\"name\" in bookObject and\n \"price\" in bookObject and\n \"isbn\" in bookObject)", "def validate_individual_identifiers(identifier: str, cpf: bool = True) -> bool:\n identifier = re.sub(r\"\\-|\\.|/\", \"\", identifier)\n dv = identifier[:-2]\n\n CPF_WEIGHTS = (11, 10, 9, 8, 7, 6, 5, 4, 3, 2)\n CNPJ_WEIGHTS = (6, 5, 4, 3, 2, 9, 8, 7, 6, 5, 4, 3, 2)\n\n if cpf:\n check = calculate_id_digit(numbers=dv, weights=CPF_WEIGHTS[1:])\n check = calculate_id_digit(numbers=check, weights=CPF_WEIGHTS)\n else:\n check = calculate_id_digit(numbers=dv, weights=CNPJ_WEIGHTS[1:])\n check = calculate_id_digit(numbers=check, weights=CNPJ_WEIGHTS)\n\n return identifier == check", "def is_isni(val):\n val = val.replace(\"-\", \"\").replace(\" \", \"\").upper()\n if len(val) != 16:\n return False\n try:\n r = 0\n for x in val[:-1]:\n r = (r + int(x)) * 2\n ck = (12 - r % 11) % 11\n return ck == _convert_x_to_10(val[-1])\n except ValueError:\n return False", "def is_code_contain_multiple_bad_digits(processed_code):\n return True if list(processed_code).count(\"?\") > 1 else False", "def create_identifiers_lists(identifiers):\n issn_list = []\n isbn_list = []\n\n for ident in identifiers:\n if ident[\"scheme\"] == \"ISSN\":\n issn_list.append(ident[\"value\"])\n\n if ident[\"scheme\"] == \"ISBN\":\n isbn_list.append(ident[\"value\"])\n\n return issn_list, isbn_list", "def test_check_barcode(self):\r\n self.assertEqual(check_barcode('AA', None, ['AA']), (False, 'AA',\r\n False))\r\n self.assertEqual(check_barcode('GCATCGTCCACA', 'golay_12',\r\n ['GCATCGTCAACA']), (2, 'GCATCGTCAACA', True))\r\n # num errors for golay code is currently in bits\r\n self.assertEqual(check_barcode('GGTT', 4, ['TTTT']), (2, 'TTTT', True))", "def contains_sequence(dna1, dna2):\n return dna2 in dna1", "def is_postal_code(elem):\n return 'post' in elem.attrib['k']", "def subjects(data):\n\n subject = [\"R\", \"r\", \"NR\", \"nr\"]\n if data not in subject:\n return False\n return True", "def check_if_available(main_page,rented_reader, book_code,\n rented_book_data):\n\n for line in rented_reader:\n if line[0] == book_code:\n if line[-2] == 'FALSE':\n print('Books is unavailable')\n return\n else:\n rented_book_data = line\n change_books_status(main_page,book_code,\n rented_book_data)\n print(\"Congratulations, you've rented a book!\")\n return", "def check_code_and_rent(main_page, book_code):\n\n with open('rented.csv', 'r') as rented_base:\n rented_reader = csv.reader(rented_base)\n next(rented_reader)\n\n rented_book_data = []\n check_if_available(main_page, rented_reader, book_code,\n rented_book_data)\n\n if rented_book_data == []:\n print(\"There is no book with this code\")\n return 1", "def test_hindi(doc):\n hindi_dictionary = ['kai','hai','dhaan','dhan','jhona','pili','jankari','saaf','mela','narma','raja','brahma','jai','parbhani','sangli','jana']\n flag = any(hindi in doc for hindi in hindi_dictionary)\n return(flag)", "def flagSet():\r\n for flag in flags:\r\n if flags[flag]:\r\n return True\r\n return False", "def find(ss, list_seq):\n\tfor item in list_seq:\n\t\tif item in ss:\n\t\t\treturn True\n\treturn False", "def bases_mask_is_valid(bases_mask):\n try:\n for read in bases_mask.upper().split(','):\n if not re.match(r'^([IY][0-9]+|[IY]*)(N[0-9]+|N*)$',read):\n return False\n return True\n except AttributeError:\n return False", "def in_results(compound):\n name = decode(compound)\n return (name in results_bank)", "def 
is_id_valid(id_code: str) -> bool:\n if is_valid_gender_number(int(id_code[0:1])):\n if is_valid_year_number(int(id_code[1:3])):\n if is_valid_month_number(int(id_code[3:5])):\n if is_valid_day_number(int(id_code[0:1]), int(id_code[1:3]), int(id_code[3:5]), int(id_code[5:7])):\n if is_valid_birth_number(int(float(id_code[7:10]))):\n if is_valid_control_number(id_code):\n return True\n else:\n return False\n else:\n return False\n\n else:\n return False\n else:\n return False\n else:\n return False", "def is_apriori(Ck_item, Lksub1):\r\n for item in Ck_item:\r\n sub_Ck = Ck_item - frozenset([item])\r\n if sub_Ck not in Lksub1:\r\n return False\r\n return True", "def am_i_wilson(number):\n return number in (5, 13, 563)", "def checkIfInList(_addr, _list):\n for item in _list:\n if (_addr.this == item.this):\n return True\n \n return False", "def is_apriori(Ck_item, Lksub1):\n for item in Ck_item:\n sub_Ck = Ck_item - frozenset([item])\n if sub_Ck not in Lksub1:\n return False\n return True", "def xs_exists(i, r, g):\n # all istopoes\n act_i = ['U234', 'U235', 'U236', 'U238', 'PU238', 'PU239',\n 'PU240', 'PU241', 'PU242', 'NP237', 'AM241', 'AM243']\n fp_i = ['RH103', 'CS133', 'ND143', 'ND145', 'GD155', 'MO95', 'TC99', 'RU101', 'AG107', 'AG109', 'SM147', 'SM149', 'SM150',\n 'SM151', 'SM152', 'EU153', 'XE135', 'I135', 'IN115', 'CD106', 'CD108', 'CD110', 'CD111', 'CD112', 'CD113', 'CD114', 'CD116', 'B10', 'B11']\n\n if i != None and i not in act_i and i not in fp_i and i not in ['MACR', 'MACRT']:\n raise ValueError('Update of iso lists required not present iso', i)\n\n # Exclusive to cathegory reac\n act_r = ['fiss', 'nufi', 'spec']\n macr_r = ['ener', 'difc', 'tota']\n\n # for i=None\n if r != None and g != None:\n if 'tran' in r:\n if r[5] == '1' and g != '1':\n return False\n if r[5] == '2' and g != '2':\n return False\n\n # for i!=None\n if i != None:\n if i in act_i:\n if r in macr_r:\n return False\n\n if i in fp_i:\n if r in macr_r:\n return False\n if r in act_r:\n return False\n\n if 'MACR' in i:\n if 'tran2' in r:\n return False\n if 'tran3' in r:\n return False\n\n # excs are reaction n2n, n3n,... If the iso has high abso, then it doesnt shouw this r\n if i == 'GD155' or i == 'SM150' or i == 'XE135' or i == 'I135' or i == 'XE135' or i == 'B10':\n if r == 'excs':\n return False\n\n return True", "def __contains__(self, IB):\n return IB in self._mutants_by_IB", "def check(indivs, geno_list):\r\n\tfor i in xrange(0,len(indivs)):\r\n\t\tif indivs[i] not in geno_list:\r\n\t\t\t# print \"this is not in: \"+ indivs[i]\r\n\t\t\treturn False\r\n\treturn True", "def all(iterable):\n for item in iterable:\n if not item:\n return False\n return True" ]
[ "0.74519074", "0.72815156", "0.7090671", "0.65742284", "0.65578085", "0.6463269", "0.63166803", "0.6275632", "0.6265059", "0.62025166", "0.6028817", "0.59755796", "0.58285195", "0.5798127", "0.5781443", "0.5775947", "0.570381", "0.563113", "0.5504642", "0.54982454", "0.5482436", "0.5482395", "0.54187334", "0.5376089", "0.53284603", "0.52972955", "0.5294533", "0.5265732", "0.5236231", "0.5225542", "0.5185511", "0.5170268", "0.516427", "0.5117657", "0.5108568", "0.5106614", "0.51021886", "0.50825536", "0.50679314", "0.50621784", "0.5058007", "0.5056032", "0.5053477", "0.50527567", "0.5048325", "0.5033638", "0.50215185", "0.50027466", "0.49991518", "0.49951735", "0.4984209", "0.4967936", "0.4956636", "0.49440297", "0.49375695", "0.4930883", "0.4928989", "0.49254346", "0.4899163", "0.48842666", "0.48801666", "0.48638216", "0.48485163", "0.4845541", "0.48437014", "0.4836093", "0.48353106", "0.48328343", "0.48281726", "0.48248568", "0.48138347", "0.48061335", "0.48058206", "0.48048496", "0.48045924", "0.47997978", "0.4799108", "0.47980893", "0.47834623", "0.4783444", "0.4781964", "0.4780377", "0.47642913", "0.47607815", "0.47509912", "0.47468942", "0.47407404", "0.47325593", "0.47311842", "0.472956", "0.47277611", "0.47267264", "0.47205853", "0.47203246", "0.47181663", "0.47124", "0.47042623", "0.47024554", "0.46987897", "0.46928155" ]
0.8204821
0
Calculates a one's complement integer from the given input value's bits
def ones_complement(x, bits=16): return x ^ ((1 << bits) - 1)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def ones_complement(val):\n #mask = (1 << val.bit_length()) - 1\n #return int(hex(val ^ mask), 16)\n b = bin(val)\n b = b.replace('0', 'x')\n b = b.replace('1', '0')\n b = b.replace('x', '1')\n b = b.replace('1b', '0b')\n return int(b, 2)", "def complement(x):\n out = 1 - x\n return out", "def twos_complement(input_value, num_bits=16):\n mask = 2 ** (num_bits - 1)\n return -(input_value & mask) + (input_value & ~mask)", "def twos_complement(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n val = val - (1 << bits) # compute negative value\n return val # return positive value as is", "def twos_complement(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set\n val = val - (2 ** bits) # compute negative value\n return val", "def twos_complement_to_unsigned(val, bits):\n if val >= 0:\n return val\n all_one = (1 << bits)-1\n val = ((-val)^all_one)+1\n\n return val", "def twos_complement(n, bits):\n if n < 0 or n >= 2**bits:\n raise ValueError\n\n return 2**bits - n", "def findComplement(self, num: int) -> int:\n n = num\n xor = 1\n while n > 0:\n num = num ^ xor\n xor = xor << 1\n n = n >> 1\n return num", "def create_bit_negative(self, value, bits):\n imm_code = bin(value).split('b')[1]\n imm_code = '0'*(bits - len(imm_code)) + imm_code\n if value < 0:\n imm_lst = []\n for bit in imm_code:\n imm_lst.append(bit)\n flip_bit = False\n place = bits - 1\n while place >= 0:\n if not flip_bit and imm_lst[place] == \"1\":\n flip_bit = True\n elif flip_bit:\n if imm_lst[place] == \"0\":\n imm_lst[place] = \"1\"\n else:\n imm_lst[place] = \"0\"\n place -= 1\n imm_code = \"\".join(imm_lst)\n return imm_code", "def find_complement(num):\n pass", "def complement(self)->'SInt':\r\n S = SInt(self.nbBytes)\r\n S.binaire = '0' * (len(self) - 1) + '1'\r\n S += super(SInt, self).complement()\r\n return S", "def negate(x):\n return x ^ 1", "def maskbits(x: int, n:int) -> int:\n if n >= 0:\n return x & ((1 << n) - 1)\n else:\n return x & (-1 << -n)", "def bitmask(n: int) -> int:\n if n >= 0:\n return (1 << n) - 1\n else:\n return -1 << -n", "def bintogray(x: int) -> int:\n assert x >= 0\n return x ^ (x >> 1)", "def _get_binary(value, bits):\n\n # http://www.daniweb.com/code/snippet216539.html\n return ''.join([str((value >> y) & 1) for y in range(bits - 1, -1, -1)])", "def twos_comp(val, bits):\r\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\r\n val = val - (1 << bits) # compute negative value\r\n return val # return positive value as is\r", "def twos_comp(val, bits):\r\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\r\n val = val - (1 << bits) # compute negative value\r\n return val # return positive value as is\r", "def twos_comp(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n val = val - (1 << bits) # compute negative value\n return val # return positive value as is", "def twos_comp(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n val = val - (1 << bits) # compute negative value\n return val # return positive value as is", "def twos_comp(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n val = val - (1 << bits) # compute negative value\n return val # return positive value as is", "def _vint_dezigzagify(number):\n\n assert number >= 0, 'number is less than 0'\n is_neg = number & 1\n num = number >> 1\n if is_neg:\n num = ~num\n return num", "def not_(bits: int) -> 
int:\n # The `& ALL_` is necessary so python doesn't treat bits as 2's compliment\n return ~bits & ALL_", "def invert(val):\n return -1 * coerce_to_int(val)", "def bitwiseComplement(self, N):\n if not N:\n return 1\n return N ^ (2 ** math.ceil(math.log(N+1e-5, 2)) - 1)", "def mask(n):\n if n >= 0:\n return 2**n - 1\n else:\n return 0", "def mask(n):\n if n >= 0:\n return 2**n - 1\n else:\n return 0", "def twos_comp(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n val = val - (1 << bits) # compute negative value\n return val", "def twos_comp(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n val = val - (1 << bits) # compute negative value\n return val", "def _bits(num):\r\n return bin(int(num))[2:]", "def get_lowest_unset_bit(x):\n\n return ~x & (x + 1)", "def twos_complement(value: int, width: int) -> int:\n signmask = 1 << (width - 1)\n if (value & signmask) == 0:\n # Mask off sign bit.\n return value & (signmask - 1)\n else:\n # Two's complement.\n return -bit_invert(value, width - 1) - 1", "def twos_comp(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n val = val - (1 << bits) # compute negative value\n return val", "def to_bit(number):\n if number in range (256):\n binary = bin(number)[2::]\n return '0'*(8-len(binary)) + binary\n return '-1'", "def __neg__(self) -> 'SInt':\r\n return self.complement()", "def get_integer_minus_one(integer):\n return int(integer) - 1", "def _FixInt(value: int) -> int:\n if value < 0:\n value &= 0xFFFFFFFF\n return value", "def _twosComplement(x, bits=16):\n _checkInt(bits, minvalue=0, description='number of bits')\n _checkInt(x, description='input')\n upperlimit = 2 ** (bits - 1) - 1\n lowerlimit = -2 ** (bits - 1)\n if x > upperlimit or x < lowerlimit:\n raise ValueError('The input value is out of range. Given value is {0}, but allowed range is {1} to {2} when using {3} bits.' \\\n .format(x, lowerlimit, upperlimit, bits))\n\n # Calculate two'2 complement\n if x >= 0:\n return x\n return x + 2 ** bits", "def twos_comp(val, bits):\n if( (val&(1<<(bits-1))) != 0 ):\n val = val - (1<<bits)\n return val", "def bin2dec(number):\n\tmysum = 0\n\tnumber = str(number)[::-1]\n\tfor i,x in enumerate(number):\n\t\tif int(x) > 0:\n\t\t\tmysum += (2**i)\n\treturn mysum", "def _fromTwosComplement(x, bits=16):\n _checkInt(bits, minvalue=0, description='number of bits')\n\n _checkInt(x, description='input')\n upperlimit = 2 ** (bits) - 1\n lowerlimit = 0\n if x > upperlimit or x < lowerlimit:\n raise ValueError('The input value is out of range. Given value is {0}, but allowed range is {1} to {2} when using {3} bits.' \\\n .format(x, lowerlimit, upperlimit, bits))\n\n # Calculate inverse(?) 
of two'2 complement\n limit = 2 ** (bits - 1) - 1\n if x <= limit:\n return x\n return x - 2 ** bits", "def comp1(numb):\n\n\tbi= binario(numb)\n\tc11 = []\n\tif int(numb) < 0:\n\t\tfor x in bi:\n\t\t\tc11.append(int( not x))\n\n\t\tc11.append(1)\n\t\tc11.reverse()\n\telse:\n\t\tc11 = bi\n\t\tc11.append(0)\n\t\tc11.reverse()\n\treturn c11", "def test_binary_complement_longer_mask(self):\n self.assertEqual(utils.binary_complement('11010', '000010'), '00000')", "def twos_comp(val, num_bits):\n if ((val & (1 << (num_bits - 1))) != 0):\n val = val - (1 << num_bits)\n return val", "def bit_in_place(x, n):\n return (x & 2**n)", "def get_lowest_set_bit(x):\n\n return x & -x", "def test_binary_complement_shorter_equal_mask(self):\n self.assertEqual(utils.binary_complement('11010', '00010'), '00000')\n self.assertEqual(utils.binary_complement('11010', '0010'), '00000')", "def twos_comp(val, bits):\n if (val & (1 << (bits - 1))) != 0:\n val = val - (1 << bits)\n return val", "def getTwosComplement(raw_val, length):\n val = raw_val\n if raw_val & (1 << (length - 1)):\n val = raw_val - (1 << length)\n return val", "def twos_comp(val, bits):\n if ((val & (1 << (bits - 1))) != 0):\n val = val - (1 << bits)\n return val", "def twos_comp(val, bits):\n if((val & (1 << (bits - 1))) != 0):\n val = val - (1 << bits)\n return val", "def canonicalizeInt(val):\n b = ByteArray(val)\n if len(b) == 0:\n b = ByteArray(0, length=1)\n if (b[0] & 0x80) != 0:\n b = ByteArray(0, length=len(b) + 1) | b\n return b", "def negate(a):\n res = 0\n d = 1 if a < 0 else -1\n while a != 0:\n res += d\n a += d\n return res", "def drop_lowest_set_bit(x):\n\n return x & (x - 1)", "def bsr(value, bits):\n minint = -2147483648\n if bits == 0:\n return value\n elif bits == 31:\n if value & minint:\n return 1\n else:\n return 0\n elif bits < 0 or bits > 31:\n raise ValueError('bad shift count')\n tmp = (value & 0x7FFFFFFE) // 2**bits\n if (value & minint):\n return (tmp | (0x40000000 // 2**(bits-1)))\n else:\n return tmp", "def drop_lowest_unset_bit(x):\n\n return x | (x + 1)", "def _vint_zigzagify(number):\n num = number << 1\n if number < 0:\n num = ~num\n return num", "def complement_base(base):\n return complements[base]", "def bitget(x, n):\n return (x >> n) & 1", "def replace_lowest_one_with_zero(x):\n return x & (x-1)", "def twos_comp(self,val=0, bits=0):\r\n\t\tif (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\r\n\t\t\tval = val - (1 << bits) # compute negative value\r\n\t\treturn val", "def unset_dq_bits(value, okbits=32+64+512, verbose=False):\n bin_bits = np.binary_repr(okbits)\n n = len(bin_bits)\n for i in range(n):\n if bin_bits[-(i+1)] == '1':\n if verbose:\n print(2**i)\n \n value -= (value & 2**i)\n \n return value", "def _generate_bitmask(n: int = 2, n_bits: int = 8) -> int:\n all_ones = 2 ** n_bits - 1\n cancel_bits = 2 ** n - 1\n return all_ones - cancel_bits", "def bcd2int( value ):\n\tout = 0\n\tfor d in ( value >> 4, value ):\n\t\tfor p in ( 1, 2, 4 ,8 ):\n\t\t\tif d & 1:\n\t\t\t\tout += p\n\t\t\td >>= 1\n\t\tout *= 10\n\treturn int( out/10 )\n\t#return (value or 0) - 6 * ((value or 0) >> 4)", "def get_bit(num, i):\r\n return 1 if num & 1 << i else 0", "def bfx(value: int, msb: int, lsb: int) -> int:\n mask = bitmask((msb, lsb))\n return (value & mask) >> lsb", "def twos_comp(self, val, bits=8):\n if (val & (1 << (bits - 1))) != 0:\n val -= (1 << bits)\n return val", "def reverseInteger(x):\n sign = [1,-1][x < 0]\n res = sign * int(str(abs(x))[::-1])\n return res if -(2**31) < res < 2**31 
else 0", "def bits_to_int(jit_bits: ir_bits.Bits, signed: bool) -> int:\n assert isinstance(jit_bits, ir_bits.Bits), jit_bits\n bit_count = jit_bits.bit_count()\n bits_value = jit_bits.to_uint()\n\n return (bits_value if not signed else bit_helpers.from_twos_complement(\n bits_value, bit_count))", "def twos_complement(num: str, _base: int = 16) -> str:\n _bytes = int(len(format(int(num, _base), \"x\")) / 2) or 1\n return format((1 << 8 * _bytes) - int(num, _base), f\"#0{2 + _bytes*2}x\")", "def _get_bit(self, num, bit, mask=1):\n return (int(num) >> bit) & mask", "def bit_component(x, i):\n return (x & 2**i) >> i", "def integer_to_binary(x, n_bits=N_BITS):\n bit_list = [0] * n_bits\n for i in range(n_bits-1, -1, -1):\n div = x // (2**i)\n mod = x % (2**i)\n bit_list[i] = (div > 0) * 1\n x = mod\n return bit_list[::-1]", "def test_bit_subtract_simple(self):\n ops = [bitwise_operations.bit_subtract(self.test_bin_ones, 0, 8, 1, False, aerospike.BIT_OVERFLOW_FAIL, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([0] * 1 + [1] * 4)\n assert bins[self.test_bin_ones] == expected_result", "def DEC(self, value):\n result = (value - 1) & 0xff\n self.reg.N = result >> 7\n self.reg.Z = result == 0\n return result", "def decompose(n):\n binary_rep = list(bin(n)[2:])\n binary_rep.reverse()\n s = 0\n while(binary_rep[s] == \"0\"): ##find last occurance of a bit 1\n s += 1\n return (s, n>>s) # = n/(2**s))", "def graytobin(x: int) -> int:\n assert x >= 0\n mask = x >> 1\n while(mask != 0):\n x = x ^ mask\n mask = mask >> 1\n return x", "def __gen_bit_values(number):\n number = int(number)\n while number:\n yield number & 0x1\n number >>= 1", "def nBitInteger(s, bits):\n try:\n value = int(s)\n if value < 0: # negative number: convert to 2's complement\n value = (2**bits) + value\n rep = ('{0:' + str(bits) + 'b}').format(value)\n if len(rep) > bits:\n raise AsmException(\"immediate value excceeds %d bit: %s\"\n % (bits, s))\n return value\n except ValueError:\n raise AsmException(\"invalid integer value: %s\" % s)", "def bin_to_progression(cnum):\n cnum = '{:b}'.format(cnum)\n lbits = len(cnum)\n cnum = tuple(bool(int(b)) for b in cnum)\n res = [-x if x % 2 == 0 else x for x in xrange(lbits)]\n for x in reversed(range(lbits)):\n if not cnum[x]:\n del res[x]\n return res", "def same_bits_down(x):\n\n bit_array = get_first(x, get_consecutive_10)\n\n if (bit_array == 0):\n return x\n\n lower, upper = split_bits_bit_array(x, bit_array)\n upper = swap_bits_bit_array(upper, bit_array)\n lower = shift_lowest_unset_bit_bit_array(lower, bit_array)\n\n return upper | lower", "def _varint_cost(n: int):\n result = 1\n while n >= 128:\n result += 1\n n >>= 7\n return result", "def bitarray2dec(in_bitarray):\n\n number = 0\n\n for i in range(len(in_bitarray)):\n number = number + in_bitarray[i]*pow(2, len(in_bitarray)-1-i)\n return number", "def octet(n):\n\t\n\treturn n & 0b11111111", "def to_signed(x, bits):\n if x >= 0:\n return x\n else:\n return int('1{:0{}b}'.format(2**(bits-1)+x, bits-1), 2)", "def from_int(self, value: int):\n data = []\n for bits in self.data:\n data.append(value & (1 << bits.max_bits) - 1)\n value >>= bits.max_bits\n return self.assign(data)", "def cast_bits(value, cur):\n if value is None:\n return None\n return Bits(bin=value)", "def get_bit(num, position):\n\treturn (num >> position) & 0b1", "def reverseBits(self, n: int) -> int:\n # 2021.03.26\n # 1st solution: naive\n \n # 2nd solution: 
optimized\n # time complexity O(1), space complexity O(1) \n # (since length of bits is fixed)\n \"\"\"\n val = 0\n for i in range(32):\n val *= 2\n try:\n assert n % 2 == 0\n except:\n val += 1\n n -= 1\n n //= 2\n return val\n \"\"\"\n \n # 3rd solution (r.f. leetcode solution)\n # Mask-and-shift solution, no loop necessary!\n # Both time and space complexity are still O(1)\n \n # Use bitwise shift operators << and >>, and bit logical operators & and |:\n # divide 32-bit n into two parts, then switch then, and update record in n:\n n = (n >> 16) | (n << 16)\n # 0x -> hexadecimal; \n # 0xff = 255 (11111111), 0xff00 = 65280 (1111111100000000)\n # divide into 4 8-bit parts, masked by 0xff00ff00 or 0x00ff00ff, \n # then switch the position and record into n.\n n = ((n & 0xff00ff00) >> 8) | ((n & 0x00ff00ff) << 8)\n # repeat ...\n # 0xcc = 0b11001100, 0xaa = 0b10101010\n n = ((n & 0xf0f0f0f0) >> 4) | ((n & 0x0f0f0f0f) << 4)\n n = ((n & 0xcccccccc) >> 2) | ((n & 0x33333333) << 2)\n n = ((n & 0xaaaaaaaa) >> 1) | ((n & 0x55555555) << 1)\n return n\n \n # 1st solution: runtime 48 ms (8%), memory 14.1 MB (71%)\n # 2nd solution: runtime 36 ms (37%), memory 14 MB (91%)\n # 3rd solution: runtime 28 ms (89%), memory 14.1 MB (91%)", "def bin2dec(x):\n return int(x, 2)", "def bitset(x, n, bv):\n if bv==1:\n x |= 2**n\n else:\n x ^= bit_in_place(x, n)\n return(x)", "def get_least_significant_bits(x, n):\n\n return x & ones(n)", "def shift_lowest_unset_bit_bit_array(x, bit_array):\n\n if (x == -1):\n return -1\n\n return (x * bit_array) // get_lowest_unset_bit(x)", "def test_bit_subtract_bit_size_signed(self):\n ops = [bitwise_operations.bit_subtract(self.five_255_bin, 0, 8, 156, True, aerospike.BIT_OVERFLOW_FAIL, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([99] * 1 + [255] * 4)\n assert bins[self.five_255_bin] == expected_result", "def _vint_2sctosigned(self, number):\n assert number >= 0, 'number is less than 0'\n if (number >> (self._vint_2sc_max_bits - 1)) & 1:\n number = ~(~number & self._vint_2sc_mask)\n return number", "def get_bit(num, i):\n return num & (1 << i) != 0", "def test_right_binary_complement(self):\n self.assertEqual(utils.binary_complement('10110'), '01001')", "def _bin_backport(x):\n chars = []\n for n in range(7, -1, -1):\n y = x - 2**n\n if y >= 0:\n chars.append('1')\n x = y\n else:\n chars.append('0')\n return ''.join(chars)", "def codage(nbr):\n\tmask=1\n\tresult=0\n\tfor index in range(len(G)):\n\t\tif ((mask<<index)&nbr) != 0:\n\t\t\tresult^=G[len(G)-index-1]\n\treturn result", "def right_extend_lowest_unset_bit(x):\n\n return x & (x + 1)" ]
[ "0.80567867", "0.7861335", "0.75720483", "0.7485613", "0.7387961", "0.73278314", "0.7293334", "0.70973384", "0.6932238", "0.6895889", "0.68579566", "0.6638871", "0.663495", "0.66179526", "0.6587711", "0.6580346", "0.6573302", "0.6573302", "0.6572931", "0.6572931", "0.6572931", "0.65518785", "0.65473956", "0.64699966", "0.64692134", "0.64620656", "0.64620656", "0.6455367", "0.64527744", "0.64138126", "0.6404493", "0.64015096", "0.6393023", "0.63603383", "0.63573354", "0.6339522", "0.63279325", "0.6302109", "0.63001424", "0.6276355", "0.6261586", "0.62386763", "0.6228399", "0.62003744", "0.61948025", "0.61776173", "0.6162381", "0.61535877", "0.61393833", "0.61279655", "0.61264193", "0.61158335", "0.6114073", "0.6113807", "0.60840756", "0.605334", "0.6047886", "0.60277885", "0.5998137", "0.5991018", "0.59809095", "0.59773535", "0.5966666", "0.5960307", "0.5954052", "0.5952673", "0.595249", "0.5936338", "0.5922241", "0.59209406", "0.5918355", "0.5911202", "0.5890824", "0.5881289", "0.58528125", "0.58522636", "0.58522296", "0.5843375", "0.583977", "0.58333176", "0.58252144", "0.5816673", "0.5812065", "0.58070964", "0.5799814", "0.5791504", "0.57762945", "0.57757306", "0.5775459", "0.57744485", "0.5773984", "0.57658637", "0.57628787", "0.57455933", "0.5743324", "0.5742299", "0.574228", "0.5729873", "0.57187647", "0.5717247" ]
0.75856197
2
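A quick sanity check for the ones_complement entry above (a sketch, assuming the 16-bit default width): XOR-ing with an all-ones mask flips every bit inside the width, and applying the operation twice restores the input.

def ones_complement(x, bits=16):
    # Flip every bit within the given width by XOR-ing against an all-ones mask.
    return x ^ ((1 << bits) - 1)

assert ones_complement(0b101) == 0b1111111111111010          # low bits flipped, high bits set
assert ones_complement(ones_complement(0x1234)) == 0x1234    # involution: double complement restores x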
Calculates a two's complement integer from the given input value's bits
def twos_complement(input_value, num_bits=16):
    mask = 2 ** (num_bits - 1)
    return -(input_value & mask) + (input_value & ~mask)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def twos_complement(n, bits):\n if n < 0 or n >= 2**bits:\n raise ValueError\n\n return 2**bits - n", "def twos_complement(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n val = val - (1 << bits) # compute negative value\n return val # return positive value as is", "def twos_complement(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set\n val = val - (2 ** bits) # compute negative value\n return val", "def twos_complement_to_unsigned(val, bits):\n if val >= 0:\n return val\n all_one = (1 << bits)-1\n val = ((-val)^all_one)+1\n\n return val", "def ones_complement(val):\n #mask = (1 << val.bit_length()) - 1\n #return int(hex(val ^ mask), 16)\n b = bin(val)\n b = b.replace('0', 'x')\n b = b.replace('1', '0')\n b = b.replace('x', '1')\n b = b.replace('1b', '0b')\n return int(b, 2)", "def complement(x):\n out = 1 - x\n return out", "def findComplement(self, num: int) -> int:\n n = num\n xor = 1\n while n > 0:\n num = num ^ xor\n xor = xor << 1\n n = n >> 1\n return num", "def _twosComplement(x, bits=16):\n _checkInt(bits, minvalue=0, description='number of bits')\n _checkInt(x, description='input')\n upperlimit = 2 ** (bits - 1) - 1\n lowerlimit = -2 ** (bits - 1)\n if x > upperlimit or x < lowerlimit:\n raise ValueError('The input value is out of range. Given value is {0}, but allowed range is {1} to {2} when using {3} bits.' \\\n .format(x, lowerlimit, upperlimit, bits))\n\n # Calculate two'2 complement\n if x >= 0:\n return x\n return x + 2 ** bits", "def _fromTwosComplement(x, bits=16):\n _checkInt(bits, minvalue=0, description='number of bits')\n\n _checkInt(x, description='input')\n upperlimit = 2 ** (bits) - 1\n lowerlimit = 0\n if x > upperlimit or x < lowerlimit:\n raise ValueError('The input value is out of range. Given value is {0}, but allowed range is {1} to {2} when using {3} bits.' \\\n .format(x, lowerlimit, upperlimit, bits))\n\n # Calculate inverse(?) 
of two'2 complement\n limit = 2 ** (bits - 1) - 1\n if x <= limit:\n return x\n return x - 2 ** bits", "def twos_complement(value: int, width: int) -> int:\n signmask = 1 << (width - 1)\n if (value & signmask) == 0:\n # Mask off sign bit.\n return value & (signmask - 1)\n else:\n # Two's complement.\n return -bit_invert(value, width - 1) - 1", "def ones_complement(x, bits=16):\n return x ^ ((1 << bits) - 1)", "def create_bit_negative(self, value, bits):\n imm_code = bin(value).split('b')[1]\n imm_code = '0'*(bits - len(imm_code)) + imm_code\n if value < 0:\n imm_lst = []\n for bit in imm_code:\n imm_lst.append(bit)\n flip_bit = False\n place = bits - 1\n while place >= 0:\n if not flip_bit and imm_lst[place] == \"1\":\n flip_bit = True\n elif flip_bit:\n if imm_lst[place] == \"0\":\n imm_lst[place] = \"1\"\n else:\n imm_lst[place] = \"0\"\n place -= 1\n imm_code = \"\".join(imm_lst)\n return imm_code", "def _bits(num):\r\n return bin(int(num))[2:]", "def find_complement(num):\n pass", "def twos_comp(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n val = val - (1 << bits) # compute negative value\n return val # return positive value as is", "def twos_comp(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n val = val - (1 << bits) # compute negative value\n return val # return positive value as is", "def twos_comp(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n val = val - (1 << bits) # compute negative value\n return val # return positive value as is", "def twos_comp(val, bits):\r\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\r\n val = val - (1 << bits) # compute negative value\r\n return val # return positive value as is\r", "def twos_comp(val, bits):\r\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\r\n val = val - (1 << bits) # compute negative value\r\n return val # return positive value as is\r", "def _get_binary(value, bits):\n\n # http://www.daniweb.com/code/snippet216539.html\n return ''.join([str((value >> y) & 1) for y in range(bits - 1, -1, -1)])", "def twos_comp(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n val = val - (1 << bits) # compute negative value\n return val", "def twos_comp(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n val = val - (1 << bits) # compute negative value\n return val", "def complement(self)->'SInt':\r\n S = SInt(self.nbBytes)\r\n S.binaire = '0' * (len(self) - 1) + '1'\r\n S += super(SInt, self).complement()\r\n return S", "def bitmask(n: int) -> int:\n if n >= 0:\n return (1 << n) - 1\n else:\n return -1 << -n", "def twos_comp(val, bits):\n if (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\n val = val - (1 << bits) # compute negative value\n return val", "def twos_comp(val, bits):\n if( (val&(1<<(bits-1))) != 0 ):\n val = val - (1<<bits)\n return val", "def bintogray(x: int) -> int:\n assert x >= 0\n return x ^ (x >> 1)", "def bin2dec(number):\n\tmysum = 0\n\tnumber = str(number)[::-1]\n\tfor i,x in enumerate(number):\n\t\tif int(x) > 0:\n\t\t\tmysum += (2**i)\n\treturn mysum", "def maskbits(x: int, n:int) -> int:\n if n >= 0:\n return x & ((1 << n) - 1)\n else:\n return x & (-1 << -n)", "def twos_comp(val, num_bits):\n if ((val & (1 << (num_bits - 1))) != 0):\n val = val - (1 << num_bits)\n return val", "def bitwiseComplement(self, N):\n if not N:\n 
return 1\n return N ^ (2 ** math.ceil(math.log(N+1e-5, 2)) - 1)", "def twos_comp(val, bits):\n if (val & (1 << (bits - 1))) != 0:\n val = val - (1 << bits)\n return val", "def bit_in_place(x, n):\n return (x & 2**n)", "def twos_comp(val, bits):\n if((val & (1 << (bits - 1))) != 0):\n val = val - (1 << bits)\n return val", "def getTwosComplement(raw_val, length):\n val = raw_val\n if raw_val & (1 << (length - 1)):\n val = raw_val - (1 << length)\n return val", "def twos_comp(val, bits):\n if ((val & (1 << (bits - 1))) != 0):\n val = val - (1 << bits)\n return val", "def invert(val):\n return -1 * coerce_to_int(val)", "def _vint_dezigzagify(number):\n\n assert number >= 0, 'number is less than 0'\n is_neg = number & 1\n num = number >> 1\n if is_neg:\n num = ~num\n return num", "def _vint_2sctosigned(self, number):\n assert number >= 0, 'number is less than 0'\n if (number >> (self._vint_2sc_max_bits - 1)) & 1:\n number = ~(~number & self._vint_2sc_mask)\n return number", "def mask(n):\n if n >= 0:\n return 2**n - 1\n else:\n return 0", "def mask(n):\n if n >= 0:\n return 2**n - 1\n else:\n return 0", "def not_(bits: int) -> int:\n # The `& ALL_` is necessary so python doesn't treat bits as 2's compliment\n return ~bits & ALL_", "def to_bit(number):\n if number in range (256):\n binary = bin(number)[2::]\n return '0'*(8-len(binary)) + binary\n return '-1'", "def ctwo_to_int(x):\n if is_neg_ctwo(x):\n return -(2**(len(x)-1) - int(x[1:], 2))\n else:\n return int(x[1:], 2)", "def bsr(value, bits):\n minint = -2147483648\n if bits == 0:\n return value\n elif bits == 31:\n if value & minint:\n return 1\n else:\n return 0\n elif bits < 0 or bits > 31:\n raise ValueError('bad shift count')\n tmp = (value & 0x7FFFFFFE) // 2**bits\n if (value & minint):\n return (tmp | (0x40000000 // 2**(bits-1)))\n else:\n return tmp", "def twos_complement(num: str, _base: int = 16) -> str:\n _bytes = int(len(format(int(num, _base), \"x\")) / 2) or 1\n return format((1 << 8 * _bytes) - int(num, _base), f\"#0{2 + _bytes*2}x\")", "def _FixInt(value: int) -> int:\n if value < 0:\n value &= 0xFFFFFFFF\n return value", "def bcd2int( value ):\n\tout = 0\n\tfor d in ( value >> 4, value ):\n\t\tfor p in ( 1, 2, 4 ,8 ):\n\t\t\tif d & 1:\n\t\t\t\tout += p\n\t\t\td >>= 1\n\t\tout *= 10\n\treturn int( out/10 )\n\t#return (value or 0) - 6 * ((value or 0) >> 4)", "def __neg__(self) -> 'SInt':\r\n return self.complement()", "def _u2i(number):\n mask = (2 ** 32) - 1\n if number & (1 << 31):\n v = number | ~mask\n else:\n v = number & mask\n if v >= 0:\n return v;\n else:\n if exceptions:\n raise _pigpioError(error(v))\n else:\n return v", "def twos_comp(self,val=0, bits=0):\r\n\t\tif (val & (1 << (bits - 1))) != 0: # if sign bit is set e.g., 8bit: 128-255\r\n\t\t\tval = val - (1 << bits) # compute negative value\r\n\t\treturn val", "def bits_to_int(jit_bits: ir_bits.Bits, signed: bool) -> int:\n assert isinstance(jit_bits, ir_bits.Bits), jit_bits\n bit_count = jit_bits.bit_count()\n bits_value = jit_bits.to_uint()\n\n return (bits_value if not signed else bit_helpers.from_twos_complement(\n bits_value, bit_count))", "def reverseBits(self, n: int) -> int:\n # 2021.03.26\n # 1st solution: naive\n \n # 2nd solution: optimized\n # time complexity O(1), space complexity O(1) \n # (since length of bits is fixed)\n \"\"\"\n val = 0\n for i in range(32):\n val *= 2\n try:\n assert n % 2 == 0\n except:\n val += 1\n n -= 1\n n //= 2\n return val\n \"\"\"\n \n # 3rd solution (r.f. 
leetcode solution)\n # Mask-and-shift solution, no loop necessary!\n # Both time and space complexity are still O(1)\n \n # Use bitwise shift operators << and >>, and bit logical operators & and |:\n # divide 32-bit n into two parts, then switch then, and update record in n:\n n = (n >> 16) | (n << 16)\n # 0x -> hexadecimal; \n # 0xff = 255 (11111111), 0xff00 = 65280 (1111111100000000)\n # divide into 4 8-bit parts, masked by 0xff00ff00 or 0x00ff00ff, \n # then switch the position and record into n.\n n = ((n & 0xff00ff00) >> 8) | ((n & 0x00ff00ff) << 8)\n # repeat ...\n # 0xcc = 0b11001100, 0xaa = 0b10101010\n n = ((n & 0xf0f0f0f0) >> 4) | ((n & 0x0f0f0f0f) << 4)\n n = ((n & 0xcccccccc) >> 2) | ((n & 0x33333333) << 2)\n n = ((n & 0xaaaaaaaa) >> 1) | ((n & 0x55555555) << 1)\n return n\n \n # 1st solution: runtime 48 ms (8%), memory 14.1 MB (71%)\n # 2nd solution: runtime 36 ms (37%), memory 14 MB (91%)\n # 3rd solution: runtime 28 ms (89%), memory 14.1 MB (91%)", "def bit_component(x, i):\n return (x & 2**i) >> i", "def nBitInteger(s, bits):\n try:\n value = int(s)\n if value < 0: # negative number: convert to 2's complement\n value = (2**bits) + value\n rep = ('{0:' + str(bits) + 'b}').format(value)\n if len(rep) > bits:\n raise AsmException(\"immediate value excceeds %d bit: %s\"\n % (bits, s))\n return value\n except ValueError:\n raise AsmException(\"invalid integer value: %s\" % s)", "def negate(x):\n return x ^ 1", "def test_binary_complement_longer_mask(self):\n self.assertEqual(utils.binary_complement('11010', '000010'), '00000')", "def bitget(x, n):\n return (x >> n) & 1", "def bin2dec(x):\n return int(x, 2)", "def reverseInteger(x):\n sign = [1,-1][x < 0]\n res = sign * int(str(abs(x))[::-1])\n return res if -(2**31) < res < 2**31 else 0", "def twos_comp(self, val, bits=8):\n if (val & (1 << (bits - 1))) != 0:\n val -= (1 << bits)\n return val", "def get_bit(num, i):\r\n return 1 if num & 1 << i else 0", "def decompose(n):\n binary_rep = list(bin(n)[2:])\n binary_rep.reverse()\n s = 0\n while(binary_rep[s] == \"0\"): ##find last occurance of a bit 1\n s += 1\n return (s, n>>s) # = n/(2**s))", "def get_integer_minus_one(integer):\n return int(integer) - 1", "def _get_bit(self, num, bit, mask=1):\n return (int(num) >> bit) & mask", "def complement_base(base):\n return complements[base]", "def bitarray2dec(in_bitarray):\n\n number = 0\n\n for i in range(len(in_bitarray)):\n number = number + in_bitarray[i]*pow(2, len(in_bitarray)-1-i)\n return number", "def test_binary_complement_shorter_equal_mask(self):\n self.assertEqual(utils.binary_complement('11010', '00010'), '00000')\n self.assertEqual(utils.binary_complement('11010', '0010'), '00000')", "def canonicalizeInt(val):\n b = ByteArray(val)\n if len(b) == 0:\n b = ByteArray(0, length=1)\n if (b[0] & 0x80) != 0:\n b = ByteArray(0, length=len(b) + 1) | b\n return b", "def bit_manipulation(self, a: str, b: str) -> str:\n x, y = int(a, 2), int(b, 2)\n while y:\n answer = x ^ y\n carry = (x & y) << 1\n x, y = answer, carry\n return bin(x)[2:]", "def integer_to_binary(x, n_bits=N_BITS):\n bit_list = [0] * n_bits\n for i in range(n_bits-1, -1, -1):\n div = x // (2**i)\n mod = x % (2**i)\n bit_list[i] = (div > 0) * 1\n x = mod\n return bit_list[::-1]", "def bfx(value: int, msb: int, lsb: int) -> int:\n mask = bitmask((msb, lsb))\n return (value & mask) >> lsb", "def _vint_signedto2sc(self, number):\n return number & self._vint_2sc_mask", "def convert_to_bits(n):\n result = []\n if n == 0:\n return [0]\n while n > 0:\n result = [int(n % 
2)] + result\n n = n / 2\n return result", "def pick_byte2(input):\n val = int(input) >> 8\n val = val & 255\n return val", "def bitset(x, n, bv):\n if bv==1:\n x |= 2**n\n else:\n x ^= bit_in_place(x, n)\n return(x)", "def _lsb(self, i : int) -> int:\n\n return i & -i", "def from_int(self, value: int):\n data = []\n for bits in self.data:\n data.append(value & (1 << bits.max_bits) - 1)\n value >>= bits.max_bits\n return self.assign(data)", "def test_right_binary_complement(self):\n self.assertEqual(utils.binary_complement('10110'), '01001')", "def __sub__(self, other: 'SInt') -> 'SInt':\r\n return self + other.complement()", "def _vint_zigzagify(number):\n num = number << 1\n if number < 0:\n num = ~num\n return num", "def test_bit_subtract_bit_size_signed(self):\n ops = [bitwise_operations.bit_subtract(self.five_255_bin, 0, 8, 156, True, aerospike.BIT_OVERFLOW_FAIL, None)]\n\n self.as_connection.operate(self.test_key, ops)\n\n _, _, bins = self.as_connection.get(self.test_key)\n expected_result = bytearray([99] * 1 + [255] * 4)\n assert bins[self.five_255_bin] == expected_result", "def get_lowest_unset_bit(x):\n\n return ~x & (x + 1)", "def _bitsfor(maxval):\n maxvalbits = int(round(math.log(maxval) / math.log(2)))\n if maxval != (1 << maxvalbits):\n raise ValueError(\"maxval must be a power of 2, not %d\" % maxval)\n return maxvalbits", "def comp1(numb):\n\n\tbi= binario(numb)\n\tc11 = []\n\tif int(numb) < 0:\n\t\tfor x in bi:\n\t\t\tc11.append(int( not x))\n\n\t\tc11.append(1)\n\t\tc11.reverse()\n\telse:\n\t\tc11 = bi\n\t\tc11.append(0)\n\t\tc11.reverse()\n\treturn c11", "def _generate_bitmask(n: int = 2, n_bits: int = 8) -> int:\n all_ones = 2 ** n_bits - 1\n cancel_bits = 2 ** n - 1\n return all_ones - cancel_bits", "def int2sfi(integ, bits):\n\n if abs(integ) >= pow(2, bits-1):\n print \"ERROR: Cant represent signed %i with %i bits\" % (integ, bits)\n return \n\n if integ >= 0:\n form = \"0\"+str(bits)+\"b\"\n return \"0b\"+format(integ, form)\n else:\n form = \"0\"+str(bits-1)+\"b\"\n bin_str = format(abs(integ), form)\n bin_str = bin_str.replace(\"0\", \"2\")\n bin_str = bin_str.replace(\"1\", \"0\")\n bin_str = bin_str.replace(\"2\", \"1\")\n binary = int(bin_str, base=2) + 1\n return \"0b1\" + format(binary, form)", "def decode_signed_integer(num: int, size: int) -> int:\n if (num & (1 << (size - 1))) != 0:\n num = num - (1 << size)\n return num", "def decode_bin(tup_terme, nbr):\n terme, msq = tup_terme\n return \"\".join(['-' if msq & (1 << k) else '1' if terme & (1 << k )\n else '0' for k in range(nbr-1, -1, -1)])", "def test_bit_get_int_signed(self):\n ops = [bitwise_operations.bit_get_int(self.five_255_bin, 0, 8, True)]\n\n _, _, result = self.as_connection.operate(self.test_key, ops)\n\n expected_result = -1\n assert result[\"255\"] == expected_result", "def int2bin(n, bits=13):\n return \"\".join([str((n >> y) & 1) for y in range(bits - 1, -1, -1)])", "def cast_bits(value, cur):\n if value is None:\n return None\n return Bits(bin=value)", "def get_bit(num, i):\n return num & (1 << i) != 0", "def to_convert(A, B):\r\n different = 0\r\n C = A ^ B\r\n while C:\r\n different += 1\r\n C = C & C - 1\r\n return different", "def _varint_cost(n: int):\n result = 1\n while n >= 128:\n result += 1\n n >>= 7\n return result", "def to_signed(x, bits):\n if x >= 0:\n return x\n else:\n return int('1{:0{}b}'.format(2**(bits-1)+x, bits-1), 2)", "def bin_to_progression(cnum):\n cnum = '{:b}'.format(cnum)\n lbits = len(cnum)\n cnum = tuple(bool(int(b)) for b in cnum)\n res = [-x if x % 2 
== 0 else x for x in xrange(lbits)]\n for x in reversed(range(lbits)):\n if not cnum[x]:\n del res[x]\n return res", "def bit_invert(value: int, width: int = 32) -> int:\n return ((1 << width) - 1) & (~value)", "def get_base_2(n):\n return str(bin(int(n))).removeprefix('0b')", "def octet(n):\n\t\n\treturn n & 0b11111111" ]
[ "0.773702", "0.7683835", "0.76752746", "0.7462103", "0.74167454", "0.7336416", "0.70802414", "0.70657045", "0.70034766", "0.6954941", "0.69006544", "0.68687546", "0.676691", "0.67314565", "0.67230797", "0.67230797", "0.67230797", "0.6720881", "0.6720881", "0.6685405", "0.6657924", "0.6655872", "0.66186553", "0.6578435", "0.6566544", "0.65465236", "0.6530952", "0.6446756", "0.64436054", "0.6426137", "0.6403771", "0.6396109", "0.6384296", "0.6375411", "0.6374073", "0.6370669", "0.6362905", "0.63612634", "0.6326967", "0.631797", "0.631797", "0.63115734", "0.63013184", "0.62788653", "0.6277477", "0.6219029", "0.619857", "0.61880875", "0.61862934", "0.6132913", "0.6118755", "0.61146444", "0.6100544", "0.6099348", "0.6095903", "0.60937303", "0.60895115", "0.60733336", "0.6062046", "0.604775", "0.6044385", "0.60388535", "0.6024703", "0.60003006", "0.59841394", "0.5969273", "0.5967889", "0.5952023", "0.59507096", "0.5949105", "0.5942718", "0.5941076", "0.59173745", "0.59017754", "0.59016746", "0.5900977", "0.588246", "0.58794314", "0.58753055", "0.5863087", "0.5862314", "0.586187", "0.5858823", "0.58397734", "0.5818572", "0.58124816", "0.5805855", "0.5804336", "0.57975984", "0.57945406", "0.5776787", "0.57763726", "0.5774252", "0.57731056", "0.57595253", "0.5755886", "0.57391983", "0.5737728", "0.5735868", "0.57271373" ]
0.79249084
0
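A minimal check of the twos_complement entry above (a sketch, assuming the default 16-bit width); the asserts exercise the sign-bit handling at the boundaries.

def twos_complement(input_value, num_bits=16):
    # Interpret the low num_bits of input_value as a signed two's complement integer.
    mask = 2 ** (num_bits - 1)
    return -(input_value & mask) + (input_value & ~mask)

assert twos_complement(0xFFFF) == -1        # all bits set -> -1
assert twos_complement(0x8000) == -32768    # only the sign bit set -> most negative value
assert twos_complement(0x7FFF) == 32767     # sign bit clear -> value unchanged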
Transfer models to target port
def transfer(self, target_port: Port, evaluator: Evaluator, config_uids: List[int] = None) -> None: if target_port.name not in self.transfer_defs: print(f"No transfer definition found for target port '{target_port.name}'") return # transfer definitions for specified target port tds = self.transfer_defs[target_port.name] output_dir = os.path.join(script_dir, os.pardir, "output") training_type = "transfer" print(f"TRANSFERRING MODELS TO TARGET PORT '{target_port.name}'") if config_uids is not None: print(f"Transferring configs -> {config_uids} <-") window_width = 50 num_epochs = 25 train_lr = 0.01 fine_num_epochs = 20 fine_tune_lr = 1e-5 batch_size = 1024 # skip port if fully transferred num_not_transferred = 0 for td in tds: for config in self.transfer_configs: if not self._is_transferred(target_port.name, td.base_port_name, config.uid): # print(f"Not transferred: {td.base_port_name} -> {target_port.name} ({config.uid})") num_not_transferred += 1 num_transfers = len(tds) * len(self.transfer_configs) print(f"Transferred count {num_transfers - num_not_transferred}/{num_transfers}") if num_not_transferred == 0: print(f"All transfers done for target port '{target_port.name}': Skipping") return X_ts, y_ts = load_data(target_port, window_width) baseline = mean_absolute_error(y_ts, np.full_like(y_ts, np.mean(y_ts))) evaluator.set_naive_baseline(target_port, baseline) print(f"Naive baseline: {baseline}") # X_train_orig, X_test_orig, y_train_orig, y_test_orig = train_test_split(X_ts, y_ts, test_size=0.2, # random_state=42, shuffle=False) # train_optimizer = Adam(learning_rate=train_lr) # fine_tune_optimizer = Adam(learning_rate=fine_tune_lr) for td in tds: print(f".:'`!`':. TRANSFERRING PORT {td.base_port_name} TO {td.target_port_name} .:'`!`':.") print(f"- - Epochs {num_epochs} </> </> Learning rate {train_lr} - -") print(f"- - Window width {window_width} </> Batch size {batch_size} - -") # print(f"- - Number of model's parameters {num_total_trainable_parameters(model)} device {device} - -") base_port = self.pm.find_port(td.base_port_name) if base_port is None: raise ValueError(f"Unable to associate port with port name '{td.base_port_name}'") # model = inception_time(input_shape=(window_width, 37)) # print(model.summary()) # apply transfer config for config in self.transfer_configs: if config_uids is not None and config.uid not in config_uids: continue if self._is_transferred(target_port.name, td.base_port_name, config.uid): print(f"Skipping config {config.uid}") continue print(f"\n.:'':. 
APPLYING CONFIG {config.uid} ::'':.") print(f"-> -> {config.desc} <- <-") print(f"-> -> nth_subset: {config.nth_subset} <- <-") print(f"-> -> trainable layers: {config.train_layers} <- <-") _, _, start_time, _, _ = decode_keras_model(os.path.split(td.base_model_path)[1]) model_file_name = encode_keras_model(td.target_port_name, start_time, td.base_port_name, config.uid) file_path = os.path.join(output_dir, "model", td.target_port_name, model_file_name) X_train_orig, X_test_orig, y_train_orig, y_test_orig = train_test_split(X_ts, y_ts, test_size=0.2, random_state=42, shuffle=False) train_optimizer = Adam(learning_rate=train_lr) fine_tune_optimizer = Adam(learning_rate=fine_tune_lr) checkpoint = ModelCheckpoint(file_path, monitor='val_mae', mode='min', verbose=2, save_best_only=True) early = EarlyStopping(monitor="val_mae", mode="min", patience=10, verbose=2) redonplat = ReduceLROnPlateau(monitor="val_mae", mode="min", patience=3, verbose=2) callbacks_list = [checkpoint, early, redonplat] # optimizer = Adam(learning_rate=lr) # # # configure model # model.compile(optimizer=optimizer, loss="mse", metrics=["mae"]) # load base model model = load_model(td.base_model_path) # if config.uid == 0: # print(model.summary()) # else: # print(model.summary()) # del model X_train = X_train_orig X_test = X_test_orig y_train = y_train_orig y_test = y_test_orig # apply transfer configuration if config.nth_subset > 1: if X_train.shape[0] < config.nth_subset: print(f"Unable to apply nth-subset. Not enough data") X_train = X_train_orig[0::config.nth_subset] X_test = X_test_orig[0::config.nth_subset] y_train = y_train_orig[0::config.nth_subset] y_test = y_test_orig[0::config.nth_subset] print(f"Orig shape: {X_train_orig.shape} {config.nth_subset} th-subset shape: {X_train.shape}") print(f"Orig shape: {X_test_orig.shape} {config.nth_subset} th-subset shape: {X_test.shape}") print(f"Orig shape: {y_train_orig.shape} {config.nth_subset} th-subset shape: {y_train.shape}") print(f"Orig shape: {y_test_orig.shape} {config.nth_subset} th-subset shape: {y_test.shape}") modified = False # freeze certain layers for layer in model.layers: if layer.name not in config.train_layers: modified = True print(f"setting layer {layer.name} to False") layer.trainable = False else: print(f"layer {layer.name} stays True") if modified: print(f"modified. compiling") # re-compile model.compile(optimizer=train_optimizer, loss="mse", metrics=["mae"]) # trainable_count = int(np.sum([K.count_params(p) for p in set(model.trainable_weights)])) # non_trainable_count = int(np.sum([K.count_params(p) for p in set(model.non_trainable_weights)])) trainable_count = count_params(model.trainable_weights) non_trainable_count = count_params(model.non_trainable_weights) print(f"Total params: {trainable_count + non_trainable_count}") print(f"Trainable params: {trainable_count}") print(f"Non trainable params: {non_trainable_count}") # transfer model result = model.fit(X_train, y_train, epochs=num_epochs, batch_size=batch_size, verbose=2, validation_data=(X_test, y_test), callbacks=callbacks_list) train_mae = result.history["mae"] val_mae = result.history["val_mae"] gc.collect() tune_result = None tune_train_mae = None tune_val_mae = None if config.tune: print(f"Fine-Tuning transferred model") # apply fine-tuning: unfreeze all but batch-normalization layers! 
for layer in model.layers: if not layer.name.startswith("batch_normalization"): layer.trainable = True model.compile(optimizer=fine_tune_optimizer, loss="mse", metrics=["mae"]) # print(f"model for fine tuning") # print(model.summary()) tune_result = model.fit(X_train, y_train, epochs=fine_num_epochs, batch_size=batch_size, verbose=2, validation_data=(X_test, y_test), callbacks=callbacks_list) tune_train_mae = tune_result.history["mae"] tune_val_mae = tune_result.history["val_mae"] model.load_weights(file_path) # set evaluation def _compute_mae(_val_mae: List[float], _tune_val_mae: List[float]) -> float: if _tune_val_mae is not None: _val_mae = _val_mae + _tune_val_mae return min(val_mae) evaluator.set_mae(target_port, start_time, _compute_mae(val_mae, tune_val_mae), base_port, config.uid) y_pred = model.predict(X_test) grouped_mae = evaluator.group_mae(y_test, y_pred) evaluator.set_mae(target_port, start_time, grouped_mae, base_port, config.uid) # save history history_file_name = encode_history_file(training_type, target_port.name, start_time, td.base_port_name, config.uid) history_path = os.path.join(output_dir, "data", target_port.name, history_file_name) np.save(history_path, [result.history, tune_result.history if tune_result else None]) # plot history plot_dir = os.path.join(output_dir, "plot") plot_history(train_mae, val_mae, plot_dir, target_port.name, start_time, training_type, td.base_port_name, config.uid, tune_train_mae, tune_val_mae) # evaluator.plot_grouped_mae(target_port, training_type, start_time, config.uid) plot_predictions(y_pred, y_test, plot_dir, target_port.name, start_time, training_type, td.base_port_name, config.uid) self.set_transfer(target_port.name, td.base_port_name, config.uid) del checkpoint, early, redonplat del X_train_orig, X_test_orig, y_train_orig, y_test_orig, model, X_train, y_train, X_test, y_test gc.collect() tf.keras.backend.clear_session() gc.collect() del X_ts, y_ts
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def deploy_to_device(self):\n if self.device_ids is not None and len(self.device_ids) > 1:\n if not isinstance(self.model, torch.nn.DataParallel):\n self.model = torch.nn.DataParallel(self.model, self.device_ids)\n\n self.model = self.model.to(self.device)\n self.criterion = self.criterion.to(self.device)", "def to(self, device):\n self.device = device\n self.model.to(self.device)", "def module_transfer_to_device(self) -> None:\n for name, module in self.modules.items():\n module.to(self.device)\n if self.device.type == 'cuda':\n self.modules[name] = torch.nn.DataParallel(module, self.gpu_ids)\n return", "def change_port( self ):\n # disconnect and delete controller\n self.delete_controller()\n \n # update port\n self.update_port()", "def __call__(self):\n topo.sim.connect(str(self.src),str(self.dest),\n self.projection_type,\n **self.parameters)", "def to_device(model, device):\n p = next(model.parameters())\n if p.device == device:\n return\n model.to(device)", "def install_sample(self, datapath, table_id):\n parser = datapath.ofproto_parser\n ofproto = datapath.ofproto\n # Incoming port 1.\n in_port = 1;\n for timeout in range(60, 1 ,-1):\n # Incoming Ethernet destination\n match = self.create_match(parser,\n {ofproto.OXM_OF_METADATA: timeout})\n # Output to port 2.\n output = parser.OFPActionOutput(2, 0)\n write = parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,\n [output])\n instructions = [write]\n flow_mod = self.create_flow_add(datapath, 100, timeout,\n table_id, match, instructions)\n datapath.send_msg(flow_mod)\n\n print \"sent flow_mod\"", "def transfer_weights(src_model, dest_model):\r\n # ingore the first layer Input()\r\n # layer 1-24 to 1-24\r\n for i in range(1, 24):\r\n dest_model.layers[i].set_weights(src_model.layers[i].get_weights())\r\n print(\"Partially load weights from layer 1-24 successfully!\")\r\n\r\n # layer 25-45 to 65-85\r\n for i in range(25, 45):\r\n dest_model.layers[i+40].set_weights(src_model.layers[i].get_weights())\r\n print(\"Partially load weights from layer 25-45 successfully!\")\r\n\r\n # layer 46-65 to 126-145\r\n for i in range(46, 65):\r\n dest_model.layers[i+80].set_weights(src_model.layers[i].get_weights())\r\n print(\"Partially load weights from layer 46-65 successfully!\")\r\n\r\n # 69 to 189\r\n dest_model.layers[69+120].set_weights(src_model.layers[69].get_weights())\r\n print(\"Partially load weights from layer 69 successfully!\")", "def forward(opt):\n my_utils.plant_seeds(randomized_seed=opt.randomize)\n os.makedirs(opt.output_dir, exist_ok=True)\n\n trainer = t.Trainer(opt)\n trainer.build_dataset_train_for_matching()\n trainer.build_dataset_test_for_matching()\n trainer.build_network()\n trainer.build_losses()\n trainer.network.eval()\n\n if opt.eval_list and os.path.isfile(opt.eval_list):\n source_target_files = np.loadtxt(opt.eval_list, dtype=str)\n source_target_files = source_target_files.tolist()\n for i, st in enumerate(source_target_files):\n source, target = st\n cat1, fname1 = source.split('/')\n fname1 = os.path.splitext(fname1)[0]\n cat2, fname2 = target.split('/')\n fname2 = os.path.splitext(fname2)[0]\n if len(opt.shapenetv1_path) > 0:\n source_target_files[i] = (os.path.join(opt.shapenetv1_path, cat1, fname1, \"model.obj\"), os.path.join(opt.shapenetv1_path, cat2, fname2, \"model.obj\"))\n elif len(opt.shapenetv2_path) > 0:\n source_target_files[i] = (os.path.join(opt.shapenetv2_path, cat1, fname1, \"models\", \"model_normalized.obj\"), os.path.join(opt.shapenetv2_path, cat2, fname2, \"models\", 
\"model_normalized.obj\"))\n elif (opt.eval_source != \"\" and opt.eval_source[-4:] == \".txt\") and (opt.eval_target != \"\" and opt.eval_target[-4:] == \".txt\"):\n source_target_files = [(figure_2_3.convert_path(opt.shapenetv1_path, opt.eval_source), figure_2_3.convert_path(opt.shapenetv1_path, opt.eval_target))]\n\n rot_mat = get_3D_rot_matrix(1, np.pi/2)\n rot_mat_rev = get_3D_rot_matrix(1, -np.pi/2)\n isV2 = len(opt.shapenetv2_path) > 0\n for i, source_target in enumerate(source_target_files):\n basename = get_model_id(source_target[0], isV2) + \"-\" + get_model_id(source_target[1], isV2)\n path_deformed = os.path.join(opt.output_dir, basename + \"-Sab.ply\")\n path_source = os.path.join(opt.output_dir, basename + \"-Sa.ply\")\n path_target = os.path.join(opt.output_dir, basename +\"-Sb.ply\")\n\n mesh_path = source_target[0]\n print(mesh_path)\n source_mesh_edge = get_shapenet_model.link(mesh_path)\n\n mesh_path = source_target[1]\n target_mesh_edge = get_shapenet_model.link(mesh_path)\n\n\n print(\"Deforming source in target\")\n\n source = source_mesh_edge.vertices\n target = target_mesh_edge.vertices\n\n pymesh.save_mesh_raw(path_source, source, source_mesh_edge.faces, ascii=True)\n pymesh.save_mesh_raw(path_target, target, target_mesh_edge.faces, ascii=True)\n\n if len(opt.shapenetv2_path) > 0:\n source = source.dot(rot_mat)\n target = target.dot(rot_mat)\n\n source = torch.from_numpy(source).cuda().float().unsqueeze(0)\n target = torch.from_numpy(target).cuda().float().unsqueeze(0)\n\n with torch.no_grad():\n source, _, _, _, _ = loss.forward_chamfer(trainer.network, source, target, local_fix=None,\n distChamfer=trainer.distChamfer)\n\n try:\n source = source.squeeze().cpu().detach().numpy()\n if len(opt.shapenetv2_path) > 0:\n source = source.dot(rot_mat_rev)\n P2_P1_mesh = pymesh.form_mesh(vertices=source, faces=source_mesh_edge.faces)\n pymesh.save_mesh(path_deformed, P2_P1_mesh, ascii=True)\n\n # print(\"computing signal tranfer form source to target\")\n # high_frequencies.high_frequency_propagation(path_source, path_deformed, path_target)\n except Exception as e:\n print(e)\n import pdb; pdb.set_trace()\n path_deformed = path_deformed[:-4] + \".pts\"\n save_pts(path_deformed, source.squeeze().cpu().detach().numpy())", "def transfer(self, addr, port, object_id):\n return libplasma.transfer(self.conn, object_id, addr, port)", "def transfer(self):\n pass", "def hard_update_target_network(self,step):\n \n if step % self.C == 0:\n pars = self.model.get_weights()\n self.target_model.set_weights(pars)", "def hard_update_target_network(self,step):\n \n if step % self.C == 0:\n pars = self.model.get_weights()\n self.target_model.set_weights(pars)", "def port_setup(robot_name, num_cameras):\n\tglobal local_in_port\n\tglobal local_out_port\n\tglobal local_GPS_port\n\tglobal local_Dest_port\n\n\tglobal local_in_port_name\n\tglobal local_out_port_name\n\tglobal local_GPS_port_name\n\tglobal local_Dest_port_name\n\n\tglobal local_Radio_in_port\n\tglobal local_Radio_out_port\n\n\tglobal ors_in_port_name\n\tglobal ors_out_port_name\n\tglobal ors_GPS_port_name\n\tglobal ors_Dest_port_name\n\tglobal ors_Radio_in_port_name\n\tglobal ors_Radio_out_port_name\n\n\t# Define the names for all the ports\n\tport_prefix = \"/ors/robots/\" + robot_name + \"/\"\n\tlocal_port_prefix = \"/atrv_client/\" + robot_name + \"/\"\n\tview_prefix = \"/img/\" + robot_name + \"/\"\n\n\tors_in_port_name = port_prefix + \"in\"\n\tors_out_port_name = port_prefix + \"out\"\n\n\tors_Dest_port_name = port_prefix + 
\"Motion_Controller/in\"\n\tors_GPS_port_name = port_prefix + \"GPS/out\"\n\n\tors_Radio_out_port_name = port_prefix + \"Radio/out\"\n\tors_Radio_in_port_name = port_prefix + \"Radio/in\"\n\n\tlocal_in_port_name = local_port_prefix + \"in/\"\n\tlocal_out_port_name = local_port_prefix + \"out/\"\n\n\tlocal_GPS_port_name = local_port_prefix + \"GPS/in/\"\n\tlocal_Dest_port_name = local_port_prefix + \"Motion_Controller/out/\"\n\n\tlocal_Radio_in_port_name = local_port_prefix + \"Radio/in\"\n\tlocal_Radio_out_port_name = local_port_prefix + \"Radio/out\"\n\n\t# Start the yarp network connection\n\tyarp.Network.init()\n\n\t# Open the client ports\n\tlocal_in_port = yarp.BufferedPortBottle()\n\tlocal_in_port.open(local_in_port_name)\n\tlocal_out_port = yarp.BufferedPortBottle()\n\tlocal_out_port.open(local_out_port_name)\n\n\tlocal_GPS_port = yarp.BufferedPortBottle()\n\tlocal_GPS_port.open(local_GPS_port_name)\n\tlocal_Dest_port = yarp.BufferedPortBottle()\n\tlocal_Dest_port.open(local_Dest_port_name)\n\n\tlocal_Radio_out_port = yarp.BufferedPortBottle()\n\tlocal_Radio_out_port.open(local_Radio_out_port_name)\n\tlocal_Radio_in_port = yarp.BufferedPortBottle()\n\tlocal_Radio_in_port.open(local_Radio_in_port_name)\n\n\t# Connect the client ports to the simulator ports\n\tyarp.Network.connect (local_out_port_name, ors_in_port_name)\n\tyarp.Network.connect (ors_out_port_name, local_in_port_name)\n\n\tyarp.Network.connect (ors_GPS_port_name, local_GPS_port_name)\n\tyarp.Network.connect (local_Dest_port_name, ors_Dest_port_name)\n\n\tyarp.Network.connect (local_Radio_out_port_name, ors_Radio_in_port_name)\n\tyarp.Network.connect (ors_Radio_out_port_name, local_Radio_in_port_name)\n\n\n\t# Connect the cameras to yarpview windows\n\tprint (\" * Initializing yarpview windows.\")\n\tfor id in range(int(num_cameras)):\n\t\t# Build the name of the camera\n\t\tcamera_name = \"Camera{0}\".format(id+1)\n\n\t\t# Prepare the ports to be used\n\t\timg_view_port = view_prefix + camera_name\n\t\tatrv_camera_port = port_prefix + camera_name\n\n\t\tyarp.Network.connect (atrv_camera_port, img_view_port)", "def to(self, device):\n self.detector.to(device)\n # self.recognizer.to(device)\n self.shared_conv.to(device)", "def _generate_transfers(self) -> Dict[str, List[TransferDefinition]]:\n config = read_json(self.config_path)\n transfer_defs = {}\n ports = list(config[\"ports\"])\n permutations = list(itertools.permutations(ports, r=2))\n\n # for pair in _permute(config[\"ports\"]):\n for pair in permutations:\n base_port, target_port = self.pm.find_port(pair[0]), self.pm.find_port(pair[1])\n if target_port is None:\n raise ValueError(f\"No port found: Unable to transfer from base-port with name '{base_port.name}'\")\n if target_port is None:\n raise ValueError(f\"No port found: Unable to transfer to target-port with name '{pair[1]}'\")\n\n trainings = self.pm.load_trainings(base_port, self.output_dir, self.routes_dir, training_type=\"base\")\n # print(f\"loaded trainings. base port {base_port.name}:\\n{trainings.keys()}\")\n if len(trainings.keys()) < 1:\n print(f\"No base-training found for port '{base_port.name}'. Skipping\")\n continue\n\n training = list(trainings.values())[-1][0]\n # print(f\"training ({len(trainings.values())}): {training}\")\n # print(f\"Pair {base_port.name} ({len(trainings)} base-trains) -> {target_port.name}. 
\"\n # f\"Using latest at '{training.start_time}'\")\n verify_output_dir(self.output_dir, target_port.name)\n td = TransferDefinition(base_port_name=base_port.name,\n base_model_path=training.model_path,\n target_port_name=target_port.name,\n target_routes_dir=os.path.join(self.routes_dir, target_port.name),\n target_model_dir=os.path.join(self.output_dir, \"model\", target_port.name),\n target_output_data_dir=os.path.join(self.output_dir, \"data\", target_port.name),\n target_plot_dir=os.path.join(self.output_dir, \"plot\", target_port.name),\n target_log_dir=os.path.join(self.output_dir, \"log\", target_port.name))\n name = target_port.name\n if name in transfer_defs:\n transfer_defs[target_port.name].append(td)\n else:\n transfer_defs[target_port.name] = [td]\n return transfer_defs", "def transfer(self, address, direction, repeats):\n if direction == \"in\":\n out_data = (\n \"/\"\n + str(address)\n + self.switch_valve(\"inlet\")\n + self.goto_position(stroke_volume)\n + self.switch_valve(\"outlet\")\n + self.goto_position(0)\n + self.repeat(repeats)\n + \"R\"\n + \"\\r\"\n )\n if self.is_ready(address):\n self.connection.write(out_data.encode())\n print(\"Pump \" + str(address) + \" is transferring from inlet to outlet \" + str(repeats) + \" times.\")\n self.is_ready(address)\n print(\"Done.\")\n elif direction == \"out\":\n out_data = (\n \"/\"\n + str(address)\n + self.switch_valve(\"outlet\")\n + self.goto_position(stroke_volume)\n + self.switch_valve(\"inlet\")\n + self.goto_position(0)\n + self.repeat(repeats)\n + \"R\"\n + \"\\r\"\n )\n if self.is_ready(address):\n self.connection.write(out_data.encode())\n print(\"Pump \" + str(address) + \" is transferring from inlet to outlet \" + str(repeats) + \" times.\")\n self.is_ready(address)\n print(\"Done.\")\n else:\n pass # return error", "def copy_para(from_model, to_model):\n for i, j in zip(from_model.trainable_weights, to_model.trainable_weights):\n j.assign(i)", "def invoke(self, msg, req):\n node = Node.create()\n node.acquire_lock()\n\n if msg.name == 'forward':\n try:\n with node.graph.as_default():\n if node.num_devices == 5:\n output, name = Model_5.forward(req['input'], req['next'], node)\n Thread(target=self.send, args=(output, name, req['tag'])).start()\n elif node.num_devices == 6:\n output, name = Model_6.forward(req['input'], req['next'], node)\n Thread(target=self.send, args=(output, name, req['tag'])).start()\n elif node.num_devices == 7:\n output, name = Model_7.forward(req['input'], req['next'], node)\n Thread(target=self.send, args=(output, name, req['tag'])).start()\n elif node.num_devices == 8:\n output, name = Model_8.forward(req['input'], req['next'], node)\n Thread(target=self.send, args=(output, name, req['tag'])).start()\n\n node.release_lock()\n return\n\n except Exception, e:\n node.log('Error', e.message)\n elif msg.name == 'update':\n \"\"\"update this node's task configuration,based on the received massage \"\"\"\n try:\n node.num_devices = req['num_devices']\n available_ip = req['available_ip']\n\n update_ip(get_file(node.num_devices), available_ip)\n load_ip(node)\n\n node.release_lock()\n return\n\n except Exception, e:\n node.log('Error', e.message)\n\n else:\n raise schema.AvroException('unexpected message:', msg.getname())", "def sync_target_network(self):\n for t, e in zip(\n self.target_network.trainable_variables, self.online_network.trainable_variables\n ):\n t.assign(e)", "def connectionMade(self):\n self.protocol.makeConnection(BridgeTransport(self.transport))", "def forward(self, output, 
target):\n raise NotImplementedError", "def cmd_port(args):", "def update_target_network(self):\r\n self.send(self.server_conn, (sys._getframe().f_code.co_name, {}))", "def run_model(project=None, model=None, raw=None, dyr=None, xls=None, path=None, server='tcp://127.0.0.1:5678'):\n ret = 0\n if (not project) or (not model):\n logging.error('RT-LAB project or model undefined.')\n sys.exit(-1)\n if (not raw) and (not xls):\n logging.error('PSS/E raw file or ePHASORsim Excel file undefined.')\n sys.exit(-1)\n if not dyr:\n logging.debug('PSS/E dyr file not specified')\n\n sim = SimControl(project, model, path)\n\n simulink = os.path.join(path,project, 'simulink')\n models = os.path.join(path,project, 'models')\n if not os.path.isdir(simulink):\n logging.error('No <{}> directory found.'.format(simulink))\n if not os.path.isdir(models):\n logging.error('No <{}> directory found.'.format(models))\n sys.exit(1)\n else:\n logging.info('Using <{}> directory'.format(models))\n modelPath = models\n else:\n logging.info('Using <{}> directory'.format(simulink))\n modelPath = simulink\n\n\n sim_data = LTBSetup(raw=raw, dyr=dyr, xls=xls, path=modelPath, model=model, simObject=sim)\n\n streaming = Streaming(name='sim', server=server, ltb_data=sim_data)\n\n sim.open()\n sim.load()\n\n sim_data.get_sysparam()\n sim_data.get_varheader_idxvgs()\n sim.set_settings(sim_data.Settings)\n # sim_data.Idxvgs['Line'].update(sim.add_branch_power_to_idxvgs())\n # sim_data.Varheader.extend(sim.add_vars_varheader(sim_data.Idxvgs['Line']))\n # sim_data.Idxvgs['Bus'].update(sim.add_bus_power_to_idxvgs())\n # sim_data.Varheader.extend(sim.add_vars_varheader(sim_data.Idxvgs['Bus']))\n streaming.send_init()\n logging.debug('Varheader, SysParam and Idxvgs sent.')\n sleep(0.5)\n\n sim.start()\n\n streaming.run()", "def run(self):\n self.socket.connect()\n with open('src/inputs/output.file', 'rb') as f:\n self.sent_bytes = f.read()\n self.socket.send(self.sent_bytes)\n self.socket.disconnect()\n self.socket.close()", "def trainNet():", "def bind_transport_to_device(device, protocol_refs):\n transport = protocol_refs.result()[0]\n protocol = protocol_refs.result()[1]\n \n protocol.device = device\n device.transport = transport\n device.send_request('model?power?volume?mute?source?freq?')", "def save_model(self):\n self.pred_net.save((self.save_path / \"iqn_pred_net\").absolute().as_posix())\n self.target_net.save((self.save_path / \"iqn_target_net\").absolute().as_posix())", "def update_target_network(self):\n self.target_Qmodel = clone_model(self.Qmodel)\n self.target_Qmodel.set_weights(self.Qmodel.get_weights())\n\n # target network is never compiled\n self.target_Qmodel.compile(loss='mse', optimizer=Adam())", "def mv_step(self):\n # def mv_all(self):\n self.device_reg_data &= ~(0x1 << 3)\n bus.write_byte_data(self.device_address, self.device_reg_mode1, self.device_reg_data)", "def patch(self):\n\t\t\n\t\t# Create tunnels\n\t\t(module, self.tunnel_source) = create_tunnel(self.remote_source_info)\n\t\tself.modules += [ module ]\n\t\t(module, self.tunnel_sink) = create_tunnel(self.remote_sink_info)\n\t\tself.modules += [ module ]\n\t\t\n\t\t# Connect them to the local devices\n\t\tself.modules = self.modules + [\n\t\t\tadd_loopback(self.tunnel_source, self.local_sink),\n\t\t\tadd_loopback(self.local_source, self.tunnel_sink)\n\t\t]", "def _send_model_to_solver(self, cpostr):\n pass", "def set_target(self, host, port):\r\n pass", "def transfer(self,source,target):\n self.__transfer_dict[tuple([source,target])]()", "def 
makeModel(self):\n\n # Get the script\n modelScript = os.path.join(self.datapath, 'make3FGLxml.py')\n if not os.path.isfile(modelScript):\n # download it\n print(\"\\t=== Downloading make3FGLxml.py ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/analysis/user/make3FGLxml.py -O {}'.format(modelScript))\n\n # Create the model using Tyrel's script\n galModel = os.path.join(self.diffpath, 'gll_iem_v06.fits')\n isoModel = os.path.join(self.diffpath, 'iso_'+self.irf+'_v06.txt')\n if (not os.path.isfile(galModel)) or (not os.path.isfile(isoModel)):\n print(\"\\t=== Unable to find the diffuse models, check the variable '$FERMI_DIR' ===\")\n return\n if not os.path.isdir(self.extpath):\n print(\"\\t=== Unable to find models of extended sources, check the variable '$LATEXTDIR' ===\")\n return\n if not os.path.isfile(self.fermicat):\n # download it\n print(\"\\t=== Downloading 3FGL catalog ===\")\n os.system('wget https://fermi.gsfc.nasa.gov/ssc/data/access/lat/4yr_catalog/gll_psc_v16.fit -O {}'.format(self.fermicat))\n\n os.popen(\"python {} {} {} -o {} -G {} -g 'gll_iem_v06'\\\n -I {} -i 'iso_source_v06' -e {} -r 5 -R 10 -ER 10\\\n -s 9 -m False -GIF False\".format(modelScript, self.fermicat,\n self.ft1, self.model, galModel, isoModel, self.extpath))\n\n # Add the target to the model\n tmpName = self.model + '.tmp'\n rfil = open(self.model, 'r')\n wfil = open(tmpName, 'w')\n # Copy the XML to the temporary model\n wfil.writelines([l for l in rfil.readlines() if not l=='</source_library>']) # copy everything but the last line\n wfil.write(' <source ROI_Center_Distance=\"0.00\" name=\"TARGET\" type=\"PointSource\">\\n')\n wfil.write(' <spectrum type=\"PowerLaw2\">\\n')\n wfil.write(' <parameter free=\"1\" max=\"1000\" min=\"1e-05\" name=\"Integral\" scale=\"1e-08\" value=\"0.3591824258\"/>\\n')\n wfil.write(' <parameter free=\"1\" max=\"1\" min=\"-5\" name=\"Index\" scale=\"1\" value=\"-2.7\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"LowerLimit\" scale=\"1\" value=\"100\"/>\\n')\n wfil.write('<parameter free=\"0\" max=\"1000000\" min=\"20\" name=\"UpperLimit\" scale=\"1\" value=\"100000\"/>\\n')\n wfil.write(' </spectrum>\\n')\n wfil.write(' <spatialModel type=\"SkyDirFunction\">\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"RA\" scale=\"1.0\" value=\"'+str(self.ra)+'\"/>\\n')\n wfil.write(' <parameter free=\"0\" max=\"360.0\" min=\"-360.0\" name=\"DEC\" scale=\"1.0\" value=\"'+str(self.dec)+'\"/>\\n')\n wfil.write(' </spatialModel>\\n')\n wfil.write(' </source>\\n')\n wfil.write('</source_library>\\n')\n rfil.close()\n wfil.close()\n\n os.remove(self.model)\n os.rename(tmpName, self.model)\n \n print(\"\\t=== Source model {} added ===\".format(self.model))\n return", "def write_model_to_tensorboard(self, *args, **kwargs):\n pass", "def kk_assign_ports():\n print('******* Assigning ports')\n for device in ci_addrs.switches_kk_all:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_kk_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_kk_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_kk_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')", "def run(self, api, media_type, data, no_of_ports):\n # router id is retrieved with router get command\n # the first router id available is used for the operation\n router_id 
= self._get_or_create_midonet_router(media_type['router'])\n post_api = \"routers/\" + router_id + \"/\" + api\n # set header with content-type and authentication token\n header = {\"Content-Type\": media_type['port'], \"X-Auth-Token\": \"%s\"\n % AUTH_TOKEN}\n cidr = data[\"networkAddress\"] + '/' + data[\"networkLength\"]\n ip_list = netaddr.IPNetwork(cidr)\n\n for _ in range(no_of_ports):\n # port address is generated randomly in the cidr\n port_address = str(random.choice(ip_list))\n LOG.debug(\"port_address is: %s\" % port_address)\n data[\"portAddress\"] = port_address\n # create port\n self._create_router_port(\"POST\", post_api, header, data)", "def update_target_network(self):\n variables = self.online_network.trainable_variables\n variables_copy = [tf.Variable(v) for v in variables]\n self.target_network.trainable_variables = variables_copy", "def load_weigths_into_target_network(self):\n logging.debug(\"Transfer Weight!\")\n self.network.save_weights(self._save_path)\n self.target_network.load_weights(self._save_path)", "def _send_data_to_nn(self,wbtData):\n\t\tself._neuralNetwork.stdin.write(\"COMM IN\\n\") # this shitty COMM IN is not really needed..to modify in closedloop.py\n\t\tself._neuralNetwork.stdin.write(wbtData)", "def deploy_model(client, model_name):\n print(\"Deploying AutoML Tables model...\")\n deploy_model_response = client.deploy_model(model_name)\n api = client.transport._operations_client\n while deploy_model_response.done is False:\n deploy_model_response = api.get_operation(deploy_model_response.name)\n time.sleep(10)\n print(\"Done\")", "def update_target_model(self):\n self.target_model.set_weights(self.model.get_weights())", "def update_target_model(self):\n self.target_model.set_weights(self.model.get_weights())", "def update_target_model(self):\n self.target_model.set_weights(self.model.get_weights())", "def forward(self, obs):\n\t\tpass", "def Forwarding(self):\n # Mapping between sending core on the tester (key) and receiving core on the\n # tester (t_rx), receiving core on the SUT (s_rx) and sending core on\n # the SUT (s_tx).\n core_map = {\n 1:{'t_rx':2, 's_rx':1, 's_tx':1, 's_mac':[0x77, 0x77, 0x77, 0x00, 0x00, 0x01]},\n 2:{'t_rx':1, 's_rx':2, 's_tx':2, 's_mac':[0x77, 0x77, 0x77, 0x00, 0x00, 0x02]},\n 3:{'t_rx':4, 's_rx':3, 's_tx':3, 's_mac':[0x77, 0x77, 0x77, 0x00, 0x00, 0x03]},\n 4:{'t_rx':3, 's_rx':4, 's_tx':4, 's_mac':[0x77, 0x77, 0x77, 0x00, 0x00, 0x04]},\n }\n\n for tx_core in sorted(self._cores):\n # The cores must be running for the RX task, so disable all TX\n # (except for the port that is tested) by setting the speed to 0.\n self._tester.set_speed(self._cores, 0)\n self._tester.start_all()\n self._tester.dump_rx(core_map[tx_core]['t_rx'], 1)\n self._tester.set_count(1, [tx_core])\n self._tester.set_speed([tx_core], 100)\n\n logging.verbose('Sending a packet on tester core %d', tx_core)\n self._sut.start_all()\n sleep(0.5)\n self._tester.stop_all()\n self._sut.stop_all()\n\n _, t_tx, _, _ = self._tester.rx_stats([tx_core])\n t_rx, _, _, _ = self._tester.rx_stats([core_map[tx_core]['t_rx']], 1)\n self.equal(t_tx, 1, 'Tester core {} must send a single packet'.format(tx_core))\n self.equal(t_rx, 1, '... 
and tester core {} must receive a single packet'.format(core_map[tx_core]['t_rx']))\n\n # Test packet contents\n self._tester.get_data(True)\n pkt_dump = self._tester.get_packet_dump()\n port_id, data_len, payload = pkt_dump.port_id(), pkt_dump.data_len(), pkt_dump.payload().tolist()\n logging.debug(\"Packet dump: port %d, len %d, contents: %s\", port_id, data_len, payload)\n\n self.equal(data_len, 60, \"... and packet must be 60 bytes\")\n self.cmp(payload[ 0: 6], [0x00, 0x00, 0x01, 0x00, 0x00, 0x01], \" ... and dst MAC must change\")\n self.cmp(payload[ 6:12], core_map[tx_core]['s_mac'], \" ... and src MAC must be correct\")\n self.cmp(payload[12:14], [0x08, 0x00], \" ... and EtherType must be IPv4\")\n self.cmp(payload[14:15], [0x45], \" ... and IP version and IHL must not change\")\n self.cmp(payload[15:16], [0x00], \" ... and DSCP and ECN must not change\")\n self.cmp(payload[16:18], [0x00, 0x1c], \" ... and total IP length must not change\")\n self.cmp(payload[18:20], [0x00, 0x01], \" ... and identification field must not change\")\n self.cmp(payload[20:22], [0x00, 0x00], \" ... and flags and fragment offset must not change\")\n self.cmp(payload[22:23], [0x40], \" ... and TTL must not change\")\n self.cmp(payload[23:24], [0x11], \" ... and protocol must be UDP\")\n self.cmp(payload[24:26], [0xf7, 0x7D], \" ... and checksum must be correct\")\n self.cmp(payload[26:30], [192, 168, 1, 1], \" ... and src IP must not change\")\n self.cmp(payload[30:34], [192, 168, 1, 1], \" ... and dst IP must not change\")\n self.cmp(payload[34:36], [0x13, 0x88], \" ... and UDP src port must not change\")\n self.cmp(payload[36:38], [0x13, 0x88], \" ... and UDP dst port must not change\")\n self.cmp(payload[38:40], [0x00, 0x08], \" ... and UDP length must not change\")\n self.cmp(payload[40:42], [0x55, 0x7b], \" ... and UDP checksum must be correct\")\n\n # Test where packets have been handled by the SUT\n for rx_core in sorted(self._cores):\n s_rx, s_tx, _, _ = self._sut.rx_stats([rx_core])\n if rx_core == core_map[tx_core]['s_rx']:\n self.equal(s_rx, 1, '... and SUT core {} must receive a single packet'.format(rx_core))\n self.equal(s_tx, 1, '... and SUT core {} must transmit a single packet'.format(rx_core))\n else:\n self.equal(s_rx, 0, '... and SUT core {} must not receive packets'.format(rx_core))\n self.equal(s_tx, 0, '... and SUT core {} must not transmit packets'.format(rx_core))\n\n\n # Test where the packet has been received by the tester\n # Skip core that's supposed to receive the packet, it has been\n # tested is the previous assertion\n if rx_core == core_map[tx_core]['t_rx']:\n continue\n\n t_rx, _, _, _ = self._tester.rx_stats([rx_core], 1)\n self.equal(t_rx, 0, '... 
and tester core {} must not receive packets'.format(rx_core))\n\n self.reset_remotes()", "def _assign_port_to_device(self):\n for i in range(0, len(self.stlink_devices)):\n self.stlink_devices[i]['usb_port'] = self.get_port_from_serial(self.stlink_devices[i]['serial'])", "def send_destination(self):\n\n print('send the target to the robot')\n move_base_action_goal=MoveBaseActionGoal()\n move_base_action_goal.goal.target_pose.header.frame_id=\"map\"\n move_base_action_goal.goal.target_pose.pose.orientation.w=1\n move_base_action_goal.goal.target_pose.pose.position.x=self.x_des\n move_base_action_goal.goal.target_pose.pose.position.y=self.y_des\n print('des_x='+str(self.x_des))\n print('des_y='+str(self.y_des))\n self.des_pub.publish(move_base_action_goal)", "def dst_nat_into_vrf():\n\t\n device_params = {\n 'device_type': 'mikrotik_routeros',\n 'port': '11209',\n 'username': 'admin'}\n \t\t\n device_params['ip'] = input('IP Address of managed device: ')\n nd_port = input('SSH port. Blank, if default(11209): ')\n if nd_port:\n device_params['port'] = nd_port\n nd_user = input('Username. Blank, if default (admin): ')\n if nd_user:\n device_params['username'] = nd_user\n device_params['password'] = getpass.getpass()\n outside_address = input('Put outside address for dstnat(default - 93.189.145.82): ')\n if not outside_address:\n outside_address = '93.189.145.82'\n #outside_int = input('Put outside interface (default - ether2(DC Kraud outside int)): ')\n #if not outside_port:\n # outside_port = 'ether2'\n outside_port_dstnat = input('Put outside port for dstnat(Public port): ')\n inside_port = input('Put destination port(only port):') \n inside_address = input('Put inside address for dstnat (Inside adress): ')\n commands = []\n commands.append(f'/ip firewall mangle add action=mark-connection chain=prerouting connection-state=new dst-address={outside_address} dst-port={outside_port_dstnat} in-interface=ether2 new-connection-mark=into-vrf passthrough=yes protocol=tcp comment=\"DST_NAT_MANGLE_RULE_BY_SCRIPT FOR LEAKING FROM VRF\"')\n commands.append(f'/ip firewall nat add action=dst-nat chain=dstnat comment=\"DST_NAT_MANGLE_RULE_BY_SCRIPT FOR LEAKING FROM VRF\" dst-address={outside_address} dst-port={outside_port_dstnat} in-interface=ether2 protocol=tcp to-addresses={inside_address} to-ports={inside_port}')\n \n with ConnectHandler(**device_params) as ssh:\n for comm in commands:\n ssh.send_command(comm)\n return print(f'\"{commands[0]}\" and \"{commands[1]}\" are sent to device')", "def _export_model(self):\n graph = ComputeGraph.from_onnx(self.onnx_model.graph)\n\n print(\"Running constant propagation\")\n constant_states = constant_propagation(graph)\n\n self._remove_constants(graph, constant_states)\n self._remove_nops(graph, constant_states)\n\n # Add shape information from constant propagation:\n for var, res in constant_states.items():\n if var in graph.shape_dict:\n shape = graph.shape_dict[var]\n if res.shape != shape:\n print(\"Warning: Shapes do not match: \", var, res.shape, shape)\n if res.shape is not None:\n print(\"Replacing shape {} with {}\".format(shape, res.shape))\n graph.shape_dict[var] = res.shape\n elif res.shape is not None:\n graph.shape_dict[var] = res.shape\n\n print(\"Inference graph:\")\n for node in graph.nodes:\n inputs = node.inputs\n input_shapes = (str(graph.shape_dict[i]) for i in node.inputs if i in graph.shape_dict)\n outputs = node.outputs\n output_shapes = (str(graph.shape_dict[o]) for o in node.outputs if o in graph.shape_dict)\n print(\"{:<24} {:<20} 
{:<30} {:<30} {:<20} {:<30}\".format(node.name,\n node.op_type,\n \",\".join(inputs),\n \",\".join(input_shapes),\n \",\".join(outputs),\n \",\".join(output_shapes)))\n\n memory_manager = MemoryManager()\n\n self._generate_weights_file(graph)\n\n self.dummy_input = generate_dummy_main(graph)\n\n self.reference_input = generate_reference_main(graph)\n\n self._generate_network_initialization(graph, memory_manager)\n\n self._generate_network_cleanup(graph, memory_manager)\n\n implementations = self._select_implementations(graph, memory_manager)\n schedule = self._get_schedule(graph, implementations)\n # self._print_live_ranges(schedule)\n\n input_names = [\"input_\"+name.replace('.', '_').replace(':', '_').replace('/', '_')\n for name, type, shape in graph.inputs]\n output_names = [\"output_\"+name.replace('.', '_').replace(':', '_').replace('/', '_')\n for name, type, shape in graph.outputs]\n\n \"\"\"Currently we only allow single input (no batch processing) to the CNN, but this may be multi-channel input\"\"\"\n inputs = graph.inputs\n if len(inputs) > 1:\n print(\"ERROR: Multiple inputs not supported!\")\n exit(1)\n else:\n input_shape = graph.shape_dict[inputs[0].name]\n print(\"Input shape: {}\".format(input_shape))\n\n if len(input_shape) == 4:\n if input_shape[0] != 1:\n print(\"ERROR: Inference for batch_size > 1 currently not supported!\")\n exit(1)\n\n input_defs = [\"pico_cnn::naive::Tensor *\"+n for n in input_names]\n\n elif len(input_shape) == 3:\n if input_shape[0] != 1:\n print(\"ERROR: Inference for batch_size > 1 currently not supported!\")\n exit(1)\n\n input_defs = [\"pico_cnn::naive::Tensor *\"+n for n in input_names]\n\n elif len(input_shape) == 2:\n print(\"Input is one-dimensional (batch_size = 1 and num_input_channels = 1)\")\n input_defs = [\"pico_cnn::naive::Tensor *\"+n for n in input_names]\n\n outputs = graph.outputs\n if len(outputs) > 1:\n print(\"ERROR: Multiple outputs not supported\")\n exit(1)\n else:\n output_shape = graph.shape_dict[outputs[0].name]\n print(\"Output shape: {}\".format(output_shape))\n\n if len(output_shape) == 2:\n print(\"Output is one-dimensional (batch_size = 1 and num_input_channels = 1)\")\n output_defs = [\"pico_cnn::naive::Tensor *\" + n for n in output_names]\n elif len(output_shape) == 3:\n print(\"ERROR: Unknown output shape of network: {}\".format(output_shape))\n exit(1)\n elif len(output_shape) == 4:\n print(\"ERROR: Multi-dimensional output is currently not supported.\")\n exit(1)\n\n network_def = \"void Network::run(\" + \", \".join(input_defs) + \", \" + \", \".join(output_defs) + \")\"\n network_def_header = \"void run(\" + \", \".join(input_defs) + \", \" + \", \".join(output_defs) + \")\"\n\n layer_declaration_code = \"\"\n layer_allocation_code = \"\"\n layer_execution_code = \"\"\n layer_deletion_code = \"\"\n\n \"\"\"Iterate over all tasks in the schedule, put some debug info in the code and the pico-cnn implementation.\"\"\"\n for task in schedule:\n num, node, impl = task\n layer_allocation_code += \" //Layer \" + str(num) + \" \" + node.name + \" \" + node.op_type + \"\\n\"\n layer_allocation_code += \" //Attributes\\n\"\n for key, val in node.attrs.items():\n layer_allocation_code += \" // \" + str(key) + \": \" + str(val) + \"\\n\"\n layer_allocation_code += \" //Parameters\\n\"\n layer_allocation_code += \" //Inputs: \" + \",\".join(node.inputs) + \"\\n\"\n layer_allocation_code += \" //Outputs: \" + \",\".join(node.outputs) + \"\\n\"\n layer_allocation_code += \" //Shape:\\n\"\n for i in node.inputs:\n 
layer_allocation_code += \" // {}: {}\\n\".format(i, graph.get_shape(i))\n for o in node.outputs:\n layer_allocation_code += \" // {}: {}\\n\".format(o, graph.get_shape(o))\n\n if impl:\n layer_declaration_code += impl.generate_declaration()\n layer_declaration_code += \"\\n\"\n\n layer_allocation_code += impl.generate_allocation()\n layer_allocation_code += \"\\n\"\n\n layer_execution_code += impl.generate_execution()\n layer_execution_code += \"\\n\"\n\n layer_deletion_code += impl.generate_deletion()\n layer_deletion_code += \"\\n\"\n\n else:\n print(\"ERROR: Unsupported layer: {}! Aborting code generation.\".format(node.op_type))\n return 1\n\n self.constructor_code += layer_allocation_code + \"\\n\"\n self.destructor_code += layer_deletion_code + \"\\n\"\n\n # # TODO: What does this loop do?\n # for id, buffer in memory_manager.buffers.items():\n # if graph.is_tensor(id):\n # continue\n # if graph.is_input(id):\n # continue\n # if graph.is_output(id):\n # continue\n\n network_code: Text = \"#include \\\"network.h\\\"\\n\\n\"\n network_code += \"Network::Network() {\\n\\n\"\n network_code += self.constructor_code + \"\\n\"\n network_code += \"}\\n\\n\"\n network_code += \"Network::~Network() {\\n\"\n network_code += self.destructor_code + \"\\n\"\n network_code += \"}\\n\\n\"\n network_code += network_def+\"{\\n\"\n network_code += layer_execution_code\n\n network_code += \"}\\n\\n\"\n\n network_header = \"#ifndef NETWORK_H\\n\"\n network_header += \"#define NETWORK_H\\n\\n\"\n network_header += \"#include \\\"pico-cnn/pico-cnn.h\\\"\\n\\n\"\n network_header += \"class Network {\\n\"\n network_header += \"public:\\n\"\n network_header += \"Network();\\n\"\n network_header += \"~Network();\\n\"\n network_header += network_def_header + \"; \\n\\n\"\n network_header += self.buffer_declaration + \"\\n\"\n network_header += layer_declaration_code\n network_header += \"};\\n\"\n network_header += \"#endif //NETWORK_H\\n\"\n\n self.network_code = network_code\n self.network_header = network_header\n\n \"\"\"\n Create Makefile containing a target for the generated dummy input and a network specific one.\n The code for the network specific input has to be written manually.\n \"\"\"\n # TODO: Does this need to be more sophisticated?\n self.makefile = \"CC = g++\\n\"\n self.makefile += \"CFLAGS = -std=c++11 -Wall -O2 -march=native -DINFO\\n\"\n self.makefile += \"LDFLAGS = -L../../../pico-cnn\\n\"\n self.makefile += \"LD_LIBS = -lpico-cnn -lm\\n\\n\"\n self.makefile += \"# list of all generated .cpp files.\\n\"\n self.makefile += \"NETWORK_LIST = network.cpp\"\n self.makefile += \"\\n\\ndummy_input: dummy_input.cpp $(NETWORK_LIST) libpico-cnn.a\\n\\t\"\n self.makefile += \"$(CC) dummy_input.cpp $(NETWORK_LIST) -I../../.. $(CFLAGS) $(LDFLAGS) $(LD_LIBS) -o dummy_input\"\n self.makefile += \"\\n\\nreference_input: reference_input.cpp $(NETWORK_LIST) libpico-cnn.a\\n\\t\"\n self.makefile += \"$(CC) reference_input.cpp $(NETWORK_LIST) -I../../.. $(CFLAGS) \" \\\n \"$(LDFLAGS) $(LD_LIBS) -o reference_input\"\n self.makefile += \"\\n\\n{}: {}.cpp $(NETWORK_LIST) libpico-cnn.a\\n\\t\".format(self.model_name, self.model_name)\n self.makefile += \"$(CC) {}.cpp $(NETWORK_LIST) -I../../.. 
$(CFLAGS) \" \\\n \"$(LDFLAGS) $(LD_LIBS) -o {}\".format(self.model_name, self.model_name)\n self.makefile += \"\\n\\nall: dummy_input reference_input {}\".format(self.model_name)\n self.makefile += \"\\n\\n.PHONY: clean\\n\"\n self.makefile += \"clean:\\n\\trm -rf {} dummy_input reference_input\\n\".format(self.model_name)\n self.makefile += \"\\n\\n.PHONY: libpico-cnn.a\\n\"\n self.makefile += \"libpico-cnn.a:\\n\\t$(MAKE) -C ../../../pico-cnn\"\n\n self.save(\"./generated_code/{}\".format(self.model_name))", "def update_targets(self):\n self.actor.update_target_network()\n self.critic.update_target_network()", "def handle_connect(self):\n #print \"Controller initiated on: %s:%s\" % (self.address, self.port)\n if not self.needs_migration:\n self.buffer.append(messages.of_hello)", "def run(self):\n self.network_ctrl.connect_with_remote_system()\n cmd = self.create_command(self.on_or_off, self.port)\n self.network_ctrl.send_command(cmd)\n\n check = self._port_status(self.port)\n result = self.network_ctrl.send_command(check)\n result = result[0]\n if self.on_or_off:\n if result == \"1\":\n self.router.mode = Mode.normal\n logging.info(\"[+] Successfully switched on port \" + str(self.port))\n else:\n self.router.mode = Mode.unknown\n logging.info(\"[-] Error switching on port \" + str(self.port))\n else:\n if result == \"0\":\n self.router.mode = Mode.off\n logging.info(\"[+] Successfully switched off port \" + str(self.port))\n else:\n self.router.mode = Mode.unknown\n logging.info(\"[-] Error switching off port \" + str(self.port))\n\n self.network_ctrl.exit()", "def execute(cf):\n\n ##Ports and parameters\n train_set = cf.get_input(\"train_set\") #training set. Typically even_file\n test_set = cf.get_input(\"test_set\") #test set. Typically odd_file\n WM1 = cf.get_input(\"WM1\")\n WM2 = cf.get_input(\"WM2\")\n WM3 = cf.get_input(\"WM3\")\n WM4 = cf.get_input(\"WM4\")\n WM5 = cf.get_input(\"WM5\")\n WM6 = cf.get_input(\"WM6\")\n WM7 = cf.get_input(\"WM7\")\n WM8 = cf.get_input(\"WM8\")\n WM9 = cf.get_input(\"WM9\")\n WM10 = cf.get_input(\"WM10\")\n WM11 = cf.get_input(\"WM11\")\n WM12 = cf.get_input(\"WM12\")\n WM13 = cf.get_input(\"WM13\")\n WM14 = cf.get_input(\"WM14\")\n WM15 = cf.get_input(\"WM15\")\n WM16 = cf.get_input(\"WM16\")\n WM17 = cf.get_input(\"WM17\")\n WM18 = cf.get_input(\"WM18\")\n WM19 = cf.get_input(\"WM19\")\n WM20 = cf.get_input(\"WM20\")\n WMdir = cf.get_input(\"WMdir\")\n WMdir2 = cf.get_input(\"WMdir2\")\n basefreqs = cf.get_input(\"BaseFrequencies\")\n ufemodel_path = cf.get_input(\"UFEmodel\")\n\n bestWM = cf.get_output(\"BestWM\")\n log_file = cf.get_output(\"log_file\")\n interm = cf.get_output(\"intermediate\")\n\n genome = cf.get_parameter('genome', 'string')\n motevo_path = cf.get_parameter('motevo_path', 'string')\n aligned = cf.get_parameter(\"aligned\", \"boolean\")\n\n os.mkdir(interm)\n\n\n\n # Read stuff in\n WMs = [i for i in[WM1, WM2, WM3, WM4, WM5, WM6, WM7, WM8, WM9, WM10, WM11, WM12, WM13, WM14, WM15, WM16, WM17, WM18, WM19, WM20] if i]\n\n if WMdir:\n WMs += [os.path.join(WMdir, wm) for wm in os.listdir(WMdir)]\n\n if WMdir2:\n WMs += [os.path.join(WMdir2, wm) for wm in os.listdir(WMdir2)]\n\n f = open(basefreqs)\n ATfreq = float(f.readline().strip().split()[1])\n GCfreq = float(f.readline().strip().split()[1])\n f.close()\n\n\n # Compute stuff: optimal priors and then likelihood of test set\n optpriors = []\n logliks = []\n\n for i, WM in enumerate(WMs):\n\n wmlen = len(open(WM).readlines())-4\n\n # 1. 
Fit prior on training set with EM\n tag = 'fitP_%i' %(i+1)\n params, sites, priors, loglikfile = giveMotevoParamFile(genome, wmlen, interm, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior=1, bgorder=0, bgprior=0.99)\n r = runMotevo(motevo_path, train_set, params, WM, interm, tag)\n if r != 0:\n print 'motevo failed ', tag\n sys.exit(1)\n\n # prior file:\n # WM_name final_prior nr_of_sites density\n # /import/bc2/home/nimwegen/GROUP/hseq_pipeline/severin/Anduril/Pipeline/PipeLineSource/TESTRUN/NRF1_Z2/OUTPUT/NRF1_FgBg-runmotevoPG2_1/Logo 0.016554 635.008 0.251863\n # background 0.983446 37724.8 0.748137\n # UFEwm 0 0 0\n\n optprior = float(open(priors).readlines()[1].split()[1])\n bgprior=(1-optprior)\n print bgprior\n\n # 2. Compute log-likelihood on test set with optimal prior from training set and without EM\n tag = 'compLL_%i' %(i+1)\n params, sites, priors, loglikfile = giveMotevoParamFile(genome, wmlen, interm, tag, aligned, ufemodel_path, ATfreq, GCfreq, emprior=0, bgorder=0, bgprior=bgprior)\n runMotevo(motevo_path, train_set, params, WM, interm, tag)\n\n a = loadtxt(loglikfile, usecols=[1])\n ll = sum(a)\n\n logliks.append(ll)\n optpriors.append(optprior)\n\n print logliks\n\n\n\n #replace name in WM file with bestWM\n lines = open(WMs[argmax(logliks)]).readlines()\n lines[1] = 'NA BestWM\\n'\n bwm = open(bestWM, 'w')\n bwm.write(''.join(lines))\n\n\n l = open(log_file, 'w')\n\n l.write('WM_name\\tWM_path\\tlog_likelihood\\topt_prior\\n')\n\n names = ['WM_%i\\t%s\\t%.4f\\t%s' %(i+1, WMs[i], logliks[i], optpriors[i]) for i in arange(len(WMs))]\n\n l.write('\\n'.join(names))\n l.close()\n\n\n return 0", "def _update_target_model(self):\n self.target_network.model.set_weights(self.policy_network.model.get_weights())", "def copy_mx_model_to(model_path, model_epoch, output_folder):\n target_path = os.path.join(output_folder, os.path.basename(model_path))\n logger.info(\"Copying image model from {} to {}\".format(model_path,\n target_path))\n suffix = ['-symbol.json', '-%04d.params' % (model_epoch,)]\n for s in suffix:\n copyfile(model_path + s, target_path + s)\n return target_path", "def do_destination(self, args):\n self.destination = int(args)", "def push_model(config):\n util_logger.info('Backing up the model files to wandb')\n martifact = wandb.Artifact('%s_model' % config.wandb_name, type='model')\n martifact.add_dir(os.path.join(config.output_dir,\"best_model\"))\n #matrifact.add_file(os.path.join(config.output_dir,\"trainer_config.json\"))\n wandb.log_artifact(martifact)", "def _deploy_model_to_sagemaker(self) -> None:\n\n self.logger.info('Waiting for SageMaker to activate endpoint', send_db=True) # Update Information\n\n payload: dict = self.task.parsed_payload\n\n self.logger.info(\"Deploying model to SageMaker\", send_db=True)\n sagemaker.deploy(\n payload['app_name'],\n self.downloaded_model_path,\n # local path of model with suffix\n execution_role_arn=env_vars['SAGEMAKER_ROLE'],\n region_name=payload['sagemaker_region'],\n mode=payload['deployment_mode'],\n instance_type=payload['instance_type'],\n instance_count=int(payload['instance_count'])\n )", "def __init__(self, port):\n self.port = port\n self.action_type = 'output'", "def send_to_engine(self, wi):\n pass", "def sync_model(model):\n size = float(dist.get_world_size())\n\n for param in model.parameters():\n dist.broadcast(param.data, 0)", "def update_target_network(self):\n\n\t\tprint \"Updating Target DQN...\"\n\t\t\n\t\tself.update_operation.run()", "def send_to_port(self):\r\n time.sleep(2)\r\n # 
ser.write(\"R\".encode())\r\n ser.flush()\r\n ser.write(\"{},{},{},{},{}\".format(self.x_Pos, self.y_Pos, self.t_Tap, self.U_on, self.u_off).encode())\r\n # ser.flush()\r\n # while (1 == 1):\r\n # mydata = ser.readline().lstrip()\r\n # print(mydata.decode('utf-8'))\r\n # value = str(mydata)\r", "def mv_all(self):\n # def mv_step(self):\n self.device_reg_data &= ~(0x1 << 2)\n bus.write_byte_data(self.device_address, self.device_reg_mode1, self.device_reg_data)", "def setup_to_transfer_learn(model, base_model):\n for layer in base_model.layers:\n layer.trainable = False\n model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])", "def copy_model(self, tf_seed=0):\n\n # Assemble network_list\n target = NDN(self.network_list, ffnet_out=self.ffnet_out,\n noise_dist=self.noise_dist, tf_seed=tf_seed)\n\n target.poisson_unit_norm = self.poisson_unit_norm\n target.data_pipe_type = self.data_pipe_type\n target.batch_size = self.batch_size\n\n # Copy all the parameters\n for nn in range(self.num_networks):\n for ll in range(self.networks[nn].num_layers):\n target.networks[nn].layers[ll].weights = \\\n self.networks[nn].layers[ll ].weights.copy()\n target.networks[nn].layers[ll].biases = \\\n self.networks[nn].layers[ll].biases.copy()\n target.networks[nn].layers[ll].reg = \\\n self.networks[nn].layers[ll].reg.reg_copy()\n target.networks[nn].input_masks = deepcopy(self.networks[nn].input_masks)\n return target", "def map_uni(self, src_port, dst_port, command_logger=None):\r\n\r\n #Isolate source and destination port numbers from list provided by Cloudshell\r\n source = src_port[2]\r\n dest = dst_port[2]\r\n #Define URI to set rules via REST\r\n uri = 'http://' + self.address + '/rest/rules?'\r\n #Define rule names for the Packetmaster\r\n rulename = source + ' to ' + dest\r\n #Create the parameters for the rule to be added to the Packetmaster\r\n params = {'name': rulename,\r\n 'priority': 32768,\r\n 'match[in_port]': source,\r\n 'actions': dest}\r\n #Make REST post request for the rule to be created\r\n try:\r\n response = requests.post(uri, data=params, auth=(self.username, self.password))\r\n except ConnectionError as e:\r\n raise e", "def check_port_connections(self):\n all_ports = crest.get_all_ports(self.model)\n influences_to_target = {p: [] for p in all_ports}\n updates_to_target = {p: [] for p in all_ports}\n actions_to_target = {p: [] for p in all_ports}\n\n # fill data stores\n for inf in crest.get_all_influences(self.model):\n influences_to_target[inf.target].append(inf)\n\n for up in crest.get_all_updates(self.model):\n updates_to_target[up.target].append(up)\n\n for action in crest.get_all_actions(self.model):\n actions_to_target[action.target].append(action)\n\n for port in all_ports:\n assert not (len(influences_to_target[port]) > 0 and (\n len(updates_to_target[port]) > 0 or len(actions_to_target[port]) > 0)\n ), f\"There are [influences and (updates or actions)] writing to port {port._name} (entity: {port._parent._name})\"\n\n assert len(influences_to_target[port]) < 2, f\"There are two influences writing to {port._name}\"\n\n states = [update.state for update in updates_to_target[port]]\n assert len(states) == len(set(states)), f\"Port {port._name} (entity: {port._parent._name}) is written by multiple updates linked to the same state\"\n\n transitions = [action.transition for action in actions_to_target[port]]\n assert len(transitions) == len(set(transitions)), f\"Port {port._name} (entity: {port._parent._name}) is written by multiple actions linked 
to the same transition\"", "def update_target_network(self):\n self.target_dqn.set_weights.remote(self.dqn.get_weights.remote())", "def forward_ports(app, ports,ip):\n for p in ports[0:(len(ports)-1)]:\n\tprint p\n os.system('iptables -t nat -A PREROUTING -i eth0 -p tcp --dport %d -j DNAT --to %s:%d' % (p, ip, p))\n # the last port in ports is for remote access on 22 of LXC\n os.system('iptables -t nat -A PREROUTING -i eth0 -p tcp --dport %d -j DNAT --to %s:22' % (ports[len(ports)-1], ip))\n print \"Done port forwarding.\"", "def to_device(self, device):\n for i in range(self.num_layers):\n getattr(self, \"conv{}\".format(i+1)).to_device(device)\n self.to(device)\n return self", "def transfer_learning(nclass, base_model, loss=losses.binary_crossentropy):\n base_model.layers.pop()\n base_model.layers[-1].outbound_nodes = []\n base_model.outputs = [base_model.layers[-1].output]\n x = base_model.get_layer(\"dense_1\").output\n\n inp = base_model.input\n\n # x = base_model.output\n x = Dense(64, activation=\"relu\")(x)\n x = Dense(16, activation=\"relu\")(x)\n x = Dense(8, activation=\"relu\")(x)\n x = Dense(nclass, name=\"Output\", activation=\"sigmoid\")(x)\n\n model = models.Model(inputs=inp, outputs=x)\n\n # Freeze layers\n for layer in base_model.layers:\n layer.trainable = False\n\n opt = optimizers.Adam(0.001)\n model.compile(optimizer=opt, loss=loss, metrics=[\"acc\"])\n model.summary()\n return model", "def target_interfaces(self):", "def target_interfaces(self):", "def add_out_port(self, m: int, content: str, **opts) -> None:", "def tcp_forward(self, host_port, device_port):\n if self._ssh_connection:\n # We have to hop through a remote host first.\n # 1) Find some free port on the remote host's localhost\n # 2) Setup forwarding between that remote port and the requested\n # device port\n remote_port = self._ssh_connection.find_free_port()\n self._ssh_connection.create_ssh_tunnel(\n remote_port, local_port=host_port)\n host_port = remote_port\n self.forward(\"tcp:%d tcp:%d\" % (host_port, device_port))", "def setup(self):\n print(\"setup\")\n \n self.modelToUse = 1\n if self.train:\n print(\"train\")\n else:\n print(\"no train\")\n \n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n self.envSize = 17\n \n #init model\n if self.train or not os.path.isfile(\"my-saved-model.pt\"):\n self.logger.info(\"Setting up model from scratch.\")\n if self.modelToUse == 0:\n self.policy_net = Model_global_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_global_view(self.envSize, self.envSize, 6).to(device)\n elif self.modelToUse == 1:\n self.policy_net = Model_local_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_local_view(self.envSize, self.envSize, 6).to(device)\n else:\n self.policy_net = Model_combined_view(self.envSize, self.envSize, 6).to(device)\n self.model = Model_combined_view(self.envSize, self.envSize, 6).to(device)\n self.model.load_state_dict(self.policy_net.state_dict())\n self.model.eval()\n else:\n self.logger.info(\"Loading model from saved state.\")\n with open(\"my-saved-model.pt\", \"rb\") as file:\n if self.modelToUse == 0:\n self.model = Model_global_view(self.envSize, self.envSize, 6)\n elif self.modelToUse == 1:\n self.model = Model_local_view(self.envSize, self.envSize, 6)\n else:\n self.model = Model_combined_view(self.envSize, self.envSize, 6)\n if torch.cuda.is_available():\n self.model.load_state_dict(torch.load(file))\n self.model.to(device)\n else:\n self.model.load_state_dict(torch.load(file, 
map_location=device))", "def run_example(host, port):\r\n print \"host is %s:%d\"%(host,port)\r\n setup_db(\"sqlite:///torflow.sqlite\", echo=False)\r\n\r\n #l_session = tc_session()\r\n #print l_session.query(((func.count(Extension.id)))).filter(and_(FailedExtension.table.c.row_type=='extension', FailedExtension.table.c.from_node_idhex == \"7CAA2F5F998053EF5D2E622563DEB4A6175E49AC\")).one()\r\n #return\r\n #for e in Extension.query.filter(FailedExtension.table.c.row_type=='extension').all():\r\n # if e.from_node: print \"From: \"+e.from_node.idhex+\" \"+e.from_node.nickname\r\n # if e.to_node: print \"To: \"+e.to_node.idhex+\" \"+e.to_node.nickname\r\n #tc_session.remove()\r\n #return\r\n\r\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n s.connect((host,port))\r\n c = Connection(s)\r\n th = c.launch_thread()\r\n c.authenticate(control_pass)\r\n c.set_event_handler(TorCtl.ConsensusTracker(c))\r\n c.add_event_listener(ConsensusTrackerListener())\r\n c.add_event_listener(CircuitListener())\r\n\r\n print `c.extend_circuit(0,[\"moria1\"])`\r\n try:\r\n print `c.extend_circuit(0,[\"\"])`\r\n except TorCtl.ErrorReply: # wtf?\r\n print \"got error. good.\"\r\n except:\r\n print \"Strange error\", sys.exc_info()[0]\r\n \r\n c.set_events([EVENT_TYPE.STREAM, EVENT_TYPE.CIRC,\r\n EVENT_TYPE.NEWCONSENSUS, EVENT_TYPE.NEWDESC,\r\n EVENT_TYPE.ORCONN, EVENT_TYPE.BW], True)\r\n\r\n th.join()\r\n return", "def sync_target_models(self):\n for model, target_model in self.model_groups:\n for var, target_var in zip(\n model.trainable_variables, target_model.trainable_variables\n ):\n target_var.assign((1 - self.tau) * target_var + self.tau * var)", "def setup_to_transfer_learn(model):\n for layer in model.layers:\n layer.trainable = False\n\n #model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])", "def train(self, absList, modelFilename):\n pass", "def _send_all_data(self):\n admin_context = qcontext.get_admin_context()\n networks = {}\n ports = {}\n\n all_networks = super(QuantumRestProxyV2,\n self).get_networks(admin_context) or []\n for net in all_networks:\n networks[net.get('id')] = {\n 'id': net.get('id'),\n 'name': net.get('name'),\n 'op-status': net.get('admin_state_up'),\n }\n\n subnets = net.get('subnets', [])\n for subnet_id in subnets:\n subnet = self.get_subnet(admin_context, subnet_id)\n gateway_ip = subnet.get('gateway_ip')\n if gateway_ip:\n # FIX: For backward compatibility with wire protocol\n networks[net.get('id')]['gateway'] = gateway_ip\n\n ports = []\n net_filter = {'network_id': [net.get('id')]}\n net_ports = super(QuantumRestProxyV2,\n self).get_ports(admin_context,\n filters=net_filter) or []\n for port in net_ports:\n port_details = {\n 'id': port.get('id'),\n 'attachment': {\n 'id': port.get('id') + '00',\n 'mac': port.get('mac_address'),\n },\n 'state': port.get('status'),\n 'op-status': port.get('admin_state_up'),\n 'mac': None\n }\n ports.append(port_details)\n networks[net.get('id')]['ports'] = ports\n try:\n resource = '/topology'\n data = {\n 'networks': networks,\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n return ret\n except RemoteRestError as e:\n LOG.error(_('QuantumRestProxy: Unable to update remote network: '\n '%s'), e.message)\n raise", "def run(self) -> None:\n self.model = self.trainer.train_model(self.model, self.data)", "def forward(self, inputs):\r\n #print (len(inputs))\r\n out = self.fc1(inputs)\r\n out = self.fc2(out)\r\n self.out = 
out\r\n return out\r\n #raise NotImplementedError('Implement the forward method of the model')\r", "def send_flow_mod(self, datapath, flow_info, src_port, dst_port):\r\n parser = datapath.ofproto_parser\r\n actions = []\r\n actions.append(parser.OFPActionOutput(dst_port))\r\n if len(flow_info) == 7:\r\n if flow_info[-3] == 6:\r\n if flow_info[-2] == True:\r\n match = parser.OFPMatch(\r\n in_port=src_port, eth_type=flow_info[0],\r\n ipv4_src=flow_info[1], ipv4_dst=flow_info[2],\r\n ip_proto=6, tcp_src=flow_info[-1][0],tcp_dst=flow_info[-1][1])\r\n else:\r\n pass\r\n elif flow_info[-3] == 17:\r\n if flow_info[-2] == True:\r\n match = parser.OFPMatch(\r\n in_port=src_port, eth_type=flow_info[0],\r\n ipv4_src=flow_info[1], ipv4_dst=flow_info[2],\r\n ip_proto=17, udp_src=flow_info[-1][0],udp_dst=flow_info[-1][1])\r\n else:\r\n pass\r\n elif len(flow_info) == 4:\r\n match = parser.OFPMatch(\r\n in_port=src_port, eth_type=flow_info[0],\r\n ipv4_src=flow_info[1], ipv4_dst=flow_info[2])\r\n elif len(flow_info)==5:\r\n match=parser.OFPMatch(in_port=src_port,eth_type=flow_info[0],ip_protocol=flow_info[1],\r\n ipv4_src = flow_info[2], ipv4_dst = flow_info[3])\r\n else:\r\n pass\r\n\r\n self.add_flow(datapath, 30, match, actions,\r\n idle_timeout=1, hard_timeout=0)", "def train(self):\n self.emission_model(self.train_data)\n self.transition_model(self.train_data)", "def skel_model(action, install_path_mp, install_path_zfs, jname):\n # init vars\n # mp - mount point, zfs - zfs point\n skel_path_mp = '%s-SKELETON' % install_path_mp\n skel_path_zfs = '%s-SKELETON' % install_path_zfs\n rw_path_mp = '%s-RW' % install_path_mp\n rw_path_zfs = '%s-RW' % install_path_zfs\n \n if action == 'init':\n# create SKELETON MODEL\n# http://www.freebsd.org/doc/en_US.ISO8859-1/books/handbook/jails-application.html\n log(\" INFO: Init BASE-SKELETON zfs START\")\n# Create a skeleton for the read-write portion of the system\n os.system('zfs create %s' % skel_path_zfs)\n os.system('zfs set mountpoint=%s %s' % (skel_path_mp, skel_path_zfs))\n os.system('zfs create %s' % rw_path_zfs)\n os.system('zfs set mountpoint=%s %s' % (rw_path_mp, rw_path_zfs))\n\n os.system('mkdir -p %s/home %s/usr-X11R6 %s/distfiles %s/usr-share-keys/pkg' % (skel_path_mp, skel_path_mp, skel_path_mp, skel_path_mp))\n os.system('mv %s/etc %s' % (install_path_mp, skel_path_mp ))\n os.system('mv %s/usr/local %s/usr-local' % (install_path_mp, skel_path_mp ))\n os.system('mv %s/tmp %s' % (install_path_mp, skel_path_mp ))\n os.system('mv %s/var %s' % (install_path_mp, skel_path_mp ))\n os.system('mv %s/root %s' % (install_path_mp, skel_path_mp ))\n# mergemaster to install missing configuration files. Then, remove the the extra directories that mergemaster creates:\n# os.system('mergemaster -t %s/var/tmp/temproot -D %s -i' % (skel_path, skel_path))\n# os.system('rm -R %(key)s/bin %(key)s/boot %(key)s/lib %(key)s/libexec %(key)s/mnt %(key)s/proc %(key)s/rescue %(key)s/sbin %(key)s/sys %(key)s/usr %(key)s/dev' % {'key': skel_path})\n# Now, symlink the read-write file system to the read-only file system. 
Ensure that the symlinks are created in the correct s/ locations as the creation of directories in the wrong locations will cause the installation to fail.\n os.chdir('%s' % install_path_mp)\n os.system('mkdir SROOT')\n os.system('ln -s SROOT/etc etc')\n os.system('ln -s SROOT/home home')\n os.system('ln -s SROOT/root root')\n os.system('ln -s /SROOT/usr-local usr/local')\n os.system('ln -s /SROOT/usr-share-keys usr/share/keys')\n os.system('ln -s /SROOT/usr-X11R6 usr/X11R6')\n os.system('ln -s /SROOT/distfiles usr/ports/distfiles')\n os.system('ln -s SROOT/tmp tmp')\n os.system('ln -s SROOT/var var')\n# Create a generic /home/j/skel/etc/make.conf containing this line\n os.system('echo \\\"WRKDIRPREFIX?= /SROOT/portbuild\\\" > %s/etc/make.conf' % skel_path_mp )\n# Create zfs BASE-SKELETON snapshot which will be used for installation \n os.system('zfs snapshot %s@install' % skel_path_zfs)\n log(\" INFO: Init BASE-SKELETON zfs FINISH\")\n \n# install SKELETON jail \n if action == 'install':\n# install RW fs for jail\n os.system('zfs send %s/BASE-SKELETON@install | zfs receive -F %s/BASE-RW/%s' % (jzfs, jzfs, jname))\n# remove receive snapshot \n os.system('zfs destroy %s/BASE-RW/%s@install' % (jzfs, jname))\n# create jail local config - mount skel model for jail hosme dir\n if jname == 'BASE-update':\n os.system('echo \\\"%sBASE %s%s nullfs rw 0 0\\\" > %sBASE-RW/%s/etc/fstab' % (jpath, jpath, jname, jpath, jname))\n else:\n os.system('echo \\\"%sBASE %s%s nullfs ro 0 0\\\" > %sBASE-RW/%s/etc/fstab' % (jpath, jpath, jname, jpath, jname))\n \n os.system('echo \\\"%sBASE-RW/%s %s%s/SROOT nullfs rw 0 0\\\" >> %sBASE-RW/%s/etc/fstab' % (jpath, jname, jpath, jname, jpath, jname))\n temp_add_cfg = ['### BASE mount settings ###', 'mount.fstab=\"%sBASE-RW/%s/etc/fstab\";' % (jpath, jname), 'mount.devfs;']\n return temp_add_cfg", "def update_ports( self ):\n self.ports = self.getComPorts()\n self.updatePortsUI()", "def updateObjectsToController(self):\n newobj = { 'orderedStreams' : self.orderedStreams,\n 'outputfiles' : list(self.outputfiles),\n }\n\n self.controller.send(newobj)\n self.controller.close()", "def modify_mstp_ports(self, ports, instance=0, **kwargs):\n pass", "def forward(self, batch):\n raise NotImplementedError", "def forward(self, batch):\n raise NotImplementedError", "def Run(port):\n\tport.write(\"R\");", "def transfer_weights(self):\n W, target_W = self.model.get_weights(), self.target_model.get_weights()\n for i in range(len(W)):\n target_W[i] = self.tau * W[i] + (1 - self.tau)* target_W[i]\n self.target_model.set_weights(target_W)", "def _route_to_dest(self):\n # Ask the network\n self.route = self.network.determine_route(self.start, self.dest)\n # Set the index to where we are now\n self.route_index = 0", "def test_create_hyperflex_server_model(self):\n pass", "def task_process(args):\n if args.mode == 'change model':\n for i in range(config.center_len):\n h, w = config.center_list[i][0], config.center_list[i][1]\n os.system('rm -rf ctpn_change_{}x{}.onnx'.format(h, w))\n for i in range(config.center_len):\n h, w = config.center_list[i][0], config.center_list[i][1]\n os.system('{} change_model.py --input_path={}/ctpn_{}x{}.onnx --output_path={}/ctpn_change_{}x{}.onnx' \\\n .format(args.interpreter, args.src_dir, h, w,args.res_dir, h, w)) \n if args.mode == 'preprocess':\n for i in range(config.center_len):\n os.system('mkdir -p {}_{}x{}'.format(args.res_dir, config.center_list[i][0], config.center_list[i][1]))\n os.system('{} ctpn_preprocess.py --src_dir={} --save_path={}' 
\\\n .format(args.interpreter, args.src_dir, args.res_dir))\n if args.mode == 'ais_infer':\n fps_all = 0\n os.system('mkdir -p {}/inf_output'.format(args.res_dir))\n for i in range(config.center_len):\n h, w = config.center_list[i][0], config.center_list[i][1]\n\n os.system('{} --model={} --input={}_{}x{} --dymHW {},{} --device {} --batchsize={} --output={}/inf_output' \\\n .format(args.interpreter, args.om_path, args.src_dir ,h , w, h, w,args.device, args.batch_size, args.res_dir))\n\n sumary_path = glob.glob('{}/inf_output/*ary.json'.format(args.res_dir))[0]\n with open(sumary_path, 'r') as f:\n output = json.load(f)\n throughput = output['throughput'] \n fps_all = fps_all + throughput * config.center_count[i]\n os.system('rm -f {}'.format(sumary_path))\n os.system('mv {}/inf_output/*/*.bin {}'.format(args.res_dir, args.res_dir))\n os.system('rm {}/inf_output -rf'.format(args.res_dir))\n fps_all = fps_all / config.imgs_len\n print(\"====performance data====\")\n print('CTPN bs{} models fps:{}'.format(args.batch_size, fps_all))" ]
[ "0.60630095", "0.60588175", "0.57406735", "0.56002295", "0.551898", "0.55181533", "0.5453672", "0.5411719", "0.5400127", "0.53960764", "0.538731", "0.53615785", "0.53615785", "0.5356729", "0.53435755", "0.5335418", "0.5325586", "0.5305902", "0.52706647", "0.52651966", "0.5254695", "0.5242576", "0.52215403", "0.5216141", "0.52030295", "0.5190994", "0.51713437", "0.5158228", "0.51548004", "0.51527536", "0.5142012", "0.51380986", "0.5126755", "0.5120609", "0.51149243", "0.5112653", "0.5110006", "0.5108842", "0.5104", "0.51031005", "0.5078795", "0.5073278", "0.50731695", "0.5063123", "0.5063123", "0.5063123", "0.5057669", "0.5046557", "0.50463575", "0.50306344", "0.5024758", "0.5017902", "0.50124055", "0.50082636", "0.50038576", "0.49923655", "0.49917555", "0.49910045", "0.49900806", "0.49899632", "0.49746057", "0.49671668", "0.49649343", "0.49647447", "0.49614623", "0.49609798", "0.49603257", "0.496016", "0.49513453", "0.49512476", "0.49508289", "0.49479753", "0.49467978", "0.49465528", "0.49425712", "0.49421856", "0.49421856", "0.49385056", "0.49264163", "0.49166414", "0.4913971", "0.4908327", "0.49077472", "0.4902476", "0.48936537", "0.48911983", "0.48892412", "0.48801106", "0.4877697", "0.48745883", "0.4871531", "0.486688", "0.48575234", "0.48554197", "0.48554197", "0.48547557", "0.48531434", "0.48463568", "0.48455048", "0.48360866" ]
0.6467971
0
Generate TransferDefinitions based on transferconfig.json, containing those ports that have a base training for transferring to another port
def _generate_transfers(self) -> Dict[str, List[TransferDefinition]]: config = read_json(self.config_path) transfer_defs = {} ports = list(config["ports"]) permutations = list(itertools.permutations(ports, r=2)) # for pair in _permute(config["ports"]): for pair in permutations: base_port, target_port = self.pm.find_port(pair[0]), self.pm.find_port(pair[1]) if target_port is None: raise ValueError(f"No port found: Unable to transfer from base-port with name '{base_port.name}'") if target_port is None: raise ValueError(f"No port found: Unable to transfer to target-port with name '{pair[1]}'") trainings = self.pm.load_trainings(base_port, self.output_dir, self.routes_dir, training_type="base") # print(f"loaded trainings. base port {base_port.name}:\n{trainings.keys()}") if len(trainings.keys()) < 1: print(f"No base-training found for port '{base_port.name}'. Skipping") continue training = list(trainings.values())[-1][0] # print(f"training ({len(trainings.values())}): {training}") # print(f"Pair {base_port.name} ({len(trainings)} base-trains) -> {target_port.name}. " # f"Using latest at '{training.start_time}'") verify_output_dir(self.output_dir, target_port.name) td = TransferDefinition(base_port_name=base_port.name, base_model_path=training.model_path, target_port_name=target_port.name, target_routes_dir=os.path.join(self.routes_dir, target_port.name), target_model_dir=os.path.join(self.output_dir, "model", target_port.name), target_output_data_dir=os.path.join(self.output_dir, "data", target_port.name), target_plot_dir=os.path.join(self.output_dir, "plot", target_port.name), target_log_dir=os.path.join(self.output_dir, "log", target_port.name)) name = target_port.name if name in transfer_defs: transfer_defs[target_port.name].append(td) else: transfer_defs[target_port.name] = [td] return transfer_defs
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def transfer(self, target_port: Port, evaluator: Evaluator, config_uids: List[int] = None) -> None:\n if target_port.name not in self.transfer_defs:\n print(f\"No transfer definition found for target port '{target_port.name}'\")\n return\n # transfer definitions for specified target port\n tds = self.transfer_defs[target_port.name]\n output_dir = os.path.join(script_dir, os.pardir, \"output\")\n training_type = \"transfer\"\n print(f\"TRANSFERRING MODELS TO TARGET PORT '{target_port.name}'\")\n if config_uids is not None:\n print(f\"Transferring configs -> {config_uids} <-\")\n window_width = 50\n num_epochs = 25\n train_lr = 0.01\n fine_num_epochs = 20\n fine_tune_lr = 1e-5\n batch_size = 1024\n\n # skip port if fully transferred\n num_not_transferred = 0\n for td in tds:\n for config in self.transfer_configs:\n if not self._is_transferred(target_port.name, td.base_port_name, config.uid):\n # print(f\"Not transferred: {td.base_port_name} -> {target_port.name} ({config.uid})\")\n num_not_transferred += 1\n num_transfers = len(tds) * len(self.transfer_configs)\n print(f\"Transferred count {num_transfers - num_not_transferred}/{num_transfers}\")\n if num_not_transferred == 0:\n print(f\"All transfers done for target port '{target_port.name}': Skipping\")\n return\n X_ts, y_ts = load_data(target_port, window_width)\n\n baseline = mean_absolute_error(y_ts, np.full_like(y_ts, np.mean(y_ts)))\n evaluator.set_naive_baseline(target_port, baseline)\n print(f\"Naive baseline: {baseline}\")\n # X_train_orig, X_test_orig, y_train_orig, y_test_orig = train_test_split(X_ts, y_ts, test_size=0.2,\n # random_state=42, shuffle=False)\n # train_optimizer = Adam(learning_rate=train_lr)\n # fine_tune_optimizer = Adam(learning_rate=fine_tune_lr)\n\n for td in tds:\n print(f\".:'`!`':. TRANSFERRING PORT {td.base_port_name} TO {td.target_port_name} .:'`!`':.\")\n print(f\"- - Epochs {num_epochs} </> </> Learning rate {train_lr} - -\")\n print(f\"- - Window width {window_width} </> Batch size {batch_size} - -\")\n # print(f\"- - Number of model's parameters {num_total_trainable_parameters(model)} device {device} - -\")\n base_port = self.pm.find_port(td.base_port_name)\n if base_port is None:\n raise ValueError(f\"Unable to associate port with port name '{td.base_port_name}'\")\n\n # model = inception_time(input_shape=(window_width, 37))\n # print(model.summary())\n\n # apply transfer config\n for config in self.transfer_configs:\n if config_uids is not None and config.uid not in config_uids:\n continue\n if self._is_transferred(target_port.name, td.base_port_name, config.uid):\n print(f\"Skipping config {config.uid}\")\n continue\n print(f\"\\n.:'':. 
APPLYING CONFIG {config.uid} ::'':.\")\n print(f\"-> -> {config.desc} <- <-\")\n print(f\"-> -> nth_subset: {config.nth_subset} <- <-\")\n print(f\"-> -> trainable layers: {config.train_layers} <- <-\")\n _, _, start_time, _, _ = decode_keras_model(os.path.split(td.base_model_path)[1])\n model_file_name = encode_keras_model(td.target_port_name, start_time, td.base_port_name, config.uid)\n file_path = os.path.join(output_dir, \"model\", td.target_port_name, model_file_name)\n\n X_train_orig, X_test_orig, y_train_orig, y_test_orig = train_test_split(X_ts, y_ts, test_size=0.2,\n random_state=42, shuffle=False)\n train_optimizer = Adam(learning_rate=train_lr)\n fine_tune_optimizer = Adam(learning_rate=fine_tune_lr)\n\n checkpoint = ModelCheckpoint(file_path, monitor='val_mae', mode='min', verbose=2, save_best_only=True)\n early = EarlyStopping(monitor=\"val_mae\", mode=\"min\", patience=10, verbose=2)\n redonplat = ReduceLROnPlateau(monitor=\"val_mae\", mode=\"min\", patience=3, verbose=2)\n callbacks_list = [checkpoint, early, redonplat]\n\n # optimizer = Adam(learning_rate=lr)\n #\n # # configure model\n # model.compile(optimizer=optimizer, loss=\"mse\", metrics=[\"mae\"])\n\n # load base model\n model = load_model(td.base_model_path)\n # if config.uid == 0:\n # print(model.summary())\n # else:\n # print(model.summary())\n # del model\n\n X_train = X_train_orig\n X_test = X_test_orig\n y_train = y_train_orig\n y_test = y_test_orig\n\n # apply transfer configuration\n if config.nth_subset > 1:\n if X_train.shape[0] < config.nth_subset:\n print(f\"Unable to apply nth-subset. Not enough data\")\n X_train = X_train_orig[0::config.nth_subset]\n X_test = X_test_orig[0::config.nth_subset]\n y_train = y_train_orig[0::config.nth_subset]\n y_test = y_test_orig[0::config.nth_subset]\n print(f\"Orig shape: {X_train_orig.shape} {config.nth_subset} th-subset shape: {X_train.shape}\")\n print(f\"Orig shape: {X_test_orig.shape} {config.nth_subset} th-subset shape: {X_test.shape}\")\n print(f\"Orig shape: {y_train_orig.shape} {config.nth_subset} th-subset shape: {y_train.shape}\")\n print(f\"Orig shape: {y_test_orig.shape} {config.nth_subset} th-subset shape: {y_test.shape}\")\n modified = False\n # freeze certain layers\n for layer in model.layers:\n if layer.name not in config.train_layers:\n modified = True\n print(f\"setting layer {layer.name} to False\")\n layer.trainable = False\n else:\n print(f\"layer {layer.name} stays True\")\n if modified:\n print(f\"modified. 
compiling\")\n # re-compile\n model.compile(optimizer=train_optimizer, loss=\"mse\", metrics=[\"mae\"])\n # trainable_count = int(np.sum([K.count_params(p) for p in set(model.trainable_weights)]))\n # non_trainable_count = int(np.sum([K.count_params(p) for p in set(model.non_trainable_weights)]))\n trainable_count = count_params(model.trainable_weights)\n non_trainable_count = count_params(model.non_trainable_weights)\n print(f\"Total params: {trainable_count + non_trainable_count}\")\n print(f\"Trainable params: {trainable_count}\")\n print(f\"Non trainable params: {non_trainable_count}\")\n\n # transfer model\n result = model.fit(X_train, y_train, epochs=num_epochs, batch_size=batch_size, verbose=2,\n validation_data=(X_test, y_test), callbacks=callbacks_list)\n train_mae = result.history[\"mae\"]\n val_mae = result.history[\"val_mae\"]\n gc.collect()\n tune_result = None\n tune_train_mae = None\n tune_val_mae = None\n\n if config.tune:\n print(f\"Fine-Tuning transferred model\")\n # apply fine-tuning: unfreeze all but batch-normalization layers!\n for layer in model.layers:\n if not layer.name.startswith(\"batch_normalization\"):\n layer.trainable = True\n model.compile(optimizer=fine_tune_optimizer, loss=\"mse\", metrics=[\"mae\"])\n # print(f\"model for fine tuning\")\n # print(model.summary())\n tune_result = model.fit(X_train, y_train, epochs=fine_num_epochs, batch_size=batch_size, verbose=2,\n validation_data=(X_test, y_test), callbacks=callbacks_list)\n tune_train_mae = tune_result.history[\"mae\"]\n tune_val_mae = tune_result.history[\"val_mae\"]\n model.load_weights(file_path)\n\n # set evaluation\n def _compute_mae(_val_mae: List[float], _tune_val_mae: List[float]) -> float:\n if _tune_val_mae is not None:\n _val_mae = _val_mae + _tune_val_mae\n return min(val_mae)\n\n evaluator.set_mae(target_port, start_time, _compute_mae(val_mae, tune_val_mae), base_port, config.uid)\n y_pred = model.predict(X_test)\n grouped_mae = evaluator.group_mae(y_test, y_pred)\n evaluator.set_mae(target_port, start_time, grouped_mae, base_port, config.uid)\n\n # save history\n history_file_name = encode_history_file(training_type, target_port.name, start_time, td.base_port_name,\n config.uid)\n history_path = os.path.join(output_dir, \"data\", target_port.name, history_file_name)\n np.save(history_path, [result.history, tune_result.history if tune_result else None])\n\n # plot history\n plot_dir = os.path.join(output_dir, \"plot\")\n plot_history(train_mae, val_mae, plot_dir, target_port.name, start_time, training_type,\n td.base_port_name, config.uid, tune_train_mae, tune_val_mae)\n # evaluator.plot_grouped_mae(target_port, training_type, start_time, config.uid)\n plot_predictions(y_pred, y_test, plot_dir, target_port.name, start_time, training_type,\n td.base_port_name, config.uid)\n self.set_transfer(target_port.name, td.base_port_name, config.uid)\n del checkpoint, early, redonplat\n del X_train_orig, X_test_orig, y_train_orig, y_test_orig, model, X_train, y_train, X_test, y_test\n gc.collect()\n tf.keras.backend.clear_session()\n gc.collect()\n del X_ts, y_ts", "def generate_config(context):\n\n\n properties = context.properties\n project_id = properties.get('project', context.env['project'])\n\n network = context.properties.get('networkURL', generate_network_uri(\n project_id,\n context.properties.get('network','')\n ))\n target_vpn_gateway = context.env['name'] + '-tvpng'\n esp_rule = context.env['name'] + '-esp-rule'\n udp_500_rule = context.env['name'] + '-udp-500-rule'\n udp_4500_rule = 
context.env['name'] + '-udp-4500-rule'\n vpn_tunnel = context.env['name'] + '-vpn'\n router_vpn_binding = context.env['name'] + '-router-vpn-binding'\n resources = []\n if 'ipAddress' in context.properties:\n ip_address = context.properties['ipAddress']\n static_ip = ''\n else:\n static_ip = context.env['name'] + '-ip'\n resources.append({\n # The reserved address resource.\n 'name': static_ip,\n # https://cloud.google.com/compute/docs/reference/rest/v1/addresses\n 'type': 'gcp-types/compute-v1:addresses',\n 'properties': {\n 'name': properties.get('name', static_ip),\n 'project': project_id,\n 'region': context.properties['region']\n }\n })\n ip_address = '$(ref.' + static_ip + '.address)'\n\n resources.extend([\n {\n # The target VPN gateway resource.\n 'name': target_vpn_gateway,\n # https://cloud.google.com/compute/docs/reference/rest/v1/targetVpnGateways\n 'type': 'gcp-types/compute-v1:targetVpnGateways',\n 'properties':\n {\n 'name': properties.get('name', target_vpn_gateway),\n 'project': project_id,\n 'network': network,\n 'region': context.properties['region'],\n }\n },\n {\n # The forwarding rule resource for the ESP traffic.\n 'name': esp_rule,\n # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules\n 'type': 'gcp-types/compute-v1:forwardingRules',\n 'properties':\n {\n 'name': '{}-esp'.format(properties.get('name')) if 'name' in properties else esp_rule,\n 'project': project_id,\n 'IPAddress': ip_address,\n 'IPProtocol': 'ESP',\n 'region': context.properties['region'],\n 'target': '$(ref.' + target_vpn_gateway + '.selfLink)',\n }\n },\n {\n # The forwarding rule resource for the UDP traffic on port 4500.\n 'name': udp_4500_rule,\n # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules\n 'type': 'gcp-types/compute-v1:forwardingRules',\n 'properties':\n {\n 'name': '{}-udp-4500'.format(properties.get('name')) if 'name' in properties else udp_4500_rule,\n 'project': project_id,\n 'IPAddress': ip_address,\n 'IPProtocol': 'UDP',\n 'portRange': 4500,\n 'region': context.properties['region'],\n 'target': '$(ref.' + target_vpn_gateway + '.selfLink)',\n }\n },\n {\n # The forwarding rule resource for the UDP traffic on port 500\n 'name': udp_500_rule,\n # https://cloud.google.com/compute/docs/reference/rest/v1/forwardingRules\n 'type': 'gcp-types/compute-v1:forwardingRules',\n 'properties':\n {\n 'name': '{}-udp-500'.format(properties.get('name')) if 'name' in properties else udp_500_rule,\n 'project': project_id,\n 'IPAddress': ip_address,\n 'IPProtocol': 'UDP',\n 'portRange': 500,\n 'region': context.properties['region'],\n 'target': '$(ref.' + target_vpn_gateway + '.selfLink)',\n }\n },\n\n ])\n router_url_tag = 'routerURL'\n router_name_tag = 'router'\n\n if router_name_tag in context.properties:\n router_url = context.properties.get(router_url_tag, generate_router_uri(\n context.env['project'],\n context.properties['region'],\n context.properties[router_name_tag]))\n # Create dynamic routing VPN\n resources.extend([\n {\n # The VPN tunnel resource.\n 'name': vpn_tunnel,\n # https://cloud.google.com/compute/docs/reference/rest/v1/vpnTunnels\n 'type': 'gcp-types/compute-v1:vpnTunnels',\n 'properties':\n {\n 'name': properties.get('name', vpn_tunnel),\n 'project': project_id,\n 'description':\n 'A vpn tunnel',\n 'ikeVersion':\n 2,\n 'peerIp':\n context.properties['peerAddress'],\n 'region':\n context.properties['region'],\n 'router': router_url,\n 'sharedSecret':\n context.properties['sharedSecret'],\n 'targetVpnGateway':\n '$(ref.' 
+ target_vpn_gateway + '.selfLink)'\n },\n 'metadata': {\n 'dependsOn': [esp_rule,\n udp_500_rule,\n udp_4500_rule]\n }\n }])\n else:\n # Create static routing VPN\n resources.append(\n {\n # The VPN tunnel resource.\n 'name': vpn_tunnel,\n 'type': 'gcp-types/compute-v1:vpnTunnels',\n 'properties': {\n 'name': vpn_tunnel,\n 'description':\n 'A vpn tunnel',\n 'ikeVersion':\n 2,\n 'peerIp':\n context.properties['peerAddress'],\n 'region':\n context.properties['region'],\n 'sharedSecret':\n context.properties['sharedSecret'],\n 'targetVpnGateway':\n '$(ref.' + target_vpn_gateway + '.selfLink)',\n 'localTrafficSelector':\n context.properties['localTrafficSelector'],\n 'remoteTrafficSelector':\n context.properties['remoteTrafficSelector'],\n\n },\n 'metadata': {\n 'dependsOn': [esp_rule, udp_500_rule, udp_4500_rule]\n }\n },\n )\n\n return {\n 'resources':\n resources,\n 'outputs':\n [\n {\n 'name': 'targetVpnGateway',\n 'value': target_vpn_gateway\n },\n {\n 'name': 'staticIp',\n 'value': static_ip\n },\n {\n 'name': 'espRule',\n 'value': esp_rule\n },\n {\n 'name': 'udp500Rule',\n 'value': udp_500_rule\n },\n {\n 'name': 'udp4500Rule',\n 'value': udp_4500_rule\n },\n {\n 'name': 'vpnTunnel',\n 'value': vpn_tunnel\n },\n {\n 'name': 'vpnTunnelUri',\n 'value': '$(ref.'+vpn_tunnel+'.selfLink)'\n }\n ]\n }", "def plot_transfer_effect(self, port: Union[str, Port]) -> None:\n if isinstance(port, str):\n orig_port = port\n port = self.pm.find_port(port)\n if port is None:\n raise ValueError(f\"Unable to associate port with port name '{orig_port}'\")\n transfer_trainings = self.pm.load_trainings(port, output_dir=self.output_dir, routes_dir=self.routes_dir,\n training_type=\"transfer\")\n if len(transfer_trainings) < 1:\n print(f\"No training of type 'transfer' found for port {port.name}. 
Skipping plot_transfer_effect\")\n return\n\n transfer_training = transfer_trainings[-1]\n _, _, start_time, _, source_port_name = decode_model_file(os.path.split(transfer_training.model_path)[1])\n\n base_trainings = self.pm.load_trainings(source_port_name, output_dir=self.output_dir,\n routes_dir=self.routes_dir, training_type=\"base\")\n base_trainings = [t for t in base_trainings if t.start_time == start_time]\n if len(base_trainings) != 1:\n raise ValueError(f\"Unable to identify base-training for start_time '{start_time}': \"\n f\"Got {len(base_trainings)}, expected exactly 1\")\n base_training = base_trainings[0]\n base_key = self._encode_base_key(source_port_name, base_training.start_time)\n # print(f\"normal keys: {self.mae_base.keys()}\")\n # print(f\"grouped keys: {self.mae_base_groups.keys()}\")\n # print(f\"transferred normal keys: {self.mae_transfer.keys()}\")\n # print(f\"transferred grouped keys: {self.mae_transfer_groups.keys()}\")\n transfer_key = self._encode_transfer_key(source_port_name, port.name, start_time)\n base_data = self.mae_base_groups[base_key]\n transfer_data = self.mae_transfer_groups[transfer_key]\n path = os.path.join(self.output_dir, \"eval\", f\"transfer-effect_{source_port_name}-{port.name}.png\")\n plot_transfer_effect(base_data, transfer_data, source_port_name, port.name, path)", "def generate_simple_flows(tgen_names, num_bots, bot_msg_size, bot_msg_rate, num_comps, comp_msg_size, comp_msg_rate):\n bot_tgen_ips = [TGEN_IP_PATTERN.format(TGEN_SUBNET_BASE + i) for i in range(num_bots)]\n bot_tgen_ports = [TGEN_PORT_BASE + i for i in range(num_bots)]\n\n # competitor IPs and port numbers start after those assigned to the bots\n comp_tgen_ips = [TGEN_IP_PATTERN.format(TGEN_SUBNET_BASE + i) for i in range(num_bots, num_bots+num_comps)]\n comp_tgen_ports = [TGEN_PORT_BASE + i for i in range(num_bots, num_bots+num_comps)]\n\n # build up flows for bots. Each bot talks to every other bot.\n bot_flows = {}\n for i in range(num_bots):\n bot_flows[i] = {\"flows\": [], \"tgen_name\": tgen_names[i]}\n\n # add a flow for each neighbor bot\n for j in range(len(bot_tgen_ips)):\n # don't add flows to self\n if i != j:\n # send from a unique source port based on the DESTINATION node.\n # use a destination port based on the SOURCE node number\n bot_flows[i][\"flows\"].append({\"src_port\": bot_tgen_ports[j],\n \"dst_ip\": bot_tgen_ips[j],\n \"dst_port\": bot_tgen_ports[i],\n \"msg_rate\": bot_msg_rate,\n \"msg_size\": bot_msg_size,\n })\n\n # build up flows for competitor nodes. 
Each competitor node talks to every other competitor node.\n comp_flows = {}\n for i in range(num_comps):\n comp_flows[i] = {\"flows\": [], \"tgen_name\": tgen_names[i+num_bots]}\n\n # add a flow for each neighbor bot\n for j in range(len(comp_tgen_ips)):\n # don't add flows to self\n if i != j:\n # send from a unique source port based on the DESTINATION node.\n # use a destination port based on the SOURCE node number\n comp_flows[i][\"flows\"].append({\"src_port\": comp_tgen_ports[j],\n \"dst_ip\": comp_tgen_ips[j],\n \"dst_port\": comp_tgen_ports[i],\n \"msg_rate\": comp_msg_rate,\n \"msg_size\": comp_msg_size,\n })\n\n return bot_flows, comp_flows", "def gen_port_resources(self, server, ports):\n if (self.SuppressServerStatuses is False):\n print \"\\t* Adding all the port interface resources\"\n data = {}\n port_idx = \"0\"\n for idx, port in enumerate(ports):\n\n # get fixedips\n fixed_ip = port._info[\"fixed_ips\"]\n fixed_ip_address = fixed_ip[0][\"ip_address\"]\n\n # filter all_nets by subnet_id\n net_data = []\n fip = None\n for x in self.all_nets:\n for fip in fixed_ip:\n if x[0][\"id\"] in fip[\"subnet_id\"]:\n net_data.append(x)\n\n if len(net_data) > 0:\n net = net_data[0][1]\n subnet = net_data[0][2]\n\n networkID = [netw['id'] for netw in self.neutronclient.list_networks()['networks'] if netw['name'] == net][0]\n networkIsShared = self.neutronclient.show_network(networkID)['network']['shared']\n\n if networkIsShared is True:\n port_properties_ = {\n \"network_id\": networkID,\n \"fixed_ips\": [\n {\"subnet_id\": fip[\"subnet_id\"]}\n ]\n }\n else:\n port_properties_ = {\n \"network_id\": {\"get_resource\": net},\n \"fixed_ips\": [\n {\"subnet_id\": {\"get_resource\": subnet}}\n ]\n }\n if self.staticips:\n fixed_ips = []\n for address in server.addresses:\n server_ip_address = server.addresses[address][0]['addr']\n if server_ip_address == fixed_ip_address:\n fixed_ips.append({\"ip_address\": server_ip_address})\n\n port_properties_ = {\n \"network_id\": {\"get_resource\": net},\n \"fixed_ips\": fixed_ips\n }\n data = {\"type\": \"OS::Neutron::Port\",\"properties\": port_properties_}\n else:\n print \"!!Probable error grabbing port information for server %s!!\" % (server.name)\n data = {\"type\": \"OS::Neutron::Port\"}\n\n self.compute_data[\"resources\"][\"%s_port%s\" % (server.name, port_idx)] = data\n if len(ports) >= 1:\n port_idx = str(1 + idx)", "def hh_assign_ports():\n print('******* Assigning ports')\n for device in ci_addrs.switches_hh1:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_hh1_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_hh1_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_hh_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')\n\n for device in ci_addrs.switches_hh2:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_hh2_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_hh2_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_hh_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')", "def create_net(args):\n\n # Load config file for this experiment\n xinfo = yaml.load(open(args.exp)) # experiment info\n\n # copy config to run 
directory\n assert osp.isdir(args.cache_dir), 'Working directory not found: ' + args.cache_dir\n # output config file\n yaml.dump(xinfo, open(args.exp_config_path, 'w'),\n default_flow_style=False)\n\n # Load dataset config file\n dcfg_path = osp.join(args.data_config_path, xinfo['INPUT']['DATASET'])\n dinfo = yaml.load(open(dcfg_path)) # dataset info\n data_dir = dinfo['ROOT']\n\n layout = xinfo['INPUT']['LAYOUT']\n inps = [s.strip() for l in layout for s in l.split(',')]\n outs = [s.strip() for s in xinfo['REFINE']['TARGETS'].split(',')]\n\n supports = ['seg', 'flow', 'norm', 'rgb', 'depth']\n\n nets = {}\n for split in ['train', 'test']:\n net_inps = []\n net_outs = []\n for inp in inps:\n match = re.search('^(gt|pr)({})'.format('|'.join(supports)), inp)\n assert match is not None, 'Error in config INPUT-LAYOUT: ' + inp\n\n modality = match.group(2)\n nchannels = dinfo[modality]['n']\n path = osp.join(data_dir, dinfo[modality][match.group(1) + '-' + split])\n\n assert osp.exists(path), 'File not found: ' + path\n net_inps.append((inp, path, nchannels))\n\n for out in outs:\n # TODO: read target type: zero couplings, tight, loose couplings\n match = re.search('({})'.format('|'.join(supports)), out)\n assert match is not None, 'Error in config REFINE-TARGET: '+ out\n\n modality = match.group(1)\n nchannels = dinfo[modality]['n']\n path = osp.join(data_dir, dinfo[modality]['gt-' + split])\n\n assert osp.exists(path), 'File not found: ' + path\n net_outs.append((out, path, nchannels))\n\n loss_params = dict()\n mapping = None\n if 'mapping' in dinfo['seg']:\n idx = dinfo['seg']['mapping']\n mapping = dict(zip(idx, xrange(len(idx))))\n\n if split == 'train':\n\n # if the class weights is not in the dataset config file\n if 'gt-train-weights' not in dinfo['seg']:\n print 'Generating median frequency balancing weights.'\n (weights, mapping) = gcw.get_mfb(osp.join(dinfo['ROOT'], dinfo['seg']['gt-train']),\n dinfo['seg']['ignore_label'],\n mapping)\n # save back to dataset config\n dinfo['seg']['gt-train-weights'] = weights\n yaml.dump(dinfo, open(dcfg_path, 'w'), default_flow_style=False)\n else:\n weights = dinfo['seg']['gt-train-weights']\n # update data\n # update loss parameter\n ignore_label = dinfo['seg']['ignore_label']\n ignore_label = mapping[ignore_label] if mapping is not None else ignore_label\n loss_params['loss_param'] = {\n 'ignore_label': ignore_label,\n 'class_weighting': weights\n }\n\n # generate net prototxt\n loader = dinfo['NAME'] + '_loader'\n net_proto = arch.create_net(net_inps, net_outs, split, loader, layout, mapping, **loss_params)\n\n # output to file\n path = osp.join(args.cache_dir, getattr(args, 'exp_{}_path'.format(split)))\n open(path, 'w').write(str(net_proto))\n nets[split] = net_proto\n\n return nets", "def all_net(configuration):\n net_dict_all = {\n \"design\" : ['H1', 'L1', 'V1' ],\n \"GW170817\" : ['H1', 'L1', 'V1' ],\n \"GW170814\" : ['H1', 'L1', 'V1' ],\n \"GW170817_without_Virgo\" : ['H1', 'L1' ],\n \"ET\" : [\"ET_L_Eu\", \"ET_L_Eu_2\"], # Triangular ET\n \"ET1\" : ['H1', 'L1', 'V1', 'ETdet1', 'ETdet2' ], # Triangular ET +LVC\n \"ET2\" : ['H1', 'L1', 'V1', 'ETdet1', 'ETdet3' ], # L-shaped at 2 places +LVC\n \"ET3\" : ['ETdet1', 'ETdet3', 'ETdet4'], # 3 L-shaped ET at three different places\n \"ET3L_EU\" : [\"ET_L_Eu\", \"ET_L_Aus_Eu\", \"ET_L_Argentina\"],\n \"3ET\" : [\"ET_L_US\", \"ET_L_Aus_US\", \"ET_L_Central_Africa\"],\n \"3CE\" : [\"CE_US\", \"CE_Aus_US\", \"CE_Central_Africa\"],\n \"1CE-ET\" : [\"CE_US\", \"ET_L_Eu\", \"ET_L_Eu_2\"],\n 
\"2CE-ET\" : [\"CE_US\", \"CE_Aus_US\", \"ET_L_Eu\", \"ET_L_Eu_2\"], #named 1 and 2 to distinguish from CE-ET (below) in Mills et al 2018.\n \"CE-ET\" : [\"CE_US\", \"CE_Aus_US\", \"ET_L_Eu\", \"ET_L_Eu_2\"],\n \"Voyager-ET\" : [\"LBB_H1\", \"LBB_L1\", \"LBB_I1\", \"ET_L_Eu\", \"ET_L_Eu_2\"],\n # next three networks are for calculating the impact of duty cycle on the Voyager-ET network\n \"VoyagerLI-ET\" : [\"LBB_L1\", \"LBB_I1\", \"ET_L_Eu\", \"ET_L_Eu_2\"],\n \"VoyagerHI-ET\" : [\"LBB_H1\", \"LBB_I1\", \"ET_L_Eu\", \"ET_L_Eu_2\"],\n \"VoyagerHL-ET\" : [\"LBB_H1\", \"LBB_L1\", \"ET_L_Eu\", \"ET_L_Eu_2\"],\n \n \"VoyagerETtri\" : [\"LBB_H1\", \"LBB_L1\", \"LBB_I1\", \"ET_Tri_Eu_1\", \"ET_Tri_Eu_2\", \"ET_Tri_Eu_3\"],\n \"Voyager\" : [\"LBB_H1\", \"LBB_L1\", \"LBB_I1\"],\n \"VoyagerWithAL\" : [\"LBB_H1\", \"LBB_L1\", \"LBB_I1\", \"ALV1\", \"ALK1\"],\n \"3_TriangularET\" : [\"ET_L_US\", \"ET_L_Aus_US\", \"ET_L_Central_Africa\",\"ET_L_US_2\", \"ET_L_Aus_US_2\", \"ET_L_Central_Africa_2\"],\n # for comparing to klimenko et al 2011:\n 'LHVA2' : [\"LBB_L1\",\"LBB_H1\",\"LBB_V1\",\"LBB_A-\"],\n 'LHVA' : [\"LBB_L1\",\"LBB_H1\",\"LBB_V1\",\"LBB_A\"],\n 'LHVJ' : [\"LBB_L1\",\"LBB_H1\",\"LBB_V1\",\"LBB_K1\"],\n 'LHVAJ' : [\"LBB_L1\",\"LBB_H1\",\"LBB_V1\",\"LBB_A\",\"LBB_K1\"],\n # for calculating alignment factor distributions in inclincation paper\n \"HL\" : [\"H1\", \"L1\"],\n \"HLV\" : [\"H1\", \"L1\", \"V1\" ],\n \"HLVK\" : [\"L1\",\"H1\",\"V1\",\"K1\"],\n \"HLVKI\" : [\"L1\",\"H1\",\"V1\",\"K1\", \"I1\"],\n \n\n #for optimizing the orientations of ET3L_EU w.r.t. polarization metric (see optimizing polarization notebook)\n #first optimize for the two detector network:\n \"ET2L_EU\" : [\"ET_L_Eu\", \"ET_L_Aus_Eu\"],\n \"2ET\" : [\"ET_L_US\", \"ET_L_Aus_US\"],\n #ranges\n }\n return(net_dict_all[configuration])", "def _build_networks(self):\n self.online_convnet = self._create_network(name='Online')\n self.target_convnet = self._create_network(name='Target')\n self._net_outputs = self.online_convnet(self.state_ph, training=True)\n self._q_argmax = tf.argmax(self._net_outputs.q_values, axis=1)[0]\n self._replay_net_outputs = self.online_convnet(self._replay.states,\n training=True)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)", "def rr1_assign_ports():\n print('******* Assigning ports')\n for device in ci_addrs.switches_rr1_12:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_rr1_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_rr1_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')\n for device in ci_addrs.switches_rr2_12:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_rr1_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_rr2_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr') \n assign_ports_n5k34()", "def task_generate_tasks():\n \n yield {\n 'basename': 'generate_tasks',\n 'name': None,\n # 'doc': 'docs for X',\n 'watch': ['trains/'],\n 'task_dep': ['create_folders'],\n }\n \n for root, dirs, files in 
os.walk('trains/',topdown=False):\n for f in files:\n #print(f)\n yield template_train_model(os.path.join(root,f))", "def kk_assign_ports():\n print('******* Assigning ports')\n for device in ci_addrs.switches_kk_all:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_kk_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_kk_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_kk_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')", "def build_configs():", "def generate_config(context):\n\n enable_flow_logs = context.properties.get('enableFlowLogs', False)\n\n subnetwork_resource = {\n 'name': context.properties['resourceName'],\n 'type': 'gcp-types/compute-beta:subnetworks',\n 'properties': {\n # Required properties.\n 'name':\n context.properties['name'],\n 'network':\n context.properties['network'],\n 'ipCidrRange':\n context.properties['ipCidrRange'],\n 'region':\n context.properties['region'],\n 'project':\n context.properties['projectId'],\n\n # Optional properties, with defaults.\n 'enableFlowLogs':\n enable_flow_logs,\n 'privateIpGoogleAccess':\n context.properties.get('privateIpGoogleAccess', False),\n 'secondaryIpRanges':\n context.properties.get('secondaryIpRanges', []),\n }\n }\n \n if enable_flow_logs:\n # If flow logs are enabled, we want to adjust the default config in two ways:\n # (1) Increase the sampling ratio (defaults to 0.5) so we sample all traffic.\n # (2) Reduce the aggregation interval to 30 seconds (default is 5secs) to save on\n # storage.\n subnetwork_resource['properties']['logConfig'] = {\n 'aggregationInterval': 'INTERVAL_30_SEC',\n 'enable': True,\n 'flowSampling': 1.0,\n 'metadata': 'INCLUDE_ALL_METADATA',\n }\n\n # Pass the 'dependsOn' property to the subnetwork resource if present.\n if 'dependsOn' in context.properties:\n subnetwork_resource['metadata'] = {\n 'dependsOn': context.properties['dependsOn']\n }\n\n output = [\n {\n 'name': 'name',\n 'value': subnetwork_resource['name'],\n },\n {\n 'name': 'selfLink',\n 'value': '$(ref.{}.selfLink)'.format(subnetwork_resource['name']),\n },\n ]\n\n return {'resources': [subnetwork_resource], 'outputs': output}", "def configure(task):\n r = task.run(\n name=\"Base Configuration\",\n task=template_file,\n template=\"base.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n # r.result holds the result of rendering the template\n config = r.result\n\n r = task.run(\n name=\"Loading extra underlay data\",\n task=load_yaml,\n file=f\"extra_data/{task.host}/underlay.yaml\",\n severity_level=0,\n )\n # r.result holds the data contained in the yaml files\n # we load the data inside the host itself for further use\n task.host[\"underlay\"] = r.result\n\n r = task.run(\n name=\"Loading extra evpn data\",\n task=load_yaml,\n file=f\"extra_data/{task.host}/evpn.yaml\",\n severity_level=0,\n )\n # r.result holds the data contained in the yaml files\n # we load the data inside the host itself for further use\n task.host[\"evpn\"] = r.result\n\n r = task.run(\n name=\"Loading extra vxlan data\",\n task=load_yaml,\n file=f\"extra_data/{task.host}/vxlan.yaml\",\n severity_level=0,\n )\n # r.result holds the data contained in the yaml files\n # we load the data inside the host itself for further use\n task.host[\"vxlan\"] = r.result\n\n r = task.run(\n name=\"Interfaces Configuration\",\n 
task=template_file,\n template=\"interfaces.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n # we append the generated configuration\n config += r.result\n\n r = task.run(\n name=\"Routing Configuration\",\n task=template_file,\n template=\"routing.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n config += r.result\n\n r = task.run(\n name=\"EVPN Configuration\",\n task=template_file,\n template=\"evpn.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n config += r.result\n\n r = task.run(\n name=\"Role-specific Configuration\",\n task=template_file,\n template=f\"{task.host['role']}.j2\",\n path=f\"templates/{task.host.nos}\",\n severity_level=0,\n )\n # we update our hosts' config\n config += r.result\n\n task.run(\n name=\"Loading Configuration on the device\",\n task=napalm_configure,\n replace=True,\n configuration=config,\n )", "def networkx_resource_generator (func_name, seed=0, max_cpu=40, max_mem=16000,\n max_storage=30, max_link_bw=70,\n abc_nf_types_len=10,\n supported_nf_cnt=6, max_link_delay=2,\n sap_cnt=10,\n **kwargs):\n rnd = random.Random()\n rnd.seed(seed)\n nx_graph = get_networkx_func(func_name, seed=seed, **kwargs)\n\n nf_types = list(string.ascii_uppercase)[:abc_nf_types_len]\n nffg = NFFG(id=\"net-\" + func_name + \"-seed\" + str(seed))\n gen = NameGenerator()\n\n for infra_id in nx_graph.nodes_iter():\n infra = nffg.add_infra(id=\"infra\" + str(infra_id),\n bandwidth=rnd.random() * max_link_bw * 1000,\n cpu=rnd.random() * max_cpu,\n mem=rnd.random() * max_mem,\n storage=rnd.random() * max_storage)\n infra.add_supported_type(rnd.sample(nf_types, supported_nf_cnt))\n\n for i, j in nx_graph.edges_iter():\n infra1 = nffg.network.node[\"infra\" + str(i)]\n infra2 = nffg.network.node[\"infra\" + str(j)]\n nffg.add_undirected_link(port1=infra1.add_port(id=gen.get_name(\"port\")),\n port2=infra2.add_port(id=gen.get_name(\"port\")),\n p1p2id=gen.get_name(\"link\"),\n p2p1id=gen.get_name(\"link\"),\n dynamic=False,\n delay=rnd.random() * max_link_delay,\n bandwidth=rnd.random() * max_link_bw)\n\n infra_ids = [i.id for i in nffg.infras]\n for s in xrange(0, sap_cnt):\n sap_obj = nffg.add_sap(id=gen.get_name(\"sap\"))\n sap_port = sap_obj.add_port(id=gen.get_name(\"port\"))\n infra_id = rnd.choice(infra_ids)\n infra = nffg.network.node[infra_id]\n nffg.add_undirected_link(port1=sap_port,\n port2=infra.add_port(id=gen.get_name(\"port\")),\n p1p2id=gen.get_name(\"link\"),\n p2p1id=gen.get_name(\"link\"),\n dynamic=False,\n delay=rnd.random() * max_link_delay,\n bandwidth=rnd.uniform(max_link_bw / 2.0,\n max_link_bw))\n\n return nffg", "def get_port_fields(module, system, host):\n host_fc_initiators = find_host_initiators_data(module, system, host, initiator_type='FC')\n host_iscsi_initiators = find_host_initiators_data(module, system, host, initiator_type='ISCSI')\n\n field_dict = dict(\n ports=[],\n )\n\n connectivity_lut = {\n 0: \"DISCONNECTED\",\n 1: \"DEGRADED\",\n 2: \"DEGRADED\",\n 3: \"CONNECTED\"\n }\n\n ports = host.get_ports()\n for port in ports:\n if str(type(port)) == \"<class 'infi.dtypes.wwn.WWN'>\":\n found_initiator = False\n for initiator in host_fc_initiators:\n if initiator['address'] == str(port).replace(\":\", \"\"):\n found_initiator = True\n #print(\"initiator targets:\", initiator['targets'])\n unique_initiator_target_ids = \\\n {target['node_id'] for target in initiator['targets']}\n port_dict = {\n \"address\": str(port),\n \"address_long\": initiator['address_long'],\n \"connectivity\": 
connectivity_lut[len(unique_initiator_target_ids)],\n \"targets\": initiator['targets'],\n \"type\": initiator['type'],\n }\n\n if not found_initiator:\n address_str = str(port)\n address_iter = iter(address_str)\n long_address = ':'.join(a+b for a, b in zip(address_iter, address_iter))\n port_dict = {\n \"address\": str(port),\n \"address_long\": long_address,\n \"connectivity\": connectivity_lut[0],\n \"targets\": [],\n \"type\": \"FC\"\n }\n\n field_dict['ports'].append(port_dict)\n\n if str(type(port)) == \"<class 'infi.dtypes.iqn.IQN'>\":\n found_initiator = False\n for initiator in host_iscsi_initiators:\n if initiator['address'] == str(port):\n found_initiator = True\n #print(\"initiator targets:\", initiator['targets'])\n unique_initiator_target_ids = \\\n {target['node_id'] for target in initiator['targets']}\n port_dict = {\n \"address\": str(port),\n \"connectivity\": connectivity_lut[len(unique_initiator_target_ids)],\n \"targets\": initiator['targets'],\n \"type\": initiator['type'],\n }\n\n if not found_initiator:\n port_dict = {\n \"address\": str(port),\n \"connectivity\": connectivity_lut[0],\n \"targets\": [],\n \"type\": \"ISCSI\"\n }\n\n field_dict['ports'].append(port_dict)\n\n return field_dict", "def build_network(self, inputs, targets, training=False):\n raise NotImplementedError", "def generateTopology():\n switches = {}\n interfaces = {}\n links = {}\n return (switches,links)", "def _build_networks(self):\n # Calling online_convnet will generate a new graph as defined in\n # self._get_network_template using whatever input is passed, but will always\n # share the same weights.\n self.online_convnet = tf.make_template('Online', self._network_template)\n self.target_convnet = tf.make_template('Target', self._network_template)\n self._net_outputs = self.online_convnet(self.state_ph)\n\n self._replay_net_outputs = self.online_convnet(self._replay.states)\n self._replay_next_target_net_outputs = self.target_convnet(\n self._replay.next_states)\n\n if self.acting_policy == 'hyperbolic':\n self._q_argmax = tf.argmax(self._net_outputs.hyp_q_value, axis=1)[0]\n elif self.acting_policy == 'largest_gamma':\n self._q_argmax = tf.argmax(self._net_outputs.q_values[-1], axis=1)[0]\n else:\n raise NotImplementedError", "def schema_generators():\n return {\n \"trips\": trips_schema,\n \"status_changes\": status_changes_schema,\n \"events\": events_schema,\n \"vehicles\": vehicles_schema,\n \"stops\": stops_schema\n }", "def create_packet_definition(packet_to_send):\n source_mac = \"00:00:00:00:00:01\"\n destination_mac = \"00:00:00:00:00:02\"\n source_ip = \"10.10.10.1\"\n destination_ip = \"10.10.10.2\"\n source_ip6 = 'fe80::214:f2ff:fe07:af0'\n destination_ip6 = 'ff02::1'\n sport = 1\n dport = 2\n tos = 4\n if packet_to_send[\"type\"] == \"ip\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}},\n {\"TCP\": {}})\n elif packet_to_send[\"type\"] == \"tagged_ip\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x8100}},\n {\"Dot1Q\": {\"vlan\": packet_to_send[\"vlan\"],\n \"prio\": packet_to_send[\"priority\"]}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}})\n elif packet_to_send[\"type\"] == \"tcp\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}},\n {\"TCP\": 
{\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"udp\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}},\n {\"UDP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"double_tagged_ip\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x8100}},\n {\"Dot1Q\": {\"vlan\": packet_to_send[\"outer_vlan\"], \"type\": 0x8100,\n \"prio\": packet_to_send[\"outer_priority\"]}},\n {\"Dot1Q\": {\"vlan\": packet_to_send[\"inner_vlan\"], \"type\": 0x0800,\n \"prio\": packet_to_send[\"inner_priority\"]}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"tos\": tos}})\n elif packet_to_send[\"type\"] == \"arp\":\n packet_definition = (\n {\"Ether\": {\"src\": source_mac, \"dst\": 'FF:FF:FF:FF:FF:FF', \"type\": 0x0806}},\n {\"ARP\": {\"op\": 1, \"hwsrc\": source_mac,\n \"psrc\": source_ip, \"pdst\": destination_ip}},)\n elif packet_to_send[\"type\"] == \"arp_reply_tagged\":\n packet_definition = ({\"Ether\": {\"src\": source_mac, \"dst\": destination_mac, \"type\": 0x8100}},\n {\"Dot1Q\": {\"vlan\": 2}},\n {\"ARP\": {\"op\": 2, \"hwsrc\": source_mac, \"hwdst\": destination_mac,\n \"pdst\": destination_ip, \"psrc\": source_ip}}, )\n elif packet_to_send[\"type\"] == \"icmp\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x0800}},\n {\"IP\": {\"dst\": destination_ip, \"src\": source_ip, \"proto\": 1}},\n {\"ICMP\": {\"type\": 8, \"code\": 0}})\n elif packet_to_send[\"type\"] == \"ipv6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"plen\": 64, \"tc\": 225}})\n elif packet_to_send[\"type\"] == \"tcp6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"tc\": 224, \"nh\": 6}},\n {\"TCP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"udp6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"tc\": 224, \"nh\": 17}},\n {\"UDP\": {\"sport\": sport, \"dport\": dport}})\n elif packet_to_send[\"type\"] == \"icmp6\":\n packet_definition = ({\"Ether\": {\"dst\": destination_mac, \"src\": source_mac, \"type\": 0x86dd}},\n {\"IPv6\": {\"dst\": destination_ip6, \"src\": source_ip6, \"version\": 6,\n \"hlim\": 255, \"tc\": 224, \"nh\": 1}},\n {\"ICMP\": {\"type\": 8, \"code\": 0}})\n return packet_definition", "def generateConfig(run,subrun,conditions):\n \n configname = (conditions.numcdir + \"/\" + str(run) + \"/\" + str(subrun)\n + \"/numc_config_\" + str(run) + \"_\" + str(subrun) + \".cfg\")\n \n configContents = \"\"\n \n configContents += \"[software]\\n\"\n if conditions.oldneut:\n configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.1.4.2_nd280_ROOTv5r34p09n01/src/neutgeom/setup.sh\\n\"\n elif conditions.newoldneut:\n configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.1.4.3_nd280/src/neutgeom/setup.sh\\n\"\n else:\n #configContents += \"neut_setup_script = 
/project/t/tanaka/T2K/neut/branches/5.3.1_nd280/src/neutgeom/setup.sh\\n\"\n #configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.3.1_nd280_wBBBA05/src/neutgeom/setup.sh\\n\"\n configContents += \"neut_setup_script = /project/t/tanaka/T2K/neut/branches/5.3.2_nd280/src/neutgeom/setup.sh\\n\"\n \n configContents += \"[geometry]\\n\"\n\n configContents += \"baseline = \" + conditions.geometry +\"\\n\"\n if conditions.waterair == \"water\":\n configContents += \"p0d_water_fill = 1\\n\"\n else:\n configContents += \"p0d_water_fill = 0\\n\"\n \n configContents += \"\"\"\n \n[configuration]\nmodule_list = neutMC\n\n[filenaming]\n\"\"\"\n configContents += \"comment = \" + conditions.comment + \"\\n\"\n configContents += \"run_number = \" + str(run) +\"\\n\"\n configContents += \"subrun = \" + str(subrun) + \"\\n\"\n\n if conditions.oldneut:\n configContents += \"\"\" \n\n[neutrino]\nneut_card = /project/t/tanaka/T2K/neut/branches/5.1.4.2_nd280_ROOTv5r34p09n01/src/neutgeom/neut.card\n\"\"\"\n elif conditions.newoldneut:\n configContents += \"\"\" \n\n[neutrino]\nneut_card = /project/t/tanaka/T2K/neut/branches/5.1.4.3_nd280/src/neutgeom/neut.card\n\"\"\"\n else:\n configContents += \"\"\" \n\n[neutrino]\nneut_card = /project/t/tanaka/T2K/neut/branches/5.3.2_nd280/src/neutgeom/neut.card\n\"\"\"\n\n configContents += \"flux_file = \" + conditions.ram_disk + \"/\" + conditions.flux_base + \"\\n\"\n\n#flux_file = flux_file\n#\"\"\"\n\n# configContents += \"flux_file_path = \" + conditions.ram_disk + \"/\" + conditions.flux_base\n\n# configContents += \"\"\" \n#flux_file_start = 1\n#flux_file_stop = 300\n#\"\"\"\n\n configContents += \"maxint_file = \" + conditions.maxint_file_local + \"\\n\"\n\n# default: 5e17 but for basket MC special production higher\n configContents += \"\"\" \npot = 5.0e17\nneutrino_type = beam\n\"\"\"\n if conditions.baskmagn == \"basket\":\n configContents += \"\"\" \nflux_region = basket\nmaster_volume = Basket \nrandom_start = 1\n\"\"\"\n elif conditions.baskmagn == \"magnet\":\n configContents += \"\"\" \nflux_region = magnet\nmaster_volume = Magnet \nrandom_start = 1\n\"\"\"\n else:\n print \"Unknown basket/magnet condition\"\n \n\n configContents += \"random_seed = \" + str(getRandom()) +\"\\n\"\n configContents += \"neut_seed1 = \" + str(getRandom())+\"\\n\" \n configContents += \"neut_seed2 = \" + str(getRandom())+\"\\n\" \n configContents += \"neut_seed3 = \" + str(getRandom())+\"\\n\" \n\n configContents += \"\\n\"\n configContents += \"[nd280mc]\\n\"\n configContents += \"mc_type=Neut_RooTracker \\n\"\n\n #print configContents\n\n try:\n macFile = open(configname,\"w\")\n macFile.write(configContents)\n \n except:\n print \"can't write config file\" \n \n\n return configname", "def defineTasks(self,partition):\n recv_slots = partition.recvSlices()\n strm_slots = partition.streamSlices()\n recvNodes = partition.recvNodesFromSlots()\n strmNodes = partition.streamNodesFromSlots()\n opt = '/'+self.manager.hostName()+'/'+partition.manager.name()+'/'+partition.name+'/'\n cl0 = '/Class0'+opt\n cl1 = '/Class1'+opt\n\n partition.setDataSources([])\n tasks = []\n pn = self.partitionName()\n print '---------------------- Partition name is:',pn\n for i in xrange(len(recv_slots)):\n slot = recv_slots[i]\n node = slot[:slot.find(':')]\n sub_farm = 'SF%02d'%(i,)\n short_name = sub_farm+'_SND' # Keep this name to ensure storageMon is working!\n task = pn+'_'+node+'_'+short_name\n 
tasks.append(node+'/'+task+'/'+short_name+'/RecStorageSend'+cl1+'(\"'+sub_farm+'\",'+str(i)+',)')\n partition.setRecvSenders(tasks)\n tasks = []\n for i in xrange(len(strm_slots)):\n slot = strm_slots[i]\n node = slot[:slot.find(':')]\n sub_farm = 'SF%02d'%(i,)\n short_name = sub_farm+'_HLT' # Keep this name to ensure storageMon is working!\n task = pn+'_'+node+'_'+short_name\n tasks.append(node+'/'+task+'/'+short_name+'/RecStorageRecv'+cl1+'(\"'+sub_farm+'\",'+str(i)+',)')\n partition.setStreamReceivers(tasks)\n cnt = 0\n tasks = []\n infra = []\n for j in recvNodes:\n for itm in self.rcvInfra.data:\n i,cl=itm.split('/')\n infra.append(j+'/'+pn+'_'+j+'_'+i+'/'+i+'/'+i+'/'+cl+opt+'(\"'+str(cnt)+'\",)')\n cnt = cnt + 1\n partition.setRecvInfrastructure(infra)\n partition.setRecvReceivers(tasks)\n cnt = 0\n tasks = []\n infra = []\n for j in strmNodes:\n for itm in self.strInfra.data:\n i,cl=itm.split('/')\n infra.append(j+'/'+pn+'_'+j+'_'+i+'/'+i+'/'+i+'/'+cl+opt+'(\"'+str(cnt)+'\",)')\n cnt = cnt + 1\n partition.setStreamInfrastructure(infra)\n partition.setStreamSenders(tasks)\n if partition.saveTasks():\n tasks = partition.collectTasks(tasks={},with_data_sources=0)\n return tasks\n return None", "def _generate_config(self, type, org, node):\n args = {}\n if type == \"peer\":\n args.update({\"peer_id\": \"{}.{}\".format(node, org)})\n args.update({\"peer_address\": \"{}.{}:{}\".format(node, org, 7051)})\n args.update(\n {\"peer_gossip_externalEndpoint\": \"{}.{}:{}\".format(node, org, 7051)})\n args.update(\n {\"peer_chaincodeAddress\": \"{}.{}:{}\".format(node, org, 7052)})\n args.update({\"peer_tls_enabled\": True})\n args.update({\"peer_localMspId\": \"{}MSP\".format(org.capitalize())})\n\n a = NodeConfig(org)\n a.peer(node, **args)\n else:\n args.update({\"General_ListenPort\": 7050})\n args.update(\n {\"General_LocalMSPID\": \"{}OrdererMSP\".format(org.capitalize())})\n args.update({\"General_TLS_Enabled\": True})\n args.update({\"General_BootstrapFile\": \"genesis.block\"})\n\n a = NodeConfig(org)\n a.orderer(node, **args)", "def build_net(graph, training=True, validation=False):\n\n with graph.as_default(): \n x = tf.placeholder(tf.float32, [None] + resize_shape, 'x')\n # TODO: use len(labels_map)\n y = tf.placeholder(tf.int32, [None, 17], 'y')\n phase_train = tf.placeholder(tf.bool, name='phase_train')\n keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n keep_prob_fc1 = tf.placeholder(tf.float32, name='keep_prob_fc1')\n learning_rate = tf.placeholder(tf.float32, name='learning_rate')\n\n # Create Input Pipeline for Train, Validation and Test Sets\n if training:\n batch, batch_labels, batch_image_paths = dsutils.create_input_pipeline(\n image_paths=image_paths[:index_split_train_val],\n labels=labels_onehot_list[:index_split_train_val],\n batch_size=batch_size,\n n_epochs=n_epochs,\n shape=input_shape,\n crop_factor=resize_factor,\n training=training,\n randomize=True)\n elif validation:\n batch, batch_labels, batch_image_paths = dsutils.create_input_pipeline(\n image_paths=image_paths[index_split_train_val:],\n labels=labels_onehot_list[index_split_train_val:],\n batch_size=batch_size,\n # only one epoch for test output\n n_epochs=1,\n shape=input_shape,\n crop_factor=resize_factor,\n training=training) \n else:\n batch, batch_labels, batch_image_paths = dsutils.create_input_pipeline(\n image_paths=test_image_paths,\n labels=test_onehot_list,\n batch_size=batch_size,\n # only one epoch for test output\n n_epochs=1,\n shape=input_shape,\n crop_factor=resize_factor,\n 
training=training)\n\n Ws = []\n \n current_input = x\n\n for layer_i, n_output in enumerate(n_filters):\n with tf.variable_scope('layer{}'.format(layer_i)):\n # 2D Convolutional Layer with batch normalization and relu\n h, W = utils.conv2d(x=current_input,\n n_output=n_output,\n k_h=filter_sizes[layer_i],\n k_w=filter_sizes[layer_i])\n h = tf.layers.batch_normalization(h, training=phase_train)\n h = tf.nn.relu(h, 'relu' + str(layer_i))\n\n # Apply Max Pooling Every 2nd Layer\n if layer_i % 2 == 0:\n h = tf.nn.max_pool(value=h,\n ksize=[1, 2, 2, 1],\n strides=[1, 2, 2, 1],\n padding='SAME')\n\n # Apply Dropout Every 2nd Layer\n if layer_i % 2 == 0:\n h = tf.nn.dropout(h, keep_prob)\n\n Ws.append(W)\n current_input = h\n\n h = utils.linear(current_input, fc_size, name='fc_t')[0]\n h = tf.layers.batch_normalization(h, training=phase_train)\n h = tf.nn.relu(h, name='fc_t/relu')\n h = tf.nn.dropout(h, keep_prob_fc1)\n\n logits = utils.linear(h, len(labels_map), name='fc_t2')[0]\n h = tf.nn.sigmoid(logits, 'fc_t2')\n\n # must be the same type as logits\n y_float = tf.cast(y, tf.float32)\n\n cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits,\n labels=y_float)\n loss = tf.reduce_mean(cross_entropy)\n\n if training:\n # update moving_mean and moving_variance so it will be available at inference time\n update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)\n with tf.control_dependencies(update_ops):\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n else:\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)\n \n saver = tf.train.Saver()\n init = tf.group(tf.global_variables_initializer(), tf.local_variables_initializer())\n return batch, batch_labels, batch_image_paths, init, x, y, phase_train, keep_prob, keep_prob_fc1, learning_rate, h, loss, optimizer, saver", "def build(self):\n\n LOG.debug('-' * 80)\n LOG.debug(\"build\")\n LOG.debug('-' * 80)\n for b in self._bridges:\n bridge = b['bridge']\n # TODO(tomohiko) Need to something when not bridge['provided']?\n if bridge['provided']:\n LOG.info('Skipped building bridge=%r', bridge)\n\n for h in self._hosts:\n host = h['host']\n if host.get('tunnel_zone'):\n tz_data = host.get('tunnel_zone')\n tzs = self._api.get_tunnel_zones()\n\n # Ensure that TZ exists\n tz = [t for t in tzs if t.get_name() == tz_data['name']]\n if tz == []:\n if is_vxlan_enabled():\n tz = self._api.add_vxlan_tunnel_zone()\n else:\n tz = self._api.add_gre_tunnel_zone()\n tz.name(tz_data['name'])\n tz.create()\n else:\n tz = tz[0]\n\n # Ensure that the host is in the TZ\n tz_hosts = tz.get_hosts()\n tz_host = filter(\n lambda x: x.get_host_id() == host['mn_host_id'],\n tz_hosts)\n if tz_host == []:\n tz_host = tz.add_tunnel_zone_host()\n tz_host.ip_address(tz_data['ip_addr'])\n tz_host.host_id(host['mn_host_id'])\n tz_host.create()\n\n\n if host['provided'] == True:\n LOG.info('Skipped building host=%r', host)\n else:\n #TODO(tomoe): when we support provisioning Midolman host with\n # this tool.\n pass\n interfaces = host['interfaces']\n\n futures = []\n for i in interfaces:\n iface = Interface(i['interface'], host)\n self._interfaces[(host['id'], i['interface']['id'])] = iface\n f = iface.create()\n futures.append(f)\n\n wait_on_futures(futures)\n\n LOG.debug('-' * 80)\n LOG.debug(\"end build\")\n LOG.debug('-' * 80)", "def _makeConfig(self, store):\n config = PortConfiguration()\n config.parent = CommandStub(store, \"port\")\n return config", "def definitions(self) -> Dict[str, GraphOutput]:\n # 
Get the right output dictionary.\n d = self._manual_outputs if len(self._manual_outputs) > 0 else self._default_outputs\n\n # Extract port definitions (Neural Types) and return an immutable dictionary,\n # so the user won't be able to modify its content by an accident!\n return frozendict({k: v.ntype for k, v in d.items()})", "def generate_nnie_config(nnie_cfg, config, nnie_out_path='./config.json', tensor_type='float'):\n u8_start = False if tensor_type == 'float' else False\n default_config = {\n \"default_net_type_token\": \"nnie\",\n \"rand_input\": False,\n \"data_num\": 100,\n \"input_path_map\": {\n \"data\": \"./image_bins\",\n },\n \"nnie\": {\n \"max_batch\": 1,\n \"output_names\": [],\n \"mapper_version\": 11,\n \"u8_start\": u8_start,\n \"device\": \"gpu\",\n \"verbose\": False,\n \"image_path_list\": [\"./image_list.txt\"],\n \"mean\": [128, 128, 128],\n \"std\": [1, 1, 1]\n }\n }\n image_path_list = nnie_cfg['image_path_list']\n assert os.path.exists(image_path_list)\n with open(image_path_list, 'r') as f:\n image_list = [item.strip() for item in f.readlines()]\n\n mean = config.to_kestrel.get('pixel_means', [123.675, 116.28, 103.53])\n std = config.to_kestrel.get('pixel_stds', [58.395, 57.12, 57.375])\n resize_hw = config.to_kestrel.get('resize_hw', (224, 224))\n resize_hw = tuple(resize_hw)\n data_num = len(image_list)\n image_bin_path = generate_image_bins(image_list, mean, std, resize_hw)\n default_config['data_num'] = data_num\n default_config['input_path_map']['data'] = image_bin_path\n default_config['nnie']['max_batch'] = nnie_cfg.get('max_batch', 1)\n default_config['nnie']['mapper_version'] = nnie_cfg.get('mapper_version', 11)\n default_config['nnie']['image_path_list'] = [image_path_list]\n default_config['nnie']['mean'] = [128] * len(std)\n default_config['nnie']['std'] = [1] * len(std)\n with open(nnie_out_path, \"w\") as f:\n json.dump(default_config, f, indent=2)\n\n return nnie_out_path", "def _gen_config():\n cfg = {\"frontends\": {}, \"backends\": {}}\n for machine in Machine.objects(\n monitoring__hasmonitoring=True,\n ):\n frontend, backend = _gen_machine_config(machine)\n cfg[\"frontends\"][machine.id] = frontend\n cfg[\"backends\"][machine.id] = backend\n return cfg", "def genConfig():\n\n cfg = open('/home/sevudan/Scripts/projects/topogen/result.cfg','w')\n template = getTemplate()\n G = topo.topology()\n gen_config_lo(G, cfg)\n # Get node from list nodes.\n for node in sorted(G.nodes):\n d = dict(G[node])\n hostname = node\n # Get attributes for node.\n peer = d.keys()\n for peer_node in peer:\n params = d.get(peer_node)\n conf = template.render(\n node=hostname,\n description = peer_node,\n ifd = params.get('ifd'),\n local_ifl = params.get('local_ifl'),\n peer_ifl = params.get('peer_ifl'),\n ifa = params.get('ip_address')\n )\n result = '{}{}'.format(conf,'\\n')\n cfg.write(result)\n cfg.close()", "def genMultiTenantPattern():\n global tenant_list, app_tenant\n tenant_app = {}\n tenant_api = {}\n\n # divide apps among tenants\n i = 0\n for app in app_list:\n i += 1\n tenant = tenant_list[i%len(tenant_list)]\n apis = app_api_subs.get(app)\n\n if tenant['name'] not in tenant_app:\n tenant_app[tenant['name']] = []\n tenant_app.get(tenant['name']).append(app)\n\n if tenant['name'] not in tenant_api:\n tenant_api[tenant['name']] = set()\n for api in apis:\n tenant_api.get(tenant['name']).add(api)\n\n app_tenant[app] = tenant['name']\n \n for tenant in tenant_api:\n tenant_api[tenant] = list(tenant_api.get(tenant))\n \n # generate tenant_details.yaml file\n 
with open(abs_path + '/../../data/scenario/tenant_details.yaml', 'w') as f:\n yaml.dump({'tenant_apps': tenant_app}, f, sort_keys=False)\n yaml.dump({'tenant_apis': tenant_api}, f, sort_keys=False)", "def _send_all_data(self):\n admin_context = qcontext.get_admin_context()\n networks = {}\n ports = {}\n\n all_networks = super(QuantumRestProxyV2,\n self).get_networks(admin_context) or []\n for net in all_networks:\n networks[net.get('id')] = {\n 'id': net.get('id'),\n 'name': net.get('name'),\n 'op-status': net.get('admin_state_up'),\n }\n\n subnets = net.get('subnets', [])\n for subnet_id in subnets:\n subnet = self.get_subnet(admin_context, subnet_id)\n gateway_ip = subnet.get('gateway_ip')\n if gateway_ip:\n # FIX: For backward compatibility with wire protocol\n networks[net.get('id')]['gateway'] = gateway_ip\n\n ports = []\n net_filter = {'network_id': [net.get('id')]}\n net_ports = super(QuantumRestProxyV2,\n self).get_ports(admin_context,\n filters=net_filter) or []\n for port in net_ports:\n port_details = {\n 'id': port.get('id'),\n 'attachment': {\n 'id': port.get('id') + '00',\n 'mac': port.get('mac_address'),\n },\n 'state': port.get('status'),\n 'op-status': port.get('admin_state_up'),\n 'mac': None\n }\n ports.append(port_details)\n networks[net.get('id')]['ports'] = ports\n try:\n resource = '/topology'\n data = {\n 'networks': networks,\n }\n ret = self.servers.put(resource, data)\n if not self.servers.action_success(ret):\n raise RemoteRestError(ret[2])\n return ret\n except RemoteRestError as e:\n LOG.error(_('QuantumRestProxy: Unable to update remote network: '\n '%s'), e.message)\n raise", "def rr2_assign_ports():\n print('******* Assigning ports')\n for device in ci_addrs.switches_rr1_12:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_rr2_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_rr1_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')\n for device in ci_addrs.switches_rr2_12:\n print ('******* Connecting to ', device.get('ip'))\n net_connect = ConnectHandler(**device)\n output_vlan_cp = net_connect.send_config_set(cp_rr2_config_commands)\n print (output_vlan_cp)\n output_vlan_cloud = net_connect.send_config_set(cloud_rr2_config_commands)\n print (output_vlan_cloud)\n output_vlan_f5 = net_connect.send_config_set(f5_config_commands)\n print (output_vlan_f5)\n net_connect.send_config_set('wr')\n assign_ports_n5k34()", "def assemblenet_plus_generator(block_fn,\n layers,\n num_classes,\n data_format='channels_last'):\n\n def model(inputs, is_training):\n \"\"\"Creation of the model graph.\"\"\"\n\n tf.logging.info(FLAGS.model_structure)\n tf.logging.info(FLAGS.model_edge_weights)\n structure = json.loads(FLAGS.model_structure)\n\n if FLAGS.use_object_input:\n feature_shape = inputs[0].shape\n original_inputs = inputs[0]\n object_inputs = inputs[1]\n else:\n feature_shape = inputs.shape\n original_inputs = inputs\n object_inputs = None\n\n batch_size = feature_shape[0] // FLAGS.num_frames\n original_num_frames = FLAGS.num_frames\n num_frames = original_num_frames\n\n grouping = {-3: [], -2: [], -1: [], 0: [], 1: [], 2: [], 3: []}\n for i in range(len(structure)):\n grouping[structure[i][0]].append(i)\n\n stem_count = len(grouping[-3]) + len(grouping[-2]) + len(grouping[-1])\n\n assert stem_count != 0\n 
stem_filters = 128 // stem_count\n\n if grouping[-2]:\n # Instead of loading optical flows as inputs from data pipeline, we are\n # applying the \"Representation Flow\" to RGB frames so that we can compute\n # the flow within TPU/GPU on fly. It's essentially optical flow since we\n # do it with RGBs.\n flow_inputs = rf.rep_flow(\n original_inputs,\n batch_size,\n original_num_frames,\n num_iter=40,\n is_training=is_training,\n bottleneck=1,\n scope='rep_flow')\n streams = []\n\n for i in range(len(structure)):\n with tf.variable_scope('Node_' + str(i)):\n if structure[i][0] == -1:\n inputs = asn.rgb_conv_stem(original_inputs,\n original_num_frames,\n stem_filters,\n structure[i][1],\n is_training,\n data_format)\n streams.append(inputs)\n elif structure[i][0] == -2:\n inputs = asn.flow_conv_stem(flow_inputs,\n stem_filters,\n structure[i][1],\n is_training,\n data_format)\n streams.append(inputs)\n elif structure[i][0] == -3:\n # In order to use the object inputs, you need to feed your object\n # input tensor here.\n inputs = object_conv_stem(object_inputs,\n data_format)\n streams.append(inputs)\n else:\n block_number = structure[i][0]\n\n combined_inputs = [streams[structure[i][1][j]]\n for j in range(0, len(structure[i][1]))]\n\n tf.logging.info(grouping)\n nodes_below = []\n for k in range(-3, structure[i][0]):\n nodes_below = nodes_below + grouping[k]\n\n peers = []\n if FLAGS.attention_mode:\n lg_channel = -1\n tf.logging.info(nodes_below)\n for k in nodes_below:\n tf.logging.info(streams[k].shape)\n lg_channel = max(streams[k].shape[3], lg_channel)\n\n for node_index in nodes_below:\n attn = tf.reduce_mean(streams[node_index], [1, 2])\n\n attn = tf.layers.dense(\n inputs=attn,\n units=lg_channel,\n kernel_initializer=tf.random_normal_initializer(stddev=.01))\n peers.append(attn)\n\n combined_inputs = fusion_with_peer_attention(\n combined_inputs,\n index=i,\n attention_mode=FLAGS.attention_mode,\n attention_in=peers,\n use_5d_mode=False,\n data_format=data_format)\n\n graph = asn.block_group(\n inputs=combined_inputs,\n filters=structure[i][2],\n block_fn=block_fn,\n blocks=layers[block_number],\n strides=structure[i][4],\n is_training=is_training,\n name='block_group' + str(i),\n block_level=structure[i][0],\n num_frames=num_frames,\n temporal_dilation=structure[i][3],\n data_format=data_format)\n\n streams.append(graph)\n\n outputs = asn.multi_stream_heads(streams,\n grouping[3],\n original_num_frames,\n num_classes,\n data_format)\n\n return outputs\n\n model.default_image_size = 224\n return model", "def generate_config(self):\n\n # Change crypto-config.yaml and add organizations\n yaml = YAML()\n with open(os.path.join(self.config_path, \"crypto-config-template.yaml\"), \"r\") as crypto_config_file:\n config = yaml.load(crypto_config_file)\n\n config[\"OrdererOrgs\"][0][\"Specs\"] = []\n for orderer_index in range(1, self.num_validators + 1):\n orderer_host, _ = self.experiment.get_peer_ip_port_by_id(orderer_index)\n config[\"OrdererOrgs\"][0][\"Specs\"].append({\n \"Hostname\": \"orderer%d\" % orderer_index,\n \"SANS\": [orderer_host]\n })\n\n config[\"PeerOrgs\"] = []\n for organization_index in range(1, self.num_validators + 1):\n organization_host, _ = self.experiment.get_peer_ip_port_by_id(organization_index)\n organization_config = {\n \"Name\": \"Org%d\" % organization_index,\n \"Domain\": \"org%d.example.com\" % organization_index,\n \"EnableNodeOUs\": True,\n \"Template\": {\n \"Count\": 1,\n \"SANS\": [organization_host]\n },\n \"Users\": {\n \"Count\": 1\n }\n }\n 
config[\"PeerOrgs\"].append(organization_config)\n\n with open(os.path.join(self.config_path, \"crypto-config.yaml\"), \"w\") as crypto_config_file:\n yaml.dump(config, crypto_config_file)\n\n # Change configtx.yaml\n yaml = YAML()\n with open(os.path.join(self.config_path, \"configtx-template.yaml\"), \"r\") as configtx_file:\n config = yaml.load(configtx_file)\n\n config[\"Profiles\"][\"TwoOrgsChannel\"][\"Application\"][\"Organizations\"] = []\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Consortiums\"][\"SampleConsortium\"][\"Organizations\"] = []\n\n for organization_index in range(1, self.num_validators + 1):\n org_admin = \"Org%dMSP.admin\" % organization_index\n org_peer = \"Org%dMSP.peer\" % organization_index\n org_client = \"Org%dMSP.client\" % organization_index\n organization_host, _ = self.experiment.get_peer_ip_port_by_id(organization_index)\n\n organization_config = {\n \"Name\": \"Org%dMSP\" % organization_index,\n \"ID\": \"Org%dMSP\" % organization_index,\n \"MSPDir\": \"crypto-config/peerOrganizations/org%d.example.com/msp\" % organization_index,\n \"Policies\": {\n \"Readers\": {\n \"Type\": \"Signature\",\n \"Rule\": \"OR('%s', '%s', '%s')\" % (org_admin, org_peer, org_client)\n },\n \"Writers\": {\n \"Type\": \"Signature\",\n \"Rule\": \"OR('%s', '%s')\" % (org_admin, org_peer)\n },\n \"Admins\": {\n \"Type\": \"Signature\",\n \"Rule\": \"OR('%s')\" % (org_admin)\n }\n },\n \"AnchorPeers\": [{\n \"Host\": organization_host,\n \"Port\": 7000 + organization_index\n }]\n }\n\n commented_map = CommentedMap(organization_config)\n commented_map.yaml_set_anchor(\"Org%d\" % organization_index, always_dump=True)\n config[\"Organizations\"].append(commented_map)\n config[\"Profiles\"][\"TwoOrgsChannel\"][\"Application\"][\"Organizations\"].append(commented_map)\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Consortiums\"][\"SampleConsortium\"][\"Organizations\"]\\\n .append(commented_map)\n\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"EtcdRaft\"][\"Consenters\"] = []\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"Addresses\"] = []\n\n for organization_index in range(1, self.num_validators + 1):\n organization_host, _ = self.experiment.get_peer_ip_port_by_id(organization_index)\n consenter_port = 7000 + organization_index\n consenter_info = {\n \"Host\": organization_host,\n \"Port\": consenter_port,\n \"ClientTLSCert\": \"crypto-config/ordererOrganizations/example.com/orderers/\"\n \"orderer%d.example.com/tls/server.crt\" % organization_index,\n \"ServerTLSCert\": \"crypto-config/ordererOrganizations/example.com/orderers/\"\n \"orderer%d.example.com/tls/server.crt\" % organization_index\n }\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"EtcdRaft\"][\"Consenters\"].append(consenter_info)\n config[\"Profiles\"][\"SampleMultiNodeEtcdRaft\"][\"Orderer\"][\"Addresses\"].append(\n \"%s:%d\" % (organization_host, consenter_port))\n\n with open(os.path.join(self.config_path, \"configtx.yaml\"), \"w\") as configtx_file:\n round_trip_dump(config, configtx_file, Dumper=RoundTripDumper)", "def get_output_hot(self):\n hot_cont = hot.HOT()\n prop = dict()\n\n # MARK: CAN be better... 
relative straight forward\n if self.sep_access_port:\n port_suffix = ('pt', 'pt_in', 'pt_out')\n else:\n port_suffix = ('pt_in', 'pt_out')\n for srv_grp in self.srv_grp_lst:\n for srv in srv_grp:\n networks = list()\n # Remote access, ingress and egress ports\n for suffix in port_suffix:\n port_name = '_'.join((srv['name'], suffix))\n prop = {\n 'name': port_name,\n 'network_id': self.network_id['net'],\n # A list of subnet IDs\n 'fixed_ips': [{'subnet_id': self.network_id['subnet']}],\n # TODO: Add support for security group\n # 'security_groups': [self.network_id['sec_grp']]\n }\n networks.append(\n {'port': '{ get_resource: %s }' % port_name})\n hot_cont.resource_lst.append(\n hot.Resource(port_name, 'port', prop))\n\n if self.fip_port:\n prop = {\n 'floating_network': self.network_id['public'],\n }\n if self.fip_port == 'pt':\n prop['port_id'] = '{ get_resource: %s }' % (\n srv['name'] + '_pt')\n elif self.fip_port == 'pt_in':\n prop['port_id'] = '{ get_resource: %s }' % (\n srv['name'] + '_pt_in')\n elif self.fip_port == 'pt_out':\n prop['port_id'] = '{ get_resource: %s }' % (\n srv['name'] + '_pt_out')\n else:\n raise ServerChainError('Invalid floating IP port!')\n\n hot_cont.resource_lst.append(\n hot.Resource(srv['name'] + '_fip', 'fip', prop))\n\n prop = {\n 'name': srv['name'],\n 'image': srv['image'],\n 'flavor': srv['flavor'],\n 'networks': networks\n }\n\n if srv.get('ssh', None):\n prop['key_name'] = srv['ssh']['pub_key_name']\n\n # MARK: Only test RAW bash script\n if srv.get('init_script', None):\n logger.debug('Read the init bash script: %s'\n % srv['init_script'])\n with open(srv['init_script'], 'r') as f:\n # MARK: | is needed after user_data\n prop.update(\n {'user_data': '|\\n' + f.read()}\n )\n\n hot_cont.resource_lst.append(\n hot.Resource(srv['name'], 'server', prop))\n\n return hot_cont.output_yaml_str()", "def generate_files_from_network(id):\n\tfolder_prefix = \"results/\"+id+\"/\"\n\tnetwork_prefix = \"results/\"+id+\"_\"\n\tg = open(network_prefix+'network.json', 'r')\n\tdata = json.load(g)\n\tnames = []\n\tfor node in data:\n\t\tmy_name = data[node]['my_name']\n\t\tnames.append(my_name)\n\t\ttargets = data[node]['target']\n\t\tn_receive = data[node]['receivers']\n\n\t\t#generate_python_file_from_node(folder_prefix, my_name, targets, n_receive)\n\n\tg.close()\n\n\n\n\twith open(folder_prefix+'run.sh', 'w') as f:\n\t\tfor name in names:\n\t\t\tif name!=names[-1]:\n\t\t\t\tf.write('python ../../run_node.py '+name+' '+id+' &\\n')\n\t\t\telse:\n\t\t\t\tf.write('python ../../run_node.py '+name+' '+id+' \\n')\n\n\n\twith open(folder_prefix+'start.sh', 'w') as f:\n\t\tf.write('simulaqron reset\\nsimulaqron set backend qutip\\nsimulaqron start --nodes ')\n\t\tfor name in names:\n\t\t\tif name!=names[-1]:\n\t\t\t\tf.write(name+',')\n\t\t\telse:\n\t\t\t\tf.write(name)\n\treturn", "def create_network(outfname_train, outfname_deploy, N_conv_layers=3, N_fully_connected_layers=3, batch_size_train=100,batch_size_test=100, source_train='datatrain', source_test='datatest', num_output_conv=32, kernel_size=3, weight_std_conv=0.01, activation='relu', num_output_fully_connected=64, weight_std_fully_connected=0.01, do_batchnorm=1, do_last_batchnorm=1, scale=1,shift=0, weight_std_affine=0, use_softmax=0, num_classes=3, input_dim_1=1,input_dim_2=3, input_dim_3=32, input_dim_4=32, use_lowrank=1, T_dimension=None, softmax_weight=1, lowrank_weight=1, data_type='lmdb'):\n\n if T_dimension==None:\n T_dimension = num_classes\n \n train_txt = \"\"\n deploy_txt = \"\"\n\n train_txt += 
data_layer(name='data_layer', source_train=source_train, batch_size_train=batch_size_train, source_test=source_test, batch_size_test=batch_size_test, data_type=data_type)\n\n deploy_txt += deploy_data_layer(name='data_layer', input_dim_1=input_dim_1, input_dim_2=input_dim_2, input_dim_3=input_dim_3, input_dim_4=input_dim_4)\n\n last_name = 'data'\n\n ####### CONVOLUTIONAL LAYERS\n for i in range(N_conv_layers):\n conv_name = 'conv%i' % (i+1)\n top = conv_name\n\n conv_txt = convolution_layer(conv_name, last_name, num_output=num_output_conv, kernel_size=kernel_size, weight_std=weight_std_conv)\n\n train_txt += conv_txt\n deploy_txt += conv_txt\n \n if activation == 'pool':\n pool_name = 'pool%i' % (i+1)\n activation_txt = pooling_layer(pool_name, conv_name)\n last_name = pool_name\n elif activation == 'relu':\n relu_name = 'relu%i' % (i+1)\n activation_txt = relu_layer(relu_name, conv_name)\n last_name = conv_name\n else:\n raise Exception('Unknown activation')\n \n\n train_txt += activation_txt\n deploy_txt += activation_txt\n\n \n\n ####### FULLY CONNECTED LAYERS\n for i in range(N_fully_connected_layers):\n fully_connected_name = 'ip%i' % (i+1)\n\n fully_connected_txt = fully_connected_layer(fully_connected_name, last_name, num_output=num_output_fully_connected, weight_std=weight_std_fully_connected)\n\n relu_name = 'iprelu%i' % (i+1)\n relu_txt = relu_layer(relu_name, fully_connected_name)\n\n batchnorm_name = 'ipbn%i' % (i+1)\n\n if do_batchnorm and i<N_fully_connected_layers-1:\n batchnorm_txt_train = batchnorm_layer(batchnorm_name, fully_connected_name, use_global_stats=False, phase='TRAIN', deploy=False)\n batchnorm_txt_test = batchnorm_layer(batchnorm_name, fully_connected_name, use_global_stats=True, phase='TEST', deploy=False)\n \n batchnorm_txt_deploy = batchnorm_layer(batchnorm_name, fully_connected_name, deploy=True)\n scale_txt = ''\n \n last_name = batchnorm_name\n \n elif do_last_batchnorm:\n batchnorm_txt_train = batchnorm_layer(batchnorm_name, fully_connected_name, use_global_stats=False, phase='TRAIN', deploy=False)\n batchnorm_txt_test = batchnorm_layer(batchnorm_name, fully_connected_name, use_global_stats=True, phase='TEST', deploy=False)\n \n batchnorm_txt_deploy = batchnorm_layer(batchnorm_name, fully_connected_name, deploy=True)\n scale_name = 'ipbnscaled%i' % (i+1)\n\n scale_txt = scale_layer(scale_name, batchnorm_name, scale=scale,shift=shift)\n \n last_name = scale_name\n else:\n batchnorm_txt_train = ''\n batchnorm_txt_test = ''\n batchnorm_txt_deploy = ''\n last_name = fully_connected_name\n scale_txt = ''\n \n train_txt += fully_connected_txt + relu_txt + batchnorm_txt_train + batchnorm_txt_test + scale_txt\n deploy_txt += fully_connected_txt + relu_txt + batchnorm_txt_deploy + scale_txt\n \n\n\n\n\n # add affine layer on top of funnel layer \n affine_name = 'affine' # (matrix T)\n affine_txt = fully_connected_layer(affine_name, last_name, num_output=T_dimension, weight_std=weight_std_affine)\n\n train_txt += affine_txt\n deploy_txt += affine_txt\n \n # apply lowrank loss to output of 'affine' layer [conv - fully_connected -\n # funnel - affine - lowrank] the lowrank output is located in affine. 
The\n # 'funnel' layer is used to allow softmax to separate between classes before\n # LRT\n if use_lowrank:\n lowrank_txt = lowrank_layer('lowrank_loss', affine_name, loss_weight=lowrank_weight)\n train_txt += lowrank_txt\n\n if use_softmax:\n # apply softmax loss to output of funnel layer [conv - fully_connected - funnel - softmax]\n # add one affine layer to reduce from num_output_fully_connected to num_classes\n\n # apr 4. trying on top of fully connected layer\n funnel_name = 'funnel'\n funnel_txt = fully_connected_layer(funnel_name, last_name, num_output=num_classes, weight_std=weight_std_fully_connected)\n\n train_txt += funnel_txt\n deploy_txt += funnel_txt\n\n softmax_txt = softmax_layer('softmax_loss', funnel_name, loss_weight=softmax_weight)\n train_txt += softmax_txt\n\n write_to_file(outfname_train, train_txt)\n write_to_file(outfname_deploy, deploy_txt)\n\n \n return train_txt, deploy_txt", "def create_dict_tg():\n for i in range(NUM_OF_SHARDS):\n key_tg = \"tg_s\" + str(i)\n array_target_group = parse_network_config(key_tg)\n dict_tg_https_wss[\"tg_https\"].append(array_target_group[0])\n dict_tg_https_wss[\"tg_wss\"].append(array_target_group[1])", "def get_train_generators(cf, logger):\n config_file = os.environ[CONFIG_ENV_VAR]\n config = load_config(config_file)\n\n all_sections = find_all_subdir_sections(config)\n\n # separate into training and validation folds randomly\n fold_ratios = config[\"train_validation_splits\"]\n # rng = np.random.default_rng(seed=config[\"split_random_seed\"])\n # rng.shuffle(all_sections)\n rnd = random.Random(config[\"split_random_seed\"])\n rnd.shuffle(all_sections)\n split_idx = round(fold_ratios[0] * len(all_sections))\n train_sections = all_sections[:split_idx]\n val_sections = all_sections[split_idx:]\n\n logger.info(\n \"Loaded %d annotation sections, using %d train, %d val\"\n % (len(all_sections), len(train_sections), len(val_sections))\n )\n\n train_pipeline = create_data_gen_pipeline(\n train_sections, cf=cf, annotation_config=config, is_training=True\n )\n val_pipeline = create_data_gen_pipeline(\n val_sections, cf=cf, annotation_config=config, is_training=False\n )\n batch_gen = {\n \"train\": train_pipeline,\n \"val_sampling\": val_pipeline,\n \"n_val\": len(val_sections),\n }\n # batch_gen[\"val_patient\"] = create_data_gen_pipeline(\n # val_sections, cf=cf, annotation_config=config, is_training=False\n # )\n\n return batch_gen", "def abc_transfer_steps(self):\n return [\n (20, 'abc_transfer_wizard'),\n (40, 'abc_create_invoice'),\n (60, 'abc_confirm_invoice')]", "def install_sample(self, datapath, table_id):\n parser = datapath.ofproto_parser\n ofproto = datapath.ofproto\n # Incoming port 1.\n in_port = 1;\n for timeout in range(60, 1 ,-1):\n # Incoming Ethernet destination\n match = self.create_match(parser,\n {ofproto.OXM_OF_METADATA: timeout})\n # Output to port 2.\n output = parser.OFPActionOutput(2, 0)\n write = parser.OFPInstructionActions(ofproto.OFPIT_APPLY_ACTIONS,\n [output])\n instructions = [write]\n flow_mod = self.create_flow_add(datapath, 100, timeout,\n table_id, match, instructions)\n datapath.send_msg(flow_mod)\n\n print \"sent flow_mod\"", "def _generate_table(self):\n for i in xrange(32):\n dest = [0]\n gw = [0]\n self._table.append(\n {'destination': dest, 'gateway': gw}\n )", "def create_feed_forward_per_arm_network(observation_spec, global_layers,\n arm_layers, common_layers):\n\n def _create_action_spec(output_dim):\n return tensor_spec.BoundedTensorSpec(\n shape=(), minimum=0, maximum=output_dim - 1, 
dtype=tf.int32)\n\n global_output_dim = global_layers[-1]\n global_network = q_network.QNetwork(\n input_tensor_spec=observation_spec[bandit_spec_utils.GLOBAL_FEATURE_KEY],\n action_spec=_create_action_spec(global_output_dim),\n fc_layer_params=global_layers[:0])\n arm_output_dim = arm_layers[-1]\n one_dim_per_arm_obs = tensor_spec.TensorSpec(\n shape=observation_spec[bandit_spec_utils.PER_ARM_FEATURE_KEY].shape[1:],\n dtype=tf.float32)\n arm_network = q_network.QNetwork(\n input_tensor_spec=one_dim_per_arm_obs,\n action_spec=_create_action_spec(arm_output_dim),\n fc_layer_params=arm_layers[:0])\n common_input_dim = global_output_dim + arm_output_dim\n common_input_spec = tensor_spec.TensorSpec(\n shape=(common_input_dim,), dtype=tf.float32)\n common_network = q_network.QNetwork(\n input_tensor_spec=common_input_spec,\n action_spec=_create_action_spec(1),\n fc_layer_params=common_layers)\n return GlobalAndArmFeatureNetwork(observation_spec, global_network,\n arm_network, common_network)", "def configure_steps(\n self,\n config: ConfigDict,\n len_train: int,\n len_test: int,\n ):\n # Set required defaults if not present\n if \"batch_size\" not in config:\n batch_size = 2 * jax.device_count()\n else:\n batch_size = config[\"batch_size\"]\n if \"num_epochs\" not in config:\n num_epochs = 10\n else:\n num_epochs = config[\"num_epochs\"]\n\n # Determine sharded vs. batch partition\n if batch_size % jax.device_count() > 0:\n raise ValueError(\"Batch size must be divisible by the number of devices\")\n self.local_batch_size: int = batch_size // jax.process_count()\n\n # Training steps\n self.steps_per_epoch: int = len_train // batch_size\n config[\"steps_per_epoch\"] = self.steps_per_epoch # needed for creating lr schedule\n self.num_steps: int = int(self.steps_per_epoch * num_epochs)\n\n # Evaluation (over testing set) steps\n num_validation_examples: int = len_test\n if \"steps_per_eval\" not in config:\n self.steps_per_eval: int = num_validation_examples // batch_size\n else:\n self.steps_per_eval = config[\"steps_per_eval\"]\n\n # Determine monitoring steps\n if \"steps_per_checkpoint\" not in config:\n self.steps_per_checkpoint: int = self.steps_per_epoch * 10\n else:\n self.steps_per_checkpoint = config[\"steps_per_checkpoint\"]\n\n if \"log_every_steps\" not in config:\n self.log_every_steps: int = self.steps_per_epoch * 20\n else:\n self.log_every_steps = config[\"log_every_steps\"]", "def batch_run_cfg2json():\n cfg_path = os.environ.get(\"CFG_FILE_PATH\")\n cfg_list = ['any_n1.cfg',\n 'ir_grism_n2.cfg',\n 'ir_grism_n4.cfg',\n 'ir_any_n2.cfg',\n 'ir_any_n4.cfg',\n 'uvis_any_n2.cfg',\n 'uvis_any_n4.cfg',\n 'uvis_any_n6.cfg',\n 'uvis_any_pre2012_n2.cfg',\n 'uvis_any_pre2012_n4.cfg',\n 'uvis_any_pre2012_n6.cfg',\n 'wfc_any_n2.cfg',\n 'wfc_any_n4.cfg',\n 'wfc_any_n6.cfg',\n 'sbc_blue_n2.cfg',\n 'sbc_blue_n6.cfg',\n 'sbc_any_n2.cfg',\n 'sbc_any_n6.cfg',\n 'hrc_any_n2.cfg',\n 'hrc_any_n4.cfg',\n 'hrc_any_n6.cfg']\n for cfgfile in cfg_list:\n cfgfile = os.path.join(cfg_path, cfgfile)\n cfg2json(cfgfile)\n\n cfg_path = os.path.realpath(__file__).replace(\"devutils/pars_utils.py\", \"pars/\")\n out_path = os.path.realpath(__file__).replace(\"devutils/pars_utils.py\", \"pars/hap_pars/any/\")\n cfg_list = [\"astrodrizzle_filter_hap.cfg\", \"astrodrizzle_single_hap.cfg\", \"astrodrizzle_total_hap.cfg\"]\n for cfgfile in cfg_list:\n cfgfile = os.path.join(cfg_path, cfgfile)\n cfg2json(cfgfile, outpath=out_path)", "def main(ft_setups, ft_strategies):\n\n num_procs = 16\n\n # initialize level 
parameters\n level_params = dict()\n level_params['restol'] = 1e-09\n\n # initialize step parameters\n step_params = dict()\n step_params['maxiter'] = 50\n\n # initialize space transfer parameters\n space_transfer_params = dict()\n space_transfer_params['finter'] = True\n space_transfer_params['rorder'] = 2\n space_transfer_params['iorder'] = 6\n\n # initialize sweeper parameters\n sweeper_params = dict()\n sweeper_params['quad_type'] = 'RADAU-RIGHT'\n sweeper_params['num_nodes'] = [3]\n\n # initialize controller parameters\n controller_params = dict()\n controller_params['logger_level'] = 30\n\n for setup in ft_setups:\n if setup == 'HEAT':\n # initialize problem parameters\n problem_params = dict()\n problem_params['nu'] = 0.5\n problem_params['freq'] = 1\n problem_params['nvars'] = [255, 127]\n problem_params['bc'] = 'dirichlet-zero'\n\n level_params['dt'] = 0.5\n\n space_transfer_params['periodic'] = False\n\n # fill description dictionary for easy step instantiation\n description = dict()\n description['problem_class'] = heatNd_forced # pass problem class\n description['problem_params'] = problem_params # pass problem parameters\n description['sweeper_class'] = imex_1st_order # pass sweeper (see part B)\n description['sweeper_params'] = sweeper_params # pass sweeper parameters\n description['level_params'] = level_params # pass level parameters\n description['step_params'] = step_params # pass step parameters\n description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class\n description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer\n\n # setup parameters \"in time\"\n t0 = 0.0\n Tend = 8.0\n\n elif setup == 'ADVECTION':\n # initialize problem parameters\n problem_params = dict()\n problem_params['c'] = 1.0\n problem_params['nvars'] = [256, 128]\n problem_params['freq'] = 2\n problem_params['order'] = 2\n problem_params['bc'] = 'periodic' # boundary conditions\n\n level_params['dt'] = 0.125\n\n space_transfer_params['periodic'] = True\n\n # fill description dictionary for easy step instantiation\n description = dict()\n description['problem_class'] = advectionNd # pass problem class\n description['problem_params'] = problem_params # pass problem parameters\n description['sweeper_class'] = generic_implicit # pass sweeper (see part B)\n description['sweeper_params'] = sweeper_params # pass sweeper parameters\n description['level_params'] = level_params # pass level parameters\n description['step_params'] = step_params # pass step parameters\n description['space_transfer_class'] = mesh_to_mesh # pass spatial transfer class\n description['space_transfer_params'] = space_transfer_params # pass paramters for spatial transfer\n\n # setup parameters \"in time\"\n t0 = 0.0\n Tend = 2.0\n\n else:\n raise NotImplementedError('setup not implemented')\n\n # do a reference run without any faults to see how things would look like (and to get maxiter/ref_niter)\n ft.strategy = 'NOFAULT'\n\n controller = controller_nonMPI_hard_faults(\n num_procs=num_procs, controller_params=controller_params, description=description\n )\n\n # get initial values on finest level\n P = controller.MS[0].levels[0].prob\n uinit = P.u_exact(t0)\n\n # call main function to get things done...\n uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)\n\n # stats magic: get iteration counts to find maxiter/niter\n sortedlist_stats = get_sorted(stats, level=-1, type='niter', sortby='process')\n ref_niter = max([item[1] for item in sortedlist_stats])\n\n print('Will sweep 
over %i steps and %i iterations now...' % (num_procs, ref_niter))\n\n # loop over all strategies\n for strategy in ft_strategies:\n ft_iter = range(1, ref_niter + 1)\n ft_step = range(0, num_procs)\n\n print('------------------------------------------ working on strategy ', strategy)\n\n iter_count = np.zeros((len(ft_step), len(ft_iter)))\n\n # loop over all steps\n xcnt = -1\n for step in ft_step:\n xcnt += 1\n\n # loop over all iterations\n ycnt = -1\n for iter in ft_iter:\n ycnt += 1\n\n ft.hard_step = step\n ft.hard_iter = iter\n ft.strategy = strategy\n\n # call main function to get things done...\n uend, stats = controller.run(u0=uinit, t0=t0, Tend=Tend)\n\n # stats magic: get iteration counts to find maxiter/niter\n sortedlist_stats = get_sorted(stats, level=-1, type='niter', sortby='process')\n niter = max([item[1] for item in sortedlist_stats])\n iter_count[xcnt, ycnt] = niter\n\n print(iter_count)\n\n np.savez(\n 'data/' + setup + '_results_hf_' + strategy,\n iter_count=iter_count,\n description=description,\n ft_step=ft_step,\n ft_iter=ft_iter,\n )", "def test_port_create_with_binding_information(self):\n network, segments, subnets = self._create_test_segments_with_subnets(3)\n\n # Map the host to the middle segment (by mocking host/segment mapping)\n self._setup_host_mappings([\n (segments[1]['segment']['id'], 'fakehost'),\n (segments[1]['segment']['id'], 'otherhost'),\n (segments[0]['segment']['id'], 'thirdhost')])\n\n response = self._create_port(self.fmt,\n net_id=network['network']['id'],\n tenant_id=network['network']['tenant_id'],\n is_admin=True,\n arg_list=(portbindings.HOST_ID,),\n **{portbindings.HOST_ID: 'fakehost'})\n res = self.deserialize(self.fmt, response)\n self._validate_immediate_ip_allocation(res['port']['id'])\n\n # Since host mapped to middle segment, IP must come from middle subnet\n self._assert_one_ip_in_subnet(response, subnets[1]['subnet']['cidr'])", "def create_resource_config_files(host_config, resource_config, type_map, bus_map, trecs_root_dir, output_dir, resource_config_dir, model_listen_port, agent_listen_port):\n for host in host_config:\n if host['host_type'] != 'RA':\n continue\n\n resource_name = host['attached_resource_name']\n\n init_data = {\n 'RA': {\n 'ip': '127.0.0.1',\n 'listen_port': agent_listen_port\n },\n 'bus_index': bus_map[resource_name],\n 'listen_port': model_listen_port,\n 'log_path': path.join(output_dir, 'csv', '{}.csv'.format(resource_name))\n }\n\n resource = next(resource for resource in resource_config['resources'] if resource['resource_name'] == resource_name)\n for key in resource.keys():\n if key.endswith('_path'):\n cwd = getcwd()\n chdir(resource_config_dir)\n resource[key] = path.abspath(resource[key])\n chdir(cwd)\n\n final_config = init_data.copy()\n final_config.update(resource)\n\n config_file_name = '{}_config.json'.format(resource_name)\n with open(\n path.join(trecs_root_dir, 'run', config_file_name), 'w'\n ) as init_file:\n dump(final_config, init_file)", "def init_test_input_pipeline(self, config):\n\n print('Initiating test input pipelines')\n\n ######################\n # Calibrate parameters\n ######################\n\n # Update num classes in config\n config.num_classes = self.num_classes - len(self.ignored_labels)\n config.ignored_label_inds = [self.label_to_idx[ign_label] for ign_label in self.ignored_labels]\n\n # Update network model in config\n config.network_model = self.network_model\n\n # Update num classes in config\n\n if config.network_model == 'multi_segmentation':\n config.num_classes = 
self.num_parts\n elif config.network_model == 'segmentation':\n if self.ShapeNetPartType in self.label_names:\n config.num_classes = self.num_parts[self.name_to_label[self.ShapeNetPartType]]\n else:\n raise ValueError('Wrong object name given for ShapeNetPart single object segmentation')\n\n # Calibrate generators to batch_num\n self.batch_limit = self.calibrate_batches(config)\n\n # From config parameter, compute higher bound of neighbors number in a neighborhood\n hist_n = int(np.ceil(4 / 3 * np.pi * (config.density_parameter + 1) ** 3))\n\n # Initiate neighbors limit with higher bound\n self.neighborhood_limits = np.full(config.num_layers, hist_n, dtype=np.int32)\n\n # Calibrate max neighbors number\n self.calibrate_neighbors(config)\n\n ################################\n # Initiate tensorflow parameters\n ################################\n\n # Reset graph\n tf.reset_default_graph()\n\n # Set random seed (You also have to set it in network_architectures.weight_variable)\n #np.random.seed(42)\n #tf.set_random_seed(42)\n\n # Get generator and mapping function\n gen_function, gen_types, gen_shapes = self.get_batch_gen('test', config)\n map_func = self.get_tf_mapping(config)\n\n ##############\n # Test dataset\n ##############\n\n # Create batched dataset from generator\n self.test_data = tf.data.Dataset.from_generator(gen_function,\n gen_types,\n gen_shapes)\n\n self.test_data = self.test_data.map(map_func=map_func, num_parallel_calls=self.num_threads)\n\n # Prefetch data\n self.test_data = self.test_data.prefetch(10)\n\n #################\n # Common iterator\n #################\n\n # create a iterator of the correct shape and type\n iter = tf.data.Iterator.from_structure(self.test_data.output_types, self.test_data.output_shapes)\n self.flat_inputs = iter.get_next()\n\n # create the initialisation operations\n self.test_init_op = iter.make_initializer(self.test_data)", "def gen_config(self):\n if self.want:\n wantd = {\n (entry[\"process_id\"], entry.get(\"vrf\")): entry\n for entry in self.want.get(\"processes\", [])\n }\n else:\n wantd = {}\n if self.have:\n haved = {\n (entry[\"process_id\"], entry.get(\"vrf\")): entry\n for entry in self.have.get(\"processes\", [])\n }\n else:\n haved = {}\n\n # turn all lists of dicts into dicts prior to merge\n for thing in wantd, haved:\n for _pid, proc in iteritems(thing):\n for area in proc.get(\"areas\", []):\n virtual_link = {\n entry[\"id\"]: entry\n for entry in area.get(\"virtual_link\", [])\n }\n if bool(virtual_link):\n area[\"virtual_link\"] = virtual_link\n ranges = {\n entry[\"address\"]: entry\n for entry in area.get(\"ranges\", [])\n }\n if bool(ranges):\n area[\"ranges\"] = ranges\n\n proc[\"areas\"] = {\n entry[\"area_id\"]: entry for entry in proc.get(\"areas\", [])\n }\n if proc.get(\"distribute_list\"):\n if \"acls\" in proc.get(\"distribute_list\"):\n proc[\"distribute_list\"][\"acls\"] = {\n entry[\"name\"]: entry\n for entry in proc[\"distribute_list\"].get(\n \"acls\", []\n )\n }\n\n # if state is merged, merge want onto have\n if self.state == \"merged\":\n wantd = dict_merge(haved, wantd)\n\n # if state is deleted, limit the have to anything in want\n # set want to nothing\n if self.state == \"deleted\":\n haved = {\n k: v for k, v in iteritems(haved) if k in wantd or not wantd\n }\n wantd = {}\n\n # delete processes first so we do run into \"more than one\" errors\n if self.state == \"deleted\":\n haved_del = deepcopy(haved)\n want_process = {}\n for k, t_want in iteritems(haved_del):\n want_process[\"process_id\"] = 
t_want.get(\"process_id\")\n if not (len(t_want) == 2 and not t_want.get(\"areas\")):\n self._compare(want=want_process, have=haved_del.get(k, {}))\n if self.state == \"overridden\":\n haved_del = deepcopy(haved)\n want = {}\n for k, t_want in iteritems(haved_del):\n if k not in wantd:\n want[\"process_id\"] = t_want.get(\"process_id\")\n if not (len(t_want) == 2 and not t_want.get(\"areas\")):\n self._compare(want=want, have=haved_del.get(k, {}))\n\n for k, want in iteritems(wantd):\n self._compare(want=want, have=haved.pop(k, {}))", "def make_tfx_configs(metadata: Dict) -> Dict:\n system_config = get_config(metadata, \"system_configurations\")\n \n\n # %% pipeline_root\n # TFX produces two types of outputs, files and metadata.\n # - Files will be created under \"pipeline_root\" directory.\n pipeline_root = {\n \"description\": \"\"\"TFX produces two types of outputs, files and metadata.\n Files will be created under 'pipeline_root' directory.\"\"\",\n \"type\": \"string\",\n \"value\": os.path.join(\n system_config[\"gcs_bucket_name\"],\n \"tfx_pipeline_output\",\n metadata[\"pipeline_name\"] + \"_\" + metadata[\"pipeline_version\"],\n ),\n }\n metadata[\"system_configurations\"][\"pipeline_root\"] = pipeline_root\n\n # %% model_serve_dir\n # The last component of the pipeline, \"Pusher\" will produce serving model under\n # model_serve_dir.\n model_serve_dir = {\n \"description\": \"\",\n \"type\": \"string\",\n \"value\": os.path.join(pipeline_root[\"value\"], \"serving_model\"),\n }\n metadata[\"system_configurations\"][\"model_serve_dir\"] = model_serve_dir\n\n return metadata", "def create_netlist(self):\n self.add_modules()\n self.add_pins()\n self.create_instances()", "def gen_network_parameters(self):\n\n print \"\\t* Adding net and subnet parameters to compute template\"\n\n # add all the routers\n all_routers = self.neutronclient.list_routers()[\"routers\"]\n self.all_ports = self.neutronclient.list_ports()[\"ports\"]\n\n self.tenant_routers = filter(lambda router: router['tenant_id'] == self.tenant_id , all_routers)\n\n for idx, router in enumerate(self.tenant_routers):\n\n router_gateway = router[\"external_gateway_info\"]\n try:\n data = {\"type\": \"string\",\n \"description\": \"ID of public network\",\n \"default\": router_gateway[\"network_id\"]\n }\n self.compute_data[\"parameters\"][\"public_net_%s\" % str(idx)] = data\n except:\n print \"\\t! 
Could not add external_gateway_info for %s\" % router[\"name\"]\n\n networks = self.neutronclient.list_networks()[\"networks\"]\n # filter all networks that match\n filtered_networks = [net for net in networks if (net[\"tenant_id\"] == self.tenant_id or\n (net[\"shared\"] is True) and net['router:external'] is False) and (net[\"name\"] != \"public\")]\n\n # obtain subnet information\n shared_net_id = 0\n for network in filtered_networks:\n for subnet in network[\"subnets\"]:\n if network[\"shared\"] != True:\n subnet_info = self.neutronclient.show_subnet(subnet)[\"subnet\"]\n\n # generate private net\n # private name\n data = {\"type\": \"string\",\n \"description\": \"Name of network\",\n \"default\": network[\"name\"]}\n self.compute_data[\"parameters\"][\"%s_net_name\" % (network[\"name\"])] = data\n\n # private cidr\n data = {\"type\": \"string\",\n \"description\": \"Network address (CIDR notation)\",\n \"default\": subnet_info[\"cidr\"]}\n self.compute_data[\"parameters\"][\"%s_%s_cidr\" % (network[\"name\"], subnet_info[\"name\"])] = data\n\n # private gateway\n data = {\"type\": \"string\",\n \"description\": \"Network gateway address\",\n \"default\": subnet_info[\"gateway_ip\"]}\n self.compute_data[\"parameters\"][\"%s_%s_gateway\" % (network[\"name\"], subnet_info[\"name\"])] = data\n\n # private pool start\n data = {\"type\": \"string\",\n \"description\": \"Start of network IP address allocation pool\",\n \"default\": subnet_info[\"allocation_pools\"][0][\"start\"]}\n self.compute_data[\"parameters\"][\"%s_%s_pool_start\" % (network[\"name\"], subnet_info[\"name\"])] = data\n\n # private pool end\n data = {\"type\": \"string\",\n \"description\": \"End of network IP address allocation pool\",\n \"default\": subnet_info[\"allocation_pools\"][0][\"end\"]}\n self.compute_data[\"parameters\"][\"%s_%s_pool_end\" % (network[\"name\"], subnet_info[\"name\"])] = data\n else:\n print \"\\t* Adding shared network: %s\" % network[\"name\"]\n data = {\"type\": \"string\",\n \"description\": \"ID of detected shared network\",\n \"default\": network[\"id\"]\n }\n self.compute_data[\"parameters\"][\"shared_net_%s\" % str(shared_net_id)] = data\n shared_net_id += 1", "def get_config(self):\n\n # Use topological sort to get the correct order of modules.\n self.dag_topology_sort()\n mconfig = {}\n module_connection = {}\n for mod in self.mod_wrapper:\n # Generate pipeline configuration.\n mconf = {}\n output_conf = []\n module = self.mod_wrapper[mod]\n for _, binding in module.output_bindings.bindings.items():\n dep_conf = []\n output = {}\n if binding.bindings:\n for dep in binding.bindings:\n dep_item = {}\n _, dname = dep.get_name()\n if dep.is_pipeline_executor_interface():\n dep_item[\"global_output_index\"] = int(dname)\n else:\n dep_item[\"mod_idx\"] = dep.get_owner_idx()\n dep_item[\"input_name\"] = dname\n dep_conf.append(dep_item)\n\n # The value of output_idx start from 0.\n output[\"output_idx\"] = int(binding.name)\n output[\"dependencies\"] = dep_conf\n output_conf.append(output)\n\n mconf[\"mod_idx\"] = module.idx\n mconf[\"cpu_affinity\"] = module.cpu_affinity\n mconf[\"output\"] = output_conf\n\n module_connection[mod] = {\n \"pipeline\": mconf,\n \"target_host\": module.target_host,\n \"mod_name\": \"default\",\n \"build\": module.build_func,\n \"params\": module.params,\n \"target\": module.target,\n \"fcompile\": module.fcompile,\n \"dev\": module.dev,\n \"export_cc\": module.export_cc,\n }\n\n # Creating a map including pipeline inputs and subgraph inputs.\n 
input_connection = []\n for input_name in self.input_bindings.bindings:\n input_dict = self.input_bindings.bindings[input_name].get_binding_dict()\n if \"interface_name\" not in input_dict[\"connection\"][0]:\n raise RuntimeError(\"interface_name is missing in connection config!\")\n # Creating the map including global interfaces and subgraph interfaces.\n input_map = {\n \"global_interface_name\": input_dict[\"interface_name\"],\n \"mod_idx\": input_dict[\"connection\"][0][\"mod_idx\"],\n \"module_interface_name\": input_dict[\"connection\"][0][\"interface_name\"],\n }\n input_connection.append(input_map)\n\n # Create a map including global parameters groups and modules.\n param_connection = []\n for param_name in self.param_group_bindings.bindings:\n param_dict = self.param_group_bindings.bindings[param_name].get_binding_dict()\n param_map = {\n \"global_param_name\": param_dict[\"interface_name\"],\n \"mod_idx\": param_dict[\"connection\"][0][\"mod_idx\"],\n }\n param_connection.append(param_map)\n\n mconfig[\"module_connection\"] = module_connection\n mconfig[\"input_connection\"] = input_connection\n mconfig[\"param_connection\"] = param_connection\n return mconfig", "def build(self, config):\n nets = OrderedDict()\n\n nets['shared'] = NeuralNet(self.tensor_in, config['net_g']['shared'],\n name='shared')\n\n nets['pitch_time_private'] = [\n NeuralNet(nets['shared'].tensor_out,\n config['net_g']['pitch_time_private'],\n name='pt_'+str(idx))\n for idx in range(config['num_track'])\n ]\n\n nets['time_pitch_private'] = [\n NeuralNet(nets['shared'].tensor_out,\n config['net_g']['time_pitch_private'],\n name='tp_'+str(idx))\n for idx in range(config['num_track'])\n ]\n\n nets['merged_private'] = [\n NeuralNet(tf.concat([nets['pitch_time_private'][idx].tensor_out,\n nets['time_pitch_private'][idx].tensor_out],\n -1),\n config['net_g']['merged_private'],\n name='merged_'+str(idx))\n for idx in range(config['num_track'])\n ]\n\n nets['refiner_private'] = [\n NeuralNet(nets['merged_private'][idx].tensor_out,\n config['net_r']['private'],\n slope_tensor=self.slope_tensor,\n name='refiner_private'+str(idx))\n for idx in range(config['num_track'])\n ]\n\n return (tf.concat([nn.tensor_out for nn in nets['private']], -1), nets,\n tf.concat([nn.layers[-1].preactivated\n for nn in nets['private']], -1))", "def get_flows(self, num_flows_per_entry):\n flows = []\n for tenant in self._tenants:\n for contract in tenant.get_children(only_class=Contract):\n providing_epgs = contract.get_all_providing_epgs()\n consuming_epgs = contract.get_all_consuming_epgs()\n for providing_epg in providing_epgs:\n vlan_ifs = providing_epg.get_all_attached(L2Interface)\n if len(vlan_ifs):\n providing_vlan = vlan_ifs[0].encap_id\n phys_ifs = vlan_ifs[0].get_all_attached(Interface)\n if len(phys_ifs):\n providing_phys_if = phys_ifs[0].name\n for consuming_epg in consuming_epgs:\n vlan_ifs = consuming_epg.get_all_attached(L2Interface)\n if len(vlan_ifs):\n consuming_vlan = vlan_ifs[0].encap_id\n phys_ifs = vlan_ifs[0].get_all_attached(Interface)\n if len(phys_ifs):\n consuming_phys_if = phys_ifs[0].name\n if providing_vlan == consuming_vlan and providing_phys_if == consuming_phys_if:\n # Skip this case since traffic would be switched outside fabric\n continue\n for filter_entry in contract.get_all_filter_entries():\n for i in range(0, num_flows_per_entry):\n flow = Flow()\n flow.ethertype = filter_entry.etherT\n if flow.ethertype == 'arp':\n flow.arp_opcode = filter_entry.arpOpc\n flow.populate_random_ip_addresses()\n elif 
flow.ethertype == 'ip':\n flow.populate_random_ip_addresses()\n flow.proto = filter_entry.prot\n if flow.proto == '6' or flow.proto == '17':\n dFromPort = int(filter_entry.dFromPort)\n dToPort = int(filter_entry.dToPort)\n sFromPort = int(filter_entry.sFromPort)\n sToPort = int(filter_entry.sToPort)\n if dFromPort == 0:\n dFromPort = 1\n dToPort += 1\n if sFromPort == 0:\n sFromPort = 1\n sToPort += 1\n if dToPort > 65534:\n dToPort = 65534\n if sToPort > 65534:\n sToPort = 65534\n flow.dport = str(random_number(dFromPort,\n dToPort))\n flow.sport = str(random_number(sFromPort,\n sToPort))\n if flow.proto == '6':\n flow.tcp_rules = filter_entry.tcpRules\n flow.svlan = providing_vlan\n flow.dvlan = consuming_vlan\n flow.src_intf = providing_phys_if\n flow.dst_intf = consuming_phys_if\n\n # Is the flow expected to succeed ?\n flow.expected_action = 'drop'\n providing_bd = providing_epg.get_bd()\n consuming_bd = consuming_epg.get_bd()\n if providing_bd and consuming_bd:\n if providing_bd == consuming_bd:\n if providing_bd.get_context() == consuming_bd.get_context():\n flow.expected_action = 'permit'\n flow.populate_random_mac_addresses()\n flows.append(flow)\n return flows", "def get_initial_graph(n_in, code_spec, n_out):\n G = nx.MultiDiGraph()\n\n G.add_node(\"source\", label=\"SOURCE\", shape=\"cylinder\", color=\"gold\")\n G.add_node(\"sink\", label=\"SINK\", shape=\"cylinder\", color=\"gold\")\n\n for task in tasks:\n task_source_key = f\"{task.name}-source\"\n G.add_node(task_source_key, label=task_source_key, shape=\"cylinder\", color=\"gold\")\n G.add_edge(\"source\", task_source_key)\n task_sink_key = f\"{task.name}-sink\"\n G.add_node(task_sink_key, label=task_sink_key, shape=\"cylinder\", color=\"gold\")\n G.add_edge(task_sink_key, \"sink\")\n for i, input in enumerate(task.inputs):\n input_key = f\"{task.name}-input-{i}\"\n G.add_node(input_key, label=input_key, shape=\"circle\", color=\"blue\",\n node_type=\"input\", input=input)\n G.add_edge(task_source_key, input_key)\n encoder_key = f\"{input_key}-encoder\"\n G.add_node(encoder_key, label=encoder_key, shape=\"diamond\", color=\"black\",\n node_type=\"encoder\", input=input, output=CODE)\n G.add_edge(input_key, encoder_key)\n for box_number in range(initial_boxes):\n if box_number not in G.nodes():\n G.add_node(box_number, label=box_number, shape=\"square\", color=\"black\")\n G.add_edge(encoder_key, box_number)\n for o, output in enumerate(task.outputs):\n output_key = f\"{task.name}-output-{o}\"\n G.add_node(output_key, label=output_key, shape=\"triangle\", color=\"red\",\n node_type=\"output\", output=output)\n G.add_edge(output_key, task_sink_key)\n decoder_key = f\"{output_key}-decoder\"\n G.add_node(decoder_key, label=decoder_key, shape=\"diamond\", color=\"black\",\n node_type=\"decoder\", input=CODE, output=output)\n for box_number in range(initial_boxes):\n G.add_edge(box_number, decoder_key)\n G.add_edge(decoder_key, output_key)\n return G", "def generate_test_deepstream_servers(self):\n def generate_servers_for_block(names, organization_name, data):\n item_dict = {}\n for name in names:\n item_dict['{}_{}'.format(name, organization_name)] = {\n 'organization': organization_name,\n 'mac_addr': self.generate_random_mac_addr(),\n **data,\n }\n return item_dict\n\n server_data = {\n 'ip_addr': 'rtsp://192.168.1.1',\n 'connected_at': timezone.now(),\n 'last_echo_at': timezone.now()\n }\n self.ds_o1_dict =\\\n generate_servers_for_block(\n ['ds0', 'ds1', 'ds2_del', 'ds3_del', 'ds4_del', 'ds5_del'],\n 'o1',\n server_data)\n 
self.ds_o2_dict =\\\n generate_servers_for_block(\n ['ds0', 'ds1', 'ds2_del', 'ds3_del', 'ds4_del', 'ds5_del'],\n 'o2',\n server_data)\n self.ds_sub1_o1_dict =\\\n generate_servers_for_block(\n ['ds0', 'ds1', 'ds2_del', 'ds3_del', 'ds4_del', 'ds5_del'],\n 'sub1_o1',\n server_data)\n self.ds_sub1_o2_dict =\\\n generate_servers_for_block(\n ['ds0', 'ds1', 'ds2_del', 'ds3_del'],\n 'sub1_o2',\n server_data)\n\n self.ds_dict = {\n **self.ds_o1_dict,\n **self.ds_o2_dict,\n **self.ds_sub1_o1_dict,\n **self.ds_sub1_o2_dict,\n }\n\n # generate blocks in database\n self.deepstream_servers = \\\n self.create_servers_from_data(self.ds_dict, self.orgs)", "def config_from_ptc_def(self, cfg, impl_cfg):\n bridges = cfg.bridges if cfg.bridges else {}\n \"\"\" :type: dict [str, BridgeDef]\"\"\"\n interfaces = cfg.interfaces if cfg.interfaces else {}\n \"\"\" :type: dict [str, InterfaceDef]\"\"\"\n ip_rules = cfg.ip_forward_rules if cfg.ip_forward_rules else []\n \"\"\" :type list [IPForwardRuleDef]\"\"\"\n route_rules = cfg.route_rules if cfg.route_rules else []\n \"\"\" :type list [RouteRuleDef]\"\"\"\n self.name = cfg.name\n\n # Configure bridges now, but hold off on interfaces until\n # we get to wiring\n for name, br in bridges.iteritems():\n b = Bridge(name, self, br.mac_address, br.ip_addresses, br.options)\n self.bridges[name] = b\n\n for iface in interfaces.itervalues():\n link_br = None\n if iface.linked_bridge is not None:\n if iface.linked_bridge not in self.bridges:\n raise ObjectNotFoundException(\n 'Linked bridge ' + iface.linked_bridge +\n ' on interface not found on host ' + self.name)\n\n link_br = self.bridges[iface.linked_bridge]\n\n # Set up an interface here, but it will be replaced by\n # a virtual interface if this host/interface is defined as a\n # near-pair in a wiring config\n self.interfaces[iface.name] = Interface(\n iface.name, self, iface.mac_address,\n iface.ip_addresses, link_br, iface.vlans)\n\n main_iface = None\n if 'eth0' in self.interfaces:\n main_iface = self.interfaces['eth0']\n elif len(self.interfaces) > 0:\n main_iface = self.interfaces.values()[0]\n\n if main_iface and len(main_iface.ip_list) > 0:\n self.main_ip = main_iface.ip_list[0].ip\n\n for ip_rule in ip_rules:\n self.ip_forward_rules.append((ip_rule.exterior, ip_rule.interior))\n\n for route in route_rules:\n self.route_rules.append((route.dest, route.gw, route.dev))\n\n # Configure the host with all of the apps it will be running\n for app_cfg in impl_cfg.apps:\n # Module name is the whole string, while class name is the\n # last name after the last dot (.)\n self.LOG.debug('Configuring host: ' + self.name +\n ' with application: ' + app_cfg.class_name)\n app_class = get_class_from_fqn(app_cfg.class_name)\n app_id = uuid.uuid4()\n a = app_class(self, app_id)\n \"\"\" :type: ptm.application.application.Application\"\"\"\n a.configure_logging(log_file_name=self.log_file_name,\n debug=self.debug)\n a.configure(cfg, app_cfg)\n self.applications.append(a)\n app_type = a.get_type()\n if app_type not in self.applications_by_type:\n self.applications_by_type[app_type] = []\n else:\n # Check if multiple copies of this app type are allowed\n if app_type not in application.APPLICATION_MULTI_ALLOWED:\n raise exceptions.ArgMismatchException(\n \"Cannot run more than one application of type: \" +\n a.type_as_str(app_type) + \" on a single host\")\n self.LOG.debug(\n 'Configuring application: ' + a.get_name() + ' as a: ' +\n application.Application.type_as_str(app_type))\n self.applications_by_type[app_type].append(a)", 
"def get_base_config():\n return dict(\n dim=768,\n ff_dim=3072,\n num_heads=12,\n num_layers=12,\n attention_dropout_rate=0.0,\n dropout_rate=0.1,\n representation_size=768,\n classifier='token'\n )", "def create_features(self, cfg_path):\n def parse_cfg(cfg_path):\n blocks = []\n fp = open(cfg_path, 'r')\n block = None\n line = fp.readline()\n while line != '':\n line = line.rstrip()\n if line == '' or line[0] == '#':\n line = fp.readline()\n continue\n elif line[0] == '[':\n if block:\n blocks.append(block)\n block = dict()\n block['type'] = line.lstrip('[').rstrip(']')\n # set default value\n if block['type'] == 'convolutional':\n block['batch_normalize'] = 0\n else:\n key, value = line.split('=')\n key = key.strip()\n if key == 'type':\n key = '_type'\n value = value.strip()\n block[key] = value\n line = fp.readline()\n\n if block:\n blocks.append(block)\n fp.close()\n return blocks\n\n blocks = parse_cfg(cfg_path)\n\n models = nn.Sequential()\n conv_id = 0\n prev_filters = 0\n max_pool_id = 0\n \n for block in blocks:\n if block['type'] == 'net':\n prev_filters = int(block['channels'])\n continue\n elif block['type'] == 'convolutional':\n conv_id += 1\n # is_bn = int(block['batch_normalize']) # extraction.conv.weight has no batch_normalize, but it needed.\n filters = int(block['filters'])\n kernel_size = int(block['size'])\n stride = int(block['stride'])\n is_pad = int(block['pad'])\n pad_size = (kernel_size - 1) // 2 if is_pad else 0\n activation = block['activation']\n models.add_module(f\"conv{conv_id}\", nn.Conv2d(prev_filters, filters, kernel_size, stride, pad_size, bias=False))\n models.add_module(f\"bn{conv_id}\", nn.BatchNorm2d(filters))\n if activation =='leaky':\n models.add_module(f\"leaky{conv_id}\", nn.LeakyReLU(0.1, inplace=True))\n prev_filters = filters\n\n elif block['type'] == 'maxpool':\n max_pool_id += 1\n pool_size = int(block['size'])\n stride = int(block['stride'])\n models.add_module(f\"maxpool{max_pool_id}\", nn.MaxPool2d(pool_size, stride))\n \n # elif block['type'] == 'avgpool':\n # models.add_module(\"avgpool\", nn.AvgPool2d(7))\n\n # elif block['type'] == 'connected':\n # filters = int(block['output'])\n # models.add_module(\"fc\", nn.Linear(prev_filters, filters))\n \n # elif block['type'] == 'softmax':\n # models.add_module(\"softmax\", nn.Softmax())\n\n # print(models)\n return models", "def get_transfer(\n self,\n converge_criteria=0.005,\n iter_max=200,\n save_iters=False,\n fix_bb_xfer=False,\n ):\n transfer_shape = (\n self.num_maps * len(self.specs),\n self.nbins_cmb / len(self.specs),\n )\n\n opts = dict(\n converge_criteria=converge_criteria,\n fix_bb_xfer=fix_bb_xfer,\n apply_gcorr=self.apply_gcorr,\n weighted_bins=self.weighted_bins,\n )\n\n save_name = \"transfer_all\"\n if self.weighted_bins:\n save_name = \"{}_wbins\".format(save_name)\n\n ret = self.load_data(\n save_name,\n \"transfer\",\n to_attrs=False,\n shape_ref=\"qb_transfer\",\n shape=transfer_shape,\n value_ref=opts,\n )\n\n if ret is not None:\n self.qb_transfer = ret[\"qb_transfer\"]\n return ret[\"qb_transfer\"]\n\n self.qb_transfer = OrderedDict()\n for spec in self.specs:\n self.qb_transfer[\"cmb_\" + spec] = OrderedDict()\n\n success = False\n msg = \"\"\n\n for im0, m0 in enumerate(self.map_tags):\n if not self.fit_transfer[self.map_tags_orig[im0]]:\n for spec in self.specs:\n self.qb_transfer[\"cmb_{}\".format(spec)][m0] = np.ones(\n self.nbins_cmb // len(self.specs)\n )\n self.log(\"Setting map {} transfer to unity\".format(m0), \"info\")\n success = True\n continue\n\n 
self.log(\n \"Computing transfer function for map {}/{}\".format(\n im0 + 1, self.num_maps\n ),\n \"info\",\n )\n self.clear_precalc()\n cbl = self.bin_cl_template(map_tag=m0, transfer_run=True)\n ret = self.fisher_iterate(\n cbl,\n m0,\n transfer_run=True,\n iter_max=iter_max,\n converge_criteria=converge_criteria,\n save_iters=save_iters,\n )\n qb = ret[\"qb\"]\n\n success = ret[\"success\"]\n if not success:\n msg = \"Error in fisher_iterate for map {}\".format(m0)\n\n # fix negative amplitude bins\n for k, v in qb.items():\n if np.any(v < 0):\n (negbin,) = np.where(v < 0)\n self.warn(\n \"Transfer function amplitude {} < 0\"\n \"for {} bin {} of map {}\".format(v, k, negbin, m0)\n )\n # XXX cludge\n # This happens in first bin\n # try linear interp between zero and next value\n try:\n qb[k][negbin] = qb[k][negbin + 1] / 2.0\n self.warn(\n \"Setting Transfer function in negative bin to small \"\n \"positive. This is probably due to choice of bins or \"\n \"insufficient number of signal sims\"\n )\n except Exception as e:\n msg = \"Unable to adjust negative bins for map {}: {}\".format(\n m0, str(e)\n )\n success = False\n\n # fix the BB transfer to EE, if desired\n if fix_bb_xfer:\n qb[\"cmb_bb\"] = qb[\"cmb_ee\"]\n\n # fix TB/EB transfer functions\n if len(self.specs) > 4:\n qb[\"cmb_eb\"] = np.sqrt(np.abs(qb[\"cmb_ee\"] * qb[\"cmb_bb\"]))\n qb[\"cmb_tb\"] = np.sqrt(np.abs(qb[\"cmb_tt\"] * qb[\"cmb_bb\"]))\n\n for stag, qbdat in qb.items():\n self.qb_transfer[stag][m0] = qbdat\n\n self.save_data(\n \"{}{}\".format(\"\" if success else \"ERROR_\", save_name),\n from_attrs=[\"nbins\", \"bin_def\", \"qb_transfer\", \"map_tags\"],\n cls_shape=self.cls_shape,\n success=success,\n **opts\n )\n\n if not success:\n raise RuntimeError(\"Error computing transfer function: {}\".format(msg))\n\n return self.qb_transfer", "def create_firewall(context):\n return [{\n 'type': 'templates/firewall.py',\n 'name': 'fc-firewall',\n 'properties': {\n 'projectId':\n '$(ref.fc-project.projectId)',\n 'network':\n '$(ref.fc-network.selfLink)',\n 'dependsOn':\n '$(ref.fc-network.resourceNames)',\n 'rules': [\n {\n 'name': 'allow-internal',\n 'description': 'Allow internal traffic on the network.',\n 'allowed': [{\n 'IPProtocol': 'icmp',\n }, {\n 'IPProtocol': 'tcp',\n 'ports': ['0-65535'],\n }, {\n 'IPProtocol': 'udp',\n 'ports': ['0-65535'],\n }],\n 'direction': 'INGRESS',\n 'sourceRanges': ['10.128.0.0/9'],\n 'priority': 65534,\n },\n {\n 'name': 'leonardo-ssl',\n 'description': 'Allow SSL traffic from Leonardo-managed VMs.',\n 'allowed': [{\n 'IPProtocol': 'tcp',\n 'ports': ['443'],\n }],\n 'direction': 'INGRESS',\n 'sourceRanges': ['0.0.0.0/0'],\n 'targetTags': ['leonardo'],\n },\n ],\n },\n }]", "def get_tecogan_model(conf, r_inputs, r_targets, scope_name, tecogan=True):\n # r_inputs, r_targets : shape (batch, conf.train.rnn_n, h, w, c)\n rnn_length = conf.train.rnn_n\n if tecogan:\n r_inputs, r_targets = get_tecogan_inputs(r_inputs, r_targets)\n rnn_length = rnn_length * 2 - 1\n\n # get the consecutive frame sequences from the input sequence\n frame_t_pre, frame_t = r_inputs[:, 0:-1, :, :, :], r_inputs[:, 1:, :, :, :]\n\n # Get flow estimations\n fnet_output = get_fnet_output(\n conf, rnn_length, frame_t_pre, frame_t, scope_name)\n\n # Get the generated HR output frames\n gen_outputs = get_generator_output(\n conf, rnn_length, r_inputs, fnet_output.flow_hr, scope_name)\n\n s_gen_output = F.reshape(gen_outputs, (conf.train.batch_size * rnn_length,\n conf.train.crop_size * 4,\n conf.train.crop_size*4, 3), 
inplace=False)\n s_targets = F.reshape(r_targets, (conf.train.batch_size * rnn_length, conf.train.crop_size * 4,\n conf.train.crop_size * 4, 3), inplace=False)\n\n # Content loss (l2 loss)\n content_loss = F.mean(\n F.sum(F.squared_error(s_gen_output, s_targets), axis=[3]))\n # Warp loss (l2 loss)\n warp_loss = get_warp_loss(\n conf, rnn_length, frame_t, frame_t_pre, fnet_output.flow_lr)\n\n if tecogan:\n d_data = get_d_data(conf, fnet_output.flow_hr,\n gen_outputs, r_targets, rnn_length)\n # Build the tempo discriminator for the real part and fake part\n t_d = get_t_d(conf, r_inputs, d_data)\n\n # Discriminator layer loss:\n d_layer_loss = get_d_layer(t_d.real_layers, t_d.fake_layers)\n # vgg loss (cosine similarity)\n loss_vgg = get_vgg_loss(s_gen_output, s_targets)\n # ping pong loss (an l1 loss)\n gen_out_first = gen_outputs[:, 0:conf.train.rnn_n-1, :, :, :]\n gen_out_last_rev = gen_outputs[:, -1:-conf.train.rnn_n:-1, :, :, :]\n pp_loss = F.mean(F.abs(gen_out_first - gen_out_last_rev))\n # adversarial loss\n t_adversarial_loss = F.mean(\n -F.log(t_d.tdiscrim_fake_output + conf.train.eps))\n\n # Overall generator loss\n gen_loss = content_loss + pp_loss * conf.gan.pp_scaling + conf.gan.ratio * \\\n t_adversarial_loss + conf.gan.vgg_scaling * loss_vgg + \\\n conf.gan.dt_ratio_0 * d_layer_loss\n\n # Discriminator loss\n t_discrim_fake_loss = F.log(\n 1 - t_d.tdiscrim_fake_output + conf.train.eps)\n t_discrim_real_loss = F.log(t_d.tdiscrim_real_output + conf.train.eps)\n t_discrim_loss = F.mean(-(t_discrim_fake_loss + t_discrim_real_loss))\n\n fnet_loss = gen_loss + warp_loss\n\n set_persistent_all(r_targets, r_inputs, loss_vgg, gen_out_first, gen_out_last_rev, pp_loss,\n d_layer_loss, content_loss, warp_loss, gen_loss, t_adversarial_loss,\n t_discrim_loss, t_discrim_real_loss, d_data.t_vel, d_data.t_gen_output,\n s_gen_output, s_targets)\n\n Network = collections.namedtuple('Network', 'content_loss, warp_loss, fnet_loss, vgg_loss,'\n 'gen_loss, pp_loss, sum_layer_loss,t_adversarial_loss,'\n 't_discrim_loss,t_gen_output,t_discrim_real_loss')\n return Network(\n content_loss=content_loss,\n warp_loss=warp_loss,\n fnet_loss=fnet_loss,\n vgg_loss=loss_vgg,\n gen_loss=gen_loss,\n pp_loss=pp_loss,\n sum_layer_loss=d_layer_loss,\n t_adversarial_loss=t_adversarial_loss,\n t_discrim_loss=t_discrim_loss,\n t_gen_output=d_data.t_gen_output,\n t_discrim_real_loss=t_discrim_real_loss\n )\n\n gen_loss = content_loss\n fnet_loss = gen_loss + warp_loss\n set_persistent_all(content_loss, s_gen_output,\n warp_loss, gen_loss, fnet_loss)\n\n Network = collections.namedtuple(\n 'Network', 'content_loss, warp_loss, fnet_loss, gen_loss')\n return Network(\n content_loss=content_loss,\n warp_loss=warp_loss,\n fnet_loss=fnet_loss,\n gen_loss=gen_loss,\n )", "def set_up_all(self):\n # Based on h/w type, choose how many ports to use\n self.dut_ports = self.dut.get_ports(self.nic)\n # Verify that enough ports are available\n self.verify(len(self.dut_ports) >= 1, \"Insufficient ports\")\n\n localPort = self.tester.get_local_port(self.dut_ports[0])\n self.tester_itf = self.tester.get_interface(localPort)\n self.tester_mac = self.tester.get_mac(localPort)\n self.pf_interface = self.dut.ports_info[self.dut_ports[0]]['intf']\n self.pf_mac = self.dut.get_mac_address(0)\n self.pf_pci = self.dut.ports_info[self.dut_ports[0]]['pci']\n self.pmdout = PmdOutput(self.dut)\n self.cores = \"1S/2C/1T\"\n self.pkt1 = \"Ether(dst='%s')/IP(src='10.0.0.1',dst='192.168.0.2')/SCTP(dport=80, sport=80)/('X'*48)\" % self.pf_mac\n self.pkt2 
= \"Ether(dst='%s')/IP(src='10.0.0.1',dst='192.168.0.2')/UDP(dport=50, sport=50)/('X'*48)\" % self.pf_mac\n self.pkt3 = \"Ether(dst='%s')/IP(src='10.0.0.1',dst='192.168.0.3')/TCP(dport=50, sport=50)/('X'*48)\" % self.pf_mac\n self.pkt4 = \"Ether(dst='%s')/IP(src='10.0.0.1',dst='192.168.0.2')/('X'*48)\" % self.pf_mac\n self.pkt5 = \"Ether(dst='%s')/IPv6(src='2001::1',dst='2001::2',nh=132)/SCTP(dport=80, sport=80)/('X'*48)\" % self.pf_mac\n self.pkt6 = \"Ether(dst='%s')/IPv6(src='2001::1',dst='2001::2')/UDP(dport=50, sport=50)/('X'*48)\" % self.pf_mac\n self.pkt7 = \"Ether(dst='%s')/IPv6(src='2001::2',dst='2001::3')/TCP(dport=50, sport=50)/('X'*48)\" % self.pf_mac\n self.pkt8 = \"Ether(dst='%s')/IPv6(src='2001::2',dst='2001::3')/('X'*48)\" % self.pf_mac\n self.prio_pkt1 = \"Ether(dst='%s')/Dot1Q(prio=1)/IP(src='10.0.0.1',dst='192.168.0.2')/TCP(dport=80, sport=80)/('X'*48)\" % self.pf_mac\n self.prio_pkt2 = \"Ether(dst='%s')/Dot1Q(prio=2)/IP(src='10.0.0.1',dst='192.168.0.2')/TCP(dport=80, sport=80)/('X'*48)\" % self.pf_mac\n self.prio_pkt3 = \"Ether(dst='%s')/Dot1Q(prio=3)/IP(src='10.0.0.1',dst='192.168.0.2')/TCP(dport=80, sport=80)/('X'*48)\" % self.pf_mac", "def _config_chn_ins(ssh_clt, topo_info):\n # MARK: Assume the iterface name pattern: eth0, eth1, eth2...\n for ifce_name in ['eth1', 'eth2']:\n print('## Setup interface: %s' % ifce_name)\n ssh_clt.exec_command('sudo ip link set %s up' % ifce_name)\n time.sleep(1)\n print('## Assign IP via DHCP')\n ssh_clt.exec_command('sudo dhclient %s' % ifce_name)\n time.sleep(1)\n print('## Remove duplicate route table items...')\n ssh_clt.exec_command('sudo ip route delete %s dev %s'\n % (conf.NET_ARGS['pvt_subnet_cidr'], ifce_name)\n )\n time.sleep(1)\n\n print('## Add static routing to source and destination...')\n ssh_clt.exec_command('sudo ip route add %s dev eth1' % topo_info['src_ip'])\n time.sleep(1)\n ssh_clt.exec_command('sudo ip route add %s dev eth2' % topo_info['dst_ip'])\n time.sleep(1)\n\n print('## Enable Linux Kernel IP forwarding...')\n ssh_clt.exec_command('echo 1 | sudo tee /proc/sys/net/ipv4/ip_forward')\n time.sleep(1)\n print('# Config Finished\\n')", "def _build_networks(self):\n self.online_convnet = self._create_network(name='Online')\n self.target_convnet = self._create_network(name='Target')\n\n # Compute the Q-values which are used for action selection in the current\n # state.\n self._net_outputs = self.online_convnet(self.state_ph,\n self.num_quantile_samples)\n # Shape of self._net_outputs.quantile_values:\n # num_quantile_samples x num_actions.\n # e.g. 
if num_actions is 2, it might look something like this:\n # Vals for Quantile .2 Vals for Quantile .4 Vals for Quantile .6\n # [[0.1, 0.5], [0.15, -0.3], [0.15, -0.2]]\n # Q-values = [(0.1 + 0.15 + 0.15)/3, (0.5 + 0.15 + -0.2)/3].\n self._q_values = tf.reduce_mean(self._net_outputs.quantile_values, axis=0)\n self._q_argmax = tf.argmax(self._q_values, axis=0)\n self._policy_logits = tf.nn.softmax(self._q_values / self.tau, axis=0)\n self._stochastic_action = tf.random.categorical(\n self._policy_logits[None, Ellipsis],\n num_samples=1,\n dtype=tf.int32)[0][0]\n\n self._replay_net_outputs = self.online_convnet(self._replay.states,\n self.num_tau_samples)\n # Shape: (num_tau_samples x batch_size) x num_actions.\n self._replay_net_quantile_values = self._replay_net_outputs.quantile_values\n self._replay_net_quantiles = self._replay_net_outputs.quantiles\n\n # Do the same for next states in the replay buffer.\n self._replay_net_target_outputs = self.target_convnet(\n self._replay.next_states, self.num_tau_prime_samples)\n # Shape: (num_tau_prime_samples x batch_size) x num_actions.\n vals = self._replay_net_target_outputs.quantile_values\n self._replay_net_target_quantile_values = vals\n\n # Compute Q-values which are used for action selection for the states and\n # next states in the replay buffer.\n target_next_action = self.target_convnet(self._replay.next_states,\n self.num_quantile_samples)\n target_action = self.target_convnet(self._replay.states,\n self.num_quantile_samples)\n\n # Shape: (num_quantile_samples x batch_size) x num_actions.\n target_next_quantile_values_action = target_next_action.quantile_values\n # Shape: num_quantile_samples x batch_size x num_actions.\n target_next_quantile_values_action = tf.reshape(\n target_next_quantile_values_action,\n [self.num_quantile_samples, self._replay.batch_size, self.num_actions])\n\n # Shape: (num_quantile_samples x batch_size) x num_actions.\n target_quantile_values_action = target_action.quantile_values\n # Shape: num_quantile_samples x batch_size x num_actions.\n target_quantile_values_action = tf.reshape(target_quantile_values_action,\n [self.num_quantile_samples,\n self._replay.batch_size,\n self.num_actions])\n # Shape: batch_size x num_actions.\n self._replay_next_target_q_values = tf.squeeze(tf.reduce_mean(\n target_next_quantile_values_action, axis=0))\n self._replay_target_q_values = tf.squeeze(tf.reduce_mean(\n target_quantile_values_action, axis=0))\n\n self._replay_next_qt_argmax = tf.argmax(\n self._replay_next_target_q_values, axis=1)", "def setup_protocol(self):\n self.ctx.inputs = {\n 'codename': self.inputs.codename,\n 'parameters': {},\n 'settings': {},\n 'options': ParameterData(dict={\n 'resources': {\n 'num_machines': 1\n },\n 'max_wallclock_seconds': 1800,\n }),\n }\n\n if self.inputs.protocol == 'standard':\n self.report('running the workchain in the \"{}\" protocol'.format(self.inputs.protocol.value))\n self.ctx.protocol = {\n 'kpoints_mesh_offset': [0., 0., 0.],\n 'kpoints_mesh_density': 0.2,\n 'convergence_threshold': 2.E-06,\n 'smearing': 'marzari-vanderbilt',\n 'degauss': 0.02,\n 'occupations': 'smearing',\n 'tstress': True,\n 'pseudo_familyname': 'SSSP',\n 'pseudo_data': {\n 'H': {'cutoff': 55, 'dual': 8, 'pseudo': '031US'},\n 'He': {'cutoff': 55, 'dual': 4, 'pseudo': 'SG15'},\n 'Li': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.4'},\n 'Be': {'cutoff': 40, 'dual': 8, 'pseudo': 'GBRV-1.4'},\n 'B': {'cutoff': 40, 'dual': 8, 'pseudo': '031PAW'},\n 'C': {'cutoff': 50, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'N': 
{'cutoff': 55, 'dual': 8, 'pseudo': 'THEOS'},\n 'O': {'cutoff': 45, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'F': {'cutoff': 50, 'dual': 8, 'pseudo': 'GBRV-1.4'},\n 'Ne': {'cutoff': 200, 'dual': 8, 'pseudo': '100PAW'},\n 'Na': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Mg': {'cutoff': 35, 'dual': 8, 'pseudo': 'GBRV-1.4'},\n 'Al': {'cutoff': 30, 'dual': 8, 'pseudo': '100PAW'},\n 'Si': {'cutoff': 30, 'dual': 8, 'pseudo': '100US'},\n 'P': {'cutoff': 30, 'dual': 8, 'pseudo': '100US'},\n 'S': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Cl': {'cutoff': 35, 'dual': 8, 'pseudo': 'GBRV-1.4'},\n 'Ar': {'cutoff': 120, 'dual': 8, 'pseudo': '100US'},\n 'K': {'cutoff': 50, 'dual': 8, 'pseudo': '100US'},\n 'Ca': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Sc': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Ti': {'cutoff': 35, 'dual': 8, 'pseudo': 'GBRV-1.4'},\n 'V': {'cutoff': 40, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Cr': {'cutoff': 40, 'dual': 8, 'pseudo': 'GBRV-1.5'},\n 'Mn': {'cutoff': 70, 'dual': 12, 'pseudo': '031PAW'},\n 'Fe': {'cutoff': 90, 'dual': 12, 'pseudo': '031PAW'},\n 'Co': {'cutoff': 55, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Ni': {'cutoff': 45, 'dual': 8, 'pseudo': 'GBRV-1.4'},\n 'Cu': {'cutoff': 40, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Zn': {'cutoff': 40, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Ga': {'cutoff': 35, 'dual': 8, 'pseudo': '031US'},\n 'Ge': {'cutoff': 40, 'dual': 8, 'pseudo': '100PAW'},\n 'As': {'cutoff': 30, 'dual': 8, 'pseudo': '031US'},\n 'Se': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Br': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.4'},\n 'Kr': {'cutoff': 100, 'dual': 8, 'pseudo': '031US'},\n 'Rb': {'cutoff': 50, 'dual': 4, 'pseudo': 'SG15'},\n 'Sr': {'cutoff': 35, 'dual': 8, 'pseudo': '100US'},\n 'Y': {'cutoff': 35, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Zr': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Nb': {'cutoff': 35, 'dual': 8, 'pseudo': '031PAW'},\n 'Mo': {'cutoff': 35, 'dual': 4, 'pseudo': 'SG15'},\n 'Tc': {'cutoff': 30, 'dual': 4, 'pseudo': 'SG15'},\n 'Ru': {'cutoff': 40, 'dual': 4, 'pseudo': 'SG15'},\n 'Rh': {'cutoff': 45, 'dual': 8, 'pseudo': '100PAW'},\n 'Pd': {'cutoff': 55, 'dual': 8, 'pseudo': '100PAW'},\n 'Ag': {'cutoff': 35, 'dual': 8, 'pseudo': 'GBRV-1.4'},\n 'Cd': {'cutoff': 40, 'dual': 8, 'pseudo': '031US'},\n 'In': {'cutoff': 35, 'dual': 8, 'pseudo': '031US'},\n 'Sn': {'cutoff': 35, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Sb': {'cutoff': 40, 'dual': 8, 'pseudo': 'GBRV-1.4'},\n 'Te': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'I': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Xe': {'cutoff': 120, 'dual': 8, 'pseudo': '100US'},\n 'Cs': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Ba': {'cutoff': 40, 'dual': 4, 'pseudo': 'SG15'},\n 'Hf': {'cutoff': 35, 'dual': 8, 'pseudo': '031US'},\n 'Ta': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'W': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Re': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Os': {'cutoff': 35, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Ir': {'cutoff': 40, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Pt': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.4'},\n 'Au': {'cutoff': 45, 'dual': 4, 'pseudo': 'SG15'},\n 'Hg': {'cutoff': 30, 'dual': 8, 'pseudo': 'GBRV-1.2'},\n 'Tl': {'cutoff': 30, 'dual': 8, 'pseudo': '031US'},\n 'Pb': {'cutoff': 40, 'dual': 8, 'pseudo': '031PAW'},\n 'Bi': {'cutoff': 35, 'dual': 8, 'pseudo': '031PAW'},\n 'Po': {'cutoff': 45, 'dual': 8, 'pseudo': '100US'},\n 'Rn': {'cutoff': 45, 'dual': 8, 'pseudo': '100US'},\n 'La': 
{'cutoff': 55, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n 'Ce': {'cutoff': 45, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n 'Pr': {'cutoff': 50, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n 'Nd': {'cutoff': 40, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n 'Sm': {'cutoff': 40, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n 'Eu': {'cutoff': 55, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n 'Tb': {'cutoff': 40, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n 'Dy': {'cutoff': 40, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n 'Ho': {'cutoff': 40, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n 'Er': {'cutoff': 40, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n 'Tm': {'cutoff': 40, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n 'Yb': {'cutoff': 40, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n 'Lu': {'cutoff': 45, 'dual': 8, 'pseudo': 'Wentzcovitch'},\n }\n }", "def build_configs(subnets, host_count, dev_div, domain=None):\n global VERBOSE\n jsons = [] # subnet breakdown\n unlabeled_hosts = [] # number of hosts in the network w/o roles\n ip_addr = [] # keeping track of the 2nd and 3rd octets in IP\n roles = dict.fromkeys(dev_div.keys(), 0)\n\n if len(subnets)/254 > 254:\n print(\"WARNING: You're about to see some really sick IPs. Have fun.\")\n\n for n in subnets:\n addy = (randint(0,253), randint(0,253))\n while addy in ip_addr:\n addy = (randint(0,253), randint(0,253))\n ip_addr.append(addy)\n jsons.append({\n \"start_ip\" : '10.{}.{}.2'.format(addy[0],addy[1]),\n \"subnet\" : '10.{}.{}.0/24'.format(addy[0], addy[1]),\n \"hosts\" : n,\n \"roles\" : roles.copy()\n })\n unlabeled_hosts.append(n)\n if VERBOSE:\n print(\"start_ip: {}\\t number of hosts: {}\\t\".format(jsons[-1]['start_ip'], jsons[-1]['hosts']))\n\n # divvy up the roles, now that the subnets are defined\n labeled_hosts = 0\n for dev in dev_div:\n dev_total = dev_div[dev]\n labeled_hosts += dev_total\n while dev_total > 0:\n while True:\n n = randrange(0, len(subnets))\n if (unlabeled_hosts[n] > 0):\n jsons[n]['roles'][dev] += 1\n unlabeled_hosts[n] -= 1\n break\n dev_total -= 1\n if labeled_hosts != host_count:\n print(\"WARNING: Labeled hosts ({}) didn't equal host count ({})\".format(labeled_hosts, host_count))\n\n return jsons", "def _copy_node_type_with_flowrules (cls, type_iter, target, log):\n for obj in type_iter:\n if obj.id not in target:\n c_obj = target.add_node(deepcopy(obj))\n log.debug(\"Copy NFFG node: %s\" % c_obj)\n else:\n for p in obj.ports:\n if p.id not in target.network.node[obj.id].ports:\n new_port = target.network.node[obj.id].add_port(id=p.id,\n properties=p.properties)\n log.debug(\"Copy port %s to NFFG element %s\" % (p, obj))\n if hasattr(p, 'flowrules'):\n log.debug(\"Merging flowrules of port %s of node %s\" %\n (p.id, obj.id))\n for fr in p.flowrules:\n if fr.id not in (f.id for f in new_port.flowrules):\n new_port.flowrules.append(copy.deepcopy(fr))\n else:\n old_port = target.network.node[obj.id].ports[p.id]\n for fr in p.flowrules:\n if fr.id not in (f.id for f in old_port.flowrules):\n old_port.flowrules.append(copy.deepcopy(fr))\n return target", "def testNeutronSFC(self):\n\n headers = {'Content-type': 'application/json'}\n\n print('->>>>>>> Create ports p1 - p6 ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n # Get network id\n network_resp = requests.get(\n \"http://0.0.0.0:19696/v2.0/networks?name=default\", headers=headers)\n self.assertEqual(network_resp.status_code, 200)\n network_id = json.loads(network_resp.content)[\"networks\"][0][\"id\"]\n\n port_responses = list(map(lambda name: 
requests.post(\"http://0.0.0.0:19696/v2.0/ports\",\n data='{\"port\": {\"name\": \"%s\", \"network_id\": \"%s\"}}' %\n (name, network_id),\n headers=headers),\n [\"p1\", \"p2\", \"p3\", \"p4\", \"p5\", \"p6\"]))\n\n for port in port_responses:\n self.assertEqual(port.status_code, 201)\n\n port_ids = list(map(lambda response: json.loads(response.content)[\"port\"][\"id\"], port_responses))\n\n listflavorsresponse = requests.get(\"http://0.0.0.0:18774/v2.1/id_bla/flavors\", headers=headers)\n self.assertEqual(listflavorsresponse.status_code, 200)\n flavors = json.loads(listflavorsresponse.content)[\"flavors\"]\n m1_tiny_flavor = list(filter(lambda flavor: flavor[\"name\"] == \"m1.tiny\", flavors))[0]\n\n listimagesdetailsresponse = requests.get(\"http://0.0.0.0:18774/v2.1/id_bla/images/detail\", headers=headers)\n self.assertEqual(listimagesdetailsresponse.status_code, 200)\n images = json.loads(listimagesdetailsresponse.content)[\"images\"]\n ubuntu_image = list(filter(lambda image: image[\"name\"] == \"ubuntu:trusty\", images))[0]\n\n server_url = \"http://0.0.0.0:18774/v2.1/id_bla/servers\"\n server_template = \\\n '{\"server\": {' \\\n '\"name\": \"%s\",' \\\n '\"networks\": [{\"port\": \"%s\"}, {\"port\": \"%s\"}],' \\\n '\"flavorRef\": \"%s\",' \\\n '\"imageRef\": \"%s\"' \\\n '}}'\n server_responses = map(lambda spec: (\n requests.post(server_url,\n data=server_template % (\n spec[\"name\"],\n spec[\"ingress\"],\n spec[\"egress\"],\n m1_tiny_flavor[\"id\"],\n ubuntu_image[\"id\"]\n ),\n headers=headers)\n ), [\n {\"name\": \"s1\", \"ingress\": \"p1\", \"egress\": \"p2\"},\n {\"name\": \"s2\", \"ingress\": \"p3\", \"egress\": \"p4\"},\n {\"name\": \"s3\", \"ingress\": \"p5\", \"egress\": \"p6\"},\n ])\n for response in server_responses:\n self.assertEqual(response.status_code, 200)\n\n print('->>>>>>> test Neutron SFC Port Pair Create ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_pairs\"\n pp1_resp = requests.post(url, data='{\"port_pair\": {\"name\": \"pp1\", \"ingress\": \"%s\", \"egress\": \"%s\"}}' % (\n port_ids[0], port_ids[1]), headers=headers)\n self.assertEqual(pp1_resp.status_code, 201)\n pp2_resp = requests.post(url, data='{\"port_pair\": {\"name\": \"pp2\", \"ingress\": \"%s\", \"egress\": \"%s\"}}' % (\n port_ids[2], port_ids[3]), headers=headers)\n self.assertEqual(pp2_resp.status_code, 201)\n pp3_resp = requests.post(url, data='{\"port_pair\": {\"name\": \"pp3\", \"ingress\": \"%s\", \"egress\": \"%s\"}}' % (\n port_ids[4], port_ids[5]), headers=headers)\n self.assertEqual(pp3_resp.status_code, 201)\n\n pp1_id = json.loads(pp1_resp.content)[\"port_pair\"][\"id\"]\n pp2_id = json.loads(pp2_resp.content)[\"port_pair\"][\"id\"]\n pp3_id = json.loads(pp3_resp.content)[\"port_pair\"][\"id\"]\n\n print('->>>>>>> test Neutron SFC Port Pair Update ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_pairs/%s\" % pp3_id\n pp3_update_resp = requests.put(\n url, data='{\"port_pair\": {\"description\": \"port_pair_update\"}}', headers=headers)\n self.assertEqual(pp3_update_resp.status_code, 200)\n self.assertEqual(json.loads(pp3_update_resp.content)[\n \"port_pair\"][\"description\"], \"port_pair_update\")\n\n print('->>>>>>> test Neutron SFC Port Pair Delete ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_pairs/%s\" % pp3_id\n 
pp3_delete_resp = requests.delete(url, headers=headers)\n self.assertEqual(pp3_delete_resp.status_code, 204)\n\n print('->>>>>>> test Neutron SFC Port Pair List ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_pairs\"\n pp_list_resp = requests.get(url, headers=headers)\n self.assertEqual(pp_list_resp.status_code, 200)\n pp_list = json.loads(pp_list_resp.content)[\"port_pairs\"]\n # only pp1 and pp2 should be left\n self.assertEqual(len(pp_list), 2)\n\n print('->>>>>>> test Neutron SFC Port Pair List filtered by id ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_pairs?id=%s\" % pp_list[0][\"id\"]\n pp_list_filtered_by_id_resp = requests.get(url, headers=headers)\n pp_list_filtered_by_id = json.loads(pp_list_filtered_by_id_resp.content)[\"port_pairs\"]\n self.assertEqual(pp_list_filtered_by_id_resp.status_code, 200)\n self.assertEqual(len(pp_list_filtered_by_id), 1)\n self.assertEqual(pp_list_filtered_by_id[0], pp_list[0])\n\n print('->>>>>>> test Neutron SFC Port Pair Show ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_pairs/%s\" % pp2_id\n pp2_show_resp = requests.get(url, headers=headers)\n self.assertEqual(pp2_show_resp.status_code, 200)\n self.assertEqual(json.loads(pp2_show_resp.content)\n [\"port_pair\"][\"name\"], \"pp2\")\n\n print('->>>>>>> test Neutron SFC Port Pair Group Create ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_pair_groups\"\n ppg1_resp = requests.post(\n url, data='{\"port_pair_group\": {\"name\": \"ppg1\", \"port_pairs\": [\"%s\"]}}' % (pp1_id), headers=headers)\n self.assertEqual(ppg1_resp.status_code, 201)\n ppg2_resp = requests.post(\n url, data='{\"port_pair_group\": {\"name\": \"ppg2\", \"port_pairs\": [\"%s\"]}}' % (pp2_id), headers=headers)\n self.assertEqual(ppg2_resp.status_code, 201)\n ppg3_resp = requests.post(\n url, data='{\"port_pair_group\": {\"name\": \"ppg3\", \"port_pairs\": [\"%s\"]}}' % (pp2_id), headers=headers)\n self.assertEqual(ppg3_resp.status_code, 201)\n\n ppg1_id = json.loads(ppg1_resp.content)[\"port_pair_group\"][\"id\"]\n ppg2_id = json.loads(ppg2_resp.content)[\"port_pair_group\"][\"id\"]\n ppg3_id = json.loads(ppg3_resp.content)[\"port_pair_group\"][\"id\"]\n\n print('->>>>>>> test Neutron SFC Port Pair Group Update ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_pair_groups/%s\" % ppg3_id\n ppg3_update_resp = requests.put(\n url, data='{\"port_pair_group\": {\"description\": \"port_pair_group_update\"}}', headers=headers)\n self.assertEqual(ppg3_update_resp.status_code, 200)\n self.assertEqual(json.loads(ppg3_update_resp.content)[\n \"port_pair_group\"][\"description\"], \"port_pair_group_update\")\n\n print('->>>>>>> test Neutron SFC Port Pair Group Delete ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_pair_groups/%s\" % ppg3_id\n ppg3_delete_resp = requests.delete(url, headers=headers)\n self.assertEqual(ppg3_delete_resp.status_code, 204)\n\n print('->>>>>>> test Neutron SFC Port Pair Group List ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = 
\"http://0.0.0.0:19696/v2.0/sfc/port_pair_groups\"\n ppg_list_resp = requests.get(url, headers=headers)\n self.assertEqual(ppg_list_resp.status_code, 200)\n # only ppg1 and ppg2 should be left\n self.assertEqual(\n len(json.loads(ppg_list_resp.content)[\"port_pair_groups\"]), 2)\n\n print('->>>>>>> test Neutron SFC Port Pair Group Show ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_pair_groups/%s\" % ppg2_id\n ppg2_show_resp = requests.get(url, headers=headers)\n self.assertEqual(ppg2_show_resp.status_code, 200)\n self.assertEqual(json.loads(ppg2_show_resp.content)[\n \"port_pair_group\"][\"name\"], \"ppg2\")\n\n print('->>>>>>> test Neutron SFC Flow Classifier Create ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/flow_classifiers\"\n fc1_resp = requests.post(\n url, data='{\"flow_classifier\": {\"name\": \"fc1\", \"logical_source_port\": \"p1\", \"source_port_range_min\": 22, \"source_port_range_max\": 4000}}', headers=headers)\n self.assertEqual(fc1_resp.status_code, 201)\n fc2_resp = requests.post(\n url, data='{\"flow_classifier\": {\"name\": \"fc2\", \"logical_source_port\": \"p2\", \"source_port_range_min\": 22, \"source_port_range_max\": 4000}}', headers=headers)\n self.assertEqual(fc2_resp.status_code, 201)\n\n fc1_id = json.loads(fc1_resp.content)[\"flow_classifier\"][\"id\"]\n fc2_id = json.loads(fc2_resp.content)[\"flow_classifier\"][\"id\"]\n\n print('->>>>>>> test Neutron SFC Flow Classifier Update ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/flow_classifiers/%s\" % fc2_id\n fc2_update_resp = requests.put(\n url, data='{\"flow_classifier\": {\"description\": \"flow_classifier_update\"}}', headers=headers)\n self.assertEqual(fc2_update_resp.status_code, 200)\n self.assertEqual(json.loads(fc2_update_resp.content)[\n \"flow_classifier\"][\"description\"], \"flow_classifier_update\")\n\n print('->>>>>>> test Neutron SFC Flow Classifier Delete ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/flow_classifiers/%s\" % fc2_id\n fc2_delete_resp = requests.delete(url, headers=headers)\n self.assertEqual(fc2_delete_resp.status_code, 204)\n\n print('->>>>>>> test Neutron SFC Flow Classifier List ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/flow_classifiers\"\n fc_list_resp = requests.get(url, headers=headers)\n self.assertEqual(fc_list_resp.status_code, 200)\n self.assertEqual(len(json.loads(fc_list_resp.content)\n [\"flow_classifiers\"]), 1) # only fc1\n\n print('->>>>>>> test Neutron SFC Flow Classifier Show ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/flow_classifiers/%s\" % fc1_id\n fc1_show_resp = requests.get(url, headers=headers)\n self.assertEqual(fc1_show_resp.status_code, 200)\n self.assertEqual(json.loads(fc1_show_resp.content)[\n \"flow_classifier\"][\"name\"], \"fc1\")\n\n print('->>>>>>> test Neutron SFC Port Chain Create ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_chains\"\n pc1_resp = requests.post(url, data='{\"port_chain\": {\"name\": \"pc1\", 
\"port_pair_groups\": [\"%s\"], \"flow_classifiers\": [\"%s\"]}}' % (\n ppg1_id, fc1_id), headers=headers)\n self.assertEqual(pc1_resp.status_code, 201)\n pc2_resp = requests.post(url, data='{\"port_chain\": {\"name\": \"pc2\", \"port_pair_groups\": [\"%s\"], \"flow_classifiers\": [\"%s\"]}}' % (\n ppg1_id, fc1_id), headers=headers)\n self.assertEqual(pc2_resp.status_code, 201)\n\n pc1_id = json.loads(pc1_resp.content)[\"port_chain\"][\"id\"]\n pc2_id = json.loads(pc2_resp.content)[\"port_chain\"][\"id\"]\n\n print('->>>>>>> test Neutron SFC Port Chain Update ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_chains/%s\" % pc2_id\n pc2_update_resp = requests.put(\n url, data='{\"port_chain\": {\"description\": \"port_chain_update\"}}', headers=headers)\n self.assertEqual(pc2_update_resp.status_code, 200)\n self.assertEqual(json.loads(pc2_update_resp.content)[\n \"port_chain\"][\"description\"], \"port_chain_update\")\n\n print('->>>>>>> test Neutron SFC Port Chain Delete ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_chains/%s\" % pc2_id\n pc2_delete_resp = requests.delete(url, headers=headers)\n self.assertEqual(pc2_delete_resp.status_code, 204)\n\n print('->>>>>>> test Neutron SFC Port Chain List ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_chains\"\n pc_list_resp = requests.get(url, headers=headers)\n self.assertEqual(pc_list_resp.status_code, 200)\n self.assertEqual(len(json.loads(pc_list_resp.content)\n [\"port_chains\"]), 1) # only pc1\n\n print('->>>>>>> test Neutron SFC Port Chain Show ->>>>>>>>>>>>>>>')\n print('->>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>>')\n url = \"http://0.0.0.0:19696/v2.0/sfc/port_chains/%s\" % pc1_id\n pc1_show_resp = requests.get(url, headers=headers)\n self.assertEqual(pc1_show_resp.status_code, 200)\n self.assertEqual(json.loads(pc1_show_resp.content)\n [\"port_chain\"][\"name\"], \"pc1\")", "def deploy_net(self, desired_config): # pylint: disable=too-many-locals\n self._bigip.refresh_net()\n\n # Get the list of route tasks\n LOGGER.debug(\"Getting route tasks...\")\n existing = self._bigip.get_routes()\n desired = desired_config.get('routes', dict())\n\n (create_routes, update_routes, delete_routes) = (\n self._get_resource_tasks(existing, desired)[0:3])\n\n # Get the list of arp tasks\n LOGGER.debug(\"Getting arp tasks...\")\n existing = self._bigip.get_arps()\n desired = desired_config.get('arps', dict())\n\n (create_arps, update_arps, delete_arps) = (\n self._get_resource_tasks(existing, desired)[0:3])\n\n # Get the list of tunnel tasks\n LOGGER.debug(\"Getting tunnel tasks...\")\n existing = self._bigip.get_fdb_tunnels()\n desired = desired_config.get('fdbTunnels', dict())\n (create_tunnels, update_tunnels, delete_tunnels) = (\n self._get_resource_tasks(existing, desired)[0:3])\n\n # If there are pre-existing (user-created) tunnels that we are\n # managing, we want to only update these tunnels.\n LOGGER.debug(\"Getting pre-existing tunnel update tasks...\")\n desired = desired_config.get('userFdbTunnels', dict())\n update_existing_tunnels = self._get_user_tunnel_tasks(desired)\n\n LOGGER.debug(\"Building task lists...\")\n create_tasks = create_arps + create_tunnels + create_routes\n update_tasks = update_arps + update_tunnels + update_existing_tunnels + update_routes\n 
delete_tasks = delete_arps + delete_tunnels + delete_routes\n\n taskq_len = len(create_tasks) + len(update_tasks) + len(delete_tasks)\n\n return self._run_tasks(\n taskq_len, create_tasks, update_tasks, delete_tasks)", "def generate_configs(executor, root=None, dest=None):\n from ramutils.cli.expconf import create_expconf\n\n futures = []\n submit = functools.partial(executor.submit, create_expconf)\n\n make_args_ = functools.partial(make_args, root=root, dest=dest) \\\n if root is not None else make_args\n\n for experiment in EXPERIMENTS:\n if \"FR1\" in experiment:\n submit(make_args_(experiment))\n\n if experiment in [\"FR5\", \"CatFR5\"] or \"TICL\" in experiment:\n anodes = ANODES[:1]\n cathodes = CATHODES[:1]\n min_amplitudes = MIN_AMPLITUDES[:1]\n amplitudes = MAX_AMPLITUDES[:1]\n\n get_args = functools.partial(make_args_, experiment,\n anodes=anodes, cathodes=cathodes,\n target_amplitudes=amplitudes)\n\n if \"TICL\" not in experiment:\n futures.append(submit(get_args()))\n else:\n futures.append(submit(get_args(trigger_pairs=[\"LM5_LM6\"])))\n\n return futures", "def _get_basic_firewall_gnp(self, host, firewall_networks, config):\n\n for network in firewall_networks:\n\n gnp_name = host.personality + \"-\" + network.type + \"-if-gnp\"\n addr_pool = self.dbapi.address_pool_get(network.pool_uuid)\n ip_version = IPAddress(f\"{addr_pool.network}\").version\n nodetype_selector = f\"has(nodetype) && nodetype == '{host.personality}'\"\n iftype_selector = f\"has(iftype) && iftype contains '{network.type}'\"\n selector = f\"{nodetype_selector} && {iftype_selector}\"\n ICMP = \"ICMP\"\n if (ip_version == 6):\n ICMP = \"ICMPv6\"\n\n firewall_gnp = dict()\n firewall_gnp[\"apiVersion\"] = \"crd.projectcalico.org/v1\"\n firewall_gnp[\"kind\"] = \"GlobalNetworkPolicy\"\n firewall_gnp[\"metadata\"] = {\"name\": gnp_name}\n\n firewall_gnp[\"spec\"] = dict()\n firewall_gnp[\"spec\"].update({\"applyOnForward\": False})\n firewall_gnp[\"spec\"].update({\"order\": 100})\n firewall_gnp[\"spec\"].update({\"selector\": selector})\n firewall_gnp[\"spec\"].update({\"types\": [\"Ingress\", \"Egress\"]})\n firewall_gnp[\"spec\"].update({\"egress\": list()})\n\n for proto in [\"TCP\", \"UDP\", ICMP]:\n rule = {\"metadata\": dict()}\n rule[\"metadata\"] = {\"annotations\": dict()}\n rule[\"metadata\"][\"annotations\"] = {\"name\":\n f\"stx-egr-{host.personality}-{network.type}-{proto.lower()}{ip_version}\"}\n rule.update({\"protocol\": proto})\n rule.update({\"ipVersion\": ip_version})\n rule.update({\"action\": \"Allow\"})\n firewall_gnp[\"spec\"][\"egress\"].append(rule)\n\n firewall_gnp[\"spec\"].update({\"ingress\": list()})\n for proto in [\"TCP\", \"UDP\", ICMP]:\n rule = {\"metadata\": dict()}\n rule[\"metadata\"] = {\"annotations\": dict()}\n rule[\"metadata\"][\"annotations\"] = {\"name\":\n f\"stx-ingr-{host.personality}-{network.type}-{proto.lower()}{ip_version}\"}\n rule.update({\"protocol\": proto})\n rule.update({\"ipVersion\": ip_version})\n rule.update({\"action\": \"Allow\"})\n firewall_gnp[\"spec\"][\"ingress\"].append(rule)\n config[PLATFORM_FIREWALL_CLASSES[network.type]] = copy.copy(firewall_gnp)", "def create_exporters(self):\n for node_cfg in self.node_cfg_list:\n self.create_node(node_cfg)", "def build_model(fix_first_layers_gen_b=True, fix_last_layer_gen_b=fix_last_layer_gen_new,\n fix_2last_layer_gen_b=fix_2last_layer_gen_new,\n fix_first_layers_disc_b=True, fix_last_layer_disc_b=fix_last_layer_disc_new,\n fix_2last_layer_disc_b=fix_2last_layer_disc_new\n ):\n\n with 
tf.name_scope('placeholders'):\n x_true = tf.placeholder(tf.float32, [None, 28, 28, 1])\n z = tf.placeholder(tf.float32, [None, input_dim])\n\n x_generated = generator(z, n_features_first=n_features_first,\n n_features_reduction_factor=n_features_reduction_factor,\n fix_first_layers=fix_first_layers_gen_b, fix_last_layer=fix_last_layer_gen_b,\n fix_2last_layer=fix_2last_layer_gen_b, architecture=architecture)\n\n if architecture == 'DCGAN':\n d_true1 = discriminator1(x_true, reuse=False, n_features_first=n_features_first,\n n_features_reduction_factor=n_features_reduction_factor,\n fix_first_layers=fix_first_layers_disc_b, fix_last_layer=fix_last_layer_disc_b,\n fix_2last_layer=fix_2last_layer_disc_b, architecture=architecture)\n d_true = discriminator2(d_true1, reuse=False, n_features_first=n_features_first,\n n_features_reduction_factor=n_features_reduction_factor,\n fix_first_layers=fix_first_layers_disc_b, fix_last_layer=fix_last_layer_disc_b,\n fix_2last_layer=fix_2last_layer_disc_b, architecture=architecture)\n\n d_generated1 = discriminator1(x_generated, reuse=True, n_features_first=n_features_first,\n n_features_reduction_factor=n_features_reduction_factor,\n fix_first_layers=fix_first_layers_disc_b, fix_last_layer=fix_last_layer_disc_b,\n fix_2last_layer=fix_2last_layer_disc_b, architecture=architecture)\n d_generated = discriminator2(d_generated1, reuse=True, n_features_first=n_features_first,\n n_features_reduction_factor=n_features_reduction_factor,\n fix_first_layers=fix_first_layers_disc_b, fix_last_layer=fix_last_layer_disc_b,\n fix_2last_layer=fix_2last_layer_disc_b, architecture=architecture)\n\n else: # WGAN-GP\n d_true = discriminator(x_true, reuse=False, n_features_first=n_features_first,\n n_features_reduction_factor=n_features_reduction_factor,\n fix_first_layers=fix_first_layers_disc_b, fix_last_layer=fix_last_layer_disc_b,\n fix_2last_layer=fix_2last_layer_disc_b, architecture=architecture)\n\n d_generated = discriminator(x_generated, reuse=True, n_features_first=n_features_first,\n n_features_reduction_factor=n_features_reduction_factor,\n fix_first_layers=fix_first_layers_disc_b, fix_last_layer=fix_last_layer_disc_b,\n fix_2last_layer=fix_2last_layer_disc_b, architecture=architecture)\n\n if architecture == 'DCGAN':\n with tf.name_scope('loss'):\n g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_generated,\n labels=tf.ones_like(d_generated)))\n d_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_generated,\n labels=tf.zeros_like(d_generated))) +\\\n tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=d_true,\n labels=tf.ones_like(d_true)))\n d_loss = d_loss/2.\n\n with tf.name_scope('optimizer'):\n optimizer = tf.train.AdamOptimizer(learning_rate=2*learning_rate, beta1=0.5)\n\n g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')\n g_train = optimizer.minimize(g_loss, var_list=g_vars)\n d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')\n d_train = optimizer.minimize(d_loss, var_list=d_vars)\n\n else: # WGAN-GP\n with tf.name_scope('regularizer'):\n epsilon = tf.random_uniform([batch_size, 1, 1, 1], 0.0, 1.0)\n x_hat = epsilon * x_true + (1 - epsilon) * x_generated\n\n # without splitting the discriminator\n d_hat = discriminator(x_hat, reuse=True, n_features_first=n_features_first,\n n_features_reduction_factor=n_features_reduction_factor,\n fix_first_layers=fix_first_layers_disc_b, fix_last_layer=fix_last_layer_disc_b,\n 
fix_2last_layer=fix_2last_layer_disc_b, architecture=architecture)\n\n gradients = tf.gradients(d_hat, x_hat)[0]\n ddx = tf.sqrt(tf.reduce_sum(gradients ** 2, axis=[1, 2]))\n d_regularizer = tf.reduce_mean((ddx - 1.0) ** 2)\n\n with tf.name_scope('loss'):\n g_loss = -tf.reduce_mean(d_generated)\n wasserstein_dist = tf.reduce_mean(d_true) - tf.reduce_mean(d_generated)\n d_loss = -wasserstein_dist + lambda_reg * d_regularizer\n\n with tf.name_scope('optimizer'):\n optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, beta1=0, beta2=0.9)\n # FK: TODO: beta1 = 0.5 in IWGAN, here 0 -> change? In experiments (only 1000 epochs) it seemed better with 0\n\n g_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='generator')\n g_train = optimizer.minimize(g_loss, var_list=g_vars)\n d_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='discriminator')\n d_train = optimizer.minimize(d_loss, var_list=d_vars)\n\n # initialize variables\n session.run(tf.global_variables_initializer())\n\n if architecture == 'DCGAN':\n return x_true, z, x_generated, d_true1, d_true, d_generated1, d_generated, g_loss, d_loss, optimizer, \\\n g_vars, g_train, d_vars, d_train\n else: # WGANGP\n return x_true, z, x_generated, d_true, d_generated, epsilon, x_hat, d_hat, gradients, ddx, d_regularizer, \\\n g_loss, wasserstein_dist, d_loss, optimizer, g_vars, g_train, d_vars, d_train", "def nodes_from_dict(nd=None,**kwargs):\n\n if not nd:\n err_msg = \"ERROR: No nodes data provided\"\n print(err_msg)\n return 1\n \n nodes = []\n\n ####################\n #Create BUS objects#\n ####################\n busd = {}\n for i, row in nd[\"buses\"].iterrows():\n if row[\"active\"] and not pd.isnull(row[\"active\"]):\n logger.info(\"bus {} will be created\".format(row[\"label\"]))\n bus = solph.Bus(label=row[\"label\"])\n nodes.append(bus)\n busd[row[\"label\"]] = bus\n \n if row[\"excess\"] and not pd.isnull(row[\"excess\"]):\n # Automatically add Sink for curtailment (excess)\n # Add variable cost for excess cost --> minimise curtailment\n nodes.append(\n solph.Sink(\n label=row[\"label\"] + \"_excess\",\n inputs={\n busd[row[\"label\"]]:solph.Flow(\n variable_costs = row[\"excess costs\"]\n )\n },\n )\n )\n # Automatically add Source for shortage\n # Add variable cost for shortage --> minimize shortage\n if row[\"shortage\"] and not pd.isnull(row[\"shortage\"]):\n nodes.append(\n solph.Source(\n label = row[\"label\"] + \"_shortage\",\n outputs={\n busd[row[\"label\"]]:solph.Flow(\n variable_costs=row[\"shortage costs\"]\n )\n },\n )\n )\n ########################\n # Create Source objects#\n ########################\n for i, row in nd[\"commodity_sources\"].iterrows():\n if row[\"active\"] and not pd.isnull(row[\"active\"]):\n nodes.append(\n solph.Source(\n label=row[\"label\"],\n outputs={\n busd[row[\"to\"]]: solph.Flow(\n variable_costs = row[\"variable costs\"]\n )\n },\n )\n )\n ########################\n # Create Source objects with fixed time series from 'renewables' table\n ########################\n \"\"\"\n A source can represent a pv-system, a wind power plant, an import of natural gas or a slack variable to avoid creating an in-feasible model.\n While a wind power plant will have an hourly feed-in depending on the weather conditions the natural_gas import might be restricted by \n maximum value (nominal_value) and an annual limit (summed_max). As we do have to pay for imported gas we should set variable costs. 
\n Comparable to the demand series an fix is used to define a fixed the normalised output of a wind power plant. \n Alternatively, you might use max to allow for easy curtailment. The nominal_value sets the installed capacity.\n \"\"\"\n for i, row in nd[\"renewables\"].iterrows():\n if row[\"active\"] and not pd.isnull(row[\"active\"]):\n # set static outflow values\n outflow_args = {}\n\n # get time series for node and parameter\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0] == row[\"label\"]:\n outflow_args[col.split(\".\")[1]]=nd[\"timeseries\"][col]\n # outflow_args[\"fix\"]=nd[\"timeseries\"][col]\n \n # TODO add NON-CONVEX to outflow_args\n if row[\"capex\"] and not pd.isnull(row[\"capex\"]):\n # with investment mode, nominal_value must be None\n logger.info(\"Invest {} capacity\".format(row[\"label\"]))\n invest_args = {}\n if not row[\"epc_invest\"] or pd.isnull(row[\"epc_invest\"]):\n epc_invest = economics.annuity(row[\"capex\"],20,0.08)\n else:\n epc_invest=row[\"epc_invest\"]\n invest_args[\"ep_costs\"] = epc_invest\n\n if row[\"max\"] and not pd.isnull(row[\"max\"]):\n invest_args[\"maximum\"] = row[\"max\"]\n\n if row[\"min\"] and not pd.isnull(row[\"min\"]):\n invest_args[\"minimum\"]=row[\"min\"]\n\n if row[\"existing\"] and not pd.isnull(row[\"existing\"]):\n invest_args[\"existing\"]=row[\"existing\"]\n \n outflow_args[\"investment\"] = solph.Investment(**invest_args) \n else: \n outflow_args[\"nominal_value\"] = row[\"capacity\"]\n \n # create\n nodes.append(\n solph.Source(\n label=row[\"label\"],\n outputs = {\n busd[row[\"to\"]]:solph.Flow(**outflow_args)\n }\n )\n )\n #######################\n # Create Sink objects # \n #######################\n \"\"\"\n A sink is normally used to define the demand within an energy model but it can also be used to detect excesses.\n\n The example shows the electricity demand of the electricity_bus defined above.\n - 'nd['timeseries'][col]' should be sequence of normalised values\n - 'nominal_value' is the maximum demand the normalised sequence is multiplied with.\n - Giving 'nd['timeseries'][col]' as parameter 'fix' means that the demand cannot be changed by the solver. \n \n In contrast to the 'demand sink' the 'excess sink' has normally less restrictions but is open to take the whole excess.\n \"\"\"\n for i, de in nd[\"demand\"].iterrows():\n if de[\"active\"] and not pd.isnull(de[\"active\"]):\n # set static inflow values\n inflow_args = {\n \"nominal_value\":de[\"nominal value\"]\n }\n # get time series for node and parameter\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0]==de[\"label\"]:\n # inflow_args[col.split(\".\")[1]]=nd[\"timeseries\"][col]\n # TODO: veriry other key than 'fix'?????\n inflow_args[\"fix\"]=nd[\"timeseries\"][col] \n \n # Create Sink object and append to nodes\n nodes.append(\n solph.Sink(\n label=de[\"label\"],\n inputs={\n busd[de[\"from\"]]:solph.Flow(**inflow_args)\n }\n )\n )\n #############################\n # Create Transformer object #\n #############################\n \"\"\"\n An instance of the Transformer class can represent a node with multiple input and output flows such as:\n - a power plant\n - a transport line \n - or any kind of a transforming process as electrolysis, a cooling device or a heat pump. \n The efficiency has to be constant within one time step to get a linear transformation.\n You can define a different efficiency for every time step (e.g. 
the thermal powerplant efficiency according \n to the ambient temperature) but this series has to be predefined and cannot be changed within the optimisation.\n\n A condensing power plant can be defined by a transformer with one input (fuel) and one output (electricity)\n ```\n b_gas = solph.Bus(label='natural_gas')\n b_el = solph.Bus(label='electricity')\n solph.Transformer(\n label=\"pp_gas\",\n inputs={bgas: solph.Flow()},\n outputs={b_el: solph.Flow(nominal_value=10e10)},\n conversion_factors={electricity_bus: 0.58})\n ```\n\n A CHP power plant would be defined in the same manner but with two outputs:\n ```\n b_gas = solph.Bus(label='natural_gas')\n b_el = solph.Bus(label='electricity')\n b_th = solph.Bus(label='heat')\n\n solph.Transformer(\n label='pp_chp',\n inputs={b_gas: Flow()},\n outputs={b_el: Flow(nominal_value=30),\n b_th: Flow(nominal_value=40)},\n conversion_factors={b_el: 0.3, b_th: 0.4})\n ```\n A CHP power plant with 70% coal and 30% natural gas can be defined with two inputs and two outputs:\n ```\n b_gas = solph.Bus(label='natural_gas')\n b_coal = solph.Bus(label='hard_coal')\n b_el = solph.Bus(label='electricity')\n b_th = solph.Bus(label='heat')\n\n solph.Transformer(\n label='pp_chp',\n inputs={b_gas: Flow(), b_coal: Flow()},\n outputs={b_el: Flow(nominal_value=30),\n b_th: Flow(nominal_value=40)},\n conversion_factors={b_el: 0.3, b_th: 0.4,\n b_coal: 0.7, b_gas: 0.3})\n ```\n \"\"\"\n for i, row in nd[\"transformers\"].iterrows():\n if row[\"active\"] and not pd.isnull(row[\"active\"]):\n # set static inflow values\n inflow_args = {\n \"variable_costs\":row[\"variable input costs\"]\n }\n # inflow_args = {}\n outflow_args = {}\n # get time series for inflow transformer\n for col in nd[\"timeseries\"].columns.values:\n if col.split(\".\")[0]==row[\"label\"]:\n # inflow_args[col.split(\".\")[1]] = nd[\"timeseries\"][col]\n inflow_args[\"fix\"] = nd[\"timeseries\"][col]\n \n #TODO: multi inputs/outputs and add investment\n\n if row[\"capex inflow\"] and not pd.isnull(row[\"capex inflow\"]):\n logger.info(\"Invest {} inflow capacity\".format(row[\"label\"])) \n invest_args = {}\n invest_args[\"ep_costs\"] = economics.annuity(row[\"capex inflow\"],20,0.08)\n\n if row[\"max inflow\"] and not pd.isnull(row[\"max inflow\"]):\n invest_args[\"maximum\"] = row[\"max inflow\"]\n\n if row[\"min inflow\"] and not pd.isnull(row[\"min inflow\"]):\n invest_args[\"minimum\"] = row[\"min inflow\"]\n\n if row[\"existing inflow\"] and not pd.isnull(row[\"existing inflow\"]):\n invest_args[\"existing\"] = row[\"existing inflow\"]\n\n inflow_args[\"investment\"] = solph.Investment(**invest_args)\n else: \n outflow_args[\"nominal_value\"] = row[\"capacity\"] # should be specify capacity inflow or outflow\n\n # create\n nodes.append(\n solph.Transformer(\n label=row[\"label\"],\n inputs = {\n busd[row[\"from\"]]:solph.Flow(**inflow_args)\n },\n outputs={\n busd[row[\"to\"]]:solph.Flow(**outflow_args)\n },\n conversion_factors = {\n busd[row[\"to\"]]:row[\"efficiency\"]\n }\n )\n )\n ##################################\n # Create Transformer CHP objects #\n ##################################\n for i, row in nd[\"transformers_chp\"].iterrows():\n if row[\"active\"] and not pd.isnull(row[\"active\"]):\n\n inflow_args = {}\n outflow_elec_args = {}\n outflow_heat_args = {}\n\n inflow_args[\"variable_costs\"] = row[\"variable input costs\"]\n\n if row[\"capex elec\"] and not pd.isnull(row[\"capex elec\"]):\n logger.info(\"Invest {} inflow capacity\".format(row[\"label\"])) \n invest_args = {}\n 
invest_args[\"ep_costs\"] = economics.annuity(row[\"capex elec\"],20,0.08)\n if row[\"max elec\"] and not pd.isnull(row[\"max elec\"]):\n invest_args[\"maximum\"] = row[\"max elec\"]\n if row[\"min elec\"] and not pd.isnull(row[\"min elec\"]):\n invest_args[\"minimum\"] = row[\"min elec\"]\n if row[\"existing elec\"] and not pd.isnull(row[\"existing elec\"]):\n invest_args[\"existing\"] = row[\"existing elec\"]\n \n outflow_elec_args[\"investment\"] = solph.Investment(**invest_args)\n investment = solph.Investment(**invest_args)\n else:\n # inflow_args[\"nominal_value\"] = row[\"capacity_el\"]\n outflow_elec_args[\"nominal_value\"] = row[\"capacity_el\"]\n outflow_heat_args[\"nominal_value\"] = row[\"capacity_heat\"]\n\n # Create\n nodes.append(\n solph.Transformer(\n label = row[\"label\"],\n inputs ={\n busd[row[\"from\"]]:solph.Flow(**inflow_args)\n },\n outputs={\n busd[row[\"to_el\"]]:solph.Flow(**outflow_elec_args),\n busd[row[\"to_heat\"]]:solph.Flow(**outflow_heat_args)\n },\n conversion_factors={\n busd[row[\"to_el\"]]:row[\"efficiency_el\"],\n busd[row[\"to_heat\"]]:row[\"efficiency_heat\"]\n }\n )\n )\n\n ##########################\n # Create Storage objects #\n ##########################\n for i, row in nd[\"storages\"].iterrows():\n if row[\"active\"] and not pd.isnull(row[\"active\"]):\n\n inflow_args = {}\n outflow_args = {}\n\n if row[\"capex\"] and not pd.isnull(row[\"capex\"]):\n logger.info(\"Invest {} storage capacity\".format(row[\"label\"]))\n\n invest_args = {}\n invest_args[\"ep_costs\"] = economics.annuity(row[\"capex\"],20,0.08)\n if row[\"max\"] and not pd.isnull(row[\"max\"]):\n invest_args[\"maximum\"] = row[\"max\"]\n if row[\"min\"] and not pd.isnull(row[\"min\"]):\n invest_args[\"minimum\"] = row[\"min\"]\n if row[\"existing\"] and not pd.isnull(row[\"existing\"]):\n invest_args[\"existing\"] = row[\"existing\"]\n\n investment=solph.Investment(\n **invest_args\n )\n nominal_capacity=None\n \n #TODO add if row[\"capex inflow\"] and if row[\"capex outflow\"]\n #TODO read relation_capacity_inflow/outflow from excel\n \n else:\n investment = None\n nominal_capacity = row[\"nominal capacity\"] \n \n if row[\"capacity inflow\"] and row[\"capacity inflow ratio\"]:\n logger.error(\"{} is overdetermined, only capacity inflow or capacity inflow ratio shoul be set\".format(row[\"label\"]))\n return 1\n if row[\"capacity inflow\"]:\n inflow_args[\"nominal_value\"] = row[\"capacity inflow\"]\n if row[\"capacity inflow ratio\"]:\n capacity_inflow_ratio = row[\"capacity inflow ratio\"]\n else:\n capacity_inflow_ratio = None\n inflow_args[\"variable_costs\"] = row[\"variable input costs\"]\n\n \n if row[\"capacity outflow\"] and row[\"capacity outflow ratio\"]:\n logger.error(\"{} is overdetermined, only capacity outflow or capacity outflow ratio shoul be set\".format(row[\"label\"]))\n return 1\n if row[\"capacity outflow\"]:\n outflow_args[\"nominal_value\"] = row[\"capacity outflow\"]\n if row[\"capacity outflow ratio\"]:\n capacity_outflow_ratio = row[\"capacity outflow ratio\"]\n else:\n capacity_outflow_ratio = None\n\n outflow_args[\"variable_costs\"] = row[\"variable output costs\"]\n\n nodes.append(\n solph.components.GenericStorage(\n label=row[\"label\"],\n inputs = {\n busd[row[\"bus\"]]:solph.Flow(**inflow_args)\n },\n outputs = {\n busd[row[\"bus\"]]:solph.Flow(**outflow_args)\n },\n investment=investment,\n nominal_storage_capacity=nominal_capacity,\n loss_rate = row[\"capacity loss\"],\n initial_storage_level = row[\"initial capacity\"],\n 
max_storage_level=row[\"capacity max\"],\n min_storage_level=row[\"capacity min\"],\n invest_relation_input_capacity = capacity_inflow_ratio,\n invest_relation_output_capacity = capacity_outflow_ratio,\n inflow_conversion_factor = row[\"efficiency inflow\"],\n outflow_conversion_factor = row[\"efficiency outflow\"]\n )\n )\n #######################\n # Create Link objects #\n #######################\n \"\"\"\n A Link object with 1...2 inputs and 1...2 outputs\n Note: This component is experimental. Use it with care\n \"\"\"\n for i, p in nd[\"powerlines\"].iterrows():\n if p[\"active\"] and not pd.isnull(p[\"active\"]):\n bus1 = busd[p[\"bus_1\"]]\n bus2 = busd[p[\"bus_2\"]]\n nodes.append(\n solph.custom.Link(\n label = \"powerline\" + \"_\" + p[\"bus_1\"] + \"_\" + p[\"bus_2\"],\n inputs = {\n bus1:solph.Flow(),\n bus2:solph.Flow()\n },\n outputs = {\n bus1: solph.Flow(nominal_value = p[\"capacity\"]),\n bus2: solph.Flow(nominal_value=p[\"capacity\"]),\n },\n conversion_factors={\n (bus1,bus2):p[\"efficiency\"],\n (bus2,bus1):p[\"efficiency\"]\n }\n )\n ) \n return nodes", "def test_deep_copy(api):\n config = api.config()\n f1, f2 = config.flows.flow(name='f1').flow(name='f2')\n f1.packet.ethernet().ipv4().tcp()\n f2.packet.ethernet().ipv4().udp()\n f3 = f1.clone()\n f3.name = 'f3'\n config.flows.append(f3)\n f4 = copy.deepcopy(f2)\n f4.name = 'f4'\n config.flows.append(f4)\n print(config)\n assert(len(config.flows) == 4)\n assert(config.flows[-2].name == f3.name)\n assert(config.flows[-1].name == f4.name)", "def build_graph(self, graph, inst_name, port_nets):\n return", "def mutate_toxiproxy(self, body, spec):\n # 1. Precompute the ports that need to be proxied\n # for every port specified in the containers' definitions\n containers_ports = []\n for container in body.spec.template.spec.containers:\n for port in container.ports:\n containers_ports.append(port.container_port)\n containers_mapping = dict()\n # add a mapping port, that starts at 10000\n counter = 10000\n for port in containers_ports:\n while counter in containers_ports or counter in containers_mapping.values():\n counter += 1\n containers_mapping[port] = counter\n\n # 2. Prepare proxies in the toxiproxy format\n proxies = spec.get(\"proxies\", [])\n for ingress_port, proxy_port in containers_mapping.items():\n proxies.append(dict(\n name=\"auto%s\" % ingress_port,\n listen=\"0.0.0.0:%s\" % proxy_port,\n upstream=\"127.0.0.1:%s\" % ingress_port,\n ingress_port=ingress_port,\n ))\n\n # 3. 
Prepare the Toxiproxy setup command through the startup probe\n toxiproxy_cli = spec.get(\"toxiproxyCli\", \"/go/bin/toxiproxy-cli\")\n populate_cmd = \"true\"\n for proxy in proxies:\n populate_cmd += \" && {cli} create {name} -l {listen} -u {upstream}\".format(\n cli=toxiproxy_cli,\n **proxy\n )\n toxics = spec.get(\"toxics\", [])\n for toxic in toxics:\n # if the user specifies directly port, assume they mean the autogenerated one\n name = toxic.get(\"targetProxy\")\n try:\n parsed = int(name)\n name = \"auto\" + name\n except ValueError:\n pass\n populate_cmd += \" && {cli} toxic add {name} -t {type} {attributes}\".format(\n cli=toxiproxy_cli,\n name=name,\n type=toxic.get(\"toxicType\"),\n attributes=\" \".join([\n \"-a {name}={value}\".format(\n **attr\n ) for attr in toxic.get(\"toxicAttributes\", [])\n ])\n )\n\n # add the toxiproxy side-car container\n body.spec.template.spec.containers.append(\n kubernetes.client.V1Container(\n name=\"chaos-toxiproxy\",\n image=spec.get(\"imageToxiproxy\", DEFAULT_TOXIPROXY_IMAGE),\n startup_probe=kubernetes.client.V1Probe(\n _exec=kubernetes.client.V1ExecAction(\n command=[\"/bin/sh\", \"-c\", populate_cmd],\n )\n ),\n )\n )\n\n # precompute the iptables command\n def get_port(listen_string):\n parts = listen_string.split(\":\")\n if len(parts) == 2:\n return parts[-1]\n return \"80\"\n iptables_cmd = \"iptables -t filter -A INPUT -s 127.0.0.1 -j ACCEPT\"\n names = []\n for proxy in proxies:\n ingress_port = proxy.get(\"ingress_port\")\n if ingress_port is not None:\n iptables_cmd += (\n ' && iptables -t nat -A PREROUTING -i eth0'\n ' -p tcp --dport {ingress_port}'\n ' -j REDIRECT --to-port {egress_port}'\n ' -m comment --comment \"{comment}\"'\n ).format(\n ingress_port=str(ingress_port),\n egress_port=get_port(proxy.get(\"listen\")),\n comment=proxy.get(\"name\")\n )\n names.append(\n \"{src}->{dst}\".format(\n src=ingress_port,\n dst=get_port(proxy.get(\"listen\")),\n )\n )\n iptables_cmd += ' && echo \"iptables rules setup successfully: {names}\"'.format(\n names=\", \".join(names)\n )\n\n # add an init container with the iptables config\n if body.spec.template.spec.init_containers is None:\n body.spec.template.spec.init_containers = []\n body.spec.template.spec.init_containers.append(\n kubernetes.client.V1Container(\n name=\"iptables-setup\",\n command=[\"/bin/sh\", \"-c\", iptables_cmd],\n args=[],\n image=spec.get(\"imageIptables\", DEFAULT_IPTABLES_IMAGE),\n security_context=kubernetes.client.V1SecurityContext(\n run_as_user=spec.get(\"user\"),\n capabilities=kubernetes.client.V1Capabilities(\n add=[\n \"NET_ADMIN\"\n ]\n )\n )\n )\n )", "def generate_networks(config, shared, logger, data_handlers, device,\n create_mnet=True, create_hnet=True, create_hhnet=True,\n create_dis=True):\n num_tasks = len(data_handlers)\n if hasattr(config, 'cl_scenario'):\n num_heads = 1 if config.cl_scenario == 2 else num_tasks\n else:\n assert hasattr(config, 'multi_head')\n num_heads = num_tasks if config.multi_head else 1\n\n # Sanity check!\n for i in range(1, num_tasks):\n assert np.prod(data_handlers[i].in_shape) == \\\n np.prod(data_handlers[0].in_shape)\n if data_handlers[0].classification:\n assert data_handlers[i].num_classes == data_handlers[0].num_classes\n else:\n assert np.prod(data_handlers[i].out_shape) == \\\n np.prod(data_handlers[0].out_shape)\n\n # Parse user \"wishes\".\n use_hnet = False\n use_hhnet = False\n use_dis = False\n no_mnet_weights = False\n\n if hasattr(config, 'mnet_only'):\n use_hnet = not config.mnet_only\n use_hhnet = 
not config.mnet_only and not shared.prior_focused and \\\n not config.no_hhnet\n # Note, without the hypernet, there is no weight distribution and therefore\n # no discriminator needed.\n use_dis = use_hnet and not config.no_dis\n no_mnet_weights = not config.mnet_only\n if hasattr(config, 'distill_iter'):\n # Note, if distillation is used, the hnet is first trained independent\n # of a hyper-hypernetwork, which is why it needs its own weights.\n no_hnet_weights = use_hhnet and config.distill_iter == -1\n else:\n no_hnet_weights = use_hhnet\n\n ####################\n ### Main network ###\n ####################\n if 'gmm' in shared.experiment_type or \\\n 'regression' in shared.experiment_type:\n mnet_type = 'mlp'\n in_shape = data_handlers[0].in_shape\n\n elif 'mnist' in shared.experiment_type:\n if hasattr(config, 'net_type'):\n logger.debug('Main network will be of type: %s.' % config.net_type)\n mnet_type = config.net_type\n else:\n logger.debug('Main network will be an MLP.')\n mnet_type = 'mlp'\n\n\n assert len(data_handlers[0].in_shape) == 3 # MNIST\n in_shape = data_handlers[0].in_shape\n # Note, that padding is currently only applied when transforming the\n # image to a torch tensor.\n if isinstance(data_handlers[0], PermutedMNIST):\n assert len(data_handlers[0].torch_in_shape) == 3 # MNIST\n in_shape = data_handlers[0].torch_in_shape\n\n else:\n assert 'cifar' in shared.experiment_type\n\n in_shape = [32, 32, 3]\n if 'zenke' in shared.experiment_type:\n assert not hasattr(config, 'net_type')\n mnet_type = 'zenke'\n else:\n assert 'resnet' in shared.experiment_type\n mnet_type = config.net_type\n\n if mnet_type == 'mlp':\n if len(in_shape) > 1:\n n_x = np.prod(in_shape)\n in_shape = [n_x]\n else:\n assert len(in_shape) == 3\n assert mnet_type in ['lenet', 'resnet', 'wrn', 'zenke']\n\n\n if data_handlers[0].classification:\n out_shape = [data_handlers[0].num_classes * num_heads]\n else:\n assert len(data_handlers[0].out_shape) == 1\n out_shape = [data_handlers[0].out_shape[0] * num_heads]\n\n if not create_mnet:\n # FIXME We would need to allow the passing of old `mnet`s.\n raise NotImplementedError('This function doesn\\'t support yet to ' +\n 'construct networks without constructing ' +\n 'a main network first.')\n\n logger.info('Creating main network ...')\n mnet = sutils.get_mnet_model(config, mnet_type, in_shape, out_shape,\n device, no_weights=no_mnet_weights)\n\n # Initialize main net weights, if any.\n assert not hasattr(config, 'custom_network_init')\n if hasattr(config, 'normal_init'):\n mnet.custom_init(normal_init=config.normal_init,\n normal_std=config.std_normal_init, zero_bias=True)\n else:\n mnet.custom_init(zero_bias=True)\n\n #####################\n ### Discriminator ###\n #####################\n dis = None\n if use_dis and create_dis:\n logger.info('Creating discriminator ...')\n if config.use_batchstats:\n in_shape = [mnet.num_params * 2]\n else:\n in_shape = [mnet.num_params]\n dis = sutils.get_mnet_model(config, config.dis_net_type, in_shape, [1],\n device, cprefix='dis_', no_weights=False)\n dis.custom_init(normal_init=config.normal_init,\n normal_std=config.std_normal_init, zero_bias=True)\n\n #####################\n ### Hypernetwork ###\n #####################\n def _hyperfan_init(net, mnet, cond_var, uncond_var):\n if isinstance(net, HMLP):\n net.apply_hyperfan_init(method='in', use_xavier=False,\n uncond_var=uncond_var, cond_var=cond_var,\n mnet=mnet)\n elif isinstance(net, ChunkedHMLP):\n net.apply_chunked_hyperfan_init(method='in', use_xavier=False,\n 
uncond_var=uncond_var, cond_var=cond_var, mnet=mnet, eps=1e-5,\n cemb_normal_init=False)\n elif isinstance(net, StructuredHMLP):\n # FIXME We should adapt `uncond_var`, as chunk embeddings are\n # additionally inputted as unconditional inputs.\n # FIXME We should provide further instructions on what individual\n # chunks represent (e.g., batchnorm scales and shifts should be\n # initialized differently).\n for int_hnet in net.internal_hnets:\n net.apply_hyperfan_init(method='in', use_xavier=False,\n uncond_var=uncond_var, cond_var=cond_var, mnet=None)\n else:\n raise NotImplementedError('No hyperfan-init implemented for ' +\n 'hypernetwork of type %s.' % type(net))\n\n hnet = None\n if use_hnet and create_hnet:\n logger.info('Creating hypernetwork ...')\n\n # For now, we either produce all or no weights with the hypernet.\n # Note, it can be that the mnet was produced with internal weights.\n assert mnet.hyper_shapes_learned is None or \\\n len(mnet.param_shapes) == len(mnet.hyper_shapes_learned)\n\n chunk_shapes = None\n num_per_chunk = None\n assembly_fct = None\n if config.imp_hnet_type == 'structured_hmlp':\n if mnet_type == 'resnet':\n chunk_shapes, num_per_chunk, assembly_fct = \\\n resnet_chunking(mnet,\n gcd_chunking=config.imp_shmlp_gcd_chunking)\n elif mnet_type == 'wrn':\n chunk_shapes, num_per_chunk, assembly_fct = \\\n wrn_chunking(mnet,\n gcd_chunking=config.imp_shmlp_gcd_chunking,\n ignore_bn_weights=False, ignore_out_weights=False)\n else:\n raise NotImplementedError('\"structured_hmlp\" not implemented ' +\n 'for network of type %s.' % mnet_type)\n\n # The hypernet is an implicit distribution, that only receives noise\n # as input, which are unconditional inputs.\n hnet = sutils.get_hypernet(config, device, config.imp_hnet_type,\n mnet.param_shapes, 0, cprefix='imp_',\n no_uncond_weights=no_hnet_weights, no_cond_weights=True,\n uncond_in_size=config.latent_dim, shmlp_chunk_shapes=chunk_shapes,\n shmlp_num_per_chunk=num_per_chunk, shmlp_assembly_fct=assembly_fct)\n #if isinstance(hnet, StructuredHMLP):\n # print(num_per_chunk)\n # for ii, int_hnet in enumerate(hnet.internal_hnets):\n # print(' Internal hnet %d with %d outputs.' 
% \\\n # (ii, int_hnet.num_outputs))\n\n ### Initialize hypernetwork.\n if not no_hnet_weights:\n if not config.hyper_fan_init:\n rtu.apply_custom_hnet_init(config, logger, hnet)\n else:\n _hyperfan_init(hnet, mnet, -1, config.latent_std**2)\n\n ### Apply noise trick if requested by user.\n if config.full_support_perturbation != -1:\n hnet = HPerturbWrapper(hnet, hnet_uncond_in_size=config.latent_dim,\n sigma_noise=config.full_support_perturbation)\n\n shared.noise_dim = hnet.num_outputs\n else:\n shared.noise_dim = config.latent_dim\n\n ##########################\n ### Hyper-hypernetwork ###\n ##########################\n hhnet = None\n if use_hhnet and create_hhnet:\n if not create_hnet:\n # FIXME We require an existing hnet to do this.\n raise NotImplementedError('This function doesn\\'t allow yet the ' +\n 'creation of a hyper-hypernet without ' +\n 'first creating a hypernetwork.')\n logger.info('Creating hyper-hypernetwork ...')\n\n assert hnet is not None\n assert len(hnet.unconditional_param_shapes) == len(hnet.param_shapes)\n hhnet = sutils.get_hypernet(config, device, config.hh_hnet_type,\n hnet.unconditional_param_shapes, num_tasks,\n cprefix='hh_')\n\n ### Initialize hypernetwork.\n if not config.hyper_fan_init:\n rtu.apply_custom_hnet_init(config, logger, hhnet)\n else:\n # Note, hyperfan-init doesn't take care of task-embedding\n # intialization.\n init_conditional_embeddings(hhnet,\n normal_std=config.std_normal_temb)\n\n _hyperfan_init(hhnet, hnet, config.std_normal_temb**2, -1)\n\n return mnet, hnet, hhnet, dis", "def get_clean_steps(self, node, ports):\n return [{'step': 'create_configuration',\n 'interface': 'raid',\n 'priority': 0},\n {'step': 'delete_configuration',\n 'interface': 'raid',\n 'priority': 0},\n {'step': 'erase_devices',\n 'interface': 'deploy',\n 'priority': 0},\n {'step': 'update_firmware',\n 'interface': 'management',\n 'priority': 0}]", "def get_model(point_cloud, is_training, bn_decay=None):\n #print(point_cloud.shape())\n batch_size = point_cloud.get_shape()[0].value\n BLOCK_SIZE1 = point_cloud.get_shape()[1].value\n BLOCK_SIZE2 = point_cloud.get_shape()[2].value\n \n #print batch_size, num_point, dim_point \n pixel_points = point_cloud[:, :, :, :2]\n\n input_image = point_cloud\n net1 = tf_util.conv2d(input_image, 128, [1, 1], # 3 is replaced by two \n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv11', bn_decay=bn_decay)\n #### Net1 \n net1 = tf_util.conv2d(net1, 128, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv12', bn_decay=bn_decay)\n \n\n net1 = tf_util.max_pool2d(net1, [4,4], stride=[4,4], \n padding='VALID', scope='maxpool12') \n\n net1 = tf_util.conv2d(net1, 64, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv15', bn_decay=bn_decay) \n net1 = tf_util.conv2d(net1, 64, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv16', bn_decay=bn_decay)\n\n\n #### Net2 \n\n\n net2 = tf_util.conv2d(input_image, 128, [1, 1], # 3 is replaced by two \n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv21', bn_decay=bn_decay)\n net2 = tf_util.conv2d(net2, 128, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv22', bn_decay=bn_decay)\n \n\n net2 = tf_util.max_pool2d(net2, [4,4], stride=[4,4], \n padding='VALID', scope='maxpool22') \n net2 = tf_util.conv2d(net2, 128, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, 
is_training=is_training,\n scope='conv25', bn_decay=bn_decay) \n net2 = tf_util.conv2d(net2, 128, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv26', bn_decay=bn_decay)\n\n\n #### Net3 \n\n\n net3 = tf_util.conv2d(input_image, 128, [1, 1], # 3 is replaced by two \n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv31', bn_decay=bn_decay)\n net3 = tf_util.conv2d(net3, 256, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv32', bn_decay=bn_decay)\n \n\n net3 = tf_util.max_pool2d(net3, [4,4], stride=[4,4], \n padding='VALID', scope='maxpool32') \n net3 = tf_util.conv2d(net3, 256, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv35', bn_decay=bn_decay) \n net3 = tf_util.conv2d(net3, 512, [1,1],\n padding='VALID', stride=[1,1],\n bn=True, is_training=is_training,\n scope='conv36', bn_decay=bn_decay)\n\n\n\n\n net1 = tf_util.max_pool2d(net1, [2,2], stride=[2,2], \n padding='VALID', scope='maxpool3') \n\n #### Concatenation of Net1, Net2, Net3 \n\n net2 = tf_util.max_pool2d(net2, [4,4], stride=[4,4], \n padding='VALID', scope='maxpool4') \n #print net2.shape \n\n net3 = tf_util.max_pool2d(net3, [8,8], stride=[1,1], \n padding='VALID', scope='maxpool5') \n\n net1 = tf.reshape(net1, [batch_size, -1])\n net2 = tf.reshape(net2, [batch_size, -1])\n net3 = tf.reshape(net3, [batch_size, -1])\n net = tf.concat([net1, net2, net3], 1)\n \n\n #### Fully Connected Layers - DropOut --- Bigger Version \n\n #### Try with smaller Network, i.e, 1024 parameters \n\n net = tf_util.fully_connected(net, 1024, bn=True, is_training=is_training,\n scope='fc1', bn_decay=bn_decay)\n net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,\n scope='dp1')\n\n net = tf_util.fully_connected(net, 1024, bn=True, is_training=is_training,\n scope='fc2', bn_decay=bn_decay)\n net = tf_util.dropout(net, keep_prob=0.5, is_training=is_training,\n scope='dp2')\n\n nett = tf_util.fully_connected(net, 40, bn=True, is_training=is_training,\n scope='fc5', bn_decay=bn_decay)\n netr = tf_util.fully_connected(net, 40, bn=True, is_training=is_training,\n scope='fc6', bn_decay=bn_decay)\n \n\n nett = tf_util.fully_connected(nett, 3, activation_fn=None, scope='fc7')\n netr = tf_util.fully_connected(netr, 4, activation_fn=None, scope='fc8')\n\n net = tf.concat([nett, netr], 1)\n\n return net, pixel_points # Note that pixel co-ordinates are not used directly during the training ", "def _process_task_inputs(self):\n _input = self._task.input\n log.debug('Input parsing for %s and node %s from container', self._task.project_id, self._task.internal_id)\n log.debug(_input)\n\n input_ports = dict()\n for port in _input:\n log.debug(port)\n self._process_task_input(port, input_ports)\n\n log.debug('DUMPING json')\n #dump json file\n if input_ports:\n file_name = os.path.join(self._executor.in_dir, 'input.json')\n with open(file_name, 'w') as f:\n json.dump(input_ports, f)\n\n log.debug('DUMPING DONE')", "def generate_params(sw):\n\n # List of vlan ids to use for this permutation\n vlan_ids = []\n # Physical ports required for this permutation per L3 interface\n phy_ports = []\n # L3 interfaces to be created\n l3_interfaces = 0\n # List of ip address for every host\n ip_address_hs = []\n # VxLAN interfaces to be created\n vxlan_ids = []\n # VNIs to be created\n vnis = {}\n # VTEP Peers to be created\n vtep_peers = []\n\n vlan_ids = [VLAN1, VLAN2, VLAN3]\n vxlan_ids = [TUN_NUM]\n vnis = {VNI: 
{'vlan': [VLAN1], 'vtep_peer': [VTEP_PEER_IP]}}\n l3_interfaces = 1\n phy_ports = [sw.vtysh_ports['if01'], sw.vtysh_ports['if02'],\n sw.vtysh_ports['if03'], sw.vtysh_ports['if04']]\n ip_address_hs = [H3_IP, H4_IP]\n vtep_peers = [H3_IP, H4_IP]\n\n return {'vlan_ids': vlan_ids,\n 'vxlan_ids': vxlan_ids,\n 'vnis': vnis,\n 'vtep_peers': vtep_peers,\n 'l3_interfaces': l3_interfaces,\n 'phy_ports': phy_ports,\n 'ip_address_hs': ip_address_hs}", "def init_input_pipeline(self, config):\n\n ######################\n # Calibrate parameters\n ######################\n\n print('Initiating input pipelines')\n\n # Update num classes in config\n config.num_classes = self.num_classes - len(self.ignored_labels)\n config.ignored_label_inds = [self.label_to_idx[ign_label] for ign_label in self.ignored_labels]\n\n print('ignored_label_inds:')\n print(config.ignored_label_inds)\n\n # Update network model in config\n config.network_model = self.network_model\n\n print('network_model:')\n print(config.network_model)\n\n # Calibrate generators to batch_num\n print('Calibrate generators to batch_num')\n self.batch_limit = self.calibrate_batches(config)\n\n # From config parameter, compute higher bound of neighbors number in a neighborhood\n hist_n = int(np.ceil(4 / 3 * np.pi * (config.density_parameter + 1) ** 3))\n\n # Initiate neighbors limit with higher bound\n print('Initiate neighbors limit with higher bound')\n self.neighborhood_limits = np.full(config.num_layers, hist_n, dtype=np.int32)\n\n # Calibrate max neighbors number\n print('Calibrate max neighbors number')\n self.calibrate_neighbors(config)\n\n ################################\n # Initiate tensorflow parameters\n ################################\n\n # Reset graph\n print('Reset graph')\n tf.reset_default_graph()\n\n # Set random seed (You also have to set it in network_architectures.weight_variable)\n #np.random.seed(42)\n #tf.set_random_seed(42)\n\n # Get generator and mapping function\n print('Get generator')\n gen_function, gen_types, gen_shapes = self.get_batch_gen('training', config)\n gen_function_val, _, _ = self.get_batch_gen('validation', config)\n print('Get mapping function')\n map_func = self.get_tf_mapping(config)\n\n ##################\n # Training dataset\n ##################\n\n # Create batched dataset from generator\n self.train_data = tf.data.Dataset.from_generator(gen_function,\n gen_types,\n gen_shapes)\n\n self.train_data = self.train_data.map(map_func=map_func, num_parallel_calls=self.num_threads)\n\n # Prefetch data\n self.train_data = self.train_data.prefetch(10)\n\n ##############\n # Test dataset\n ##############\n\n # Create batched dataset from generator\n self.val_data = tf.data.Dataset.from_generator(gen_function_val,\n gen_types,\n gen_shapes)\n\n # Transform inputs\n self.val_data = self.val_data.map(map_func=map_func, num_parallel_calls=self.num_threads)\n\n # Prefetch data\n self.val_data = self.val_data.prefetch(10)\n\n #################\n # Common iterator\n #################\n\n # create a iterator of the correct shape and type\n iter = tf.data.Iterator.from_structure(self.train_data.output_types, self.train_data.output_shapes)\n self.flat_inputs = iter.get_next()\n\n # create the initialisation operations\n self.train_init_op = iter.make_initializer(self.train_data)\n self.val_init_op = iter.make_initializer(self.val_data)", "def update_config_external_template(config):\r\n\r\n # best parameters from the paper\r\n config['train_batch_size'] = 16384\r\n config['lr'] = 3e-4\r\n config['sgd_minibatch_size'] = 
4096\r\n config['num_sgd_iter'] = 4\r\n config['rollout_fragment_length'] = 100\r\n\r\n # run ID to communicate to the http trainer\r\n config['run_uid'] = '_setme'\r\n\r\n # stable baselines accepts full episodes\r\n config[\"batch_mode\"] = \"complete_episodes\"\r\n\r\n # stable baselines server address\r\n config[\"http_remote_port\"] = \"http://127.0.0.1:50001\"\r\n\r\n # no gpus, stable baselines might use them\r\n config['num_gpus'] = 0\r\n\r\n # set trainer class\r\n config['_trainer'] = \"External\"\r\n config['_policy'] = \"PPO\"\r\n\r\n # tuned\r\n config['num_envs_per_worker'] = 10\r\n config['num_workers'] = 3\r\n return config", "def generate_params(sw):\n\n # List of vlan ids to use for this permutation\n vlan_ids = []\n # Physical ports required for this permutation per L3 interface\n phy_ports = []\n # L3 interfaces to be created\n l3_interfaces = 0\n # List of ip address required for this permutation\n ip_address_sw = []\n # List of ip address for every host\n ip_address_hs = []\n # VxLAN interfaces to be created\n vxlan_ids = []\n # VNIs to be created\n vnis = {}\n # VTEP Peers to be created\n vtep_peers = []\n\n vlan_ids = [VLAN1, VLAN2]\n vxlan_ids = [TUN_NUM]\n vnis = {VNI: {'vlan': [VLAN1], 'vtep_peer': [H2_IP]}}\n l3_interfaces = 1\n phy_ports = [sw.vtysh_ports['if01'], sw.vtysh_ports['if02']]\n ip_address_sw = [S1_IP]\n ip_address_hs = [H1_IP, H2_IP]\n vtep_peers = [H2_IP]\n\n return {'vlan_ids': vlan_ids,\n 'vxlan_ids': vxlan_ids,\n 'vnis': vnis,\n 'vtep_peers': vtep_peers,\n 'l3_interfaces': l3_interfaces,\n 'phy_ports': phy_ports,\n 'ip_address_sw': ip_address_sw,\n 'ip_address_hs': ip_address_hs}", "def _generator_network(self):\n final_layer = { 'func': self.final_func,\n 'size': self.input_size,\n 'type': \"dense\"\n }\n\n rev_arch = tf_reverse_architecture(self.architecture, final_layer=final_layer, batch_size=self.batch_size)\n reuse_dict = tf_build_reuse_dict(tf.trainable_variables()) if self.equal_weights else None\n\n # In any case we need to invoke creating the reversed architecture\n last_input = tf_build_architecture(rev_arch,\n batch_in=self.z_latent,\n scope_prefix=\"generate\",\n transpose=True,\n reuse_dict=reuse_dict,\n variables_collection=self.training_scope,\n data_format=self.data_format)\n\n return tf_ensure_flat(last_input)", "def generate_specs_build(self):\n from django_swagger_utils.drf_server.generators.swagger_generator import SwaggerGenerator\n\n swagger_gen = SwaggerGenerator(self.parser, self.paths, self.app_name)\n # generating request_response files\n swagger_gen.generate_request_response()\n # testing properties\n swagger_gen.generate_definitions()\n # generating global parameters\n swagger_gen.generate_parameters()\n # generating global response\n swagger_gen.generate_responses()\n # generating urls\n swagger_gen.generate_urls()", "def setup_to_transfer_learn(model, base_model):\n for layer in base_model.layers:\n layer.trainable = False\n model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])", "def generate_hosts(quantity, host_type):\n assert type(quantity) == int\n assert host_type in ['source', 'host']\n for n in range(0, quantity, 1):\n if host_type == 'host':\n yield {'hostname': 'host-{}'.format(uuid.uuid4()),\n 'lastReceived': random.randint(1154394061 * 1000, 1505330380 * 1000)}\n elif host_type == 'source':\n yield {'sourcePath': \".\".join(map(str, (random.randint(0, 254) for _ in range(4)))),\n 'lastReceived': random.randint(1154394061 * 1000, 1505330380 * 1000)}", "def 
_create_vports(self):\n vports = self._api.select_vports()\n imports = []\n for port in self._api.config.ports:\n if port.name not in vports.keys():\n index = len(vports) + len(imports) + 1\n imports.append({\n 'xpath': '/vport[%i]' % index,\n 'name': port.name,\n 'rxMode': 'captureAndMeasure',\n 'txMode': 'interleaved'\n })\n self._import(imports)\n for name, vport in self._api.select_vports().items():\n self._api.ixn_objects[name] = vport['href']", "def __init__(self,\n dir_recognition_networks='models/recognition_networks',\n list_recognition_networks=None,\n fn_weights='deep_feature_loss_weights.json',\n config_cochlear_model={}):\n if not os.path.isabs(fn_weights):\n fn_weights = os.path.join(dir_recognition_networks, fn_weights)\n with open(fn_weights, 'r') as f_weights:\n deep_feature_loss_weights = json.load(f_weights)\n if list_recognition_networks is None:\n print((\"`list_recognition_networks` not specified --> \"\n \"searching for all checkpoints in {}\".format(dir_recognition_networks)))\n list_fn_ckpt = glob.glob(os.path.join(dir_recognition_networks, '*index'))\n list_fn_ckpt = [fn_ckpt.replace('.index', '') for fn_ckpt in list_fn_ckpt]\n else:\n list_fn_ckpt = []\n for network_key in list_recognition_networks: \n tmp = glob.glob(os.path.join(dir_recognition_networks, '{}*index'.format(network_key)))\n msg = \"Failed to find exactly 1 checkpoint for recognition network {}\".format(network_key)\n assert len(tmp) == 1, msg\n list_fn_ckpt.append(tmp[0].replace('.index', ''))\n print(\"{} recognition networks included for deep feature loss:\".format(len(list_fn_ckpt)))\n config_recognition_networks = {}\n for fn_ckpt in list_fn_ckpt:\n network_key = os.path.basename(fn_ckpt).split('.')[0]\n if 'taskA' in network_key:\n n_classes_dict = {\"task_audioset\": 517}\n else:\n n_classes_dict = {\"task_word\": 794}\n config_recognition_networks[network_key] = {\n 'fn_ckpt': fn_ckpt,\n 'fn_arch': fn_ckpt[:fn_ckpt.rfind('_task')] + '.json',\n 'n_classes_dict': n_classes_dict,\n 'weights': deep_feature_loss_weights[network_key],\n }\n print('|__ {}: {}'.format(network_key, fn_ckpt))\n self.config_recognition_networks = config_recognition_networks\n self.config_cochlear_model = config_cochlear_model\n self.build_auditory_model()\n self.sess = None\n self.vars_loaded = False", "def build_feed_dict(self, input_frames, gt_output_frames, generator):\n feed_dict = {}\n batch_size = np.shape(gt_output_frames)[0]\n\n ##\n # Get generated frames from GeneratorModel\n ##\n\n g_feed_dict = {generator.input_frames_train: input_frames,\n generator.gt_frames_train: gt_output_frames}\n g_scale_preds = self.sess.run(generator.scale_preds_train, feed_dict=g_feed_dict)\n\n ##\n # Create discriminator feed dict\n ##\n for scale_num in xrange(self.num_scale_nets):\n scale_net = self.scale_nets[scale_num]\n\n # resize gt_output_frames\n scaled_gt_output_frames = np.empty([batch_size, scale_net.height, scale_net.width, 3])\n for i, img in enumerate(gt_output_frames):\n\t\t# for skimage.transform.resize, images need to be in range [0, 1], so normalize to\n # [0, 1] before resize and back to [-1, 1] after\n sknorm_img = (img / 2) + 0.5\n\n\n # https://github.com/dyelax/Adversarial_Video_Generation/issues/18\n sknorm_img = np.minimum(sknorm_img, 1)\n sknorm_img = np.maximum(sknorm_img, 0)\n\n\n\n resized_frame = resize(sknorm_img, [scale_net.height, scale_net.width, 3])\n scaled_gt_output_frames[i] = (resized_frame - 0.5) * 2\n\n # combine with resized gt_output_frames to get inputs for prediction\n 
scaled_input_frames = np.concatenate([g_scale_preds[scale_num],\n scaled_gt_output_frames])\n\n # convert to np array and add to feed_dict\n feed_dict[scale_net.input_frames] = scaled_input_frames\n\n # add labels for each image to feed_dict\n batch_size = np.shape(input_frames)[0]\n feed_dict[self.labels] = np.concatenate([np.zeros([batch_size, 1]),\n np.ones([batch_size, 1])])\n\n return feed_dict", "def start_net(self):\n super(FaucetTopoTestBase, self).start_net()\n # Create a dictionary of host information that might be used in a test later on.\n # This makes it easier to retrieve certain information and consolidates it into one\n # location.\n self.host_information = {}\n for host_id, host_name in self.topo.hosts_by_id.items():\n host_obj = self.net.get(host_name)\n vlan = self.host_vlans[host_id]\n ip_interface = ipaddress.ip_interface(self.host_ip_address(host_id, vlan))\n self.set_host_ip(host_obj, ip_interface)\n self.host_information[host_id] = {\n 'host': host_obj,\n 'ip': ip_interface,\n 'mac': host_obj.MAC(),\n 'vlan': vlan,\n 'bond': None,\n 'ports': {}\n }\n # Add information of hosts chosen dpid, port map values\n # TODO: This redoes logic from get_config()\n for i, dpid in enumerate(self.dpids):\n index = 1\n for host_id, links in self.host_links.items():\n if i in links:\n n_links = links.count(i)\n for _ in range(n_links):\n port = self.port_maps[dpid]['port_%d' % index]\n self.host_information[host_id]['ports'].setdefault(dpid, [])\n self.host_information[host_id]['ports'][dpid].append(port)\n index += 1\n # Store faucet vip interfaces\n self.faucet_vips = {}\n for vlan in range(self.n_vlans):\n self.faucet_vips[vlan] = ipaddress.ip_interface(self.faucet_vip(vlan))\n # Setup the linux bonds for LACP connected hosts\n self.setup_lacp_bonds()\n # Add host routes to hosts for inter vlan routing\n self.setup_intervlan_host_routes()", "def process_flow_upload(self, configlist):\n\n # config_tree = {}\n switches = [str(t[0]) for t in self.get_switches()]\n for swconfig in configlist: # for each\n dpid = list(swconfig.keys())[0]\n if dpid not in switches:\n break\n for flow in swconfig[dpid]:\n flow['dpid'] = dpid\n flow['operation'] = 'add'\n result = self.process_flow_message(flow)\n # table_id = flow['table_id']\n # tables = config_tree.setdefault(dpid, {})\n # table_flows = tables.setdefault(table_id, [])\n # table_flows.append(flow)\n\n # for sw in config_tree:\n # sorted_tables = sorted(config_tree[sw].keys(), reverse=True)\n # for tpid in sorted_tables:\n # flows = config_tree[sw][tpid]\n # for flow in flows:\n # result = self.process_flow_message(flow)\n\n return 'Flows added successfully!'" ]
[ "0.6021851", "0.5251869", "0.51448715", "0.5136205", "0.5126871", "0.5095791", "0.50636524", "0.49946162", "0.49695787", "0.49350056", "0.49320048", "0.49314785", "0.4922587", "0.48967493", "0.48889312", "0.4869051", "0.48633268", "0.4862456", "0.4857697", "0.484664", "0.48465016", "0.48330945", "0.48329172", "0.48281318", "0.48279575", "0.48007402", "0.47873983", "0.47758648", "0.47714445", "0.47679576", "0.4751456", "0.47420257", "0.47356084", "0.4733918", "0.4733291", "0.47329792", "0.47250372", "0.47215644", "0.47200528", "0.47160423", "0.47154304", "0.47119513", "0.47103855", "0.46930432", "0.46837676", "0.46794352", "0.46766517", "0.4673624", "0.4673502", "0.46699142", "0.46581393", "0.4655591", "0.46524528", "0.46498978", "0.46492246", "0.46485943", "0.46438298", "0.46390662", "0.46340582", "0.46114495", "0.46108505", "0.4609569", "0.4607617", "0.46058002", "0.46033484", "0.459594", "0.45942038", "0.45930362", "0.45880756", "0.45806834", "0.45763513", "0.4570102", "0.45667082", "0.45597115", "0.4555689", "0.4554767", "0.45529598", "0.45507616", "0.4550692", "0.454981", "0.4532301", "0.4522807", "0.45218328", "0.4521653", "0.45174608", "0.4514811", "0.4511889", "0.45117083", "0.45008585", "0.44994575", "0.4498104", "0.44918144", "0.44903746", "0.44881278", "0.44868612", "0.44836813", "0.4483433", "0.44818634", "0.44816977", "0.4479843" ]
0.8338399
0
Initialization of each point
def __init__(self, p0: Point, p1: Point, c0: Point = None, c1: Point = None) -> None:
    self.p0 = p0
    self.p1 = p1
    self.c0 = c0 if c0 is not None else p0
    self.c1 = c1 if c1 is not None else p1
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, points):\n self.points = points\n self.init()", "def __init__(self, pts=[]):\n self.set_points(pts)", "def __init__(self, points):\n\t\tself.points = points", "def __init__(self, num_points=5000):\n self.num_points = num_points\n\n # All walks start at (0, 0)\n self.x_values = [0]\n self.y_values = [0]", "def __init__(self, num_points=5000):\n self.num_points = num_points\n\n # All walks start at (0, 0)\n self.x_values = [0]\n self.y_values = [0]", "def __init__(self, num_points=5000):\n self.num_points = num_points\n\n # All motion starts at (0,0).\n self.x_values = [0]\n self.y_values = [0]", "def _InitialPoints(self):\n raise NotImplementedError, \"a sampling algorithm was not provided\"", "def __init__(self, x, y, n_points):\n\n self.x = x\n self.y = y\n self.n_points = n_points", "def __init__(self, num_points = 5000):\n self.num_points = num_points\n\n #all walks start at 0.0\n self.x_values = [0]\n self.y_values = [0]", "def init(self, init_points):\n\n # Generate random points\n l = [numpy.random.uniform(x[0], x[1], size=init_points) for x in self.bounds]\n\n # Concatenate new random points to possible existing points from self.explore method.\n self.init_points += list(map(list, zip(*l)))\n\n # Create empty list to store the new values of the function\n y_init = []\n\n # Evaluate target function at all initialization points (random + explore)\n for x in self.init_points:\n\n if self.verbose:\n print('Initializing function at point: ', dict(zip(self.keys, x)), end='')\n\n y_init.append(self.f(**dict(zip(self.keys, x))))\n\n if self.verbose:\n print(' | result: %f' % y_init[-1])\n\n # Append any other points passed by the self.initialize method (these also have\n # a corresponding target value passed by the user).\n self.init_points += self.x_init\n\n # Append the target value of self.initialize method.\n y_init += self.y_init\n\n # Turn it into numpy array and store.\n self.X = numpy.asarray(self.init_points)\n self.Y = numpy.asarray(y_init)\n\n # Updates the flag\n self.initialized = True", "def __init__(self, points):\n self.endpoints = points", "def __init__(self, initX, initY):\n self.x = initX\n self.y = initY", "def __init__(self, initX, initY):\n self.x = initX\n self.y = initY", "def __init_primitive(self, point_x, point_y, point_z):\n self.x = point_x\n self.y = point_y\n self.z = point_z", "def set_starting_points(self, number_of_points):\n n = int(number_of_points)\n self.init_value = n\n self.number_of_points = n\n self.x, self.y = [], []\n self.pp = [1] * 10\n self.pp_values = self.pp.copy()\n self.pp_mapping()\n r = 40\n for i in range(n):\n self.x.append(50 + r*math.cos(2*math.pi * i/n))\n self.y.append(50 + r*math.sin(2*math.pi * i/n))\n for i in self.text_boxes:\n i.set_val(\"1\")\n self.redraw()", "def __init__(self):\n self.x = 0\n self.y = 0", "def __init__(self):\n self.x = 0\n self.y = 0", "def pointsSetUp(self):\r\n self.background.draw(self.surface)\r\n for i in range(len(self.points)):\r\n self.points[i].organize()\r\n self.points[i].update()\r\n self.points[i].addNumber(i)\r\n self.points[i].setActiveTurn()", "def __init__(self):\n self.X = None\n self.y = None", "def __init__(self):\n self.X = None\n self.y = None", "def __init__(self):\n self.X = None\n self.y = None", "def __init__(self):\n self.X = None\n self.y = None", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y\n super(Point, self).__init__()", "def set_points(self, val=None):\r\n self._points = self.nx*self.ny*self.nz", "def __init__(self, points, 
verbose=False):\n assert(type(points) == np.ndarray)\n assert(points.dtype==int)\n assert(points.shape[1] == 3)\n assert(points.shape[0]>1)\n\n # Make points unique to avoid duplicate vertices:\n self.points = np.unique(points, axis=0)\n self.verbose = verbose\n self.g = self.__generate()", "def __init__(self, *points):\n self_points = []\n self._min_values = []\n self._max_values = []\n\n if len(points) == 1:\n self.points = points[1]\n elif len(points) > 1:\n self.points = points", "def __init__(self):\n\n self.X = None\n self.y = None", "def __init__(self):\n\n self.X = None\n self.y = None", "def __init__(self, point=None):\n\t\tif point is None:\n\t\t\tpoint = (0,0)\n\t\tself.x = point[0]\n\t\tself.y = point[1]", "def __init__(self, pt1, pt2):\n self.set_points(pt1, pt2)", "def SetInitialPoints(self, x0, radius=0.05):\n raise NotImplementedError, \"must be overwritten...\"", "def __init__(self, d):\n self._coords = [0]*d", "def func_init(self):\n self.points.set_data([], [])\n for line in self.lines:\n line.set_data([],[])\n self.annotation.set_text('')\n\n return tuple(self.lines) + (self.points, self.annotation)", "def init_atom_coords(self) -> None:\n ...", "def initialize(self):\n\n for timestep in self.x:\n self.y_previous.append(self.equation(timestep))\n self.y_current.append(self.equation(timestep))\n\n self.y_previous[0] = 0\n self.y_current[0] = 0\n self.y_previous[99] = 0\n self.y_current[99] = 0", "def __init__(self):\n\n self.points = None\n self.centroid_activation_frames = None\n self.noiseless_frames = None\n self.frames = None", "def __init__(self, num_points=5000):\n self.num_points = num_points\n\n #Todos los caminos comienzan en (0, 0).\n self.x_values = [0]\n self.y_values = [0]", "def __init__(self, d):\n\t\tself._coords = [0] * d", "def InitPointInsertion(self, vtkPoints, ):\n ...", "def __init__(self, x_0, y_0, initX, initY,h=5):\n self.x_0=x_0\n self.y_0=y_0\n self.x_init=initX\n self.y_init=initY\n self.step=h", "def InitPointInsertion(self, vtkPoints, p_int):\n ...", "def __init__(self):\n\n\t\tself.position = np.array([0, 0])", "def __init__(self):\n self.eyepoint = np.array([*self.eyepoint], dtype=np.float32)\n self.lookat = np.array([*self.lookat], dtype=np.float32)\n self.up = np.array([*self.up], dtype=np.float32)", "def initialize(self):\n#TODO: choose user defined START position\n values_type = np.dtype(float)\n self.visual_field = np.zeros(self.number_of_locs, dtype=values_type)\n self.weighted_sums = np.zeros(self.number_of_locs, dtype=values_type)\n self.prior_prob = 1.0 / np.prod(self.number_of_locs)\n self.post_probs = np.full(\n self.number_of_locs, self.prior_prob, dtype=values_type\n )\n starting_location = np.array(START)\n self.focus = get_index_of_in(starting_location,self.senzory_map)\n self.target_location = [\n x for x in xrange(self.number_of_locs) if x != self.focus\n ][random.randint(0,self.number_of_locs-2)]", "def init_hit_points(self, hit_points):\n self.hit_points = [hit_points, hit_points]", "def addPoints(self, points):\r\n self.points = points", "def _createPoints(self):\n self.doc2quest = self._docMapping()\n\n self.unigram, self.bigram = invertedIndex(self.documents)\n self.points = [dataPoint(key, self) for key in self.questions.keys()]", "def __init__(self, x=0, y=0):\r\n self.x = x\r\n self.y = y", "def __init__(self, x=0, y=0):\r\n self.x = x\r\n self.y = y", "def __init__(self, x=0, y=0):\r\n self.x = x\r\n self.y = y", "def __init__(self, x=0, y=0):\r\n self.x = x\r\n self.y = y", "def __init__(self, x=0, y=0):\r\n 
self.x = x\r\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x = 0, y = 0):\n self.x = x\n self.y = y", "def initDefaults(self):\n return _libsbml.Point_initDefaults(self)", "def __init__(self, x: float = 0, y: float = 0):\n self.data: [float, float] = [x, y]", "def fill_vectors(self):\n # use random numbers for generating plot data:\n random.seed(9) # fix the seed for testing\n for index in range(self.npoints):\n self.vector_x.append(index) # x coordinates\n for y in range(self.ncurves):\n self.vector_y[y].append(random.uniform(0,8))", "def initialiseData(self):\n self.currentPosition = 0\n self.xs = scipy.linspace(0.0, self.numberOfPoints*self.resolution, self.numberOfPoints)\n self.cursorXS = self.getCurrentPositionArray()\n self.cursorVertical = scipy.array([self.verticalLimit,0.0])\n self.array0 = scipy.zeros(self.numberOfPoints)\n self.array1 = scipy.zeros(self.numberOfPoints)\n self.array2 = scipy.zeros(self.numberOfPoints)\n self.array3 = scipy.zeros(self.numberOfPoints)\n self.array4 = scipy.zeros(self.numberOfPoints)\n self.array5 = scipy.zeros(self.numberOfPoints)\n self.array6 = scipy.zeros(self.numberOfPoints)\n self.array7 = scipy.zeros(self.numberOfPoints)\n self.channels = [self.array0,self.array1,self.array2,self.array3,\n self.array4,self.array5,self.array6,self.array7]\n self.arrayPlotData = chaco.ArrayPlotData(xs=self.xs,channel0=self.array0,channel1=self.array1,\n channel2=self.array2,channel3=self.array3,\n channel4=self.array4,channel5=self.array5,\n channel6=self.array6,channel7=self.array7,\n cursorXS = self.cursorXS, cursorVertical=self.cursorVertical)#will be the ArrayPlotData We need", "def __init__(self, x=0, y=0):\n self._x = x\n self._y = y", "def initiate(self):\n pts = []\n for point in self.points:\n pt = gr.Point(point[0],point[1])\n pts.append(pt)\n\n self.vis = [gr.Polygon(pts)]\n\n self.draw()", "def __init__(self,x=0, y=0):\n self.x = x\n self.y = y", "def _add_points(self):\n if not '_list_of_points' in self.__dict__.keys():\n self._list_of_points = [] \n for point in self['point'].items():\n self._list_of_points.append(point[1])", "def initialize(self):\n self.SIZE = self.vectors.shape[0]\n # todo can use max distance to allocation farthest apart points\n self.centroids = self.vectors[[random.randint(1, self.SIZE) for x in range(self.K)], :]", "def __init__(self, points, type_of_kmeans='default', distance_type='euclidian'):\n self.type_of_kmeans = type_of_kmeans\n self.distance_type = distance_type\n self.points = points\n self.labels = []\n\t## uma lista contendo os centroids mais proximos de cada ponto\n self.lista_centroid_mais_proximos = None", "def __init__(self, X, y):\n pass", "def __init__(self, P):\n self._n = len(P) # control point iterator\n self._P = P\n self._X, self._Y, self._Z, self._W = self.sep() \n self._bc = self._bn()", "def __init__ (self, points):\n\n self.points = tuple (points)\n # maximum number of 
enitities in this mount\n self.amount = len (self.points)\n # list of all entities in this mount\n # indices correspond with self.points\n self.mounts = [None] * self.amount", "def __init__(self, n_points, kind='cubic'):\n self.n_points = n_points\n self.kind = kind", "def __init__(self, x1, y1):\n self.x = x1\n self.y = y1", "def initPointingSequence(self):\n if not self.opsim_visits:\n self.readOpsimData()\n # Number of visits\n self.npoints = len(self.opsim_visits)\n # Starting date\n self.mjds = self.opsim_visits[0]['expMJD']\n # Ending date\n self.mjde = self.opsim_visits[-1]['expMJD']", "def _init_variables(self, spectra):\n if not spectra.is_sorted():\n spectra.sort()\n self._points = spectra.points()\n self._n = len(self._points)\n assert self._n >= 2\n self._hull_points = []", "def __init__(self, points, n_x=1, n_y=1, n_z=1, size_x=None, size_y=None, size_z=None, regular_bounding_box=True):\n self._points = points\n self.x_y_z = [n_x, n_y, n_z]\n self.sizes = [size_x, size_y, size_z]\n self.regular_bounding_box = regular_bounding_box", "def __init__(self, pointType):\r\n self.members = []", "def __init__(self,x=0,y=0):\n self.x = x\n self.y = y\n pass", "def __init__(self, x, y, data):\n super().__init__(x=x, y=y, data=data, has_analytic_ft=False)\n self._ee = {}\n self._mtf = None\n self._nu_p = None\n self._dnx = None\n self._dny = None", "def __init__(self, X, y):\n self.X = X\n self.y = y", "def __init__(self):\n self.lattices = []\n self.meshfns = []", "def initial_point(self, initial_point: Sequence[float] | None) -> None:\n self._initial_point = initial_point", "def __init__(self):\n super().__init__()\n self._points = 0\n self._segments = []\n self.fill_list()\n # i = random.randint(0, len(self._segments) - 1)\n # self.set_text(self._segments[i])\n self.reset()", "def initialize(self):\r\n N = self.N\r\n self.mean = array(self.x0, copy=True)\r\n self.sigma = self.sigma0\r\n self.sigmai = np.ones(N)\r\n self.ps = np.zeros(N) # path for individual and globalstep-size(s)\r\n self.r = np.zeros(N)\r\n self.pr = 0 # cumulation for zr = N(0,1)\r\n self.sigma_r = 0", "def x_init(self):\n pass", "def __init__(self, x, y):\r\n self.x=x\r\n self.y=y", "def __init__(self, x, y):\n # assigning the initial position\n self.x = x\n self.y = y", "def __init__(self, x, y):", "def reset(self):\n super(PolygonTool, self).reset()\n # self.__nsides = None\n # self.__increment = None\n # self.__external = False # make this adjustable?\n self.__center = None\n for _i in range(self.__nsides):\n self.__xpts[_i] = 0.0\n self.__ypts[_i] = 0.0", "def generate_points(num_points):\n for i in xrange(0, num_points):\n pass", "def __init__( self, seed=(1, 0, 0) ):\n x, y, z = seed\n self._coords = matrix( [[x], [y], [z], [1.]], 'd' )", "def initialize(self):\r\n self.countiter = 0\r\n self.xcurrent = self.xstart[:]\r\n raise NotImplementedError('method initialize() must be implemented in derived class')", "def __init__(self,sweep_points,flight_points):\n self.sweep_points=sweep_points\n self.flight_points=flight_points", "def __init__(self, x0, y0, x1, y1):\n\n self.x0 = x0\n self.y0 = y0\n self.x1 = x1\n self.y1 = y1" ]
[ "0.78019327", "0.74978656", "0.74227774", "0.71523833", "0.71523833", "0.71206594", "0.7075273", "0.70572877", "0.70357925", "0.6961247", "0.69041634", "0.68821687", "0.68821687", "0.68765813", "0.6853304", "0.6852539", "0.6852539", "0.68471855", "0.6817271", "0.6817271", "0.6817271", "0.6817271", "0.68060356", "0.68036157", "0.67870915", "0.6770435", "0.6769301", "0.6769301", "0.67668605", "0.67614186", "0.6757746", "0.6733204", "0.6688014", "0.6685688", "0.6619169", "0.6609523", "0.6607819", "0.6599338", "0.6554198", "0.655265", "0.6533133", "0.6494554", "0.6480429", "0.64773566", "0.6469693", "0.64623773", "0.6456097", "0.64401954", "0.64401954", "0.64401954", "0.64401954", "0.64401954", "0.6435577", "0.6435577", "0.6435577", "0.6435577", "0.6435577", "0.6435577", "0.6435577", "0.6435577", "0.6435577", "0.6435577", "0.6435577", "0.6435328", "0.6434347", "0.64213955", "0.6417252", "0.6412008", "0.6410573", "0.6383527", "0.6383129", "0.6375714", "0.63678133", "0.6360471", "0.6360089", "0.6354016", "0.6331872", "0.6331296", "0.63304615", "0.6320691", "0.63136303", "0.6312577", "0.6310214", "0.6305744", "0.62899274", "0.6274194", "0.6273105", "0.6265331", "0.6262112", "0.62584805", "0.6258355", "0.62552416", "0.62510455", "0.6242629", "0.6239836", "0.6239287", "0.6235477", "0.62340266", "0.62267286", "0.62259203" ]
0.65025926
41
Alternate initializer for compact input of coordinates
def from_floats(cls, *floats):
    curves = []
    for i in range(0, len(floats) - 2, 6):
        p1 = (floats[i], floats[i + 1])
        c1 = (floats[i + 2], floats[i + 3])
        c2 = (floats[i + 4], floats[i + 5])
        p2 = (floats[i + 6], floats[i + 7])
        curves.append(CubicBezierCurve(p1, p2, c1, c2))
    return cls(curves)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def __init__(self, lon: float = 0, lat: float = 0):\n super(Point2D, self).__init__(lon, lat)", "def __init__(self, d):\n\t\tself._coords = [0] * d", "def __init__(self, d):\n self._coords = [0]*d", "def __init__(self, coordinates):\n\n if len(coordinates) != 5:\n raise ValueError(\n \"Point coordinates are wrong defined. They must be \"\n \"defined as (x, y, z, azimuth, elevation).\"\n f\"Provided coordinates: {coordinates}.\"\n )\n\n # Cast and store input coordinates.\n self._coordinates = np.asarray(coordinates, dtype=np.float64).squeeze()\n\n # Provide center to `Electrode`.\n super().__init__(coordinates[:3])", "def __init__(self, coordinates, relative=False, data_type='complex'):\n\n super().__init__(\n coordinates=coordinates, relative=relative, data_type=data_type\n )", "def __init__(self, coordinates, relative=False, data_type='complex'):\n\n super().__init__(\n coordinates=coordinates, relative=relative, data_type=data_type\n )", "def __init__(self, x, y, z):\n self.x = float(x)\n self.y = float(y)\n self.z = float(z)", "def __init__(self, x=0, y=0, z=0):\n if type(x) is tuple:\n self.x = x[0]\n self.y = x[1]\n self.z = x[2]\n elif type(x) is vector:\n self.x = x.x\n self.y = x.y\n self.z = x.z\n else:\n self.x = x\n self.y = y\n self.z = z", "def __init__(self, coordinates):\n self.coordinates = coordinates", "def __init__(self, x: float = 0, y: float = 0):\n self.data: [float, float] = [x, y]", "def __init__(self, x, y, u):\n self.x = x\n self.y = y\n self.u = u", "def __init__(self, lat, long):\n\n self.lat = float(lat)\n self.long = float(long)\n self.cartesian = None", "def __init__(self, x,y=None,z=None):\r\n if isinstance(x, xyz):\r\n self.x = x.x\r\n self.y = x.y\r\n self.x = x.z\r\n else:\r\n self.x = float(x)\r\n self.y = float(y)\r\n self.z = float(z)", "def __init__( self, seed=(1, 0, 0) ):\n x, y, z = seed\n self._coords = matrix( [[x], [y], [z], [1.]], 'd' )", "def __init__(self, x=None, y=None):\n if y is None:\n if x is None:\n object.__setattr__(self, 'x', 0)\n object.__setattr__(self, 'y', 0)\n else:\n object.__setattr__(self, 'x', x[0])\n object.__setattr__(self, 'y', x[1])\n else:\n object.__setattr__(self, 'x', x)\n object.__setattr__(self, 'y', y)", "def __init__(self, x = np.float32(0.0), y = np.float32(0.0), z = np.float32(0.0)):\n\n self._x = np.float32( x )\n self._y = np.float32( y )\n self._z = np.float32( z )", "def __init__(self, coordinates, strength=1.0):\n\n super().__init__(coordinates=coordinates, strength=strength)", "def __init__(self, coordinates, strength=1.0):\n\n super().__init__(coordinates=coordinates, strength=strength)", "def __init__(self, coordinates, strength=1.0):\n\n super().__init__(coordinates=coordinates, strength=strength)", "def init_atom_coords(self) -> None:\n ...", "def __init__(self, x: float, y: float):\n self.x = x\n self.y = y", "def __init__(self, x: float, y: float):\n self.x = x\n self.y = y", "def __init__(self, p0: Point, p1: Point, c0: Point = None, c1: Point = None) -> None:\n self.p0 = p0\n self.p1 = p1\n self.c0 = c0 if c0 is not None else p0\n self.c1 = c1 if c1 is not None else p1", "def __init__(self, x, y):\n self.xCoordinate = x\n self.yCoordinate = y\n self.value = None", "def __init__(self, x0, y0, x1, y1):\n\n self.x0 = x0\n self.y0 = y0\n self.x1 = x1\n self.y1 = y1", "def __init__(self, x, y, z):\n self.x = x\n self.y = y\n self.z = z", "def __init__(self,x=0,y=0,z=0):\n self.x = x\n self.y = y\n self.z = z", "def from_xyz(cls, x, y, z):\n obj = cls()\n obj._x = x\n obj._y = y\n obj._z = z\n 
return obj", "def __init__(self, coor):\n self._coordinates = coor", "def __init__(self, coordinates, strength=1.0, length=1.0):\n\n super().__init__(\n coordinates=coordinates, strength=strength, length=length)", "def __init__(self, coordinates, strength=1.0, length=1.0):\n\n super().__init__(\n coordinates=coordinates, strength=strength, length=length)", "def __init__(self, x=0, y=0):\n self._x = x\n self._y = y", "def __init__(self, x=0, y=0):\r\n self.x = x\r\n self.y = y", "def __init__(self, x=0, y=0):\r\n self.x = x\r\n self.y = y", "def __init__(self, x=0, y=0):\r\n self.x = x\r\n self.y = y", "def __init__(self, x=0, y=0):\r\n self.x = x\r\n self.y = y", "def __init__(self, x=0, y=0):\r\n self.x = x\r\n self.y = y", "def __init__(self, offset, offsetFlipped, extent):\n if len(offset) != 2:\n raise RuntimeError(\"offset=%r does not have two values\" % (offset,))\n self.offset = tuple(int(val) for val in offset)\n\n if len(offsetFlipped) != 2:\n raise RuntimeError(\"offsetFlipped=%r does not have two values\" % (offsetFlipped,))\n self.offsetFlipped = tuple(bool(val) for val in offsetFlipped)\n\n if extent is None:\n self.extent = (None, None)\n else:\n if len(extent) != 2:\n raise RuntimeError(\"extent=%r does not have two values\" % (extent,))\n if None in extent:\n self.extent = (None, None)\n else:\n self.extent = tuple(int(val) for val in extent)", "def __init_primitive(self, point_x, point_y, point_z):\n self.x = point_x\n self.y = point_y\n self.z = point_z", "def make_simple_coords():\n \n x = np.array([144, 124, 97, 165, 114, 60, 165, 0, 76, 50, 147])\n y = np.array([ 0, 3, 21, 28, 34, 38, 51, 54, 58, 56, 61])\n coords = np.vstack((x,y)).T\n return coords", "def __init__(self, *args, **kwds):\n if args or kwds:\n super(tipCoords, self).__init__(*args, **kwds)\n # message fields cannot be None, assign default values for those that are\n if self.thumb is None:\n self.thumb = [0.] * 3\n if self.index is None:\n self.index = [0.] * 3\n if self.middle is None:\n self.middle = [0.] * 3\n if self.ring is None:\n self.ring = [0.] * 3\n if self.little is None:\n self.little = [0.] * 3\n else:\n self.thumb = [0.] * 3\n self.index = [0.] * 3\n self.middle = [0.] * 3\n self.ring = [0.] * 3\n self.little = [0.] 
* 3", "def __init__(self, x1, y1):\n self.x = x1\n self.y = y1", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x=0, y=0):\n self.x = x\n self.y = y", "def __init__(self, x = 0, y = 0):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self._x = x\n self._y = y", "def __init__(self,x,y,z):\n self.x=x\n self.y=y\n self.z=z", "def initialCoordinates():\r\n return (-250,-250)", "def __init__(self, coordinates, length=1.0):\n\n # Cast coordinates.\n coordinates = np.asarray(coordinates, dtype=np.float64).squeeze()\n\n # Check which format was provided.\n is_point = coordinates.shape == (5, )\n is_flat = coordinates.shape == (6, )\n is_dipole = coordinates.shape == (2, 3)\n\n # Store depending on format.\n if is_point:\n\n # Add length to attributes which have to be serialized.\n self._serialize = {'length'} | self._serialize\n\n # If magnetic, get the loop which area corresponds to length.\n if self.xtype == 'magnetic':\n points = point_to_square_loop(coordinates, length)\n\n # If electric, get the dipole.\n else:\n points = point_to_dipole(coordinates, length)\n\n # Store length and original input coordinates.\n self._length = length\n self._coordinates = coordinates\n\n elif is_flat or is_dipole:\n\n if is_flat:\n # Re-arrange for points.\n points = np.array([coordinates[::2], coordinates[1::2]])\n\n # Store original input.\n self._coordinates = coordinates\n\n else:\n # Input is already in the format for Electrode.\n points = coordinates\n\n # If magnetic, get the loop which area corresponds to its length.\n if self.xtype == 'magnetic':\n azimuth, elevation, length = dipole_to_point(points)\n center = tuple(np.sum(points, 0)/2)\n coo = (*center, azimuth, elevation)\n points = point_to_square_loop(coo, length)\n\n # Store original input.\n self._coordinates = coordinates\n\n # Ensure the two poles are distinct.\n if np.allclose(points[0, :], points[1, :]):\n raise ValueError(\n \"The two electrodes are identical, use the format \"\n \"(x, y, z, azimuth, elevation) instead. \"\n f\"Provided coordinates: {coordinates}.\"\n )\n\n else:\n raise ValueError(\n \"Coordinates are wrong defined. They must be defined either \"\n \"as a point, (x, y, z, azimuth, elevation), or as two points, \"\n \"(x1, x2, y1, y2, z1, z2) or [[x1, y1, z1], [x2, y2, z2]]. 
\"\n f\"Provided coordinates: {coordinates}.\"\n )\n\n super().__init__(points)", "def extra_coords(self) -> ExtraCoordsABC:", "def __init__(s,i,j):\n # Posição do centro\n s.cx, s.cy = convert(i,j)\n # Cor (pode ser passada para o construtor no futuro)\n s.cor = (200,200,200)\n\n # Vértices do hexágono\n s.pontos = (\n (s.cx, s.cy-L),\n (s.cx+l, s.cy-L/2),\n (s.cx+l, s.cy+L/2),\n (s.cx, s.cy+L),\n (s.cx-l, s.cy+L/2),\n (s.cx-l, s.cy-L/2),\n )", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self._x, self._y = x, y", "def __init__(self, x, y):\r\n self.x=x\r\n self.y=y", "def __init__(self, x, y):", "def __init__(self,x=0, y=0):\n self.x = x\n self.y = y", "def prepare_coords(coords):\n coords = np.asarray(coords).astype(np.float)\n if coords.ndim == 1:\n coords = np.array([coords])\n return coords", "def from_tuple(cls, coords):\n return cls(*coords)", "def __init__(self, x, y):\n self.x, self.y = x, y", "def __init__(self, X, y):\n pass", "def __init__(self, nodes: Dict[Hashable, List[List]], crs=None):\n\n for coords, _ in nodes.values():\n if len(coords) != 2:\n raise ValueError(\n 'Coordinate vertices for a gr3 type must be 2D, but got '\n f'coordinates {coords}.')\n\n self._id = list(nodes.keys())\n self._coords = np.array(\n [coords for coords, _ in nodes.values()])\n self._crs = CRS.from_user_input(crs) if crs is not None else crs\n self._values = np.array(\n [value for _, value in nodes.values()])", "def __init__(self,x=0,y=0):\n self.x = x\n self.y = y\n pass", "def __init__(self, initX, initY):\n self.x = initX\n self.y = initY", "def __init__(self, initX, initY):\n self.x = initX\n self.y = initY", "def __init__(self, x, y):\n\t\tself.x = x\n\t\tself.y = y", "def __init__(self, coordinates, name=None, **kwargs):\n\n # validate and set coordinates\n coordinates = make_coord_array(coordinates)\n self.set_trait(\"coordinates\", coordinates)\n self.not_a_trait = coordinates\n\n # precalculate once\n if self.coordinates.size == 0:\n pass\n\n elif self.coordinates.size == 1:\n self._is_monotonic = True\n\n elif self.coordinates.ndim > 1:\n self._is_monotonic = None\n self._is_descending = None\n self._is_uniform = None\n\n else:\n deltas = self.deltas\n if np.any(deltas <= 0):\n self._is_monotonic = False\n self._is_descending = False\n self._is_uniform = False\n else:\n self._is_monotonic = True\n self._is_descending = self.coordinates[1] < self.coordinates[0]\n self._is_uniform = np.allclose(deltas, deltas[0])\n if self._is_uniform:\n self._start = self.coordinates[0]\n self._stop = self.coordinates[-1]\n self._step = (self._stop - self._start) / (self.coordinates.size - 1)\n\n # 
set common properties\n super(ArrayCoordinates1d, self).__init__(name=name, **kwargs)", "def default_coord(height, width, h_offset=0, w_offset=0):\n coord_mat = np.zeros((height+h_offset, width+w_offset, 2), dtype='int')\n for i in range(height):\n for j in range(width):\n coord_mat[i, j] = (i, j)\n return coord_mat", "def __init__(self, x, y):\n if len(x) == len(y):\n self.x = x\n self.y = y\n logger.info(\"Creating Diva 2D valatxy object\")\n else:\n logger.error(\"Input vectors have not the same length\")\n raise Exception(\"Input vectors have not the same length\")", "def _get_coords(p_coords):\n l_ret = CoordinateInformation()\n if isinstance(p_coords, list):\n l_list = p_coords\n else:\n l_list = p_coords.strip('\\[\\]')\n l_list = l_list.split(',')\n try:\n l_ret.X_Easting = float(l_list[0])\n l_ret.Y_Northing = float(l_list[1])\n l_ret.Z_Height = float(l_list[2])\n except Exception as e_err:\n print('Error {}'.format(e_err))\n l_ret.X_Easting = 0.0\n l_ret.Y_Northing = 0.0\n l_ret.Z_Height = 0.0\n return l_ret", "def __init__(\n self, coordinates, features, original_labels=None, inverse_maps=None,\n ):\n self.coordinates = coordinates\n self.features = features\n self.original_labels = original_labels\n self.inverse_maps = inverse_maps", "def __init__(self, start =None, end =None):\n self.start = start if start else PositionValue2D()\n self.end = end if end else PositionValue2D()", "def __init__(self, x, y):\n\t\t\n\t\tself.x, self.y = x, y", "def __init__(self, c1, c2, c3=None, dim=2, coordsys=Cartesian):\n if not isinstance(c1, Number) or not isinstance(c2, Number) \\\n or (not isinstance(c3, Number) and c3 is not None):\n raise TypeError(\"Input coordinate(s) is/are invalid\")\n if not isinstance(dim, int) or isinstance(dim, bool):\n raise TypeError(\"Input dimension is invalid\")\n if dim < 2 or dim > 3:\n raise TypeError(\"Module only supports 2-dimensional and 3-dimensional vectors\")\n if coordsys not in (Cartesian, Cartesian_3, Polar, PhySpherical, MathSpherical):\n raise TypeError(\"Invalid coordinate system\")\n self.c1 = c1\n self.c2 = c2\n self.c3 = c3\n self.__dim = dim\n if dim == 2:\n self.c3 = None\n if coordsys == Cartesian_3:\n self.__coordsys = Cartesian\n elif coordsys in (MathSpherical, PhySpherical):\n self.__coordsys = Polar\n if dim == 3:\n if self.c3 is None: self.c3 = 0\n if coordsys == Cartesian:\n self.__coordsys = Cartesian_3\n elif coordsys == Polar:\n self.__coordsys = MathSpherical", "def __init__(self, location_id, x=0, y=0):\r\n self.location_id = location_id\r\n self.x = x\r\n self.y = y", "def __init__(self,x,y):\n self.x = x\n self.y = y", "def __init__(self, x, y):\n self.x = x\n self.y = y\n self.x1 = self.x + 30 # largeur et hauteur fixees\n self.y1 = self.y + 30" ]
[ "0.63444114", "0.63369143", "0.6208486", "0.6187477", "0.61472523", "0.61472523", "0.614598", "0.6136781", "0.61134976", "0.6090093", "0.607837", "0.6063738", "0.60410446", "0.6038614", "0.6038257", "0.6028245", "0.60152465", "0.60152465", "0.60152465", "0.5991858", "0.59773946", "0.59773946", "0.5969061", "0.5949068", "0.59456927", "0.5943764", "0.5913851", "0.5912751", "0.5873406", "0.5864023", "0.5864023", "0.58632857", "0.58502686", "0.58502686", "0.58502686", "0.58502686", "0.58502686", "0.58495396", "0.58388335", "0.5833825", "0.58277094", "0.58189386", "0.58127034", "0.58127034", "0.58127034", "0.58127034", "0.58127034", "0.58127034", "0.58127034", "0.58127034", "0.58127034", "0.58127034", "0.58127034", "0.579987", "0.57908154", "0.57899344", "0.5782968", "0.5778517", "0.57466906", "0.5738471", "0.57282645", "0.57282645", "0.57282645", "0.57282645", "0.57282645", "0.57282645", "0.57282645", "0.57282645", "0.57282645", "0.57282645", "0.57282645", "0.57282645", "0.57282645", "0.57282645", "0.57282645", "0.57282645", "0.57282645", "0.5719043", "0.57162523", "0.5710618", "0.5701256", "0.5700636", "0.5698315", "0.5689983", "0.56888586", "0.5687935", "0.56808305", "0.5674495", "0.5674495", "0.5644065", "0.5641283", "0.563374", "0.5627624", "0.56177783", "0.5611206", "0.5603349", "0.55951506", "0.5564222", "0.5563543", "0.5553116", "0.5531215" ]
0.0
-1
Compares each curve with the next to verify continuity. Note that this function treats curves as directed, thus two curves that start at the same point will return `False` when compared.
def assert_continuous(*curves: CubicBezierCurve) -> bool:
    if not curves:
        raise ValueError("CurveChecker.assert_continuous() cannot be called on an empty list")

    previous_curve = curves[0]
    for curve in curves[1:]:
        if previous_curve.p1 != curve.p0:
            return False
        previous_curve = curve
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def assert_differentiable(*curves: CubicBezierCurve) -> bool:\n if not curves:\n raise ValueError(\"CurveChecker.assert_differentiable() cannot be called on an empty list\")\n\n if not assert_continuous(*curves):\n return False\n\n for curve0, curve1 in zip(curves, curves[1:]):\n if not assert_collinear(curve0.c1, curve1.p0, curve1.c0):\n return False\n return True", "def is_on_curve(self):\n if self.infinity:\n return True\n left = self.y * self.y\n right = self.x * self.x * self.x + self.ec.a * self.x + self.ec.b\n\n return left == right", "def has_converged(x, y, epsilon=EPSILON):\n for a, b in itertools.izip(x, y):\n if abs(a - b) > epsilon:\n return False\n return True", "def has_converged(x, y, epsilon=EPSILON):\n for a, b in itertools.izip(x, y):\n if abs(a - b) > epsilon:\n return False\n return True", "def curvesSimilar(t1, y1, t2, y2, tol):\n # Make synchornized version of t2,y2 called t2sync,y2sync.\n t2sync=[]\n y2sync=[]\n for timepoint1 in t1:\n (index, timepoint2)=getNearestTime(timepoint1, t2sync)\n t2sync.append(timepoint2)\n y2sync.append(y2[index])\n\n # Get R^2 value equivalent:\n normalizedError=[(y1[x]-y2sync[x])**2/y1[x]**2 for x in range(len(y1))]/len(y1)\n\n if normalizedError > tol:\n return False\n else: \n return True", "def edges_is_closed_curve(edges):\n e_prev = first = edges[0]\n for e in edges[1:]:\n if e_prev[1] != e[0]:\n if e_prev[1] == first[0]:\n # new loop\n first = e\n else:\n return False\n e_prev = e\n if e_prev[1] != first[0]:\n return False\n return True", "def comparison_test():\n for pose in SE2.interesting_points():\n se2 = se2_from_SE2(pose)\n SE2a = SE2_from_se2_slow(se2)\n SE2b = SE2_from_se2(se2)\n # printm('pose', pose, 'se2', se2)\n # printm('SE2a', SE2a, 'SE2b', SE2b)\n SE2.assert_close(SE2a, pose)\n # print('SE2a = pose Their distance is %f' % d)\n SE2.assert_close(SE2b, pose)\n # print('SE2b = pose Their distance is %f' % d)\n assert_allclose(SE2a, SE2b, atol=1e-8, err_msg=\"SE2a != SE2b\")\n assert_allclose(SE2a, pose, atol=1e-8, err_msg=\"SE2a != pose\")\n assert_allclose(SE2b, pose, atol=1e-8, err_msg=\"SE2b != pose\")", "def is_converged(self,a,b):\n return np.array_equal(a,b)", "def _isConsecutive(self, chord1, chord2):\n for voice1, note1 in enumerate(chord2.getNotes()):\n if note1 != None:\n for voice2, note2 in enumerate(chord2.getNotes()[voice1+1:]):\n if note2 != None:\n voice2 += voice1 + 1\n if note1.distance(note2) in [6, 7, 12]:\n if (chord1.getNote(voice1).distance(chord1.getNote(voice2)) % 12) in [0, 6, 7]: # Check if parallel\n return True\n elif chord1.getNote(voice1) < note1 and chord1.getNote(voice2) < note2: # Check if consecutive upward\n return True\n elif chord1.getNote(voice1) > note1 and chord1.getNote(voice2) > note2: # Check if consecutive downward\n return True\n\n return False", "def done_comparator(self, readback: float, setpoint: float) -> bool:\n kwargs = {}\n if self.atol is not None:\n kwargs[\"atol\"] = self.atol\n if self.rtol is not None:\n kwargs[\"rtol\"] = self.rtol\n return np.isclose(readback, setpoint, **kwargs)", "def Checker(a,b,n,x):\n if n==0:\n if abs(a[0]-b[0])>=x: #if the changes in eta from one time step to another is more than .05mm\n return True #return true to continue the loop\n else:\n return False #stop the loop (this only happens if all of the points had a change of less than .05mm)\n elif abs(a[n]-b[n])>=x: #this checks each of the points in the channel \n return True #if any have too big a change the loop continues\n else: #if that point in the channel has small enough 
change\n Checker(a,b,n-1) #check the next point in the channel", "def test_circular_vs_linear():\n vac_sys = circular_or_linear_system(False)\n P_circ = [chamber.P for chamber in vac_sys.chambers()]\n P0_circ = [0.9999876873794514, 0.0012312620548606836]\n\n vac_sys = circular_or_linear_system(True)\n P_straight = [chamber.P for chamber in vac_sys.chambers()]\n P0_straight = [0.999975405344194, 0.0024594655806102796]\n assert all(isclose(P, P0) for P, P0 in zip(P_circ, P0_circ))\n assert all(isclose(P, P0) for P, P0 in zip(P_straight, P0_straight))\n\n assert isclose(P_circ[0], P_straight[0], abs_tol=.01)\n assert isclose(P_circ[1], 2 * P_straight[1], abs_tol=.01)", "def _is_converged(self):\n if self._last_operating_point is None:\n return False\n\n # Tolerance for comparing operating points. If all states changes\n # within this tolerance in the Euclidean norm then we've converged.\n TOLERANCE = 1e-4\n for ii in range(self._horizon):\n last_x = self._last_operating_point[0][ii]\n current_x = self._current_operating_point[0][ii]\n\n if np.linalg.norm(last_x - current_x) > TOLERANCE:\n return False\n\n return True", "def test_compare_different_expectations(self):\n\n pd_single = norm(0, 1)\n pd = []\n for i in range(0, 3):\n pd.append(pd_single)\n meas = [-1, 0, 1]\n meanCRIGN1, singleCRIGN1 = crign.crign(pd, meas)\n\n pd2 = []\n for i in range(0, 3):\n pd2.append(norm(i, 1))\n meas2 = [-1, 1, 3]\n\n meanCRIGN2, singleCRIGN2 = crign.crign(pd2, meas2)\n\n is_good = np.isclose(singleCRIGN1, singleCRIGN2).all()\n assert_true(is_good, msg=\"Relation of individual CRIGN values should return roughly the same value.\")", "def checarPs(self,p1,p2):\n return abs(p1-p2) < 0.00001", "def compare_curve(geometry_x, geometry_y):\n arct = CreateGeometryFromWkt(geometry_x)\n pgis = CreateGeometryFromWkt(geometry_y)\n\n intersection_length = Geometry.Length(Geometry.Intersection(arct, pgis))\n arct_length = Geometry.Length(arct)\n pgis_length = Geometry.Length(pgis)\n # result = compare_float(intersection_length, arct_length, pgis_length,EPOCH_CURVE)\n result = compare3float_relative(pgis_length, arct_length,\n intersection_length, EPOCH_CURVE_RELATIVE)\n return result", "def are_symmetrically_related(self, point_a, point_b, tol=0.001):\n if np.allclose(self.operate(point_a), point_b, atol=tol):\n return True\n if np.allclose(self.operate(point_b), point_a, atol=tol):\n return True\n return False", "def is_point_on_curve(a, b, p, x, y):\n assert isinstance(a, Bn)\n assert isinstance(b, Bn)\n assert isinstance(p, Bn) and p > 0\n assert (isinstance(x, Bn) and isinstance(y, Bn)) \\\n or (x == None and y == None)\n\n if x == None and y == None:\n return True\n\n lhs = (y * y) % p\n rhs = (x*x*x + a*x + b) % p\n on_curve = (lhs == rhs)\n\n return on_curve", "def _same(p1,p2,prec=0.0001):\n #return num.all(num.equal(p1,p2))\n t1 = num.fabs(p1[0]-p2[0]) < prec\n t2 = num.fabs(p1[1]-p2[1]) < prec\n if t1 and t2:\n #print \"same\", p1,p2\n return True", "def is_recurrent(self):\n G = self._get_recurrence_graph()\n # C = G.strongly_connected_components()\n first_component = next(nx.strongly_connected_components(G))\n abs_numbers = {abs(x) for x in first_component}\n # return sorted(list(set([abs(x) for x in C[0]]))) == \\\n # range(1, self.num_branches()+1)\n return abs_numbers == set(range(1, self.num_branches()+1))", "def check_changes(yield_curve, forecast):\r\n df = pd.read_csv('data/diff_df.csv').set_index('DATE')\r\n if list(df.index)[-1] != list(yield_curve.index)[-1]:\r\n forecast = 
pd.read_csv('data/forecast.csv')\r\n yield_curve = df", "def _chain_almost_equal(a,b, rtol=RTOL, atol=ATOL):\n for a_part, b_part in zip(a.parts, b.parts):\n for a_seg, b_seg in zip(a_part, b_part):\n if not np.allclose(a_seg, b_seg,\n rtol=RTOL, atol=ATOL):\n return False\n return True", "def same_edge(self, other, precision=0):\n return self.id == other.id \\\n and self.start_node == other.start_node \\\n and self.end_node == other.end_node \\\n and abs(self.cost - other.cost) <= precision \\\n and abs(self.reverse_cost - other.reverse_cost) <= precision \\\n and self.reversed == other.reversed", "def point_on_curve(self, P):\n x, y = modp(self.p, P.x, P.y)\n lhs = y ** 2\n rhs = x ** 3 + x * self.a + self.b\n return lhs == rhs", "def all_close(goal, actual, tolerance):\n #all_equal = True\n if type(goal) is list:\n for index in range(len(goal)):\n if abs(actual[index] - goal[index]) > tolerance:\n return False\n\n elif type(goal) is geometry_msgs.msg.PoseStamped:\n return all_close(goal.pose, actual.pose, tolerance)\n\n elif type(goal) is geometry_msgs.msg.Pose:\n return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)\n\n return True", "def get_similar_lines(self, Coe1, Coe2):\n line1_victor = [Coe1[1], -Coe1[0]]\n line2_victor = [Coe2[1], -Coe2[0]]\n victor = line1_victor[1] * line2_victor[0] - line2_victor[1] * line1_victor[0]\n if 0 <= round(victor, 2) <= 0.2:\n return True\n else:\n return False", "def part1b_1():\n xs = exampleInput\n backward = submission.computeBackward(simpleCRF, xs)\n for i in xrange(len(xs)):\n grader.requireIsEqual( 1.0, sum( backward[i].values() ) )", "def all_close(goal, actual, tolerance):\n all_equal = True\n if type(goal) is list:\n for index in range(len(goal)):\n if abs(actual[index] - goal[index]) > tolerance:\n return False\n\n elif type(goal) is geometry_msgs.msg.PoseStamped:\n return all_close(goal.pose, actual.pose, tolerance)\n\n elif type(goal) is geometry_msgs.msg.Pose:\n return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)\n\n return True", "def all_close(goal, actual, tolerance):\n all_equal = True\n if type(goal) is list:\n for index in range(len(goal)):\n if abs(actual[index] - goal[index]) > tolerance:\n return False\n\n elif type(goal) is geometry_msgs.msg.PoseStamped:\n return all_close(goal.pose, actual.pose, tolerance)\n\n elif type(goal) is geometry_msgs.msg.Pose:\n return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)\n\n return True", "def all_close(goal, actual, tolerance):\n all_equal = True\n if type(goal) is list:\n for index in range(len(goal)):\n if abs(actual[index] - goal[index]) > tolerance:\n return False\n\n elif type(goal) is geometry_msgs.msg.PoseStamped:\n return all_close(goal.pose, actual.pose, tolerance)\n\n elif type(goal) is geometry_msgs.msg.Pose:\n return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)\n\n return True", "def _same_votes(data_set):\r\n current = data_set[0].dat_votes\r\n for data_point in data_set:\r\n if data_point.dat_votes != current: return False #if we ever get to a data point that's different from the first, this case does not apply\r\n\r\n return True #if we proceed through all the data without stopping, return True\r", "def is_cyclic(self):\n return self._.b[0] == 2 and self._.c[-1] in [1, 2] and \\\n all(x == 1 for x in self._.b[1:-1] + self._.c[1:-1])", "def is_point_on_curve(self, P):\n x, y, = P[0], P[1]\n left = y * y\n right = (x * x * x) + (self.a * x) + self.b\n return (left - right) % self.p == 0", "def 
all_close(goal, actual, tolerance):\n all_equal = True\n if type(goal) is list:\n for index in range(len(goal)):\n if abs(actual[index] - goal[index]) > tolerance:\n return False\n elif type(goal) is geometry_msgs.msg.PoseStamped:\n return all_close(goal.pose, actual.pose, tolerance)\n elif type(goal) is geometry_msgs.msg.Pose:\n return all_close(pose_to_list(goal), pose_to_list(actual), tolerance)\n return True", "def check_sequences_close(\n first: Sequence[Sequence[float]],\n second: Sequence[Sequence[float]],\n) -> None:\n assert len(first) == len(second)\n for f, s in zip(first, second):\n assert f == pytest.approx(s)", "def is_ppc(C1, C2, i):\n c1, c2 = sorted(C1), sorted(C2)\n for k in range(len(c1)):\n if i <= c2[k]:\n # return False\n break\n if c1[k] != c2[k]:\n return False\n return True", "def _compare(self, x,y, pr=False):\n batched = self.ex.batched(x, y)\n looped = self.ex.looped(x, y)\n #print(f'batched value {batched}')\n #print(f'looped value {looped}')\n \n self.assertTrue(\n torch.equal(batched, looped)\n )", "def _check_convergence(current_position,\n next_position,\n current_objective,\n next_objective,\n next_gradient,\n grad_tolerance,\n f_relative_tolerance,\n x_tolerance):\n grad_converged = _check_within_tolerance(next_gradient, grad_tolerance)\n x_converged = _check_within_tolerance(next_position - current_position,\n x_tolerance)\n f_converged = _check_within_tolerance(\n next_objective - current_objective,\n f_relative_tolerance * current_objective)\n return grad_converged | x_converged | f_converged", "def is_next_order(current: str, target: str):\n c = c_major_chord_pos[current]\n t = c_major_chord_pos[target]\n if c == 7:\n c = 0\n if c + 1 == t:\n return True\n return False", "def find_curve(self):\n self.set_a()\n while True:\n while not self.check_a():\n self.seed_update()\n self.set_a()\n self.seed_update()\n self.set_b()\n while not self.check_b():\n self.seed_update()\n self.set_b()\n if not self.secure():\n self.seed_update()\n continue\n self.generate_generator()\n break", "def testSplineCurveIsC1Smooth(self):\n x1 = jnp.linspace(0., 8., 10000)\n x2 = x1 + 1e-7\n\n fn = self.variant(distribution.partition_spline_curve)\n y1 = fn(x1)\n y2 = fn(x2)\n grad = jax.grad(lambda z: jnp.sum(fn(z)))\n dy1 = grad(x1)\n dy2 = grad(x2)\n\n chex.assert_tree_all_close(y1, y2, atol=1e-5, rtol=1e-5)\n chex.assert_tree_all_close(dy1, dy2, atol=1e-5, rtol=1e-5)", "def check_correctness(S, bc_start='not-a-knot', bc_end='not-a-knot',\n tol=1e-14):\n x = S.x\n c = S.c\n dx = np.diff(x)\n dx = dx.reshape([dx.shape[0]] + [1] * (c.ndim - 2))\n dxi = dx[:-1]\n\n # Check C2 continuity.\n assert_allclose(c[3, 1:], c[0, :-1] * dxi**3 + c[1, :-1] * dxi**2 +\n c[2, :-1] * dxi + c[3, :-1], rtol=tol, atol=tol)\n assert_allclose(c[2, 1:], 3 * c[0, :-1] * dxi**2 +\n 2 * c[1, :-1] * dxi + c[2, :-1], rtol=tol, atol=tol)\n assert_allclose(c[1, 1:], 3 * c[0, :-1] * dxi + c[1, :-1],\n rtol=tol, atol=tol)\n\n # Check that we found a parabola, the third derivative is 0.\n if x.size == 3 and bc_start == 'not-a-knot' and bc_end == 'not-a-knot':\n assert_allclose(c[0], 0, rtol=tol, atol=tol)\n return\n\n # Check periodic boundary conditions.\n if bc_start == 'periodic':\n assert_allclose(S(x[0], 0), S(x[-1], 0), rtol=tol, atol=tol)\n assert_allclose(S(x[0], 1), S(x[-1], 1), rtol=tol, atol=tol)\n assert_allclose(S(x[0], 2), S(x[-1], 2), rtol=tol, atol=tol)\n return\n\n # Check other boundary conditions.\n if bc_start == 'not-a-knot':\n if x.size == 2:\n slope = (S(x[1]) - S(x[0])) / dx[0]\n 
assert_allclose(S(x[0], 1), slope, rtol=tol, atol=tol)\n else:\n assert_allclose(c[0, 0], c[0, 1], rtol=tol, atol=tol)\n elif bc_start == 'clamped':\n assert_allclose(S(x[0], 1), 0, rtol=tol, atol=tol)\n elif bc_start == 'natural':\n assert_allclose(S(x[0], 2), 0, rtol=tol, atol=tol)\n else:\n order, value = bc_start\n assert_allclose(S(x[0], order), value, rtol=tol, atol=tol)\n\n if bc_end == 'not-a-knot':\n if x.size == 2:\n slope = (S(x[1]) - S(x[0])) / dx[0]\n assert_allclose(S(x[1], 1), slope, rtol=tol, atol=tol)\n else:\n assert_allclose(c[0, -1], c[0, -2], rtol=tol, atol=tol)\n elif bc_end == 'clamped':\n assert_allclose(S(x[-1], 1), 0, rtol=tol, atol=tol)\n elif bc_end == 'natural':\n assert_allclose(S(x[-1], 2), 0, rtol=2*tol, atol=2*tol)\n else:\n order, value = bc_end\n assert_allclose(S(x[-1], order), value, rtol=tol, atol=tol)", "def _degree_has_changed(first, second):\n return len(set(first) ^ set(second)) != 0", "def validate_points(a, b):\r\n\tdiff_y = b[0] - a[0]\r\n\tdiff_x = b[1] - a[1]\r\n\r\n\treturn (diff_y == 0 and diff_x != 0) or (diff_x == 0 and diff_y != 0) or abs(diff_x) == abs(diff_y)", "def __eq__(self, line):\n \n return abs( 1 - np.dot(sm.unitvec(self.vec), sm.unitvec(line.vec))) < 10*_eps", "def __le__(self, other: Compound[Scalar]) -> bool:\n return (self._points_set <= other._points_set\n if isinstance(other, Multipoint)\n else NotImplemented)", "def inequality_step_1_b(r, z_k, points):\n lhs = 0\n for point in points:\n lhs = lhs + z_k.C - ((z_k.v_tilde[0] + z_k(*point)[0][0])**2 \\\n + (z_k.v_tilde[1] + z_k(*point)[0][1])**2)\n\n lhs = 2*r**3*lhs # todo: should not multiply by 2\n rhs = step_1_integral(z_k)\n print \"Inequality 1b:\"\n print \"2*r^3 sum(C-|v_tilde+v_k(t_j,x_j)|^2) = %g\" % lhs\n print \"c_2*iint(C-|v_tilde+v_k(t_j,x_j)|^2)dxdt = %g\" % rhs\n\n return lhs > rhs", "def all_consecutive(s):\n for (x, y) in pairwise(sorted(s)):\n if y - x != 1:\n return False\n return True", "def test_acyclic_chains():\n names = ['robot', 'box1', 'box2']\n chains = lambda: FactoredRandomGeometricGraph.acyclic_chains(names)\n\n expected_number = 16\n actual_number = sum(1 for _ in chains())\n assert actual_number == expected_number, \\\n \"Expected {} chains; actual value was {}\".format(\n expected_number, actual_number)\n\n assert all(\n FactoredRandomGeometricGraph.is_acyclic(chain)\n for chain in chains())", "def __basic_adaptive_comp_theorem(self):\n global_epsilon, global_delta = self._epsilon_delta\n epsilon_sum, delta_sum = \\\n map(sum, zip(*self._private_data_epsilon_delta_access_history))\n return epsilon_sum > global_epsilon or delta_sum > global_delta", "def converged(self) -> bool:", "def converged(self) -> bool:", "def converged(self) -> bool:", "def check(self, expected, debug=True):\r\n for v1, v2, go in expected:\r\n for gj, oj in go:\r\n r1 = is_same_graph(v1, v2, givens=gj, debug=debug)\r\n assert r1 == oj\r\n r2 = is_same_graph(v2, v1, givens=gj, debug=debug)\r\n assert r2 == oj", "def IsEqualOrder(self,other):\n return self.InferPolynomialDegree() == other.InferPolynomialDegree()", "def isFinished(self):\n\n currentValue = numpy.power(10, self.idxCurrentF / self.nbPtsF)\n if currentValue == 0:\n return True\n\n # It can be more than one line for the previous alignment value.\n # We iterate until we find a better value or to the end of the lines.\n for i in self:\n while i.nextLine[self.idx] > currentValue and not i.isFinished:\n i.next();\n \n return not any(i.nextLine[self.idx] <= currentValue for i in self)", "def curvecontrol(p1,p2, 
u_or_d):\r\n## four possibile orders:\r\n## A p1 lower and to left of p2\r\n## B p1 lower and to right of p2\r\n## C p1 higher and to left of p2\r\n## D p1 higher and to right of p2\r\n## B and C are reverse of each other\r\n## A and D are reverse of each other\r\n## so only 2 types of pairs really\r\n## each has a curve up or curve down possibility\r\n## start by converting D to A, and C to B\r\n e1 = 0.0001\r\n e2 = 0.9\r\n e1c = 1 - e1\r\n e2c = 0.5\r\n cp1 = []\r\n cp2 = []\r\n if p2[1] < p1[1]:\r\n resort = True\r\n ptemp = p2\r\n p2 = p1\r\n p1 = ptemp\r\n else:\r\n resort = False\r\n if p1[0] < p2[0]: ## type A\r\n if u_or_d: ## curve up\r\n cp1.append( ((p2[0]-p1[0]) * e1) + p1[0])\r\n cp1.append( ((p2[1]-p1[1]) * e2) + p1[1])\r\n cp2.append( ((p2[0]-p1[0]) * e2c) + p1[0])\r\n cp2.append( ((p2[1]-p1[1]) * e1c) + p1[1])\r\n else:\r\n cp1.append( ((p2[0]-p1[0]) * e2) + p1[0])\r\n cp1.append( ((p2[1]-p1[1]) * e1) + p1[1])\r\n cp2.append( ((p2[0]-p1[0]) * e1c) + p1[0])\r\n cp2.append( ((p2[1]-p1[1]) * e2c) + p1[1])\r\n else: ## type B\r\n if u_or_d: ## curve up\r\n cp1.append( p1[0]-((p1[0]-p2[0]) * e1))\r\n cp1.append( ((p2[1]-p1[1]) * e2) + p1[1])\r\n cp2.append( p1[0] - ((p1[0]-p2[0]) * e2c))\r\n cp2.append( ((p2[1]-p1[1]) * e1c) + p1[1])\r\n else:\r\n cp1.append( p1[0]-((p1[0]-p2[0]) * e2))\r\n cp1.append( ((p2[1]-p1[1]) * e1) + p1[1])\r\n cp2.append( p1[0]-((p1[0]-p2[0]) * e1c))\r\n cp2.append( ((p2[1]-p1[1]) * e2c) + p1[1])\r\n if resort:\r\n ptemp = cp2\r\n cp2 = cp1\r\n cp1 = ptemp\r\n return cp1,cp2", "def is_cyclic(self):\n if self._is_cyclic is not None:\n return self._is_cyclic\n\n if len(self.generators) == 1:\n self._is_cyclic = True\n self._is_abelian = True\n return True\n\n if self._is_abelian is False:\n self._is_cyclic = False\n return False\n\n order = self.order()\n\n if order < 6:\n self._is_abelian = True\n if order != 4:\n self._is_cyclic = True\n return True\n\n factors = factorint(order)\n if all(v == 1 for v in factors.values()):\n if self._is_abelian:\n self._is_cyclic = True\n return True\n\n primes = list(factors.keys())\n if PermutationGroup._distinct_primes_lemma(primes) is True:\n self._is_cyclic = True\n self._is_abelian = True\n return True\n\n if not self.is_abelian:\n self._is_cyclic = False\n return False\n\n self._is_cyclic = all(\n any(g**(order//p) != self.identity for g in self.generators)\n for p, e in factors.items() if e > 1\n )\n return self._is_cyclic", "def test_perform_pairwise_tests_single_comp(self):\r\n # Verified with R's t.test function.\r\n exp = [['foo', 'bar', -6.5999999999999996, 0.0070804795641244006,\r\n 0.0070804795641244006, 0.100000000001, 0.10000000000001]]\r\n np.random.seed(self.value_for_seed)\r\n obs = _perform_pairwise_tests(self.labels1, self.dists1, 'two-sided',\r\n 999)\r\n self.compare_multiple_level_array(obs, exp)", "def fp_eq(x: float, y: float) -> bool:\n return fabs(x-y) < 10**-12", "def torch_the_same(X, Y, eps=1e-8):\n return (X - Y).abs().min() < eps", "def converge(self, _iter, centroids_old, centroids_new):\n #return self.equality_check(centroids_old, centroids_new) or _iter == 0\n return _iter == 0", "def comparison_test_2():\n for pose in SE2.interesting_points():\n se2a = se2_from_SE2(pose)\n se2b = se2_from_SE2_slow(pose)\n # printm('pose', pose, 'se2a', se2a, 'se2b', se2b)\n assert_allclose(se2a, se2b, atol=1e-8)", "def _allclose(x, y, rtol=1e-7, atol=1e-14):\n for a, b in zip(x, y):\n if np.abs(a - b) > (atol + rtol * np.abs(b)):\n return False\n return True", "def test_cx_equivalence_1cx(self, 
seed=1):\n state = np.random.default_rng(seed)\n rnd = 2 * np.pi * state.random(size=12)\n\n qr = QuantumRegister(2, name=\"q\")\n qc = QuantumCircuit(qr)\n\n qc.u(rnd[0], rnd[1], rnd[2], qr[0])\n qc.u(rnd[3], rnd[4], rnd[5], qr[1])\n\n qc.cx(qr[1], qr[0])\n\n qc.u(rnd[6], rnd[7], rnd[8], qr[0])\n qc.u(rnd[9], rnd[10], rnd[11], qr[1])\n\n sim = UnitarySimulatorPy()\n unitary = execute(qc, sim).result().get_unitary()\n self.assertEqual(two_qubit_cnot_decompose.num_basis_gates(unitary), 1)\n self.assertTrue(Operator(two_qubit_cnot_decompose(unitary)).equiv(unitary))", "def __comparing_points(self, point1, point2) -> bool:\n return (abs(point1.x - point2.x) <= self.dirt_pos_tolerance and abs(\n point1.y - point2.y) <= self.dirt_pos_tolerance)", "def collinear(a:tuple, b:tuple, c:tuple)->bool:\n return ((b[1] - c[1]) * (a[0] - b[0])) == ((a[1] - b[1]) * (b[0] - c[0]))", "def are_similar(first_coords: List[Tuple[int, int]], second_coords: List[Tuple[int, int]]) -> bool:\n # Step 1: Get angles of each triangle\n # Step 2: Compare grades of two triangles\n # Step 3: If two angles are equal then first triangle is similar to second triangle\n pass", "def assert_collinear(*points: Point, tolerance: float = 1e-2) -> bool:\n if len(points) < 3:\n raise ValueError(\"CurveChecker.assert_collinear() must be called with at least three points\")\n\n thetas = [np.arctan2(p0[1] - p1[1], p0[0] - p1[0]) for p0, p1 in zip(points, points[1:])]\n for t0, t1 in zip(thetas, thetas[1:]):\n if abs(t0 - t1) > tolerance:\n return False\n\n return True", "def approx_eq(a, b):\n return abs(a-b) < approx_eq.eps", "def test_compare_confs(self):\n ch4_1 = {'symbols': ('C', 'H', 'H', 'H', 'H'),\n 'isotopes': (12, 1, 1, 1, 1),\n 'coords': ((0.0, 0.0, 0.0),\n (0.6300326, 0.6300326, 0.6300326),\n (-0.6300326, -0.6300326, 0.6300326),\n (-0.6300326, 0.6300326, -0.6300326),\n (0.6300326, -0.6300326, -0.6300326))}\n self.assertTrue(converter.compare_confs(ch4_1, ch4_1))\n self.assertEqual(converter.compare_confs(ch4_1, ch4_1, rmsd_score=True), 0.0)\n\n ch4_2 = {'symbols': ('C', 'H', 'H', 'H', 'H'),\n 'isotopes': (12, 1, 1, 1, 1),\n 'coords': ((0.0, 0.0, 0.0),\n (0.630032999999999999, 0.6300326, 0.6300326),\n (-0.6300326, -0.6300326, 0.6300326),\n (-0.6300326, 0.6300326, -0.6300326),\n (0.6300326, -0.6300326, -0.6300326))}\n self.assertTrue(converter.compare_confs(ch4_1, ch4_2))\n self.assertAlmostEqual(converter.compare_confs(ch4_1, ch4_2, rmsd_score=True), 0.0, places=4)\n\n ch4_3 = {'symbols': ('C', 'H', 'H', 'H', 'H'),\n 'isotopes': (12, 1, 1, 1, 1),\n 'coords': ((0.0, 0.0, 0.0),\n (0.81, 0.6300326, 0.6300326),\n (-0.6300326, -0.6300326, 0.6300326),\n (-0.6300326, 0.6300326, -0.6300326),\n (0.6300326, -0.6300326, -0.6300326))}\n self.assertFalse(converter.compare_confs(ch4_1, ch4_3))\n self.assertAlmostEqual(converter.compare_confs(ch4_1, ch4_3, rmsd_score=True), 0.0973755, 5)\n\n occco_1 = {'symbols': ('O', 'C', 'C', 'C', 'O', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 12, 16, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((-1.645138856907744, 0.4855258453193825, -1.269098295435587),\n (-1.826155038047953, 0.3950495294463964, 0.14042133077839267),\n (-1.809422000751755, 1.7890058616007063, 0.7598491740499427),\n (-0.5144047765537177, 2.5412406931938802, 0.46679697338621373),\n (-0.4166415293412751, 2.87702090326361, -0.9112962716273996),\n (-1.7523716337780308, -0.4153106947877299, -1.6252584016046343),\n (-2.785951955397608, -0.09251469392615361, 0.33828359656775064),\n (-1.0240452528998532, 
-0.22437445960667116, 0.5540970106165476),\n (-2.6481280678220354, 2.3642983699082096, 0.34958017984053996),\n (-1.948589699885299, 1.7026595551213293, 1.842711580226941),\n (0.3611954633790452, 1.9455154222553817, 0.7436960027091665),\n (-0.4846097814890448, 3.4746190180148613, 1.0370826136306412),\n (-0.7517118479102434, 2.0995465744609016, -1.4084474547843668))}\n self.assertTrue(converter.compare_confs(occco_1, occco_1))\n self.assertEqual(converter.compare_confs(occco_1, occco_1, rmsd_score=True), 0.0)\n\n occco_2 = {'symbols': ('O', 'C', 'C', 'C', 'O', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 12, 16, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((-1.7947360038254172, -0.2800502342395655, -0.94718691702442),\n (-1.3484923712374388, 0.3115024061252556, 0.26578718691263836),\n (0.048477626636679344, -0.18145884392561823, 0.6324014558703148),\n (0.06768533157823732, -1.6675011458530478, 0.9732242593946415),\n (-0.3530408338556874, -2.4414941005488964, -0.14539995931042984),\n (-1.6993365552401258, -1.24904809118412, -0.8336640895923935),\n (-1.3364382254722125, 1.3960688766201377, 0.12095890535651209),\n (-2.0720954664081472, 0.08255875951942339, 1.0547463625584224),\n (0.7166058894071794, -0.006871191098481536, -0.21939247767770087),\n (0.42416153066953804, 0.38997256785153595, 1.4878132135000073),\n (-0.6073710844027356, -1.8828156181698352, 1.8073934203380306),\n (1.0743805139106757, -1.9882575918786236, 1.2595102280098387),\n (0.35195568839394714, -2.3791987519096245, -0.81652943836054))}\n self.assertFalse(converter.compare_confs(occco_1, occco_2))\n self.assertAlmostEqual(converter.compare_confs(occco_1, occco_2, rmsd_score=True), 1.00940798, 5)\n\n occco_3 = {'symbols': ('O', 'C', 'C', 'C', 'O', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 12, 16, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((-2.1449415573976087, 0.22095421320805855, -1.1761667487081628),\n (-1.8458576972054956, 0.34850267283416353, 0.21018774322610537),\n (-1.7946333596155588, 1.8224048429109774, 0.6001541290065803),\n (-0.7643941488427433, 2.603963636650936, -0.21014179435545444),\n (-1.160550573561837, 2.7172772321690157, -1.5709018700345496),\n (-2.2541573481252226, -0.7309807530987936, -1.3543442072197793),\n (-2.624019853257233, -0.1650323593077789, 0.7836186270202112),\n (-0.885183881101036, -0.13905038811734782, 0.4040751763466029),\n (-2.7832838991835382, 2.2645645737400506, 0.427600829327477),\n (-1.562954529947271, 1.9038127448704014, 1.6674459390753227),\n (0.21794991835079414, 2.1225495495404854, -0.17169963700580776),\n (-0.6621011473071822, 3.6174144955483376, 0.18956005497753062),\n (-1.544021016988015, 1.848253867499642, -1.8191893347265315))}\n self.assertTrue(converter.compare_confs(occco_1, occco_3))\n self.assertAlmostEqual(converter.compare_confs(occco_1, occco_3, rmsd_score=True), 0.0, places=4)\n\n occco_4 = {'symbols': ('O', 'C', 'C', 'C', 'O', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 12, 16, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': ((-2.149273800177333, 0.3878561319363274, -1.067994536622897),\n (-1.8905197406541119, 0.16731522843849514, 0.3147948930907352),\n (-2.2871289950009728, 1.398560959638718, 1.1233863494505283),\n (-1.5514369216953363, 2.6559008371480006, 0.6687883285931627),\n (-1.97578532983754, 3.0584350426221696, -0.6271056229665328),\n (-1.9649535743321451, -0.45216201833697967, -1.5265252922322194),\n (-2.469954748031858, -0.7010050003658949, 0.6439649209501082),\n (-0.8252433635746496, -0.052453567250684806, 
0.43760424203717113),\n (-3.36502756891988, 1.5624906595612045, 1.0051865026931144),\n (-2.0814509723064316, 1.2139856682154606, 2.18312358675434),\n (-0.4688204416342118, 2.495542838358611, 0.6472342384223745),\n (-1.759053255113454, 3.4819317812208626, 1.3557210162758644),\n (-2.0785703072969466, 2.2346795710060765, -1.151280970188824))}\n self.assertTrue(converter.compare_confs(occco_1, occco_4))\n self.assertAlmostEqual(converter.compare_confs(occco_1, occco_4, rmsd_score=True), 0.0, places=4)\n\n occco_5 = {'symbols': ('O', 'C', 'C', 'C', 'O', 'H', 'H', 'H', 'H', 'H', 'H', 'H', 'H'),\n 'isotopes': (16, 12, 12, 12, 16, 1, 1, 1, 1, 1, 1, 1, 1),\n 'coords': (\n (-2.4583263702786398, 0.0718285271168676, -1.073208642860504),\n (-1.937015981064579, 0.0890930305098165, 0.25179580507902494),\n (-2.348398726927575, 1.376978495961958, 0.9583384372437844),\n (-1.8770165015484177, 2.6245423907540975, 0.21659228399276115),\n (-2.569162634647042, 2.781228761671768, -1.015409941988472),\n (-2.2457141392131477, -0.8022675472463131, -1.4481857725877),\n (-2.3295554159352525, -0.7779156570908263, 0.792431073403323),\n (-0.8469088168553344, 0.006439213644634347, 0.19675913068823528),\n (-3.4425715658912557, 1.4040720104173179, 1.0273387203822888),\n (-1.939605978646692, 1.375711847707625, 1.9743606178959265),\n (-0.8029097001610058, 2.5806396078360736, 0.010183430117069694),\n (-2.07095189274604, 3.516416462694289, 0.8201309204066712),\n (-2.650256378769789, 1.8823222470150054, -1.4017891959903757))}\n self.assertTrue(converter.compare_confs(occco_1, occco_5))\n self.assertAlmostEqual(converter.compare_confs(occco_1, occco_5, rmsd_score=True), 0.0, places=4)", "def check_pairs(self, all_pr, curr):\n flag = True\n for pair_ox in all_pr:\n if (curr[0] == pair_ox or curr[1] == pair_ox):\n flag = False\n return flag", "def converged(self):\n if len(self.rundir) >= 2:\n if io.ionic_steps(self.rundir[-1]) <= 3:\n return True\n if self.settings[\"nrg_convergence\"] != None:\n if io.job_complete(self.rundir[-1]) and io.job_complete(self.rundir[-2]):\n o1 = io.Oszicar(os.path.join(self.rundir[-1],\"OSZICAR\"))\n o2 = io.Oszicar(os.path.join(self.rundir[-2],\"OSZICAR\"))\n if abs( o1.E[-1] - o2.E[-1]) < self.settings[\"nrg_convergence\"]:\n return True\n\n return False", "def new_convergence_function(previous_variables, new_variables, conv_ctr, conv_ctr_cap=20):\n for pre, new in zip(previous_variables, new_variables):\n dif1 = np.abs(new) - 0.9 * np.abs(pre)\n dif2 = 1.1 * np.abs(pre) - np.abs(new)\n if not (dif1 > 0).all() or not (dif2 > 0).all():\n return 0, False\n return conv_ctr + 1, conv_ctr + 1 > conv_ctr_cap", "def comp_edge(_P, P): # Used in scan_P_().\n _x0 = _P['x0']\n _xn = _x0 + _P['L']\n x0 = P['x0']\n xn = x0 + P['L']\n\n if _xn < xn: # End-point relative position.\n return True, x0 < _xn # Overlap.\n else:\n return False, _x0 < xn", "def coplanar_points_are_on_same_side_of_line(a, b, p1, p2):\n check_shape_any(a, (3,), (-1, 3), name=\"a\")\n vg.shape.check(locals(), \"b\", a.shape)\n vg.shape.check(locals(), \"p1\", a.shape)\n vg.shape.check(locals(), \"p2\", a.shape)\n\n # Uses \"same-side technique\" from http://blackpawn.com/texts/pointinpoly/default.html\n along_line = b - a\n return vg.dot(vg.cross(along_line, p1 - a), vg.cross(along_line, p2 - a)) >= 0", "def all_equal(x, y, eps=None):\n if eps:\n return all([abs(i - j) <= eps\n for i, j in zip(x, y)\n if i is not None and j is not None])\n return all([i == j for i, j in zip(x, y)])", "def test_coherence_regularized():\r\n for method in 
methods:\r\n f, c = tsa.coherence_regularized(tseries, 0.05, 1000,\r\n csd_method=method)\r\n npt.assert_array_almost_equal(c[0, 1], c[1, 0])", "def next_chain_link(x, y):\n\n gamma = np.random.rand()\n alpha = w(y)/w(x)\n\n return alpha >= gamma", "def next_chain_link(x, y):\n\n gamma = np.random.rand()\n alpha = w(y)/w(x)\n\n return alpha >= gamma", "def almost_equal(x, y):\n return abs(x-y) < FP_PREC", "def compare(self):\n same = self.eta()[0] and self.omega()[0] and self.data()[0]\n return same", "def parallel(self, L):\n return self.slope() == L.slope()", "def test_gele_curve():\n c1 = Curve(data=data_num, mnemonic='test')\n c2 = c1 < 50\n assert c2.df.iloc[0][0]\n\n c2 = c1 > 50\n assert c2.df.iloc[-1][0]", "def converged(old, new):\n # https://github.com/amirgholami/PyHessian/commit/0f7e0f63a0f132998608013351ba19955fc9d861#diff-ba06409ffbc677fe556485172e62649fe7a069631390f5a780766bff3289b06bR149-R150 # noqa: B950\n return (old - new).abs() / (old.abs() + 1e-6) < tol", "def test_coherence():\r\n\r\n for method in methods:\r\n f, c = tsa.coherence(tseries, csd_method=method)\r\n npt.assert_array_almost_equal(c[0, 1], c[1, 0])\r\n npt.assert_array_almost_equal(c[0, 0], np.ones(f.shape))", "def testcomparefrac ( self ):\r\n\t\tfor fracTup1, fracTup2, result in self.knownCmpValues:\r\n\t\t\tfrac1 = eval ( 'frac.'+fracTup1 )\r\n\t\t\tfrac2 = eval ( 'frac.'+fracTup2 )\r\n\t\t\tif result != None:\t\t\t\t\t\r\n\t\t\t\tself.assertEqual ( frac1.compare ( frac2 )[0], result )\r\n\t\t\telse:\r\n\t\t\t\tself.assertEqual ( frac1.compare ( frac2 ), result )", "def test_get_band_pass_contingency_one_time(self):\n\n (\n this_num_true_positives, this_num_false_positives,\n this_num_false_negatives, this_num_true_negatives\n ) = learning_curves._get_band_pass_contingency_one_time(\n actual_target_matrix=ACTUAL_TARGET_MATRIX,\n probability_matrix=PROBABILITY_MATRIX,\n eval_mask_matrix=MASK_MATRIX\n )\n\n self.assertTrue(numpy.isclose(\n this_num_true_positives, FOURIER_NUM_TRUE_POSITIVES, atol=TOLERANCE\n ))\n self.assertTrue(numpy.isclose(\n this_num_false_positives, FOURIER_NUM_FALSE_POSITIVES,\n atol=TOLERANCE\n ))\n self.assertTrue(numpy.isclose(\n this_num_false_negatives, FOURIER_NUM_FALSE_NEGATIVES,\n atol=TOLERANCE\n ))\n self.assertTrue(numpy.isclose(\n this_num_true_negatives, FOURIER_NUM_TRUE_NEGATIVES, atol=TOLERANCE\n ))", "def test_four_stocks_two_optimal_paths(self):\n stock_prices_first_order = np.array([\n [10, 15, 20, 15, 10], \n [5, 15, 10, 15, 20], \n [30, 25, 20, 25, 30], \n [10, 15, 30, 40, 50],], dtype=float) \n\n stock_prices_second_order = np.array([\n [10, 15, 30, 40, 50], \n [10, 15, 20, 15, 10], \n [5, 15, 10, 15, 20], \n [30, 25, 20, 25, 30],], dtype=float)\n \n test_case_first_order = StockMarket(5, stock_prices_first_order, 1.0)\n test_case_second_order = StockMarket(5, stock_prices_second_order, 1.0)\n\n test_case_first_order.dynamic_programming_bottom_up()\n test_case_second_order.dynamic_programming_bottom_up()\n\n last_day = len(stock_prices_second_order[0])-1 \n self.assertEqual(test_case_first_order.max_gain_on_day(last_day), test_case_second_order.max_gain_on_day(last_day))\n self.assertNotEqual(test_case_first_order.backtracing_portfolio, test_case_second_order.backtracing_portfolio)", "def _chain_equal(a,b):\n for a_part, b_part in zip(a.parts, b.parts):\n for a_seg, b_seg in zip(a_part, b_part):\n if not np.array_equal(a_seg, b_seg):\n return False\n return True", "def is_solvable(self) -> bool:\r\n inv_count = 0\r\n arr = self.current_state.flatten()\r\n for i 
in range(0, 9):\r\n for j in range(i + 1, 9):\r\n if arr[j] and arr[i] and arr[i] > arr[j]:\r\n inv_count += 1\r\n return inv_count % 2 == 0", "def are_equal(self, sp1, sp2):\n for s1 in sp1.keys():\n spin1 = getattr(s1, \"spin\", 0)\n oxi1 = getattr(s1, \"oxi_state\", 0)\n for s2 in sp2.keys():\n spin2 = getattr(s2, \"spin\", 0)\n oxi2 = getattr(s2, \"oxi_state\", 0)\n if (s1.symbol == s2.symbol and oxi1 == oxi2 and\n spin2 == -spin1):\n break\n else:\n return False\n return True", "def allCrossing(forms):\n if len(forms) == 1: forms = forms[0]\n l = len(forms)\n for i in range(l):\n for j in range(i + 1, l):\n if not forms[i].crossForm(forms[j]):\n return False\n return True", "def is_stable(p1, p2, p3, tol=0.001):\n p = Point(0, 0, 0)\n u = vector_from_to(p1, p2)\n v = vector_from_to(p1, p3)\n n = cross(u, v)\n w = vector_from_to(p1, p)\n n2 = dot(n, n)\n beta = dot(cross(u, w), n) / n2\n gamma = dot(cross(w, v), n) / n2\n alpha = 1 - gamma - beta\n # then coordinate of the projected point (p_) of point p\n # p_ = alpha * p1 + beta * p2 + gamma * p3\n min_val = -tol\n max_val = 1 + tol\n cond1 = min_val <= alpha <= max_val\n cond2 = min_val <= beta <= max_val\n cond3 = min_val <= gamma <= max_val\n return cond1 and cond2 and cond3", "def _compare_vector(arr1, arr2):\n\n length = len(arr1)\n if len(arr2) != length:\n return False\n\n for i in range(length):\n element_1 = float(arr1[i])\n element_2 = float(arr2[i])\n\n\n diff = abs(abs(element_1) - abs(element_2))\n if diff != 0.0:\n rel = diff / min(abs(element_1), abs(element_2))\n \n # For a basis set, a relatively coarse comparison\n # should be acceptible\n if rel > 1.0e-10:\n return False\n\n return True", "def point_isclose(a, b, *args, **kwargs):\n for x, y in zip(a, b):\n if not isclose(x, y, *args, **kwargs):\n return False\n return True", "def is_converged(clusters1, clusters2, k, num_of_cords):\r\n for i in range(k):\r\n for j in range(num_of_cords):\r\n if clusters1[i][j] != clusters2[i][j]:\r\n return False\r\n return True", "def part1b_0():\n xs = exampleInput\n _, forward = submission.computeForward(simpleCRF, xs)\n for i in xrange(len(xs)):\n grader.requireIsEqual( 1.0, sum( forward[i].values() ) )", "def has_next(self) -> bool:\n return (self._high - self._low) > self._tol", "def _test_if_cartesian_coordinates_const(self, step_index):\n steps = self.program.steps\n startStep = steps[step_index]\n playbackFrame = startStep.playback_frames[-1]\n refFramePose = get_frame_pose(startStep, playbackFrame)\n endStep = steps[step_index + 1]\n for index, playbackFrame in enumerate(endStep.playback_frames):\n framePose = get_frame_pose(endStep, playbackFrame)\n delta = 1e-06\n for i in range(3):\n msg = f\"Step {endStep.name} playback frame {index},{i} has not the same position. refFrame{refFramePose[:3]}, step frame {framePose[:3]}\"\n self.assertAlmostEqual(framePose[i], refFramePose[i], msg=msg, delta=delta)" ]
[ "0.6867624", "0.65761954", "0.60859615", "0.60859615", "0.6069512", "0.5920149", "0.5899444", "0.58113146", "0.5743151", "0.5739253", "0.5737969", "0.57277334", "0.5719145", "0.570918", "0.5702754", "0.5661689", "0.5660275", "0.5631335", "0.5627203", "0.5584733", "0.55798703", "0.5552405", "0.55463606", "0.5544422", "0.5531419", "0.55269027", "0.5524913", "0.5511427", "0.5511427", "0.5511427", "0.55002296", "0.5495753", "0.54859555", "0.5481982", "0.5474404", "0.5468211", "0.5468116", "0.5462931", "0.5462409", "0.54571736", "0.5450319", "0.54261035", "0.54258525", "0.54180545", "0.54005545", "0.5395583", "0.5389517", "0.5388669", "0.53854156", "0.538492", "0.53709304", "0.53709304", "0.53709304", "0.5370761", "0.53646964", "0.53635204", "0.5352692", "0.53521883", "0.5349754", "0.5340991", "0.53390586", "0.5329344", "0.5321495", "0.5318741", "0.5318105", "0.53172904", "0.5316199", "0.53159046", "0.5310924", "0.5307782", "0.5300116", "0.5299591", "0.5297531", "0.5295911", "0.52914995", "0.5289234", "0.52890104", "0.5287549", "0.5286718", "0.5286718", "0.5279552", "0.5273422", "0.5263256", "0.52625763", "0.52616674", "0.5259697", "0.5249805", "0.5249489", "0.52388394", "0.5236634", "0.5233904", "0.5231214", "0.5224129", "0.5221641", "0.5220341", "0.5213372", "0.521327", "0.5209934", "0.5200025", "0.5196792" ]
0.67138547
1
Verifies that the adjacent slopes between points are within a specified tolerance of one another. Note that assert_collinear assumes ordered points; three actually collinear points passed with the middle point as the first or last argument will return `False`.
def assert_collinear(*points: Point, tolerance: float = 1e-2) -> bool: if len(points) < 3: raise ValueError("CurveChecker.assert_collinear() must be called with at least three points") thetas = [np.arctan2(p0[1] - p1[1], p0[0] - p1[0]) for p0, p1 in zip(points, points[1:])] for t0, t1 in zip(thetas, thetas[1:]): if abs(t0 - t1) > tolerance: return False return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isCollinear(a,b,c):\r\n #return slope(a, b) == slope(b, c) == slope(c, a) #DOES NOT WORK\r\n #return (b[0] - a[0]) * (c[1] - a[1]) == (c[0] - a[0]) * (b[1] - a[1]) \r\n #return distance(a,b) + distance(b,c) == distance(a,c)\r\n x1 = a[0]\r\n y1 = a[1]\r\n x2 = b[0]\r\n y2 = b[1]\r\n x3 = c[0]\r\n y3 = c[1] \r\n if (x1*(y2 - y3)) + (x2*(y3 - y1)) + (x3*(y1-y2)) == 0: \r\n return True\r\n else:\r\n return False", "def hasCollinearPoints(listOfPoints):\r\n for points in listOfPoints:\r\n if isCollinear(points[0], points[1], points[2]): #If any of the points are collinear\r\n return True\r\n else:\r\n pass\r\n return False #If none of the points are collinear\r", "def test_endpoint_slope(b,c,d,x_n_minus_1,x_n,expected_slope):\n\tactual_slope = b + 2*c*(x_n-x_n_minus_1) + 3*d*(x_n-x_n_minus_1)**2\n\tresult = abs(actual_slope-expected_slope)<0.001\n\treturn(result)", "def collinear(a:tuple, b:tuple, c:tuple)->bool:\n return ((b[1] - c[1]) * (a[0] - b[0])) == ((a[1] - b[1]) * (b[0] - c[0]))", "def collinear(cls, *vectors, e=10e-10):\n l = len(vectors)\n if l == 2:\n v1 = vectors[0]\n v2 = vectors[1]\n return abs(v1.x * v2.y - v1.y - v2.x) < e\n else:\n for i in range(l):\n for j in range(i + 1, l):\n if not cls.collinear(vectors[i], vectors[j]):\n return False\n return True", "def test_b_coefficients(x1,x2,x3,y1,y2,y3,CCoefficients,DCoefficients,expected_slope):\n\tB = b_coefficients(x1,x2,x3,y1,y2,y3,CCoefficients,DCoefficients)\n\tresult = abs(B[0]-expected_slope)< 0.001\n\treturn(result)\n\tassert B[0]==expected_slope, \"First b coefficient (%f) does not equal initial slope (%f).\" (B[0],expected_slope)", "def test_epipolar(dxy_0, ep_vec, dxy, tol):\n delta=np.abs(np.dot((dxy-dxy_0), [ep_vec[1], -ep_vec[0]]))\n disp_mag=np.sqrt((dxy[:,0]-dxy_0[0])**2 +(dxy[:,1]-dxy_0[1])**2)\n good=(delta < tol) | (delta < 0.02 * disp_mag )\n return good, delta", "def assert_data_with_normal_vector_has_slope(nvect, expected_slope):\n neighborhood, pc = create_point_cloud_in_plane_and_neighborhood(nvect)\n extractor = EigenValueVectorizeFeatureExtractor()\n slope = extractor.extract(pc, neighborhood, None, None, None)[6]\n np.testing.assert_allclose(slope, expected_slope, atol=1e-6)", "def assert_differentiable(*curves: CubicBezierCurve) -> bool:\n if not curves:\n raise ValueError(\"CurveChecker.assert_differentiable() cannot be called on an empty list\")\n\n if not assert_continuous(*curves):\n return False\n\n for curve0, curve1 in zip(curves, curves[1:]):\n if not assert_collinear(curve0.c1, curve1.p0, curve1.c0):\n return False\n return True", "def checkStraightLine(coordinates: List[List[int]]) -> bool:\n\t# initializing our comparison slope value\n\tnum = coordinates[1][1] - coordinates[0][1]\n\tden = coordinates[1][0] - coordinates[0][0]\n\tif den == 0:\n\t\tslope = math.inf\n\telse:\n\t\tslope = num / den\n\n\t# checking the initial slope against all other slopes\n\tslope_check = 0\n\tfor i in range(2, len(coordinates)):\n\t\tnum = coordinates[i][1] - coordinates[i-1][1]\n\t\tden = coordinates[i][0] - coordinates[i-1][0]\n\t\tif den == 0:\n\t\t\tslope_check = math.inf\n\t\telse:\n\t\t\tslope_check = num/den\n\n\t\tif slope_check != slope:\n\t\t\treturn False\n\n\treturn True", "def collinear(a1, b1, a2, b2, a3, b3):\n a = x1 * (b2 - b3) + a2 * (b3 - b1) + a3 * (b1 - b2)\n \n if (a == 0):\n print \"Yes\"\n else:\n print \"No\"", "def test_positive_slope(self):\n slopes = []\n for i in range(100):\n neighborhood, pc = create_point_cloud_in_plane_and_neighborhood()\n slopes += 
list(EigenValueVectorizeFeatureExtractor().extract(pc, neighborhood, None, None, None)[6])\n np.testing.assert_array_less(np.zeros_like(slopes), slopes)", "def linear_constraint(u, Lin_lhs, Lin_rhs, tol = 0.05):\n return Lin_lhs.dot(u) <= Lin_rhs", "def approx_eq(x, y, tolerance=1e-15):\n return abs(x - y) < tolerance", "def _check_convergence(current_position,\n next_position,\n current_objective,\n next_objective,\n next_gradient,\n grad_tolerance,\n f_relative_tolerance,\n x_tolerance):\n grad_converged = _check_within_tolerance(next_gradient, grad_tolerance)\n x_converged = _check_within_tolerance(next_position - current_position,\n x_tolerance)\n f_converged = _check_within_tolerance(\n next_objective - current_objective,\n f_relative_tolerance * current_objective)\n return grad_converged | x_converged | f_converged", "def validate_points(a, b):\r\n\tdiff_y = b[0] - a[0]\r\n\tdiff_x = b[1] - a[1]\r\n\r\n\treturn (diff_y == 0 and diff_x != 0) or (diff_x == 0 and diff_y != 0) or abs(diff_x) == abs(diff_y)", "def approx_eq(x, y, tolerance = 0.000001):\n\treturn abs(x - y) < tolerance", "def check_correctness(S, bc_start='not-a-knot', bc_end='not-a-knot',\n tol=1e-14):\n x = S.x\n c = S.c\n dx = np.diff(x)\n dx = dx.reshape([dx.shape[0]] + [1] * (c.ndim - 2))\n dxi = dx[:-1]\n\n # Check C2 continuity.\n assert_allclose(c[3, 1:], c[0, :-1] * dxi**3 + c[1, :-1] * dxi**2 +\n c[2, :-1] * dxi + c[3, :-1], rtol=tol, atol=tol)\n assert_allclose(c[2, 1:], 3 * c[0, :-1] * dxi**2 +\n 2 * c[1, :-1] * dxi + c[2, :-1], rtol=tol, atol=tol)\n assert_allclose(c[1, 1:], 3 * c[0, :-1] * dxi + c[1, :-1],\n rtol=tol, atol=tol)\n\n # Check that we found a parabola, the third derivative is 0.\n if x.size == 3 and bc_start == 'not-a-knot' and bc_end == 'not-a-knot':\n assert_allclose(c[0], 0, rtol=tol, atol=tol)\n return\n\n # Check periodic boundary conditions.\n if bc_start == 'periodic':\n assert_allclose(S(x[0], 0), S(x[-1], 0), rtol=tol, atol=tol)\n assert_allclose(S(x[0], 1), S(x[-1], 1), rtol=tol, atol=tol)\n assert_allclose(S(x[0], 2), S(x[-1], 2), rtol=tol, atol=tol)\n return\n\n # Check other boundary conditions.\n if bc_start == 'not-a-knot':\n if x.size == 2:\n slope = (S(x[1]) - S(x[0])) / dx[0]\n assert_allclose(S(x[0], 1), slope, rtol=tol, atol=tol)\n else:\n assert_allclose(c[0, 0], c[0, 1], rtol=tol, atol=tol)\n elif bc_start == 'clamped':\n assert_allclose(S(x[0], 1), 0, rtol=tol, atol=tol)\n elif bc_start == 'natural':\n assert_allclose(S(x[0], 2), 0, rtol=tol, atol=tol)\n else:\n order, value = bc_start\n assert_allclose(S(x[0], order), value, rtol=tol, atol=tol)\n\n if bc_end == 'not-a-knot':\n if x.size == 2:\n slope = (S(x[1]) - S(x[0])) / dx[0]\n assert_allclose(S(x[1], 1), slope, rtol=tol, atol=tol)\n else:\n assert_allclose(c[0, -1], c[0, -2], rtol=tol, atol=tol)\n elif bc_end == 'clamped':\n assert_allclose(S(x[-1], 1), 0, rtol=tol, atol=tol)\n elif bc_end == 'natural':\n assert_allclose(S(x[-1], 2), 0, rtol=2*tol, atol=2*tol)\n else:\n order, value = bc_end\n assert_allclose(S(x[-1], order), value, rtol=tol, atol=tol)", "def slope(a, b):\r\n if a[0] == b[0]: #If the x values are both 0\r\n return 0 #Technically, undefined, but doesn't matter for finding collinearity\r\n return (a[1] - b[1]) / (a[0] - b[0])", "def is_coplanar(points, tol=0.01):\n tol2 = tol ** 2\n if len(points) == 4:\n v01 = subtract_vectors(points[1], points[0])\n v02 = subtract_vectors(points[2], points[0])\n v23 = subtract_vectors(points[3], points[0])\n res = dot_vectors(v02, cross_vectors(v01, v23))\n return 
res**2 < tol2\n # len(points) > 4\n # compare length of cross product vector to tolerance\n u = subtract_vectors(points[1], points[0])\n v = subtract_vectors(points[2], points[1])\n w = cross_vectors(u, v)\n for i in range(1, len(points) - 2):\n u = v\n v = subtract_vectors(points[i + 2], points[i + 1])\n wuv = cross_vectors(w, cross_vectors(u, v))\n if wuv[0]**2 > tol2 or wuv[1]**2 > tol2 or wuv[2]**2 > tol2:\n return False\n return True", "def contains(self, points, abs_tol=ABS_TOL):\n test = self.A.dot(points) - self.b[:, np.newaxis] < abs_tol\n return np.all(test, axis=0)", "def _check_approx_fixed_point(V_current, V_previous, tol):\n\n # Compute the sup norm between `V_current` and `V_previous`\n sup_norm = np.max(np.abs(V_current - V_previous))\n\n # Algorithm termination condition\n fp = sup_norm <= tol\n\n return fp, sup_norm", "def _allclose(x, y, rtol=1e-7, atol=1e-14):\n for a, b in zip(x, y):\n if np.abs(a - b) > (atol + rtol * np.abs(b)):\n return False\n return True", "def _raise_assert_on_np_is_close_all(self, np0, np1):\r\n\r\n return self.assertTrue(np.isclose(np0, np1).all())", "def test_lfc_and_el_below_lcl():\n dewpoint = [264.5351, 261.13443, 259.0122, 252.30063, 248.58017, 242.66582] * units.kelvin\n temperature = [273.09723, 268.40173, 263.56207, 260.257, 256.63538,\n 252.91345] * units.kelvin\n pressure = [1017.16, 950, 900, 850, 800, 750] * units.hPa\n el_pressure, el_temperature = el(pressure, temperature, dewpoint)\n lfc_pressure, lfc_temperature = lfc(pressure, temperature, dewpoint)\n assert_nan(lfc_pressure, pressure.units)\n assert_nan(lfc_temperature, temperature.units)\n assert_nan(el_pressure, pressure.units)\n assert_nan(el_temperature, temperature.units)", "def within_tolerance(x, y, tolerance): \r\n return abs(x) <= tolerance and abs(y) <= tolerance", "def get_overland_vector(catchpoints, closest, tol = 0.1, min_slope = 0.00001):\n\n length = get_distance_vector(catchpoints, closest)\n slope = (catchpoints[:,2] - closest[:,2]) / length / 100000\n\n for l, s in zip(length, slope):\n if l < tol: l, s = tol, min_slope\n\n return length / 2., slope", "def test_xy(self):\n x = np.array([[1,3], [2,8], [1,3]])\n y = np.array([1,1,-1])\n lro = LogisticRegressionOptimiser(x,y)\n expected = np.array([[1,3], [2,8], [-1,-3]])\n for i in 0,1,2:\n for j in 0,1:\n self.assertEqual(lro.xy[i][j], expected[i][j])", "def positive_slope(line:tuple)->bool:\n return line[0][1] < line[1][1] == line[0][0] < line[1][0]", "def test_linear():\n import nose.tools as nt\n A = -0.11; B = -0.13; g = 9.81; m = 50.; T = 10.; dt = 0.01;\n Cd = 1.2; rho = 1.0; A = 0.5;\n a = Cd*rho*A/(2.*m)\n def exact(t):\n return A*t+B\n\n def src(t):\n return m*g + m*a*abs(exact(t-dt/2.))*exact(t+dt/2.) + m*A\n \n v, t = solver(T, dt, B, Cd, rho, A, m, Source=src)\n ve = exact(t)\n diff = abs(ve - v)\n nt.assert_almost_equal(diff.max(), 0, delta=1e-12)", "def are_linear(self,\n angle_tol: Angle = Angle(1, units='deg')) -> bool:\n if len(self) < 2: # Must have at least 2 atoms colinear\n return False\n\n if len(self) == 2: # Two atoms must be linear\n return True\n\n tol = np.abs(1.0 - np.cos(angle_tol.to('rad')))\n\n # Take the normalised first vector\n vec0 = self[1].coord - self[0].coord\n vec0 /= np.linalg.norm(vec0)\n\n for atom in self[2:]:\n vec = atom.coord - self[0].coord\n cos_theta = np.dot(vec, vec0) / np.linalg.norm(vec)\n\n # Both e.g. 
<179° and >1° should satisfy this condition for\n # angle_tol = 1°\n if np.abs(np.abs(cos_theta) - 1) > tol:\n return False\n\n return True", "def test_lcl_convergence_issue():\n pressure = np.array([990, 973, 931, 925, 905]) * units.hPa\n temperature = np.array([14.4, 14.2, 13, 12.6, 11.4]) * units.degC\n dewpoint = np.array([14.4, 11.7, 8.2, 7.8, 7.6]) * units.degC\n lcl_pressure, _ = lcl(pressure[0], temperature[0], dewpoint[0])\n assert_almost_equal(lcl_pressure, 990 * units.hPa, 0)", "def test_single_linear_regression_coefficients(single_linear_regression_model):\n print(single_linear_regression_model)\n expected_coefficients = [(0, 151.27), (1, 303.90)]\n no_of_betas = len(single_linear_regression_model.B)\n for n in range(no_of_betas):\n assert single_linear_regression_model.B[n] == pytest.approx(\n expected_coefficients[n][1], 0.001\n )", "def point_isclose(a, b, *args, **kwargs):\n for x, y in zip(a, b):\n if not isclose(x, y, *args, **kwargs):\n return False\n return True", "def semi_plan_check(coords_list, normal_plane, point_on_plane, tol=1e-8):\n center_to_coords = coords_list - \\\n np.repeat(point_on_plane.reshape((-1,3)), len(coords_list), axis=0)\n normal_plane = \\\n np.repeat(normal_plane.reshape((-1,3)), len(coords_list), axis=0)\n inner_product = np.sum(center_to_coords*normal_plane,axis=1)\n flag = np.zeros(inner_product.shape, dtype=bool)\n flag[inner_product >= 0] = True\n return flag", "def equalWithinTolerance(a, b, tol):\n return abs(a - b) <= tol", "def test_distances(self):\n\n cent_1 = np.array([0.5, 0.5])\n verts_1 = np.array([[0., 1.], [0., 0.], [1., 0.], [1., 1.]])\n cent_2 = cent_1 - 0.5\n verts_2 = verts_1 - np.array([0.5, 0.5])\n\n # Compare the center-vertex distances between point sets with rigidly shifted coordinates\n self.assertTrue(all(po.cvdist(verts_1, cent_1) == po.cvdist(verts_2, cent_2)))\n # Compare the vertex-vertex distances between point sets with rigidly shifted coordinates\n self.assertTrue(all(po.vvdist(verts_1) == po.vvdist(verts_2)))", "def are_close(coord1, coord2, tolerance=10):\n return vincenty(coord1, coord2).meters < tolerance", "def _check_polyline(x_coords_metres, y_coords_metres):\n\n error_checking.assert_is_numpy_array_without_nan(x_coords_metres)\n error_checking.assert_is_numpy_array(x_coords_metres, num_dimensions=1)\n num_vertices = len(x_coords_metres)\n\n error_checking.assert_is_numpy_array_without_nan(y_coords_metres)\n error_checking.assert_is_numpy_array(\n y_coords_metres, exact_dimensions=numpy.array([num_vertices]))", "def is_on(a, b, c):\r\n return(isCollinear(a, b, c) and (within(a[0], c[0], b[0]) if a[0] != b[0] else\r\n within(a[1], c[1], b[1])))", "def assert_compare(x, y, atol=1e-5, method='ALL'):\n mae = 0\n mse = 0\n rmse = 0\n result = 0\n if method == 'MAE':\n mae = np.abs(x-y).mean()\n result = mae\n elif method == 'RMSE':\n rmse = np.sqrt(np.square(x - y).mean())\n result = rmse\n #result=np.sqrt(((x - y) ** 2).mean())\n elif method == 'MSE':\n mse = np.square(x - y).mean()\n result = mse\n #result=((x - y) ** 2).mean()\n else:\n mae = np.abs(x-y).mean()\n rmse = np.sqrt(np.square(x - y).mean())\n mse = np.square(x - y).mean()\n\n if result > atol or (method == 'ALL' and (mae > atol or rmse > atol or mse > atol)):\n f = six.StringIO()\n f.write(\n 'assert_compare failed: \\n' +\n ' atol: {} \\n'.format(atol) +\n ' method: {}\\n'.format(method) +\n ' MAE: {}\\n'.format(mae) +\n ' MSE: {}\\n'.format(mse) +\n ' RMSE: {}\\n'.format(rmse) +\n ' shape: {} {}\\n'.format(x.shape, y.shape) +\n ' dtype: {} 
{}\\n'.format(x.dtype, y.dtype))\n if x.shape == y.shape:\n xx = x if x.ndim != 0 else x.reshape((1,))\n yy = y if y.ndim != 0 else y.reshape((1,))\n err = np.abs(xx - yy)\n i = np.unravel_index(np.argmax(err), err.shape)\n f.write(\n ' i: {}\\n'.format(i) +\n ' x[i]: {}\\n'.format(xx[i]) +\n ' y[i]: {}\\n'.format(yy[i]) +\n ' err[i]: {}\\n'.format(err[i]))\n opts = np.get_printoptions()\n try:\n np.set_printoptions(threshold=10000)\n f.write('x: ' + np.array2string(x, prefix='x: ') + '\\n')\n f.write('y: ' + np.array2string(y, prefix='y: ') + '\\n')\n finally:\n np.set_printoptions(**opts)\n logging.warning(f.getvalue())\n return False\n else:\n return True", "def equality_constrained_linear_least_squares(A, B, y, z):\n return lapack.dgglse(A, B, y, z)[3]", "def is_convex(reg, abs_tol=ABS_TOL):\n if not is_fulldim(reg):\n return True\n if len(reg) == 0:\n return True\n outer = envelope(reg)\n if is_empty(outer):\n # Probably because input polytopes were so small and ugly..\n return False, None\n Pl, Pu = reg.bounding_box\n Ol, Ou = outer.bounding_box\n bboxP = np.hstack([Pl, Pu])\n bboxO = np.hstack([Ol, Ou])\n if (\n sum(abs(bboxP[:, 0] - bboxO[:, 0]) > abs_tol) > 0 or\n sum(abs(bboxP[:, 1] - bboxO[:, 1]) > abs_tol) > 0):\n return False, None\n if is_fulldim(outer.diff(reg)):\n return False, None\n else:\n return True, outer", "def test_lineclip():\n # %% LOWER to UPPER test\n x1, y1, x2, y2 = plc.cohensutherland(1, 5, 4, 3,\n 0, 0, 4, 6)\n\n assert [x1, y1, x2, y2] == approx([2, 3, 3.3333333333333, 5])\n # %% no intersection test\n x1, y1, x2, y2 = plc.cohensutherland(1, 5, 4, 3,\n 0, 0.1, 0, 0.1)\n\n assert x1 is None and y1 is None and x2 is None and y2 is None\n # %% left to right test\n x1, y1, x2, y2 = plc.cohensutherland(1, 5, 4, 3,\n 0, 4, 5, 4)\n\n assert [x1, y1, x2, y2] == [1, 4, 4, 4]", "def perfect_collinearity_test(X, min_rows=\"infer\", max_rows=None):\n # Sets the minimum number of rows to start with.\n if min_rows == \"infer\":\n rows_to_use = 2*X.shape[1]\n if rows_to_use > X.shape[0]:\n rows_to_use = X.shape[0]\n else:\n rows_to_use = min_rows\n \n # Sets the maximum number of rows to use.\n if max_rows is None:\n max_rows = X.shape[0]\n \n columns_in_dataframe = X.columns\n \n # Template for printing even columns\n template = \"{0:%s}{1:13}{2:16}\" % len(max(X.columns, key=lambda x: len(x)))\n \n # Series to save results\n results = pd.Series()\n \n # Runs a regression of every x against all other X variables.\n # Starts with a small dataset and if R^2 == 1, doubles the size\n # of the dataset until greater than max_rows.\n for temp_y_variable in columns_in_dataframe:\n rows_to_use_base = rows_to_use\n while True:\n X_master = X[:rows_to_use_base]\n temp_X_variables = [col for col in columns_in_dataframe if col != temp_y_variable]\n y_temp = X_master[temp_y_variable]\n X_temp = X_master[temp_X_variables]\n lin_model = LinearRegression()\n lin_model.fit(X_temp, y_temp)\n R_2 = lin_model.score(X_temp, y_temp)\n if R_2 != 1 and R_2 >= 0 or rows_to_use_base >= max_rows:\n if R_2 == 1:\n print(\"\")\n print(temp_y_variable + \": PERFECT COLLINEARITY ********\")\n temp_series = pd.Series(lin_model.coef_, index=temp_X_variables)\n print(list(temp_series[temp_series.round(9) != 0].index))\n print(\"\")\n else:\n print(template.format(temp_y_variable, \" VIF = \" + str(round((1.0/(1.0-R_2)),1)), \"R^2 = \" + str(round(R_2,4))))\n results[temp_y_variable] = R_2\n break\n rows_to_use_base += rows_to_use_base\n if rows_to_use_base > X.shape[0]:\n rows_to_use_base = X.shape[0]\n 
return results", "def is_point_on_polyline(point, polyline, tol=0.0):\n for i in xrange(len(polyline) - 1):\n a = polyline[i]\n b = polyline[i + 1]\n c = closest_point_on_segment(point, (a, b))\n if distance_point_point(point, c) <= tol:\n return True\n return False", "def within_tolerance(a_vec, b_vec, tol_vec):\n\tfor a, b, tol in zip(a_vec, b_vec, tol_vec):\n\t\tif abs(a - b) > tol:\n\t\t\treturn False\n\treturn True", "def test_coherence_regularized():\r\n for method in methods:\r\n f, c = tsa.coherence_regularized(tseries, 0.05, 1000,\r\n csd_method=method)\r\n npt.assert_array_almost_equal(c[0, 1], c[1, 0])", "def assert_matches_mapping(\n *,\n source_corners: np.ndarray,\n points: np.ndarray,\n linear_transform: Callable[[np.ndarray], np.ndarray],\n atol: float = 1e-8,\n rtol: float = 1e-8\n):\n dest_corners = linear_transform(source_corners)\n\n actual_points = transform_corners(\n points, source_corners=source_corners, dest_corners=dest_corners\n )\n\n desired_points = linear_transform(points)\n\n assert_allclose(actual=actual_points, desired=desired_points, atol=atol, rtol=rtol)", "def is_ccw(point_a, point_b, point_c):\r\n return is_on_line(point_a, point_b, point_c) > 0", "def is_point_on_line(point, line, tol=0.0):\n d = distance_point_line(point, line)\n return d <= tol", "def nearlyEqual(self, x, y):\n return abs(x-y) < self.absoluteerrorrange", "def is_on_line(point_a, point_b, point_c):\r\n return (point_b[0] - point_a[0]) * (point_c[1] - point_a[1]) - (point_b[1] - point_a[1]) * (point_c[0] - point_a[0])", "def test_lcl_grid_surface_lcls():\n pressure = np.array([1000, 990, 1010]) * units.hPa\n temperature = np.array([15, 14, 13]) * units.degC\n dewpoint = np.array([15, 10, 13]) * units.degC\n lcl_pressure, lcl_temperature = lcl(pressure, temperature, dewpoint)\n pres_truth = np.array([1000, 932.1719, 1010]) * units.hPa\n temp_truth = np.array([15, 9.10424, 13]) * units.degC\n assert_array_almost_equal(lcl_pressure, pres_truth, 4)\n assert_array_almost_equal(lcl_temperature, temp_truth, 4)", "def test_validate_coord(c, ans):\n result = _validate_coord(c)\n np.testing.assert_allclose(result, ans)", "def test_score(coefs, intercept, method):\n X, y = _create_dataset(coefs, intercept, noise=1.0)\n lad = LADRegression(method=method)\n lad.fit(X, y)\n assert lad.score(X, y) > 0.9", "def test_vector_fails(self, a, b, rtol, atol):\n with pytest.raises(AssertionError):\n self.func(a, b, rtol=rtol, atol=atol)", "def at_loc((x, y), (cx, cy), eps=0.000035):\n\treturn (x - cx)**2 + (y - cy)**2 <= eps**2", "def zpe_check(test_inst, zpe):\n for z_row in zpe:\n corr, coord = float(z_row[CORR_KEY]), float(z_row[COORD_KEY])\n if corr == 0:\n test_inst.assertAlmostEqual(6.0, coord)\n else:\n test_inst.assertTrue(corr < 0.0 or math.isinf(corr))", "def check_points(nodeL, nodeR, points, city):\n A = points\n B = city\n C = nodeL\n D = nodeR\n\n d1 = (B[0] - A[0]) * (C[1] - A[1]) - (B[1] - A[1]) * (C[0] - A[0])\n d2 = (B[0] - A[0]) * (D[1] - A[1]) - (B[1] - A[1]) * (D[0] - A[0])\n\n if (d1 < 0) & (d2 < 0):\n return True\n if (d1 > 0) & (d2 > 0):\n return True\n else:\n return False", "def test_critic_abspearson_linear(self):\n z_matrix = np.array(\n [[0.0, 0.0, 1.0],\n [0.1, 0.2, 0.8],\n [0.2, 0.4, 0.6],\n [0.3, 0.7, 0.3],\n [0.6, 0.8, 0.2],\n [0.8, 0.9, 0.1],\n [1.0, 1.0, 0.0]],\n dtype=np.float64)\n obtained_w_vector = mcdm.weigh(z_matrix, \"CRITIC\", \"AbsPearson\")\n expected_w_vector = np.array(\n [0.50000000, 0.25000000, 0.25000000],\n dtype=np.float64)\n 
np.testing.assert_allclose(obtained_w_vector, expected_w_vector)\n self.assertEqual(obtained_w_vector.dtype, expected_w_vector.dtype)", "def test_are_vertices_adjacent_last_before_first(self):\n\n self.assertTrue(skeleton_lines._are_vertices_adjacent(\n vertex_indices=VERTEX_INDICES_ADJACENT_WRAPAROUND[::-1],\n num_vertices_in_polygon=NUM_VERTICES_FOR_ADJACENCY_TEST))", "def test_vector(self, a, b, rtol, atol, expected):\n assert np.all(self.func(a, b, rtol=rtol, atol=atol) == expected)", "def allclose_anyaware(x, y, rtol=1e-5, atol=1e-8):\n if not SWITCH_ON or has_tensor([x, y]):\n return znp.all(znp.less_equal(znp.abs(x - y), znp.abs(y) * rtol + atol))\n else:\n x = np.array(x)\n y = np.array(y)\n if any(ar.dtype == object for ar in (x, y)):\n from zfit.core.space import LimitRangeDefinition\n\n equal = []\n for x1, y1 in zip(x[0], y[0]):\n if isinstance(x1, LimitRangeDefinition) or isinstance(\n y1, LimitRangeDefinition\n ):\n equal.append(x1 < y1 or x1 > y1)\n else:\n equal.append(np.allclose(x1, y1, rtol=rtol, atol=atol))\n allclose = np.array(equal)[None, :]\n else:\n allclose = np.allclose(x, y, rtol=rtol, atol=atol)\n\n return allclose", "def test_defect_calculation():\n slope1, slope2 = 2., 3.\n step1, step2 = Fraction(5), Fraction(7)\n cosim = ramp_cosimulation(slope1, slope2, step1, step2)\n t_end = Fraction(20)\n defect = cs.evaluate(cosim, t_end)\n\n alpha = Fraction(int(lcm(step1.numerator, step2.numerator)),\n int(gcd(step1.denominator, step2.denominator)))\n num1, num2 = tuple(map(int, [alpha / step for step in (step1, step2)]))\n big = max(num1, num2) + 1\n small = min(num1, num2) - 1\n assert defect.connection['Ramp1', 'u'] > small * slope2 * step2\n assert defect.connection['Ramp1', 'u'] < big * slope2 * step2\n assert defect.connection['Ramp2', 'u'] > small * slope1 * step1\n assert defect.connection['Ramp2', 'u'] < big * slope1 * step1\n\n assert defect.output['Ramp1', 'y'] == pytest.approx(slope1 * step1)\n assert defect.output['Ramp2', 'y'] == pytest.approx(slope2 * step2)", "def validate_normalize_approximately(_vector, x, y, z):\n epsilon = 0.00001\n under_test = _vector.norm\n assert x - epsilon <= under_test.x <= x + epsilon\n assert y - epsilon <= under_test.y <= y + epsilon\n assert z - epsilon <= under_test.z <= z + epsilon", "def test_coefs_and_intercept__no_noise_regularization(coefs, intercept):\n X, y = _create_dataset(coefs, intercept)\n\n lads = [LADRegression(alpha=alpha, l1_ratio=0.).fit(X, y) for alpha in range(3)]\n coef_size = np.array([np.sum(lad.coef_ ** 2) for lad in lads])\n\n for i in range(2):\n assert coef_size[i] >= coef_size[i + 1]", "def test_straight_line(self):\n test_x = np.linspace(0, 9, 10)\n test_y = np.linspace(0, 18, 10)\n result_y = utils.straight_line(test_x, 2, 0)\n assert_almost_equal(result_y, test_y)", "def validate_clockwise_points(points):\n \n if len(points) != 8:\n raise Exception(\"Points list not valid.\" + str(len(points)))\n \n point = [\n [int(points[0]) , int(points[1])],\n [int(points[2]) , int(points[3])],\n [int(points[4]) , int(points[5])],\n [int(points[6]) , int(points[7])]\n ]\n edge = [\n ( point[1][0] - point[0][0])*( point[1][1] + point[0][1]),\n ( point[2][0] - point[1][0])*( point[2][1] + point[1][1]),\n ( point[3][0] - point[2][0])*( point[3][1] + point[2][1]),\n ( point[0][0] - point[3][0])*( point[0][1] + point[3][1])\n ]\n \n summatory = edge[0] + edge[1] + edge[2] + edge[3];\n return summatory <= 0", "def isinsidelineXY(l,p):\n\n return linePointXY(l,p,distance=True) < epsilon", "def 
check_evaluation_points(x, y):\n assert x.ndim == y.ndim == 1\n assert x.shape == y.shape\n assert x.dtype == y.dtype == np.float64", "def compare3float_relative(x_base, y_check, z_intersection, relative_error):\n return compare2float_relative(x_base, y_check, relative_error) and \\\n compare2float_relative(x_base, z_intersection, relative_error) and \\\n compare2float_relative(y_check, z_intersection, relative_error)", "def test_failure_and_non_convergence(self):\n\n # Set up the problem of finding the square roots of three numbers.\n constants = np.array([4.0, 9.0, 16.0])\n # Choose a bad initial position.\n initial_values = np.zeros(len(constants))\n\n def objective_and_gradient(values):\n objective = values**2 - constants\n gradient = 2.0 * values\n return objective, gradient\n\n # Obtain and evaluate a tensor containing the roots.\n roots = newton_root_finder(objective_and_gradient, initial_values)\n _, converged, failed = self.evaluate(roots)\n\n # Reference values - we should not have converged and should have failed.\n converged_bench = np.array([False, False, False])\n failed_bench = np.array([True, True, True])\n\n # Assert that the values we obtained are close to the true values.\n np.testing.assert_array_equal(converged, converged_bench)\n np.testing.assert_array_equal(failed, failed_bench)", "def _point_almost_equal(a,b, rtol=RTOL, atol=ATOL):\n return np.allclose(a._Point__loc, b._Point__loc,\n rtol=rtol, atol=atol)", "def testSetSlopeWithNegativeFloat(self):\n def setSlope():\n self.node.slope = -20.1\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n setSlope\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n setSlope()\n\n self.assertEqual(\n (Decimal('0.0'), Decimal('0.0'), Decimal('0.0')),\n self.node.slope\n )", "def test_coherence_linear_dependence():\r\n t = np.linspace(0, 16 * np.pi, 2 ** 14)\r\n x = np.sin(t) + np.sin(2 * t) + np.sin(3 * t) + \\\r\n 0.1 * np.random.rand(t.shape[-1])\r\n N = x.shape[-1]\r\n\r\n alpha = 10\r\n m = 3\r\n noise = 0.1 * np.random.randn(t.shape[-1])\r\n y = alpha * np.roll(x, m) + noise\r\n\r\n f_noise = fftpack.fft(noise)[0:N / 2]\r\n f_x = fftpack.fft(x)[0:N / 2]\r\n\r\n c_t = (1 / (1 + (f_noise / (f_x * (alpha ** 2)))))\r\n\r\n method = {\"this_method\": 'welch',\r\n \"NFFT\": 2048,\r\n \"Fs\": 2 * np.pi}\r\n\r\n f, c = tsa.coherence(np.vstack([x, y]), csd_method=method)\r\n c_t = np.abs(signaltools.resample(c_t, c.shape[-1]))\r\n\r\n npt.assert_array_almost_equal(c[0, 1], c_t, 2)", "def is_collinear(self, directed_edge):\n\n return self.orientation(directed_edge.begin) == 0 and self.orientation(directed_edge.end) == 0", "def test_are_vertices_adjacent_first_before_last(self):\n\n self.assertTrue(skeleton_lines._are_vertices_adjacent(\n vertex_indices=VERTEX_INDICES_ADJACENT_WRAPAROUND,\n num_vertices_in_polygon=NUM_VERTICES_FOR_ADJACENCY_TEST))", "def ok(self, point):\n [x1, x2, x3, x4, x5, x6] = point.decisions\n if x1 + x2 -2 < 0:\n return False\n if 6 - x1 - x2 < 0:\n return False\n if 2 - x2 + x1 < 0:\n return False\n if 2 - x1 + 3*x2 < 0:\n return False\n if 4 - (x3 - 3)**2 - x4 < 0:\n return False\n if (x5 - 3)**3 + x6 - 4 < 0:\n return False\n for i, d in enumerate(point.decisions):\n if d < self.decisions[i].low or d > self.decisions[i].high:\n print i, d, self.decisions[i].low, self.decisions[i].high\n return False\n return True", "def has_converged(x, y, epsilon=EPSILON):\n for a, b in itertools.izip(x, y):\n if abs(a - b) > epsilon:\n return False\n return True", "def has_converged(x, y, 
epsilon=EPSILON):\n for a, b in itertools.izip(x, y):\n if abs(a - b) > epsilon:\n return False\n return True", "def assert_allclose(x, y, atol=1e-5, rtol=1e-4, verbose=True):\n try:\n #logging.warning(\"int8_outputis {} and fp32 output is {} \".format(x, y))\n np.testing.assert_allclose(\n x, y, atol=atol, rtol=rtol, verbose=verbose)\n return True\n except AssertionError as e:\n f = six.StringIO()\n f.write(str(e) + '\\n\\n')\n f.write(\n 'assert_allclose failed: \\n' +\n ' shape: {} {}\\n'.format(x.shape, y.shape) +\n ' dtype: {} {}\\n'.format(x.dtype, y.dtype))\n if x.shape == y.shape:\n xx = x if x.ndim != 0 else x.reshape((1,))\n yy = y if y.ndim != 0 else y.reshape((1,))\n err = np.abs(xx - yy)\n i = np.unravel_index(np.argmax(err), err.shape)\n f.write(\n ' i: {}\\n'.format(i) +\n ' x[i]: {}\\n'.format(xx[i]) +\n ' y[i]: {}\\n'.format(yy[i]) +\n ' err[i]: {}\\n'.format(err[i]))\n opts = np.get_printoptions()\n try:\n np.set_printoptions(threshold=10000)\n f.write('x: ' + np.array2string(x, prefix='x: ') + '\\n')\n f.write('y: ' + np.array2string(y, prefix='y: ') + '\\n')\n finally:\n np.set_printoptions(**opts)\n #raise AssertionError(f.getvalue())\n logging.warning(f.getvalue())\n return False", "def is_colinear(a, b, c):\n return (b[0] - a[0]) * (c[1] - a[1]) - (b[1] - a[1]) * (c[0] - a[0]) == 0", "def test_lcl_convergence():\n with pytest.raises(RuntimeError):\n lcl(1000. * units.mbar, 30. * units.degC, 20. * units.degC, max_iters=2)", "def test_exception():\n mat2D = MatrixDouble([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0], [3.0, 3.0]])\n mat3D = MatrixDouble([[0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [2.0, 2.0, 2.0], [3.0, 3.0, 3.0]])\n\n with pytest.raises(ValueError):\n simplify_line_2d(mat3D, 0.1, True)\n\n with pytest.raises(ValueError):\n simplify_line_3d(mat2D, 0.1, True)", "def test_intersect_line_in_one_point(start, end):\n circle = ConstructionCircle((0, 0), 1.0)\n assert len(circle.intersect_line(ConstructionLine(start, end))) == 1", "def isclose ( a , b , rel_tol = 1.e-9 , abs_tol = 0.0 ) :\n return abs ( a - b ) <= max ( rel_tol * max ( abs ( a ) , abs ( b ) ) , abs_tol )", "def test_compare_Expsmall_line(self):\r\n # exp function crosses over to line func as A_shell-->0\r\n self.model.setParam(\"A_shell1\", 0.000001)\r\n self.model2.setParam(\"A_shell1\", 1)\r\n # change the function to a line function\r\n self.model2.setParam(\"func_shell1\", 1)\r\n \r\n #Compare exp(A=0.000001) to linear (where A_shell is null) function \r\n self.assertAlmostEqual(self.model.run(0.1),self.model2.run(0.1),4)", "def is_point_inside_triangle(point, v1, v2, v3):\n # calling the function solve_linear_3\n ans_tuple = solve_linear_3([[v1[0], v2[0], v3[0]], [v1[1], v2[1], v3[1]],\n [1, 1, 1]], [point[0], point[1], 1])\n # run over the tuple returned by solve_linear_3\n for cor in ans_tuple:\n # if one value is negative return the tuple with false and coefficients\n if cor < 0:\n return (False, ans_tuple)\n # if no value is negative return the tuple with true and coefficients\n return (True, ans_tuple)", "def test_closeness_centrality_after_element_perturbation_isolating():\n F = FaultDiagnosis(\"tests/TOY_graph_nofaultresistant.csv\")\n F.simulate_element_perturbation([\"1\"])\n\n clo_cen_after_element_perturbation = {\n '2': 0,\n '3': 0,\n '4': 0.058823529411764705,\n '5': 0.058823529411764705,\n '6': 0.18823529411764706,\n '7': 0.11764705882352941,\n '8': 0.11764705882352941,\n '9': 0.15126050420168066,\n '10': 0.12538699690402477,\n '11': 0.1660899653979239,\n '12': 0.1859114015976761,\n '13': 
0.16020025031289112,\n '14': 0.1859114015976761,\n '15': 0,\n '16': 0.1711229946524064,\n '17': 0.12981744421906694,\n '18': 0.17346938775510204,\n '19': 0.22145328719723184\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(clo_cen_after_element_perturbation.values())),\n np.asarray(sorted(F.G.closeness_centrality.values())),\n err_msg=\"FINAL CLOSENESS CENTRALITY failure: perturbation of element 1\")", "def test_invalid_which(multiple_intersections):\n levels, temperatures, dewpoints = multiple_intersections\n with pytest.raises(ValueError):\n lfc(levels, temperatures, dewpoints, which='test')\n with pytest.raises(ValueError):\n el(levels, temperatures, dewpoints, which='test')", "def check_point_right(nodeL, nodeR, city):\n A = get_city_points(city)\n B = get_node_points(nodeL)\n C = get_node_points(nodeR)\n slope = _slope(A, B)\n (F, G) = calibrator(A, B, slope)\n sign = math.copysign(1, ((G[0] - F[0]) * (C[1] - F[1]) - (G[1] - F[1]) * (C[0] - F[0])))\n\n if slope == \"horizontal\":\n if sign == 1:\n if A[0] > B[0]:\n return True\n else:\n return False\n else:\n if A[0] < B[0]:\n return True\n else:\n return False\n\n if slope == \"vertical\":\n if sign == 1:\n if A[1] < B[1]:\n return True\n else:\n return False\n else:\n if A[1] > B[1]:\n return True\n else:\n return False\n\n if slope == \"inclined\":\n if sign == 1:\n if A[1] < B[1]:\n return True\n else:\n return False\n else:\n if A[1] > B[1]:\n return True\n else:\n return False\n\n if slope == \"declined\":\n if sign == 1:\n if A[1] < B[1]:\n return True\n else:\n return False\n else:\n if A[1] > B[1]:\n return True\n else:\n return False", "def verify(self, y):\n left = self.sgroup.exponentiate(self.a, y)\n right = (self.x * self.sgroup.exponentiate(self.b, self.c)) % self.sgroup.p\n is_ok = (left == right)\n return is_ok", "def test_simple_robust_regression_corner_case():\n # No variation in x-space\n x = np.array([4, 4, 4, 4, 4])\n y = np.array([1, 2, 3, 4, 5])\n out = simple_robust_regression(x, y)\n assert np.isnan(out[\"standard_error_intercept\"])\n assert np.isnan(out[\"standard_errors\"][0])\n assert np.isnan(out[\"conf_intervals\"][0][0])\n assert np.isnan(out[\"conf_intervals\"][0][1])", "def test_nearest_boundary_odd():\n assert _nearest_boundary(10, 19, 14, 0) == 0\n assert _nearest_boundary(10, 19, 14, 1) == 1", "def close(double x, double y, double rtol, double atol):\n # Test for nan\n if isnan(x) or isnan(y):\n return False\n\n # Make sure they are both inf or non-inf\n cdef int xinf, yinf\n xinf = isinf(x)\n yinf = isinf(y)\n\n if not xinf == yinf:\n return False\n\n if xinf:\n # If they are both inf, make sure the signs are the same.\n return (x > 0) == (y > 0)\n else:\n # Otherwise, make sure they are close.\n return fabs(x-y) <= atol + rtol * fabs(y)", "def test_suite():\r\n test(slope(5, 3, 4, 2) == 1.0)\r\n test(slope(1, 2, 3, 2) == 0.0)\r\n test(slope(1, 2, 3, 3) == 0.5)\r\n test(slope(2, 4, 1, 2) == 2.0)", "def perfect_collinearity_test_simple(X, min_rows=\"infer\", max_rows=None):\n # Sets the minimum number of rows to start with.\n if min_rows == \"infer\":\n rows_to_use = 2*X.shape[1]\n if rows_to_use > X.shape[0]:\n rows_to_use = X.shape[0]\n else:\n rows_to_use = min_rows\n \n # Sets the maximum number of rows to use.\n if max_rows is None:\n max_rows = X.shape[0]\n \n columns_in_dataframe = X.columns\n\n \n # Series to save results\n results = pd.Series()\n \n # Runs a regression of every x against all other X variables.\n # Starts with a small dataset and if R^2 == 1, doubles the 
size\n # of the dataset until greater than max_rows.\n for temp_y_variable in columns_in_dataframe:\n rows_to_use_base = rows_to_use\n while True:\n X_master = X[:rows_to_use_base]\n temp_X_variables = [col for col in columns_in_dataframe if col != temp_y_variable]\n y_temp = X_master[temp_y_variable]\n X_temp = X_master[temp_X_variables]\n lin_model = LinearRegression()\n lin_model.fit(X_temp, y_temp)\n R_2 = lin_model.score(X_temp, y_temp)\n if R_2 != 1 and R_2 >= 0 or rows_to_use_base >= max_rows:\n results[temp_y_variable] = R_2\n break\n rows_to_use_base += rows_to_use_base\n if rows_to_use_base > X.shape[0]:\n rows_to_use_base = X.shape[0]\n return results", "def test_critic_linear(self):\n z_matrix = np.array(\n [[0.0, 0.0, 1.0],\n [0.1, 0.2, 0.8],\n [0.2, 0.4, 0.6],\n [0.3, 0.7, 0.3],\n [0.6, 0.8, 0.2],\n [0.8, 0.9, 0.1],\n [1.0, 1.0, 0.0]],\n dtype=np.float64)\n obtained_w_vector = mcdm.weigh(z_matrix, \"CRITIC\")\n expected_w_vector = np.array(\n [0.25000000, 0.25857023, 0.49142977],\n dtype=np.float64)\n np.testing.assert_allclose(obtained_w_vector, expected_w_vector)\n self.assertEqual(obtained_w_vector.dtype, expected_w_vector.dtype)", "def IsApproximatelyEqual(x, y, epsilon = 1e-6):\n # Check absolute precision.\n if -epsilon <= x - y <= epsilon:\n return True\n\n # Is x or y too close to zero?\n if -epsilon <= x <= epsilon or -epsilon <= y <= epsilon:\n return False\n\n # Check relative precision.\n return (-epsilon <= (x - y) / x <= epsilon or -epsilon <= (x - y) / y <= epsilon)" ]
[ "0.70717025", "0.6318565", "0.6101531", "0.60696363", "0.605181", "0.59607244", "0.5879398", "0.5877427", "0.5806856", "0.57670236", "0.5622218", "0.5608094", "0.5563991", "0.55154955", "0.5489862", "0.54876494", "0.54695606", "0.5420858", "0.5409832", "0.5381098", "0.53751147", "0.53664863", "0.53404224", "0.5323383", "0.53217673", "0.5308804", "0.5283994", "0.528149", "0.52712834", "0.52668166", "0.526033", "0.5242198", "0.5234444", "0.5233096", "0.5223213", "0.5209235", "0.52004415", "0.5192932", "0.5173626", "0.5173501", "0.516473", "0.5145385", "0.51432854", "0.51426727", "0.514215", "0.5139835", "0.5133334", "0.5126847", "0.5118995", "0.51168287", "0.51093256", "0.5096358", "0.50829375", "0.50800675", "0.5073235", "0.5069947", "0.5059459", "0.5054132", "0.50262976", "0.50262177", "0.5024755", "0.50214934", "0.5016887", "0.5013341", "0.5007325", "0.5005283", "0.50050706", "0.498848", "0.49873948", "0.4974422", "0.4973985", "0.4971127", "0.4965195", "0.4964109", "0.49635497", "0.49490047", "0.49467298", "0.49454722", "0.4944825", "0.49440366", "0.49440366", "0.49424034", "0.49311727", "0.49221343", "0.49191618", "0.49180484", "0.49174452", "0.4914863", "0.4909817", "0.49097258", "0.49069774", "0.4905218", "0.49014097", "0.4897541", "0.48906967", "0.48871952", "0.48856843", "0.48826453", "0.4881277", "0.48805457" ]
0.80137265
0
Verifies differentiability of curves by checking collinearity of adjacent curves' control points
def assert_differentiable(*curves: CubicBezierCurve) -> bool:
    if not curves:
        raise ValueError("CurveChecker.assert_differentiable() cannot be called on an empty list")

    if not assert_continuous(*curves):
        return False

    for curve0, curve1 in zip(curves, curves[1:]):
        if not assert_collinear(curve0.c1, curve1.p0, curve1.c0):
            return False

    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def isCollinear(a,b,c):\r\n #return slope(a, b) == slope(b, c) == slope(c, a) #DOES NOT WORK\r\n #return (b[0] - a[0]) * (c[1] - a[1]) == (c[0] - a[0]) * (b[1] - a[1]) \r\n #return distance(a,b) + distance(b,c) == distance(a,c)\r\n x1 = a[0]\r\n y1 = a[1]\r\n x2 = b[0]\r\n y2 = b[1]\r\n x3 = c[0]\r\n y3 = c[1] \r\n if (x1*(y2 - y3)) + (x2*(y3 - y1)) + (x3*(y1-y2)) == 0: \r\n return True\r\n else:\r\n return False", "def assert_collinear(*points: Point, tolerance: float = 1e-2) -> bool:\n if len(points) < 3:\n raise ValueError(\"CurveChecker.assert_collinear() must be called with at least three points\")\n\n thetas = [np.arctan2(p0[1] - p1[1], p0[0] - p1[0]) for p0, p1 in zip(points, points[1:])]\n for t0, t1 in zip(thetas, thetas[1:]):\n if abs(t0 - t1) > tolerance:\n return False\n\n return True", "def hasCollinearPoints(listOfPoints):\r\n for points in listOfPoints:\r\n if isCollinear(points[0], points[1], points[2]): #If any of the points are collinear\r\n return True\r\n else:\r\n pass\r\n return False #If none of the points are collinear\r", "def collinear(a1, b1, a2, b2, a3, b3):\n a = x1 * (b2 - b3) + a2 * (b3 - b1) + a3 * (b1 - b2)\n \n if (a == 0):\n print \"Yes\"\n else:\n print \"No\"", "def test_coherence_regularized():\r\n for method in methods:\r\n f, c = tsa.coherence_regularized(tseries, 0.05, 1000,\r\n csd_method=method)\r\n npt.assert_array_almost_equal(c[0, 1], c[1, 0])", "def collinear(a:tuple, b:tuple, c:tuple)->bool:\n return ((b[1] - c[1]) * (a[0] - b[0])) == ((a[1] - b[1]) * (b[0] - c[0]))", "def checkCollinearity(x):\n C_mat = x.corr()\n fig = plt.figure(figsize = (15,15))\n sb.heatmap(C_mat, vmax = .8, square = True)\n plt.show()", "def test_coherence():\r\n\r\n for method in methods:\r\n f, c = tsa.coherence(tseries, csd_method=method)\r\n npt.assert_array_almost_equal(c[0, 1], c[1, 0])\r\n npt.assert_array_almost_equal(c[0, 0], np.ones(f.shape))", "def test_closeness_centrality_after_element_perturbation_isolating():\n F = FaultDiagnosis(\"tests/TOY_graph_nofaultresistant.csv\")\n F.simulate_element_perturbation([\"1\"])\n\n clo_cen_after_element_perturbation = {\n '2': 0,\n '3': 0,\n '4': 0.058823529411764705,\n '5': 0.058823529411764705,\n '6': 0.18823529411764706,\n '7': 0.11764705882352941,\n '8': 0.11764705882352941,\n '9': 0.15126050420168066,\n '10': 0.12538699690402477,\n '11': 0.1660899653979239,\n '12': 0.1859114015976761,\n '13': 0.16020025031289112,\n '14': 0.1859114015976761,\n '15': 0,\n '16': 0.1711229946524064,\n '17': 0.12981744421906694,\n '18': 0.17346938775510204,\n '19': 0.22145328719723184\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(clo_cen_after_element_perturbation.values())),\n np.asarray(sorted(F.G.closeness_centrality.values())),\n err_msg=\"FINAL CLOSENESS CENTRALITY failure: perturbation of element 1\")", "def test_coherence_linear_dependence():\r\n t = np.linspace(0, 16 * np.pi, 2 ** 14)\r\n x = np.sin(t) + np.sin(2 * t) + np.sin(3 * t) + \\\r\n 0.1 * np.random.rand(t.shape[-1])\r\n N = x.shape[-1]\r\n\r\n alpha = 10\r\n m = 3\r\n noise = 0.1 * np.random.randn(t.shape[-1])\r\n y = alpha * np.roll(x, m) + noise\r\n\r\n f_noise = fftpack.fft(noise)[0:N / 2]\r\n f_x = fftpack.fft(x)[0:N / 2]\r\n\r\n c_t = (1 / (1 + (f_noise / (f_x * (alpha ** 2)))))\r\n\r\n method = {\"this_method\": 'welch',\r\n \"NFFT\": 2048,\r\n \"Fs\": 2 * np.pi}\r\n\r\n f, c = tsa.coherence(np.vstack([x, y]), csd_method=method)\r\n c_t = np.abs(signaltools.resample(c_t, c.shape[-1]))\r\n\r\n npt.assert_array_almost_equal(c[0, 1], c_t, 
2)", "def test_cov_changebasis(self):\n cov_xp = np.array([[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]])\n\n cov_symmetric = np.array([[0, 2, 1, 3], [8, 10, 9, 11], [4, 6, 5, 7], [12, 14, 13, 15]])\n\n assert np.all(symplectic.xxpp_to_xpxp(cov_xp) == cov_symmetric)\n assert np.all(symplectic.xpxp_to_xxpp(cov_symmetric) == cov_xp)", "def check_correctness(S, bc_start='not-a-knot', bc_end='not-a-knot',\n tol=1e-14):\n x = S.x\n c = S.c\n dx = np.diff(x)\n dx = dx.reshape([dx.shape[0]] + [1] * (c.ndim - 2))\n dxi = dx[:-1]\n\n # Check C2 continuity.\n assert_allclose(c[3, 1:], c[0, :-1] * dxi**3 + c[1, :-1] * dxi**2 +\n c[2, :-1] * dxi + c[3, :-1], rtol=tol, atol=tol)\n assert_allclose(c[2, 1:], 3 * c[0, :-1] * dxi**2 +\n 2 * c[1, :-1] * dxi + c[2, :-1], rtol=tol, atol=tol)\n assert_allclose(c[1, 1:], 3 * c[0, :-1] * dxi + c[1, :-1],\n rtol=tol, atol=tol)\n\n # Check that we found a parabola, the third derivative is 0.\n if x.size == 3 and bc_start == 'not-a-knot' and bc_end == 'not-a-knot':\n assert_allclose(c[0], 0, rtol=tol, atol=tol)\n return\n\n # Check periodic boundary conditions.\n if bc_start == 'periodic':\n assert_allclose(S(x[0], 0), S(x[-1], 0), rtol=tol, atol=tol)\n assert_allclose(S(x[0], 1), S(x[-1], 1), rtol=tol, atol=tol)\n assert_allclose(S(x[0], 2), S(x[-1], 2), rtol=tol, atol=tol)\n return\n\n # Check other boundary conditions.\n if bc_start == 'not-a-knot':\n if x.size == 2:\n slope = (S(x[1]) - S(x[0])) / dx[0]\n assert_allclose(S(x[0], 1), slope, rtol=tol, atol=tol)\n else:\n assert_allclose(c[0, 0], c[0, 1], rtol=tol, atol=tol)\n elif bc_start == 'clamped':\n assert_allclose(S(x[0], 1), 0, rtol=tol, atol=tol)\n elif bc_start == 'natural':\n assert_allclose(S(x[0], 2), 0, rtol=tol, atol=tol)\n else:\n order, value = bc_start\n assert_allclose(S(x[0], order), value, rtol=tol, atol=tol)\n\n if bc_end == 'not-a-knot':\n if x.size == 2:\n slope = (S(x[1]) - S(x[0])) / dx[0]\n assert_allclose(S(x[1], 1), slope, rtol=tol, atol=tol)\n else:\n assert_allclose(c[0, -1], c[0, -2], rtol=tol, atol=tol)\n elif bc_end == 'clamped':\n assert_allclose(S(x[-1], 1), 0, rtol=tol, atol=tol)\n elif bc_end == 'natural':\n assert_allclose(S(x[-1], 2), 0, rtol=2*tol, atol=2*tol)\n else:\n order, value = bc_end\n assert_allclose(S(x[-1], order), value, rtol=tol, atol=tol)", "def assert_continuous(*curves: CubicBezierCurve) -> bool:\n if not curves:\n raise ValueError(\"CurveChecker.assert_continuous() cannot be called on an empty list\")\n\n previous_curve = curves[0]\n for curve in curves[1:]:\n if previous_curve.p1 != curve.p0:\n return False\n previous_curve = curve\n return True", "def test_closeness_centrality_after_single_area_perturbation():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.simulate_area_perturbation(['area1'])\n\n clo_cen_after_single_area_perturbation = {\n '2': 0,\n '3': 0,\n '4': 0.058823529411764705,\n '5': 0.058823529411764705,\n '6': 0.18823529411764706,\n '7': 0.11764705882352941,\n '8': 0.11764705882352941,\n '9': 0.15126050420168066,\n '10': 0.12538699690402477,\n '11': 0.1660899653979239,\n '12': 0.1859114015976761,\n '13': 0.16020025031289112,\n '14': 0.1859114015976761,\n '15': 0,\n '16': 0.1711229946524064,\n '17': 0.12981744421906694,\n '18': 0.17346938775510204,\n '19': 0.22145328719723184\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(clo_cen_after_single_area_perturbation.values())),\n np.asarray(sorted(F.G.closeness_centrality.values())),\n err_msg=\"FINAL CLOSENESS CENTRALITY failure: perturbation in area 1\")", 
"def test_closeness_centrality_after_multi_area_perturbation():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.simulate_area_perturbation(['area1', 'area2', 'area3'])\n\n clo_cen_after_multi_area_perturbation = {\n '2': 0,\n '3': 0,\n '4': 0.16666666666666666,\n '5': 0.16666666666666666,\n '6': 0.5333333333333333,\n '7': 0.3333333333333333,\n '8': 0.3333333333333333\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(clo_cen_after_multi_area_perturbation.values())),\n np.asarray(sorted(F.G.closeness_centrality.values())),\n err_msg=\n \"FINAL CLOSENESS CENTRALITY failure: perturbation in areas 1, 2, 3\")", "def main():\n df = pd.read_csv('data/ch5_q8_simulation.csv')\n\n # Part b\n plt.figure()\n plt.scatter(df['x'], df['y'])\n plt.xlabel('x')\n plt.ylabel('y')\n plt.title('Scatterplot y vs. x')\n plt.savefig('plots/8a.png')\n\n # Part c\n response_var = 'y'\n pred_vars_lin = ['x']\n pred_vars_quad = ['x', 'x2']\n pred_vars_cub = ['x', 'x2', 'x3']\n pred_vars_quar = ['x', 'x2', 'x3', 'x4']\n\n poly_terms = pd.DataFrame({'x2': np.power(df['x'], 2),\n 'x3': np.power(df['x'], 3),\n 'x4': np.power(df['x'], 4)})\n df = pd.concat([df, poly_terms], axis=1)\n\n CV_error_lin = loocv(df, response_var, pred_vars_lin)\n CV_error_quad = loocv(df, response_var, pred_vars_quad)\n CV_error_cub = loocv(df, response_var, pred_vars_cub)\n CV_error_quar = loocv(df, response_var, pred_vars_quar)\n\n print('Part c')\n print('CV error (linear) = {:.3f}'.format(CV_error_lin))\n print('CV error (quadratic) = {:.3f}'.format(CV_error_quad))\n print('CV error (cubic) = {:.3f}'.format(CV_error_cub))\n print('CV error (quartic) = {:.3f}'.format(CV_error_quar))\n\n # Part d\n np.random.seed(801)\n y = np.random.randn(100)\n x = np.random.randn(100)\n y = x - 2 * np.power(x, 2) + np.random.randn(100)\n\n df = pd.DataFrame({'x': x,\n 'x2': np.power(x, 2),\n 'x3': np.power(x, 3),\n 'x4': np.power(x, 4),\n 'y': y})\n\n CV_error_lin = loocv(df, response_var, pred_vars_lin)\n CV_error_quad = loocv(df, response_var, pred_vars_quad)\n CV_error_cub = loocv(df, response_var, pred_vars_cub)\n CV_error_quar = loocv(df, response_var, pred_vars_quar)\n\n print('Part d')\n print('CV error (linear) = {:.3f}'.format(CV_error_lin))\n print('CV error (quadratic) = {:.3f}'.format(CV_error_quad))\n print('CV error (cubic) = {:.3f}'.format(CV_error_cub))\n print('CV error (quartic) = {:.3f}'.format(CV_error_quar))\n\n # Part f\n model = sm.OLS(df.loc[:, response_var], df.loc[:, pred_vars_quar]).fit()\n print(model.summary())", "def test_closeness_centrality_after_element_perturbation():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.simulate_element_perturbation([\"1\"])\n\n clo_cen_after_element_perturbation = {\n '2': 0,\n '3': 0,\n '4': 0.058823529411764705,\n '5': 0.058823529411764705,\n '6': 0.18823529411764706,\n '7': 0.11764705882352941,\n '8': 0.11764705882352941,\n '9': 0.15126050420168066,\n '10': 0.12538699690402477,\n '11': 0.1660899653979239,\n '12': 0.1859114015976761,\n '13': 0.16020025031289112,\n '14': 0.1859114015976761,\n '15': 0,\n '16': 0.1711229946524064,\n '17': 0.12981744421906694,\n '18': 0.17346938775510204,\n '19': 0.22145328719723184\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(clo_cen_after_element_perturbation.values())),\n np.asarray(sorted(F.G.closeness_centrality.values())),\n err_msg=\"FINAL CLOSENESS CENTRALITY failure: perturbation of element 1\")", "def test_closeness_centrality_after_element_perturbation():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n 
F.simulate_element_perturbation([\"1\"])\n\n clo_cen_after_element_perturbation = {\n '2': 0,\n '3': 0,\n '4': 0.058823529411764705,\n '5': 0.058823529411764705,\n '6': 0.18823529411764706,\n '7': 0.11764705882352941,\n '8': 0.11764705882352941,\n '9': 0.15126050420168066,\n '10': 0.12538699690402477,\n '11': 0.1660899653979239,\n '12': 0.1859114015976761,\n '13': 0.16020025031289112,\n '14': 0.1859114015976761,\n '15': 0,\n '16': 0.1711229946524064,\n '17': 0.12981744421906694,\n '18': 0.17346938775510204,\n '19': 0.22145328719723184\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(clo_cen_after_element_perturbation.values())),\n np.asarray(sorted(F.G.closeness_centrality.values())),\n err_msg=\"FINAL CLOSENESS CENTRALITY failure: perturbation of element 1\")", "def test_control_cs_valid():\n sdf_graph = cs.convert_to_sdf(example.control.gauss_seidel(1., 5., 1.))\n assert sdf.validate_graph(sdf_graph)", "def test_indegree_centrality_after_multi_area_perturbation():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.simulate_area_perturbation(['area1', 'area2', 'area3'])\n\n indeg_cen_after_multi_area_perturbation = {\n '2': 0.0,\n '3': 0.0,\n '4': 0.16666666666666666,\n '5': 0.16666666666666666,\n '6': 0.5,\n '7': 0.16666666666666666,\n '8': 0.16666666666666666\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(indeg_cen_after_multi_area_perturbation.values())),\n np.asarray(sorted(F.G.indegree_centrality.values())),\n err_msg=\n \"FINAL INDEGREE CENTRALITY failure: perturbation in areas 1, 2, 3\")", "def test_coherency_regularized():\r\n\r\n for method in methods:\r\n f, c = tsa.coherency_regularized(tseries, 0.05, 1000,\r\n csd_method=method)\r\n npt.assert_array_almost_equal(c[0, 1], c[1, 0].conjugate())", "def test_cov_changebasis(self):\n C = so.changebasis(2)\n cov_xp = np.array(\n [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]\n )\n\n cov_symmetric = np.array(\n [[0, 2, 1, 3], [8, 10, 9, 11], [4, 6, 5, 7], [12, 14, 13, 15]]\n )\n\n assert np.all(C @ cov_xp @ C.T == cov_symmetric)\n assert np.all(C.T @ cov_symmetric @ C == cov_xp)", "def test_indegree_centrality_after_single_area_perturbation():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.simulate_area_perturbation(['area1'])\n\n indeg_cen_after_single_area_perturbation = {\n '2': 0.0,\n '3': 0.0,\n '4': 0.058823529411764705,\n '5': 0.058823529411764705,\n '6': 0.17647058823529413,\n '7': 0.058823529411764705,\n '8': 0.058823529411764705,\n '9': 0.11764705882352941,\n '10': 0.058823529411764705,\n '11': 0.11764705882352941,\n '12': 0.11764705882352941,\n '13': 0.11764705882352941,\n '14': 0.11764705882352941,\n '15': 0.0,\n '16': 0.11764705882352941,\n '17': 0.058823529411764705,\n '18': 0.058823529411764705,\n '19': 0.17647058823529413\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(indeg_cen_after_single_area_perturbation.values())),\n np.asarray(sorted(F.G.indegree_centrality.values())),\n err_msg=\"FINAL INDEGREE CENTRALITY failure: perturbation in area 1\")", "def test_subtract_curve():\n c1 = Curve(data=data_num, mnemonic='test')\n c2 = c1 - 100\n assert (c2.df.iloc[0][0] + 99) < 0.0001", "def test_indegree_centrality_after_element_perturbation_initially_closed(self):\n F = FaultDiagnosis(\"tests/TOY_graph_initiallyopen.csv\")\n F.simulate_element_perturbation([\"1\"])\n\n indeg_cen_2closed = {\n '1': 0.0,\n '2': 0.05555555555555555,\n '3': 0.0,\n '4': 0.05555555555555555,\n '5': 0.05555555555555555,\n '6': 0.16666666666666666,\n '7': 0.05555555555555555,\n 
'8': 0.05555555555555555,\n '9': 0.1111111111111111,\n '10': 0.05555555555555555,\n '11': 0.1111111111111111,\n '12': 0.1111111111111111,\n '13': 0.1111111111111111,\n '14': 0.1111111111111111,\n '15': 0.0,\n '16': 0.1111111111111111,\n '17': 0.05555555555555555,\n '18': 0.05555555555555555,\n '19': 0.16666666666666666\n }\n\n indeg_cen_3closed = {\n '1': 0.0,\n '2': 0.0,\n '3': 0.05555555555555555,\n '4': 0.05555555555555555,\n '5': 0.05555555555555555,\n '6': 0.16666666666666666,\n '7': 0.05555555555555555,\n '8': 0.05555555555555555,\n '9': 0.1111111111111111,\n '10': 0.05555555555555555,\n '11': 0.1111111111111111,\n '12': 0.1111111111111111,\n '13': 0.1111111111111111,\n '14': 0.1111111111111111,\n '15': 0.0,\n '16': 0.1111111111111111,\n '17': 0.05555555555555555,\n '18': 0.05555555555555555,\n '19': 0.16666666666666666\n }\n\n if F.G.final_status == {'2': 1, '3': 0}:\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(indeg_cen_2closed.values())),\n np.asarray(sorted(F.G.indegree_centrality.values())),\n err_msg=\"FINAL INDEGREE CENTRALITY failure: perturbation of element 1\")\n else:\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(indeg_cen_3closed.values())),\n np.asarray(sorted(F.G.indegree_centrality.values())),\n err_msg=\"FINAL INDEGREE CENTRALITY failure: perturbation of element 1\")", "def collinear(cls, *vectors, e=10e-10):\n l = len(vectors)\n if l == 2:\n v1 = vectors[0]\n v2 = vectors[1]\n return abs(v1.x * v2.y - v1.y - v2.x) < e\n else:\n for i in range(l):\n for j in range(i + 1, l):\n if not cls.collinear(vectors[i], vectors[j]):\n return False\n return True", "def test_degree_centrality_after_element_perturbation_initially_closed(self):\n F = FaultDiagnosis(\"tests/TOY_graph_initiallyopen.csv\")\n F.simulate_element_perturbation([\"1\"])\n\n deg_cen_2closed = {\n '1': 0.05555555555555555,\n '2': 0.1111111111111111,\n '3': 0.05555555555555555,\n '4': 0.1111111111111111,\n '5': 0.1111111111111111,\n '6': 0.2777777777777778,\n '7': 0.1111111111111111,\n '8': 0.16666666666666666,\n '9': 0.16666666666666666,\n '10': 0.1111111111111111,\n '11': 0.16666666666666666,\n '12': 0.2222222222222222,\n '13': 0.2222222222222222,\n '14': 0.2777777777777778,\n '15': 0.05555555555555555,\n '16': 0.16666666666666666,\n '17': 0.16666666666666666,\n '18': 0.05555555555555555,\n '19': 0.2777777777777778\n }\n\n deg_cen_3closed = {\n '1': 0.05555555555555555,\n '2': 0.05555555555555555,\n '3': 0.1111111111111111,\n '4': 0.1111111111111111,\n '5': 0.1111111111111111,\n '6': 0.2777777777777778,\n '7': 0.1111111111111111,\n '8': 0.16666666666666666,\n '9': 0.16666666666666666,\n '10': 0.1111111111111111,\n '11': 0.16666666666666666,\n '12': 0.2222222222222222,\n '13': 0.2222222222222222,\n '14': 0.2777777777777778,\n '15': 0.05555555555555555,\n '16': 0.16666666666666666,\n '17': 0.16666666666666666,\n '18': 0.05555555555555555,\n '19': 0.2777777777777778\n }\n\n if F.G.final_status == {'2': 1, '3': 0}:\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(deg_cen_2closed.values())),\n np.asarray(sorted(F.G.degree_centrality.values())),\n err_msg=\"FINAL DEGREE CENTRALITY failure: perturbation of element 1\")\n else:\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(deg_cen_3closed.values())),\n np.asarray(sorted(F.G.degree_centrality.values())),\n err_msg=\"FINAL DEGREE CENTRALITY failure: perturbation of element 1\")", "def test_reliability2contingency():\n import matplotlib.pyplot as plt\n import veripy.contingency as contingency\n \n fcsts = 
np.array([100., 90, 80, 70, 60, 50, 40, 30, 20, 10, 0])\n obs = np.array([10., 9, 8, 7, 6, 5, 4, 3, 2, 1, 0])\n obs = np.ones_like(fcsts)\n obs = np.minimum(obs, fcsts)\n\n a, b, c, d = reliability2contingency(fcsts, obs)\n pod = contingency.pod(a, b, c, d)\n pofd = contingency.pofd(a, b, c, d)\n \n fig = plt.figure(figsize=(12,12))\n ax = plt.subplot(111)\n ax.plot(pofd, pod)\n diag = np.linspace(0, 1)\n ax.plot(diag, diag, color='black', linestyle='dashed', linewidth=0.5)\n ax.set_yticks(np.arange(0, 1+1e-6, .1))\n ax.set_xticks(ax.get_yticks())\n ax.set_aspect('equal')\n ax.grid()\n ax.set_title('Area Under Curve: %.4f' % (-1 * np.trapz(pod, pofd)))\n plt.show()", "def test_multiple_conditions(self):\n matrices = [\n np.array([[0, 0.6], [1.0, 0.0]]),\n np.array([[0, 0.0], [1.0, 0.0]]),\n np.array([[0, 0.1], [1.0, 0.0]]),\n ]\n coefficients = get_importance_coeffs(['A', 'B'], ['A'], matrices)\n assert coefficients['A'] == 1.0\n assert coefficients['B'] == 0.6", "def test_gradient_convergence(self):\n pass", "def checkconvexity(self): # 3\n res = self.__obj.checkconvexity()\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)", "def test_degree_centrality_after_multi_area_perturbation():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.simulate_area_perturbation(['area1', 'area2', 'area3'])\n\n deg_cen_after_multi_area_perturbation = {\n '2': 0.16666666666666666,\n '3': 0.16666666666666666,\n '4': 0.3333333333333333,\n '5': 0.16666666666666666,\n '6': 0.8333333333333334,\n '7': 0.3333333333333333,\n '8': 0.3333333333333333\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(deg_cen_after_multi_area_perturbation.values())),\n np.asarray(sorted(F.G.degree_centrality.values())),\n err_msg=\n \"FINAL DEGREE CENTRALITY failure: perturbation in areas 1, 2, 3\")", "def test_coherence_matlab():\r\n\r\n ts = np.loadtxt(os.path.join(test_dir_path, 'tseries12.txt'))\r\n\r\n ts0 = ts[1]\r\n ts1 = ts[0]\r\n\r\n method = {}\r\n method['this_method'] = 'welch'\r\n method['NFFT'] = 64\r\n method['Fs'] = 1.0\r\n method['noverlap'] = method['NFFT'] / 2\r\n\r\n ttt = np.vstack([ts0, ts1])\r\n f, cxy_mlab = tsa.coherence(ttt, csd_method=method)\r\n cxy_matlab = np.loadtxt(os.path.join(test_dir_path, 'cxy_matlab.txt'))\r\n\r\n npt.assert_almost_equal(cxy_mlab[0][1], cxy_matlab, decimal=5)", "def test_gele_curve():\n c1 = Curve(data=data_num, mnemonic='test')\n c2 = c1 < 50\n assert c2.df.iloc[0][0]\n\n c2 = c1 > 50\n assert c2.df.iloc[-1][0]", "def test_degree_centrality_after_element_perturbation_isolating():\n F = FaultDiagnosis(\"tests/TOY_graph_nofaultresistant.csv\")\n F.simulate_element_perturbation([\"1\"])\n\n deg_cen_after_element_perturbation = {\n '2': 0.058823529411764705,\n '3': 0.058823529411764705,\n '4': 0.11764705882352941,\n '5': 0.11764705882352941,\n '6': 0.29411764705882354,\n '7': 0.11764705882352941,\n '8': 0.17647058823529413,\n '9': 0.17647058823529413,\n '10': 0.11764705882352941,\n '11': 0.17647058823529413,\n '12': 0.23529411764705882,\n '13': 0.23529411764705882,\n '14': 0.29411764705882354,\n '15': 0.058823529411764705,\n '16': 0.17647058823529413,\n '17': 0.17647058823529413,\n '18': 0.058823529411764705,\n '19': 0.29411764705882354\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(deg_cen_after_element_perturbation.values())),\n np.asarray(sorted(F.G.degree_centrality.values())),\n err_msg=\"FINAL DEGREE CENTRALITY failure: perturbation of element 1\")", "def test_cx_equivalence_1cx(self, seed=1):\n state = 
np.random.default_rng(seed)\n rnd = 2 * np.pi * state.random(size=12)\n\n qr = QuantumRegister(2, name=\"q\")\n qc = QuantumCircuit(qr)\n\n qc.u(rnd[0], rnd[1], rnd[2], qr[0])\n qc.u(rnd[3], rnd[4], rnd[5], qr[1])\n\n qc.cx(qr[1], qr[0])\n\n qc.u(rnd[6], rnd[7], rnd[8], qr[0])\n qc.u(rnd[9], rnd[10], rnd[11], qr[1])\n\n sim = UnitarySimulatorPy()\n unitary = execute(qc, sim).result().get_unitary()\n self.assertEqual(two_qubit_cnot_decompose.num_basis_gates(unitary), 1)\n self.assertTrue(Operator(two_qubit_cnot_decompose(unitary)).equiv(unitary))", "def test_indegree_centrality_after_element_perturbation_isolating():\n F = FaultDiagnosis(\"tests/TOY_graph_nofaultresistant.csv\")\n F.simulate_element_perturbation([\"1\"])\n\n indeg_cen_after_element_perturbation = {\n '2': 0.0,\n '3': 0.0,\n '4': 0.058823529411764705,\n '5': 0.058823529411764705,\n '6': 0.17647058823529413,\n '7': 0.058823529411764705,\n '8': 0.058823529411764705,\n '9': 0.11764705882352941,\n '10': 0.058823529411764705,\n '11': 0.11764705882352941,\n '12': 0.11764705882352941,\n '13': 0.11764705882352941,\n '14': 0.11764705882352941,\n '15': 0.0,\n '16': 0.11764705882352941,\n '17': 0.058823529411764705,\n '18': 0.058823529411764705,\n '19': 0.17647058823529413\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(indeg_cen_after_element_perturbation.values())),\n np.asarray(sorted(F.G.indegree_centrality.values())),\n err_msg=\"FINAL INDEGREE CENTRALITY failure: perturbation of element 1\")", "def checkselfdual(self):\n\n self.selfdual = True\n for n in range(1,min(8,len(self.dirichlet_coefficients))):\n if abs(imag_part(self.dirichlet_coefficients[n]/self.dirichlet_coefficients[0])) > 0.00001:\n self.selfdual = False", "def test_outdegree_centrality_after_element_perturbation_isolating():\n F = FaultDiagnosis(\"tests/TOY_graph_nofaultresistant.csv\")\n F.simulate_element_perturbation([\"1\"])\n\n outdeg_cen_after_element_perturbation = {\n '2': 0.058823529411764705,\n '3': 0.058823529411764705,\n '4': 0.058823529411764705,\n '5': 0.058823529411764705,\n '6': 0.11764705882352941,\n '7': 0.058823529411764705,\n '8': 0.11764705882352941,\n '9': 0.058823529411764705,\n '10': 0.058823529411764705,\n '11': 0.058823529411764705,\n '12': 0.11764705882352941,\n '13': 0.11764705882352941,\n '14': 0.17647058823529413,\n '15': 0.058823529411764705,\n '16': 0.058823529411764705,\n '17': 0.11764705882352941,\n '18': 0.0,\n '19': 0.11764705882352941\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(outdeg_cen_after_element_perturbation.values())),\n np.asarray(sorted(F.G.outdegree_centrality.values())),\n err_msg=\"FINAL OUTDEGREE CENTRALITY failure: perturbation of element 1\")", "def test_compare_Expsmall_line(self):\r\n # exp function crosses over to line func as A_shell-->0\r\n self.model.setParam(\"A_shell1\", 0.000001)\r\n self.model2.setParam(\"A_shell1\", 1)\r\n # change the function to a line function\r\n self.model2.setParam(\"func_shell1\", 1)\r\n \r\n #Compare exp(A=0.000001) to linear (where A_shell is null) function \r\n self.assertAlmostEqual(self.model.run(0.1),self.model2.run(0.1),4)", "def test_degree_centrality_after_single_area_perturbation():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.simulate_area_perturbation(['area1'])\n\n deg_cen_after_single_area_perturbation = {\n '2': 0.058823529411764705,\n '3': 0.058823529411764705,\n '4': 0.11764705882352941,\n '5': 0.11764705882352941,\n '6': 0.29411764705882354,\n '7': 0.11764705882352941,\n '8': 0.17647058823529413,\n '9': 
0.17647058823529413,\n '10': 0.11764705882352941,\n '11': 0.17647058823529413,\n '12': 0.23529411764705882,\n '13': 0.23529411764705882,\n '14': 0.29411764705882354,\n '15': 0.058823529411764705,\n '16': 0.17647058823529413,\n '17': 0.17647058823529413,\n '18': 0.058823529411764705,\n '19': 0.29411764705882354\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(deg_cen_after_single_area_perturbation.values())),\n np.asarray(sorted(F.G.degree_centrality.values())),\n err_msg=\"FINAL DEGREE CENTRALITY failure: perturbation in area 1\")", "def _CheckConvergence(self):\n self.is_converged = True\n self.are_converged[0] = (abs(self.delta_e) < self.conv_delta_e)\n self.are_converged[1] = (self.grad_rms < self.conv_grad_rms)\n self.are_converged[2] = (self.grad_max < self.conv_grad_max)\n self.are_converged[3] = (self.disp_rms < self.conv_disp_rms)\n self.are_converged[4] = (self.disp_max < self.conv_disp_max)\n for i in range(5):\n if self.must_converge[i] and not self.are_converged[i]:\n self.is_converged = False", "def _check_curve(layer: ogr.Layer) -> None:\n # Check if the feature geometry is polygonal:\n feature_defn = layer.GetLayerDefn()\n layer.ResetReading()\n feature = layer.GetNextFeature()\n while feature is not None:\n geom = feature.GetGeometryRef()\n name_wkt = geom.ExportToWkt()\n\n # Approximate a curvature by a polygon geometry:\n if 'curv' in name_wkt.lower():\n linear_geom = geom.GetLinearGeometry()\n new_feature = ogr.Feature(feature_defn)\n new_feature.SetGeometryDirectly(linear_geom)\n layer.CreateFeature(new_feature)\n layer.DeleteFeature(feature.GetFID())\n\n feature = layer.GetNextFeature()", "def is_on_curve(self):\n if self.infinity:\n return True\n left = self.y * self.y\n right = self.x * self.x * self.x + self.ec.a * self.x + self.ec.b\n\n return left == right", "def test_cx_equivalence_2cx(self, seed=2):\n state = np.random.default_rng(seed)\n rnd = 2 * np.pi * state.random(size=18)\n\n qr = QuantumRegister(2, name=\"q\")\n qc = QuantumCircuit(qr)\n\n qc.u(rnd[0], rnd[1], rnd[2], qr[0])\n qc.u(rnd[3], rnd[4], rnd[5], qr[1])\n\n qc.cx(qr[1], qr[0])\n\n qc.u(rnd[6], rnd[7], rnd[8], qr[0])\n qc.u(rnd[9], rnd[10], rnd[11], qr[1])\n\n qc.cx(qr[0], qr[1])\n\n qc.u(rnd[12], rnd[13], rnd[14], qr[0])\n qc.u(rnd[15], rnd[16], rnd[17], qr[1])\n\n sim = UnitarySimulatorPy()\n unitary = execute(qc, sim).result().get_unitary()\n self.assertEqual(two_qubit_cnot_decompose.num_basis_gates(unitary), 2)\n self.assertTrue(Operator(two_qubit_cnot_decompose(unitary)).equiv(unitary))", "def test_add_curve():\n c1 = Curve(data=data_num, mnemonic='test')\n c2 = c1 + 100\n assert (c2.df.iloc[0][0] - 101) < 0.0001", "def test_outdegree_centrality_after_element_perturbation_initially_closed(self):\n F = FaultDiagnosis(\"tests/TOY_graph_initiallyopen.csv\")\n F.simulate_element_perturbation([\"1\"])\n\n outdeg_cen_2closed = {\n '1': 0.05555555555555555,\n '2': 0.05555555555555555,\n '3': 0.05555555555555555,\n '4': 0.05555555555555555,\n '5': 0.05555555555555555,\n '6': 0.1111111111111111,\n '7': 0.05555555555555555,\n '8': 0.1111111111111111,\n '9': 0.05555555555555555,\n '10': 0.05555555555555555,\n '11': 0.05555555555555555,\n '12': 0.1111111111111111,\n '13': 0.1111111111111111,\n '14': 0.16666666666666666,\n '15': 0.05555555555555555,\n '16': 0.05555555555555555,\n '17': 0.1111111111111111,\n '18': 0.0,\n '19': 0.1111111111111111\n }\n\n outdeg_cen_3closed = {\n '1': 0.05555555555555555,\n '2': 0.05555555555555555,\n '3': 0.05555555555555555,\n '4': 0.05555555555555555,\n 
'5': 0.05555555555555555,\n '6': 0.1111111111111111,\n '7': 0.05555555555555555,\n '8': 0.1111111111111111,\n '9': 0.05555555555555555,\n '10': 0.05555555555555555,\n '11': 0.05555555555555555,\n '12': 0.1111111111111111,\n '13': 0.1111111111111111,\n '14': 0.16666666666666666,\n '15': 0.05555555555555555,\n '16': 0.05555555555555555,\n '17': 0.1111111111111111,\n '18': 0.0,\n '19': 0.1111111111111111\n }\n\n if F.G.final_status == {'2': 1, '3': 0}:\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(outdeg_cen_2closed.values())),\n np.asarray(sorted(F.G.outdegree_centrality.values())),\n err_msg=\"FINAL OUTDEGREE CENTRALITY failure: perturbation of element 1\")\n else:\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(outdeg_cen_3closed.values())),\n np.asarray(sorted(F.G.outdegree_centrality.values())),\n err_msg=\"FINAL OUTDEGREE CENTRALITY failure: perturbation of element 1\")", "def test_outdegree_centrality_after_single_area_perturbation():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.simulate_area_perturbation(['area1'])\n\n outdeg_cen_after_single_area_perturbation = {\n '2': 0.058823529411764705,\n '3': 0.058823529411764705,\n '4': 0.058823529411764705,\n '5': 0.058823529411764705,\n '6': 0.11764705882352941,\n '7': 0.058823529411764705,\n '8': 0.11764705882352941,\n '9': 0.058823529411764705,\n '10': 0.058823529411764705,\n '11': 0.058823529411764705,\n '12': 0.11764705882352941,\n '13': 0.11764705882352941,\n '14': 0.17647058823529413,\n '15': 0.058823529411764705,\n '16': 0.058823529411764705,\n '17': 0.11764705882352941,\n '18': 0.0,\n '19': 0.11764705882352941\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(outdeg_cen_after_single_area_perturbation.values())),\n np.asarray(sorted(F.G.outdegree_centrality.values())),\n err_msg=\"FINAL OUTDEGREE CENTRALITY failure: perturbation in area 1\")", "def test_cx_equivalence_0cx(self, seed=0):\n state = np.random.default_rng(seed)\n rnd = 2 * np.pi * state.random(size=6)\n\n qr = QuantumRegister(2, name=\"q\")\n qc = QuantumCircuit(qr)\n\n qc.u(rnd[0], rnd[1], rnd[2], qr[0])\n qc.u(rnd[3], rnd[4], rnd[5], qr[1])\n\n sim = UnitarySimulatorPy()\n unitary = execute(qc, sim).result().get_unitary()\n self.assertEqual(two_qubit_cnot_decompose.num_basis_gates(unitary), 0)\n self.assertTrue(Operator(two_qubit_cnot_decompose(unitary)).equiv(unitary))", "def part1c_0():\n xs = exampleInput\n T = submission.computeEdgeMarginals(simpleCRF, xs)\n for t in T:\n grader.requireIsEqual( 1.0, sum(t.values()) )", "def ok(self, point):\n [x1, x2, x3, x4, x5, x6] = point.decisions\n if x1 + x2 -2 < 0:\n return False\n if 6 - x1 - x2 < 0:\n return False\n if 2 - x2 + x1 < 0:\n return False\n if 2 - x1 + 3*x2 < 0:\n return False\n if 4 - (x3 - 3)**2 - x4 < 0:\n return False\n if (x5 - 3)**3 + x6 - 4 < 0:\n return False\n for i, d in enumerate(point.decisions):\n if d < self.decisions[i].low or d > self.decisions[i].high:\n print i, d, self.decisions[i].low, self.decisions[i].high\n return False\n return True", "def _check_convergence(current_position,\n next_position,\n current_objective,\n next_objective,\n next_gradient,\n grad_tolerance,\n f_relative_tolerance,\n x_tolerance):\n grad_converged = _check_within_tolerance(next_gradient, grad_tolerance)\n x_converged = _check_within_tolerance(next_position - current_position,\n x_tolerance)\n f_converged = _check_within_tolerance(\n next_objective - current_objective,\n f_relative_tolerance * current_objective)\n return grad_converged | x_converged | f_converged", "def 
new_convergence_function(previous_variables, new_variables, conv_ctr, conv_ctr_cap=20):\n for pre, new in zip(previous_variables, new_variables):\n dif1 = np.abs(new) - 0.9 * np.abs(pre)\n dif2 = 1.1 * np.abs(pre) - np.abs(new)\n if not (dif1 > 0).all() or not (dif2 > 0).all():\n return 0, False\n return conv_ctr + 1, conv_ctr + 1 > conv_ctr_cap", "def test_csendes(self):\n fun = get_problem('csendes', self.dimension, -1.0, 1.0)\n self.assertEqual(fun(self.array), 0.0)", "def test_divide_curve():\n c1 = Curve(data=data_num, mnemonic='test')\n c2 = c1 / 2\n assert (c2.df.iloc[0][0] - 0.5) < 0.0001", "def test_caekl_2(d):\n rvs = [[0], [1]]\n assert I(d, rvs) == pytest.approx(J(d, rvs))", "def test_outdegree_centrality_after_multi_area_perturbation():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.simulate_area_perturbation(['area1', 'area2', 'area3'])\n\n outdeg_cen_after_multi_area_perturbation = {\n '2': 0.16666666666666666,\n '3': 0.16666666666666666,\n '4': 0.16666666666666666,\n '5': 0.0,\n '6': 0.3333333333333333,\n '7': 0.16666666666666666,\n '8': 0.16666666666666666\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(outdeg_cen_after_multi_area_perturbation.values())),\n np.asarray(sorted(F.G.outdegree_centrality.values())),\n err_msg=\n \"FINAL OUTDEGREE CENTRALITY failure: perturbation in areas 1, 2, 3\")", "def test_closeness_centrality_after_element_perturbation_parallel():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\", parallel=True)\n F.simulate_element_perturbation([\"1\"])\n\n clo_cen_after_element_perturbation = {\n '2': 0,\n '3': 0,\n '4': 0.058823529411764705,\n '5': 0.058823529411764705,\n '6': 0.18823529411764706,\n '7': 0.11764705882352941,\n '8': 0.11764705882352941,\n '9': 0.15126050420168066,\n '10': 0.12538699690402477,\n '11': 0.1660899653979239,\n '12': 0.1859114015976761,\n '13': 0.16020025031289112,\n '14': 0.1859114015976761,\n '15': 0,\n '16': 0.1711229946524064,\n '17': 0.12981744421906694,\n '18': 0.17346938775510204,\n '19': 0.22145328719723184\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(clo_cen_after_element_perturbation.values())),\n np.asarray(sorted(F.G.closeness_centrality.values())),\n err_msg=\"FINAL CLOSENESS CENTRALITY failure: perturbation of element 1\")", "def test_closeness_centrality_after_element_perturbation_parallel():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\", parallel=True)\n F.simulate_element_perturbation([\"1\"])\n\n clo_cen_after_element_perturbation = {\n '2': 0,\n '3': 0,\n '4': 0.058823529411764705,\n '5': 0.058823529411764705,\n '6': 0.18823529411764706,\n '7': 0.11764705882352941,\n '8': 0.11764705882352941,\n '9': 0.15126050420168066,\n '10': 0.12538699690402477,\n '11': 0.1660899653979239,\n '12': 0.1859114015976761,\n '13': 0.16020025031289112,\n '14': 0.1859114015976761,\n '15': 0,\n '16': 0.1711229946524064,\n '17': 0.12981744421906694,\n '18': 0.17346938775510204,\n '19': 0.22145328719723184\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(clo_cen_after_element_perturbation.values())),\n np.asarray(sorted(F.G.closeness_centrality.values())),\n err_msg=\"FINAL CLOSENESS CENTRALITY failure: perturbation of element 1\")", "def test_closeness_centrality_after_element_perturbation_initially_closed(self):\n F = FaultDiagnosis(\"tests/TOY_graph_initiallyopen.csv\")\n F.simulate_element_perturbation([\"1\"])\n\n clo_cen_2closed = {\n '1': 0,\n '2': 0.05555555555555555,\n '3': 0,\n '4': 0.07407407407407407,\n '5': 0.05555555555555555,\n '6': 0.1736111111111111,\n '7': 
0.11574074074074076,\n '8': 0.11574074074074076,\n '9': 0.14327485380116958,\n '10': 0.12077294685990338,\n '11': 0.15648148148148147,\n '12': 0.17451690821256038,\n '13': 0.15146750524109012,\n '14': 0.17451690821256038,\n '15': 0,\n '16': 0.16071428571428573,\n '17': 0.125,\n '18': 0.16363636363636364,\n '19': 0.20584045584045585\n }\n\n clo_cen_3closed = {\n '1': 0,\n '2': 0,\n '3': 0.05555555555555555,\n '4': 0.05555555555555555,\n '5': 0.07407407407407407,\n '6': 0.17777777777777778,\n '7': 0.1111111111111111,\n '8': 0.1111111111111111,\n '9': 0.14285714285714285,\n '10': 0.11842105263157894,\n '11': 0.17386831275720163,\n '12': 0.1866925064599483,\n '13': 0.16055555555555556,\n '14': 0.1866925064599483,\n '15': 0,\n '16': 0.1616161616161616,\n '17': 0.12260536398467432,\n '18': 0.17307692307692307,\n '19': 0.22299382716049382\n }\n\n if F.G.final_status == {'2': 1, '3': 0}:\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(clo_cen_2closed.values())),\n np.asarray(sorted(F.G.closeness_centrality.values())),\n err_msg=\"FINAL CLOSENESS CENTRALITY failure: perturbation of element 1\")\n else:\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(clo_cen_3closed.values())),\n np.asarray(sorted(F.G.closeness_centrality.values())),\n err_msg=\"FINAL CLOSENESS CENTRALITY failure: perturbation of element 1\")", "def test_outdegree_centrality_after_element_perturbation():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.simulate_element_perturbation([\"1\"])\n\n\n outdeg_cen_after_element_perturbation = {\n '2': 0.058823529411764705,\n '3': 0.058823529411764705,\n '4': 0.058823529411764705,\n '5': 0.058823529411764705,\n '6': 0.11764705882352941,\n '7': 0.058823529411764705,\n '8': 0.11764705882352941,\n '9': 0.058823529411764705,\n '10': 0.058823529411764705,\n '11': 0.058823529411764705,\n '12': 0.11764705882352941,\n '13': 0.11764705882352941,\n '14': 0.17647058823529413,\n '15': 0.058823529411764705,\n '16': 0.058823529411764705,\n '17': 0.11764705882352941,\n '18': 0.0,\n '19': 0.11764705882352941\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(outdeg_cen_after_element_perturbation.values())),\n np.asarray(sorted(F.G.outdegree_centrality.values())),\n err_msg=\"FINAL OUTDEGREE CENTRALITY failure: perturbation of element 1\")", "def test_defect_calculation():\n slope1, slope2 = 2., 3.\n step1, step2 = Fraction(5), Fraction(7)\n cosim = ramp_cosimulation(slope1, slope2, step1, step2)\n t_end = Fraction(20)\n defect = cs.evaluate(cosim, t_end)\n\n alpha = Fraction(int(lcm(step1.numerator, step2.numerator)),\n int(gcd(step1.denominator, step2.denominator)))\n num1, num2 = tuple(map(int, [alpha / step for step in (step1, step2)]))\n big = max(num1, num2) + 1\n small = min(num1, num2) - 1\n assert defect.connection['Ramp1', 'u'] > small * slope2 * step2\n assert defect.connection['Ramp1', 'u'] < big * slope2 * step2\n assert defect.connection['Ramp2', 'u'] > small * slope1 * step1\n assert defect.connection['Ramp2', 'u'] < big * slope1 * step1\n\n assert defect.output['Ramp1', 'y'] == pytest.approx(slope1 * step1)\n assert defect.output['Ramp2', 'y'] == pytest.approx(slope2 * step2)", "def check_deterministic_constraints(self, x):\n return True", "def check_deterministic_constraints(self, x):\n return True", "def _check_optimality(self):\n\n dual_obj = -0.5* np.dot(self.beta, self.beta) + np.sum(self.alpha)\n\n prim_obj = 0.5* np.dot(self.beta, self.beta) + self.C * np.sum( np.maximum(1 - np.multiply(np.dot(self.X, self.beta), self.y), 0))\n\n # print 
(prim_obj - dual_obj)\n self.gap = prim_obj - dual_obj\n if self.gap <= 1e-6:\n return True\n else:\n return False", "def sanity_check(self):\n score = 0\n curvatures = self._curvature()\n if abs(curvatures[0] - curvatures[1]) / max(curvatures) > 0.15:\n # difference in curvature is more than 15%\n score -= 1\n\n diff_std = np.std(self.right_fitx - self.left_fitx)\n if diff_std > 30:\n # std of the difference between the right lane and left lane is more than 30 pixel\n score -= 1\n\n # roughly parallel\n if abs(self.left_fit[0] - self.right_fit[0]) / max(self.left_fit[0], self.right_fit[0]) > 0.15:\n # difference in slope is more than 15%\n score -= 1\n\n return score", "def curves_CV_cost(theta):\n return (1 / (2 * Xval.shape[0])) * sum((curves_hypothesis(theta, Xval.shape[0], inputs=Xval) - yval.ravel()) ** 2)", "def test_compare_different_expectations(self):\n\n pd_single = norm(0, 1)\n pd = []\n for i in range(0, 3):\n pd.append(pd_single)\n meas = [-1, 0, 1]\n meanCRIGN1, singleCRIGN1 = crign.crign(pd, meas)\n\n pd2 = []\n for i in range(0, 3):\n pd2.append(norm(i, 1))\n meas2 = [-1, 1, 3]\n\n meanCRIGN2, singleCRIGN2 = crign.crign(pd2, meas2)\n\n is_good = np.isclose(singleCRIGN1, singleCRIGN2).all()\n assert_true(is_good, msg=\"Relation of individual CRIGN values should return roughly the same value.\")", "def is_ccw(point_a, point_b, point_c):\r\n return is_on_line(point_a, point_b, point_c) > 0", "def test_cx_equivalence_3cx(self, seed=3):\n state = np.random.default_rng(seed)\n rnd = 2 * np.pi * state.random(size=24)\n\n qr = QuantumRegister(2, name=\"q\")\n qc = QuantumCircuit(qr)\n\n qc.u(rnd[0], rnd[1], rnd[2], qr[0])\n qc.u(rnd[3], rnd[4], rnd[5], qr[1])\n\n qc.cx(qr[1], qr[0])\n\n qc.u(rnd[6], rnd[7], rnd[8], qr[0])\n qc.u(rnd[9], rnd[10], rnd[11], qr[1])\n\n qc.cx(qr[0], qr[1])\n\n qc.u(rnd[12], rnd[13], rnd[14], qr[0])\n qc.u(rnd[15], rnd[16], rnd[17], qr[1])\n\n qc.cx(qr[1], qr[0])\n\n qc.u(rnd[18], rnd[19], rnd[20], qr[0])\n qc.u(rnd[21], rnd[22], rnd[23], qr[1])\n\n sim = UnitarySimulatorPy()\n unitary = execute(qc, sim).result().get_unitary()\n self.assertEqual(two_qubit_cnot_decompose.num_basis_gates(unitary), 3)\n self.assertTrue(Operator(two_qubit_cnot_decompose(unitary)).equiv(unitary))", "def test_conservation(self):\n self.c_s_tot = (\n self.c_s_n_tot(self.solution.t)\n + self.c_s_p_tot(self.solution.t)\n + self.c_SEI_n_tot(self.solution.t)\n + self.c_SEI_p_tot(self.solution.t)\n + self.c_Li_n_tot(self.solution.t)\n + self.c_Li_p_tot(self.solution.t)\n )\n diff = (self.c_s_tot[1:] - self.c_s_tot[:-1]) / self.c_s_tot[:-1]\n if \"profile\" in self.model.options[\"particle\"]:\n np.testing.assert_array_almost_equal(diff, 0, decimal=10)\n elif self.model.options[\"surface form\"] == \"differential\":\n np.testing.assert_array_almost_equal(diff, 0, decimal=10)\n elif self.model.options[\"SEI\"] == \"ec reaction limited\":\n np.testing.assert_array_almost_equal(diff, 0, decimal=12)\n else:\n np.testing.assert_array_almost_equal(diff, 0, decimal=15)", "def test_failure_and_non_convergence(self):\n\n # Set up the problem of finding the square roots of three numbers.\n constants = np.array([4.0, 9.0, 16.0])\n # Choose a bad initial position.\n initial_values = np.zeros(len(constants))\n\n def objective_and_gradient(values):\n objective = values**2 - constants\n gradient = 2.0 * values\n return objective, gradient\n\n # Obtain and evaluate a tensor containing the roots.\n roots = newton_root_finder(objective_and_gradient, initial_values)\n _, converged, failed = 
self.evaluate(roots)\n\n # Reference values - we should not have converged and should have failed.\n converged_bench = np.array([False, False, False])\n failed_bench = np.array([True, True, True])\n\n # Assert that the values we obtained are close to the true values.\n np.testing.assert_array_equal(converged, converged_bench)\n np.testing.assert_array_equal(failed, failed_bench)", "def test_indegree_centrality_after_element_perturbation():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.simulate_element_perturbation([\"1\"])\n\n indeg_cen_after_element_perturbation = {\n '2': 0.0,\n '3': 0.0,\n '4': 0.058823529411764705,\n '5': 0.058823529411764705,\n '6': 0.17647058823529413,\n '7': 0.058823529411764705,\n '8': 0.058823529411764705,\n '9': 0.11764705882352941,\n '10': 0.058823529411764705,\n '11': 0.11764705882352941,\n '12': 0.11764705882352941,\n '13': 0.11764705882352941,\n '14': 0.11764705882352941,\n '15': 0.0,\n '16': 0.11764705882352941,\n '17': 0.058823529411764705,\n '18': 0.058823529411764705,\n '19': 0.17647058823529413\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(indeg_cen_after_element_perturbation.values())),\n np.asarray(sorted(F.G.indegree_centrality.values())),\n err_msg=\"FINAL INDEGREE CENTRALITY failure: perturbation of element 1\")", "def test_indegree_centrality_after_element_perturbation():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.simulate_element_perturbation([\"1\"])\n\n indeg_cen_after_element_perturbation = {\n '2': 0.0,\n '3': 0.0,\n '4': 0.058823529411764705,\n '5': 0.058823529411764705,\n '6': 0.17647058823529413,\n '7': 0.058823529411764705,\n '8': 0.058823529411764705,\n '9': 0.11764705882352941,\n '10': 0.058823529411764705,\n '11': 0.11764705882352941,\n '12': 0.11764705882352941,\n '13': 0.11764705882352941,\n '14': 0.11764705882352941,\n '15': 0.0,\n '16': 0.11764705882352941,\n '17': 0.058823529411764705,\n '18': 0.058823529411764705,\n '19': 0.17647058823529413\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(indeg_cen_after_element_perturbation.values())),\n np.asarray(sorted(F.G.indegree_centrality.values())),\n err_msg=\"FINAL INDEGREE CENTRALITY failure: perturbation of element 1\")", "def is_contradiction_(transition):\n is_contr = False\n\n # check implications of lower left corner\n if np.argmax(transition[0]) == 0:\n if np.argmax(transition[2]) == 2 or np.argmax(transition[2]) == 3:\n is_contr = True\n elif np.argmax(transition[0]) == 1:\n if np.argmax(transition[1]) == 0 or np.argmax(transition[1]) == 2:\n is_contr = True\n if np.argmax(transition[2]) != 1:\n is_contr = True\n elif np.argmax(transition[0]) == 2:\n if np.argmax(transition[1]) == 0 or np.argmax(transition[1]) == 1:\n is_contr = True\n elif np.argmax(transition[0]) == 3:\n if np.argmax(transition[1]) != 3:\n is_contr = True\n if np.argmax(transition[2]) == 0 or np.argmax(transition[2]) == 2:\n is_contr = True\n\n # check implicatiosn of upper right corner\n if np.argmax(transition[2]) == 0:\n if np.argmax(transition[0]) == 1 or np.argmax(transition[0]) == 3:\n is_contr = True\n elif np.argmax(transition[2]) == 1:\n if np.argmax(transition[1]) == 0 or np.argmax(transition[1]) == 2:\n is_contr = True\n elif np.argmax(transition[2]) == 2:\n if np.argmax(transition[0]) != 2:\n is_contr = True\n if np.argmax(transition[1]) == 0 or np.argmax(transition[1]) == 1:\n is_contr = True\n elif np.argmax(transition[2]) == 3:\n if np.argmax(transition[1]) != 3:\n is_contr = True\n if np.argmax(transition[0]) == 0 or np.argmax(transition[0]) == 
1:\n is_contr = True\n\n return is_contr", "def victory_checker() -> bool:\r\n conflict_check()\r\n for x in range(shape):\r\n for y in range(shape):\r\n if conflict_space[x, y] != 0:\r\n return False\r\n if separation_crawler(False):\r\n return False\r\n return True", "def test_solve_nestedcs():\n\n import numpy as np\n from crpm.setup_nestedcs import setup_nestedcs\n from crpm.fwdprop import fwdprop\n from crpm.lossfunctions import loss\n from crpm.gradientdecent import gradientdecent\n\n #init numpy seed\n np.random.seed(40017)\n\n #setup model\n model, data = setup_nestedcs()\n\n #calculate initial mean squared error\n pred, _ = fwdprop(data[0:2,], model)\n icost, _ = loss(\"mse\", pred, data[-1,])\n #print(icost)\n\n #train model\n pred, cost, _ = gradientdecent(model, data[0:2,], data[-1,], \"mse\")\n\n #print(model)\n #print(icost)\n #print(cost)\n assert icost > cost\n assert cost < .08", "def validate_points(a, b):\r\n\tdiff_y = b[0] - a[0]\r\n\tdiff_x = b[1] - a[1]\r\n\r\n\treturn (diff_y == 0 and diff_x != 0) or (diff_x == 0 and diff_y != 0) or abs(diff_x) == abs(diff_y)", "def test_basic(self):\r\n if (not theano.tensor.nnet.conv.imported_scipy_signal and\r\n theano.config.cxx == \"\"):\r\n raise SkipTest(\"conv2d tests need SciPy or a c++ compiler\")\r\n\r\n self.validate((1, 4, 5), (2, 2, 3), verify_grad=True)\r\n self.validate((7, 5), (5, 2, 3), verify_grad=False)\r\n self.validate((3, 7, 5), (2, 3), verify_grad=False)\r\n self.validate((7, 5), (2, 3), verify_grad=False)", "def test_vmec_objective_grad(self): \n self.set_up()\n self.assertRaises(ValueError,\n self.vmecOptimization.vmec_objective_grad,\n which_objective='volumee')\n \n boundary = np.copy(self.vmecOptimization.boundary_opt)\n boundary_new = np.hstack((boundary,boundary))\n self.assertRaises(ValueError,\n self.vmecOptimization.vmec_objective_grad,\n boundary=boundary_new)\n self.assertRaises(RuntimeError,\n self.vmecOptimization.vmec_objective_grad,\n which_objective='iota') \n self.assertRaises(RuntimeError,\n self.vmecOptimization.vmec_objective_grad,\n which_objective='iota_prime') \n self.assertRaises(RuntimeError,\n self.vmecOptimization.vmec_objective_grad,\n which_objective='iota_target') \n self.assertRaises(RuntimeError,\n self.vmecOptimization.vmec_objective_grad,\n which_objective='well_ratio') \n self.assertRaises(RuntimeError,\n self.vmecOptimization.vmec_objective_grad,\n which_objective='well') \n self.assertRaises(RuntimeError,\n self.vmecOptimization.vmec_objective_grad,\n which_objective='axis_ripple') \n # Jacobian\n dfdomega = self.vmecOptimization.vmec_objective_grad(\n boundary=boundary,which_objective='jacobian')\n boundary = self.vmecOptimization.boundary_opt\n dfdomega_fd = finite_difference_derivative(boundary, lambda boundary :\n self.vmecOptimization.vmec_objective(boundary=boundary,\n which_objective='jacobian'),epsilon=1e-4,\n method='centered')\n\n self.assertTrue(np.allclose(dfdomega,dfdomega_fd,atol=1e-5))\n # Radius\n dfdomega = self.vmecOptimization.vmec_objective_grad(\n boundary=boundary,which_objective='radius')\n boundary = self.vmecOptimization.boundary_opt\n dfdomega_fd = finite_difference_derivative(boundary, lambda boundary :\n self.vmecOptimization.vmec_objective(boundary=boundary,\n which_objective='radius'),epsilon=1e-4,\n method='centered')\n self.assertTrue(np.allclose(dfdomega,dfdomega_fd,atol=1e-5))\n # normalized_jacobian\n dfdomega = self.vmecOptimization.vmec_objective_grad(\n boundary=boundary,which_objective=\n 'normalized_jacobian')\n boundary = 
self.vmecOptimization.boundary_opt\n dfdomega_fd = finite_difference_derivative(boundary, lambda boundary :\n self.vmecOptimization.vmec_objective(boundary=boundary,\n which_objective='normalized_jacobian'),epsilon=1e-4,\n method='centered')\n self.assertTrue(np.allclose(dfdomega,dfdomega_fd,atol=1e-5))\n # iota\n inputObject = VmecInput('input.rotating_ellipse_highres')\n boundary = np.copy(self.vmecOptimization.boundary_opt)\n boundary[0] = 1.1*boundary[0]\n self.vmecOptimization.inputObject = inputObject\n self.vmecOptimization.vmecInputFilename = 'input.rotating_ellipse_highres'\n self.vmecOptimization.delta_curr = 10\n dfdomega = self.vmecOptimization.vmec_objective_grad(\n boundary=boundary,which_objective='iota',\n weight_function = axis_weight)\n dfdomega_fd = finite_difference_derivative(boundary, lambda boundary :\n self.vmecOptimization.vmec_objective(boundary=boundary,\n which_objective='iota', weight_function = axis_weight),\n epsilon=1e-3, method='centered')\n self.assertTrue(np.allclose(dfdomega,dfdomega_fd,atol=1e-2))\n \n # To do : finish FD testing\n self.tear_down()", "def test_lcl_convergence_issue():\n pressure = np.array([990, 973, 931, 925, 905]) * units.hPa\n temperature = np.array([14.4, 14.2, 13, 12.6, 11.4]) * units.degC\n dewpoint = np.array([14.4, 11.7, 8.2, 7.8, 7.6]) * units.degC\n lcl_pressure, _ = lcl(pressure[0], temperature[0], dewpoint[0])\n assert_almost_equal(lcl_pressure, 990 * units.hPa, 0)", "def check_cl_constraints(vector):\n\tglobal __cl_constraints\n\n\tfor con in __cl_constraints:\n\t\t# a vector is not allowed to hold for both\n\t\tif vector[con[0]] == 1 and vector[con[1]] == 1:\n\t\t\treturn False\n\treturn True", "def test_coefficient_orders(self):\n for i in range(2, 5):\n spec = {2*j: 0 for j in range(i)}\n bcs_ref = BoundaryConditions(spec, 2*i-2)\n bcs_main = BoundaryConditions(spec, 2*i)\n\n coeffs_ref = get_ext_coeffs(bcs_ref)[i-1]\n coeffs_main = get_ext_coeffs(bcs_main)[i-1]\n\n assert coeffs_ref == coeffs_main", "def testNonLinearity():\n vis = VISinformation()\n data = np.linspace(1, vis['fullwellcapacity'], 10000)\n nonlin = CCDnonLinearityModel(data.copy())\n\n txt = '%s' % datetime.datetime.isoformat(datetime.datetime.now())\n\n fig = plt.figure(frameon=False)\n\n left, width = 0.1, 0.8\n rect1 = [left, 0.3, width, 0.65]\n rect2 = [left, 0.1, width, 0.2]\n\n ax1 = fig.add_axes(rect1, title='VIS Non-linearity Model')\n ax2 = fig.add_axes(rect2) #left, bottom, width, height\n\n ax1.axhline(y=0, c='k', ls='--')\n ax1.plot(data, (nonlin/data - 1.)*100, 'r-', label='Model')\n\n ax2.axhline(y=0, c='k', ls='--')\n ax2.plot(data, (nonlin - data)/vis['gain'], 'g-')\n\n ax1.axvline(x=97, c='k', ls='--')\n ax2.axvline(x=97, c='k', ls='--')\n\n ax1.set_xticklabels([])\n ax2.set_xlabel('Real Charge [electrons]')\n ax1.set_ylabel('(Output / Real - 1)*100')\n ax2.set_ylabel('O - R [ADUs]')\n\n ax1.set_xlim(0, vis['fullwellcapacity'])\n ax2.set_xlim(0, vis['fullwellcapacity'])\n ax1.set_ylim(-.15, .2)\n\n ax1.text(0.83, 1.12, txt, ha='left', va='top', fontsize=9, transform=ax1.transAxes, alpha=0.2)\n ax1.legend(shadow=True, fancybox=True, numpoints=1, scatterpoints=1, markerscale=1.0)\n plt.savefig('NonlinearityModel.pdf')\n\n ax1.set_ylim(-.1, 8)\n ax2.set_ylim(0, 2)\n ax1.set_xlim(50, 800)\n ax2.set_xlim(50, 800)\n plt.savefig('NonlinearityModel2.pdf')\n\n plt.close()", "def test_coefficients(self):\n\n coefs = self.cs.coefficients\n\n self.assertEqual(coefs, (1, 0, 1, 0, 0, -1))", "def 
test_outdegree_centrality_after_element_perturbation():\n F = FaultDiagnosis(\"tests/TOY_graph.csv\")\n F.simulate_element_perturbation([\"1\"])\n\n outdeg_cen_after_element_perturbation = {\n '2': 0.058823529411764705,\n '3': 0.058823529411764705,\n '4': 0.058823529411764705,\n '5': 0.058823529411764705,\n '6': 0.11764705882352941,\n '7': 0.058823529411764705,\n '8': 0.11764705882352941,\n '9': 0.058823529411764705,\n '10': 0.058823529411764705,\n '11': 0.058823529411764705,\n '12': 0.11764705882352941,\n '13': 0.11764705882352941,\n '14': 0.17647058823529413,\n '15': 0.058823529411764705,\n '16': 0.058823529411764705,\n '17': 0.11764705882352941,\n '18': 0.0,\n '19': 0.11764705882352941\n }\n\n np.testing.assert_array_almost_equal(\n np.asarray(sorted(outdeg_cen_after_element_perturbation.values())),\n np.asarray(sorted(F.G.outdegree_centrality.values())),\n err_msg=\"FINAL OUTDEGREE CENTRALITY failure: perturbation of element 1\")", "def test_vmec_shape_gradient(self): \n self.set_up()\n self.assertRaises(ValueError,\n self.vmecOptimization.vmec_shape_gradient,\n which_objective='volumee')\n self.tear_down()", "def test_verify(perfectModelEnsemble_initialized_control):\n assert perfectModelEnsemble_initialized_control.verify(\n metric=\"mse\", comparison=\"m2e\", dim=[\"init\", \"member\"]\n )", "def _is_circle_contractive(self,r,tol):\n B=np.diag(self.b)\n M=np.dot(B,self.A)+np.dot(self.A.T,B)-np.outer(self.b,self.b)\n X=M+B/r\n v,d=np.linalg.eig(X)\n if v.min()>-tol:\n return 1\n else:\n return 0", "def is_collinear(self, directed_edge):\n\n return self.orientation(directed_edge.begin) == 0 and self.orientation(directed_edge.end) == 0", "def test_cross(self):\n self.assertEqual(solution.cross(solution.ROWS, solution.COLS), self.boxes)", "def test_epipolar(dxy_0, ep_vec, dxy, tol):\n delta=np.abs(np.dot((dxy-dxy_0), [ep_vec[1], -ep_vec[0]]))\n disp_mag=np.sqrt((dxy[:,0]-dxy_0[0])**2 +(dxy[:,1]-dxy_0[1])**2)\n good=(delta < tol) | (delta < 0.02 * disp_mag )\n return good, delta", "def test_basic1(self):\r\n self.validate((2, 2, 3, 3), (2, 2, 2, 2), 'valid', verify_grad=False)", "def test_linear_buckling_iso_CCSS(plot_static=False, plot_lb=False):\n # number of nodes\n nx = 5 # along x\n ny = 5 # along y\n\n # getting integration points\n nint = 4\n points, weights = get_points_weights(nint=nint)\n\n # geometry\n a = 3 # along x\n b = 3 # along y\n\n # material properties\n E = 200e9\n nu = 0.3\n laminaprop = (E, E, nu)\n stack = [0]\n h = 0.001\n lam = read_stack(stack=stack, plyt=h, laminaprop=laminaprop)\n\n # creating mesh\n x = np.linspace(0, a, nx)\n y = np.linspace(0, b, ny)\n xmesh, ymesh = np.meshgrid(x, y)\n\n # node coordinates and position in the global matrix\n ncoords = np.vstack((xmesh.T.flatten(), ymesh.T.flatten())).T\n nids = 1 + np.arange(ncoords.shape[0])\n nid_pos = dict(zip(nids, np.arange(len(nids))))\n\n # identifying nodal connectivity for plate elements\n # similar than Nastran's CQUAD4\n #\n # ^ y\n # |\n #\n # 4 ________ 3\n # | |\n # | | --> x\n # | |\n # |_______|\n # 1 2\n\n\n nids_mesh = nids.reshape(nx, ny)\n n1s = nids_mesh[:-1, :-1].flatten()\n n2s = nids_mesh[1:, :-1].flatten()\n n3s = nids_mesh[1:, 1:].flatten()\n n4s = nids_mesh[:-1, 1:].flatten()\n\n num_elements = len(n1s)\n print('num_elements', num_elements)\n\n N = DOF*nx*ny\n Kr = np.zeros(KC0_SPARSE_SIZE*num_elements, dtype=INT)\n Kc = np.zeros(KC0_SPARSE_SIZE*num_elements, dtype=INT)\n Kv = np.zeros(KC0_SPARSE_SIZE*num_elements, dtype=DOUBLE)\n KGr = np.zeros(KG_SPARSE_SIZE*num_elements, 
dtype=INT)\n KGc = np.zeros(KG_SPARSE_SIZE*num_elements, dtype=INT)\n KGv = np.zeros(KG_SPARSE_SIZE*num_elements, dtype=DOUBLE)\n init_k_KC0 = 0\n init_k_KG = 0\n\n plates = []\n for n1, n2, n3, n4 in zip(n1s, n2s, n3s, n4s):\n plate = BFSPlate2D()\n plate.n1 = n1\n plate.n2 = n2\n plate.n3 = n3\n plate.n4 = n4\n plate.c1 = DOF*nid_pos[n1]\n plate.c2 = DOF*nid_pos[n2]\n plate.c3 = DOF*nid_pos[n3]\n plate.c4 = DOF*nid_pos[n4]\n plate.ABD = lam.ABD\n plate.lex = a/(nx - 1)\n plate.ley = b/(ny - 1)\n plate.init_k_KC0 = init_k_KC0\n plate.init_k_KG = init_k_KG\n update_KC0(plate, points, weights, Kr, Kc, Kv)\n init_k_KC0 += KC0_SPARSE_SIZE\n init_k_KG += KG_SPARSE_SIZE\n plates.append(plate)\n\n KC0 = coo_matrix((Kv, (Kr, Kc)), shape=(N, N)).tocsc()\n\n # applying boundary conditions\n\n # locating nodes\n bk = np.zeros(KC0.shape[0], dtype=bool) # constrained DOFs, can be used to prescribe displacements\n\n x = ncoords[:, 0]\n y = ncoords[:, 1]\n\n # applying boundary conditions\n # simply supported\n check = isclose(x, 0) | isclose(x, a) | isclose(y, 0) | isclose(y, b)\n bk[2::DOF] = check\n check = isclose(x, 0) | isclose(x, a)\n bk[3::DOF] = check\n # point supports\n check = isclose(x, a/2) & (isclose(y, 0) | isclose(y, b))\n bk[0::DOF] = check\n check = isclose(y, b/2) & (isclose(x, 0) | isclose(x, a))\n bk[1::DOF] = check\n\n # unconstrained nodes\n bu = ~bk # logical_not\n\n # defining external force vector\n fext = np.zeros(KC0.shape[0], dtype=float)\n\n # applying unitary load along u at x=a\n # nodes at vertices get 1/2 the force\n for plate in plates:\n pos1 = nid_pos[plate.n1]\n pos2 = nid_pos[plate.n2]\n pos3 = nid_pos[plate.n3]\n pos4 = nid_pos[plate.n4]\n if isclose(x[pos3], a):\n Nxx = -1\n xi = +1\n elif isclose(x[pos1], 0):\n Nxx = +1\n xi = -1\n else:\n continue\n lex = plate.lex\n ley = plate.ley\n indices = []\n c1 = DOF*pos1\n c2 = DOF*pos2\n c3 = DOF*pos3\n c4 = DOF*pos4\n cs = [c1, c2, c3, c4]\n for ci in cs:\n for i in range(DOF):\n indices.append(ci + i)\n fe = np.zeros(4*DOF, dtype=float)\n for j in range(nint):\n eta = points[j]\n plate.update_Nu(xi, eta)\n Nu = np.asarray(plate.Nu)\n fe += ley/2*weights[j]*Nu*Nxx\n fext[indices] += fe\n\n Kuu = KC0[bu, :][:, bu]\n fextu = fext[bu]\n\n # static solver\n uu = spsolve(Kuu, fextu)\n u = np.zeros(KC0.shape[0], dtype=float)\n u[bu] = uu\n\n if plot_static:\n import matplotlib\n matplotlib.use('TkAgg')\n import matplotlib.pyplot as plt\n plt.gca().set_aspect('equal')\n uplot = u[0::DOF].reshape(nx, ny).T\n vplot = u[1::DOF].reshape(nx, ny).T\n print('u extremes', uplot.min(), uplot.max())\n print('v extremes', vplot.min(), vplot.max())\n levels = np.linspace(uplot.min(), uplot.max(), 300)\n plt.contourf(xmesh, ymesh, uplot, levels=levels)\n plt.colorbar()\n plt.show()\n\n # eigenvalue solver\n\n # getting integration points\n for plate in plates:\n update_KG(u, plate, points, weights, KGr, KGc, KGv)\n KG = coo_matrix((KGv, (KGr, KGc)), shape=(N, N)).tocsc()\n KGuu = KG[bu, :][:, bu]\n\n # solving modified generalized eigenvalue problem\n # Original: (KC0 + lambda*KG)*v = 0\n # Modified: (-1/lambda)*KC0*v = KG*v #NOTE here we find (-1/lambda)\n num_eigenvalues = 5\n eigvals, eigvecsu = eigsh(A=KGuu, k=num_eigenvalues, which='SM', M=Kuu,\n tol=1e-6, sigma=1., mode='cayley')\n eigvals = -1./eigvals\n eigvecs = np.zeros((KC0.shape[0], num_eigenvalues), dtype=float)\n eigvecs[bu, :] = eigvecsu\n\n if plot_lb:\n import matplotlib\n matplotlib.use('TkAgg')\n import matplotlib.pyplot as plt\n plt.gca().set_aspect('equal')\n mode 
= 0\n wplot = eigvecs[2::DOF, mode].reshape(nx, ny).T\n levels = np.linspace(wplot.min(), wplot.max(), 300)\n plt.contourf(xmesh, ymesh, wplot, levels=levels)\n plt.colorbar()\n plt.show()\n\n kc = eigvals[0]/(E*np.pi**2*(h/b)**2/(12*(1 - nu**2))*h)\n assert isclose(kc, 6.6, rtol=0.05)", "def check_deterministic():\n model1, loss_func, X1, y1 = setup()\n loss1 = loss_func(model1(X1), y1)\n\n model2, loss_func, X2, y2 = setup()\n loss2 = loss_func(model2(X2), y2)\n\n assert torch.allclose(loss1, loss2)\n assert torch.allclose(X1, X2)\n assert torch.allclose(y1, y2)\n assert all(\n torch.allclose(p1, p2)\n for p1, p2 in zip(model1.parameters(), model2.parameters())\n )", "def testSplineCurveInverseIsCorrect(self):\n x_knot = jnp.arange(0, 16, 0.01)\n alpha = self.variant(distribution.inv_partition_spline_curve)(x_knot)\n x_recon = self.variant(distribution.partition_spline_curve)(alpha)\n chex.assert_tree_all_close(x_recon, x_knot, atol=1e-5, rtol=1e-5)", "def test_prop33_false(self):\n dec = TwoQubitDecomposeUpToDiagonal()\n qc = QuantumCircuit(2)\n qc.u(0.1, 0.2, 0.3, 0)\n qc.u(0.4, 0.5, 0.6, 1)\n qc.cx(0, 1)\n qc.u(0.1, 0.2, 0.3, 0)\n qc.u(0.4, 0.5, 0.6, 1)\n qc.cx(0, 1)\n qc.u(0.5, 0.2, 0.3, 0)\n qc.u(0.2, 0.4, 0.1, 1)\n qc.cx(1, 0)\n qc.u(0.1, 0.2, 0.3, 0)\n qc.u(0.4, 0.5, 0.6, 1)\n mat = Operator(qc).data\n self.assertFalse(dec._cx2_test(mat))", "def test_coherency():\r\n\r\n for method in methods:\r\n f, c = tsa.coherency(tseries, csd_method=method)\r\n\r\n npt.assert_array_almost_equal(c[0, 1], c[1, 0].conjugate())\r\n npt.assert_array_almost_equal(c[0, 0], np.ones(f.shape))\r\n\r\n if method is not None and method['this_method'] != \"multi_taper_csd\":\r\n f_theoretical = utils.get_freqs(method['Fs'], method['NFFT'])\r\n npt.assert_array_almost_equal(f, f_theoretical)\r\n npt.assert_array_almost_equal(f, f_theoretical)", "def test_for_discontinuity(a_n,b_n,c_n,d_n,x_n,x_n_plus_1,y_n_plus_1):\n\ty_n_final = a_n + b_n*(x_n_plus_1-x_n) + c_n*(x_n_plus_1-x_n)**2 + d_n*(x_n_plus_1-x_n)**3\n\tresult = abs(y_n_final-y_n_plus_1)<0.001\n\treturn(result)", "def check_cost_operator(C, obj_f, offset=0):\n m_diag = cost_operator_to_vec(C, offset=offset)\n m_diag = np.real(get_adjusted_state(m_diag))\n for k, v in state_to_ampl_counts(m_diag, eps=-1).items():\n x = np.array([int(_k) for _k in k])\n assert(np.isclose(obj_f(x), v))" ]
[ "0.6670978", "0.6513759", "0.6198429", "0.6138979", "0.61240935", "0.61158717", "0.60906714", "0.6050059", "0.6006302", "0.59238374", "0.5918185", "0.59094197", "0.58958864", "0.5882234", "0.5876733", "0.5855049", "0.579847", "0.579847", "0.577797", "0.57669806", "0.5764593", "0.574557", "0.5733494", "0.573222", "0.5716096", "0.5713724", "0.56938845", "0.56743795", "0.5668846", "0.5661076", "0.56454754", "0.56402826", "0.5640162", "0.5626067", "0.5622496", "0.5615207", "0.5612343", "0.56093764", "0.56055504", "0.55822986", "0.55801165", "0.5576409", "0.5572955", "0.55607754", "0.55598813", "0.5558613", "0.55538255", "0.5545366", "0.55405515", "0.5517967", "0.5507625", "0.550523", "0.55012965", "0.549728", "0.54957855", "0.54765064", "0.5473467", "0.54693705", "0.54693705", "0.54459435", "0.54379267", "0.5425018", "0.54117906", "0.54117906", "0.5406689", "0.5398506", "0.53970706", "0.53925127", "0.5389935", "0.53850985", "0.5380305", "0.5375869", "0.5371727", "0.5371727", "0.5364517", "0.5353883", "0.5350692", "0.5337976", "0.5334573", "0.5333696", "0.53332883", "0.5324223", "0.5320968", "0.53177", "0.5317299", "0.53153193", "0.5304492", "0.5304135", "0.52983826", "0.52937174", "0.52724135", "0.5267454", "0.52647245", "0.5263724", "0.5263318", "0.52583426", "0.52578807", "0.5254217", "0.525244", "0.5251494" ]
0.68860763
0
Converts a path to a string representation for inclusion in an SVG file as
def path_to_string(path: Path) -> str:
    assert_continuous(path)
    pieces = ["M {} {}".format(path[0].p0[0], path[0].p0[1])]
    for curve in iter(path):  # iter cast not strictly necessary
        piece = "C {} {} {} {} {} {}".format(
            int(round(curve.c0[0])), int(round(curve.c0[1])),
            int(round(curve.c1[0])), int(round(curve.c1[1])),
            int(round(curve.p1[0])), int(round(curve.p1[1]))
        )
        pieces.append(piece)
    return " ".join(pieces)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def as_string(path: pathlib.Path) -> str:\n return path.as_posix()", "def path_to_str(path):\n if hasattr(path, '__fspath__'):\n path = as_str_any(path.__fspath__())\n return path", "def save_svg(string, file_name):\n file_handle = file(file_name, \"w\")\n file_handle.write(string)\n file_handle.close()", "def _path_to_string(path):\n return '.'.join(path)", "def getSVGpath(filePath):\n openfile = open(filePath, 'r')\n textFile = openfile.read()\n openfile.close()\n textSel = re.search('<path[^/>]*..', textFile).group()\n textPathPos = re.search('d=\"[^\"]*', textSel).group()\n tokens = re.split('[\\s,\"]', textPathPos)\n return tokens", "def path_str(path):\n\toutput = \"PATH: \"\n\tif path:\n\t\tfor i in path:\n\t\t\toutput += str(i.data) + \" -> \"\n\telse:\n\t\toutput += \"Empty\"\n\treturn output", "def save_as_svg(file_name, path = DEFAULT_PATH):\n plt.ioff()\n plt.savefig(path + file_name + '.svg')\n plt.close()", "def dvi_to_svg(dvi_file: str) -> str:\n file_type = get_tex_config()[\"intermediate_filetype\"]\n result = dvi_file.replace(\".\" + file_type, \".svg\")\n if not os.path.exists(result):\n commands = [\n \"dvisvgm\",\n \"\\\"{}\\\"\".format(dvi_file),\n \"-n\",\n \"-v\",\n \"0\",\n \"-o\",\n \"\\\"{}\\\"\".format(result),\n \">\",\n os.devnull\n ]\n os.system(\" \".join(commands))\n return result", "def construct_svg_path(path, transform=None):\n if transform is None:\n transform = IdentityTransform()\n\n steps = []\n for vert, code in path.iter_segments(simplify=False):\n vert = transform.transform(vert.reshape(-1, 2)).ravel()\n step = PATH_DICT[code]\n if step != 'Z':\n step += ' '.join(map(str, vert))\n steps.append(step)\n\n return ' '.join(steps)", "def get_svgout(self):\n return tempfile.mktemp(dir=self.tmpdir, suffix='.svg')", "def path_filename_representation(path):\n # Strip leading / and replace / with .\n return re.sub(r\"^/(.*)$\", r\"\\1\", path).replace(\"/\", \".\")", "def segments_svg_path(self):\n verts = self.vertices.split(',') # leave as string\n segs = [int(v) for v in self.segments.split(',')]\n data = []\n for i in xrange(0, len(segs), 2):\n v0 = 2 * segs[i]\n v1 = 2 * segs[i + 1]\n data.append(u\"M%s,%sL%s,%s\" % (\n verts[v0], verts[v0 + 1],\n verts[v1], verts[v1 + 1],\n ))\n return u\"\".join(data)", "def dvi_to_svg(dvi_file, regen_if_exists=False):\n result = dvi_file.replace(\".dvi\", \".svg\")\n if not os.path.exists(result):\n commands = [\n \"dvisvgm\",\n dvi_file,\n \"-n\",\n \"-v\",\n \"0\",\n \"-o\",\n result,\n \">\",\n get_null()\n ]\n os.system(\" \".join(commands))\n return result", "def convertString(path):\n if (\"win\" in sys.platform):\n return path.replace(\"/\",\"\\\\\")\n elif (\"linux\" in sys.platform):\n return path.replace(\"\\\\\",\"/\")", "def stringyfy(path):\n try:\n # Pathlib support\n path = path.__fspath__()\n except AttributeError:\n pass\n if hasattr(path, 'name'): # passed in a file\n path = path.name\n if isinstance(path, str):\n return path\n raise ValueError(f'Cannot convert {path} to a path')", "def get_filename(checksum):\n return '%s.svg' % checksum", "def processed_json_path(path):\n return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('.json', '.bin')", "def processed_texture_path(path):\n return path.replace(RAW_ASSETS_PATH, ASSETS_PATH).replace('png', 'webp')", "def format_path(path):\n if len(path) > 1:\n result = [crayons.yellow(path[0].name)]\n\n previous = path[0]\n for item in path[1:]:\n result.append(' -> ')\n result.append(crayons.yellow(item.name))\n result.append(': Line ')\n 
result.append(crayons.cyan(str(item.is_imported_from[previous.full_path][0])))\n previous = item\n result.append(' =>> ')\n\n result.append(crayons.magenta(path[0].name))\n return ''.join(str(x) for x in result)\n else:\n return ''", "def convert_path(path: str, path_type: PathType = PathType.AUTO) -> str:\r\n path_template = get_template_from_path(path)\r\n path = get_path_from_template(path_template, path_type)\r\n return path", "def dump_graph(self) -> str:\n graph_dot_file = f'{self._name}.dot'\n graph_diagram_file = f'{self._name}.svg'\n write_dot(self._graph, graph_dot_file)\n subprocess.check_output(\n shlex.split(f'dot -Tsvg {graph_dot_file} -o {graph_diagram_file}')\n )\n return graph_diagram_file", "def ps2svg(sFile, method=\"default\"):\n\n sBack = \"\"\n oErr = ErrHandle()\n try:\n # Read the file\n sText = \"\"\n with open(sFile, \"r\") as f:\n sText = f.read()\n if method == \"default\":\n sBack = ps2svg_string(sText)\n elif method == \"simple\":\n sBack = ps2svg_simple(sText)\n except:\n msg = oErr.get_error_message()\n oErr.DoError(\"ps2svg\")\n\n # Return what we have gathered\n return sBack", "def Sourceify(path):\n return path", "def getSvgHtml(svgFile, width, height):\n html = '<object type=\"image/svg+xml\" data=\"%s\" width=\"%s\" height=\"%s\"/>'\n return html % (svgFile, width, height)", "def export_as_svg(self):\n from ExportCommand import ExportCommand\n\n options = QFileDialog.Options()\n options |= QFileDialog.DontUseNativeDialog\n file_path, _ = QFileDialog.getSaveFileName(self, \"Export as svg\", os.getcwd(), \"svg file(*.svg)\",\n options=options)\n if file_path:\n cmd = ExportCommand(self.graphicsView.scene(), 'svg')\n cmd.display_message.connect(self.onAddMessage)\n if cmd.execute(file_path):\n QMessageBox.information(self, self.tr('Information'), self.tr('Successfully export to svg file'))\n else:\n QMessageBox.information(self, self.tr('Error'), self.tr('Fail to export to svg file'))", "def resource_string(self, path):\n\t\tdata = pkg_resources.resource_string(__name__, path)\n\t\treturn data.decode(\"utf8\")", "def openSVG(path):\n from xml.dom import minidom\n doc = minidom.parse(open(path))\n svg = doc.getElementsByTagName(\"svg\")[0]\n sizeMatch = re.match(r\"(\\d+) (\\d+) (\\d+) (\\d+)\", svg.getAttribute(\"viewBox\"))\n w, h = int(sizeMatch.group(3)), int(sizeMatch.group(4))\n return svg, w, h", "def jsonpath_to_xpath(path):\n return '/' + path.replace('.', \"/\")", "def format_path(path):\n if not path:\n return path\n\n path = re.sub(r'/+', '/', path)\n\n if path == '/':\n return (u\"\" if isinstance(path, unicode) else \"\")\n else:\n return '/' + path.strip('/')", "def _path_to_str(var):\n if not isinstance(var, (Path, str)):\n raise ValueError(\"All path parameters must be either strings or \"\n \"pathlib.Path objects. 
Found type %s.\" % type(var))\n else:\n return str(var)", "def resource_string(path):\n data = pkg_resources.resource_string(__name__, path)\n return data.decode(\"utf8\")", "def resource_string(path):\n data = pkg_resources.resource_string(__name__, path)\n return data.decode(\"utf8\")", "def as_xml_path(scenario_path: pathlib.Path) -> pathlib.Path:\n return scenario_path.parent / (scenario_path.stem + \".xml\")", "def gen_symbols(path, strip):\n\n symbols = ''\n svg_namespace = 'http://www.w3.org/2000/svg'\n etree.register_namespace('', svg_namespace)\n\n for root, dirs, files in os.walk(os.path.abspath(path)):\n for wwsfile in files:\n basename, extension = os.path.splitext(wwsfile)\n if extension == '.svg':\n filepath = os.path.join(root, wwsfile)\n try:\n svg = etree.parse(filepath)\n svg_root = svg.getroot()\n\n attribs = svg_root.attrib\n desc = svg.find('{'+svg_namespace+'}desc')\n svg_root.remove(desc)\n title = svg.find('{'+svg_namespace+'}title')\n svg_root.remove(title)\n metadata = svg.find('{'+svg_namespace+'}metadata')\n svg_root.remove(metadata)\n\n viewbox_attrib = 'viewBox'\n if viewbox_attrib in attribs:\n viewbox = attribs[viewbox_attrib]\n else:\n viewbox = f\"0 0 {attribs['width']} {attribs['height']}\"\n\n basename2 = basename.replace(strip, '')\n symbols += f'<symbol id=\"{basename2}\" viewBox=\"{viewbox}\">'\n\n for element in svg_root:\n symbols += etree.tostring(element).decode('utf-8')\n symbols += '</symbol>'\n\n except Exception as err:\n warnings.warn(f'Could not parse file {filepath}: {err}')\n\n return symbols", "def frame_string(path):\n filename = os.path.split(path)[1]\n return os.path.splitext(filename)[0]", "def get_template_from_path(path: str) -> str:\r\n path = path.replace(\"\\\\\", \"/\")\r\n return path", "def resource_string(self, path):\n data = pkg_resources.resource_string(__name__, path)\n return data.decode(\"utf8\")", "def resource_string(self, path):\n data = pkg_resources.resource_string(__name__, path)\n return data.decode(\"utf8\")", "def resource_string(self, path):\n data = pkg_resources.resource_string(__name__, path)\n return data.decode(\"utf8\")", "def resource_string(self, path):\n data = pkg_resources.resource_string(__name__, path)\n return data.decode(\"utf8\")", "def resource_string(self, path):\n data = pkg_resources.resource_string(__name__, path)\n return data.decode(\"utf8\")", "def resource_string(self, path):\n data = pkg_resources.resource_string(__name__, path)\n return data.decode(\"utf8\")", "def resource_string(self, path):\n data = pkg_resources.resource_string(__name__, path)\n return data.decode(\"utf8\")", "def resource_string(self, path):\n data = pkg_resources.resource_string(__name__, path)\n return data.decode(\"utf8\")", "def resource_string(self, path):\n data = pkg_resources.resource_string(__name__, path)\n return data.decode(\"utf8\")", "def _repr_svg_(self):\n pass", "def svg2paths(svg_file_location,\n return_svg_attributes=False,\n convert_circles_to_paths=True,\n convert_ellipses_to_paths=True,\n convert_lines_to_paths=True,\n convert_polylines_to_paths=True,\n convert_polygons_to_paths=True,\n convert_rectangles_to_paths=True):\n if os_path.dirname(svg_file_location) == '':\n svg_file_location = os_path.join(getcwd(), svg_file_location)\n\n doc = parse(svg_file_location)\n\n def dom2dict(element):\n \"\"\"Converts DOM elements to dictionaries of attributes.\"\"\"\n keys = list(element.attributes.keys())\n values = [val.value for val in list(element.attributes.values())]\n return 
dict(list(zip(keys, values)))\n\n def parse_trafo(trafo_str):\n \"\"\"Returns six matrix elements for a matrix transformation for any \n valid SVG transformation string.\"\"\"\n trafos = trafo_str.split(')')[:-1]\n trafo_matrix = np.array([1., 0., 0., 0., 1., 0., 0., 0., 1.]).reshape(\n (3, 3)) # Start with neutral matrix\n\n for trafo_sub_str in trafos:\n trafo_sub_str = trafo_sub_str.lstrip(', ')\n value_str = trafo_sub_str.split('(')[1]\n values = list(map(float, value_str.split(',')))\n if 'translate' in trafo_sub_str:\n x = values[0]\n y = values[1] if (len(values) > 1) else 0.\n trafo_matrix = np.dot(trafo_matrix, np.array(\n [1., 0., x, 0., 1., y, 0., 0., 1.]).reshape((3, 3)))\n elif 'scale' in trafo_sub_str:\n x = values[0]\n y = values[1] if (len(values) > 1) else 0.\n trafo_matrix = np.dot(trafo_matrix,\n np.array([x, 0., 0., 0., y, 0., 0., 0.,\n 1.]).reshape((3, 3)))\n elif 'rotate' in trafo_sub_str:\n a = values[0] * np.pi / 180.\n x = values[1] if (len(values) > 1) else 0.\n y = values[2] if (len(values) > 2) else 0.\n am = np.dot(np.array(\n [np.cos(a), -np.sin(a), 0., np.sin(a), np.cos(a), 0., 0.,\n 0., 1.]).reshape((3, 3)),\n np.array(\n [1., 0., -x, 0., 1., -y, 0., 0., 1.]).reshape(\n (3, 3)))\n am = np.dot(\n np.array([1., 0., x, 0., 1., y, 0., 0., 1.]).reshape(\n (3, 3)), am)\n trafo_matrix = np.dot(trafo_matrix, am)\n elif 'skewX' in trafo_sub_str:\n a = values[0] * np.pi / 180.\n trafo_matrix = np.dot(trafo_matrix,\n np.array(\n [1., np.tan(a), 0., 0., 1., 0., 0.,\n 0., 1.]).reshape((3, 3)))\n elif 'skewY' in trafo_sub_str:\n a = values[0] * np.pi / 180.\n trafo_matrix = np.dot(trafo_matrix,\n np.array(\n [1., 0., 0., np.tan(a), 1., 0., 0.,\n 0., 1.]).reshape((3, 3)))\n else: # Assume matrix transformation\n while len(values) < 6:\n values += [0.]\n trafo_matrix = np.dot(trafo_matrix,\n np.array([values[::2], values[1::2],\n [0., 0., 1.]]))\n\n trafo_list = list(trafo_matrix.reshape((9,))[:6])\n return trafo_list[::3] + trafo_list[1::3] + trafo_list[2::3]\n\n def parse_node(node):\n \"\"\"Recursively iterate over nodes. 
Parse the groups individually to \n apply group transformations.\"\"\"\n # Get everything in this tag\n data = [parse_node(child) for child in node.childNodes]\n if len(data) == 0:\n ret_list = []\n attribute_dictionary_list_int = []\n else:\n # Flatten the lists\n ret_list = []\n attribute_dictionary_list_int = []\n for item in data:\n if type(item) == tuple:\n if len(item[0]) > 0:\n ret_list += item[0]\n attribute_dictionary_list_int += item[1]\n\n if node.nodeName == 'g':\n # Group found\n # Analyse group properties\n group = dom2dict(node)\n if 'transform' in group.keys():\n trafo = group['transform']\n\n # Convert all transformations into a matrix operation\n am = parse_trafo(trafo)\n am = np.array([am[::2], am[1::2], [0., 0., 1.]])\n\n # Apply transformation to all elements of the paths\n def xy(p):\n return np.array([p.real, p.imag, 1.])\n\n def z(coords):\n return coords[0] + 1j * coords[1]\n\n ret_list = [Path(*[bpoints2bezier([z(np.dot(am, xy(pt)))\n for pt in seg.bpoints()])\n for seg in path])\n for path in ret_list]\n return ret_list, attribute_dictionary_list_int\n elif node.nodeName == 'path':\n # Path found; parsing it\n path = dom2dict(node)\n d_string = path['d']\n return [parse_path(d_string)] + ret_list, [\n path] + attribute_dictionary_list_int\n elif convert_polylines_to_paths and node.nodeName == 'polyline':\n attrs = dom2dict(node)\n path = parse_path(polyline2pathd(node['points']))\n return [path] + ret_list, [attrs] + attribute_dictionary_list_int\n elif convert_polygons_to_paths and node.nodeName == 'polygon':\n attrs = dom2dict(node)\n path = parse_path(polygon2pathd(attrs['points']))\n return [path] + ret_list, [attrs] + attribute_dictionary_list_int\n elif convert_lines_to_paths and node.nodeName == 'line':\n line = dom2dict(node)\n d_string = ('M' + line['x1'] + ' ' + line['y1'] +\n 'L' + line['x2'] + ' ' + line['y2'])\n path = parse_path(d_string)\n return [path] + ret_list, [line] + attribute_dictionary_list_int\n elif convert_ellipses_to_paths and node.nodeName == 'ellipse':\n attrs = dom2dict(node)\n path = parse_path(ellipse2pathd(attrs))\n return [path] + ret_list, [attrs] + attribute_dictionary_list_int\n\t\telif convert_circles_to_paths and node.nodeName == 'circle':\n\t\t\tattrs = dom2dict(node)\n path = parse_path(ellipse2pathd(attrs))\n return [path] + ret_list, [attrs] + attribute_dictionary_list_int\n\t\telif convert_rectangles_to_paths and node.nodeName == 'rect':\n attrs = dom2dict(node)\n path = parse_path(rect2pathd(attrs))\n return [path] + ret_list, [attrs] + attribute_dictionary_list_int\n else:\n return ret_list, attribute_dictionary_list_int", "def completePath(path):\n return os.getcwd() + convertString(path)", "def create_svg(self, name_dict):\n s = StringIO.StringIO()\n for svg_line in open(self.options.input_file, 'r').readlines():\n # Modify the line to handle replacements from extension GUI\n svg_line = self.expand_extra_vars(svg_line, name_dict)\n # Modify the line to handle variables in svg file\n svg_line = self.expand_vars(svg_line, name_dict)\n s.write(svg_line)\n # Modify the svg to include or exclude groups\n root = etree.fromstring(s.getvalue())\n self.filter_layers(root, name_dict)\n svgout = self.get_svgout()\n try:\n f = open(svgout, 'w')\n f.write(etree.tostring(root,\n encoding='utf-8',\n xml_declaration=True))\n except IOError:\n errormsg(_('Cannot open \"' + svgout + '\" for writing'))\n finally:\n f.close()\n s.close()\n return svgout", "def get_full_res_path(path):\n path = re.sub(r'fill=((\\d)+x(\\d)+)\\/', '', 
path)\n return re.sub(r'fit=((\\d+)x(\\d+))?\\/', 'fit=100000x100000/', path)", "def format_path (in_path):\n return os.path.realpath(os.path.expanduser(in_path))", "def path_converter(text_path):\n\n out = ''\n out_list = []\n\n for i, t in enumerate(text_path):\n \n if t == ' ':\n out_list.append('\\\\ ')\n elif t == \"(\" :\n out_list.append('\\(')\n elif t == \")\" :\n out_list.append('\\)') \n else:\n out_list.append(t)\n \n return out.join(out_list)", "def get_icon_path(filepath: str, ico_name: str):\n ui_path = Path(filepath).parent\n icon_path = ui_path.joinpath(\"ico\", ico_name)\n return str(icon_path)", "def format_path(file: str) -> str:\n return os.path.abspath([file.replace('/', os.path.sep)][0])", "def output_svg(self, string_to_output):\n self._output_object.add_report(string_to_output)", "def triangles_svg_path(self):\n verts = self.vertices.split(',') # leave as string\n tris = [int(v) for v in self.triangles.split(',')]\n data = []\n for i in xrange(0, len(tris), 3):\n v0 = 2 * tris[i]\n v1 = 2 * tris[i + 1]\n v2 = 2 * tris[i + 2]\n data.append(u\"M%s,%sL%s,%sL%s,%sz\" % (\n verts[v0], verts[v0 + 1],\n verts[v1], verts[v1 + 1],\n verts[v2], verts[v2 + 1],\n ))\n return u\"\".join(data)", "def path_to_string(path, separator):\n i = 0\n path_string = \"Path :\" + separator + \"[\"\n while i < len(path):\n if isinstance(path[i], Firewall.Firewall):\n path_string += path[i].hostname\n elif isinstance(path[i], Ip.Ip):\n path_string += path[i].to_string()\n\n if i < len(path) - 1:\n path_string += \",\" + separator\n i += 1\n path_string += \"]\"\n\n return path_string", "def getpath(self, path):\n return self._join(path)", "def paster_in_svg(self, src, elem):\n loger.info(\"start svg pasting\")\n with open(src) as f:\n tree = etree.parse(f)\n root = tree.getroot()\n element = tree.xpath('image')\n\n if element:\n # Replaces <gco_CharacterString> text\n for key, value in element[0].attrib.iteritems():\n if value == 'avatar':\n # element[0].attrib[key] = os.path.abspath(elem)\n element[0].attrib[key] = \"/home/kryvonis/PycharmProjects/Book_Creator/image_end/1.png\"\n # Save back to the XML file\n etree.ElementTree(root).write(src, pretty_print=True)\n loger.info('svg created - OK')", "def svg2png (fName, width=600, app=None, oFilename=\"\"):\n from PyQt5.QtSvg import QSvgRenderer\n from PyQt5.QtGui import QImage, QPainter, QColor, QGuiApplication\n from math import sqrt\n\n if not app:\n app=QGuiApplication([])\n svg, w, h = openSVG(fName)\n groups = svg.getElementsByTagName(\"g\")\n scale = width/w\n for g in groups:\n if \"stroke-width\" in g.attributes:\n g.setAttribute(\"stroke-width\", str(float(g.getAttribute(\"stroke-width\"))/sqrt(scale)))\n qsr=QSvgRenderer(svg.toxml().encode(\"utf-8\"))\n img=QImage(int(w*scale), int(h*scale), QImage.Format_ARGB32)\n img.fill(QColor(\"white\"))\n p=QPainter(img)\n qsr.render(p)\n p.end()\n if not oFilename:\n oFilename = re.sub(r\"\\.svg$\", f\"-{width}px.png\", fName)\n img.save(oFilename)\n return oFilename", "def __merger_svg(self):\n pass", "def load_svg(file_path):\n assert os.path.exists(file_path)\n doc = parse(file_path)\n\n svg = doc.getElementsByTagName('svg')[0]\n svg_attributes = dom2dict(svg)\n\n defs = g = ''\n for i, tag in enumerate(svg.childNodes):\n if tag.localName == 'defs':\n defs = tag.toxml()\n if tag.localName == 'g':\n g = tag.toxml()\n\n doc.unlink()\n\n return defs, g, svg_attributes", "def _convert_all_svg_to_pdf(path):\n\n svg_files = get_files(path, u\"svg\", full_path=True)\n for svg_file in svg_files:\n 
pdf_file = f\"{svg_file.rsplit(u'.', 1)[0]}.pdf\"\n logging.info(f\"Converting {svg_file} to {pdf_file}\")\n execute_command(\n f\"inkscape -D -z --file={svg_file} --export-pdf={pdf_file}\"\n )", "def get_aug_path(file_path: str) -> str:\n return \"/files%s\" % file_path", "def resource_string(path):\n data = pkg_resources.resource_string(__name__, path)\n return data.decode(\"utf8\")", "def get_icon_path(name):\r\n # get paths\r\n paths = os.environ.get(\"XBMLANGPATH\")\r\n\r\n # validate paths\r\n if not paths:\r\n return\r\n\r\n # loop paths\r\n for path in paths.split(os.pathsep):\r\n icon_path = os.path.join(path, name)\r\n if os.path.exists(icon_path):\r\n return icon_path.replace(\"\\\\\", \"/\")", "def load_icons() -> str:\n return _read_text('icons-svg-inline.html')", "def render_svg(svg):\n b64 = base64.b64encode(svg.encode('utf-8')).decode(\"utf-8\")\n html = r'<img src=\"data:image/svg+xml;base64,%s\"/>' % b64\n st.write(html, unsafe_allow_html=True)", "def save_plot(p, file_name, path='../static/images/'):\n p.output_backend = \"svg\"\n export_svgs(p, filename=path + file_name + '.svg')", "def path2str(a,b,path):\n (s1,s2) = zip(*path)\n line1 = \" \"+\"\".join([get_char(s1,a,x) for x in range(1,len(s1))])\n line2 = \" \"+\"\".join([get_char(s2,b,x) for x in range(1,len(s2))])\n return \"%s\\n%s\"%(line1,line2)", "def format_path(path_string, selection):\n return path_string.format(selection, selection.namespace())", "def path(self, toNative=True):\n return self.text(toNative=toNative)", "def _repr_svg_(self):\n try:\n return self.mol._repr_svg_()\n except AttributeError:\n return None", "def _purepath_to_str(\n self, path: Union[Path, PurePath, str]\n ) -> Union[Path, PurePath, str]:\n if isinstance(path, PurePath):\n path = str(path)\n return path", "def abspath(path: str) -> str:\n pass", "def to_svg(self, outfile, scaling, precision, attributes):\n outfile.write('<g id=\"')\n outfile.write(self.name.replace(\"#\", \"_\"))\n outfile.write('\" ')\n outfile.write(attributes)\n outfile.write(\">\\n\")\n for polygon in self.polygons:\n polygon.to_svg(outfile, scaling, precision)\n for path in self.paths:\n path.to_svg(outfile, scaling, precision)\n for label in self.labels:\n label.to_svg(outfile, scaling, precision)\n for reference in self.references:\n reference.to_svg(outfile, scaling, precision)\n outfile.write(\"</g>\\n\")", "def convert(filename,\nRenderer: \"\"\"By default, the schematic is converted to an SVG file,\n written to the standard output. 
It may also be rendered using TK.\"\"\",\n):\n \n with open(filename, \"rb\") as file:\n objects = read(file)\n stat = os.stat(file.fileno())\n \n sheet = objects[1]\n assert sheet[\"RECORD\"] == Record.SHEET\n (sheetstyle, size) = {SheetStyle.A4: (\"A4\", (1150, 760)), SheetStyle.A3: (\"A3\", (1550, 1150)), SheetStyle.A: (\"A\", (950, 760))}[sheet.get(\"SHEETSTYLE\", SheetStyle.A4)]\n if \"USECUSTOMSHEET\" in sheet:\n size = tuple(int(sheet[\"CUSTOM\" + \"XY\"[x]]) for x in range(2))\n \n # Units are 1/100\" or 10 mils\n renderer = Renderer(size, \"in\", 1/100,\n margin=0.3, line=1, down=-1, textbottom=True)\n \n for n in range(int(sheet[\"FONTIDCOUNT\"])):\n n = format(1 + n)\n fontsize = int(sheet[\"SIZE\" + n]) * 0.875\n family = sheet[\"FONTNAME\" + n].decode(\"ascii\")\n kw = dict()\n italic = sheet.get(\"ITALIC\" + n)\n if italic:\n kw.update(italic=True)\n bold = sheet.get(\"BOLD\" + n)\n if bold:\n kw.update(bold=True)\n renderer.addfont(\"font\" + n, fontsize, family, **kw)\n renderer.setdefaultfont(\"font\" + sheet[\"SYSTEMFONT\"].decode(\"ascii\"))\n renderer.start()\n \n arrowhead = dict(base=5, shoulder=7, radius=3)\n arrowtail = dict(base=7, shoulder=0, radius=2.5)\n diamond = dict(base=10, shoulder=5, radius=2.5)\n \n pinmarkers = {\n PinElectrical.INPUT: arrowhead,\n PinElectrical.IO: diamond,\n PinElectrical.OUTPUT: arrowtail,\n PinElectrical.PASSIVE: None,\n PinElectrical.POWER: None,\n }\n \n def gnd(renderer):\n renderer.hline(10)\n renderer.vline(-7, +7, offset=(10, 0), width=1.5)\n renderer.vline(-4, +4, offset=(13, 0), width=1.5)\n renderer.vline(-1, +1, offset=(16, 0), width=1.5)\n def rail(renderer):\n renderer.hline(10)\n renderer.vline(-7, +7, offset=(10, 0), width=1.5)\n def arrowconn(renderer):\n renderer.hline(10, endarrow=arrowhead)\n def dchevron(renderer):\n renderer.hline(5)\n renderer.polyline(((8, +4), (5, 0), (8, -4)))\n renderer.polyline(((11, +4), (8, 0), (11, -4)))\n connmarkers = {\n PowerObjectStyle.ARROW: (arrowconn, 12),\n PowerObjectStyle.BAR: (rail, 12),\n PowerObjectStyle.GND: (gnd, 20),\n }\n \n def nc(renderer):\n renderer.line((+3, +3), (-3, -3), width=0.6)\n renderer.line((-3, +3), (+3, -3), width=0.6)\n renderer.addobjects((gnd, rail, arrowconn, dchevron, nc))\n \n with renderer.view(offset=(0, size[1])) as base:\n base.rectangle((size[0], -size[1]), width=0.6)\n base.rectangle((20, -20), (size[0] - 20, 20 - size[1]), width=0.6)\n for axis in range(2):\n for side in range(2):\n for n in range(4):\n translate = [None] * 2\n translate[axis] = size[axis] / 4 * (n + 0.5)\n translate[axis ^ 1] = 10\n if side:\n translate[axis ^ 1] += size[axis ^ 1] - 20\n translate[1] *= -1\n with base.view(offset=translate) as ref:\n label = chr(ord(\"1A\"[axis]) + n)\n ref.text(label, horiz=ref.CENTRE, vert=ref.CENTRE)\n if n + 1 < 4:\n x = size[axis] / 4 / 2\n if axis:\n ref.hline(-10, +10, offset=(0, -x),\n width=0.6)\n else:\n ref.vline(-10, +10, offset=(x, 0), width=0.6)\n \n if \"TITLEBLOCKON\" in sheet:\n if not os.path.isabs(filename):\n cwd = os.getcwd()\n pwd = os.getenv(\"PWD\")\n if os.path.samefile(pwd, cwd):\n cwd = pwd\n filename = os.path.join(pwd, filename)\n with base.view(offset=(size[0] - 20, 20 - size[1])) as block:\n points = ((-350, 0), (-350, 80), (-0, 80))\n block.polyline(points, width=0.6)\n block.hline(-350, 0, offset=(0, 50), width=0.6)\n block.vline(-30, offset=(-300, 50), width=0.6)\n block.vline(-30, offset=(-100, 50), width=0.6)\n block.hline(-350, 0, offset=(0, 20), width=0.6)\n block.hline(-350, 0, offset=(0, 10), 
width=0.6)\n block.vline(20, 0, offset=(-150, 0), width=0.6)\n \n block.text(\"Title\", (-345, 70))\n block.text(\"Size\", (-345, 40))\n block.text(sheetstyle, (-340, 30), vert=block.CENTRE)\n block.text(\"Number\", (-295, 40))\n block.text(\"Revision\", (-95, 40))\n block.text(\"Date\", (-345, 10))\n d = format(date.fromtimestamp(stat.st_mtime), \"%x\")\n block.text(d, (-300, 10))\n block.text(\"File\", (-345, 0))\n block.text(filename, (-300, 0))\n block.text(\"Sheet\", (-145, 10))\n block.text(\"of\", (-117, 10))\n block.text(\"Drawn By:\", (-145, 0))\n \n for obj in objects:\n if (obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"COLOR\"} and\n obj[\"RECORD\"] == Record.JUNCTION and obj.get(\"INDEXINSHEET\", b\"-1\") == b\"-1\" and obj[\"OWNERPARTID\"] == b\"-1\"):\n location = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n col = colour(obj[\"COLOR\"])\n renderer.circle(2, location, fill=col)\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"IOTYPE\", \"ALIGNMENT\"} == {\"RECORD\", \"OWNERPARTID\", \"STYLE\", \"WIDTH\", \"LOCATION.X\", \"LOCATION.Y\", \"COLOR\", \"AREACOLOR\", \"TEXTCOLOR\", \"NAME\", \"UNIQUEID\"} and\n obj[\"RECORD\"] == Record.PORT and obj[\"OWNERPARTID\"] == b\"-1\"):\n width = int(obj[\"WIDTH\"])\n if \"IOTYPE\" in obj:\n points = ((0, 0), (5, -5), (width - 5, -5),\n (width, 0), (width - 5, +5), (5, +5))\n else:\n points = ((0, -5), (width - 5, -5),\n (width, 0), (width - 5, +5), (0, +5))\n if (obj.get(\"ALIGNMENT\") == b\"2\") ^ (obj[\"STYLE\"] != b\"7\"):\n labelpoint = (10, 0)\n horiz = renderer.LEFT\n else:\n labelpoint = (width - 10, 0)\n horiz = renderer.RIGHT\n if obj[\"STYLE\"] == b\"7\":\n shapekw = dict(rotate=+90, offset=(0, +width))\n else:\n shapekw = dict()\n offset = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n with renderer.view(offset=offset) as view:\n view.polygon(points,\n width=0.6,\n outline=colour(obj[\"COLOR\"]),\n fill=colour(obj[\"AREACOLOR\"]),\n **shapekw)\n \n with contextlib.ExitStack() as context:\n if obj[\"STYLE\"] == b\"7\":\n view = context.enter_context(view.view(rotate=+1))\n view.text(\n overline(obj[\"NAME\"]),\n colour=colour(obj[\"TEXTCOLOR\"]),\n offset=labelpoint,\n vert=view.CENTRE, horiz=horiz,\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\"} >= {\"RECORD\", \"OWNERPARTID\", \"LINEWIDTH\", \"COLOR\", \"LOCATIONCOUNT\", \"X1\", \"Y1\", \"X2\", \"Y2\"} and\n obj[\"RECORD\"] == Record.WIRE and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"LINEWIDTH\"] == b\"1\"):\n points = list()\n for location in range(int(obj[\"LOCATIONCOUNT\"])):\n location = format(1 + location)\n points.append(tuple(int(obj[x + location]) for x in \"XY\"))\n renderer.polyline(points, colour=colour(obj[\"COLOR\"]))\n elif (obj.keys() == {\"RECORD\", \"OWNERINDEX\"} and\n obj[\"RECORD\"] in {b\"46\", b\"48\", b\"44\"} or\n obj.keys() - {\"USECOMPONENTLIBRARY\", \"DESCRIPTION\", \"DATAFILECOUNT\", \"MODELDATAFILEENTITY0\", \"MODELDATAFILEKIND0\", \"DATALINKSLOCKED\", \"DATABASEDATALINKSLOCKED\", \"ISCURRENT\", \"INDEXINSHEET\", \"INTEGRATEDMODEL\", \"DATABASEMODEL\"} == {\"RECORD\", \"OWNERINDEX\", \"MODELNAME\", \"MODELTYPE\"} and\n obj[\"RECORD\"] == b\"45\" and obj.get(\"INDEXINSHEET\", b\"-1\") == b\"-1\" and obj.get(\"USECOMPONENTLIBRARY\", b\"T\") == b\"T\" and obj[\"MODELTYPE\"] in {b\"PCBLIB\", b\"SI\", b\"SIM\", b\"PCB3DLib\"} and obj.get(\"DATAFILECOUNT\", b\"1\") == b\"1\" and obj.get(\"ISCURRENT\", b\"T\") == b\"T\" and obj.get(\"INTEGRATEDMODEL\", b\"T\") == b\"T\" and obj.get(\"DATABASEMODEL\", 
b\"T\") == b\"T\" and obj.get(\"DATALINKSLOCKED\", b\"T\") == b\"T\" and obj.get(\"DATABASEDATALINKSLOCKED\", b\"T\") == b\"T\" or\n obj.keys() >= {\"RECORD\", \"AREACOLOR\", \"BORDERON\", \"CUSTOMX\", \"CUSTOMY\", \"DISPLAY_UNIT\", \"FONTIDCOUNT\", \"FONTNAME1\", \"HOTSPOTGRIDON\", \"HOTSPOTGRIDSIZE\", \"ISBOC\", \"SHEETNUMBERSPACESIZE\", \"SIZE1\", \"SNAPGRIDON\", \"SNAPGRIDSIZE\", \"SYSTEMFONT\", \"USEMBCS\", \"VISIBLEGRIDON\", \"VISIBLEGRIDSIZE\"} and\n obj[\"RECORD\"] == Record.SHEET and obj[\"AREACOLOR\"] == b\"16317695\" and obj[\"BORDERON\"] == b\"T\" and obj.get(\"CUSTOMMARGINWIDTH\", b\"20\") == b\"20\" and obj.get(\"CUSTOMXZONES\", b\"6\") == b\"6\" and obj.get(\"CUSTOMYZONES\", b\"4\") == b\"4\" and obj[\"DISPLAY_UNIT\"] == b\"4\" and obj[\"FONTNAME1\"] == b\"Times New Roman\" and obj[\"HOTSPOTGRIDON\"] == b\"T\" and obj[\"ISBOC\"] == b\"T\" and obj[\"SHEETNUMBERSPACESIZE\"] == b\"4\" and obj[\"SIZE1\"] == b\"10\" and obj[\"SNAPGRIDON\"] == b\"T\" and obj[\"SYSTEMFONT\"] == b\"1\" and obj.get(\"TITLEBLOCKON\", b\"T\") == b\"T\" and obj[\"USEMBCS\"] == b\"T\" and obj[\"VISIBLEGRIDON\"] == b\"T\" and obj[\"VISIBLEGRIDSIZE\"] == b\"10\" or\n obj.keys() == {\"HEADER\", \"WEIGHT\"} and\n obj[\"HEADER\"] == b\"Protel for Windows - Schematic Capture Binary File Version 5.0\" or\n obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"DESIMP0\", \"DESIMPCOUNT\", \"DESINTF\", \"OWNERINDEX\"} and\n obj[\"RECORD\"] == b\"47\" and obj[\"DESIMPCOUNT\"] == b\"1\" or\n obj.keys() == {\"RECORD\", \"ISNOTACCESIBLE\", \"OWNERPARTID\", \"FILENAME\"} and\n obj[\"RECORD\"] == b\"39\" and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj[\"OWNERPARTID\"] == b\"-1\"):\n pass\n \n elif (obj.keys() - {\"ISMIRRORED\", \"ORIENTATION\", \"INDEXINSHEET\", \"COMPONENTDESCRIPTION\", \"SHEETPARTFILENAME\", \"DESIGNITEMID\", \"DISPLAYMODE\", \"NOTUSEDBTABLENAME\", \"LIBRARYPATH\"} == {\"RECORD\", \"OWNERPARTID\", \"UNIQUEID\", \"AREACOLOR\", \"COLOR\", \"CURRENTPARTID\", \"DISPLAYMODECOUNT\", \"LIBREFERENCE\", \"LOCATION.X\", \"LOCATION.Y\", \"PARTCOUNT\", \"PARTIDLOCKED\", \"SOURCELIBRARYNAME\", \"TARGETFILENAME\"} and\n obj[\"RECORD\"] == b\"1\" and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"AREACOLOR\"] == b\"11599871\" and obj[\"COLOR\"] == b\"128\" and obj[\"PARTIDLOCKED\"] == b\"F\" and obj[\"TARGETFILENAME\"] == b\"*\"):\n pass\n \n elif (obj.keys() - {\"TEXT\", \"OWNERINDEX\", \"ISHIDDEN\", \"READONLYSTATE\", \"INDEXINSHEET\", \"UNIQUEID\", \"LOCATION.X\", \"LOCATION.X_FRAC\", \"LOCATION.Y\", \"LOCATION.Y_FRAC\", \"ORIENTATION\", \"ISMIRRORED\"} == {\"RECORD\", \"OWNERPARTID\", \"COLOR\", \"FONTID\", \"NAME\"} and\n obj[\"RECORD\"] == Record.PARAMETER and obj[\"OWNERPARTID\"] == b\"-1\"):\n if obj.get(\"ISHIDDEN\") != b\"T\" and obj.keys() >= {\"TEXT\", \"LOCATION.X\", \"LOCATION.Y\"}:\n orient = obj.get(\"ORIENTATION\")\n kw = {\n None: dict(vert=renderer.BOTTOM, horiz=renderer.LEFT),\n b\"1\": dict(vert=renderer.BOTTOM, horiz=renderer.LEFT),\n b\"2\": dict(vert=renderer.TOP, horiz=renderer.RIGHT),\n }[orient]\n if orient == b\"1\":\n kw.update(angle=+90)\n val = obj[\"TEXT\"]\n if val.startswith(b\"=\"):\n match = val[1:].lower()\n for o in objects:\n if o.get(\"RECORD\") != Record.PARAMETER or o.get(\"OWNERINDEX\") != obj[\"OWNERINDEX\"]:\n continue\n if o[\"NAME\"].lower() != match:\n continue\n val = o[\"TEXT\"]\n break\n else:\n raise LookupError(\"Parameter value for |OWNERINDEX={}|TEXT={}\".format(obj[\"OWNERINDEX\"].decode(\"ascii\"), obj[\"TEXT\"].decode(\"ascii\")))\n 
renderer.text(val.decode(\"ascii\"),\n colour=colour(obj[\"COLOR\"]),\n offset=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n font=\"font\" + obj[\"FONTID\"].decode(\"ascii\"),\n **kw)\n else:\n text(renderer, obj, **kw)\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"ISMIRRORED\", \"LOCATION.X_FRAC\", \"LOCATION.Y_FRAC\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"COLOR\", \"FONTID\", \"TEXT\", \"NAME\", \"READONLYSTATE\"} and\n obj[\"RECORD\"] == Record.DESIGNATOR and obj[\"OWNERPARTID\"] == b\"-1\" and obj.get(\"INDEXINSHEET\", b\"-1\") == b\"-1\" and obj[\"NAME\"] == b\"Designator\" and obj[\"READONLYSTATE\"] == b\"1\"):\n desig = obj[\"TEXT\"].decode(\"ascii\")\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n if int(owner[\"PARTCOUNT\"]) > 2:\n desig += chr(ord(\"A\") + int(owner[\"CURRENTPARTID\"]) - 1)\n renderer.text(desig, (int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n colour=colour(obj[\"COLOR\"]),\n font=\"font\" + obj[\"FONTID\"].decode(),\n )\n \n elif (obj.keys() >= {\"RECORD\", \"OWNERPARTID\", \"OWNERINDEX\", \"LOCATIONCOUNT\", \"X1\", \"X2\", \"Y1\", \"Y2\"} and\n obj[\"RECORD\"] == Record.POLYLINE and obj.get(\"ISNOTACCESIBLE\", b\"T\") == b\"T\" and obj.get(\"LINEWIDTH\", b\"1\") == b\"1\"):\n if obj[\"OWNERPARTID\"] == b\"-1\":\n current = True\n else:\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n current = (obj[\"OWNERPARTID\"] == owner[\"CURRENTPARTID\"] and\n obj.get(\"OWNERPARTDISPLAYMODE\", b\"0\") == owner.get(\"DISPLAYMODE\", b\"0\"))\n if current:\n polyline(renderer, obj)\n \n elif (obj.keys() - {\"OWNERPARTDISPLAYMODE\", \"INDEXINSHEET\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"COLOR\", \"ISNOTACCESIBLE\", \"LINEWIDTH\", \"LOCATION.X\", \"LOCATION.Y\", \"CORNER.X\", \"CORNER.Y\"} and\n obj[\"RECORD\"] == Record.LINE and obj[\"ISNOTACCESIBLE\"] == b\"T\"):\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n if (obj[\"OWNERPARTID\"] == owner[\"CURRENTPARTID\"] and\n obj.get(\"OWNERPARTDISPLAYMODE\", b\"0\") == owner.get(\"DISPLAYMODE\", b\"0\")):\n renderer.line(\n colour=colour(obj[\"COLOR\"]),\n width=int(obj[\"LINEWIDTH\"]),\n a=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n b=(int(obj[\"CORNER.\" + x]) for x in \"XY\"),\n )\n \n elif (obj.keys() - {\"NAME\", \"SWAPIDPIN\", \"OWNERPARTDISPLAYMODE\", \"ELECTRICAL\", \"DESCRIPTION\", \"SWAPIDPART\", \"SYMBOL_OUTEREDGE\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"DESIGNATOR\", \"FORMALTYPE\", \"LOCATION.X\", \"LOCATION.Y\", \"PINCONGLOMERATE\", \"PINLENGTH\"} and\n obj[\"RECORD\"] == Record.PIN and obj[\"FORMALTYPE\"] == b\"1\"):\n if obj[\"OWNERPARTID\"] == objects[1 + int(obj[\"OWNERINDEX\"])][\"CURRENTPARTID\"]:\n pinlength = int(obj[\"PINLENGTH\"])\n pinconglomerate = int(obj[\"PINCONGLOMERATE\"])\n offset = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n rotate = pinconglomerate & 3\n with renderer.view(offset=offset, rotate=rotate) as view:\n kw = dict()\n points = list()\n if \"SYMBOL_OUTEREDGE\" in obj:\n view.circle(2.85, (3.15, 0), width=0.6)\n points.append(6)\n points.append(pinlength)\n electrical = obj.get(\"ELECTRICAL\", PinElectrical.INPUT)\n marker = pinmarkers[electrical]\n if marker:\n kw.update(startarrow=marker)\n view.hline(*points, **kw)\n \n if pinconglomerate >> 1 & 1:\n invert = -1\n kw = dict(angle=180)\n else:\n invert = +1\n kw = dict()\n if pinconglomerate & 8 and \"NAME\" in obj:\n view.text(overline(obj[\"NAME\"]),\n vert=view.CENTRE,\n horiz=view.RIGHT * invert,\n offset=(-7, 0),\n **kw)\n if pinconglomerate & 
16:\n designator = obj[\"DESIGNATOR\"].decode(\"ascii\")\n view.text(designator,\n horiz=view.LEFT * invert,\n offset=(+9, 0),\n **kw)\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"ORIENTATION\", \"STYLE\", \"ISCROSSSHEETCONNECTOR\"} == {\"RECORD\", \"OWNERPARTID\", \"COLOR\", \"LOCATION.X\", \"LOCATION.Y\", \"SHOWNETNAME\", \"TEXT\"} and\n obj[\"RECORD\"] == Record.POWER_OBJECT and obj[\"OWNERPARTID\"] == b\"-1\"):\n orient = obj.get(\"ORIENTATION\")\n if obj.get(\"ISCROSSSHEETCONNECTOR\") == b\"T\":\n marker = dchevron\n offset = 14\n else:\n (marker, offset) = connmarkers.get(obj[\"STYLE\"], (None, 0))\n \n col = colour(obj[\"COLOR\"])\n translate = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n with renderer.view(colour=col, offset=translate) as view:\n kw = dict()\n if orient:\n kw.update(rotate=int(orient))\n view.draw(marker, **kw)\n \n if obj[\"SHOWNETNAME\"] != b\"F\":\n orients = {\n b\"2\": (renderer.RIGHT, renderer.CENTRE, (-1, 0)),\n b\"3\": (renderer.CENTRE, renderer.TOP, (0, -1)),\n None: (renderer.LEFT, renderer.CENTRE, (+1, 0)),\n b\"1\": (renderer.CENTRE, renderer.BOTTOM, (0, +1)),\n }\n (horiz, vert, pos) = orients[orient]\n t = obj[\"TEXT\"].decode(\"ascii\")\n pos = (p * offset for p in pos)\n view.text(t, pos, horiz=horiz, vert=vert)\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"OWNERPARTDISPLAYMODE\", \"ISSOLID\", \"LINEWIDTH\", \"CORNERXRADIUS\", \"CORNERYRADIUS\", \"TRANSPARENT\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"AREACOLOR\", \"COLOR\", \"CORNER.X\", \"CORNER.Y\", \"ISNOTACCESIBLE\", \"LOCATION.X\", \"LOCATION.Y\"} and\n obj[\"RECORD\"] in {Record.RECTANGLE, Record.ROUND_RECTANGLE} and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj.get(\"ISSOLID\", b\"T\") == b\"T\"):\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n if (obj[\"OWNERPARTID\"] == owner[\"CURRENTPARTID\"] and\n obj.get(\"OWNERPARTDISPLAYMODE\", b\"0\") == owner.get(\"DISPLAYMODE\", b\"0\")):\n kw = dict(width=0.6, outline=colour(obj[\"COLOR\"]))\n if \"ISSOLID\" in obj:\n kw.update(fill=colour(obj[\"AREACOLOR\"]))\n a = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n b = (int(obj[\"CORNER.\" + x]) for x in \"XY\")\n \n if obj[\"RECORD\"] == Record.ROUND_RECTANGLE:\n r = list()\n for x in \"XY\":\n radius = obj.get(\"CORNER{}RADIUS\".format(x))\n if radius is None:\n radius = 0\n else:\n radius = int(radius)\n r.append(int(radius))\n renderer.roundrect(r, a, b, **kw)\n else:\n renderer.rectangle(a, b, **kw)\n \n elif (obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"OWNERPARTID\", \"COLOR\", \"FONTID\", \"LOCATION.X\", \"LOCATION.Y\", \"TEXT\"} and\n obj[\"RECORD\"] == Record.NET_LABEL and obj[\"OWNERPARTID\"] == b\"-1\"):\n renderer.text(overline(obj[\"TEXT\"]),\n colour=colour(obj[\"COLOR\"]),\n offset=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n font=\"font\" + obj[\"FONTID\"].decode(\"ascii\"),\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"OWNERPARTDISPLAYMODE\", \"STARTANGLE\", \"SECONDARYRADIUS\"} == {\"RECORD\", \"OWNERPARTID\", \"OWNERINDEX\", \"COLOR\", \"ENDANGLE\", \"ISNOTACCESIBLE\", \"LINEWIDTH\", \"LOCATION.X\", \"LOCATION.Y\", \"RADIUS\"} and\n obj[\"RECORD\"] in {Record.ARC, Record.ELLIPTICAL_ARC} and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj[\"LINEWIDTH\"] == b\"1\" and obj.get(\"OWNERPARTDISPLAYMODE\", b\"1\") == b\"1\"):\n owner = objects[1 + int(obj[\"OWNERINDEX\"])]\n if (owner[\"CURRENTPARTID\"] == obj[\"OWNERPARTID\"] and\n owner.get(\"DISPLAYMODE\", b\"0\") == obj.get(\"OWNERPARTDISPLAYMODE\", b\"0\")):\n r = int(obj[\"RADIUS\"])\n if obj[\"RECORD\"] == 
Record.ELLIPTICAL_ARC:\n r2 = obj.get(\"SECONDARYRADIUS\")\n if r2 is None:\n r2 = 0\n else:\n r2 = int(r2)\n else:\n r2 = r\n \n start = float(obj.get(\"STARTANGLE\", 0))\n end = float(obj[\"ENDANGLE\"])\n centre = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n renderer.arc((r, r2), start, end, centre,\n colour=colour(obj[\"COLOR\"]),\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"LINEWIDTH\"} > {\"RECORD\", \"AREACOLOR\", \"COLOR\", \"ISNOTACCESIBLE\", \"ISSOLID\", \"LOCATIONCOUNT\", \"OWNERINDEX\", \"OWNERPARTID\"} and\n obj[\"RECORD\"] == Record.POLYGON and obj[\"AREACOLOR\"] == b\"16711680\" and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj[\"ISSOLID\"] == b\"T\" and obj.get(\"LINEWIDTH\", b\"1\") == b\"1\" and obj[\"OWNERPARTID\"] == b\"1\"):\n points = list()\n for location in range(int(obj[\"LOCATIONCOUNT\"])):\n location = format(1 + location)\n points.append(tuple(int(obj[x + location]) for x in \"XY\"))\n renderer.polygon(fill=colour(obj[\"COLOR\"]), points=points)\n elif (obj.keys() - {\"INDEXINSHEET\", \"ISNOTACCESIBLE\", \"OWNERINDEX\", \"ORIENTATION\", \"JUSTIFICATION\", \"COLOR\"} == {\"RECORD\", \"FONTID\", \"LOCATION.X\", \"LOCATION.Y\", \"OWNERPARTID\", \"TEXT\"} and\n obj[\"RECORD\"] == Record.LABEL):\n if obj[\"OWNERPARTID\"] == b\"-1\" or obj[\"OWNERPARTID\"] == objects[1 + int(obj[\"OWNERINDEX\"])][\"CURRENTPARTID\"]:\n text(renderer, obj)\n elif (obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"COLOR\", \"LOCATION.X\", \"LOCATION.Y\", \"OWNERPARTID\"} and\n obj[\"RECORD\"] == b\"22\" and obj[\"OWNERPARTID\"] == b\"-1\"):\n col = colour(obj[\"COLOR\"])\n location = (int(obj[\"LOCATION.\" + x]) for x in \"XY\")\n renderer.draw(nc, location, colour=col)\n elif (obj.keys() - {\"CLIPTORECT\"} == {\"RECORD\", \"ALIGNMENT\", \"AREACOLOR\", \"CORNER.X\", \"CORNER.Y\", \"FONTID\", \"ISSOLID\", \"LOCATION.X\", \"LOCATION.Y\", \"OWNERPARTID\", \"Text\", \"WORDWRAP\"} and\n obj[\"RECORD\"] == b\"28\" and obj[\"ALIGNMENT\"] == b\"1\" and obj[\"AREACOLOR\"] == b\"16777215\" and obj.get(\"CLIPTORECT\", b\"T\") == b\"T\" and obj[\"ISSOLID\"] == b\"T\" and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"WORDWRAP\"] == b\"T\"):\n lhs = int(obj[\"LOCATION.X\"])\n renderer.text(\n font=\"font\" + obj[\"FONTID\"].decode(\"ascii\"),\n offset=(lhs, int(obj[\"CORNER.Y\"])),\n width=int(obj[\"CORNER.X\"]) - lhs,\n text=obj[\"Text\"].decode(\"ascii\").replace(\"~1\", \"\\n\"),\n vert=renderer.TOP,\n )\n \n elif (obj.keys() == {\"RECORD\", \"OWNERINDEX\", \"ISNOTACCESIBLE\", \"OWNERPARTID\", \"LINEWIDTH\", \"COLOR\", \"LOCATIONCOUNT\", \"X1\", \"Y1\", \"X2\", \"Y2\", \"X3\", \"Y3\", \"X4\", \"Y4\"} and\n obj[\"RECORD\"] == Record.BEZIER and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj[\"OWNERPARTID\"] == b\"1\" and obj[\"LINEWIDTH\"] == b\"1\" and obj[\"LOCATIONCOUNT\"] == b\"4\"):\n col = colour(obj[\"COLOR\"])\n points = list()\n for n in range(4):\n n = format(1 + n)\n points.append(tuple(int(obj[x + n]) for x in \"XY\"))\n renderer.cubicbezier(*points, colour=col)\n \n elif (obj.keys() - {\"RADIUS_FRAC\", \"SECONDARYRADIUS_FRAC\"} == {\"RECORD\", \"OWNERINDEX\", \"ISNOTACCESIBLE\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"RADIUS\", \"SECONDARYRADIUS\", \"COLOR\", \"AREACOLOR\", \"ISSOLID\"} and\n obj[\"RECORD\"] == Record.ELLIPSE and obj[\"ISNOTACCESIBLE\"] == b\"T\" and obj.get(\"RADIUS_FRAC\", b\"94381\") == b\"94381\" and obj[\"SECONDARYRADIUS\"] == obj[\"RADIUS\"] and obj.get(\"SECONDARYRADIUS_FRAC\", b\"22993\") == b\"22993\" and obj[\"ISSOLID\"] == b\"T\"):\n renderer.circle(\n 
r=int(obj[\"RADIUS\"]),\n width=0.6,\n outline=colour(obj[\"COLOR\"]), fill=colour(obj[\"AREACOLOR\"]),\n offset=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\", \"SYMBOLTYPE\"} == {\"RECORD\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"XSIZE\", \"YSIZE\", \"COLOR\", \"AREACOLOR\", \"ISSOLID\", \"UNIQUEID\"} and\n obj[\"RECORD\"] == Record.SHEET_SYMBOL and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"ISSOLID\"] == b\"T\" and obj.get(\"SYMBOLTYPE\", b\"Normal\") == b\"Normal\"):\n renderer.rectangle((int(obj[\"XSIZE\"]), -int(obj[\"YSIZE\"])),\n width=0.6,\n outline=colour(obj[\"COLOR\"]), fill=colour(obj[\"AREACOLOR\"]),\n offset=(int(obj[\"LOCATION.\" + x]) for x in \"XY\"),\n )\n \n elif (obj.keys() - {\"INDEXINSHEET\"} == {\"RECORD\", \"OWNERINDEX\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"COLOR\", \"FONTID\", \"TEXT\"} and\n obj[\"RECORD\"] in {Record.SHEET_NAME, Record.SHEET_FILE_NAME} and obj.get(\"INDEXINSHEET\", b\"-1\") == b\"-1\" and obj[\"OWNERPARTID\"] == b\"-1\"):\n text(renderer, obj)\n \n elif (obj.keys() == {\"RECORD\", \"OWNERINDEX\", \"INDEXINSHEET\", \"OWNERPARTID\", \"LOCATION.X\", \"LOCATION.Y\", \"CORNER.X\", \"CORNER.Y\", \"EMBEDIMAGE\", \"FILENAME\"} and\n obj[\"RECORD\"] == Record.IMAGE and obj[\"OWNERINDEX\"] == b\"1\" and obj[\"OWNERPARTID\"] == b\"-1\" and obj[\"EMBEDIMAGE\"] == b\"T\" and obj[\"FILENAME\"] == b\"newAltmLogo.bmp\"):\n location = list()\n corner = list()\n for x in \"XY\":\n location.append(int(obj[\"LOCATION.\" + x]))\n corner.append(int(obj[\"CORNER.\" + x]))\n renderer.rectangle(location, corner, width=0.6)\n \n else:\n print(\"\".join(\"|{}={!r}\".format(p, v) for (p, v) in sorted(obj.items())), file=stderr)\n \n renderer.finish()", "def fix_svg(svg):\n xml = ET.fromstring(svg)\n for x in xml.findall('path'):\n x.attrib['fill'] = '#ffffff'\n x.attrib['stroke-width'] = '1'\n x.attrib['opacity'] = '1'\n x.attrib['stroke'] = '#ff0000'\n return ET.tostring(xml)", "def vsi_path(path): # -> Any | str:\n ...", "def escape_path(path):\n if ' ' in path and not (path.startswith('\"') and path.endswith('\"')):\n return '\"' + path + '\"'\n else:\n return path", "def _GeneratePathStr(path):\n return ((len(path) - 1) * ' ') + path[-1] if path else ''", "def create_svg(svg_tag, img_width, img_height, out_path):\n script_dir = utils.get_script_dir()\n svg_template_path = utils.join_paths_str(script_dir, \"./templates/template.svg\")\n with open(svg_template_path, \"rt\") as fin:\n with open(out_path, \"wt\") as fout:\n for line in fin:\n fout.write(\n line.replace(\"INSERT_WIDTH\", str(img_width))\n .replace(\"INSERT_HEIGHT\", str(img_height))\n .replace(\"INSERT_OBJECT\", svg_tag)\n )", "def make_image(self, path):\n\t\treturn self.ui.get_icon(path)", "def svg(self) -> str:\n return SPOUSE_LINK_TEMPLATE.format(**self._fields)", "def convert(\n path_in, path_out, pathway_iri, wp_id, pathway_version, scale=100, theme=\"plain\"\n):\n if not path.exists(path_in):\n raise Exception(f\"Missing file '{path_in}'\")\n\n if path.exists(path_out):\n print(f\"File {path_out} already exists. 
Skipping.\")\n return True\n\n dir_in = path.dirname(path_in)\n base_in = path.basename(path_in)\n # example base_in: 'WP4542.gpml'\n [stub_in, ext_in_with_dot] = path.splitext(base_in)\n # gettting rid of the leading dot, e.g., '.gpml' to 'gpml'\n ext_in = LEADING_DOT_RE.sub(\"\", ext_in_with_dot)\n\n if ext_in != \"gpml\":\n # TODO: how about *.gpml.xml?\n raise Exception(f\"Currently only accepting *.gpml for path_in\")\n gpml_f = path_in\n\n dir_out = path.dirname(path_out)\n # example base_out: 'WP4542.svg'\n base_out = path.basename(path_out)\n [stub_out, ext_out_with_dot] = path.splitext(base_out)\n # getting rid of the leading dot, e.g., '.svg' to 'svg'\n ext_out = LEADING_DOT_RE.sub(\"\", ext_out_with_dot)\n\n tree = ET.parse(gpml_f, parser=parser)\n root = tree.getroot()\n\n if root is None:\n raise Exception(\"no root element\")\n if root.tag is None:\n raise Exception(\"no root tag\")\n\n gpml_version = re.sub(r\"{http://pathvisio.org/GPML/(\\w+)}Pathway\", r\"\\1\", root.tag)\n if ext_out != \"gpml\" and gpml_version != LATEST_GPML_VERSION:\n old_f = f\"{dir_in}/{stub_in}.{gpml_version}.gpml\"\n rename(gpml_f, old_f)\n convert(old_f, gpml_f, pathway_iri, wp_id, pathway_version, scale)\n\n # trying to get wd ids via sparql via pywikibot\n site = pywikibot.Site(\"wikidata\", \"wikidata\")\n repo = site.data_repository() # this is a DataSite object\n wd_sparql = sparql.SparqlQuery(\n endpoint=\"https://query.wikidata.org/sparql\", repo=repo\n )\n # (self, endpoint=None, entity_url=None, repo=None, 2 max_retries=None, retry_wait=None)\n\n if ext_out in [\"gpml\", \"owl\", \"pdf\", \"pwf\", \"txt\"]:\n subprocess.run(shlex.split(f\"pathvisio convert {path_in} {path_out}\"))\n elif ext_out == \"png\":\n # TODO: look at using --scale as an option (instead of an argument),\n # for both pathvisio and gpmlconverter.\n # TODO: move the setting of a default value for scale into\n # pathvisio instead of here.\n subprocess.run(shlex.split(f\"pathvisio convert {path_in} {path_out} {scale}\"))\n # Use interlacing? See https://github.com/PathVisio/pathvisio/issues/78\n # It's probably not worthwhile. 
If we did it, we would need to install\n # imagemagick and then run this:\n # mv \"$path_out\" \"$path_out.noninterlaced.png\"\n # convert -interlace PNG \"$path_out.noninterlaced.png\" \"$path_out\"\n elif ext_out in [\"json\", \"jsonld\"]:\n gpml2json(path_in, path_out, pathway_iri, wp_id, pathway_version, wd_sparql)\n elif ext_out in [\"svg\", \"pvjssvg\"]:\n #############################\n # SVG\n #############################\n\n json_f = f\"{dir_out}/{stub_in}.json\"\n if not path.isfile(json_f):\n gpml2json(path_in, json_f, pathway_iri, wp_id, pathway_version, wd_sparql)\n\n json2svg(json_f, path_out, pathway_iri, wp_id, pathway_version, theme)\n else:\n raise Exception(f\"Invalid output extension: '{ext_out}'\")", "def render_path(path_to_item):\n result = \"\"\n for pth in path_to_item:\n if isinstance(pth, six.integer_types):\n result += \"[{0}]\".format(pth)\n else:\n result += \"['{0}']\".format(pth)\n return result", "def render_path(path_to_item):\n result = \"\"\n for pth in path_to_item:\n if isinstance(pth, six.integer_types):\n result += \"[{0}]\".format(pth)\n else:\n result += \"['{0}']\".format(pth)\n return result", "def render_path(path_to_item):\n result = \"\"\n for pth in path_to_item:\n if isinstance(pth, six.integer_types):\n result += \"[{0}]\".format(pth)\n else:\n result += \"['{0}']\".format(pth)\n return result", "def get_resource(filename: str, path: str | None = None) -> str:\n root = Path(__file__).parent\n full_path = root if path is None else root / Path(path)\n return str(full_path / filename)", "def ps2svg_string(sPostscript):\n\n def group_numbers(result, times = 1):\n nums = []\n for sNum in result.groups():\n if re.match(r'[a-zA-Z]+', sNum):\n # This is just a string\n nums.append(sNum)\n else:\n # This must be a floating point number\n nums.append(\"{:.6f}\".format(times * float(sNum) ))\n return nums\n\n sBack = \"\"\n lst_out = []\n oErr = ErrHandle()\n path_style = \"fill:none;stroke:#000000;stroke-width:16;stroke-linecap:round;stroke-linejoin:round;stroke-miterlimit:10;stroke-dasharray:none;stroke-opacity:1\"\n point_style = \"font-variant:normal;font-weight:normal;font-size:13.39669991px;font-family:Times;-inkscape-font-specification:Times-Roman;writing-mode:lr-tb;fill:#0000FF;fill-opacity:1;fill-rule:nonzero;stroke:none\"\n try:\n # Recognize the initial lines we are looking for\n re_Line = re.compile( r'^\\s+([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+l$')\n re_point = re.compile(r'^([0-9]+\\.?[0-9]*)\\s+([0-9]+\\.?[0-9]*)\\s+translate\\s+([0-9]+\\.?[0-9]*)\\s+rotate$')\n re_label = re.compile(r'^\\(([a-zA-Z]+)\\)\\s+show$')\n\n lst_out.append(sIntro)\n\n # Split into lines\n lines = sPostscript.split(\"\\n\")\n section = \"pre\"\n idx = 14\n point_info = []\n bFirstPoint = True\n oorsprong = dict(x=0.0, y=0.0)\n for line in lines:\n # Check if we have a line \n if section == \"pre\":\n result = re_Line.search(line)\n if result:\n section = \"lines\"\n else:\n # We are not in a lines section\n pass\n if section == \"lines\":\n result = re_Line.search(line)\n if result:\n nums = group_numbers(result, 10)\n # Convert into path line\n sPathLine = '<path id=\"path{}\" style=\"{}\" d=\"M {},{} {},{}\" />'.format(\n idx, path_style, nums[0], nums[1], nums[2], nums[3])\n idx += 2\n lst_out.append(sPathLine)\n else:\n # We have exited the lines section\n section = \"point\"\n lst_out.append('<g transform=\"scale(10)\" id=\"g{}\">'.format(idx))\n idx += 2\n elif section == \"point\":\n # Look for a 
point\n result = re_point.search(line)\n if result:\n # We have found a point: get it in\n nums = group_numbers(result, 1)\n\n # Is this the first point?\n if bFirstPoint:\n lst_out.append('<text id=\"text{}\" style=\"{}\" transform=\"matrix(1,0,0,-1,{},{})\">'.format(\n idx, point_style, nums[0], nums[1]))\n idx += 2\n oorsprong['x'] = float(nums[0])\n oorsprong['y'] = float(nums[1])\n bFirstPoint = False\n\n # In all situations: position w.r.t. oorsprong\n pos_x = \"{:.6f}\".format(float(nums[0]) - oorsprong['x']) \n pos_y = \"{:.6f}\".format(oorsprong['y'] - float(nums[1]) )\n point_info.append(pos_y)\n point_info.append(pos_x)\n\n section = \"label\"\n elif section == \"label\":\n # Look for a label\n result = re_label.search(line)\n if result:\n # we have found a label: get it\n sLabel = result.groups()[0]\n point_info.append(sLabel)\n\n # Output this label\n sLabel = '<tspan id=\"tspan{}\" y=\"{}\" x=\"{}\">{}</tspan>'.format(\n idx, pos_y, pos_x, sLabel)\n idx += 2\n lst_out.append(sLabel)\n\n section = \"point\"\n point_info = []\n\n # Finish up the svg nicely\n lst_out.append(\" </text>\")\n lst_out.append(\" </g>\")\n lst_out.append(\" </g>\")\n lst_out.append(\" </g>\")\n lst_out.append(\"</svg>\")\n # Convert the list into a string\n sBack = \"\\n\".join(lst_out)\n except:\n msg = oErr.get_error_message()\n oErr.DoError(\"ps2svg\")\n\n # Return what we have gathered\n return sBack", "def pathToURI(path):\n ret = libxml2mod.xmlPathToURI(path)\n return ret", "def lightenSvgFile(inFname, outFname=\"\"):\n if not outFname:\n # create outFname based on inFname\n if \"-screen\" in inFname:\n outFname = inFname.replace(\"-screen\", \"-print\")\n else:\n outFname = inFname.replace(\".svg\", \"\")+\"-print.svg\"\n if \"-dark\" in outFname:\n outFname = outFname.replace(\"-dark\", \"\")\n with open(inFname) as infile, open(outFname,\"w\") as outfile:\n l=infile.readline()\n while l:\n outfile.write(ligthenOneLine(l))\n l=infile.readline()\n return outFname", "def save(filename, canvas):\n data = write_svg.to_string(canvas).encode('utf-8')\n with gzip.open(filename, 'wb') as f:\n f.write(data)", "def loadFile(filterExt):\n basicFilter = \"*.\" + filterExt\n filePath = fileDialog2(fileFilter=basicFilter, dialogStyle=2, fm=1)\n if(filePath != None):\n #openfile = open('/Users/camtton/Desktop/drawing.svg', 'r')\n tokens = getSVGpath(filePath[0])\n return tokens\n else:\n print 'Please select a %s file'%(filterExt)", "def to_svg(self, outfile, scaling, precision):\n if isinstance(self.ref_cell, Cell):\n name = self.ref_cell.name\n else:\n name = self.ref_cell\n transform = \"translate({} {})\".format(\n numpy.format_float_positional(\n scaling * self.origin[0], trim=\"0\", precision=precision\n ),\n numpy.format_float_positional(\n scaling * self.origin[1], trim=\"0\", precision=precision\n ),\n )\n if self.rotation is not None:\n transform += \" rotate({})\".format(\n numpy.format_float_positional(\n self.rotation, trim=\"0\", precision=precision\n )\n )\n if self.x_reflection:\n transform += \" scale(1 -1)\"\n if self.magnification is not None:\n transform += \" scale({})\".format(\n numpy.format_float_positional(\n self.magnification, trim=\"0\", precision=precision\n )\n )\n outfile.write('<use transform=\"')\n outfile.write(transform)\n outfile.write('\" xlink:href=\"#')\n outfile.write(name.replace(\"#\", \"_\"))\n outfile.write('\"/>\\n')", "def render_path(path_to_item):\n result = \"\"\n for pth in path_to_item:\n if isinstance(pth, int):\n result += \"[{0}]\".format(pth)\n else:\n 
result += \"['{0}']\".format(pth)\n return result", "def get_filename(filepath):\n return filepath.replace(\"{}\\\\\".format(RES_DIR), \"\")", "def pathToFileName(self, path):\n\t\t# Find the path, and strip the leading slash.\n\t\tpath =urlparse.urlparse(self.path)[2].lstrip(\"/\")\n\t\t# Process url escape codes, and normalize the path.\n\t\tpath = os.path.normpath(urllib2.unquote(path))\n\t\t# normpath strips the last slash\n\t\tif os.path.isdir(path):\n\t\t\treturn path + '/'\n\t\telse:\n\t\t\treturn path", "def json2svg(json_f, path_out, pathway_iri, wp_id, pathway_version, theme):\n\n dir_out = path.dirname(path_out)\n # example base_out: 'WP4542.svg'\n base_out = path.basename(path_out)\n [stub_out, ext_out_with_dot] = path.splitext(base_out)\n\n pvjs_cmd = f\"pvjs --theme {theme}\"\n with open(json_f, \"r\") as f_in:\n with open(path_out, \"w\") as f_out:\n pvjs_ps = subprocess.Popen(\n shlex.split(pvjs_cmd), stdin=f_in, stdout=f_out, shell=False\n )\n pvjs_ps.communicate()[0]\n\n tree = ET.parse(path_out, parser=parser)\n root = tree.getroot()\n\n #############################\n # SVG > .svg\n #############################\n\n # TODO: make the stand-alone SVGs work for upload to WM Commons:\n # https://www.mediawiki.org/wiki/Manual:Coding_conventions/SVG\n # https://commons.wikimedia.org/wiki/Help:SVG\n # https://commons.wikimedia.org/wiki/Commons:Commons_SVG_Checker?withJS=MediaWiki:CommonsSvgChecker.js\n # W3 validator: http://validator.w3.org/#validate_by_upload+with_options\n\n # WM says: \"the recommended image height is around 400–600 pixels. When a\n # user views the full size image, a width of 600–800 pixels gives\n # them a good close-up view\"\n # https://commons.wikimedia.org/wiki/Help:SVG#Frequently_asked_questions\n root.set(\"width\", \"800px\")\n root.set(\"height\", \"600px\")\n\n # TODO: verify that all of the following cases are now correctly handled in pvjs\n for style_el in root.findall(\".//style\"):\n if not style_el.text == \"\":\n raise Exception(\"Expected empty style sheets.\")\n for el in root.findall(\".//pattern[@id='PatternQ47512']\"):\n raise Exception(\"Unexpected pattern.\")\n\n edge_warning_sent = False\n for el in root.xpath(\n \".//svg:g/svg:g[contains(@class,'Edge')]/svg:g\", namespaces=SVG_NS\n ):\n if not edge_warning_sent:\n print(\"TODO: update pvjs to avoid having nested g elements for edges.\")\n edge_warning_sent = True\n # raise Exception(\"Unexpected nested g element for edge.\")\n\n for el in root.xpath(\n \"/svg:svg/svg:g/svg:g[contains(@class,'Edge')]/svg:path/@style\",\n namespaces=SVG_NS,\n ):\n raise Exception(\n \"Unexpected style attribute on path element for edge.\", namespaces=SVG_NS\n )\n\n for el in root.xpath(\n \"/svg:svg/svg:defs/svg:g[@id='jic-defs']/svg:svg/svg:defs\", namespaces=SVG_NS\n ):\n raise Exception(\"Unexpected nested svg for defs.\")\n\n for el in root.findall(\".//defs/g[@id='jic-defs']/svg/defs\"):\n raise Exception(\"Unexpected nested svg for defs.\")\n\n for el in root.xpath(\n \".//svg:g/svg:g[contains(@class,'Edge')]/svg:path/@style\", namespaces=SVG_NS\n ):\n raise Exception(\"Unexpected style attribute on path element for edge.\")\n\n # TODO: should any of this be in pvjs instead?\n style_selector = (\n \"[@style='color:inherit;fill:inherit;fill-opacity:inherit;stroke:inherit;stroke-width:inherit']\"\n )\n for el_parent in root.findall(f\".//*{style_selector}/..\"):\n stroke_width = el_parent.attrib.get(\"stroke-width\", 1)\n for el in root.findall(f\".//*{style_selector}\"):\n el.set(\n 
\"style\",\n f\"color:inherit;fill:inherit;fill-opacity:inherit;stroke:inherit;stroke-width:{str(stroke_width)}\",\n )\n\n for el in root.findall(\".//*[@filter='url(#kaavioblackto000000filter)']\"):\n el.attrib.pop(\"filter\", None)\n\n for image_parent in root.findall(\".//*image/..\"):\n images = image_parent.findall(\"image\")\n for image in images:\n image_parent.remove(image)\n\n # TODO: do the attributes \"filter\" \"fill\" \"fill-opacity\" \"stroke\" \"stroke-dasharray\" \"stroke-width\"\n # on the top-level g element apply to the g elements for edges?\n\n # TODO: do the attributes \"color\" \"fill\" \"fill-opacity\" \"stroke\" \"stroke-dasharray\" \"stroke-width\"\n # on the top-level g element apply to the path elements for edges?\n\n # TODO: Which of the following is correct?\n # To make the SVG file independent of Arial, change all occurrences of\n # font-family: Arial to font-family: 'Liberation Sans', Arial, sans-serif\n # https://commons.wikimedia.org/wiki/Help:SVG#fallback\n # vs.\n # Phab:T64987, Phab:T184369, Gnome #95; font-family=\"'font name'\"\n # (internally quoted font family name) does not work\n # (File:Mathematical_implication_diagram-alt.svg, File:T184369.svg)\n # https://commons.wikimedia.org/wiki/Commons:Commons_SVG_Checker?withJS=MediaWiki:CommonsSvgChecker.js\n\n # Liberation Sans is the open replacement for Arial, but its kerning\n # has some issues, at least as processed by librsvg.\n # An alternative that is also supported MW is DejaVu Sans. Using\n # transform=\"scale(0.92,0.98)\"\n # might yield better kerning and take up about the same amount of space.\n\n # Long-term, should we switch our default font from Arial to something prettier?\n # It would have to be a well-supported font.\n # This page <https://commons.wikimedia.org/wiki/Help:SVG#fallback> says:\n # On Commons, librsvg has the fonts listed in:\n # https://meta.wikimedia.org/wiki/SVG_fonts#Latin_(basic)_fonts_comparison\n # ...\n # In graphic illustrations metric exact text elements are often important\n # and Arial can be seen as de-facto standard for such a feature.\n\n for el in root.xpath(\".//*[contains(@font-family,'Arial')]\", namespaces=SVG_NS):\n el.set(\"font-family\", \"'Liberation Sans', Arial, sans-serif\")\n\n # TODO: do we need to specify fill=currentColor for any elements?\n\n for el in root.xpath(\".//svg:defs//svg:marker//*[not(@fill)]\", namespaces=SVG_NS):\n el.set(\"fill\", \"currentColor\")\n\n for el in root.xpath(\".//svg:text[@stroke-width='0.05px']\", namespaces=SVG_NS):\n el.attrib.pop(\"stroke-width\", None)\n\n for el in root.xpath(\".//svg:text[@overflow]\", namespaces=SVG_NS):\n el.attrib.pop(\"overflow\", None)\n\n for el in root.xpath(\".//svg:text[@dominant-baseline]\", namespaces=SVG_NS):\n el.attrib.pop(\"dominant-baseline\", None)\n\n for el in root.xpath(\".//svg:text[@clip-path]\", namespaces=SVG_NS):\n el.attrib.pop(\"clip-path\", None)\n\n FONT_SIZE_RE = re.compile(r\"^([0-9.]*)px$\")\n # TRANSLATE_RE = re.compile(r\"^translate[(]([0-9.]*),([0-9.]*)[)]$\")\n TRANSLATE_RE = re.compile(r\"^translate\\(([0-9.]*),([0-9.]*)\\)$\")\n # We are pushing the text down based on font size.\n # This is needed because librsvg doesn't support attribute \"alignment-baseline\".\n\n for el in root.xpath(\".//svg:text[@font-size]\", namespaces=SVG_NS):\n font_size_full = el.attrib.get(\"font-size\")\n font_size_matches = re.search(FONT_SIZE_RE, font_size_full)\n if font_size_matches:\n font_size = float(font_size_matches.group(1))\n\n if not font_size:\n font_size = 5\n\n 
x_translation = None\n y_translation = None\n transform_full = el.attrib.get(\"transform\")\n if transform_full:\n translate_matches = re.search(TRANSLATE_RE, transform_full)\n if translate_matches:\n x_translation = float(translate_matches.group(1))\n y_translation_uncorrected = float(translate_matches.group(2))\n\n if not x_translation:\n x_translation = 0\n y_translation_uncorrected = 0\n\n y_translation_corrected = font_size / 3 + y_translation_uncorrected\n el.set(\"transform\", f\"translate({x_translation},{y_translation_corrected})\")\n\n # Add link outs\n WIKIDATA_CLASS_RE = re.compile(\"Wikidata_Q[0-9]+\")\n for el in root.xpath(\".//*[contains(@class,'DataNode')]\", namespaces=SVG_NS):\n wikidata_classes = list(\n filter(WIKIDATA_CLASS_RE.match, el.attrib.get(\"class\").split(\" \"))\n )\n if len(wikidata_classes) > 0:\n # if there are multiple, we just link out to the first\n wikidata_id = wikidata_classes[0].replace(\"Wikidata_\", \"\")\n el.tag = \"{http://www.w3.org/2000/svg}a\"\n # linkout_base = \"https://www.wikidata.org/wiki/\"\n linkout_base = \"https://scholia.toolforge.org/\"\n el.set(\"{http://www.w3.org/1999/xlink}href\", linkout_base + wikidata_id)\n\n # make linkout open in new tab/window\n el.set(\"target\", \"_blank\")\n\n ###########\n # Run SVGO\n ###########\n\n pre_svgo_svg_f = f\"{dir_out}/{stub_out}.pre_svgo.svg\"\n tree.write(pre_svgo_svg_f)\n\n tree.write(path_out)\n args = shlex.split(\n f'svgo --multipass --config \"{SCRIPT_DIR}/svgo-config.json\" {path_out}'\n )\n subprocess.run(args)\n\n #########################################\n # Future enhancements for pretty version\n #########################################\n\n # TODO: convert the following bash code into Python\n\n # Glyphs from reactome\n # TODO: how about using these: https://reactome.org/icon-lib\n # for example, mitochondrion: https://reactome.org/icon-lib?f=cell_elements#Mitochondrion.svg\n # They appear to be CC-4.0, which might mean we can't upload them to WM Commons?\n\n # Glyphs from SMILES\n # metabolite_patterns_css_f = (\n # f\"{dir_out}/{bare_stub_out}.metabolite-patterns-uri.css\"\n # )\n # metabolite_patterns_svg_f = (\n # f\"{dir_out}/{bare_stub_out}.metabolite-patterns-uri.svg\"\n # )\n #\n # if path.exists(metabolite_patterns_svg_f) and path.exists(\n # metabolite_patterns_css_f\n # ):\n # print(\n # f\"{metabolite_patterns_svg_f} & {metabolite_patterns_css_f} already exist. 
To overwrite, delete them & try again.\"\n # )\n # else:\n # # If only one of them exists, we recreate both\n # if path.exists(metabolite_patterns_svg_f):\n # os.remove(metabolite_patterns_svg_f)\n # elif path.exists(metabolite_patterns_css_f):\n # os.remove(metabolite_patterns_css_f)\n #\n # metabolite_patterns_svg_tree = ET.parse(\n # \"<svg><defs></defs></svg>\", parser=parser\n # )\n # metabolite_patterns_svg_root = metabolite_patterns_svg_tree.getroot()\n #\n # # TODO convert the following sh script to Python\n # \"\"\"\n # jq -r '[.entitiesById[] | select(.type | contains([\"Metabolite\"]))] | unique_by(.type)[] | [.xrefDataSource, .xrefIdentifier, [.type[] | select(startswith(\"wikidata:\"))][0], [.type[] | select(startswith(\"hmdb:\") and length == 14)][0]] | @tsv' \"$json_f\" | \\\n # while IFS=$'\\t' read -r data_source identifier wikidata_id hmdb_id; do\n # wikidata_identifier=$(echo \"$wikidata_id\" | sed 's/wikidata://');\n # bridgedb_request_uri=\"http://webservice.bridgedb.org/Human/attributes/$data_source/$identifier?attrName=SMILES\"\n # if [ -z \"$data_source\" ] || [ -z \"$identifier\" ]; then\n # echo \"Missing Xref data source and/or identifier in $stub_out\";\n # continue;\n # fi\n #\n # smiles=$(curl -Ls \"$bridgedb_request_uri\")\n # bridgedb_request_status=$?\n #\n # if [ \"$bridgedb_request_status\" != 0 ] || [ -z \"$smiles\" ] || [[ \"$smiles\" =~ 'The server has not found anything matching the request URI' ]]; then\n # # if [ \"$bridgedb_request_status\" != 0 ]; then\n # # echo \"Failed to get SMILES string for $stub_out:$data_source:$identifier from $bridgedb_request_uri (status code: $bridgedb_request_status)\";\n # # elif [ -z \"$smiles\" ]; then\n # # echo \"Failed to get SMILES string for $stub_out:$data_source:$identifier from $bridgedb_request_uri (nothing returned)\";\n # # elif [[ \"$smiles\" =~ 'The server has not found anything matching the request URI' ]]; then\n # # echo \"Failed to get SMILES string for $stub_out:$data_source:$identifier from $bridgedb_request_uri\";\n # # echo '(The server has not found anything matching the request URI)'\n # # fi\n #\n # # If the DataSource and Identifier specified don't get us a SMILES string,\n # # it could be because BridgeDb doesn't support queries for that DataSource.\n # # For example, WP396_97382 has a DataNode with PubChem-compound:3081372,\n # # http://webservice.bridgedb.org/Human/attributes/PubChem-compound/3081372?attrName=SMILES\n # # doesn't return anything. However, that DataNode can be mapped to HMDB:HMDB61196, and\n # # the url http://webservice.bridgedb.org/Human/attributes/HMDB/HMDB61196\n # # does return a SMILES string.\n # # Note that BridgeDb currently requires us to use the 5 digit HMDB identifier,\n # # even though there is another format that uses more digits.\n #\n # if [ ! 
-z \"$hmdb_id\" ]; then\n # hmdb_identifier=\"HMDB\"${hmdb_id:(-5)};\n # bridgedb_request_uri_orig=\"$bridgedb_request_uri\"\n # bridgedb_request_uri=\"http://webservice.bridgedb.org/Human/attributes/HMDB/$hmdb_identifier?attrName=SMILES\"\n # #echo \"Trying alternate bridgedb_request_uri: $bridgedb_request_uri\"\n # smiles=$(curl -Ls \"$bridgedb_request_uri\")\n # bridgedb_request_status=$?\n # if [ \"$bridgedb_request_status\" != 0 ]; then\n # echo \"Failed to get SMILES string for $stub_out:$data_source:$identifier from both $bridgedb_request_uri_orig and alternate $bridgedb_request_uri (status code: $bridgedb_request_status)\";\n # continue;\n # elif [ -z \"$smiles\" ]; then\n # echo \"Failed to get SMILES string for $stub_out:$data_source:$identifier from both $bridgedb_request_uri_orig and alternate $bridgedb_request_uri (nothing returned)\";\n # continue;\n # elif [[ \"$smiles\" =~ 'The server has not found anything matching the request URI' ]]; then\n # echo \"Failed to get SMILES string for $stub_out:$data_source:$identifier from both $bridgedb_request_uri_orig and alternate $bridgedb_request_uri\";\n # echo '(The server has not found anything matching the request URI)'\n # continue;\n # fi\n # else\n # continue;\n # fi\n # fi\n #\n # smiles_url_encoded=$(echo \"$smiles\" | jq -Rr '@uri')\n # cdkdepict_url=\"http://www.simolecule.com/cdkdepict/depict/bow/svg?smi=$smiles_url_encoded&abbr=on&hdisp=bridgehead&showtitle=false&zoom=1.0&annotate=none\"\n #\n # cat >> \"$css_out\" <<EOF\n # [typeof~=\"wikidata:$wikidata_identifier\"]:hover > .Icon {\n # cursor: default;\n # fill: url(#Pattern$wikidata_identifier);\n # transform-box: fill-box;\n # transform: scale(2, 3);\n # transform-origin: 50% 50%;\n # }\n # [typeof~=\"wikidata:$wikidata_identifier\"]:hover > .Text {\n # font-size: 0px;\n # }\n # EOF\n #\n # # TODO: do we want to disable the clip-path on hover?\n # #[typeof~=wikidata:$wikidata_identifier]:hover > .Icon {\n # # clip-path: unset;\n # # rx: unset;\n # # ry: unset;\n # # cursor: default;\n # # fill: url(#Pattern$wikidata_identifier);\n # # transform-box: fill-box;\n # # transform: scale(2, 3);\n # # transform-origin: 50% 50%;\n # #}\n #\n # # \"transform-box: fill-box\" is needed for FF.\n # # https://bugzilla.mozilla.org/show_bug.cgi?id=1209061\n #\n # xmlstarlet ed -L \\\n # -s \"/svg/defs\" -t elem -n \"pattern\" -v \"\" \\\n # --var prevpattern '$prev' \\\n # -s '$prevpattern' -t elem -n \"image\" -v \"\" \\\n # --var previmage '$prev' \\\n # -i '$prevpattern' -t attr -n \"id\" -v \"Pattern$wikidata_identifier\" \\\n # -i '$prevpattern' -t attr -n \"width\" -v \"100%\" \\\n # -i '$prevpattern' -t attr -n \"height\" -v \"100%\" \\\n # -i '$prevpattern' -t attr -n \"patternContentUnits\" -v \"objectBoundingBox\" \\\n # -i '$prevpattern' -t attr -n \"preserveAspectRatio\" -v \"none\" \\\n # -i '$prevpattern' -t attr -n \"viewBox\" -v \"0 0 1 1\" \\\n # -i '$previmage' -t attr -n \"width\" -v \"1\" \\\n # -i '$previmage' -t attr -n \"height\" -v \"1\" \\\n # -i '$previmage' -t attr -n \"href\" -v \"$cdkdepict_url\" \\\n # -i '$previmage' -t attr -n \"preserveAspectRatio\" -v \"none\" \\\n # \"$svg_out\"\n # done\n #\n # sed -i '/<style.*>/{\n # r '\"$metabolite_patterns_css_f\"'\n # }' \"$path_out\"\n #\n # sed -i '/<g id=\"jic-defs\">/{\n # r /dev/stdin\n # }' \"$path_out\" < <(xmlstarlet sel -t -c '/svg/defs/*' \"$metabolite_patterns_svg_f\")\n # \"\"\"", "def quote_path(path):\n return '\"' + re.sub(r'([\\\\$\"[])', r\"\\\\\\1\", path) + '\"'" ]
[ "0.664361", "0.6505727", "0.6453225", "0.6349524", "0.631118", "0.62037516", "0.6048819", "0.6006023", "0.5888196", "0.5883319", "0.5875403", "0.5838811", "0.578169", "0.57720757", "0.57636064", "0.5753614", "0.5715025", "0.57089794", "0.569792", "0.5695481", "0.56592214", "0.5653134", "0.56510067", "0.5633124", "0.5623576", "0.5623083", "0.5616998", "0.5607472", "0.55974567", "0.5590173", "0.5585671", "0.5585671", "0.5575619", "0.5574568", "0.5572619", "0.55589277", "0.55559766", "0.55559766", "0.55559766", "0.55559766", "0.55559766", "0.55559766", "0.55559766", "0.55559766", "0.55559766", "0.5548917", "0.5546954", "0.5543918", "0.5534484", "0.5510643", "0.5503648", "0.5495521", "0.5493523", "0.54877436", "0.54859066", "0.5478378", "0.54776883", "0.54586107", "0.54548764", "0.544847", "0.54339874", "0.5433442", "0.5418102", "0.54105914", "0.54103243", "0.54096955", "0.54089874", "0.5391592", "0.5391127", "0.53898454", "0.53849494", "0.53818744", "0.53799766", "0.5370293", "0.5361076", "0.53543735", "0.5350901", "0.5345961", "0.53322667", "0.53320295", "0.53267795", "0.5322346", "0.5306451", "0.5305636", "0.53056234", "0.5304065", "0.5304065", "0.5304065", "0.5273533", "0.5239267", "0.5238785", "0.5238087", "0.52317244", "0.5216074", "0.5216026", "0.52119416", "0.5203995", "0.5203425", "0.5191229", "0.5189917" ]
0.6743669
0
Tests that the processor can be initialized.
def setUp(self):
  self.logger = mock.MagicMock()
  test_state = state.DFTimewolfState(config.Config)
  self.turbinia_processor = turbinia_base.TurbiniaProcessorBase(
      test_state, self.logger)
  file_path = os.path.join(
      CURRENT_DIR, "test_data", "turbinia_request_status.json")
  self._request_status = json.load(open(file_path))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _should_initialize_check_run(self, payload):\n action = payload.get('action')\n return action in self.initialize_actions or self.initialize_actions is None", "def test_init_success(self):\n found = False\n try:\n pyint = Interpreter()\n except InitializationException: \n found = True\n self.assertFalse(found)", "def checkInit(this):\n\t\tif not this._CAP: raise Exception('Vous devez initialiser la caméra...')", "def _init_feature_processer(self):\n try:\n model_config = self._conf.get(PredictConstance.BASE_CONFIG,\n PredictConstance.FEATURE_ENGINEERING_CONFIG)\n conf = configparser.ConfigParser()\n conf.read(model_config)\n self._feature_processor = data_processor.DataProcessor(conf=conf,log_path = self.xeasy_log_path)\n if self._feature_processor.init() == runstatus.RunStatus.SUCC:\n return True\n else:\n return False\n except Exception as err:\n self.managerlogger.logger.error(\"init model error: %s\" % err)\n self.errorlogger.logger.error(\"init model error:\\n %s\" % traceback.format_exc())\n return False", "def test_controller_initialization(self):\n for name in self.our_controllers:\n self.assertTrue(self.check_state(name, 'initialized'), \"{} is initialized correctly\".format(name))", "def test_initialise(self):\n # Make sure the variables are all updated\n assert isinstance(gcmc_system_sampler.context, Context)\n assert isinstance(gcmc_system_sampler.positions, Quantity)\n assert isinstance(gcmc_system_sampler.simulation_box, Quantity)\n\n return None", "def test_01_Init(self):\n pass", "def init(self):\n return True", "def _check_initialized(self):\n self.assertEquals(0, self._error_count)\n self.assertEquals(0, len(self._error_messages))", "def setUp(self):\n self.core_processor = core_processor.ProcessCores()", "def initialize(self) -> bool:\n raise NotImplementedError", "def is_initialized(self) -> bool:\n return (\n (self._exchange_params_by_currency_id is not None)\n and (self._utility_params_by_good_id is not None)\n and (self._transaction_fees is not None)\n )", "def test_init(self):\n self.assertEqual(self.device_key, self.factory.device_key)", "def test_initialise(self):\n\n # Make sure the variables are all updated\n assert isinstance(gcmc_sphere_sampler.context, Context)\n assert isinstance(gcmc_sphere_sampler.positions, Quantity)\n assert isinstance(gcmc_sphere_sampler.sphere_centre, Quantity)\n\n return None", "def test_initialized() -> None:\n MapieRegressor()", "def _check_initialized(self):\n check_is_fitted(self, 'estimators_')", "def _real_initialize(self):\n pass", "def _is_initialized(self) -> bool:\n return len(self) > 0", "def test_initialization(self, create_controller: Controller) -> None:\n pass", "def test_init(self):\n self.assertIsNotNone(DatabaseIntermediary(), self.ec.db)", "def _check_init(self):\n # instantiate the guest class we want to test\n system_name = 'dummy_system'\n host_name = 'dummy.domain.com'\n user = 'root'\n passwd = 'somepwd'\n extensions = {}\n guest_obj = linux.GuestLinux(\n system_name, host_name, user, passwd, extensions)\n\n # validate if attributes were correctly assigned to object\n self.assertEqual('linux', guest_obj.GUEST_ID)\n self.assertIs(system_name, guest_obj.name)\n self.assertIs(host_name, guest_obj.host_name)\n self.assertIs(user, guest_obj.user)\n self.assertIs(passwd, guest_obj.passwd)\n self.assertIs(extensions, guest_obj.extensions)\n\n # return object for further testing\n return guest_obj", "def test_setup(self):\n with pytest.raises(NotImplementedError):\n self.behaviour.setup()", "def 
test_init_default(self):\n self._test_init_default()", "def testInitializer(self):\n request = http.HttpRequest()\n\n data, check, mutator = initialize.MELANGE_INITIALIZER.initialize(\n request, [], {})\n self.assertEqual(request, data.request)\n self.assertEqual(data, check.data)\n self.assertEqual(data, mutator.data)", "def initialize(self):\n if not self._ready:\n self._real_initialize()\n self._ready = True", "def test_setup(self):\n assert self.http_handler.setup() is None\n self.assert_quantity_in_outbox(0)", "def __verify__(cls):\n\n try:\n UpstartSystem()\n return True\n except Exception as e:\n try:\n UpstartSystem(bus=DirectUpstartBus())\n return True\n except Exception as e:\n return False", "def _init(self) -> bool:\n CC3DPy.call_init()\n\n return True", "def ready(self):\n if not self.is_setup:\n return False\n\n if self.pocs.observatory.mount.is_parked:\n print_warning('Mount is parked. To unpark run `unpark`')\n return False\n\n return self.pocs.is_safe()", "def test_init(self):\n nt.assert_raises(Exception, CisInterface.CisInput, 'error')", "def _manually_initialize(self) -> None:\n # XXX: maybe refactor, this is actually part of the public interface\n pass", "def test_init(self):\n\n # This environment must have another attributes\n self.assertTrue(hasattr(self.environment, 'transitions'))\n\n # By default mesh shape is 4x3\n self.assertEqual(spaces.Tuple((spaces.Discrete(4), spaces.Discrete(3))), self.environment.observation_space)\n\n # By default initial position is (0, 2)\n self.assertEqual((0, 2), self.environment.initial_state)\n\n # Default reward is (-0.04)\n self.assertEqual((-0.04,), self.environment.default_reward)", "def Initialise(self):\n self.__m_Platform.Initialise()\n self.__m_Pump.Initialise( False )", "def _check_configured(cls):\r\n if not cls._CONFIGURED:\r\n raise RuntimeError('Registry not configured')", "def testInit(self):\n map_state = MapState(self.event_manager)\n self.assertEqual(map_state.event_manager, self.event_manager)\n self.assertTrue(map_state in self.event_manager.listener_groups[\"default\"].listeners)\n self.assertEqual(map_state.occupied_sectors_by_actor_id, {})\n self.assertEqual(map_state.actors_by_sector_id, {})", "def ipmi_setup():\n\n verify_ipmi_user_parm_accepted()", "def __init__(self):\n self.setup_called = False", "def test_init(self):\n assert_not_equal(self.testGame, None)", "def test_init(self):\n nt.assert_raises(Exception, CisInterface.CisOutput, 'error')", "def ready(self):\n # load all the feature sets\n loading.load_feature_sets()\n\n # First check that all expect DelegateAPIs are present\n checks.check_expected_delegate_apis()\n # Now check if all the actions those views expecte are present.\n checks.check_configured_actions()", "def test_os_processor(self):\n self.assertEqual(self.settings.OS_PROCESSOR, platform.processor())", "def is_setup(self):\n if self.pocs is None:\n print_warning('POCS has not been setup. 
Please run `setup_pocs`')\n return False\n return True", "def check_init(self):\n if self.Nlayer > 1:\n raise Exception(\"Nlayer == 1 currently\")", "def test_setup_platform(self, store_mock):\n config = {\n ip.DOMAIN: {\n \"platform\": \"microsoft_face_identify\",\n \"source\": {\"entity_id\": \"camera.demo_camera\"},\n \"group\": \"Test Group1\",\n },\n \"camera\": {\"platform\": \"demo\"},\n mf.DOMAIN: {\"api_key\": \"12345678abcdef6\"},\n }\n\n with assert_setup_component(1, ip.DOMAIN):\n setup_component(self.hass, ip.DOMAIN, config)\n self.hass.block_till_done()\n\n assert self.hass.states.get(\"image_processing.microsoftface_demo_camera\")", "def testInit(self):\n self.globalInit()\n self.test.start()", "def test_initialized() -> None:\n MapieClassifier()", "def test_init(self):\n self.assertEqual(self.foo._base_cmd, 'sleep 10; hostname')\n self.assertEqual(self.foo._base_args, {})\n self.assertEqual(self.foo.InputArgs, {})\n self.assertEqual(self.foo.OracleJobName, 'job1')", "def test_setup(self):\n assert self.transaction_behaviour.setup() is None\n self.assert_quantity_in_outbox(0)", "def test_init(self):\n mic = mi.MicrophoneToText()\n\n self.assertTrue(mic.switch)\n self.assertIsNotNone(mic.resultkeywords)\n self.assertIsNotNone(mic.result)\n self.assertIsNotNone(mic.keywordsshort)\n # tests also chunk and maxbuffer\n self.assertIsNotNone(mic.q)\n self.assertIsNotNone(mic.keywords)\n self.assertIsNotNone(mic.resultkeywords)\n self.assertIsNotNone(mic.speech_to_text)\n # tests also audio, format, channel and rate\n self.assertIsNotNone(mic.stream)\n self.assertIsNotNone(mic.audio_source)", "def test_initialization(self):\n self.assertEqual(self.installer.host_name, \"ec2.amazon.com\")\n self.assertEqual(self.installer.user_name, \"ec2\")\n self.assertEqual(self.installer.os, \"ubuntu\")\n self.assertEqual(self.installer.key_path, \"./ODFEAMIInstanceKey.pem\")\n self.assertEqual(self.installer.RPM_package_version, \"1.4.0\")\n self.assertEqual(self.installer.APT_OSS_version, \"7.4.2\")", "def test_creature(self):\n self.assertEqual(len(self.processor), 3)", "def test_init(self):\n M = simulation.EventMonitor(self.G)\n self.assertTrue(hasattr(M, 't'))\n self.assertTrue(hasattr(M, 'i'))\n\n self.assertEqual(len(M.t), 0)\n self.assertEqual(len(M.i), 0)", "def test_setting_continuous_processing(processor):\n processor.continuous_processing = False\n assert not processor._state.test('continuous_processing')\n processor.continuous_processing = True\n assert processor._state.test('continuous_processing')", "def setUp(self):\n self.sc = init_orca_context(cores=4)", "def _initialize_tests(self):\n # Access the sentries for inspecting service units\n self.compute_sentry = self.d.sentry['nova-compute'][0]\n self.rabbitmq_sentry = self.d.sentry['rabbitmq-server'][0]\n self.neutron_api_sentry = self.d.sentry['neutron-api'][0]\n self.n_ovs_sentry = self.d.sentry['neutron-openvswitch'][0]\n\n # pidof is failing to find neutron-server on stein\n # use pgrep instead.\n if self._get_openstack_release() >= self.bionic_stein:\n self.pgrep_full = True\n else:\n self.pgrep_full = False", "def _setup(self) -> None:\n\t\treturn", "def test_init(self):\n print os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\n self.assertEqual(len(self.app_mgr.f2f_api_key), 32)\n self.assertEqual(self.app_mgr._continue, True)", "def _check_all_systems_ready(self):\n raise NotImplementedError()", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def _initialize(self):\n pass", "def 
test_init(self):\n self.assertEqual(self.ing_mgr.ingredient_list, [])\n self.assertEqual(self.ing_mgr.user_input, True)", "def initialized(self):\n # verbose = CMAKE_BUILD_VERBOSE\n # if verbose:\n # self.diagnose_initialized_problems()\n return self.project_build_dir.exists() and self.has_stored_config_file()", "def initialized(self):\n with self.sess.as_default():\n with self.sess.graph.as_default():\n uninitialized = self.sess.run(self.report_uninitialized)\n if len(uninitialized) == 0:\n return True\n else:\n return False", "def test_init(self):\n\n with patch.object(rospy, \"wait_for_service\", return_value=True), \\\n patch.object(rospy, \"get_param\", mock_get_param), \\\n patch.object(rospy, \"init_node\", return_value=None), \\\n patch.object(rospy, 'spin', return_value=None), \\\n patch.object(rospy.Service, '__init__', return_value=None) as mock_service_init, \\\n patch.object(rospy.Publisher, '__init__', return_value=None) as mock_publisher_init, \\\n patch.object(Thread, 'start', return_value=None) as mock_start_thread, \\\n patch.object(Thread, 'join', return_value=None), \\\n patch.object(socket.socket, 'connect', return_value=True) as mock_connect, \\\n patch.object(socket.socket, 'bind', return_value=True) as mock_bind:\n\n src.drivers.hyundai_robot.init()\n\n for sn in [\"move_along\", \"abort\", \"store_poses\", \"move_pose\", \"set_speed\", \"move_between\"]:\n # both required services are advertised\n assert len([call for call in mock_service_init.mock_calls if call[1][0] == sn]) == 1\n\n # topic is advertised\n assert mock_publisher_init.call_count == 2\n assert mock_publisher_init.mock_calls[0][1][0] == \"robot_state\"\n assert mock_publisher_init.mock_calls[0][1][1] == RobotState\n\n assert mock_publisher_init.mock_calls[1][1][0] == \"robot_controller_joint_state\"\n assert mock_publisher_init.mock_calls[1][1][1] == JointState", "def test_perfectModelEnsemble_init(PM_ds_initialized_1d):\n pm = PerfectModelEnsemble(PM_ds_initialized_1d)\n print(PerfectModelEnsemble)\n assert pm", "def test_init(power_supply):\n power_supply.Init()\n assert power_supply.state() == tango.DevState.STANDBY", "def test_preferences_init(self):\n self.preferences.init(\n exchange_params_by_currency_id=self.exchange_params,\n utility_params_by_good_id=self.utility_params,\n tx_fee=self.tx_fee,\n )\n assert self.preferences.utility_params_by_good_id is not None\n assert self.preferences.exchange_params_by_currency_id is not None\n assert self.preferences.transaction_fees[\"seller_tx_fee\"] == 4\n assert self.preferences.transaction_fees[\"buyer_tx_fee\"] == 5\n assert self.preferences.is_initialized", "def test_init():\n machine = Machine(['a', 'b', 'c', '_'])\n assert machine.alphabet == ['a', 'b', 'c', '_']\n assert machine.head is None\n assert machine.state is None\n assert machine.tape is None", "def test_init(self):\n sample = PrepSample(self.sample_id, self.prep_template)\n # Check that the internal id have been correctly set\n self.assertEqual(sample._id, '1.SKB8.640193')\n # Check that the internal template have been correctly set\n self.assertEqual(sample._md_template, self.prep_template)\n # Check that the internal dynamic table name have been correctly set\n self.assertEqual(sample._dynamic_table, \"prep_1\")", "def is_ready() -> bool:\n return True", "def initialise(self):\n self.device.initialise()\n return \"OK\"", "def _isinit(self):\n return self.dp.state()==PyTango.DevState.INIT", "def initialize():\n manager.initialize()\n logs.exit_great_success()", "def 
initialize_mpi(self):\n return False", "def _init_hardware(self):\n return", "def do_init(self):\n\n pass", "def initialize(self):\n raise DeviceException(DeviceException.FEATURE_NOT_IMPLEMENTED)", "def test_tracker_init():\n\n trackers, cap = init_tracker()\n\n assert len(trackers) > 0", "def init_assembler(self):\n\n try:\n if self.stages:\n estimator_count = 0\n for stage in self.stages:\n stage.initialise(self.config)\n if isinstance(stage, Estimator):\n estimator_count += 1\n if estimator_count > 1:\n raise ValueError(\"Stages can only have one Estimator class\")\n\n if self.finaliser:\n self.finaliser.initialise(self.config)\n\n except Exception as e:\n if self.config.surround.surface_exceptions:\n raise e\n LOGGER.exception(e)\n return False\n return True", "def sanity_check(self):\n return True", "def test_invalid_controller_initialization(self):\n for name in self.invalid_controllers:\n self.assertTrue(self.check_parameter_server(name), \"{} was loaded as expected\".format(name))\n\n for name in self.invalid_controllers:\n self.assertFalse(self.check_state(name, 'initialized'), \"{} initializes although it should not\".format(name))", "def test_init_on_task_run(self):\n launcher = TaskLauncher(self.db, self.task_run, self.get_mock_assignment_data_array())\n self.assertEqual(self.db, launcher.db)\n self.assertEqual(self.task_run, launcher.task_run)\n self.assertEqual(len(launcher.assignments), 0)\n self.assertEqual(len(launcher.units), 0)\n self.assertEqual(launcher.provider_type, MockProvider.PROVIDER_TYPE)", "def is_ready(self) -> bool:\n pass", "def test_init_game(self):\n screen = utils.init_game()\n self.assertIsInstance(screen, pg.Surface)", "def init(self, args):\n return True", "async def test_setup_missing_config(hass: HomeAssistant) -> None:\n assert await async_setup_component(\n hass, SENSOR_DOMAIN, {SENSOR_DOMAIN: {\"platform\": DOMAIN}}\n )\n await hass.async_block_till_done()\n assert len(hass.states.async_all(SENSOR_DOMAIN)) == 0", "def test_setup_sync(self):\n worker_helper = WorkerHelper()\n self.assertEqual(worker_helper.setup(), None)", "def initialized(self):\n return len(self.ops) > 0", "def initialize(self, *args, **kwargs):\n self.initialized = True", "def isInitialized(self):\n\t\tif self.isTypeSet and self.isCfgSet:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False", "def test_init_no_source():\n frame_ingestor = FrameIngestor()\n assert not frame_ingestor._source", "def _check_required_if_provider(self):\n return", "def test_initialisation(self):\n currency_endowment = {\"FET\": 100}\n good_endowment = {\"good_id\": 2}\n self.ownership_state.init(\n amount_by_currency_id=currency_endowment,\n quantities_by_good_id=good_endowment,\n )\n assert self.ownership_state.amount_by_currency_id is not None\n assert self.ownership_state.quantities_by_good_id is not None\n assert self.ownership_state.is_initialized", "def _check_all_systems_ready(self):\n self.check_joint_states()\n self.check_contact_1()\n self.check_contact_2()\n self.check_collision()\n # self.check_rgb_camera()\n # self.check_rgbd_camera()\n # self.check_gripper_state()\n rospy.logdebug(\"ALL SYSTEMS READY\")", "def test_verify_state_of_a_device():", "def workflow_initialized(self):\n return self._gdb_interface.initialized()", "def test_generate_uninit(perfectModelEnsemble_initialized_control):\n pm = perfectModelEnsemble_initialized_control\n pm = pm.generate_uninitialized()\n assert pm.get_uninitialized()", "def test_002_init(self):\n 
self.assertIsInstance(ionchrom.ionchrom(\"id\"),ionchrom.ionchrom)", "def _initialise_run(self) -> None:", "def sanity_check(self):\n pass" ]
[ "0.6729422", "0.65866035", "0.6567162", "0.65516055", "0.63776916", "0.63715816", "0.63663733", "0.626371", "0.624758", "0.62021387", "0.6187901", "0.6163606", "0.6123539", "0.61059314", "0.60137165", "0.5975385", "0.5962163", "0.59609634", "0.5944189", "0.5939157", "0.59364486", "0.5917788", "0.59143823", "0.5908398", "0.59059525", "0.58988214", "0.5879075", "0.58766884", "0.5874203", "0.58563733", "0.58553785", "0.5847653", "0.5844579", "0.5838185", "0.5816749", "0.5802104", "0.579358", "0.578945", "0.57691544", "0.5768564", "0.57542735", "0.57510465", "0.57492083", "0.5743968", "0.5739368", "0.57372457", "0.5736817", "0.57340205", "0.5733726", "0.57230455", "0.57215357", "0.57180065", "0.5714642", "0.57099074", "0.5704186", "0.5702344", "0.5698105", "0.5691853", "0.5685461", "0.5685461", "0.5685461", "0.5683132", "0.5680576", "0.5671568", "0.56582373", "0.56581897", "0.5655697", "0.56554943", "0.5652693", "0.5649877", "0.56459725", "0.5640023", "0.5634499", "0.56316787", "0.562994", "0.562988", "0.5629773", "0.5620857", "0.56057143", "0.5603636", "0.5594812", "0.55892915", "0.55847424", "0.5578023", "0.55737835", "0.55716246", "0.5566288", "0.55561656", "0.5553198", "0.5547029", "0.55464655", "0.55451363", "0.5539571", "0.5539123", "0.5535908", "0.5527373", "0.5524569", "0.552116", "0.55205667", "0.5515347", "0.5515312" ]
0.0
-1
Tests the TurbiniaSetup method.
def testTurbiniaSetup(self, _mock_read_config):
  _mock_read_config.return_value = {"OUTPUT_DIR": "/tmp"}
  self.turbinia_processor.TurbiniaSetUp(
      project="turbinia-project",
      turbinia_auth=False,
      turbinia_recipe=None,
      turbinia_zone="us-central1f",
      turbinia_api="http://localhost:8001",
      incident_id="123456789",
      sketch_id="12345",
  )
  self.assertEqual(self.turbinia_processor.project, "turbinia-project")
  self.assertEqual(self.turbinia_processor.turbinia_zone, "us-central1f")
  self.assertEqual(
      self.turbinia_processor.turbinia_api, "http://localhost:8001")
  self.assertEqual(self.turbinia_processor.incident_id, "123456789")
  self.assertEqual(self.turbinia_processor.sketch_id, "12345")
  self.assertEqual(self.turbinia_processor.output_path, "/tmp")
  self.assertEqual(self.turbinia_processor.turbinia_recipe, None)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def setup():\n pass", "def TurbiniaSetUp(\n self, project: str, turbinia_auth: bool,\n turbinia_recipe: Union[str, None], turbinia_zone: str, turbinia_api: str,\n incident_id: str, sketch_id: int) -> None:\n self.project = project\n self.turbinia_auth = turbinia_auth\n self.turbinia_api = turbinia_api\n self.turbinia_recipe = turbinia_recipe\n self.turbinia_zone = turbinia_zone\n self.incident_id = incident_id\n self.sketch_id = sketch_id\n self.client_config = turbinia_api_lib.Configuration(host=self.turbinia_api)\n self.client = self.InitializeTurbiniaApiClient(self.credentials)\n self.requests_api_instance = turbinia_requests_api.TurbiniaRequestsApi(\n self.client)\n # We need to get the output path from the Turbinia server.\n api_instance = turbinia_configuration_api.TurbiniaConfigurationApi(\n self.client)\n try:\n api_response = api_instance.read_config()\n self.output_path = api_response.get('OUTPUT_DIR')\n except turbinia_api_lib.ApiException as exception:\n self.ModuleError(exception.body, critical=True)", "def setup( self ):", "def _setup_dut(ptfhost, request):\n logger.info(\"Set up SAI tests.\")\n\n _prepare_test_cases(ptfhost, request)", "def test_functionality(self):\n \n self.browserObject = globalVars.browserObject\n\n self.preRunSetup()\n \n self.runTestCase()\n \n self.postRunCleanup()\n \n self.logout()", "def setup(self) -> None:", "def test_functionality(self):\n self.browserObject = globalVars.browserObject\n\n self.preRunSetup()\n \n self.runTestCase()\n \n self.postRunCleanup()", "def runTest(self):\n self.setUp()\n self.test_NeuroPath1()", "def setup(self):\n pass", "def runTest(self):\n self.setUp()\n self.test_visuThreeD1()", "def setup(self):\n ...", "def test_setup(self):\n assert self.transaction_behaviour.setup() is None\n self.assert_quantity_in_outbox(0)", "def setup(self):\n pass", "def runTest(self):\n self.setUp()\n self.test_BiplaneRegistration1()", "def test_functionality(self):\n self.templateName = \"Test Template\"\n self.browserObject = globalVars.browserObject\n\n self.preRunSetup()\n \n self.runTestCase()\n \n self.postRunCleanup()", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def setup(self):\n pass", "def SetupEnvironment(self):\n pass", "def setup(self):\r\n pass", "def setup(self):", "def setup(self):", "def setup(self):", "def setup(self):", "def main():\n setup(**setup_params)", "def setup(self): \n pass", "def setup(self):\n\t\tpass", "def Setup(self):\n return True", "def setUp(self):\n test_env_setup()", "def runTest(self):\r\n self.setUp()\r\n self.test_sceneImport24281()", "def test_setup(self):\n engine = Engine(self.config_file, self.api_token)\n engine.setup()", "async def test_setup(hass, setup_sensor):\n state = hass.states.get(\"sensor.ethereum\")\n assert state is not None\n\n assert state.name == \"Ethereum\"\n assert state.state == \"493.455\"\n assert state.attributes.get(\"symbol\") == \"ETH\"\n assert state.attributes.get(\"unit_of_measurement\") == \"EUR\"", "def test_setup(self):\n assert self.http_handler.setup() is None\n self.assert_quantity_in_outbox(0)", "async def test_setup(hass: HomeAssistant, ufp: MockUFPFixture) -> None:\n\n await hass.config_entries.async_setup(ufp.entry.entry_id)\n await 
hass.async_block_till_done()\n\n assert ufp.entry.state == ConfigEntryState.LOADED\n assert ufp.api.update.called\n assert ufp.entry.unique_id == ufp.api.bootstrap.nvr.mac", "def setup(self) -> None:\n pass", "def setup(self) -> None:\n pass", "def setup(self) -> None:\n pass", "def _set_up():\n repl._setUp = self.setUp", "def _fixture_setup(self):\n pass", "def setUp(self):\n lang = self._sim_lang\n self._simulator = self._find_resource(\n f\"drake/examples/hardware_sim/hardware_sim_{lang}\")\n self._example_scenarios = self._find_resource(\n \"drake/examples/hardware_sim/example_scenarios.yaml\")\n self._test_scenarios = self._find_resource(\n \"drake/examples/hardware_sim/test/test_scenarios.yaml\")\n self._default_extra = {\n # For our smoke test, exit fairly quickly.\n \"simulation_duration\": 0.0625,\n }", "def runTest(self):\n self.setUp()\n self.test_ExtendSpine1()", "def setup(self):\n pass # pragma: no cover", "def setUp(self):\n print(\"New test by Nikolay Melnik\")", "def setUp(self):\n print(\"\\nIn setUp()...\")", "def _setup(self):", "def _setup(self):", "def unitary_test():", "def runTest(self):\n self.setUp()\n self.test_TractQuerier()", "def setUp(self):\n logging.debug('setting up')", "def setUp(self):\n logging.debug('setting up')", "def setUp(self):\n sumo_bin = sumolib.checkBinary('sumo')\n traci.start([sumo_bin, \"-n\", self.network_path, \"-r\", self.routes_path])\n traci.simulationStep()", "def setUp(self):\r\n pass # nothing used by all\r", "def _setup(self) -> None:\n\t\treturn", "def setUp(self):\n # Let's install a bundle to use in tests\n self.run_function(\"assistive.install\", [OSA_SCRIPT, True])", "def setUp(self) :\n pass", "def runtest(self):", "def runTest(self):\n self.setUp()\n self.test_SegmentDicom1()", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n \n \n pass", "def setUp(self):\n pass #because we dont have anything to setup.", "def test_install(self):\n pass", "def test_setup(self):\n with pytest.raises(NotImplementedError):\n self.behaviour.setup()", "def setUp(self):\r\n # nothing to do, all tests use different things\r\n pass", "def setUp(self):\n casDict = {\"Singular\":\"Singular\", \"Magma\":\"magma\", \"Maple\":\"maple\"}\n timeCommand = \"time -p\"\n self.msTest = MS.MachineSettings(casDict,timeCommand)\n self.builder = MSFXMLB.MachineSettingsFromXMLBuilder()", "def setUp(self):\n self.delegate = AlwaysHitDelegate(\"\")\n self.environment = BattleEnvironment()", "def setUp(self):\n # create temporary directory\n if not usedir:\n self.test_dir = tempfile.mkdtemp()\n os.chdir(self.test_dir)\n else:\n os.chdir(usedir) \n\n super(SimpleTest, self).setUp()\n\n import SFramework\n self.manager = SFramework.TSStatisticsManager()\n self.manager.getWorkspaces().addObject(self.makeWS())", "def setUp(self):\n casDict = {\"Singular\":\"Singular\", \"Magma\":\"magma\", \"Maple\":\"maple\"}\n timeCommand = \"time -p\"\n self.msTest = MS.MachineSettings(casDict,timeCommand)", "def setUp(self):\n\n self.params = master_phil.extract()\n\n self.params.input.xtal_name = \"FALZA-x0085\"\n self.params.input.in_path = os.path.join(\n os.path.realpath(\"./test/resources\"), self.params.input.xtal_name\n )\n self.params.validate.input.base_mtz = os.path.join(\n self.params.input.in_path, \"FALZA-x0085.free.mtz\"\n )\n 
self.params.input.mtz = os.path.join(\n self.params.input.in_path, \"FALZA-x0085.free.mtz\"\n )\n self.params.input.pdb = os.path.join(self.params.input.in_path, \"refine.pdb\")\n self.params.output.out_dir = os.path.realpath(\"./test/output\")\n self.params.output.log_dir = os.path.realpath(\n os.path.join(\"./test/output\", \"logs\")\n )\n self.params.exhaustive.options.step = 0.05", "def setUp(self):\n MainTests.setUp(self)", "def runTest(self):\r\n self.setUp()\r\n self.test_CreateROI1()", "def test_Tuna(self):\n tuna = Tuna(\"1\", \"2\", \"3\", \"4\")\n self.assertIsNotNone(tuna)", "def setUp(self):\n self.setup_beets()", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass", "def setUp(self):\n pass" ]
[ "0.6720269", "0.65703833", "0.6467602", "0.6449046", "0.6447386", "0.64372605", "0.64077723", "0.6403362", "0.63865745", "0.635866", "0.6345223", "0.63323843", "0.63103575", "0.6282889", "0.6277148", "0.62679124", "0.62679124", "0.62679124", "0.62679124", "0.62679124", "0.62679124", "0.62679124", "0.62679124", "0.62679124", "0.62679124", "0.62679124", "0.62679124", "0.62636167", "0.62636167", "0.6255071", "0.6253552", "0.6251323", "0.6251323", "0.6251323", "0.6251323", "0.62116057", "0.62113714", "0.6210461", "0.62040377", "0.6161", "0.6158741", "0.61566883", "0.61474603", "0.6145581", "0.6142353", "0.61359733", "0.61359733", "0.61359733", "0.6111301", "0.6094564", "0.60909575", "0.6090127", "0.606463", "0.60503834", "0.6039343", "0.6033478", "0.6033478", "0.60190487", "0.60065866", "0.60037583", "0.60037583", "0.6001869", "0.5991516", "0.59878963", "0.5986564", "0.5986183", "0.59851176", "0.5982413", "0.5979746", "0.5979746", "0.5979746", "0.5979746", "0.5979746", "0.5979746", "0.5979746", "0.5979746", "0.5979746", "0.5979314", "0.5973112", "0.5970051", "0.59502316", "0.59441787", "0.59438956", "0.59344506", "0.59344465", "0.5925618", "0.59243333", "0.5922233", "0.5921613", "0.5920707", "0.59157556", "0.59157556", "0.59157556", "0.59157556", "0.59157556", "0.59157556", "0.59157556", "0.59157556", "0.59157556", "0.59157556" ]
0.77893466
0
Tests the TurbiniaStart method.
def testTurbiniaStart(self, mock_create_request): mock_create_request.return_value = { "request_id": "41483253079448e59685d88f37ab91f7" } mock_api_instance = mock.MagicMock() mock_api_instance.create_request = mock_create_request self.turbinia_processor.requests_api_instance = mock_api_instance evidence = { "type": "GoogleCloudDisk", "disk_name": "disk-1", "project": "project-1", "zone": "us-central1-f", } request_id = self.turbinia_processor.TurbiniaStart( evidence=evidence, yara_rules=YARA_RULE) self.assertEqual(request_id, "41483253079448e59685d88f37ab91f7")
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testStart(self):\n self.machine.stop()\n self.machine.start(safe.Settling)\n \n self.assertCurrentMotion(motion.common.Hover)\n \n self.releaseTimer(safe.Settling.SETTLED)\n self.assertCurrentState(safe.Grabbing)", "def startTestRun(self):", "def startTest(asset):", "def start():\n trio.run(_main)", "def start():", "def start():", "def start():", "def start():", "def test_get_start_true(self):\n\n tt = TemperatureTracker()\n tt.start()\n self.assertIsNotNone(tt.get_start())", "def test_start_scan(self):\n pass", "def testStart(self):\n self.machine.stop()\n self.machine.start(safe.Grabbing)\n \n self.assertCurrentMotion(ram.motion.basic.RateChangeDepth)\n \n self.releaseTimer(safe.Grabbing.GRABBED)\n self.assertCurrentState(safe.Surface)", "def _start(self, unit):\n raise NotImplementedError", "def start(self) -> None:", "def start(self) -> None:", "def startTest(self, test):\n self._timer = time()", "def start_fixture(self):\n pass", "def _start(self):", "def test_start(self):\n magic_hat = Game()\n result = Game.start(magic_hat)\n self.assertEqual(result, game.STATUS_PLAYING)", "def start(self):\n ...", "def start(self) -> None:\n ...", "def start(self) -> None:\n ...", "def startTestRun(self, test):\n self.runTime= time.time()\n self.logger.debug(\"\\nBeginning ForceBalance test suite at %s\\n\" % time.strftime('%x %X %Z'))", "def start(self):\r\n self.setDriver('ST', 1)", "def startTestRun(self):\n self.startTime = time.time()\n # Really verbose information\n if self.verbose > 2:\n self.stream.writeln(self.colors.bold(pretty_version() + \"\\n\"))", "def startTest(testname, host):\r\n host, UNDI = getBoxInfo()\r\n runID = uuid.uuid5(uuid.NAMESPACE_DNS, host)\r\n lg.info(\"Test, %s, run started on %s/%s with runID %s\"%(testname, host, UNDI, runID.hex))\r\n def abort():\r\n lg.info(\"Aborting test run %s for test %s on %s/%s\"%(runID.hex, testname, host, UNDI))\r\n return\r\n def end():\r\n lg.info(\"Ending test run %s for test %s on %s/%s\"%(runID.hex, testname, host, UNDI))\r\n return\r\n return end, abort", "def testInit(self):\n self.globalInit()\n self.test.start()", "def test_run_started(self):", "def test_start_test(self):\n self.protocol.startTest(self.test)\n self.assertEqual(self.io.getvalue(), compat._b(\n \"test: %s\\n\" % self.test.id()))", "def testTurbiniaSetup(self, _mock_read_config):\n _mock_read_config.return_value = {\"OUTPUT_DIR\": \"/tmp\"}\n self.turbinia_processor.TurbiniaSetUp(\n project=\"turbinia-project\",\n turbinia_auth=False,\n turbinia_recipe=None,\n turbinia_zone=\"us-central1f\",\n turbinia_api=\"http://localhost:8001\",\n incident_id=\"123456789\",\n sketch_id=\"12345\",\n )\n self.assertEqual(self.turbinia_processor.project, \"turbinia-project\")\n self.assertEqual(self.turbinia_processor.turbinia_zone, \"us-central1f\")\n self.assertEqual(\n self.turbinia_processor.turbinia_api, \"http://localhost:8001\")\n self.assertEqual(self.turbinia_processor.incident_id, \"123456789\")\n self.assertEqual(self.turbinia_processor.sketch_id, \"12345\")\n self.assertEqual(self.turbinia_processor.output_path, \"/tmp\")\n self.assertEqual(self.turbinia_processor.turbinia_recipe, None)", "def start(self):\r\n pass", "def start (self):\n pass", "def start (self):\n pass", "def startTestHook(self):", "def start(cobj):\n pass", "def setUp(self):\n sumo_bin = sumolib.checkBinary('sumo')\n traci.start([sumo_bin, \"-n\", self.network_path, \"-r\", self.routes_path])\n traci.simulationStep()", "def testStart(self):\n 
self.assert_(self.visionSystem.downwardSafeDetector)\n #self.assertCurrentMotion(motion.search.ForwardZigZag)", "def Start(self) :\n\t\t...", "def test_begin(self):", "def startup(self) -> None:", "def _start(self):\n pass", "def setUp(self):\n self._service = Service()\n self._service.setUp()\n time.sleep(1)\n self._proxy = get_object(TOP_OBJECT)\n Manager.Methods.ConfigureSimulator(self._proxy, {'denominator': 8})", "def setUp(self):\n self._service = Service()\n self._service.setUp()\n time.sleep(1)\n self._proxy = get_object(TOP_OBJECT)\n Manager.Methods.ConfigureSimulator(self._proxy, {'denominator': 8})", "def start(self, unit):\n # Default: act as a dummy.\n return self._start(unit)", "def run(self):\n self.speed_test.start()", "def start(self):\n try:\n pass\n except:\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def start(self):\n pass", "def init():\n serverboards.info(\"Init test running\")\n time.sleep(0.5)\n serverboards.info(\"Init test stop\")\n return 30", "def test_start(self):\n self.fail(\"write a test\")", "def start_workunit(self, workunit):\r\n pass", "def start_workunit(self, workunit):\r\n pass", "def starting_tests(self):\n# disable menus during testing, because their message loop seems to interfere\n# with the natlink message loop which waits for recognitionMimic to\n# finish\n self.testing = 1", "def setUp(self):\n self.t = Timew()", "def setUp(self):\n self.t = Timew()", "def setUp(self):\n self.t = Timew()", "def start(self):\r\n self.start_time = time.time()", "def test_start(http_service: Any) -> None:\n url = f\"{http_service}/start\"\n response = requests.get(url)\n\n assert response.status_code == 200\n assert response.headers[\"content-type\"] == \"text/html; charset=utf-8\"\n\n assert len(response.text) > 0", "def setUp(self):\n casDict = {\"Singular\":\"Singular\", \"Magma\":\"magma\", \"Maple\":\"maple\"}\n timeCommand = \"time -p\"\n self.msTest = MS.MachineSettings(casDict,timeCommand)", "def TerminalClientStart(self):\n pass", "def runRobot():", "def test_smoke(self):\n\t\tinit_state = torch.tensor(0.0)\n\t\ttotal_time = torch.tensor(4.0)\n\t\tprint('Agent state trajectory and actions:')\n\t\tAgent().play(init_state, total_time)\n\t\tpyro.clear_param_store()", "def startTest(self, test):\n test = proto_test(test)\n self.start_time = time.time()\n self.reinitialize()\n if self.start_callback:\n self.start_callback(test)", "def test_issue_start_stop_watch(self):\n pass", "def started(self):", "def runtest(self):", "def test_launch_traj(self, capsys):\n args = self.args.copy()\n args[\"traj_file\"] = str(PATH_DATA / \"2POPC.xtc\")\n args[\"out_file\"] = \"out.txt\"\n args[\"prefix_traj_ouput\"] = \"basename\"\n args[\"begin\"] = 0\n args[\"end\"] = 10000\n UI.launch(**args)\n captured = capsys.readouterr().out\n assert \"Results written to out.txt\" in captured\n assert \"Dealing with frame 10 at 10000.0 ps.\" in captured\n assert \"Writing new pdb with hydrogens.\" in captured\n assert \"Writing trajectory with hydrogens in xtc file.\" in captured", "def test_001_start(self):\n HEADING()\n self.db.start()\n up = self.db.isup()\n result = up\n assert result", "def ConsoleStart(self):\n pass", "def startTest(self, test):\n self.start()\n test.status = None\n test.errors = None\n test.test_item = self.service.start_nose_item(self, test)\n self.setupLoghandler()", "def runTest(self):\r\n self.setUp()\r\n 
self.test_sceneImport24281()", "def starting_tests(self):\n# disable menus during testing, because their message loop seems to interfere\n# with the natlink message loop which waits for recognitionMimic to\n# finish\n self.enable_menus(0)\n self.testing = 1\n self.parent.starting_tests()", "def start():\n # Have the car begin at a stop\n rc.drive.stop()\n # Print start message\n print(\">> Lab 4B - LIDAR Wall Following\")", "def begin(self, tests):\r\n raise NotImplementedError", "def start( *args, **kwargs ):", "def Start():\n timer.start()", "def begin(self):\n\n env = self.context.lookup(\"/environment\")\n\n self._test_results_dir = env[\"output_directory\"]\n self._starttime = env[\"starttime\"]\n self._runid = env[\"runid\"]\n\n self._result_filename = os.path.join(self._test_results_dir, \"testrun_results.jsos\")\n self._summary_filename = os.path.join(self._test_results_dir, \"testrun_summary.json\")\n self._import_errors_filename = os.path.join(self._test_results_dir, \"import_errors.jsos\")\n\n return", "def startSimulation(self):\n self.saveParameters()\n self.simulation.main()", "def start(self):\n raise NotImplementedError", "def start(self):\n raise NotImplementedError", "def start(self):\n raise NotImplementedError", "def start():\n # Have the car begin at a stop\n rc.drive.stop()\n\n global width\n global height\n width = rc.camera.get_width()\n height = rc.camera.get_height()\n # rc.drive.set_max_speed(1)\n\n global currentChallenge\n global oldState\n currentChallenge = Challenge.ManualControl\n oldState = Challenge.Line\n\n global colorPriority\n colorPriority = None\n\n global oldCones\n oldCones = None\n\n global last_waypoint_type\n last_waypoint_type = None\n\n # Print start message\n print(\">> Final Challenge - Time Trials\")", "def test_get_start_false(self):\n\n tt = TemperatureTracker()\n self.assertIsNone(tt.get_start())", "def start(self):\n# if self._start_time is not None:\n self._start_time = time.perf_counter()", "def start(self):\n self.start_time = time.time()", "def runTest(self):\n return True", "def test_drive(self):\n global ENV, TRAFFIC_LIGHT\n ENV = simpy.Environment()\n TRAFFIC_LIGHT = TrafficLight()\n bus = Bus(nr=0)\n ENV.process(bus.drive())\n ENV.run()\n self.assertEqual(bus.movement.to_pos, 600)", "def start():\n import OnlineEnv as Online\n Online.end_config(False)\n #Online.end_config(True)", "def main():\n taxi = Taxi(\"Prius 1\", 100)\n taxi.drive(40)\n print(taxi)\n taxi.start_fare()\n taxi.drive(100)\n print(taxi)", "def test3():\n setLogLevel(\"info\")\n info(\"Configuracion Unidad experimental\")\n \"\"\" 1 -> Definicion de la topologia \"\"\"\n t1 = Topologia1()\n ue1 = UnidadExperimental()\n ue1.setTopo(t1)\n ue1.definirNodosClaves(C='h2', V='h3') # Caso solo para trafico normal\n ue1.setController('ryu', 'simple_switch_13.py,ofctl_rest.py')\n info(\"Configuracion del experimento\")\n \"\"\" 3. Confiracion del experimento \"\"\"\n exp1 = Experimento()\n exp1.configureParams(ue1)\n exp1.configurarTrafico('normal')\n \"\"\" 4. Inicio del experimento \"\"\"\n exp1.startTest()\n \"\"\" 5. Aplicacion de pruebas \"\"\"\n exp1.trafico.pingMeasure()\n exp1.trafico.pingMeasure(filename='ensayo_ping.log')\n \"\"\" 6. Fin del experimento \"\"\"\n exp1.endTest()\n info(\"Removiendo la topologia\\n\")\n exp1.killTest()\n info(\"Removiendo el controlador\\n\")\n exp1.killController() # Si no se pone no se finaliza el controlador", "def isstarted():", "def suite_started(self, module):", "def start():\n\n print(\"Hi. 
I'm your Amazon customer service assistant.\")\n print('What can I help you about your orders?')", "def main():\n tng.api.runner()", "def test_modes(self):\n step = self.run_step('S01-first.py')\n self.assertTrue(step.success)\n self.assertTrue(step.local.is_testing)\n self.assertFalse(step.local.is_interactive)\n self.assertFalse(step.local.is_single_run)" ]
[ "0.71255654", "0.6825471", "0.6790718", "0.66931415", "0.6602822", "0.6602822", "0.6602822", "0.6602822", "0.6589439", "0.6544202", "0.65038764", "0.650213", "0.63993055", "0.63993055", "0.6367139", "0.6244631", "0.6237109", "0.6224533", "0.61932135", "0.61699873", "0.61699873", "0.61687076", "0.6157207", "0.61193496", "0.61144316", "0.60601485", "0.60256684", "0.59974205", "0.5992832", "0.5986008", "0.59803635", "0.59803635", "0.5974042", "0.5963362", "0.5959553", "0.59567565", "0.5949914", "0.5944197", "0.593373", "0.5919183", "0.5911421", "0.5911421", "0.59020865", "0.59009576", "0.5896914", "0.5896023", "0.5896023", "0.5896023", "0.5896023", "0.5896023", "0.5896023", "0.5896023", "0.5896023", "0.5878892", "0.58753455", "0.58617705", "0.58617705", "0.58384943", "0.5810026", "0.5810026", "0.5810026", "0.5808882", "0.57976377", "0.5772234", "0.57636905", "0.57606596", "0.5725545", "0.57158643", "0.5710669", "0.57071406", "0.57046217", "0.56910264", "0.5689838", "0.56787163", "0.5677106", "0.5664552", "0.565394", "0.5649994", "0.5649182", "0.5646914", "0.5640828", "0.56036097", "0.55919373", "0.5591759", "0.5591759", "0.5591759", "0.5590724", "0.5585923", "0.5579845", "0.5577186", "0.55753803", "0.5572223", "0.5559787", "0.5539903", "0.553989", "0.5536467", "0.55317503", "0.55296296", "0.5529374", "0.5528162" ]
0.63942826
14
Tests the TurbiniaWait method.
def testTurbiniaWait(self, mock_get_request_status, _): mock_api_instance = mock.MagicMock() mock_api_instance.create_request = mock_get_request_status self.turbinia_processor.requests_api_instance = mock_api_instance mock_get_request_status.return_value = self._request_status for task, path in self.turbinia_processor.TurbiniaWait(TASK_ID): # Check that the task and path are correct for a PlasoParserTask if task["id"] == TASK_ID: self.assertEqual(task, self._request_status["tasks"][0]) self.assertEqual(path, TEST_TASK_PATH) break
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def wait():\n pass", "def wait():\n time.sleep(1)", "def do_wait(self):\n pass", "def wait(wait_time=WAIT_TIME):\n # time.sleep(wait_time)\n pass", "def wait(self):\n pass", "def wait(self):\n pass", "def wait(self):\n time.sleep(0.010)", "def wait(cls, quad):\n\t\twait_time = cls.get_address_value(quad.result)\n\t\ttime.sleep(wait_time/1000.0)", "def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)", "def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)", "def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)", "def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)", "def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)", "def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)", "def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)", "def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)", "def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)", "def waitrobot(robot):\n while not robot.GetController().IsDone():\n time.sleep(0.01)", "def waitUntilSuccess():", "def wait(self, timeoout=None, state=\"C-completed\"):", "def wait(self):\n\t\traise NotImplementedError(\"must be redeclared\")", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def waitUntilFinished():", "def wait(delay=2):\n time.sleep(delay)", "def wait(n=3):\n sleep(n)", "def wait(self, ms=None):\r\n util.raiseNotDefined()", "def wait(self):\n\t\twhile True:\n\t\t\tr1 = self.zaberSend(self.translation[\"hor\"], self.cmd[\"returnStatus\"], data=0)\n\t\t\tr2 = self.zaberSend(self.translation[\"ver\"], self.cmd[\"returnStatus\"], data=0)\n\t\t\tif r1[2] == 0 and r2[2] == 0:\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\ttime.sleep(.01)", "def wait(self):\n self.mainloop().wait()", "def wait(wait_time):\n\n time.sleep(wait_time)", "def wait(\n t: float,\n f: str,\n ) -> None:\n\n print(\"Waiting for %s...\" % f)\n\n time.sleep(t)\n\n return", "def _WaitForLinkerTestStatus(adb, timeout):", "def wait(self):\n time.sleep(self.next())", "def wait(self):\n self.event.wait()", "def wait_second(self, time_wait):\n # each test case 1st check for the stop button flag\n if not self.stopLoop:\n # get time\n ts = datetime.datetime.now().strftime(self.tsFormat)\n # Create label\n x = Label(\n self.testFrame, text=f'{ts} - Waiting {time_wait}s',\n background=self.bgChooser(),\n foreground=\"#a5120d\",\n font=self.boldFont, anchor='w')\n x.pack(fill=X)\n # add counter for BG\n self.bgCounter += 1\n # allow window to catch up\n self.tkRoot.update()\n self.update_scrollbar()\n time.sleep(1)\n # Automation Script below --------------------\n\n self.tv.wait_in_second(time_wait)\n\n # Automation Script above --------------------\n\n # revert label color to black\n x.config(foreground=\"#000\", font=self.mainFont)\n self.LabelLists.append(x)\n else:\n print(\"stopping test\")", "def wait_for_tag():\n time.sleep(1.1)", "def wait_for_test(wwt, timeout, for_render=False):\n from time import time\n from ..app import get_qapp\n MIN_ITERS = 128\n ALWAYS_EXTRA_LONG = sys.platform.startswith('darwin')\n\n if for_render and ALWAYS_EXTRA_LONG:\n timeout = 90\n\n app = get_qapp()\n t0 = time()\n iters = 0\n\n # Iterate for *at least* MIN_ITERS and *at least* `timeout` seconds.\n\n for _ in range(MIN_ITERS):\n iters += 1\n app.processEvents()\n\n while time() 
- t0 < timeout:\n iters += 1\n app.processEvents()\n\n dt = time() - t0\n\n if for_render:\n print(f'wait_for_test: iters={iters} dt={dt} timeout={timeout} always={ALWAYS_EXTRA_LONG}')", "def wait(self):\n\t\tself.wait_window(self)\n\t\treturn self.result", "def wait_progress(self):\n pass", "def wait_progress(self):\n pass", "def wait(self,c,time,loop):\r\n\r\n if loop==1:\r\n loopBool=True\r\n else:\r\n loopBool=False\r\n \r\n self.board.setupWait( time['ns'], loopBool)", "def _wait_what(self, expected):\r\n \r\n self._msg_server(cb.WAITWHATSERVER % (expected))", "def test_wait(self, mocker):\n\n tid = 289466\n site = \"mysite\"\n first_response = self.generate_task_dictionary(\n tid, state=\"waiting\", completed=False\n )\n\n responses = [\n {\"json\": first_response},\n {\"json\": self.generate_task_dictionary(tid)},\n ]\n url = (\n \"https://cloudapi.acquia.com/v1/\"\n \"sites/prod:{site}/tasks/{tid}.json\".format(tid=tid, site=site)\n )\n\n mocker.register_uri(\"GET\", url, responses)\n\n task = self.client.site(site).task(tid).wait()\n self.assertEqual(task[\"id\"], tid)\n self.assertEqual(task[\"state\"], \"done\")", "def wait(self):\n time.sleep(self.pause_time)", "def wait(self, th=\"100\"):\n # save and validate the parameters\n try:\n self.cfg['param'] = {'th' : int(th)}\n self.cfg.save()\n except ValueError:\n return self.error(errcode='badparams',\n errmsg=\"The parameter must be numeric.\")\n\n http.refresh(self.base_url + 'run?key=%s' % self.key)\n return self.tmpl_out(\"wait.html\")", "def wait(t):\n message = \"WAIT:\" + str(t) + '\\n'\n sock.sendall(message)\n time.sleep(t)\n return", "def test_calc_waiting():\n print '\\nTesting calc_waiting'\n expected = 142\n actual = sim.calc_waiting(106, 35, 71)\n if expected == actual:\n print 'calc_waiting(106, 35, 71) test passed.'\n else:\n print 'calc_waiting(106, 35, 71) test failed.'\n print 'expected: ', expected, ' actual: ', actual\n print ''", "def state_wait_do(cfg, app, win, events):", "def WaitForTest(self):\n\t\tpayload = { \"Arg1\": self.href }\n\t\treturn self._execute('waitForTest', payload=payload, response_object=None)", "def block_waiting( self ):\n while self.num_waiting > 0:\n time.sleep( 1 )", "def waitStatus(j, wtype='Load'):\n timeout = 1\n curIter = 0\n maxIter = 60\n done = False\n while not done:\n stat = j.GetStatus(wtype)\n if stat == \"complete\":\n done = True\n else:\n curIter = curIter + 1\n if curIter > maxIter:\n raise ValueError(\"timeout waiting\")\n time.sleep(timeout)", "def answer_waiting_call(self) -> None:", "def test_calc_waiting_1():\n print '\\nTesting calc_waiting_1'\n expected = 14\n actual = sim.calc_waiting(26, 32, 20)\n if expected == actual:\n print 'calc_waiting(26, 32, 20) test passed.'\n else:\n print 'calc_waiting(26, 32, 20) test failed.'\n print 'expected: ', expected, ' actual: ', actual\n print ''", "def __wait(min_sec, max_sec):\n time.sleep(randint(min_sec, max_sec))", "def wait(self, secs):\r\n t1 = time.time()\r\n self.driver.implicitly_wait(secs)\r\n self.my_print(\"{0} Set wait all element display in {1} seconds, Spend {2} seconds\".format(success,\r\n secs,time.time() - t1))", "async def test_wait_for(self) -> None:\n trigger = auraxium.Trigger(auraxium.event.Death)\n\n def do_nothing(_: auraxium.event.Event) -> None:\n pass\n\n trigger.action = do_nothing\n\n await self.client.wait_for(trigger, timeout=-1.0)\n\n with self.assertRaises(TimeoutError):\n await self.client.wait_for(trigger, timeout=0.00001)", "def wait_until_not_busy(debugger, t=100):\n\n while 
debugger.is_busy():\n yield timeout(t)", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def waitfor(self):\r\n finished = False\r\n while finished == False:\r\n time.sleep(5)\r\n finished = self.isFinished()", "def waitFor(self,duration=2):\n time.sleep(duration)\n print('Done waiting for ',duration)\n return", "def wait(self):\n return # this method might be obsolete since wait counter is no longer used and the ai counter is handled elsewhere.\n\n #the jump method could go in Being as well.", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for_time():\n while rospy.Time().now().to_sec() == 0:\n pass", "def wait_for_interrupts(self, wait_time = 1):\n raise AssertionError(\"wait_for_interrupts function i not implemented\")", "def test_wait(self):\n skill = create_skill()\n\n expected_response = 'Yes I do, very much'\n\n converser = Thread(target=create_converse_responder(expected_response,\n skill))\n converser.start()\n validator = mock.Mock()\n validator.return_value = True\n is_cancel = mock.Mock()\n is_cancel.return_value = False\n on_fail = mock.Mock()\n\n response = skill._wait_response(is_cancel, validator, on_fail, 1)\n self.assertEqual(response, expected_response)\n converser.join()", "def in_waiting(self) -> int:\n pass", "def test_wait_race(self):\n mock_handler = mock.Mock()\n async_result = self._makeOne(mock_handler)\n\n async_result.set(\"immediate\")\n\n cv = threading.Event()\n\n def wait_for_val():\n # NB: should not sleep\n async_result.wait(20)\n cv.set()\n th = threading.Thread(target=wait_for_val)\n th.daemon = True\n th.start()\n\n # if the wait() didn't sleep (correctly), cv will be set quickly\n # if it did sleep, the cv will not be set yet and this will timeout\n cv.wait(10)\n eq_(cv.is_set(), True)\n th.join()", "def busyWait(self):\n time.sleep(0.0)", "def wait(self, cycles):\n\t\tpass", "def test_ProstateReporting1(self):\n\n self.delayDisplay(\"Starting the test\")\n\n self.delayDisplay('Test passed!')", "def wait_vm_operation(self, params: dict) -> Tuple[\"Status\", dict]:", "async def wait_until_done(self) -> None:\n ...", "def wait_until(self, check, timeout=None):\n self._wait_in_process_loop(lambda: (check(),None),timeout=timeout)", "def wait_inner():\n if (\n kernel32.WaitForMultipleObjects(\n 2,\n ctypes.pointer((HANDLE * 2)(cancel_event, timer)),\n False,\n INFINITE,\n )\n == WAIT_FAILED\n ):\n time_sleep(sleep_for)", "def wait(self, timeout=None):\n assert False, \"Deriving class must implement\"", "def wait(item=TIME, ants=0, tmo=0, waiton=-2, precomment=None, postcomment=None,\n subarray=DEFAULT) :\n if tmo > carmaIni.CORBA_CLIENT_CALL_TIMEOUT_S:\n warn = \"wait: Timeout (%ds) is greater than max CORBA timeout (%ds).\"\n warn += \"\\n\\tThrottling to %ds.\"\n warn = warn%( tmo, \n carmaIni.CORBA_CLIENT_CALL_TIMEOUT_S, \n carmaIni.CORBA_CLIENT_CALL_TIMEOUT_S - 10 )\n printWarning( warn )\n tmo = carmaIni.CORBA_CLIENT_CALL_TIMEOUT_S - 10\n \n return runKeyboardInterruptable( _wait, item, ants, tmo, waiton, \n precomment, postcomment, subarray )", "def wait(self) -> None:\n\n self.event_.wait()", "def wait_for(func):\n \n while not func() and not rospy.is_shutdown():\n time.sleep(0.01)", "def test_is_finished(self):\n experiment = Experiment(TasksMock())\n self.assertEquals(False, experiment.is_finished())\n for _ in range(0, 17):\n experiment.press_b_down(time.time())\n self.assertEquals(False, 
experiment.is_finished())\n experiment.press_b_up(time.time())\n self.assertEquals(False, experiment.is_finished())\n experiment.press_b_down(time.time())\n self.assertEquals(False, experiment.is_finished())\n experiment.press_b_up(time.time())\n self.assertEquals(True, experiment.is_finished())", "def wait(period=5):\n import time\n print ('Wait for {val} seconds'.format(val=period))\n time.sleep(float(period))", "def wait_for_restore():\n while True:\n states = [\n # Wait till all actors are either \"ALIVE\" (retored),\n # or \"DEAD\" (cancelled. these actors are from other\n # finished test cases).\n a[\"state\"] == \"ALIVE\" or a[\"state\"] == \"DEAD\"\n for a in list_actors(filters=[(\"class_name\", \"=\", \"Actor\")])\n ]\n print(\"waiting ... \", states)\n if all(states):\n break\n # Otherwise, wait a bit.\n time.sleep(0.5)", "def _waitForTest(self, timeout=60):\n self._waiting = self.ioloop.add_timeout(time.time() + timeout,\n self._timeout)\n def _wait():\n if self._timedOut:\n self.fail('test timed out')\n self._done()\n if self._doneWaiting:\n self.ioloop.stop()\n return\n # we can use add_callback here but this uses less cpu when testing\n self.ioloop.add_timeout(time.time() + 0.01, _wait)\n\n self.ioloop.add_callback(_wait)\n self.ioloop.start()", "def wait(self, _id):\n while not self._actions[_id].done:\n sleep(1e-3)", "def wait(self, wait_timeout=10):\n if self._TransferInitiated == 0:\n return\n Error = \"DMA wait timed out.\"\n with timeout(seconds = wait_timeout, error_message = Error):\n while True:\n if libdma.XAxiDma_Busy(self.DMAengine,self.direction) == 0:\n break", "def wait(self, seconds):\n self.driver.implicitly_wait(seconds)", "def wait(self, seconds):\n time.sleep(seconds)", "def waitForCompletion(self):\n\n while(json.loads(self.robot.device())['state']!=0):\n time.sleep(0.1)\n continue\n\n return", "def wait(self):\n for _ in range(15):\n time.sleep(10)\n if self.ready:\n break\n else:\n raise RuntimeError('timeout, lease failed to start')", "def wait(function):\n def modified_function(*args, **kwargs):\n \"\"\"Tenta executar função até MAX_WAIT.\"\"\"\n start_time = time.time()\n while True:\n try:\n return function(*args, **kwargs)\n except (AssertionError, WebDriverException) as err:\n if time.time() - start_time > MAX_WAIT:\n raise err\n time.sleep(0.5)\n return modified_function", "def timeout_wait(self):\n if self._dtr_enabled:\n while (self.__micros() - self._resume_time) < 0:\n if False:\n break # TODO: Check for printer status here\n else:\n while (self.__micros() - self._resume_time) < 0:\n pass", "def wait(self, sleep_time):\n time.sleep(sleep_time)", "def wait(self, *args):\n print(\"and why are we stoping here?\")\n return self", "async def wait_for_state(self):\n await self.state_got.wait()\n assert self.time_step == self.rl_agent.current_round\n self.state_got.clear()", "def test_wait_tx_settled_ok(self, is_transaction_settled_mock):\n wait_tx_settled(\"some\", \"some\", timeout=4)", "def implicitly_wait(self, secs):\n self.base_driver.implicitly_wait(secs)", "def wait(self, timeout):\n raise NotImplementedError(\n u\"%s: Method not implemented\", self.__class__.__name__)", "def wait_for(test, timeout_seconds=DEFAULT_TIMEOUT):\n start = time.time()\n while True:\n if test():\n return True\n if time.time() - start > timeout_seconds:\n return False\n time.sleep(0.5)", "def __bool__(self):\n return self.wait(0)" ]
[ "0.761487", "0.74810785", "0.7287967", "0.6939687", "0.68942183", "0.68942183", "0.6890984", "0.6889294", "0.6812686", "0.6812686", "0.6812686", "0.6812686", "0.6812686", "0.6812686", "0.6812686", "0.6812686", "0.6812686", "0.6812686", "0.6796886", "0.6759803", "0.67246556", "0.6694036", "0.6694036", "0.6694036", "0.6694036", "0.6644915", "0.6636954", "0.65609205", "0.65534943", "0.6512476", "0.6486751", "0.6474505", "0.6464315", "0.6440001", "0.6414563", "0.6412097", "0.64081895", "0.63769", "0.6353528", "0.63240427", "0.63240427", "0.6312455", "0.6291641", "0.6261047", "0.6241257", "0.6228091", "0.62029564", "0.6202869", "0.62016284", "0.61800265", "0.6173764", "0.61717206", "0.61633116", "0.6159476", "0.6142496", "0.613991", "0.6137641", "0.613356", "0.6124597", "0.6117992", "0.61146575", "0.60806555", "0.6058151", "0.6058151", "0.6058151", "0.60569257", "0.6030156", "0.60260177", "0.60132325", "0.5997902", "0.599417", "0.59740806", "0.5971719", "0.59610933", "0.5945446", "0.5923937", "0.5914383", "0.591289", "0.5896937", "0.5893381", "0.5879965", "0.5869608", "0.58666974", "0.5866203", "0.58636904", "0.58526784", "0.5850307", "0.58359253", "0.5835116", "0.5833784", "0.58317727", "0.5828029", "0.58248967", "0.58227164", "0.5817144", "0.58164626", "0.5815458", "0.5802901", "0.5801629", "0.5790572" ]
0.6903799
4
Tests the _isInterestingPath method.
def testIsInterestingPath(self): # pylint: disable=protected-access self.assertTrue(self.turbinia_processor._isInterestingPath(TEST_TASK_PATH))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _isInterestingPath(self, path: str) -> bool:\n for suffix in self.extensions:\n if path.endswith(suffix):\n return True\n return False", "def test_find_path_bi():\n assert True", "def is_path(self, s):\n return True", "def _is_interesting_op(self, op):\n return op_priority(op.type) <= self._parameters.trace_level", "def _IsTestFile(self, path):\n\n raise NotImplementedError", "def test_expand_path_2(self):\n input_path = \"/fake/path\"\n expanded_path = basic.expand_path(input_path)\n expected_path = input_path\n self.assertEqual(expanded_path, expected_path)", "def _is_nested(pkg: str, pkg_path: str, parent: str, parent_path: str) -> bool:\n norm_pkg_path = _path.normpath(pkg_path)\n rest = pkg.replace(parent, \"\", 1).strip(\".\").split(\".\")\n return pkg.startswith(parent) and norm_pkg_path == _path.normpath(\n Path(parent_path, *rest)\n )", "def check_endpoint_in_paths(context, endpoint):\n data = context.response.json()\n paths = check_and_get_attribute(data, \"paths\")\n assert endpoint in paths, \"Cannot find the expected endpoint {e}\".format(\n e=endpoint)", "def _issubpath(self, a, b):\n p1 = a.rstrip(os.sep).split(os.sep)\n p2 = b.rstrip(os.sep).split(os.sep)\n return p1[:len(p2)] == p2", "def test_verify_path2_8(self):\n result, msg = basic.verify_path2(self.file, kind=None, expect=False)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertIsNone(msg)", "def test_verify_path2_9(self):\n result, msg = basic.verify_path2(self.file, kind=None, expect=True)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)", "def is_subpath(path: Path, other: Path):\n try:\n Path(path).relative_to(other)\n except ValueError:\n return False\n else:\n return True", "def test(cls, pathHolder, parentCrawler):\n if not super(AsciiCrawler, cls).test(pathHolder, parentCrawler):\n return False\n\n return pathHolder.ext() in ['json']", "def _is_bad_path(path, base):\r\n return not resolved(joinpath(base, path)).startswith(base)", "def test_path_reactions(self):\n self.assertEqual(str(self.PathReaction2), 'CH2OH <=> methoxy')", "def test_verify_path2_13(self):\n result, msg = basic.verify_path2(self.dir, kind=\"dir\", expect=False)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertIsNone(msg)", "def test_path(self, fs_path, fs):\n assert fs.path == fs_path", "def test_verify_path2_14(self):\n result, msg = basic.verify_path2(self.dir, kind=\"dir\", expect=True)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)", "def test_verify_path2_7(self):\n self.file.touch()\n result, msg = basic.verify_path2(self.file, kind=None, expect=False)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)", "def test_verify_path2_4(self):\n result, msg = basic.verify_path2(self.file, kind=\"file\", expect=True)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)", "def test(cls, pathHolder, parentCrawler):\n if not super(Scene, cls).test(pathHolder, parentCrawler):\n return False\n\n return pathHolder.ext() in cls.extensions()", "def _veritesting(self):\n\n p = self._input_path.copy()\n\n try:\n new_path_group = self._execute_and_merge(p)\n\n except (ClaripyError, SimError, AngrError):\n if not BYPASS_VERITESTING_EXCEPTIONS in p.state.options:\n raise\n else:\n l.warning(\"Veritesting caught an exception.\", exc_info=True)\n return False, 
PathGroup(self.project, stashes={'deviated', p})\n\n except VeritestingError as ex:\n l.warning(\"Exception occurred: %s\", str(ex))\n return False, PathGroup(self.project, stashes={'deviated', p})\n\n l.info('Returning a set of new paths: %s (successful: %s, deadended: %s, errored: %s, deviated: %s)',\n new_path_group,\n new_path_group.successful,\n new_path_group.deadended,\n new_path_group.errored,\n new_path_group.deviated\n )\n\n return True, new_path_group", "def testPathToLocator(self, _mock_inside, mock_cwd):\n ws = self.workspace_dir\n mock_cwd.return_value = ws\n\n foo_path = workspace_lib.PathToLocator(os.path.join(ws, 'foo'))\n baz_path = workspace_lib.PathToLocator(os.path.join(ws, 'bar', 'foo',\n 'baz'))\n daisy_path = workspace_lib.PathToLocator(os.path.join(constants.SOURCE_ROOT,\n 'src', 'overlays',\n 'overlay-daisy'))\n some_path = workspace_lib.PathToLocator(os.path.join(constants.SOURCE_ROOT,\n 'srcs', 'bar'))\n\n self.assertEqual('//foo', foo_path)\n self.assertEqual('//bar/foo/baz', baz_path)\n self.assertEqual('board:daisy', daisy_path)\n self.assertEqual(None, some_path)\n\n def assertReversible(loc):\n path = workspace_lib.LocatorToPath(loc)\n self.assertEqual(loc, workspace_lib.PathToLocator(path))\n\n assertReversible('//foo')\n assertReversible('//foo/bar/baz')\n assertReversible('board:gizmo')", "def test_verify_path2_6(self):\n self.file.touch()\n result, msg = basic.verify_path2(self.file, kind=None, expect=True)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertIsNone(msg)", "def _is_request_in_include_path(self, request):\n if self._include_paths:\n for path in self._include_paths:\n if request.path.startswith(path):\n return True\n return False\n else:\n return True", "def test_verify_path2_10(self):\n result, msg = basic.verify_path2(self.file, kind=\"invalid\", expect=True)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)", "def exists(self, path: PathLike):", "def test_n_path_reactions(self):\n self.assertEqual(self.Npath, 3)", "def test_image_path(self):\n self.assertEqual(\n self.mineral.image_path,\n 'minerals/images/some_filename.jpg')", "def test_infodir(self):\n self.chck_triple('infodir')", "def test_path_override(self):\n path_example = os.path.join(here, 'path-example.ini')\n manifest = ManifestParser(manifests=(path_example,))\n self.assertEqual(manifest.tests[0]['path'],\n os.path.join(here, 'fleem'))", "def test_expand_path_3(self):\n partial_path = \"/fake/path\"\n input_path = \".\" + partial_path\n expanded_path = basic.expand_path(input_path)\n local_path = Path(\".\").resolve()\n expected_path = str(local_path) + partial_path\n self.assertEqual(expanded_path, expected_path)", "def is_path(t, path):\n if label(t) != path[0]:\n return False\n if len(path) == 1:\n return True\n return any([is_path(b, path[1:]) for b in branches(t)])", "def insignificant(path):\n\n # This part is simply an implementation detail for the code base that the\n # script was developed against. 
Ideally this would be moved out to a config\n # file.\n return path.endswith('Dll.H') or path.endswith('Forward.H') or \\\n path.endswith('templates.H')", "def test_wants_to_handle2(self):\n\n self.bogus_environ['PATH_INFO'] = '/by_frag/abc123'\n\n self.assertFalse(self.uh.wants_to_handle(self.bogus_environ))", "def _generic_test(self, pathstr, expected):\n self.assertEqual(self._get_pe_key(pathstr), expected)", "def test_path(self):\n self.assertEqual(\n self.log.current_log_path,\n f'{self.path}/.{datetime.now(ET).date().isoformat()}.log'\n )", "def test_verify_path2_3(self):\n result, msg = basic.verify_path2(self.file, kind=\"file\", expect=False)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertIsNone(msg)", "def test_00(self):\n result = resolve_path({'_id': '1'}, '')\n expected = '/index.html'\n self.assertEqual(result, expected)\n\n result = resolve_path({'_id': '1'}, '/')\n expected = '/index.html'\n self.assertEqual(result, expected)\n\n result = resolve_path({'_id': '1'}, 'foo.png')\n expected = '/foo.png'\n self.assertEqual(result, expected)\n\n result = resolve_path({'_id': '1'}, 'bar/foo.js')\n expected = '/bar/foo.js'\n self.assertEqual(result, expected)\n\n result = resolve_path({'_id': '1'}, 'main.js')\n expected = '/main.js'\n self.assertEqual(result, expected)", "def isdir (self, path):\r\n pass", "def is_file(self, path: PathLike):", "def test_tree_intersection_name_exists():\n assert tree_intersection", "def test_top_level2(iscream):\n assert iscream.section is None\n assert len(iscream.branches) == 2\n assert len(list(iscream)) == 2\n assert str(iscream.subsection) == 'I Scream'\n assert len(list(iscream.subsections)) == 2\n assert iscream.depth == 0", "def __test(graph): \n \n if not isinstance(graph, basegraph):\n raise TypeError(\"Expected type was Graph.\")\n \n print \"### iPATH TEST DATA STRUCTURE\"\n print \"### Data Type: Graph ({})\".format(str(graph.__class__.__bases__[0].__name__))\n print \"### Implementation: {}\".format(str(graph.__class__.__name__))\n \n print \"\\n*** ADD NODE ***\\n\" \n for i in range(10):\n print \"add_node({})\".format(str(i)) \n graph.add_node(i) \n \n print \"\\n*** ADD ARC ***\\n\" \n for i in range(10):\n print \"add_arc({}, {}, {})\".format(str(i), str(i + 1), str(2 * (i + 1)))\n graph.add_arc(i, i + 1, 2 * (i + 1))\n print \"add_arc({}, {}, {})\".format(str(i), str(i + 2), str(2 * (i + 2)))\n graph.add_arc(i, i + 2, 2 * (i + 2))\n \n print \"\\n*** GRAPH ***\\n\" \n print \"\\n{}\\n\".format(str(graph))\n \n print \"\\n*** REMOVE NODE ***\\n\" \n print \"remove_node(5)\"\n graph.remove_node(5)\n \n print \"\\n*** GRAPH ***\\n\" \n print \"\\n{}\\n\".format(str(graph))\n \n print \"\\n*** REMOVE ARC ***\\n\" \n print \"remove_arc(7, 8)\" \n graph.remove_arc(7, 8)\n \n print \"\\n*** GRAPH ***\\n\" \n print \"\\n{}\\n\".format(str(graph))\n \n print \"\\n*** INCIDENT ARCS ***\\n\" \n for node in graph.get_nodes():\n print \"Incident Arcs of {}\\t{}\\n\".format(str(node), str(graph.get_incident_arcs(node._id)))\n \n print \"\\n*** ADJACENCY ***\\n\" \n for i in range(10):\n for j in range(10):\n if graph.are_adjacent(i, j) == True:\n print \"Adjacency Between ({}, {}): True\\n\".format(str(i), str(j))\n \n print \"\\n*** NODES ***\\n\" \n print \"numNodes: {}\\n\".format(str(graph.get_num_nodes())) \n print \"Nodes: {}\\n\".format(str(graph.get_nodes())) \n \n print \"\\n*** ARCS ***\\n\" \n print \"numArcs: {}\\n\".format(str(graph.get_num_arcs())) \n print \"Arcs: 
{}\\n\".format(str(graph.get_arcs())) \n \n print \"\\n*** SEARCH BFS ***\\n\" \n for i in range(10): \n print \"bfs({})\".format(str(i))\n Lbfs = graph.bfs(i)\n for n in Lbfs:\n print \"{}\\n\".format(str(n))\n print \"\\n\"\n \n print \"\\n*** SEARCH DFS ***\\n\" \n for i in range(9):\n print \"dfs({})\".format(str(i))\n Ldfs = graph.dfs(i)\n for n in Ldfs:\n print \"{}\\n\".format(str(n))\n print \"\\n\"\n \n print \"\\n### END OF TEST ###\\n\"", "def is_nested(line):\n pass", "def test_path(self):\n base_handler_path = 'conman.routes.handlers.BaseHandler'\n self.assertEqual(BaseHandler.path(), base_handler_path)", "def test_find_hierarchy(chikin):\n hie = chikin.findHierarchy()\n assert hie == ('section', 'subsection')\n assert chikin.depth == 0\n section = TexSoup(r'\\section{asdf}').section\n assert chikin.getHeadingLevel(section, hie) == 1\n assert chikin.parseTopDepth(chikin.descendants) == 1", "def is_pathable(self) -> bool:\n # <<-- Creer-Merge: is_pathable_builtin -->> - Code you add between this comment and the end comment will be preserved between Creer re-runs.\n return False # DEVELOPER ADD LOGIC HERE\n # <<-- /Creer-Merge: is_pathable_builtin -->>", "def test_verify_path2_5(self):\n self.file.touch()\n result, msg = basic.verify_path2(self.dir, kind=\"dir\", expect=True)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)", "def isfile (self, path):\r\n pass", "def inPath(self, oth: 'StateNode') -> bool:\n if self == oth:\n return True\n if self.isSameState(oth):\n return True\n if self.previous is not None:\n return self.previous.inPath(oth)", "def test_addPath_obviousCycle(self):\n g = Garden()\n self.assertRaises(CycleError, g.addPath, 'foo', 'v1', [\n ('foo', 'v1'),\n ])", "def test_verify_path_1(self):\n result = basic.verify_path(self.test_filepath1, \"file\")\n self.assertTrue(result)", "def check_path(self, path):\n if path in self.app_path:\n return True\n else:\n return False", "def test_path(self):\n self.assertEqual(self.ftp_case.path, '/rfc/rfc1808.txt')\n self.assertEqual(self.ldap_case.path, '/c=GB')\n self.assertEqual(self.news_case.path, \n 'comp.infosystems.www.servers.unix')\n self.assertEqual(self.telnet_case.path, '/')\n self.assertEqual(self.urn_case.path, \n 'oasis:names:specification:docbook:dtd:xml:4.1.2')", "def test_wants_to_handle1(self):\n\n print 'REQUEST_METHOD: %(REQUEST_METHOD)s' % self.bogus_environ\n print 'PATH_INFO: %(PATH_INFO)s' % self.bogus_environ\n\n frag = self.uh.extract_frag(self.bogus_environ['PATH_INFO'])\n print 'frag: %s' % frag\n print 'frag in self.proj: %s' % (frag in self.proj)\n\n self.assertEqual(\n self.uh,\n self.uh.wants_to_handle(self.bogus_environ))", "def ismount(path):\n return True if not get_instance(path).relpath(path) else False", "def test_verify_path2_1(self):\n self.file.touch()\n result, msg = basic.verify_path2(self.file, kind=\"file\", expect=True)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertIsNone(msg)", "def test_verify_path_7(self):\n result = basic.verify_path(str(self.test_directory1), \"invalid\")\n self.assertFalse(result)", "def test_verify_path2_2(self):\n self.file.touch()\n result, msg = basic.verify_path2(self.file, kind=\"file\", expect=False)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)", "def test_parse_url_path() -> None:\n assert indieauth._parse_url(\"http://ex.com\").path == \"/\"", "def path_check(dataset: LAMLDataset):\n roles = 
dataset.roles\n features = dataset.features\n for f in features:\n assert roles[f].name == \"Path\", \"Only path accepted in this transformer\"", "def path_is_base(self, path):\n\n return path is not None and len(path) == len(self.levels)", "def is_dir(self, path):", "def path_home_mock():\n raise AttributeError()", "def is_dir(self, path: PathLike):", "def _is_private(self, path, name, obj):\n # Skip objects blocked by doc_controls.\n if doc_controls.should_skip(obj):\n return True\n\n # Skip modules outside of the package root.\n if inspect.ismodule(obj):\n if hasattr(obj, \"__file__\"):\n if not obj.__file__.startswith(self._base_dir):\n return True\n\n # Skip objects blocked by the private_map\n if name in self._private_map.get(\".\".join(path), []):\n return True\n\n # Skip \"_\" hidden attributes\n is_dunder = name.startswith(\"__\") and name.endswith(\"__\")\n if name.startswith(\"_\") and not is_dunder:\n return True\n\n if name in [\"__base__\", \"__class__\"]:\n return True\n\n return False", "def exists_path(self, start, end):\n return end in self.paths(start)", "def verify_restricted_path(self) -> None:\n path = \"/usr\"\n with self.assertRaises(NotFoundException):\n verify_file_path(path)", "def assert_path(self, root: Node, path: str) -> None:\n\n if not self.__assert_path(root, path):\n raise Exception('Path \\'{}\\' not found in root node:\\n{}'.format(path, root))", "def test_level_depth(chikin):\n assert chikin.depth == 0\n assert str(chikin.section) == 'Chikin Tales'\n assert chikin.section.depth == 1\n assert chikin.section.subsection.depth == 2", "def test_io_path_string(args, string):\n assert deepr.io.Path(*args) == string", "def test_marking_path_parsing(self):\n \n # paths to attempt for a global AMBER marking\n global_xpaths = [\n {\n \"path\": \"//node() | //@*\",\n \"should_pass\": True\n },\n {\n \"path\": \"this is not a real xpath\",\n \"should_pass\": False\n }\n ]\n # paths to attempt for a local RED marking\n local_xpaths = [\n {\n \"path\": \"../../../descendant-or-self::node() | ../../../descendant-or-self::node()/@*\",\n \"should_pass\": True\n },\n {\n \"path\": \"this is not a real xpath\",\n \"should_pass\": False\n }\n ]\n\n for global_path_dict in global_xpaths:\n for local_path_dict in local_xpaths:\n # Format our STIX XML template\n xml = STIX_XML_TEMPLATE_GLOBAL_AND_COMPONENT.format(global_path_dict[\"path\"], local_path_dict[\"path\"])\n xml_readable = StringIO(xml)\n\n # Build and parse the MarkingContainer\n try:\n container = stixmarx.parse(xml_readable)\n except etree.XPathEvalError:\n self.assertTrue(global_path_dict[\"should_pass\"] is False or local_path_dict[\"should_pass\"] is False)\n continue\n\n package = container.package\n\n colors = [marking_spec.marking_structures[0].color for marking_spec in container.get_markings(package.indicators[0])]\n\n self.assertTrue(('AMBER' in colors) == global_path_dict[\"should_pass\"])\n self.assertTrue(('RED' in colors) == local_path_dict[\"should_pass\"])", "def getPath(obj):", "def test_verify_path2_16(self):\n self.dir.mkdir()\n result, msg = basic.verify_path2(self.dir, kind=None, expect=True)\n with self.subTest():\n self.assertTrue(result)\n with self.subTest():\n self.assertIsNone(msg)", "def test_is_not_hidden(self) -> None:\n path = \"home\"\n result = is_hidden(path)\n self.assertFalse(result)", "def test_docs_paths():\n assert os.path.exists('test/examples/docs/paths-root-api.md')\n assert os.path.exists('test/examples/docs/paths-subpath1.md')\n assert 
os.path.exists('test/examples/docs/paths-subpath1.md')", "def exists(self, path):", "def test_hierarchies(self):\n compare_file_hierarchy(self, file_hierarchy(self.file_hierarchy_dict))", "def has_path(self, source, target):\n try:\n sp = nx.shortest_path(self.G, source, target)\n except nx.NetworkXNoPath:\n return False\n return True", "def test__extend_paths():\n file_paths = [\"docs/abcd/\", \"docs/123/\"]\n\n extend_paths = classifier_module.Classifier._extend_paths\n path_element = \"u/\"\n extended_paths = extend_paths(file_paths, path_element)\n\n assert len(file_paths) == len(extended_paths)\n for path_num in range(len(file_paths)):\n assert file_paths[path_num] + path_element == extended_paths[path_num]", "def path_is_hidden(path):\n for p in path.parts:\n if p != '..' and p[0] == '.':\n return True\n return False", "def test_expand_path_1(self):\n partial_path = \"/fake/path\"\n input_path = \"~\" + partial_path\n expanded_path = basic.expand_path(input_path)\n home_dir = Path(\"~\").expanduser()\n expected_path = str(home_dir) + partial_path\n self.assertEqual(expanded_path, expected_path)", "def pathlookup(obj_or_path_tuple, depth=None, include_origin=True):", "def testIsFunctions(self):\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS,\n identifier=self._IDENTIFIER_ANOTHER_FILE,\n location='/a_directory/another_file',\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n self.assertFalse(file_entry.IsRoot())\n self.assertFalse(file_entry.IsVirtual())\n self.assertTrue(file_entry.IsAllocated())\n\n self.assertFalse(file_entry.IsDevice())\n self.assertFalse(file_entry.IsDirectory())\n self.assertTrue(file_entry.IsFile())\n self.assertFalse(file_entry.IsLink())\n self.assertFalse(file_entry.IsPipe())\n self.assertFalse(file_entry.IsSocket())\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, identifier=self._IDENTIFIER_A_DIRECTORY,\n location='/a_directory', parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n self.assertFalse(file_entry.IsRoot())\n self.assertFalse(file_entry.IsVirtual())\n self.assertTrue(file_entry.IsAllocated())\n\n self.assertFalse(file_entry.IsDevice())\n self.assertTrue(file_entry.IsDirectory())\n self.assertFalse(file_entry.IsFile())\n self.assertFalse(file_entry.IsLink())\n self.assertFalse(file_entry.IsPipe())\n self.assertFalse(file_entry.IsSocket())\n\n path_spec = path_spec_factory.Factory.NewPathSpec(\n definitions.TYPE_INDICATOR_HFS, location='/',\n parent=self._raw_path_spec)\n file_entry = self._file_system.GetFileEntryByPathSpec(path_spec)\n self.assertIsNotNone(file_entry)\n\n self.assertTrue(file_entry.IsRoot())\n self.assertFalse(file_entry.IsVirtual())\n self.assertTrue(file_entry.IsAllocated())\n\n self.assertFalse(file_entry.IsDevice())\n self.assertTrue(file_entry.IsDirectory())\n self.assertFalse(file_entry.IsFile())\n self.assertFalse(file_entry.IsLink())\n self.assertFalse(file_entry.IsPipe())\n self.assertFalse(file_entry.IsSocket())", "def test_make_pathways(self):\n basic_test_runner(self, 'pathways')", "def is_embedded(request):\n hx_current_url = request.headers.get('HX-Current-URL', None)\n if not hx_current_url:\n return False\n return request.path != urlparse(hx_current_url).path", "def test_ignored_path(self):\n with self.settings(IGNORE_URLS=(re.compile(r'^/ignored.*'),)):\n response = 
self.client.get('/ignored/')\n self.assertNormalMode(response)", "def test_invalid_path(self):\n self.assertRaises(argparse.ArgumentTypeError, generic.check_path, 'foo')", "def is_resource(self, path):\n # type: (Text) -> bool\n raise FileNotFoundError", "def testGetTagPathsForObjectIDsWithoutData(self):\n self.assertEqual([], list(getTagPathsForObjectIDs([])))", "def test_path(tmp_path: Path) -> None:\n path = tmp_path / \"repository\"\n repository = Repository.init(path)\n assert path == repository.path", "def is_private(path):\n for p in path.split(\".\"):\n if p.startswith(\"_\") and not p.startswith(\"__\"):\n return True\n return False", "def is_path_overbound(path):\n\n ip = path.addr\n\n if ip in self._boundaries:\n l.debug(\"... terminating Veritesting due to overbound\")\n return True\n\n if (ip in loop_heads # This is the beginning of the loop\n or path.jumpkind == 'Ijk_Call' # We also wanna catch recursive function calls\n ):\n path.info['loop_ctrs'][ip] += 1\n\n if path.info['loop_ctrs'][ip] >= self._loop_unrolling_limit + 1:\n l.debug('... terminating Veritesting due to overlooping')\n return True\n\n l.debug('... accepted')\n return False", "def test_access_nested_map_exception(self, nested_map, path):\n with self.assertRaises(KeyError) as error:\n access_nested_map(nested_map, path)\n self.assertEqual(error.exception.args[0], path[-1])", "def isfile(path):\n return get_instance(path).isfile(path)", "def _GetTestFromPath(self, test_id, path):\n\n raise NotImplementedError", "def test_save_subimage_fails(self):\n with self.assertRaises(NotImplementedError):\n self.cheese.save_analyzed_subimage()", "def test_access_nested_map(self, nested_map, path, result):\n self.assertEqual(access_nested_map(nested_map, path), result)", "def test_verify_path2_15(self):\n self.dir.mkdir()\n result, msg = basic.verify_path2(self.dir, kind=\"file\", expect=True)\n with self.subTest():\n self.assertFalse(result)\n with self.subTest():\n self.assertIsNotNone(msg)" ]
[ "0.72631514", "0.58760685", "0.58147526", "0.56765735", "0.55693245", "0.55457276", "0.54889286", "0.5446209", "0.53737456", "0.53603786", "0.53477657", "0.5298553", "0.5297065", "0.5293575", "0.5228027", "0.5226015", "0.519997", "0.51928556", "0.5165649", "0.51393193", "0.51282936", "0.5125355", "0.51160234", "0.5116009", "0.508583", "0.50801396", "0.50783086", "0.5071152", "0.5055903", "0.50488335", "0.50484806", "0.5048383", "0.50431705", "0.50240827", "0.50124526", "0.4997207", "0.4987228", "0.49844033", "0.497924", "0.49685115", "0.4966749", "0.49548122", "0.4954498", "0.49514386", "0.4939189", "0.49333218", "0.4921306", "0.49152198", "0.491404", "0.49103892", "0.49089274", "0.49051744", "0.49002552", "0.48989844", "0.48876566", "0.48833862", "0.48833472", "0.4875124", "0.48692986", "0.4868846", "0.48666528", "0.4866097", "0.4855773", "0.48547328", "0.48495054", "0.48447093", "0.48372582", "0.48372412", "0.4833649", "0.48277512", "0.48253152", "0.48187596", "0.48103648", "0.48097312", "0.4808425", "0.4800407", "0.47995594", "0.4797716", "0.47937384", "0.47913644", "0.47910434", "0.47873366", "0.47813332", "0.47801933", "0.4778709", "0.4762701", "0.47625512", "0.475886", "0.47522733", "0.4750461", "0.47490543", "0.47486362", "0.47451764", "0.47439092", "0.4738493", "0.47378954", "0.47331297", "0.47292346", "0.47288486", "0.47264493" ]
0.81139785
0
Tests the _ExtractFiles method.
def testExtractPath(self, mock_tempdir):
  mock_tempdir.return_value = '/tmp'
  file_path = os.path.join(
      CURRENT_DIR, "test_data", "c4e9abd577db475484b2ded34a011b96.tgz")
  expected_local_path = f"/tmp{TEST_TASK_PATH}"
  # pylint: disable=protected-access
  local_path = self.turbinia_processor._ExtractFiles(
      file_path, TEST_TASK_PATH)
  self.assertEqual(local_path, expected_local_path)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def extract_files(self) -> list:\n pass", "def extract(self):\n self.build_path_pairs()\n self.extract_field_blocks()\n self.assert_filenames()", "def test_zip_files(self):\n base_zip_files = ['whypython.txt', 'states.dbf', 'cities.kmz']\n\n text_file = os.path.join(os.getcwd(), 'test-data', 'whypython.txt')\n dbf_file = os.path.join(os.getcwd(), 'test-data', 'states.dbf')\n kml_file = os.path.join(os.getcwd(), 'test-data', 'cities.kmz')\n #non_file = os.path.join(os.getcwd(), 'test-data', 'emptyfolder')\n self.request['params'][0]['response']['docs'][0]['path'] = text_file\n self.request['params'][0]['response']['docs'][1]['path'] = dbf_file\n self.request['params'][0]['response']['docs'][2]['path'] = kml_file\n #self.request['params'][0]['response']['docs'][3]['path'] = non_file\n __import__(self.request['task'])\n getattr(sys.modules[self.request['task']], \"execute\")(self.request)\n zip_files = zipfile.ZipFile(os.path.join(self.temp_folder, 'output.zip')).namelist()\n self.assertEqual(sorted(zip_files), sorted(base_zip_files))", "def extract_files(self, *filenames):\n for filename in filenames:\n data = self.read_file(filename)\n f = open(filename, 'wb')\n f.write(data or b'')\n f.close()", "def _unzip_files(self) -> None:\n for file in self.input_path.iterdir():\n if is_zipfile(file):\n with ZipFile(file, mode=\"r\") as archive:\n archive.extractall(path=self.temp_path)", "def test_extract_configs():\n extract_config_dir = os.path.join(\n settings.BASE_DIR, \"extract_configs\", \"templates\"\n )\n for ft, obj in FILE_TYPES.items():\n ec_file = obj[\"template\"]\n if not ec_file:\n continue\n ec_path = os.path.join(extract_config_dir, ec_file)\n print(f\"Testing extract config: {ec_path}\")\n assert os.path.exists(ec_path)\n df = make_template_df(ft)\n Extractor().extract(df, ec_path)", "def extract(self, step_name, archive_file, output, mode='safe',\n include_files=()):\n assert mode in ('safe', 'unsafe'), 'Unknown mode %r' % (mode,)\n\n step_result = self.m.python(\n step_name,\n self.resource('extract.py'),\n [\n '--json-input', self.m.json.input({\n 'output': str(output),\n 'archive_file': str(archive_file),\n 'safe_mode': mode == 'safe',\n 'include_files': list(include_files),\n }),\n '--json-output', self.m.json.output(),\n ],\n step_test_data=lambda: self.m.json.test_api.output({\n 'extracted': {\n 'filecount': 1337,\n 'bytes': 0xbadc0ffee,\n },\n }))\n self.m.path.mock_add_paths(output)\n j = step_result.json.output\n if j.get('extracted', {}).get('filecount'):\n stat = j['extracted']\n step_result.presentation.step_text += (\n '<br/>extracted %s files - %.02f MB' % (\n stat['filecount'], stat['bytes'] / (1000.0**2)))\n if j.get('skipped', {}).get('filecount'):\n stat = j['skipped']\n step_result.presentation.step_text += (\n '<br/>SKIPPED %s files - %.02f MB' % (\n stat['filecount'], stat['bytes'] / (1000.0**2)))\n step_result.presentation.logs['skipped files'] = stat['names']\n step_result.presentation.status = self.m.step.FAILURE\n ex = self.m.step.StepFailure(step_name)\n ex.archive_skipped_files = stat['names']\n raise ex", "def test_get_filepaths(self):\n\n #setup\n get_filepaths = extractor.make_get_filepaths(self.mock_get_files_fn)\n \n #when\n test1 = get_filepaths(\"./dir1\", \".csv\")\n\n #result\n assert len(test1) == 2", "def test_case_4():\n print(\"*********Test_case_4***********\")\n path = os.path.join(os.path.dirname(__file__), 'testdir')\n result = find_files('', path)\n for file in result:\n print(file)", "def extract_files( self, fnames: List[str], 
outdir: str, splitFiles=False ) -> bool:\n\n # TODO: Maybe create some progress indicator \n\n if not self.opened:\n self.log( \"err\", \"file is closed\" )\n return False\n\n fp = self.fp\n\n # WIP\n if splitFiles == True: \n self.log( \"info\", \"Will split large files\" )\n self.log( \"warn\", \"splitting files is WIP!\" )\n\n outdir = os.path.abspath( outdir )\n\n # create output directory, iff it does not exist already\n if not os.path.isdir( outdir ):\n self.log( \"info\", \"Creating output directory '%s'\" % outdir )\n try:\n os.mkdir( outdir )\n except:\n self.log( \"err\", \"Could not create output directory!\" )\n return False\n \n # if no files are specified, assume all files need to be extracted\n if fnames == None or len( fnames ) == 0:\n for f in self.listfiles():\n self.log( \"info\", \"Extracting '%s'\" % f[ 0 ] )\n\n # split files >4GB, iff requested\n if splitFiles == True and f[ 1 ] > FAT32_MAX_SIZE:\n splitFileDir = os.path.join( outdir, f[ 2 ] )\n if not self.__extract_split_file( f[ 2 ], f[ 1 ], splitFileDir ):\n self.log( \"err\", \"failed to extract '%s'\" % f[ 0 ] )\n else:\n outfile = open( os.path.join( outdir, f[ 0 ] ), \"wb\" )\n if not self.__extract_file( f[ 2 ], f[ 1 ], outfile ):\n self.log( \"err\", \"failed to extract '%s'\" % f[ 0 ] )\n outfile.close()\n else:\n # go through all specified files\n for f in self.listfiles():\n # extract existing files, ignore non-existing files\n if not f[ 0 ] in fnames:\n self.log( \"warn\", \"File '%s' does not exist! continuing...\" % f[ 0 ] )\n continue\n\n self.log( \"info\", \"Extracting '%s'\" % f[ 0 ] )\n\n if splitFiles == True and f[ 1 ] > FAT32_MAX_SIZE:\n # >4GB file should be split\n # WIP: maybe works?\n splitFileDir = os.path.join( outdir, f[ 0 ] )\n if not self.__extract_split_file( f[ 2 ], f[ 1 ], splitFileDir ):\n self.log( \"err\", \"Parted file extraction failed\" )\n else:\n # extract like normal\n outfile = open( os.path.join( outdir, f[ 0 ] ), \"wb\" )\n if not self.__extract_file( f[ 2 ], f[ 1 ], outfile ):\n self.log( \"err\", \"File extraction failed!\" )\n outfile.close()\n return True", "def extractZipFiles(rootDir, zipDir):\n for root, dirs, files in os.walk(zipDir, topdown=False):\n for name in files:\n \n zipFiles = os.path.join(root, name)\n \n #Check file extension here\n if \".zip\" not in zipFiles:\n continue\n \n else:\n zipPath = zipfile.ZipFile(zipFiles, 'r')\n #print(zipPath) \n \n filesInZip = zipPath.namelist()\n i = 0 \n for i in range(len(filesInZip)):\n #print(filesInZip[i])\n #print(zipPath.getinfo(filesInZip[i]))\n \n if \".mp3\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n elif \".m4a\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n elif \".mp4\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n elif \".png\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n elif \".jpg\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n \n elif \".pdf\" in filesInZip[i]:\n zipPath.extract(filesInZip[i], rootDir)\n print(\"{0} extracted to {1}\".format(filesInZip[i], rootDir))\n\n else:\n print(\"No media found in zip file {0}\".format(name))\n \n zipPath.close()", "def extractall(self, *args, 
**kwargs):\n self.zipfile.extractall(*args, **kwargs)", "def test_find_many_files_zipped_allow(self):\n\n these_file_names = satellite_io.find_many_files(\n top_directory_name=TOP_DIRECTORY_NAME,\n first_date_string=FIRST_DATE_STRING,\n last_date_string=LAST_DATE_STRING,\n prefer_zipped=True, allow_other_format=True, test_mode=True\n )\n\n self.assertTrue(these_file_names == FILE_NAMES_UNZIPPED)", "def test_collect_files():\n filelist = [\"test/a.ext\", \"test/b.asd\"]\n\n result = loader.collect_files(filelist, lambda x: x, lambda x: np.arange(0, 50))\n\n for k in filelist:\n assert np.array_equal(np.arange(0, 50), result[k])", "def test_get_file_accessors(self):\n pass", "def test_files_from_plate():\n plate_path = os.path.join(TEST_PATH_IX, \"test-plate-1\")\n output = filelister_ix.files_from_plate(plate_path)\n assert len(output) > 0\n for f in output:\n assert f.endswith(\".tif\")", "def test_find_many_files_zipped_no_allow(self):\n\n these_file_names = satellite_io.find_many_files(\n top_directory_name=TOP_DIRECTORY_NAME,\n first_date_string=FIRST_DATE_STRING,\n last_date_string=LAST_DATE_STRING,\n prefer_zipped=True, allow_other_format=False, test_mode=True\n )\n\n self.assertTrue(these_file_names == FILE_NAMES_ZIPPED)", "def test_pick_files(mock_zip_file):\n\n files = ['Unihan_Readings.txt', 'Unihan_Variants.txt']\n\n options = {'input_files': files, 'zip_path': str(mock_zip_file)}\n\n b = process.Packager(options)\n\n result = b.options['input_files']\n expected = files\n\n assert result == expected, 'Returns only the files picked.'", "def helperExtractParallel(self, test_name, hpss_path, zstash_path=ZSTASH_PATH):\n self.hpss_path = hpss_path\n use_hpss = self.setupDirs(test_name)\n self.create(use_hpss, zstash_path)\n self.add_files(use_hpss, zstash_path)\n self.extract(use_hpss, zstash_path)\n print_starred(\"Deleting the extracted files and doing it again in parallel.\")\n self.assertWorkspace()\n shutil.rmtree(self.test_dir)\n os.mkdir(self.test_dir)\n os.chdir(self.test_dir)\n if not use_hpss:\n shutil.copytree(\n \"{}/{}/{}\".format(TOP_LEVEL, self.backup_dir, self.cache), self.copy_dir\n )\n cmd = \"{}zstash extract -v --hpss={} --workers=3\".format(\n zstash_path, self.hpss_path\n )\n output, err = run_cmd(cmd)\n os.chdir(TOP_LEVEL)\n expected_present = [\n \"Extracting file0.txt\",\n \"Extracting file0_hard.txt\",\n \"Extracting file0_soft.txt\",\n \"Extracting file_empty.txt\",\n \"Extracting dir/file1.txt\",\n \"Extracting empty_dir\",\n \"Extracting dir2/file2.txt\",\n \"Extracting file3.txt\",\n \"Extracting file4.txt\",\n \"Extracting file5.txt\",\n ]\n if use_hpss:\n expected_present.append(\"Transferring file from HPSS\")\n expected_absent = [\"ERROR\", \"Not extracting\"]\n self.check_strings(cmd, output + err, expected_present, expected_absent)\n # Checking that the printing was done in order.\n tar_order = []\n console_output = output + err\n for word in console_output.replace(\"\\n\", \" \").split(\" \"):\n if \".tar\" in word:\n word = word.replace(\"{}/\".format(self.cache), \"\")\n tar_order.append(word)\n if tar_order != sorted(tar_order):\n error_message = \"The tars were printed in this order: {}\\nWhen it should have been in this order: {}\".format(\n tar_order, sorted(tar_order)\n )\n self.stop(error_message)\n\n # Run again, without verbose option.\n shutil.rmtree(self.test_dir)\n os.mkdir(self.test_dir)\n os.chdir(self.test_dir)\n if not use_hpss:\n shutil.copytree(\n \"{}/{}/{}\".format(TOP_LEVEL, self.backup_dir, self.cache), self.copy_dir\n )\n 
cmd = \"{}zstash extract --hpss={} --workers=3\".format(\n zstash_path, self.hpss_path\n )\n output, err = run_cmd(cmd)\n os.chdir(TOP_LEVEL)\n self.check_strings(cmd, output + err, expected_present, expected_absent)\n # Checking that the printing was done in order.\n tar_order = []\n console_output = output + err\n for word in console_output.replace(\"\\n\", \" \").split(\" \"):\n if \".tar\" in word:\n word = word.replace(\"{}/\".format(self.cache), \"\")\n tar_order.append(word)\n if tar_order != sorted(tar_order):\n error_message = \"The tars were printed in this order: {}\\nWhen it should have been in this order: {}\".format(\n tar_order, sorted(tar_order)\n )\n self.stop(error_message)", "def test_upload_dir_contents_one_file(self):\n self._test_upload_dir_contents(filenames=['file1'])", "def extractFiles(self, archivePath, extractedFiles, filterList = None, ignoreList = None, isExtractingToCurrentDirectory = False, outputMessage = None):\r\n archiveNameWithoutExtension = os.path.splitext(os.path.split(archivePath)[1])[0]\r\n archivePathWithoutName = os.path.split(archivePath)[0]\r\n\r\n if isExtractingToCurrentDirectory:\r\n directoryToExtractTo = os.path.realpath(os.path.join(archivePathWithoutName, archiveNameWithoutExtension))\r\n else:\r\n directoryToExtractTo = os.path.realpath(os.path.join(self.tempDirectory, archiveNameWithoutExtension))\r\n \r\n try:\r\n with zipfile.ZipFile(archivePath, 'r') as zip:\r\n for name in zip.namelist():\r\n if EtFile.isIgnoredFile(name, filterList, ignoreList):\r\n continue\r\n\r\n pathToExtractedFile = os.path.realpath(os.path.join(directoryToExtractTo, name))\r\n if not pathToExtractedFile.startswith(directoryToExtractTo):\r\n # Files inside of zip archives can be maliciously named to cause writing to unexpected locations. All versions\r\n # of python prior to 2.7.4 are vulnerable to this sort of attack. 
Therefore, we must manually guarantee\r\n # that all writes are to our temporary directory.\r\n raise Exception(\"Security warning: Blocked directory traversal when extracting: '{}'\".format(name))\r\n\r\n zip.extract(name, directoryToExtractTo)\r\n \r\n if os.path.splitext(pathToExtractedFile)[1].lower() == \".zip\":\r\n self.extractFiles(pathToExtractedFile, extractedFiles, filterList, ignoreList, True, outputMessage)\r\n else:\r\n extractedFiles.append(pathToExtractedFile)\r\n except zipfile.BadZipfile:\r\n outputMessage(\"BadZipfile: \" + archivePath)", "def testExtractHuntResults(self, _, mock_remove):\n self.grr_hunt_downloader.output_path = '/directory'\n expected = sorted([\n ('greendale-student04.c.greendale.internal',\n '/directory/hunt_H_A43ABF9D/C.4c4223a2ea9cf6f1'),\n ('greendale-admin.c.greendale.internal',\n '/directory/hunt_H_A43ABF9D/C.ba6b63df5d330589'),\n ('greendale-student05.c.greendale.internal',\n '/directory/hunt_H_A43ABF9D/C.fc693a148af801d5')\n ])\n test_zip = 'tests/lib/collectors/test_data/hunt.zip'\n # pylint: disable=protected-access\n result = sorted(self.grr_hunt_downloader._ExtractHuntResults(test_zip))\n self.assertEqual(result, expected)\n mock_remove.assert_called_with('tests/lib/collectors/test_data/hunt.zip')", "def test_upload_dir_contents_multiple_files(self):\n self._test_upload_dir_contents(filenames=['file1', 'file2'])", "def test_case_2():\n print(\"*********Test_case_2***********\")\n path = os.path.join(os.path.dirname(__file__), 'testdir')\n result = find_files(None, path)\n print(result)", "def _test_listing_content(self, f):\n found = []\n with Archive(f) as a:\n for entry in a:\n found.append(entry.pathname)\n\n self.assertEqual(set(found), set(FILENAMES))", "def extract_file(path):", "def test_only_files(self):\n dummy_folder = TestOspaListDir.get_dummy_folder()\n need_result = ['meme1.jpg',\n 'meme2.png',\n 'meme4.jpg',\n 'meme4.png',\n 'meme monty python',\n ]\n need_result_new = [os.path.join(dummy_folder, 'memes', x) for x in need_result[:-1]]\n result = listdir(os.path.join(dummy_folder, 'memes'), only_files=True)\n self.assertEqual(sorted(result), sorted(need_result_new))\n\n need_result_new = [os.path.join(dummy_folder, 'memes', x) for x in need_result]\n result = listdir(os.path.join(dummy_folder, 'memes'), only_files=False)\n self.assertEqual(sorted(result), sorted(need_result_new))", "def testBadZipFileExtractHuntResults(self, mock_extract, mock_remove):\n self.grr_hunt_downloader.output_path = '/directory'\n test_zip = 'tests/lib/collectors/test_data/hunt.zip'\n\n mock_extract.side_effect = zipfile.BadZipfile\n # pylint: disable=protected-access\n with self.assertRaises(errors.DFTimewolfError) as error:\n self.grr_hunt_downloader._ExtractHuntResults(test_zip)\n\n self.assertEqual(1, len(self.test_state.errors))\n self.assertEqual(\n error.exception.message,\n 'Bad zipfile tests/lib/collectors/test_data/hunt.zip: ')\n self.assertTrue(error.exception.critical)\n mock_remove.assert_not_called()", "def test_identify_contents_2(self):\n Path(self.base_dir, \"new_dir\").mkdir()\n Path(self.base_dir, \"file1.txt\").touch()\n Path(self.base_dir, \".DS_Store\").touch()\n ignore_set = set([\".DS_Store\"])\n list_of_items = basic.identify_contents(self.base_dir, kind=\"file\",\n ignore_set=ignore_set)\n exp_num_items = 1\n self.assertEqual(len(list_of_items), exp_num_items)", "def test_identify_contents_7(self):\n Path(self.base_dir, \"new_dir1\").mkdir()\n Path(self.base_dir, \"new_dir2\").mkdir()\n Path(self.base_dir, 
\"file1.txt\").touch()\n Path(self.base_dir, \".DS_Store\").touch()\n list_of_items = basic.identify_contents(self.base_dir, kind=\"invalid\")\n self.assertIsNone(list_of_items)", "def extract(cls, path, outdir):\r\n raise NotImplementedError()", "def test_filter_files(self):\n expected = [\n (\"/subdir1/fichier1\", False),\n (\"/subdir1/fichier4\", False),\n (\"/subdir1/subsubdir1\", False),\n ]\n files = [\n (\"/subdir1/fichier1\", False),\n (\"/subdir2/fichier2\", False),\n (\"/subdir2/fichier3\", False),\n (\"/subdir1/fichier4\", False),\n (\"/subdir1/subsubdir1/fichier1\", False),\n (\"/subdir1/subsubdir1/\", False),\n ]\n self.assertEqual(\n list(self.path_translator.filter_files(files, \"/subdir1\")),\n expected)", "def get_test_files(self):\n raise NotImplementedError", "def testOSErrorExtractHuntResults(self, mock_extract, mock_remove):\n self.grr_hunt_downloader.output_path = '/directory'\n test_zip = 'tests/lib/collectors/test_data/hunt.zip'\n mock_extract.side_effect = OSError\n # pylint: disable=protected-access\n\n with self.assertRaises(errors.DFTimewolfError) as error:\n self.grr_hunt_downloader._ExtractHuntResults(test_zip)\n self.assertEqual(1, len(self.test_state.errors))\n self.assertEqual(\n error.exception.message,\n 'Error manipulating file tests/lib/collectors/test_data/hunt.zip: ')\n self.assertTrue(error.exception.critical)\n mock_remove.assert_not_called()", "def extract(file, fileFormat):\n\tspeech.speak(\"Extracting files in \" + file + \".\")\n\tpatoolib.extract_archive(file)", "def test_verify_unzip(self):\n assert os.path.exists(\n os.path.join(\n settings.MEDIA_ROOT,\n \"indices\",\n \"test-index\",\n \"data\",\n \"sample.txt\"\n )\n )", "def parse_test_files():\n a_copy = PY_FILES[::]\n for f in a_copy:\n if 'test' in f:\n TEST_FILES.append(f)\n PY_FILES.remove(f)", "def test_iter_files():\n for i in iter_files(\"wrong_path\"):\n assert False, \"no files should be yielded\"\n\n files = list(iter_files(\"test_data/\"))\n assert len(files) > 0\n assert (\"test_data/stopwords.txt\", \"test_data/stopwords.txt\") in files\n assert (\"test_data/directory/test_dir\", \"test_data/directory/test_dir\") in files\n\n files = list(iter_files(\"http://google.com\"))\n assert len(files) > 0\n\n files = list(iter_files(\"http://google.com/X\", ignore_errors=True))\n assert not files", "def testing_files_for_process(cleanup_backups):\n _ = shutil.copytree(\"tests/files\", \"tmp/files\", dirs_exist_ok=True)\n try:\n yield\n finally:\n # remove files https://docs.python.org/3/library/shutil.html#shutil.rmtree\n shutil.rmtree(\"tmp/files\")", "def test_get_files_list(self):\n files = self.download.get_files_list()\n self.assertTrue(len(files) > 0)", "def test_get_file_content(self):\n pass", "def test_identify_contents_1(self):\n Path(self.base_dir, \"new_dir\").mkdir()\n Path(self.base_dir, \"file1.txt\").touch()\n Path(self.base_dir, \".DS_Store\").touch()\n list_of_items = basic.identify_contents(self.base_dir, kind=\"file\")\n exp_num_items = 2\n self.assertEqual(len(list_of_items), exp_num_items)", "def setUp(self):\n dirname = os.path.dirname(__file__)\n self.files = [\n os.path.join(dirname, 'data',\n 'goes13_IR_107_testwcm_201604291015.tif'),\n os.path.join(dirname, 'data',\n 'goes15_IR_107_testwcm_201604291015.tif'),\n os.path.join(dirname, 'data',\n 'himawari8_IR1_testwcm_201604291015.tif'),\n os.path.join(dirname, 'data',\n 'meteosat7_IR_115_testwcm_201604291015.tif'),\n os.path.join(dirname, 'data',\n 'meteosat10_IR_108_testwcm_201604291015.tif')\n ]", "def 
test_unzip_file(self):\n\n # Path to the compressed file\n zipped_file = os.path.join(self.xmlfilepath, \"DLTINS_20210117_01of01.zip\")\n # Test for correct data\n # NOTE : For this test case to pass the source xml zipped file\n # should be present in the download path\n self.assertTrue(unzip_file(zipped_file, self.xmlfilepath))\n\n # Test for wrong target path\n self.assertFalse(unzip_file(zipped_file, r\"D:\\kqcA CK j \"))\n\n # Test for incorrect compressed file\n self.assertFalse(unzip_file(\"D:\\somerandomfile\", self.xmlfilepath))", "def process_all_files(src_directory, dst_directory, simon_sez=None):\n error = False\n\n if not os.path.exists(src_directory):\n logger.error(\n \"Directory {0} does not exist. Exiting.\".format(\n src_directory))\n error = True\n\n if not os.access(dst_directory, os.W_OK):\n logger.error(\n \"Destination directory {0} is not writable. Exiting.\".format(\n dst_directory))\n error = True\n\n if error:\n logger.warn(\"Exiting due to errors.\")\n sys.exit(1)\n\n harvester = Harvester(src_directory, metadata_dst_directory=dst_directory)\n filemaps = harvester[\"filemaps\"]\n\n count = 0\n for fm in filemaps.get():\n count += 1\n src_fmd = FileMetadata(os.path.join(src_directory, fm.src_fn))\n if simon_sez:\n logger.info(\n \"Copying metadata from {} ==> {}\".format(\n fm.src_fn, fm.dst_fn))\n src_fmd.copy_metadata(os.path.join(dst_directory, fm.dst_fn))\n else:\n logger.info(\n \"DRY RUN: Copying metadata from {} ==> {}\".format(\n fm.src_fn, fm.dst_fn))\n if count == 0:\n logger.warn(\"No matching files found. Check src and dst.\")", "def extract(archive_path, images_dir, test_zip=False):\n log(\"TRACE\", \"Attempting to extracted files from {}\".format(archive_path))\n with zipfile.ZipFile(archive_path) as images_zip:\n # Check that the Zip file is valid, in which case `testzip()` returns\n # None. 
If it's bad, that function will return a list of bad files\n try:\n if test_zip and images_zip.testzip():\n log(\"ERROR\", \"Could not extract the following invalid Zip file:\"\n \" {}\".format(archive_path))\n return []\n except OSError:\n log(\"ERROR\", \"Could not extract the following invalid Zip file:\"\n \" {}\".format(archive_path))\n return []\n images_zip.extractall(images_dir)\n archive_namelist = images_zip.namelist()\n log(\"TRACE\", \"Extracted files: {}\".format(archive_namelist))\n return archive_namelist", "def test_extract(self):\n for document in [test_pdfutil.BLANK, test_pdfutil.LOREM]:\n paper = factories.Paper.create(document=factory.django.FileField(\n data=document))\n paper_url = \"{}/{}\".format(EXTRACT_URL, paper.unique_id)\n\n c = django.test.Client()\n variables = [\"funding\", \"grant_id\"]\n for var in variables:\n var_url = \"{}/{}\".format(paper_url, var)\n self.assertEqual(b'{\"value\":null}', c.get(var_url).content)\n\n # Extract all at once\n self.assertEqual(b'{\"funding\":null,\"grant_id\":null}',\n c.get(paper_url).content)", "def test_identify_contents_4(self):\n Path(self.base_dir, \"new_dir1\").mkdir()\n Path(self.base_dir, \"new_dir2\").mkdir()\n Path(self.base_dir, \"file1.txt\").touch()\n Path(self.base_dir, \".DS_Store\").touch()\n ignore_set = set([\"new_dir2\"])\n list_of_items = basic.identify_contents(self.base_dir, kind=\"dir\",\n ignore_set=ignore_set)\n exp_num_items = 1\n self.assertEqual(len(list_of_items), exp_num_items)", "def runDataExtraction():\r\n config = CONFIG['steps']['DataExtraction']\r\n ci = config['inputs']\r\n co = config['outputs']\r\n columns = ci['columns']\r\n nrows = ci['nrows']\r\n input_bucket = ci['bucket']\r\n no_of_files = ci['no_of_files']\r\n\r\n output_bucket = co['bucket']\r\n csv_name_prefix = co['csv_name_prefix']\r\n\r\n minio_config = CONFIG['artifacts']['minio']\r\n minioClient = create_minio_client(minio_config[\"endpoint_url\"],\r\n access_key=minio_config[\"access_key\"],\r\n secret_key=minio_config[\"secret_key\"],\r\n secure=minio_config['secure'])\r\n\r\n boto_client = boto3.client(\"s3\",\r\n endpoint_url=minio_config[\"endpoint_url\"],\r\n aws_access_key_id=minio_config[\"access_key\"],\r\n aws_secret_access_key=minio_config[\"secret_key\"],\r\n region_name=minio_config[\"region_name\"])\r\n\r\n zip_files = get_files(input_bucket, boto_client, file_type='zip')\r\n\r\n no_of_files_to_process = no_of_files if no_of_files is not None else len(\r\n zip_files)\r\n for zip_file in tqdm(zip_files[:no_of_files_to_process], total=no_of_files_to_process):\r\n process_file(zip_file, input_bucket, output_bucket, minioClient, columns,\r\n nrows=nrows, output_csv_name_prefix=csv_name_prefix)", "def test_archive_run(self):\n pass", "def mass_extract(source_directory, target_directory):\n\n import os\n import ZipFile\n\n source_directory = raw_input(\"Where are the zips? \")\n target_directory = raw_input(\"To where do you want to extract the files? \")\n \n if not os.path.exists(source_directory):\n print \"Sorry, that folder doesn't seem to exist.\"\n source_directory = raw_input(\"Where are the zips? 
\")\n\n if not os.path.exists(target_directory):\n os.mkdir(target_directory)\n \n for path, directory, filename in os.walk(source_directory):\n zip_file = ZipFile.ZipFile(filenames)\n ZipFile.extract(zip_file, target_directory)\n zip_file.close()\n\n print \"Done.\"", "def extract_file(self):\n# path_destination = os.path.join(\n# self.root, self.resources.replace(\".zip\", \"\"))\n# os.makedirs(path_destination, exist_ok=True)\n shutil.unpack_archive(os.path.join(\n self.root, self.resources), self.root)\n os.remove(os.path.join(self.root, self.resources))", "def test_with_files(self, files):\n files_to_rename = list(set(self.files) - set(files))\n files_to_skip = []\n\n # Generate a unique suffix to append to files we want to ignore.\n index = 0\n file_rename_suffix = '___%d' % index\n while any([f.endswith(file_rename_suffix) for f in files_to_rename]):\n index += 1\n file_rename_suffix = '___%d' % index\n\n # Rename all files in the test case's file list but not the specified one.\n for file_to_rename in files_to_rename:\n absolute_file_to_rename = os.path.join(self.input_directory,\n file_to_rename)\n try:\n os.rename(absolute_file_to_rename,\n '%s%s' % (absolute_file_to_rename, file_rename_suffix))\n except OSError:\n # This can happen if we have already renamed a directory with files\n # under it. In this case, make sure we don't try to change the name\n # back later.\n files_to_skip.append(file_to_rename)\n\n # Clean up any issues with modifications of resources in subdirectories.\n for file_to_skip in files_to_skip:\n files_to_rename.remove(file_to_skip)\n files_to_rename.reverse()\n\n result = self.run()\n\n # Restore previously renamed files to their original locations.\n for file_to_rename in files_to_rename:\n absolute_file_to_rename = os.path.join(self.input_directory,\n file_to_rename)\n os.rename('%s%s' % (absolute_file_to_rename, file_rename_suffix),\n absolute_file_to_rename)\n\n return self._handle_test_result(result)", "def test_identify_contents_6(self):\n Path(self.base_dir, \"new_dir1\").mkdir()\n Path(self.base_dir, \"new_dir2\").mkdir()\n Path(self.base_dir, \"file1.txt\").touch()\n Path(self.base_dir, \".DS_Store\").touch()\n ignore_set = set([\"new_dir2\"])\n list_of_items = basic.identify_contents(self.base_dir, kind=None,\n ignore_set=ignore_set)\n exp_num_items = 3\n self.assertEqual(len(list_of_items), exp_num_items)", "def _ExtractFiles(self, tgz_path: str, path_to_collect: str) -> str:\n local_path = ''\n if not os.path.exists(tgz_path):\n self.logger.error(f'File not found {tgz_path}')\n return local_path\n\n tempdir = tempfile.mkdtemp()\n with tarfile.open(tgz_path) as file:\n members = self._FilterTarMembers(file, path_to_collect)\n file.extractall(path=tempdir, members=members)\n\n local_path = os.path.join(tempdir, path_to_collect.lstrip('/'))\n return local_path", "def test_ingest_zipfile():\n\n try:\n\n adult_data_df = ingest_csv_from_zipfile(Directories.ZIP_FILE_DIR.value, FileNames.DATA_FILE.value)\n\n except Exception as exception:\n\n pytest.fail('Ingest zip file error: {}'.format(exception))\n\n pass", "def test_get_file_executors(self):\n pass", "def test_create_files(self):\n\n testdir = \"test_output\"\n test_submission = Submission()\n self.addCleanup(os.remove, \"submission.tar.gz\")\n self.addCleanup(shutil.rmtree, testdir)\n\n test_submission.create_files(testdir)\n\n self.doCleanups()", "def extract(self, paths=None):\n\n all_files = self._get_package_files()\n if paths is None:\n extracted_files = all_files.values()\n else:\n 
extracted_files = [all_files[path] for path in paths]\n\n # filter already extracted file\n extracted_files = [pf for pf in extracted_files if not os.path.isfile(self.project.storage.fspath(pf.extract_path))]\n\n # group files by package\n files_by_package = defaultdict(list)\n for pf in extracted_files:\n files_by_package[pf.package].append(pf)\n\n package_files_path = f\"{self.path}/packages/files\"\n\n for package, files in files_by_package.items():\n with self.project.storage.stream(f\"{package_files_path}/{package}\") as reader:\n # sort files by offset to extract while streaming the bin file\n for pkgfile in sorted(files, key=lambda f: f.offset):\n logger.debug(f\"extracting {pkgfile.path}\")\n reader.skip_to(pkgfile.offset)\n fspath = self.project.storage.fspath(pkgfile.extract_path)\n with write_file_or_remove(fspath) as fout:\n if pkgfile.compressed:\n zobj = zlib.decompressobj(zlib.MAX_WBITS | 32)\n def writer(data):\n return fout.write(zobj.decompress(data))\n reader.copy(writer, pkgfile.size)\n fout.write(zobj.flush())\n else:\n reader.copy(fout.write, pkgfile.size)", "def test_scraper(self):\n\n for entry in tests:\n command = ['./mozdownload/scraper.py',\n '--base_url=%s' % self.wdir,\n '--destination=%s' % self.temp_dir]\n p = processhandler.ProcessHandler(command + entry['options'])\n p.run()\n p.wait()\n dir_content = os.listdir(self.temp_dir)\n self.assertTrue(entry['fname'] in dir_content)\n\n mozfile.remove(os.path.join(self.temp_dir, entry['fname']))", "def test_getContainerFromFolder(self):\n with self.assertRaises(ValueError):\n path_core._core.FolderContainer('/imaginaryPath/')\n testDir = str(TEST_DIR.joinpath('testDirectory1'))\n\n listOfFiles = os.listdir(testDir)\n testRegex = [re.compile(r) for r in [r\"file01_(\\d+).rgb\", r\"file02_(\\d+).rgb\", r\"file(\\d+).03.rgb\"]]\n result = {'alpha.txt', 'file.info.03.rgb'}.union({\n path_core._core.Sequence.fromRegexAndFiles(regex, listOfFiles) for regex in testRegex})\n\n\n container = path_core._core.FolderContainer(testDir)\n self.assertEqual(testDir, container.dir)\n self.assertEqual(result, set(container.contents))", "def test_input_folders_files(self):\n files = list_files_folder(data_dir + \"build-custom/files/\", ext=\"fna.gz\")\n folder = data_dir + \"build-custom/files/more/\"\n params = self.default_params.copy()\n params[\"db_prefix\"] = self.results_dir + \"test_input_folders_files\"\n params[\"input\"] = files + [folder]\n params[\"input_extension\"] = \"fna.gz\"\n cfg = Config(\"build-custom\", **params)\n self.assertTrue(run_ganon(cfg, params[\"db_prefix\"]), \"ganon build-custom run failed\")\n res = build_sanity_check_and_parse(vars(cfg))\n self.assertIsNotNone(res, \"ganon build-custom sanity check failed\")\n\n files.extend(list_files_folder(folder, ext=params[\"input_extension\"]))\n self.assertTrue(res[\"target\"][\"file\"].isin(files).all(), \"Files missing from target\")\n self.assertEqual(len(files), res[\"target\"].shape[0], \"Wrong number of files on target\")\n self.assertTrue(res[\"info\"][\"file\"].isin(files).all(), \"Files missing from info\")\n self.assertEqual(len(files), res[\"info\"].shape[0], \"Wrong number of files on info\")", "def test_unzip_and_flatten(\n self,\n mocker: MockerFixture,\n tmp_path: pathlib.Path,\n ) -> None:\n mock_zip = MockZipFile()\n mock_zip.add_files(\"my_dir/abcde\", \"my_dir/funky\")\n\n zipfile = mocker.patch(\"matl_online.utils.zipfile.ZipFile\")\n zipfile.return_value = mock_zip\n\n unzip(BytesIO(), tmp_path)\n\n extract_args = 
mock_zip.extract_all_arguments\n\n assert len(extract_args) == 2\n assert extract_args[0] == tmp_path\n\n output_names = [obj.filename for obj in extract_args[1]]\n\n assert len(output_names) == 2\n assert output_names[0] == \"abcde\"\n assert output_names[1] == \"funky\"", "def run(self, found_files):\n raise NotImplementedError", "def test_main(self):\n dummy_folder = TestOspaListDir.get_dummy_folder()\n result = listdir(dummy_folder,\n full_path=True,\n only_files=False,\n )\n need_result = ['memes',\n 'txt_files',\n 'antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ]\n need_result = [os.path.join(dummy_folder, x) for x in need_result]\n self.assertEqual(sorted(need_result), sorted(result))\n\n result = listdir(dummy_folder,\n full_path=False,\n only_files=False,\n )\n need_result = ['memes',\n 'txt_files',\n 'antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ]\n\n self.assertEqual(sorted(need_result), sorted(result))\n\n result = listdir(dummy_folder,\n full_path=True,\n only_files=True,\n )\n need_result = ['antigravity.png',\n 'egg.png',\n 'empty.txt',\n 'holy_grenade.png',\n 'spam.jpg',\n ]\n need_result = [os.path.join(dummy_folder, x) for x in need_result]\n self.assertEqual(sorted(need_result), sorted(result))\n self.assertEqual(sorted(os.listdir('.')), sorted(listdir(path='.', full_path=False)))", "def test_main(self):\n results = main(0.1, files)\n # 1\n self.assertEqual(results, \"All Done Successfully\")\n results = main(0.1, get_files_bad_file_path())\n # 2\n self.assertIn(\"skipping to next\", results)\n results = main(0.1, get_files_bad_type())\n # 3\n self.assertIn(\"skipping to next\", results)\n results = main(0.1, get_files_bad_name_table())\n # 4\n self.assertIn(\"closing app. . 
.\", results)", "def extract_all(fn,dst=\".\"):\r\n if tarfile.is_tarfile(fn): \r\n with tarfile.open(fn,'r') as tf:\r\n tf.extractall(dst)\r\n tf.close()\r\n elif zipfile.is_zipfile(fn):\r\n with zipfile.ZipFile(fn, 'r') as zf:\r\n zf.extractall(dst)\r\n zf.close()\r\n else:\r\n print( \"Please provide a tar archive file or zip file\" )", "def extract_tars(file_pattern, path_in, path_out):\n for f in glob.glob(os.path.join(path_in, file_pattern)):\n shutil.unpack_archive(f, path_out)", "def _extract_file(dest_path, root_dir):\n logger.info(\"Unzipping the dataset file.\")\n with zipfile.ZipFile(dest_path, \"r\") as zip_dir:\n zip_dir.extractall(root_dir)", "def extractor_multiprocess(self):\n pool = multiprocessing.Pool()\n queue = multiprocessing.Queue()\n queue.put(\"safe\")\n end = len(next(os.walk(self.datadir))[2])\n error = 0\n\n extractor_iterator = ((directory)\n for directory in os.listdir(self.datadir))\n with jsonlines.open(self.output, \"w\") as f:\n for x in tqdm.tqdm(\n pool.imap_unordered(self.extract_unpack, extractor_iterator), total=end\n ):\n if not x:\n \"\"\"\n To input error class or function\n \"\"\"\n error += 1\n continue\n msg = queue.get()\n if msg == \"safe\":\n f.write(x)\n queue.put(\"safe\")\n\n pool.close()", "def get_test_files(self):\n train_dir = os.path.join(self.data_dir, \"test_{}_new\".format(self.patient_no))\n filenames = os.listdir(train_dir)\n interm = ((os.path.splitext(f)[0].split(\"_\"), os.path.join(train_dir, f)) for f in filenames)\n return [(int(p[0][1]), int(p[0][2]), p[1]) for p in interm]", "def test_get_file_copy_list(self):\n \n so = sys.stdout\n dn = open(os.devnull,\"w\")\n \n # Create a file hierarchy to search for files\n root = tempfile.mkdtemp(prefix=\"test_casava_data_delivery_\")\n date = \"111111\"\n fcs = [\"{}_{}\".format(date,fcid) for fcid in [\"FCA\",\"FCB\"]]\n \n # Create some sample files\n exp_files = []\n samples = []\n for n in xrange(2):\n sample = tempfile.mkdtemp(dir=root)\n samples.append(os.path.basename(sample))\n for fcid in fcs:\n fcdir = os.path.join(sample,fcid)\n nophixdir = os.path.join(fcdir,\"nophix\")\n for d in [fcdir,nophixdir]:\n os.makedirs(d)\n test_names = [\"{:d}_{:s}_1_1_fastq.txt.gz\".format(random.randint(1,8),\n fcid),\n \"{}_CGATGT_L001_R1_001.fastq.gz\".format(samples[-1]),\n \"{}_CGATGT_L001_R1_001.fastq..gz\".format(samples[-1]),]\n for test_name in test_names:\n test_file = os.path.join(d,test_name)\n open(test_file,\"w\").close()\n exp_files.append([samples[-1],\n fcid,\n os.path.basename(d) == \"nophix\",\n test_file,\n os.path.join(samples[-1],fcid),\n create_final_name(os.path.basename(test_name),date,fcid.split(\"_\")[-1],samples[-1])])\n \n # Get the list of files to copy under various conditions\n \n for deliver_all_fcs in [False, True]:\n for fcid in fcs:\n for deliver_nophix in [False, True]:\n for skip_sample_list in [[],[samples[0]],[samples[1]],samples]:\n sys.stdout = dn\n obs_to_copy = sorted(get_file_copy_list(root,\"\",fcid,deliver_all_fcs,deliver_nophix,skip_sample_list))\n sys.stdout = so\n exp_to_copy = sorted([ef[3:6] for ef in exp_files if (deliver_all_fcs or ef[1] == fcid) and \\\n deliver_nophix == ef[2] and \\\n ef[0] not in skip_sample_list])\n #import pdb; pdb.set_trace()\n self.assertListEqual(obs_to_copy,\n exp_to_copy,\n \"The files to copy result did not match the expected for \" \\\n \"{:s}\".format(\", \".join([\"{:s}: {:s}\".format(k,v) for k, v in \\\n dict(zip([\"deliver_all_fcs\",\n \"fcid\",\n \"deliver_nophix\",\n \"skip_samples\"],\n 
[str(deliver_all_fcs),\n fcid,\n str(deliver_nophix),\n \" \".join(skip_sample_list)])).items()])))", "def test_process_two_filenames(generate_expected_two_files):\n # create local variables and run fixtures\n einfo = generate_expected_two_files\n expected = einfo['expected']\n fname = einfo['file_names']\n results = process_files([fname['stress'], fname['strain']])\n # compare the pifs\n A = results.properties[0].scalars\n B = expected['stress'].properties[0].scalars\n C = results.properties[1].scalars\n D = expected['strain'].properties[0].scalars\n assert np.array_equal(A, B), \\\n 'Results and expected pifs differ in stress values'\n assert np.array_equal(C, D), \\\n 'Results snd expected pifs differ in strain values'\n assert getattr( results, 'uid', None) is None, \\\n 'Result UID should be None'\n assert getattr(results, 'names', None) is None, \\\n 'Result should not be named'\n assert getattr(results, 'classifications', None) is None, \\\n 'Result should not have any classifications.'\n assert len(results.properties) == \\\n len(expected['stress'].properties) + \\\n len(expected['strain'].properties), \\\n 'The length of the result and expected properties lists do not match.'\n assert getattr(results, \"ids\", None) is None, \\\n 'Result ids should be None'\n assert getattr(results, 'source', None) is None, \\\n 'Result source should be None'\n assert getattr(results, 'quantity', None) is None, \\\n 'Result quantity should be None'\n assert getattr(results, 'preparation', None) is None,\\\n 'Result preparation should be None'\n assert getattr(results, \"subSystems\", None) is None, \\\n 'Results subSystem should be None'\n assert getattr(results, 'references', None) is None,\\\n 'Results references should be None'\n assert getattr(results, 'contacts', None) is None, \\\n 'Results contacts should be None'\n assert getattr(results, 'licenses', None) is None,\\\n 'Results licenses should be None'\n assert getattr(results,'tags', None) is None,\\\n 'Results tags should be None'", "def extract(apath, ffilter=[]):\n\n files = []\n\n def extract_recursive(curr_apath):\n \"\"\"Look into archive recursively to extract files considering ffilter\"\"\"\n\n handler = resolve_format(curr_apath)\n unpacker = HandlersFactory.get_handler(handler)\n _files = unpacker.files_list(curr_apath)\n\n for f in _files:\n if is_matched(f, ffilter=ffilter):\n _fpath = unpacker.extract(curr_apath, f)\n files.append(_fpath)\n if is_archive(f):\n _apath = unpacker.extract(curr_apath, f)\n extract_recursive(_apath)\n\n extract_recursive(apath)\n return files", "def test_identify_contents_3(self):\n Path(self.base_dir, \"new_dir1\").mkdir()\n Path(self.base_dir, \"new_dir2\").mkdir()\n Path(self.base_dir, \"file1.txt\").touch()\n Path(self.base_dir, \".DS_Store\").touch()\n list_of_items = basic.identify_contents(self.base_dir, kind=\"dir\")\n exp_num_items = 2\n self.assertEqual(len(list_of_items), exp_num_items)", "def testCollectHuntResults(self,\n mock_get_write_archive,\n mock_ExtractHuntResults):\n self.mock_grr_api.Hunt.return_value.Get.return_value = \\\n mock_grr_hosts.MOCK_HUNT\n self.grr_hunt_downloader.Process()\n mock_get_write_archive.assert_called_with(mock_grr_hosts.MOCK_HUNT,\n '/tmp/test/H:12345.zip')\n mock_ExtractHuntResults.assert_called_with('/tmp/test/H:12345.zip')", "def get_test_files():\n repo_fs()\n return TEST_FILES", "def test_normal_execution(self, mock_mkdir, mock_isfile, mock_isdir,\n mock_conversion):\n # Set the mocked functions returned values\n mock_isfile.side_effect = [True, 
True, False, False, False, False,\n False]\n mock_isdir.side_effect = [False]\n mock_conversion.side_effect = lambda *x: x[-1]\n\n # Test execution\n outfiles = export_scalars_to_nifti(**self.kwargs)\n expected_outfiles = {\n \"gfa\": os.path.join(\n self.kwargs[\"outdir\"],\n \"{0}_gfa.nii.gz\".format(self.kwargs[\"model\"])),\n \"mean_diffusivity\": os.path.join(\n self.kwargs[\"outdir\"],\n \"{0}_mean_diffusivity.nii.gz\".format(self.kwargs[\"model\"]))}\n expected_files = []\n for name in (\"gfa\", \"mean_diffusivity\", \"adc\", \"lambda_parallel\",\n \"lambda_transverse\", \"fa\"):\n expected_files.append(\n os.path.join(\n self.kwargs[\"model_dir\"],\n \"{0}_{1}.ima\".format(self.kwargs[\"model\"], name)))\n self.assertEqual(expected_outfiles, outfiles)\n self.assertEqual([mock.call(self.kwargs[\"outdir\"])],\n mock_isdir.call_args_list)\n self.assertEqual([mock.call(self.kwargs[\"outdir\"])],\n mock_mkdir.call_args_list)\n self.assertEqual([mock.call(elem) for elem in expected_files],\n mock_isfile.call_args_list)\n self.assertEqual([\n mock.call(expected_files[0], expected_outfiles[\"gfa\"]),\n mock.call(expected_files[1],\n expected_outfiles[\"mean_diffusivity\"])],\n mock_conversion.call_args_list)", "def extract(self):\n with TemporaryDirectory() as temp_dir:\n logging.info(f\"downloading files to {temp_dir}\")\n aws_utils.download_files(\n self.s3_bucket,\n f\"{self.harvest_key_prefix}/{self.harvest_date}\",\n temp_dir,\n )\n\n csv_path = os.path.join(temp_dir, \"extract.csv\")\n with open(csv_path, \"w\") as csv_file:\n writer = csv.DictWriter(\n csv_file,\n self.extract_csv_header,\n lineterminator=os.linesep,\n )\n writer.writeheader()\n\n # iterate over HTML documents, extract data and write to CSV\n file_names = []\n for f in glob.glob(f\"{temp_dir}/*.bz2\"):\n logging.info(f\"parsing {f}\")\n listing_id = os.path.split(os.path.splitext(f)[0])[-1]\n file_names += listing_id,\n with bz2.open(f, \"rb\") as zip_file:\n writer.writerow({\n **self.soup_parser(\n self.html_parser(zip_file.read())\n ),\n \"listing_id\": listing_id,\n \"source\": urlparse(self.base_url).netloc,\n \"collection_date\": self.harvest_date,\n })\n\n # check how fields are empty on each row\n with open(csv_path) as csv_file:\n reader = csv.reader(csv_file)\n columns = len(next(reader))\n for index, row in enumerate(reader):\n nulls = row.count(\"NULL\")\n if nulls / columns > 0.3:\n logging.warning(\n f\"{nulls} null values in {file_names[index]}\"\n )\n\n # upload CSV file to S3\n csv_s3_key = (\n f\"{self.extract_key_prefix}/{self.harvest_date}/extract.csv\"\n )\n logging.info(f\"uploading data to {self.s3_bucket}/{csv_s3_key}\")\n client = boto3.client(\"s3\")\n client.upload_file(\n csv_path,\n self.s3_bucket,\n csv_s3_key,\n )\n\n logging.info(\"extraction finished\")", "def test_file_analyzer(self):\r\n file_analyzer = FileAnalyzer(\"C:\\\\Users\\\\Himan\\\\Desktop\\\\Semester 2\\\\SSW 810\\\\HW\\\\Assignment 8\")\r\n self.assertEqual(file_analyzer.files_summary, {'HW08_Himanshu.py': {'class': 1, 'function': 5, 'line': 100, 'char': 4472}, \\\r\n 'HW08_Test_Himanshu.py': {'class': 1, 'function': 3, 'line': 38, 'char': 1861}})\r\n self.assertNotEqual(file_analyzer.files_summary, {'HW08_Himanshu.py': {'class': 0, 'function': 5, 'line': 46, 'char': 1931}})\r\n\r\n self.assertNotEqual(file_analyzer.files_summary, {'HW08_Himanshu.py': {'class': 1, 'function': 5, 'line': 100}}) # testing less fields\r\n\r\n with self.assertRaises(FileNotFoundError): # raises exception error\r\n 
FileAnalyzer(\"C:\\\\Users\\\\Himan\\\\Desktop\\\\Semester 2\\\\SSW 810\\\\HW\\\\Assignment 10\").files_summary", "def test_get_filepaths_empty(self):\n\n #setup\n get_filepaths = extractor.make_get_filepaths(self.mock_get_files_fn)\n\n \n #when\n test2 = get_filepaths(\"./dir2\", \".c\")\n\n #result\n assert len(test2) == 0", "def handle_extracted_files(\n actapi: act.api.Act, content: Text, extracted_files: List[Dict]\n) -> List[act.api.fact.Fact]:\n\n feeds_facts: List[act.api.fact.Fact] = []\n\n for file in extracted_files:\n\n chain = []\n\n if \"sha256\" not in file:\n continue\n\n if not file[\"file_path\"]:\n info(f\"{file} is missing file_path using name instead\")\n\n path = file[\"file_path\"] if file[\"file_path\"] else file[\"name\"]\n\n chain.append(\n actapi.fact(\"componentOf\").source(\"path\", path).destination(\"uri\", \"*\")\n )\n\n chain.append(\n actapi.fact(\"at\").source(\"content\", file[\"sha256\"]).destination(\"uri\", \"*\")\n )\n\n feeds_facts += act.api.fact.fact_chain(*chain)\n\n for hash_type in [\"md5\", \"sha1\", \"sha256\"]:\n feeds_facts.append(\n actapi.fact(\"represents\")\n .source(\"hash\", file[hash_type])\n .destination(\"content\", file[\"sha256\"])\n )\n feeds_facts.append(\n actapi.fact(\"category\", hash_type).source(\"hash\", file[hash_type])\n )\n\n if (\n content != file[\"sha256\"]\n ): # the act platform does not accept same object on source and destination for write\n feeds_facts.append(\n actapi.fact(\"writes\")\n .source(\"content\", content)\n .destination(\"content\", file[\"sha256\"])\n )\n\n return feeds_facts", "def test_identify_contents_5(self):\n Path(self.base_dir, \"new_dir1\").mkdir()\n Path(self.base_dir, \"new_dir2\").mkdir()\n Path(self.base_dir, \"file1.txt\").touch()\n Path(self.base_dir, \".DS_Store\").touch()\n list_of_items = basic.identify_contents(self.base_dir, kind=None)\n exp_num_items = 4\n self.assertEqual(len(list_of_items), exp_num_items)", "def test_search_file(self):\n base_dir = join(get_current_path(), 'samples', 'base_dir1')\n output_dir = join(get_current_path(), 'samples', 'base_dir1', 'result')\n files = search_files(base_dir, output_dir)\n self.assertTrue(self.verify_sub_folders(list(files.keys())))\n\n # sub folders under Concord is not counted, only files\n self.assertEqual(len(files['Concord']), 5)\n self.assertEqual(len(files['ListCo Equity']), 1)\n self.assertEqual(len(files['CLO Equity']), 2)\n self.assertEqual(files['ListCo Equity'][0], join(base_dir, 'ListCo Equity', 'Positions1219.xlsx'))", "def treat(input, output):\n files = find(input)\n acc = []\n for file in files:\n fileInfo = extract(file)\n out = makeOutputPath(output, fileInfo[\"path\"], fileInfo[\"filename\"])\n if not out == None:\n fileInfo[\"outPath\"] = out\n acc += [fileInfo]\n return acc", "def ExtractFilesInISO(XISOPath, ExtractPath, XSystemUpdateFolder):\n command=['./extract-xiso', '-x']\n if XSystemUpdateFolder == True:\n command.append('-s')\n command.append(XISOPath)\n command.append('-d')\n command.append(ExtractPath)\n print(command)\n sp = subprocess\n global extract_popen\n extract_popen = subprocess.Popen(command, stdout=sp.PIPE, stderr=sp.STDOUT)\n global ActFileInISOCount\n ActFileInISOCount = -1\n for line in extract_popen.stdout:\n Status = line\n print(Status)\n if Status.split(' ')[0] == 'extracting' :\n ActFileInISOCount = ActFileInISOCount + 1\n extract_popen.wait()", "def test_get_sh_files(self):\n server = MockUCSHttpServer('server')\n struct = U.UCSRepoPool(major=MAJOR, minor=MINOR, part=PART, 
patchlevel=PATCH, arch=ARCH)\n preup_path = struct.path('preup.sh')\n server.mock_add(preup_path, 'preup_content')\n postup_path = struct.path('postup.sh')\n server.mock_add(postup_path, 'postup_content')\n repo = ((server, struct),)\n\n gen = U.UniventionUpdater.get_sh_files(repo)\n\n self.assertEqual((server, struct, 'preup', preup_path, 'preup_content'), gen.next())\n self.assertEqual((server, struct, 'postup', postup_path, 'postup_content'), gen.next())\n self.assertRaises(StopIteration, gen.next)", "def test_unnecessary_files(self):\n path = os.path.join(BASE_DIR, \"tests\", \"fixtures\", \"test_unnecessary_files.zip\")\n zip_file = zipfile.ZipFile(path)\n\n with self.assertRaises(UnnecessaryFiles) as context:\n get_shapefile(zip_file)\n the_exception = context.exception\n self.assertEqual(UNNECESSARY_FILE, the_exception.message)", "def download_test_files(request):\n\n # Log the start of the function\n logger.info(\"=========== returns ms1 test files from code directory input/ms1\")\n\n # create an absolute path to the 'example_data_dir' containing the test data files, then create\n # absolute paths to each test data file. Note the test data files are located in this code base.\n example_data_dir = os.path.join(os.path.dirname(os.path.abspath(__file__)),'..','..','input/ms1')\n pos_input = os.path.join(example_data_dir, example_pos_filename)\n neg_input = os.path.join(example_data_dir, example_neg_filename)\n tracer_file = os.path.join(example_data_dir, example_tracer_filename)\n run_sequence_pos_file = os.path.join(example_data_dir, example_run_sequence_pos_filename)\n run_sequence_neg_file = os.path.join(example_data_dir, example_run_sequence_neg_filename)\n\n # create filenames\n filename1 = 'ms1_pos_input_test_data.csv'\n filename2 = 'ms1_neg_input_test_data.csv'\n filename3 = 'ms1_tracer_test_data.csv'\n filename4 = 'ms1_run_sequence_pos_test_data.csv'\n filename5 = 'ms1_run_sequence_neg_test_data.csv'\n\n # List of files to be zipped\n files_to_zip = {filename1: pos_input, filename2: neg_input, filename3: tracer_file, filename4: run_sequence_pos_file, filename5: run_sequence_neg_file}\n\n # Create an in-memory zip file\n in_memory_zip = BytesIO()\n with ZipFile(in_memory_zip, 'w', ZIP_DEFLATED) as zipf:\n # Add each file to the zipfile\n for filename in files_to_zip:\n logger.info('filename: {}'.format(filename))\n file_path = files_to_zip[filename]\n with open(file_path, 'rb') as file:\n file_content = file.read()\n zipf.writestr(filename, file_content)\n # The ZipFile object is automatically closed when exiting the 'with' block\n\n zip_filename = \"ms1_test_data_files.zip\"\n # Create an HTTP response with the zip file attached for download\n response = HttpResponse(in_memory_zip.getvalue(),content_type='application/zip')\n response['Content-Disposition'] = 'attachment; filename=' + zip_filename\n response['Content-length'] = in_memory_zip.tell()\n\n # Return the HTTP response\n return response", "def test_unarchive_run(self):\n pass", "def test_upload_file1(self):\n pass", "def test_get_file_object(self):\n pass", "def test_case_1():\n print(\"*********Test_case_1***********\")\n path = os.path.join(os.path.dirname(__file__), 'testdir')\n result = find_files('.c', path)\n for file in result:\n print(file)", "def testMkvExtract(self):\n self.assertEqual(\n self.mkvExtract,\n self.config.mkvExtract\n )", "def test_pipeline_extract_mock_calls(testbed: SparkETLTests):\n # When - calling the extract method with mocked spark and test config\n 
pipeline.extract(spark=testbed.mock_spark,\n config=testbed.config,\n logger=testbed.config)\n # Then - introspecting the spark method call\n testbed.mock_spark.read.load.assert_called_once_with(\n path='/user/soyel/pyspark-cicd-template/input/page_views',\n format='csv',\n header=True,\n schema=schema.page_views)\n testbed.mock_spark.read.table.assert_called_once_with(tableName='soyel_db.user_pageviews')\n testbed.mock_spark.reset_mock()", "def test_zip_file_streamer(mock_gen):\n urls = [\n 'http://www.example.com/coda123/manifest-md5.txt',\n 'http://www.example.com/coda123/bagit.txt',\n 'http://www.example.com/coda123/bag-info.txt'\n ]\n meta_id = 'coda123'\n mock_data_1 = [b'Test1', b'manifest', b'data1']\n mock_data_2 = [b'Test2', b'bagit', b'data2']\n mock_data_3 = [b'Test3', b'baginfo', b'data3']\n mock_gen.side_effect = [iter(mock_data_1), iter(mock_data_2), iter(mock_data_3)]\n chunk = list(presentation.zip_file_streamer(urls, meta_id))\n for data in mock_data_1, mock_data_2, mock_data_3:\n for val in data:\n assert val in chunk\n assert mock_gen.call_count == 3", "def test_resource_files():\n expected_files = os.listdir(EXPECTED)\n actual_files = os.listdir(TEMP_DIR)\n assert expected_files == actual_files", "def test_successful_file(self):\n\n url = '/%s/jobs/%i/input_files/' % (self.api, self.job.id)\n response = self.client.generic('GET', url)\n self.assertEqual(response.status_code, status.HTTP_200_OK, response.content)\n\n result = json.loads(response.content)\n results = result['results']\n self.assertEqual(len(results), 2)\n for result in results:\n self.assertTrue(result['id'] in [self.file3.id, self.file4.id])\n self.assertIn('file_name', result)\n self.assertIn('workspace', result)\n self.assertIn('media_type', result)\n self.assertIn('file_type', result)\n self.assertIn('file_size', result)\n self.assertIn('file_path', result)\n self.assertIn('is_deleted', result)\n self.assertIn('url', result)\n self.assertIn('created', result)\n self.assertIn('deleted', result)\n self.assertIn('data_started', result)\n self.assertIn('data_ended', result)\n self.assertIn('source_started', result)\n self.assertIn('source_ended', result)\n self.assertIn('last_modified', result)\n self.assertIn('geometry', result)\n self.assertIn('center_point', result)\n self.assertIn('countries', result)\n self.assertIn('job_type', result)\n self.assertIn('job', result)\n self.assertIn('job_exe', result)\n self.assertIn('job_output', result)\n self.assertIn('recipe_type', result)\n self.assertIn('recipe', result)\n self.assertIn('recipe_node', result)\n self.assertIn('batch', result)\n self.assertFalse(result['is_superseded'])\n self.assertIn('superseded', result)", "def _extract_zip(src, dst):\n # check if src is a valid .zip\n assert zipfile.is_zipfile(src), \"{} is not a valid .zip file.\".format(src)\n\n zip_file = zipfile.ZipFile(src, \"r\")\n for file in zip_file.namelist():\n zip_file.extract(file, dst)", "def test_process_single_file(generate_expected_one_file):\n einfo = generate_expected_one_file\n expected = einfo['expected']\n fname = einfo['file_name']\n results = process_files([fname])\n # compare the pifs\n A = results.properties[0].scalars\n B = expected.properties[0].scalars\n C = results.properties[1].scalars\n D = expected.properties[1].scalars\n assert np.array_equal(A, B), \\\n 'Result and expected pifs differ in stress values'\n assert np.array_equal(C, D), \\\n 'Result and expected pifs differ in strain values'\n assert getattr( results, 'uid', None) is None, \\\n 'Result UID 
should be None'\n assert getattr(results, 'names', None) is None, \\\n 'Result should not be named'\n assert getattr(results, 'classifications', None) is None, \\\n 'Result should not have any classifications.'\n assert len(results.properties) == \\\n len(expected.properties), \\\n 'The length of the result and expected properties lists do not match.'\n assert getattr(results, \"ids\", None) is None, \\\n 'Result ids should be None'\n assert getattr(results, 'source', None) is None, \\\n 'Result source should be None'\n assert getattr(results, 'quantity', None) is None, \\\n 'Result quantity should be None'\n assert getattr(results, 'preparation', None) is None,\\\n 'Result preparation should be None'\n assert getattr(results, \"subSystems\", None) is None, \\\n 'Results subSystem should be None'\n assert getattr(results, 'references', None) is None,\\\n 'Results references should be None'\n assert getattr(results, 'contacts', None) is None, \\\n 'Results contacts should be None'\n assert getattr(results, 'licenses', None) is None,\\\n 'Results licenses should be None'\n assert getattr(results,'tags', None) is None,\\\n 'Results tags should be None'" ]
[ "0.6961958", "0.67344284", "0.65078896", "0.64383835", "0.6424595", "0.6330281", "0.6321087", "0.63068205", "0.62984717", "0.62395144", "0.62120587", "0.61898607", "0.6165155", "0.61557406", "0.6154367", "0.6150874", "0.6107343", "0.61028737", "0.60906625", "0.60642004", "0.60624075", "0.6042666", "0.6038994", "0.60104775", "0.6010018", "0.6007514", "0.5986555", "0.597753", "0.5974138", "0.59651816", "0.59599847", "0.5955052", "0.5918386", "0.59175515", "0.5915182", "0.5910265", "0.5902864", "0.58878475", "0.58868414", "0.5880869", "0.5865729", "0.58459926", "0.58213896", "0.58077455", "0.5800239", "0.579115", "0.5789806", "0.5785594", "0.57836837", "0.5744244", "0.5738365", "0.5728937", "0.5700049", "0.56959516", "0.5689859", "0.5685275", "0.5674709", "0.56744087", "0.5673745", "0.56692886", "0.5659", "0.56494904", "0.5643458", "0.56396765", "0.5637287", "0.5637053", "0.5628507", "0.56261027", "0.5616272", "0.56144935", "0.5609", "0.5602254", "0.5593181", "0.5588326", "0.55830973", "0.5582688", "0.5582409", "0.5580927", "0.5570028", "0.55698407", "0.5566964", "0.5566546", "0.55568206", "0.55527884", "0.55419034", "0.55387574", "0.5536501", "0.55277306", "0.5527592", "0.5512111", "0.5504431", "0.54963654", "0.5488796", "0.54838806", "0.54798394", "0.54797", "0.54680383", "0.5449012", "0.54353386", "0.5431532" ]
0.64720875
3
Tests the RefreshClientCredentials method.
def testRefreshClientCredentials(self, mock_get_credentials, mock_initialize_client):
    # Set an expired token.
    self.turbinia_processor.credentials = mock.MagicMock(
        expiry = FAKE_CREDENTIALS['expiry'], expired = True)
    self.turbinia_processor.RefreshClientCredentials()
    mock_get_credentials.assert_called_once()
    mock_initialize_client.assert_called_once()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def RefreshClientCredentials(self) -> bool:\n refresh = False\n if self.credentials and self.credentials.expired:\n self.credentials = self.GetCredentials(\n self.credentials_path, self.client_secrets_path)\n self.client = self.InitializeTurbiniaApiClient(self.credentials)\n refresh = True\n return bool(refresh)", "def test_refresh_token(self):\n self.assertEqual(CloudCredentials.objects.count(), 0)\n with HTTMock(spark_cloud_mock):\n CloudCredentials.objects.refresh_token()\n self.assertEqual(CloudCredentials.objects.count(), 1)\n self.assertEqual(CloudCredentials.objects._access_token(), ACCESS_TOKEN)\n CloudCredentials.objects.all().delete()", "def test_renews_token(self):\n self.assertEqual(CloudCredentials.objects.count(), 0)\n old = self.factory.create(access_token='old_token', expires_at=self.expired_dt)\n with HTTMock(spark_cloud_mock):\n refresh_access_token()\n self.assertEqual(CloudCredentials.objects.count(), 2)\n self.assertEqual(CloudCredentials.objects._access_token(), ACCESS_TOKEN)\n CloudCredentials.objects.all().delete()", "def test_authenticate_refresh(app, client, session, models):\n user = models[\"user\"][0]\n # Authenticate to receive a refresh token\n response = client.post(\n \"/authenticate/local\",\n data={\"email\": user.email, \"password\": \"hunter2\"},\n )\n refresh_token = json.loads(response.data)[\"refresh_token\"]\n\n # Check that token values are as expected\n assert len(refresh_token[\"val\"]) == 64\n assert datetime.fromtimestamp(refresh_token[\"exp\"]) > datetime.now()\n assert datetime.fromtimestamp(refresh_token[\"exp\"]) < (\n datetime.now() + app.config[\"REFRESH_TOKEN_VALIDITY\"]\n )\n\n # Check that the returned token is now stored in the database\n assert refresh_token[\"val\"] == user.refresh_tokens[0].token\n\n # Expect refreshing token to succeed\n response = client.post(\n \"/refresh\", data={\"refresh_token\": refresh_token[\"val\"]}\n )\n assert response.status_code == 200\n raw_jwt_token = json.loads(response.data)[\"jwt\"]\n\n # Expect that the new claims are equal to the user claims, except for the\n # expiry which will have refreshed\n refresh_claims = jwt.decode(\n raw_jwt_token, app.config[\"RSA_PUBLIC_KEY\"], app.config[\"ALGORITHM\"],\n )\n del refresh_claims[\"exp\"]\n assert user.claims == refresh_claims\n\n # Expect refreshing an expired token to fail\n token = user.refresh_tokens[0]\n token.expiry = datetime.now() - timedelta(seconds=1)\n response = client.post(\"/refresh\", data={\"refresh_token\": token.token})\n assert response.status_code == 401", "def refresh_credentials():\n global auth_token\n auth_token = get_oauth_token()", "def test_legacy_client_invalid_refresh_token_expired_access_token(self):\n self.legacy_client._client._expires_at = 1\n self.legacy_client.token['refresh_token'] = 'invalidrefreshtoken'\n response = self.legacy_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)", "def test_legacy_client_invalid_refresh_token(self):\n self.legacy_client._client.access_token = 'invalidaccesstoken'\n self.legacy_client.token['refresh_token'] = 'invalidrefreshtoken'\n response = self.legacy_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)", "def test_legacy_client_expired_access_token(self):\n self.legacy_client._client._expires_at = 1\n response = self.legacy_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)", "def test_renew_token(self):\n 
self.assertEqual(CloudCredentials.objects.count(), 0)\n with HTTMock(spark_cloud_mock):\n CloudCredentials.objects._renew_token(self.cloud)\n self.assertEqual(CloudCredentials.objects.count(), 1)\n self.assertEqual(CloudCredentials.objects._access_token(), ACCESS_TOKEN)\n CloudCredentials.objects.all().delete()", "def test_patch_o_auth_client(self):\n pass", "def refresh(self):\n self._request_token(grant_type='client_credentials')", "async def test_request_refresh(client, monkeypatch, caplog):\n mock_refresh_token_called = 0\n\n async def mock_refresh_token():\n nonlocal mock_refresh_token_called\n mock_refresh_token_called += 1\n\n monkeypatch.setattr(\n client._auth_client, 'refresh_token', mock_refresh_token)\n\n async def mock_valid_token_set():\n pass\n\n monkeypatch.setattr(client, 'valid_token_set', mock_valid_token_set)\n\n resp_text = 'ohai'\n\n with aioresponses() as mocked:\n mocked.get(conftest.API_URL, status=401)\n mocked.get(conftest.API_URL, status=200, body=resp_text)\n resp = await client.request('get', conftest.API_URL)\n\n assert 2 == mock_refresh_token_called\n assert resp == resp_text\n assert 6 == len(caplog.records)", "def test_reused_token_get_auth_info(self):\r\n client_ = client.HTTPClient(username=USERNAME,\r\n tenant_name=TENANT_NAME,\r\n token=TOKEN,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)\r\n expected = {'auth_token': TOKEN,\r\n 'auth_tenant_id': None,\r\n 'auth_user_id': None,\r\n 'endpoint_url': self.client.endpoint_url}\r\n self.assertEqual(client_.get_auth_info(), expected)", "def test_patch_o_auth_client_authorization(self):\n pass", "def test_expired_credentials():\n pass", "def _refresh_token(self, client):\n\n url = self._url('token')\n client_data = self.clients[client]\n refresh_token = client_data['token']['refresh_token']\n data = {'grant_type': 'refresh_token',\n 'scope': 'PRODUCTION',\n 'refresh_token': refresh_token}\n consumer_key = client_data['response']['consumerKey']\n consumer_secret = client_data['response']['consumerSecret']\n auth = requests.auth.HTTPBasicAuth(consumer_key, consumer_secret)\n return self.POST(url, data=data, auth=auth)", "def test_mdb_revoking_credential(self):\n this_id = 9898\n data = self.cred_data\n data['credential_id'] = this_id\n cred = vccs_auth.credential.from_dict(data, None)\n self.mdb.add_credential(cred)\n\n # assert no exception\n cred2 = self.mdb.get_credential(this_id)\n\n print(\"Revoking credential :\\n{}\".format(pformat(cred2)))\n\n cred2.revoke({'reason': 'unit testing'})\n self.mdb.update_credential(cred2)\n\n # assert exception when fetching revoked credential\n with self.assertRaises(vccs_auth.credential.VCCSAuthCredentialError):\n self.mdb.get_credential(this_id)\n\n # assert exception when trying to activate credential again\n with self.assertRaises(ValueError):\n cred2.status('active')", "def test_mail_client_invalid_refresh_token_expired_access_token(self):\n self.mail_client._client._expires_at = 1\n self.mail_client.token['refresh_token'] = 'invalidrefreshtoken'\n with self.assertRaises(InvalidGrantError):\n self.mail_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))", "def test_mail_client_invalid_refresh_token(self):\n self.mail_client._client.access_token = 'invalidaccesstoken'\n self.mail_client.token['refresh_token'] = 'invalidrefreshtoken'\n with self.assertRaises(InvalidGrantError):\n self.mail_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))", "def test_replace_o_auth_client(self):\n pass", "def 
test_replace_o_auth_client_authorization(self):\n pass", "def test_authtoken_refresh(self):\n hagrid = models.User(username='hagrid', fullname='Rubeus Hagrid')\n auth_token = models.AuthToken(user=hagrid, algorithm='hmac-sha-1')\n existing_token = auth_token.token\n existing_secret = auth_token.secret\n auth_token.refresh()\n self.assertNotEqual(existing_token, auth_token.token)\n self.assertNotEqual(existing_secret, auth_token.secret)", "def test_mail_client_expired_access_token(self):\n self.mail_client._client._expires_at = 1\n response = self.mail_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)", "def test_update_client(self):\n pass", "def test_update_user_profile(self):\n\n new_credentials = {'name': 'New Name', 'password': 'NewTestpass12'}\n response = self.client.patch(URL_ME, new_credentials)\n\n # Refresh the details of the user from the database.\n self.user.refresh_from_db()\n\n # Check that the update is successful.\n self.assertEqual(self.user.name, new_credentials['name'])\n self.assertTrue(self.user.check_password(new_credentials['password']))\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def test_good_token(self):\n self.assertEqual(CloudCredentials.objects.count(), 0)\n cred = self.factory.create(access_token='good_token', expires_at=self.current_dt)\n with HTTMock(spark_cloud_mock):\n refresh_access_token()\n self.assertEqual(CloudCredentials.objects.count(), 1)\n self.assertEqual(CloudCredentials.objects._access_token(), 'good_token')\n cred.delete()", "def testWarnsOnReturningErrorFromAuthenticateClient(self):\n\n class ErrorTestClientStorage(TestClientStorage):\n \"\"\" A ClientStorage to test returning errors from authenticateClient. \"\"\"\n\n def __init__(self, errorToReturn):\n super(ErrorTestClientStorage, self).__init__()\n self.error = errorToReturn\n\n def authenticateClient(self, client, request, secret=None):\n return self.error\n\n validRequest = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'client_id': self._VALID_CLIENT.id,\n 'client_secret': self._VALID_CLIENT.secret,\n 'refresh_token': self._VALID_REFRESH_TOKEN\n })\n tokenResource = TokenResource(\n self._TOKEN_FACTORY, self._PERSISTENT_STORAGE,\n self._REFRESH_TOKEN_STORAGE, self._AUTH_TOKEN_STORAGE,\n ErrorTestClientStorage(errorToReturn=MalformedParameterError('client_secret')),\n passwordManager=self._PASSWORD_MANAGER)\n with warnings.catch_warnings(record=True) as caughtWarnings:\n warnings.simplefilter('always')\n result = tokenResource.render_POST(validRequest)\n self.assertEqual(\n 1, len(caughtWarnings),\n msg='Expected the token resource to generate a warning, if '\n 'authenticateClient returns an OAuth2Error instead of raising it')\n self.assertTrue(issubclass(caughtWarnings[0].category, DeprecationWarning),\n msg='Expected the token resource to generate a DeprecationWarning')\n self.assertIn(\n 'Returning an error from authenticateClient is deprecated',\n str(caughtWarnings[0].message),\n msg='Expected the token resource to generate a DeprecationWarning explaining that '\n 'returning an error from authenticateClient is deprecated.')\n self.assertFailedTokenRequest(\n validRequest, result, MalformedParameterError('client_secret'),\n msg='Expected the token resource to reject the request '\n 'if authenticateClient returns an error.')", "def test_revoke_refresh_token(client, tokens):\n response = client.delete(\n \"/auth/refresh-token/\",\n headers={\"Authorization\": \"Bearer 
{}\".format(tokens[\"refresh\"])},\n )\n\n payload = response.get_json()\n assert response.status_code == HTTPStatus.OK\n assert payload[\"msg\"] == \"Refresh token successfully revoked\"", "def refresh(self):\n self._request_token(grant_type='password', username=self._username,\n password=self._password)", "def test_reset_passwd(self, test_client, user_test1):\n response = test_client.post('/api/auth/reset', json=dict(\n reset_password_token=create_access_token(identity=user_test1),\n password=\"Azerty!123\"\n ))\n res = json.loads(response.data)\n\n assert response.status_code == 200\n assert res['status'] == True", "def test_read_o_auth_client(self):\n pass", "def test_read_o_auth_client_authorization(self):\n pass", "def re_authenticate(self):\n url = URLS['token']\n data = {\n \"grant_type\": \"refresh_token\",\n \"refresh_token\": self.refresh_token,\n \"client_id\": self.client_id,\n \"client_secret\": self.client_secret\n }\n r = requests.post(url, data=data)\n r.raise_for_status()\n j = r.json()\n self.access_token = j['access_token']\n self.refresh_token = j['refresh_token']\n self._set_token_expiration_time(expires_in=j['expires_in'])\n return r", "async def test_expired_token_requires_reauth(\n hass: HomeAssistant,\n component_setup: ComponentSetup,\n aioclient_mock: AiohttpClientMocker,\n) -> None:\n\n aioclient_mock.post(\n \"https://oauth2.googleapis.com/token\",\n status=http.HTTPStatus.BAD_REQUEST,\n )\n\n await component_setup()\n\n entries = hass.config_entries.async_entries(DOMAIN)\n assert len(entries) == 1\n assert entries[0].state is ConfigEntryState.SETUP_ERROR\n\n flows = hass.config_entries.flow.async_progress()\n assert len(flows) == 1\n assert flows[0][\"step_id\"] == \"reauth_confirm\"", "def testAuthorizationForPublicClient(self):\n client = PublicClient('publicClient', ['https://return.nonexistent'], ['refresh_token'])\n refreshToken = 'publicClientRefreshToken'\n request = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'client_id': client.id,\n 'refresh_token': refreshToken\n })\n self._REFRESH_TOKEN_STORAGE.store(refreshToken, client, self._VALID_SCOPE)\n newAuthToken = 'tokenForPublicClient'\n self._CLIENT_STORAGE.addClient(client)\n self._TOKEN_FACTORY.expectTokenRequest(newAuthToken, self._TOKEN_RESOURCE.authTokenLifeTime,\n client, self._VALID_SCOPE)\n result = self._TOKEN_RESOURCE.render_POST(request)\n self._TOKEN_FACTORY.assertAllTokensRequested()\n self.assertValidTokenResponse(\n request, result, newAuthToken,\n self._TOKEN_RESOURCE.authTokenLifeTime, expectedScope=self._VALID_SCOPE)", "def test_client_update(self):\n pass", "def testUpdateCredentials(self):\r\n \r\n credentials = dict()\r\n credentials[\"username\"] = \"\"\r\n credentials[\"password\"] = \"\"\r\n self._factory.updateCredentials(credentials)", "async def test_request_with_zero_token_refresh_attempts(\n client, monkeypatch, caplog):\n mock_refresh_token_called = 0\n\n async def mock_refresh_token():\n nonlocal mock_refresh_token_called\n mock_refresh_token_called += 1\n\n monkeypatch.setattr(\n client._auth_client, 'refresh_token', mock_refresh_token)\n\n async def mock_valid_token_set():\n pass\n\n monkeypatch.setattr(client, 'valid_token_set', mock_valid_token_set)\n\n resp_text = 'ohai'\n\n with aioresponses() as mocked:\n mocked.get(conftest.API_URL, status=200, body=resp_text)\n resp = await client.request(\n 'get', conftest.API_URL, token_refresh_attempts=0)\n\n assert 0 == mock_refresh_token_called\n assert resp == resp_text\n assert 2 == 
len(caplog.records)", "def refresh_token(self,refresh_token=None,client_id=None, client_secret=None):\n\t\tif not refresh_token and not client_id:\n\t\t refresh_token = self.credentials.refresh_token\n\t\t client_id = self.credentials.client_id\n\t\t client_secret = self.credentials.client_secret\n\n\t\turl = 'https://accounts.google.com/o/oauth2/token'\n\t\tvalues = {\"refresh_token\":refresh_token, \"client_id\":client_id, \"client_secret\":client_secret, \"grant_type\":\"refresh_token\"}\n\t\tprint 'refresh_token POST values: ' + str(values)\n\t\t# encode data\n\t\tdata = urllib.urlencode(values)\n\t\tprint 'changed'\n\t\tprint 'data:' + str(data)\n\t\timport traceback\n\t\timport sys\n\t\t# post request for refresh token\n\t\ttry:\n\t\t\treq = urllib2.Request(url, data)\n\t\t\tprint req.get_full_url()\n\t\t\tresponse = urllib2.urlopen(req)\n\t\t\tprint 'response: ' + str(response)\n\t\t\tresponse_json = json.loads(response.read())\n\t\t\tprint 'google refresh token response json: ' + str(response_json)\n\n\t\texcept Exception, err:\n\t\t\tprint traceback.format_exc()\n\t\tnew_access_token = response_json[\"access_token\"]\n\t\tself.credentials.access_token = new_access_token\n\t\tnew_expiration_date = datetime.now() + timedelta(hours=1)\n\t\tself.credentials.token_expiry = new_expiration_date\n\t\tdb.session.add(self.credentials)\n\t\tdb.session.commit()\n\t\tprint 'done getting values from fresh_token'", "def testAuthorizationClientAuthInParams(self):\n request = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'refresh_token': self._VALID_REFRESH_TOKEN,\n 'client_id': self._VALID_CLIENT.id,\n 'client_secret': self._VALID_CLIENT.secret\n })\n newAuthToken = 'tokenWithAuthInParameter'\n self._TOKEN_FACTORY.expectTokenRequest(newAuthToken, self._TOKEN_RESOURCE.authTokenLifeTime,\n self._VALID_CLIENT, self._VALID_SCOPE)\n result = self._TOKEN_RESOURCE.render_POST(request)\n self._TOKEN_FACTORY.assertAllTokensRequested()\n self.assertValidTokenResponse(\n request, result, newAuthToken,\n self._TOKEN_RESOURCE.authTokenLifeTime, expectedScope=self._VALID_SCOPE)", "def mock_client():\n client = PolicyComputeEngine(url=\"https://127.0.0.1:8443\", port=8443, org_id=1)\n client.set_credentials(\"dummy\", \"dummy-1\")\n\n return client", "def test_attempts_to_refresh_token_when_appropriate(self, mock):\n\n badgr = self.get_badgr_setup()\n with vcr.use_cassette('tests/vcr_cassettes/try_refresh_token.yaml'):\n with self.assertRaises(exceptions.TokenAndRefreshExpiredError):\n badgr.get_from_server(self._sample_url)\n self.assertTrue(mock.called)", "def setUp(self):\n self.new_credentials = Credentials(\"gmail\", \"Zephon Makale\", \"1234xyz\")", "def test_client_verification_retrieve(self):\n pass", "def refreshAuthentication(self, authenticationToken):\r\n pass", "def test_evicts_invalid_refresh_token():\n\n tenant_id = \"tenant-id\"\n client_id = \"client-id\"\n invalid_token = \"invalid-refresh-token\"\n\n cache = TokenCache()\n cache.add({\"response\": build_aad_response(uid=\"id1\", utid=\"tid1\", access_token=\"*\", refresh_token=invalid_token)})\n cache.add({\"response\": build_aad_response(uid=\"id2\", utid=\"tid2\", access_token=\"*\", refresh_token=\"...\")})\n assert len(cache.find(TokenCache.CredentialType.REFRESH_TOKEN)) == 2\n assert len(cache.find(TokenCache.CredentialType.REFRESH_TOKEN, query={\"secret\": invalid_token})) == 1\n\n def send(request, **_):\n assert request.data[\"refresh_token\"] == invalid_token\n return 
mock_response(json_payload={\"error\": \"invalid_grant\"}, status_code=400)\n\n transport = Mock(send=Mock(wraps=send))\n\n client = AadClient(tenant_id, client_id, transport=transport, cache=cache)\n with pytest.raises(ClientAuthenticationError):\n client.obtain_token_by_refresh_token(scopes=(\"scope\",), refresh_token=invalid_token)\n\n assert transport.send.call_count == 1\n assert len(cache.find(TokenCache.CredentialType.REFRESH_TOKEN)) == 1\n assert len(cache.find(TokenCache.CredentialType.REFRESH_TOKEN, query={\"secret\": invalid_token})) == 0", "def test_access_token_setting(self):\n client = Client()\n assert not client.is_access_token_set()\n client.set_client_access_token(\"FAKE-TOKEN\")\n assert client.is_access_token_set()", "def testAuthorizationWithoutClientAuth(self):\n request = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'refresh_token': self._VALID_REFRESH_TOKEN\n })\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(\n request, result, NoClientAuthenticationError(),\n msg='Expected the token resource to reject a request without any authentication.')\n request = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'refresh_token': self._VALID_REFRESH_TOKEN,\n 'client_id': self._VALID_CLIENT.id,\n })\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(\n request, result, NoClientAuthenticationError(),\n msg='Expected the token resource to reject a request without client authentication.')", "def test_valid_refresh_token(self):\n\n # Generate a valid access code\n with base.HybridSessionManager():\n authorization_code = auth_api.authorization_code_save({\n 'user_id': 2,\n 'state': 'test_state',\n 'code': 'test_valid_code'\n })\n\n content_type = 'application/x-www-form-urlencoded'\n # Generate an auth and a refresh token.\n resp_1 = self.app.post('/v1/openid/token',\n params={\n 'code': authorization_code.code,\n 'grant_type': 'authorization_code'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that this is a successful response\n self.assertEqual(200, resp_1.status_code)\n\n # Assert that the token came back in the response\n t1 = resp_1.json\n\n # Assert that both are in the database.\n with base.HybridSessionManager():\n access_token = \\\n token_api.access_token_get_by_token(t1['access_token'])\n self.assertIsNotNone(access_token)\n\n with base.HybridSessionManager():\n refresh_token = refresh_tokens.refresh_token_get_by_token(\n t1['refresh_token'])\n\n self.assertIsNotNone(refresh_token)\n\n content_type = 'application/x-www-form-urlencoded'\n # Issue a refresh token request.\n resp_2 = self.app.post('/v1/openid/token',\n params={\n 'refresh_token': t1['refresh_token'],\n 'grant_type': 'refresh_token'\n },\n content_type=content_type,\n expect_errors=True)\n\n # Assert that the response is good.\n self.assertEqual(200, resp_2.status_code)\n\n # Assert that the token came back in the response\n t2 = resp_2.json\n self.assertIsNotNone(t2['access_token'])\n self.assertIsNotNone(t2['expires_in'])\n self.assertIsNotNone(t2['id_token'])\n self.assertIsNotNone(t2['refresh_token'])\n self.assertIsNotNone(t2['token_type'])\n self.assertEqual('Bearer', t2['token_type'])\n\n # Assert that the access token is in the database\n with base.HybridSessionManager():\n new_access_token = \\\n token_api.access_token_get_by_token(t2['access_token'])\n self.assertIsNotNone(new_access_token)\n\n # Assert that system configured values is owned by 
the correct user.\n self.assertEqual(2, new_access_token.user_id)\n self.assertEqual(t2['id_token'], new_access_token.user_id)\n self.assertEqual(t2['expires_in'], CONF.oauth.access_token_ttl)\n self.assertEqual(t2['expires_in'], new_access_token.expires_in)\n self.assertEqual(t2['access_token'],\n new_access_token.access_token)\n\n # Assert that the refresh token is in the database\n\n with base.HybridSessionManager():\n new_refresh_token = refresh_tokens.refresh_token_get_by_token(\n t2['refresh_token'])\n\n self.assertIsNotNone(new_refresh_token)\n\n # Assert that system configured values is owned by the correct user.\n self.assertEqual(2, new_refresh_token.user_id)\n self.assertEqual(CONF.oauth.refresh_token_ttl,\n new_refresh_token.expires_in)\n self.assertEqual(t2['refresh_token'],\n new_refresh_token.refresh_token)\n\n # Assert that the old access tokens are no longer in the database and\n # have been cleaned up.\n\n with base.HybridSessionManager():\n no_access_token = \\\n token_api.access_token_get_by_token(t1['access_token'])\n with base.HybridSessionManager():\n no_refresh_token = \\\n refresh_tokens.refresh_token_get_by_token(t1['refresh_token'])\n\n self.assertIsNone(no_refresh_token)\n self.assertIsNone(no_access_token)", "def testRefresh(self):\n \n pass", "def test_delete_o_auth_client_authorization(self):\n pass", "def RefreshToken():\n params = {}\n params['client_id'] = Constants.USER['CLIENT_ID']\n params['client_secret'] = Constants.USER['CLIENT_SECRET']\n params['refresh_token'] = Constants.AUTH['REFRESH']\n params['grant_type'] = 'refresh_token'\n\n data = urllib.urlencode(params)\n\n headers = {\n 'User-Agent': 'LogoCert Client',\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Accept': 'text/html, */*',\n }\n\n request_url = Constants.OAUTH_TOKEN\n\n request = urllib2.Request(request_url, data, headers)\n res = urllib2.urlopen(request)\n response = res.read()\n return json.loads(response)", "async def test_request_with_valid_token(\n client, monkeypatch, caplog):\n mock_refresh_token_called = 0\n\n async def mock_refresh_token():\n nonlocal mock_refresh_token_called\n mock_refresh_token_called += 1\n\n monkeypatch.setattr(\n client._auth_client, 'refresh_token', mock_refresh_token)\n\n async def mock_valid_token_set():\n return True\n\n monkeypatch.setattr(client, 'valid_token_set', mock_valid_token_set)\n\n resp_text = 'ohai'\n\n with aioresponses() as mocked:\n mocked.get(conftest.API_URL, status=200, body=resp_text)\n resp = await client.request(\n 'get', conftest.API_URL, token_refresh_attempts=1)\n\n assert 0 == mock_refresh_token_called\n assert resp == resp_text\n assert 2 == len(caplog.records)", "def test_access_token(self):\n exp = self.factory.create(access_token='expired', expires_at=self.expired_dt)\n cur = self.factory.create(access_token=ACCESS_TOKEN, expires_at=self.current_dt)\n old = self.factory.create(access_token='old', expires_at=self.old_dt)\n with HTTMock(spark_cloud_mock):\n token = CloudCredentials.objects._access_token()\n self.assertEqual(token, ACCESS_TOKEN)\n CloudCredentials.objects.all().delete()", "def _refresh_access_token(self):\n # force https so that we don't send around tokens unsecurely\n url = 'https://{}/api/token/refresh'.format(urlparse(self.base_url).netloc)\n \n # paranoid: check again that we only send the token to https\n if urlparse(url).scheme != \"https\":\n msg = 'This should not happen, please file a bug report.'\n raise Exception(msg)\n\n if not self.jwt_refresh_token:\n raise 
FDSNUnauthorizedException(\"Unauthorized, authentication \"\n \"required.\", )\n\n # convert to json\n data = json.dumps({\"refresh\": self.jwt_refresh_token})\n # encode\n data = bytes(data, \"utf-8\")\n headers = {\"Content-Type\": \"application/json\"}\n html = urllib_request.Request(url, data=data, headers=headers)\n # decode('utf-8')\n try:\n result = urllib_request.urlopen(html).read().decode(\"utf-8\")\n dic = json.loads(result)\n self.jwt_access_token = dic['access']\n\n if self.debug:\n print('Got temporary access/refresh: {}/{}'.format(self.jwt_access_token, self.jwt_refresh_token))\n \n return\n except:\n raise FDSNUnauthorizedException(\"Unauthorized, authentication \"\n \"expired. Please set your credentials again.\", )", "def test_client_retrieve(self):\n pass", "def test_get_client(self):\n pass", "async def test_request(client, monkeypatch, caplog):\n mock_refresh_token_called = 0\n\n async def mock_refresh_token():\n nonlocal mock_refresh_token_called\n mock_refresh_token_called += 1\n\n monkeypatch.setattr(\n client._auth_client, 'refresh_token', mock_refresh_token)\n\n async def mock_valid_token_set():\n return False\n\n monkeypatch.setattr(client, 'valid_token_set', mock_valid_token_set)\n\n resp_text = 'ohai'\n\n with aioresponses() as mocked:\n mocked.get(conftest.API_URL, status=200, body=resp_text)\n resp = await client.request('get', conftest.API_URL)\n\n assert resp == resp_text\n\n assert 1 == mock_refresh_token_called\n assert 1 == len(mocked.requests)\n request = mocked.requests.popitem()[1][0]\n authorization_header = request.kwargs['headers']['Authorization']\n assert authorization_header == f'Bearer {client._auth_client.token}'\n assert 2 == len(caplog.records)", "def test_correct_credentials(self):\n with self.subTest(\"Valid credentials\"):\n valid_credentials = self._encode_basic_credentials(\n self.web_user.username, \"my_password\"\n )\n response = self.client.get(\n self.url, HTTP_AUTHORIZATION=f\"Basic {valid_credentials}\"\n )\n self.assertEqual(response.status_code, 401)\n\n with self.subTest(\"Invalid credentials\"):\n invalid_credentials = self._encode_basic_credentials(\n self.web_user.username, \"not_the_correct_password\"\n )\n response = self.client.get(\n self.url, HTTP_AUTHORIZATION=f\"Basic {invalid_credentials}\"\n )\n self.assertEqual(response.status_code, 401)", "def refresh_client(self):\n params = dict(self.params)\n # This is a request payload we mock to fetch the data\n mock_payload = json.dumps(\n {\n \"clientContext\": {\n \"appVersion\": \"1.0\",\n \"contextApp\": \"com.icloud.web.fmf\",\n \"mapkitAvailable\": True,\n \"productType\": \"fmfWeb\",\n \"tileServer\": \"Apple\",\n \"userInactivityTimeInMS\": 537,\n \"windowInFocus\": False,\n \"windowVisible\": True,\n },\n \"dataContext\": None,\n \"serverContext\": None,\n }\n )\n req = self.session.post(self._friend_endpoint, data=mock_payload, params=params)\n self.response = req.json()", "def refresh_token(self, iam_client_id, iam_client_secret, refresh_token):\n\n data = HTTPHeaderDict()\n data.add('client_id', iam_client_id)\n data.add('client_secret', iam_client_secret)\n data.add('grant_type', 'refresh_token')\n data.add('refresh_token', refresh_token)\n \n self.log.info(\"refresh_token. 
data: %s\" % data)\n\n response = requests.post(self.token_endpoint, data=data, verify=True)\n\n try:\n response.raise_for_status()\n except requests.exceptions.HTTPError as e:\n # Whoops it wasn't a 200\n self.log.error(\"refresh_token() Error: %s \" %str(e))\n self.log.error(\"http error:\" + response.status_code)\n return response.status_code\n\n result = json.loads(response.content)\n return result[\"access_token\"]", "def test_delete_o_auth_client(self):\n pass", "def test_refresh_request_body(self):\n scopes = [\"scope1\", \"scope2\"]\n oauth = Oauth2Authenticator(\n TestOauth2Authenticator.refresh_endpoint,\n TestOauth2Authenticator.client_id,\n TestOauth2Authenticator.client_secret,\n TestOauth2Authenticator.refresh_token,\n scopes,\n )\n body = oauth.get_refresh_request_body()\n expected = {\n \"grant_type\": \"refresh_token\",\n \"client_id\": \"client_id\",\n \"client_secret\": \"client_secret\",\n \"refresh_token\": \"refresh_token\",\n \"scopes\": scopes,\n }\n assert body == expected", "def test_credentials_set_reset(self):\n empty_setting = {\n 'AccessKeyId': None,\n 'SecretAccessKey': None,\n 'SessionToken': None\n }\n nonempty_setting = {\n 'AccessKeyId': '1',\n 'SecretAccessKey': '2',\n 'SessionToken': '3'\n }\n self.assertEqual(_credentials, empty_setting)\n credentials_set(nonempty_setting)\n self.assertEqual(_credentials, nonempty_setting)\n credentials_reset()\n self.assertEqual(_credentials, empty_setting)", "def testAuthorizationWithClientAuthInHeaderAndParameter(self):\n request = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'refresh_token': self._VALID_REFRESH_TOKEN,\n 'client_id': self._VALID_CLIENT.id,\n 'client_secret': self._VALID_CLIENT.secret\n })\n self._addAuthenticationToRequestHeader(request, self._VALID_CLIENT)\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(\n request, result, MultipleClientAuthenticationError(),\n msg='Expected the token resource to reject a request which utilizes '\n 'more than one mechanism for authenticating the client.')", "def _refresh_access_token(self):\n url = self._get_url(subpath=\"auth\", route=\"refresh\")\n refresh_token = get_refresh_token()\n payload = {\"refresh_token\": refresh_token}\n response = self.session.post(url, json=payload)\n response.raise_for_status()\n access_token = response.json()[\"access_token\"]\n set_process_execution_user_token(access_token)\n self.session.headers[\"authorization\"] = f\"Bearer {access_token}\"", "def test_list_o_auth_client_authorization(self):\n pass", "def test_legacy_client(self):\n response = self.legacy_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)", "def refresh(refreshtoken):\n if not refreshtoken:\n raise Exception('refresh() requires refreshtoken parameter')\n\n p = {\n 'client_id': c.client_id,\n 'client_secret': c.client_secret,\n 'grant_type': 'refresh_token',\n 'refresh_token': refreshtoken\n }\n\n return r._post('/token/', p, '/oauth/v2', False)", "def checkCredentialChange(response):\n credentials = getattr(flask.g, '_credentials', None)\n if credentials is not None:\n config = get_user_config()\n json_credentials = credentials.to_json()\n if config.credentials != json_credentials:\n config.credentials = json_credentials\n config.save()\n\n return response", "def _refresh_token(self):\n token_url = self._base_url + '/api/oauth2/token'\n params = {\n 'grant_type': 'client_credentials',\n 'client_id': self._client_id,\n 'client_secret': 
self._client_secret\n }\n headers = {'accept': 'application/json'}\n response = requests.post(token_url,proxies = self._proxy,params= params,headers = headers)\n logging.debug(response.text)\n parsed = response.json()\n self._access_token = parsed['access_token']\n self._refresh_token = parsed['refresh_token']\n expires_in = parsed['expires_in']\n ## Keep a buffer of 120 seconds to refresh token before expiry\n self._expires_at = datetime.now() + timedelta(seconds=(expires_in - 120))\n\n logging.debug('access_token %s expires at %s', self._access_token, self._expires_at)\n\n return", "def test_get_auth_header_fresh(self, mocker):\n oauth = Oauth2Authenticator(\n TestOauth2Authenticator.refresh_endpoint,\n TestOauth2Authenticator.client_id,\n TestOauth2Authenticator.client_secret,\n TestOauth2Authenticator.refresh_token,\n )\n\n mocker.patch.object(Oauth2Authenticator, \"refresh_access_token\", return_value=(\"access_token\", 1000))\n header = oauth.get_auth_header()\n assert {\"Authorization\": \"Bearer access_token\"} == header", "async def _refresh_token(self):\n async with self.web_session.post(url=self._login_url, json=self._refresh_payload) as resp:\n if self.check_status(resp.status, self._login_url):\n data = await resp.json()\n token = data.get(\"access_token\")\n if token:\n self._set_token(token)\n self.expired_token = False\n return\n await self._try_login()\n await self._wait_for_login()", "def test_list_o_auth_client(self):\n pass", "def testInsecureConnection(self):\n request = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'refresh_token': self._VALID_REFRESH_TOKEN\n }, authentication=self._VALID_CLIENT, isSecure=False)\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(\n request, result, InsecureConnectionError(),\n msg='Expected the token resource to reject a request made via an insecure transport')\n debugTokenResource = TokenResource(\n self._TOKEN_FACTORY, self._PERSISTENT_STORAGE, self._REFRESH_TOKEN_STORAGE,\n self._AUTH_TOKEN_STORAGE, self._CLIENT_STORAGE, allowInsecureRequestDebug=True,\n passwordManager=self._PASSWORD_MANAGER)\n request = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'refresh_token': self._VALID_REFRESH_TOKEN\n }, authentication=self._VALID_CLIENT, isSecure=False)\n newAuthToken = 'tokenViaInsecureConnection'\n self._TOKEN_FACTORY.expectTokenRequest(\n newAuthToken, debugTokenResource.authTokenLifeTime,\n self._VALID_CLIENT, self._VALID_SCOPE)\n result = debugTokenResource.render_POST(request)\n self._TOKEN_FACTORY.assertAllTokensRequested()\n self.assertValidTokenResponse(\n request, result, newAuthToken, expectedExpireTime=debugTokenResource.authTokenLifeTime,\n expectedScope=self._VALID_SCOPE)", "def test_legacy_client_invalid_access_token(self):\n self.legacy_client._client.access_token = 'invalidaccesstoken'\n response = self.legacy_client.request('GET', urljoin(BASE_URL, MOCK_ENDPOINT))\n self.assertEquals(response.content, RESPONSE)", "def refresh_token(self) -> None:\n with self._lock:\n if not self._endpoint:\n raise AuthenticationTokenError(\n 'Token is invalid and endpoint (auth_endpoint) for refresh is not set.')\n\n if self._token_info.fresh():\n return\n\n if not self._token_info.refresh_token:\n self.get_token()\n return\n\n url = self._endpoint + '/refresh'\n data = {\n \"client_id\": self._client_id,\n \"client_secret\": self._client_secret,\n \"refresh_token\": self._token_info.refresh_token\n }\n\n try:\n res = self.post(url, data)\n 
self._token_info.parse_token_result(res, 'Refresh token')\n except TokenExpiredError:\n self.get_token()", "def testAuthorizationWrongClientSecret(self):\n request = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'client_id': self._VALID_CLIENT.id,\n 'client_secret': 'invalidSecret',\n 'refresh_token': self._VALID_REFRESH_TOKEN\n })\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(\n request, result, InvalidClientAuthenticationError(),\n msg='Expected the token resource to reject a request with an invalid client secret.')", "def tearDown(self):\n Credentials.credentials_list = []", "def tearDown(self):\n Credentials.credentials_list = []", "def should_refresh_client_fnc(response):\n return not response", "def test_refreshes_token_when_expired(self):\n\n badgr = self.get_badgr_setup()\n\n # _token_data isn't meant to be exposed; pylint: disable=W0212\n original_token = badgr._token_data['access_token']\n with vcr.use_cassette('tests/vcr_cassettes/expired_auth_token.yaml'):\n badgr.get_from_server(self._sample_url)\n self.assertNotEqual(original_token,\n badgr._token_data['access_token'])", "def test_auth_client_instantiated():\n client = ConfigureClients()\n assert client.auth_client", "def renew_and_load_credentials(self):\n self.x509_proxies_data=[]\n if self.descript_obj.x509_proxies_plugin is not None:\n self.x509_proxies_data=self.descript_obj.x509_proxies_plugin.get_credentials()\n nr_credentials=len(self.x509_proxies_data)\n else:\n nr_credentials=0\n\n nr_good_credentials=nr_credentials\n for i in range(nr_credentials):\n cred_el=self.x509_proxies_data[i]\n cred_el.advertize=True\n cred_el.renew()\n cred_el.createIfNotExist()\n\n cred_el.loaded_data=[]\n for cred_file in (cred_el.filename, cred_el.key_fname, cred_el.pilot_fname):\n if cred_file:\n cred_data = cred_el.getString(cred_file)\n if cred_data:\n cred_el.loaded_data.append((cred_file, cred_data))\n else:\n # We encountered error with this credential\n # Move onto next credential\n break\n\n return nr_credentials", "def test_update(self):\n user = self.custodian_1_user\n user_client = self.custodian_1_client\n urls = [reverse('api:user-detail', kwargs={'pk': user.pk})]\n new_first_name = \"New First Name\"\n data = {\n \"first_name\": new_first_name,\n }\n access = {\n \"forbidden\": [self.anonymous_client, self.readonly_client, self.custodian_2_client],\n \"allowed\": [self.admin_client, user_client]\n }\n\n for client in access['forbidden']:\n for url in urls:\n self.assertIn(\n client.patch(url, data, format='json').status_code,\n [status.HTTP_401_UNAUTHORIZED, status.HTTP_403_FORBIDDEN]\n )\n\n for client in access['allowed']:\n for url in urls:\n new_first_name += '1'\n data['first_name'] = new_first_name\n self.assertEqual(\n client.patch(url, data, format='json').status_code,\n status.HTTP_200_OK\n )\n user.refresh_from_db()\n self.assertEqual(user.first_name, new_first_name)", "async def test_expired_token_refresh_internal_error(\n hass: HomeAssistant,\n component_setup: ComponentSetup,\n aioclient_mock: AiohttpClientMocker,\n) -> None:\n\n aioclient_mock.post(\n \"https://oauth2.googleapis.com/token\",\n status=http.HTTPStatus.INTERNAL_SERVER_ERROR,\n )\n\n await component_setup()\n\n entries = hass.config_entries.async_entries(DOMAIN)\n assert len(entries) == 1\n assert entries[0].state is ConfigEntryState.SETUP_RETRY", "def test_user_can_reset_password(self):\n self.client().post('/api/v1/auth/signup', data=self.user_data)\n login_response = 
self.client().post('/api/v1/auth/login', data=self.user_data)\n #Define header dictionary\n access_token = json.loads(login_response.data.decode())['access_token']\n reset_password = {\n \"user_email\": \"[email protected]\",\n \"old_password\": \"testexample\",\n \"new_password\": \"123456\"\n }\n reset_response = self.client().post('/api/v1/auth/reset-password',\n headers=dict(Authorization='Bearer ' + access_token), data=reset_password)\n #assert that the status code is equal to 200\n self.assertEqual(reset_response.status_code, 200)\n #return result in json format\n result = json.loads(reset_response.data.decode())\n #test that the response contains success message\n self.assertEqual(result[\"message\"],\n \"You have successfully reset your password.\")", "def setUp(self):\r\n super(CLITestAuthKeystone, self).setUp()\r\n self.mox = mox.Mox()\r\n self.client = client.HTTPClient(username=USERNAME,\r\n tenant_name=TENANT_NAME,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)\r\n self.addCleanup(self.mox.VerifyAll)\r\n self.addCleanup(self.mox.UnsetStubs)", "def test_account_reset_apikey(self):\r\n\r\n # Create a fake user\r\n test_user = factory.make_user(username='test_user')\r\n # Set and Get the current api key\r\n # make_user doesn't set the api key of user so set it explicitly\r\n current_apikey = test_user.api_key = \"random_key\"\r\n test_user.activation = Activation(u'signup')\r\n transaction.commit()\r\n\r\n # send a request to reset the api key\r\n res = self.testapp.post(\r\n \"/api/v1/test_user/api_key?api_key=\" + current_apikey,\r\n content_type='application/json',\r\n params={u'username': 'test_user',\r\n u'api_key': current_apikey},\r\n status=200)\r\n\r\n # Get the user's api key from db\r\n fetch_api = DBSession.execute(\r\n \"SELECT api_key FROM users WHERE username='test_user'\").fetchone()\r\n new_apikey = fetch_api['api_key']\r\n\r\n # make sure we can decode the body\r\n response = json.loads(res.body)\r\n\r\n # old and new api keys must not be the same\r\n self.assertNotEqual(\r\n current_apikey, new_apikey,\r\n \"Api key must be changed after reset request\")\r\n self.assertTrue(\r\n 'api_key' in response,\r\n \"Should have an api key in there: {0}\".format(response))\r\n\r\n # Api key in response must be the new one\r\n self.assertEqual(\r\n response['api_key'], new_apikey,\r\n \"Should have a api key of user {0}\".format(response))\r\n\r\n self._check_cors_headers(res)", "def _refresh_access_token(self) -> None:\n response = httpx.post(\n f\"{self._base_url}/oauth2/token\",\n proxies=self._proxies,\n data={\n \"grant_type\": \"client_credentials\",\n \"client_id\": self._api_key,\n \"client_secret\": self._api_secret,\n },\n )\n response.raise_for_status()\n token = response.json()[\"access_token\"]\n c = httpx.Client()\n c.close()\n self._authorization_headers = {\"Authorization\": f\"Bearer {token}\"}", "def should_refresh_client(self):\n return self.refresh_always or FindFriendsService.should_refresh_client_fnc(\n self.response\n )", "def test_update_profile_success(self):\n payload = {\n 'email': '[email protected]',\n 'password': 'newpassword'\n }\n res = self.client.patch(ME_URL, payload)\n\n # Refresh the user object with latest values from db\n self.user.refresh_from_db()\n\n self.assertEqual(res.status_code, status.HTTP_200_OK)\n self.assertEqual(self.user.email, payload['email'])\n self.assertTrue(self.user.check_password(payload['password']))", "def test_jwt_refresh_json(self):\n data = {\n 'token': 
utils.jwt_encode_handler(self.payload)\n }\n\n response = self.client.post(\n '/refresh-token/',\n json.dumps(data),\n content_type='application/json'\n )\n\n response_content = json.loads(smart_text(response.content))\n\n decoded_payload = utils.jwt_decode_handler(response_content['token'])\n\n self.assertEqual(response.status_code, 200)\n self.assertEqual(decoded_payload['username'], self.username)", "def testAuthorizationClientAuthInHeader(self):\n request = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'refresh_token': self._VALID_REFRESH_TOKEN\n })\n self._addAuthenticationToRequestHeader(request, self._VALID_CLIENT)\n newAuthToken = 'tokenWithAuthInHeader'\n self._TOKEN_FACTORY.expectTokenRequest(newAuthToken, self._TOKEN_RESOURCE.authTokenLifeTime,\n self._VALID_CLIENT, self._VALID_SCOPE)\n result = self._TOKEN_RESOURCE.render_POST(request)\n self._TOKEN_FACTORY.assertAllTokensRequested()\n self.assertValidTokenResponse(\n request, result, newAuthToken,\n self._TOKEN_RESOURCE.authTokenLifeTime, expectedScope=self._VALID_SCOPE)", "def test_cleans_previous_token_before_fetching_new_one(self):\n new_token = deepcopy(self.token)\n past = time.time() - 7200\n now = time.time()\n self.token[\"expires_at\"] = past\n new_token[\"expires_at\"] = now + 3600\n url = \"https://example.com/token\"\n\n with mock.patch(\"time.time\", lambda: now):\n for client in self.clients:\n sess = OAuth2Session(client=client, token=self.token)\n sess.send = fake_token(new_token)\n if isinstance(client, LegacyApplicationClient):\n # this client requires a username+password\n # if unset, an error will be raised\n self.assertRaises(ValueError, sess.fetch_token, url)\n self.assertRaises(\n ValueError, sess.fetch_token, url, username=\"username1\"\n )\n self.assertRaises(\n ValueError, sess.fetch_token, url, password=\"password1\"\n )\n # otherwise it will pass\n self.assertEqual(\n sess.fetch_token(\n url, username=\"username1\", password=\"password1\"\n ),\n new_token,\n )\n else:\n self.assertEqual(sess.fetch_token(url), new_token)", "async def test_api_call_service_context(\n hass: HomeAssistant, mock_api_client: TestClient, hass_access_token: str\n) -> None:\n calls = async_mock_service(hass, \"test_domain\", \"test_service\")\n\n await mock_api_client.post(\n \"/api/services/test_domain/test_service\",\n headers={\"authorization\": f\"Bearer {hass_access_token}\"},\n )\n await hass.async_block_till_done()\n\n refresh_token = await hass.auth.async_validate_access_token(hass_access_token)\n\n assert len(calls) == 1\n assert calls[0].context.user_id == refresh_token.user.id", "def testAuthorizationWrongClientSecretInHeader(self):\n client = getTestPasswordClient(self._VALID_CLIENT.id)\n client.secret = 'invalidSecret'\n request = self.generateValidTokenRequest(arguments={\n 'grant_type': 'refresh_token',\n 'refresh_token': self._VALID_REFRESH_TOKEN\n })\n self._addAuthenticationToRequestHeader(request, client)\n result = self._TOKEN_RESOURCE.render_POST(request)\n self.assertFailedTokenRequest(\n request, result, InvalidClientAuthenticationError(),\n msg='Expected the token resource to reject a request with an invalid client secret.')", "def config_client_read_auth_fixture(mocker):\n mock_config_client_read_auth = mocker.patch.object(\n AuthConfigurationClient, \"read_auth\")\n mock_config_client_read_auth.return_value = MOCK_CONFIG_CLIENT_READ_AUTH_RETURN_VALUE\n return mock_config_client_read_auth", "def set_credentials():", "def test_delete_creds(self):\n 
self.new_credentials.save_creds()\n self.new_credentials.delete_creds()\n\n self.assertEqual(len(Credentials.credential_list),0)" ]
[ "0.7480343", "0.6987335", "0.6519567", "0.647827", "0.64674634", "0.6362675", "0.6351274", "0.6320688", "0.6314245", "0.6286668", "0.6279366", "0.6243212", "0.6241278", "0.6184748", "0.6173102", "0.61290675", "0.6063998", "0.6048179", "0.60427195", "0.6041711", "0.6039306", "0.59483266", "0.5948239", "0.59248537", "0.5895134", "0.58802384", "0.5864514", "0.58497673", "0.58264863", "0.5806661", "0.577701", "0.5762825", "0.5743573", "0.57219285", "0.5718886", "0.56954974", "0.56904966", "0.5686357", "0.5684489", "0.5671002", "0.5649541", "0.56430346", "0.56028026", "0.56008935", "0.5596862", "0.5592366", "0.55887973", "0.5583421", "0.5580946", "0.5569636", "0.55479985", "0.5532748", "0.5530373", "0.55112654", "0.5511153", "0.5508042", "0.5498576", "0.54941314", "0.549303", "0.5488868", "0.5466903", "0.5466523", "0.54544115", "0.54488695", "0.5448848", "0.5444645", "0.54431385", "0.54416215", "0.5439677", "0.54333746", "0.5429978", "0.54238003", "0.5421793", "0.54160833", "0.54124355", "0.54094064", "0.5400678", "0.5397344", "0.5388529", "0.5388529", "0.53795785", "0.5367158", "0.5365451", "0.5363543", "0.5359991", "0.53599614", "0.53598404", "0.53592736", "0.53569233", "0.53565437", "0.53366697", "0.5336653", "0.5335749", "0.533482", "0.53294224", "0.5328151", "0.53181016", "0.5314474", "0.5309557", "0.53084916" ]
0.791131
0
Tests the InitializeTurbiniaApiClient method.
def testInitializeTurbiniaApiClientNoCreds(self, mock_get_credentials):
    self.turbinia_processor.turbinia_api = 'http://127.0.0.1:8000'
    self.turbinia_processor.turbinia_auth = True
    mock_credentials = mock.MagicMock(spec=Credentials, id_token = FAKE_CREDENTIALS['token'])
    mock_credentials.id_token = mock.MagicMock()
    mock_credentials.id_token.return_value = FAKE_CREDENTIALS['token']
    self.turbinia_processor.credentials = mock_credentials
    mock_get_credentials.return_value = mock_credentials
    result = self.turbinia_processor.InitializeTurbiniaApiClient(None)
    mock_get_credentials.assert_called_once()
    self.assertIsInstance(result, turbinia_api_lib.ApiClient)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testInitializeTurbiniaApiClient(self, mock_get_credentials):\n self.turbinia_processor.turbinia_api = 'http://127.0.0.1:8000'\n self.turbinia_processor.turbinia_auth = True\n mock_credentials = mock.MagicMock(spec=Credentials, id_token = FAKE_CREDENTIALS['token'])\n mock_credentials.id_token = mock.MagicMock()\n mock_credentials.id_token.return_value = FAKE_CREDENTIALS['token']\n self.turbinia_processor.credentials = mock_credentials\n mock_get_credentials.return_value = mock_credentials\n result = self.turbinia_processor.InitializeTurbiniaApiClient(mock_credentials)\n mock_get_credentials.assert_not_called()\n self.assertIsInstance(result, turbinia_api_lib.ApiClient)", "def setUp(self):\n self.client = api.Client(config.get_config(), api.json_handler)", "def setUp(self):\n self.api = api.InvenTreeAPI(\n SERVER,\n username=USERNAME, password=PASSWORD,\n timeout=30,\n )", "def setUp(self):\n super(TestSyncServiceRisk, self).setUp()\n self.api = ExternalApiClient()", "def setUp(self):\n super(TestSyncServiceControl, self).setUp()\n self.api = ExternalApiClient()", "def setUp(self):\n self.client = APIClient()", "def setUp(self):\n self.client = APIClient()", "def setUp(self):\r\n super(CLITestAuthKeystoneWithId, self).setUp()\r\n self.client = client.HTTPClient(user_id=USER_ID,\r\n tenant_id=TENANT_ID,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)", "def setUp(self) -> None:\n self.client = APIClient()", "def InitializeTurbiniaApiClient(\n self, credentials: Credentials) -> turbinia_api_lib.ApiClient:\n self.client_config = turbinia_api_lib.Configuration(host=self.turbinia_api)\n if not self.client_config:\n self.ModuleError('Unable to configure Turbinia API server', critical=True)\n # Check if Turbinia requires authentication.\n if self.turbinia_auth:\n if not credentials:\n self.credentials = self.GetCredentials(\n self.credentials_path, self.client_secrets_path)\n if self.credentials and self.credentials.id_token:\n self.client_config.access_token = self.credentials.id_token\n else:\n self.ModuleError(\n 'Unable to obtain id_token from identity provider', critical=True)\n return turbinia_api_lib.ApiClient(self.client_config)", "def setUp(self):\n\n self.client = APIClient()", "def setUp(self):\n\n self.client = APIClient()", "def setUp(self):\n\n self.client = APIClient()", "def setUp(self):\n super().setUp()\n self.client = APIClient()", "def setUp(self):\n global access_token\n global accountID\n global account_cur\n global api\n # self.maxDiff = None\n try:\n accountID, account_cur, access_token = unittestsetup.auth()\n setattr(sys.modules[\"oandapyV20.oandapyV20\"],\n \"TRADING_ENVIRONMENTS\",\n {\"practice\": {\n \"stream\": \"https://test.com\",\n \"api\": \"https://test.com\",\n }})\n api = API(environment=environment,\n access_token=access_token,\n headers={\"Content-Type\": \"application/json\"})\n api.api_url = 'https://test.com'\n except Exception as e:\n print(\"%s\" % e)\n exit(0)", "def setUp(self):\r\n super(CLITestAuthKeystoneWithIdandName, self).setUp()\r\n self.client = client.HTTPClient(username=USERNAME,\r\n user_id=USER_ID,\r\n tenant_id=TENANT_ID,\r\n tenant_name=TENANT_NAME,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)", "def testTurbiniaSetup(self, _mock_read_config):\n _mock_read_config.return_value = {\"OUTPUT_DIR\": \"/tmp\"}\n self.turbinia_processor.TurbiniaSetUp(\n project=\"turbinia-project\",\n turbinia_auth=False,\n turbinia_recipe=None,\n turbinia_zone=\"us-central1f\",\n 
turbinia_api=\"http://localhost:8001\",\n incident_id=\"123456789\",\n sketch_id=\"12345\",\n )\n self.assertEqual(self.turbinia_processor.project, \"turbinia-project\")\n self.assertEqual(self.turbinia_processor.turbinia_zone, \"us-central1f\")\n self.assertEqual(\n self.turbinia_processor.turbinia_api, \"http://localhost:8001\")\n self.assertEqual(self.turbinia_processor.incident_id, \"123456789\")\n self.assertEqual(self.turbinia_processor.sketch_id, \"12345\")\n self.assertEqual(self.turbinia_processor.output_path, \"/tmp\")\n self.assertEqual(self.turbinia_processor.turbinia_recipe, None)", "def test_get_client(self):\n pass", "def setUp(self):\n self.client = Client()", "def setUp(self):\n self.client = Client()", "def setUp(self):\n self.client = Client()", "def setUp(self):\n self.client = Client()", "def setUp(self):\r\n super(CLITestAuthKeystone, self).setUp()\r\n self.mox = mox.Mox()\r\n self.client = client.HTTPClient(username=USERNAME,\r\n tenant_name=TENANT_NAME,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)\r\n self.addCleanup(self.mox.VerifyAll)\r\n self.addCleanup(self.mox.UnsetStubs)", "def testclient():\n base_url = PARAMS.get(\"url\") + \"/v2\"\n client = Client(\n base_url=base_url,\n headers={\n \"Authorization\": f\"GenieKey {PARAMS.get('token')}\",\n }\n )\n return client", "def test_create_o_auth_client(self):\n pass", "def setUpClass(cls):\n\n cls.client = TestClient(fastapi_app.app)\n log.info('Completed initialization for FastAPI based REST API tests')", "def setUp(self):\n self.client = DummyClient()", "def TurbiniaSetUp(\n self, project: str, turbinia_auth: bool,\n turbinia_recipe: Union[str, None], turbinia_zone: str, turbinia_api: str,\n incident_id: str, sketch_id: int) -> None:\n self.project = project\n self.turbinia_auth = turbinia_auth\n self.turbinia_api = turbinia_api\n self.turbinia_recipe = turbinia_recipe\n self.turbinia_zone = turbinia_zone\n self.incident_id = incident_id\n self.sketch_id = sketch_id\n self.client_config = turbinia_api_lib.Configuration(host=self.turbinia_api)\n self.client = self.InitializeTurbiniaApiClient(self.credentials)\n self.requests_api_instance = turbinia_requests_api.TurbiniaRequestsApi(\n self.client)\n # We need to get the output path from the Turbinia server.\n api_instance = turbinia_configuration_api.TurbiniaConfigurationApi(\n self.client)\n try:\n api_response = api_instance.read_config()\n self.output_path = api_response.get('OUTPUT_DIR')\n except turbinia_api_lib.ApiException as exception:\n self.ModuleError(exception.body, critical=True)", "def test_create_client(self):\n pass", "def setUp(self):\n rand = ''.join(\n [random\n .choice(string.ascii_letters + string.digits) for n in range(16)])\n self.secret_key = 'sk_test_16c58271c29a007970de0353d8a47868df727cd0'\n self.random_ref = util.utf8(rand)\n self.test_email = '[email protected]'\n self.test_amount = 5000\n self.plan = 'Basic'\n self.client = TransactionResource(self.secret_key, self.random_ref)\n # self.client.initialize(util.utf8(self.test_amount),\n # util.utf8(self.test_email),\n # util.utf8(self.plan))", "def test_init(self):\n rachio = Rachio(AUTHTOKEN)\n\n self.assertEqual(rachio.authtoken, AUTHTOKEN)\n self.assertEqual(rachio.person.authtoken, AUTHTOKEN)\n self.assertEqual(rachio.device.authtoken, AUTHTOKEN)\n self.assertEqual(rachio.zone.authtoken, AUTHTOKEN)\n self.assertEqual(rachio.schedulerule.authtoken, AUTHTOKEN)\n self.assertEqual(rachio.flexschedulerule.authtoken, AUTHTOKEN)\n 
self.assertEqual(rachio.notification.authtoken, AUTHTOKEN)", "def setUp(self):\n self.driver = {\n \"Email\": \"[email protected]\",\n \"Type\": \"driver\",\n \"Password\": \"pass123\",\n \"Confirm Password\": \"pass123\"\n }\n self.ride = {\n \"Destination\": \"Meru\",\n \"Origin\": \"Kutus\",\n \"Time\": \"9:00\",\n \"Date\": \"23/7/2018\",\n \"Ride Name\": \"Toyota\",\n \"Capacity\": \"7\"\n }\n self.request = {\n \"Email\": \"Njobu\",\n \"Tel\": \"+254716272376\"\n }\n self.app = create_app('testing')\n self.client = self.app.test_client\n self.app_context = self.app.app_context()\n self.app_context.push()", "def setUp(self):\n super(TestRiskSnapshotting, self).setUp()\n self.api = ExternalApiClient(use_ggrcq_service_account=True)\n self.objgen = ObjectGenerator()", "def setUpClass(cls) -> None:\n\n cls.wml_credentials = get_wml_credentials()\n\n cls.wml_client = APIClient(wml_credentials=cls.wml_credentials)\n\n if not cls.wml_client.ICP:\n cls.cos_credentials = get_cos_credentials()\n cls.cos_endpoint = cls.cos_credentials.get('endpoint_url')\n cls.cos_resource_instance_id = cls.cos_credentials.get('resource_instance_id')\n\n cls.wml_client = APIClient(wml_credentials=cls.wml_credentials)\n cls.project_id = cls.wml_credentials.get('project_id')", "def setUpClass(cls) -> None:\n\n cls.wml_credentials = get_wml_credentials()\n\n cls.wml_client = APIClient(wml_credentials=cls.wml_credentials)\n\n if not cls.wml_client.ICP:\n cls.cos_credentials = get_cos_credentials()\n cls.cos_endpoint = cls.cos_credentials.get('endpoint_url')\n cls.cos_resource_instance_id = cls.cos_credentials.get('resource_instance_id')\n\n cls.wml_client = APIClient(wml_credentials=cls.wml_credentials)\n cls.project_id = cls.wml_credentials.get('project_id')", "def test_for_client():", "def setUp(self):\n super(BaseTest, self).setUp()\n\n api_client_config = disk_manager_exercise_client.Configuration()\n api_client_config.host = self.ENDPOINT\n\n self.client = disk_manager_exercise_client.api.Disk-manager-exerciseApi(\n api_client=disk_manager_exercise_client.ApiClient(\n configuration=api_client_config\n )\n )", "def setUp(self):\r\n super(CLITestAuthNoAuth, self).setUp()\r\n self.mox = mox.Mox()\r\n self.client = client.HTTPClient(username=USERNAME,\r\n tenant_name=TENANT_NAME,\r\n password=PASSWORD,\r\n endpoint_url=ENDPOINT_URL,\r\n auth_strategy=NOAUTH,\r\n region_name=REGION)\r\n self.addCleanup(self.mox.VerifyAll)\r\n self.addCleanup(self.mox.UnsetStubs)", "def setUp(self):\n self.bot = helpers.MockBot()\n self.bot.api_client.get = unittest.mock.AsyncMock()\n self.cog = information.Information(self.bot)", "def testApi(self):", "def test_init(self, api_module_soap, http_client, auth):\n api_objects = pytan3.api_objects.ApiObjects(\n module_file=api_module_soap[\"module_file\"]\n )\n credentials_auth = pytan3.auth_methods.Credentials(\n http_client=http_client, **auth\n )\n api_client = pytan3.api_clients.Soap(\n http_client=http_client, auth_method=credentials_auth\n )\n adapter = pytan3.adapters.Soap(api_client=api_client, api_objects=api_objects)\n assert adapter.api_client == api_client\n assert adapter.http_client == http_client\n assert adapter.api_objects == api_objects\n assert adapter.auth_method == credentials_auth\n assert adapter.get_name() == \"soap\"\n assert adapter.get_type() == \"soap\"\n assert adapter.result_cls == pytan3.results.Soap\n assert format(http_client) in format(adapter)\n assert format(http_client) in repr(adapter)\n cls = pytan3.adapters.load(adapter)\n assert issubclass(cls, 
pytan3.adapters.Soap)", "async def test_setup(hass: HomeAssistant, aioclient_mock: AiohttpClientMocker) -> None:\n await setup_integration(hass, aioclient_mock)\n assert hass.states.get(MAIN_ENTITY_ID)\n assert hass.states.get(CLIENT_ENTITY_ID)\n assert hass.states.get(UNAVAILABLE_ENTITY_ID)", "def test_client(test_username, test_api_key):\n return ViClient(username=test_username, api_key=test_api_key,\n url=\"https://vectorai-development-api-vectorai-test-api.azurewebsites.net/\")", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def setUp(self):\r\n super(CLITestNameorID, self).setUp()\r\n self.mox = mox.Mox()\r\n self.endurl = test_cli20.ENDURL\r\n self.client = client.Client(token=test_cli20.TOKEN,\r\n endpoint_url=self.endurl)\r\n self.addCleanup(self.mox.VerifyAll)\r\n self.addCleanup(self.mox.UnsetStubs)", "def setUpClass(cls):\n super(LBAASv2Test, cls).setUpClass()\n cls.keystone_client = openstack_utils.get_keystone_session_client(\n cls.keystone_session)\n cls.neutron_client = openstack_utils.get_neutron_session_client(\n cls.keystone_session)\n cls.octavia_client = openstack_utils.get_octavia_session_client(\n cls.keystone_session)\n cls.RESOURCE_PREFIX = 'zaza-octavia'\n\n # NOTE(fnordahl): in the event of a test failure we do not want to run\n # tear down code as it will make debugging a problem virtually\n # impossible. To alleviate each test method will set the\n # `run_tearDown` instance variable at the end which will let us run\n # tear down only when there were no failure.\n cls.run_tearDown = False\n # List of load balancers created by this test\n cls.loadbalancers = []\n # List of floating IPs created by this test\n cls.fips = []", "def setUp(self):\r\n\r\n app.config['TESTING'] = True\r\n self.client = app.test_client()", "def setUp(self):\n APP.config.from_object(CONFIGS['testing_config'])\n self.api = APP\n self.api_context = self.api.app_context()\n self.api_context.push()\n self.api_test_client = APP.test_client()\n\n # Base url common to all endpoints\n self.BASE_URL = '/api/v1'\n # Sample data for POST requests\n self.ORDER = {\n 'item_name': 'Big Samosa',\n 'item_price': 200,\n 'quantity': 1\n }\n\n self.ORDER_2 = {\n 'item_name': 'Pork Ribs',\n 'item_price': 1080,\n 'quantity': 1\n }", "def setup_method(self) -> None:\n self.client = Mock()", "def setUp(self):\n self.api = \"http://localhost:4031/\"\n self.version = \"0.2\"\n self.app = init_api()", "def testTurbiniaStart(self, mock_create_request):\n mock_create_request.return_value = {\n \"request_id\": \"41483253079448e59685d88f37ab91f7\"\n }\n mock_api_instance = mock.MagicMock()\n mock_api_instance.create_request = mock_create_request\n self.turbinia_processor.requests_api_instance = mock_api_instance\n evidence = {\n \"type\": \"GoogleCloudDisk\",\n \"disk_name\": \"disk-1\",\n \"project\": \"project-1\",\n \"zone\": \"us-central1-f\",\n }\n request_id = self.turbinia_processor.TurbiniaStart(\n evidence=evidence, yara_rules=YARA_RULE)\n self.assertEqual(request_id, \"41483253079448e59685d88f37ab91f7\")", "def setUp(self):\n self.client = APIClient()\n self.user = User.objects.create_user('[email protected]', password='testing')\n 
self.user.save()\n self.token = Token.objects.get(user=self.user)", "def test_auth_client_instantiated():\n client = ConfigureClients()\n assert client.auth_client", "def setUp(self):\n self.logger = mock.MagicMock()\n test_state = state.DFTimewolfState(config.Config)\n self.turbinia_processor = turbinia_base.TurbiniaProcessorBase(\n test_state, self.logger)\n file_path = os.path.join(\n CURRENT_DIR, \"test_data\", \"turbinia_request_status.json\")\n self._request_status = json.load(open(file_path))", "def _initialize_tests(self, api_version=2):\n # Access the sentries for inspecting service units\n self.pxc_sentry = self.d.sentry['percona-cluster'][0]\n self.keystone_sentry = self.d.sentry['keystone'][0]\n self.glance_sentry = self.d.sentry['glance'][0]\n self.swift_proxy_sentry = self.d.sentry['swift-proxy'][0]\n self.swift_storage_sentry = self.d.sentry['swift-storage'][0]\n\n u.log.debug('openstack release val: {}'.format(\n self._get_openstack_release()))\n u.log.debug('openstack release str: {}'.format(\n self._get_openstack_release_string()))\n\n # Authenticate admin with keystone\n self._init_keystone_admin_client(api_version)\n\n # Authenticate admin with glance endpoint\n self.glance = u.authenticate_glance_admin(self.keystone)\n\n keystone_ip = self.keystone_sentry.info['public-address']\n keystone_relation = self.keystone_sentry.relation(\n 'identity-service', 'swift-proxy:identity-service')\n\n # Create a demo tenant/role/user\n self.demo_tenant = 'demoTenant'\n self.demo_role = 'demoRole'\n self.demo_user = 'demoUser'\n self.demo_project = 'demoProject'\n self.demo_domain = 'demoDomain'\n\n if (self._get_openstack_release() >= self.xenial_queens or\n api_version == 3):\n self.create_users_v3()\n self.demo_user_session, _ = u.get_keystone_session(\n keystone_ip,\n self.demo_user,\n 'password',\n api_version=3,\n user_domain_name=self.demo_domain,\n project_domain_name=self.demo_domain,\n project_name=self.demo_project\n )\n self.keystone_demo = keystone_client_v3.Client(\n session=self.demo_user_session)\n self.service_session, _ = u.get_keystone_session(\n keystone_ip,\n keystone_relation['service_username'],\n keystone_relation['service_password'],\n api_version=3,\n user_domain_name=keystone_relation['service_domain'],\n project_domain_name=keystone_relation['service_domain'],\n project_name=keystone_relation['service_tenant']\n )\n else:\n self.create_users_v2()\n # Authenticate demo user with keystone\n self.keystone_demo = \\\n u.authenticate_keystone_user(\n self.keystone, user=self.demo_user,\n password='password',\n tenant=self.demo_tenant)\n self.service_session, _ = u.get_keystone_session(\n keystone_ip,\n keystone_relation['service_username'],\n keystone_relation['service_password'],\n api_version=2,\n project_name=keystone_relation['service_tenant']\n )\n self.swift = swiftclient.Connection(session=self.service_session)", "def setUpClass(cls):\n\n cls.client = get_client()", "def setUpClass(cls):\n\n cls.client = get_client()", "def setUpClass(cls):\n\n cls.client = get_client()", "def test_client_retrieve(self):\n pass", "def setUp(self):\n self.c = Client(host=\"localhost\")", "def _initialize_tests(self):\n # Access the sentries for inspecting service units\n self.pxc_sentry = self.d.sentry['percona-cluster'][0]\n self.keystone_sentry = self.d.sentry['keystone'][0]\n self.cinder_sentry = self.d.sentry['cinder'][0]\n u.log.debug('openstack release val: {}'.format(\n self._get_openstack_release()))\n u.log.debug('openstack release str: {}'.format(\n 
self._get_openstack_release_string()))\n self.keystone_ip = self.keystone_sentry.relation(\n 'shared-db',\n 'percona-cluster:shared-db')['private-address']\n self.set_api_version(2)\n # Authenticate keystone admin\n self.keystone_v2 = self.get_keystone_client(api_version=2)\n self.keystone_v3 = self.get_keystone_client(api_version=3)\n self.create_users_v2()", "def setUp(self):\n\n self.client = None\n if conf.options.get_value('runlive') == 'true':\n self.client = gdata.analytics.client.AnalyticsClient()\n self.client.http_client.debug = True\n\n conf.configure_client(\n self.client,\n 'AnalyticsClientTest',\n self.client.auth_service)", "def test_create_virtual_account_client(self):\n pass", "def client():\n from csuibot import app\n app.config['TESTING'] = True\n return app.test_client()", "def setUp(self):\n from logi_circle import LogiCircle\n\n self.logi = LogiCircle(client_id=CLIENT_ID,\n client_secret=CLIENT_SECRET,\n redirect_uri=REDIRECT_URI,\n cache_file=CACHE_FILE,\n api_key=API_KEY)\n self.fixtures = FIXTURES\n self.client_id = CLIENT_ID\n self.client_secret = CLIENT_SECRET\n self.redirect_uri = REDIRECT_URI\n self.cache_file = CACHE_FILE\n\n self.loop = asyncio.new_event_loop()", "def setUp(self):\n self.client = APIClient()\n self.user = self.make_user()\n self.detail_url = self.base_url + '{}/'", "def test_open_api(self):\n response = self.client.get(self.initiatives_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def setUp(self):\n # Get API keys from file\n keys_file = open(\"keys\", \"r\")\n keys = json.loads(keys_file.read())\n keys_file.close()\n\n # Set standard values for testing\n self.dataset = \"FORMS\"\n self.table = \"Agencies\"\n self.table2 = \"AgencyForms\"\n self.badstr = \"blah\"\n\n # Create authenticated and unauthenticated instances of DOLAPI\n self.unauth = DOLAPI()\n self.badauth = DOLAPI(self.badstr, self.badstr * 2)\n self.auth = DOLAPI(str(keys['key']), str(keys['secret']))", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n BaseTest.__init__(self)\n self.client = APIClient()\n self.user = User.objects.create_user(\n self.user_name, self.user_email, self.password)\n self.user.is_active = True\n self.user.is_email_verified = True\n self.user.save()\n self.login_response = self.client.post(\n \"/api/users/login/\",\n self.login_data,\n format=\"json\")", "def test_init(self):\n with self.assertRaises(ValueError):\n TraxionPay(api_key=self.api_key)", "def setUp(self):\n self.app = init_api()", "def setUp(self):\n self.hass = get_test_home_assistant()\n with requests_mock.Mocker() as mock_req:\n self.setup_api(MOCK_DATA, mock_req)\n self.addCleanup(self.hass.stop)", "def setUp(self):\n self.app = app.test_client()\n self.api = MockApi()\n self.api.reset()\n for gateway in GATEWAYS:\n if gateway == 'CheapPaymentGateway':\n item = self.api.put(\n gateway,\n 'available',\n 0,\n )", "def setUp(self):\n self.app = create_app('testing')\n self.client = self.app.test_client()", "def setup_class(cls):\n cls.client = APP.test_client()", "def test_read_o_auth_client(self):\n pass", "def test_create_api_key(self):\n pass", "def __init__(self, test_obj: 'T0TestBase') -> None:\n\n self.test_obj = test_obj\n self.client = test_obj.client", "def setUp(self):\n self.app = app.test_client()\n self.api = MockApi()\n self.api.reset()\n for gateway in GATEWAYS:\n if gateway == 'PremiumPaymentGateway':\n item = 
self.api.put(\n gateway,\n 'available',\n 0,\n )", "def test_constructor_all_args(self):\n test_utils.generate_test_config_file()\n expected_auth = (\"hello\", \"world\")\n expected_url = \"http://wat.com/testing.json\"\n client = PowerTrackClient(_dummy_callback, auth=expected_auth, url=expected_url, config_file_path=config_file)\n\n self.assertEqual(expected_auth[0], client.auth[0])\n self.assertEqual(expected_auth[1], client.auth[1])\n self.assertEqual(expected_url, client.url)", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n self.app = app.test_client()\n self.headers = {\n 'Content-Type':'application/json',\n 'Authorization': 'Basic %s' % b64encode(b\"relpek:puorg\").decode(\"ascii\")\n }", "def test_client_create(self):\n pass", "def test_authtoken_init(self):\n auth_client = self.fixtures.auth_client\n crusoe = self.fixtures.crusoe\n result = models.AuthToken(\n auth_client=auth_client, user=crusoe, scope='id', validity=0\n )\n self.assertIsInstance(result, models.AuthToken)\n self.assertEqual(result.user, crusoe)\n self.assertEqual(result.auth_client, auth_client)", "def setUp(self): \n self.client = app.test_client()\n self.acceso = login(self.client)\n identity_loaded.connect(_on_principal_init)", "def test_create_o_auth_client_authorization(self):\n pass", "def test_set_api_key(self):\n\n api_key = 'abc'\n project_id = '123'\n\n kaput.init(api_key, project_id)\n\n self.assertEqual(api_key, kaput._API_KEY)\n self.assertEqual(project_id, kaput._PROJECT_ID)\n self.assertFalse(kaput._DEBUG)\n self.assertEqual(kaput._handle_exception, sys.excepthook)", "def setUp(self):\n\t\tself.conn = Client([\"127.0.0.1:11211\"], debug = 1)", "def setUp(self):\n self.clnt = CvpClient()\n nodes = [\"1.1.1.1\"]\n self.clnt.nodes = nodes\n self.clnt.node_cnt = len(nodes)\n self.clnt.node_pool = cycle(nodes)\n self.api = CvpApi(self.clnt)", "def setUp(self):\n super(TestControlsImport, self).setUp()\n self.client.get(\"/login\")", "def setUp(self):\n self.client = RequestsClient()\n self.method = 'GET'\n self.url = 'http://github.com/ojengwa'\n self.headers = {}", "def test_init(self):\n print os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\n self.assertEqual(len(self.app_mgr.f2f_api_key), 32)\n self.assertEqual(self.app_mgr._continue, True)" ]
[ "0.763972", "0.71817064", "0.69348216", "0.6902786", "0.6898086", "0.68290806", "0.68290806", "0.6813784", "0.6806354", "0.67816716", "0.6775598", "0.6775598", "0.6775598", "0.6712668", "0.6631375", "0.65852684", "0.65815634", "0.6529811", "0.6472687", "0.6472687", "0.6472687", "0.6472687", "0.6447478", "0.64440966", "0.6405738", "0.63905644", "0.63741815", "0.6279089", "0.6278553", "0.62766874", "0.6269372", "0.625751", "0.62438613", "0.6239983", "0.6239983", "0.62125194", "0.61642706", "0.61497724", "0.6145362", "0.6142839", "0.61354023", "0.6130183", "0.6127149", "0.61191964", "0.61191964", "0.61191964", "0.61191964", "0.61191964", "0.61050457", "0.61040926", "0.6103726", "0.6088002", "0.60848993", "0.60746586", "0.6071686", "0.60712695", "0.60699606", "0.60599715", "0.6042989", "0.6039333", "0.6039333", "0.6039333", "0.6017007", "0.60100424", "0.6006237", "0.60044134", "0.5999542", "0.5983981", "0.59748626", "0.5970896", "0.59615636", "0.5958287", "0.59539515", "0.59539515", "0.5946084", "0.59429896", "0.5941315", "0.59412384", "0.5932894", "0.59301597", "0.59247464", "0.5918724", "0.59183496", "0.5908873", "0.59064007", "0.5898524", "0.5894799", "0.5894799", "0.5894799", "0.5894121", "0.58933204", "0.58928007", "0.5886987", "0.5882218", "0.5878135", "0.58693844", "0.5865535", "0.5863262", "0.5862124", "0.5858915" ]
0.73480415
1
Tests the InitializeTurbiniaApiClient method.
def testInitializeTurbiniaApiClient(self, mock_get_credentials): self.turbinia_processor.turbinia_api = 'http://127.0.0.1:8000' self.turbinia_processor.turbinia_auth = True mock_credentials = mock.MagicMock(spec=Credentials, id_token = FAKE_CREDENTIALS['token']) mock_credentials.id_token = mock.MagicMock() mock_credentials.id_token.return_value = FAKE_CREDENTIALS['token'] self.turbinia_processor.credentials = mock_credentials mock_get_credentials.return_value = mock_credentials result = self.turbinia_processor.InitializeTurbiniaApiClient(mock_credentials) mock_get_credentials.assert_not_called() self.assertIsInstance(result, turbinia_api_lib.ApiClient)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def testInitializeTurbiniaApiClientNoCreds(self, mock_get_credentials):\n self.turbinia_processor.turbinia_api = 'http://127.0.0.1:8000'\n self.turbinia_processor.turbinia_auth = True\n mock_credentials = mock.MagicMock(spec=Credentials, id_token = FAKE_CREDENTIALS['token'])\n mock_credentials.id_token = mock.MagicMock()\n mock_credentials.id_token.return_value = FAKE_CREDENTIALS['token']\n self.turbinia_processor.credentials = mock_credentials\n mock_get_credentials.return_value = mock_credentials\n result = self.turbinia_processor.InitializeTurbiniaApiClient(None)\n mock_get_credentials.assert_called_once()\n self.assertIsInstance(result, turbinia_api_lib.ApiClient)", "def setUp(self):\n self.client = api.Client(config.get_config(), api.json_handler)", "def setUp(self):\n self.api = api.InvenTreeAPI(\n SERVER,\n username=USERNAME, password=PASSWORD,\n timeout=30,\n )", "def setUp(self):\n super(TestSyncServiceRisk, self).setUp()\n self.api = ExternalApiClient()", "def setUp(self):\n super(TestSyncServiceControl, self).setUp()\n self.api = ExternalApiClient()", "def setUp(self):\n self.client = APIClient()", "def setUp(self):\n self.client = APIClient()", "def setUp(self):\r\n super(CLITestAuthKeystoneWithId, self).setUp()\r\n self.client = client.HTTPClient(user_id=USER_ID,\r\n tenant_id=TENANT_ID,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)", "def setUp(self) -> None:\n self.client = APIClient()", "def InitializeTurbiniaApiClient(\n self, credentials: Credentials) -> turbinia_api_lib.ApiClient:\n self.client_config = turbinia_api_lib.Configuration(host=self.turbinia_api)\n if not self.client_config:\n self.ModuleError('Unable to configure Turbinia API server', critical=True)\n # Check if Turbinia requires authentication.\n if self.turbinia_auth:\n if not credentials:\n self.credentials = self.GetCredentials(\n self.credentials_path, self.client_secrets_path)\n if self.credentials and self.credentials.id_token:\n self.client_config.access_token = self.credentials.id_token\n else:\n self.ModuleError(\n 'Unable to obtain id_token from identity provider', critical=True)\n return turbinia_api_lib.ApiClient(self.client_config)", "def setUp(self):\n\n self.client = APIClient()", "def setUp(self):\n\n self.client = APIClient()", "def setUp(self):\n\n self.client = APIClient()", "def setUp(self):\n super().setUp()\n self.client = APIClient()", "def setUp(self):\n global access_token\n global accountID\n global account_cur\n global api\n # self.maxDiff = None\n try:\n accountID, account_cur, access_token = unittestsetup.auth()\n setattr(sys.modules[\"oandapyV20.oandapyV20\"],\n \"TRADING_ENVIRONMENTS\",\n {\"practice\": {\n \"stream\": \"https://test.com\",\n \"api\": \"https://test.com\",\n }})\n api = API(environment=environment,\n access_token=access_token,\n headers={\"Content-Type\": \"application/json\"})\n api.api_url = 'https://test.com'\n except Exception as e:\n print(\"%s\" % e)\n exit(0)", "def setUp(self):\r\n super(CLITestAuthKeystoneWithIdandName, self).setUp()\r\n self.client = client.HTTPClient(username=USERNAME,\r\n user_id=USER_ID,\r\n tenant_id=TENANT_ID,\r\n tenant_name=TENANT_NAME,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)", "def testTurbiniaSetup(self, _mock_read_config):\n _mock_read_config.return_value = {\"OUTPUT_DIR\": \"/tmp\"}\n self.turbinia_processor.TurbiniaSetUp(\n project=\"turbinia-project\",\n turbinia_auth=False,\n turbinia_recipe=None,\n turbinia_zone=\"us-central1f\",\n 
turbinia_api=\"http://localhost:8001\",\n incident_id=\"123456789\",\n sketch_id=\"12345\",\n )\n self.assertEqual(self.turbinia_processor.project, \"turbinia-project\")\n self.assertEqual(self.turbinia_processor.turbinia_zone, \"us-central1f\")\n self.assertEqual(\n self.turbinia_processor.turbinia_api, \"http://localhost:8001\")\n self.assertEqual(self.turbinia_processor.incident_id, \"123456789\")\n self.assertEqual(self.turbinia_processor.sketch_id, \"12345\")\n self.assertEqual(self.turbinia_processor.output_path, \"/tmp\")\n self.assertEqual(self.turbinia_processor.turbinia_recipe, None)", "def test_get_client(self):\n pass", "def setUp(self):\n self.client = Client()", "def setUp(self):\n self.client = Client()", "def setUp(self):\n self.client = Client()", "def setUp(self):\n self.client = Client()", "def setUp(self):\r\n super(CLITestAuthKeystone, self).setUp()\r\n self.mox = mox.Mox()\r\n self.client = client.HTTPClient(username=USERNAME,\r\n tenant_name=TENANT_NAME,\r\n password=PASSWORD,\r\n auth_url=AUTH_URL,\r\n region_name=REGION)\r\n self.addCleanup(self.mox.VerifyAll)\r\n self.addCleanup(self.mox.UnsetStubs)", "def testclient():\n base_url = PARAMS.get(\"url\") + \"/v2\"\n client = Client(\n base_url=base_url,\n headers={\n \"Authorization\": f\"GenieKey {PARAMS.get('token')}\",\n }\n )\n return client", "def test_create_o_auth_client(self):\n pass", "def setUpClass(cls):\n\n cls.client = TestClient(fastapi_app.app)\n log.info('Completed initialization for FastAPI based REST API tests')", "def setUp(self):\n self.client = DummyClient()", "def TurbiniaSetUp(\n self, project: str, turbinia_auth: bool,\n turbinia_recipe: Union[str, None], turbinia_zone: str, turbinia_api: str,\n incident_id: str, sketch_id: int) -> None:\n self.project = project\n self.turbinia_auth = turbinia_auth\n self.turbinia_api = turbinia_api\n self.turbinia_recipe = turbinia_recipe\n self.turbinia_zone = turbinia_zone\n self.incident_id = incident_id\n self.sketch_id = sketch_id\n self.client_config = turbinia_api_lib.Configuration(host=self.turbinia_api)\n self.client = self.InitializeTurbiniaApiClient(self.credentials)\n self.requests_api_instance = turbinia_requests_api.TurbiniaRequestsApi(\n self.client)\n # We need to get the output path from the Turbinia server.\n api_instance = turbinia_configuration_api.TurbiniaConfigurationApi(\n self.client)\n try:\n api_response = api_instance.read_config()\n self.output_path = api_response.get('OUTPUT_DIR')\n except turbinia_api_lib.ApiException as exception:\n self.ModuleError(exception.body, critical=True)", "def test_create_client(self):\n pass", "def setUp(self):\n rand = ''.join(\n [random\n .choice(string.ascii_letters + string.digits) for n in range(16)])\n self.secret_key = 'sk_test_16c58271c29a007970de0353d8a47868df727cd0'\n self.random_ref = util.utf8(rand)\n self.test_email = '[email protected]'\n self.test_amount = 5000\n self.plan = 'Basic'\n self.client = TransactionResource(self.secret_key, self.random_ref)\n # self.client.initialize(util.utf8(self.test_amount),\n # util.utf8(self.test_email),\n # util.utf8(self.plan))", "def test_init(self):\n rachio = Rachio(AUTHTOKEN)\n\n self.assertEqual(rachio.authtoken, AUTHTOKEN)\n self.assertEqual(rachio.person.authtoken, AUTHTOKEN)\n self.assertEqual(rachio.device.authtoken, AUTHTOKEN)\n self.assertEqual(rachio.zone.authtoken, AUTHTOKEN)\n self.assertEqual(rachio.schedulerule.authtoken, AUTHTOKEN)\n self.assertEqual(rachio.flexschedulerule.authtoken, AUTHTOKEN)\n 
self.assertEqual(rachio.notification.authtoken, AUTHTOKEN)", "def setUp(self):\n self.driver = {\n \"Email\": \"[email protected]\",\n \"Type\": \"driver\",\n \"Password\": \"pass123\",\n \"Confirm Password\": \"pass123\"\n }\n self.ride = {\n \"Destination\": \"Meru\",\n \"Origin\": \"Kutus\",\n \"Time\": \"9:00\",\n \"Date\": \"23/7/2018\",\n \"Ride Name\": \"Toyota\",\n \"Capacity\": \"7\"\n }\n self.request = {\n \"Email\": \"Njobu\",\n \"Tel\": \"+254716272376\"\n }\n self.app = create_app('testing')\n self.client = self.app.test_client\n self.app_context = self.app.app_context()\n self.app_context.push()", "def setUp(self):\n super(TestRiskSnapshotting, self).setUp()\n self.api = ExternalApiClient(use_ggrcq_service_account=True)\n self.objgen = ObjectGenerator()", "def setUpClass(cls) -> None:\n\n cls.wml_credentials = get_wml_credentials()\n\n cls.wml_client = APIClient(wml_credentials=cls.wml_credentials)\n\n if not cls.wml_client.ICP:\n cls.cos_credentials = get_cos_credentials()\n cls.cos_endpoint = cls.cos_credentials.get('endpoint_url')\n cls.cos_resource_instance_id = cls.cos_credentials.get('resource_instance_id')\n\n cls.wml_client = APIClient(wml_credentials=cls.wml_credentials)\n cls.project_id = cls.wml_credentials.get('project_id')", "def setUpClass(cls) -> None:\n\n cls.wml_credentials = get_wml_credentials()\n\n cls.wml_client = APIClient(wml_credentials=cls.wml_credentials)\n\n if not cls.wml_client.ICP:\n cls.cos_credentials = get_cos_credentials()\n cls.cos_endpoint = cls.cos_credentials.get('endpoint_url')\n cls.cos_resource_instance_id = cls.cos_credentials.get('resource_instance_id')\n\n cls.wml_client = APIClient(wml_credentials=cls.wml_credentials)\n cls.project_id = cls.wml_credentials.get('project_id')", "def test_for_client():", "def setUp(self):\n super(BaseTest, self).setUp()\n\n api_client_config = disk_manager_exercise_client.Configuration()\n api_client_config.host = self.ENDPOINT\n\n self.client = disk_manager_exercise_client.api.Disk-manager-exerciseApi(\n api_client=disk_manager_exercise_client.ApiClient(\n configuration=api_client_config\n )\n )", "def setUp(self):\r\n super(CLITestAuthNoAuth, self).setUp()\r\n self.mox = mox.Mox()\r\n self.client = client.HTTPClient(username=USERNAME,\r\n tenant_name=TENANT_NAME,\r\n password=PASSWORD,\r\n endpoint_url=ENDPOINT_URL,\r\n auth_strategy=NOAUTH,\r\n region_name=REGION)\r\n self.addCleanup(self.mox.VerifyAll)\r\n self.addCleanup(self.mox.UnsetStubs)", "def setUp(self):\n self.bot = helpers.MockBot()\n self.bot.api_client.get = unittest.mock.AsyncMock()\n self.cog = information.Information(self.bot)", "def testApi(self):", "def test_init(self, api_module_soap, http_client, auth):\n api_objects = pytan3.api_objects.ApiObjects(\n module_file=api_module_soap[\"module_file\"]\n )\n credentials_auth = pytan3.auth_methods.Credentials(\n http_client=http_client, **auth\n )\n api_client = pytan3.api_clients.Soap(\n http_client=http_client, auth_method=credentials_auth\n )\n adapter = pytan3.adapters.Soap(api_client=api_client, api_objects=api_objects)\n assert adapter.api_client == api_client\n assert adapter.http_client == http_client\n assert adapter.api_objects == api_objects\n assert adapter.auth_method == credentials_auth\n assert adapter.get_name() == \"soap\"\n assert adapter.get_type() == \"soap\"\n assert adapter.result_cls == pytan3.results.Soap\n assert format(http_client) in format(adapter)\n assert format(http_client) in repr(adapter)\n cls = pytan3.adapters.load(adapter)\n assert issubclass(cls, 
pytan3.adapters.Soap)", "async def test_setup(hass: HomeAssistant, aioclient_mock: AiohttpClientMocker) -> None:\n await setup_integration(hass, aioclient_mock)\n assert hass.states.get(MAIN_ENTITY_ID)\n assert hass.states.get(CLIENT_ENTITY_ID)\n assert hass.states.get(UNAVAILABLE_ENTITY_ID)", "def test_client(test_username, test_api_key):\n return ViClient(username=test_username, api_key=test_api_key,\n url=\"https://vectorai-development-api-vectorai-test-api.azurewebsites.net/\")", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def __init__(self, client, use_stubs=True):\n super().__init__(client, use_stubs)", "def setUp(self):\r\n super(CLITestNameorID, self).setUp()\r\n self.mox = mox.Mox()\r\n self.endurl = test_cli20.ENDURL\r\n self.client = client.Client(token=test_cli20.TOKEN,\r\n endpoint_url=self.endurl)\r\n self.addCleanup(self.mox.VerifyAll)\r\n self.addCleanup(self.mox.UnsetStubs)", "def setUpClass(cls):\n super(LBAASv2Test, cls).setUpClass()\n cls.keystone_client = openstack_utils.get_keystone_session_client(\n cls.keystone_session)\n cls.neutron_client = openstack_utils.get_neutron_session_client(\n cls.keystone_session)\n cls.octavia_client = openstack_utils.get_octavia_session_client(\n cls.keystone_session)\n cls.RESOURCE_PREFIX = 'zaza-octavia'\n\n # NOTE(fnordahl): in the event of a test failure we do not want to run\n # tear down code as it will make debugging a problem virtually\n # impossible. To alleviate each test method will set the\n # `run_tearDown` instance variable at the end which will let us run\n # tear down only when there were no failure.\n cls.run_tearDown = False\n # List of load balancers created by this test\n cls.loadbalancers = []\n # List of floating IPs created by this test\n cls.fips = []", "def setUp(self):\r\n\r\n app.config['TESTING'] = True\r\n self.client = app.test_client()", "def setUp(self):\n APP.config.from_object(CONFIGS['testing_config'])\n self.api = APP\n self.api_context = self.api.app_context()\n self.api_context.push()\n self.api_test_client = APP.test_client()\n\n # Base url common to all endpoints\n self.BASE_URL = '/api/v1'\n # Sample data for POST requests\n self.ORDER = {\n 'item_name': 'Big Samosa',\n 'item_price': 200,\n 'quantity': 1\n }\n\n self.ORDER_2 = {\n 'item_name': 'Pork Ribs',\n 'item_price': 1080,\n 'quantity': 1\n }", "def setup_method(self) -> None:\n self.client = Mock()", "def setUp(self):\n self.api = \"http://localhost:4031/\"\n self.version = \"0.2\"\n self.app = init_api()", "def testTurbiniaStart(self, mock_create_request):\n mock_create_request.return_value = {\n \"request_id\": \"41483253079448e59685d88f37ab91f7\"\n }\n mock_api_instance = mock.MagicMock()\n mock_api_instance.create_request = mock_create_request\n self.turbinia_processor.requests_api_instance = mock_api_instance\n evidence = {\n \"type\": \"GoogleCloudDisk\",\n \"disk_name\": \"disk-1\",\n \"project\": \"project-1\",\n \"zone\": \"us-central1-f\",\n }\n request_id = self.turbinia_processor.TurbiniaStart(\n evidence=evidence, yara_rules=YARA_RULE)\n self.assertEqual(request_id, \"41483253079448e59685d88f37ab91f7\")", "def setUp(self):\n self.client = APIClient()\n self.user = User.objects.create_user('[email protected]', password='testing')\n 
self.user.save()\n self.token = Token.objects.get(user=self.user)", "def test_auth_client_instantiated():\n client = ConfigureClients()\n assert client.auth_client", "def setUp(self):\n self.logger = mock.MagicMock()\n test_state = state.DFTimewolfState(config.Config)\n self.turbinia_processor = turbinia_base.TurbiniaProcessorBase(\n test_state, self.logger)\n file_path = os.path.join(\n CURRENT_DIR, \"test_data\", \"turbinia_request_status.json\")\n self._request_status = json.load(open(file_path))", "def _initialize_tests(self, api_version=2):\n # Access the sentries for inspecting service units\n self.pxc_sentry = self.d.sentry['percona-cluster'][0]\n self.keystone_sentry = self.d.sentry['keystone'][0]\n self.glance_sentry = self.d.sentry['glance'][0]\n self.swift_proxy_sentry = self.d.sentry['swift-proxy'][0]\n self.swift_storage_sentry = self.d.sentry['swift-storage'][0]\n\n u.log.debug('openstack release val: {}'.format(\n self._get_openstack_release()))\n u.log.debug('openstack release str: {}'.format(\n self._get_openstack_release_string()))\n\n # Authenticate admin with keystone\n self._init_keystone_admin_client(api_version)\n\n # Authenticate admin with glance endpoint\n self.glance = u.authenticate_glance_admin(self.keystone)\n\n keystone_ip = self.keystone_sentry.info['public-address']\n keystone_relation = self.keystone_sentry.relation(\n 'identity-service', 'swift-proxy:identity-service')\n\n # Create a demo tenant/role/user\n self.demo_tenant = 'demoTenant'\n self.demo_role = 'demoRole'\n self.demo_user = 'demoUser'\n self.demo_project = 'demoProject'\n self.demo_domain = 'demoDomain'\n\n if (self._get_openstack_release() >= self.xenial_queens or\n api_version == 3):\n self.create_users_v3()\n self.demo_user_session, _ = u.get_keystone_session(\n keystone_ip,\n self.demo_user,\n 'password',\n api_version=3,\n user_domain_name=self.demo_domain,\n project_domain_name=self.demo_domain,\n project_name=self.demo_project\n )\n self.keystone_demo = keystone_client_v3.Client(\n session=self.demo_user_session)\n self.service_session, _ = u.get_keystone_session(\n keystone_ip,\n keystone_relation['service_username'],\n keystone_relation['service_password'],\n api_version=3,\n user_domain_name=keystone_relation['service_domain'],\n project_domain_name=keystone_relation['service_domain'],\n project_name=keystone_relation['service_tenant']\n )\n else:\n self.create_users_v2()\n # Authenticate demo user with keystone\n self.keystone_demo = \\\n u.authenticate_keystone_user(\n self.keystone, user=self.demo_user,\n password='password',\n tenant=self.demo_tenant)\n self.service_session, _ = u.get_keystone_session(\n keystone_ip,\n keystone_relation['service_username'],\n keystone_relation['service_password'],\n api_version=2,\n project_name=keystone_relation['service_tenant']\n )\n self.swift = swiftclient.Connection(session=self.service_session)", "def setUpClass(cls):\n\n cls.client = get_client()", "def setUpClass(cls):\n\n cls.client = get_client()", "def setUpClass(cls):\n\n cls.client = get_client()", "def test_client_retrieve(self):\n pass", "def setUp(self):\n self.c = Client(host=\"localhost\")", "def _initialize_tests(self):\n # Access the sentries for inspecting service units\n self.pxc_sentry = self.d.sentry['percona-cluster'][0]\n self.keystone_sentry = self.d.sentry['keystone'][0]\n self.cinder_sentry = self.d.sentry['cinder'][0]\n u.log.debug('openstack release val: {}'.format(\n self._get_openstack_release()))\n u.log.debug('openstack release str: {}'.format(\n 
self._get_openstack_release_string()))\n self.keystone_ip = self.keystone_sentry.relation(\n 'shared-db',\n 'percona-cluster:shared-db')['private-address']\n self.set_api_version(2)\n # Authenticate keystone admin\n self.keystone_v2 = self.get_keystone_client(api_version=2)\n self.keystone_v3 = self.get_keystone_client(api_version=3)\n self.create_users_v2()", "def setUp(self):\n\n self.client = None\n if conf.options.get_value('runlive') == 'true':\n self.client = gdata.analytics.client.AnalyticsClient()\n self.client.http_client.debug = True\n\n conf.configure_client(\n self.client,\n 'AnalyticsClientTest',\n self.client.auth_service)", "def test_create_virtual_account_client(self):\n pass", "def client():\n from csuibot import app\n app.config['TESTING'] = True\n return app.test_client()", "def setUp(self):\n from logi_circle import LogiCircle\n\n self.logi = LogiCircle(client_id=CLIENT_ID,\n client_secret=CLIENT_SECRET,\n redirect_uri=REDIRECT_URI,\n cache_file=CACHE_FILE,\n api_key=API_KEY)\n self.fixtures = FIXTURES\n self.client_id = CLIENT_ID\n self.client_secret = CLIENT_SECRET\n self.redirect_uri = REDIRECT_URI\n self.cache_file = CACHE_FILE\n\n self.loop = asyncio.new_event_loop()", "def setUp(self):\n self.client = APIClient()\n self.user = self.make_user()\n self.detail_url = self.base_url + '{}/'", "def test_open_api(self):\n response = self.client.get(self.initiatives_url)\n self.assertEqual(response.status_code, status.HTTP_200_OK)", "def setUp(self):\n # Get API keys from file\n keys_file = open(\"keys\", \"r\")\n keys = json.loads(keys_file.read())\n keys_file.close()\n\n # Set standard values for testing\n self.dataset = \"FORMS\"\n self.table = \"Agencies\"\n self.table2 = \"AgencyForms\"\n self.badstr = \"blah\"\n\n # Create authenticated and unauthenticated instances of DOLAPI\n self.unauth = DOLAPI()\n self.badauth = DOLAPI(self.badstr, self.badstr * 2)\n self.auth = DOLAPI(str(keys['key']), str(keys['secret']))", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n BaseTest.__init__(self)\n self.client = APIClient()\n self.user = User.objects.create_user(\n self.user_name, self.user_email, self.password)\n self.user.is_active = True\n self.user.is_email_verified = True\n self.user.save()\n self.login_response = self.client.post(\n \"/api/users/login/\",\n self.login_data,\n format=\"json\")", "def test_init(self):\n with self.assertRaises(ValueError):\n TraxionPay(api_key=self.api_key)", "def setUp(self):\n self.app = init_api()", "def setUp(self):\n self.hass = get_test_home_assistant()\n with requests_mock.Mocker() as mock_req:\n self.setup_api(MOCK_DATA, mock_req)\n self.addCleanup(self.hass.stop)", "def setUp(self):\n self.app = app.test_client()\n self.api = MockApi()\n self.api.reset()\n for gateway in GATEWAYS:\n if gateway == 'CheapPaymentGateway':\n item = self.api.put(\n gateway,\n 'available',\n 0,\n )", "def setUp(self):\n self.app = create_app('testing')\n self.client = self.app.test_client()", "def setup_class(cls):\n cls.client = APP.test_client()", "def test_read_o_auth_client(self):\n pass", "def test_create_api_key(self):\n pass", "def __init__(self, test_obj: 'T0TestBase') -> None:\n\n self.test_obj = test_obj\n self.client = test_obj.client", "def setUp(self):\n self.app = app.test_client()\n self.api = MockApi()\n self.api.reset()\n for gateway in GATEWAYS:\n if gateway == 'PremiumPaymentGateway':\n item = 
self.api.put(\n gateway,\n 'available',\n 0,\n )", "def test_constructor_all_args(self):\n test_utils.generate_test_config_file()\n expected_auth = (\"hello\", \"world\")\n expected_url = \"http://wat.com/testing.json\"\n client = PowerTrackClient(_dummy_callback, auth=expected_auth, url=expected_url, config_file_path=config_file)\n\n self.assertEqual(expected_auth[0], client.auth[0])\n self.assertEqual(expected_auth[1], client.auth[1])\n self.assertEqual(expected_url, client.url)", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n\n self.client = app.test_client()\n app.config['TESTING'] = True", "def setUp(self):\n self.app = app.test_client()\n self.headers = {\n 'Content-Type':'application/json',\n 'Authorization': 'Basic %s' % b64encode(b\"relpek:puorg\").decode(\"ascii\")\n }", "def test_client_create(self):\n pass", "def test_authtoken_init(self):\n auth_client = self.fixtures.auth_client\n crusoe = self.fixtures.crusoe\n result = models.AuthToken(\n auth_client=auth_client, user=crusoe, scope='id', validity=0\n )\n self.assertIsInstance(result, models.AuthToken)\n self.assertEqual(result.user, crusoe)\n self.assertEqual(result.auth_client, auth_client)", "def setUp(self): \n self.client = app.test_client()\n self.acceso = login(self.client)\n identity_loaded.connect(_on_principal_init)", "def test_create_o_auth_client_authorization(self):\n pass", "def test_set_api_key(self):\n\n api_key = 'abc'\n project_id = '123'\n\n kaput.init(api_key, project_id)\n\n self.assertEqual(api_key, kaput._API_KEY)\n self.assertEqual(project_id, kaput._PROJECT_ID)\n self.assertFalse(kaput._DEBUG)\n self.assertEqual(kaput._handle_exception, sys.excepthook)", "def setUp(self):\n\t\tself.conn = Client([\"127.0.0.1:11211\"], debug = 1)", "def setUp(self):\n self.clnt = CvpClient()\n nodes = [\"1.1.1.1\"]\n self.clnt.nodes = nodes\n self.clnt.node_cnt = len(nodes)\n self.clnt.node_pool = cycle(nodes)\n self.api = CvpApi(self.clnt)", "def setUp(self):\n super(TestControlsImport, self).setUp()\n self.client.get(\"/login\")", "def setUp(self):\n self.client = RequestsClient()\n self.method = 'GET'\n self.url = 'http://github.com/ojengwa'\n self.headers = {}", "def test_init(self):\n print os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\n self.assertEqual(len(self.app_mgr.f2f_api_key), 32)\n self.assertEqual(self.app_mgr._continue, True)" ]
[ "0.73480415", "0.71817064", "0.69348216", "0.6902786", "0.6898086", "0.68290806", "0.68290806", "0.6813784", "0.6806354", "0.67816716", "0.6775598", "0.6775598", "0.6775598", "0.6712668", "0.6631375", "0.65852684", "0.65815634", "0.6529811", "0.6472687", "0.6472687", "0.6472687", "0.6472687", "0.6447478", "0.64440966", "0.6405738", "0.63905644", "0.63741815", "0.6279089", "0.6278553", "0.62766874", "0.6269372", "0.625751", "0.62438613", "0.6239983", "0.6239983", "0.62125194", "0.61642706", "0.61497724", "0.6145362", "0.6142839", "0.61354023", "0.6130183", "0.6127149", "0.61191964", "0.61191964", "0.61191964", "0.61191964", "0.61191964", "0.61050457", "0.61040926", "0.6103726", "0.6088002", "0.60848993", "0.60746586", "0.6071686", "0.60712695", "0.60699606", "0.60599715", "0.6042989", "0.6039333", "0.6039333", "0.6039333", "0.6017007", "0.60100424", "0.6006237", "0.60044134", "0.5999542", "0.5983981", "0.59748626", "0.5970896", "0.59615636", "0.5958287", "0.59539515", "0.59539515", "0.5946084", "0.59429896", "0.5941315", "0.59412384", "0.5932894", "0.59301597", "0.59247464", "0.5918724", "0.59183496", "0.5908873", "0.59064007", "0.5898524", "0.5894799", "0.5894799", "0.5894799", "0.5894121", "0.58933204", "0.58928007", "0.5886987", "0.5882218", "0.5878135", "0.58693844", "0.5865535", "0.5863262", "0.5862124", "0.5858915" ]
0.763972
0
Test whether a section exists.
def has_section(self, section): return self.cfg.has_section(section)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_section_exist(self, section_name: str) -> bool:\n pass", "def has_section(self, section):\n raise NotImplementedError()", "def has_section(self, section):\n if section in self._dict:\n return True\n return False", "def has_section(self, section):\n\n return self.parser.has_section(section)", "def has_section(self, section):\n return section in self._sections", "def is_section_exist(self, section_name: str) -> bool:\n success = False\n try:\n self._vault_api.read_secret(path=section_name.upper(), mount_point=self.mount_point)\n success = True\n except InvalidPath:\n pass\n return success", "def has_section(self, section):\r\n return self.configparser.has_section(section)", "def has_section(self,name):\n return self.__config.has_section(name)", "def is_section_exist(self, section_name: str) -> bool:\n config = ConfigParser(allow_no_value=True)\n config.read(self.connection_string)\n\n result = False\n for section in config.sections():\n if section.lower().replace(' ', '_') == section_name.lower().replace(' ', '_'):\n result = True\n\n return result", "def has_section(self, section: str):\n if section == _DEFAULT:\n return True\n return self._config_parser.has_section(section)", "def config_has_section(section):\n return __CONFIG.has_section(section)", "def exists_ini_section( inifile, section ):\n found_section = False\n\n # read jobfile\n with open(inifile) as f:\n # loop over all lines\n for line in f:\n # until we find the section\n if \"[\"+section+\"]\" in line and line[0]!=\";\" and line[0]!=\"!\" and line[0]!=\"#\":\n found_section = True\n\n\n return found_section", "def has(self, section, setting):\n if section not in self.keys():\n return False\n if setting not in self[section].keys():\n return False\n return True", "def has_option(self, section, option):\n try:\n if option in self._dict[section]:\n return True\n return False\n except KeyError as e:\n raise NoSectionError(str(e)) from None", "def is_attr_exist(self, section_name: str, attr_name: str) -> bool:\n pass", "def _is_a_registered_section_title(self, sections, section_title):\n section_title = self._normalize_data(section_title)\n r = False\n for section in sections:\n r = any([True for lang, sectitle in section['titles'] if self._normalize_data(sectitle) == section_title])\n if r:\n break\n return r", "def _is_on_section(self, section_title, subsection_title):\r\n current_section_list = self.q(css='nav>div.chapter.is-open>h3>a').text\r\n current_subsection_list = self.q(css='nav>div.chapter.is-open li.active>a>p').text\r\n\r\n if len(current_section_list) == 0:\r\n self.warning(\"Could not find the current section\")\r\n return False\r\n\r\n elif len(current_subsection_list) == 0:\r\n self.warning(\"Could not find current subsection\")\r\n return False\r\n\r\n else:\r\n return (\r\n current_section_list[0].strip() == section_title and\r\n current_subsection_list[0].strip().split('\\n')[0] == subsection_title\r\n )", "def _check_required_section_found(self, docstring: PetscDocStringImpl) -> None:\n if not self and self.required:\n diag = self.diags.section_header_missing\n mess = f'Required section \\'{self.titles[0]}\\' not found'\n docstring.add_diagnostic_from_source_range(\n Diagnostic.Kind.ERROR, diag, mess, docstring.extent, highlight=False\n )\n return", "def check_section_exists(section_number, report=None):\n if report is None:\n report = MAIN_REPORT\n if not report['source']['success']:\n return False\n found = int((len(report['source']['sections']) - 1) / 2)\n if section_number > found:\n 
report.attach('Syntax error', category='Syntax', tool='Source',\n group=report['source']['section'],\n mistake=(\"Incorrect number of sections in your file. \"\n \"Expected {count}, but only found {found}\"\n ).format(count=section_number, found=found))", "def _check_section(self, section: Union[str, list], search_in_default_config: bool = None):\r\n section = None if section is None else ConfigDict.TO_KEY_FUNC(section)\r\n search_in_default_config = self._search_in_default_config if search_in_default_config is None \\\r\n else search_in_default_config\r\n if section not in self.sections() and section is not None and search_in_default_config:\r\n # if the section doesn't exist, append the default configuration to the configuration\r\n self.add_default_config_sections(sections=section)\r\n # self.reload_default(write=False, how='append') # old method\r\n logger.debug(\"Section(s) '{}' of default configuration appended to config.\".format(section))\r\n return section", "def is_section(line: str) -> bool:\n return len(line) > 0 and (line[0] == '[' and line[len(line) - 1] == ']')", "def is_attr_exist(self, section_name: str, attr_name: str) -> bool:\n success = False\n try:\n response = self._vault_api.read_secret(path=section_name.upper(), mount_point=self.mount_point)\n keys = list(response[\"data\"][\"data\"].keys())\n success = attr_name.upper() in keys\n except InvalidPath:\n pass\n return success", "def has_option(self, section, option):\n raise NotImplementedError()", "def remove_section(self, section):\n existed = section in self._sections\n if existed:\n del self._sections[section]\n return existed", "def hasoption(self, option):\n return self.has_option(NOSECTION, option)", "def check_sections(filenames, sections):\n for section in sections:\n # Make sure the path ends with a /\n if not section.endswith(\"/\"):\n section += \"/\"\n pattern = section.replace(\"/\", r\"\\/\") + r\"\\d+.*\"\n for fname in filenames:\n match = re.match(pattern, fname)\n if match is not None:\n return fname\n return False", "def _assert_section(config, name):\n if name not in config.sections():\n raise RuntimeError(\n 'Missing the section [{}] in the config file.'.format(name))", "def definition_exists(name: str) -> bool:\n try:\n return bool(lookup_definition(name))\n except:\n return False", "def exists(self):\n return True", "def exists(self):\n return True", "def create_section(section):\n\tif not config_parser or config_parser.has_section(section):\n\t\treturn False\n\tconfig_parser.add_section(section)\n\treturn True", "def is_attr_exist(self, section_name: str, attr_name: str) -> bool:\n config = ConfigParser(allow_no_value=True)\n config.read(self.connection_string)\n\n result = False\n for section in config.sections():\n if section.lower().replace(' ', '_') == section_name.lower().replace(' ', '_'):\n for attr in config[section]:\n if attr.lower().replace(' ', '_') == attr_name.lower().replace(' ', '_'):\n result = True\n\n return result", "def _hasOldStyleSection(self, inifile):\n sections = inifile.getSections()\n return 'main' in sections", "def config_has_option(section, option):\n return __CONFIG.has_option(section, option)", "def _get_section(self, sections, section_id):\n for section in sections:\n\t if section['section_id'] == section_id:\n\t return section", "def _get_section(self, sections, section_id):\n for section in sections:\n\t if section['section_id'] == section_id:\n\t return section", "def check_pe_sections(self, pe):\n res = []\n for section in pe.sections:\n if b\"!This 
program cannot be run in DOS mode\" in section.get_data()[:400] or\\\n b\"This program must be run under Win32\" in section.get_data()[:400]:\n res.append(section.Name.decode('utf-8').strip('\\x00'))\n\n if len(res) > 0:\n print(\"[+] PE header in sections %s\" % \" \".join(res))\n return True\n return False", "def exists_ini_parameter( inifile, section, keyword ):\n found_section = False\n found_parameter = False\n\n # read jobfile\n with open(inifile) as f:\n # loop over all lines\n for line in f:\n\n # once found, do not run to next section\n if found_section and line[0] == \"[\":\n found_section = False\n\n # until we find the section\n if \"[\"+section+\"]\" in line:\n found_section = True\n\n # only if were in the right section the keyword counts\n if found_section and keyword+\"=\" in line:\n found_parameter = True\n\n return found_parameter", "def test_get_valid_section(self):\n arm = self.ar[2009][11]\n ars = arm['general']\n self.assertTrue(isinstance(ars, awstats_reader.AwstatsSection))", "def exists(self):\n return self.pod.file_exists(self.source_pod_path)", "def exists(self):\r\n return os.path.exists(self.full_path)", "def path_in_section(path, section):\n if not 'file' in section:\n return False\n\n for pattern in section['file']:\n regex = pattern_to_regex(pattern)\n\n match = re.match(regex, path)\n if match:\n # Check if there is an exclude pattern that applies\n for pattern in section['exclude']:\n regex = pattern_to_regex(pattern)\n\n match = re.match(regex, path)\n if match:\n return False\n\n return True\n\n return False", "def is_in_section_title(node: Element) -> bool:\n from sphinx.util.nodes import traverse_parent\n\n warnings.warn('is_in_section_title() is deprecated.',\n RemovedInSphinx30Warning, stacklevel=2)\n\n for ancestor in traverse_parent(node):\n if isinstance(ancestor, nodes.title) and \\\n isinstance(ancestor.parent, nodes.section):\n return True\n return False", "def exists(self, path: str) -> bool:\n pass", "def has(self, key):\n return os.path.isfile(self._filename(key))", "def test_no_section_by_section(self):\n notice = {\n \"document_number\": \"111-22\",\n \"fr_volume\": 22,\n \"cfr_part\": \"100\",\n \"publication_date\": \"2010-10-10\"\n }\n s = SectionBySection(None, notices=[notice])\n self.assertEqual(None, s.process(Node(label=['100', '22'])))", "def exists(self):\n try:\n select_template(self.get_paths())\n return True\n except TemplateDoesNotExist:\n return False", "def has_option(self, option, section = None):\n\n if section is None:\n section = self.default_section\n return self.cfg.has_option(section, option)", "def test_no_such_conf_section(self):\n del self.oslo_config_dict['heat']\n self.assert_service_disabled(\n 'orchestration',\n \"No section for project 'heat' (service type 'orchestration') was \"\n \"present in the config.\",\n )", "def exists(self) -> bool:\n p = pathlib.Path(self.summary_path)\n return p.exists()", "def is_file_exists(self):\n pass", "def has_option(self, section, option):\n if not section or section == DEFAULTSECT:\n option = self.optionxform(option)\n return option in self._defaults\n elif section not in self._sections:\n return False\n else:\n option = self.optionxform(option)\n return (option in self._sections[section]\n or option in self._defaults)", "def exists(self):\n try:\n self.world.find(self.ehandle)\n except KeyError:\n return False\n else:\n return True", "def has(self, tag, index):\n return self.get(tag, index) is not None", "def entry_exists(title):\n try:\n f = 
default_storage.open(f\"entries/{title}.md\")\n return True\n\n except FileNotFoundError:\n return False", "def _check(isamAppliance, realm, subsection):\n ret_obj = search(isamAppliance, realm, subsection)\n\n realm = \"{0}/{1}\".format(\"realms\", realm)\n\n logger.debug(\"Looking for existing kerberos subsection: {1} in realm: {0} in: {2}\".format(realm, subsection,\n ret_obj['data']))\n if ret_obj['data'] != {}:\n if ret_obj['data']['name'] == subsection:\n logger.debug(\"Found kerberos realm's subsection: {0}\".format(subsection))\n return True\n return False", "def __getitem__(self, section):\n result = self.get(section)\n\n if result is None:\n raise KeyError(section)\n\n return result", "def exists(self) -> bool:\n try:\n result = self.get()\n except KeyError:\n return False\n return True", "def has(self, key):\n return False", "def exists(self):\n return os.path.isfile(self.location)", "def _has(self, key):\n path = self._get_key_path(key)\n return exists(path)", "def exists(identifier, network):\n foo = next(load(identifier, network), None)\n return foo is not None", "def exists(self) -> bool:\n return self._file_exists()", "def is_course_exists(self):\n db = Course._file.read_db()\n courses = db[\"courses\"]\n for crs in courses:\n if crs[\"course_name\"] == self._course_name:\n return True\n break\n return False", "def isFirstSection(section):\n board = section.board\n sectionsQueryset = board.section_set.all()\n\n return section.id == sectionsQueryset[0].id", "def has_item(self, usage_key):\r\n if usage_key.block_id is None:\r\n raise InsufficientSpecificationError(usage_key)\r\n try:\r\n course_structure = self._lookup_course(usage_key)['structure']\r\n except ItemNotFoundError:\r\n # this error only occurs if the course does not exist\r\n return False\r\n\r\n return self._get_block_from_structure(course_structure, usage_key.block_id) is not None", "def key_exists(key, dictionary):\n return key in dictionary and dictionary[key] is not None", "def hasItem(self, path): \n\t\treturn (path in self.items and self.items[path])", "def is_config_exist(self) -> bool:\n pass", "def has(self, key):\n return self.data.get(key, None) is not None", "def exists(self):\n return self.path.exists()", "def exists(self, path):", "def find_section(amdpar_xml):\n siblings = [s for s in amdpar_xml.itersiblings()]\n\n if len(siblings) == 0:\n return find_lost_section(amdpar_xml)\n\n for sibling in siblings:\n if sibling.tag == 'SECTION':\n return sibling\n\n paragraphs = [s for s in siblings if s.tag == 'P']\n if len(paragraphs) > 0:\n return fix_section_node(paragraphs, amdpar_xml)", "def exists():\n check50.include(\"data\")\n check50.exists(\"adventure.py\")\n check50.exists(\"room.py\")", "def Exists(self, path: str) -> bool:\n ...", "def has(self, block, name):\n try:\n return self._kvs.has(self._key(block, name))\n except KeyError:\n return False", "def has_option(self, option):\n\t\treturn self.config_parser.has_option(self.section_name, option)", "def object_exists(self, fname):\n return True", "def exists(self):\n return _os.path.exists(self.__str__())", "def has_item(self, usage_key):\r\n try:\r\n self._find_one(usage_key)\r\n return True\r\n except ItemNotFoundError:\r\n return False", "def exists(self):\n return self.properties.get(\"Exists\", None)", "def key_exists(dictionary, key):\n\n exists = dictionary.get(key, None)\n return exists is not None", "def exist(self):\n return self.file_path.exists()", "def exists(self, url):\n return (self.base_path / url).exists()", "def 
object_exists(self, fname):\n return False", "def exists(path: str) -> bool:\n pass", "def is_config_exist(self) -> bool:\n return True", "def exists(self, selector):\n return not self.main_frame.findFirstElement(selector).isNull()\n\n\n #TODO: Still not work.", "def exists(self, Search_ID):\n if self.get_id(Search_ID) is None:\n return False\n else:\n return True", "def exists(self, path):\n raise TestException(self.settings_merged)", "def exists(path):\n return get_instance(path).exists(path)", "def does_exist(self, index):\n if index in self.map:\n return True\n return False", "def file_is_present(self, key=None):\n return os.path.isfile(self.file_path(key))", "def exists(self, key_name: str) -> bool:\n pass", "def has_item(self, usage_key):\r\n return usage_key in self.modules[usage_key.course_key]", "def does_location_exist(usage_key):\r\n try:\r\n search.path_to_location(modulestore(), usage_key)\r\n return True\r\n except ItemNotFoundError:\r\n # If the problem cannot be found at the location received from the grading controller server,\r\n # it has been deleted by the course author.\r\n return False\r\n except NoPathToItem:\r\n # If the problem can be found, but there is no path to it, then we assume it is a draft.\r\n # Log a warning in any case.\r\n log.warn(\"Got an unexpected NoPathToItem error in staff grading with location %s. \"\r\n \"This is ok if it is a draft; ensure that the location is valid.\", usage_key)\r\n return False", "def get_section(self, section_name):\n section_name = JSONSchema.format_section_name(section_name).lower()\n try:\n return self._sections[section_name]\n except KeyError:\n raise AquaError('No section \"{0}\"'.format(section_name))", "def __contains__(self, key):\n found = True\n try:\n self.__getitem__(key)\n except:\n found = False\n return found", "def test_section_is_correct(self):\n res = self.client().post('/api/v1/sections/', headers={'Content-Type': 'application/json'},\n data=json.dumps(self.section))\n get_res = self.client().get('api/v1/sections/', headers={'Content-Type': 'application/json'})\n records = json.loads(get_res.data)\n self.assertEqual(records[0]['title'], 'Test Title')", "def _writeSection(self, sectionName, options):\n return True" ]
[ "0.8818048", "0.8469707", "0.8389857", "0.8208384", "0.8178421", "0.8061962", "0.7974788", "0.7825325", "0.752125", "0.7518481", "0.7350608", "0.7341947", "0.7030508", "0.6875858", "0.6844375", "0.6687593", "0.6592266", "0.6513766", "0.64952224", "0.645685", "0.6444705", "0.6420969", "0.63553685", "0.62560624", "0.621761", "0.621413", "0.62039846", "0.61447126", "0.60513055", "0.60513055", "0.6020997", "0.6013222", "0.6005622", "0.5972401", "0.596406", "0.596406", "0.59359956", "0.5931756", "0.59154195", "0.5909455", "0.5883776", "0.5851279", "0.58371204", "0.581405", "0.580978", "0.5807553", "0.5796652", "0.5791947", "0.57783407", "0.5766391", "0.5760288", "0.57548636", "0.5740524", "0.5733833", "0.5711501", "0.57047933", "0.57023513", "0.569992", "0.5685643", "0.56849176", "0.56787175", "0.56618434", "0.56615156", "0.56591254", "0.56530195", "0.5646308", "0.56155705", "0.56108856", "0.56057173", "0.56047595", "0.5598153", "0.55981386", "0.5596847", "0.559604", "0.5587931", "0.5585464", "0.5584814", "0.55618817", "0.55600405", "0.55589277", "0.5558373", "0.5557449", "0.55337375", "0.55271256", "0.5526083", "0.5516562", "0.5502945", "0.5494479", "0.54927576", "0.5487774", "0.54832", "0.5482011", "0.5480793", "0.5478158", "0.5468319", "0.5466433", "0.54577076", "0.54548895", "0.5451786", "0.54516405" ]
0.82564044
3
Test whether an option exists.
def has_option(self, option, section = None): if section is None: section = self.default_section return self.cfg.has_option(section, option)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def has_option(self, key):\n\n return key in self.__opt", "def has_option(self, name):\n return self.option_settings[name]", "def has_option(self, option):\n\t\treturn self.config_parser.has_option(self.section_name, option)", "def has_option(self, option):\n splitvals = option.split('/')\n section, key = \"/\".join(splitvals[:-1]), splitvals[-1]\n\n return RawConfigParser.has_option(self, section, key)", "def has_option(self, section, option):\n try:\n if option in self._dict[section]:\n return True\n return False\n except KeyError as e:\n raise NoSectionError(str(e)) from None", "def config_has_option(section, option):\n return __CONFIG.has_option(section, option)", "def has_option(self, section, option):\n raise NotImplementedError()", "def hasoption(self, option):\n return self.has_option(NOSECTION, option)", "def has_option(self, key):\n if self.integration is None:\n return False\n return self.integration.has_option(f'{self.get_config_name()}.{key}')", "def contains(self, option: str) -> bool:\n return self._get_index(option) is not None", "def option_is_known(self, opt):\n return opt in self.results", "def hasOption(self, *args):\n return _libsbml.ConversionProperties_hasOption(self, *args)", "def has_option(self, section, option):\n if not section or section == DEFAULTSECT:\n option = self.optionxform(option)\n return option in self._defaults\n elif section not in self._sections:\n return False\n else:\n option = self.optionxform(option)\n return (option in self._sections[section]\n or option in self._defaults)", "def cmd_has_option(self, executable, search_option, arg=None):\n if not executable:\n return False\n arg_list = []\n if arg and is_genstr(arg):\n arg_list = [arg]\n elif isinstance(arg, list):\n arg_list = arg\n out = Uprocess().get_output([executable] + arg_list + [\"--help\"])\n if out and search_option in re.split(r\"[=|\\*\\[\\]\\n,; ]+\", out):\n return True\n return False", "def has_option(self, method, option):\n\t\targs = self.__parse_docstring(getattr(self, method).__doc__)\n\t\tif \"arguments\" in args:\n\t\t\treturn any(option == label for label in args[\"arguments\"].keys())\n\t\treturn False", "def has_option(self, section, option, defaults=True):\n if ProductSetting.exists(self.env, self.product, section, option):\n return True\n for parent in self.parents:\n if parent.has_option(section, option, defaults=False):\n return True\n return defaults and (section, option) in Option.registry", "def isset(cls,name):\n inst = cls.inst()\n if name in inst.options and \\\n len(inst.options[name]) > 0:\n return True\n else:\n return False", "def has_option(self, section, option):\n\n # Underscore-style is the recommended configuration style\n option = option.replace('-', '_')\n if ConfigParser.has_option(self, section, option):\n return True\n\n # Support dash-style option names (with deprecation warning).\n option_alias = option.replace('_', '-')\n if ConfigParser.has_option(self, section, option_alias):\n warn = 'Configuration [{s}] {o} (with dashes) should be avoided. 
Please use underscores: {u}.'.format(\n s=section, o=option_alias, u=option)\n warnings.warn(warn, DeprecationWarning)\n return True\n\n return False", "def _is_opt(self, opt):\r\n\r\n # TODO: raise BadOptionError for unknown option\r\n if len(opt) < 2 or opt[0] != '-':\r\n return False\r\n if opt[1] != '-':\r\n return self._short_opt.get(opt[0:2]) is not None\r\n try:\r\n if \"=\" in opt:\r\n (opt, next_arg) = opt.split(\"=\", 1)\r\n if self._match_long_opt(opt):\r\n return True\r\n except:\r\n pass\r\n\r\n return False", "def toolHasOptions(*args, **kwargs)->bool:\n pass", "def thereis_short_opt(options, opt_char):\n if len(opt_char) != 1:\n raise RuntimeError(\n \"This function is for one character options, opt_char>%s<.\" %\n opt_char)\n off = string.find(options, opt_char)\n if off >= 0:\n return (off < len(options)) and (options[off+1] == ':')\n else:\n return None", "def is_manually_set(option_name: str) -> bool:\n return get_where_defined(option_name) not in (\n ConfigOption.DEFAULT_DEFINITION,\n ConfigOption.STREAMLIT_DEFINITION,\n )", "def _is_opt_registered(opts, opt):\n if opt.dest in opts:\n if opts[opt.dest]['opt'] != opt:\n raise DuplicateOptError(opt.name)\n return True\n else:\n return False", "def _is_unset(option_name: str) -> bool:\n return get_where_defined(option_name) == ConfigOption.DEFAULT_DEFINITION", "def __assert_option(self, key):\n\n if not self.has_option(key):\n raise KeyError(\"No such option.\")", "def is_option(cls: Type['Option[T]'], value: Any) -> bool:\n return isinstance(value, Option)", "def test_get_option_exist(self):\n assert_equals(self.c.get(\"cuckoo\")[\"debug\"], False)\n assert_equals(self.c.get(\"cuckoo\")[\"tcpdump\"], \"/usr/sbin/tcpdump\")\n assert_equals(self.c.get(\"cuckoo\")[\"critical_timeout\"], 600)", "def option_is_default(self, opt):\n return opt in self.results and self.results[opt][1] is self._is_default", "def _is_popt_installed(self):\n try:\n Cmd.sh_e('{0} --query --whatprovides {1} --quiet'\n .format(self.rpm.rpm_path, self.package_popt_name))\n return True\n except CmdError:\n return False", "def _is_option(line):\n return '=' in line", "def check_options(options, parser):\n if not options.get('release_environment', None):\n print(\"release environment is required\")\n parser.print_help()\n return os.EX_USAGE\n\n return 0", "def option_is_extra(self, opt):\n return opt in self.results and self.results[opt][1] is self._is_extra", "def has(self, *options: str) -> bool:\n return bool(self.flags & self.mask(*options))", "def is_valid_option(cls, id_):\n return id_ in cls.CHOICES", "def has_value(key: str) -> bool:\n Config.__get()\n assert Config.__config is not None\n return Config.__config.has_option(\"wsgi\", key)", "def __check_option(self,name):\n # Check if option exists\n if not self.__options.has_key(name):\n raise AttributeError('(EVOGTK - Preferences Helper) Preferences object has no attribute \\'%s\\'' % name)\n # Check for option type\n if self.__options[name][0] not in self.__supported_types:\n raise TypeError('(EVOGTK - Preferences Helper) Inconsistent data type \\'%s\\' for option \\'%s\\'' % (type,name)) \n return self.__options[name]", "def _is_valid_platform_option(self, name: str) -> bool:\n disallowed_platform_options = self.disallow.get(self.platform, set())\n if name in disallowed_platform_options:\n return False\n\n allowed_option_names = self.default_options.keys() | self.default_platform_options.keys()\n\n return name in allowed_option_names", "def option_flag(argument: Optional[str]) -> bool:\n if 
argument and argument.strip():\n raise ValueError('no argument is allowed; \"%s\" supplied' % argument)\n return True", "def checkOption(options, name, propname, optletter, encrypted=False, cdfPropname = None):\n if name not in options:\n ret = getPgaasPropValue(propname, encrypted=encrypted, dflt=None, skipComplaining=True)\n if ret is None and cdfPropname is not None:\n ret = getCdfPropValue(cdfPropname, encrypted=encrypted)\n options[name] = ret\n requireOption(\"either %s or config[%s]\" % (optletter, propname), options[name])", "def has_correct_options(options):\n\n has_correct = False\n\n for option in options:\n if option.get('correct') is True:\n has_correct = True\n\n if not has_correct:\n return c('error_need_correct')", "def read_boolean_option(config, section, option):\n if not config.has_section(section):\n return\n\n return config.has_option(section, option)", "def _is_valid_global_option(self, name: str) -> bool:\n allowed_option_names = self.default_options.keys() | PLATFORMS | {\"overrides\"}\n\n return name in allowed_option_names", "def getOption(arg):\n return (False, \"\", \"\")", "def find_option(self, name, namespace=...):\n ...", "def check_settings_existence(self):\n options = [\n 'AUTH_LDAP_SERVER_URI',\n 'AUTH_LDAP_USER_SEARCH_BASE',\n 'AUTH_LDAP_USER_USERNAME_ATTR',\n 'AUTH_LDAP_PROTOCOL_VERSION',\n 'AUTH_LDAP_BIND_DN',\n 'AUTH_LDAP_BIND_PASSWORD',\n ]\n for option in options:\n if not hasattr(settings, option):\n logger.error('LDAP::check_settings_existence\\tSetting %s is '\n 'not provided', option)\n sys.exit(1)", "def has(self, name):\n return name in self._defaults", "def get_option(self, option):\n if not self._options.has_key(option):\n raise KeyError, \"Invalid option: \" + option\n else:\n return self._options.get(option)", "def bool_option (arg: Any) -> bool:\n return True", "def _check_options(self, options):\r\n xmi_file = options.get(\"xmi_file\")\r\n if not xmi_file or not os.path.exists(xmi_file):\r\n self._error(\"Select XMI file\")\r\n return \r\n\r\n target_folder = options[\"target_folder\"]\r\n if not target_folder:\r\n self._error(\"Select target folder\")\r\n return\r\n \r\n if not os.path.exists(target_folder):\r\n self._error(\"Target folder not exists\")\r\n return \r\n \r\n return True", "def find_option(self, option_name, default=None):\n value = (\n getattr(self.pconfig.option, option_name, None) or\n self.pconfig.getini(option_name)\n )\n return value if value else default", "def requireOption(nm, val):\n return require(\"option\", nm, val)", "def flag_exists(self):\n return os.path.exists(self.flag_file)", "def _is_popt_installed(self):\n raise NotImplementedError('Implement this method.')", "def validate(dic, option_list):\n\tfor key in dic.viewkeys():\n\t\tif key in option_list:\n\t\t\tfor option in option_list:\n\t\t\t\tif option != key:\n\t\t\t\t\tif dic[option] and dic[key]:\n\t\t\t\t\t\traise click.UsageError('Invalid option combination --%s \\\n\t\t\t\t\t\t\tcannot be used with --%s' % (option, key))\n\n\treturn True", "def __contains__(self, key):\n return key in self._group._opts", "def option_not_exist_msg(option_name, existing_options):\n result = [\"option '%s' doesn't exist\" % option_name,\n \"Possible options are %s\" % existing_options or \"none\"]\n return \"\\n\".join(result)", "def option_is_macroarg(self, opt):\n return opt in self.results and ( \\\n self.results[opt][1] is self._is_macroarg or \\\n self.results[opt][1] is self._is_extra )", "def has_section(self, section):\r\n return 
self.configparser.has_section(section)", "def __contains__(self, key):\n return key in self._opts or key in self._groups", "def getbool(option, default = None):\n\treturn _cfg.getboolean('rosshm', option, fallback = default)", "def test_get_property_missing(self):\r\n try:\r\n value = self.config.option2\r\n assert value\r\n except Exception as e:\r\n self.assertIsInstance(e, OptionValueNotSetError)\r\n self.assertNotIn('option2', self.config.values)", "def get_bool(self, option, argument=None):\n return bool(self.get(option, argument))", "def exists(self, path, flag='-e'):\r\n try:\r\n return self._call(\"-test\", flag, path) == 0\r\n except subprocess.CalledProcessError:\r\n return False", "def task_submit_check_options():\n if not (task_has_option('all') or task_has_option('collection') \\\n or task_has_option('field') or task_has_option('pattern') \\\n or task_has_option('matching') or task_has_option('recids')):\n task_set_option('without', 1)\n task_set_option('last', 1)\n return True", "def check_options(self, options):\n return not any(not isinstance(element, str) for element in options)", "def option_is_false(self, name: str, prefix: bool = False) -> bool:\n\n return self.get_option_value(name=name, prefix=prefix) is False", "def cmakeBoolOptionIsSet(self, opt):\n\n if self.envcmake.has_key( opt ):\n\n val = str(self.envcmake.get(opt,\"\"))\n\n if val == \"1\" or val == \"ON\" or val == \"YES\":\n\n return True\n\n return False", "def exists(self, answer):\n return self.find(answer) is not None", "def getBooleanOption(aConfig, aSection, aOption):\n if aConfig.has_option(aSection, aOption):\n return aConfig.getboolean(aSection, aOption)\n else:\n # Default value. This should match the initialization done in\n # __init__ of class task in taskHandler.py\n if (aOption == \"fullScreenMode\" or\n aOption == \"formatOutput\" or\n aOption == \"compressOutput\"):\n return True\n else:\n # \"useWebDriver\"\n # \"runSlowTests\"\n # \"runSkipTests\"\n # \"useGrid\"\n return False", "def exists(self) -> bool:\n try:\n result = self.get()\n except KeyError:\n return False\n return True", "def option_bool(argument: Optional[str]) -> bool:\n if argument and argument.strip():\n output = tinydocutils.directives.choice(argument, (\"true\", \"false\"))\n return output == \"true\"\n return True", "def get_config_option(option_name, optional=False):\n option = self.options.get(option_name)\n\n if not option and optional is False:\n err = \"'{0}' is mandatory and is not set in the app.config file. 
You must set this value to run this function\".format(option_name)\n raise ValueError(err)\n else:\n return option", "def is_choice(self):\n return self.__class__.get_setting_choices(self.key, **self.get_kwargs()) is not None", "def __contains__(self, item):\n return self.settings.has(item)", "def has(self, key) -> bool:\r\n if self.get(key) is not None:\r\n return True\r\n return False", "def has(self, section, setting):\n if section not in self.keys():\n return False\n if setting not in self[section].keys():\n return False\n return True", "def get_option(self, option):\n\t\treturn self.options[option]", "def exists(parser, output_name):\n return parser.setParseAction(isNotEmpty).setResultsName(output_name)", "def is_config_exist(self) -> bool:\n pass", "def test_get_value_missing(self):\r\n name = 'option2'\r\n option = self.config.options[name]\r\n\r\n self.assertRaises(OptionValueNotSetError, self.config.get_value, name, option)\r\n self.assertNotIn(name, self.config.values)", "def has(self, tag, index):\n return self.get(tag, index) is not None", "def exists(path, fs_options={}, fs=None):\n fs, path = parse(path, fs_options=fs_options, fs=fs)\n if fs is None:\n return os.path.exists(path)\n else:\n return fs.get_file_info([path])[0].type != pa.fs.FileType.NotFound", "def exists(self):\n return bool(self.get())", "def get_option(self, name):\n option_df = self.dismod_file.option\n records = option_df[option_df.option_name == name]\n if len(records) == 1:\n return records.option_value.iloc[0]\n else:\n raise KeyError(f\"Option {name} not found in options\")", "def get_config_option(option_name, optional=False):\n option = self.options.get(option_name)\n\n if not option and optional is False:\n err = \"'{0}' is mandatory and is not set in the app.config file. You must set this value to run this function\".format(option_name)\n raise ValueError(err)\n else:\n return option", "def config_has_section(section):\n return __CONFIG.has_section(section)", "def has_preset(self, filename):\r\n\r\n return filename in self.preset_ids()", "def exists(self):\n return self.properties.get(\"Exists\", None)", "def exists(obj: Dict, path: str) -> bool:\n\n return get(obj, path) is not None", "def validate_available(parser, options):\n if not options.available:\n return\n\n if not options.manifest_id:\n parser.error(\"When specifying --available, --manifest-id is also required\")", "def _get_option(self, arg_name: str) -> Any:\n try:\n return getattr(self, f\"__{arg_name}\")\n except AttributeError as ex:\n raise AnalysisError(\n f\"The argument {arg_name} is selected but not defined. \"\n \"This key-value pair should be defined in the analysis option.\"\n ) from ex", "def get_config_option(self, option_name, optional=False):\n option = self.options.get(option_name)\n\n if not option and optional is False:\n err = \"'{0}' is mandatory and is not set in the app.config file. 
You must set this value to run this \" \\\n \"function\".format(option_name)\n raise ValueError(err)\n else:\n return option", "def is_config_exist(self) -> bool:\n return True", "def __getitem__(self, item):\n if item not in self._moptions:\n raise KeyError(\"Invalid option '%s'.\" % item)\n return self._runopts.get(item)", "def config_get_bool(section, option):\n return __CONFIG.getboolean(section, option)", "def hasCommand():\n args = sys.argv[1:]\n if '--help' in args:\n return False\n if '-h' in args:\n return False\n for arg in args:\n if arg and not arg.startswith('-'):\n return True\n return False", "def HasCaption(self):\r\n \r\n return self.HasFlag(self.optionCaption)", "def has_section(self, section):\n\n return self.cfg.has_section(section)", "def has_element(self, attrib_key, attrib_value, match_option=None):\n selector = UiSelector()\n selector.attributes(attrib_key, attrib_value, match_option)\n return UiObject(selector, self.android_device_driver).verify_exist()", "def get_option(self, key):\n return self.options[key]" ]
[ "0.8260075", "0.81346714", "0.79610306", "0.7878082", "0.78457946", "0.7843575", "0.78201646", "0.78074056", "0.77991354", "0.7590212", "0.75709635", "0.7506453", "0.74686354", "0.73545367", "0.73482585", "0.72637665", "0.723397", "0.69554585", "0.68729013", "0.6776543", "0.67231184", "0.66899663", "0.66889167", "0.6583347", "0.64299345", "0.6348826", "0.6347272", "0.63115674", "0.6291703", "0.6270133", "0.62556046", "0.6225454", "0.6207777", "0.6202631", "0.6200201", "0.6103253", "0.6094718", "0.6066677", "0.6055775", "0.60294604", "0.602693", "0.60025805", "0.5990422", "0.59897333", "0.5953136", "0.5934924", "0.59281147", "0.5920741", "0.5916128", "0.5904908", "0.5896928", "0.58926344", "0.58741194", "0.58424115", "0.58326316", "0.5827365", "0.5817272", "0.5807857", "0.57986456", "0.57901895", "0.57901114", "0.5785048", "0.57764", "0.57694554", "0.5766266", "0.57497936", "0.5749645", "0.5729422", "0.5717565", "0.5712075", "0.57095325", "0.57037413", "0.5676746", "0.5653343", "0.5649577", "0.5638394", "0.5629784", "0.56218874", "0.5616714", "0.5607262", "0.55907905", "0.558436", "0.55831516", "0.55816823", "0.5570024", "0.5542403", "0.55415463", "0.5539737", "0.55059147", "0.5497859", "0.54978013", "0.5494016", "0.54843366", "0.54779935", "0.54671794", "0.54664135", "0.5452914", "0.54444677", "0.54442436", "0.5443711" ]
0.7794908
9
Parse OpenSSL-style foo.0, foo.1, ... subscripted options. Returns a list of values matching the specified option name.
def multiget(self, option, section = None): matches = [] if section is None: section = self.default_section if self.cfg.has_option(section, option): matches.append((-1, self.get(option, section = section))) for key, value in self.cfg.items(section): s = key.rsplit(".", 1) if len(s) == 2 and s[0] == option and s[1].isdigit(): matches.append((int(s[1]), self.get(option, section = section))) matches.sort() return [match[1] for match in matches]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def get_option(self, name):\r\n if not isinstance(name, str):\r\n name = \" \".join(name)\r\n lines = self.sendAndRecv(\"GETCONF %s\\r\\n\" % name)\r\n\r\n r = []\r\n for _,line,_ in lines:\r\n try:\r\n key, val = line.split(\"=\", 1)\r\n r.append((key,val))\r\n except ValueError:\r\n r.append((line, None))\r\n\r\n return r", "def value_options(*args):\n\n @with_pattern(r\"|\".join(args))\n def parse_options(text):\n return text\n\n return parse_options", "def _parse_delimited_options(ttsoptions, _engine):\n options = []\n for prop, val in [s.strip().split('=') for s in ttsoptions.split(',')]:\n prop = prop.strip()\n val = val.strip()\n val = float(val) if val.isdecimal() else val\n options[prop] = val\n\n return options", "def _handle_short_form(element):\n if len(element) <= 1:\n raise CmdLineException(\"Invalid option: '{}'\".format(element))\n tokens = []\n for i in range(1, len(element)):\n if element[i: i + 1] == \"=\":\n if i + 1 < len(element):\n tokens.append(element[i + 1:])\n break\n tokens.append(\"-\" + element[i: i + 1])\n return tokens", "def parse_options(self, options):\n pass", "def parse_options(options, return_list=True):\n\n cmd_options = []\n\n for key, value in options.items():\n\n if value is not None:\n txt = f\"--{key} {value}\"\n else:\n txt = f\"--{key}\"\n\n cmd_options.append(txt)\n\n if return_list:\n return cmd_options\n\n cmd_options = \" \".join(cmd_options)\n\n return cmd_options", "def parse_options(options):\r\n # convert single quotes inside option values to html encoded string\r\n options = re.sub(r\"([a-zA-Z])('|\\\\')([a-zA-Z])\", r\"\\1&#39;\\3\", options)\r\n options = re.sub(r\"\\\\'\", r\"&#39;\", options) # replace already escaped single quotes\r\n # parse the set of possible options\r\n lexer = shlex.shlex(options[1:-1].encode('utf8'))\r\n lexer.quotes = \"'\"\r\n # Allow options to be separated by whitespace as well as commas\r\n lexer.whitespace = \", \"\r\n\r\n # remove quotes\r\n # convert escaped single quotes (html encoded string) back to single quotes\r\n tokens = [x[1:-1].decode('utf8').replace(\"&#39;\", \"'\") for x in lexer]\r\n\r\n # make list of (option_id, option_description), with description=id\r\n return [(t, t) for t in tokens]", "def parse_args(args, optinfos):\n\n for opt_identifier, optinfo in optinfos:\n try:\n options, arguments = getopt.gnu_getopt(args, optinfo)\n return opt_identifier, options, arguments\n except getopt.GetoptError:\n # That version doesn't work, so try the next one\n continue\n \n # If we got this far, they both failed (read: syntax error)\n error(2, \"Syntax Error: Incorrect option passed. 
See the man page for more information.\\nA common cause is using old LPRng syntax.\\nValid options: %s\\n\" % \n (string.replace(re.sub(r'([a-zA-Z])', r'-\\1 ',\n optinfos[SYSTEM_CUPS][1]), ':', '[arg] ')))", "def _getOptions(self):\n args = []\n for iname, value in self.options:\n args.append('-' + iname)\n if value != 'true':\n args.append(value)\n return args", "def options_by_name(self):\n pass", "def options(argv=[]):\r\n parser = HendrixOptionParser\r\n return vars(parser.parse_args(argv)[0])", "def parse_options():\n\n from optparse import OptionParser\n usage = r\"\"\"%prog [options] <voxel_file>\"\"\"\n p = OptionParser(usage=usage)\n p.add_option('-o', '--output', action='store', dest='output',\n default='plot', help='Path to output SILO or VTK file.')\n p.add_option('-v', '--vtk', action='store_true', dest='vtk',\n default=False, help='Flag to convert to VTK instead of SILO.')\n parsed = p.parse_args()\n if not parsed[1]:\n p.print_help()\n return parsed\n return parsed", "def get_options(self, field):\n base, req_option = field.split(\"-\")\n assert base == \"options\", \"get_options can only be used to fetch options.\"\n option_type = self.option_str_to_int(req_option)\n i = 0\n # First, check if the option is already present in the packet\n for option in self.layer.options:\n # Scapy may try to be helpful and return the string of the option\n next_option = self.option_str_to_int(option[0])\n if option_type == next_option:\n _name, value = self.layer.options[i]\n # Some options (timestamp, checksums, nop) store their value in a\n # tuple.\n if isinstance(value, tuple):\n # Scapy returns values in any of these types\n if value in [None, b'', ()]:\n return ''\n value = value[0]\n if value in [None, b'', ()]:\n return ''\n if req_option == \"md5header\":\n return binascii.hexlify(value).decode(\"utf-8\")\n\n return value\n i += 1\n return ''", "def interpret_options(options):\n # template always has to be index 0\n template = options[0]\n # namespace always has to be index 1. 
Support 'ec2' (human friendly) and\n # 'AWS/EC2' (how CloudWatch natively calls these things)\n namespace = options[1].rsplit('/', 2)[-1].lower()\n next_idx = 2\n # region might be index 2\n region = ''\n if len(options) > 2 and re.match(r'^\\w+\\-[\\w\\-]+\\-\\d+$', options[2]):\n region = options[2]\n next_idx += 1\n else:\n next_idx = 2\n region = region or boto.config.get('Boto', 'ec2_region_name', 'us-east-1')\n\n filter_by = {}\n extras = []\n for arg in options[next_idx:]:\n if arg.startswith('-'):\n # throw these away for now\n extras.append(arg)\n elif '=' in arg:\n key, value = arg.split('=', 2)\n filter_by[key] = value\n else:\n # throw these away for now\n extras.append(arg)\n\n return template, namespace, region, filter_by, extras", "def get_options(options, opt_path):\r\n options_in = open(opt_path, 'r')\r\n # get exceptions\r\n for line_in in options_in:\r\n line = line_in.strip()\r\n if len(line) == 0:\r\n continue\r\n if line.startswith(\"#\"):\r\n continue\r\n if line.startswith(\"[\") and \"pep8\" in line:\r\n continue\r\n option = line\r\n if not line.startswith(\"-\"):\r\n line = \"--\" + line\r\n options.append(line)\r\n\r\n options_in.close()", "def test_parsingValues(self):\n argV = (\"--fooint 912 --foofloat -823.1 \"\n \"--eggint 32 --eggfloat 21\").split()\n self.usage.parseOptions(argV)\n self.failUnlessEqual(self.usage.opts['fooint'], 912)\n self.assert_(isinstance(self.usage.opts['fooint'], int))\n self.failUnlessEqual(self.usage.opts['foofloat'], -823.1)\n self.assert_(isinstance(self.usage.opts['foofloat'], float))\n self.failUnlessEqual(self.usage.opts['eggint'], 32)\n self.assert_(isinstance(self.usage.opts['eggint'], int))\n self.failUnlessEqual(self.usage.opts['eggfloat'], 21.)\n self.assert_(isinstance(self.usage.opts['eggfloat'], float))", "def _parse(self, args):\r\n\r\n ordered = []\r\n opt_full = dict()\r\n opt_abbrev = dict()\r\n\r\n args = args + [''] # Avoid out of range\r\n i = 0\r\n\r\n while i < len(args) - 1:\r\n arg = args[i]\r\n arg_next = args[i+1]\r\n if arg.startswith('--'):\r\n if arg_next.startswith('-'):\r\n raise ValueError('{} lacks value'.format(arg))\r\n else:\r\n opt_full[arg[2:]] = arg_next\r\n i += 2\r\n elif arg.startswith('-'):\r\n if arg_next.startswith('-'):\r\n raise ValueError('{} lacks value'.format(arg))\r\n else:\r\n opt_abbrev[arg[1:]] = arg_next\r\n i += 2\r\n else:\r\n ordered.append(arg)\r\n i += 1\r\n \r\n return ordered, opt_full, opt_abbrev", "def ParseOption():\n parser = optparse.OptionParser()\n parser.add_option('--input', dest='input', help='Input file path')\n parser.add_option('--output', dest='output', help='Output file path')\n parser.add_option(\n '--var_name', dest='var_name', help='Var name for the array')\n return parser.parse_args()[0]", "def check(options, rules = rules):\n s = [\"str\", \"unicode\"]\n for key in options:\n if not key.endswith(\" comment\"):\n if key in rules:\n c = rules[key]\n else:\n raise OptionKeyError(key)\n value = options[key]\n if c[0] == \"U\": continue\n elif c[0] == \"POT\":\n if not(((value & (value - 1)) == 0) and value):\n raise OptionPOTError(key)\n elif c[0] == \"R\":\n if value not in list(range(c[1], c[2]+1)):\n raise OptionRangeError(key, c[1], c[2]+1)\n elif c[0] == \"B\":\n if value not in list(range(0, 2)):\n #print (value)\n raise OptionRangeError(key, 0, 2)\n elif c[0] == \"N1+\":\n if value < 1:\n raise OptionRangeError(key, 1, float(\"inf\"))\n elif c[0] == \"N0+\":\n if value < 0:\n raise OptionRangeError(key, 0, float(\"inf\"))\n elif c[0] == 
\"FN0+\":\n if value < 0:\n raise OptionRangeError(key, 0, float(\"inf\"))\n elif c[0] == \"N-1+\":\n if value < -1:\n raise OptionRangeError(key, -1, float(\"inf\"))\n elif c[0] == \"S\":\n if value.__class__.__name__ not in s:\n raise OptionTypeError(key, \"text\")\n elif c[0] == \"Name\":check_name(value,key)\n\n elif c[0] == \"L\":\n if value.__class__.__name__ != \"list\":\n raise OptionTypeError(key, \"list\")\n\n elif c[0] == \"C\":\n if len(value) != 3:\n raise OptionError()\n if sum(value) < 1:\n raise OptionError()\n else:\n raise Exception(\"%s not valid rule type from %s\" % (c[0], key))", "def versatileOptions():\r\n return tuple(sorted(i[0] for i in list(Options.defaults().items()) if i[1].find(' #v ') > 0))", "def lookup_option_symbols(self, underlying: str) -> List[Symbol]:\n url = \"/v1/markets/options/lookup\"\n params = {\"underlying\": underlying}\n\n data = self.get(url, params)\n res = MarketsAPIResponse(**data)\n return res.symbols", "def getOptionsNames(self) -> List[unicode]:\n ...", "def parseSubscripts(part):\n subs = str(part)\n subs = part.split(\"]\")[:-1]\n return [int(sub[1:]) for sub in subs]", "def parse_opts2(tokens, optpx='--', argparam=False):\n opts0 = []\n args = []\n n = len(optpx)\n\n for token in tokens:\n if token[:2] == optpx:\n opts0.append(token[n:])\n else:\n if argparam:\n token = token.split('=')\n args.append(token)\n\n opts = parse_opts(opts0)\n\n return args, opts", "def parse(self, section):\n # try to find alternatives if they exist\n alternatives = deepcopy(self.alternatives)\n while len(alternatives) != 0 and self.name not in section.dict:\n other_name = alternatives.pop(0)\n if other_name in section.dict:\n section.dict[self.name] = section.dict[other_name]\n del section.dict[other_name]\n break\n if not self.optional:\n assert_exists(self.name, section.dict, section.name)\n if self.name not in section.dict:\n return self.default\n else:\n if self.dtype != list:\n if self.dtype == bool:\n # this is necessary since ``bool(\"False\")`` returns ``True``.\n value = parse_bool(section, self.name)\n else:\n value = self.dtype(section.dict[self.name])\n if not self.validation_func(value):\n raise ValueError('Invalid input for option ' + self.name +\n ' in section ' + section.name)\n return value\n else:\n\n value = parse_list(section.dict[self.name], self.datatype)\n\n # value validation\n if not all_true(self.validation_func, value):\n raise ValueError('Invalid input for option ' + self.name +\n ' in section ' + section.name)\n\n shape = deepcopy(self.shape)\n\n # now we need to get the correct shape\n if shape == -1:\n # we don't care for the shape of this\n if not isinstance(value, list):\n value = [value]\n return value\n\n if isinstance(shape, str):\n # in this case we simply use the shape of the option with this name\n if shape not in section.dict:\n raise ValueError(self.name + ' in ' + section.name + ' has an invalid ' +\\\n 'shape because the options whose shape it should have ' +\\\n 'does not exist. Check your option definitions!')\n shape = get_shape(section.dict[shape])\n if isinstance(shape, int):\n shape = [shape]\n # shape is now a list, but it might still contain strings\n for i in range(len(shape)):\n if isinstance(shape[i], str):\n shape[i] = len(section.dict[shape[i]])\n\n\n\n # shape is now either a 'flat' shape, i.e. something like [2, 3, 2],\n # or an expanded shape, e.g. [2, [3, 3], [[2, 2, 2],[2, 2, 2]]]\n # if it's flat, it might contain dimensions with -1 that cannot be\n # autoexpanded. 
We first need to determine the shape of this dimension.\n if is_flat(shape):\n real_shape = get_shape(value)\n if isinstance(real_shape, (list, tuple)):\n # if it's just a single number we can expand it\n # Here I'm trying to find the flat shape of the value that was\n # given in the configuration file.\n flat_shape_value = try_flattening_shape(real_shape)\n # It might happen that we cannot flatten the shape, in this\n # case there are negative values remaining in flat_shape_value.\n # If there are, this means that there is a dimension\n # containing lists of different lengths.\n # In any case I will try to replace any -1 in ``shape``\n # with the value in ``flat_shape_value``.\n shape = get_positive_shape(shape, flat_shape_value)\n # Now we do a test for equality of the asserted shape and\n # the shape of the value found in the config file. Keep in\n # mind that there might be -1 values left.\n if flat_shape_value != shape[-len(flat_shape_value):]:\n raise ShapeError(self.name, section.name)\n # If there are -1's left we must ensure that the \"depth\" of\n # the given value, i.e. the number of dimensions, is higher\n # than the ``number of dimensions after the value preceding\n # the first -1`` + 1 .\n if any(map(lambda x: x == -1, shape)):\n depth = numdim(value)\n mindepth = len(shape) - shape.index(-1) + 1\n if depth < mindepth:\n raise ValueError('Option ' + self.name + ' in section ' +\n section.name + ' can not be expanded!')\n shape = expand_shape(shape)\n\n # Now we have an expanded shape, so only two tasks remain:\n # * auto-expansion\n # * shape validation\n value = expand_to_shape(shape, value)\n if not compare_shapes(shape, get_shape(value)):\n raise ShapeError(self.name, section.name)\n return value", "def Options():\n p = optparse.OptionParser('split_doc.py [options] input_file out_prefix')\n # Like awk -v\n p.add_option(\n '-v', dest='default_vals', action='append', default=[],\n help=\"If the doc's own metadata doesn't define 'name', set it to this value\")\n p.add_option(\n '-s', '--strict', dest='strict', action='store_true', default=False,\n help=\"Require metadata\")\n return p", "def get_options(self, key):\n if key in self.options.get_option_names():\n return self.options\n\n try:\n scope, scoped_key = key.split('.')\n except ValueError:\n return None\n\n if scope == 'input' and scoped_key in self.input.options.get_option_names():\n return self.input.options\n elif scope == 'output' and scoped_key in self.output.options.get_option_names():\n return self.output.options\n elif scope == 'exploit' and scoped_key in self.exploit.options.get_option_names():\n return self.exploit.options\n else:\n return None", "def extract_opt(options, optname):\n extracted = []\n remaining = []\n for o, v in options:\n if o == optname:\n extracted.append((o, v))\n else:\n remaining.append((o, v))\n return extracted, remaining", "def _check_prefixes(self, docstring: PetscDocStringImpl) -> None:\n for key, opts in sorted(self.items.items()):\n lopts = len(opts)\n assert lopts >= 1, f'number of options {lopts} < 1, key: {key}, items: {self.items}'\n\n if lopts == 1:\n # only 1 option, should start with '.'\n self._check_opt_starts_with(docstring, opts[0], 'Solitary', '.')\n else:\n # more than 1, should be '+', then however many '.', then last is '-'\n self._check_opt_starts_with(docstring, opts[0], 'First multi', '+')\n for opt in opts[1:-1]:\n self._check_opt_starts_with(docstring, opt, 'Multi', '.')\n self._check_opt_starts_with(docstring, opts[-1], 'Last multi', '-')\n return", "def 
_getOptions(self, sectionName):\r\n\r\n if sectionName in self.sections:\r\n attri_list = self.cf.options(sectionName)\r\n return attri_list\r\n else:\r\n return None", "def _parse_option_section(conf, items, copt, opt, _allow_include=0):\n global config_stray_opts, _non_options, _list_options, _path_options\n\n for key, val in items:\n if key == 'include' and _allow_include:\n for inc in val.split(' '):\n _parse_option_section(conf, conf.items(inc), copt, opt, _allow_include=(_allow_include-1))\n\n for key, val in items:\n if key in _non_options:\n continue\n elif key in dir(copt):\n if key in _list_options:\n val = val.split(_list_options[key])\n elif isinstance(getattr(copt, key), list) or \\\n (key in ('modules',)):\n val = val.split(' ')\n elif isinstance(getattr(copt, key), bool):\n val = bool(val.lower() in ('1', 'true', 't', 'yes'))\n\n if not getattr(copt, key):\n setattr(opt, key, val)\n else:\n config_stray_opts.append((key, val))\n pass", "def _collect_options(self, option_index):\n input_option = list()\n if not option_index:\n for k in self._options.keys():\n input_option.append(self._options.get(k))\n else:\n for index in option_index:\n input_option.append(self._options.get(index))\n return input_option", "def getoptions(str,num,num2=None):\n if num2==None:\n num2=num\n op=str.split(',')\n if len(op) >= num and len(op) <= num2:\n for i in range(len(op)):\n op[i]=op[i].strip()\n return op\n else:\n raise OptionError, \"WrongNumber\"", "def processOption (self, line) :\n ll = line.split ('=')\n if len (ll) < 2:\n print \"Cannot parse option \" , line\n sys.exit()\n result = (ll[0].strip() , ll[1].strip())\n return result", "def parse_opts_adapter(tokens, delim, optpx='--', argparam=False):\n if any([t.startswith(optpx) for t in tokens]):\n # new style\n args, opts = parse_opts2(tokens, optpx=optpx, argparam=argparam)\n else:\n # old style\n args = tokens[:delim]\n opts = parse_opts(tokens[delim:])\n return args, opts", "def extract_option(prefix, args):\n if prefix in ('#',):\n unique = False\n else:\n unique = True\n value = [a for a in args if a.startswith(prefix)]\n if len(value) == 1:\n value = value[0]\n args.remove(value)\n value = value[1:]\n if not unique:\n return [value]\n return value\n elif len(value) > 1 and unique:\n print('More than one %s found in args' % prefix)\n sys.exit(1)\n elif len(value) > 1 and not unique:\n for v in value:\n if v in args:\n args.remove(v)\n return [v[1:] for v in value]\n return None", "def parseopts(opts):\n\n for opt, arg in opts:\n\n if opt in [\"--input\"]:\n filetag = arg\n\n return filetag", "def _parse_options(options):\n opts = dict()\n for attr in dir(options):\n if attr.startswith(\"__\"):\n continue\n opts[attr] = getattr(options, attr)\n return opts", "def getList(self,section, option): \n unparsedOption=self.get(section, option)\n if unparsedOption.find(',')>0:\n splittedValue=unparsedOption.split(',')\n strippedValue=[]\n while splittedValue:\n valuePart=splittedValue.pop(0)\n strippedValue.append(valuePart.strip())\n result=strippedValue\n else: result=unparsedOption\n return result", "def options(self, a: str) -> typing.Any:", "def parse_options(arguments):\n parser = optparse.OptionParser(option_list=OPTION_LIST)\n options, values = parser.parse_args(arguments)\n return options", "def parse_opts(opts0):\n opts = {}\n # parse the stuff in \"opts\"\n for opt in opts0:\n parsed = opt.split('=')\n key = parsed[0].strip()\n if len(parsed) > 1:\n # OLD: cmd = parsed[1].strip()\n cmd = '='.join(parsed[1:]).strip()\n else:\n 
cmd = ''\n opts[key] = cmd\n\n return opts", "def str2choice(options: List[str]) -> Callable[[str], str]:\n\n def _parse(string: str) -> str:\n if string not in options:\n raise argparse.ArgumentTypeError(\"Expected one of: \" + \" \".join(options))\n return string\n\n return _parse", "def opts_load(opts):\n attr_words = []\n kv_words = []\n kv_exprs = {}\n for opt in opts:\n if isinstance(opt, basestring): # attr_word\n attr_words.append(opt)\n elif isinstance(opt, list):\n if len(opt) == 1: # attr_word\n attr_words.append(unicode(opt[0]))\n elif len(opt) == 2 and not opt[1]: # attr_word\n attr_words.append(unicode(opt[0]))\n elif (len(opt) == 2 and\n len(opt[0]) == 1 and\n unicode(opt[0]).isalpha() and\n unicode(opt[1]).isdigit()\n ): # kv_word\n kv_words.append(unicode(opt[0]) + unicode(opt[1]))\n else: # kv_expr\n kv_exprs[unicode(opt[0])] = \" \".join(opt[1:])\n return attr_words, kv_words, kv_exprs", "def _optionvarkey(name):\n return \"ragdoll%s\" % (name[0].upper() + name[1:])", "def parseOpts(self):\n\n for opt in self.opts:\n var, val = opt.split('=', 1)\n try:\n val = int(val)\n except ValueError:\n try:\n val = float(val)\n except ValueError:\n # just a string\n pass\n self[var] = val", "def getmulti(self, section, option, nested=False):\n data = self.get(section, option)\n if '\\n' not in data and self.read_keyval(data)[0] is None:\n # oneliner version\n return data.strip().split()\n\n # block version\n if not nested:\n return [element for element in data.strip().split('\\n')]\n\n def walk(data):\n \"\"\"docstring for walk\"\"\"\n response = []\n option_name = None\n option_value = None\n for element in data.split('\\n'):\n if element and element.startswith(' '):\n option_value.append(element)\n continue\n if option_name:\n response.append({option_name: walk(dedent('\\n'.join(option_value)))})\n option_name = None\n option_value = None\n\n n, v = self.read_keyval(element)\n if not n:\n response.append(element)\n option_name = None\n option_value = None\n continue\n elif v:\n response.append({n: v})\n option_name = None\n option_value = None\n continue\n option_name = n\n option_value = []\n\n if option_name:\n response.append({option_name: walk(dedent('\\n'.join(option_value)))})\n\n return response\n return walk(data)", "def encode_options(options: Dict[str, Union[str, float, int]]) -> List[str]:\n d = list()\n rev_dict = {v: k for k, v in type_mappings.items()}\n for k, v in options.items():\n t = type(v)\n if t not in rev_dict:\n raise OptionParsingError(f\"Unknown option type {t}.\")\n arg = f'{k}={v}={rev_dict[t]}'\n d.append(arg)\n return d", "def _parse_option_name(line):\n return line.split('=')[0].strip()", "def myst_options(options):\n num_options = len(options.keys())\n myst_options = []\n if num_options == 0:\n return myst_options\n elif num_options < 2: # TODO parameterise this in conf.py\n for option, option_val in options.items():\n myst_options.append(\":{}: {}\".format(option, option_val).rstrip())\n return myst_options\n else:\n myst_options.append(\"---\")\n for item in sorted(options.keys()):\n myst_options.append(\"{}: {}\".format(item, options[item]))\n myst_options.append(\"---\")\n return myst_options", "def options(self, section):\n try:\n return list(self._dict[section])\n except KeyError as e:\n raise NoSectionError(str(e)) from None", "def get_option_names(self):\n # There are no options until the current exploit is set\n if self.exploit is None:\n return []\n\n option_names = self.options.get_option_names()\n\n if self.input is not None:\n 
option_names += ['input.' + option for option in self.input.options.get_option_names()]\n\n if self.output is not None:\n option_names += ['output.' + option for option in self.output.options.get_option_names()]\n\n if self.exploit is not None:\n option_names += ['exploit.' + option for option in self.exploit.options.get_option_names()]\n\n return option_names", "def parse_option(group_name, option_name, value_str):\n group = get_group(group_name)\n if option_name in group.members:\n return group.members[option_name].parse(value_str)\n else:\n raise UnknownConfigOptionError(groupname + \".\" + \"option_name\")", "def _parse_qselect(qselect_output):\n jobs = qselect_output.splitlines()\n if not jobs or (len(jobs) == 1 and jobs[0] == ''):\n return []\n return [int(job.split('.')[0]) for job in jobs]", "def get_options(self):\r\n return self._option_values", "def _parse_qselect(qselect_output):\n jobs = qselect_output.splitlines()\n if not jobs or (len(jobs) == 1 and jobs[0] is ''):\n return []\n return [int(job.split('.')[0]) for job in jobs]", "def _parse(self, options):\n\n '''Start by considering all registered options, and validating them\n if they are in the incoming options dict'''\n self.results = {}\n wanted = self.wanted.copy()\n for opt in wanted.keys():\n if opt in options:\n self.results[opt] = self._access(wanted, opt, options[opt])\n\n '''As all registered options, in trac.ini, have composite names,\n consisting of a prefix and the option name separated by a dot,\n now find the starting list of prefixes to consider. Either use\n the value of incoming option of the name found in self.config,\n or use the fixed default prefix from self.prefix'''\n if self.config in options:\n parents = self._parents_to_list(options[self.config])\n del options[self.config]\n else:\n parents = [ self.prefix ]\n\n '''Look up these composite options'''\n if len(wanted) > 0:\n self._inherit(options, parents, wanted, {})\n\n '''Set all still unresolved registered options, to their defaults'''\n for opt in wanted.keys():\n self.results[opt] = (\n wanted[opt].default,\n self._is_default,\n wanted[opt]\n )\n\n '''Move over all UNregistered options as they were passed in.'''\n for opt in options.keys():\n if not opt in self.results:\n self.results[opt] = (\n options[opt],\n self._is_extra,\n None\n )", "def parse_options():\n\n parser = optparse.OptionParser(usage=USAGE, version=VERSION)\n\n parser.add_option(\"-f\", \"--file\",\n action=\"store\", default=Utils.getConfig(\"defaultFile\"), dest=\"file\",\n help=\"Read the site name from external file\")\n\n parser.add_option(\"-s\", \"--site-name\",\n action=\"store\", default=\"\", dest=\"sitename\",\n help=\"Get links for specified url only\")\n\n opts, args = parser.parse_args()\n\n return opts, args", "def parse_cmdline(cmdline_args):\n option_parser = optparse.OptionParser(usage='usage: %prog [options] <pattern>',\n description='ts short for TextSearch, Grep like tool', prog='ts', add_help_option=False) # -h is a real option\n\n option_parser.add_option('-h', '--help', action='store_true', dest='help', help='Display this information')\n option_parser.add_option('-e', '--extension', action='store', dest='extension', type='string', default=None,\n help='file extension')\n\n group_searching = optparse.OptionGroup(option_parser, 'Regexp selection and interpretation')\n group_searching.add_option('-i', '--ignore-case', action='store_true', dest='ignore_case', default=False,\n help='Ignore case distinctions in the pattern')\n 
group_searching.add_option('-w', '--word-regexp', action='store_true', dest='word_regexp', default=False,\n help='Force the pattern to match only whole words')\n group_searching.add_option('-l', '--literal', action='store_true', dest='literal', default=False,\n help='Quote all metacharacters; the pattern is literal')\n option_parser.add_option_group(group_searching)\n\n group_miscellaneous = optparse.OptionGroup(option_parser, 'Miscellaneous')\n group_miscellaneous.add_option('--path-only', action='store_true', dest='path_only', default=False,\n help='only print out the matching file')\n group_miscellaneous.add_option('-v', '--invert-match', action='store_true', dest='invert_match', default=False,\n help='Invert the sense of matching, to select non-matching lines.')\n option_parser.add_option_group(group_miscellaneous)\n\n group_output = optparse.OptionGroup(option_parser, 'Output control')\n group_output.add_option('-c', '--count', action='store_true', dest='count', default=False,\n help='Suppress normal output; instead print a count of matching lines for each input file.')\n group_output.add_option('-o', '--only-matching', action='store_true', dest='only_matching', default=False,\n help='Print only the matched (non-empty) parts of a matching line, with each such part on '\n 'a separate output line.')\n option_parser.add_option_group(group_output)\n\n options, args = option_parser.parse_args(cmdline_args)\n return options, args, option_parser", "def options(self):\n if self._ast:\n for option in self._ast[1]:\n yield option", "def getList(self,section,option,sep=\";\"):\n value=ConfigParser.SafeConfigParser.get(self,section,option)\n value=value.strip('\"')\n vallist=value.split(sep)\n return vallist", "def getOptions(self, propertyListName: unicode) -> ghidra.framework.options.Options:\n ...", "def _parser_options():\n #We have two options: get some of the details from the config file,\n import argparse\n from pydft import base\n pdescr = \"Numerical DFT code.\"\n parser = argparse.ArgumentParser(parents=[base.bparser], description=pdescr)\n for arg, options in script_options.items():\n parser.add_argument(arg, **options)\n \n args = base.exhandler(examples, parser)\n if args is None:\n return\n\n return args # pragma: no cover", "def find_opts_linux(soup, header):\n\n # Get the source line of the header\n header_el = soup.find(id=header)\n if header_el is None:\n return set()\n header_source_line = soup.find(id=header).sourceline\n\n # Get the element where the options are described\n opts_el = [pre for pre in soup.find_all('pre') if pre.sourceline == header_source_line][0]\n\n opts_lines = opts_el.text.split('\\n')\n opts_lines = [line.lstrip().split(maxsplit=1)[0] for line in opts_lines if line]\n opts = [line for line in opts_lines if line[0] == '-' and line != '-']\n\n # Remove false positives\n opts = {o for o in opts if not o[-1] in NON_OPTS_CHARS}\n\n return opts", "def parse_options():\n\n parser = optparse.OptionParser()\n\n parser.add_option(\"-q\", \"--quiet\",\n action=\"store_true\", default=False, dest=\"quiet\",\n help=\"Enable quiet mode\")\n\n parser.add_option(\"-l\", \"--links\",\n action=\"store_true\", default=False, dest=\"links\",\n help=\"Get links for specified url only\")\n\n parser.add_option(\"-d\", \"--depth\",\n action=\"store\", type=\"int\", default=1, dest=\"depth\",\n help=\"Maximum depth to traverse\")\n\n parser.add_option(\"-v\", \"--verbose\",\n action=\"store_true\", default=False, dest=\"verbose\",\n help=\"Enable verbose mode\")\n\n opts, args = 
parser.parse_args()\n\n if len(args) < 1:\n parser.print_help()\n raise SystemExit, 1\n\n return opts, args", "def find_option(self, name, namespace=...):\n ...", "def list_opts():\n return [('ironic_lib', utils_opts)]", "def read_array_option(config, section):\n if not config.has_section(section):\n return\n\n return [item for item, _ in config.items(section)]", "def svn_diff_file_options_parse(*args):\n return _diff.svn_diff_file_options_parse(*args)", "def processCmdlineOpts(cmdOpts):\n global opts\n opts = {}\n for i in range(1,len(cmdOpts)):\n if re.match('-i', cmdOpts[i]):\n opts['i'] = cmdOpts[i+1]\n if i not in opts: \n opts['i']='awn.xml'\n return opts", "def parseOptions():\n\n strict = False\n lists = []\n addressbooks = []\n folders = []\n exclusions = []\n\n opts, args = getopt.getopt(sys.argv[1:], \"sl:a:f:e:\", [ \"strict\", \"hand-list\", \"addressbook\", \"folder\", \"exclusions\" ])\n if len(args) != 1 or len(opts) < 1: \n raise getopt.GetoptError(\"Invalid arguments.\")\n\n for opt in opts:\n if opt[0] == \"-s\" or opt[0] == \"--strict\":\n strict = True\n if opt[0] == '-l' or opt[0] == '--hand-list':\n lists.append(opt[1])\n if opt[0] == '-a' or opt[0] == '--addressbook':\n addressbooks.append(opt[1])\n if opt[0] == '-f' or opt[0] == '--folder':\n folders.append(opt[1])\n if opt[0] == '-e' or opt[0] == '--exclusions':\n exclusions.append(opt[1])\n\n return(args[0], strict, lists, addressbooks, folders, exclusions)", "def _find_options(self, inputfield):\r\n elements = inputfield.findall('./options/option')\r\n return [{\r\n 'id': index,\r\n 'description': option.text,\r\n 'choice': option.get('choice')\r\n } for (index, option) in enumerate(elements)]", "def vararg_callback(option, opt_str, value, parser):\n\tassert value is None\n\tvalue = []\n\n\tdef floatable(str):\n\t\ttry:\n\t\t\tfloat(str)\n\t\t\treturn True\n\t\texcept ValueError:\n\t\t\treturn False\n\n\tfor arg in parser.rargs:\n\t\t# Stop on options like --foo \n\t\tif arg[:2] == \"--\" and len(arg) > 2:\n\t\t\tbreak\n\t\t# Stop on -a, but not on negative numbers\n\t\tif arg[:1] == \"-\" and len(arg) > 1 and not floatable(arg):\n\t\t\tbreak\n\t\tvalue.append(arg)\n\n\tdel parser.rargs[:len(value)]\n\tsetattr(parser.values, option.dest, value)", "def split_package_spec(package_spec):\n match = re.match('^(.*?)\\((.*)\\)$', package_spec)\n if match:\n package_name = match.group(1)\n package_options = match.group(2).split(',')\n else:\n package_name = package_spec\n package_options = []\n return package_name, package_options", "def parse_args(self, w, pos, parsing_state=None):\n\n from .. 
import latexwalker\n\n if parsing_state is None:\n parsing_state = w.make_parsing_state()\n\n argnlist = []\n\n if self.args_math_mode is not None and \\\n len(self.args_math_mode) != len(self.argspec):\n raise ValueError(\"Invalid args_math_mode={!r} for argspec={!r}!\"\n .format(self.args_math_mode, self.argspec))\n\n def get_inner_parsing_state(j):\n if self.args_math_mode is None:\n return parsing_state\n amm = self.args_math_mode[j]\n if amm is None or amm == parsing_state.in_math_mode:\n return parsing_state\n if amm == True:\n return parsing_state.sub_context(in_math_mode=True)\n return parsing_state.sub_context(in_math_mode=False)\n\n p = pos\n\n if self._like_pylatexenc1x_ignore_leading_star:\n # ignore any leading '*' character\n tok = w.get_token(p)\n if tok.tok == 'char' and tok.arg == '*':\n p = tok.pos + tok.len\n\n for j, argt in enumerate(self.argspec):\n if argt == '{':\n (node, np, nl) = w.get_latex_expression(\n p,\n strict_braces=False,\n parsing_state=get_inner_parsing_state(j)\n )\n p = np + nl\n argnlist.append(node)\n\n elif argt == '[':\n\n if self.optional_arg_no_space and w.s[p].isspace():\n # don't try to read optional arg, we don't allow space\n argnlist.append(None)\n continue\n\n optarginfotuple = w.get_latex_maybe_optional_arg(\n p,\n parsing_state=get_inner_parsing_state(j)\n )\n if optarginfotuple is None:\n argnlist.append(None)\n continue\n (node, np, nl) = optarginfotuple\n p = np + nl\n argnlist.append(node)\n\n elif argt == '*':\n # possible star.\n tok = w.get_token(p)\n if tok.tok == 'char' and tok.arg.startswith('*'):\n # has star\n argnlist.append(\n w.make_node(latexwalker.LatexCharsNode,\n parsing_state=get_inner_parsing_state(j),\n chars='*', pos=tok.pos, len=1)\n )\n p = tok.pos + 1\n else:\n argnlist.append(None)\n\n else:\n raise LatexWalkerError(\n \"Unknown macro argument kind for macro: {!r}\".format(argt)\n )\n\n parsed = ParsedMacroArgs(\n argspec=self.argspec,\n argnlist=argnlist,\n )\n\n return (parsed, pos, p-pos)", "def _getoptions():\n parser = OptionParser()\n parser.add_option(\"-f\", \"--dwca_file\", dest=\"dwca_file\",\n help=\"Darwin Core Archive file\",\n default=None)\n return parser.parse_args()[0]", "def parse_params(params):\n def isoption(x):\n return x.startswith('-')\n solo_flags = []\n arg_flags = dict()\n i = 0\n while i < len(params):\n if not isoption(params[i]):\n raise ValueError('\"' + params[i] + '\" does not look like an option.')\n if i == len(params) - 1 or isoption(params[i+1]):\n solo_flags.append(params[i])\n i += 1\n continue\n else:\n arg_flags[params[i]] = process_arg(params[i+1])\n i += 2\n continue\n return solo_flags, arg_flags", "def parse_options():\n\n parser = optparse.OptionParser(description='PySpark WordCount.')\n parser.add_option('-i', '--input', action='store', nargs=1,\n default='s3://dimajix-training/data/alice/',\n help='Input file or directory')\n parser.add_option('-o', '--output', action='store', nargs=1,\n default='alice-counts',\n help='Output file or directory')\n\n (opts, args) = parser.parse_args()\n\n return opts", "def get_options_from_file(path):\n with open(path) as f:\n content = f.read()\n keys = re.findall(r\"%(.+):\", content)\n values = re.findall(r\":\\s*([\\w\\W]+?)\\s*(?:%|$)\", content)\n\n options = dict(zip(keys, values))\n return options", "def getlist(self, option, sep=',', chars=None):\n return [chunk.strip(chars) for chunk in option.split(sep)]", "def parse_kw_args(tagname, bits, args_spec=None, restrict=False):\n\n args = []\n\n if restrict:\n if args_spec is 
None:\n raise ValueError(\"you must pass an args_spec dict if you want to restrict allowed args\")\n allowed = list(args_spec.keys())\n do_validate = True\n else:\n do_validate = args_spec is not None\n\n for bit in bits:\n try:\n name, val = bit.split('=')\n except ValueError:\n raise template.TemplateSyntaxError(\n \"keyword arguments to '%s' tag must have 'key=value' form (got : '%s')\" \\\n % (tagname, bit)\n )\n\n name = str(name)\n if do_validate:\n if restrict:\n if name in allowed:\n # we only want each name once\n del allowed[allowed.index(name)]\n else:\n raise template.TemplateSyntaxError(\n \"keyword arguments to '%s' tag must be one of % (got : '%s')\" \\\n % (tagname, \",\".join(allowed), name)\n )\n\n validate = args_spec[name]\n else:\n validate = args_spec.get(name, None)\n\n if validate is not None:\n if callable(validate):\n try:\n val = validate(val)\n except Exception, e:\n raise template.TemplateSyntaxError(\n \"invalid optional argument '%s' for '%s' tag: '%s' (%s)\" \\\n % (tagname, name, val, e)\n )\n else:\n # assume re\n if re.match(validate, val) is None:\n raise template.TemplateSyntaxError(\n \"invalid optional argument '%s' for '%s' tag: '%s' (doesn't match '%s')\" \\\n % (tagname, name, val, validate)\n )\n\n # should be ok if we managed to get here \n args.append((name, val))\n\n return args", "def parseCmdLine(cmdLine):\n files=[]\n modifiers=[]\n for i in range(len(cmdLine)):\n arg = cmdLine[i]\n if arg[:2] != '--':\n files = cmdLine[i:]\n return (modifiers, files)\n \n arg = arg[2:]\n parts = arg.split('=',1)\n modifiers.append((parts[0], parts[1]))\n return (modifiers, files)", "def parse_options_plotRates(parser):\n parsers = parser.add_subparsers()\n sub_parser = parsers.add_parser(\"plotRates\")\n sub_parser.add_argument(\"-i\", \"--interactive\", dest=\"interactive\", action='store_false', help=\"Draw plots on screen.\")\n sub_parser.add_argument(\"-n\", \"--nevents\", dest=\"nevents\", default=1, type=int, help=\"Total nmumber of events\")\n sub_parser.add_argument(\"-b\", \"--bunches\", dest=\"bunches\", default=0, type=int, help=\"Number of colliding bunches\")\n sub_parser.add_argument(\"--pu\", dest=\"pu\", default=20, type=int, help=\"Average PU. default=20\")\n sub_parser.add_argument(\"--xsect\", dest=\"xsect\", default=80, type=float, help=\"Total cross section in mb. default=80 mb\")\n sub_parser.add_argument(\"--instlumi\", dest=\"instlumi\", default=1.2e34, type=float, help=\"Instantaneous luminosity. 
default=1.2e-34 cm-2s-1\")\n sub_parser.add_argument(\"--scale\", dest=\"scale\", default=1., type=float, help=\"Additional scale factor for rate calculate\")\n sub_parser.add_argument(\"-l\", \"--legacy\", dest=\"legacy\", action='store_true', help=\"Draw plots relative to legacy.\")\n\n opts, unknown = parser.parse_known_args()\n return opts", "def parseArgs(args):\n parsed = []\n for arg in args:\n print arg\n arg = arg.strip()\n interpretation = None\n try:\n interpretation = float(arg)\n if string.find(arg, \".\") == -1:\n interpretation = int(interpretation)\n except:\n # Oh - it was a string.\n interpretation = arg\n pass\n parsed.append(interpretation)\n return parsed", "def parse_option():\n parser = argparse.ArgumentParser(\"zdm H0 I Figures\")\n parser.add_argument(\n \"figure\",\n type=str,\n help=\"function to execute: ('fiducial, 'varyH0', 'H0vsEmax')\",\n )\n # parser.add_argument('--cmap', type=str, help=\"Color map\")\n # parser.add_argument('--distr', type=str, default='normal',\n # help='Distribution to fit [normal, lognorm]')\n args = parser.parse_args()\n\n return args", "def parseOpts():\n global dir_source\n global fileList\n global suffix\n global begin\n global name\n \n shouldExit = False\n \n # check options. If options is None, exit. \n for o, a in opts:\n if o in (\"-h\", \"--help\"): # get help\n getHelp()\n shouldExit = True\n elif o in (\"-v\", \"--version\"): # show version\n showVersion()\n shouldExit = True\n elif o in (\"-s\", \"--suffix\"): # set suffix\n suffix = a\n elif o in (\"-b\", \"--begin\"): # set begin\n begin = int(a)\n elif o in (\"-n\", \"--name\"): # specify a name\n name = a\n \n if shouldExit:\n sys.exit()\n \n # get dir_source\n if args is None or len(args) == 0:\n print \"SRT:no source dictionary.\"\n sys.exit()\n dir_source = args[0]\n try:\n fileList = os.listdir(dir_source)\n fileList.sort()\n except:\n print \"SRT:wrong path\"\n sys.exit()\n else:\n renameFiles()", "def cli_options():\n\n parser = argparse.ArgumentParser(\n description='c[apirca]grep',\n formatter_class=argparse.RawTextHelpFormatter\n )\n\n parser.add_argument('-d', '--def', dest='defs',\n help='Network Definitions directory location. 
\\n',\n default='./def')\n\n # -i and -t can be used together, but not with any other option.\n ip_group = parser.add_argument_group()\n # take 1 or more IPs\n ip_group.add_argument('-i', '--ip', dest='ip', nargs='+', type=is_valid_ip,\n help='Return list of definitions containing the '\n 'IP(s).\\nMultiple IPs permitted.')\n\n ip_group.add_argument('-t', '--token', dest='token',\n help=('See if an IP is contained within the given '\n 'token.\\nMust be used in conjunction with '\n '-i/--ip [addr].'))\n\n exclusive_group = parser.add_mutually_exclusive_group()\n # the rest of the arguments are mutually exclusive with each other,\n # and -i / -t\n exclusive_group.add_argument('-c', '--cmp', dest='cmp', nargs=2,\n metavar=('OBJ', 'OBJ'),\n help=('Compare the two given network '\n 'definition tokens'))\n\n exclusive_group.add_argument('-g', '--gmp', dest='gmp', nargs=2,\n type=is_valid_ip, metavar=('IP', 'IP'),\n help=('Diff the network objects to'\n ' which the given IP(s) belong'))\n\n exclusive_group.add_argument('-o', '--obj', dest='obj', nargs='+',\n help=('Return list of IP(s) contained within '\n 'the given token(s)'))\n\n exclusive_group.add_argument('-s', '--svc', dest='svc', nargs='+',\n help=('Return list of port(s) contained '\n 'within given token(s)'))\n\n exclusive_group.add_argument('-p', '--port', dest='port', nargs=2,\n metavar=('PORT', 'PROTO'),\n help=('Returns a list of tokens containing '\n 'the given port and protocol'))\n\n return parser", "def option(self, spec):\n return spec.options[self.rng.integers(len(spec.options))]", "def get_tag_options(label_matches):\r\n\ttag_options = []\r\n\tfor key in label_matches.keys():\r\n\t\tif key[1] not in tag_options:\r\n\t\t\ttag_options.append(key[1])\r\n\treturn tag_options", "def _ParseFilterOptions(self, options):\n names = [u'date_filters', u'filter_file']\n helpers_manager.ArgumentHelperManager.ParseOptions(\n options, self, names=names)\n\n extensions_string = self.ParseStringOption(options, u'extensions_string')\n self._ParseExtensionsString(extensions_string)\n\n names_string = getattr(options, u'names_string', None)\n self._ParseNamesString(names_string)\n\n signature_identifiers = getattr(options, u'signature_identifiers', None)\n try:\n self._ParseSignatureIdentifiers(\n self._data_location, signature_identifiers)\n except (IOError, ValueError) as exception:\n raise errors.BadConfigOption(exception)\n\n if self._filter_file:\n self.has_filters = True\n else:\n self.has_filters = self._filter_collection.HasFilters()", "def parse_options_header(value):\n def _tokenize(string):\n for match in _option_header_piece_re.finditer(string):\n key, value = match.groups()\n key = unquote_header_value(key)\n if value is not None:\n value = unquote_header_value(value)\n yield key, value\n\n if not value:\n return '', {}\n\n parts = _tokenize(';' + value)\n name = next(parts)[0]\n extra = dict(parts)\n return name, extra", "def parse_options():\n global parser\n parser.add_option(\"-r\", \"--regions\", dest=\"input_brain_regions\",\n help=\"Input file for brain region data\",\n action=\"store\", type='string')\n\n parser.add_option(\"-g\", \"--gray\", dest=\"input_gray_levels\",\n help=\"Input file for gray level data\",\n action=\"store\", type='string')\n\n parser.add_option(\"-n\", \"--nissl\", dest=\"input_nissl\",\n help=\"Input file for nissl data\",\n action=\"store\", type='string')\n\n parser.add_option(\"-o\", \"--output\", dest=\"output_folder_path\",\n help=\"Output folder for extracted data files\",\n action=\"store\", 
type='string')\n\n return parser.parse_args()", "def _ParseSendChangeOptions(bot_spec, options):\n values = [\n ('user', options.user),\n ('name', options.name),\n ]\n # A list of options to copy.\n optional_values = (\n 'email',\n 'revision',\n 'root',\n 'patchlevel',\n 'issue',\n 'patchset',\n 'target',\n 'project',\n )\n for option_name in optional_values:\n value = getattr(options, option_name)\n if value:\n values.append((option_name, value))\n\n # Not putting clobber to optional_names\n # because it used to have lower-case 'true'.\n if options.clobber:\n values.append(('clobber', 'true'))\n\n for bot, tests in bot_spec:\n values.append(('bot', ('%s:%s' % (bot, ','.join(tests)))))\n\n return values", "def user_input_choices(self, msg, *options):\n choices = ['%s %s' % (self.prefix, msg)]\n choices += [\n \"%s. %s\" % (num, opt) for num, opt in enumerate(options, 1)]\n try:\n input_str = int(\n vim.eval('inputlist(%s)' % self.prepare_value(choices)))\n except (KeyboardInterrupt, ValueError):\n input_str = 0\n\n if not input_str:\n self.message('Cancelled!')\n return False\n\n try:\n return options[input_str - 1]\n except (IndexError, ValueError):\n self.error('Invalid option: %s' % input_str)\n return self.user_input_choices(msg, *options)", "def _parse_arg_list(self):\n\t\targ_list = {}\n\t\tfor arg in getopt.getopt(sys.argv[1:], 'c:r:j:d')[0]:\n\t\t\targ_list[arg[0][1:]] = arg[1]\n\t\n\t\treturn arg_list", "def _find_options(self):\r\n elements = self.xml.findall('./options/option')\r\n return [{\r\n 'id': index,\r\n 'description': option.text,\r\n 'choice': option.get('choice')\r\n } for (index, option) in enumerate(elements)]", "def split_option(option, length):\n length = list(length)\n args = option.split(',')\n if len(args) not in length:\n sys.stderr.write('mpl-graph: Argument expected length {}. '\n 'Actual length of \"{}\" is {}\\n'.format(length, option, len(args)))\n sys.exit(ERR_NUM_OPTIONS)\n return args", "def parse_options() -> Namespace:\n\n opt_parser = OptionParser(\n \"liftoff\",\n [\n \"script\",\n \"config_path\",\n \"procs_no\",\n \"gpus\",\n \"per_gpu\",\n \"no_detach\",\n \"verbose\",\n \"copy_to_clipboard\",\n \"time_limit\", # This should be removed in favour of start_by\n \"start_by\",\n \"end_by\",\n \"optimize\",\n \"args\",\n \"filters\",\n \"results_path\",\n \"name\",\n \"max_runs\",\n \"shuffle\",\n ],\n )\n return opt_parser.parse_args()", "def options(self, section, *args):\n cnt = self._check_args('options', 2, 3, args)\n try:\n return ConfigParser.RawConfigParser.options(self, section)\n except ConfigParser.NoSectionError:\n if cnt == 1:\n return args[0]\n raise", "def get_options(defaults, usage, description='',epilog=''):\n parser=OptionParser(usage=usage,description=description,epilog=epilog)\n parser.add_option(\"-i\",\"--infile\",action=\"store\",dest=\"infile\",type=\"string\",\n default=defaults.get('infiles'),\n help='Name of input file of contigs, in .fasta')\n parser.add_option(\"-k\",\"--kmers\",action=\"store\",dest=\"kmers\",type=\"string\",\n default=defaults.get('kmers'),\n help='Sizes of k-mers to use as features, comma separated list')\n (options,args)=parser.parse_args()\n\n return (options, args)" ]
[ "0.5681802", "0.5620391", "0.5565046", "0.541359", "0.53751975", "0.5340231", "0.5283279", "0.5276728", "0.52554685", "0.5251639", "0.51984483", "0.5196909", "0.519201", "0.5174309", "0.5122386", "0.51024044", "0.51020473", "0.5095427", "0.5034037", "0.5030856", "0.50224656", "0.50154805", "0.4999125", "0.4985558", "0.49723536", "0.49649593", "0.49568874", "0.49501932", "0.49457142", "0.49314326", "0.4909702", "0.49089214", "0.4899176", "0.48945922", "0.48856264", "0.48747867", "0.48660278", "0.48649848", "0.48600608", "0.48480764", "0.4844047", "0.48378232", "0.48315924", "0.48313382", "0.48311242", "0.4830954", "0.48240346", "0.48235592", "0.4821683", "0.48210287", "0.48099124", "0.4808087", "0.4804755", "0.47982782", "0.47952265", "0.47937483", "0.47907114", "0.47824603", "0.4775003", "0.4769538", "0.47612673", "0.47603697", "0.4759387", "0.47589877", "0.4757818", "0.47543478", "0.47529587", "0.47526434", "0.4747803", "0.4744013", "0.47419336", "0.4735502", "0.47326082", "0.4730961", "0.4726055", "0.47010168", "0.46821746", "0.46771702", "0.4671631", "0.46689928", "0.46685436", "0.46616855", "0.46570438", "0.46568704", "0.46459672", "0.46450046", "0.46309984", "0.46299446", "0.46276668", "0.4624354", "0.46225905", "0.46185157", "0.46089193", "0.460565", "0.46038994", "0.4596164", "0.45940652", "0.45884493", "0.45779943", "0.45770994" ]
0.5834238
0
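
The record above pairs a docstring-style query with pylatexenc-like code that walks an `argspec` string (`'{'`, `'['`, `'*'`) and collects one parsed argument per specifier. As a rough aid for reading that row, here is a minimal, hypothetical sketch of the same argspec-driven collection loop over a plain string; it is not part of the dataset and not pylatexenc's actual API, and it ignores nesting and whitespace handling.

```python
from typing import List, Optional, Tuple

def parse_args(s: str, pos: int, argspec: str) -> Tuple[List[Optional[str]], int]:
    """Collect one item per argspec character: '{' mandatory group,
    '[' optional group, '*' optional star. Returns (args, new_pos).
    Simplified: no nested braces, no leading-space rules."""
    args: List[Optional[str]] = []
    p = pos
    for kind in argspec:
        if kind == '*':
            # Optional star: record it if present, otherwise None.
            if p < len(s) and s[p] == '*':
                args.append('*')
                p += 1
            else:
                args.append(None)
        elif kind == '[':
            # Optional bracketed argument.
            if p < len(s) and s[p] == '[':
                end = s.index(']', p)
                args.append(s[p + 1:end])
                p = end + 1
            else:
                args.append(None)
        elif kind == '{':
            # Mandatory argument: braced group or a single character.
            if p < len(s) and s[p] == '{':
                end = s.index('}', p)
                args.append(s[p + 1:end])
                p = end + 1
            else:
                args.append(s[p])
                p += 1
        else:
            raise ValueError(f"unknown argument kind {kind!r}")
    return args, p

print(parse_args("*[htb]{content} rest", 0, "*[{"))
# (['*', 'htb', 'content'], 15)
```
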
Replacement function for indirect variable substitution. This is intended for use with re.subn().
def _repl(self, m): section, option = m.group(1, 2) if section == "ENV": return os.getenv(option, "") else: return self.cfg.get(section, option)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def do_variable_substitution(item):\n if isinstance(item, str):\n try:\n item = re_keyref.sub(getdata, item)\n except KeyError, err:\n print >> sys.stderr, (\n \"Use of undefined key in variable substitution: %s\"\n % err)\n elif isinstance(item, list):\n for index in range(len(item)):\n item[index] = do_variable_substitution(item[index])\n elif isinstance(item, dict):\n for key, value in item.iteritems():\n item[key] = do_variable_substitution(value)\n return item", "def replacer(match: 'Match[str]') -> str:\n has_inv, varname = match.groups()\n try:\n res = fixup[varname.casefold()].value\n except KeyError:\n if default is None:\n raise KeyError(f'${varname} not found, known: {[\"$\"+var.var for var in fixup.values()]}') from None\n res = default\n if has_inv is not None:\n if allow_invert:\n try:\n res = '0' if srctools.BOOL_LOOKUP[res.casefold()] else '1'\n except KeyError:\n # If not bool, keep existing value.\n pass\n else:\n # Re-add the !, as if we didn't match it.\n res = '!' + res\n return res", "def replace_with(*, replacement, f=DECORATED):\n return replacement", "def replace_variables(text, vars=zen_settings['variables']):\n\treturn re.sub(r'\\$\\{([\\w\\-]+)\\}', lambda m: m.group(1) in vars and vars[m.group(1)] or m.group(0), text)", "def replace_variable(self, lexeme, variable_value):\r\n\r\n lexeme.replace(\r\n self.variable_lexeme_replace_type,\r\n variable_value\r\n )", "def _string_subst_partial(self, val):\n def repl(m):\n k = m.group('id')\n replacement = self.bib_database.strings[k.lower()] if k.lower() in self.bib_database.strings else k\n pre = '\"' if m.group('pre') != '\"' else ''\n post = '\"' if m.group('post') != '\"' else ''\n return pre + replacement + post\n\n logger.debug('Substitute string definitions inside larger expressions')\n if '#' not in val:\n return val\n\n # TODO?: Does not match two subsequent variables or strings, such as \"start\" # foo # bar # \"end\" or \"start\" # \"end\".\n # TODO: Does not support braces instead of quotes, e.g.: {start} # foo # {bar}\n # TODO: Does not support strings like: \"te#s#t\"\n return self.replace_all_re.sub(repl, val)", "def __Subst(self, m, s, l):\n if s is None:\n s = ''\n #if type(s) is types.LongType:\n #1.5.2: s = str(s)[:-1]\n return self.regexp.Subst(l, DTL.TemplateRegExp.macros[m], str(s))", "def do_subs(self, e):\n for expr, var in self.items():\n e = e.xreplace({var: expr})\n return e", "def replaceVariables(template, virtroot, treename):\n if not template.startswith('${') and not template.endswith('}'): raise SyntaxError('Template format unknown.')\n template = template[2:-1]\n template = template.replace('VIRTROOT', virtroot)\n template = template.replace('TREENAME', treename)\n\n # deal with concurrent / issues by having os.path.join build the final path\n return os.path.join('/', *template.split('/'))", "def replace_with(*, replacement='hello', f=DECORATED):\n return replacement", "def FillForm(string_for_substitution, dictionary_of_vars):\n return_string = string_for_substitution\n for i in re.findall(\"//%%(.*)%%//\", string_for_substitution):\n return_string = re.sub(\"//%%\" + i + \"%%//\", dictionary_of_vars[i],\n return_string)\n return return_string", "def substitute(x, c_name):\n if params.substitution[c_name].get(x) is not None:\n return params.substitution[c_name][x]\n else:\n return x", "def _substitute(self, mapping: VariableMapping) -> 'Substitution':\n return Substitution(\n # Create a new combined mapping. 
Later mappings override earlier\n # ones.\n mapping={\n **mapping,\n **{\n variable: term._substitute(mapping)\n for (variable, term) in self.mapping.items()\n }\n }\n )", "def subst(s, x):\n if isinstance(x, list):\n return [subst(s, xi) for xi in x]\n elif isinstance(x, tuple):\n return tuple([subst(s, xi) for xi in x])\n elif not isinstance(x, Expr):\n return x\n elif is_var_symbol(x.op):\n return s.get(x, x)\n else:\n return Expr(x.op, *[subst(s, arg) for arg in x.args])", "def subst_vars(s, local_vars):\n check_environ()\n\n def _subst(match, local_vars=local_vars):\n var_name = match.group(1)\n if var_name in local_vars:\n return str(local_vars[var_name])\n else:\n return os.environ[var_name]\n\n try:\n return re.sub(r'\\$([a-zA-Z_][a-zA-Z_0-9]*)', _subst, s)\n except KeyError, var:\n raise ValueError(\"invalid variable '$%s'\" % var)", "def substitute(\n self,\n text: str,\n default: Optional[str] = None,\n *,\n allow_invert: bool = False,\n ) -> str:\n if '$' not in text: # Early out, cannot substitute.\n return text\n\n # Cache the pattern used, we can reuse it whenever called again without adding new variables.\n if self._matcher is None:\n # Sort longer values first, so they are checked before smaller\n # counterparts.\n sections: Iterable[str] = map(re.escape, sorted(self._fixup.keys(), key=len, reverse=True))\n # ! maybe, $, any known fixups, then a default any-identifier check.\n self._matcher = re.compile(\n rf'(!)?\\$({\"|\".join(sections)}|[a-z_][a-z0-9_]*)',\n re.IGNORECASE,\n )\n\n fixup = self._fixup # Avoid making self a cell var.\n\n def replacer(match: 'Match[str]') -> str:\n \"\"\"Handles the replacement semantics.\"\"\"\n has_inv, varname = match.groups()\n try:\n res = fixup[varname.casefold()].value\n except KeyError:\n if default is None:\n raise KeyError(f'${varname} not found, known: {[\"$\"+var.var for var in fixup.values()]}') from None\n res = default\n if has_inv is not None:\n if allow_invert:\n try:\n res = '0' if srctools.BOOL_LOOKUP[res.casefold()] else '1'\n except KeyError:\n # If not bool, keep existing value.\n pass\n else:\n # Re-add the !, as if we didn't match it.\n res = '!' 
+ res\n return res\n\n return self._matcher.sub(replacer, text)", "def _replace_variables(self, string):\n\n conversions = {\"VISUAL\": \"TM_SELECTED_TEXT\"}\n for old, new in conversions.items():\n string = string.replace(old, new)\n return string", "def subst_vars(target, source, d):\n var = re.compile('@([a-zA-Z_]+)@')\n with open(source, 'r') as fs:\n with open(target, 'w') as ft:\n for l in fs:\n m = var.search(l)\n if m:\n ft.write(l.replace('@%s@' % m.group(1), d[m.group(1)]))\n else:\n ft.write(l)", "def replace(\n haystack : Exp, haystack_context : Context, haystack_pool : Pool,\n needle : Exp, needle_context : Context, needle_pool : Pool,\n replacement : Exp) -> Exp:\n return _Replacer(haystack_context, haystack_pool, needle, needle_context, needle_pool, replacement).visit(haystack)", "def replaceVariable(theString,oldName,newName):\n return newName.join(re.split('(?<![\\w_])'+oldName+'(?![\\w_])',\n theString))", "def lreplace(pattern, sub, string):\n return re.sub('^%s' % pattern, sub, string)", "def substitution_func_gen(self, variables, code):\n \n #print(self.rule.name, self.external_vars)\n ext, rest = separate(variables, lambda v: v in self.external_vars.keys())\n \n substitution_dict = dict()\n substitution_dict.update( { e : self.external_vars[e] for e in ext } )\n substitution_dict.update( { r : p(r) for r in rest } )\n \n new_format_string = code.format(**substitution_dict)\n \n return ( set(rest), lambda vd = { r : r for r in rest }: new_format_string.format(**vd) )", "def substitute(self, args, lvars):\n if is_String(args) and not isinstance(args, CmdStringHolder):\n args = str(args) # In case it's a UserString.\n try:\n def sub_match(match):\n return self.conv(self.expand(match.group(1), lvars))\n result = _dollar_exps.sub(sub_match, args)\n except TypeError:\n # If the internal conversion routine doesn't return\n # strings (it could be overridden to return Nodes, for\n # example), then the 1.5.2 re module will throw this\n # exception. 
Back off to a slower, general-purpose\n # algorithm that works for all data types.\n args = _separate_args.findall(args)\n result = []\n for a in args:\n result.append(self.conv(self.expand(a, lvars)))\n if len(result) == 1:\n result = result[0]\n else:\n result = ''.join(map(str, result))\n return result\n else:\n return self.expand(args, lvars)", "def fn_sub(self, value):\n\n if isinstance(value, list):\n value, variables = value\n else:\n # only template parameter names, resource logical IDs, and resource attributes, will be parsed\n value, variables = value, {}\n\n for name, target in variables.items():\n value = value.replace('${{{}}}'.format(name), target)\n\n return Functions.SUB_VARIABLE_PATTERN.sub(self._sub_variable, value)", "def apply_variable_substitutions_and_merge(vardefs, extra_variables = {}):\n vardefs = dict(vardefs)\n\n deps = {}\n for var, vardef in vardefs.iteritems():\n if identifier_pattern.match(var):\n deps[var] = variables_referenced(vardef)\n\n #restrict the graph to variables we actually contain definitions for\n for var in deps:\n deps[var] = set([v for v in deps[var] if v in deps])\n\n #find all the places a variable is used\n uses = {}\n for var in deps:\n uses[var] = set()\n for var in deps:\n for var_used in deps[var]:\n uses[var_used].add(var)\n\n #place variables in levels\n levels = compute_graph_levels(deps, uses)\n\n for level, var_list in sorted(levels.items()):\n for var in var_list:\n for use in uses[var]:\n vardefs[use] = substitute_variables(vardefs[use], {var: vardefs[var]})\n\n for var in vardefs:\n vardefs[var] = substitute_variables(vardefs[var], extra_variables)\n\n for ev, ev_def in extra_variables.items():\n assert ev not in vardefs or vardefs[ev] == ev_def, \"Can't define %s twice!\" % ev\n vardefs[ev] = ev_def\n\n return vardefs", "def replace(text,pattern,replace=\"\"):\n\n thisFunc = inspect.currentframe().f_code.co_name\n result = re.sub(pattern,replace,text)\n return result", "def envsubst(string):\n # handle simple un-bracketed env vars like $FOO\n a = _simple_re.sub(_repl_simple_env_var, string)\n\n # handle bracketed env vars with optional default specification\n b = _extended_re.sub(_repl_extended_env_var, a)\n return b", "def substitute(self):\n\n n_chars = len(self.literal)\n term = ['' for i in range(n_chars)]\n\n for i in range(n_chars):\n if self.literal[i] in self.bindings:\n term[i] = self.bindings[self.literal[i]]\n else:\n term[i] = self.literal[i]\n\n return (''.join(term))", "def variableSubstitution(d):\n variable = re.compile(r\"^(.*)\\$\\{(.*)\\}(.*)\")\n\n # translate the dictionary to lower-case keys:\n dd = {k.lower():v for k,v in d.iteritems()}\n maxIterations=4\n \n for i in range(maxIterations):\n anyChanges=False\n for k,v in dd.iteritems():\n if not isinstance(v,str):\n # Only operate on string-valued entries\n continue\n m = variable.match(v)\n if not m:\n continue\n anyChanges = True\n vout = str(v)\n while m:\n key = m.group(2).lower()\n if key not in dd.keys():\n print \"ERROR: variable substitution asks for nonexistent Attribute\", key, \"in\", v\n sys.exit(1)\n if key==k:\n print \"ERROR: self-reference to Attribute\", key, \"in\", v\n vv = dd[key]\n if not isinstance(vv,str):\n print \"ERROR: variable substitution using non-string-valued Attribute\",key\n sys.exit(1)\n vout = m.expand(r\"\\g<1>\"+vv+r\"\\g<3>\")\n m = variable.match(vout)\n dd[k] = vout\n if not anyChanges:\n break # Done\n if i==maxIterations:\n print \"ERROR: Too many iterations in variableSubstitution\"\n sys.exit(1)\n # restore 
case of original dictionary\n for k in d.keys():\n d[k] = dd[k.lower()]\n return", "def replaces(func: Callable[..., Tuple[str]], name: str):\n Replacements._rep[name] = func\n return func", "def replace_with_arg(self, src_arg, tgt_arg):", "def replaceVariables(self, text, args={}, argsOnly=False):\n\n # Prevent too big inclusions\n # if (len(text) > self.max_include_size:\n # return text\n\n # This function is called recursively. To keep track of arguments we need a stack:\n self.arg_stack.append(args)\n \n braceCallbacks = {}\n if not argsOnly:\n braceCallbacks[2] = [None, self.braceSubstitution]\n braceCallbacks[3] = [None, self.argSubstitution]\n \n callbacks = {\n u'{': {\n 'end': u'}',\n 'cb': braceCallbacks,\n 'min': argsOnly and 3 or 2,\n 'max': 3\n },\n u'[': {\n 'end': u']',\n 'cb': {2: None},\n 'min': 2,\n 'max': 2\n }\n }\n text = replace_callback(text, callbacks)\n mArgStack.pop()\n \n return text", "def set_var(self,variable,value):\n self.template=self.template.replace(\"@{}@\".format(variable),value)", "def _rewrite_wrt(self, var):\n if var == \"\":\n return \"\"\n for pred_rd, pred_wrt, inst in reversed(self.all_rw_list):\n if pred_wrt == \"\" or var == pred_wrt:\n continue\n # exact matching\n if var.find(pred_wrt) != -1:\n var = var.replace(pred_wrt, pred_rd)\n break\n return var", "def variable_closure(variables, casify):\r\n def render_variable(children):\r\n \"\"\"\r\n Replace greek letters, otherwise escape the variable names.\r\n \"\"\"\r\n varname = children[0].latex\r\n if casify(varname) not in variables:\r\n pass # TODO turn unknown variable red or give some kind of error\r\n\r\n first, _, second = varname.partition(\"_\")\r\n\r\n if second:\r\n # Then 'a_b' must become 'a_{b}'\r\n varname = ur\"{a}_{{{b}}}\".format(\r\n a=enrich_varname(first),\r\n b=enrich_varname(second)\r\n )\r\n else:\r\n varname = enrich_varname(varname)\r\n\r\n return LatexRendered(varname) # .replace(\"_\", r\"\\_\"))\r\n return render_variable", "def substitution(formula, old_subformula, new_subformula):\n pass\n # ======== YOUR CODE HERE ========", "def xreplace(self, rule):\n value, _ = self._xreplace(rule)\n return value", "def multi_replace(stringlike, pettern_to_replacement_dict):\n string = str(stringlike)\n for pattern, replacement in pettern_to_replacement_dict.items():\n string = string.replace(pattern, replacement)\n return string", "def _substitute(self, formula, subs):\n\n return subs.get(formula, formula)", "def mineval(expr, ctx):\n for k, v in ctx.items():\n if k in expr:\n expr = re.sub(k, str(v), expr)\n return evaluateRPN(expr)", "def expandvars(buffer, env, default=None, skip_escaped=False):\n\n def replace_var(match):\n return env.get(match.group(2) or match.group(1), match.group(0) if default is None else default)\n\n pattern = (r'(?<!\\\\)' if skip_escaped else '') + r'\\$(\\w+|\\{([^}]*)\\})'\n return sub(pattern, replace_var, buffer)", "def Replace(expression, find, replace, start=1, count=-1):\n if find:\n return expression[:start - 1] + expression[start - 1:].replace(find, replace, count)\n else:\n return expression", "def regex_replace(s, old, new, count=0):\n\n return re.sub(old, new, s, count=count)", "def loope(value,arg):\r\n return value.replace(arg,'')", "def filter_re_replace(val: AnyStr, pattern: str, repl: str) -> str:\n return re.sub(pattern, repl, str(val))", "def str_replace(pat, rep, subject):\n return subject.replace(pat, rep)", "def VarNameReplace(old, new, *vars):\n\t#syntax = [ \"rename variables\" ]\n\tsyntax = []\n\tif not vars or \"*\" in 
vars:\n\t\tvars = None\n\tvd = spssaux.VariableDict(vars)\n\tfor v in vd:\n\t\toldname = v.VariableName\n\t\tnewname = oldname.replace(old,new).strip()\n\t\tif newname.lower() != oldname.lower():\n\t\t\tsyntax += [ \"(%s=%s)\" % (oldname, newname) ]\n\tif syntax:\n\t\tsyntax.insert(0, \"rename variables\")\n\t\tsyntax += [ spssterm ]\n\t\tif __debug__:\n\t\t\tprint \" \".join(syntax)\n\t\tspss.Submit(syntax)", "def replace_substrings(s, mapping):\n for (s1, repl) in mapping:\n s = s.replace(s1, repl)\n return s", "def map_variable(variable_name):\n return '%recipient.' + variable_name + '%'", "def subst(self, value, filter=None):\n\n if isinstance(value, Literal):\n return value._value\n elif isinstance(value, tuple):\n return tuple(self.subst(i, filter) for i in value)\n elif isinstance(value, list):\n return list(self.subst(i, filter) for i in value)\n elif isinstance(value, dict):\n return {i: self.subst(value[i], filter) for i in value}\n elif isinstance(value, StringTypes):\n def subfn(mo):\n var = mo.group(0)\n\n if var == \"$$\":\n return \"$\"\n\n # Apply variable filters\n parts = var[2:-1].split(\"|\")\n value = self.evaluate(parts[0])\n\n if len(parts) > 1:\n # Filters supplied directly\n for part in parts[1:]:\n if len(part) == 0:\n # Empty filter can be used to disable auto filter\n continue\n else:\n value = self.callfilter(part, value)\n elif filter:\n # Use auto-filter if specified\n for part in filter.split(\"|\"):\n value = self.callfilter(part, value)\n\n return value\n return re.sub(r\"\\$\\$|\\$\\(.*?\\)\", subfn, value)\n else:\n return value", "def sometimes_replace_1x_with_x(expr, var_name):\n if random.choice([0, 1]):\n expr = re.sub(r'(?<![0-9.])1{}'.format(var_name), r'{}'.format(var_name), expr)\n return expr", "def substitute(self,s,x):\r\n\t\t\r\n\t\t# turn substitution into top line\r\n\t\ttry:\r\n\t\t\tt = Li(s)\r\n\t\t\tb = Li(1)\r\n\t\t\t\r\n\t\t# unless it is a list of lines\r\n\t\texcept:\r\n\t\t\tt = Li(s[0])\r\n\t\t\tb = Li(s[1])\r\n\t\t\r\n\t\t# split variable from power\r\n\t\th = Te._chop(x)\r\n\t\tx = h[0]\r\n\t\t\r\n\t\t# assume power of 1 for substituted variable, but revise if found in string\r\n\t\tp = 1\r\n\t\ttry:\r\n\t\t\tp = int(h[1])\r\n\t\texcept:\r\n\t\t\tpass\r\n\t\t\r\n\t\t# exponents in each term\r\n\t\te = [i.look(x) for i in self]\r\n\t\t\r\n\t\t# adjust for power of substituted variable\r\n\t\te = [i // p for i in e]\r\n\t\t\r\n\t\t# max, min powers of substitution\r\n\t\ttry:\r\n\t\t\ta = max(e)\r\n\t\t\tm = min(e)\r\n\t\texcept:\r\n\t\t\ta = 0\r\n\t\t\tm = 0\r\n\t\t\r\n\t\t# truncate max and min powers \r\n\t\tif a < 0:\r\n\t\t\ta = 0\r\n\t\tif m > 0:\r\n\t\t\tm = 0\r\n\t\t\t\r\n\t\t# dictionaries of calculated terms for top and bottom\r\n\t\tf = {}\r\n\t\tg = {}\r\n\t\t\t\r\n\t\t# expand top and bottom to truncated max and min\r\n\t\tq,f = Li._expand(t,-m,f)\r\n\t\tr,g = Li._expand(b,a,g)\r\n\t\tq = Li(q,c=False)\r\n\t\tr = Li(r,c=False)\r\n\t\t\r\n\t\t# store results in dictionaries\r\n\t\ty = {-m: q}\r\n\t\tz = {a: r}\r\n\t\t\r\n\t\t# make denominator\r\n\t\td = q.multiply(r)\r\n\t\t\r\n\t\t# convert each term\r\n\t\tl = Li([])\r\n\t\tfor n,i in enumerate(self):\r\n\t\t\t\r\n\t\t\t# exponent of substitution\r\n\t\t\tw = e[n]\r\n\t\t\t\r\n\t\t\t# divide out variable\r\n\t\t\tv = Te({x: -w * p})\r\n\t\t\ti = i.multiply(v)\r\n\t\t\t\r\n\t\t\t# retrieve top expansion\r\n\t\t\tif (w - m) in y:\r\n\t\t\t\tu = y[w - m]\r\n\t\t\t\t\r\n\t\t\t# or calculate\r\n\t\t\telse:\r\n\t\t\t\tu,f = Li._expand(t,w - m,f)\r\n\t\t\t\tu = 
Li(u,c=False)\r\n\t\t\t\ty[w - m] = u\r\n\t\t\t\r\n\t\t\t# retrieve bottom expansion\r\n\t\t\tif (a - w) in z:\r\n\t\t\t\tc = z[a - w]\r\n\t\t\t\r\n\t\t\t# or calculate\r\n\t\t\telse:\r\n\t\t\t\tc,g = Li._expand(b,a - w,g)\r\n\t\t\t\tc = Li(c,c=False)\r\n\t\t\t\tz[a - w] = c\r\n\t\t\t\r\n\t\t\t# multiply and add\r\n\t\t\tu = u.multiply(c)\r\n\t\t\tu = u.multiply(i)\r\n\t\t\tl = l.add(u)\r\n\t\t\r\n\t\treturn [l,d]", "def add_substitution(self, pattern, repl):\r\n\r\n self.substitutions.append( (re.compile(pattern), repl) )", "def doEdit(var, value, target):\n currentValue = target.get(var, \"\")\n newValue = Simplifier.simplify(str(value).replace(f\"{{{var}}}\", str(currentValue)))\n target[var] = newValue", "def substitute(self, substitution_map):\n for element_name in substitution_map:\n\n assert (is_constant(element_name) or is_variable(element_name)) and \\\n type(substitution_map[element_name]) is Term\n return self.subsitute_helper(copy.deepcopy(substitution_map), False)", "def replacement(cls, search_pattern: str, replacement: str) -> PhonTransform:\n sub_func = lambda match: replacement\n return cls(search_pattern, sub_func)", "def note_replace(self, evarname, note, in_):\n self.note_add(evarname, note, replace=True, in_=in_)", "def substitute(self, substitutions, new_name=None):\n\n if new_name is None:\n new_name = self.name\n \n substitute = translator(substitutions)\n new_math = substitute(self.math)\n new_variables = {substitute(v) for v in self.variables}\n new_first_derivatives = {substitute(variable): substitute(derivative) for\n variable, derivative in self._first_derivatives.iteritems()}\n new_second_derivatives = {tuple(sorted(map(substitute, variable_pair))): \n substitute(second_derivative) for \n variable_pair, second_derivative in \n self._second_derivatives.iteritems()}\n return Function(new_math, variables=new_variables, \n first_derivatives=new_first_derivatives, \n second_derivatives=new_second_derivatives,\n name=new_name)", "def substitute(self, subs, **kwargs):\n return self", "def variable_subs(self, variable, newexpr):\n cls = type(self)\n newexpr = cls(newexpr)\n try:\n index = list(self.variables).index(variable)\n except ValueError:\n index = None\n if index is not None:\n head, data = self.pair\n result = cls.Number(0)\n variables = cls.variables\n for exps, coeff in data.iteritems():\n term = cls.Number(1)\n for i,exp in enumerate(exps):\n if exp:\n if i==index:\n term *= newexpr**exp\n else:\n term *= cls.Symbol(variables[i])**exp\n result += term * cls.Number(coeff)\n return result\n raise NotImplementedError(`self.variables, variable, index`)", "def _var_quote_sub(self, text, VARS):\n ## No need to live on class. Can be moved to tools. 
- Add assert test.\n qvars = map(lambda x: \"\\{ \" + x + \" \\}\", VARS)\n return text % tuple(qvars)", "def _substitute(template, fuzzer, benchmark):\n return template.format(fuzzer=fuzzer, benchmark=benchmark)", "def replace_variable(self, variable):\r\n if variable == 'x':\r\n return self.value\r\n if variable == 't':\r\n return self.timedelta\r\n raise ValueError(\"Invalid variable %s\", variable)", "def suffix_replace(original, old, new):\n ...", "def in_place_substitute(self):\r\n if self.substitute is not None:\r\n node = self.convert_type()\r\n self.leaf_replace(node) # for internals only\r\n self.root_replace(node)", "def replace_params(self):\n raw_sql = self.raw_sql\n for placeholder in self.to_replace:\n newreg = re.compile(placeholder)\n repl = self.get_replacement_value(placeholder)\n if repl:\n raw_sql = newreg.sub(str(repl), raw_sql)\n self.sql = raw_sql", "def expand_vars(string, env_vars=None):\n if env_vars is None:\n env_vars = os.environ\n # create a replacement callback function that uses env_vars as it's first\n # argument, additional arguments will be added after it\n repl_callback = functools.partial(_var_repl, env_vars)\n return re.sub(r'\\$(?P<variable>[a-zA-Z]\\w*)((?=[\\W])|$)', repl_callback, string)", "def replaceReg(self, var, expr ):\n # newZ3 = substitute( self.z3, (z3from, z3to) )\n if( self.cond == CT.NOT ):\n return Cond( CT.NOT, None, self.right.replaceReg( var, expr ), self.cleaned) \n elif( isArithmeticComp(self.cond)):\n return Cond(self.cond, self.left.replaceReg( var, expr ), self.right.replaceReg( var, expr ), cleaned = self.cleaned)\n elif( isLogicalOp(self.cond) ):\n return Cond(self.cond, self.left.replaceReg( var, expr), self.right.replaceReg( var, expr), cleaned = self.cleaned)\n else:\n return Cond(self.cond, self.left, self.right)", "def rename_regvar(*args):\n return _ida_frame.rename_regvar(*args)", "def apply_rule(word):\n return re.sub(search, replace, word)", "def expand_vars(self, line, name_dict):\n if '%' not in line:\n return line\n for k, v in name_dict.iteritems():\n line = line.replace('%VAR_' + k + '%', escape(v))\n return line", "def register_subst_rule(self, original_name, args, body):\n key = self._get_subst_rule_key(args, body)\n reg_value = self.subst_rule_registry.get(key)\n\n if reg_value is None:\n # These names are temporary and won't stick around.\n new_name = self.make_unique_var_name(\"_lpy_tmp_\"+original_name)\n self.subst_rule_registry[key] = (new_name, args, body)\n else:\n new_name, _, _ = reg_value\n\n self.subst_rule_old_names.setdefault(key, []).append(original_name)\n return new_name", "def substitute_macros(text):\n f_text = text\n for (pattern,replacement) in context.environment.items():\n replacement = replacement.replace(os.path.sep,'/')\n f_text = f_text.replace('$(%s)' % pattern.upper(), replacement)\n return f_text", "def replace_with(self, replacement):\n\n # FIND NAMES IN replacement\n parts = list(regex_parameters.split(replacement, include_separators=True))\n\n def replacer(tokens):\n acc = []\n for s, n in zip(parts, parts[1:]):\n acc.append(s)\n acc.append(text(tokens[n]))\n acc.append(parts[-1])\n return \"\".join(acc)\n\n return self / replacer", "def _expand_variables(input_str, cmake_vars):\n def replace(match):\n if match.group(1) in cmake_vars:\n return cmake_vars[match.group(1)]\n return \"\"\n return _CMAKE_ATVAR_REGEX.sub(replace,_CMAKE_VAR_REGEX.sub(replace, input_str))", "def substitution(plainText, key):\n return plainText", "def _re_sub_callback(match_object):\n return 
_replacement(match_object.group()[2:-1])", "def replace_vars(params, contents):\n if isinstance(contents, str):\n contents = [contents]\n replace_contents = []\n\n if params != None and contents != None:\n for content in contents:\n replace_content = content\n for match in regexp_replace_var.findall(content):\n if match in params:\n w_param = params[match][0].__str__()\n _logger.debug(\"match variable {} , replace by {}\".format(match, w_param))\n if not w_param.isdigit():\n w_param = w_param.replace(\"\\\"\", \"\").replace(\"\\'\", \"\").replace(\"\\\\\", \"\\\\\\\\\")\n replace_content = replace_content.replace(\"${\" + match + \"}\", w_param)\n else:\n replace_content = replace_content.replace(\"\\\"${\" + match + \"}\\\"\", w_param)\n \n else:\n replace_content = replace_content.replace(\"${\" + match + \"}\", \"\") \n _logger.debug(\"replace_content={}\".format(replace_content))\n replace_contents.append(replace_content)\n return replace_contents", "def varStringMod(self, arg):\n\t\targ[0] = \"'\" + arg[0] + \"'\"\n\t\treturn arg", "def replace(name, newobject):", "def replacer(s,replace_dict):\n for k,v in replace_dict.items(): s = s.replace(k,v)\n return s", "def substitute_with_bindings(self,bindings):\n\n n_chars = len(self.literal)\n term = ['' for i in range(n_chars)]\n\n for i in range(n_chars):\n if self.literal[i] in bindings:\n term[i] = bindings[self.literal[i]]\n else:\n term[i] = self.literal[i]\n\n return (''.join(term))", "def test_replace(self, in_, kwargs, out_):\n assert in_.replace(**kwargs) == out_", "def replace_with(*, replacement):\n def _apply_decorator(f):\n return replacement\n return _apply_decorator", "def replace(self, string):\n for i, j in self.defs.items():\n string = string.replace(i, j)\n return string", "def expand(self, s, lvars):\n if is_String(s):\n try:\n s0, s1 = s[:2]\n except (IndexError, ValueError):\n return s\n if s0 != '$':\n return s\n if s1 == '$':\n # In this case keep the double $'s which we'll later\n # swap for a single dollar sign as we need to retain\n # this information to properly avoid matching \"$(\"\" when\n # the actual text was \"$$(\"\" (or \"$)\"\" when \"$$)\"\" )\n return '$$'\n elif s1 in '()':\n return s\n else:\n key = s[1:]\n if key[0] == '{' or '.' 
in key:\n if key[0] == '{':\n key = key[1:-1]\n\n # Store for error messages if we fail to expand the\n # value\n old_s = s\n s = None\n if key in lvars:\n s = lvars[key]\n elif key in self.gvars:\n s = self.gvars[key]\n else:\n try:\n s = eval(key, self.gvars, lvars)\n except KeyboardInterrupt:\n raise\n except Exception as e:\n if e.__class__ in AllowableExceptions:\n return ''\n raise_exception(e, lvars['TARGETS'], old_s)\n\n if s is None and NameError not in AllowableExceptions:\n raise_exception(NameError(key), lvars['TARGETS'], old_s)\n elif s is None:\n return ''\n\n # Before re-expanding the result, handle\n # recursive expansion by copying the local\n # variable dictionary and overwriting a null\n # string for the value of the variable name\n # we just expanded.\n #\n # This could potentially be optimized by only\n # copying lvars when s contains more expansions,\n # but lvars is usually supposed to be pretty\n # small, and deeply nested variable expansions\n # are probably more the exception than the norm,\n # so it should be tolerable for now.\n lv = lvars.copy()\n var = key.split('.')[0]\n lv[var] = ''\n return self.substitute(s, lv)\n elif is_Sequence(s):\n def func(l, conv=self.conv, substitute=self.substitute, lvars=lvars):\n return conv(substitute(l, lvars))\n return list(map(func, s))\n elif callable(s):\n\n # SCons has the unusual Null class where any __getattr__ call returns it's self, \n # which does not work the signature module, and the Null class returns an empty\n # string if called on, so we make an exception in this condition for Null class\n # Also allow callables where the only non default valued args match the expected defaults\n # this should also allow functools.partial's to work.\n if isinstance(s, SCons.Util.Null) or {k for k, v in signature(s).parameters.items() if\n k in _callable_args_set or v.default == Parameter.empty} == _callable_args_set:\n\n s = s(target=lvars['TARGETS'],\n source=lvars['SOURCES'],\n env=self.env,\n for_signature=(self.mode == SUBST_SIG))\n else:\n # This probably indicates that it's a callable\n # object that doesn't match our calling arguments\n # (like an Action).\n if self.mode == SUBST_RAW:\n return s\n s = self.conv(s)\n return self.substitute(s, lvars)\n elif s is None:\n return ''\n else:\n return s", "def fix_varname(s):\n t = str(s).translate(TRANS_VARS)\n if t[0] not in VALID_CHARS1:\n t = '_%s' % t\n while t.endswith('_'):\n t = t[:-1]\n return t", "def _str_replace(mystring, rd):\n import re\n patternDict = {}\n myDict = {}\n for key,value in rd.items():\n pattern = re.compile(re.escape(key), re.IGNORECASE)\n patternDict[value] = pattern\n for key in patternDict:\n regex_obj = patternDict[key]\n mystring = regex_obj.sub(key, mystring)\n return mystring", "def replace_in_string(s, args_dict):\n for key, value in args_dict.items():\n s = s.replace(key, value)\n for key, value in args_dict.items():\n s = s.replace(key, value)\n for key, value in args_dict.items():\n s = s.replace(key, value)\n return s", "def replace_partial_args(\n func: Callable, target: Any, repl: Any,\n match_func: Optional[Callable] = None,\n replace_func: Optional[Callable] = None,\n) -> Callable:\n if match_func is None:\n def match_func(i, v, t): return v is t\n if replace_func is None:\n def replace_func(v, r): return r\n args = [(replace_func(v, repl) if match_func(i, v, target) else v)\n for i, v in enumerate(func.args)]\n kwargs = {k: (replace_func(v, repl) if match_func(k, v, target) else v)\n for k, v in func.keywords.items()}\n new_func = 
type(func)(func.func, *args, **kwargs)\n return new_func", "def render_variable(children):\r\n varname = children[0].latex\r\n if casify(varname) not in variables:\r\n pass # TODO turn unknown variable red or give some kind of error\r\n\r\n first, _, second = varname.partition(\"_\")\r\n\r\n if second:\r\n # Then 'a_b' must become 'a_{b}'\r\n varname = ur\"{a}_{{{b}}}\".format(\r\n a=enrich_varname(first),\r\n b=enrich_varname(second)\r\n )\r\n else:\r\n varname = enrich_varname(varname)\r\n\r\n return LatexRendered(varname) # .replace(\"_\", r\"\\_\"))\r", "def apply_variable_substitutions_and_merge_repeatedly(vardefs, extra_variables = {}):\n while True:\n new_vardefs = apply_variable_substitutions_and_merge(vardefs, extra_variables)\n if new_vardefs == vardefs:\n return new_vardefs\n else:\n vardefs = new_vardefs", "def __call__(self, ustring):\n ustring = unicode(ustring)\n return self.charef_rex.sub(self._replacer, ustring)", "def _substitute(self, value, group=None, namespace=None):\n if isinstance(value, list):\n return [self._substitute(i, group=group, namespace=namespace)\n for i in value]\n elif isinstance(value, str):\n # Treat a backslash followed by the dollar sign \"\\$\"\n # the same as the string template escape \"$$\" as it is\n # a bit more natural for users\n if r'\\$' in value:\n value = value.replace(r'\\$', '$$')\n tmpl = self.Template(value)\n ret = tmpl.safe_substitute(\n self.StrSubWrapper(self, group=group, namespace=namespace))\n return ret\n elif isinstance(value, dict):\n # Substitute template variables in both key and value\n return {self._substitute(key, group=group, namespace=namespace):\n self._substitute(val, group=group, namespace=namespace)\n for key, val in value.items()}\n else:\n return value", "def substitute(self, substitution_map):\n for element_name in substitution_map:\n assert (is_constant(element_name) or is_variable(element_name)) and \\\n type(substitution_map[element_name]) is Term\n\n if is_constant(self.root) or is_variable(self.root): # we need to to deal only with the root\n if self.root in substitution_map.keys():\n return substitution_map[self.root] # change it with it is in the map\n else:\n return Term(self.root) # else return it as is\n\n else:\n assert is_function(self.root) # we have a function\n if self.root in substitution_map.keys():\n root = substitution_map[self.root] # update the root if it is in map\n else:\n root = self.root # else, leave it as it is, without changing it to Term\n args = [] # this is our args\n for index, arg in enumerate(self.arguments): # for every arg, switch it with it's substitute\n args.append(arg.substitute(substitution_map)) # recursive call to substitute\n return Term(root, args)\n # Task 9.1", "def sub(self, replace, string, count=0):\n return self.re.sub(replace, string, count)", "def substitute(expr, vars):\n subs = {}\n for atom in expr.atoms():\n name = str(atom)\n try:\n subs[atom] = vars[name]\n except KeyError:\n pass\n\n value = expr.subs(subs).evalf()\n if isinstance(value, SymbNumber) and value == int(value):\n return int(value)\n return value", "def replace_constant(self, src, dst):\n replace_count = 0\n for ii in self.__sections:\n for jj in range(len(ii.content)):\n line = ii.content[jj]\n replaced = re.sub(r'(\\$%s|\\$%s)' % (src, hex(src)), r'$%s' % hex(dst), line)\n if line != replaced:\n ii.content[jj] = replaced\n replace_count += 1\n if 1 > replace_count:\n raise RuntimeError(\"could not find constant to be replaced\")\n elif 1 < replace_count:\n raise RuntimeError(\"found 
constant to be replaced more than once, source destroyed\")", "def substitutes(string, **kwargs):\n return json.dumps(substitute(json.loads(string), **kwargs))", "def scons_subst_once(strSubst, env, key):\n if isinstance(strSubst, str) and strSubst.find('$') < 0:\n return strSubst\n\n matchlist = ['$' + key, '${' + key + '}']\n val = env.get(key, '')\n def sub_match(match, val=val, matchlist=matchlist):\n a = match.group(1)\n if a in matchlist:\n a = val\n if is_Sequence(a):\n return ' '.join(map(str, a))\n else:\n return str(a)\n\n if is_Sequence(strSubst):\n result = []\n for arg in strSubst:\n if is_String(arg):\n if arg in matchlist:\n arg = val\n if is_Sequence(arg):\n result.extend(arg)\n else:\n result.append(arg)\n else:\n result.append(_dollar_exps.sub(sub_match, arg))\n else:\n result.append(arg)\n return result\n elif is_String(strSubst):\n return _dollar_exps.sub(sub_match, strSubst)\n else:\n return strSubst", "def replace(self, *args, **kwargs): # real signature unknown\r\n pass" ]
[ "0.6543323", "0.64928055", "0.64095277", "0.640149", "0.6348403", "0.6247242", "0.61828613", "0.6170363", "0.60472184", "0.6013036", "0.6008018", "0.59389454", "0.5931353", "0.58995754", "0.5893741", "0.58664304", "0.58525014", "0.58441067", "0.5818302", "0.5775025", "0.5766904", "0.576294", "0.57571495", "0.5750464", "0.57456964", "0.5735117", "0.5705159", "0.5697676", "0.569603", "0.5686275", "0.5678311", "0.56698436", "0.56685406", "0.56683594", "0.56536734", "0.56058466", "0.5591921", "0.5585454", "0.55608237", "0.55558604", "0.5542706", "0.5540196", "0.5534714", "0.55197906", "0.55194694", "0.5517976", "0.5512222", "0.5498842", "0.5487821", "0.5480841", "0.5475907", "0.5463926", "0.5462092", "0.5461572", "0.54576194", "0.5457339", "0.5447971", "0.5442562", "0.54371804", "0.5429404", "0.54288924", "0.5419697", "0.5413241", "0.5410717", "0.54046625", "0.54006416", "0.5394361", "0.5383986", "0.53838056", "0.53824854", "0.5381631", "0.5366431", "0.5364821", "0.5363694", "0.53587544", "0.53554857", "0.53550404", "0.53549635", "0.53513765", "0.533798", "0.53239834", "0.5307322", "0.53034973", "0.53031385", "0.5302386", "0.53007424", "0.52997774", "0.52977365", "0.52934986", "0.529332", "0.5266779", "0.52506024", "0.5250345", "0.524616", "0.52395123", "0.5234324", "0.5233935", "0.5231804", "0.5230069", "0.5228698", "0.5227294" ]
0.0
-1
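
The record above matches the query "Replacement function for indirect variable substitution... for use with re.subn()" to a `_repl(self, m)` callback that resolves `${section:option}` references from the environment or a config object. As context for that row, here is a small self-contained sketch of the same pattern using only the standard library; the section/option names are hypothetical and the sketch is illustrative, not the original class.

```python
import os
import re
import configparser

# Hypothetical config containing an indirect reference of the form ${section:option}.
cfg = configparser.ConfigParser(interpolation=None)
cfg.read_string("[paths]\nroot = /srv/app\nlogdir = ${paths:root}/logs\n")

# group(1) is the section name, group(2) the option name.
pattern = re.compile(r"\$\{([^:}]+):([^}]+)\}")

def repl(match):
    """Replacement callback: resolve from the environment or from the config."""
    section, option = match.group(1, 2)
    if section == "ENV":
        return os.getenv(option, "")
    return cfg.get(section, option)

# re.subn returns the substituted string and the number of replacements made,
# so a caller can loop until no indirect references remain.
value, n = re.subn(pattern, repl, cfg.get("paths", "logdir"))
print(value, n)  # /srv/app/logs 1
```
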
Get an option, perhaps with a default value.
def get(self, option, default = None, section = None): if section is None: section = self.default_section if default is not None and not self.cfg.has_option(section, option): return default val = self.cfg.get(section, option) while True: val, modified = self._regexp.subn(self._repl, val, 1) if not modified: return val
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _get_option(self, name, datatype, default):\n return config.get_option(self._options,\n name,\n type=datatype,\n default=default)", "def __get_option(self, option):\n if option in Config.OPTIONS.keys():\n _default = Config.OPTIONS[option]\n elif option in Config.FILE_OPTIONS.keys():\n _default = Config.FILE_OPTIONS[option]\n elif option in Config.PATH_OPTIONS.keys():\n _default = Config.PATH_OPTIONS[option]\n else:\n _default = None # XXX ??\n \n _val = self.__get(option)\n\n if _val: \n return _val\n else:\n return _default", "def get(self, option, default=None):\n\t\treturn self._get_raw(option, '', default)", "def get_option(self, option, default=None):\n splitvals = option.split('/')\n section, key = \"/\".join(splitvals[:-1]), splitvals[-1]\n\n try:\n value = self.get(section, key)\n value = self._str_to_val(value)\n except ValueError, s:\n logger.warning(\"get failed for {}/{}: {}\".format(section,key,s))\n value = default\n except NoSectionError:\n value = default\n except NoOptionError:\n value = default\n\n return value", "def get_option(self, option):\n\t\treturn self.options[option]", "def get_default(cls, opt):\n try:\n return cls._OPTS[opt].default\n except KeyError:\n raise ValueError('unknown option name %r' % (opt,))", "def get(self, option):\n return get(self.name, option)", "def opt(self, key, default=False):\n if key not in self.options:\n return default\n return self.options.get(key)", "def get_option(self, option):\n if not self._options.has_key(option):\n raise KeyError, \"Invalid option: \" + option\n else:\n return self._options.get(option)", "def find_option(self, option_name, default=None):\n value = (\n getattr(self.pconfig.option, option_name, None) or\n self.pconfig.getini(option_name)\n )\n return value if value else default", "def option(number, default='no'):\n return answer(number).get('options', default)", "def get(option, default = None):\n\treturn _cfg.get('rosshm', option, fallback = default)", "def get_option(self, name, default_value, _type):\n _hash = name.strip()\n option = self.lines[_hash]\n if not option:\n self.lines[_hash] = option = ConfigOption(self.nextId, name, default_value)\n self.nextId += 1\n option.set_default_value(default_value, _type)\n return option", "def getOption(self, name):\n\n if name.lower() in self.defaultOptions:\n return self.options[name.lower()][1]\n else:\n raise Error('%s is not a valid option name.' 
% name)", "def get(self, **kws):\n assert len (kws)==1,`kws`\n key, default = kws.items()[0]\n if key not in self.__dict__:\n if VERBOSE:\n print 'Options.get: adding new option: %s=%r' % (key, default)\n self.__dict__[key] = default\n value = self.__dict__[key]\n if value is None:\n value = self.__dict__[key] = default\n return value", "def get(self, section, option, fallback=None):\n if not fallback: # attempt to get default value as fallback\n try:\n fallback = Section[section].value[option].value\n except KeyError: # no default value for this option\n fallback = None\n return self.parser.get(section, option, fallback=fallback)", "def get_option(self, key):\n return self.options[key]", "def getOption(arg):\n return (False, \"\", \"\")", "def getdefault(self, option, type=str, default=None):\r\n return self.get(Config.DEFAULT_SECTION, option, type, default=default)", "def get(self, section, option, type_=six.string_types, default=None):\n return self._getinstance(section, option, type_, default)", "def get_option(self, name: str, section: str = None) -> str:\n if self.default_vars and name in self.default_vars:\n return self.default_vars[name]\n else:\n ops = self.options\n if name in ops:\n return ops[name]\n else:\n raise ConfigurableError(f'No such option: {name}')", "def get(self, section, option, default=None, **kwargs):\n section = section.lower()\n option = option.lower()\n if self.has_section(section) and self.has_option(section, option):\n # Super does not work for ConfigParser as not inherited from object\n out = configparser.ConfigParser.get(self, section, option, **kwargs)\n # Count empty strings as not provided\n if not out:\n return default\n else:\n return out\n else:\n return default", "def get_config_option(option_name, optional=False):\n option = self.options.get(option_name)\n\n if not option and optional is False:\n err = \"'{0}' is mandatory and is not set in the app.config file. You must set this value to run this function\".format(option_name)\n raise ValueError(err)\n else:\n return option", "def getOption(optionType: str, optionValue: str):\n config: str = openConfigFile()\n return locateOption(config, optionType, optionValue)", "def get(cls,name,as_type = str):\n inst = cls.inst()\n if name in inst.options:\n return as_type(inst.options[name])\n else:\n raise OptionsError(\"No option with key '%s'\" % name)", "def optval(self, option, default=None):\n return getattr(self._options_, option, default)", "def get(self, section, option, *args):\n cnt = self._check_args('get', 3, 4, args)\n try:\n return ConfigParser.RawConfigParser.get(self, section, option)\n except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):\n if cnt == 1:\n return args[0]\n raise", "def GetSetting(appname, section, key, default=None):\n settings = _OptionsDB(appname)\n try:\n return settings[section, key]\n except config.configparser.Error:\n if default is not None:\n return default\n raise", "def get_config_option(option_name, optional=False):\n option = self.options.get(option_name)\n\n if not option and optional is False:\n err = \"'{0}' is mandatory and is not set in the app.config file. 
You must set this value to run this function\".format(option_name)\n raise ValueError(err)\n else:\n return option", "def readOption (self, optName) :\n if not optName in self.config:\n return None\n return self.config[optName]", "def get_config_option(self, option_name, optional=False):\n option = self.options.get(option_name)\n\n if not option and optional is False:\n err = \"'{0}' is mandatory and is not set in the app.config file. You must set this value to run this \" \\\n \"function\".format(option_name)\n raise ValueError(err)\n else:\n return option", "def get(self, section, option):\n if self._dict.has_key(section):\n return self._dict[section].get(option, None)\n return None", "def safe_get(self, section, option, default=None):\n try:\n return self.get(section, option)\n except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):\n if default is None:\n raise\n else:\n #TODO: add logging\n #gvlogger.info(\"Can't find section '%s' option '%s' in configuration file, reverting to defaults\" , section, option)\n return default", "def get(self, section, option, type=str, default=None):\r\n return self._getinstance(section, option, type, default=default)", "def get_option(key: str) -> Any:\n with _config_lock:\n config_options = get_config_options()\n\n if key not in config_options:\n raise RuntimeError('Config key \"%s\" not defined.' % key)\n return config_options[key].value", "def get_option(self, name):\n option_df = self.dismod_file.option\n records = option_df[option_df.option_name == name]\n if len(records) == 1:\n return records.option_value.iloc[0]\n else:\n raise KeyError(f\"Option {name} not found in options\")", "def get_option(self, key, default=None):\n current_profile = \"profiles.{}.{}\".format(self.get_profile(), key)\n global_profile = \"profiles.global.{}\".format(key)\n return self.__get_option__(current_profile, self.__get_option__(global_profile, default))", "def getint(self, option, default = None, section = None):\n return int(self.get(option, default, section))", "def pop_default(self, option: str) -> Optional[Any]:\n index = self._get_index(option)\n assert index is not None\n value = self._options[index]\n del self._options[index]\n default = value[1] if isinstance(value, tuple) else None\n return default", "def get_option_generic(pytest_config: pytest.Config, flag: str, default):\n cli_flag = flag.replace(\"-\", \"_\")\n ini_flag = flag\n\n # Lowest priority\n use = default\n\n # Middle priority\n if pytest_config.getini(ini_flag) is not None:\n use = pytest_config.getini(ini_flag)\n\n # Top priority\n if pytest_config.getoption(cli_flag) is not None:\n use = pytest_config.getoption(cli_flag)\n\n return use", "def get(self, key, default=''):\n key = self.optionxform(key)\n cached = self._cache.get(key, _use_default)\n if cached is not _use_default:\n return cached\n name_str = self.name\n key_str = to_unicode(key)\n settings = ProductSetting.select(self.env,\n where={'product': self.product,\n 'section': name_str,\n 'option': key_str})\n if len(settings) > 0:\n value = settings[0].value\n else:\n for parent in self.config.parents:\n value = parent[self.name].get(key, _use_default)\n if value is not _use_default:\n break\n else:\n if default is not _use_default:\n option = Option.registry.get((self.name, key))\n value = option.default if option else _use_default\n else:\n value = _use_default\n if value is _use_default:\n return default\n if not value:\n value = u''\n elif isinstance(value, basestring):\n value = to_unicode(value)\n self._cache[key] = 
value\n return value", "def _coalesceOption(self, name, default = ''):\n return self.view.settings().get(name, self.options.get(name, default))", "def get_config_value(config_parser, section, option, required=True, get_type=str, default=None):\n if get_type is int:\n getter = config_parser.getint\n elif get_type is bool:\n getter = config_parser.getboolean\n else:\n getter = config_parser.get\n\n try:\n value = getter(section, option)\n except ConfigParser.Error:\n if required:\n raise ConfigError(\"Missing configuration '{}' in section '{}'\".format(option, section))\n return default\n\n return value", "def get_option_value(self, key):\n\n # Check the key.\n self.__assert_option(key)\n\n # Get and return the value.\n return self.__opt[key]", "def get_default(section, option=\"\"):\n\tif not option:\n\t\tif defaults.has_key(section):\n\t\t\treturn defaults[section]\n\telse:\n\t\tif defaults.has_key(section):\n\t\t\tif defaults[section].has_key(option):\n\t\t\t\treturn defaults[section][option]\n\treturn None", "def get(self, option_id, default=None, entity=None, additional_options={}, transform=True):\n key = self._option_key(option_id, entity)\n\n if key in additional_options:\n value = additional_options.get(key)\n elif key in self._config:\n holder = self._config[key]\n if option_id.multiple:\n value = holder.get_combined(option_id.default)\n else:\n value = holder.get_highest_priority().value\n elif default is not None:\n value = default\n else:\n value = option_id.default\n # If someone asks for an option that we didn't know about then we add the default value\n # to the config so that it can be logged and printed\n self.set_default_values([option_id])\n\n return_value = value\n if transform and option_id.transform is not None:\n return_value = option_id.transform(value)\n\n try:\n return self._expand_value(return_value, multiple=option_id.multiple)\n except RuntimeError:\n raise InvalidValue(\n 'Circular reference found when evaluating config value for {key}'.format(key=key))", "def get_plugin_option(self, category, name, option):\r\n return self.plugmanc.readOptionFromPlugin(category, name, option)", "def get_option(self, section, option, default=None):\n config = ConfigParser.ConfigParser()\n config.read(self.app_conf)\n if not config.has_section(section):\n config.add_section(section)\n if config.has_option(section, option):\n ret = config.get(section, option)\n print ''.join(['found ', option, ' in configuration ', ret])\n else:\n config.set(section, option, default)\n ret = default\n config.write(open(self.app_conf, \"w\"))\n return ret", "def _get_option(self, option):\n try:\n return self._config.parser.get('blog', option)\n except configparser.NoOptionError:\n raise AbortError(\n _('The blog extension requires the {option} option.').format(\n option=option))", "def config_get(section, option):\n return __CONFIG.get(section, option)", "def getOption(self, *args):\n return _libsbml.ConversionProperties_getOption(self, *args)", "def __get(self, option=None):\n\n general = self.parser.options('general')\n\n gen = {}\n if not general:\n if option:\n return None\n return gen\n\n for item in general:\n value = self.parser.get('general', item).strip()\n if value:\n gen[item] = value\n\n if option:\n if gen.has_key(option):\n return gen[option]\n return None\n return gen", "def _get_cfg_value(cfg, server, option):\n value = None\n if cfg.has_option(server, option):\n value = cfg.get(server, option)\n elif cfg.has_option(\"global\", option):\n value = cfg.get(\"global\", option)\n\n 
return value", "def option(self, spec):\n return spec.options[self.rng.integers(len(spec.options))]", "def get(self, configuration, option_name, server_id):\n configuration = g.user.get_api().get_configuration(configuration)\n return get_option(configuration, option_name, int(server_id))", "def get_option(option_dict, option):\n if isinstance(option_dict, dict) and option in option_dict:\n return option_dict[option]\n elif not isinstance(option_dict, dict):\n return None\n else:\n for value in option_dict.values():\n result = SchedulePolicy.get_option(value, option)\n if result is not None:\n return result", "def get(self, name, section=__section_default):\n \n if self.parser.has_option(section, name):\n return self.parser[section][name]\n else:\n return None", "def get(value, default=\"\"):\n if value is None:\n return default\n\n return value", "def get(section, option, boolean=False, integer=False, floating=False):\n if boolean:\n return_value = config.getboolean(section, option)\n elif integer:\n return_value = config.getint(section, option)\n elif floating:\n return_value = config.getfloat(section, option)\n else:\n return_value = config.get(section, option)\n return return_value", "def _get_option(self, arg_name: str) -> Any:\n try:\n return getattr(self, f\"__{arg_name}\")\n except AttributeError as ex:\n raise AnalysisError(\n f\"The argument {arg_name} is selected but not defined. \"\n \"This key-value pair should be defined in the analysis option.\"\n ) from ex", "def get(self, section, option):\n for provider in (self._user, self._sections, self._defaults):\n try:\n return provider[section][option]\n except KeyError:\n pass\n raise NoOptionError(option, section)", "def getint(self, option):\n return getint(self.name, option)", "def _get_option_value(self, section, option):\n value = None\n if self.config.has_section(section) and self.config.has_option(section, option):\n value = self.appName = self.config.get(section, option)\n return value", "def load_by_option(self, option):\n try:\n option_value = OptionValue.objects.filter(option=option)\n except OptionValue.DoesNotExist:\n option_value = None\n\n return option_value", "def _get_with_default(self, method, section, option, default, expected_type=None, **kwargs):\n try:\n try:\n # Underscore-style is the recommended configuration style\n option = option.replace('-', '_')\n return method(self, section, option, **kwargs)\n except (NoOptionError, NoSectionError):\n # Support dash-style option names (with deprecation warning).\n option_alias = option.replace('_', '-')\n value = method(self, section, option_alias, **kwargs)\n warn = 'Configuration [{s}] {o} (with dashes) should be avoided. 
Please use underscores: {u}.'.format(\n s=section, o=option_alias, u=option)\n warnings.warn(warn, DeprecationWarning)\n return value\n except (NoOptionError, NoSectionError):\n if default is LuigiConfigParser.NO_DEFAULT:\n raise\n if expected_type is not None and default is not None and \\\n not isinstance(default, expected_type):\n raise\n return default", "def get_value(self, name, option, presentation=False):\r\n if name in self.values:\r\n value = self.values[name]\r\n if presentation:\r\n return option.presentation(value)\r\n else:\r\n return value\r\n else:\r\n raise OptionValueNotSetError(name, option)", "def getint(self, option, default=None):\n\t\treturn self._get_raw(option, 'int', default)", "def getParameter(self, value):\n if value in self.commandLineDefaults:\n return self.commandLineDefaults[value]\n if value in self.defaults:\n return self.defaults[value]\n return None", "def get(self, option, argument=None):\n if argument:\n if self.config.has_section(argument) and (\n self.config.has_option(argument, \"city\") \\\n or self.config.has_option(argument, \"id\") \\\n or self.config.has_option(argument, \"st\")\n ):\n self.config.remove_section(argument)\n import sys\n message = \"WARNING: the city/id/st options are now unsupported in aliases\\n\"\n sys.stderr.write(message)\n if not self.config.has_section(argument):\n guessed = guess(\n argument,\n path=self.get(\"setpath\"),\n info=self.get(\"info\"),\n cache_search=(\n self.get(\"cache\") and self.get(\"cache_search\")\n ),\n cachedir=self.get(\"cachedir\"),\n quiet=self.get_bool(\"quiet\")\n )\n self.config.add_section(argument)\n for item in guessed.items():\n self.config.set(argument, *item)\n if self.config.has_option(argument, option):\n return self.config.get(argument, option)\n if option in self.options.__dict__:\n return self.options.__dict__[option]\n else:\n import os, sys\n message = \"%s error: no URI defined for %s\\n\" % (\n os.path.basename( sys.argv[0] ),\n option\n )\n sys.stderr.write(message)\n exit(1)", "def getint(self, section, option, default=None):\r\n return self.get(section, option, type=int, default=default)", "def get_option(self, n):\n opts = self.view.options_panel.original_widget.contents()\n return opts[n][0].original_widget.contents[1][0]", "def get_choice(self, option: int) -> Choice:\n return self._choices[option - 1]", "def get_option(options=None, local_tag=None, doc=None, doc_tag=None, default=None, error_on_none=True):\n variable = None\n\n # element level\n if options is not None and local_tag is not None:\n if local_tag in options and options[local_tag] is not None:\n variable = options[local_tag]\n if variable is not None:\n return variable\n\n # doc level\n if doc is not None and doc_tag is not None:\n variable = doc.get_metadata(doc_tag, None)\n if variable is not None:\n return variable\n\n # default level\n variable = default\n if variable is None and error_on_none:\n raise ValueError(\"could not retrieve a value for tag; local={0}, doc={1}\".format(local_tag, doc_tag))\n\n return variable", "def getlong(self, option, default = None, section = None):\n return long(self.get(option, default, section))", "def get(name, default=None):", "def get_value(self):\n return self._get_value(self.optionType, self.value, self.defaultValue)", "def get_option_value(self, name: str, prefix: bool = False) -> Any:\n\n if prefix:\n name = f\"{self.parent.pyautodoc_prefix}-{name}\"\n\n if name in self.parent.options:\n return self.parent.options[name]\n elif self.is_available(name):\n return 
self.get_app_cfg_by_name(name)", "def getArg (argv, option):\n i = 0\n ret = \"\"\n #Look over every arguments in order to find the option's value\n while i < len(argv):\n #If the option is find, try to return its value\n if (argv[i] == option):\n try:\n ret = argv[i+1]\n except Exception as e:\n print(\"I think you have not said the value after \" + value)\n explain()\n i+=1\n if ret == \"\":\n raise Exception(\"Option \" + option + \" not found\")\n return ret", "def Option(self):\n return self._Option", "def getOptionalNode(node, name, option=None):\n try:\n return getNode(node, name)\n except NodeError:\n return option", "def getbool(option, default = None):\n\treturn _cfg.getboolean('rosshm', option, fallback = default)", "def get(section, option=\"\", default=None):\n\n\tif not config_parser:\n\t\treturn default\n\n\tif not option:\n\t\t# get the whole section\n\n\t\tif config_parser.has_section(section):\n\t\t\t# the section is noticed by the parser,\n\t\t\t# merge with defaults (if any) and return\n\t\t\t# the value-dict\n\n\t\t\tnew = dict(config_parser.items(section))\n\n\t\t\tif defaults.has_key(section):\n\t\t\t\t# merge defaults + config values together\n\t\t\t\tcopy = defaults[section].copy()\n\t\t\t\tcopy.update(new)\n\t\t\t\treturn copy\n\t\t\telse:\n\t\t\t\treturn new\n\n\t\telif defaults.has_key(section):\n\t\t\t# the config parser does not know the\n\t\t\t# section but it's found in defaults-dict.\n\t\t\t# This is probably a private section.\n\t\t\treturn defaults[section]\n\n\t\telse:\n\t\t\treturn default\n\n\telse:\n\t\t# get specific option\n\n\t\ttry:\n\t\t\treturn config_parser.get(section, option)\n\t\texcept (ConfigParser.NoOptionError, ConfigParser.NoSectionError),e:\n\t\t\tif defaults.has_key(section):\n\t\t\t\ttry:\n\t\t\t\t\treturn defaults[section][option]\n\t\t\t\texcept KeyError:\n\t\t\t\t\treturn default\n\t\telse:\n\t\t\treturn default\n\n\t# usually this is not reached\n\treturn default", "def __getattr__(self, name):\n try:\n return self._get(name)\n except ValueError:\n raise\n except Exception:\n raise NoSuchOptError(name)", "def option(self, key):\n if self.integration is None:\n return None\n return self.configuration.get(f'{self.get_config_name()}.{key}')", "def default(self, option: str, default: Any = None) -> Any:\n if option == 'pull':\n return self.get('force-pull', default=default)\n\n return super().default(option, default=default)", "def getOptionalTag(node, tag, option=\"\"):\n try:\n return getTag(node, tag)\n except TagError:\n return option", "def __getitem__(self, option):\n if option not in self.__dict__.keys():\n raise KeyError(\"Option '{}' not found.\".format(option))\n\n return self.__dict__[option]", "def _get_options(self, struct, field):\n return struct.DESCRIPTOR.fields_by_name[field].GetOptions() if hasattr(struct, \"DESCRIPTOR\") else None", "def get_setting(section, option):\n config = configparser.ConfigParser()\n config.read('settings.ini')\n value = config.get(section, option)\n\n return value", "def get_setting(self, name, default=None):\n w = self.choices['which']\n if w == 'global_default':\n return self.settings.get_global_default(name, default)\n elif w == 'project_default':\n return self.settings.get_project_default(name, default)\n elif w == 'global_variant':\n return self.settings.get_global_variant(self.choices['variant'],\n name, default)\n elif w == 'project_variant':\n return self.settings.get_project_variant(self.choices['variant'],\n name, default)\n elif w == 'project_package_default':\n return 
self.settings.get_project_package_default(\n self.choices['package'], name, default)\n elif w == 'project_package_variant':\n return self.settings.get_project_package_variant(\n self.choices['package'], self.choices['variant'], name, default)\n elif w == 'project_package_target':\n return self.settings.get_project_package_target(\n self.choices['package'], self.choices['target'], name, default)\n else:\n raise AssertionError(w)", "def get_setting(which, default=None):\n settings = QSettings('USGS', 'guanoeditor')\n if default is None:\n return settings.value(which)\n else:\n return settings.value(which, default)", "def get_opt(self):\n return self.parser.parse_args()", "def get_opt(self):\n return self.parser.parse_args()", "def get_opt(self):\n return self.parser.parse_args()", "def requireOption(nm, val):\n return require(\"option\", nm, val)", "def __getitem__(self, item):\n if item not in self._moptions:\n raise KeyError(\"Invalid option '%s'.\" % item)\n return self._runopts.get(item)", "def get_required(self, section, option, type=str):\r\n val = self.get(section, option, type=type)\r\n if val is None:\r\n raise Config.ConfigError('Required option %s.%s is not defined.' % (section, option))\r\n return val", "def get_value(section, option):\n try:\n value = rcp.get(section, option)\n return value\n except:\n logging.error(\"Tried to retrieve nonexistant value from config (%s:%s).\",\n section, option)\n return False", "def getValFromConfig(mainConfig, sectionName, optionName, defaultVal=''):\n if mainConfig.has_section(sectionName):\n if mainConfig.has_option(sectionName, optionName) and mainConfig.get(sectionName, optionName):\n return mainConfig.get(sectionName, optionName)\n return defaultVal", "def get(self, name):\n try:\n return self._defaults[name]\n except KeyError:\n raise UndefinedDefault(\"default %s is undefined\" % name)" ]
[ "0.8309457", "0.8285595", "0.8050056", "0.78617734", "0.78480136", "0.7729407", "0.76687425", "0.7613236", "0.76116955", "0.7508143", "0.7377448", "0.7367811", "0.7332215", "0.7305065", "0.7281587", "0.7236693", "0.72000957", "0.7122571", "0.71041566", "0.7097126", "0.7038417", "0.700789", "0.69762677", "0.6970389", "0.6908797", "0.6881724", "0.6879818", "0.6876581", "0.6873039", "0.686091", "0.6857813", "0.6831281", "0.6798409", "0.67767745", "0.676576", "0.6761022", "0.67497706", "0.6730411", "0.67107606", "0.6668784", "0.6658421", "0.6641882", "0.662364", "0.66068184", "0.6586953", "0.6583804", "0.65823865", "0.65600765", "0.6541452", "0.65384734", "0.6518218", "0.65172976", "0.6501102", "0.6497883", "0.64879954", "0.64647245", "0.6462061", "0.6447685", "0.64379644", "0.6434409", "0.6430119", "0.6394852", "0.63923544", "0.6389833", "0.6386713", "0.6379505", "0.63739115", "0.6368572", "0.63620806", "0.6355025", "0.63482666", "0.63465667", "0.63252527", "0.6322004", "0.6314573", "0.6314227", "0.6311792", "0.6306454", "0.6300589", "0.63004744", "0.6295866", "0.62877727", "0.6283098", "0.6280857", "0.6277948", "0.6251535", "0.62402505", "0.62092704", "0.6185628", "0.6175888", "0.6157214", "0.6146697", "0.6146697", "0.6146697", "0.614663", "0.613302", "0.6131699", "0.6115124", "0.61058366", "0.6104152" ]
0.6900306
25
Get a boolean option, perhaps with a default value.
def getboolean(self, option, default = None, section = None):
    v = self.get(option, default, section)
    if isinstance(v, str):
        v = v.lower()
        if v not in self.cfg._boolean_states:
            raise ValueError, "Not a boolean: %s" % v
        v = self.cfg._boolean_states[v]
    return v
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getboolean(self, option, default=None):\n\t\treturn self._get_raw(option, 'boolean', default)", "def get_bool(options, name, default=False):\n value = options.get(name)\n if not value:\n return default\n if value.lower() == 'true':\n return True\n elif value.lower() == 'false':\n return False\n else:\n raise zc.buildout.UserError(\n \"Invalid value for %s option: %s\" % (name, value))", "def getboolean(self, option):\n return getboolean(self.name, option)", "def get_bool(self, option, argument=None):\n return bool(self.get(option, argument))", "def getbool(option, default = None):\n\treturn _cfg.getboolean('rosshm', option, fallback = default)", "def getbool(self, section, option, default=None):\r\n return self.get(section, option, type=bool, default=default)", "def get_bool(section, option, default=False):\n\tres = get(section, option, default)\n\n\tif res == default:\n\t\treturn default\n\n\tif res.lower() == \"true\" or res == \"1\":\n\t\treturn True\n\n\treturn default", "def getBoolean(self, section, option, default=False):\n return self.get(section, option, default, _bool)", "def config_get_bool(section, option):\n return __CONFIG.getboolean(section, option)", "def getboolean(self, section, option):\n value = self.get(section, option)\n if str(value).lower() in ('1', 'yes', 'true', \"on\"):\n return True\n if str(value).lower() in ('0', 'no', 'false', 'off'):\n return False\n raise ValueError('cannot use it as a boolean value')", "def getBooleanOption(aConfig, aSection, aOption):\n if aConfig.has_option(aSection, aOption):\n return aConfig.getboolean(aSection, aOption)\n else:\n # Default value. This should match the initialization done in\n # __init__ of class task in taskHandler.py\n if (aOption == \"fullScreenMode\" or\n aOption == \"formatOutput\" or\n aOption == \"compressOutput\"):\n return True\n else:\n # \"useWebDriver\"\n # \"runSlowTests\"\n # \"runSkipTests\"\n # \"useGrid\"\n return False", "def bool_option (arg: Any) -> bool:\n return True", "def get_bool(self, name, default=False):\n return self.get_as(self.parse_bool, name, default, value_type=bool)", "def get_attr_bool(self, name, default=False):\n v = self.get_attr(name)\n if v is None:\n return default\n if v.lower() in [\"t\", \"true\", \"y\", \"yes\", \"1\"]:\n return True\n else:\n return False", "def option_bool(argument: Optional[str]) -> bool:\n if argument and argument.strip():\n output = tinydocutils.directives.choice(argument, (\"true\", \"false\"))\n return output == \"true\"\n return True", "def get_bool(self, key, default):\n value = self.get(key, default)\n if isinstance(value, bool):\n return value\n return value.lower() in (\"true\", \"t\", \"yes\", \"y\")", "def option_default_true(arg: Any) -> bool:\n\n if isinstance(arg, bool):\n return arg\n\n if arg is None:\n return True\n\n sanitized = arg.strip().lower()\n\n if sanitized == \"true\":\n return True\n elif sanitized == \"false\":\n return False\n else:\n raise ValueError(f\"Directive option argument '{arg}' is not valid. 
\"\n f\"Valid arguments are 'true' or 'false'.\")", "def getBoolValue(self):\n return _libsbml.ConversionOption_getBoolValue(self)", "def safe_get_bool(self, section, option, default=None):\n try:\n return self.getboolean(section, option)\n except (ConfigParser.NoSectionError, ConfigParser.NoOptionError):\n if default is None:\n raise\n else:\n #gvlogger.info(\"Can't find section '%s' option '%s' in configuration file, reverting to defaults\", section, option)\n return default\n except ValueError:\n if not default:\n raise\n else:\n #gvlogger.info(\"Can't convert value from section '%s' option '%s' in configuration file, reverting to defaults\", section, option)\n return default", "def getOption(arg):\n return (False, \"\", \"\")", "def bool_value(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"bool_value\")", "def getbool(self, sec, name, default=None, badtypeok=False, morevars=None,\n taskvars=None):\n if sec in self.OLD_SECTIONS:\n sec = 'config'\n\n try:\n return super().getbool(sec, name, default=None,\n badtypeok=badtypeok, morevars=morevars,\n taskvars=taskvars)\n except NoOptionError:\n # config item was not set\n self.check_default(sec, name, default)\n return default\n except ValueError:\n # check if it was an empty string and return default or False if so\n value_string = super().getstr(sec, name)\n if not value_string:\n if default:\n return default\n\n return False\n\n # check if value is y/Y/n/N and return True/False if so\n value_string = remove_quotes(value_string)\n if value_string.lower() == 'y':\n return True\n if value_string.lower() == 'n':\n return False\n\n # if value is not correct type, log error and return None\n self.logger.error(f\"[{sec}] {name} must be an boolean.\")\n return None", "def test_getboolean_with_default(self):\n self.assertEqual(self.config.getboolean('advanced','p'),None)\n self.assertEqual(self.config.getboolean('advanced','p',True),True)", "def force_bool(value):\n if isinstance(value, (bool, int)):\n return bool(value)\n\n boolean_states = ConfigParser._boolean_states\n if not value.lower() in boolean_states:\n return None\n\n return boolean_states[value.lower()]", "def get_bool(self, sect, opt):\r\n return self.get_safe(sect, opt) == \"True\"", "def _get_bool(element, name, context, default=None):\n\n value = element.get(name)\n try:\n value = int(value)\n except:\n value = default\n\n _assert(value is not None,\n \"Missing or invalid boolean value of '{0}.{1}'.\".format(context,\n name))\n\n return bool(value)", "def parse_bool(section, optionname):\n string = section.dict[optionname]\n if string.lower() == \"true\" or string.lower() == \"yes\":\n return True\n elif string.lower() == \"false\" or string.lower() == \"no\":\n return False\n elif string.isdigit():\n return bool(int(string))\n else:\n raise ValueError(\"Option \" + optionname + \" in section \" + section.name\n + \" is not a valid boolean!\")", "def read_boolean_option(config, section, option):\n if not config.has_section(section):\n return\n\n return config.has_option(section, option)", "def _getbool(\n parser: configparser.ConfigParser,\n key: str,\n section: str = \"wpwatcher\",\n ) -> bool:\n try:\n return parser.getboolean(section, key)\n except ValueError as err:\n raise ValueError(\n f\"Could not read boolean value in config file for key '{key}' and string '{parser.get(section, key)}'. 
Must be Yes/No\"\n ) from err", "def to_bool(name, default=False):\n return as_bool(get(name), default=default)", "def get_bool(self, key, default=RequiredAttr()):\n if key in self.attrs:\n val = self.attrs[key]\n return val.strip().lower() in ['true', '1', 't', 'y', 'yes']\n if isinstance(default, RequiredAttr):\n raise AttributeError(\"Required attribute {} not found.\".format(key))\n return default", "def _read_bool_from_config(key, default):\n if config.has_option('docker', key):\n return config.getboolean('docker', key)\n else:\n return default", "def _parse_task_open_option(self, value):\n if value == \"true\":\n return False\n elif value == \"false\":\n return True\n else:\n return None", "def opt(self, key, default=False):\n if key not in self.options:\n return default\n return self.options.get(key)", "def option_is_default(self, opt):\n return opt in self.results and self.results[opt][1] is self._is_default", "def bool(self, item, default=None):\n try:\n item = self.__getattr__(item)\n except AttributeError as err:\n if default is not None:\n return default\n raise err\n\n if isinstance(item, (bool, int)):\n return bool(item)\n\n if (isinstance(item, str) and\n item.lower() in ('n', 'no', 'false', 'f', '0')):\n return False\n\n return True if item else False", "def get_boolean_attribute_value(attrs, attr_name):\n return 1 if attrs.get(attr_name, 0) in [\"True\", \"1\"] else 0", "def boolean_flag(parser, name, default=False, help=None):\n dest = name.replace('-', '_')\n parser.add_argument(\"--\" + name, action=\"store_true\", default=default, dest=dest, help=help)\n parser.add_argument(\"--no-\" + name, action=\"store_false\", dest=dest)", "def has_option(self, name):\n return self.option_settings[name]", "def argparse_bool(x):\n return str(x).lower() in {'true', '1', 'yes'}", "def bool_flag(s):\n if s.lower() in ['off', 'false', '0']:\n return False\n if s.lower() in ['on', 'true', '1']:\n return True\n raise argparse.ArgumentTypeError(\"invalid value for a boolean flag (0 or 1)\")", "def cmakeBoolOptionIsSet(self, opt):\n\n if self.envcmake.has_key( opt ):\n\n val = str(self.envcmake.get(opt,\"\"))\n\n if val == \"1\" or val == \"ON\" or val == \"YES\":\n\n return True\n\n return False", "def aria_bool(value: Optional[bool]) -> Optional[str]:\n\n if value is None:\n return None\n elif value is True:\n return \"true\"\n elif value is False:\n return \"false\"\n else:\n raise ValueError(str(value))", "def option_is_true(self, name: str, prefix: bool = False) -> bool:\n\n return self.get_option_value(name=name, prefix=prefix) is True", "def is_true(option_value):\n if option_value.lower()[0] == \"t\":\n return 1\n elif option_value.lower()[0] == \"y\":\n return 1\n elif option_value.lower()[0] == \"o\":\n return 1\n elif option_value.lower() == \"1\":\n return 1\n else:\n return 0", "def explicit_bool(value: bool) -> bool:\n return value", "def is_bool(self):\n validator = self.__class__.get_setting_validator(self.key, **self.get_kwargs())\n\n return self.__class__.validator_is_bool(validator)", "def option(number, default='no'):\n return answer(number).get('options', default)", "def arg_to_boolean(arg: str) -> Optional[bool]:\n return argToBoolean(arg) if arg else None", "def is_true(self, opt_value):\r\n return isinstance(opt_value, tuple) and opt_value[0] or opt_value", "def getSetBoolean(self, key: str, default: bool | None = None) -> bool:\n value = self.parsedConfig.getboolean(key, default)\n self.parsedConfig[key] = str(value)\n return value", "def bool_var(\n default: Any = 
RAISE, name: str | None = None, help: str | None = None\n) -> Any:\n return var(default=default, name=name, converter=_env_to_bool, help=help)", "def _get_option(self, name, datatype, default):\n return config.get_option(self._options,\n name,\n type=datatype,\n default=default)", "def read_config_boolean(config, section, item):\n value = read_config(config, section, item)\n if value is None:\n return value\n if value.lower() in RawConfigParser._boolean_states:\n return RawConfigParser._boolean_states[value.lower()]\n raise ValueError(\"Unknown boolean value {} in configuration {}:{}\".format(\n value, section, item))", "def get_envbool(key, *default):\n return get_env(key, *default, coerce=_bool)", "def optional(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"optional\")", "def optional(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"optional\")", "def optional(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"optional\")", "def optional(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"optional\")", "def optional(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"optional\")", "def optional(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"optional\")", "def boolean(self, label, component, config, name, default=False):\n\n default = self.setting(config, name, default)\n return st.checkbox(label, value=default, key=component + name)", "def boolean_option_action(option,opt_str,value,parser):\n #print \"Processing %s\" % (opt_str)\n setattr(parser.values,option.dest,True)", "def get_xml_bool_attribute(elem, attribute, default=None):\n value = elem.get(attribute, default)\n if value is None:\n raise XMLSchemaKeyError(attribute)\n elif value in ('true', '1') or value is True:\n return True\n elif value in ('false', '0') or value is False:\n return False\n else:\n raise XMLSchemaTypeError(\"an XML boolean value is required for attribute %r\" % attribute)", "def bool_flag(s):\n FALSY_STRINGS = {\"off\", \"false\", \"0\"}\n TRUTHY_STRINGS = {\"on\", \"true\", \"1\"}\n if s.lower() in FALSY_STRINGS:\n return False\n elif s.lower() in TRUTHY_STRINGS:\n return True\n else:\n raise argparse.ArgumentTypeError(\"invalid value for a boolean flag\")", "def getBoolParam(self, params, name):\n return params.get(name) in ('True', 'true', '1')", "def getbool(self, key):\n try:\n return self.parser.getboolean(\"wpwatcher\", key)\n except ValueError as err:\n raise ValueError(\n \"Could not read boolean value in config file for key '{}' and string '{}'. Must be Yes/No\".format(\n key, self.parser.get(\"wpwatcher\", key)\n )\n ) from err", "def test_get_value_bool(self):\n val = self.setting_bool.get_value()\n self.assertIsInstance(val, bool)\n self.assertEqual(val, True)", "def test_get_value_bool(self):\n val = self.setting_bool.get_value()\n self.assertIsInstance(val, bool)\n self.assertEqual(val, True)", "def parse_bool(bool_arg):\n if bool_arg.lower() in ('yes', 'true', 't', 'y', '1'):\n return True\n elif bool_arg.lower() in ('no', 'false', 'f', 'n', '0'):\n return False\n else:\n raise ValueError(f'Boolean argument expected. 
Got {bool_arg} instead.')", "def _bool(value):\n if isinstance(value, bool):\n return value;\n \n if isinstance(value, str):\n value = value.strip()\n \n if value.lower() in _BOOLEAN_STATES:\n return _BOOLEAN_STATES[value.lower()]\n return None", "def _getBoolFeature(self):\n\n # create args\n valueToGet = c_bool()\n\n errorCode = VimbaDLL.featureBoolGet(self._handle,\n self._name,\n byref(valueToGet))\n if errorCode != 0:\n raise VimbaException(errorCode)\n\n return valueToGet.value", "def parse_bool(arg):\n if arg == 'True':\n return True\n elif arg == 'False':\n return False\n else:\n raise argparse.ArgumentTypeError(\"Expected 'True' or 'False'.\")", "def getenv_bool(setting, default=None):\n result = os.environ.get(setting, None)\n if result is None:\n return default\n return str2bool(result)", "def get_bool2(self):\n pass", "def getBoolValue(self, *args):\n return _libsbml.ConversionProperties_getBoolValue(self, *args)", "def getBoolean(self, key):\n self._check(key)\n return self.__config.value(key).toBool()", "def as_bool(self):\n return self.as_type(bool)", "def test_getboolean(self):\n self.assertEqual(self.config.getboolean('advanced','bool'),True)", "def get_config_option(option_name, optional=False):\n option = self.options.get(option_name)\n\n if not option and optional is False:\n err = \"'{0}' is mandatory and is not set in the app.config file. You must set this value to run this function\".format(option_name)\n raise ValueError(err)\n else:\n return option", "def parse_bool(value):\n if value in (\"true\", \"True\", \"yes\", \"1\", \"on\"):\n return True\n if value in (\"false\", \"False\", \"None\", \"no\", \"0\", \"off\"):\n return False\n return bool(int(value))", "def restricted_bool(x):\n try:\n x = bool(x)\n except ValueError:\n raise argparse.ArgumentTypeError(\"%r not a bool literal\" % (x,))\n return x", "def optional(self) -> Optional[bool]:\n return pulumi.get(self, \"optional\")", "def optional(self) -> Optional[bool]:\n return pulumi.get(self, \"optional\")", "def optional(self) -> Optional[bool]:\n return pulumi.get(self, \"optional\")", "def optional(self) -> Optional[bool]:\n return pulumi.get(self, \"optional\")", "def optional(self) -> Optional[bool]:\n return pulumi.get(self, \"optional\")", "def optional(self) -> Optional[bool]:\n return pulumi.get(self, \"optional\")", "def optional(self) -> Optional[bool]:\n return pulumi.get(self, \"optional\")", "def optional(self) -> Optional[bool]:\n return pulumi.get(self, \"optional\")", "def optional(self) -> Optional[bool]:\n return pulumi.get(self, \"optional\")", "def optional(self) -> Optional[bool]:\n return pulumi.get(self, \"optional\")", "def optional(self) -> Optional[bool]:\n return pulumi.get(self, \"optional\")", "def optional(self) -> Optional[bool]:\n return pulumi.get(self, \"optional\")", "def optional(self) -> Optional[bool]:\n return pulumi.get(self, \"optional\")", "def optional(self) -> Optional[bool]:\n return pulumi.get(self, \"optional\")", "def optional(self) -> Optional[bool]:\n return pulumi.get(self, \"optional\")", "def optional(self) -> Optional[bool]:\n return pulumi.get(self, \"optional\")", "def optional(self) -> Optional[bool]:\n return pulumi.get(self, \"optional\")", "def optional(self) -> Optional[bool]:\n return pulumi.get(self, \"optional\")" ]
[ "0.8732398", "0.86066973", "0.84299433", "0.8366191", "0.82892025", "0.8260845", "0.822819", "0.8115869", "0.78542686", "0.7821285", "0.78019625", "0.7718613", "0.7465177", "0.7321887", "0.73198515", "0.7313598", "0.7216047", "0.7181007", "0.7140331", "0.71299386", "0.7106636", "0.7086862", "0.70771307", "0.70424265", "0.70143163", "0.6993727", "0.69437563", "0.69056135", "0.6887488", "0.68757373", "0.6871865", "0.6866682", "0.67665046", "0.6757908", "0.6720131", "0.6703395", "0.66796815", "0.6638753", "0.6624162", "0.65919834", "0.6575934", "0.6573998", "0.6557802", "0.65173465", "0.65150493", "0.6493984", "0.64932877", "0.64749354", "0.64582884", "0.6449571", "0.6438069", "0.642041", "0.6399837", "0.6398147", "0.63963526", "0.6370766", "0.6370766", "0.6370766", "0.6370766", "0.6370766", "0.6370766", "0.63587314", "0.6350127", "0.6340106", "0.6325179", "0.63231933", "0.62960863", "0.6295265", "0.6295265", "0.62795115", "0.6277643", "0.6275817", "0.62752867", "0.62704873", "0.62609774", "0.62577164", "0.6256", "0.6220624", "0.62126845", "0.62115604", "0.61994344", "0.6181664", "0.6169545", "0.6169545", "0.6169545", "0.6169545", "0.6169545", "0.6169545", "0.6169545", "0.6169545", "0.6169545", "0.6169545", "0.6169545", "0.6169545", "0.6169545", "0.6169545", "0.6169545", "0.6169545", "0.6169545", "0.6169545" ]
0.8165139
7
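Note: an illustrative, self-contained sketch of how a getboolean helper like the one in the record above typically behaves. The Config class name, its _boolean_states table, and the sample option names are assumptions made for this example, not part of the original codebase.

# Minimal sketch (assumed names): a config wrapper whose getboolean coerces
# truthy/falsy strings through a _boolean_states lookup table.
class Config:
    _boolean_states = {"1": True, "yes": True, "true": True, "on": True,
                       "0": False, "no": False, "false": False, "off": False}

    def __init__(self, values):
        self.values = values  # plain dict standing in for a parsed config file

    def get(self, option, default=None):
        return self.values.get(option, default)

    def getboolean(self, option, default=None):
        v = self.get(option, default)
        if isinstance(v, str):
            v = v.lower()
            if v not in self._boolean_states:
                raise ValueError("Not a boolean: %s" % v)
            v = self._boolean_states[v]
        return v

cfg = Config({"enable-feature": "Yes", "use-syslog": "off"})
print(cfg.getboolean("enable-feature"))         # True
print(cfg.getboolean("use-syslog"))             # False
print(cfg.getboolean("missing", default=True))  # True: non-string defaults pass through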
Get an integer option, perhaps with a default value.
def getint(self, option, default = None, section = None):
    return int(self.get(option, default, section))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getint(self, option, default=None):\n\t\treturn self._get_raw(option, 'int', default)", "def getint(self, option):\n return getint(self.name, option)", "def getint(self, section, option, default=None):\r\n return self.get(section, option, type=int, default=default)", "def getint(self, option, argument=None):\n value = self.get(option, argument)\n if value: return int(value)\n else: return 0", "def getInt(self, section, option, default=0):\n return self.get(section, option, default, int)", "def getint(self, section, option):\n return int(self.get(section, option))", "def option(number, default='no'):\n return answer(number).get('options', default)", "def _ParseIntegerOption(cls, options, argument_name, default_value=None):\n argument_value = getattr(options, argument_name, None)\n if not argument_value:\n return default_value\n\n if not isinstance(argument_value, py2to3.INTEGER_TYPES):\n raise errors.BadConfigOption(\n u'Unsupported option: {0:s} integer type required.'.format(\n argument_name))\n\n return argument_value", "def config_get_int(section, option):\n return __CONFIG.getint(section, option)", "def safe_get_int(self, section, option, default=None):\n try:\n return int(self.safe_get(section, option, default))\n except ValueError:\n if default is None:\n raise\n else:\n #gvlogger.info(\"Can't convert value from section '%s' option '%s' in configuration file, reverting to defaults\", section, option)\n return default", "def find_option(number):\n if not isinstance(number, int):\n raise TypeError(number)\n if not ((0 <= number) and (number <= 65535)):\n raise ValueError(number)\n return _OptionRegistry.get(number, None)", "def option(self, spec):\n return spec.options[self.rng.integers(len(spec.options))]", "def getInt(self, item, default=0):\n value = self.getSection(CFG_GENERAL, item)\n return default if not value else int(value)", "def get_int(self, sect, opt):\r\n vstr = self.get_safe(sect, opt)\r\n try:\r\n return int(vstr)\r\n except ValueError:\r\n return 0", "def test_getint_with_default(self):\n self.assertEqual(self.config.getint('advanced','p'),None)\n self.assertEqual(self.config.getint('advanced','p',11),11)", "def getIntValue(self):\n return _libsbml.ConversionOption_getIntValue(self)", "def _get_option(self, name, datatype, default):\n return config.get_option(self._options,\n name,\n type=datatype,\n default=default)", "def get_attr_int(self, name, default=0):\n v = self.get_attr(name)\n if v is None:\n return default\n try:\n return int(v)\n except: # noqa\n return default", "def get_by_name_as_int(cls, name, default=None):\n try:\n return int(Configuration.get_by_name(name))\n except:\n return default", "def to_int(name, default=0):\n try:\n return int(get(name))\n except (TypeError, ValueError):\n return default", "def GetInteger(self,prompt=''):\n\t\treturn self.acad.ActiveDocument.Utility.GetInteger(prompt)", "def value(self, value: Optional[int] = None) -> Optional[int]:\n ...", "def int(self, item, default=None):\n try:\n item = self.__getattr__(item)\n except AttributeError as err:\n if default is not None:\n return default\n raise err\n return int(item)", "def to_int_or_none(value: Union[None, int, str]) -> Optional[int]:\n return None if value is None else int(value)", "def getDbIntDefault(self, db, key, default):\n val = self.getDbStrNone(db, key)\n if val != None:\n return int(val)\n else:\n return default", "def getlong(self, option, default = None, section = None):\n return long(self.get(option, default, section))", "def setIntegerOption(self, 
option, value):\n result = self.__lib.voikkoSetIntegerOption(self.__handle, option, value)\n if result == 0:\n raise VoikkoException(\"Could not set integer option %s to value %s\" % (option, value))", "def get_option(self, option):\n\t\treturn self.options[option]", "def get(key: 'int | str', default: 'Optional[int]' = -1) -> 'Flags':\n if isinstance(key, int):\n return Flags(key)\n return Flags[key] # type: ignore[misc]", "def get_int_attribute(element, attr, default = 0):\n result = element.getAttribute(attr)\n if result == \"\":\n return default\n return int(result)", "def get_input(self):\n option = input(\"Enter the number of your choice: \")\n return option", "def getSetInt(self, key: str, default: int | None = None) -> int:\n value = self.parsedConfig.getint(key, default)\n self.parsedConfig[key] = str(value)\n return value", "def get_default(cls, opt):\n try:\n return cls._OPTS[opt].default\n except KeyError:\n raise ValueError('unknown option name %r' % (opt,))", "def get_uint_arg(name, default, **kwargs):\n try:\n val = int(kwargs.get(name, default))\n if val < 0:\n logger.error('Parameter %s must not be negative')\n val = default\n return val\n except:\n logger.error('Parameter %s is not an integer' % name)\n return default", "def get(self, option, default=None):\n\t\treturn self._get_raw(option, '', default)", "def make_intger(value):\n if value:\n return int(value)\n return None", "def _getint(\n parser: configparser.ConfigParser,\n key: str,\n section: str = \"wpwatcher\",\n ) -> int:\n try:\n return parser.getint(section, key)\n except ValueError as err:\n raise ValueError(\n f\"Could not read int value in config file for key '{key}' and string '{parser.get(section, key)}'. Must be an integer\"\n ) from err", "def get_choice(self):\n number = -1\n while (number < 0) or (number > len(self.options)):\n number = int(input('Enter your menu choice: '))\n return number", "def getInteger(self):\n return self.value if self.isInteger() else None", "def getintparam(name, default=None, stash=None, params=None):\n v = getparamlist(name, stash=stash, params=params)\n if len(v) > 0: return int(v[0])\n return default", "def int(self, key: str, def_: Union[builtins.int, T] = 0) -> Union[builtins.int, T]:\n try:\n return int(self.get(key))\n except (ValueError, TypeError):\n return def_", "def opt(self, key, default=False):\n if key not in self.options:\n return default\n return self.options.get(key)", "def get_option(self, n):\n opts = self.view.options_panel.original_widget.contents()\n return opts[n][0].original_widget.contents[1][0]", "def getDefaultValue(self) -> Optional[int]:\n try:\n return int(self.placeholderText())\n except ValueError:\n return None", "def nonNegativeIntOrNone(value):\n return None if value == None or value < 0 else int(value)", "def as_int(self):\n try:\n value = int(self.value)\n except (ValueError, TypeError):\n value = self.default_value\n\n return value", "def get_int(self, name):\n return self.field(name).toInt()[0]", "def option_str_to_int(self, option):\n if type(option) == int:\n return option\n\n assert \"-\" not in option, \"Must be given specific option: %s.\" % option\n\n for val in self.scapy_options:\n if self.scapy_options[val].lower() == option.lower():\n return val\n\n if \" \" in option:\n option = option.replace(\" \", \"_\").lower()\n\n if option.lower() in self.options_names:\n return self.options_names[option.lower()]", "def format_option(self, options_int, value):\n # NOPs\n if options_int == 1:\n return (self.scapy_options[options_int], 
())\n elif options_int in [5]:\n return (self.scapy_options[options_int], value)\n # Timestamp\n elif options_int in [8, 14]:\n return (self.scapy_options[options_int], (value, 0))\n elif options_int in self.scapy_options:\n return (self.scapy_options[options_int], value)\n else:\n return (options_int, value)", "def __call__(self, value: Optional[int] = None) -> Optional[int]:\n ...", "def get_option(self, key):\n return self.options[key]", "def getIntParam(self, paramkey, default=None):\n value = self.request.getParameter(paramkey)\n if value is None: return default\n try: return int(value)\n except: return default", "def __get_option(self, option):\n if option in Config.OPTIONS.keys():\n _default = Config.OPTIONS[option]\n elif option in Config.FILE_OPTIONS.keys():\n _default = Config.FILE_OPTIONS[option]\n elif option in Config.PATH_OPTIONS.keys():\n _default = Config.PATH_OPTIONS[option]\n else:\n _default = None # XXX ??\n \n _val = self.__get(option)\n\n if _val: \n return _val\n else:\n return _default", "def get(self, option):\n return get(self.name, option)", "def get_choice(self, option: int) -> Choice:\n return self._choices[option - 1]", "def setIntValue(self, *args):\n return _libsbml.ConversionOption_setIntValue(self, *args)", "def getint(self, sec, name, default=None, badtypeok=False, morevars=None,\n taskvars=None):\n if sec in self.OLD_SECTIONS:\n sec = 'config'\n\n try:\n # call ProdConfig function with no default set so\n # we can log and set the default\n return super().getint(sec, name, default=None,\n badtypeok=badtypeok, morevars=morevars,\n taskvars=taskvars)\n\n # if config variable is not set\n except NoOptionError:\n if default is None:\n default = util.MISSING_DATA_VALUE\n\n self.check_default(sec, name, default)\n return default\n\n # if invalid value\n except ValueError:\n # check if it was an empty string and return MISSING_DATA_VALUE\n if super().getstr(sec, name) == '':\n return util.MISSING_DATA_VALUE\n\n # if value is not correct type, log error and return None\n self.logger.error(f\"[{sec}] {name} must be an integer.\")\n return None", "def getOption(arg):\n return (False, \"\", \"\")", "def getInteger(self):\n pass", "def to_int(value, default=None):\n try:\n value = int(value)\n except (TypeError, ValueError):\n return default\n else:\n return value", "def test_default(self):\r\n self.assertEqual(self.option.default, 1234)", "def get_option(self, option):\n if not self._options.has_key(option):\n raise KeyError, \"Invalid option: \" + option\n else:\n return self._options.get(option)", "def __integer(value, name=\"\", internal=False):\n if value is None:\n __ex(\"The %s is missing.\" % name, internal)\n if value == \"\":\n __ex(\"The %s must not be empty.\" % name, internal)\n try:\n value = int(value)\n except ValueError:\n __ex(\"The %s must be an integer.\" % name, internal)\n return int(value)", "def getInt(self, key):\n self._check(key)\n value, flag = self.__config.value(key).toInt()\n if flag:\n return value\n else:\n raise ValueError(\"ConfigManager can't get key '%s' as int\" % key)", "def options():\n print(\"1: Compute the sum of 1..n\")\n print(\"2: Compute the product of 1..n\")\n print(\"9: Quit\")\n try:\n option = int(input(\"Choice: \"))\n except:\n return None\n return option", "def find_option(self, option_name, default=None):\n value = (\n getattr(self.pconfig.option, option_name, None) or\n self.pconfig.getini(option_name)\n )\n return value if value else default", "def test_intOrDefault(self):\n 
self.assertEqual(irc._intOrDefault(None), None)\n self.assertEqual(irc._intOrDefault([]), None)\n self.assertEqual(irc._intOrDefault(\"\"), None)\n self.assertEqual(irc._intOrDefault(\"hello\", 5), 5)\n self.assertEqual(irc._intOrDefault(\"123\"), 123)\n self.assertEqual(irc._intOrDefault(123), 123)", "def get_option_nibble(optionvalue):\n if optionvalue <= 12:\n return optionvalue\n elif optionvalue <= 255 + 13:\n return 13\n elif optionvalue <= 65535 + 269:\n return 14\n else:\n raise ValueError(\"Unsupported option delta \" + optionvalue)", "def possible_int(arg):\n try:\n return int(arg)\n except ValueError:\n logging.info(f'failed to parse {arg} as an int, treating it as a string')\n return arg", "def get_int(self):\n while True:\n try:\n choice = int(input(\"Choose: \"))\n if 1 <= choice <= len(self.menu):\n return choice\n print(\"Invalid choice.\")\n except (NameError,ValueError, TypeError,SyntaxError):\n print(\"That was not a number, genious.... :(\")", "def int_parameter(level, maxval):\n return int(level * maxval / 10)", "def getValue(self) -> Optional[int]:\n return self.__value", "def nonesafe_int(in_string: Optional[str]) -> Optional[int]:\n if in_string is None:\n return None\n return int(in_string)", "def get_option_generic(pytest_config: pytest.Config, flag: str, default):\n cli_flag = flag.replace(\"-\", \"_\")\n ini_flag = flag\n\n # Lowest priority\n use = default\n\n # Middle priority\n if pytest_config.getini(ini_flag) is not None:\n use = pytest_config.getini(ini_flag)\n\n # Top priority\n if pytest_config.getoption(cli_flag) is not None:\n use = pytest_config.getoption(cli_flag)\n\n return use", "def get(cls,name,as_type = str):\n inst = cls.inst()\n if name in inst.options:\n return as_type(inst.options[name])\n else:\n raise OptionsError(\"No option with key '%s'\" % name)", "def get(key, default=-1):\n if isinstance(key, int):\n return Parameter(key)\n if key not in Parameter._member_map_: # pylint: disable=no-member\n extend_enum(Parameter, key, default)\n return Parameter[key]", "def getInteger(self):", "def getInteger(self):", "def optval(self, option, default=None):\n return getattr(self._options_, option, default)", "def test_get_value_int(self):\n val = self.setting_int.get_value()\n self.assertIsInstance(val, int)\n self.assertEqual(val, 170)", "def test_get_value_int(self):\n val = self.setting_int.get_value()\n self.assertIsInstance(val, int)\n self.assertEqual(val, 170)", "def get_option(self, option, default=None):\n splitvals = option.split('/')\n section, key = \"/\".join(splitvals[:-1]), splitvals[-1]\n\n try:\n value = self.get(section, key)\n value = self._str_to_val(value)\n except ValueError, s:\n logger.warning(\"get failed for {}/{}: {}\".format(section,key,s))\n value = default\n except NoSectionError:\n value = default\n except NoOptionError:\n value = default\n\n return value", "def read_config_int(config, section, item):\n value = read_config(config, section, item)\n if value is None:\n return value\n return int(value)", "def opt_value(self):\n return self._opt_value", "def int(self, max_=None):\n max_ = self.max_int if max_ is None else max_\n return self.rng.integers(max_).item()", "def test_getint(self):\n self.assertEqual(self.config.getint('advanced','n'),12)", "def getOption(self, name):\n\n if name.lower() in self.defaultOptions:\n return self.options[name.lower()][1]\n else:\n raise Error('%s is not a valid option name.' 
% name)", "def get_positive_int(num, name='Number', max_value=None,\n default_if_blank=None):\n if default_if_blank and len(num) == 0:\n return default_if_blank\n\n num = get_int(num, name=name)\n\n if num <= 0:\n raise ValueError('{} must be positive'.format(name))\n\n if max_value and num > max_value:\n raise ValueError(\"{} can't exceed {}\".format(name, max_value))\n\n return num", "def useti(self, prompt=None, default=None):\n \n i = 0\n abak = copy(default) # Backup our default value\n\n a = abak\n while(i<self.maxTries):\n tmp = self.uset(prompt,default)\n try:\n a = float(tmp)\n a = int(a)\n i = self.maxTries # preload failure\n except:\n # Print warning\n print\n print \" WARNING: Invalid Entry. Please enter an integer!!\"\n print \n # reload the default\n a = abak\n i = i+1\n \n return(a)", "def get_int(self, item: str) -> int:\n return int(self[item])", "def mint(x, default=None):\n try:\n return int(x)\n except ValueError:\n return default", "def getInteger(self):\n assert self._is_int is True\n return self._value", "def getdefault(self, option, type=str, default=None):\r\n return self.get(Config.DEFAULT_SECTION, option, type, default=default)", "def get(option, default = None):\n\treturn _cfg.get('rosshm', option, fallback = default)", "def getOption(self, *args):\n return _libsbml.ConversionProperties_getOption(self, *args)", "def give_me_an_integer():\n return 5\n pass", "def get_option(self, sorting_option_string=None, max_number=20):\n if sorting_option_string is None:\n print(\"sorting option string is not given. It will be a default option, score\")\n sorting_option_string = 'score'\n\n sorting_option = SortingOption.get_type_of(sorting_option_string)\n option = SuggestionOption(sorting_option=sorting_option, max_number=max_number)\n return option", "def get(self, section, option, type_=six.string_types, default=None):\n return self._getinstance(section, option, type_, default)", "def get_int(num, name='Number'):\n try:\n num = int(num)\n except ValueError:\n raise ValueError('{} must be an integer'.format(name))\n\n return num", "def get_option_value(self, key):\n\n # Check the key.\n self.__assert_option(key)\n\n # Get and return the value.\n return self.__opt[key]" ]
[ "0.84793097", "0.81527907", "0.79768705", "0.7929183", "0.7760084", "0.7490237", "0.73363376", "0.71646047", "0.7082002", "0.7010749", "0.6840518", "0.66669184", "0.65775895", "0.6542784", "0.65416443", "0.652801", "0.63949615", "0.6394363", "0.63876903", "0.633896", "0.63153094", "0.63023686", "0.62728655", "0.622992", "0.6211607", "0.6196161", "0.61919236", "0.61895233", "0.61851114", "0.61552656", "0.6150641", "0.6110283", "0.609345", "0.6089887", "0.60664135", "0.6026814", "0.60240626", "0.60217315", "0.60057306", "0.6005505", "0.5991693", "0.5971293", "0.5961673", "0.5959873", "0.59414834", "0.5909598", "0.5885566", "0.5877851", "0.5874784", "0.5852266", "0.5850736", "0.58272815", "0.5825624", "0.57817197", "0.57716346", "0.57632816", "0.57567763", "0.575054", "0.5750179", "0.57374376", "0.5731352", "0.5727445", "0.57230586", "0.5711453", "0.5710723", "0.57047445", "0.5698765", "0.5681531", "0.56721115", "0.5656477", "0.56523806", "0.564998", "0.5643048", "0.5636772", "0.56320053", "0.562969", "0.5625402", "0.5625402", "0.56240237", "0.5623638", "0.5623638", "0.5600196", "0.5568345", "0.5560787", "0.5559729", "0.55449027", "0.5538443", "0.5535514", "0.5534385", "0.5532062", "0.5522471", "0.5516305", "0.5510783", "0.5482867", "0.54790264", "0.5472786", "0.5456214", "0.5453968", "0.5452296", "0.5437295" ]
0.8415297
1
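Similarly, a hedged sketch of getint-style coercion (the values dict and option names below are made up for illustration): the raw config string is simply passed through int(), so a default is coerced the same way and non-numeric strings raise ValueError.

# Minimal sketch (assumed data): integer coercion in a getint-style helper.
values = {"timeout": "30", "retries": "oops"}

def getint(option, default=None):
    return int(values.get(option, default))

print(getint("timeout"))        # 30
print(getint("max-size", 512))  # 512: the default is returned, then coerced by int()
try:
    getint("retries")
except ValueError as exc:
    print("bad integer:", exc)  # non-numeric strings raise ValueError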
Get a long integer option, perhaps with a default value.
def getlong(self, option, default = None, section = None):
    return long(self.get(option, default, section))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def getint(self, option, default = None, section = None):\n return int(self.get(option, default, section))", "def getLong(self, int: int, int2: int) -> int:\n ...", "def getLong(t, swipl):\n i = c_long()\n if swipl.PL_get_long(t, byref(i)):\n return i.value\n else:\n raise InvalidTypeError(\"long\")", "def getLong(self, name: unicode) -> long:\n ...", "def getint(self, option, default=None):\n\t\treturn self._get_raw(option, 'int', default)", "def getint(self, option):\n return getint(self.name, option)", "def getint(self, section, option, default=None):\r\n return self.get(section, option, type=int, default=default)", "def to_long_int(val):\n return long(val) if six.PY2 else int(val)", "def getint(self, option, argument=None):\n value = self.get(option, argument)\n if value: return int(value)\n else: return 0", "def convertToLong(boolean: bool) -> int:\n ...", "def setLong(self, name: unicode, value: long) -> None:\n ...", "def getLong(self, addr: ghidra.program.model.address.Address) -> long:\n ...", "def long_attr(attr):\n try:\n val = long(attr, 0)\n except ValueError:\n raise EzXMLError(\"%s did not parse as an integer\" % attr)\n return val", "def field_to_long(value):\n if isinstance(value, (int, long)):\n return long(value)\n elif isinstance(value, basestring):\n return bytes_to_long(from_hex(value))\n else:\n return None", "def getint(self, section, option):\n return int(self.get(section, option))", "def read_long_long(data):\n s_type = \"=%s\" % get_type(\"long_long\")\n return struct.unpack(s_type, data.read(8))[0]", "def read_long(self):\n return self._packers[\"l\"].unpack(self.read(4))[0]", "def option(number, default='no'):\n return answer(number).get('options', default)", "def getInt(self, section, option, default=0):\n return self.get(section, option, default, int)", "def get_long(self, key):\n if self._handle is None:\n raise Exception(\"GRIB file %s not open\" % (self.fname,))\n\n val = ctypes.c_long()\n rc = grib_get_long(self._handle, key, ctypes.byref(val))\n if rc:\n raise Exception(\"grib_get_long() failed: %d\" % (rc,))\n return val.value", "def get_long(self, key):\n if self._handle is None:\n raise Exception(\"GRIB file %s not open\" % (self.fname,))\n\n val = ctypes.c_long()\n rc = grib_get_long(self._handle, key, ctypes.byref(val))\n if rc:\n raise Exception(\"grib_get_long() failed: %d\" % (rc,))\n return val.value", "def find_option(number):\n if not isinstance(number, int):\n raise TypeError(number)\n if not ((0 <= number) and (number <= 65535)):\n raise ValueError(number)\n return _OptionRegistry.get(number, None)", "def getLong(self, address: ghidra.program.model.address.Address) -> long:\n ...", "def test_ulong_long_int(self):\n self.failUnlessEqual(self.callFunc('encode_longlong', self.const_integer), self.const_integer_long_long_encoded, 'long long encoding FAILED...')", "def nextLong(self) -> \"long\":\n raise NotImplementedError", "def read_long(self):\n a, b, c, d = self.read_list(4)\n return a << 24 | b << 16 | c << 8 | d", "def setLong(self, addr: ghidra.program.model.address.Address, value: long) -> None:\n ...", "def getLong(self, addr: ghidra.program.model.address.Address, bigEndian: bool) -> long:\n ...", "def config_get_int(section, option):\n return __CONFIG.getint(section, option)", "def validateLong(sValue, lMin = 0, lMax = None, aoNilValues = tuple([long(-1), None, '']), fAllowNull = True):\n if sValue in aoNilValues:\n if fAllowNull:\n return (None if sValue is None else aoNilValues[0], None);\n return (sValue, 'Mandatory.');\n try:\n 
if utils.isString(sValue):\n lValue = long(sValue, 0);\n else:\n lValue = long(sValue);\n except:\n return (sValue, 'Not a long integer');\n\n if lValue in aoNilValues:\n return (aoNilValues[0], None if fAllowNull else 'Mandatory.');\n\n if lMin is not None and lValue < lMin:\n return (lValue, 'Value too small (min %d)' % (lMin,));\n elif lMax is not None and lValue > lMax:\n return (lValue, 'Value too high (max %d)' % (lMax,));\n return (lValue, None);", "def read_long_integer(self, process_handle: int, address: int):\n self.__bufferSize = 8\n value = self.__read_bytes(process_handle, address)\n return None if value is None else int.from_bytes(value, byteorder='little')", "def __long__( self ):\r\n\t\treturnvalue = self.numerator / self.denominator\r\n\t\tif ( type( returnvalue ) == types.ComplexType ):\r\n\t\t\treturnvalue = long( abs( returnvalue ) )\r\n\t\telse:\r\n\t\t\treturnvalue = long( returnvalue )\r\n\t\treturn returnvalue", "def safe_get_int(self, section, option, default=None):\n try:\n return int(self.safe_get(section, option, default))\n except ValueError:\n if default is None:\n raise\n else:\n #gvlogger.info(\"Can't convert value from section '%s' option '%s' in configuration file, reverting to defaults\", section, option)\n return default", "def __long__(self):\n return long(self.micros() // 1000000) # pragma: PY2", "def to_long(x):\n if isinstance(x, long):\n return x\n elif isinstance(x, int):\n return long(x)\n else:\n return bytes_to_long(to_bytes(x))", "def read_long(data):\n s_type = \"=%s\" % get_type(\"long\")\n return struct.unpack(s_type, data.read(4))[0]", "def _ParseIntegerOption(cls, options, argument_name, default_value=None):\n argument_value = getattr(options, argument_name, None)\n if not argument_value:\n return default_value\n\n if not isinstance(argument_value, py2to3.INTEGER_TYPES):\n raise errors.BadConfigOption(\n u'Unsupported option: {0:s} integer type required.'.format(\n argument_name))\n\n return argument_value", "def testIntegerField_AllowLong(self):\n if six.PY2:\n messages.IntegerField(10, default=long(10))", "def check_option(option, max_length):\n res = utils.string_to_num(option)\n if res is None:\n return None\n\n if res < 0 or res >= max_length:\n return None\n return res", "def _get_option(self, name, datatype, default):\n return config.get_option(self._options,\n name,\n type=datatype,\n default=default)", "def serialize_long(self, obj):\n return self.serialize_int(obj)", "def get_int(self, sect, opt):\r\n vstr = self.get_safe(sect, opt)\r\n try:\r\n return int(vstr)\r\n except ValueError:\r\n return 0", "def __long__(self):\n if len(self) == 8:\n return struct_Q.unpack(self)[0]\n else:\n raise ValueError(\"Unable to cast field to int: length must be 8 bytes, field length is %d\" % len(self))", "def setLong(self, addr: ghidra.program.model.address.Address, value: long, bigEndian: bool) -> None:\n ...", "def setLong(self, address: ghidra.program.model.address.Address, value: long) -> None:\n ...", "def get_flag(self):\n return self.long_flag", "def read_unsigned_long_long(data):\n s_type = \"=%s\" % get_type(\"unsigned_long_long\")\n return struct.unpack(s_type, data.read(8))[0]", "def setIsLong(self, value):\n return self._set(isLong=value)", "def get_uint_arg(name, default, **kwargs):\n try:\n val = int(kwargs.get(name, default))\n if val < 0:\n logger.error('Parameter %s must not be negative')\n val = default\n return val\n except:\n logger.error('Parameter %s is not an integer' % name)\n return default", "def 
test_ulong_int(self):\n self.failUnlessEqual(self.callFunc('encode_long', self.const_integer), self.const_integer_long_encoded, 'long encoding FAILED...')", "def get_option_nibble(optionvalue):\n if optionvalue <= 12:\n return optionvalue\n elif optionvalue <= 255 + 13:\n return 13\n elif optionvalue <= 65535 + 269:\n return 14\n else:\n raise ValueError(\"Unsupported option delta \" + optionvalue)", "def int64_value(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"int64_value\")", "def get(key: 'int | str', default: 'Optional[int]' = -1) -> 'Flags':\n if isinstance(key, int):\n return Flags(key)\n return Flags[key] # type: ignore[misc]", "def value_to_db_auto(self, value):\r\n if value is None:\r\n return None\r\n return long(value)", "def getDbIntDefault(self, db, key, default):\n val = self.getDbStrNone(db, key)\n if val != None:\n return int(val)\n else:\n return default", "def bit_to_long(bits: str) -> Decimal:\n ints = int(bits, 2)\n result = Decimal(ints) / Decimal(_max_32bit)\n return result", "def test_ulong_long_int_decode(self):\n self.failUnlessEqual(self.readFunc('decode_longlong', self.const_integer_long_long_encoded), self.const_integer, 'unsigned long long decoding FAILED...')", "def read_ulong(self):\n return self._packers[\"L\"].unpack(self.read(4))[0]", "def getOption(arg):\n return (False, \"\", \"\")", "def get(self, option, default=None):\n\t\treturn self._get_raw(option, '', default)", "def write_long(self, registeraddress, value, signed=False):\n MAX_VALUE_LONG = 4294967295 # Unsigned INT32\n MIN_VALUE_LONG = -2147483648 # INT32\n\n _checkInt(value, minvalue=MIN_VALUE_LONG, maxvalue=MAX_VALUE_LONG, description='input value')\n _checkBool(signed, description='signed')\n self._genericCommand(16, registeraddress, value, numberOfRegisters=2, signed=signed, payloadformat='long')", "def readLong(data):\n high, low = struct.unpack(\">ll\", data[0:8])\n big = (long(high) << 32) + low\n rest = data[8:]\n return (big, rest)", "def test_getint_with_default(self):\n self.assertEqual(self.config.getint('advanced','p'),None)\n self.assertEqual(self.config.getint('advanced','p',11),11)", "def write_long(self, l):\n if not isinstance(l, six.integer_types):\n raise TypeError(\"expected an int, got %r\" % (type(l),))\n\n if not -2147483648 <= l <= 2147483647:\n raise OverflowError(\"Not in range, %d\" % l)\n\n self.write(self._packers[\"l\"].pack(l))", "def option(self, spec):\n return spec.options[self.rng.integers(len(spec.options))]", "def get_option(self, option, default=None):\n splitvals = option.split('/')\n section, key = \"/\".join(splitvals[:-1]), splitvals[-1]\n\n try:\n value = self.get(section, key)\n value = self._str_to_val(value)\n except ValueError, s:\n logger.warning(\"get failed for {}/{}: {}\".format(section,key,s))\n value = default\n except NoSectionError:\n value = default\n except NoOptionError:\n value = default\n\n return value", "def optval(self, option, default=None):\n return getattr(self._options_, option, default)", "def get_option(self, n):\n opts = self.view.options_panel.original_widget.contents()\n return opts[n][0].original_widget.contents[1][0]", "def test_ulong_long_int_out_of_upper_range(self):\n self.failUnlessRaises(Exception, self.codec.encode_longlong, 18446744073709551616)", "def _validateLong(dErrors, sName, sValue, lMin = 0, lMax = None, aoNilValues = tuple([long(-1), None, ''])):\n (sValue, sError) = ModelDataBase.validateLong(sValue, lMin, lMax, aoNilValues, fAllowNull = False);\n if sError is not None:\n 
dErrors[sName] = sError;\n return sValue;", "def read_unsigned_long(data):\n s_type = \"=%s\" % get_type(\"unsigned_long\")\n return struct.unpack(s_type, data.read(4))[0]", "def str_to_long(data_structure):\n if (type(data_structure) is str and len(data_structure) > 0 and\n data_structure[-1] == 'L'):\n try:\n return long(data_structure)\n except ValueError:\n return data_structure\n else:\n return data_structure", "def get_option(self, option):\n\t\treturn self.options[option]", "def test_ulong_long_int_out_of_lower_range(self):\n self.failUnlessRaises(Exception, self.codec.encode_longlong, -1)", "def get_default(cls, opt):\n try:\n return cls._OPTS[opt].default\n except KeyError:\n raise ValueError('unknown option name %r' % (opt,))", "def python_to_SPARQL_long(self, query, var = None) :\n\t\tif var == None :\n\t\t\tvar = self._bnodeVar()\n\t\treturn self.python_to_SPARQL_long_helper(query, var)[1]", "def fmt_option_val(option):\n if option is None:\n return \"\"\n return str(option)", "def test_arg_option_long_only(self):\n optional_long = [\n arg for arg in cli_args.values() if len(arg.flags) == 1 and arg.flags[0].startswith(\"-\")\n ]\n for arg in optional_long:\n assert ILLEGAL_LONG_OPTION_PATTERN.match(arg.flags[0]) is None, f\"{arg.flags[0]} is not match\"", "def opt(self, key, default=False):\n if key not in self.options:\n return default\n return self.options.get(key)", "def __get_option(self, option):\n if option in Config.OPTIONS.keys():\n _default = Config.OPTIONS[option]\n elif option in Config.FILE_OPTIONS.keys():\n _default = Config.FILE_OPTIONS[option]\n elif option in Config.PATH_OPTIONS.keys():\n _default = Config.PATH_OPTIONS[option]\n else:\n _default = None # XXX ??\n \n _val = self.__get(option)\n\n if _val: \n return _val\n else:\n return _default", "def is_long(self):\n answer = self._call('is_long')\n return answer.yes", "def getSetInt(self, key: str, default: int | None = None) -> int:\n value = self.parsedConfig.getint(key, default)\n self.parsedConfig[key] = str(value)\n return value", "def int_parameter(level, maxval):\n return int(level * maxval / 10)", "def getLSLimits(*args):\n return args[0].Limit.LSLimit.ls_limit", "def GetInteger(self,prompt=''):\n\t\treturn self.acad.ActiveDocument.Utility.GetInteger(prompt)", "def testLongValue(self):\n objectID = uuid4()\n user = createUser(u'username', u'password', u'User',\n u'[email protected]')\n namespace = createNamespace(user, u'name')\n tag = createTag(user, namespace, u'tag')\n self.store.add(TagValue(user.id, tag.id, objectID, sys.maxint + 1))", "def getLCLimits(*args):\n return args[0].Limit.LCLimit.lc_limit", "def wLong(self, value):\n self.w(struct.pack(self.endian + \"q\", value))", "def long(self, amount):", "def get_long_uid(klass, short_or_long_uid):\n if '_' in short_or_long_uid:\n return short_or_long_uid\n else:\n # Check the characters to make sure this could be valid\n if re.match(r'^[A-Za-z0-9\\-]+$', short_or_long_uid):\n return klass.generate_uid(identifier=short_or_long_uid)\n else:\n # Original UID is invalid anyway, return None\n None", "def make_intger(value):\n if value:\n return int(value)\n return None", "def get(option, default = None):\n\treturn _cfg.get('rosshm', option, fallback = default)", "def _bytes_to_long(self, input_bytes) -> int:\n return struct.unpack(\"<q\", input_bytes)[0]", "def getInt(self, item, default=0):\n value = self.getSection(CFG_GENERAL, item)\n return default if not value else int(value)", "def derefLongFromAddr (addr) :\n\tval = 
gdb.Value(addr).cast(gdb.lookup_type('long').pointer()).dereference()\n\treturn long(val) & faddress_and", "def get_option(self, key):\n return self.options[key]", "def getIntValue(self):\n return _libsbml.ConversionOption_getIntValue(self)", "def to_int_or_none(value: Union[None, int, str]) -> Optional[int]:\n return None if value is None else int(value)", "def get_attr_int(self, name, default=0):\n v = self.get_attr(name)\n if v is None:\n return default\n try:\n return int(v)\n except: # noqa\n return default", "def invalid_lattlong(self):\n return random.choice(INVALID_LATTLONG)" ]
[ "0.70192164", "0.68363434", "0.6702272", "0.66861874", "0.6630546", "0.66111416", "0.6431476", "0.6385897", "0.6310543", "0.628531", "0.6267423", "0.62353796", "0.6210837", "0.61966425", "0.6178884", "0.6079542", "0.60778195", "0.60364276", "0.59944767", "0.59922874", "0.59922874", "0.595521", "0.59507596", "0.5885261", "0.58822787", "0.58808017", "0.58609617", "0.58520585", "0.57999116", "0.57695496", "0.5766562", "0.5742026", "0.5731828", "0.5703479", "0.5699036", "0.5671929", "0.56659997", "0.5648356", "0.55990785", "0.5586852", "0.5586505", "0.55806386", "0.5574356", "0.5566785", "0.5564942", "0.5556492", "0.55398077", "0.5531014", "0.5504535", "0.55014044", "0.54516906", "0.54159397", "0.5407598", "0.5385006", "0.53614706", "0.5359407", "0.5353647", "0.5310346", "0.53095233", "0.53072125", "0.52002096", "0.5157204", "0.51458985", "0.51415014", "0.5140093", "0.5134896", "0.5131976", "0.51074946", "0.5105709", "0.50854474", "0.5083474", "0.5082056", "0.5064166", "0.5062262", "0.50503945", "0.5035141", "0.5019623", "0.50103635", "0.49998173", "0.49965623", "0.49932796", "0.49837986", "0.49819198", "0.49774048", "0.4977025", "0.49756622", "0.49755818", "0.49582294", "0.49578118", "0.49488693", "0.49478924", "0.4946219", "0.49422652", "0.4941316", "0.4931332", "0.4931206", "0.49301887", "0.49178705", "0.49035743", "0.489946" ]
0.8568428
0
Consolidated control for all the little global control flags scattered through the libraries. This isn't a particularly good place for this function to live, but it has to live somewhere and making it a method of the config parser from which it gets all of its data is less silly than the available alternatives.
def set_global_flags(self): import rpki.http, rpki.x509, rpki.sql, rpki.async, rpki.log try: rpki.http.debug_http = self.getboolean("debug_http") except ConfigParser.NoOptionError: pass try: rpki.http.want_persistent_client = self.getboolean("want_persistent_client") except ConfigParser.NoOptionError: pass try: rpki.http.want_persistent_server = self.getboolean("want_persistent_server") except ConfigParser.NoOptionError: pass try: rpki.http.use_adns = self.getboolean("use_adns") except ConfigParser.NoOptionError: pass try: rpki.http.enable_ipv6_clients = self.getboolean("enable_ipv6_clients") except ConfigParser.NoOptionError: pass try: rpki.http.enable_ipv6_servers = self.getboolean("enable_ipv6_servers") except ConfigParser.NoOptionError: pass try: rpki.x509.CMS_object.debug_cms_certs = self.getboolean("debug_cms_certs") except ConfigParser.NoOptionError: pass try: rpki.sql.sql_persistent.sql_debug = self.getboolean("sql_debug") except ConfigParser.NoOptionError: pass try: rpki.async.timer.gc_debug = self.getboolean("gc_debug") except ConfigParser.NoOptionError: pass try: rpki.async.timer.run_debug = self.getboolean("timer_debug") except ConfigParser.NoOptionError: pass try: rpki.x509.XML_CMS_object.dump_outbound_cms = rpki.x509.DeadDrop(self.get("dump_outbound_cms")) except ConfigParser.NoOptionError: pass try: rpki.x509.XML_CMS_object.dump_inbound_cms = rpki.x509.DeadDrop(self.get("dump_inbound_cms")) except ConfigParser.NoOptionError: pass try: rpki.async.gc_summary(self.getint("gc_summary"), self.getint("gc_summary_threshold", 0)) except ConfigParser.NoOptionError: pass try: rpki.log.enable_tracebacks = self.getboolean("enable_tracebacks") except ConfigParser.NoOptionError: pass
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _GclStyleSettings(self):\n settings = {\n 'port': self.GetCodeReviewSetting('TRYSERVER_HTTP_PORT'),\n 'host': self.GetCodeReviewSetting('TRYSERVER_HTTP_HOST'),\n 'svn_repo': self.GetCodeReviewSetting('TRYSERVER_SVN_URL'),\n 'gerrit_url': self.GetCodeReviewSetting('TRYSERVER_GERRIT_URL'),\n 'git_repo': self.GetCodeReviewSetting('TRYSERVER_GIT_URL'),\n 'project': self.GetCodeReviewSetting('TRYSERVER_PROJECT'),\n # Primarily for revision=auto\n 'revision': self.GetCodeReviewSetting('TRYSERVER_REVISION'),\n 'root': self.GetCodeReviewSetting('TRYSERVER_ROOT'),\n 'patchlevel': self.GetCodeReviewSetting('TRYSERVER_PATCHLEVEL'),\n }\n logging.info('\\n'.join(['%s: %s' % (k, v)\n for (k, v) in settings.iteritems() if v]))\n for (k, v) in settings.iteritems():\n # Avoid overwriting options already set using command line flags.\n if v and getattr(self.options, k) is None:\n setattr(self.options, k, v)", "def base_settings():\n return \"\"\"\n iota = True\n rho = False\n omega = True\n chi = True\n pini = False\n\n emr = 0\n constrain_omega = 1\n iota.at_specific = 0\n iota.min = 0.0001\n iota.age_cnt = 2\n iota.time_cnt = 2\n omega.at_specific = 1\n omega.min = 0.0001\n omega.age_cnt = 0\n omega.time_cnt = 0\n chi.at_specific = 0\n chi.min = 0.0001\n chi.age_cnt = 1\n chi.time_cnt = 2\n drill_start = 0\n drill_end = -1\n re.iota = all\n re.omega = all\n re.chi = all\n study.0 = False\n study.11 = True\n study.11.at_specific = 0\n study.11.age_cnt = 1\n study.11.time_cnt = 1\n study.11.covtype = rate_value\n study.11.rate = chi\n study.1604 = True\n study.1604.at_specific = 0\n study.1604.age_cnt = 1\n study.1604.time_cnt = 1\n study.1604.covtype = meas_std\n country.156 = True\n country.156.at_specific = 0\n country.156.age_cnt = 1\n country.156.time_cnt = 1\n country.156.covtype = rate_value\n country.156.rate = iota\n country.1998 = True\n country.1998.at_specific = 0\n country.1998.age_cnt = 1\n country.1998.time_cnt = 1\n country.1998.covtype = meas_std\n job_idx = 0\n \"\"\"", "def AutomagicalSettings(self):\n # Try to find gclient or repo root first.\n if not self.options.no_search:\n self.toplevel_root = gclient_utils.FindGclientRoot(self.checkout_root)\n if self.toplevel_root:\n logging.info('Found .gclient at %s' % self.toplevel_root)\n else:\n self.toplevel_root = gclient_utils.FindFileUpwards(\n os.path.join('..', '.repo'), self.checkout_root)\n if self.toplevel_root:\n logging.info('Found .repo dir at %s'\n % os.path.dirname(self.toplevel_root))\n\n # Parse TRYSERVER_* settings from codereview.settings before falling back\n # on setting self.options.root manually further down. 
Otherwise\n # TRYSERVER_ROOT would never be used in codereview.settings.\n self._GclStyleSettings()\n\n if self.toplevel_root and not self.options.root:\n assert os.path.abspath(self.toplevel_root) == self.toplevel_root\n self.options.root = gclient_utils.PathDifference(self.toplevel_root,\n self.checkout_root)\n else:\n self._GclStyleSettings()", "def RPC_DigitizationToolCommonCfg(flags, name=\"RpcDigitizationTool\", **kwargs):\n from MuonConfig.MuonCondAlgConfig import RpcCondDbAlgCfg # MT-safe conditions access\n acc = RpcCondDbAlgCfg(flags)\n if flags.Digitization.DoXingByXingPileUp:\n kwargs.setdefault(\"FirstXing\", RPC_FirstXing())\n kwargs.setdefault(\"LastXing\", RPC_LastXing())\n kwargs.setdefault(\"OutputObjectName\", \"RPC_DIGITS\")\n if flags.Digitization.PileUpPremixing:\n kwargs.setdefault(\"OutputSDOName\", flags.Overlay.BkgPrefix + \"RPC_SDO\")\n else:\n kwargs.setdefault(\"OutputSDOName\", \"RPC_SDO\")\n # config\n kwargs.setdefault(\"DeadTime\", 100)\n kwargs.setdefault(\"PatchForRpcTime\", True)\n # kwargs.setdefault(\"PatchForRpcTimeShift\", 9.6875)\n kwargs.setdefault(\"PatchForRpcTimeShift\", 12.5)\n kwargs.setdefault(\"turnON_efficiency\", True)\n kwargs.setdefault(\"turnON_clustersize\", True)\n kwargs.setdefault(\"testbeam_clustersize\", 0)\n kwargs.setdefault(\"ClusterSize1_2uncorr\", 0)\n kwargs.setdefault(\"CutProjectedTracks\", 100)\n kwargs.setdefault(\"RPCInfoFromDb\", True)\n kwargs.setdefault(\"Efficiency_fromCOOL\", True)\n kwargs.setdefault(\"EfficiencyPatchForBMShighEta\", False)\n kwargs.setdefault(\"ClusterSize_fromCOOL\", True)\n kwargs.setdefault(\"DumpFromDbFirst\", False)\n kwargs.setdefault(\"PanelId_OFF_fromlist\", False)\n kwargs.setdefault(\"PanelId_OK_fromlist\", False)\n kwargs.setdefault(\"IgnoreRunDependentConfig\", False)\n kwargs.setdefault(\"PrintCalibrationVector\",False )\n kwargs.setdefault(\"PhiAndEtaEff_A\",[0.938, 0.938, 0.938, 0.938, 0.938, 0.938, 0.938, 0.938, 0.938])\n kwargs.setdefault(\"OnlyPhiEff_A\" ,[0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022])\n kwargs.setdefault(\"OnlyEtaEff_A\" ,[0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022])\n kwargs.setdefault(\"PhiAndEtaEff_C\",[0.938, 0.938, 0.938, 0.938, 0.938, 0.938, 0.938, 0.938, 0.938])\n kwargs.setdefault(\"OnlyPhiEff_C\" ,[0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022])\n kwargs.setdefault(\"OnlyEtaEff_C\" ,[0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022, 0.022])\n kwargs.setdefault(\"FracClusterSize1_A\", [0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664])\n kwargs.setdefault(\"FracClusterSize2_A\", [0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986])\n kwargs.setdefault(\"FracClusterSizeTail_A\",[0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035 ])\n kwargs.setdefault(\"MeanClusterSizeTail_A\",[0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598])\n kwargs.setdefault(\"FracClusterSize1_C\", [0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 0.609664, 
0.609664, 0.609664, 0.609664, 0.609664])\n kwargs.setdefault(\"FracClusterSize2_C\", [0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986, 0.259986])\n kwargs.setdefault(\"FracClusterSizeTail_C\",[0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035, 0.13035 ])\n kwargs.setdefault(\"MeanClusterSizeTail_C\",[0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598, 0.548598])\n RpcDigitizationTool = CompFactory.RpcDigitizationTool\n acc.setPrivateTools(RpcDigitizationTool(name, **kwargs))\n return acc", "def parseFlags(self):\n # Blank return value.\n retVal = \"\"\n \n try:\n # Store flags as we parse them.\n allFlags = []\n \n # Get the accumulator flag.\n accFlag = self.__flags & self.f_accum\n trendFlag = self.__flags & self.f_trend\n modeFlag = self.__flags & self.f_mode\n \n # Complete set of readings?\n if accFlag == self.f_accum_complete:\n # Completed loading values into the accumulator.\n allFlags.append('C')\n elif accFlag == self.f_accum_accum:\n # Still accumulating.\n allFlags.append('A')\n elif accFlag == self.f_accum_unk:\n # Unknown.\n allFlags.append('?')\n else:\n # Bad value.\n allFlags.append('!')\n \n # Trend?\n if (trendFlag) == self.f_trend_stable:\n # Readings stable.\n allFlags.append('S')\n elif (trendFlag) == self.f_trend_up:\n # Still accumulating.\n allFlags.append('U')\n elif (trendFlag) == self.f_trend_dn:\n # Still accumulating.\n allFlags.append('D')\n elif (trendFlag) == self.f_trend_unk:\n # Still accumulating.\n allFlags.append('?')\n else:\n # Bad value.\n allFlags.append('!')\n \n # Mode?\n if modeFlag == self.f_mode_fast:\n # Fast\n allFlags.append('F')\n elif modeFlag == self.f_mode_slow:\n # Slow\n allFlags.append('S')\n elif modeFlag == self.f_mode_counter:\n # Stream\n allFlags.append('C')\n elif modeFlag == self.f_mode_scaler:\n # Roll\n allFlags.append('L')\n else:\n # Bad value.\n allFlags.append('!')\n \n # Build a nice string.\n retVal = ''.join(allFlags)\n \n \n except:\n raise\n \n # Return value.\n return retVal", "def __process_flags(self, flags: int) -> Dict[str, bool]:\n return {\n 'ns': True if flags & 0x100 else False,\n 'cwr': True if flags & 0x080 else False,\n 'ece': True if flags & 0x040 else False,\n 'urg': True if flags & 0x020 else False,\n 'ack': True if flags & 0x010 else False,\n 'psh': True if flags & 0x008 else False,\n 'rst': True if flags & 0x004 else False,\n 'syn': True if flags & 0x002 else False,\n 'fin': True if flags & 0x001 else False,\n }", "def get_flags(cls):\n return cls.get_short_flag(), cls.get_flag()", "def _flags(self):\n done, data = self._request('GE')\n if done:\n flags = int(data[1], 16)\n else:\n raise EvseError\n return {\n 'service_level': (flags & 0x0001) + 1,\n 'diode_check': not flags & 0x0002,\n 'vent_required': not flags & 0x0004,\n 'ground_check': not flags & 0x0008,\n 'stuck_relay_check': not flags & 0x0010,\n 'auto_service_level': not flags & 0x0020,\n 'auto_start': not flags & 0x0040,\n 'serial_debug': not not flags & 0x0080,\n 'lcd_type': 'monochrome' if flags & 0x0100 else 'rgb',\n 'gfi_self_test': not flags & 0x0200\n }", "def __options(self):\n\t\ta = 1 if self.random else 0\n\t\tb = 2 if self.topoftheday else 0\n\t\tc = 4 if self.offline else 0\n\t\treturn a+b+c", 
"def versatileOptions():\r\n return tuple(sorted(i[0] for i in list(Options.defaults().items()) if i[1].find(' #v ') > 0))", "def preprocess_settings(self, eventlist):\n\n # cache some stuff?\n self.controllerroot = self.settings.get_subvalue(mconst.DEF_SETTINGSEC_config, mconst.DEF_SETTINGNAME_controllerroot)\n # pack manager settings\n self.comp('packmanager').set_directories( self.get_root_pack_directory_list() + self.get_site_pack_directory_list() )\n self.comp('packmanager').set_packsettings( self.settings.get_value(mconst.DEF_SETTINGSEC_packs) )\n self.comp('packmanager').set_default_packsettings(mconst.DEF_SETTINGVAL_default_pack_settings)\n self.comp('packmanager').set_flag_loadsetuptoolspacks(self.settings.get_subvalue(mconst.DEF_SETTINGSEC_config, mconst.DEF_SETTINGNAME_flag_importsetuptoolspacks, mconst.DEF_SETTINGVAL_flag_importsetuptoolspacks))\n # database manager settings\n self.comp('dbmanager').set_databasesettings( self.settings.get_value(mconst.DEF_SETTINGSEC_database) )\n # isenabled flag\n self.isenabled = self.settings.get_subvalue(mconst.DEF_SETTINGSEC_config, mconst.DEF_SETTINGNAME_isenabled, self.isenabled)\n self.siteurl_relative = self.settings.get_subvalue(mconst.DEF_SETTINGSEC_config, mconst.DEF_SETTINGNAME_siteurl_relative, self.siteurl_relative)", "def common_options(self):\n return self._common_options", "def __get_options(self):\n for sect in self.file_parser.sections():\n if self.file_parser.has_option(sect, 'implementation'):\n selected_imp = self.file_parser.get(sect, 'implementation')\n imptype = self.file_parser.get(sect, 'optype')\n # pylint: disable = E1103\n enabled = self.file_parser.get(sect, 'enabled').lower()\n # pylint: enable = E1103\n if enabled == 'always':\n stateval = True\n permanent = True\n elif enabled == 'true':\n stateval = True\n permanent = False\n else:\n stateval = False\n permanent = False\n\n if self.file_parser.has_option(sect, 'id'):\n _id = self.file_parser.get(sect, 'id')\n self.opt_dict[sect]['id'] = _id\n\n self.opt_dict[sect]['permanent'] = permanent\n self.opt_dict[sect]['imptype'] = imptype\n if stateval == True:\n imp_unavailable = (selected_imp in self.imp2opt_dict) and (\n self.imp2opt_dict[selected_imp] != 'none' )\n if selected_imp == 'none' or imp_unavailable:\n self.opt_dict[sect]['enabled'] = False\n self.opt_dict[sect]['selected_imp'] = 'none'\n else:\n self.opt_dict[sect]['enabled'] = True\n self.set_imp(sect, selected_imp)\n# dbmsg = 'Add imp2opt_dict[{0}] = {1}'\n# print dbmsg.format(selected_imp, sect)\n else:\n self.opt_dict[sect]['enabled'] = False\n self.opt_dict[sect]['selected_imp'] = 'none'", "def system_protection_config():\n\n\tprint_section_header(\"GENERAL SYSTEM PROTECTION\", Fore.BLUE)\n\n\t# Enable Gatekeeper\n\tif prompt_yes_no(top_line=\"-> Enable Gatekeeper?\",\n\t bottom_line=\"Defend against malware by enforcing code signing and verifying downloaded applications before letting them to run.\"):\n\t\tprint_confirmation(\"Enabling Gatekeeper...\")\n\t\tsp.run('sudo spctl --master-enable', shell=True, stdout=sp.PIPE)\n\t\tsp.run('sudo spctl --enable --label \"Developer ID\"', shell=True, stdout=sp.PIPE)\n\n\t# Disable automatic software whitelisting\n\tif prompt_yes_no(top_line=\"-> Prevent automatic software whitelisting?\",\n\t bottom_line=\"Both built-in and downloaded software will require user approval for whitelisting.\"):\n\t\tprint_confirmation(\"Preventing automatic whitelisting...\")\n\t\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsigned', 'off'], 
stdout=sp.PIPE)\n\t\tsp.run(['sudo', '/usr/libexec/ApplicationFirewall/socketfilterfw', '--setallowsignedapp', 'off'], stdout=sp.PIPE)\n\n\t# Captive Portal\n\tif prompt_yes_no(top_line=\"-> Disable Captive Portal Assistant and force login through browser on untrusted networks?\",\n\t bottom_line=\"Captive Portal could be triggered and direct you to a malicious site WITHOUT any user interaction.\"):\n\t\tprint_confirmation(\"Disabling Captive Portal Assistant...\")\n\t\tsp.run(['sudo', 'defaults', 'write', '/Library/Preferences/SystemConfiguration/com.apple.captive.control', 'Active', '-bool', 'false'], stdout=sp.PIPE)", "def default_controls(self):\n\t\tcontrol_list = []\n\t\tconfig = ConfigParser.ConfigParser()\n\t\tconfig.read(\"./config.ini\")\n\t\tcontrols = config.options(\"default_controls\")\n\t\tfor c in controls:\n\t\t\ttry: control_list.append( config.get(\"default_controls\", c) )\n\t\t\texcept:\n\t\t\t\tprint \"ERROR: missing control settings. Check config.ini.\"\n\t\t\t\traise(SystemExit)\n\t\treturn control_list", "def use_flags(*funcs):\n\n global GLOBAL_STATUS\n if funcs:\n GLOBAL_STATUS.discard('ERRORS')\n GLOBAL_STATUS.add('FLAGS')\n else:\n GLOBAL_STATUS.discard('ERRORS')\n GLOBAL_STATUS.discard('FLAGS')\n\n for name in _get_func_names(funcs):\n if 'error' not in name and 'flag' not in name:\n globals()[name] = globals()[name].flag", "def _tools(self):\n # sign on\n yield \"\"\n yield \"# tools\"\n yield \"# librarian\"\n yield \"ar := ar\"\n yield \"ar.flags.create := rc\"\n yield \"ar.flags.extract := x\"\n yield \"ar.flags.remove := d\"\n yield \"ar.flags.update := ru\"\n yield \"ar.create := $(ar) $(ar.flags.create)\"\n yield \"ar.extract := $(ar) $(ar.flags.extract)\"\n yield \"ar.remove := $(ar) $(ar.flags.remove)\"\n yield \"ar.update := $(ar) $(ar.flags.update)\"\n yield \"\"\n yield \"# cwd\"\n yield \"cd := cd\"\n yield \"\"\n yield \"# file attributes\"\n yield \"chgrp := chgrp\"\n yield \"chgrp.flags.recurse := -R\"\n yield \"chgrp.recurse := $(chgrp) $(chgrp.flags.recurse)\"\n yield \"\"\n yield \"chmod := chmod\"\n yield \"chmod.flags.recurse := -R\"\n yield \"chmod.flags.write := +w\"\n yield \"chmod.recurse := $(chmod) $(chmod.flags.recurse)\"\n yield \"chmod.write := $(chmod) $(chmod.flags.write)\"\n yield \"chmod.write-recurse := $(chmod.recurse) $(chmod.flags.write)\"\n yield \"\"\n yield \"chown := chown\"\n yield \"chown.flags.recurse := -R\"\n yield \"chown.recurse := $(chown) $(chown.flags.recurse)\"\n yield \"\"\n yield \"# copy\"\n yield \"cp := cp\"\n yield \"cp.flags.force := -f\"\n yield \"cp.flags.recurse := -r\"\n yield \"cp.flags.force-recurse := -fr\"\n yield \"cp.force := $(cp) $(cp.flags.force)\"\n yield \"cp.recurse := $(cp) $(cp.flags.recurse)\"\n yield \"cp.force-recurse := $(cp) $(cp.flags.force-recurse)\"\n yield \"\"\n yield \"# date\"\n yield \"date := date\"\n yield \"date.date := $(date) '+%Y-%m-%d'\"\n yield \"date.stamp := $(date) -u\"\n yield \"date.year := $(date) '+%Y'\"\n yield \"\"\n yield \"# diff\"\n yield \"diff := diff\"\n yield \"\"\n yield \"# echo\"\n yield \"echo := echo\"\n yield \"\"\n yield \"# git\"\n yield \"git := git\"\n yield 'git.hash := $(git) log --format=format:\"%h\" -n 1'\n yield \"git.tag := $(git) describe --tags --long --always\"\n yield \"\"\n yield \"# loader\"\n yield \"ld := ld\"\n yield \"ld.flags.out := -o\"\n yield \"ld.flags.shared := -shared\"\n yield \"ld.out := $(ld) $(ld.flags.out)\"\n yield \"ld.shared := $(ld) $(ld.flags.shared)\"\n yield \"\"\n yield \"# links\"\n yield \"ln := 
ln\"\n yield \"ln.flags.soft := -s\"\n yield \"ln.soft := $(ln) $(ln.flags.soft)\"\n yield \"\"\n yield \"# directories\"\n yield \"mkdir := mkdir\"\n yield \"mkdir.flags.make-parents := -p\"\n yield \"mkdirp := $(mkdir) $(mkdir.flags.make-parents)\"\n yield \"\"\n yield \"# move\"\n yield \"mv := mv\"\n yield \"mv.flags.force := -f\"\n yield \"mv.force := $(mv) $(mv.flags.force)\"\n yield \"\"\n yield \"# ranlib\"\n yield \"ranlib := ranlib\"\n yield \"ranlib.flags :=\"\n yield \"\"\n yield \"# remove\"\n yield \"rm := rm\"\n yield \"rm.flags.force := -f\"\n yield \"rm.flags.recurse := -r\"\n yield \"rm.flags.force-recurse := -rf\"\n yield \"rm.force := $(rm) $(rm.flags.force)\"\n yield \"rm.recurse := $(rm) $(rm.flags.recurse)\"\n yield \"rm.force-recurse := $(rm) $(rm.flags.force-recurse)\"\n yield \"\"\n yield \"rmdir := rmdir\"\n yield \"\"\n yield \"# rsync\"\n yield \"rsync := rsync\"\n yield \"rsync.flags.recurse := -ruavz --progress --stats\"\n yield \"rsync.recurse := $(rsync) $(rsync.flags.recurse)\"\n yield \"\"\n yield \"# sed\"\n yield \"sed := sed\"\n yield \"\"\n yield \"# ssh\"\n yield \"ssh := ssh\"\n yield \"scp := scp\"\n yield \"scp.flags.recurse := -r\"\n yield \"scp.recurse := $(scp) $(scp.flags.recurse)\"\n yield \"\"\n yield \"# tags\"\n yield \"tags := true\"\n yield \"tags.flags :=\"\n yield \"tags.home :=\"\n yield \"tags.file := $(tags.home)/TAGS\"\n yield \"\"\n yield \"# tar\"\n yield \"tar := tar\"\n yield \"tar.flags.create := -cvj -f\"\n yield \"tar.create := $(tar) $(tar.flags.create)\"\n yield \"\"\n yield \"# TeX and associated tools\"\n yield \"tex.tex := tex\"\n yield \"tex.latex := latex\"\n yield \"tex.pdflatex := pdflatex\"\n yield \"tex.bibtex := bibtex\"\n yield \"tex.dvips := dvips\"\n yield \"tex.dvipdf := dvipdf\"\n yield \"\"\n yield \"# empty file creation and modification time updates\"\n yield \"touch := touch\"\n yield \"\"\n yield \"# yacc\"\n yield \"yacc := yacc\"\n yield \"yacc.c := y.tab.c\"\n yield \"yacc.h := y.tab.h\"\n\n # all done\n return", "def ini_get_all():\n raise NotImplementedError()", "def TRT_DigitizationBasicCfg(flags, **kwargs):\n acc = ComponentAccumulator()\n if \"PileUpTools\" not in kwargs:\n PileUpTools = acc.popToolsAndMerge(TRT_DigitizationToolCfg(flags))\n kwargs[\"PileUpTools\"] = PileUpTools\n acc.merge(PileUpToolsCfg(flags, **kwargs))\n return acc", "def _CommonOptions(self, p):\n super()._CommonOptions(p, opt_v=False)", "def __build_global_cfg(globalcfg):\n cfglst = []\n\n # Set Global configuration\n gbl_prefix = 'keylset global_config '\n __append_line(cfglst, '#Generated from Cafe')\n timestamp = time.strftime(\"%m-%d-%y %H:%M:%S\", time.localtime())\n __append_line(cfglst, '#TimeStamp: ' + timestamp)\n __append_line(cfglst, gbl_prefix + 'ChassisName {' + globalcfg['ChassisName'] + '}')\n __append_line(cfglst, gbl_prefix + 'RandomSeed ' + str(random.randint(1, 999999999)))\n __append_line(cfglst, \"\")\n __append_line(cfglst, '#LogsAndResultsInfo Global Options')\n # TODO: Remove hardcoded log directory\n __append_line(cfglst, gbl_prefix + 'LogsDir C:/Users/Testmin/VeriWave/WaveApps/Results')\n __append_line(cfglst, gbl_prefix + 'GeneratePdfReport True')\n __append_line(cfglst, \"\")\n __append_line(cfglst, '#Test Traffic Global Options')\n __append_line(cfglst, gbl_prefix + 'Source {' + globalcfg['Source'] + '}')\n __append_line(cfglst, gbl_prefix + 'Destination {' + globalcfg['Destination'] + '}')\n __append_line(cfglst, gbl_prefix + 'Ports {' + globalcfg['Ports'] + '}')\n # __append_line(cfglst, 
gbl_prefix + 'MappingOption 0')\n __append_line(cfglst, gbl_prefix + 'PayloadData None')\n # __append_line(cfglst, gbl_prefix + 'DestinationPort ' + globalcfg['DestinationPort'])\n # __append_line(cfglst, gbl_prefix + 'SourcePort ' + globalcfg['SourcePort'])\n #__append_line(cfglst, gbl_prefix + 'TestList {unicast_unidirectional_throughput}')\n __append_line(cfglst, gbl_prefix + 'TestList {' + globalcfg['TestList'] + '}')\n __append_line(cfglst, gbl_prefix + 'Direction {' + globalcfg['Direction'] + '}')\n # Assumption there always be a WiFi group present\n __append_line(cfglst, gbl_prefix + 'Channel {' + globalcfg['Channel'] + '}')\n __append_line(cfglst, gbl_prefix + 'WirelessGroupCount ' + str(globalcfg['WirelessGroupCount']))\n __append_line(cfglst, gbl_prefix + 'FlowType UDP')\n __append_line(cfglst, gbl_prefix + 'ArpNumRetries 5')\n __append_line(cfglst, gbl_prefix + 'ArpRate 100')\n __append_line(cfglst, gbl_prefix + 'ArpTimeout 5')\n __append_line(cfglst, gbl_prefix + 'NumTrials 1')\n if 'SettleTime' in globalcfg:\n __append_line(cfglst, gbl_prefix + 'SettleTime ' + globalcfg['SettleTime'])\n else:\n __append_line(cfglst, gbl_prefix + 'SettleTime 3')\n if 'LossTolerance' in globalcfg:\n __append_line(cfglst, gbl_prefix + 'LossTolerance ' + globalcfg['LossTolerance'])\n if 'TrialDuration' in globalcfg:\n __append_line(cfglst, gbl_prefix + 'TrialDuration ' + globalcfg['TrialDuration'])\n __append_line(cfglst, gbl_prefix + 'TestDurationSec ' + globalcfg['TrialDuration'])\n __append_line(cfglst, \"\")\n\n return cfglst", "def read_flags():\n return flag_args", "def _control_predefined(operation, num_ctrl_qubits):\n if operation.name == 'x' and num_ctrl_qubits in [1, 2]:\n if num_ctrl_qubits == 1:\n import qiskit.extensions.standard.cx\n cgate = qiskit.extensions.standard.cx.CnotGate()\n else:\n import qiskit.extensions.standard.ccx\n cgate = qiskit.extensions.standard.ccx.ToffoliGate()\n elif operation.name == 'y':\n import qiskit.extensions.standard.cy\n cgate = qiskit.extensions.standard.cy.CyGate()\n elif operation.name == 'z':\n import qiskit.extensions.standard.cz\n cgate = qiskit.extensions.standard.cz.CzGate()\n elif operation.name == 'h':\n import qiskit.extensions.standard.ch\n cgate = qiskit.extensions.standard.ch.CHGate()\n elif operation.name in {'rx', 'ry', 'rz'}:\n if operation.name == 'rx':\n import qiskit.extensions.standard.crx\n cgate = qiskit.extensions.standard.crx.CrxGate(*operation.params)\n elif operation.name == 'ry':\n import qiskit.extensions.standard.cry\n cgate = qiskit.extensions.standard.cry.CryGate(*operation.params)\n else: # operation.name == 'rz'\n import qiskit.extensions.standard.crz\n cgate = qiskit.extensions.standard.crz.CrzGate(*operation.params)\n if num_ctrl_qubits == 1:\n return cgate\n else:\n # only predefined for one control qubit\n return cgate.control(num_ctrl_qubits - 1)\n elif operation.name == 'swap':\n import qiskit.extensions.standard.cswap\n cgate = qiskit.extensions.standard.cswap.FredkinGate()\n elif operation.name == 'u1':\n import qiskit.extensions.standard.cu1\n cgate = qiskit.extensions.standard.cu1.Cu1Gate(*operation.params)\n elif operation.name == 'u3':\n import qiskit.extensions.standard.cu3\n cgate = qiskit.extensions.standard.cu3.Cu3Gate(*operation.params)\n elif operation.name == 'cx':\n import qiskit.extensions.standard.ccx\n cgate = qiskit.extensions.standard.ccx.ToffoliGate()\n else:\n raise QiskitError('No standard controlled gate for \"{}\"'.format(\n operation.name))\n return cgate", "def common_options(func):\n\n 
def parse_preset(ctx, param, value):\n return PRESETS.get(value, (None, None))\n\n def parse_private(ctx, param, value):\n return hex_from_b64(value) if value else None\n\n func = click.option('--private', default=None, help='Private.', callback=parse_private)(func)\n\n func = click.option(\n '--preset',\n default=None, help='Preset ID defining prime and generator pair.',\n type=click.Choice(PRESETS.keys()), callback=parse_preset\n )(func)\n\n return func", "def get_flags(self):\n return self.short_flag, self.long_flag", "def options_set(self):\n\n global OPTIONS\n OPTIONS.append(config.ENABLE(self.threaded))\n OPTIONS.append(config.ENABLE(self.datasaver))\n OPTIONS.append(self.language)", "def RPC_DigitizationBasicCfg(flags, **kwargs):\n acc = MuonGeoModelCfg(flags)\n if \"PileUpTools\" not in kwargs:\n PileUpTools = acc.popToolsAndMerge(RPC_DigitizationToolCfg(flags))\n kwargs[\"PileUpTools\"] = PileUpTools\n acc.merge(PileUpToolsCfg(flags, **kwargs))\n return acc", "def process_flags(self):\n self.parse_search_terms(self.search_terms)\n \n # If randomisation is explicitly set, we enable it outright.. if not\n # it depends on whether we've provided search terms or not\n if self.force_randomise:\n self.randomise = True\n elif self.search_terms:\n self.randomise = False\n \n if self.update_index:\n self._update_index()\n \n if self.list_only:\n self.music_client = \"echo\" # FIXME: unix-only!\n self.loop_songs = False", "def handleSpecialCERNMergeSettings(self, funcName):\n if self.getCmsswVersion().startswith(\"CMSSW_7_5\") and False:\n self.logger.info(\"Using fastCloning/lazydownload\")\n self.process.add_(cms.Service(\"SiteLocalConfigService\",\n overrideSourceCloneCacheHintDir=cms.untracked.string(\"lazy-download\")))\n elif funcName == \"merge\":\n self.logger.info(\"Using lazydownload\")\n self.process.add_(cms.Service(\"SiteLocalConfigService\",\n overrideSourceCacheHintDir=cms.untracked.string(\"lazy-download\")))\n return", "def jail_global_option(action, option = ''):\n \n# find jail global optins section by comment in /etc/jail.conf\n jc = open(jailconf, 'r')\n jcl = []\n for i in jc:\n i = i.strip('\\n')\n jcl.append(i)\n\n# global option begin \n for i in jcl:\n if \"#@\" in i:\n jb = jcl.index(i) + 1\n break\n\n# global option end\n x = 0\n for i in jcl[jb:]:\n x = x + 1\n if \"#@\" in i:\n je = jb + x - 1\n break\n\n# create global options list\n lmen = [\"Number\", \"Jails Global Settnings\"]\n jail_global = []\n number_global = 0\n for i in jcl[jb:je]:\n jail_global.append([number_global, i[:-1]])\n number_global += 1\n\n# list jail global options in /etc/jail.conf\n if action == \"list\":\n print tabulate(jail_global, lmen)\n return False\n \n# remove jail global option from /etc/jail.conf\n if action == \"rm\":\n print tabulate(jail_global, lmen)\n \n if number_global == 0:\n return False\n \n while True:\n rmglobal = raw_input(\"global setting number or (!) 
:> \")\n if rmglobal == \"!\":\n print \" INFO: Interrupted by user\"\n return False\n\n try:\n int(rmglobal)\n except ValueError:\n print \" ERROR: Slecet valid number (%s - %s)!\" % (0, len(jail_global) - 1)\n continue\n\n if int(rmglobal) >= len(jail_global):\n print \" ERROR: Slecet valid number (%s - %s)!\" % (0, len(jail_global) - 1)\n continue\n \n# write new config with changed jails global options\n jc = open(jailconf, 'w')\n del jcl[jb+int(rmglobal)]\n jc.write('\\n'.join(jcl))\n jc.close()\n \n logvar = jail_global[int(rmglobal)][1]\n msg = \" INFO: '%s' Global setting was removed!\" % logvar\n print msg\n return False\n\n# add new jail global option at the end\n if action == \"add\":\n option.append(';')\n option = ''.join(option) # convert option list in string\n \n # write new config with changed jail local option\n jc = open(jailconf, 'w')\n jcl.insert(je, option)\n jc.write('\\n'.join(jcl))\n jc.close()\n \n msg = \" INFO: '%s' Global setting was added!\" % option[:-1]\n log(msg)\n return False", "def setup_flags(self):\n self.io_args.color = self.io_args.color_full\n self.io_args.rig_in = self.io_args.rig\n self.io_args.matches = os.path.join(self.io_args.output_root, \"matches.json\")\n self.io_args.rig_out = os.path.join(self.io_args.output_root, \"rig.json\")", "def _handle_common_options(ctx, conpath=None, force=None, species=None):\n\n config = configuration.Config()\n\n ## Update the config or look in places where it may exist\n if conpath:\n config.reload(conpath)\n\n if force is not None:\n config.config['overwrite'] = force\n\n if species is not None:\n config.config['species'] = species\n\n ctx.ensure_object(dict)\n ctx.obj['config'] = config\n\n return ctx", "def _AddCommonFlags(self, resource):\n self.flags['format'] = 'json'\n self.additional_flags.extend(FLAGS.openstack_additional_flags or ())", "def TRT_OverlayDigitizationCfg(flags, **kwargs):\n acc = TRT_OverlayDigitizationBasicCfg(flags, **kwargs)\n acc.merge(TRT_OutputCfg(flags))\n return acc", "def apply_settings():\n\n scs_globals = _get_scs_globals()\n\n # avoid recursion if another apply settings is running already\n if scs_globals.config_update_lock:\n return False\n\n # NOTE: save file paths in extra variables and apply them on the end\n # to make sure all of the settings are loaded first.\n # This is needed as some libraries reading are driven by other values from config file.\n # For example: \"use_infixed\"\n scs_project_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.scs_project_path, scs_globals)\n shader_presets_filepath = _property_utils.get_by_type(bpy.types.GlobalSCSProps.shader_presets_filepath, scs_globals)\n trigger_actions_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.trigger_actions_rel_path, scs_globals)\n sign_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.sign_library_rel_path, scs_globals)\n tsem_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.tsem_library_rel_path, scs_globals)\n traffic_rules_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.traffic_rules_library_rel_path, scs_globals)\n hookup_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.hookup_library_rel_path, scs_globals)\n matsubs_library_rel_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.matsubs_library_rel_path, scs_globals)\n sun_profiles_library_path = _property_utils.get_by_type(bpy.types.GlobalSCSProps.sun_profiles_lib_path, scs_globals)\n conv_hlpr_converters_path = 
_property_utils.get_by_type(bpy.types.GlobalSCSProps.conv_hlpr_converters_path, scs_globals)\n\n # NOTE: as dump level is written in same section as config type\n # applying it directly might take place before we get information about config type\n # so it has to be saved into variable and applied only if global settings are loaded from config file\n dump_level = scs_globals.dump_level\n\n # lock update now, as we don't want any properties update functions to trigger rewrite of config file\n # which would lead to unwanted recursion\n engage_config_lock()\n\n config_container = _pix.get_data_from_file(get_config_filepath(), \" \")\n\n # avoid applying process of config if not present (most probably permission problems on config creation)\n if config_container is not None:\n\n settings_file_valid = 0\n for section in config_container:\n if settings_file_valid == 2:\n if section.type == \"Paths\":\n for prop in section.props:\n if prop[0] in (\"\", \"#\"):\n pass\n elif prop[0] == \"ProjectPath\":\n scs_project_path = prop[1]\n elif prop[0] == \"ShaderPresetsFilePath\":\n shader_presets_filepath = prop[1]\n elif prop[0] == \"TriggerActionsRelFilePath\":\n trigger_actions_rel_path = prop[1]\n elif prop[0] == \"TriggerActionsUseInfixed\":\n scs_globals.trigger_actions_use_infixed = prop[1]\n elif prop[0] == \"SignRelFilePath\":\n sign_library_rel_path = prop[1]\n elif prop[0] == \"SignUseInfixed\":\n scs_globals.sign_library_use_infixed = prop[1]\n elif prop[0] == \"TSemProfileRelFilePath\":\n tsem_library_rel_path = prop[1]\n elif prop[0] == \"TSemProfileUseInfixed\":\n scs_globals.tsem_library_use_infixed = prop[1]\n elif prop[0] == \"TrafficRulesRelFilePath\":\n traffic_rules_library_rel_path = prop[1]\n elif prop[0] == \"TrafficRulesUseInfixed\":\n scs_globals.traffic_rules_library_use_infixed = prop[1]\n elif prop[0] == \"HookupRelDirPath\":\n hookup_library_rel_path = prop[1]\n elif prop[0] == \"MatSubsRelFilePath\":\n matsubs_library_rel_path = prop[1]\n elif prop[0] == \"SunProfilesFilePath\":\n sun_profiles_library_path = prop[1]\n elif prop[0] == \"ConvertersPath\":\n conv_hlpr_converters_path = prop[1]\n elif prop[0] == \"UseAlternativeBases\":\n scs_globals.use_alternative_bases = prop[1]\n else:\n lprint('W Unrecognised item \"%s\" has been found in setting file! 
Skipping...', (str(prop[0]),))\n elif section.type == \"Import\":\n for prop in section.props:\n if prop[0] in (\"\", \"#\"):\n pass\n elif prop[0] == \"ImportScale\":\n scs_globals.import_scale = float(prop[1])\n elif prop[0] == \"PreservePathForExport\":\n scs_globals.import_preserve_path_for_export = prop[1]\n elif prop[0] == \"ImportPimFile\":\n scs_globals.import_pim_file = prop[1]\n elif prop[0] == \"UseWelding\":\n scs_globals.import_use_welding = prop[1]\n elif prop[0] == \"WeldingPrecision\":\n scs_globals.import_welding_precision = prop[1]\n elif prop[0] == \"UseNormals\":\n scs_globals.import_use_normals = prop[1]\n elif prop[0] == \"ImportPitFile\":\n scs_globals.import_pit_file = prop[1]\n elif prop[0] == \"LoadTextures\":\n scs_globals.import_load_textures = prop[1]\n elif prop[0] == \"ImportPicFile\":\n scs_globals.import_pic_file = prop[1]\n elif prop[0] == \"ImportPipFile\":\n scs_globals.import_pip_file = prop[1]\n elif prop[0] == \"ImportPisFile\":\n scs_globals.import_pis_file = prop[1]\n elif prop[0] == \"ConnectedBones\":\n scs_globals.import_connected_bones = prop[1]\n elif prop[0] == \"BoneImportScale\":\n scs_globals.import_bone_scale = float(prop[1])\n elif prop[0] == \"ImportPiaFile\":\n scs_globals.import_pia_file = prop[1]\n elif prop[0] == \"IncludeSubdirsForPia\":\n scs_globals.import_include_subdirs_for_pia = prop[1]\n elif section.type == \"Export\":\n for prop in section.props:\n if prop[0] in (\"\", \"#\"):\n pass\n elif prop[0] == \"ExportScale\":\n scs_globals.export_scale = float(prop[1])\n elif prop[0] == \"ApplyModifiers\":\n scs_globals.export_apply_modifiers = prop[1]\n elif prop[0] == \"ExcludeEdgesplit\":\n scs_globals.export_exclude_edgesplit = prop[1]\n elif prop[0] == \"IncludeEdgesplit\":\n scs_globals.export_include_edgesplit = prop[1]\n elif prop[0] == \"ActiveUVOnly\":\n scs_globals.export_active_uv_only = prop[1]\n elif prop[0] == \"ExportVertexGroups\":\n scs_globals.export_vertex_groups = prop[1]\n elif prop[0] == \"ExportVertexColor\":\n scs_globals.export_vertex_color = prop[1]\n elif prop[0] == \"ExportVertexColorType\":\n scs_globals.export_vertex_color_type = str(prop[1])\n elif prop[0] == \"ExportVertexColorType7\":\n scs_globals.export_vertex_color_type_7 = str(prop[1])\n elif prop[0] == \"ExportPimFile\":\n scs_globals.export_pim_file = prop[1]\n elif prop[0] == \"OutputType\":\n scs_globals.export_output_type = prop[1]\n elif prop[0] == \"ExportPitFile\":\n scs_globals.export_pit_file = prop[1]\n elif prop[0] == \"ExportPicFile\":\n scs_globals.export_pic_file = prop[1]\n elif prop[0] == \"ExportPipFile\":\n scs_globals.export_pip_file = prop[1]\n elif prop[0] == \"SignExport\":\n scs_globals.export_write_signature = prop[1]\n elif section.type == \"GlobalDisplay\":\n for prop in section.props:\n if prop[0] in (\"\", \"#\"):\n pass\n elif prop[0] == \"DisplayLocators\":\n scs_globals.display_locators = prop[1]\n elif prop[0] == \"LocatorSize\":\n scs_globals.locator_size = float(prop[1])\n elif prop[0] == \"LocatorEmptySize\":\n scs_globals.locator_empty_size = float(prop[1])\n elif prop[0] == \"DisplayConnections\":\n scs_globals.display_connections = prop[1]\n elif prop[0] == \"CurveSegments\":\n scs_globals.curve_segments = prop[1]\n elif prop[0] == \"OptimizedConnsDrawing\":\n scs_globals.optimized_connections_drawing = prop[1]\n elif prop[0] == \"DisplayTextInfo\":\n scs_globals.display_info = prop[1]\n else:\n lprint('W Unrecognised item \"%s\" has been found in setting file! 
Skipping...', (str(prop[0]),))\n elif section.type == \"GlobalColors\":\n for prop in section.props:\n if prop[0] in (\"\", \"#\"):\n pass\n elif prop[0] == \"PrefabLocatorsWire\":\n scs_globals.locator_prefab_wire_color = prop[1]\n elif prop[0] == \"ModelLocatorsWire\":\n scs_globals.locator_model_wire_color = prop[1]\n elif prop[0] == \"ColliderLocatorsWire\":\n scs_globals.locator_coll_wire_color = prop[1]\n elif prop[0] == \"ColliderLocatorsFace\":\n scs_globals.locator_coll_face_color = prop[1]\n elif prop[0] == \"NavigationCurveBase\":\n scs_globals.np_connection_base_color = prop[1]\n elif prop[0] == \"MapLineBase\":\n scs_globals.mp_connection_base_color = prop[1]\n elif prop[0] == \"TriggerLineBase\":\n scs_globals.tp_connection_base_color = prop[1]\n elif prop[0] == \"InfoText\":\n scs_globals.info_text_color = prop[1]\n elif prop[0] == \"BasePaint\":\n scs_globals.base_paint_color = prop[1]\n else:\n lprint('W Unrecognised item \"%s\" has been found in setting file! Skipping...', (str(prop[0]),))\n elif section.type == \"Header\":\n for prop in section.props:\n if prop[0] == \"FormatVersion\":\n if prop[1] == 1:\n settings_file_valid += 1\n elif prop[0] == \"Type\":\n if prop[1] == \"Configuration\":\n settings_file_valid += 1\n elif prop[0] == \"DumpLevel\":\n dump_level = prop[1]\n elif prop[0] == \"ConfigStoragePlace\":\n scs_globals.config_storage_place = prop[1]\n\n # if settings are read directly from blend file,\n # release update lock and don't search/apply any settings further\n if prop[1] == \"BlendFile\":\n settings_file_valid += 1\n\n # as dump level can be read already (it can be placed above config storage place property),\n # reset local variable back to value that was saved with blend file\n dump_level = scs_globals.dump_level\n\n break # to avoid further reading of header properties, so dump_level won't be overwritten unintentionally\n\n scs_globals.dump_level = dump_level\n\n # now as last apply all of the file paths\n # NOTE: applying paths is crucial for libraries\n # (they are reloaded/initiated in property update functions).\n if bpy.app.background: # if blender runs without UI then apply libraries directly as async operator is UI depended\n\n scs_globals.scs_project_path = scs_project_path\n scs_globals.shader_presets_filepath = shader_presets_filepath\n scs_globals.trigger_actions_rel_path = trigger_actions_rel_path\n scs_globals.sign_library_rel_path = sign_library_rel_path\n scs_globals.tsem_library_rel_path = tsem_library_rel_path\n scs_globals.traffic_rules_library_rel_path = traffic_rules_library_rel_path\n scs_globals.hookup_library_rel_path = hookup_library_rel_path\n scs_globals.matsubs_library_rel_path = matsubs_library_rel_path\n scs_globals.sun_profiles_lib_path = sun_profiles_library_path\n scs_globals.conv_hlpr_converters_path = conv_hlpr_converters_path\n\n else: # if blender is started normally use asynchronous operator to reload libraries\n\n bpy.ops.world.scs_paths_initialization('INVOKE_DEFAULT', paths_list=[\n {\"name\": \"project base path\", \"attr\": \"scs_project_path\", \"path\": scs_project_path},\n {\"name\": \"shader presets\", \"attr\": \"shader_presets_filepath\", \"path\": shader_presets_filepath},\n {\"name\": \"trigger actions library\", \"attr\": \"trigger_actions_rel_path\", \"path\": trigger_actions_rel_path},\n {\"name\": \"sign library\", \"attr\": \"sign_library_rel_path\", \"path\": sign_library_rel_path},\n {\"name\": \"traffic semaphore library\", \"attr\": \"tsem_library_rel_path\", \"path\": 
tsem_library_rel_path},\n {\"name\": \"traffic rules library\", \"attr\": \"traffic_rules_library_rel_path\", \"path\": traffic_rules_library_rel_path},\n {\"name\": \"hookups library\", \"attr\": \"hookup_library_rel_path\", \"path\": hookup_library_rel_path},\n {\"name\": \"material substance library\", \"attr\": \"matsubs_library_rel_path\", \"path\": matsubs_library_rel_path},\n {\"name\": \"sun profiles library\", \"attr\": \"sun_profiles_lib_path\", \"path\": sun_profiles_library_path},\n {\"name\": \"converters file path\", \"attr\": \"conv_hlpr_converters_path\", \"path\": conv_hlpr_converters_path},\n ])\n\n # release lock as properties are applied\n release_config_lock(use_paths_init_callback=not bpy.app.background)\n\n return True", "def edit_settings(self):\n return 1 << 3", "def cmdopt(request):\n return request.config.getoption(\"-c\")", "def control_opt(self):\n\n\n if self.run_opt['refine']:\n self.run_opt['relaunch']=1\n \n #check value for 'madweight_main'\n for i in range(3,9)+[-1,-3]:\n if self.run_opt[num_to_tag[i]]==1:\n self.run_opt['madweight_main']=1\n break\n\n if self.run_opt['relaunch']==1:\n self.run_opt['control']=1", "def __init__(self):\n ConfigParser.RawConfigParser.OPTCRE = re.compile(r'(?P<option>[^=\\s][^=]*)\\s*(?P<vi>[=])\\s*(?P<value>.*)$')\n self.CONFIG = ConfigParser.ConfigParser()\n self.CONFIG.read(os.path.join(os.path.dirname(__file__)))\n self.IPS = []", "def get_cfg_defaults():\n return _C.clone()", "def TRT_OverlayDigitizationBasicCfg(flags, **kwargs):\n acc = ComponentAccumulator()\n if \"DigitizationTool\" not in kwargs:\n tool = acc.popToolsAndMerge(TRT_OverlayDigitizationToolCfg(flags))\n kwargs[\"DigitizationTool\"] = tool\n\n if flags.Concurrency.NumThreads > 0:\n kwargs.setdefault(\"Cardinality\", flags.Concurrency.NumThreads)\n\n # Set common overlay extra inputs\n kwargs.setdefault(\"ExtraInputs\", flags.Overlay.ExtraInputs)\n\n TRTDigitization = CompFactory.TRTDigitization\n acc.addEventAlgo(TRTDigitization(name=\"TRT_OverlayDigitization\", **kwargs))\n return acc", "def getConfigAll(self):\n return self.configAll(False)", "def _list_usage_flags():\n yield from sorted(self._defs.items())\n if self._parent is None:\n if self._CONFIG_FLAG.name not in self._defs:\n yield (self._CONFIG_FLAG.name, self._CONFIG_FLAG)", "def gen_common_parser() -> optparse.OptionParser:\n parser = optparse.OptionParser()\n parser.add_option(\"--prefs\", dest=\"file_path\")\n return parser", "def main():\n \n # we need options!\n if len(sys.argv) == 1:\n help_option()\n sys.exit(1)\n \n # check and set any flags passed from the command line\n for arg in sys.argv:\n if arg == sys.argv[0]:\n continue\n elif arg.startswith(\"-\"):\n eval_function(arg.split(\"-\")[1]+\"_flag\")\n \n # check to see if the system has all the required programs\n required_programs=[\"wmctrl\",\"xprop\",\"xwininfo\"]\n if has_required_programs(required_programs) == False:\n sys.exit(1)\n \n initialize_global_variables()\n \n # parse the args to determine what should be done\n for arg in sys.argv:\n if arg == sys.argv[0]:\n continue\n elif arg.startswith(\"-\"):\n continue\n else:\n eval_function(arg+\"_option\")", "def _updateMaskedValueSet():\n global masked_value_set\n for confName in controller.CONF:\n # Add all needed values to masked_value_set\n if (controller.getParamKeyValue(confName, \"MASK_INPUT\") == True):\n masked_value_set.add(controller.CONF[confName])", "def get_eval_flag_dict(eval_mode):\n\n # Base dictionary with all flags set to True.\n dict = {}\n for key in 
EVAL_FLAGS:\n dict[key] = True\n\n # Auto-annotations.\n if eval_mode == \"draft\":\n dict[\"check_locus_tag\"] = False\n dict[\"check_trna\"] = False\n dict[\"import_locus_tag\"] = False\n dict[\"check_id_typo\"] = False\n dict[\"check_host_typo\"] = False\n dict[\"check_author\"] = False\n dict[\"check_description\"] = False\n dict[\"check_coords\"] = False\n\n # Manual annotations.\n elif eval_mode == \"final\":\n dict[\"import_locus_tag\"] = False\n\n # SEA-PHAGES GenBank records.\n elif eval_mode == \"auto\":\n dict[\"check_locus_tag\"] = False\n dict[\"check_description_field\"] = False\n dict[\"check_replace\"] = False\n dict[\"check_trna\"] = False\n dict[\"check_id_typo\"] = False\n dict[\"check_host_typo\"] = False\n dict[\"check_author\"] = False\n dict[\"check_description\"] = False\n dict[\"check_description_tally\"] = False\n dict[\"check_gene\"] = False\n dict[\"check_coords\"] = False\n\n # Non-SEA-PHAGES GenBank records.\n elif eval_mode == \"misc\":\n dict[\"check_locus_tag\"] = False\n # TODO below should probably be True, but it causes problems\n # when checking the current genome, GNM2_001, since these are not 'draft'\n # genomes.\n dict[\"check_replace\"] = False\n dict[\"check_trna\"] = False\n dict[\"check_id_typo\"] = False\n dict[\"check_host_typo\"] = False\n dict[\"check_author\"] = False\n dict[\"check_description\"] = False\n dict[\"check_description_tally\"] = False\n dict[\"check_gene\"] = False\n\n # Custom QC settings. User can select the settings, so it is initialized as\n # a copy of the base eval_mode. The user can provide the\n # customized combination of options.\n elif eval_mode == \"custom\":\n for key in dict.keys():\n prompt = f\"Eval_flag: {key}. {EVAL_FLAGS[key]}\"\n response = basic.ask_yes_no(prompt=prompt, response_attempt=3)\n if response is None:\n print(\"The default setting for this eval_flag will be used.\")\n else:\n dict[key] = response\n\n elif eval_mode == \"base\":\n pass\n else:\n print(\"A valid eval_mode has not been selected.\")\n return dict", "def tls_control_system():\n eps1 = lambda t, args: 0.5\n eps2 = lambda t, args: 1\n H1 = [0.5 * sigmaz(), [sigmap(), eps1], [sigmam(), eps1]]\n H2 = [0.5 * sigmaz(), [sigmaz(), eps2]]\n c_ops = [0.1 * sigmap()]\n objectives = [\n krotov.Objective(\n initial_state=ket('0'), target=ket('1'), H=H1, c_ops=c_ops\n ),\n krotov.Objective(\n initial_state=ket('0'), target=ket('1'), H=H2, c_ops=c_ops\n ),\n ]\n controls = [eps1, eps2]\n controls_mapping = krotov.conversions.extract_controls_mapping(\n objectives, controls\n )\n return objectives, controls, controls_mapping", "def _check_config(self):", "def _modified_option_defaults(self) -> Dict[str, Any]:\n return {\n # Change 'debug.traceback' default to True if debug logging is enabled.\n 'debug.traceback': logging.getLogger('pyocd').isEnabledFor(logging.DEBUG),\n }", "def preprocess_options(base_options, cmdargs):\n # ===============================================================================\n\n class OptionValues(object):\n pass\n\n option_values = OptionValues()\n # Create a base option dictionary indexed by short and long options.\n # Add the built-in optparse help and version options so that they can be\n # detected as stand-alone options.\n options = {}\n builtins = [BooleanOption('-h', '--help', 'help', ''),\n BooleanOption(None, '--version', 'version', '')]\n for opt in list(base_options) + builtins:\n setattr(option_values, opt.get_dest(), opt.get_default())\n if opt.short_opt:\n options[opt.short_opt] = opt\n if opt.long_opt:\n 
options[opt.long_opt] = opt\n # Walk through the options and arguments and set option values as attributes.\n iopt = 0\n while iopt < len(cmdargs):\n if cmdargs[iopt].startswith('-'):\n if cmdargs[iopt] in options:\n opt = options[cmdargs[iopt]]\n if opt.has_value():\n # Option with argument\n setattr(option_values, opt.get_dest(), cmdargs[iopt + 1])\n iopt += 1\n else:\n # Boolean option\n setattr(option_values, opt.get_dest(), True)\n iopt += 1\n return option_values", "def SynchronizeFlags(self):\n pass", "def _break_pipeline_cfg(self):", "def process_flags(self):\n\t\tsflags = []\n\t\tfor attr in dir(self):\n\t\t\tif attr[:3] != \"PF_\":\n\t\t\t\tcontinue\n\t\t\tvalue = getattr(self, attr)\n\t\t\tif value & self.fields[\"flags\"]:\n\t\t\t\tsflags.append(attr)\n\n\t\treturn sflags", "def list_opts():\n return [('ironic_lib', utils_opts)]", "def gather_default():\n\n def fill_header_section():\n \"\"\"Fills up \"Header\" section.\"\"\"\n section = _SectionData(\"Header\")\n section.props.append((\"FormatVersion\", 1))\n section.props.append((\"Source\", get_combined_ver_str()))\n section.props.append((\"Type\", \"Configuration\"))\n section.props.append((\"Note\", \"User settings of SCS Blender Tools\"))\n author = bpy.context.user_preferences.system.author\n if author:\n section.props.append((\"Author\", str(author)))\n section.props.append((\"ConfigStoragePlace\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.config_storage_place)))\n section.props.append((\"DumpLevel\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.dump_level)))\n return section\n\n def fill_paths_section():\n \"\"\"Fills up \"Paths\" section.\"\"\"\n section = _SectionData(\"Paths\")\n section.props.append((\"ProjectPath\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.scs_project_path)))\n section.props.append((\"\", \"\"))\n section.props.append((\"ShaderPresetsFilePath\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.shader_presets_filepath)))\n section.props.append((\"TriggerActionsRelFilePath\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.trigger_actions_rel_path)))\n section.props.append((\"TriggerActionsUseInfixed\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.trigger_actions_use_infixed))))\n section.props.append((\"SignRelFilePath\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.sign_library_rel_path)))\n section.props.append((\"SignUseInfixed\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.sign_library_use_infixed))))\n section.props.append((\"TSemProfileRelFilePath\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.tsem_library_rel_path)))\n section.props.append((\"TSemProfileUseInfixed\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.tsem_library_use_infixed))))\n section.props.append((\"TrafficRulesRelFilePath\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.traffic_rules_library_rel_path)))\n section.props.append((\"TrafficRulesUseInfixed\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.traffic_rules_library_use_infixed))))\n section.props.append((\"HookupRelDirPath\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.hookup_library_rel_path)))\n section.props.append((\"MatSubsRelFilePath\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.matsubs_library_rel_path)))\n section.props.append((\"ConvertersPath\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.conv_hlpr_converters_path)))\n section.props.append((\"UseAlternativeBases\", 
int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.use_alternative_bases))))\n return section\n\n def fill_import_section():\n \"\"\"Fills up \"Import\" section.\"\"\"\n section = _SectionData(\"Import\")\n section.props.append((\"ImportScale\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.import_scale)))\n section.props.append((\"PreservePathForExport\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_preserve_path_for_export))))\n section.props.append((\"ImportPimFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pim_file))))\n section.props.append((\"UseWelding\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_use_welding))))\n section.props.append((\"WeldingPrecision\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_welding_precision))))\n section.props.append((\"UseNormals\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_use_normals))))\n section.props.append((\"ImportPitFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pit_file))))\n section.props.append((\"LoadTextures\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_load_textures))))\n section.props.append((\"ImportPicFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pic_file))))\n section.props.append((\"ImportPipFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pip_file))))\n section.props.append((\"ImportPisFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pis_file))))\n section.props.append((\"ConnectedBones\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_connected_bones))))\n section.props.append((\"BoneImportScale\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.import_bone_scale)))\n section.props.append((\"ImportPiaFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_pia_file))))\n section.props.append((\"IncludeSubdirsForPia\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.import_include_subdirs_for_pia))))\n return section\n\n def fill_export_section():\n \"\"\"Fills up \"Export\" section.\"\"\"\n section = _SectionData(\"Export\")\n section.props.append((\"ExportScale\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.export_scale)))\n section.props.append((\"ApplyModifiers\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_apply_modifiers))))\n section.props.append((\"ExcludeEdgesplit\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_exclude_edgesplit))))\n section.props.append((\"IncludeEdgesplit\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_include_edgesplit))))\n section.props.append((\"ActiveUVOnly\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_active_uv_only))))\n section.props.append((\"ExportVertexGroups\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_vertex_groups))))\n section.props.append((\"ExportVertexColor\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_vertex_color))))\n section.props.append((\"ExportVertexColorType\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.export_vertex_color_type)))\n section.props.append((\"ExportVertexColorType7\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.export_vertex_color_type_7)))\n # section.props.append((\"ExportAnimFile\", info.get_default_prop_value(bpy.types.GlobalSCSProps.export_anim_file)))\n section.props.append((\"ExportPimFile\", 
int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_pim_file))))\n section.props.append((\"OutputType\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.export_output_type)))\n section.props.append((\"ExportPitFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_pit_file))))\n section.props.append((\"ExportPicFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_pic_file))))\n section.props.append((\"ExportPipFile\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_pip_file))))\n section.props.append((\"SignExport\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.export_write_signature))))\n return section\n\n def fill_global_display_section():\n \"\"\"Fills up \"GlobalDisplay\" section.\"\"\"\n section = _SectionData(\"GlobalDisplay\")\n section.props.append((\"DisplayLocators\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.display_locators))))\n section.props.append((\"LocatorSize\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.locator_size)))\n section.props.append((\"LocatorEmptySize\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.locator_empty_size)))\n section.props.append((\"DisplayConnections\", int(_property_utils.get_by_type(bpy.types.GlobalSCSProps.display_connections))))\n section.props.append((\"CurveSegments\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.curve_segments)))\n section.props.append((\"DisplayTextInfo\", _property_utils.get_by_type(bpy.types.GlobalSCSProps.display_info)))\n return section\n\n def fill_global_colors_section():\n \"\"\"Fills up \"GlobalColors\" section.\"\"\"\n section = _SectionData(\"GlobalColors\")\n section.props.append((\"PrefabLocatorsWire\", tuple(_property_utils.get_by_type(bpy.types.GlobalSCSProps.locator_prefab_wire_color))))\n section.props.append((\"ModelLocatorsWire\", tuple(_property_utils.get_by_type(bpy.types.GlobalSCSProps.locator_model_wire_color))))\n section.props.append((\"ColliderLocatorsWire\", tuple(_property_utils.get_by_type(bpy.types.GlobalSCSProps.locator_coll_wire_color))))\n section.props.append((\"ColliderLocatorsFace\", tuple(_property_utils.get_by_type(bpy.types.GlobalSCSProps.locator_coll_face_color))))\n section.props.append((\"NavigationCurveBase\", tuple(_property_utils.get_by_type(bpy.types.GlobalSCSProps.np_connection_base_color))))\n section.props.append((\"MapLineBase\", tuple(_property_utils.get_by_type(bpy.types.GlobalSCSProps.mp_connection_base_color))))\n section.props.append((\"TriggerLineBase\", tuple(_property_utils.get_by_type(bpy.types.GlobalSCSProps.tp_connection_base_color))))\n section.props.append((\"InfoText\", tuple(_property_utils.get_by_type(bpy.types.GlobalSCSProps.info_text_color))))\n section.props.append((\"BasePaint\", tuple(_property_utils.get_by_type(bpy.types.GlobalSCSProps.base_paint_color))))\n return section\n\n '''\n def fill_various_section():\n \"\"\"Fills up \"Various\" section.\"\"\"\n section = data_SectionData(\"Various\")\n section.props.append((\"DumpLevel\", _get_scs_globals().dump_level))\n return section\n '''\n\n # DATA CREATION\n header_section = fill_header_section()\n paths_section = fill_paths_section()\n import_section = fill_import_section()\n export_section = fill_export_section()\n global_display_section = fill_global_display_section()\n global_colors_section = fill_global_colors_section()\n # various_section = fill_various_section()\n\n # DATA ASSEMBLING\n config_container = [header_section, paths_section, import_section, export_section, 
global_display_section, global_colors_section]\n # config_container.append(various_section)\n\n return config_container", "def runtime_cflags(self, ctx: Context) -> Iterable[str]:\n if self.builtin_passes:\n return self.builtin_passes.runtime_cflags(ctx)\n return []", "def manipulator(self):\n manipulator = ConfigurationManipulator()\n for flag in OPT_FLAGS:\n manipulator.add_parameter(\n EnumParameter(flag,\n ['on', 'off', 'default']))\n return manipulator", "def handleSpecialCERNMergeSettings(self, funcName):\n if self.getCmsswVersion().startswith(\"CMSSW_7_5\") and False:\n self.logger.info(\"Using fastCloning/lazydownload\")\n self._enableLazyDownload()\n elif funcName == \"merge\":\n self.logger.info(\"Using lazydownload\")\n self._enableLazyDownload()\n\n return", "def global_settings(request):\n return {\n 'OFFLINE_MODE_CAPABILITY_ENABLED': settings.OFFLINE_MODE_CAPABILITY_ENABLED\n }", "def write_flags(self):\n\n self.cmake.write(\n '################# Flags ################\\n'\n '# Defines Flags for Windows and Linux. #\\n'\n '########################################\\n\\n'\n )\n\n self.define_group_properties()\n self.define_windows_flags()\n self.define_linux_flags()", "def __getFlaglist( self , keyinfo ):\n\t\tflaglist = []\n\t\tif( getBit( keyinfo , 0 ) == 1 ):\n\t\t\tflaglist.append( 'HMAC_MD5_RC4' )\n\t\tif( getBit( keyinfo , 1 ) == 1 ):\n\t\t\tflaglist.append( 'HMAC_SHA1_AES' )\n\t\tif( getBit( keyinfo , 3 ) == 0 ):\n\t\t\tflaglist.append( 'group' )\n\t\tif( getBit( keyinfo , 3 ) == 1 ):\n\t\t\tflaglist.append( 'pairwise' )\n\t\tif( getBit( keyinfo , 4 ) == 0 and getBit( keyinfo , 5 ) == 0 ):\n\t\t\tflaglist.append( 'idx0' )\n\t\tif( getBit( keyinfo , 4 ) == 1 ):\n\t\t\tflaglist.append( 'idx1' )\n\t\tif( getBit( keyinfo , 5 ) == 1 ):\n\t\t\tflaglist.append( 'idx2' )\n\t\tif( getBit( keyinfo , 6 ) == 1 ):\n\t\t\tflaglist.append( 'install' )\n\t\tif( getBit( keyinfo , 7 ) == 1 ):\n\t\t\tflaglist.append( 'ack' )\n\t\tif( getBit( keyinfo , 8 ) == 1 ):\n\t\t\tflaglist.append( 'mic' )\n\t\tif( getBit( keyinfo , 9 ) == 1 ):\n\t\t\tflaglist.append( 'secure' )\n\t\tif( getBit( keyinfo , 10 ) == 1 ):\n\t\t\tflaglist.append( 'error' )\n\t\tif( getBit( keyinfo , 11 ) == 1 ):\n\t\t\tflaglist.append( 'request' )\n\t\tif( getBit( keyinfo , 12 ) == 1 ):\n\t\t\tflaglist.append( 'encrypted' )\n\t\treturn flaglist", "def getConfigData(self) -> dict:\r\n return {\r\n PYVConst.DownloadCode: self.config.get(CONF_DOWNLOAD_CODE, \"\"),\r\n PYVConst.ForceStandard: self.toBool(self.config.get(CONF_FORCE_STANDARD, False)),\r\n PYVConst.ForceAutoEnroll: self.toBool(self.config.get(CONF_FORCE_AUTOENROLL, True)),\r\n PYVConst.AutoSyncTime: self.toBool(self.config.get(CONF_AUTO_SYNC_TIME, True)),\r\n PYVConst.PluginLanguage: self.config.get(CONF_LANGUAGE, \"EN\"),\r\n PYVConst.EnableRemoteArm: self.toBool(self.config.get(CONF_ENABLE_REMOTE_ARM, False)),\r\n PYVConst.EnableRemoteDisArm: self.toBool(self.config.get(CONF_ENABLE_REMOTE_DISARM, False)),\r\n PYVConst.EnableSensorBypass: self.toBool(self.config.get(CONF_ENABLE_SENSOR_BYPASS, False)),\r\n PYVConst.MotionOffDelay: self.config.get(CONF_MOTION_OFF_DELAY, 120),\r\n PYVConst.OverrideCode: self.config.get(CONF_OVERRIDE_CODE, \"\"),\r\n PYVConst.ForceKeypad: self.toBool(self.config.get(CONF_FORCE_KEYPAD, False)),\r\n PYVConst.ArmWithoutCode: self.toBool(self.config.get(CONF_ARM_CODE_AUTO, False)),\r\n PYVConst.SirenTriggerList: self.config.get(CONF_SIREN_SOUNDING, [\"Intruder\"]),\r\n PYVConst.B0_Enable: 
self.toBool(self.config.get(CONF_B0_ENABLE_MOTION_PROCESSING, False)),\r\n PYVConst.B0_Min_Interval_Time: self.config.get(CONF_B0_MIN_TIME_BETWEEN_TRIGGERS, 5),\r\n PYVConst.B0_Max_Wait_Time: self.config.get(CONF_B0_MAX_TIME_FOR_TRIGGER_EVENT, 30),\r\n }", "def TRT_DigitizationBasicToolCfg(flags, name=\"TRT_DigitizationBasicTool\", **kwargs):\n acc = TRT_GeometryCfg(flags)\n acc.merge(MagneticFieldSvcCfg(flags))\n PartPropSvc = CompFactory.PartPropSvc\n acc.addService(PartPropSvc(InputFile=\"PDGTABLE.MeV\"))\n if flags.Detector.Overlay and not flags.Input.isMC:\n acc.merge(addFolders(flags, \"/TRT/Cond/DigVers\", \"TRT_OFL\", tag=\"TRTCondDigVers-Collisions-01\", db=\"OFLP200\"))\n # default arguments\n kwargs.setdefault(\"PAI_Tool_Ar\", TRT_PAI_Process_ArToolCfg(flags))\n kwargs.setdefault(\"PAI_Tool_Kr\", TRT_PAI_Process_KrToolCfg(flags))\n kwargs.setdefault(\"PAI_Tool_Xe\", TRT_PAI_Process_XeToolCfg(flags))\n kwargs.setdefault(\"Override_TrtRangeCutProperty\", flags.Digitization.TRTRangeCut)\n kwargs.setdefault(\"RandomSeedOffset\", flags.Digitization.RandomSeedOffset)\n if not flags.Digitization.DoInnerDetectorNoise:\n kwargs.setdefault(\"Override_noiseInSimhits\", 0)\n kwargs.setdefault(\"Override_noiseInUnhitStraws\", 0)\n if flags.Beam.Type == \"cosmics\":\n kwargs.setdefault(\"PrintDigSettings\", True)\n kwargs.setdefault(\"Override_cosmicFlag\", 0)\n kwargs.setdefault(\"Override_doCosmicTimingPit\", 1)\n kwargs.setdefault(\"Override_jitterTimeOffset\", 0.)\n kwargs.setdefault(\"Override_timeCorrection\", 0)\n if flags.Digitization.DoXingByXingPileUp:\n kwargs.setdefault(\"FirstXing\", TRT_FirstXing())\n kwargs.setdefault(\"LastXing\", TRT_LastXing())\n TRTDigitizationTool = CompFactory.TRTDigitizationTool\n tool = TRTDigitizationTool(name, **kwargs)\n acc.setPrivateTools(tool)\n return acc", "def set_globals():\n logging.debug(\"Setting globals in config\")\n for section in all_defaults:\n if not rcp.has_section(section):\n set_config_section_to_defaults(section)\n global config_changed\n config_changed = True\n for option in all_defaults[section]:\n value = get_value_or_set_to_default(section, option) \n set_global_option(section, option, value)\n return True", "def RPC_OverlayDigitizationBasicCfg(flags, **kwargs):\n acc = MuonGeoModelCfg(flags)\n if \"DigitizationTool\" not in kwargs:\n tool = acc.popToolsAndMerge(RPC_OverlayDigitizationToolCfg(flags))\n kwargs[\"DigitizationTool\"] = tool\n\n if flags.Concurrency.NumThreads > 0:\n kwargs.setdefault(\"Cardinality\", flags.Concurrency.NumThreads)\n\n # Set common overlay extra inputs\n kwargs.setdefault(\"ExtraInputs\", flags.Overlay.ExtraInputs)\n\n RPC_Digitizer = CompFactory.RPC_Digitizer\n acc.addEventAlgo(RPC_Digitizer(name=\"RPC_OverlayDigitizer\", **kwargs))\n return acc", "def main():\n print(\"Configuring system\")\n ain_config, settling_conf, resolution_config = None, None, None\n print(\"\\tSetting globals\")\n Globals.init()\n print(\"\\tSetting signals\")\n signal.signal(signal.SIGINT, signal_handler)\n signal.signal(signal.SIGTERM, signal_handler)\n signal.signal(signal.SIGABRT, signal_handler)\n signal.signal(signal.SIGQUIT, signal_handler)\n signal.signal(signal.SIGTSTP, signal_handler)\n signal.signal(signal.SIGHUP, signal_handler)\n print(\"\\tConnecting to devices\")\n handles, information = ld_connect(T7_DEVICE, CT[0])\n if len(handles) != 0:\n print(\"\\tFound \" + str(len(handles)) + \" device(s)\")\n ain_addr = [AIN_ADDRS[0], AIN_ADDRS[2], AIN_ADDRS[1], AIN_ADDRS[3]]\n settling_addr = [SETTLING_ADDR[0], 
SETTLING_ADDR[2], SETTLING_ADDR[1], SETTLING_ADDR[3]]\n resolution_addr = [RES_ADDR[0], RES_ADDR[2], RES_ADDR[1], RES_ADDR[3]]\n ain_range = [1.0, 10.0, 1.0, 10.0]\n gnd_ref_range = [NEGATIVE_REF_ADDR[0], NEGATIVE_REF_ADDR[2]]\n \"\"\" 1 is for AIN1, 3 for AIN3 and 5 for AIN5 \"\"\"\n gnd_io_range = [1, 3]\n ain_config = ld_ain_config(handles, ain_addr, aio_dir=1, ain_range=ain_range)\n settling_conf = ld_settling_config(handles, settling_addr, SETTLING_LIST[6])\n resolution_config = ld_resolution_config(handles, resolution_addr, RES_LIST[12])\n gnd_config = ld_gnd_ref_conf(handles, gnd_ref_range, gnd_io_range)\n Globals.add_global_handles(handles)\n Globals.add_global_information(information)\n if ain_config == 0 and settling_conf == 0 and resolution_config == 0 and gnd_config == 0:\n \"\"\"\n sync = Sync.Devices(handles, 10, 1)\n\n sync.sync()\n \"\"\"\n print(\"\\tScanning device(s)\")\n Monitor = Devices(handles, 500, [\"AIN0\", \"AIN2\"], 10000, 1)\n \"\"\"\n Sync = sync.Devices(handles, 500, [\"AIN0\", \"AIN2\", \"AIN4\"], 3000, 1)\n \"\"\"\n Sync.sync()\n \"\"\"\n monitor_dio_ain(handles, information)\n print(\"Closing connection to devices\")\n \"\"\"\n else:\n if ain_config == 0:\n print(\"Analog configuration: Success.\")\n else:\n print(\"Analog configuration: Failure.\")\n if settling_conf == 0:\n print(\"Settling time configuration: Success.\")\n else:\n print(\"Settling time configuration: Failure.\")\n if resolution_config == 0:\n print(\"Resolution configuration: Success.\")\n else:\n print(\"Resolution configuration: Failure.\")\n if gnd_config == 0:\n print(\"Gnd references configuration: Success.\")\n else:\n print(\"Gnd references configuration: Failure.\")\n print(\"Configuration unsuccessful. Closing connection\")\n for handle in Globals.handles:\n ljm.close(handle)\n print(\"Connections closed\")\n\n else:\n print(\"\\tUnable to detect any devices\")\n signal.signal(signal.SIGINT, signal.SIG_DFL)\n signal.signal(signal.SIGTERM, signal.SIG_DFL)\n signal.signal(signal.SIGQUIT, signal.SIG_DFL)\n signal.signal(signal.SIGTSTP, signal.SIG_DFL)\n signal.signal(signal.SIGHUP, signal.SIG_DFL)\n signal.signal(signal.SIGABRT, signal.SIG_DFL)\n print(\"Exiting\")\n return", "def default_settings():\n\n script_file_path = __file__[0:__file__.rfind(os.sep)]\n binary_file_path = os.sep.join([script_file_path, '..', 'bin'])\n is_win = False\n if 'win' in sys.platform:\n is_win = True\n\n ######## Default programs\n global svm_train, svm_scale, svm_predict\n global bin_seqs, get_data, extr_pred_cds, rpsblast, parse_blast, metalocs_operate, metatisa\n global train_cds_model_py, subset_py, grid_py\n \n svm_train = os.sep.join([binary_file_path, 'svm-train'])\n svm_scale = os.sep.join([binary_file_path, 'svm-scale'])\n svm_predict = os.sep.join([binary_file_path, 'svm-predict'])\n \n bin_seqs = os.sep.join([binary_file_path, 'bin-seqs'])\n get_data = os.sep.join([binary_file_path, 'get-data'])\n extr_pred_cds = os.sep.join([binary_file_path, 'extr-pred-cds'])\n rpsblast = os.sep.join([binary_file_path, 'rpsblast'])\n parse_blast = os.sep.join([binary_file_path, 'parse-blast'])\n metalocs_operate = os.sep.join([binary_file_path, 'metalocs-operate'])\n metatisa = os.sep.join([binary_file_path, 'metatisa'])\n \n train_cds_model_py = os.sep.join([script_file_path, 'train-cds-model.py'])\n subset_py = os.sep.join([script_file_path, 'subset.py'])\n grid_py = os.sep.join([script_file_path, 'grid-xtest.py'])\n\n if is_win:\n check_file_existence([svm_train+'.exe', svm_scale+'.exe', 
svm_predict+'.exe', bin_seqs+'.exe', get_data+'.exe'])\n check_file_existence([extr_pred_cds+'.exe', rpsblast+'.exe', parse_blast+'.exe', metalocs_operate+'.exe'])\n else:\n check_file_existence([svm_train, svm_scale, svm_predict, bin_seqs, get_data])\n check_file_existence([extr_pred_cds, rpsblast, parse_blast, metalocs_operate])\n check_file_existence([train_cds_model_py, subset_py, grid_py])\n\n ######## Default parameters\n global project_name, taxonomy, binmodel, cdsmodel, tismodel, blast_db, metatisa_settings\n global min_orf_len, max_orf_len, svm_cut_value, svm_cut_value2, svm_sub_size\n global exists_bin_file, exists_hit_file, ORFsets_status, prob_status, is_help\n global bin_file, seqs_file, hit_file, blast_ev, seeds_ev\n global run_uni_pred, run_novel_pred, run_metatisa\n\n dat_file_path = os.sep.join([script_file_path, '..', 'dat'])\n project_name = 'sample'\n# taxonomy = os.sep.join([dat_file_path, 'binmodel', 'test.bin-map'])\n# binmodel = os.sep.join([dat_file_path, 'binmodel', 'test.binmodel'])\n taxonomy = os.sep.join([dat_file_path, 'binmodel', '261-genomes.bin-map'])\n binmodel = os.sep.join([dat_file_path, 'binmodel', '261-genomes.k8.binmodel'])\n cdsmodel = os.sep.join([dat_file_path, 'cdsmodel'])\n tismodel = os.sep.join([dat_file_path, 'tismodel'])\n blast_db = os.sep.join([dat_file_path, 'Cdd', 'Cdd'])\n metatisa_settings = os.sep.join([dat_file_path, 'tismodel', 'metatisa-settings.txt'])\n \n min_ORF_len = 60\n max_ORF_len = 1500\n svm_cut_value = 0.5\n svm_cut_value2 = 0.5\n svm_sub_size = 10000\n\n exists_bin_file = False\n exists_hit_file = False\n ORFsets_status = 0\n prob_status = 1\n is_help = False\n \n run_uni_pred = True\n run_novel_pred = True\n run_metatisa = True\n \n bin_file = ''\n seqs_file = ''\n hit_file = ''\n blast_ev = 1e-10\n seeds_ev = 1e-40", "def settings():\n raise NotImplementedError # pragma: nocoverage", "def cflags_other(self):\n\n status, stdout, stderr = self.__xcall__(['--cflags-only-other'])\n\n if status != 0:\n raise RuntimeError(\"error querying --cflags-only-other for package `%s': %s\" % (self.name, stderr))\n\n flag_map = {\n '-D': 'define_macros',\n }\n\n kw = {}\n\n for token in stdout.split():\n if token[:2] in flag_map:\n kw.setdefault(flag_map.get(token[:2]), []).append(token[2:])\n\n else: # throw others to extra_link_args\n kw.setdefault('extra_compile_args', []).append(token)\n\n # make it uniq\n for k, v in kw.items(): kw[k] = uniq(v)\n\n # for macros, separate them so they can be plugged on C/C++ extensions\n if 'define_macros' in kw:\n for k, string in enumerate(kw['define_macros']):\n if string.find('=') != -1:\n kw['define_macros'][k] = string.split('=', 2)\n else:\n kw['define_macros'][k] = (string, None)\n\n return kw", "def all_options():\n return _OptionRegistry.values()", "def set_global_constants(argv):\n\n global DEBUG_LEVEL\n\n try:\n if get_config('rtsoft.debug') == '1':\n DEBUG_LEVEL = True\n except GitError as msg:\n pass\n\n global GATEKEEPERS\n global GATEKEEPERS_ENABLE\n\n try:\n GATEKEEPERS = get_config('rtsoft.gatekeepers').split(' ')\n GATEKEEPERS_ENABLE = True\n except GitError as msg:\n pass\n\n global COMMIT_MESSAGE_CHECK\n\n try:\n if get_config('rtsoft.commit-message-check') == '1':\n COMMIT_MESSAGE_CHECK = True\n except GitError as msg:\n pass\n\n global BRANCH_NAME_CHECK\n\n try:\n if get_config('rtsoft.branch-name-check') == '1':\n BRANCH_NAME_CHECK = True\n except GitError as msg:\n pass\n\n global MASTER_BRANCHES\n\n try:\n MASTER_BRANCHES = get_config('rtsoft.master-branches').split(' 
')\n except GitError as msg:\n DEBUG(\"Can't read rtsoft.master-branches. Maybe it is not define\")\n\n global refname\n global oldrev\n global newrev\n\n (refname, oldrev, newrev) = argv\n\n global user\n\n user = User()", "def _ApplyFlags(cls, config_values, flag_values):\n super()._ApplyFlags(config_values, flag_values)\n if flag_values['cloud_spanner_config'].present:\n config_values['config'] = flag_values.cloud_spanner_config\n if flag_values['cloud_spanner_nodes'].present:\n config_values['nodes'] = flag_values.cloud_spanner_nodes\n if flag_values['cloud_spanner_project'].present:\n config_values['project'] = flag_values.cloud_spanner_project", "def control():\n pass", "def _all_cli_opts(self):\n for item in self._cli_opts:\n yield item['opt'], item['group']", "def get_cfg():\n return _C.clone()", "def apply_grab_settings(self):\n raise NotImplementedError", "def get_cfg_defaults():\r\n # Return a clone so that the defaults will not be altered\r\n # This is for the \"local variable\" use pattern\r\n return _C.clone()", "def get_common_options(argparse_parser):\n\n def is_valid_file(argparse_parser, arg):\n # http://stackoverflow.com/a/11541450/1763984\n if not os.path.exists(arg):\n argparse_parser.error(\"The file %s does not exist!\" % arg)\n else:\n return open(arg, 'r') # return an open file handle\n\n common_options = argparse_parser.add_argument_group(\n 'common options for pynux commands')\n common_options.add_argument(\n '--loglevel',\n default=_loglevel_,\n help=''.join([\n \"CRITICAL ERROR WARNING INFO DEBUG NOTSET, default is \", _loglevel_\n ]))\n common_options.add_argument(\n '--rcfile',\n default=None,\n help=\"path to ConfigParser compatible ini file\",\n type=lambda x: is_valid_file(argparse_parser, x))\n return common_options", "def reset():\n for i in flags.keys(): flags[i] = 0\n for i in meta.keys(): meta[i] = \"\"\n return (None, \"CON\")", "def my_settings_function():\n global level\n level += 1", "def checkallflags(flags_with_values,flags_withoutvalues,cldic):\r\n if len(set(flags_with_values).intersection(set(flags_without_values))) > 0:\r\n print ( \"error some flags appear in two lists of flags, with and without required values:\",set(flags_with_values).intersection(set(flags_without_values)))\r\n printcommandset()\r\n sys.exit(1)\r\n for flag in set(flags_with_values).union(set(flags_withoutvalues)):\r\n if flag not in cldic:\r\n print ( \"error some flag mismatch between strings of flags and dictionary of flags:\",flag)\r\n printcommandset()\r\n sys.exit(1)\r\n return", "def define_windows_flags(self):\n\n # Define FLAGS for Windows\n self.set_warning_level()\n self.set_whole_program_optimization()\n self.set_use_debug_libraries()\n self.set_runtime_library()\n self.set_optimization()\n self.set_intrinsic_functions()\n self.set_runtime_type_info()\n self.set_function_level_linking()\n self.set_generate_debug_information()\n self.set_exception_handling()\n\n # Write FLAGS for Windows\n self.cmake.write('if(MSVC)\\n')\n if self.win_deb_flags != '':\n send('Debug FLAGS found = ' + self.win_deb_flags, 'ok')\n self.cmake.write(\n ' set(CMAKE_CXX_FLAGS_DEBUG \"${CMAKE_CXX_FLAGS_DEBUG}%s\")\\n' % self.win_deb_flags\n )\n else: # pragma: no cover\n send('No Debug FLAGS found', '')\n if self.win_rel_flags != '':\n send('Release FLAGS found = ' + self.win_rel_flags, 'ok')\n self.cmake.write(\n ' set(CMAKE_CXX_FLAGS_RELEASE \"${CMAKE_CXX_FLAGS_RELEASE}%s\")\\n' %\n self.win_rel_flags\n )\n else: # pragma: no cover\n send('No Release FLAGS found', '')\n 
self.cmake.write('endif(MSVC)\\n')", "def test_checkFlags(self):\n self.failUnlessEqual(self.nice.opts['aflag'], 1)\n self.failUnlessEqual(self.nice.opts['flout'], 0)", "def __init__( self ) :\r\n\r\n # ref. : p.196 of App.G: \"Experimental Control Protocol\" \r\n self._translation_table = \\\r\n { type(True) : ( 'bool', '=?' ) , # Python standard (\"struct\"): one byte \r\n # there are no \"short\" integers by default in Python \r\n # also integers are 64-bit on 64-bit Unix, so we have to check their size, \r\n # see the 'check_table' below \r\n type(1) : ( 'long', '=l' ) , # '=' -- translation is: four bytes\r\n type(1L) : ( 'long', '=l' ) , # '=' -- translation is: four bytes\r\n # type(1.0) : ( 'doub', '=d' ) , # \" 64 bit I3E floating-point number \" \r\n ## temp test \r\n ## type(1.0) : ( 'sing', '=f' ) , # \" 64 bit I3E floating-point number \" \r\n # bugfix: due to another bug, Netstation seems to ignore the byte order \r\n # for the floating-point values ( and assumes 'UNIX' ( i.e. network / \r\n # / \"big-endian\" ) order of bytes )\r\n type(1.0) : ( 'doub', '!d' ) , # \" 64 bit I3E floating-point number \" \r\n type('') : ( 'TEXT', '%ds' ) , # !! a special case\r\n ## ---------------------------------\r\n ## type( None ) : ( '\\x00' * 4, '=H' ) # one more special case for a bugfix ,\r\n ## # see pack() method comments below \r\n } \r\n\r\n # some data types must undergo additional compatibility checks ... \r\n self._check_table = \\\r\n { # type(1L) : lambda x : int(x) , # if this fails then the next attempt would have probably been do\r\n type(1) : is_32_bit_int_compatible, \r\n type(1L) : is_32_bit_int_compatible, \r\n }", "def _get_optlevel(\n target, fc, cc, debug, fflags, cflags, osname=None, verbose=False\n):\n # remove target extension, if necessary\n target = _get_base_app_name(target)\n\n # get lower case OS string\n if osname is None:\n osname = _get_osname()\n\n # remove .exe extension from compiler if necessary\n if fc is not None:\n fc = _get_base_app_name(fc)\n if cc is not None:\n cc = _get_base_app_name(cc)\n\n compiler = None\n if fc is not None:\n compiler = fc\n if compiler is None:\n compiler = cc\n\n # get - or / to prepend for compiler switches\n prepend = _get_prepend(compiler, osname)\n\n # set basic optimization level\n if debug:\n if osname == \"win32\":\n optlevel = \"O0\"\n else:\n optlevel = \"O0\"\n else:\n optlevel = \"O2\"\n\n # look for optimization levels in fflags\n for flag in fflags:\n if flag[:2] == \"-O\" or flag == \"-fast\":\n if not debug:\n optlevel = flag[1:]\n break # after first optimization (O) flag\n\n # look for optimization levels in cflags\n for flag in cflags:\n if flag[:2] == \"-O\":\n if not debug:\n optlevel = flag[1:]\n break # after first optimization (O) flag\n\n # reset optlevel with specified flags from setters\n if compiler == fc:\n tval = _set_fflags(target, fc, argv=False, osname=osname)\n else:\n tval = _set_cflags(target, cc, argv=False, osname=osname)\n\n # look for for optimization levels in compiler flags from setters\n if tval is not None:\n for flag in tval:\n if flag[:2] == \"-O\":\n if not debug:\n optlevel = flag[1:]\n break # after first optimization (O) flag\n\n # prepend optlevel\n optlevel = prepend + optlevel\n\n return optlevel", "def getControl(*args):", "def getControl(*args):", "def getControl(*args):", "def getControl(*args):", "def get_options():\n\n global args\n\n options = parser.add_argument_group(\"flags\")\n options.add_argument(\n \"-t\",\n \"--hash-type\",\n help=\"type of hash from the 
following: lm, ntlm, md4, md5, sha1, sha256, sha512\",\n metavar=\"\",\n required=True,\n choices=[\"lm\", \"ntlm\", \"md4\", \"md5\", \"sha1\", \"sha256\", \"sha512\"],\n )\n options.add_argument(\n \"-w\",\n \"--wordlist\",\n help=\"file path to wordlist\",\n metavar=\"\",\n type=argparse.FileType(\"r\"),\n required=True,\n )\n\n hash_group = options.add_mutually_exclusive_group(required=True)\n hash_group.add_argument(\n \"-s\", \"--hash-string\", help=\"hash string to crack\", metavar=\"\"\n )\n hash_group.add_argument(\n \"-l\",\n \"--hash-list\",\n help=\"file path to the list of hashes\",\n metavar=\"\",\n type=argparse.FileType(\"r\"),\n )\n\n args = parser.parse_args()", "def processOptions_(self, opts):\n\n for opt in opts.keys():\n val = opts[opt]\n\n # Skip actions, they are processed later in initializeActions_()\n if opt in self.main_actions:\n self.cfg_params['SKIM.'+opt[1:]] = val\n continue\n if opt in self.aux_actions:\n self.cfg_params['SKIM.'+opt[1:]] = val\n continue\n \n\n elif ( opt == '-cfg' ):\n pass\n\n elif ( opt in ('-continue', '-c') ):\n # Already processed in processContinueOption_()\n pass\n\n elif ( opt == '-Q' ):\n self.flag_quiet = 1\n pass\n\n elif ( opt == '-debug' ):\n if val: self.debug_level = int(val)\n else: self.debug_level = 1\n pass\n\n elif string.find(opt,'.') == -1:\n print common.prog_name+'. Unrecognized option '+opt\n usage()\n pass\n\n # Override config parameters from INI-file with cmd-line params\n if string.find(opt,'.') == -1 :\n self.cfg_params['SKIM.'+opt[1:]] = val\n pass\n else:\n # Command line parameters in the form -SECTION.ENTRY=VALUE\n self.cfg_params[opt[1:]] = val\n pass\n pass\n return", "def __int__(self):\n flags = self._analog_input_mode\n flags = set_bit(flags, 2, self._send_on_sensor_alarm)\n flags = set_bit(flags, 3, self._send_on_input_port_change)\n flags = set_bit(flags, 4, self._enable_1_wire_port)\n flags = set_bit(flags, 5, self._enable_all_link_aliasing)\n flags = set_bit(flags, 6, self._send_on_output_port_change)\n flags = set_bit(flags, 7, self._enable_output_timers)\n return flags", "def _get_default_options():\n return {\n \"library_folders\": [],\n \"verbose\": False,\n \"check_balanced\": True,\n \"mtime_check\": True,\n \"cache\": False,\n \"codegen\": False,\n \"expand_mx\": False,\n \"unroll_loops\": True,\n \"inline_functions\": True,\n \"expand_vectors\": False,\n \"resolve_parameter_values\": False,\n \"replace_parameter_expressions\": False,\n \"replace_constant_expressions\": False,\n \"eliminate_constant_assignments\": False,\n \"replace_parameter_values\": False,\n \"replace_constant_values\": False,\n \"eliminable_variable_expression\": None,\n \"factor_and_simplify_equations\": False,\n \"detect_aliases\": False,\n \"allow_derivative_aliases\": True,\n \"reduce_affine_expression\": False,\n }", "def get_system_flags(self):\n\t\treturn call_sdk_function('PrlVmCfg_GetSystemFlags', self.handle)", "def _options(self):\n return", "def getStdSwitches( targetPlatform, targetName ):\n Any.requireIsTextNonEmpty( targetPlatform )\n Any.requireIsTextNonEmpty( targetName )\n\n # We need defaults because the macro parser needs the switch to\n # correctly parse c++ code.\n\n\n fileName = os.path.join( 'build/%s/CMakeFiles/%s.dir/flags.make' %\n ( targetPlatform, targetName ) )\n\n Any.requireIsDirNonEmpty( 'build/%s' % targetPlatform )\n Any.requireIsFileNonEmpty( fileName )\n\n # read-in ground truth information\n logging.debug( 'parsing %s', fileName )\n content = FastScript.getFileContent( fileName, 
splitLines=True )\n raw_C_CFLAGS = ''\n raw_CPP_CFLAGS = ''\n regexp_C_CFLAGS = re.compile( r'^C_FLAGS\\s=\\s+(.*)$' )\n regexp_CPP_CFLAGS = re.compile( r'^CXX_FLAGS\\s=\\s+(.*)$' )\n\n for line in content:\n tmp = regexp_C_CFLAGS.search( line )\n\n if tmp:\n raw_C_CFLAGS = tmp.group( 1 )\n\n tmp = regexp_CPP_CFLAGS.search( line )\n\n if tmp:\n raw_CPP_CFLAGS = tmp.group( 1 )\n\n # get the default language standards\n standards = Compilers.getDefaultLanguageStandard(targetPlatform)\n cStdSwitch = '-std={}'.format( standards[ 'c' ] )\n cppStdSwitch = '-std={}'.format( standards[ 'c++' ] )\n\n # look if the user specified different standards in the C_FLAGS/CPP_FLAGS\n # CMake variables\n candidates = shlex.split( raw_C_CFLAGS )\n for candidate in candidates:\n if candidate.startswith( '-std=' ):\n cStdSwitch = candidate\n\n candidates = shlex.split( raw_CPP_CFLAGS )\n for candidate in candidates:\n if candidate.startswith( '-std=' ):\n cppStdSwitch = candidate\n\n return Switches( c=cStdSwitch, cpp=cppStdSwitch )", "def read_ctl_file(self, ctl_file):\n temp_options = {}\n with open(ctl_file) as ctl_handle:\n for line in ctl_handle:\n line = line.strip()\n uncommented = line.partition(\"*\")[0]\n if uncommented != \"\":\n if \"=\" not in uncommented:\n raise AttributeError, \\\n \"Malformed line in control file:\\n{0}\".format(line)\n (option, value) = uncommented.split(\"=\")\n option = option.strip()\n value = value.strip()\n if option == \"seqfile\":\n self.alignment = value\n elif option == \"outfile\":\n self.out_file = value\n elif option not in self._options:\n raise KeyError, \"Invalid option: {0}\".format(option)\n else:\n if \".\" in value or \"e-\" in value:\n try:\n converted_value = float(value)\n except:\n converted_value = value\n else:\n try:\n converted_value = int(value)\n except:\n converted_value = value\n temp_options[option] = converted_value\n for option in self._options.keys():\n if option in temp_options.keys():\n self._options[option] = temp_options[option]\n else:\n self._options[option] = None", "def getColorMapFlags():\n\treturn colorMap_flag", "def get_flags():\n flags.DEFINE_string(\n 'model_name',\n help='MobileNet version name: mobilenet_v1, mobilenet_v2, '\n 'mobilenet_v3_small and mobilenet_v3_large',\n default='mobilenet_v1'\n )\n flags.DEFINE_string(\n 'dataset_name',\n help='Dataset name from TDFS to train on: imagenette, imagenet2012',\n default='imagenette'\n )\n flags.DEFINE_string(\n 'model_dir',\n help='Working directory.',\n default='./tmp'\n )\n flags.DEFINE_string(\n 'data_dir',\n help='Directory for training data.',\n default=None\n )\n flags.DEFINE_bool(\n 'resume_checkpoint',\n help='Whether resume training from previous checkpoint.',\n default=False\n )\n flags.DEFINE_string(\n 'optimizer_name',\n help='Name of optimizer.',\n default='rmsprop'\n )\n flags.DEFINE_string(\n 'learning_scheduler_name',\n help='Name of learning rate scheduler.',\n default='exponential'\n )\n # for hyperparameter tuning\n flags.DEFINE_float(\n 'op_momentum',\n help='Optimizer momentum.',\n default=0.9\n )\n flags.DEFINE_float(\n 'op_decay_rate',\n help='Optimizer discounting factor for gradient.',\n default=0.9\n )\n flags.DEFINE_float(\n 'lr',\n help='Base learning rate.',\n default=0.008\n )\n flags.DEFINE_float(\n 'lr_decay_rate',\n help='Magnitude of learning rate decay.',\n default=0.97\n )\n flags.DEFINE_float(\n 'lr_decay_epochs',\n help='Frequency of learning rate decay.',\n default=2.4\n )\n flags.DEFINE_float(\n 'label_smoothing',\n help='The amount of 
label smoothing.',\n default=0.0,\n )\n flags.DEFINE_float(\n 'ma_decay_rate',\n help='Exponential moving average decay rate.',\n default=None\n )\n flags.DEFINE_float(\n 'dropout_rate',\n help='Dropout rate.',\n default=0.2\n )\n flags.DEFINE_float(\n 'std_weight_decay',\n help='Standard weight decay.',\n default=0.00004\n )\n flags.DEFINE_float(\n 'truncated_normal_stddev',\n help='The standard deviation of the truncated normal weight initializer.',\n default=0.09\n )\n flags.DEFINE_float(\n 'batch_norm_decay',\n help='Batch norm decay.',\n default=0.9997\n )\n flags.DEFINE_integer(\n 'batch_size',\n help='Training batch size.',\n default=4 # for testing purpose\n )\n flags.DEFINE_integer(\n 'epochs',\n help='Number of epochs.',\n default=5\n )" ]
[ "0.5948904", "0.55440557", "0.55275595", "0.5320084", "0.5296819", "0.5249181", "0.5204246", "0.520022", "0.51432735", "0.5137308", "0.5134827", "0.5134588", "0.5134572", "0.5123408", "0.50960565", "0.5094958", "0.50912803", "0.50796866", "0.50630355", "0.50593835", "0.5054699", "0.5049256", "0.5030954", "0.5027189", "0.50233203", "0.50198644", "0.50112945", "0.5005883", "0.5001551", "0.50001395", "0.49905095", "0.49886522", "0.49724615", "0.49579555", "0.49203017", "0.49164388", "0.4913796", "0.49068522", "0.48983887", "0.4896394", "0.48762956", "0.48759097", "0.48665553", "0.48575532", "0.48469082", "0.48384175", "0.48375636", "0.4819344", "0.48176757", "0.4816427", "0.4812508", "0.48079503", "0.48019177", "0.4801872", "0.48003215", "0.48002", "0.47981703", "0.4787953", "0.47852647", "0.47826087", "0.47815636", "0.47802213", "0.47700304", "0.47654828", "0.47640374", "0.47619316", "0.47517803", "0.47515815", "0.47459123", "0.4745016", "0.47438738", "0.47367024", "0.47363907", "0.47356656", "0.4732598", "0.4730309", "0.47292084", "0.47197866", "0.4719184", "0.47160158", "0.4710748", "0.4710651", "0.46991605", "0.46987963", "0.46971002", "0.4694568", "0.46842143", "0.46842143", "0.46842143", "0.46842143", "0.468403", "0.4678309", "0.46753922", "0.46748835", "0.46718955", "0.46670255", "0.46576324", "0.46456534", "0.4643958", "0.46418315" ]
0.66763616
0
Read inputs and find keys with multiple values.
def main() :
    parser = argparse.ArgumentParser()
    parser.add_argument("key", help="column name of the KEY", nargs=1)
    parser.add_argument("val", help="column name of the VALUE", nargs=1)
    parser.add_argument("infile", help="CSV input file", nargs=1)
    parser.add_argument("outfile", help="CSV output file", nargs=1)
    try :
        args = parser.parse_args()
    except IOError as e :
        print (e)
        sys.exit(1)
    analyzer = Analyzer(args.key[0], args.val[0])
    analyzer.read(*args.infile)
    analyzer.write(*args.outfile)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def keysWhichMatch(cls, *args):\n if len(cls.keys) < len(args) > 0:\n raise ValueError('Number of keys provided is too long.\\n'\n 'Len Class Keys: %s\\n'\n 'Len Provided Keys: %s\\n' % (len(cls.keys), len(args)))\n\n index = 0\n output = cls.db_key_tuples()\n\n for keyToCheck in args:\n temp = []\n for key in output:\n if key[index] == keyToCheck:\n temp.append(key)\n\n index += 1\n output = temp\n\n return output", "def keysAll():", "def test_4():\n input, output = get_input()\n with open(\n os.path.join(os.path.dirname(__file__), \"..\", \"test_data\",\n \"main_test4_keys.txt\")) as f:\n returnkeys = f.read().splitlines()\n os.system(f\" python -m vcflat -i {input} -o {output}\")\n\n with open(output, \"r\") as f:\n reader = csv.DictReader(f)\n assert [\n i for i in returnkeys if i in set(reader.fieldnames[0].split(\"\\t\"))\n ]\n os.remove(output)", "def check_keys(set_name, keys, value, expect_key):\n\trecords = lib.read_all_records(set_name)\n\n\tfor key in keys:\n\t\tdigest = lib.get_key_digest(set_name, key)\n\t\tmeta_key, meta_ttl, record = records[str(digest).encode().hex()]\n\t\tlib.validate_record(key, record, [\"value\"], [value])\n\t\tlib.validate_meta(key, meta_key, meta_ttl, expect_key)", "def get_registered_input_keys(self):\n return tuple(self._input_keys)", "def fitInput(fitChoose):\n p_list = []\n for keys, values in fitChoose.items():\n if values == 1:\n p_list.append(keys)\n else:\n continue\n return p_list", "def rds_scan_keys(rds, glob):\n n = 0\n keys = []\n while(True):\n n, k = rds.scan(n, match=glob)\n keys.extend(k)\n if n == 0:\n break\n return keys", "def findall(self, key_list):\n for i in range(3):\n try:\n return self.redis_handler.mget(key_list) \n except:\n continue", "def read_file_keys(fname):\n with open(fname, 'r') as infile:\n fkeys = infile.read().split('\\n')\n return set(fkeys)", "def keys():", "def search_multiple_keys(dictionary, primary_search='isRequired', search_list=['label', 'name']):\n\n # get a flat list of the schema and keep only items in all_search_list\n all_search_list = [primary_search] + search_list\n result = []\n flat_dict = flatten(dictionary)\n for k, v in flat_dict.items():\n if any(x in k for x in all_search_list):\n result.append( {k: v} )\n\n # iterate through the schema and get the search items corresponding to each primary_search item (at the same level/section)\n help_list = []\n for i in result:\n try:\n tmp_dict = {}\n # key = i.keys()[0]\n key = list(i.keys())[0] # n Python 3 dict.keys() returns an iterable but not indexable object. Therefore convert it to an iterable, which is list.\n if key and key.endswith(primary_search):\n for item in all_search_list:\n corresponding_label_key = '.'.join(key.split('.')[:-1]) + '.' 
+ item\n for j in result:\n key_label = list(j.keys())[0]\n if key_label and key_label.endswith(item) and key_label == corresponding_label_key: # and result.has_key(key):\n tmp_dict.update({item: j[key_label]})\n if tmp_dict:\n help_list.append( tmp_dict )\n #if tmp_dict:\n # help_list.append( {primary_search: tmp_dict} )\n\n except Exception as e:\n #import ipdb; ipdb.set_trace()\n print(e)\n\n return help_list", "def test_find_inputs(self):\n codeanswer = find_inputs()\n keys = ['./indir-twofile5', './indir-good2', './indir-deep/deepdir', './indir-good1']\n for key in codeanswer:\n self.assertEqual(isinstance(codeanswer[key][1], Job), True)", "def read_keys(self) -> list[KeyPress]:", "def searchcolkeys(self,fctn,cols,keylist,*args):\n goodkeys=[]\n for key in keylist:\n temp=[]\n for c in cols:\n temp.append(self.getentry(key,c))\n for i in range(len(args)):\n temp.append(args[i])\n\n if fctn(*tuple(temp)):\n goodkeys.append(key)\n return(goodkeys)", "def keyValues(self): # real signature unknown; restored from __doc__\n return []", "def get_ids(input_id):\r\n key_value = input_id.split(':')\r\n\r\n if len(key_value) != 2:\r\n raise CLIAbort('Invalid ID %s: ID should be of the form xxx:yyy'\r\n % input_id)\r\n return key_value", "def get_ids(input_id):\r\n key_value = input_id.split(':')\r\n if len(key_value) != 2:\r\n raise CLIAbort('Invalid ID %s: ID should be of the form xxx:yyy'\r\n % input_id)\r\n return key_value", "def gets(self, key):\n result = self._get_raw_input()[key]\n if isinstance(result, list):\n return deepcopy(result)\n return [result]", "def find_keys(args):\n key = args['--key']\n if key:\n return [key]\n\n keyfile = args['--apikeys']\n if keyfile:\n return read_keyfile(keyfile)\n\n envkey = os.environ.get('TINYPNG_API_KEY', None)\n if envkey:\n return [envkey]\n\n local_keys = join(abspath(\".\"), \"tinypng.keys\")\n\n if isfile(local_keys):\n return read_keyfile(local_keys)\n\n home_keys = join(expanduser(\"~/.tinypng.keys\"))\n if isfile(home_keys):\n return read_keyfile(home_keys)\n\n return []", "def check_keys(self):", "def get_valid_keys(args_list, black_list=['out', 'gpu']):\n keys = args_list[0].keys()\n valid_keys = []\n for key in keys:\n if key not in black_list:\n cur = None\n for args in args_list:\n if cur is None:\n cur = args[key]\n if key not in args:\n warnings.warn('{} not in args={}'.format(key, args))\n continue\n if cur != args[key]:\n valid_keys.append(key)\n break\n return valid_keys", "async def keys(self) -> Iterable[str]:", "def checkKeysCorrection(self, input, valid_keys):\n for key in input.keys():\n if key not in valid_keys:\n print(\"[ERROR] Key '%s' does not exist.\" % key)\n return False\n # check whether all result keys are included in valid keys\n if key == \"result\" and not self.checkResultsCorrection(result=input[\"result\"], valid_keys=valid_keys):\n return False\n return True", "def get_keywords(self):\n\n if str(self.keywords) == \"unset\": return []\n # if self.keywords: return self.keywords\n if len(self.keywords) > 0: return self.keywords\n # retrieve from args and return if exists\n keywords = Settings.get_keywords() or []\n if len(keywords) > 0: return keywords\n if not Settings.prompt(\"keywords\"):\n self.keywords = \"unset\" # used to skip prompting for value in future\n return []\n question = {\n 'type': 'input',\n 'name': 'keywords',\n 'message': 'Keywords:',\n 'validate': ListValidator\n }\n keywords = prompt(question)[\"keywords\"]\n keywords = [n.strip() for n in keywords.split(\",\")]\n # confirm keywords\n if 
not Settings.confirm(keywords): return self.get_keywords()\n self.keywords = keywords\n return self.keywords", "def search_keys(dictionary, search_list=['help_text', 'label']):\n search_item1 = search_list[0]\n search_item2 = search_list[1]\n result = []\n flat_dict = flatten(dictionary)\n for k, v in flat_dict.items():\n if any(x in k for x in search_list):\n result.append( {k: v} )\n\n help_list = []\n for i in result:\n try:\n key = list(i.keys())[0]\n if key and key.endswith(search_item1):\n corresponding_label_key = '.'.join(key.split('.')[:-1]) + '.' + search_item2\n for j in result:\n key_label = list(j.keys())[0]\n if key_label and key_label.endswith(search_item2) and key_label == corresponding_label_key: # and result.has_key(key):\n #import ipdb; ipdb.set_trace()\n help_list.append({search_item2: j[key_label], search_item1: i[key]})\n except Exception as e:\n #import ipdb; ipdb.set_trace()\n print(e)\n\n return help_list", "def lazy_match(name, key_value_tuples):\r\n result = []\r\n for (k, v) in key_value_tuples:\r\n if k.lower().find(name.lower()) == 0:\r\n result.append((k, v))\r\n if len(result) == 0:\r\n print \"%s does not match any options:\" % name\r\n for k, _v in key_value_tuples:\r\n print \"\\t%s\" % k\r\n sys.exit(2)\r\n if len(result) > 1:\r\n print \"%s matched multiple options:\" % name\r\n for k, _v in result:\r\n print \"\\t%s\" % k\r\n sys.exit(2)\r\n return result", "def get_inchikey_by_name(self, names):\n result = []\n synonym_field = 'synonyms'\n pos_0 = {'name': {'$in': names}}\n pos_1 = {synonym_field: {'$in': names}}\n query = {'$or': [pos_0, pos_1]}\n projection = {'inchi_key': 1} \n docs = self.collection.find(filter=query, projection=projection, collation=self.collation)\n if docs is None:\n return result\n else:\n for doc in docs:\n result.append(doc['inchi_key'])\n return result", "def console_input_multiple():\r\n\r\n print(\"Count names. 
Stop if input is empty.\")\r\n\r\n name_counts = {}\r\n while True:\r\n name = sys.stdin.readline()\r\n name = name.strip().lower().capitalize()\r\n if name == \"\":\r\n break\r\n if name in name_counts:\r\n name_counts[name] += 1\r\n else:\r\n name_counts[name] = 1\r\n print(name_counts)", "def get_keys_from_lines(lines):\n keys = []\n for line in lines:\n key = get_key_from_line(line)\n if key:\n keys.append(key)\n return keys", "def check_required_keys(instrument, filename, hdulist):\n check_if_filename_present = False\n not_found_req_keys= []\n missing_keys = []\n (get_instrume, get_telescop, get_reftype) = get_file_headers(hdulist)\n\n file_loc = \"/grp/hst/cdbs/tools/jwst/required_keywords/\" + change_style(instrument) + \"_required_keywords.csv\"\n with open(file_loc, 'rb') as csvfile:\n keyreader = csv.reader(csvfile, delimiter=' ', quotechar='|')\n for row in keyreader:\n check_if_tests_in_filename = False\n #INSTRUME and REFTYPE have valid values\n if re.search(get_instrume.lower(),row[0]) != None and \\\n re.search(get_reftype.lower(),row[0]) != None:\n\n check_if_filename_present = True\n #TELESCOP exists and has a matching value\n if get_telescop and re.search(get_telescop.lower(),row[0]) != None:\n if set(row[1:]).issubset(set(hdulist[0].header)):\n print (\"Required keywords are present\")\n else:\n for key in row[1:]:\n if key not in hdulist[0].header:\n missing_keys.append(key)\n print (\"Missing keywords in {}: {}\".format(filename, missing_keys))\n break\n #TELESCOP exists but does not have a valid value or does not exist\n else:\n for key in row[1:]:\n if key not in hdulist[0].header:\n missing_keys.append(key)\n if missing_keys:\n print (\"Missing keywords in {}: {}\".format(filename, missing_keys))\n else:\n if get_telescop:\n print (\"Check TELESCOP value: {}\".format(hdulist[0].header[\"TELESCOP\"]))\n else:\n print (\"Set valid value for TELESCOP\")\n break\n\n if not check_if_filename_present:\n print (\"ERROR: Could not find file to check required keys for {}\".format(filename))\n if get_reftype:\n print (\"The REFTYPE may be invalid: {}\".format(get_reftype))", "def exact_key_items(self):\n for key_node, value in self.get_tree_entries():\n for key in self._defining_context.infer_node(key_node):\n if is_string(key):\n yield key.get_safe_value(), LazyTreeValue(self._defining_context, value)", "def extract(self, key, readby=False):\n if key not in self:\n return []\n match = self[key].reads if readby is False else self[key].readby\n found = []\n for k, v in self.items():\n if k in match:\n found.append(v)\n return found", "def assert_keys_have_values(self, caller, *keys):\n for key in keys:\n self.assert_key_has_value(key, caller)", "def input_keys(self) -> List[str]:\n return [self.input_url_key, self.input_browser_content_key]", "def _check_entry_keys(self, entry, n, key, required, xor_sets=None, optional=None):\n xor_sets = set(tuple()) if xor_sets is None else xor_sets\n optional = set() if optional is None else optional\n nth = ordinal(n)\n if not isinstance(entry, dict):\n raise TypeError(f\"Each entry must be a dictionary, error on {nth} {key}\")\n if len(required - entry.keys()) > 0:\n missing_keys = required - entry.keys()\n raise ValueError(\n f\"Each entry of {key} requires keys of: {', '.join(sorted(required))}. 
\"\n f\"Missing {sorted(missing_keys)} on {nth} entry, possibly others.\"\n )\n allowable_keys = required | optional | set().union(*xor_sets)\n if not set(entry.keys()) <= allowable_keys:\n unknown_keys = set(entry.keys()) - allowable_keys\n err_msg = f\"Got unknown keys in {nth} {key}: {', '.join(unknown_keys)}\"\n raise ValueError(err_msg)\n for xor_set in sorted(xor_sets):\n if len(xor_set & entry.keys()) != 1:\n err_msg = f\"For {key}, must specify one of {xor_set} but not both\"\n err_msg += f\". Error on {nth} entry, possibly others\"\n raise ValueError(err_msg)", "def test_keys(self):\n storage = Storage()\n keys_to_set = {'1': 'hello',\n '2': 'bye',\n '3': [1, 2, 'three'],\n '4': {1: 'one', 2: 'two'},\n 'a': 1,\n 'aa': 2,\n 'abc':3,\n 'hello':4}\n for key in keys_to_set:\n storage.set(key, keys_to_set[key])\n\n pattern_answers = {'?': ['1','2','3','4','a'],\n '*': list(keys_to_set.keys()),\n '[13]': ['1', '3'],\n '[^a]': ['1','2','3','4'],\n '[1-3]': ['1','2','3'],\n '?[ae]*': ['aa', 'hello']}\n for pattern in pattern_answers:\n self.assertEqual(pattern_answers[pattern],\n storage.keys(pattern), f'For pattern \"{pattern}\" expected {pattern_answers[pattern]}.')", "def GetInputFileKeys(version):\n \n if version == 7:\n inputfile_keys = ['DynBrkFi','PtfmFile',\n 'TwrFile','FurlFile','BldFile(1)',\n 'BldFile(2)','BldFile(3)','NoiseFile','ADAMSFile',\n 'LinFile']\n \n elif version == 8:\n errStr = 'Keys for FAST 8 have not been coded yet.'\n ValueError(errStr)\n \n else:\n errStr = 'Uncoded version \\\"{:d}\\\".'.format(version)\n ValueError(errStr)\n \n return inputfile_keys", "def retrieve_keys(tags, common=True):\n r = StrictRedis.from_url('redis://10.0.0.10:6379')\n # if tags exist, filter them (later)\n # print(tags)\n if tags == []:\n return []\n else:\n print('FILTERING')\n if common:\n available_keys = set([])\n else:\n available_keys = [set([]) for tag in tags]\n # implement union of sets\n for count, tag in enumerate(tags):\n try:\n keys_list = r.get(tag.strip()).split(',')[1:]\n for key in keys_list:\n if common:\n available_keys.add(key)\n else:\n available_keys[count].add(key)\n except:\n print('Tag %s not found - check spelling' % tag)\n if not common:\n available_keys = set().intersection(*available_keys)\n return list(available_keys)", "def grab_keys(self,\r\n entrylist,\r\n all_caps=True,\r\n first_caps=True):\r\n\r\n returnkeys = set()\r\n for a_temp in entrylist:\r\n returnkeys = returnkeys.union(self.get_keys_from_note(a_temp))\r\n returnlist = [k_temp for k_temp in returnkeys\r\n if (all_caps\r\n or k_temp != k_temp.upper())\r\n and (first_caps\r\n or k_temp[0]+k_temp[1:]\r\n != k_temp[0].upper()+k_temp[1:])]\r\n return returnlist", "def get_user_inputs():\n print('Enter the path to the loan applications file, path to the output file, N (the starting capital), K (the max number of concurrently active loans)')\n print('For example: applications.json approved.txt 50000 1000')\n user_input = raw_input()\n return user_input.split()", "def get_inputs(ckt, a):\n return find_bscs(ckt, a) | set([x for x in a if ckt[x].function.lower() in ['input']])", "def get_input(data_file: str) -> Set[int]:\n with open(data_file) as f:\n return {int(x.strip()) for x in f.readlines()}", "def check_key_list_match(key_mode, key_list, num_keys, row):\n if key_mode == 'all':\n return True\n else:\n found = False\n for k_match in key_list:\n k_match = nori.scalar_to_tuple(k_match)\n # sanity check\n if len(k_match) > num_keys:\n nori.core.email_logger.error(\n'''\nError: key list entry has 
more elements than the actual row in call to\ncheck_key_list_match(); call was (in expanded notation):\n\ncheck_key_list_match(key_mode={0},\n key_list={1},\n key_cv={2},\n row={3})\n\nExiting.'''.format(*map(nori.pps, [key_mode, key_list, key_cv, row]))\n )\n sys.exit(nori.core.exitvals['internal']['num'])\n for i, match_val in enumerate(k_match):\n if row[i] != match_val:\n break\n if i == (len(k_match) - 1):\n found = True\n if found:\n break\n if key_mode == 'include':\n return found\n if key_mode == 'exclude':\n return not found", "def extract_values(line):\n key, value = line.split(':')\n key, value = key.strip(), value.strip()\n key = key.replace(' ', '_')\n\n # values as lists\n if key.lower() in ('flags', 'bugs'):\n value = value.split()\n return key.lower(), value", "def processReadings(self, readings):\r\n return {key:value for key, value in readings.items() if not set(key).issubset(self.symbols)}", "def sequence_key_search(self,key,return_found_terms=False):\r\n if not return_found_terms:\r\n returnvalue = set()\r\n else:\r\n returnvalue = (set(),set())\r\n\r\n\r\n\r\n if key.startswith('GT_'):\r\n func_pred = '>='\r\n pred_len = 3\r\n elif key.startswith('LT_'):\r\n func_pred = '<='\r\n pred_len = 3\r\n elif key.startswith('=_'):\r\n func_pred = '='\r\n pred_len = 2\r\n elif key.startswith('G_'):\r\n func_pred = '>'\r\n pred_len = 2\r\n elif key.startswith('L_'):\r\n func_pred = '<'\r\n pred_len = 2\r\n elif key.startswith('E_'):\r\n func_pred = '='\r\n pred_len = 2\r\n elif key.startswith('R_'):\r\n func_pred = '/'\r\n pred_len = 2\r\n elif key.startswith('ALL_'):\r\n func_pred = '?'\r\n pred_len = 4\r\n\r\n else:\r\n return returnvalue\r\n\r\n key = key[pred_len:]\r\n if key.startswith(LEFTBRACKET):\r\n key = key[1:]\r\n left_more_than = True\r\n else:\r\n left_more_than = False\r\n if key.endswith(RIGHTBRACKET):\r\n key = key[:-1]\r\n right_less_than = True\r\n else:\r\n right_less_than = False\r\n\r\n if ATSIGN not in key:\r\n return returnvalue\r\n else:\r\n if SLASH in key:\r\n afterslash = key.split(SLASH)[1]\\\r\n .split(ATSIGN)[1]\\\r\n .replace(POUND,EMPTYCHAR)\\\r\n .replace(UNDERLINE,EMPTYCHAR)\r\n key = key.split(SLASH)[0]\r\n else:\r\n afterslash = EMPTYCHAR\r\n identifier = key.split(ATSIGN)[0]\r\n key_value = key.split(ATSIGN)[1]\r\n\r\n\r\n key_mark, key_value, key_type, key_value2 = self.parse_sequence_key(key_value,afterslash)\r\n\r\n\r\n if not self.default_dict['sequences'].query(term1=identifier,action='in'):\r\n return returnvalue\r\n sub_sequence = []\r\n\r\n\r\n\r\n if key_type == self.default_dict['sequences'].query(term1='#TYPE#',\r\n term2=identifier,\r\n action='get'):\r\n\r\n sequence = self.default_dict['sequences'].query(term1=identifier,\r\n action='get')\r\n if not key_value2:\r\n\r\n #If only one value entered\r\n\r\n sub_sequence = sequence.get(func_name=func_pred,item=key_value)\r\n\r\n else:\r\n\r\n # for a range of values\r\n\r\n if func_pred == '/':\r\n if left_more_than:\r\n left_func = '>'\r\n else:\r\n left_func = '>='\r\n if right_less_than:\r\n right_func = '<'\r\n else:\r\n right_func = '<='\r\n\r\n from_left_sequence = sequence.get(func_name=left_func,item=key_value)\r\n from_right_sequence = sequence.get(func_name=right_func,item=key_value2)\r\n sub_sequence = [x for x in from_left_sequence+from_right_sequence if x in from_left_sequence and x in from_right_sequence]\r\n\r\n\r\n returnset = set()\r\n returnfound = set()\r\n\r\n # Collate search terms\r\n for x_temp in sub_sequence:\r\n x_temp = 
identifier+ATSIGN+key_mark+str(x_temp)\r\n\r\n if x_temp.endswith('.0'):\r\n\r\n x_temp = x_temp[:-2]\r\n\r\n for y_temp in [x_temp+'.0',x_temp,DASH.join(x_temp.split(DASH)[0:2]),\r\n DASH.join(x_temp.split(DASH)[0:1])]:\r\n\r\n if y_temp in self.keys():\r\n returnset = returnset.union(self.get_indexes_for_key(y_temp))\r\n returnfound.add(y_temp)\r\n\r\n\r\n if not return_found_terms:\r\n return returnset\r\n else:\r\n return returnset, returnfound", "def match_in_shelve(input: List[str], table_key: str = 'biota_columns') -> list:\n\n result = shelf_read(table_key)\n if result is not None:\n return [c for c in result[table_key] if c in input]\n else:\n return None", "def many(keys: List[str]):\n for key in keys:\n actions.key(key)", "def _filter_search_values(key: str, values: list, collection: list):\n return_data = []\n for item in collection:\n if any(val in values for val in item[key]):\n return_data.append(item)\n return return_data", "def keys(targets):", "def read_inputs(self):\n #inputs \n inputs = {}\n # read inputs\n c = 1\n with open(self.path) as f:\n lines = f.readlines()\n for line in lines:\n data = line.rstrip(os.linesep).rstrip(',').split(',')\n input = np.array([np.float64(i) for i in data])\n inputs['image'+str(c)] = input\n c += 1\n\n\n\n return inputs", "def split(self, **kwargs):\n if len(kwargs.keys()) != 1:\n raise ValueError(\n \"One keyword argument is required: Key must be a meta_data field.\"\n )\n data = self._data.copy()\n for colname, values in kwargs.items():\n values = [values] if type(values) == str else values\n mask = data[colname].isin(values)\n return self._copy(data[mask]), self._copy(data[~mask])", "def _is_key_value(data):\n if data is None:\n return False\n return all(x in data for x in ['key', 'value'])", "def read_trans_prompts(lines: List[str], lowercase=True) -> List[Tuple[str,str]]:\n\n ids_prompts = []\n first = True\n for line in lines:\n if lowercase:\n line = line.strip().lower()\n else:\n line = line.strip()\n # in a group, the first one is the KEY. \n # all others are part of the set. 
\n if len(line) == 0:\n first = True\n else:\n if first:\n key, prompt = line.split(FIELDSEP)\n ids_prompts.append((key, prompt))\n first = False\n\n return ids_prompts", "def Keys(self) -> _n_1_t_4:", "def AllKeys(self) -> _n_0_t_1[str]:", "def process_input_items(args):\n return dict(sum([Counter({sku: value * SCORES[k] for sku, value in\n Counter(args[k].split(',')).items()}) or Counter() for k in\n set(SCORES.keys()) & set(args.keys())], Counter()))", "def get_keys(self):\n self._logger.info(ME + '.get_keys()')\n\n tmp_primary_keys = []\n tmp_data_keys = []\n try:\n tmp_primary_keys = config.get(ME, 'primary_keys').split(',')\n tmp_data_keys = config.get(ME, 'data_keys').split(',')\n self.index_key = config.get(ME, 'index_key') #FIXME: this is bad\n except KeyError as error_msg:\n self._logger.error(\n 'EXCEPTION: Keys missing' +\n '\\r\\tprimary_keys={0}'.format(','.join(tmp_primary_keys)) +\n '\\r\\tdata_keys={0}'.format(','.join(tmp_data_keys)) +\n '\\r\\tindex_key={0}'.format(self.index_key),\n exc_info=True\n )\n raise Connection.TableKeysMissing(error_msg, ME)\n\n self._logger.debug(\n 'keys validated:' + \\\n '\\r\\tprimary_keys={0}'.format(','.join(tmp_primary_keys)) +\n '\\r\\tdata_keys={0}'.format(','.join(tmp_data_keys)) +\n '\\r\\tindex_key={0}'.format(self.index_key)\n )\n return tmp_primary_keys, tmp_data_keys", "def get_shared_keys(param_list):\n\tif not param_list:\n\t\treturn\n\tkeys = set(param_list[0].keys())\n\tfor i in range(1, len(param_list)):\n\t\tkeys = keys.intersection(param_list[i].keys())\n\tkeys = list(keys)\n\tkeys.sort()\n\treturn keys", "def parse_input(input_file):\n lines = open(input_file).read().split('\\n\\n')\n rules = {}\n for line in lines[0].splitlines():\n key, values = line.split(':')\n\n items = values.replace(' or ', ' ').split()\n rules[key] = items\n my_ticket = lines[1].splitlines()[1]\n tickets = lines[2].splitlines()[1:]\n return rules, my_ticket, tickets", "def input_key(cols):\n key = {}\n for col in range(cols):\n column_order = input(\"\\nColumn to read: \")\n column_dir = \"\"\n while (column_dir != \"up\" and column_dir != \"down\"):\n column_dir = input(\"\\nDirection (up or down): \").lower()\n key[column_order] = column_dir\n return key", "def get_cursor_values(self, keys: Set[str]) -> Mapping[str, str]:", "def _get_flagging_key_values(flagging_file):\n\n with open(flagging_file, 'r') as f:\n lines = f.readlines()[:6]\n \n N_Rec = 'nRec' # Total number of spectra feeds into the synthesis image. 
This is not always constant so grab the value beam-by-beam.\n N_Chan = 'nChan' # Total number of channel\n\n # Search for keywords in the file\n \n for i in range(len(lines)):\n line = lines[i]\n if line.find(N_Rec) >=0:\n tokens = line.split()\n n_Rec = float(tokens[2])\n if line.find(N_Chan) >=0:\n tokens = line.split()\n n_Chan = float(tokens[2])\n\n exp_count = n_Rec*35 #counting antenna number from zero based on the recorded data\n \n return n_Rec, n_Chan, exp_count", "def fetch_input_values(cmd, params):\n try:\n case_no = current_config.COMMAND_DICT[cmd]\n if case_no == 1:\n \"\"\"Create_parking_lot\"\"\"\n if len(params) == 1:\n return [params[0]]\n\n elif case_no == 2:\n \"\"\"Park\"\"\"\n if len(params) == 3:\n return [params[0], params[2]]\n\n elif case_no == 3:\n \"\"\" Leave \"\"\"\n if len(params) == 1:\n return [params[0]]\n logger.debug(\"Something wrong with input\")\n\n elif case_no == 4:\n \"\"\"Vehicle_registration_number_for_driver_of_age\"\"\"\n if len(params) == 1:\n return [params[0]]\n\n elif case_no == 5:\n \"\"\"Slot_numbers_for_driver_of_age\"\"\"\n if len(params) == 1:\n return [params[0]]\n\n elif case_no == 6:\n \"\"\"Slot_number_for_car_with_number\"\"\"\n if len(params) == 1:\n return [params[0]]\n\n except KeyError:\n return\n # logger.debug(\"Something wrong with input\")\n # raise InvalidParams(\"Something wrong with input\")", "def searchcols(self,fctn,cols,*args):\n goodkeys=[]\n for key in self.allrowkeys:\n temp=[]\n for c in cols:\n temp.append(self.getentry(key,c))\n for i in range(len(args)):\n temp.append(args[i])\n\n if fctn(*tuple(temp)):\n goodkeys.append(key)\n return(goodkeys)", "def _kv_helper(cache, value):\n vals = [v.replace('\"','') for v in value.split(cache[\"delimiter\"])]\n if \"filtering\" not in cache or _filtering_passed_helper(cache[\"filtering\"], vals): #yield if filtering criteria met or no filtering criteria \n k = \"+\".join(vals) if cache[\"key_columns\"] == \"*\" else \"+\".join(vals[l] for l in cache[\"key_columns\"]) \n v = \",\".join(vals) if cache[\"target_columns\"] == \"*\" else \",\".join([vals[l] for l in cache[\"target_columns\"]])\n return k, v\n return None, None", "def getList(folder, key1=\"\", key2=\"\", key3=\"\"):\n L = os.listdir(folder)\n L = [v for v in L if (key1 in v) and (key2 in v) and (key3 in v)]\n return L", "def get_Flagging_KeyValues(flagging_file):\n\n flag_infile = open(flagging_file, 'r')\n LINES = flag_infile.readlines()[:6]\n flag_infile.close()\n \n N_Rec = 'nRec' # Total number of spectra feeds into the synthesis image. 
This is not always constant so grab the value beam-by-beam.\n N_Chan = 'nChan' # Total number of channel\n\n # Search for keywords in the file\n \n for i in range(len(LINES)):\n line = LINES[i]\n if line.find(N_Rec) >=0:\n TOKS = line.split()\n n_Rec = float(TOKS[2])\n if line.find(N_Chan) >=0:\n TOKS = line.split()\n n_Chan = float(TOKS[2])\n\n exp_count = n_Rec*35 #counting antenna number from zero based on the recorded data\n \n return n_Rec, n_Chan, exp_count", "def test_3():\n input, output = get_input()\n keys = \"'#CHROM POS'\"\n os.system(f\" python -m vcflat -i {input} -o {output} --keys {keys}\")\n\n with open(output, \"r\") as f:\n reader = csv.DictReader(f)\n assert keys[1:-1].split() == reader.fieldnames[0].split(\"\\t\")\n os.remove(output)", "def get_inputs(title, list_labels):\n print(f\"{title}\")\n # list which holds the input answers from user\n user_inputs = []\n for item in list_labels:\n user_inputs.append(input(f\"{item}: \"))\n return user_inputs", "def parse_inputs(inputs):\n parsed = inputs.split('\\n')\n\n result_set = dict()\n this_tile = []\n tile_id = 0\n for line in parsed:\n if 'Tile' in line:\n tile_id = re.search('Tile ([0-9]+):', line).group(1)\n elif line:\n line = line.replace('#', '1').replace('.', '0')\n split_line = [int(x) for x in line]\n this_tile.append(split_line)\n else:\n result_set[tile_id] = array(this_tile)\n this_tile = []\n tile_id = 0\n\n return result_set", "def check(self, entry_type:str, x:str):\n keys = set()\n x = self._decode(x)\n\n for log in self.logs:\n for datum in log[entry_type]:\n res = self._follow(datum, x)\n\n if type(res) == dict:\n for key in res.keys():\n keys.add(key)\n elif type(res) == list:\n keys.add('< %d' % len(res))\n \n return list(keys)", "def test_get_multiple(multiple_bucket): # pylint: disable=redefined-outer-name\n for idx in range(2):\n element_number = idx + 1\n assert multiple_bucket.get(f\"key {element_number}\") == f\"value {element_number}\"", "def key_usages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"key_usages\")", "def key_usages(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"key_usages\")", "def scan(self,p=False):\r\n\t\t\r\n\t\t# get variables from all terms\r\n\t\tv = []\r\n\t\tfor i in self:\r\n\t\t\ta,b,c = i.parse()\r\n\t\t\t\r\n\t\t\t# add keys\r\n\t\t\tv += b.keys()\r\n\t\t\t\r\n\t\t# remove duplicates\r\n\t\tv = set(v)\r\n\t\t\r\n\t\t# sort into upper case, lower case\r\n\t\tl = []\r\n\t\tu = []\r\n\t\tfor i in v:\r\n\t\t\tif i[0].isupper():\r\n\t\t\t\tu.append(i)\r\n\t\t\telse:\r\n\t\t\t\tl.append(i)\r\n\t\t\t\t\r\n\t\t# alphabetize and recombine\r\n\t\tl.sort()\r\n\t\tu.sort()\r\n\t\tv = l + u\r\n\t\t\r\n\t\t# print to screen if selected\r\n\t\tif p:\r\n\t\t\ts = ''\r\n\t\t\tfor i in v:\r\n\t\t\t\ts += '%s, ' % (i)\r\n\t\t\tprint(s[:-2])\r\n\t\t\r\n\t\treturn v", "def gen_key_value_extract(key, var, value, req_keys):\n if hasattr(var,'items'):\n for k, v in var.items():\n if k == key:\n if value.lower() in v.lower():\n v_list = []\n for req_key in req_keys:\n v_list.append(var[req_key])\n v_tup = tuple(v_list)\n yield v_tup\n if isinstance(v, dict):\n for result in gen_key_value_extract(key, v, value, req_keys):\n yield result\n elif isinstance(v, list):\n for d in v:\n for result in gen_key_value_extract(key, d, value, req_keys):\n yield result", "def read_keyValues():\n # Create the list of CIs from our data\n ci = db.session.query(CI).order_by(CI.id).all()\n app.logger.debug(pformat(ci))\n # 
Serialize the data for the response\n ci_schema = CISchema(many=True)\n data = ci_schema.dump(ci)\n keyValues = []\n for d in data:\n keyValuePair = {}\n keyValuePair[\"key\"] = d.get(\"id\")\n keyValuePair[\"value\"] = d.get(\"value\")\n keyValues.append(keyValuePair)\n print(keyValues)\n return keyValues", "def read_typed_(self, start_key=\"\", end_key=None):\n if end_key == \"\":\n return\n start_key_entry = None\n if start_key:\n if isinstance(start_key, unicode):\n try:\n start_key = str(start_key)\n except:\n pass\n if not isinstance(start_key, str):\n raise ValueError(\"start must be <type 'str'> got: %s\" % type(start_key))\n start_key_entry = (start_key, )\n end_key_entry = None\n if end_key:\n if isinstance(end_key, unicode):\n try:\n end_key = str(end_key)\n except:\n pass\n if not isinstance(end_key, str):\n raise ValueError(\"end must be <type 'str'> got: %s\" % type(end_key))\n end_key_entry = (end_key, )\n \n split_entry = []\n for entry in self.read_entries_(start_key_entry, end_key_entry):\n if len(entry) == 2:\n if split_entry:\n self.not_read.append(split_entry[0][0])\n yield entry\n elif len(entry) == 5:\n if entry[1] == 0:\n if split_entry:\n self.not_read.append(split_entry[0][0])\n split_entry = [entry]\n elif (split_entry and split_entry[0][0] == entry[0] and\n len(split_entry) == int(entry[1]) and\n split_entry[0][3] == entry[3]):\n split_entry.append(entry)\n if split_entry and len(split_entry) == int(split_entry[0][2]):\n value = \"\".join([x[4] for x in split_entry])\n yield entry[0], value\n split_entry = []", "def key_dict_values (self):\r\n\r\n if self.using_database:\r\n value_tuple = (notebookname,)\r\n db_cursor.execute(\"SELECT note_index \"\r\n +\"FROM keys_to_indexes\"\r\n +\" WHERE notebook=?;\",\r\n value_tuple)\r\n\r\n fetched = db_cursor.fetchall()\r\n if fetched:\r\n return {index[0].strip() for index in fetched}\r\n return set()\r\n\r\n return self.key_dict.values()", "def valid_config_keys():\n click.echo(', '.join(get_class_properties(OffshoreInputsConfig)))", "def get_inputs():\n inputs = {}\n for obj in vars(acsploit.input).values():\n if hasattr(obj, 'INPUT_NAME'):\n inputs[obj.INPUT_NAME] = obj\n\n return inputs", "def get_keys():\n keys = []\n with open('keys', 'r') as file:\n for line in file:\n keys.append(line.strip('\\n'))\n return tuple(keys)", "def pullAll(*keys):", "def _get_keys(self, listOfKeys):\n return self._keys", "def get_keys(filen, flist): \n if (filen in flist[0]):\n key1 = 'PSTH_STIM'\n key2 = 'ELEC_'\n key3 = '_TRIAL_'\n elif (filen in flist[1]) or (filen in flist[2]):\n key1 = 'PSTH'\n key2 = ''\n key3 = '_'\n elif (filen in flist[3]) or (filen in flist[4]):\n key1 = 'Stim'\n key2 = 'Elec'\n key3 = 'Repet'\n return key1, key2, key3", "def lookup(keys, n):\n for i in range(len(keys)):\n if keys[i] == n:\n return True\n return False", "def search(self, *args, **kwargs):\n ret = []\n for record in Handler.ALL_VERS_DATA.values():\n matchArgs = list(kwargs.keys())\n for k,v in iteritems(kwargs): # restrict records based on key-value match requirement\n try:\n if record[k] != v: break # a non-matching requirement means this record doesn't match\n except: break # record doesn't have required key 'k'\n matchArgs.remove(k)\n if matchArgs: continue # didn't match all required kwargs\n matchArgs = list(args)\n for k,v in iteritems(record): # find any record with a <value> in it\n if k in matchArgs: matchArgs.remove(k)\n if v in matchArgs: matchArgs.remove(v)\n if matchArgs: continue # didn't match all required args\n 
ret.append(record)\n return ret", "def _extract_inputs(self, inputs, input_names):\n # inputs is either a dict or a list, where index matches\n # the input banks.\n if isinstance(inputs, dict):\n return [inputs[name] for name in input_names]\n else:\n return [\n inputs[index]\n for index in [self.input_bank_order.index(name) for name in input_names]\n ]", "def get_field_inputs(filename):\n with open(filename) as csvfile:\n reader = csv.reader(csvfile, delimiter=',', quotechar='|')\n field_list = set()\n required_field_list = set()\n for row in reader:\n field_list.add(row[0])\n if row[1] == 'Yes':\n required_field_list.add(row[0])\n return [field_list, required_field_list]", "def multiget(self, keys):\n return self.sp.multiget(keys)", "def key_in_field(self, key, fields):\n for field in fields:\n if key in field:\n return True\n return False", "def getKeyObjs(self,\n key,\n value = None):\n keyList = []\n keys = self.__keyObjs\n tmpKey = key + \"___\"\n for keyIdx in keys:\n if ((string.find(keyIdx, tmpKey) != -1) and (value != None)):\n if (re.search(value,self.__keyObjs[keyIdx].getValue())!=None):\n keyList.append(self.__keyObjs[keyIdx])\n elif (string.find(keyIdx, tmpKey) != -1):\n keyList.append(self.__keyObjs[keyIdx])\n return keyList", "def test_main_multiple_keys(self):\n args = [\n \"--layout\",\n self.layout_double_signed_path,\n \"--layout-keys\",\n self.alice_path,\n self.danny_path,\n \"--key-types\",\n \"rsa\",\n \"ed25519\",\n ]\n self.assert_cli_sys_exit(args, 0)", "def test_main_multiple_keys(self):\n args = [\n \"--layout\",\n self.layout_double_signed_path,\n \"--layout-keys\",\n self.alice_path,\n self.danny_path,\n \"--key-types\",\n \"rsa\",\n \"ed25519\",\n ]\n self.assert_cli_sys_exit(args, 0)", "def get_duplicates(lines):\n duplicates = []\n keys_checked = {}\n for line in lines:\n key, value = get_key_and_value_from_line(line=line)\n if key:\n if key in keys_checked:\n duplicates.append(u\"{key}={value}\".format(key=key, value=value))\n translation_in_list = u\"{key}={value}\".format(key=key, value=keys_checked[key])\n if translation_in_list not in duplicates:\n duplicates.append(translation_in_list)\n else:\n keys_checked[key] = value\n return duplicates", "def find_nested_in_dict(data, key_list):\n return reduce(lambda d, k: d[k], key_list, data)", "def get_keys_to_add(self,keys_to_add):\r\n\r\n keyset = set()\r\n for k_temp in keys_to_add:\r\n if k_temp != EMPTYCHAR:\r\n if k_temp[0] == DOLLAR:\r\n keyset.update(self.default_dict['keymacros'].\r\n get_definition(k_temp[1:]))\r\n else:\r\n keyset.add(k_temp)\r\n return list(check_hyperlinks(keyset, display=display, notebook=notebook))", "def test_has_correct_number_of_keys_and_values(self):\n self.has_correct_number_of_keys_and_values(2, 1)", "def test_has_correct_number_of_keys_and_values(self):\n self.has_correct_number_of_keys_and_values(2, 1)", "def check(self):\n view_keys(self.d)\n paso1=input('Do you want to change any parameter?(y/n)\\n')\n while paso1[0] == 'y':\n key=input('Which one?\\n')\n if key not in self.d:\n paso2=input(\"This parameter is not in the dictionary.\\\nDo you want to include it?(y/n)\\n\")\n if paso2[0]=='y':\n value=input('value(s) of '+key+'?= ')\n self.d[key]=tuple(split(replace(value,',',' ')))\n else:continue\n else:\n value=input('New value(s) of '+key+'?= ')\n self.d[key]=tuple(split(replace(value,',',' ')))\n view_keys(self.d)\n paso1=input('Anything else?(y/n)\\n')" ]
[ "0.5824653", "0.5802758", "0.578554", "0.5753453", "0.5746162", "0.5673391", "0.5568094", "0.5562528", "0.5508408", "0.5508204", "0.55056906", "0.5499257", "0.5487545", "0.54547495", "0.5448356", "0.5445263", "0.54099", "0.5394247", "0.5388735", "0.53838193", "0.53304183", "0.5315513", "0.5233124", "0.5228973", "0.5204807", "0.5195331", "0.51897824", "0.5189745", "0.5178956", "0.51691216", "0.51586694", "0.51489085", "0.51443267", "0.51404935", "0.5129441", "0.5058515", "0.50559145", "0.5050336", "0.5047612", "0.5044641", "0.50321805", "0.5023591", "0.50145227", "0.5009457", "0.50065815", "0.49861014", "0.49778342", "0.4968848", "0.49624154", "0.4958482", "0.49576685", "0.4949986", "0.49422967", "0.4941906", "0.49394682", "0.49325067", "0.49228755", "0.4914396", "0.49128625", "0.49109182", "0.49105376", "0.49051246", "0.4903801", "0.49005887", "0.48894292", "0.4882028", "0.48730695", "0.48709786", "0.48693028", "0.4867387", "0.48669526", "0.48632362", "0.48563287", "0.48542896", "0.48542896", "0.48533946", "0.48519868", "0.48459014", "0.48410717", "0.48340082", "0.48275694", "0.4826561", "0.48241466", "0.48224822", "0.48168683", "0.48131454", "0.48104528", "0.48031163", "0.47961164", "0.47833684", "0.47779626", "0.477505", "0.4773784", "0.47637782", "0.47637782", "0.4760072", "0.47593707", "0.47578266", "0.47559506", "0.47559506", "0.4755862" ]
0.0
-1
Checks for number of vertices
>>> num_vertices([[1, 1], [1, 1]], [[1, 1, 1], [1, 1, 1], [1, 1, 1]])
False
>>> num_vertices([[1, 0], [0, 1]], [[0, 0], [0, 0]])
True
def num_vertices(graph1: list, graph2: list):
    if len(graph1[0]) != len(graph2[0]):
        return False
    return True
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def hasvertices(self):\n if len(self.vertices) > 0:\n return True\n else:\n return False", "def num_vertices(self):\n return len(self.vertices)", "def num_vertices(self):\n return len(self.vertices)", "def getNumVertices(self):\n return len(self.V)", "def num_vertices(self):\n return len(self)", "def num_vertices(self):\n return self._top_exp.number_of_vertices()", "def num_vertices(self):\r\n return len(self.__graph_dict.keys())", "def obtener_cantidad_vertices(self):\n return len(self.vertices.keys())", "def num_vertices(self, p):\n ret_val = self._num_vertices(p)\n return ret_val", "def num_vertices(self):\n return self.n * (1 + int(self.variant.is_bipartite()))", "def check_nverts(sections):\n return _check_nentries(sections, \"NVERTS\", \"VERTEX\")", "def n_vertices(self):\n try: \n return self._n_vertices\n except AttributeError:\n self._n_vertices = 0\n for v in self.vertex_generator(): self._n_vertices += 1\n return self._n_vertices", "def vertexCount(self):\n return self._nVertices", "def vertex_count(self) -> int:\n return len(self._vertex_map)", "def return_num_vertices(self):\n return self.__size", "def get_vertices_count(self) -> int:\n # TODO: verify the following claim:\n raise NotImplementedError", "def __len__(self):\n return len(self._vertices)", "def getNumVertexes(self):\n return _osgAnimation.RigTransformHardware_getNumVertexes(self)", "def contains_several_vertices(self, currentState):\n\t\treturn True if sum(currentState) > 3 else False", "def get_num_vertices(triangles):\n return numpy.amax(numpy.reshape(triangles, -1)) + 1", "def n_vertices(self):\n return len(self.minimized_generators())", "def get_num_vertices(self):\n\n return self._graph_state.get_num_vertices()", "def test_number_of_vertex_elements_in_MESH_chunk(self):\n for O in self.mod.objts.itervalues():\n for M in O.meshes.itervalues():\n self.assertEqual(M.vsize, len(M.vert))", "def is_vertex(self):\n return True", "def is_vertex(self):\n return True", "def has_vertex(self, x, y):\n\n return min(x, y) > 0 and x <= self.width and y <= self.height", "def _vertices_are_equal(\n vertices1: List[np.ndarray], vertices2: List[np.ndarray]\n) -> bool:\n if len(vertices1) != len(vertices2):\n return False\n diff = vertices1 - vertices2\n if np.abs(np.max(diff)) < ways_are_equal_tolerance:\n return True\n return False", "def vertex_count(self):\n return len(self._outgoing)", "def hasVertex(self, vertexNumber):\n try:\n rs = self.findVertex(vertexNumber)\n return 0\n except VertexError, e:\n return 1", "def vertex_multidegree(breakpoint_graph, vertex):\n return len(list(breakpoint_graph.get_edges_by_vertex(vertex)))", "def get_no_vertices(self):\r\n return len(self.__neighbours.keys())", "def vert_count(self, vert_count):\n if not isinstance(vert_count, int):\n raise TypeError(f'Number of vertices should be of type integer')\n if vert_count < 3:\n raise ValueError(f'Number of vertices should be greater than or equal to 3')\n\n self._vert_count = vert_count", "def is_vertex(self): \n return False", "def get_vertices_num(self):\n return self.coords.shape[0]", "def is_vertex(self):\n return False", "def numverts(self):\n return self._numvertstotal", "def checkVertices(vertices, limits):\n isWithin = True\n for i,v in enumerate(vertices):\n x = v[0]\n y = v[1]\n z = v[2]\n if x < limits[0][0] or x > limits[0][1]:\n isWithin = False\n break\n if y < limits[1][0] or y > limits[1][1]:\n isWithin = False\n break\n if z < limits[2][0] or z > limits[2][1]:\n isWithin = False\n break\n return isWithin", "def 
verify_vertex_values(self):\n for (line, row) in [(ln, rw) for ln in range(9) for rw in range(9)]:\n if self.grid[line][row] not in range(1, 10) and self.grid[line][row] is not None:\n raise VertexValueError()", "def __len__(self):\n return self._vertices.shape[0]", "def is_valid_adjacency_matrix(connections, num_intermediate, num_input, num_output):\n\n num_emitting = num_intermediate + num_input\n num_receiving = num_intermediate + num_output\n\n if connections.size(0) < num_receiving:\n return False\n if connections.size(1) < num_emitting:\n return False\n\n embedded_intermediate_size = connections.size(0) - num_output\n #check that dimensions of the connectivity tensor are consistent with single fixed intermediate size\n if embedded_intermediate_size < 0 or embedded_intermediate_size != connections.size(1) - num_input:\n return False\n\n # check left-justified\n if connections[num_receiving:, :].sum().item() > 0:\n return False\n if connections[:, num_emitting:].sum().item() > 0:\n return False\n # check that vertices only receive input from ancestors\n for i in range(num_receiving):\n if connections[i, i+ num_input:].sum().item() > 0:\n return False\n return True", "def vert_count(self):\n return self._vert_count", "def contains_vertex(self, vertex_name: n):\n return vertex_name in self._graph.keys()", "def hasNoDoubleVertices(self):\n assert all(self.vertices.count(v) == 1 for v in self.vertices)\n return (all(all(v1 == v2 or v1.dist(v2) > COMPARISON_EPSILON for v2 in self.vertices)\n for v1 in self.vertices) and\n all(self.vertices.count(v) == 1 for v in self.vertices))", "def num_vertex_sets(self, r):\n return sage.all.binomial(self.n, r)", "def is_edge_connected(num_vertices, adj_dict, edges):\n if not num_vertices or not adj_dict:\n return True\n check = { i:False for i in adj_dict.keys() }\n check_edges = [False for _ in range(len(edges))]\n first_vertex = list(adj_dict.keys())[0]\n is_edge_connected_dfs(adj_dict, first_vertex, edges, check, check_edges)\n return not False in check_edges", "def num_loop_vertices(self):\n return (self.L.size() - 1) if self.has_loop_vertices() else 0", "def has_vertex(t, tri, vertex):\n for i in range(3):\n if t[tri][i] == vertex:\n return True\n return False", "def draw_triangle(vertices, shape):\n # add 0.5 to account for fact that pixel centers are at (0.5, 0.5)\n barycenters = barycentric_coords(vertices, numpy.indices(shape) + 0.5)\n return (barycenters >= 0).all(axis=0)", "def verticesEqual(self, v1, v2, eps=1e-8):\n if abs(v1[0] - v2[0]) > eps:\n return False\n if abs(v1[1] - v2[1]) > eps:\n return False\n if abs(v1[2] - v2[2]) > eps:\n return False\n return True", "def has_isolated_vertices(self):\n return self.properties.isolated_vertices", "def getNumBonesPerVertex(self):\n return _osgAnimation.RigTransformHardware_getNumBonesPerVertex(self)", "def has_vertex(self, vertex) -> bool:\n return self._start is vertex or self._end is vertex", "def is_graph_34valent(self, G):\n return all( len(G[v])<=4 for v in G.vertices() )", "def vertices_degree(graph1: list, graph2: list):\n check1 = []\n check2 = []\n for row, _ in enumerate(graph1):\n degree1 = 0\n degree2 = 0\n for column, _ in enumerate(graph1[row]):\n if graph1[row][column] == 1:\n degree1 += 1\n if graph2[row][column] == 1:\n degree2 += 1\n check1.append(degree1)\n check2.append(degree2)\n if sorted(check1) == sorted(check2):\n return True, check1, check2\n return False, []", "def num_edges_rows(graph):\n return len(graph.graph.edges), len(graph.graph.nodes)", "def 
pertenece(self,v):\n return v in self.vertices.keys()", "def __init__(self, vertices):\n if len(vertices) < 3:\n raise LocationsTooLittle(\"Requires three or more vertices to form \"\n \"a polygon\")\n self.vertices = vertices", "def vertices(self, *args, **kwargs) -> Any:\n pass", "def num_polygons(self):\n return len(self.polygons)", "def is_simple(self):\n if not self.is_compact(): return False\n\n for v in self.vertex_generator():\n adj = [a for a in v.neighbors()]\n if len(adj) != self.dim():\n return False\n\n return True", "def is_vertex_in_graph(self, vertex):\r\n return vertex in self.__neighbours.keys()", "def num_rows(self):\n if self._is_vertex_frame():\n return self.__graph__.summary()['num_vertices']\n elif self._is_edge_frame():\n return self.__graph__.summary()['num_edges']", "def is_connected(self, vertices_encountered = None, start_vertex=None):\n\n if vertices_encountered is None:\n vertices_encountered = set()\n gdict = self.__graph_dict\n vertices = list(gdict.keys()) # list is necessary in python 3\n # if empty list return\n if len(vertices) == 0 :\n return False\n if not start_vertex:\n # Choose a vertex vertex from graph as starting point\n start_vertex = vertices[0]\n vertices_encountered.add(start_vertex)\n if len(vertices_encountered) != len(vertices):\n for vertex in gdict[start_vertex]:\n if vertex not in vertices_encountered:\n if self.is_connected(vertices_encountered,vertex):\n return True\n else:\n return True\n return False", "def count(self, volume):\n\n countResult = 0\n\n for x in range(volume.shape[0]):\n for y in range(volume.shape[1]):\n for z in range(volume.shape[2]):\n if self.isMember(volume[x,y,z]):\n countResult += 1\n\n return countResult", "def num_edges(self):\n return sum(len(v.adj) for v in self.vertices.values())", "def size(self):\n num_vert = 0\n num_edg = 0\n for vertex in self.vertices():\n num_vert += 1\n num_edg += len(self.neighbors(vertex))\n return (num_vert, num_edg)", "def check_input(nodes, num_edges):\n num_nodes = len(nodes)\n min_edges = num_nodes - 1\n if num_edges < min_edges:\n raise ValueError('num_edges less than minimum (%i)' % min_edges)\n max_edges = num_nodes * (num_nodes - 1)\n if num_edges > max_edges:\n raise ValueError('num_edges greater than maximum (%i)' % max_edges)", "def _validateVertex(self, v):\n if v < 0 or v >= self._V:\n raise Exception(\"vertex {} is not between 0 and {}\".format(v, (self._V-1)))", "def num_polygons(self):\n return len(self)", "def checkNumNeighbors():", "def num_generic_vertex_sets(self, r):\n return sage.all.binomial(self.L.size(), r) * (self.q - 1) ** r", "def num_cuboids(self):\n return self._shape_count(_sff.cuboid)", "def check_corners(self, vertices, corners):\n assert_allclose(vertices['ul'], corners[0])\n assert_allclose(vertices['ur'], corners[1])\n assert_allclose(vertices['lr'], corners[2])\n assert_allclose(vertices['ll'], corners[3])", "def _row_or_col_is_header(s_count, v_count):\n if s_count == 1 and v_count == 1:\n return False\n else:\n return (s_count + 1) / (v_count + s_count + 1) >= 2. 
/ 3.", "def checkEachLineCount(mat):\n n = sum(mat[0])\n \n assert all(sum(line) == n for line in mat[1:]), \"Line count != %d (n value).\" % n\n return n", "def test_number_of_list_elements_in_MESH_chunk(self):\n for O in self.mod.objts.itervalues():\n for M in O.meshes.itervalues():\n self.assertEqual(M.lsize, len(M.list))", "def size(matrix):\n size = 0\n for _,row in matrix.items():\n #size += len([r for r in row.values() if r != None])\n for _,v in row.items():\n #print(\"V:\",v)\n size += 1 if v != None else 0\n return size", "def test_are_vertices_adjacent_yes_ascending(self):\n\n self.assertTrue(skeleton_lines._are_vertices_adjacent(\n vertex_indices=VERTEX_INDICES_ADJACENT,\n num_vertices_in_polygon=NUM_VERTICES_FOR_ADJACENCY_TEST))", "def checkEachLineCount(mat):\n n = sum(mat[0])\n\n assert all(sum(line) == n for line in mat[1:]), \"Line count != %d (n value).\" % n\n return n", "def add_vertex_edge(self, vertices):\n if len(vertices) < 2:\n raise Exception('Cannot have a single vertex')\n self.add_vertex(vertices[0])\n length_array = len(vertices)\n for iterator in range(1, length_array):\n num = vertices[iterator]\n is_number = False\n try:\n int(num)\n is_number = True\n except ValueError:\n pass\n if is_number:\n self.add_edge(vertices[0], num)", "def getNumberOfNeighbors(self, vertexNumber): \n\n return self.__degreeCount[vertexNumber]", "def _num_edges(self):\n return len(self._eid2partid)", "def __init__(self, vertices):\n self.vertices = vertices", "def order(self):\n return len(self.vertices())", "def validate(self):\n invalid = []\n self.load()\n\n if self.graph.num_vertices() < 1:\n return 'Graph is invalid, no vertices'\n\n if self.graph.num_edges() < 1:\n return 'Graph is invalid, no edges'\n\n for v in self.graph.vertices():\n if(v.in_degree() + v.out_degree() == 0):\n invalid.append(v)\n\n if len(invalid) != 0:\n return invalid\n else:\n return 'Graph is valid'", "def __len__(self):\n return len(self.geometries)", "def vertices(self):\n\n if self._faces is None:\n if self._vertices is None:\n return None\n self.triangulate()\n return self._vertices", "def num_edges(graph1: list, graph2: list):\n check1 = 0\n check2 = 0\n for row, _ in enumerate(graph1):\n for column, _ in enumerate(graph1[row]):\n if graph1[row][column] == 1:\n check1 += 1\n if graph2[row][column] == 1:\n check2 += 1\n return check1 == check2", "def num_edges(self):\r\n return len(self.__generate_edges())", "def num_nodes(self):\n return ((len(self.tensor_u)+1) * (len(self.tensor_v)+1) *\n (len(self.tensor_w)+1))", "def vert_degree(input_vertices):\n\tvertex_map = {}\n\tfor element in input_vertices:\n\t\tvertex_map[element] = 0\n\t\tfor x in prob:\n\t\t\tfor vertex in x:\n\t\t\t\tif element == vertex:\n\t\t\t\t\tvertex_map[element] += 1\n\treturn vertex_map", "def count_points_near_vertices(\n t, coords, bruteforce_simplex_counts=False,\n count_bincount=True, **kwargs):\n flat_tri = t.simplices.flatten()\n flat_ind = np.repeat(np.arange(t.nsimplex), 3)\n v_touches = []\n for i in range(t.npoints):\n v_touches.append(flat_ind[np.argwhere(flat_tri == i)])\n found = t.find_simplex(coords, bruteforce=bruteforce_simplex_counts)\n if count_bincount:\n bc = np.bincount(found, minlength=t.nsimplex)\n pt_count = np.array([\n bc[v_touches[i]].sum() for i in range(t.npoints)\n ])\n else:\n pt_count = np.zeros(t.npoints)\n for i in range(t.npoints):\n for j in v_touches[i]:\n pt_count[i] += np.count_nonzero(found == j)\n return pt_count", "def __has_multiple_edges(self):\n return \\\n len(\n list(\n [\n 
tuple((edge.get_first_incident_node().get_name(), edge.get_second_incident_node().get_name()))\n for edge in self.get_edges()\n ] # the length of the list which allows duplicates...\n )\n ) != \\\n len(\n set(\n {\n tuple((edge.get_first_incident_node().get_name(), edge.get_second_incident_node().get_name()))\n for edge in self.get_edges()\n } # ...should equal the length of the set that does not allow duplicates\n )\n ) # return True if the two data structures are equal in size and False otherwise", "def contains_vertex(self, v_name: str) -> bool:\n for i in self.adj_list:\n if i == v_name:\n return True\n return False", "def make_complete_graph(num_vertices):\n V = num_vertices\n K = V * (V - 1) // 2\n grid = np.zeros([3, K], np.int32)\n k = 0\n for v2 in range(V):\n for v1 in range(v2):\n grid[:, k] = [k, v1, v2]\n k += 1\n return grid", "def __init__(self, numvertices, directed=False):\n self._numvertices = numvertices\n self._directed = directed\n self._numedges = 0\n self._adjacents = [list() for _ in range(0, numvertices)]", "def is_generically_rigid(n_vertices, edges, dimensions):\n amplification_iterations = 1 # increase for higher probability of success (which is pretty high anyway)\n\n for iteration in range(amplification_iterations):\n random_vertex_config = numpy.random.rand(n_vertices, dimensions)\n if inf_dof(random_vertex_config, edges) == 0:\n return True\n return False", "def vertices_at_least_once(self):\n clauses = []\n for vertex in range(0,self.graph.num_vertices):\n clauses.append(self.vertex_at_least_once(vertex))\n return clauses", "def addVertex(self, v, vertices):\n vn = v / np.linalg.norm(v) * self._radius\n vertices += [[vn[0], vn[1], vn[2]]]\n return len(vertices)-1", "def get_vertex_count(remote, objectid):\n cmd1 = mmapi.StoredCommands()\n key1 = cmd1.AppendSceneCommand_GetVertexCount(objectid)\n remote.runCommand(cmd1)\n result_val = mmapi.any_result()\n cmd1.GetSceneCommandResult_GetVertexCount(key1, result_val)\n return result_val.i" ]
[ "0.7340938", "0.7330129", "0.7330129", "0.71462345", "0.7098103", "0.70285016", "0.6986101", "0.6935374", "0.68259686", "0.67648", "0.67457217", "0.66626644", "0.6626465", "0.6615794", "0.6540091", "0.6527618", "0.6514949", "0.64980805", "0.64802784", "0.64247876", "0.64116603", "0.63819706", "0.6202178", "0.6066598", "0.6066598", "0.60598207", "0.6036509", "0.6034562", "0.6000705", "0.5976976", "0.5958791", "0.5927859", "0.5925421", "0.5912454", "0.58922607", "0.5852529", "0.58202773", "0.5800827", "0.5627967", "0.55805504", "0.5530551", "0.55180496", "0.5513108", "0.5478913", "0.54747677", "0.54504496", "0.5446953", "0.5418443", "0.54155356", "0.53857595", "0.5377736", "0.5367778", "0.5359806", "0.53572655", "0.53247464", "0.5312695", "0.52519274", "0.52388316", "0.522821", "0.5195962", "0.51847106", "0.51820403", "0.51638335", "0.5161128", "0.5156218", "0.5144553", "0.5132528", "0.5117961", "0.51101565", "0.51068866", "0.50990915", "0.50922656", "0.5086796", "0.5077564", "0.5062677", "0.50564116", "0.5052302", "0.5050476", "0.5040422", "0.50347066", "0.50300986", "0.50202775", "0.501623", "0.5010786", "0.50065875", "0.5005376", "0.49847248", "0.49839428", "0.49750334", "0.4968772", "0.49685264", "0.49622467", "0.4953255", "0.49461234", "0.4941648", "0.49413136", "0.49313042", "0.49235973", "0.4922842", "0.4918123" ]
0.7056209
5
Checks for number of edges
>>> num_edges([[1, 1], [0, 1]], [[1, 1], [1, 1]])
False
>>> num_edges([[1, 0], [0, 1]], [[0, 1], [1, 0]])
True
def num_edges(graph1: list, graph2: list):
    check1 = 0
    check2 = 0
    for row, _ in enumerate(graph1):
        for column, _ in enumerate(graph1[row]):
            if graph1[row][column] == 1:
                check1 += 1
            if graph2[row][column] == 1:
                check2 += 1
    return check1 == check2
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def num_edges(self):\r\n return len(self.__generate_edges())", "def _number_of_edges(self):\n if self._edges is None:\n return 0\n return len(self._edges)", "def Test_NumEdges(Graph_MD):\n N_Edges = float(Graph_MD.number_of_edges())\n\n return N_Edges", "def has_edges(self):\n\n return len(self._edges) > 0", "def num_edges(self):\n return sum(1 for _ in self.iteredges())", "def _num_edges(self):\n return len(self._eid2partid)", "def countEdges(self):\n return numpy.count_nonzero(self.supportArray) / 2", "def Nedges(self):\n return len(self.edges)", "def num_of_edge(self):\n try:\n return self.edges\n except:\n print(\"ERROR: No graph exists\")", "def num_edges(self):\n return sum(len(v.adj) for v in self.vertices.values())", "def check_number_edges(ugraph):\n directed_edges = 0\n for node in ugraph:\n directed_edges += len(ugraph[node])\n if directed_edges % 2 == 0:\n return directed_edges / 2\n else:\n return \"Not Undirected\"", "def check_input(nodes, num_edges):\n num_nodes = len(nodes)\n min_edges = num_nodes - 1\n if num_edges < min_edges:\n raise ValueError('num_edges less than minimum (%i)' % min_edges)\n max_edges = num_nodes * (num_nodes - 1)\n if num_edges > max_edges:\n raise ValueError('num_edges greater than maximum (%i)' % max_edges)", "def __has_multiple_edges(self):\n return \\\n len(\n list(\n [\n tuple((edge.get_first_incident_node().get_name(), edge.get_second_incident_node().get_name()))\n for edge in self.get_edges()\n ] # the length of the list which allows duplicates...\n )\n ) != \\\n len(\n set(\n {\n tuple((edge.get_first_incident_node().get_name(), edge.get_second_incident_node().get_name()))\n for edge in self.get_edges()\n } # ...should equal the length of the set that does not allow duplicates\n )\n ) # return True if the two data structures are equal in size and False otherwise", "def number_of_edges(self) -> int:\n return self.graph.number_of_edges()", "def countEdges(self):\n n = 0\n for (hub, table) in self.totsupport.iteritems():\n n += len(table)\n return n", "def EdgesCount(self):\n return len(self.edges)", "def num_edges(self):\n return self._ll_tree.get_num_edges()", "def num_edges(self):\n return self._top_exp.number_of_edges()", "def num_edges_rows(graph):\n return len(graph.graph.edges), len(graph.graph.nodes)", "def __len__(self) -> int:\r\n return len(self._edges)", "def edge_count(self) -> int:\n return self._n_edges", "def num_edges(self):\n return (self.n * (self.L.size() - 1) - self.num_loop_vertices()) // (1 + int(not self.variant.is_bipartite()))", "def compute_num_edges(graph):\n # return the number of edges\n return sum([len(graph[source_node].keys()) for source_node in graph.keys()]) / 2", "def _num_edges(self):\n return int(self._edge_map[-1])", "def num_edges(g):\n total_edges_with_duplicates = sum(len(v) for v in g.values())\n return total_edges_with_duplicates // 2", "def edgecount(self):\n\n raise NotImplementedError", "def edge_count(self) -> int:\n return int(self.graph_tuple_stats.edge_count or 0)", "def is_edge_connected(num_vertices, adj_dict, edges):\n if not num_vertices or not adj_dict:\n return True\n check = { i:False for i in adj_dict.keys() }\n check_edges = [False for _ in range(len(edges))]\n first_vertex = list(adj_dict.keys())[0]\n is_edge_connected_dfs(adj_dict, first_vertex, edges, check, check_edges)\n return not False in check_edges", "def number_of_deviation_edges(self):\n return len(list(self.deviation_edges()))", "def return_num_edges(self):\n return sum(map(lambda x: len(x),self.__adj))", "def 
countEdges(self):\n s1 = self.DBcurs.execute(\"SELECT count(*) FROM edges\")\n data = s1.fetchone()\n if data:\n return data[0]\n else:\n return 0", "def size(self):\n return len(self.edges())", "def ndim(self):\n return len(self.edges)", "def checkNumNeighbors():", "def get_num_edges(self):\n\n return self._graph_state.get_num_edges()", "def is_valid_adjacency_matrix(connections, num_intermediate, num_input, num_output):\n\n num_emitting = num_intermediate + num_input\n num_receiving = num_intermediate + num_output\n\n if connections.size(0) < num_receiving:\n return False\n if connections.size(1) < num_emitting:\n return False\n\n embedded_intermediate_size = connections.size(0) - num_output\n #check that dimensions of the connectivity tensor are consistent with single fixed intermediate size\n if embedded_intermediate_size < 0 or embedded_intermediate_size != connections.size(1) - num_input:\n return False\n\n # check left-justified\n if connections[num_receiving:, :].sum().item() > 0:\n return False\n if connections[:, num_emitting:].sum().item() > 0:\n return False\n # check that vertices only receive input from ancestors\n for i in range(num_receiving):\n if connections[i, i+ num_input:].sum().item() > 0:\n return False\n return True", "def num_vertices(graph1: list, graph2: list):\n if len(graph1[0]) != len(graph2[0]):\n return False\n return True", "def number_of_direct_deviation_edges(self):\n return len(list(self.direct_deviation_edges()))", "def number_of_indirect_deviation_edges(self):\n return len(list(self.indirect_deviation_edges()))", "def edge_count(self):\n total = sum(len(self._outgoing[v]) for v in self._outgoing)\n # for undirected graphs, make sure not to double-count edges\n return total if self.is_directed() else total // 2", "def e_size(self) -> int:\n return self.edges_on_graph", "def edge_count(self):\r\n return int(sum(self.degree(node) for node in range(self.size))/2)", "def graph_is_connected(node_count, edges):\n\n disjoint_set = disjoint.DisjointSet(node_count + 1)\n\n for a, b in edges:\n disjoint_set.union(a, b)\n\n # Check if all nodes are part of the same set\n\n root = disjoint_set.root(1)\n\n for i in range(2, node_count + 1):\n if disjoint_set.root(i) != root:\n return False\n\n return True", "def number_of_trail_edges(self):\n return len(list(self.trail_edges()))", "def calculateEdges(i, j, matrix):\n num = 0\n if i > 0:\n if matrix[i-1][j] == 0:\n num += 1\n if j > 0:\n if matrix[i][j-1] == 0:\n num += 1\n if i < len(matrix) - 1:\n if matrix[i+1][j] == 0:\n num += 1\n if j < len(matrix[0]) - 1:\n if matrix[i][j+1] == 0:\n num += 1\n \n return num", "def has_multiple_edges(self):\n # Create a list of edge 2-tuples (a, b)\n edge_tuples = [(e['from_id'], e['to_id']) for e in self._edges]\n if len(edge_tuples) > len(set(edge_tuples)): # Do 'real' multiple edges exist?\n return True\n\n # Create a list of edge 2-tuples (a, b) with a <= b\n edge_tuples = [(min(e['from_id'], e['to_id']), max(e['from_id'], e['to_id'])) for e in self._edges]\n edge_tuples_set = set(edge_tuples)\n\n if len(edge_tuples) == 2 * len(edge_tuples_set): # This only happens if for each edge (a, b) also (b, a) exists\n return False\n else:\n # The set kicks out duplicate edges => less edges in the set means there were multiple edges\n return len(edge_tuples) > len(edge_tuples_set)", "def test_edges(self):\n\n edge_list = self.g.edges()\n self.assertEqual(42, len(edge_list))\n\n # p1 p3 and p3 p1 are valid edges\n t1 = ('p1', 'p3')\n self.assertTrue(t1 in edge_list)\n\n t2 = ('p3', 
'p1')\n self.assertTrue(t2 in edge_list)\n\n made_up = ('z1', 'q123')\n self.assertFalse(made_up in edge_list)\n\n return None", "def guess_num_nodes_from(edgelist):\n return np.max(edgelist) + 1", "def num_vertices(self):\r\n return len(self.__graph_dict.keys())", "def check_nverts(sections):\n return _check_nentries(sections, \"NVERTS\", \"VERTEX\")", "def data_edge_count(self) -> int:\n return int(self.graph_tuple_stats.data_edge_count or 0)", "def control_edge_count(self) -> int:\n return int(self.graph_tuple_stats.control_edge_count or 0)", "def size(self):\n try:\n return len(self._adjacency_list)\n except Exception as error:\n print(f'An error occurred: {error}')", "def check_edges(self):\n if self.rect.right >= self.screen_rect.right or self.rect.left <= 0:\n return True", "def num_edge_features(self):\n return self[0].num_edge_features", "def check_regularity(edges):\n for a, b in edges:\n counter_a = 0\n counter_b = 0\n for x, y in edges:\n if a == x or a == y:\n counter_a += 1\n if b == x or b == y:\n counter_b += 1\n assert (counter_a > 0) and (counter_b > 0)\n if (counter_a == 1) or (counter_b == 1):\n raise Exception(\"Boundary is not closed.\")\n if (counter_a > 2) or (counter_b > 2):\n raise Exception(\"More than two edges share a node.\")", "def is_connected(self):\n if self.V < 1:\n raise ValueError(\"empty graph\")\n if self.V < 2:\n return True\n if self.E == 0:\n return False\n cc = self.cc()\n return int(cc.max() == 0)", "def getNumberOfEdges(self):\n LIB.mnt_grid_getNumberOfEdges.argtypes = [POINTER(c_void_p)]\n n = c_size_t()\n ier = LIB.mnt_grid_getNumberOfEdges(self.obj, byref(n))\n if ier:\n error_handler(FILE, 'getNumberOfEdges', ier)\n return n.value", "def edge_num(self,row1,col1,row2,col2):\n\n row = row1\n col = col1\n row_n = row2\n col_n = col2\n \n if row2 < row1 or col2 < col1:\n row = row2\n col = col2\n row_n = row1\n col_n = col1\n \n if not ((row == row_n and col == col_n - 1) or (row == row_n-1 and col == col_n)):\n return -1\n\n if row < 0 or row_n >= self.rows or col < 0 or col_n >= self.cols:\n return -1\n \n node1 = row*self.rows+col+1\n node2 = row_n*self.rows+col_n+1\n edge_number = self.edge2index[(node1,node2)]\n #print \"%s %s: %d\" % (str(node1),str(node2),edge_number)\n \"\"\"\n #THIS DOWN HERE WOULD WORK IF GRAPHILLION NUMBERED EDGES CORRECTLY BUT IT DOESNT\n #print \"(%d,%d) (%d,%d)\" % (row,col,row_n,col_n)\n if row + col < self.cols - 1:\n if col_n == col + 1: \n #print \"(%d,%d) (%d,%d)\" % (row, col, row, col + 1)\n edge_number = self.diags[row + col] + 2 * row\n #edges[edge_number] = 1\n elif row_n == row + 1:\n #print \"(%d,%d) (%d,%d)\" % (row, col, row + 1, col)\n edge_number = self.diags[row + col] + 1 + 2 * row\n #edges[edge_number] = 1\n else:\n col_dist = self.cols - col - 1\n if col_n == col + 1: \n #print \"(%d,%d) (%d,%d)\" % (row, col, row, col + 1)\n edge_number = self.diags[row + col] + 2 * col_dist - 1\n #edges[edge_number] = 1\n elif row_n == row + 1:\n #print \"(%d,%d) (%d,%d)\" % (row, col, row + 1, col)\n edge_number = self.diags[row + col] + 2 * col_dist\n #edges[edge_number] = 1\n \"\"\"\n\n return edge_number", "def check_edges(self):\n start = int(input('Enter start vertex: '))\n end = int(input('Enter end vertex: '))\n if self._graph.is_edge_between(start, end):\n print('There is an edge from ' + str(start) + ' to ' + str(end))\n else:\n print('There is NO edge from ' + str(start) + ' to ' + str(end))", "def Test_NumNodes(Graph_MD):\n N_Knoten = Graph_MD.number_of_nodes()\n \n return N_Knoten", "def 
test_nodes_exist(graph_no_edges):\n for node in graph_no_edges:\n assert graph_no_edges.has_node(node)", "def validate(self, raise_on_error: bool = True) -> bool:\n cls_name = self.__class__.__name__\n status = True\n\n for edge_type, store in self._edge_store_dict.items():\n src, _, dst = edge_type\n\n num_src_nodes = self[src].num_nodes\n num_dst_nodes = self[dst].num_nodes\n if num_src_nodes is None:\n status = False\n warn_or_raise(\n f\"'num_nodes' is undefined in node type '{src}' of \"\n f\"'{cls_name}'\", raise_on_error)\n\n if num_dst_nodes is None:\n status = False\n warn_or_raise(\n f\"'num_nodes' is undefined in node type '{dst}' of \"\n f\"'{cls_name}'\", raise_on_error)\n\n if 'edge_index' in store:\n if (store.edge_index.dim() != 2\n or store.edge_index.size(0) != 2):\n status = False\n warn_or_raise(\n f\"'edge_index' of edge type {edge_type} needs to be \"\n f\"of shape [2, num_edges] in '{cls_name}' (found \"\n f\"{store.edge_index.size()})\", raise_on_error)\n\n if 'edge_index' in store and store.edge_index.numel() > 0:\n if store.edge_index.min() < 0:\n status = False\n warn_or_raise(\n f\"'edge_index' of edge type {edge_type} contains \"\n f\"negative indices in '{cls_name}' \"\n f\"(found {int(store.edge_index.min())})\",\n raise_on_error)\n\n if (num_src_nodes is not None\n and store.edge_index[0].max() >= num_src_nodes):\n status = False\n warn_or_raise(\n f\"'edge_index' of edge type {edge_type} contains \"\n f\"larger source indices than the number of nodes \"\n f\"({num_src_nodes}) of this node type in '{cls_name}' \"\n f\"(found {int(store.edge_index[0].max())})\",\n raise_on_error)\n\n if (num_dst_nodes is not None\n and store.edge_index[1].max() >= num_dst_nodes):\n status = False\n warn_or_raise(\n f\"'edge_index' of edge type {edge_type} contains \"\n f\"larger destination indices than the number of nodes \"\n f\"({num_dst_nodes}) of this node type in '{cls_name}' \"\n f\"(found {int(store.edge_index[1].max())})\",\n raise_on_error)\n\n return status", "def num_edges(self, etype: Optional[str] = None) -> int:\n if etype:\n if etype not in self.canonical_etypes:\n etype = self.get_corresponding_canonical_etype(etype)\n return self.num_edges_dict[etype]\n else:\n return self.total_number_of_edges", "def check_dim(gr, DIM):\n l = len(gr)\n if(l != DIM):\n return False\n\n for i in range(0, DIM):\n if(len(gr[i]) != l):\n return False \n return True", "def ngraphs(self):\n return len(self.__graph_list)", "def num_edge_features(self) -> int:\n data, _, _ = self[0]\n if hasattr(data, 'num_edge_features'):\n return data.num_edge_features\n raise AttributeError(f\"'{data.__class__.__name__}' object has no \"\n f\"attribute 'num_edge_features'\")", "def vertex_count(self):\n return len(self._outgoing)", "def set_edges(self, edges):\n if (not isinstance(edges, None.__class__) and (edges.size != 0)):\n if ((np.shape(edges)[0] != self.E) or (np.shape(edges)[1] != 2)):\n raise ValueError('Incompatible size of the edge matrix')\n if edges.max() + 1 > self.V:\n raise ValueError('Incorrect edge specification')\n self.edges = edges\n else:\n self.edges = []", "def obtener_cantidad_vertices(self):\n return len(self.vertices.keys())", "def IsEdge(self, p_int, p_int_1):\n ...", "def check_edges(self):\n\t\tscreen_rect = self.screen.get_rect()\n\t\tif self.rect.right >= screen_rect.right:\n\t\t\treturn True\n\t\telif self.rect.left <= 0:\n\t\t\treturn True", "def IsEulerGraph(self):\n\n for node in self.nodes:\n if ((len(node.neighbours) % 2) == 1) or (len(node.neighbours) == 
0):\n return False\n return True", "def check_edges(self):\n screen_rect = self.screen.get_rect()\n if self.rect.right >= screen_rect.right or self.rect.left <= 0:\n return True", "def assert_count_equal(self, result, count):\n self.assertGreater(count, 0)\n self.assertEqual(len(result['edges']), count)", "def check_edge_list(src_nodes, dst_nodes, edge_weights):\n\n if len(src_nodes) != len(dst_nodes):\n raise EdgeListError(\"src_nodes and dst_nodes must be of same length.\")\n\n if edge_weights is None:\n return\n\n if len(edge_weights) != len(src_nodes):\n raise EdgeListError(\"src_nodes and edge_weights must be of same length.\")", "def count_nodes(self):\n\t\treturn self.__count_nodes(self)", "def has_edge(self, otherNode):\n\t\t\treturn otherNode in self.edges", "def call_edge_count(self) -> int:\n return int(self.graph_tuple_stats.call_edge_count or 0)", "def contains_several_vertices(self, currentState):\n\t\treturn True if sum(currentState) > 3 else False", "def num_of_subgraphs(self):\n \n G = self.to_undirected_graph()\n \n count = G.num_subgraph()\n \n print('The number of disconnected components in the graph is ', count)", "def num_nodes(g):\n return len(g.keys())", "def num_vertices(self):\n return len(self.vertices)", "def num_vertices(self):\n return len(self.vertices)", "def getNumVertexes(self):\n return _osgAnimation.RigTransformHardware_getNumVertexes(self)", "def compute_num_nodes(graph):\n return len(graph.keys()) # return the number of nodes in the graph", "def node_is_edge(self, node: MazeCell) -> bool:\n return node.x == 0 or node.x == self._ncols - 1 or node.y == 0 or node.y == self._nrows - 1", "def __len__(self):\n return len(self.subgraph_list)", "def size(self):\n return (len(self.nodes), sum([len(x.outgoing_relations) for x in self.nodes.values()]))", "def num_nodes(self):\n return len(self.nodes)", "def count_edges(input_first, input_second, problem):\n\tcount = 0\n\tfor idx in xrange(len(input_first)):\n\t\tfor index in xrange(len(input_second)):\n\t\t\tif (input_first[idx], input_second[index]) in problem:\n\t\t\t\tcount += 1\n\t\t\telif (input_second[index], input_first[idx]) in problem:\n\t\t\t\tcount += 1\n\treturn count", "def is_graph_34valent(self, G):\n return all( len(G[v])<=4 for v in G.vertices() )", "def num_rows(self):\n if self._is_vertex_frame():\n return self.__graph__.summary()['num_vertices']\n elif self._is_edge_frame():\n return self.__graph__.summary()['num_edges']", "def validate_graph(self) -> bool:\n return True", "def is_edge_connected_dfs(adj_dict, vertex, edges, check, check_edges):\n check[vertex] = True\n length = len(edges)\n for vert, weigth in adj_dict[vertex]:\n for i in range(length):\n a, b, c = edges[i]\n if (a == vertex and b == vert and c == weigth):\n check_edges[i] = True\n elif (a == vert and b == vertex and c == weigth):\n check_edges[i] = True\n if not check[vert]:\n is_edge_connected_dfs(adj_dict, vert, edges, check, check_edges)", "def check_edges(self):\n screen_rect = self.screen.get_rect()\n if self.rect.right >= screen_rect.right:\n return True\n elif self.rect.left <= screen_rect.left:\n return True", "def check_edges(self):\r\n screen_rect = self.screen.get_rect()\r\n if self.rect.right >= screen_rect.right:\r\n return True\r\n elif self.rect.left <= 0:\r\n return True", "def get_no_edges(self):\r\n return len(self.__cost.keys())", "def num_vertices(self):\n return self.n * (1 + int(self.variant.is_bipartite()))", "def __len__(self) -> int:\n return len(self.graphs)" ]
[ "0.7388578", "0.73457235", "0.7252701", "0.712351", "0.70799005", "0.7057893", "0.6985507", "0.6887687", "0.68730575", "0.68627745", "0.68099093", "0.6797033", "0.66984785", "0.6688148", "0.66881293", "0.66494614", "0.6648401", "0.6627459", "0.66019803", "0.6598041", "0.6585541", "0.6533134", "0.65072227", "0.64729965", "0.6412084", "0.641191", "0.6401119", "0.6394866", "0.6386497", "0.6353117", "0.62996227", "0.62692386", "0.6267706", "0.6242642", "0.6146364", "0.61307925", "0.61220187", "0.60993", "0.60928607", "0.60220987", "0.6001508", "0.5987522", "0.59850615", "0.5972873", "0.59498554", "0.5940342", "0.5936876", "0.59084135", "0.58682513", "0.5841622", "0.5828945", "0.5805139", "0.57851535", "0.57294345", "0.57291466", "0.5720827", "0.57192516", "0.5717744", "0.57132477", "0.5684725", "0.56740975", "0.5664609", "0.565673", "0.5653402", "0.561812", "0.56105924", "0.5575163", "0.55614144", "0.5560409", "0.5559323", "0.5553917", "0.555274", "0.5541134", "0.5540595", "0.55324817", "0.5522654", "0.55223376", "0.55131006", "0.5509962", "0.55051315", "0.5499792", "0.54976946", "0.548827", "0.548827", "0.54851925", "0.54801", "0.5465702", "0.5461233", "0.5449955", "0.5448708", "0.54454774", "0.54278445", "0.5425706", "0.5415891", "0.5413062", "0.5407475", "0.54046786", "0.54016423", "0.54014045", "0.5395456" ]
0.68215644
10
Checks for vertices' degrees
>>> vertices_degree([[1, 0], [1, 1]], [[0, 1], [1, 0]])
(False, [])
>>> vertices_degree([[1, 1], [0, 1]], [[1, 0], [1, 1]])
(True, [2, 1], [1, 2])
def vertices_degree(graph1: list, graph2: list):
    check1 = []
    check2 = []
    for row, _ in enumerate(graph1):
        degree1 = 0
        degree2 = 0
        for column, _ in enumerate(graph1[row]):
            if graph1[row][column] == 1:
                degree1 += 1
            if graph2[row][column] == 1:
                degree2 += 1
        check1.append(degree1)
        check2.append(degree2)
    if sorted(check1) == sorted(check2):
        return True, check1, check2
    return False, []
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _has_degree(\n self,\n degree: int,\n vertex: Vertex,\n ) -> bool:\n\n return vertex.get_id() in self._vertices_of_degree[degree]", "def vertice_degree(self):\r\n if(self.is_empty()):\r\n raise ValueError(\"Graph is empty.\")\r\n else:\r\n if(self.__directed):\r\n degrees = {}\r\n l = list(self.__graph_dict.values())\r\n flatter = []\r\n for x in l:\r\n for y in x:\r\n flatter.append(y)\r\n\r\n for k in self.__graph_dict.keys():\r\n degrees[k] = len(self.__graph_dict[k])\r\n if(k in flatter):\r\n degrees[k] += flatter.count(k)\r\n return degrees\r\n\r\n else:\r\n degrees = {}\r\n for k in self.__graph_dict.keys():\r\n degrees[k] = len(self.__graph_dict[k])\r\n return degrees", "def in_degree(self, vertices=None, labels=False):\n if vertices in self:\n return self._backend.in_degree(vertices)\n elif labels:\n return {v:d for v, d in self.in_degree_iterator(vertices, labels=labels)}\n else:\n return list(self.in_degree_iterator(vertices, labels=labels))", "def return_indeg(self, vertex: np.int_):\n return sum(map(lambda x: x>0,self.__mat[:,vertex]))", "def degree(self, v):\n self._validateVertex(v)\n return self._adj[v].size()", "def degree(adj_mat, vertex):\n return np.sum(adj_mat[vertex][:])", "def hasvertices(self):\n if len(self.vertices) > 0:\n return True\n else:\n return False", "def is_clockwise(vertices):\n v = vertices\n area = ((v[1][0] - v[0][0]) * (v[1][1] + v[0][1]) +\n (v[2][0] - v[1][0]) * (v[2][1] + v[1][1]) +\n (v[0][0] - v[2][0]) * (v[0][1] + v[2][1])) / 2\n return (area > 0)", "def vert_degree(input_vertices):\n\tvertex_map = {}\n\tfor element in input_vertices:\n\t\tvertex_map[element] = 0\n\t\tfor x in prob:\n\t\t\tfor vertex in x:\n\t\t\t\tif element == vertex:\n\t\t\t\t\tvertex_map[element] += 1\n\treturn vertex_map", "def degree(self, v):\n self._validateVertex(v)\n return self._adj[v].size()", "def compute_in_degrees(digraph):\n # initialize in-degrees dictionary with zero values for all vertices\n in_degree = {}\n for vertex in digraph:\n in_degree[vertex] = 0\n # consider each vertex\n for vertex in digraph:\n # amend in_degree[w] for each outgoing edge from v to w\n for neighbour in digraph[vertex]:\n in_degree[neighbour] += 1\n return in_degree", "def in_degree_iterator(self, vertices=None, labels=False):\n if vertices is None:\n vertices = self.vertex_iterator()\n if labels:\n for v in vertices:\n yield (v, self.in_degree(v))\n else:\n for v in vertices:\n yield self.in_degree(v)", "def test_graph_no_vertices(self):\n # initialize empty vertex graph\n vertices = []\n vertex_frame = self.context.frame.create(vertices, self.vertex_schema)\n graph = self.context.graph.create(vertex_frame, self.doc_edge_frame)\n\n # call sparktk to calculate deg cen result\n res = graph.degree_centrality()\n\n # ensure that all deg cen result values are 0 since there\n # are no valid vertices\n pandas_res = res.to_pandas()\n for (index, row) in pandas_res.iterrows():\n self.asertAlmostEqual(row[\"degree_centrality\"], 0)", "def _calculate_degree_centrality(self, vertices, edges):\n # here we are calculating our own deg cen res on the fly\n # edge counts will store the number of edges associated with\n # each vertex\n edge_counts = {}\n\n # get the edge frame in pandas form and iterate\n edge_pandas = edges.to_pandas()\n for (index, row) in edge_pandas.iterrows():\n # extract src and dest node index\n src = int(row[\"src\"])\n dest = int(row[\"dst\"])\n # now we increment the count for that node\n # in edge_counts, or initialize it to one\n # if it doesn't exist\n if src not 
in edge_counts.keys():\n edge_counts[src] = 1\n else:\n edge_counts[src] = edge_counts[src] + 1\n if dest not in edge_counts.values():\n edge_counts[dest] = 1\n else:\n edge_counts[dest] = edge_counts[dest] + 1\n return edge_counts", "def return_outdeg(self, vertex: np.int_):\n return len(self.__adj[vertex])", "def print_degree(self):\n vertex = int(input('enter vertex: '))\n in_degree = self._graph.get_in_degree(vertex)\n out_degree = self._graph.get_out_degree(vertex)\n print('The in degree of ' + str(vertex) + ' is ' + str(in_degree))\n print('The out degree of ' + str(vertex) + ' is ' + str(out_degree))", "def is_vertex(self):\n return True", "def is_vertex(self):\n return True", "def get_degrees_dictionary(edges):\n dd = {} # degrees dictionary for vertexes\n\n def append_vertex(vertex, edge_index):\n if vertex not in dd.keys():\n dd[vertex] = [1, edge_index]\n else:\n dd[vertex][0] += 1\n dd[vertex].append(edge_index)\n\n e = edges\n for i in range(len(e)):\n append_vertex(e[i][0], i)\n append_vertex(e[i][1], i)\n\n return dd", "def is_vertex(self): \n return False", "def deg_mat(adj_mat, size, vertices):\n deg_mat = np.zeros((size,size))\n for i in vertices:\n deg_mat[i][i] = degree(adj_mat, i)\n return deg_mat", "def get_degree(self, vertex):\r\n if not self.is_vertex_in_graph(vertex):\r\n raise GraphException(f\"The vertex {vertex} does not exist in the graph.\")\r\n return len(self.__neighbours[vertex])", "def is_connected(self, vertices_encountered = None, start_vertex=None):\n\n if vertices_encountered is None:\n vertices_encountered = set()\n gdict = self.__graph_dict\n vertices = list(gdict.keys()) # list is necessary in python 3\n # if empty list return\n if len(vertices) == 0 :\n return False\n if not start_vertex:\n # Choose a vertex vertex from graph as starting point\n start_vertex = vertices[0]\n vertices_encountered.add(start_vertex)\n if len(vertices_encountered) != len(vertices):\n for vertex in gdict[start_vertex]:\n if vertex not in vertices_encountered:\n if self.is_connected(vertices_encountered,vertex):\n return True\n else:\n return True\n return False", "def is_vertex(self):\n return False", "def pertenece(self,v):\n return v in self.vertices.keys()", "def demukron_network_order_function(vertices: List[Vertice], adj_matrix: np.ndarray) -> np.ndarray:\n current_level = 0\n vertice_indices_set = set(range(len(vertices)))\n m = adj_matrix.sum(axis=0) # array of in-degrees\n degrees_array = np.zeros(len(vertices))\n\n while vertice_indices_set:\n zero_on_the_current_step = {i for i in vertice_indices_set if m[i] == 0}\n for i in zero_on_the_current_step:\n degrees_array[i] = current_level\n m = m - adj_matrix[i]\n vertice_indices_set = vertice_indices_set - zero_on_the_current_step\n current_level += 1\n return degrees_array", "def getDegrees(self):\n l = []\n for node in self.getNodes():\n l.append((node, len(self.graph[node])))\n\n return l", "def degrees(self):\n A = self.adjacency()\n A.data = np.ones(A.nnz)\n right = np.array(A.sum(1)).ravel()\n left = np.array(A.sum(0)).ravel()\n return right, left", "def _compute_node_degrees(self):\n mes = []\n args = []\n for metaedge, matrix in self.adj_matrices.items():\n mes.append(metaedge)\n args.append(matrix)\n res = parallel_process(array=args, function=mt.calculate_degrees, n_jobs=self.n_jobs, front_num=0)\n for metaedge, (out_degree, in_degree) in zip(mes, res):\n self.out_degree[metaedge] = out_degree\n self.in_degree[metaedge] = in_degree", "def degree(self, node):\r\n if not 0 <= node < self.size:\r\n raise 
ValueError(\"Cannot find degree for a node not in the graph\")\r\n return len(self.edges[node])", "def compute_degrees(self, graph):\n\n g_vertices = graph.vertices\n g_edges = graph.edges\n\n # Get unweighted degrees\n indeg = graph.inDegrees\n outdeg = graph.outDegrees\n\n # Get weighted degrees\n w_indeg = (g_edges.groupby(\"dst\").agg(sum(\"weight\").alias(\"w_inDegree\"))).selectExpr(\"dst as id\",\n \"w_inDegree as w_inDegree\")\n w_outdeg = (g_edges.groupby(\"src\").agg(sum(\"weight\").alias(\"w_outDegree\"))).selectExpr(\"src as id\",\n \"w_outDegree as w_outDegree\")\n # Update vertices attribute\n new_v = g_vertices.join(indeg, \"id\", \"left_outer\")\n new_v = new_v.join(outdeg, \"id\", \"left_outer\")\n new_v = new_v.join(w_indeg, \"id\", \"left_outer\")\n new_v = new_v.join(w_outdeg, \"id\", \"left_outer\")\n new_v = new_v.na.fill(0)\n\n # Update graph\n self.graph = GraphFrame(new_v, g_edges)", "def triangle_quadrature_standard_points(degree):\n\tdegree = int(degree)\n\tassert (degree >= 0)\n\t\n\tif ((degree == 0) or (degree == 1)):\n\t\tpoint_matrix = [[1.0 / 3.0, 1.0 / 3.0]]\n\t\tweight_vector = [1.0]\n\n\telif (degree == 2):\n\t\tpoint_matrix = [\n\t\t\t[1.0 / 6.0, 1.0 / 6.0], [1.0 / 6.0, 2.0 / 3.0], [2.0 / 3.0, 1.0 / 6.0]\n\t\t]\n\t\tweight_vector = [1.0 / 3.0, 1.0 / 3.0, 1.0 / 3.0]\n\t\n\telif (degree == 3):\n\t\tpoint_matrix = [\n\t\t\t[1.0 / 3.0, 1.0 / 3.0], \n\t\t\t[0.2, 0.2], [0.2, 0.6], [0.6, 0.2]\n\t\t]\n\t\tweight_vector = [\n\t\t\t-27.0 / 48.0, \n\t\t\t25.0 / 48.0, 25.0 / 48.0, 25.0 / 48.0\n\t\t]\n\t\n\t# Analytical solutions are too complicated after this point. Numerical methods are used to give 14 siginificant digits.\n\telif (degree == 4):\n\t\tpoint_matrix = [\n\t\t\t[0.445948490915965, 0.445948490915965], [0.445948490915965, 0.108103018168070], [0.108103018168070, 0.445948490915965], \n\t\t\t[0.091576213509771, 0.091576213509771], [0.091576213509771, 0.816847572980459], [0.816847572980459, 0.091576213509771]\n\t\t]\n\t\tweight_vector = [\n\t\t\t0.223381589678011, 0.223381589678011, 0.223381589678011, \n\t\t\t0.109951743655322, 0.109951743655322, 0.109951743655322\n\t\t]\n\t\n\telif (degree == 5):\n\t\tpoint_matrix = [\n\t\t\t[0.333333333333333, 0.333333333333333], \n\t\t\t[0.470142064105115, 0.470142064105115], [0.470142064105115, 0.059715871789770], [0.059715871789770, 0.470142064105115], \n\t\t\t[0.101286507323456, 0.101286507323456], [0.101286507323456, 0.797426985353087], [0.797426985353087, 0.101286507323456]\n\t\t]\n\t\tweight_vector = [\n\t\t\t0.225000000000000, \n\t\t\t0.132394152788506, 0.132394152788506, 0.132394152788506, \n\t\t\t0.125939180544827, 0.125939180544827, 0.125939180544827\n\t\t]\n\t\n\telif (degree == 6):\n\t\tpoint_matrix = [\n\t\t\t[0.249286745170910, 0.249286745170910], [0.249286745170910, 0.501426509658179], [0.501426509658179, 0.249286745170910], \n\t\t\t[0.063089014491502, 0.063089014491502], [0.063089014491502, 0.873821971016996], [0.873821971016996, 0.063089014491502], \n\t\t\t[0.310352451033784, 0.636502499121399], [0.636502499121399, 0.053145049844817], [0.053145049844817, 0.310352451033784], [0.636502499121399, 0.310352451033784], [0.310352451033784, 0.053145049844817], [0.053145049844817, 0.636502499121399]\n\t\t]\n\t\tweight_vector = [\n\t\t\t0.116786275726379, 0.116786275726379, 0.116786275726379, \n\t\t\t0.050844906370207, 0.050844906370207, 0.050844906370207, \n\t\t\t0.082851075618374, 0.082851075618374, 0.082851075618374, 0.082851075618374, 0.082851075618374, 0.082851075618374\n\t\t]\n\t\n\telif (degree == 
7):\n\t\tpoint_matrix = [\n\t\t\t[0.333333333333333, 0.333333333333333], \n\t\t\t[0.260345966079040, 0.260345966079040], [0.260345966079040, 0.479308067841920], [0.479308067841920, 0.260345966079040], \n\t\t\t[0.065130102902216, 0.065130102902216], [0.065130102902216, 0.869739794195568], [0.869739794195568, 0.065130102902216], \n\t\t\t[0.312865496004874, 0.638444188569810], [0.638444188569810, 0.048690315425316], [0.048690315425316, 0.312865496004874], [0.638444188569810, 0.312865496004874], [0.312865496004874, 0.048690315425316], [0.048690315425316, 0.638444188569810]\n\t\t]\n\t\tweight_vector = [\n\t\t\t-0.149570044467682, \n\t\t\t0.175615257433208, 0.175615257433208, 0.175615257433208, \n\t\t\t0.053347235608838, 0.053347235608838, 0.053347235608838, \n\t\t\t0.077113760890257, 0.077113760890257, 0.077113760890257, 0.077113760890257, 0.077113760890257, 0.077113760890257\n\t\t]\n\t\n\telif (degree == 8):\n\t\tpoint_matrix = [\n\t\t\t[0.333333333333333, 0.333333333333333], \n\t\t\t[0.459292588292723, 0.459292588292723], [0.459292588292723, 0.081414823414554], [0.081414823414554, 0.459292588292723], \n\t\t\t[0.170569307751760, 0.170569307751760], [0.170569307751760, 0.658861384496480], [0.658861384496480, 0.170569307751760], \n\t\t\t[0.050547228317031, 0.050547228317031], [0.050547228317031, 0.898905543365938], [0.898905543365938, 0.050547228317031], \n\t\t\t[0.263112829634638, 0.728492392955404], [0.728492392955404, 0.008394777409958], [0.008394777409958, 0.263112829634638], [0.728492392955404, 0.263112829634638], [0.263112829634638, 0.008394777409958], [0.008394777409958, 0.728492392955404]\n\t\t]\n\t\tweight_vector = [\n\t\t\t0.144315607677787, \n\t\t\t0.095091634267285, 0.095091634267285, 0.095091634267285, \n\t\t\t0.103217370534718, 0.103217370534718, 0.103217370534718, \n\t\t\t0.032458497623198, 0.032458497623198, 0.032458497623198, \n\t\t\t0.027230314174435, 0.027230314174435, 0.027230314174435, 0.027230314174435, 0.027230314174435, 0.027230314174435\n\t\t]\n\t\n\telif (degree == 9):\n\t\tpoint_matrix = [\n\t\t\t[0.333333333333333, 0.333333333333333], \n\t\t\t[0.489682519198738, 0.489682519198738], [0.489682519198738, 0.020634961602525], [0.020634961602525, 0.489682519198738], \n\t\t\t[0.437089591492937, 0.437089591492937], [0.437089591492937, 0.125820817014127], [0.125820817014127, 0.437089591492937], \n\t\t\t[0.188203535619033, 0.188203535619033], [0.188203535619033, 0.623592928761935], [0.623592928761935, 0.188203535619033], \n\t\t\t[0.044729513394453, 0.044729513394453], [0.044729513394453, 0.910540973211095], [0.910540973211095, 0.044729513394453], \n\t\t\t[0.221962989160766, 0.741198598784498], [0.741198598784498, 0.036838412054736], [0.036838412054736, 0.221962989160766], [0.741198598784498, 0.221962989160766], [0.221962989160766, 0.036838412054736], [0.036838412054736, 0.741198598784498]\n\t\t]\n\t\tweight_vector = [\n\t\t\t0.09713579628280, \n\t\t\t0.031334700227139, 0.031334700227139, 0.031334700227139, \n\t\t\t0.077827541004774, 0.077827541004774, 0.077827541004774, \n\t\t\t0.079647738927210, 0.079647738927210, 0.079647738927210, \n\t\t\t0.025577675658698, 0.025577675658698, 0.025577675658698, \n\t\t\t0.043283539377289, 0.043283539377289, 0.043283539377289, 0.043283539377289, 0.043283539377289, 0.043283539377289\n\t\t]\n\t\n\telif (degree == 10):\n\t\tpoint_matrix = [\n\t\t\t[0.333333333333333, 0.333333333333333], \n\t\t\t[0.485577633383657, 0.485577633383657], [0.485577633383657, 0.028844733232685], [0.028844733232685, 0.485577633383657], \n\t\t\t[0.109481575485037, 
0.109481575485037], [0.109481575485037, 0.781036849029926], [0.781036849029926, 0.109481575485037], \n\t\t\t[0.307939838764121, 0.550352941820999], [0.550352941820999, 0.141707219414880], [0.141707219414880, 0.307939838764121], [0.550352941820999, 0.307939838764121], [0.307939838764121, 0.141707219414880], [0.141707219414880, 0.550352941820999], \n\t\t\t[0.246672560639903, 0.728323904597411], [0.728323904597411, 0.025003534762686], [0.025003534762686, 0.246672560639903], [0.728323904597411, 0.246672560639903], [0.246672560639903, 0.025003534762686], [0.025003534762686, 0.728323904597411], \n\t\t\t[0.066803251012200, 0.923655933587500], [0.923655933587500, 0.009540815400299], [0.009540815400299, 0.066803251012200], [0.923655933587500, 0.066803251012200], [0.066803251012200, 0.009540815400299], [0.009540815400299, 0.923655933587500]\n\t\t]\n\t\tweight_vector = [\n\t\t\t0.090817990382754, \n\t\t\t0.036725957756467, 0.036725957756467, 0.036725957756467, \n\t\t\t0.045321059435528, 0.045321059435528, 0.045321059435528, \n\t\t\t0.072757916845420, 0.072757916845420, 0.072757916845420, 0.072757916845420, 0.072757916845420, 0.072757916845420, \n\t\t\t0.028327242531057, 0.028327242531057, 0.028327242531057, 0.028327242531057, 0.028327242531057, 0.028327242531057, \n\t\t\t0.009421666963733, 0.009421666963733, 0.009421666963733, 0.009421666963733, 0.009421666963733, 0.009421666963733\n\t\t]\n\t\n\telif (degree == 11):\n\t\tpoint_matrix = [\n\t\t\t[0.534611048270758, 0.534611048270758], [0.534611048270758, -0.069222096541517], [-0.069222096541517, 0.534611048270758], \n\t\t\t[0.398969302965855, 0.398969302965855], [0.398969302965855, 0.202061394068290], [0.202061394068290, 0.398969302965855], \n\t\t\t[0.203309900431282, 0.203309900431282], [0.203309900431282, 0.593380199137435], [0.593380199137435, 0.203309900431282], \n\t\t\t[0.119350912282581, 0.119350912282581], [0.119350912282581, 0.761298175434837], [0.761298175434837, 0.119350912282581], \n\t\t\t[0.032364948111276, 0.032364948111276], [0.032364948111276, 0.935270103777448], [0.935270103777448, 0.032364948111276], \n\t\t\t[0.356620648261293, 0.593201213428213], [0.593201213428213, 0.050178138310495], [0.050178138310495, 0.356620648261293], [0.593201213428213, 0.356620648261293], [0.356620648261293, 0.050178138310495], [0.050178138310495, 0.593201213428213], \n\t\t\t[0.171488980304042, 0.807489003159792], [0.807489003159792, 0.021022016536166], [0.021022016536166, 0.171488980304042], [0.807489003159792, 0.171488980304042], [0.171488980304042, 0.021022016536166], [0.021022016536166, 0.807489003159792]\n\t\t]\n\t\tweight_vector = [\n\t\t\t0.000927006328961, 0.000927006328961, 0.000927006328961, \n\t\t\t0.077149534914813, 0.077149534914813, 0.077149534914813, \n\t\t\t0.059322977380774, 0.059322977380774, 0.059322977380774, \n\t\t\t0.036184540503418, 0.036184540503418, 0.036184540503418, \n\t\t\t0.013659731002678, 0.013659731002678, 0.013659731002678, \n\t\t\t0.052337111962204, 0.052337111962204, 0.052337111962204, 0.052337111962204, 0.052337111962204, 0.052337111962204, \n\t\t\t0.020707659639141, 0.020707659639141, 0.020707659639141, 0.020707659639141, 0.020707659639141, 0.020707659639141\n\t\t]\n\t\n\telif (degree == 12):\n\t\tpoint_matrix = [\n\t\t\t[0.488217389773805, 0.488217389773805], [0.488217389773805, 0.023565220452390], [0.023565220452390, 0.488217389773805], \n\t\t\t[0.439724392294460, 0.439724392294460], [0.439724392294460, 0.120551215411079], [0.120551215411079, 0.439724392294460], \n\t\t\t[0.271210385012116, 0.271210385012116], 
[0.271210385012116, 0.457579229975768], [0.457579229975768, 0.271210385012116], \n\t\t\t[0.127576145541586, 0.127576145541586], [0.127576145541586, 0.744847708916828], [0.744847708916828, 0.127576145541586], \n\t\t\t[0.021317350453210, 0.021317350453210], [0.021317350453210, 0.957365299093579], [0.957365299093579, 0.021317350453210], \n\t\t\t[0.275713269685514, 0.608943235779788], [0.608943235779788, 0.115343494534698], [0.115343494534698, 0.275713269685514], [0.608943235779788, 0.275713269685514], [0.275713269685514, 0.115343494534698], [0.115343494534698, 0.608943235779788], \n\t\t\t[0.281325580989940, 0.695836086787803], [0.695836086787803, 0.022838332222257], [0.022838332222257, 0.281325580989940], [0.695836086787803, 0.281325580989940], [0.281325580989940, 0.022838332222257], [0.022838332222257, 0.695836086787803], \n\t\t\t[0.116251915907597, 0.858014033544073], [0.858014033544073, 0.025734050548330], [0.025734050548330, 0.116251915907597], [0.858014033544073, 0.116251915907597], [0.116251915907597, 0.025734050548330], [0.025734050548330, 0.858014033544073], \n\t\t]\n\t\tweight_vector = [\n\t\t\t0.025731066440455, 0.025731066440455, 0.025731066440455, \n\t\t\t0.043692544538038, 0.043692544538038, 0.043692544538038, \n\t\t\t0.062858224217885, 0.062858224217885, 0.062858224217885, \n\t\t\t0.034796112930709, 0.034796112930709, 0.034796112930709, \n\t\t\t0.006166261051559, 0.006166261051559, 0.006166261051559, \n\t\t\t0.040371557766381, 0.040371557766381, 0.040371557766381, 0.040371557766381, 0.040371557766381, 0.040371557766381, \n\t\t\t0.022356773202303, 0.022356773202303, 0.022356773202303, 0.022356773202303, 0.022356773202303, 0.022356773202303, \n\t\t\t0.017316231108659, 0.017316231108659, 0.017316231108659, 0.017316231108659, 0.017316231108659, 0.017316231108659\n\t\t]\n\t\n\telif (degree == 13):\n\t\tpoint_matrix = [\n\t\t\t[0.333333333333333, 0.333333333333333], \n\t\t\t[0.495048184939705, 0.495048184939705], [0.495048184939705, 0.009903630120591], [0.009903630120591, 0.495048184939705], \n\t\t\t[0.468716635109574, 0.468716635109574], [0.468716635109574, 0.062566729780852], [0.062566729780852, 0.468716635109574], \n\t\t\t[0.414521336801277, 0.414521336801277], [0.414521336801277, 0.170957326397447], [0.170957326397447, 0.414521336801277], \n\t\t\t[0.229399572042831, 0.229399572042831], [0.229399572042831, 0.541200855914337], [0.541200855914337, 0.229399572042831], \n\t\t\t[0.114424495196330, 0.114424495196330], [0.114424495196330, 0.771151009607340], [0.771151009607340, 0.114424495196330], \n\t\t\t[0.024811391363459, 0.024811391363459], [0.024811391363459, 0.950377217273082], [0.950377217273082, 0.024811391363459], \n\t\t\t[0.268794997058761, 0.636351174561660], [0.636351174561660, 0.094853828379579], [0.094853828379579, 0.268794997058761], [0.636351174561660, 0.268794997058761], [0.268794997058761, 0.094853828379579], [0.094853828379579, 0.636351174561660], \n\t\t\t[0.291730066734288, 0.690169159986905], [0.690169159986905, 0.018100773278807], [0.018100773278807, 0.291730066734288], [0.690169159986905, 0.291730066734288], [0.291730066734288, 0.018100773278807], [0.018100773278807, 0.690169159986905], \n\t\t\t[0.126357385491669, 0.851409537834241], [0.851409537834241, 0.022233076674090], [0.022233076674090, 0.126357385491669], [0.851409537834241, 0.126357385491669], [0.126357385491669, 0.022233076674090], [0.022233076674090, 0.851409537834241]\n\t\t]\n\t\tweight_vector = [\n\t\t\t0.052520923400802, \n\t\t\t0.011280145209330, 0.011280145209330, 0.011280145209330, 
\n\t\t\t0.031423518362454, 0.031423518362454, 0.031423518362454, \n\t\t\t0.047072502504194, 0.047072502504194, 0.047072502504194, \n\t\t\t0.047363586536355, 0.047363586536355, 0.047363586536355, \n\t\t\t0.031167529045794, 0.031167529045794, 0.031167529045794, \n\t\t\t0.007975771465074, 0.007975771465074, 0.007975771465074, \n\t\t\t0.036848402728732, 0.036848402728732, 0.036848402728732, 0.036848402728732, 0.036848402728732, 0.036848402728732, \n\t\t\t0.017401463303822, 0.017401463303822, 0.017401463303822, 0.017401463303822, 0.017401463303822, 0.017401463303822, \n\t\t\t0.015521786839045, 0.015521786839045, 0.015521786839045, 0.015521786839045, 0.015521786839045, 0.015521786839045\n\t\t]\n\t\n\telif (degree == 14):\n\t\tpoint_matrix = [\n\t\t\t[0.488963910362179, 0.488963910362179], [0.488963910362179, 0.022072179275643], [0.022072179275643, 0.488963910362179], \n\t\t\t[0.417644719340454, 0.417644719340454], [0.417644719340454, 0.164710561319092], [0.164710561319092, 0.417644719340454], \n\t\t\t[0.273477528308839, 0.273477528308839], [0.273477528308839, 0.453044943382323], [0.453044943382323, 0.273477528308839], \n\t\t\t[0.177205532412543, 0.177205532412543], [0.177205532412543, 0.645588935174913], [0.645588935174913, 0.177205532412543], \n\t\t\t[0.061799883090873, 0.061799883090873], [0.061799883090873, 0.876400233818255], [0.876400233818255, 0.061799883090873], \n\t\t\t[0.019390961248701, 0.019390961248701], [0.019390961248701, 0.961218077502598], [0.961218077502598, 0.019390961248701], \n\t\t\t[0.172266687821356, 0.770608554774996], [0.770608554774996, 0.057124757403648], [0.057124757403648, 0.172266687821356], [0.770608554774996, 0.172266687821356], [0.172266687821356, 0.057124757403648], [0.057124757403648, 0.770608554774996], \n\t\t\t[0.336861459796345, 0.570222290846683], [0.570222290846683, 0.092916249356972], [0.092916249356972, 0.336861459796345], [0.570222290846683, 0.336861459796345], [0.336861459796345, 0.092916249356972], [0.092916249356972, 0.570222290846683], \n\t\t\t[0.298372882136258, 0.686980167808088], [0.686980167808088, 0.014646950055654], [0.014646950055654, 0.298372882136258], [0.686980167808088, 0.298372882136258], [0.298372882136258, 0.014646950055654], [0.014646950055654, 0.686980167808088], \n\t\t\t[0.118974497696957, 0.879757171370171], [0.879757171370171, 0.001268330932872], [0.001268330932872, 0.118974497696957], [0.879757171370171, 0.118974497696957], [0.118974497696957, 0.001268330932872], [0.001268330932872, 0.879757171370171]\n\t\t]\n\t\tweight_vector = [\n\t\t\t0.021883581369429, 0.021883581369429, 0.021883581369429, \n\t\t\t0.032788353544125, 0.032788353544125, 0.032788353544125, \n\t\t\t0.051774104507292, 0.051774104507292, 0.051774104507292, \n\t\t\t0.042162588736993, 0.042162588736993, 0.042162588736993, \n\t\t\t0.014433699669777, 0.014433699669777, 0.014433699669777, \n\t\t\t0.004923403602400, 0.004923403602400, 0.004923403602400, \n\t\t\t0.024665753212564, 0.024665753212564, 0.024665753212564, 0.024665753212564, 0.024665753212564, 0.024665753212564, \n\t\t\t0.038571510787061, 0.038571510787061, 0.038571510787061, 0.038571510787061, 0.038571510787061, 0.038571510787061, \n\t\t\t0.014436308113534, 0.014436308113534, 0.014436308113534, 0.014436308113534, 0.014436308113534, 0.014436308113534, \n\t\t\t0.005010228838501, 0.005010228838501, 0.005010228838501, 0.005010228838501, 0.005010228838501, 0.005010228838501\n\t\t]\n\t\n\telif (degree == 15):\n\t\tpoint_matrix = [\n\t\t\t[0.506972916858243, 0.506972916858243], [0.506972916858243, 
-0.013945833716486], [-0.013945833716486, 0.506972916858243], \n\t\t\t[0.431406354283023, 0.431406354283023], [0.431406354283023, 0.137187291433955], [0.137187291433955, 0.431406354283023], \n\t\t\t[0.277693644847144, 0.277693644847144], [0.277693644847144, 0.444612710305711], [0.444612710305711, 0.277693644847144], \n\t\t\t[0.126464891041254, 0.126464891041254], [0.126464891041254, 0.747070217917492], [0.747070217917492, 0.126464891041254], \n\t\t\t[0.070808385974686, 0.070808385974686], [0.070808385974686, 0.858383228050628], [0.858383228050628, 0.070808385974686], \n\t\t\t[0.018965170241073, 0.018965170241073], [0.018965170241073, 0.962069659517853], [0.962069659517853, 0.018965170241073], \n\t\t\t[0.261311371140087, 0.604954466893291], [0.604954466893291, 0.133734161966621], [0.133734161966621, 0.261311371140087], [0.604954466893291, 0.261311371140087], [0.261311371140087, 0.133734161966621], [0.133734161966621, 0.604954466893291], \n\t\t\t[0.388046767090269, 0.575586555512814], [0.575586555512814, 0.036366677396917], [0.036366677396917, 0.388046767090269], [0.575586555512814, 0.388046767090269], [0.388046767090269, 0.036366677396917], [0.036366677396917, 0.575586555512814], \n\t\t\t[0.285712220049916, 0.724462663076655], [0.724462663076655, -0.010174883126571], [-0.010174883126571, 0.285712220049916], [0.724462663076655, 0.285712220049916], [0.285712220049916, -0.010174883126571], [-0.010174883126571, 0.724462663076655], \n\t\t\t[0.215599664072284, 0.747556466051838], [0.747556466051838, 0.036843869875878], [0.036843869875878, 0.215599664072284], [0.747556466051838, 0.215599664072284], [0.215599664072284, 0.036843869875878], [0.036843869875878, 0.747556466051838], \n\t\t\t[0.103575616576386, 0.883964574092416], [0.883964574092416, 0.012459809331199], [0.012459809331199, 0.103575616576386], [0.883964574092416, 0.103575616576386], [0.103575616576386, 0.012459809331199], [0.012459809331199, 0.883964574092416]\n\t\t]\n\t\tweight_vector = [\n\t\t\t0.001916875642849, 0.001916875642849, 0.001916875642849, \n\t\t\t0.044249027271145, 0.044249027271145, 0.044249027271145, \n\t\t\t0.051186548718852, 0.051186548718852, 0.051186548718852, \n\t\t\t0.023687735870688, 0.023687735870688, 0.023687735870688, \n\t\t\t0.013289775690021, 0.013289775690021, 0.013289775690021, \n\t\t\t0.004748916608192, 0.004748916608192, 0.004748916608192, \n\t\t\t0.038550072599593, 0.038550072599593, 0.038550072599593, 0.038550072599593, 0.038550072599593, 0.038550072599593, \n\t\t\t0.027215814320624, 0.027215814320624, 0.027215814320624, 0.027215814320624, 0.027215814320624, 0.027215814320624, \n\t\t\t0.002182077366797, 0.002182077366797, 0.002182077366797, 0.002182077366797, 0.002182077366797, 0.002182077366797, \n\t\t\t0.021505319847731, 0.021505319847731, 0.021505319847731, 0.021505319847731, 0.021505319847731, 0.021505319847731, \n\t\t\t0.007673942631049, 0.007673942631049, 0.007673942631049, 0.007673942631049, 0.007673942631049, 0.007673942631049\n\t\t]\n\t\n\telif (degree == 16):\n\t\tpoint_matrix = [\n\t\t\t[0.333333333333333, 0.333333333333333], \n\t\t\t[0.497380541948438, 0.497380541948438], [0.497380541948438, 0.005238916103123], [0.005238916103123, 0.497380541948438], \n\t\t\t[0.413469438549352, 0.413469438549352], [0.413469438549352, 0.173061122901295], [0.173061122901295, 0.413469438549352], \n\t\t\t[0.470458599066991, 0.470458599066991], [0.470458599066991, 0.059082801866017], [0.059082801866017, 0.470458599066991], \n\t\t\t[0.240553749969521, 0.240553749969521], [0.240553749969521, 0.518892500060958], 
[0.518892500060958, 0.240553749969521], \n\t\t\t[0.147965794222573, 0.147965794222573], [0.147965794222573, 0.704068411554854], [0.704068411554854, 0.147965794222573], \n\t\t\t[0.075465187657474, 0.075465187657474], [0.075465187657474, 0.849069624685052], [0.849069624685052, 0.075465187657474], \n\t\t\t[0.016596402623025, 0.016596402623025], [0.016596402623025, 0.966807194753950], [0.966807194753950, 0.016596402623025], \n\t\t\t[0.296555596579887, 0.599868711174861], [0.599868711174861, 0.103575692245252], [0.103575692245252, 0.296555596579887], [0.599868711174861, 0.296555596579887], [0.296555596579887, 0.103575692245252], [0.103575692245252, 0.599868711174861], \n\t\t\t[0.337723063403079, 0.642193524941505], [0.642193524941505, 0.020083411655416], [0.020083411655416, 0.337723063403079], [0.642193524941505, 0.337723063403079], [0.337723063403079, 0.020083411655416], [0.020083411655416, 0.642193524941505], \n\t\t\t[0.204748281642812, 0.799592720971327], [0.799592720971327, -0.004341002614139], [-0.004341002614139, 0.204748281642812], [0.799592720971327, 0.204748281642812], [0.204748281642812, -0.004341002614139], [-0.004341002614139, 0.799592720971327], \n\t\t\t[0.189358492130623, 0.768699721401368], [0.768699721401368, 0.041941786468010], [0.041941786468010, 0.189358492130623], [0.768699721401368, 0.189358492130623], [0.189358492130623, 0.041941786468010], [0.041941786468010, 0.768699721401368], \n\t\t\t[0.085283615682657, 0.900399064086661], [0.900399064086661, 0.014317320230681], [0.014317320230681, 0.085283615682657], [0.900399064086661, 0.085283615682657], [0.085283615682657, 0.014317320230681], [0.014317320230681, 0.900399064086661]\n\t\t]\n\t\tweight_vector = [\n\t\t\t0.046875697427642, \n\t\t\t0.006405878578585, 0.006405878578585, 0.006405878578585, \n\t\t\t0.041710296739387, 0.041710296739387, 0.041710296739387, \n\t\t\t0.026891484250064, 0.026891484250064, 0.026891484250064, \n\t\t\t0.042132522761650, 0.042132522761650, 0.042132522761650, \n\t\t\t0.030000266842773, 0.030000266842773, 0.030000266842773, \n\t\t\t0.014200098925024, 0.014200098925024, 0.014200098925024, \n\t\t\t0.003582462351273, 0.003582462351273, 0.003582462351273, \n\t\t\t0.032773147460627, 0.032773147460627, 0.032773147460627, 0.032773147460627, 0.032773147460627, 0.032773147460627, \n\t\t\t0.015298306248441, 0.015298306248441, 0.015298306248441, 0.015298306248441, 0.015298306248441, 0.015298306248441, \n\t\t\t0.002386244192839, 0.002386244192839, 0.002386244192839, 0.002386244192839, 0.002386244192839, 0.002386244192839, \n\t\t\t0.019084792755899, 0.019084792755899, 0.019084792755899, 0.019084792755899, 0.019084792755899, 0.019084792755899, \n\t\t\t0.006850054546542, 0.006850054546542, 0.006850054546542, 0.006850054546542, 0.006850054546542, 0.006850054546542\n\t\t]\n\t\n\telif (degree == 17):\n\t\tpoint_matrix = [\n\t\t\t[0.333333333333333, 0.333333333333333], \n\t\t\t[0.497170540556774, 0.497170540556774], [0.497170540556774, 0.005658918886452], [0.005658918886452, 0.497170540556774], \n\t\t\t[0.482176322624625, 0.482176322624625], [0.482176322624625, 0.035647354750751], [0.035647354750751, 0.482176322624625], \n\t\t\t[0.450239969020782, 0.450239969020782], [0.450239969020782, 0.099520061958437], [0.099520061958437, 0.450239969020782], \n\t\t\t[0.400266239377397, 0.400266239377397], [0.400266239377397, 0.199467521245206], [0.199467521245206, 0.400266239377397], \n\t\t\t[0.252141267970953, 0.252141267970953], [0.252141267970953, 0.495717464058095], [0.495717464058095, 0.252141267970953], 
\n\t\t\t[0.162047004658461, 0.162047004658461], [0.162047004658461, 0.675905990683077], [0.675905990683077, 0.162047004658461], \n\t\t\t[0.075875882260746, 0.075875882260746], [0.075875882260746, 0.848248235478508], [0.848248235478508, 0.075875882260746], \n\t\t\t[0.015654726967822, 0.015654726967822], [0.015654726967822, 0.968690546064356], [0.968690546064356, 0.015654726967822], \n\t\t\t[0.334319867363658, 0.655493203809423], [0.655493203809423, 0.010186928826919], [0.010186928826919, 0.334319867363658], [0.655493203809423, 0.334319867363658], [0.334319867363658, 0.010186928826919], [0.010186928826919, 0.655493203809423], \n\t\t\t[0.292221537796944, 0.572337590532020], [0.572337590532020, 0.135440871671036], [0.135440871671036, 0.292221537796944], [0.572337590532020, 0.292221537796944], [0.292221537796944, 0.135440871671036], [0.135440871671036, 0.572337590532020], \n\t\t\t[0.319574885423190, 0.626001190286228], [0.626001190286228, 0.054423924290583], [0.054423924290583, 0.319574885423190], [0.626001190286228, 0.319574885423190], [0.319574885423190, 0.054423924290583], [0.054423924290583, 0.626001190286228], \n\t\t\t[0.190704224192292, 0.796427214974071], [0.796427214974071, 0.012868560833637], [0.012868560833637, 0.190704224192292], [0.796427214974071, 0.190704224192292], [0.190704224192292, 0.012868560833637], [0.012868560833637, 0.796427214974071], \n\t\t\t[0.180483211648746, 0.752351005937729], [0.752351005937729, 0.067165782413524], [0.067165782413524, 0.180483211648746], [0.752351005937729, 0.180483211648746], [0.180483211648746, 0.067165782413524], [0.067165782413524, 0.752351005937729], \n\t\t\t[0.080711313679564, 0.904625504095608], [0.904625504095608, 0.014663182224828], [0.014663182224828, 0.080711313679564], [0.904625504095608, 0.080711313679564], [0.080711313679564, 0.014663182224828], [0.014663182224828, 0.904625504095608]\n\t\t]\n\t\tweight_vector = [\n\t\t\t0.033437199290803, \n\t\t\t0.005093415440507, 0.005093415440507, 0.005093415440507, \n\t\t\t0.014670864527638, 0.014670864527638, 0.014670864527638, \n\t\t\t0.024350878353672, 0.024350878353672, 0.024350878353672, \n\t\t\t0.031107550868969, 0.031107550868969, 0.031107550868969, \n\t\t\t0.031257111218620, 0.031257111218620, 0.031257111218620, \n\t\t\t0.024815654339665, 0.024815654339665, 0.024815654339665, \n\t\t\t0.014056073070557, 0.014056073070557, 0.014056073070557, \n\t\t\t0.003194676173779, 0.003194676173779, 0.003194676173779, \n\t\t\t0.008119655318993, 0.008119655318993, 0.008119655318993, 0.008119655318993, 0.008119655318993, 0.008119655318993, \n\t\t\t0.026805742283163, 0.026805742283163, 0.026805742283163, 0.026805742283163, 0.026805742283163, 0.026805742283163, \n\t\t\t0.018459993210822, 0.018459993210822, 0.018459993210822, 0.018459993210822, 0.018459993210822, 0.018459993210822, \n\t\t\t0.008476868534328, 0.008476868534328, 0.008476868534328, 0.008476868534328, 0.008476868534328, 0.008476868534328, \n\t\t\t0.018292796770025, 0.018292796770025, 0.018292796770025, 0.018292796770025, 0.018292796770025, 0.018292796770025, \n\t\t\t0.006665632004165, 0.006665632004165, 0.006665632004165, 0.006665632004165, 0.006665632004165, 0.006665632004165\n\t\t]\n\t\n\telif (degree == 18):\n\t\tpoint_matrix = [\n\t\t\t[0.333333333333333, 0.333333333333333], \n\t\t\t[0.493344808630921, 0.493344808630921], [0.493344808630921, 0.013310382738157], [0.013310382738157, 0.493344808630921], \n\t\t\t[0.469210594241957, 0.469210594241957], [0.469210594241957, 0.061578811516086], [0.061578811516086, 0.469210594241957], 
\n\t\t\t[0.436281395887006, 0.436281395887006], [0.436281395887006, 0.127437208225989], [0.127437208225989, 0.436281395887006], \n\t\t\t[0.394846170673416, 0.394846170673416], [0.394846170673416, 0.210307658653168], [0.210307658653168, 0.394846170673416], \n\t\t\t[0.249794568803157, 0.249794568803157], [0.249794568803157, 0.500410862393686], [0.500410862393686, 0.249794568803157], \n\t\t\t[0.161432193743843, 0.161432193743843], [0.161432193743843, 0.677135612512315], [0.677135612512315, 0.161432193743843], \n\t\t\t[0.076598227485371, 0.076598227485371], [0.076598227485371, 0.846803545029257], [0.846803545029257, 0.076598227485371], \n\t\t\t[0.024252439353450, 0.024252439353450], [0.024252439353450, 0.951495121293100], [0.951495121293100, 0.024252439353450], \n\t\t\t[0.043146367216965, 0.043146367216965], [0.043146367216965, 0.913707265566071], [0.913707265566071, 0.043146367216965], \n\t\t\t[0.358911494940944, 0.632657968856636], [0.632657968856636, 0.008430536202420], [0.008430536202420, 0.358911494940944], [0.632657968856636, 0.358911494940944], [0.358911494940944, 0.008430536202420], [0.008430536202420, 0.632657968856636], \n\t\t\t[0.294402476751957, 0.574410971510855], [0.574410971510855, 0.131186551737188], [0.131186551737188, 0.294402476751957], [0.574410971510855, 0.294402476751957], [0.294402476751957, 0.131186551737188], [0.131186551737188, 0.574410971510855], \n\t\t\t[0.325017801641814, 0.624779046792512], [0.624779046792512, 0.050203151565675], [0.050203151565675, 0.325017801641814], [0.624779046792512, 0.325017801641814], [0.325017801641814, 0.050203151565675], [0.050203151565675, 0.624779046792512], \n\t\t\t[0.184737559666046, 0.748933176523037], [0.748933176523037, 0.066329263810916], [0.066329263810916, 0.184737559666046], [0.748933176523037, 0.184737559666046], [0.184737559666046, 0.066329263810916], [0.066329263810916, 0.748933176523037], \n\t\t\t[0.218796800013321, 0.769207005420443], [0.769207005420443, 0.011996194566236], [0.011996194566236, 0.218796800013321], [0.769207005420443, 0.218796800013321], [0.218796800013321, 0.011996194566236], [0.011996194566236, 0.769207005420443], \n\t\t\t[0.101179597136408, 0.883962302273467], [0.883962302273467, 0.014858100590125], [0.014858100590125, 0.101179597136408], [0.883962302273467, 0.101179597136408], [0.101179597136408, 0.014858100590125], [0.014858100590125, 0.883962302273467], \n\t\t\t[0.020874755282586, 1.014347260005363], [1.014347260005363, -0.035222015287949], [-0.035222015287949, 0.020874755282586], [1.014347260005363, 0.020874755282586], [0.020874755282586, -0.035222015287949], [-0.035222015287949, 1.014347260005363]\n\t\t]\n\t\tweight_vector = [\n\t\t\t0.030809939937647, \n\t\t\t0.009072436679404, 0.009072436679404, 0.009072436679404, \n\t\t\t0.018761316939594, 0.018761316939594, 0.018761316939594, \n\t\t\t0.019441097985477, 0.019441097985477, 0.019441097985477, \n\t\t\t0.027753948610810, 0.027753948610810, 0.027753948610810, \n\t\t\t0.032256225351457, 0.032256225351457, 0.032256225351457, \n\t\t\t0.025074032616922, 0.025074032616922, 0.025074032616922, \n\t\t\t0.015271927971832, 0.015271927971832, 0.015271927971832, \n\t\t\t0.006793922022963, 0.006793922022963, 0.006793922022963, \n\t\t\t-0.002223098729920, -0.002223098729920, -0.002223098729920, \n\t\t\t0.006331914076406, 0.006331914076406, 0.006331914076406, 0.006331914076406, 0.006331914076406, 0.006331914076406, \n\t\t\t0.027257538049138, 0.027257538049138, 0.027257538049138, 0.027257538049138, 0.027257538049138, 0.027257538049138, \n\t\t\t0.017676785649465, 
0.017676785649465, 0.017676785649465, 0.017676785649465, 0.017676785649465, 0.017676785649465, \n\t\t\t0.018379484638070, 0.018379484638070, 0.018379484638070, 0.018379484638070, 0.018379484638070, 0.018379484638070, \n\t\t\t0.008104732808192, 0.008104732808192, 0.008104732808192, 0.008104732808192, 0.008104732808192, 0.008104732808192, \n\t\t\t0.007634129070725, 0.007634129070725, 0.007634129070725, 0.007634129070725, 0.007634129070725, 0.007634129070725, \n\t\t\t0.000046187660794, 0.000046187660794, 0.000046187660794, 0.000046187660794, 0.000046187660794, 0.000046187660794\n\t\t]\n\t\n\telif (degree == 19):\n\t\tpoint_matrix = [\n\t\t\t[0.333333333333333, 0.333333333333333], \n\t\t\t[0.489609987073006, 0.489609987073006], [0.489609987073006, 0.020780025853987], [0.020780025853987, 0.489609987073006], \n\t\t\t[0.454536892697893, 0.454536892697893], [0.454536892697893, 0.090926214604215], [0.090926214604215, 0.454536892697893], \n\t\t\t[0.401416680649431, 0.401416680649431], [0.401416680649431, 0.197166638701138], [0.197166638701138, 0.401416680649431], \n\t\t\t[0.255551654403098, 0.255551654403098], [0.255551654403098, 0.488896691193805], [0.488896691193805, 0.255551654403098], \n\t\t\t[0.177077942152130, 0.177077942152130], [0.177077942152130, 0.645844115695741], [0.645844115695741, 0.177077942152130], \n\t\t\t[0.110061053227952, 0.110061053227952], [0.110061053227952, 0.779877893544096], [0.779877893544096, 0.110061053227952], \n\t\t\t[0.055528624251840, 0.055528624251840], [0.055528624251840, 0.888942751496321], [0.888942751496321, 0.055528624251840], \n\t\t\t[0.012621863777229, 0.012621863777229], [0.012621863777229, 0.974756272445543], [0.974756272445543, 0.012621863777229], \n\t\t\t[0.395754787356943, 0.600633794794645], [0.600633794794645, 0.00361141784841], [0.00361141784841, 0.395754787356943], [0.600633794794645, 0.395754787356943], [0.395754787356943, 0.00361141784841], [0.00361141784841, 0.600633794794645], \n\t\t\t[0.307929983880436, 0.557603261588784], [0.557603261588784, 0.134466754530780], [0.134466754530780, 0.307929983880436], [0.557603261588784, 0.307929983880436], [0.307929983880436, 0.134466754530780], [0.134466754530780, 0.557603261588784], \n\t\t\t[0.264566948406520, 0.720987025817365], [0.720987025817365, 0.014446025776115], [0.014446025776115, 0.264566948406520], [0.720987025817365, 0.264566948406520], [0.264566948406520, 0.014446025776115], [0.014446025776115, 0.720987025817365], \n\t\t\t[0.358539352205951, 0.594527068955871], [0.594527068955871, 0.046933578838178], [0.046933578838178, 0.358539352205951], [0.594527068955871, 0.358539352205951], [0.358539352205951, 0.046933578838178], [0.046933578838178, 0.594527068955871], \n\t\t\t[0.157807405968595, 0.839331473680839], [0.839331473680839, 0.002861120350567], [0.002861120350567, 0.157807405968595], [0.839331473680839, 0.157807405968595], [0.157807405968595, 0.002861120350567], [0.002861120350567, 0.839331473680839], \n\t\t\t[0.075050596975911, 0.701087978926173], [0.701087978926173, 0.223861424097916], [0.223861424097916, 0.075050596975911], [0.701087978926173, 0.075050596975911], [0.075050596975911, 0.223861424097916], [0.223861424097916, 0.701087978926173], \n\t\t\t[0.142421601113383, 0.822931324069857], [0.822931324069857, 0.034647074816760], [0.034647074816760, 0.142421601113383], [0.822931324069857, 0.142421601113383], [0.142421601113383, 0.034647074816760], [0.034647074816760, 0.822931324069857], \n\t\t\t[0.065494628082938, 0.924344252620784], [0.924344252620784, 0.010161119296278], [0.010161119296278, 
0.065494628082938], [0.924344252620784, 0.065494628082938], [0.065494628082938, 0.010161119296278], [0.010161119296278, 0.924344252620784]\n\t\t]\n\t\tweight_vector = [\n\t\t\t0.032906331388919, \n\t\t\t0.010330731891272, 0.010330731891272, 0.010330731891272, \n\t\t\t0.022387247263016, 0.022387247263016, 0.022387247263016, \n\t\t\t0.030266125869468, 0.030266125869468, 0.030266125869468, \n\t\t\t0.030490967802198, 0.030490967802198, 0.030490967802198, \n\t\t\t0.024159212741641, 0.024159212741641, 0.024159212741641, \n\t\t\t0.016050803586801, 0.016050803586801, 0.016050803586801, \n\t\t\t0.008084580261784, 0.008084580261784, 0.008084580261784, \n\t\t\t0.002079362027485, 0.002079362027485, 0.002079362027485, \n\t\t\t0.003884876904981, 0.003884876904981, 0.003884876904981, 0.003884876904981, 0.003884876904981, 0.003884876904981, \n\t\t\t0.025574160612022, 0.025574160612022, 0.025574160612022, 0.025574160612022, 0.025574160612022, 0.025574160612022, \n\t\t\t0.008880903573338, 0.008880903573338, 0.008880903573338, 0.008880903573338, 0.008880903573338, 0.008880903573338, \n\t\t\t0.016124546761731, 0.016124546761731, 0.016124546761731, 0.016124546761731, 0.016124546761731, 0.016124546761731, \n\t\t\t0.002491941817491, 0.002491941817491, 0.002491941817491, 0.002491941817491, 0.002491941817491, 0.002491941817491, \n\t\t\t0.018242840118951, 0.018242840118951, 0.018242840118951, 0.018242840118951, 0.018242840118951, 0.018242840118951, \n\t\t\t0.010258563736199, 0.010258563736199, 0.010258563736199, 0.010258563736199, 0.010258563736199, 0.010258563736199, \n\t\t\t0.003799928855302, 0.003799928855302, 0.003799928855302, 0.003799928855302, 0.003799928855302, 0.003799928855302\n\t\t]\n\t\n\telif (degree == 20):\n\t\tpoint_matrix = [\n\t\t\t[0.333333333333333, 0.333333333333333], \n\t\t\t[0.500950464352200, 0.500950464352200], [0.500950464352200, -0.001900928704400], [-0.001900928704400, 0.500950464352200], \n\t\t\t[0.488212957934729, 0.488212957934729], [0.488212957934729, 0.023574084130543], [0.023574084130543, 0.488212957934729], \n\t\t\t[0.455136681950283, 0.455136681950283], [0.455136681950283, 0.089726636099435], [0.089726636099435, 0.455136681950283], \n\t\t\t[0.401996259318289, 0.401996259318289], [0.401996259318289, 0.196007481363421], [0.196007481363421, 0.401996259318289], \n\t\t\t[0.255892909759421, 0.255892909759421], [0.255892909759421, 0.488214180481157], [0.488214180481157, 0.255892909759421], \n\t\t\t[0.176488255995106, 0.176488255995106], [0.176488255995106, 0.647023488009788], [0.647023488009788, 0.176488255995106], \n\t\t\t[0.104170855336758, 0.104170855336758], [0.104170855336758, 0.791658289326483], [0.791658289326483, 0.104170855336758], \n\t\t\t[0.053068963840930, 0.053068963840930], [0.053068963840930, 0.893862072318140], [0.893862072318140, 0.053068963840930], \n\t\t\t[0.041618715196029, 0.041618715196029], [0.041618715196029, 0.916762569607942], [0.916762569607942, 0.041618715196029], \n\t\t\t[0.011581921406822, 0.011581921406822], [0.011581921406822, 0.976836157186356], [0.976836157186356, 0.011581921406822], \n\t\t\t[0.344855770229001, 0.606402646106160], [0.606402646106160, 0.048741583664839], [0.048741583664839, 0.344855770229001], [0.606402646106160, 0.344855770229001], [0.344855770229001, 0.048741583664839], [0.048741583664839, 0.606402646106160], \n\t\t\t[0.377843269594854, 0.615842614456541], [0.615842614456541, 0.006314115948605], [0.006314115948605, 0.377843269594854], [0.615842614456541, 0.377843269594854], [0.377843269594854, 0.006314115948605], [0.006314115948605, 
0.615842614456541], \n\t\t\t[0.306635479062357, 0.559048000390295], [0.559048000390295, 0.134316520547348], [0.134316520547348, 0.306635479062357], [0.559048000390295, 0.306635479062357], [0.306635479062357, 0.134316520547348], [0.134316520547348, 0.559048000390295], \n\t\t\t[0.249419362774742, 0.736606743262866], [0.736606743262866, 0.013973893962392], [0.013973893962392, 0.249419362774742], [0.736606743262866, 0.249419362774742], [0.249419362774742, 0.013973893962392], [0.013973893962392, 0.736606743262866], \n\t\t\t[0.212775724802802, 0.711675142287434], [0.711675142287434, 0.075549132909764], [0.075549132909764, 0.212775724802802], [0.711675142287434, 0.212775724802802], [0.212775724802802, 0.075549132909764], [0.075549132909764, 0.711675142287434], \n\t\t\t[0.146965436053239, 0.861402717154987], [0.861402717154987, -0.008368153208227], [-0.008368153208227, 0.146965436053239], [0.861402717154987, 0.146965436053239], [0.146965436053239, -0.008368153208227], [-0.008368153208227, 0.861402717154987], \n\t\t\t[0.137726978828923, 0.835586957912363], [0.835586957912363, 0.026686063258714], [0.026686063258714, 0.137726978828923], [0.835586957912363, 0.137726978828923], [0.137726978828923, 0.026686063258714], [0.026686063258714, 0.835586957912363], \n\t\t\t[0.059696109149007, 0.929756171556853], [0.929756171556853, 0.010547719294141], [0.010547719294141, 0.059696109149007], [0.929756171556853, 0.059696109149007], [0.059696109149007, 0.010547719294141], [0.010547719294141, 0.929756171556853]\n\t\t]\n\t\tweight_vector = [\n\t\t\t0.033057055541624, \n\t\t\t0.000867019185663, 0.000867019185663, 0.000867019185663, \n\t\t\t0.011660052716448, 0.011660052716448, 0.011660052716448, \n\t\t\t0.022876936356421, 0.022876936356421, 0.022876936356421, \n\t\t\t0.030448982673938, 0.030448982673938, 0.030448982673938, \n\t\t\t0.030624891725355, 0.030624891725355, 0.030624891725355, \n\t\t\t0.024368057676800, 0.024368057676800, 0.024368057676800, \n\t\t\t0.015997432032024, 0.015997432032024, 0.015997432032024, \n\t\t\t0.007698301815602, 0.007698301815602, 0.007698301815602, \n\t\t\t-0.000632060497488, -0.000632060497488, -0.000632060497488, \n\t\t\t0.001751134301193, 0.001751134301193, 0.001751134301193, \n\t\t\t0.016465839189576, 0.016465839189576, 0.016465839189576, 0.016465839189576, 0.016465839189576, 0.016465839189576, \n\t\t\t0.004839033540485, 0.004839033540485, 0.004839033540485, 0.004839033540485, 0.004839033540485, 0.004839033540485, \n\t\t\t0.025804906534650, 0.025804906534650, 0.025804906534650, 0.025804906534650, 0.025804906534650, 0.025804906534650, \n\t\t\t0.008471091054441, 0.008471091054441, 0.008471091054441, 0.008471091054441, 0.008471091054441, 0.008471091054441, \n\t\t\t0.018354914106280, 0.018354914106280, 0.018354914106280, 0.018354914106280, 0.018354914106280, 0.018354914106280, \n\t\t\t0.000704404677908, 0.000704404677908, 0.000704404677908, 0.000704404677908, 0.000704404677908, 0.000704404677908, \n\t\t\t0.010112684927462, 0.010112684927462, 0.010112684927462, 0.010112684927462, 0.010112684927462, 0.010112684927462, \n\t\t\t0.003573909385950, 0.003573909385950, 0.003573909385950, 0.003573909385950, 0.003573909385950, 0.003573909385950\n\t\t]\n\t\n\telse:\n\t\traise Exception(\"Triangle quadrature with degree > 20 is not supported for now.\")\n\t\n\tpoint_matrix = np.array(point_matrix, dtype=np.float64)\n\tweight_vector = np.array(weight_vector, dtype=np.float64)\n\t\n\treturn point_matrix, weight_vector", "def out_degree(self, vertices=None, labels=False):\n if vertices in self:\n 
return self._backend.out_degree(vertices)\n elif labels:\n return {v:d for v, d in self.out_degree_iterator(vertices, labels=labels)}\n else:\n return list(self.out_degree_iterator(vertices, labels=labels))", "def degree_node(g, node):\n return len(g[node])", "def get_adj_and_degrees(num_nodes, triplets):\n adj_list = [[] for _ in range(num_nodes)]\n for i, triplet in enumerate(triplets):\n adj_list[triplet[0]].append([i, triplet[2]])\n adj_list[triplet[2]].append([i, triplet[0]])\n\n degrees = np.array([len(a) for a in adj_list])\n adj_list = [np.array(a) for a in adj_list]\n return adj_list, degrees", "def compute_in_degrees(digraph):\n in_degrees = {}\n keys = digraph.keys()\n for dummy_key in keys:\n edges = digraph[dummy_key]\n for dummy_edges in edges:\n if in_degrees.has_key(dummy_edges) == False:\n in_degrees[dummy_edges] = 1\n else:\n in_degrees[dummy_edges] += 1\n \n for dummy_key in keys:\n if in_degrees.has_key(dummy_key) == False:\n in_degrees[dummy_key] = 0\n return in_degrees", "def permutations(graph1: list, graph2: list, degrees: tuple):\n degrees1 = degrees[0]\n degrees2 = degrees[1]\n check1 = []\n check2 = []\n for index, _ in enumerate(degrees1):\n degree = degrees1[index]\n temp = []\n for vertex, _ in enumerate(graph1[index]):\n if graph1[index][vertex] == 1:\n temp.append(degrees1[vertex])\n check1.append((degree, tuple(sorted(temp))))\n\n for index, _ in enumerate(degrees2):\n degree = degrees2[index]\n temp = []\n for vertex in range(len(graph2[index])):\n if graph2[index][vertex] == 1:\n temp.append(degrees2[vertex])\n check2.append((degree, tuple(sorted(temp))))\n\n return len(set(check1 + check2)) == len(set(check1))", "def degree_v(self):\n return self._degree_v", "def compute_in_degrees (digraph) :\n in_degree = dict()\n\n # initialize the in-degree of each node with 0s\n for key in digraph :\n in_degree[key] = 0\n\n for node in digraph :\n for head_node in digraph[node] :\n in_degree[head_node]+=1\n\n return in_degree", "def is_dateline(vertices):\n vertices = np.asarray(vertices, dtype=\"d\")\n longitudes = vertices[:, 0]\n return np.abs(longitudes.min(axis=0) - longitudes.max(axis=0)) > 180", "def compute_in_degrees(digraph):\n\n\tin_degrees = {}\n\n\tfor node in digraph:\n\t\tin_degrees[node] = 0\n\n\tfor node in digraph:\n\t\t# print \"node:\", node\n\t\tfor element in digraph[node]:\n\t\t\t# print \"element:\", element\n\t\t\tif element in digraph:\n\t\t\t\t# print \"element in digraph:\", element, digraph, element in digraph\n\t\t\t\tin_degrees[element] += 1\n\n\t# print \"in_degrees:\", in_degrees\n\treturn in_degrees", "def draw_triangle(vertices, shape):\n # add 0.5 to account for fact that pixel centers are at (0.5, 0.5)\n barycenters = barycentric_coords(vertices, numpy.indices(shape) + 0.5)\n return (barycenters >= 0).all(axis=0)", "def _vertices_are_equal(\n vertices1: List[np.ndarray], vertices2: List[np.ndarray]\n) -> bool:\n if len(vertices1) != len(vertices2):\n return False\n diff = vertices1 - vertices2\n if np.abs(np.max(diff)) < ways_are_equal_tolerance:\n return True\n return False", "def compute_in_degrees(digraph):\n\t# print \"digraph:\", digraph\n\n\tin_degrees = {}\n\n\tfor node in digraph:\n\t\tin_degrees[node] = 0\n\n\tfor node in digraph:\n\t\t# print \"node:\", node\n\t\tfor element in digraph[node]:\n\t\t\t# print \"element:\", element\n\t\t\tif element in digraph:\n\t\t\t\t# print \"element in digraph:\", element, digraph, element in digraph\n\t\t\t\tin_degrees[element] += 1\n\n\t# print \"in_degrees:\", in_degrees\n\treturn 
in_degrees", "def calc_degree(graph_rdd):\n all_degree = graph_rdd \\\n .map(swap) \\\n .union(graph_rdd) \\\n .map(lambda (x, y): (x, 1)) \\\n .reduceByKey(add, numPartitions=40)\n return all_degree", "def check_nverts(sections):\n return _check_nentries(sections, \"NVERTS\", \"VERTEX\")", "def num_vertices(graph1: list, graph2: list):\n if len(graph1[0]) != len(graph2[0]):\n return False\n return True", "def validate(self):\n invalid = []\n self.load()\n\n if self.graph.num_vertices() < 1:\n return 'Graph is invalid, no vertices'\n\n if self.graph.num_edges() < 1:\n return 'Graph is invalid, no edges'\n\n for v in self.graph.vertices():\n if(v.in_degree() + v.out_degree() == 0):\n invalid.append(v)\n\n if len(invalid) != 0:\n return invalid\n else:\n return 'Graph is valid'", "def are_vertices_clockwise(self,line):\r\n \r\n import numpy as np\r\n \r\n signed_area = 0\r\n for idx in range(line.shape[0]):\r\n \r\n x1 = line[idx,0]\r\n y1 = line[idx,1]\r\n if idx == line.shape[0]-1:\r\n x2 = line[0,0]\r\n y2 = line[0,1]\r\n else:\r\n x2 = line[idx+1,0]\r\n y2 = line[idx+1,1]\r\n \r\n signed_area += (x1 * y2 - x2 * y1)\r\n \r\n return (np.sign(signed_area) == -1.)", "def are_vertices_clockwise(self,line):\r\n \r\n import numpy as np\r\n \r\n signed_area = 0\r\n for idx in range(line.shape[0]):\r\n \r\n x1 = line[idx,0]\r\n y1 = line[idx,1]\r\n if idx == line.shape[0]-1:\r\n x2 = line[0,0]\r\n y2 = line[0,1]\r\n else:\r\n x2 = line[idx+1,0]\r\n y2 = line[idx+1,1]\r\n \r\n signed_area += (x1 * y2 - x2 * y1)\r\n \r\n return (np.sign(signed_area) == -1.)", "def in_degree(self, node):\n \n try:\n return len(self.prefix[node])\n \n except:\n print(\"ERROR: The node does not exist.\")", "def _sector_orientation(self, vertices):\n if not vertices[0] == vertices[-1]:\n vertices.append(vertices[0])\n xy = np.transpose(np.array(vertices))\n x, y = xy[0], xy[1]\n return np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)) > 0, vertices", "def Test_Degree(Graph_MD):\n \n Degree = M_Graph.get_Degree(Graph_MD)\n KPS = float(sum(Degree)) / float(len(Degree))\n\n return KPS", "def get_num_vertices(triangles):\n return numpy.amax(numpy.reshape(triangles, -1)) + 1", "def in_degree_distribution(graph):\n in_degrees = collections.Counter()\n for node in graph.nodes(data=True):\n in_degrees[graph.in_degree(node[0])] += 1\n\n in_degrees = sorted(in_degrees.items(), key=lambda x: x[0])\n\n print(in_degrees)", "def is_edge_connected(num_vertices, adj_dict, edges):\n if not num_vertices or not adj_dict:\n return True\n check = { i:False for i in adj_dict.keys() }\n check_edges = [False for _ in range(len(edges))]\n first_vertex = list(adj_dict.keys())[0]\n is_edge_connected_dfs(adj_dict, first_vertex, edges, check, check_edges)\n return not False in check_edges", "def degree(graph, nodes=None, weight=None):\n\n if nodes is None:\n nodes = graph.nodes\n else:\n not_in_graph = [nid for nid in nodes if nid not in graph.nodes]\n if not_in_graph:\n logger.error('Nodes {0} not in graph'.format(not_in_graph))\n\n results = {}\n if weight:\n for node in nodes:\n results[node] = sum([graph.edges[(node, n)].get(weight, 1) for n in graph.adjacency[node]])\n if node in graph.adjacency[node]:\n results[node] += graph.edges[(node, node)].get(weight, 1)\n else:\n for node in nodes:\n results[node] = len(graph.adjacency[node])\n if node in graph.adjacency[node]:\n results[node] += 1\n\n return results", "def in_degree_distribution(digraph):\n # find in_degrees\n in_degree = compute_in_degrees(digraph)\n # initialize dictionary for 
degree distribution\n degree_distribution = {}\n # consider each vertex\n for vertex in in_degree:\n # update degree_distribution\n if in_degree[vertex] in degree_distribution:\n degree_distribution[in_degree[vertex]] += 1\n else:\n degree_distribution[in_degree[vertex]] = 1\n return degree_distribution", "def check_degrees(degrees):\n type_check(degrees, (numbers.Number, list, tuple), \"degrees\")\n if isinstance(degrees, numbers.Number):\n check_value(degrees, (0, float(\"inf\")), \"degrees\")\n elif isinstance(degrees, (list, tuple)):\n if len(degrees) == 2:\n type_check_list(degrees, (numbers.Number,), \"degrees\")\n if degrees[0] > degrees[1]:\n raise ValueError(\"degrees should be in (min,max) format. Got (max,min).\")\n else:\n raise TypeError(\"If degrees is a sequence, the length must be 2.\")", "def _degree_verts(g):\n n = len(g)\n # degs = map(len, g) is a tiny bit slower than the following line\n degs = [ len(g[v]) for v in range(n) ]\n dv = dict()\n for v in range(n):\n degnbr = [0] * n\n for w in g[v]:\n degnbr[degs[w]] += 1\n # Could use defaultdict below, but it does not seem to be faster\n dv.setdefault(tuple(degnbr), []).append(v)\n return dv", "def compute_in_degrees(digraph):\n num_degree = {}\n for dummy_node in digraph:\n num_degree[dummy_node] = 0\n for key in digraph:\n for node in digraph[key]:\n num_degree[node] += 1\n return num_degree", "def verify_polynomial(points, degree):\n used_points = points[0:degree + 1]\n\n for i in range(degree + 1, len(points) + 1):\n if lagrange_interpolation(i, used_points) != points[i - 1][1]:\n return False\n\n return True", "def degree(self,v,outgoing=True):\n adj = self._outgoing if outgoing else self._incoming\n return len(adj[v])", "def num_vertices(self):\r\n return len(self.__graph_dict.keys())", "def is_regular(G):\n\n deg = None\n\n for node in G:\n d = G.degree(node)\n if deg is None:\n deg = d\n if d != deg:\n return False\n\n return True", "def is_connected(self):\n \n # All the vertices in the graph\n vertices = set(self.vertices())\n \n # Take a random vertex to start the search from\n vertex_search_start = self._edges.keys()[0]\n vertices_found = set(self.DFS(vertex_search_start))\n \n return vertices == vertices_found", "def getDegree(self, node):\n\n return len(self.graph[node])", "def validate_clockwise_points(points):\n \n if len(points) != 8:\n raise Exception(\"Points list not valid.\" + str(len(points)))\n \n point = [\n [int(points[0]) , int(points[1])],\n [int(points[2]) , int(points[3])],\n [int(points[4]) , int(points[5])],\n [int(points[6]) , int(points[7])]\n ]\n edge = [\n ( point[1][0] - point[0][0])*( point[1][1] + point[0][1]),\n ( point[2][0] - point[1][0])*( point[2][1] + point[1][1]),\n ( point[3][0] - point[2][0])*( point[3][1] + point[2][1]),\n ( point[0][0] - point[3][0])*( point[0][1] + point[3][1])\n ]\n \n summatory = edge[0] + edge[1] + edge[2] + edge[3];\n return summatory <= 0", "def compute_in_degrees(digraph):\n print \"processing In-Degrees\" # Status indicator for long processing times\n xgraph = {} # create a blank dict\n for node in iter(digraph.viewkeys()): # creates an iter of just the keys in the dict. increase performance for larger data sets maybe? IE only shows the keys\n xgraph[node] = 0 # from the list of keys (nodes) creates a new keys for a new dict\n for edges in iter(digraph.viewvalues()): # creates an iter of just the values in the dict. increase performance for larger data sets maybe? 
IE only shows the values\n if node in edges: # looks for the nodes in the edges (from dict values)\n xgraph[node] += 1 # if node found increase by 1\n #print digraph.itervalues()\n\n return xgraph # returns a new dict with nodes as keys and the value is how many in degrees", "def compute_in_degrees(digraph):\n print \"processing In-Degrees\" # Status indicator for long processing times\n xgraph = {} # create a blank dict\n for node in iter(digraph.viewkeys()): # creates an iter of just the keys in the dict. increase performance for larger data sets maybe? IE only shows the keys\n xgraph[node] = 0 # from the list of keys (nodes) creates a new keys for a new dict\n for edges in iter(digraph.viewvalues()): # creates an iter of just the values in the dict. increase performance for larger data sets maybe? IE only shows the values\n if node in edges: # looks for the nodes in the edges (from dict values)\n xgraph[node] += 1 # if node found increase by 1\n #print digraph.itervalues()\n\n return xgraph # returns a new dict with nodes as keys and the value is how many in degrees", "def IsEulerGraph(self):\n\n for node in self.nodes:\n if ((len(node.neighbours) % 2) == 1) or (len(node.neighbours) == 0):\n return False\n return True", "def has_vertex(t, tri, vertex):\n for i in range(3):\n if t[tri][i] == vertex:\n return True\n return False", "def test_degree(poly_equation):\n equation = poly_equation\n A = 1e10\n degree = np.log(equation.flux(A)/equation.flux(1))/np.log(A)\n npt.assert_allclose(equation.degree(), degree)", "def _get_odd_degree_vertices(graph):\n odd_degree_vertices = set()\n for index, row in enumerate(graph):\n if len(np.nonzero(row)[0]) % 2 != 0:\n odd_degree_vertices.add(index)\n return odd_degree_vertices", "def _get_odd_degree_vertices(graph):\n odd_degree_vertices = set()\n for index, row in enumerate(graph):\n if len(np.nonzero(row)[0]) % 2 != 0:\n odd_degree_vertices.add(index)\n return odd_degree_vertices", "def num_vertices(self):\n return len(self.vertices)", "def num_vertices(self):\n return len(self.vertices)", "def contains_vertex(self, vertex_name: n):\n return vertex_name in self._graph.keys()", "def test_are_vertices_adjacent_yes_ascending(self):\n\n self.assertTrue(skeleton_lines._are_vertices_adjacent(\n vertex_indices=VERTEX_INDICES_ADJACENT,\n num_vertices_in_polygon=NUM_VERTICES_FOR_ADJACENCY_TEST))", "def is_eulerian(A):\r\n assert A is not None\r\n if is_multiobjects(A):\r\n return all(is_eulerian(adj) for adj in A)\r\n\r\n if not is_connected(A):\r\n return False\r\n\r\n deg = degree(A)\r\n if isinstance(deg, tuple):\r\n # Directed graph\r\n # Every node must have equal in degree and out degree and the\r\n # graph must be strongly connected\r\n ind, outd = deg\r\n return np.all(ind == outd)\r\n # An undirected Eulerian graph has no vertices of odd degree and\r\n # must be connected.\r\n return np.all(deg % 2 == 0)", "def vertex_multidegree(breakpoint_graph, vertex):\n return len(list(breakpoint_graph.get_edges_by_vertex(vertex)))", "def vertices(self, *args, **kwargs) -> Any:\n pass", "def _degree_has_changed(first, second):\n return len(set(first) ^ set(second)) != 0", "def obtener_cantidad_vertices(self):\n return len(self.vertices.keys())", "def RequiredDecompositions(degree):\n if degree == 0:\n return 1\n elif degree == 2:\n return 2\n else:\n raise NotImplementedError, \"Direct subtractions not available for graphs with degree=%s\" % degree", "def non_rotated_vertices(self):\n v0 = [self.pos.x - self.width / 2, self.pos.y - self.height / 2]\n v1 = 
[self.pos.x + self.width / 2, self.pos.y - self.height / 2]\n v2 = [self.pos.x + self.width / 2, self.pos.y + self.height / 2]\n v3 = [self.pos.x - self.width / 2, self.pos.y + self.height / 2]\n return v0, v1, v2, v3", "def add_vertex_edge(self, vertices):\n if len(vertices) < 2:\n raise Exception('Cannot have a single vertex')\n self.add_vertex(vertices[0])\n length_array = len(vertices)\n for iterator in range(1, length_array):\n num = vertices[iterator]\n is_number = False\n try:\n int(num)\n is_number = True\n except ValueError:\n pass\n if is_number:\n self.add_edge(vertices[0], num)", "def degree_analytically(self, A):\n if len(A) == 1:\n return self.L.size() - 1 - self.is_loop_vertex(A[0])\n if len(A) == 2:\n if A[0] == A[1]:\n raise ValueError(\"The vertices in A should be distinct, but {} appears twice\".format(A[0]))\n if not self.is_generic(A):\n return 0\n vertex_count = (self.L.size() - 1) // (self.K.size() - 1) - int(self.pair_type(A[0], A[1]) == self.K.one())\n if self.is_edge(A[0], A[1]):\n for v in A:\n vertex_count -= int(self.is_loop_vertex(v))\n return vertex_count\n if len(A) > 2:\n raise ValueError(\"The degree of {}-vertex sets is unknown (yet)\".format(len(A)))\n # TODO: It is not.", "def has_isolated_vertices(self):\n return self.properties.isolated_vertices", "def get_vertices(self):\n return self.vertList.keys()", "def InferPolynomialDegree(self):\n\n assert self.element_type is not None\n assert self.elements is not None\n\n if self.degree is not None:\n if isinstance(self.degree,np.ndarray):\n self.degree = np.asscalar(self.degree)\n i = self.degree\n if self.element_type == \"tet\" and (i+1)*(i+2)*(i+3)/6==self.elements.shape[1]:\n return self.degree\n if self.element_type == \"tri\" and (i+1)*(i+2)/2==self.elements.shape[1]:\n return self.degree\n\n\n p = 0\n if self.element_type == \"tet\":\n for i in range(100):\n if (i+1)*(i+2)*(i+3)/6==self.elements.shape[1]:\n p = i\n break\n\n elif self.element_type == \"tri\":\n for i in range(100):\n if (i+1)*(i+2)/2==self.elements.shape[1]:\n p = i\n break\n\n elif self.element_type == \"hex\":\n for i in range(100):\n if int((i+1)**3)==self.elements.shape[1]:\n p = i\n break\n\n elif self.element_type == \"quad\":\n for i in range(100):\n if int((i+1)**2)==self.elements.shape[1]:\n p = i\n break\n\n elif self.element_type == \"line\":\n for i in range(100):\n if int(i+1)==self.elements.shape[1]:\n p = i\n break\n\n elif self.element_type == \"pent\":\n if 5==self.elements.shape[1]:\n p = 1\n else:\n raise NotImplementedError(\"High order pentagonal elements are not supported yet\")\n\n elif self.element_type == \"point\":\n p = 1\n\n self.degree = p\n return p", "def check(degree, knot_vector, num_ctrlpts):\n try:\n if knot_vector is None or len(knot_vector) == 0:\n raise ValueError(\"Input knot vector cannot be empty\")\n except TypeError as e:\n print(\"An error occurred: {}\".format(e.args[-1]))\n raise TypeError(\"Knot vector must be a list or tuple\")\n except Exception:\n raise\n\n # Check the formula; m = p + n + 1\n if len(knot_vector) != degree + num_ctrlpts + 1:\n return False\n\n # Check ascending order\n prev_knot = knot_vector[0]\n for knot in knot_vector:\n if prev_knot > knot:\n return False\n prev_knot = knot\n\n return True", "def vertices(self):\n\n if self._faces is None:\n if self._vertices is None:\n return None\n self.triangulate()\n return self._vertices", "def get_vertices(self):\n if self.vert_list.keys() != None:\n return self.vert_list.keys()\n raise KeyError(\"Vertex not found\")", "def 
edges(adj_mat, vertices):\n return [(i,j) for i,j in\n vertices if (i < j and adj_mat[i][j] == 1)]", "def verticesEqual(self, v1, v2, eps=1e-8):\n if abs(v1[0] - v2[0]) > eps:\n return False\n if abs(v1[1] - v2[1]) > eps:\n return False\n if abs(v1[2] - v2[2]) > eps:\n return False\n return True", "def comp(p1,p2,adj,perm):\r\n #degree of p1\r\n f1 = 0\r\n #degree of p2\r\n f2 = 0\r\n \r\n #compute the degrees\r\n for i in range(m):\r\n if (V[p1],V[i]) in adj or (V[i],V[p1]) in adj:\r\n f1 += 1\r\n\r\n for i in range(m):\r\n if (V[p2],V[i]) in adj or (V[i],V[p2]) in adj:\r\n f2 += 1\r\n \r\n if f2 > f1:\r\n return True\r\n else:\r\n return False", "def degree(self):\n return self._deg", "def test_are_vertices_adjacent_no_ascending(self):\n\n self.assertFalse(skeleton_lines._are_vertices_adjacent(\n vertex_indices=VERTEX_INDICES_NON_ADJACENT,\n num_vertices_in_polygon=NUM_VERTICES_FOR_ADJACENCY_TEST))", "def get_deg(nodes = 10000,edges=None):\n \n D= nx.MultiDiGraph()\n D.add_nodes_from(np.arange(1,nodes))\n D.add_edges_from(edges);\n return np.array(D.in_degree())[:,1],np.array(D.out_degree())[:,1],D" ]
[ "0.70981395", "0.6648923", "0.66187066", "0.6453982", "0.6208992", "0.6075277", "0.6049173", "0.6037286", "0.59577584", "0.5948439", "0.59199977", "0.5896408", "0.58028036", "0.580261", "0.57468504", "0.57298976", "0.5702665", "0.5702665", "0.56859505", "0.56500363", "0.56339896", "0.56014055", "0.55758584", "0.5567647", "0.5564437", "0.5562019", "0.5549129", "0.55312765", "0.55051035", "0.5477032", "0.547292", "0.5424315", "0.5416411", "0.5407305", "0.5404", "0.53995997", "0.5367521", "0.5349395", "0.53442633", "0.5341615", "0.5336092", "0.5335423", "0.5332185", "0.5331523", "0.5325165", "0.532225", "0.5305627", "0.5292461", "0.5286139", "0.5286139", "0.52684927", "0.52683216", "0.5267548", "0.5261717", "0.5257677", "0.5237707", "0.52331626", "0.52062464", "0.5199686", "0.51955914", "0.5187597", "0.518462", "0.5170366", "0.51651406", "0.51598287", "0.5148115", "0.51443493", "0.5138674", "0.51342016", "0.51342016", "0.51280206", "0.51133525", "0.5112145", "0.5106941", "0.5106941", "0.50984555", "0.50984555", "0.5097344", "0.50961334", "0.5076788", "0.5071003", "0.5060202", "0.505891", "0.5058901", "0.5052372", "0.50504184", "0.5045094", "0.50408643", "0.5029912", "0.50255346", "0.50188774", "0.5013631", "0.4999913", "0.49916095", "0.4989321", "0.49880806", "0.49878117", "0.49858823", "0.49849907", "0.49816906" ]
0.6969743
1
Checks if there can be bijection between two graphs
def permutations(graph1: list, graph2: list, degrees: tuple): degrees1 = degrees[0] degrees2 = degrees[1] check1 = [] check2 = [] for index, _ in enumerate(degrees1): degree = degrees1[index] temp = [] for vertex, _ in enumerate(graph1[index]): if graph1[index][vertex] == 1: temp.append(degrees1[vertex]) check1.append((degree, tuple(sorted(temp)))) for index, _ in enumerate(degrees2): degree = degrees2[index] temp = [] for vertex in range(len(graph2[index])): if graph2[index][vertex] == 1: temp.append(degrees2[vertex]) check2.append((degree, tuple(sorted(temp)))) return len(set(check1 + check2)) == len(set(check1))
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def is_bipartite(G):\n try:\n bipartite_color(G)\n return True\n except:\n return False", "def compare_graphs(self):\n\t\tpass", "def graph_issuperset(graph1, graph2):\n\n # Validate if all arguments are Graphs\n check_graphbase_instance(graph1, graph2)\n\n return graph1.nodes.issuperset(graph2.nodes) and graph1.edges.issuperset(graph2.edges)", "def is_complete(self, A, B):\n return all(self.is_edge(v, w) for v in A for w in B)", "def graph_issubset(graph1, graph2):\n\n # Validate if all arguments are Graphs\n check_graphbase_instance(graph1, graph2)\n\n return graph1.nodes.issubset(graph2.nodes) and graph1.edges.issubset(graph2.edges)", "def is_bipartite(G):\n return G.is_bipartite()", "def is_compatible(self, other):\n return self.intervals == other.intervals and\\\n self.nonderived_directions == other.nonderived_directions", "def is_bijective(self):\n return self.is_injective() and self.is_surjective()", "def check_for_isomorphism(graph1: list, graph2: list, directed=False) -> bool:\n matrix1 = get_adjancy_matrix(graph1, directed)\n matrix2 = get_adjancy_matrix(graph2, directed)\n\n if num_vertices(matrix1, matrix2):\n if num_edges(matrix1, matrix2):\n degrees = vertices_degree(matrix1, matrix2)\n if degrees[0]:\n return permutations(matrix1, matrix2, degrees[1:])\n return False", "def disjoint(self, other): # -> bool:\n ...", "def isomorphic(graph1, graph2):\r\n\r\n gd1 = _TripleCanonicalizer(graph1).to_hash()\r\n gd2 = _TripleCanonicalizer(graph2).to_hash()\r\n return gd1 == gd2", "def preserved_in(self, bijection: BijectionType) -> bool:\n return all(self.func(k) == self.func(v) for k, v in bijection.items())", "def isdisjoint(self, other):\n self._check_title(other)\n\n # sort by top-left vertex\n if self.bounds > other.bounds:\n i = self\n self = other\n other = i\n\n return (self.max_col, self.max_row) < (other.min_col, other.max_row)", "def encompasses_broadcastable(b1, b2):\r\n if len(b1) < len(b2):\r\n return False\r\n b1 = b1[-len(b2):]\r\n return not any(v1 and not v2 for v1, v2 in zip(b1, b2))", "def is_boundary_edge(a, b, bdy_edges):\n for edge in bdy_edges:\n a0, b0 = edge\n if a == a0 and b == b0:\n return True\n return False", "def isconform(self, other):\n return _image.image_isconform(self, other)", "def is_allow(self, src: Vertex, dst: Vertex) -> bool:\n if self.link == Link.NONE:\n return False\n elif self.link == Link.BI:\n return src in self.vertices or dst in self.vertices\n return src == self.src and dst == self.dst", "def can_combine(self, first, second):\n # Need to check out of order issues as\n # blocks are sorted by where they start in a\n mismatch_ab = (first.a_end <= second.a\n and second.b_end <= first.b)\n mismatch_ba = (second.a_end <= first.a\n and first.b_end <= second.b)\n out_of_order = mismatch_ab or mismatch_ba\n return not out_of_order and self.jump_gap(second)", "def testIsBiconnected(self):\n self.assertEqual(is_biconnected(self.G1), True)\n self.assertEqual(is_biconnected(self.G2), False)", "def equivalent(kls, first, second):\n if first.empty() and second.empty():\n return True\n elif first.vertices.shape[0] != second.vertices.shape[0]:\n return False\n elif first.edges.shape[0] != second.edges.shape[0]:\n return False\n\n EPSILON = 1e-7\n\n vertex1, ct1 = np.unique(first.vertices, axis=0, return_counts=True)\n vertex2, ct2 = np.unique(second.vertices, axis=0, return_counts=True)\n \n vertex_match = np.all(np.abs(vertex1 - vertex2) < EPSILON)\n ct_match = np.all(ct1 == ct2)\n if not (vertex_match and ct_match):\n return False\n\n g1 = 
nx.Graph()\n g1.add_edges_from(first.edges)\n g2 = nx.Graph()\n g2.add_edges_from(second.edges)\n edges_match = nx.is_isomorphic(g1, g2)\n del g1 \n del g2\n\n if not edges_match:\n return False\n\n second_verts = {}\n for i, vert in enumerate(second.vertices):\n second_verts[tuple(vert)] = i\n \n attrs = [ attr['id'] for attr in first.extra_attributes ]\n for attr in attrs:\n buf1 = getattr(first, attr)\n buf2 = getattr(second, attr)\n if len(buf1) != len(buf2):\n return False\n\n for i in range(len(buf1)):\n i2 = second_verts[tuple(first.vertices[i])]\n if buf1[i] != buf2[i2]:\n return False\n\n return True", "def equiv(subdiagram1, subdiagram2):\n # TODO: Make sure arguments are the right type\n # TODO: Make this work for subdiagrams of length >= 1\n # subdiagrams are not equivalent if they have different numbers of crossings\n # print \"sub1\\t\", subdiagram1, len(subdiagram1[0])\n # print \"sub2\\t\", subdiagram2, len(subdiagram2[0])\n if len(subdiagram1[0]) != len(subdiagram2[0]):\n return False\n # look for a match\n for i in range(len(subdiagram1[0])-1):\n crossing1 = subdiagram1[0][i]\n typeMatch = False\n for j in range(len(subdiagram2[0])-1):\n crossing2 = subdiagram2[0][j]\n print \"\\tc1 \",crossing1\n print \"\\tc2 \",crossing2\n # check for same crossing type\n # TODO: check for empty crossing\n if len(crossing1) == 5 and len(crossing2) == 5:\n if crossing1[0] == crossing2[0]:\n print \" :)\"\n typeMatch = True\n \n\n return True", "def is_multigraph(self):\n # TO DO: Call coloring algorithm\n return False", "def commutes_with(self, other):\n a = self.array_form\n b = other.array_form\n if len(a) != len(b):\n raise ValueError(\"The number of elements in the permutations \\\ndon\\'t match.\")\n for i in range(len(a)-1):\n if a[b[i]] != b[a[i]]:\n return False\n return True", "def is_bipartite(self):\n # TO DO: Call coloring algorithm\n return False", "def _has_only_dropped_degrees(first, second):\n has_dropped = len(set(first) - set(second)) > 0\n has_added = len(set(second) - set(first)) > 0\n return (has_dropped and not has_added)", "def test_not_strongly_connected(self):\n G = DiGraph([(0, 1), (0, 2), (1, 2)])\n assert_false(is_strongly_connected(G))", "def is_multigraph(self):\n # TO DO: Call coloring algorithm\n return True", "def is_collapse_legal(mesh, u, v, allow_boundary=False):\r\n # collapsing of boundary vertices is currently not supported\r\n # change this to `and` to support collapsing to or from the boundary\r\n if not allow_boundary:\r\n if mesh.is_vertex_on_boundary(v) or mesh.is_vertex_on_boundary(u):\r\n return False\r\n\r\n # check for contained faces\r\n for nbr in mesh.halfedge[u]:\r\n if nbr in mesh.halfedge[v]:\r\n # check if U > V > NBR is a face\r\n fkey = mesh.halfedge[u][v]\r\n if fkey != mesh.halfedge[v][nbr] or fkey != mesh.halfedge[nbr][u]:\r\n # check if V > U > NBR is a face\r\n fkey = mesh.halfedge[v][u]\r\n if fkey != mesh.halfedge[u][nbr] or fkey != mesh.halfedge[nbr][v]:\r\n return False\r\n\r\n for nbr in mesh.halfedge[v]:\r\n if nbr in mesh.halfedge[u]:\r\n # check if U > V > NBR is a face\r\n fkey = mesh.halfedge[u][v]\r\n if fkey != mesh.halfedge[v][nbr] or fkey != mesh.halfedge[nbr][u]:\r\n # check if V > U > NBR is a face\r\n fkey = mesh.halfedge[v][u]\r\n if fkey != mesh.halfedge[u][nbr] or fkey != mesh.halfedge[nbr][v]:\r\n return False\r\n\r\n return True", "def is_multigraph(G):\n return G.is_multigraph()", "def haveNoSameEdges(seg1,seg2,segmentsMeta):\n seg1Edges = segmentsMeta['edges'][seg1]\n seg2Edges = 
segmentsMeta['edges'][seg2]\n return not any(a==b for a in seg1Edges for b in seg2Edges)", "def comp(p1,p2,adj,perm):\r\n #degree of p1\r\n f1 = 0\r\n #degree of p2\r\n f2 = 0\r\n \r\n #compute the degrees\r\n for i in range(m):\r\n if (V[p1],V[i]) in adj or (V[i],V[p1]) in adj:\r\n f1 += 1\r\n\r\n for i in range(m):\r\n if (V[p2],V[i]) in adj or (V[i],V[p2]) in adj:\r\n f2 += 1\r\n \r\n if f2 > f1:\r\n return True\r\n else:\r\n return False", "def are_compatible_imgs(one_img, another_img):\n return have_same_shapes(one_img, another_img)", "def permissible(e1, e2):\n return e1[1] == e2[0] and \\\n total_edge_length(e1, e2) < maximum_distance and \\\n total_edge_angle(e1, e2) < maximum_angle_delta", "def sameGrid(A, B):\n if np.isscalar(A):\n # A is a scalar \n if not np.isscalar(B):\n return False\n else:\n return (A == B)\n else:\n # Assume two GriddedBasis objects \n if A.nd != B.nd or A.ng != B.ng:\n return False \n elif not np.all(A.gridpts == B.gridpts):\n return False \n else:\n return True", "def isUndirected(G):\n for v in G:\n if v in G[v]:\n return False\n for w in G[v]:\n if v not in G[w]:\n return False\n return True", "def b3_correctness(el_a, el_b, system_el2kbid, gold_el2kbid):\n correct = False\n\n if(inSameSet(el_a, el_b, system_el2kbid) and \n inSameSet(el_a, el_b, gold_el2kbid) and\n sameLinking(el_a, el_b, system_el2kbid, gold_el2kbid) #THIS CONDITION DEPARTS FROM THE ORIGINAL BCUBED (extesion for the Entity Linking problem)\n ):\n correct = True\n\n return correct", "def is_isomorphic(A,B):\n return A.cardinality == B.cardinality and is_subalgebra(A,B)", "def is_bipartite(self):\n return True", "def is_surjective(self):\n # Testing equality of free modules over PIDs is unreliable\n # see Trac #11579 for explanation and status\n # We test if image equals codomain with two inclusions\n # reverse inclusion of below is trivially true\n return self.codomain().is_submodule(self.image())", "def can_broadcast(shape1, shape2) -> bool:\n return(\n reduce(\n lambda a, b: a and b,\n starmap(\n lambda a, b: (a == b or (a == 1 or b == 1)),\n zip_longest(shape1, shape2, fillvalue=1)\n )\n )\n )", "def num_vertices(graph1: list, graph2: list):\n if len(graph1[0]) != len(graph2[0]):\n return False\n return True", "def parallel(self, other):\n return other.angle == self.angle", "def parallel(self, other):\n return other.angle == self.angle", "def victory_checker() -> bool:\r\n conflict_check()\r\n for x in range(shape):\r\n for y in range(shape):\r\n if conflict_space[x, y] != 0:\r\n return False\r\n if separation_crawler(False):\r\n return False\r\n return True", "def cross(form1, form2):\n for point in form1.points:\n if point in form2:\n return True\n for point in form2.points:\n if point in form1:\n return True\n return", "def different_values_constraint(A, a, B, b):\r\n return a != b", "def check_undirected(graph):\n for node in graph:\n for neighbor in graph[node]:\n if node not in graph[neighbor]:\n return False\n return True", "def test_bidirectional_edges(self):\n G = DiGraph()\n G.add_edges_from([(0, 1), (1, 2), (2, 3), (3, 0), (1, 3), (0, 2)])\n G.add_edge(1, 0)\n assert_false(is_tournament(G))", "def _degree_has_changed(first, second):\n return len(set(first) ^ set(second)) != 0", "def _is_sink() -> bool:\n\n def _is_inplace(n: Node):\n \"\"\"Get the inplace argument from ``torch.fx.Node``\n \"\"\"\n inplace = False\n if n.op == \"call_function\":\n inplace = n.kwargs.get(\"inplace\", False)\n elif n.op == \"call_module\":\n inplace = 
getattr(n.graph.owning_module.get_submodule(n.target), \"inplace\", False)\n return inplace\n\n def _is_shape_consistency(n: Node):\n \"\"\"Check if this node is shape-consistency node (i.e. ``runtime_apply`` or ``runtime_apply_for_iterable_object``)\n \"\"\"\n return n.target in [runtime_apply, runtime_apply_for_iterable_object, runtime_comm_spec_apply]\n\n return not sum([v for _, v in deps.items()]) and not any(map(_is_inplace, n.users)) and not any(\n map(_is_shape_consistency, n.users))", "def pair_is_consistent(graph, u, v):\n relations = get_all_relations(graph, u, v)\n\n if 1 != len(relations):\n return False\n\n return list(relations)[0]", "def is_directed(self):\n return self._incoming is not self._outgoing\n # directed if maps are distinct", "def is_same_graph_with_merge(var1, var2, givens=None):\r\n if givens is None:\r\n givens = {}\r\n # Copy variables since the MergeOptimizer will modify them.\r\n copied = copy.deepcopy([var1, var2, givens])\r\n vars = copied[0:2]\r\n givens = copied[2]\r\n # Create FunctionGraph.\r\n inputs = theano.gof.graph.inputs(vars)\r\n # The clone isn't needed as we did a deepcopy and we cloning will\r\n # break the mapping in givens.\r\n fgraph = theano.gof.fg.FunctionGraph(inputs, vars, clone=False)\r\n # Perform Variable substitution.\r\n for to_replace, replace_by in givens.iteritems():\r\n fgraph.replace(to_replace, replace_by)\r\n # Perform merge optimization.\r\n merge_optimizer.optimize(fgraph)\r\n # When two variables perform the same computations, they will have the same\r\n # owner in the optimized graph.\r\n # We need to be careful with the special case where the owner is None,\r\n # which happens when the graph is made of a single Variable.\r\n # We also need to make sure we replace a Variable if it is present in\r\n # `givens`.\r\n vars_replaced = [givens.get(v, v) for v in vars]\r\n o1, o2 = [v.owner for v in vars_replaced]\r\n if o1 is None and o2 is None:\r\n # Comparing two single-Variable graphs: they are equal if they are\r\n # the same Variable.\r\n return vars_replaced[0] == vars_replaced[1]\r\n else:\r\n return o1 is o2", "def allow_relation(self, obj1, obj2, **hints):\n\n result = False\n if not (obj1._meta.model_name in GeoSpatialRouting.includedModels and \n obj2._meta.model_name in GeoSpatialRouting.includedModels) :\n result = None\n return result", "def is_pair_allowed(a, b):\n if a == complementary(b):\n return True\n if a == 'G' and b == 'U' or a == 'U' and b == 'G':\n return True\n return False", "def test_canonize_neighborhood_same_graph(nauty, ref_graph, ref_graph2):\n key = nauty.canonize_neighborhood(ref_graph, 2, 1)\n key2 = nauty.canonize_neighborhood(ref_graph2, 3, 1)\n assert key == key2", "def is_acyclic(self, queue):\n if len(self.graph) == 0:\n return True\n\n elif not queue.is_empty():\n source = queue.dequeue()\n source_node = self.graph.get_node(source)\n for node in source_node.data.children:\n if (node.data.in_degree - 1) == 0:\n queue.enqueue(node.data.element)\n self.remove_vertex(source)\n result = self.is_acyclic(queue)\n else:\n result = False\n\n return result", "def check_perm(tri1,tri2):\n for t1 in tri1:\n for t2 in tri2:\n if t1 == t2:\n return False\n return True", "def are_concatenate_on_graph(self, subgraph) -> bool:\n self.visit(subgraph)\n return self.on_graph", "def test_wp_association_bp(self):\n test_graph = wikipathways_to_bel(WP2359, self.hgnc_manager)\n\n self.assertEqual(type(test_graph), BELGraph, msg='Error with graph type')\n\n self.assertEqual(test_graph.summary_dict()['Number 
of Nodes'], 2)\n self.assertEqual(test_graph.summary_dict()['Number of Edges'], 1)\n self.assertEqual(count_relations(test_graph)['regulates'], 1)", "def __ne__(self, other):\n return np.all(self.grid != other.grid) or np.all(self.pos != other.pos)", "def test_non_native_two_qubit_gates(self, valkmusa, gate):\n\n QB1, QB2 = valkmusa.qubits\n\n for op in (\n gate(QB1, QB2),\n gate(QB2, QB1).with_tags('tag_baz'),\n ):\n decomposition = valkmusa.decompose_operation_full(op)\n assert TestGateDecomposition.is_native(decomposition)", "def is_same_graph(var1, var2, givens=None, debug=False):\r\n # Lazy import.\r\n if givens is None:\r\n givens = {}\r\n global equal_computations, is_same_graph_with_merge\r\n if equal_computations is None:\r\n from theano.gof.opt import is_same_graph_with_merge\r\n from theano.scan_module.scan_utils import equal_computations\r\n # Convert `givens` to dictionary.\r\n if not isinstance(givens, dict):\r\n givens = dict(givens)\r\n # Get result from the merge-based function.\r\n rval1 = is_same_graph_with_merge(var1=var1, var2=var2, givens=givens)\r\n # Get result from the function `equal_computations` from scan_utils.\r\n use_equal_computations = True\r\n if givens:\r\n # We need to build the `in_xs` and `in_ys` lists. To do this, we need\r\n # to be able to tell whether a variable belongs to the computational\r\n # graph of `var1` or `var2`.\r\n # The typical case we want to handle is when `to_replace` belongs to\r\n # one of these graphs, and `replace_by` belongs to the other one. In\r\n # other situations, the current implementation of `equal_computations`\r\n # is probably not appropriate, so we do not call it.\r\n ok = True\r\n in_xs = []\r\n in_ys = []\r\n # Compute the sets of all variables found in each computational graph.\r\n inputs_var = map(inputs, ([var1], [var2]))\r\n all_vars = [set(variables(v_i, v_o))\r\n for v_i, v_o in ((inputs_var[0], [var1]),\r\n (inputs_var[1], [var2]))]\r\n\r\n def in_var(x, k):\r\n # Return True iff `x` is in computation graph of variable `vark`.\r\n return x in all_vars[k - 1]\r\n\r\n for to_replace, replace_by in givens.iteritems():\r\n # Map a substitution variable to the computational graphs it\r\n # belongs to.\r\n inside = dict((v, [in_var(v, k) for k in (1, 2)])\r\n for v in (to_replace, replace_by))\r\n if (inside[to_replace][0] and not inside[to_replace][1] and\r\n inside[replace_by][1] and not inside[replace_by][0]):\r\n # Substitute variable in `var1` by one from `var2`.\r\n in_xs.append(to_replace)\r\n in_ys.append(replace_by)\r\n elif (inside[to_replace][1] and not inside[to_replace][0] and\r\n inside[replace_by][0] and not inside[replace_by][1]):\r\n # Substitute variable in `var2` by one from `var1`.\r\n in_xs.append(replace_by)\r\n in_ys.append(to_replace)\r\n else:\r\n ok = False\r\n break\r\n if not ok:\r\n # We cannot directly use `equal_computations`.\r\n if debug:\r\n raise AssertionError(\r\n 'When `debug` is True we want to make sure we are also '\r\n 'using the `equal_computations` implementation')\r\n use_equal_computations = False\r\n else:\r\n in_xs = None\r\n in_ys = None\r\n if use_equal_computations:\r\n rval2 = equal_computations(xs=[var1], ys=[var2],\r\n in_xs=in_xs, in_ys=in_ys)\r\n assert rval2 == rval1\r\n return rval1", "def _is_compatible_symbolic_array(a, b):\n if not a.shape == b.shape:\n return False\n a = a.flatten()\n b = b.flatten()\n for t, v in zip(a, b):\n if not is_symbolic(t) and not is_symbolic(v):\n if t != v:\n return False\n return True", "def similar(g1, g2):\r\n return 
all(t1 == t2 for (t1, t2) in _squashed_graphs_triples(g1, g2))", "def isScalene(self):\n\t\treturn self.a != self.b != self.c", "def is_bipartite(graph: list[list[int]]) -> bool:\n n = len(graph)\n color = [0] * n\n\n def dfs(c, node):\n color[node] = c\n for nxt in graph[node]:\n if color[nxt] == c:\n return False\n if color[nxt] == 0 and not dfs(-c, nxt):\n return False\n return True\n\n for i in range(n):\n if color[i] == 0 and not dfs(1, i):\n return False\n return True", "def is_converged(self,a,b):\n return np.array_equal(a,b)", "def is_bipartite(adj_list):\n A, B = -123, +123\n colors = {}\n v = next(iter(adj_list))\n stack = [(A, v)]\n while stack:\n color, v = stack.pop()\n if v in colors:\n continue\n colors[v] = color\n for w in adj_list[v]:\n if w not in colors:\n stack += [(-color, w)]\n elif colors[w] == color:\n return False\n assert colors.keys() == adj_list.keys(), \"must be connected\"\n return True", "def validate_graph(self) -> bool:\n return True", "def _check(self):\n d = self.degree()\n Sd = self.parent()._sym\n\n if prod(self._g, Sd.one()) != Sd.one():\n raise ValueError(\"the product is not identity\")\n\n if self._connected and not perms_are_connected(self._g, d):\n raise ValueError(\"not connected\")", "def allclose(tensor1: Tensor, tensor2: Tensor) ->bool:\n if tensor1.dtype != tensor2.dtype:\n tensor2 = tensor2\n return torch.allclose(tensor1, tensor2)", "def bipartite_vertex_cover(bigraph, algo=\"Hopcroft-Karp\"):\n if algo == \"Hopcroft-Karp\":\n coord = [(irow,icol) for irow,cols in enumerate(bigraph) for icol in cols]\n coord = np.array(coord)\n graph = csr_matrix((np.ones(coord.shape[0]),(coord[:,0],coord[:,1])))\n matchV = maximum_bipartite_matching(graph, perm_type='row')\n matchV = [None if x==-1 else x for x in matchV]\n nU, nV = graph.shape\n assert len(matchV) == nV\n elif algo == \"Hungarian\":\n matchV = max_bipartite_matching2(bigraph)\n nU, nV = len(bigraph), len(matchV)\n else:\n assert False\n\n matchU = [None] * nU\n \n for v in range(nV): # -- build the mapping from U to V\n if matchV[v] is not None:\n matchU[matchV[v]] = v\n \n def old_konig():\n visitU = [False] * nU # -- build max alternating forest\n visitV = [False] * nV\n for u in range(nU):\n if matchU[u] is None: # -- starting with free vertices in U\n _alternate(u, bigraph, visitU, visitV, matchV)\n inverse = [not b for b in visitU]\n return (inverse, visitV)\n \n def new_konig():\n # solve the limitation of huge number of recursive calls\n visitU = [False] * nU # -- build max alternating forest\n visitV = [False] * nV\n wait_u = set(range(nU)) - set(matchV) \n while len(wait_u) > 0:\n u = wait_u.pop()\n visitU[u] = True\n for v in bigraph[u]:\n if not visitV[v]:\n visitV[v] = True\n assert matchV[v] is not None # otherwise match is not maximum\n assert matchV[v] not in wait_u\n wait_u.add(matchV[v])\n inverse = [not b for b in visitU]\n return (inverse, visitV)\n \n #res_old = old_konig()\n res_new = new_konig()\n #assert res_old == res_new\n return res_new", "def test_native_two_qubit_gates(self, valkmusa, gate):\n\n QB1, QB2 = valkmusa.qubits\n\n op = gate(QB1, QB2)\n decomposition = valkmusa.decompose_operation_full(op)\n assert decomposition == [op]\n assert TestGateDecomposition.is_native(decomposition)", "def __neq__(self, other):\n return self.atlasID != other.atlasID", "def check_affine_equivalence(f, g, A, a, B, b):\n for x in range(0, 2**N):\n y = oplus(x, a)\n y = apply_bin_mat(y, A)\n y = g[y]\n y = apply_bin_mat(y, B)\n y = oplus(y, b)\n if y != f[x]:\n return False\n 
return True", "def is_permutation(A, B):\n return set(A) == set(B)", "def is_cyclic_permutation(A, B):\n # Check if same length\n if len(A) != len(B):\n return False\n # Check that contain the same elements\n if set(A) == set(B):\n longlist = A + A\n if contains_sublist(longlist, B):\n return True\n else:\n return False\n else:\n return False", "def is_separating_axis(o, p1, p2):\n min1, max1 = float('+inf'), float('-inf')\n min2, max2 = float('+inf'), float('-inf')\n\n for v in p1:\n projection = np.dot(v, o)\n\n min1 = min(min1, projection)\n max1 = max(max1, projection)\n\n for v in p2:\n projection = np.dot(v, o)\n\n min2 = min(min2, projection)\n max2 = max(max2, projection)\n\n if max1 >= min2 and max2 >= min1:\n d = min(max2 - min1, max1 - min2)\n # push a bit more than needed so the shapes do not overlap in future\n # tests due to float precision\n d_over_o_squared = d/np.dot(o, o) + 1e-10\n pv = d_over_o_squared*o\n return False, pv\n else:\n return True, None", "def is_planar(G):\n result=True\n bad_minor=[]\n n=len(G.nodes())\n iterazione=0\n if n>5:\n print 'N >5'\n\n for subnodes in it.combinations(G.nodes(),6):\n iterazione+=1\n print 'iterazione %d'%iterazione\n subG=G.subgraph(subnodes)\n if bipartite.is_bipartite(G):# check if the graph G has a subgraph K(3,3)\n X, Y = bipartite.sets(G)\n if len(X)==3:\n result=False\n bad_minor=subnodes\n return result,bad_minor\n iterazione=0\n if n>4 and result:\n print 'N >4'\n\n for subnodes in it.combinations(G.nodes(),5):\n print 'iterazione %d'%iterazione\n subG=G.subgraph(subnodes)\n if len(subG.edges())==10:# check if the graph G has a subgraph K(5)\n result=False\n bad_minor=subnodes\n return result,bad_minor\n\n return result,bad_minor", "def test_exact_two_qubit_cnot_decompose_paulis(self):\n unitary = Operator.from_label(\"XZ\")\n self.check_exact_decomposition(unitary.data, two_qubit_cnot_decompose)", "def iso(self,G1, glist):\n for G2 in glist:\n if isomorphic(G1,G2):\n return True\n return False", "def __gt__(self, other):\n return other._is_subpolyhedron(self) and not self._is_subpolyhedron(other)", "def is_similar_with(self, other):\n\n # corresponding angles are congruent\n if self.angles != other.angles:\n return False\n # corresponding sides are proportional\n proportion = self.perimeter() / other.perimeter()\n for i in range(len(self.lengths)):\n if self.lengths[i]/other.lengths[i] != proportion:\n return False\n return True", "def invariant(self):\n\t\treturn (self.demand.popId != self.dstPopId)", "def independent(self) -> bool:\n parent = self._parent()\n if parent is None:\n return True\n connections = parent._graph.connections\n path = self._path\n lp = len(path)\n for con in connections:\n if con[\"type\"] == \"connection\":\n if con[\"target\"][:lp] == path:\n return False\n return True", "def check_regularity(edges):\n for a, b in edges:\n counter_a = 0\n counter_b = 0\n for x, y in edges:\n if a == x or a == y:\n counter_a += 1\n if b == x or b == y:\n counter_b += 1\n assert (counter_a > 0) and (counter_b > 0)\n if (counter_a == 1) or (counter_b == 1):\n raise Exception(\"Boundary is not closed.\")\n if (counter_a > 2) or (counter_b > 2):\n raise Exception(\"More than two edges share a node.\")", "def __gt__(self, other):\n if self.head_vertex <= other.head_vertex:\n return False\n elif self.tail_vertex <= other.tail_vertex:\n return False\n elif self.weight <= other.weight:\n return False\n return True", "def __gt__(self, other):\n if self.head_vertex <= other.head_vertex:\n return False\n elif 
self.tail_vertex <= other.tail_vertex:\n return False\n elif self.weight <= other.weight:\n return False\n return True", "def pose_close(\n p1: np.ndarray, p2: np.ndarray, angle_thresh=5.0, trans_thresh=0.05\n) -> bool:\n return (\n angular_separation(p1[0:3, 0:3], p2[0:3, 0:3]) < angle_thresh\n and translation_separation(p1[0:3, 3], p2[0:3, 3]) < trans_thresh\n )", "def gam_in_biomass(reaction):\n left = set([\"atp_c\", \"h2o_c\"])\n right = set([\"adp_c\", \"pi_c\", \"h_c\"])\n return (\n left.issubset(met.id for met in reaction.reactants) and\n right.issubset(met.id for met in reaction.products))", "def __gt__(self, other):\n return self.x ** 2 + self.y ** 2 > other.x ** 2 + other.y ** 2", "def is_connected(object_one, object_two):\n\n for vert_one in object_one.Vertexes:\n for vert_two in object_two.Vertexes:\n if (vert_one.X == vert_two.X) and (vert_one.y == vert_two.y):\n return True\n\n return False", "def have_same_topology(first_mesh, second_mesh):\n return attr_has_same_shape(first_mesh, second_mesh, \"v\") and attr_is_equal(\n first_mesh, second_mesh, \"f\"\n )", "def is_directed(G):\n return G.is_directed()", "def __gt__(self, other: Compound[Scalar]) -> bool:\n return (self._points_set > other._points_set\n if isinstance(other, Multipoint)\n else NotImplemented)", "def has_crossing_len2_ob(self) -> bool:\n fcell = self.first_cell\n scell = self.second_cell\n if self._fuse_row:\n possible_obs = [\n GriddedPerm((0, 1), (fcell, scell)),\n GriddedPerm((1, 0), (scell, fcell)),\n ]\n else:\n possible_obs = [\n GriddedPerm((0, 1), (fcell, scell)),\n GriddedPerm((1, 0), (fcell, scell)),\n ]\n return any(ob in possible_obs for ob in self._tiling.obstructions)", "def is_subgraph_of(self, other):\n # If it is already recognized that it is a subgraph this procedure can be skipped.\n if other in self.__supergraph :\n return True\n \n if type(self)!=type(other):\n raise TypeError(\"Only works between graphs.\")\n elif other.return_num_vertices() == 0:\n return False\n elif self.return_num_vertices() == 0:\n return True\n names_to_check = self.return_names()\n # Checks if the vertices are a subset\n if not set(names_to_check).issubset(set(other.return_names())):\n return False\n \n # Traverses each node and checks if the adjacencies build a subset.\n # To do so, the node indices must be replaced by node names.\n # This is laborious, but only needs to be done once.\n for name in names_to_check:\n selflist = set(map(lambda x: (self.return_vertexName(x[0]),x[1]), self.return_adjacencies(self.return_vertexIndex(name))))\n otherlist = set(map(lambda x: (other.return_vertexName(x[0]),x[1]), other.return_adjacencies(other.return_vertexIndex(name))))\n if not selflist.issubset(otherlist):\n return False\n self.__supergraph.append(other)\n \n return True", "def check_controllability(A, B):\n assert len(A.shape) == 2\n dim = A.shape[0]\n stack = []\n for i in range(dim):\n term = B @ np.linalg.matrix_power(A, i)\n stack.append(term)\n grammian = np.hstack(stack)\n return np.linalg.matrix_rank(grammian) == dim", "def has_bond_crossing(self):\n return self.count_bond_collisions() > 0" ]
[ "0.58745533", "0.58431983", "0.5821069", "0.58176845", "0.5800676", "0.5800201", "0.571649", "0.56977814", "0.56428045", "0.56401634", "0.5639373", "0.5630606", "0.5611228", "0.5610141", "0.5608601", "0.559945", "0.55920273", "0.5584586", "0.557866", "0.5549889", "0.55390096", "0.55294067", "0.548488", "0.5467634", "0.546328", "0.5438523", "0.541881", "0.5416355", "0.54109174", "0.53972083", "0.53882927", "0.5367333", "0.5365464", "0.5360014", "0.53457475", "0.5340369", "0.5338004", "0.53269655", "0.53144944", "0.5304315", "0.53041524", "0.52771556", "0.52771556", "0.526745", "0.52586657", "0.5255519", "0.5255238", "0.5252334", "0.525014", "0.52490914", "0.523938", "0.5233853", "0.5227845", "0.5216756", "0.5216079", "0.5214593", "0.5210459", "0.52076435", "0.52044845", "0.51856905", "0.5184143", "0.51786566", "0.5175448", "0.5174069", "0.516993", "0.51675344", "0.51569813", "0.5154277", "0.5152942", "0.51370347", "0.51331097", "0.513203", "0.51276803", "0.5126862", "0.51086", "0.5101035", "0.5098533", "0.50954974", "0.5093008", "0.5088192", "0.5086586", "0.50847054", "0.50845444", "0.5083722", "0.50837064", "0.5078423", "0.5067512", "0.5066659", "0.5066659", "0.50658965", "0.50591105", "0.50570285", "0.50564873", "0.50554955", "0.5051706", "0.5051666", "0.50504297", "0.5048631", "0.50470066", "0.5046987" ]
0.5283513
41
Main function for checking isomorphism >>> check_for_isomorphism([(1, 2), (1, 3), (1, 5),\ (2, 4), (2, 6), (3, 1), (3, 4),\ (4, 2), (5, 1), (5, 6), (5, 7), (6, 8), (7, 8)],\ [(1, 2), (1, 3), (1, 5), (2, 4), (3, 1), (3, 4),\ (3, 7), (4, 2), (5, 6), (5, 7), (6, 8), (7, 8)]) True >>> check_for_isomorphism([(1, 3), (1, 5),\ (2, 4), (2, 6), (3, 1), (3, 4),\ (4, 2), (5, 1), (5, 6), (5, 7), (6, 8), (7, 8)],\ [(1, 2), (1, 3), (1, 5), (2, 4), (3, 1), (3, 4),\ (3, 7), (4, 2), (5, 6), (5, 7), (6, 8), (7, 8)]) False
def check_for_isomorphism(graph1: list, graph2: list, directed=False) -> bool: matrix1 = get_adjancy_matrix(graph1, directed) matrix2 = get_adjancy_matrix(graph2, directed) if num_vertices(matrix1, matrix2): if num_edges(matrix1, matrix2): degrees = vertices_degree(matrix1, matrix2) if degrees[0]: return permutations(matrix1, matrix2, degrees[1:]) return False
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_check_isomorphism(self):\n mol1 = Molecule(smiles='[O-][N+]#N')\n mol2 = Molecule(smiles='[N-]=[N+]=O')\n self.assertTrue(converter.check_isomorphism(mol1, mol2))", "def test_is_isomorphic(self):\n mol1 = converter.s_bonds_mol_from_xyz(self.xyz1['dict'])\n mol2 = converter.s_bonds_mol_from_xyz(self.xyz1['dict_diff_order'])\n self.assertTrue(mol1.is_isomorphic(mol2, save_order=True, strict=False))\n\n mol1 = converter.s_bonds_mol_from_xyz(self.xyz11['dict'])\n mol2 = converter.s_bonds_mol_from_xyz(self.xyz11['dict_diff_order'])\n self.assertTrue(mol1.is_isomorphic(mol2, save_order=True, strict=False))\n\n mol1 = converter.s_bonds_mol_from_xyz(self.xyz10['dict'])\n mol2 = converter.s_bonds_mol_from_xyz(self.xyz10['dict_diff_order1'])\n mol3 = converter.s_bonds_mol_from_xyz(self.xyz10['dict_diff_order2'])\n self.assertTrue(mol1.is_isomorphic(mol2, save_order=True, strict=False))\n self.assertTrue(mol1.is_isomorphic(mol3, save_order=True, strict=False))", "def test_isomorphic_perumtations(self, inputs):\n # get benzene with all aromatic atoms/bonds labeled\n benzene = Molecule.from_smiles(\"c1ccccc1\")\n # get benzene with no aromatic labels\n benzene_no_aromatic = create_benzene_no_aromatic()\n # now test all of the variations\n assert (\n Molecule.are_isomorphic(\n benzene,\n benzene_no_aromatic,\n aromatic_matching=inputs[\"aromatic_matching\"],\n formal_charge_matching=inputs[\"formal_charge_matching\"],\n bond_order_matching=inputs[\"bond_order_matching\"],\n atom_stereochemistry_matching=inputs[\"atom_stereochemistry_matching\"],\n bond_stereochemistry_matching=inputs[\"bond_stereochemistry_matching\"],\n )[0]\n is inputs[\"result\"]\n )", "def test_isomorphic_general(self):\n # check that hill formula fails are caught\n ethanol = create_ethanol()\n acetaldehyde = create_acetaldehyde()\n assert ethanol.is_isomorphic_with(acetaldehyde) is False\n assert acetaldehyde.is_isomorphic_with(ethanol) is False\n # check that different orderings work with full matching\n ethanol_reverse = create_reversed_ethanol()\n assert ethanol.is_isomorphic_with(ethanol_reverse) is True\n # check a reference mapping between ethanol and ethanol_reverse matches that calculated\n ref_mapping = {0: 8, 1: 7, 2: 6, 3: 3, 4: 4, 5: 5, 6: 1, 7: 2, 8: 0}\n assert (\n Molecule.are_isomorphic(ethanol, ethanol_reverse, return_atom_map=True)[1]\n == ref_mapping\n )\n # check matching with nx.Graph atomic numbers and connectivity only\n assert (\n Molecule.are_isomorphic(\n ethanol,\n ethanol_reverse.to_networkx(),\n aromatic_matching=False,\n formal_charge_matching=False,\n bond_order_matching=False,\n atom_stereochemistry_matching=False,\n bond_stereochemistry_matching=False,\n )[0]\n is True\n )\n # check matching with nx.Graph with full matching\n assert ethanol.is_isomorphic_with(ethanol_reverse.to_networkx()) is True\n\n from openff.toolkit.topology.topology import Topology\n\n topology = Topology.from_molecules(ethanol)\n assert (\n Molecule.are_isomorphic(\n ethanol,\n [*topology.molecules][0],\n aromatic_matching=False,\n formal_charge_matching=False,\n bond_order_matching=False,\n atom_stereochemistry_matching=False,\n bond_stereochemistry_matching=False,\n )[0]\n is True\n )\n # test hill formula passes but isomorphic fails\n mol1 = Molecule.from_smiles(\"Fc1ccc(F)cc1\")\n mol2 = Molecule.from_smiles(\"Fc1ccccc1F\")\n assert mol1.is_isomorphic_with(mol2) is False\n assert mol2.is_isomorphic_with(mol1) is False", "def test_isomorphism_match(data):\n\n reference = data.draw(ISO_BUILDER)\n nodes = 
data.draw(st.sets(st.sampled_from(list(reference.nodes)),\n max_size=len(reference)))\n graph = reference.subgraph(nodes)\n\n note((\"Reference nodes\", reference.nodes(data=True)))\n note((\"Reference edges\", reference.edges))\n note((\"Graph nodes\", graph.nodes(data=True)))\n note((\"Graph edges\", graph.edges))\n\n node_match = nx.isomorphism.categorical_node_match('element', None)\n matcher = nx.isomorphism.GraphMatcher(reference, graph, node_match=node_match)\n expected = make_into_set(matcher.subgraph_isomorphisms_iter())\n found = make_into_set(vermouth.graph_utils.isomorphism(reference, graph))\n\n note((\"Found\", found))\n note((\"Expected\", expected))\n\n if not expected:\n event(\"Not subgraphs\")\n if found == expected:\n event(\"Exact match\")\n\n assert found <= expected", "def check_xyz_isomorphism(self, allow_nonisomorphic_2d=False, xyz=None):\n xyz = xyz or self.final_xyz\n passed_test, return_value = False, False\n if self.mol is not None:\n try:\n b_mol = molecules_from_xyz(xyz, multiplicity=self.multiplicity, charge=self.charge)[1]\n except SanitizationError:\n b_mol = None\n if b_mol is not None:\n is_isomorphic = check_isomorphism(self.mol, b_mol)\n else:\n is_isomorphic = False\n if is_isomorphic:\n passed_test, return_value = True, True\n else:\n # isomorphism test failed\n passed_test = False\n if self.conf_is_isomorphic:\n if allow_nonisomorphic_2d:\n # conformer was isomorphic, we **do** allow nonisomorphism, and the optimized structure isn't\n return_value = True\n else:\n # conformer was isomorphic, we don't allow nonisomorphism, but the optimized structure isn't\n return_value = False\n else:\n # conformer was not isomorphic, don't strictly enforce isomorphism here\n return_value = True\n if not passed_test:\n logger.error('The optimized geometry of species {0} is not isomorphic with the 2D structure {1}'.format(\n self.label, self.mol.toSMILES()))\n if not return_value:\n logger.error('Not spawning additional jobs for this species!')\n else:\n logger.info('Species {0} was found to be isomorphic with the perception '\n 'of its optimized coordinates.'.format(self.label))\n else:\n logger.error('Cannot check isomorphism for species {0}'.format(self.label))\n return return_value", "def test_isomorphic_general(self):\n # check that hill formula fails are caught\n ethanol = create_ethanol()\n acetaldehyde = create_acetaldehyde()\n assert ethanol.is_isomorphic_with(acetaldehyde) is False\n assert acetaldehyde.is_isomorphic_with(ethanol) is False\n # check that different orderings work with full matching\n ethanol_reverse = create_reversed_ethanol()\n assert ethanol.is_isomorphic_with(ethanol_reverse) is True\n # check a reference mapping between ethanol and ethanol_reverse matches that calculated\n ref_mapping = {0: 8, 1: 7, 2: 6, 3: 3, 4: 4, 5: 5, 6: 1, 7: 2, 8: 0}\n assert (\n Molecule.are_isomorphic(ethanol, ethanol_reverse, return_atom_map=True)[1]\n == ref_mapping\n )\n # check matching with nx.Graph atomic numbers and connectivity only\n assert (\n Molecule.are_isomorphic(\n ethanol,\n ethanol_reverse.to_networkx(),\n aromatic_matching=False,\n formal_charge_matching=False,\n bond_order_matching=False,\n atom_stereochemistry_matching=False,\n bond_stereochemistry_matching=False,\n )[0]\n is True\n )\n # check matching with nx.Graph with full matching\n assert ethanol.is_isomorphic_with(ethanol_reverse.to_networkx()) is True\n # check matching with a TopologyMolecule class\n from openforcefield.topology.topology import Topology, TopologyMolecule\n\n 
topology = Topology.from_molecules(ethanol)\n topmol = TopologyMolecule(ethanol, topology)\n assert (\n Molecule.are_isomorphic(\n ethanol,\n topmol,\n aromatic_matching=False,\n formal_charge_matching=False,\n bond_order_matching=False,\n atom_stereochemistry_matching=False,\n bond_stereochemistry_matching=False,\n )[0]\n is True\n )\n # test hill formula passes but isomorphic fails\n mol1 = Molecule.from_smiles(\"Fc1ccc(F)cc1\")\n mol2 = Molecule.from_smiles(\"Fc1ccccc1F\")\n assert mol1.is_isomorphic_with(mol2) is False\n assert mol2.is_isomorphic_with(mol1) is False", "def iso(G1, glist):\n for G2 in glist:\n if isomorphic(G1, G2):\n return True\n return False", "def test_isomorphic_perumtations(self, inputs):\n # get benzene with all aromatic atoms/bonds labeled\n benzene = Molecule.from_smiles(\"c1ccccc1\")\n # get benzene with no aromatic labels\n benzene_no_aromatic = create_benzene_no_aromatic()\n # now test all of the variations\n assert (\n Molecule.are_isomorphic(\n benzene,\n benzene_no_aromatic,\n aromatic_matching=inputs[\"aromatic_matching\"],\n formal_charge_matching=inputs[\"formal_charge_matching\"],\n bond_order_matching=inputs[\"bond_order_matching\"],\n atom_stereochemistry_matching=inputs[\"atom_stereochemistry_matching\"],\n bond_stereochemistry_matching=inputs[\"bond_stereochemistry_matching\"],\n )[0]\n is inputs[\"result\"]\n )\n\n assert (\n benzene.is_isomorphic_with(\n benzene_no_aromatic,\n aromatic_matching=inputs[\"aromatic_matching\"],\n formal_charge_matching=inputs[\"formal_charge_matching\"],\n bond_order_matching=inputs[\"bond_order_matching\"],\n atom_stereochemistry_matching=inputs[\"atom_stereochemistry_matching\"],\n bond_stereochemistry_matching=inputs[\"bond_stereochemistry_matching\"],\n )\n is inputs[\"result\"]\n )", "def iso(self,G1, glist):\n for G2 in glist:\n if isomorphic(G1,G2):\n return True\n return False", "def check_Motifs(H, m):\n\t#This function will take each possible subgraphs of gr of size 3, then\n\t#compare them to the mo dict using .subgraph() and is_isomorphic\n\t\n\t#This line simply creates a dictionary with 0 for all values, and the\n\t#motif names as keys\n\n\t##paper source \"Higher-order organization ofcomplex networks\" (2016) Benson et al, Science\n\t## I choose only the unidirection ones : M1, M5, M8, M9, M10\n\n\n\ts = int(m)\n\n\tif (s==3):\n\t\t#motifs = {'M1': nx.DiGraph([(1,2),(2,3),(3,1)]), 'M5': nx.DiGraph([(1,2),(2,3),(1,3)]), 'M8': nx.DiGraph([(2, 1),(2,3)]), 'M9': nx.DiGraph([(2, 1),(3, 2)]), 'M10': nx.DiGraph([(1,2),(3,2)])}\n\t\tmotifs = {'M1': [(1,2),(2,3),(3,1)], 'M5': [(1,2),(2,3),(1,3)], 'M8': [(2, 1),(2,3)], 'M9': [(2, 1),(3, 2)], 'M10': [(1,2),(3,2)],\n\t\t\t\t\t'M2': [(1,2),(2,3),(3,2),(3,1)], 'M3': [(1,2),(2,3),(3,2),(1,3),(3,1)], 'M4': [(1,2),(2,1),(2,3),(3,2),(1,3),(3,1)], 'M6': [(2, 1),(2,3),(1,3),(3,1)], 'M7': [(1,2),(3,2),(1,3),(3,1)],\n\t\t\t\t\t'M11': [(1,2),(2,1),(2,3)], 'M12': [(1,2),(2,1),(3,2)], 'M13': [(1,2),(2,1),(2,3),(3,2)]}\n\n\telif (s==4): ## under development\n\t\tmotifs = {'bifan': [(1,2),(1,3),(4,2),(4,3)]}\n\n\t\t#edgeLists=[[[1,2],[1,3],[1,4]]]\n\t\t#edgeLists.append([[1,2],[1,3],[1,4],[2,3]])\n\t\t#edgeLists.append([[1,2],[1,3],[1,4],[2,3],[3,4]])\n\t\t#edgeLists.append([[1,2],[1,3],[1,4],[2,3],[3,4],[2,4]])\n\telse:\n\t\traise nx.NetworkXNotImplemented('Size of motif must be 3 or 4')\n\n\t#outf = open(f2, 'w')\n\t#print >> outf, 'commitid|motiflabel|count'\n\n\tG = H\n\n\tmcount = dict(zip(motifs.keys(), list(map(int, np.zeros(len(motifs))))))\n\n\t## match the pattern 
and count the motifs \n\tdict_edges = defaultdict(list); dict_nodes = defaultdict(list)\n\tfor key in motifs :\n\t\n\t\t\tpattern = motifs[key]\n\t\t\n\t\t\tgmoti = nx.DiGraph()\n\t\t\tgmoti.add_edges_from(pattern)\n\n\t\t\tmotif_pattern_obs = subgraph_pattern(G, gmoti, sign_sensitive=False)\n\n\t\t\ts = []\n\t\t\tfor subgraph in motif_pattern_obs :\n\t\t\t\ttup = tuple(subgraph.keys())\n\t\t\t\ts.append(tup)\n\n\t\t\tuniqs = list(set(s))\n\n\t\t\tif len(uniqs) > 0 :\n\t\t\t\tmaplist = map(list, uniqs)\n\n\t\t\t### label the edges as per the motif labels\n\t\t\t\tmcount[str(key)] = len(maplist)\n\n\t\t\t\tfor triplets in maplist :\n\t\t\t\t\tsubgraph = G.subgraph(triplets)\n\t\t\t\t\tedgeLists = [e for e in subgraph.edges() if G.has_edge(*e)]\n\n\t\t\t\t## an edge is part of multiple motifs\n\t\t\t\t## lets count the number of motifs an edge is part of \n\t\t\t\t\tfor u, v in edgeLists :\n\t\t\t\t\t\tdict_edges[(u, v)].append(str(key))\n\t\n\n\t\t\t\t## A node is also part of multiple motifs. \n\t\t\t\t## We count the total number of motifs a node is part of\n\t\t\t\t## We count the frequency of occurence each motif the node is part of\n\t\t\t\t\tnodelists = subgraph.nodes()\n\t\t\t\t\tfor n in nodelists :\n\t\t\t\t\t\tdict_nodes[str(n)].append(str(key))\n\n\n\n\t\t#for keys, values in mcount.items() :\n\t\t#\tprint >> outf, '%s|%s|%s' %(outname, keys, values) \n\n\t### Let's mark the edge with motif type and count. We count the number of types\n\t### of motif an edge is a part of. An edge could appear in M1: M1x times and in M2: M2x times and so on\n\n\tfor u,v in G.edges() :\n\t\t\tif (u,v) in dict_edges :\n\t\t\t\tG[u][v]['num_motif_edge'] = len(list(set(dict_edges[(u,v)])))\n\n\t### Let's mark the node with motif type and count. We count the number of types of motif a node is a part of. \n\n\tfor n in G.nodes() :\n\t\tmotficountnode = dict(zip(motifs.keys(), list(map(int, np.zeros(len(motifs))))))\n\n\t\tif str(n) in dict_nodes :\n\t\t\tsubgraphnodeslist = dict_nodes[str(n)]\n\n\t\t\tfor key in subgraphnodeslist:\n\t\t\t\tmotficountnode[str(key)] +=1\n\n\t\tfor motif, count in motficountnode.items() :\n\t\t\tG.node[n][str(motif)] = int(count)\n\n\t### Let's mark the edge with motif type and count. We count the number of types\n\t### of motif an edge is a part of. 
An edge could appear in M1: M1x times and in M2: M2x times and so on\n\n\tfor u,v in G.edges() :\n\t\tmotficountedge = dict(zip(motifs.keys(), list(map(int, np.zeros(len(motifs))))))\n\n\t\tif (u,v) in dict_edges :\n\t\t\tsubgraphedgeslist = dict_edges[(u,v)]\n\n\t\t\tfor key in subgraphedgeslist:\n\t\t\t\tmotficountedge[str(key)] +=1\n\n\t\tfor motif, count in motficountedge.items() :\n\t\t\tG[u][v][str(motif)] = int(count)\n\n\n\treturn G", "def test_isomorphism_nonmatch(reference, graph):\n\n note((\"Reference nodes\", reference.nodes(data=True)))\n note((\"Reference edges\", reference.edges))\n note((\"Graph nodes\", graph.nodes(data=True)))\n note((\"Graph edges\", graph.edges))\n\n node_match = nx.isomorphism.categorical_node_match('element', None)\n matcher = nx.isomorphism.GraphMatcher(reference, graph, node_match=node_match)\n expected = make_into_set(matcher.subgraph_isomorphisms_iter())\n found = make_into_set(vermouth.graph_utils.isomorphism(reference, graph))\n note((\"Found\", found))\n note((\"Expected\", expected))\n\n if not expected:\n event(\"Not subgraphs\")\n if found == expected:\n event(\"Exact match\")\n\n assert found <= expected", "def test_check_ambigous(self):\r\n\r\n flow0 = Flowgram(\"\")\r\n flow1 = Flowgram(\r\n \"0 1.2 2.1 3.4 0.02 0.01 1.02 0.08 0.5 1.0 4.1 0.0 0.0 1.23 0.0 3.1\")\r\n flow2 = Flowgram(\r\n \"0 1.2 2.1 3.4 0.02 0.01 1.02 0.08 0.5 1.0 4.1 0.0 0.0 0.0 0.0 0.0 0.0 0.0 0.0 1.23 0.0 3.1\")\r\n flow3 = Flowgram(\r\n \"0 1.2 2.1 3.4 0.02 0.0 0.0 0.01 1.02 0.08 0.5 1.0 4.1 0.0 0.0 0.0 0.0 1.23 0.0 3.1\")\r\n\r\n self.assertEqual(check_ambigous(flow0, 4), False)\r\n self.assertEqual(check_ambigous(flow1, 4), False)\r\n self.assertEqual(check_ambigous(flow2, 4), True)\r\n self.assertEqual(check_ambigous(flow2, 7), True)\r\n self.assertEqual(check_ambigous(flow2, 8), False)\r\n self.assertEqual(check_ambigous(flow3, 3), True)\r\n self.assertEqual(check_ambigous(flow3, 4), False)", "def check_homogeneity(impurities, hull, used_pivots):\n for instance in impurities:\n if check_inside_hull(hull, instance):\n return False\n return True", "def check_puzzle_unicity(puzzle_list: list) -> None:\n if len(set(puzzle_list)) != len(puzzle_list):\n raise ParsingError(\"Puzzle numbers must be unique.\")", "def validate_ion(self, i_seq, out = sys.stdout, debug = True):\n\n atom_props = self.atoms_to_props[i_seq]\n element = mmtbx.ions.server.get_element(atom_props.atom)\n elem_params = mmtbx.ions.server.get_metal_parameters(element)\n\n if elem_params is not None:\n atom_type = atom_props.get_atom_type(params=self.params.water)\n atom_props.check_ion_environment(\n ion_params = elem_params,\n wavelength = self.wavelength,\n require_valence = self.params.require_valence)\n atom_props.check_fpp_ratio(\n ion_params = elem_params,\n wavelength = self.wavelength,\n fpp_ratio_min = self.params.phaser.fpp_ratio_min,\n fpp_ratio_max = self.params.phaser.fpp_ratio_max)\n elif element in mmtbx.ions.HALIDES:\n identity = atom_props.identity()\n atom_props.inaccuracies[identity] = set()\n\n if not self.looks_like_halide_ion(i_seq = i_seq, element = element):\n atom_props.inaccuracies[identity].add(atom_props.BAD_HALIDE)\n else:\n raise Sorry(\"Element '%s' not supported:\\n%s\" %\n (element, atom_props.atom.format_atom_record()))\n\n return atom_props", "def test_defined_in_iter():\n\n @type_checked\n def _run_test(thing:[(int, str, str)]):\n for group in thing:\n assert isinstance(group[0], int)\n assert isinstance(group[1], str)\n assert isinstance(group[2], str)\n assert 
len(thing) == 4\n\n _run_test(thing=[\n (12.3, None, False),\n (\"12.1\", True, 1),\n (False, 10, 12.1),\n (True, 14.9, None),\n ])", "def is_canonical(hybrids):\n mrhyb = hybrids[2].upper().replace(\"U\", \"T\")\n mirhyb = hybrids[0].upper().replace(\"U\", \"T\")\n hybrid = hybrids[1]\n \"\"\"\n 2-8\n \"\"\"\n if hybrid[1:8] == \"|||||||\":\n guwoble = False\n for mirnuc, mrnuc in zip(mirhyb[1:8], mrhyb[1:8]):\n if (mirnuc == 'G' and mrnuc == 'T') or (mirnuc == 'T' and mrnuc == 'G'):\n guwoble = True\n if guwoble:\n return False, \"2-8-Gwoble\"\n else:\n return True, \"2-8\"\n elif (hybrid[1:7] == \"||||||\" and mrhyb[0] == 'A'):\n guwoble = False\n for mirnuc, mrnuc in zip(mirhyb[1:7], mrhyb[1:7]):\n if (mirnuc == 'G' and mrnuc == 'T') or (mirnuc == 'T' and mrnuc == 'G'):\n guwoble = True\n if guwoble:\n return False, \"2-7-A-Gwoble\"\n else:\n return True, \"2-7-A\"\n else:\n if hybrid[0:7] == \"|||||||\":\n return False, \"1-7-ElMMo\"\n elif hybrid[1:7] == \"||||||\":\n return False, \"6-mer\"\n if \"v\" in hybrid[0:8]:\n return False, \"mRNAbulge\"\n elif \"^\" in hybrid[0:8]:\n return False, \"miRNAbulge\"\n elif \"O\" in hybrid[0:8]:\n return False, \"symmetric_loop\"\n else:\n return False, \"unknown\"", "def is_graph_isomorphic(self):\n out=True\n for node in self.node_names:\n self.move_to_node(node)\n if not self.check_closed_path:\n out=False\n return out", "def assertNiftiShape(shape, *args):\n for fname in args:\n d = ensure.ensureIsImage(fname)\n assert tuple(d.shape) == tuple(shape), \\\n 'incorrect shape ({}) for nifti: {}:{}'.format(\n shape, d.shape, fname)", "def _is_all_input_shape_generalize(input_shape_tuple):\n for elem in input_shape_tuple:\n if not is_shape_unknown(elem.shape):\n return False\n return True", "def validate(s):\n\n # base case: square is 1 or 0\n if s == 1 or s == 0:\n return True\n\n # list of length 4\n if isinstance(s, list) and len(s) == 4:\n\n # idea one: fail fast\n for i in s:\n if not validate(i):\n return False\n return True\n\n # idea 2: \"and\" the results ALSO fail fast\n # return (validate(s[0]) and \n # validate(s[1]) and \n # validate(s[2]) and \n # validate(s[3]))\n # OR\n # return all([validate(i) for i in s])\n\n # idea 3: multiply the results: will not return boolean\n # return (validate(s[0]) * \n # validate(s[1]) * \n # validate(s[2]) * \n # validate(s[3]))\n\n # not one of our numbers or list of length 4\n # another base case\n return False", "def test_case_03_isosceles(self):\n self.__assert_equals_test_case(self.yield_isosceles_triangles(), 'Isosceles Triangle')", "def test_isosceles_triangle(self):\n self.assertEqual(classify_triangle(2, 2, 3), 'Isosceles', '2,2,4 is Isosceles')\n self.assertEqual(classify_triangle(3, 5, 3), 'Isosceles', '3,5,3 is Isosceles')\n self.assertEqual(classify_triangle(4, 6, 6), 'Isosceles', '4,6,6 is Isosceles')", "def output_is_valid(output):\n\n is_correct = type(output) is list\n for member in output:\n is_correct *= type(member) is list\n for item in member:\n is_correct *= type(item) is tuple and len(item) == 2\n\n return bool(is_correct)", "def is_correct_smiles(smiles):\n if smiles == \"\":\n return False\n\n try:\n return MolFromSmiles(smiles, sanitize=True) is not None\n except Exception:\n return False", "def is_correct_smiles(smiles):\n if smiles == \"\":\n return False\n\n try:\n return MolFromSmiles(smiles, sanitize=True) is not None\n except Exception:\n return False", "def compute_smile_prop(smile):\n\n def compute_for_one(smi):\n\n \"\"\"\n Computes properties for a single smile 
sequence\n\n Inputs \n smi (str) : A sequence of smile characters\n Outputs\n prop (list): Computed properties, \"Not exist\" if properties cannot be computed\n \"\"\"\n\n try:\n mol=Chem.MolFromSmiles(smi) \n prop = [Descriptors.ExactMolWt(mol), Descriptors.MolLogP(mol), QED.qed(mol)]\n except:\n prop = 'Not exist!'\n return prop\n\n \n if isinstance(smile, (list, tuple)):\n all_list = []\n for s in list(smile):\n all_list.append(compute_for_one(s))\n props = all_list\n\n elif isinstance(smile, str):\n props = compute_for_one(smile) \n else:\n print(f\"Input must be a string or list, Instead got {type(smile)}\")\n \n return props", "def is_isomorphic(self, other, return_map=False):\n if return_map:\n if not(self.degree() == other.degree() and\n self.length() == other.length()):\n return False, None\n sn, sn_map = self.relabel(return_map=True)\n on, on_map = other.relabel(return_map=True)\n if sn != on:\n return False, None\n return True, sn_map * ~on_map\n\n return (self.degree() == other.degree() and\n self.length() == other.length() and\n self.relabel() == other.relabel())", "def test_is_isomorphic_to_smiles(self, pdb_path, smiles, sdf_path):\n pdb_path = get_data_file_path(pdb_path)\n pdb_mol = Molecule.from_pdb_and_smiles(pdb_path, smiles)\n\n smiles_mol = Molecule.from_smiles(smiles)\n\n assert pdb_mol.is_isomorphic_with(smiles_mol)", "def test_isomorphic_striped_stereochemistry(self):\n mol1 = Molecule.from_smiles(\"CCC[N@](C)CC\")\n mol2 = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n # Ensure default value is respected and order does not matter\n assert Molecule.are_isomorphic(mol1, mol2, strip_pyrimidal_n_atom_stereo=True)\n assert Molecule.are_isomorphic(mol1, mol2)\n assert Molecule.are_isomorphic(mol2, mol1)\n\n assert mol1 == mol2\n assert Molecule.from_smiles(\"CCC[N@](C)CC\") == Molecule.from_smiles(\n \"CCC[N@@](C)CC\"\n )\n\n mol1 = Molecule.from_smiles(\"CCC[N@](C)CC\")\n mol2 = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n assert not Molecule.are_isomorphic(\n mol1,\n mol2,\n strip_pyrimidal_n_atom_stereo=False,\n atom_stereochemistry_matching=True,\n bond_stereochemistry_matching=True,\n )[0]", "def _check_molecule_uniqueness(molecule_list: Optional[list[Molecule]]):\n # TODO: This could all be replaced by MoleculeSet\n if molecule_list is None:\n return\n\n for index, molecule in enumerate(molecule_list):\n for other_index, other_molecule in enumerate(molecule_list):\n if other_index <= index:\n continue\n if other_molecule.is_isomorphic_with(molecule):\n # The toolkit used to enforce that `partial_bond_orders_from_molecules` must not have isomorphic\n # duplicates in its list, raising `ValueError` if any fail.\n\n raise DuplicateMoleculeError(\n \"Duplicate molecules found in `partial_bond_orders_from_molecules` list. 
\"\n \"Please ensure that each molecule in this list is isomorphically unique.\",\n )", "def test_picometers_validate_list(self):\n picometers = inches_to.picometers([1.0, 2.0, 3.0, 4.0])\n comparison = np.array([2.54e10, 2*2.54e10, 3*2.54e10, 4*2.54e10])\n\n try:\n for i in range(len(comparison)):\n self.assertTrue(math.isclose(picometers[i], comparison[i], rel_tol=self.accepted_error))\n print('{:.40s}{}'.format(sys._getframe().f_code.co_name + self.padding, self.passed))\n except AssertionError:\n print('{:.40s}{}'.format(sys._getframe().f_code.co_name + self.padding, self.failed))", "def check(m) :\n #find Connected-component\n lst = find_c(m)\n for e in lst :\n # verify len , 3 is the len of large boat\n if len(e) > 3 :\n return False\n if not is_vert(e) and not is_hori(e):\n return False\n return True", "def is_proper(i0, i1, i2, i3, bond_set):\n if (i0, i1) in bond_set and (i1, i2) in bond_set and (i2, i3) in bond_set and len(set([i0, i1, i2, i3])) == 4:\n return True\n return False", "def is_isomorphic(A,B):\n return A.cardinality == B.cardinality and is_subalgebra(A,B)", "def are_compatible_imgs(one_img, another_img):\n return have_same_shapes(one_img, another_img)", "def test_isosceles(self):\r\n self.assertEqual(triangle_classification(4, 4, 5), 'Isosceles Triangle')\r\n self.assertEqual(triangle_classification(1234567890, 1234567890, 987654321), 'Isosceles Triangle')\r\n self.assertNotEqual(triangle_classification(3, 4, 5), 'Isosceles Triangle')\r\n self.assertNotEqual(triangle_classification(2, 2, 2.0000000000000001), 'Isosceles Triangle') # precision failure\r\n self.assertEqual(triangle_classification(2, 2, 2.000000000000001), 'Isosceles Triangle')\r\n self.assertEqual(triangle_classification(2, 2, 2.0000000000000001), 'Equilateral Triangle')", "def check_magic_square(square, square_edge):\n\n def check(square_edge, list_to_check):\n # NB There is many ways to that:\n # as the zen said simple is better than complex...\n\n # The formula is M = (n²(n²+1)/2) / n\n constant = ((square_edge**2) * (square_edge**2 + 1) / 2) / square_edge\n\n for el in list_to_check:\n # We compare that each iterator elements is equal to the magic constant\n assert el == constant, \"{} is not magic\".format(list_to_check)\n\n # Check constant for each row\n check(square_edge, numpy.sum(square, axis=0))\n\n # Check constant for each column\n check(square_edge, numpy.sum(square, axis=1))\n\n # Check constant for diagonal\n check(square_edge, [numpy.sum(square.diagonal())])\n\n return print(\"Correct \\n\")", "def xs_exists(i, r, g):\n # all istopoes\n act_i = ['U234', 'U235', 'U236', 'U238', 'PU238', 'PU239',\n 'PU240', 'PU241', 'PU242', 'NP237', 'AM241', 'AM243']\n fp_i = ['RH103', 'CS133', 'ND143', 'ND145', 'GD155', 'MO95', 'TC99', 'RU101', 'AG107', 'AG109', 'SM147', 'SM149', 'SM150',\n 'SM151', 'SM152', 'EU153', 'XE135', 'I135', 'IN115', 'CD106', 'CD108', 'CD110', 'CD111', 'CD112', 'CD113', 'CD114', 'CD116', 'B10', 'B11']\n\n if i != None and i not in act_i and i not in fp_i and i not in ['MACR', 'MACRT']:\n raise ValueError('Update of iso lists required not present iso', i)\n\n # Exclusive to cathegory reac\n act_r = ['fiss', 'nufi', 'spec']\n macr_r = ['ener', 'difc', 'tota']\n\n # for i=None\n if r != None and g != None:\n if 'tran' in r:\n if r[5] == '1' and g != '1':\n return False\n if r[5] == '2' and g != '2':\n return False\n\n # for i!=None\n if i != None:\n if i in act_i:\n if r in macr_r:\n return False\n\n if i in fp_i:\n if r in macr_r:\n return False\n if r in act_r:\n return 
False\n\n if 'MACR' in i:\n if 'tran2' in r:\n return False\n if 'tran3' in r:\n return False\n\n # excs are reaction n2n, n3n,... If the iso has high abso, then it doesnt shouw this r\n if i == 'GD155' or i == 'SM150' or i == 'XE135' or i == 'I135' or i == 'XE135' or i == 'B10':\n if r == 'excs':\n return False\n\n return True", "def check_SMILES(mol, validate_dict):\n # Check SMILES\n try:\n smi_check = mol.GetProp('original SMILES')\n except KeyError:\n validate_dict = add_warning(\n molecule_name=mol.GetProp('_Name'),\n field='original SMILES',\n warning_string=\"molecule has no 'original SMILES' property\",\n validate_dict=validate_dict)\n return validate_dict\n\n m = Chem.MolFromSmiles(smi_check, sanitize=False)\n if m is None:\n validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),\n field='original SMILES',\n warning_string=\"invalid SMILES %s\" % (smi_check,),\n validate_dict=validate_dict)\n\n return validate_dict", "def iter_all_hypo_isomorphic(hypo_indicator, nhypo):\n hypo_ind = [i for i in range(nhypo)]\n for permuted in uperm(hypo_ind):\n perm_hypo_indicator = []\n for li in hypo_indicator:\n if len(li) >= 1:\n perm_li = [permuted[v] for v in li]\n perm_hypo_indicator.append(sorted(perm_li))\n elif len(li) == 0:\n perm_hypo_indicator.append(li)\n yield perm_hypo_indicator", "def testIsotropicDistance(self):\n (w,h) = self.im1_1.getSize()\n \n self.im1_1.reset()\n drawSquare(self.im1_1, (w//2-1, h//2-1, w//2+1, h//2+1), 1)\n \n self.im8_3.reset()\n drawSquare(self.im8_3, (w//2-1, h//2-1, w//2+1, h//2+1), 1)\n self.im8_3.setPixel(2, (w//2, h//2))\n isotropicDistance(self.im1_1, self.im8_1)\n (x,y) = compare(self.im8_1, self.im8_3, self.im8_2)\n self.assertTrue(x<0)", "def is_MatrixMorphism(x):\n return isinstance(x, MatrixMorphism_abstract)", "def check_correct_numbers(puzzle_size: int, puzzle_list: list) -> None:\n for number in range(puzzle_size * puzzle_size):\n if number not in puzzle_list:\n raise ParsingError(\"Puzzle does not contain expected numbers.\")", "def test_parseIdList(self):\n inputs = [\n b'1:*',\n b'5:*',\n b'1:2,5:*',\n b'*',\n b'1',\n b'1,2',\n b'1,3,5',\n b'1:10',\n b'1:10,11',\n b'1:5,10:20',\n b'1,5:10',\n b'1,5:10,15:20',\n b'1:10,15,20:25',\n b'4:2'\n ]\n\n outputs = [\n MessageSet(1, None),\n MessageSet(5, None),\n MessageSet(5, None) + MessageSet(1, 2),\n MessageSet(None, None),\n MessageSet(1),\n MessageSet(1, 2),\n MessageSet(1) + MessageSet(3) + MessageSet(5),\n MessageSet(1, 10),\n MessageSet(1, 11),\n MessageSet(1, 5) + MessageSet(10, 20),\n MessageSet(1) + MessageSet(5, 10),\n MessageSet(1) + MessageSet(5, 10) + MessageSet(15, 20),\n MessageSet(1, 10) + MessageSet(15) + MessageSet(20, 25),\n MessageSet(2, 4),\n ]\n\n lengths = [\n None, None, None,\n 1, 1, 2, 3, 10, 11, 16, 7, 13, 17, 3\n ]\n\n for (input, expected) in zip(inputs, outputs):\n self.assertEqual(imap4.parseIdList(input), expected)\n\n for (input, expected) in zip(inputs, lengths):\n if expected is None:\n self.assertRaises(TypeError, len, imap4.parseIdList(input))\n else:\n L = len(imap4.parseIdList(input))\n self.assertEqual(L, expected,\n \"len(%r) = %r != %r\" % (input, L, expected))", "def test(list_of_f, iterable):\n print(\"Testing for the list of functions {} ...\".format([f.__name__ for f in list_of_f])) # DEBUG\n result = True\n print(\"Testing for the iterable {} ...\".format(iterable)) # DEBUG\n i = iterable\n allperms = []\n for f in list_of_f:\n allperms.append(sorted([list(p) for p in f(iterable)]))\n for i, pi in enumerate(allperms):\n for j in range(i + 
1, len(allperms)):\n pj = allperms[j]\n if pi != pj:\n print(\" - Function #{} ({.__name__}) gave a different list of permutations as function #{} ({.__name__}) ...\".format(i, list_of_f[i], j, list_of_f[j])) # DEBUG\n result = False\n else:\n print(\" - Function #{} ({.__name__}) gave the same list of permutations as function #{} ({.__name__}) ...\".format(i, list_of_f[i], j, list_of_f[j])) # DEBUG\n return result", "def test_isosceles():\n assert 'isosceles' == classify_triangle(2,2,3)", "def checkForAmbiguousIDs(self):\n\n result = True\n\n for frame in self.groundtruth_[\"frames\"]:\n ids = set()\n for groundtruth in frame[\"annotations\"]:\n if not \"id\" in groundtruth:\n # We should have already warned about a missing ID in checkForExistingIDs\n # no need to raise an exception in this function by trying to access missing IDs\n continue\n\n if groundtruth[\"id\"] in ids:\n result &= False\n write_stderr_red(\"Warning:\", \"Ambiguous id (%s) found in ground truth, timestamp %f, frame %d!\" % (str(groundtruth[\"id\"]), frame[\"timestamp\"], frame[\"num\"] if \"num\" in frame else -1))\n else:\n ids.add(groundtruth[\"id\"])\n\n for frame in self.hypotheses_[\"frames\"]:\n ids = set()\n for hypothesis in frame[\"hypotheses\"]:\n if hypothesis[\"id\"] in ids:\n result &= False\n write_stderr_red(\"Warning:\", \"Ambiguous hypothesis (%s) found in hypotheses, timestamp %f, frame %d!\" % (str(hypothesis[\"id\"]), frame[\"timestamp\"], frame[\"num\"] if \"num\" in frame else -1))\n else:\n ids.add(hypothesis[\"id\"])\n\n return result # true: OK, false: ambiguous id found", "def equivalent_to_representative(h):\n for p in monomial_generator(order):\n for q in monomial_generator(order):\n for r in representatives:\n if np.array_equal(h, np.dot(p, r).dot(q)):\n return True\n return False", "def check_SMILES(mol, validate_dict):\n # Check SMILES\n smi_check = mol.GetProp('original SMILES')\n\n m = Chem.MolFromSmiles(smi_check, sanitize=False)\n if m is None:\n validate_dict = add_warning(molecule_name=mol.GetProp('_Name'),\n field='original SMILES',\n warning_string=\"invalid SMILES %s\" % (smi_check,),\n validate_dict=validate_dict)\n\n return validate_dict", "def test_is_symmetric_and_hollow(self):\r\n self.assertTrue(is_symmetric_and_hollow(array([[0, 1], [1, 0]])))\r\n self.assertTrue(is_symmetric_and_hollow(matrix([[0, 1], [1, 0]])))\r\n self.assertTrue(is_symmetric_and_hollow(matrix([[0.0, 0], [0.0, 0]])))\r\n self.assertTrue(not is_symmetric_and_hollow(\r\n array([[0.001, 1], [1, 0]])))\r\n self.assertTrue(not is_symmetric_and_hollow(\r\n array([[0, 1.1], [1, 0]])))\r\n self.assertTrue(not is_symmetric_and_hollow(\r\n array([[0.5, 1.1], [1, 0]])))", "def check_identity(precision, lhs_calls, rhs_calls):\n\n def to_str(hla):\n return hla.prec_str(precision)\n\n for gene in 'ABC':\n lhs_set = set(map(to_str, lhs_calls[gene]))\n rhs_set = set(map(to_str, rhs_calls[gene]))\n if lhs_set != rhs_set:\n return False\n return True", "def test_invalid_inputs(self):\n f = gtrutils.check_petition_combos\n \n self.assertFalse( f(-1, 1, [], False, False))\n self.assertFalse( f( 0, 1, [], False, False))\n self.assertFalse( f( 1, 0, [], False, False))\n self.assertFalse( f( 1, 1, [-1], False, False))\n self.assertFalse( f( 1,-1, [], False, False))\n self.assertFalse( f( 1, 1, [1], False, False)) # n_off_role can never be 1\n self.assertFalse( f( 1, 1, [1], True, False)) # n_off_role can never be 1\n self.assertFalse( f( 1, 1, [1], False, True)) # n_off_role can never be 1\n self.assertFalse( f( 1, 1, [1], 
True, True)) # n_off_role can never be 1\n self.assertFalse( f( 1, 1, [1,3], True, True)) # n_off_role can never be 1\n\n self.assertFalse( f( 3, 0, [2,3,3], False, True)) # n_off_role can never be 1\n self.assertFalse( f( 3, 0, [2,3,3], True, False)) # n_off_role can never be 1\n self.assertFalse( f( 2, 0, [2,3,3], False, True)) # n_off_role can never be 1\n self.assertFalse( f( 2, 0, [2,3,3], True, False)) # n_off_role can never be 1\n self.assertFalse( f( 5, 1, [6,6], True, False)) # n_off_role can never be 1", "def check(indivs, geno_list):\r\n\tfor i in xrange(0,len(indivs)):\r\n\t\tif indivs[i] not in geno_list:\r\n\t\t\t# print \"this is not in: \"+ indivs[i]\r\n\t\t\treturn False\r\n\treturn True", "def test_isomorphic_stripped_stereochemistry(self):\n mol1 = Molecule.from_smiles(\"CCC[N@](C)CC\")\n mol2 = Molecule.from_smiles(\"CCC[N@@](C)CC\")\n\n # Ensure default value is respected and order does not matter\n assert Molecule.are_isomorphic(mol1, mol2, strip_pyrimidal_n_atom_stereo=True)\n assert Molecule.are_isomorphic(mol1, mol2)\n assert Molecule.are_isomorphic(mol2, mol1)\n\n assert mol1 == mol2\n assert Molecule.from_smiles(\"CCC[N@](C)CC\") == Molecule.from_smiles(\n \"CCC[N@@](C)CC\"\n )", "def valid_inverse_functionality(self, graph: Graph, fact: Tuple[str, str, str]) -> bool:\n similar_relation_exists = (None, fact[1], fact[2]) in graph\n # increment the counter if a similar fact already exists (True -> +1, False -> +0)\n self.num_facts_violating_inverse_functionality += similar_relation_exists\n return not similar_relation_exists", "def test_mixed_verfication(self):\n\n lists_map = ima.process_allowlists(ALLOWLIST, '')\n lists_map_wrong = ima.process_allowlists(ALLOWLIST_WRONG, '')\n lists_map_empty = ima.process_allowlists(ALLOWLIST_EMPTY, '')\n lists_map_exclude = ima.process_allowlists(ALLOWLIST, EXCLUDELIST)\n lists_map_exclude_wrong = ima.process_allowlists(ALLOWLIST_WRONG, EXCLUDELIST)\n empty_keyring = ima_file_signatures.ImaKeyring()\n\n # every entry is covered by the allowlist and there's no keyring -> this should pass\n self.assertTrue(ima.process_measurement_list(COMBINED.splitlines(), str(lists_map)) is not None)\n\n curdir = os.path.dirname(os.path.abspath(__file__))\n keydir = os.path.join(curdir, \"data\", \"ima_keys\")\n keyring = ima_file_signatures.ImaKeyring()\n\n rsakeyfile = os.path.join(keydir, \"rsa2048pub.pem\")\n pubkey, keyidv2 = ima_file_signatures.get_pubkey_from_file(rsakeyfile)\n keyring.add_pubkey(pubkey, keyidv2)\n\n eckeyfile = os.path.join(keydir, \"secp256k1.pem\")\n pubkey, keyidv2 = ima_file_signatures.get_pubkey_from_file(eckeyfile)\n keyring.add_pubkey(pubkey, keyidv2)\n\n # entries are not covered by a exclude list -> this should fail\n self.assertTrue(ima.process_measurement_list(COMBINED.splitlines(), ima_keyring=keyring) is None)\n\n # all entries are either covered by allow list or by signature verification -> this should pass\n self.assertTrue(ima.process_measurement_list(COMBINED.splitlines(), str(lists_map), ima_keyring=keyring) is not None)\n\n # the signature is valid but the hash in the allowlist is wrong -> this should fail\n self.assertTrue(ima.process_measurement_list(SIGNATURES.splitlines(), str(lists_map_wrong), ima_keyring=keyring) is None)\n\n # the signature is valid and the file is not in the allowlist -> this should pass\n self.assertTrue(ima.process_measurement_list(SIGNATURES.splitlines(), str(lists_map_empty), ima_keyring=keyring) is not None)\n\n # the signature is invalid but the correct hash is in the 
allowlist -> this should fail\n self.assertTrue(ima.process_measurement_list(SIGNATURES.splitlines(), str(lists_map), ima_keyring=empty_keyring) is None)\n\n # the file has no signature but the hash is correct -> this should pass\n self.assertTrue(ima.process_measurement_list(MEASUREMENTS.splitlines(), str(lists_map)))\n\n # All files are in the exclude list but hashes are invalid -> this should pass\n self.assertTrue(ima.process_measurement_list(MEASUREMENTS.splitlines(), str(lists_map_exclude_wrong)) is not None)\n\n # All files are in the exclude list and their signatures are invalid -> this should pass\n self.assertTrue(ima.process_measurement_list(SIGNATURES.splitlines(), str(lists_map_exclude), ima_keyring=empty_keyring) is not None)\n\n # All files are in the exclude list but hashes or signatures are invalid -> this should pass\n self.assertTrue(ima.process_measurement_list(MEASUREMENTS.splitlines(), str(lists_map_exclude_wrong), ima_keyring=empty_keyring) is not None)", "def _identify_determinism_check(parents_of, descendants_of , root_indices, observed_index):\n list_extract_and_union = lambda list_of_lists, indices: set().union(\n chain.from_iterable(list_of_lists[v] for v in indices))\n parents_of_observed = set(parents_of[observed_index])\n # descendants_of_roots = [self.descendants_of[v] for v in root_indices]\n # descendants_of_roots = set().union(*descendants_of_roots)\n descendants_of_roots = list_extract_and_union(descendants_of, root_indices)\n U1s = list(root_indices)\n Y = observed_index\n Xs = list(parents_of_observed.intersection(descendants_of_roots))\n return (U1s, [Y], Xs)", "def _check_valid_interactions(self, interactions_iterable, varname):\n unsafe = set(interactions_iterable)\n unknown = unsafe.symmetric_difference(_INTERACTIONS.keys()) & unsafe\n if unknown:\n raise NameError(\n f\"Unknown interaction(s) in {varname!r}: {', '.join(unknown)}\"\n )", "def test():\n # Check all numbers are unique\n from collections import defaultdict\n a = defaultdict()\n for i in range(SIZE+1):\n if encode(i) in a:\n return False\n a[encode(i)] = i", "def is_isomorphic(self, s1, s2):\n # encode strings\n enc1, enc2 = [], []\n count1, count2 = 0, 0\n dict1, dict2 = dict(), dict()\n for i in range(len(s1)):\n char1, char2 = s1[i], s2[i]\n if char1 in dict1:\n enc1.append(dict1[char1])\n else:\n count1 += 1\n dict1[char1] = count1\n enc1.append(dict1[char1])\n if char2 in dict2:\n enc2.append(dict2[char2])\n else:\n count2 += 1\n dict2[char2] = count2\n enc2.append(dict2[char2])\n return enc1 == enc2 # compare encodings", "def test_known_common_stable_isotopes_len():\n\n assert len(common_isotopes()) == 288, (\n \"The length of the list returned by common_isotopes() is \"\n f\"{len(common_isotopes())}, which is not the expected value.\"\n )\n\n assert len(stable_isotopes()) == 254, (\n \"The length of the list returned by stable_isotopes() is \"\n f\"{len(stable_isotopes())}, which is not the expected value.\"\n )\n\n assert 3352 <= len(known_isotopes()) <= 3400, (\n \"The length of the list returned by known_isotopes() is \"\n f\"{len(known_isotopes())}, which is not within the expected range.\"\n )", "def known_isotopes(argument=None):\n\n def known_isotopes_for_element(argument):\n element = atomic_symbol(argument)\n isotopes = []\n for isotope in Isotopes.keys():\n if element + '-' in isotope and isotope[0:len(element)] == element:\n isotopes.append(isotope)\n if element == 'H':\n isotopes.insert(1, 'D')\n isotopes.insert(2, 'T')\n mass_numbers = [mass_number(isotope) for 
isotope in isotopes]\n sorted_isotopes = [mass_number for (isotope, mass_number) in\n sorted(zip(mass_numbers, isotopes))]\n return sorted_isotopes\n\n if argument is not None:\n try:\n element = atomic_symbol(argument)\n isotopes_list = known_isotopes_for_element(element)\n except Exception:\n raise ValueError(\"known_isotopes is unable to get isotopes from \"\n f\"an input of: {argument}\")\n elif argument is None:\n isotopes_list = []\n for atomic_numb in range(1, 119):\n isotopes_list += known_isotopes_for_element(atomic_numb)\n\n return isotopes_list", "def test_known_common_stable_isotopes_cases():\n assert \"H-1\" in known_isotopes(\"H\")\n assert \"D\" in known_isotopes(\"H\")\n assert \"T\" in known_isotopes(\"H\")\n assert \"Be-8\" in known_isotopes(\"Be\")\n assert \"Og-294\" in known_isotopes(118)\n assert \"H-1\" in common_isotopes(\"H\")\n assert \"H-4\" not in common_isotopes(1)\n assert \"H-1\" in stable_isotopes(\"H\")\n assert \"D\" in stable_isotopes(\"H\")\n assert \"T\" not in stable_isotopes(\"H\")\n assert \"Fe-56\" in common_isotopes(\"Fe\", most_common_only=True)\n assert \"He-4\" in common_isotopes(\"He\", most_common_only=True)", "def test_validation_correct_molecules():\n paths = examples_paths()\n molecules = [\n {'name': 'toluene', 'leap': {'parameters': 'leaprc.gaff'}},\n {'name': 'toluene', 'leap': {'parameters': ['leaprc.gaff', 'toluene.frcmod']}},\n {'name': 'p-xylene', 'antechamber': {'charge_method': 'bcc'}},\n {'smiles': 'Cc1ccccc1', 'openeye': {'quacpac': 'am1-bcc'},\n 'antechamber': {'charge_method': None}},\n {'name': 'p-xylene', 'antechamber': {'charge_method': 'bcc'},\n 'epik': {'ph': 7.6, 'ph_tolerance': 0.7, 'tautomerize': False, 'select': 0}},\n {'smiles': 'Cc1ccccc1', 'openeye': {'quacpac': 'am1-bcc'},\n 'antechamber': {'charge_method': None}, 'epik': {'select': 1}},\n\n {'filepath': paths['abl']},\n {'filepath': paths['abl'], 'leap': {'parameters': 'leaprc.ff99SBildn'}},\n {'filepath': paths['abl'], 'leap': {'parameters': 'leaprc.ff99SBildn'}, 'select': 1},\n {'filepath': paths['abl'], 'select': 'all'},\n {'filepath': paths['abl'], 'select': 'all', 'strip_protons': True},\n {'filepath': paths['abl'], 'select': 'all', 'pdbfixer': {}},\n {'filepath': paths['abl'], 'select': 'all', 'pdbfixer': {'add_missing_residues': True}},\n {'filepath': paths['abl'], 'select': 'all', 'pdbfixer': {'add_missing_atoms': 'all', 'ph': '8.0'}},\n {'filepath': paths['abl'], 'select': 'all', 'pdbfixer': {'remove_heterogens': 'all'}},\n {'filepath': paths['abl'], 'select': 'all', 'pdbfixer': {'replace_nonstandard_residues': True}},\n {'filepath': paths['abl'], 'select': 'all', 'pdbfixer': {'apply_mutations': {'chain_id': 'A', 'mutations': 'T85I'}}},\n {'filepath': paths['abl'], 'select': 'all', 'modeller': {'apply_mutations': {'chain_id': 'A', 'mutations': 'T85I'}}},\n {'filepath': paths['abl'], 'select': 'all', 'modeller': {'apply_mutations': {'chain_id': 'A', 'mutations': 'WT'}}},\n {'filepath': paths['abl'], 'select': 'all', 'pdbfixer': {'apply_mutations': {'chain_id': 'A', 'mutations': 'I8A/T9A'}}},\n {'filepath': paths['toluene'], 'leap': {'parameters': 'leaprc.gaff'}},\n {'filepath': paths['benzene'], 'epik': {'select': 1, 'tautomerize': False}},\n # Regions tests, make sure all other combos still work\n {'name': 'toluene', 'regions': {'a_region': 4}},\n {'name': 'toluene', 'regions': {'a_region': 'dsl string'}},\n {'name': 'toluene', 'regions': {'a_region': [0, 2, 3]}},\n {'name': 'toluene', 'regions': {'a_region': [0, 2, 3], 'another_region': [5, 4, 3]}},\n 
{'smiles': 'Cc1ccccc1', 'regions': {'a_region': 4}},\n {'smiles': 'Cc1ccccc1', 'regions': {'a_region': 'dsl string'}},\n {'smiles': 'Cc1ccccc1', 'regions': {'a_region': [0, 2, 3]}},\n {'smiles': 'Cc1ccccc1', 'regions': {'a_region': [0, 2, 3], 'another_region': [5, 4, 3]}},\n {'filepath': paths['abl'], 'regions': {'a_region': 4}},\n {'filepath': paths['abl'], 'regions': {'a_region': 'dsl string'}},\n {'filepath': paths['abl'], 'regions': {'a_region': [0, 2, 3]}},\n {'filepath': paths['abl'], 'regions': {'a_region': [0, 2, 3], 'another_region': [5, 4, 3]}},\n {'filepath': paths['toluene'], 'regions': {'a_region': 4}},\n {'filepath': paths['toluene'], 'regions': {'a_region': 'dsl string'}},\n {'filepath': paths['toluene'], 'regions': {'a_region': [0, 2, 3]}},\n {'filepath': paths['toluene'], 'regions': {'a_region': [0, 2, 3], 'another_region': [5, 4, 3]}}\n ]\n for molecule in molecules:\n yield ExperimentBuilder._validate_molecules, {'mol': molecule}", "def main():\n print_function()\n\n # print(is_stairs([ 2, 3, 4, 5]))\n # print(is_stairs([8, 7, 6]))\n # print(is_stairs([2, 3, 5]))\n # print(is_stairs([2, 3, 2]))\n # print(is_stairs([4]))\n # print(is_stairs2([\"a\", \"b\", \"c\"]))\n # print(is_stairs2([\"c\", \"b\", \"a\"]))\n # print(is_stairs2([\"a\", \"B\", \"c\"]))\n # print(is_stairs2([\"a\", \"b\", \"C\"]))\n # print(is_stairs2([\"c\", \"B\", \"a\"]))\n # print(is_stairs2([\"C\", \"b\", \"a\"]))\n # print(factorial(3))\n # print(factorial(1))\n # print(factorial(0))\n # print(factorial(40))\n print(test(['d', 'e', 'g']))", "def is_interesting(x):\n # Never overwrites a symbol\n if x[0] == '1' and x[1] == '0' and x[2] == '1' and x[3] == '4' and x[6] == '1' and x[7] == '0' and x[8] == '1' and x[9] == '4':\n return False\n\n # Sets everything to 0, loops on 0\n if x[0] == '1' and x[1] == '0' and x[2] == '1' and x[3] == '0' and x[6] == '1' and x[7] == '0':\n return False\n # Dual of above\n if x[0] == '1' and x[1] == '4' and x[2] == '1' and x[3] == '4' and x[8] == '1' and x[9] == '4':\n return False\n\n # Loops between states doing nothing\n if x[0] == '1' and x[1] == '0' and x[2] == '1' and x[3] == '4' and x[6] == '0' and x[7] == '0' and x[8] == '0' and x[9] == '4':\n return False\n # Almost dual of above\n if x[0] == '1' and x[1] == '4' and x[2] == '1' and x[3] == '0' and x[6] == '0' and x[7] == '4' and x[8] == '0' and x[9] == '0':\n return False\n\n # Too many blanks, always goes right with blanks\n if x[0] == '0' and x[1] in ('1', '3', '5') and x[2] == '0' and x[3] in ('1', '3', '5') and x[4] == '1' and x[5] in ('1', '3', '5') and x[10] in ('0', '1') and x[11] in ('1', '3', '5'):\n return False\n\n # Loop on 0, or 0 -> 1, until end of input, then go forever\n if x[0] == '0' and x[1] in ('0', '4') and x[2] == '0' and x[3] == '1' and x[4] == '1' and x[5] in ('1', '3', '5') and x[10] in ('0', '1') and x[11] in ('1', '3', '5'):\n return False\n\n return True", "def test_invalid_isosceles():\n assert 'invalid' == classify_triangle(1,1,3)", "def contains_isomorphic_subgraph_bruteforce(self, H):\n if not isinstance(H, sage.graphs.graph.Graph):\n raise ValueError(\"H is not a graph.\")\n try:\n self.sage_graph().subgraph_search_iterator(H)\n return True\n except StopIteration:\n return False", "def __do_memebers_exist__(self):\n assert self.element_type is not None\n assert self.elements is not None\n assert self.points is not None\n if self.element_type == \"tri\" or self.element_type == \"quad\":\n assert self.edges is not None\n ndim = self.InferSpatialDimension()\n if self.element_type 
== \"tet\" or self.element_type == \"hex\":\n assert self.faces is not None", "def is_correctly_identified(self, identity = None):\n if identity is None:\n identity = self.identity()\n\n return len(self.inaccuracies[identity]) == 0", "def test_unique_together_normalization(self):\n data = {\n \"2-tuple\": ((\"foo\", \"bar\"), ((\"foo\", \"bar\"),)),\n \"list\": ([\"foo\", \"bar\"], ((\"foo\", \"bar\"),)),\n \"already normalized\": (\n ((\"foo\", \"bar\"), (\"bar\", \"baz\")),\n ((\"foo\", \"bar\"), (\"bar\", \"baz\")),\n ),\n \"set\": (\n {(\"foo\", \"bar\"), (\"bar\", \"baz\")}, # Ref #21469\n ((\"foo\", \"bar\"), (\"bar\", \"baz\")),\n ),\n }\n\n for unique_together, normalized in data.values():\n\n class M(models.Model):\n foo = models.IntegerField()\n bar = models.IntegerField()\n baz = models.IntegerField()\n\n Meta = type(\n \"Meta\", (), {\"unique_together\": unique_together, \"apps\": Apps()}\n )\n\n checks, _ = M()._get_unique_checks()\n for t in normalized:\n check = (M, t)\n self.assertIn(check, checks)", "def test_pruned_impropers(self, molecule, n_impropers, n_pruned):\n mol = Molecule.from_smiles(molecule)\n assert mol.n_impropers == n_impropers\n assert len(mol.smirnoff_impropers) == n_pruned\n assert len(mol.amber_impropers) == n_pruned\n\n # Order not guaranteed, so cannot zip and compare directly\n for smirnoff_imp in mol.smirnoff_impropers:\n # Convert SMIRNOFF-style improper into AMBER-style\n mod_imp = (\n smirnoff_imp[1],\n smirnoff_imp[0],\n smirnoff_imp[2],\n smirnoff_imp[3],\n )\n assert mod_imp in mol.amber_impropers", "def test_enumerating_protomers(self):\n\n mol = Molecule.from_smiles(\"Oc2ccc(c1ccncc1)cc2\")\n\n # there should be three protomers for this molecule so restrict the output\n protomers = mol.enumerate_protomers(max_states=2)\n\n assert mol not in protomers\n assert len(protomers) == 2\n\n # now make sure we can generate them all\n protomers = mol.enumerate_protomers(max_states=10)\n\n assert mol not in protomers\n assert len(protomers) == 3\n\n # make sure each protomer is unique\n unique_protomers = set(protomers)\n assert len(protomers) == len(unique_protomers)", "def test_enumerating_protomers(self):\n\n mol = Molecule.from_smiles(\"Oc2ccc(c1ccncc1)cc2\")\n\n # there should be three protomers for this molecule so restrict the output\n protomers = mol.enumerate_protomers(max_states=2)\n\n assert mol not in protomers\n assert len(protomers) == 2\n\n # now make sure we can generate them all\n protomers = mol.enumerate_protomers(max_states=10)\n\n assert mol not in protomers\n assert len(protomers) == 3\n\n # make sure each protomer is unique\n unique_protomers = set(protomers)\n assert len(protomers) == len(unique_protomers)", "def _subgraph_isomorphism_matcher(digraph, nxpattern, node_pred, edge_pred):\n graph_matcher = iso.DiGraphMatcher(digraph, nxpattern, node_match=node_pred, edge_match=edge_pred)\n yield from graph_matcher.subgraph_isomorphisms_iter()", "def _check_heterogeneous_mutations(self):\n # Currently, frontend assigns empty list if this value is not entered.\n mutations = {str(d.input.get(\"mutations\", [])) for d in self._data}\n genesets = {str(d.input.get(\"geneset\", \"\")) for d in self._data}\n\n if len(mutations) > 1:\n name = \"mutations\"\n multiple = mutations\n elif len(genesets) > 1:\n name = \"genesets\"\n multiple = genesets\n else:\n return\n\n raise ValueError(\n f\"Variants should be computed with the same {name} input. 
\"\n f\"Variants of samples in collection {self.collection.name} \"\n f\"have been computed with {', '.join(list(multiple))}.\\n\"\n \"Use geneset filter in the VariantTables constructor.\\n\"\n )", "def _check(self):\n d = self.degree()\n Sd = self.parent()._sym\n\n if prod(self._g, Sd.one()) != Sd.one():\n raise ValueError(\"the product is not identity\")\n\n if self._connected and not perms_are_connected(self._g, d):\n raise ValueError(\"not connected\")", "def check(self):\n for row in self.grid:\n for i in range(1, 10):\n if row.count(i) != 1:\n return False\n\n for col in range(9):\n lst = [row[col] for row in self.grid]\n for i in range(1, 10):\n if lst.count(i) != 1:\n return False\n \n for i in range(3):\n for j in range(3):\n lst = [row[j* 3:(j*3) + 3] for row in self.grid[i * 3:(i*3) + 3]] \n flat_list = []\n for k in lst:\n for number in k:\n flat_list.append(number)\n \n for check_number in range(1, 10):\n if flat_list.count(check_number) != 1:\n return False\n return True", "def _smilei_check():\n \n # Verify classes were not overriden\n for CheckClassName in [\"SmileiComponent\",\"Species\", \"Laser\",\"Collisions\",\n \"DiagProbe\",\"DiagParticleBinning\", \"DiagScalar\",\"DiagFields\",\n \"DiagTrackParticles\",\"DiagPerformances\",\"ExternalField\",\"PrescribedField\",\n \"SmileiSingleton\",\"Main\",\"Checkpoints\",\"LoadBalancing\",\"MovingWindow\",\n \"RadiationReaction\", \"ParticleData\", \"MultiphotonBreitWheeler\",\n \"Vectorization\", \"MultipleDecomposition\"]:\n CheckClass = globals()[CheckClassName]\n try:\n if not CheckClass._verify: raise Exception(\"\")\n except:\n raise Exception(\"ERROR in the namelist: it seems that the name `\"+CheckClassName+\"` has been overriden\")\n \n # Checkpoint: Verify the restart_dir and find possible restart file for each rank\n if len(Checkpoints)==1 and Checkpoints.restart_dir:\n if len(Checkpoints.restart_files) == 0 :\n Checkpoints.restart = True\n pattern = Checkpoints.restart_dir + os.sep + \"checkpoints\" + os.sep\n if Checkpoints.file_grouping:\n pattern += \"*\"+ os.sep\n pattern += \"dump-*-*.h5\"\n # pick those file that match the mpi rank\n files = filter(lambda a: smilei_mpi_rank==int(search(r'dump-[0-9]*-([0-9]*).h5$',a).groups()[-1]), glob(pattern))\n \n if Checkpoints.restart_number is not None:\n # pick those file that match the restart_number\n files = filter(lambda a: Checkpoints.restart_number==int(search(r'dump-([0-9]*)-[0-9]*.h5$',a).groups()[-1]), files)\n \n Checkpoints.restart_files = list(files)\n \n if len(Checkpoints.restart_files) == 0:\n raise Exception(\n \"ERROR in the namelist: cannot find valid restart files for processor \"+str(smilei_mpi_rank) +\n \"\\n\\t\\trestart_dir = '\" + Checkpoints.restart_dir +\n \"'\\n\\t\\trestart_number = \" + str(Checkpoints.restart_number) +\n \"\\n\\t\\tmatching pattern: '\" + pattern + \"'\" )\n \n else :\n raise Exception(\"restart_dir and restart_files are both not empty\")\n \n # Verify that constant() and tconstant() were not redefined\n if not hasattr(constant, \"_reserved\") or not hasattr(tconstant, \"_reserved\"):\n raise Exception(\"Names `constant` and `tconstant` cannot be overriden\")\n \n # Convert float profiles to constant() or tconstant()\n def toSpaceProfile(input):\n try : return constant(input*1.)\n except: return input\n def toTimeProfile(input):\n try:\n input*1.\n return tconstant()\n except: return input\n for s in Species:\n s.number_density = toSpaceProfile(s.number_density)\n s.charge_density = toSpaceProfile(s.charge_density)\n 
s.particles_per_cell = toSpaceProfile(s.particles_per_cell)\n s.charge = toSpaceProfile(s.charge)\n s.mean_velocity = [ toSpaceProfile(p) for p in s.mean_velocity ]\n s.temperature = [ toSpaceProfile(p) for p in s.temperature ]\n for e in ExternalField:\n e.profile = toSpaceProfile(e.profile)\n for e in PrescribedField:\n e.profile = toSpaceProfile(e.profile)\n for a in Antenna:\n a.space_profile = toSpaceProfile(a.space_profile )\n a.time_profile = toTimeProfile (a.time_profile )\n for l in Laser:\n l.chirp_profile = toTimeProfile( l.chirp_profile )\n l.time_envelope = toTimeProfile( l.time_envelope )\n l.space_envelope = [ toSpaceProfile(p) for p in l.space_envelope ]\n l.phase = [ toSpaceProfile(p) for p in l.phase ]\n for s in ParticleInjector:\n s.number_density = toSpaceProfile(s.number_density)\n s.charge_density = toSpaceProfile(s.charge_density)\n s.time_envelope = toTimeProfile(s.time_envelope)\n s.particles_per_cell = toSpaceProfile(s.particles_per_cell )\n s.mean_velocity = [ toSpaceProfile(p) for p in s.mean_velocity ]\n s.temperature = [ toSpaceProfile(p) for p in s.temperature ]", "def checkInput(Matrix,List):\r\n \r\n if type(Matrix) != list or type(List) != list:\r\n \r\n raise RuntimeError('malformed')\r\n for k in Matrix:\r\n if type(k) != list:\r\n \r\n raise RuntimeError('malformed')\r\n if len(k) != len(List):\r\n \r\n raise RuntimeError('malformed')\r\n for j in k:\r\n if type(j) != int and type(j) != float:\r\n \r\n raise RuntimeError('malformed')\r\n if j > 30:\r\n \r\n raise RuntimeError('malformed')\r\n for p in List:\r\n if type(p) != str:\r\n \r\n raise RuntimeError('malformed')\r\n\r\n if len(Matrix) != len(List):\r\n \r\n raise RuntimeError('malformed')\r\n return", "def victory_checker() -> bool:\r\n conflict_check()\r\n for x in range(shape):\r\n for y in range(shape):\r\n if conflict_space[x, y] != 0:\r\n return False\r\n if separation_crawler(False):\r\n return False\r\n return True", "def check_instructions(instructions, complex_pdb, c_chain = \"L\", f_chain=\"L\"):\n fragments_and_atoms = get_pdb_fragments_and_atoms_from_instructions([instructions])\n for fragment, atom_core, atom_fr in fragments_and_atoms:\n atoms_if_bond = extract_hydrogens_from_instructions([fragment, atom_core, atom_fr])\n if atoms_if_bond:\n ch.check_if_atom_exists_in_ligand(complex_pdb, atoms_if_bond[0], c_chain)\n ch.check_if_atom_exists_in_ligand(complex_pdb, atoms_if_bond[1], c_chain)\n ch.check_if_atom_exists_in_ligand(fragment, atoms_if_bond[2], f_chain)\n ch.check_if_atom_exists_in_ligand(fragment, atoms_if_bond[3], f_chain)\n else:\n ch.check_if_atom_exists_in_ligand(fragment, atom_fr, f_chain)\n ch.check_if_atom_exists_in_ligand(complex_pdb, atom_core, c_chain)\n with open(fragment) as content:\n fr_content = content.readlines()\n ch.check_duplicated_pdbatomnames(fr_content)", "def is_identity(operator):\n if isinstance(\n operator,\n (QubitOperator, FermionOperator, BosonOperator, QuadOperator)):\n return list(operator.terms) == [()]\n raise TypeError('Operator of invalid type.')", "def test_multi_same(nothing_list):\n result = multi_same_list(nothing_list)\n assert result[1][2] == 0\n assert result[0][2] == 0", "def test_smiles_types(self, data, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n mol = create_cis_1_2_dichloroethene()\n isomeric, explicit_hs, mapped = (\n data[\"isomeric\"],\n data[\"explicit_hydrogens\"],\n data[\"mapped\"],\n )\n if data[\"error\"] is not None:\n with pytest.raises(data[\"error\"]):\n mol.to_smiles(\n 
isomeric=isomeric,\n explicit_hydrogens=explicit_hs,\n mapped=mapped,\n toolkit_registry=toolkit,\n )\n\n else:\n # make the smiles then do some checks on it\n output_smiles = mol.to_smiles(\n isomeric=isomeric,\n explicit_hydrogens=explicit_hs,\n mapped=mapped,\n toolkit_registry=toolkit,\n )\n if isomeric:\n assert \"\\\\\" in output_smiles\n if explicit_hs:\n assert \"H\" in output_smiles\n if mapped:\n for i in range(1, 7):\n assert f\":{i}\" in output_smiles\n # if the molecule is mapped make it using the mapping\n mol2 = Molecule.from_mapped_smiles(\n mapped_smiles=output_smiles,\n toolkit_registry=toolkit,\n allow_undefined_stereo=not isomeric,\n )\n else:\n # make a molecule from a standard smiles\n mol2 = Molecule.from_smiles(\n smiles=output_smiles,\n allow_undefined_stereo=not isomeric,\n toolkit_registry=toolkit,\n )\n\n isomorphic, atom_map = Molecule.are_isomorphic(\n mol,\n mol2,\n return_atom_map=True,\n aromatic_matching=True,\n formal_charge_matching=True,\n bond_order_matching=True,\n atom_stereochemistry_matching=isomeric,\n bond_stereochemistry_matching=isomeric,\n )\n\n assert isomorphic is True\n if mapped:\n assert {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5} == atom_map\n\n else:\n pytest.skip(\n f\"The required toolkit ({toolkit_class.toolkit_name}) is not available.\"\n )", "def test_smiles_types(self, data, toolkit_class):\n\n if toolkit_class.is_available():\n toolkit = toolkit_class()\n mol = create_cis_1_2_dichloroethene()\n isomeric, explicit_hs, mapped = (\n data[\"isomeric\"],\n data[\"explicit_hydrogens\"],\n data[\"mapped\"],\n )\n if data[\"error\"] is not None:\n with pytest.raises(data[\"error\"]):\n mol.to_smiles(\n isomeric=isomeric,\n explicit_hydrogens=explicit_hs,\n mapped=mapped,\n toolkit_registry=toolkit,\n )\n\n else:\n\n # make the smiles then do some checks on it\n output_smiles = mol.to_smiles(\n isomeric=isomeric,\n explicit_hydrogens=explicit_hs,\n mapped=mapped,\n toolkit_registry=toolkit,\n )\n if isomeric:\n assert \"\\\\\" in output_smiles\n if explicit_hs:\n assert \"H\" in output_smiles\n if mapped:\n for i in range(1, 7):\n assert f\":{i}\" in output_smiles\n # if the molecule is mapped make it using the mapping\n mol2 = Molecule.from_mapped_smiles(\n mapped_smiles=output_smiles,\n toolkit_registry=toolkit,\n allow_undefined_stereo=not isomeric,\n )\n else:\n # make a molecule from a standard smiles\n mol2 = Molecule.from_smiles(\n smiles=output_smiles,\n allow_undefined_stereo=not isomeric,\n toolkit_registry=toolkit,\n )\n\n isomorphic, atom_map = Molecule.are_isomorphic(\n mol,\n mol2,\n return_atom_map=True,\n aromatic_matching=True,\n formal_charge_matching=True,\n bond_order_matching=True,\n atom_stereochemistry_matching=isomeric,\n bond_stereochemistry_matching=isomeric,\n )\n\n assert isomorphic is True\n if mapped:\n assert {0: 0, 1: 1, 2: 2, 3: 3, 4: 4, 5: 5} == atom_map\n\n else:\n pytest.skip(\n f\"The required toolkit ({toolkit_class.toolkit_name}) is not available.\"\n )", "def checkIntersections(path_list):\n som = 0\n joined_list = [hash(i) for i in list(itertools.chain.from_iterable(path_list))] # lelijk\n occurrences = np.bincount(joined_list)\n for i in occurrences:\n if i > 1:\n som += i\n return som", "def assertIsNifti(*args):\n for f in args:\n f = ensure.ensureIsImage(f)\n\n # Nifti2Image derives from Nifti1Image,\n # so we only need to test the latter.\n assert isinstance(f, nib.nifti1.Nifti1Image), \\\n 'file must be a nifti (.nii or .nii.gz): {}'.format(f)", "def test_smiles_parser_return_is_successful(mols):\n 
preprocessor = NFPPreprocessor()\n parser = SmilesParser(preprocessor)\n mol_smiles_with_invalid = [\n 'var', 'CN=C=O', 'hoge', 'Cc1ccccc1', 'CC1=CC2CC(CC1)O2']\n result = parser.parse(mol_smiles_with_invalid, return_smiles=True,\n return_is_successful=True)\n\n dataset = result['dataset']\n assert len(dataset) == 3\n is_successful = result['is_successful']\n assert len(is_successful) == 5\n assert numpy.alltrue(is_successful[[1, 3, 4]])\n assert numpy.alltrue(~is_successful[[0, 2]])\n\n # We assume NFPPreprocessor works as documented.\n for i in range(3):\n expect = preprocessor.get_input_features(mols[i])\n check_input_features(dataset[i], expect)", "def test_get_most_common_isotope_for_element(self):\n common_isotopes = list()\n common_isotopes.append(converter.get_most_common_isotope_for_element('H'))\n common_isotopes.append(converter.get_most_common_isotope_for_element('B'))\n common_isotopes.append(converter.get_most_common_isotope_for_element('C'))\n common_isotopes.append(converter.get_most_common_isotope_for_element('Zn'))\n common_isotopes.append(converter.get_most_common_isotope_for_element('U'))\n common_isotopes.append(converter.get_most_common_isotope_for_element('Og'))\n self.assertEqual(common_isotopes, [1, 11, 12, 64, 238, 294])", "def checkio(lines_list):\n row = [[0]*3,[0]*3,[0]*3,[0]*3]\n colume = [[0]*4,[0]*4,[0]*4]\n square = 0\n # save line in matrix\n for i in lines_list:\n if i[0]-i[1] in [-1,1]:\n row[int((i[0]-1)/4)][min(i[0],i[1])%4-1] = 1\n else:\n colume[int(((min(i[0],i[1])-1)/4))][min(i[0],i[1])%4-1] = 1\n\n for r in [0, 1, 2]:\n # r is the start point of row\n for c in [0, 1, 2]:\n # c is the start point of colume\n for line in range(1, 4-max(r,c)):\n # line is the length of square\n check = 0\n print(line)\n for i in range(0, line):\n check = row[r][c+i] + colume[r+i][c] + row[r+line][c+i] + colume[r+i][c+line] + check\n if check == line * 4:\n square += 1\n return square", "def SetFunction():\r\n s2 = []\r\n s3 = []\r\n s4 = []\r\n s2 = { i for i in range(21) if i%2 == 0}\r\n s3 = { i for i in range(21) if i%3 == 0}\r\n s4 = { i for i in range(21) if i%4 == 0}\r\n s2 = set(s2)\r\n s3 = set(s3)\r\n s4 = set(s4)\r\n print s3.issubset(s2)\r\n print s4.issubset(s2)", "def is_valid_integer_list(any_list):\n list_object = json.loads(any_list)\n return not any(not is_valid_integer(str(listing_id)) for listing_id in\n list_object)", "def check_fam(x):\n func_arr = [is_triangle, is_square, is_penta, is_hexa, is_hepta, is_octa]\n for i in xrange(len(func_arr)):\n if func_arr[i](x):\n return i\n return None", "def isCircular(alist):\n global primes\n for j in alist:\n if j not in primes:\n return False\n return True", "def is_perfect_square():", "def subgraph_is_isomorphic(graph, subgraph):\n graph_gspan = networkx_to_gspan(graph, 0)\n subgraph_gspan = networkx_to_gspan(subgraph, 1)\n\n # create temporary files during gspan processing\n input_fd, input_filename = tempfile.mkstemp()\n output_fd, output_filename = tempfile.mkstemp()\n\n with os.fdopen(input_fd, 'w', encoding='utf-8') as input_handler:\n input_handler.write(graph_gspan + subgraph_gspan)\n orig_stdout = sys.stdout\n sys.stdout = os.fdopen(output_fd, 'w', encoding='utf-8')\n subgraph_miner = gSpan(input_filename, 2, where=True)\n subgraph_miner.run()\n sys.stdout = orig_stdout\n mined_subgraphs = parse_mined_gspan_file(output_filename)\n\n # remove temporary files\n os.remove(input_filename)\n os.remove(output_filename)\n\n em = iso.numerical_edge_match('weight', 0)\n nm = 
iso.categorical_node_match('name', None)\n for mined_subgraph in mined_subgraphs:\n graph_matcher = iso.GraphMatcher(mined_subgraph, subgraph, node_match=nm, edge_match=em)\n if graph_matcher.is_isomorphic():\n return True\n return False", "def checker(self, product):\n for item in self.instock:\n if item == product:\n return True\n return False" ]
[ "0.68144727", "0.61583287", "0.57273215", "0.5682086", "0.561438", "0.5609958", "0.5602563", "0.55251133", "0.5451978", "0.5407561", "0.5355188", "0.5268322", "0.5252409", "0.5186853", "0.51790214", "0.50514466", "0.50385714", "0.49780542", "0.49707803", "0.4955957", "0.4917146", "0.4910591", "0.48722613", "0.48361441", "0.48041403", "0.48002657", "0.48002657", "0.48000416", "0.47979224", "0.47934723", "0.47877374", "0.47861126", "0.47820207", "0.47742128", "0.4739932", "0.4738458", "0.47373596", "0.47288582", "0.4725199", "0.4719736", "0.47135255", "0.47050568", "0.4684526", "0.46735683", "0.46723455", "0.46711046", "0.46697852", "0.46484512", "0.46428674", "0.46315578", "0.46239945", "0.46212557", "0.4618224", "0.461614", "0.46151453", "0.45879897", "0.45853046", "0.45688766", "0.4563486", "0.4553459", "0.45532578", "0.45451692", "0.4541362", "0.45401022", "0.45395046", "0.45393875", "0.4539088", "0.4531603", "0.4531138", "0.4524411", "0.45228657", "0.4516202", "0.4512285", "0.45101827", "0.45072603", "0.45072603", "0.45072109", "0.4505129", "0.44909662", "0.44867542", "0.4482384", "0.4477666", "0.4456558", "0.4451742", "0.44506735", "0.44485527", "0.44473037", "0.44446194", "0.44415894", "0.44398788", "0.44372204", "0.44301102", "0.44278935", "0.44271806", "0.44237897", "0.4416676", "0.44160613", "0.44124612", "0.44105035", "0.44094193" ]
0.5521835
8
r""" Wait for the user to type a character (and hit Enter). If the user enters one of the characters in letters, return that character. If the user hits Enter without entering a character, and default is specified, returns `default`, Otherwise, asks the user to enter a character again.
def _prompt(letters='yn', default=None):
    import sys
    while True:
        try:
            inputstr = sys.stdin.readline().strip()
        except KeyboardInterrupt:
            sys.exit(0)
        if inputstr and inputstr in letters:
            return inputstr
        if default is not None and inputstr == '':
            return default
        print 'Come again?'
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prompt(letters='yn', default=None):\n while True:\n try:\n input_text = sys.stdin.readline().strip()\n except KeyboardInterrupt:\n sys.exit(0)\n if input_text and input_text in letters:\n return input_text\n if default is not None and input_text == '':\n return default\n print('Come again?')", "def input_with_default(prompt, default):\n response = raw_input(\"%s (Default %s) \"%(prompt, default))\n if not response:\n return default\n return response", "def default_input(prompt, default_value):\r\n item = input(prompt + \"[Enter for \" + default_value + \"]: \").lower()\r\n if item == \"\":\r\n item = default_value\r\n return item", "def get_guess():\n print('Choose a letter:')\n return input()", "def default(prompt, default, validator=(lambda x: True), hint=None):\n user_input = input(\"{0} [{1}]\".format(prompt, default))\n while not validator(user_input):\n user_input = input(\"{0} [{1}]\".format(prompt, default))\n return user_input or default", "def prompt_string(prompt=\"Enter a value\",\n default=None):\n _new = None\n while True:\n try:\n _new = str(input(f\"{prompt}? [{str(default)}]: \")) # nosec\n break\n except ValueError:\n print(\"Sorry, I didn't understand that.\")\n continue\n except KeyboardInterrupt:\n break\n return default if _new in [None, ''] else _new", "def user_prompt(prompt, default=None):\n prompt = f\"\\n {prompt} [{default}] runs or type an amount: \"\n response = input(prompt)\n if not response and default:\n return default\n else:\n return response", "def get_input(prompt, default=None, choices=None, option_value=None):\r\n if option_value is not None:\r\n return option_value\r\n \r\n choices = choices or []\r\n while 1:\r\n r = raw_input(prompt+' ').strip()\r\n if not r and default is not None:\r\n return default\r\n if choices:\r\n if r not in choices:\r\n r = None\r\n else:\r\n break\r\n else:\r\n break\r\n return r", "def prompt(name, default):\n value = raw_input('%s [%s]: ' %(name, default))\n if not value:\n value = default\n return value", "def prompt(msg, default=NO_DEFAULT, validate=None):\n while True:\n response = input(msg + \" \").strip()\n if not response:\n if default is NO_DEFAULT:\n continue\n return default\n if validate is None or validate(response):\n return response", "def ask(question, options, default):\n assert default in options\n\n question += \" ({})? 
\".format(\"/\".join(o.upper() if o == default else o for o in options))\n selected = None\n while selected not in options:\n selected = input(question).strip().lower()\n if selected == \"\":\n selected = default\n else:\n if selected not in options:\n question = \"Please type '{}'{comma} or '{}': \".format(\n \"', '\".join(options[:-1]), options[-1],\n comma=',' if len(options) > 2 else '',\n )\n return selected", "def get_input():\n letters = input('Enter letters, Enter to quit:\\n')\n return letters", "def query_input(question, default=None, color=default_color):\n if default is None or default == '':\n prompt = ' '\n elif type(default) == str:\n prompt = flo(' [{default}] ')\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(color(question + prompt))\n choice = raw_input()\n if default is not None and choice == '':\n return default\n if choice != '':\n return choice", "def ask_letter(self):\n letter = ' '\n while letter not in string.ascii_lowercase:\n letter = input('Write a letter:\\n')\n letter.lower()\n\n return letter", "def text_input(self, prompt, default=None):\n try:\n user_input = self(prompt)\n if default is not None and user_input == \"\":\n return default\n except InputDisabled:\n if default is not None:\n return default\n raise\n\n return user_input", "def inputChoice(self, question, options, hotkeys, default=None):\n options = options[:] # we don't want to edit the passed parameter\n for i in range(len(options)):\n option = options[i]\n hotkey = hotkeys[i]\n # try to mark a part of the option name as the hotkey\n m = re.search('[%s%s]' % (hotkey.lower(), hotkey.upper()), option)\n if hotkey == default:\n caseHotkey = hotkey.upper()\n else:\n caseHotkey = hotkey\n if m:\n pos = m.start()\n options[i] = '%s[%s]%s' % (option[:pos], caseHotkey,\n option[pos+1:])\n else:\n options[i] = '%s [%s]' % (option, caseHotkey)\n # loop until the user entered a valid choice\n while True:\n prompt = '%s (%s)' % (question, ', '.join(options))\n answer = self.input(prompt)\n if answer.lower() in hotkeys or answer.upper() in hotkeys:\n return answer\n elif default and answer=='': # empty string entered\n return default", "def simple_response(prompt, default=None):\n if default is None:\n response = input(prompt + ': ')\n else:\n response = input(prompt + f' [{default}]' + ': ')\n if response != '':\n return response\n elif response == '' and default is not None:\n return default\n else:\n print('Please enter a valid response')\n return simple_response(prompt, default)", "def process_default(self, character):\n pass", "def get_input():\n return getch()", "def get_guess():\n letter = input(\"Please input a letter to check\").lower()\n if len(letter) != 1:\n print(\"Please input a single letter\")\n get_guess()\n elif letter not in \"abcdefghijklmnopqrstuvxyz\":\n print (\"Only input letters\")\n get_guess()\n else:\n return letter", "def prompt_with_options(prompt, default=None, options=None):\n\n msg = \"%s [%s]: \" % (prompt, default) if default is not None else \"%s: \" % prompt\n value = None\n while value is None:\n value = raw_input(msg).strip()\n if value:\n if options and value not in options:\n value = None\n elif default is not None:\n value = default\n\n return value", "def guess_input(self):\n try:\n self.player_guess = input('Guess a letter: ').lower()\n Character(self.player_guess, self.selected_phrase)\n except ValueError:\n print(\"That was not a valid input. 
Please pick a number between 1 and 10\")\n if self.player_guess == \"\":\n print (\"Please enter a letter,try again.\")\n if not self.player_guess.isalpha():\n print (\"Please only enter a letter(a-z),try again.\")\n if len(self.player_guess) > 1:\n print(\"Please enter only one letter at a time.\")", "def get_value(prompt, default=None, hidden=False):\n _prompt = '%s : ' % prompt\n if default:\n _prompt = '%s [%s]: ' % (prompt, default)\n\n if hidden:\n ans = getpass(_prompt)\n else:\n ans = raw_input(_prompt)\n\n # If user hit Enter and there is a default value\n if not ans and default:\n ans = default\n return ans", "def _ask_prompt(question: str,\n console: io.IO,\n validate: Optional[Callable[[str], None]] = None,\n default: Optional[str] = None) -> str:\n validate = validate or (lambda x: None)\n while True:\n answer = console.ask(question)\n if default and not answer:\n answer = default\n try:\n validate(answer)\n break\n except ValueError as e:\n console.error(e)\n\n return answer", "def prompt_selection(self,\r\n prompt_text: str,\r\n validate: Union[Callable[[str], Optional[Any]], partial],\r\n default: Any) -> Any:\r\n while True:\r\n try:\r\n if self.__use_standard_console:\r\n user_input = prompt(prompt_text)\r\n else:\r\n user_input = self.__alt_prompt(prompt_text)\r\n except KeyboardInterrupt:\r\n return default\r\n if user_input == '':\r\n return default\r\n user_input = validate(user_input)\r\n if user_input is not None:\r\n break\r\n return user_input", "def input_timeout(prompt: str, t_timeout: [float, int] = 30, default: str = None) -> str:\n print(prompt, end=\" \")\n rlist, _, _ = select.select([sys.stdin], [], [], t_timeout)\n\n if not rlist:\n if default is None:\n raise RuntimeError(f\"No input received within {t_timeout}s!\")\n else:\n return default\n\n return sys.stdin.readline().strip()", "def ask_yes_no(question, default=\"y\"):\n valid = {\"y\": True, \"n\": False}\n\n if default is None:\n prompt = \" [y/n] \"\n elif default == \"y\":\n prompt = \" [Y/n] \"\n elif default == \"n\":\n prompt = \" [y/N] \"\n else:\n raise ValueError(\"invalid default answer: '%s'\" % default)\n\n while True:\n sys.stdout.write(question + prompt)\n choice = input().lower()\n\n if default is not None and choice == '':\n return valid[default]\n\n choice_letter = choice[0]\n\n if choice_letter in valid:\n return valid[choice_letter]\n else:\n sys.stdout.write(\"Please respond with 'yes' or 'no' \"\n \"(or 'y' or 'n').\\n\")", "def _get_user_input(query, valid, default):\n\n # Wait for valid user input and return choice upon receipt\n while True:\n choice = input(query)\n if default is not None and choice == \"\":\n return default\n elif choice in valid:\n return choice\n else:\n print(\"Please respond with '\" + \\\n \"or '\".join(opt + \"' \" for opt in valid) + \"\\n\")", "def waitenterpressed(message = \"Press ENTER to continue...\"):\n\ttry:\n\t\tinput = raw_input\n\texcept: \n\t\tpass\n\traw_input(message)\n\treturn 0", "def choice():\n choice = input(\"press e to encode press d to decode or press q to quit:\")\n if choice == \"e\":\n return \"e\"\n\n elif choice == \"d\":\n return \"d\"\n else:\n print(\"Okay bye\")", "def get_user_choice():\n user_input = input('Your choice: ')\n return user_input", "def press_enter():\n raw_input(\"\\n\\nPress Enter\")", "def question(text, default):\n\n while 1:\n if isinstance(default, bool):\n if default:\n default_str = \"yes\"\n else:\n default_str = \"no\"\n else:\n default_str = str(default)\n report(text + \" [\" + default_str + 
\"] :\")\n input_audio_icon()\n\n if not dont_ask:\n str_inp = input(\">\")\n\n # On plain enter, return default\n if dont_ask or (len(str_inp) == 0):\n return default\n # If a value was typed, check it and convert it\n elif isinstance(default, bool):\n if str_inp in [\"yes\", \"y\", \"Y\", \"true\", \"t\", \"1\"]:\n return True\n elif str_inp in [\"no\", \"n\", \"N\", \"false\", \"f\", \"0\"]:\n return False\n else:\n report(\"Unknown answer (type 'yes' or 'no')\")\n continue\n elif isinstance(default, int):\n return int(str_inp)\n elif isinstance(default, str):\n return str_inp\n else:\n raise TypeError(\"Invalid type for the default value\")", "def wait_key():\n result = None\n if os.name == 'nt':\n result = input(\"Press Enter to continue...\")\n else:\n import termios\n fd = sys.stdin.fileno()\n\n oldterm = termios.tcgetattr(fd)\n newattr = termios.tcgetattr(fd)\n newattr[3] = newattr[3] & ~termios.ICANON & ~termios.ECHO\n termios.tcsetattr(fd, termios.TCSANOW, newattr)\n\n try:\n result = sys.stdin.read(1)\n except IOError:\n pass\n finally:\n termios.tcsetattr(fd, termios.TCSAFLUSH, oldterm)\n\n return result", "def _ask_user_yn(question, default):\n\n input_valid = False\n default = default.lower()\n answer = \"\"\n while not input_valid:\n answer = input(question)\n if answer == \"\":\n answer = default\n if re.findall(r\"[YyNn]\", answer):\n input_valid = True\n answer = answer[0].lower()\n else:\n print(\"Please answer Y, N or Return.\")\n\n return answer", "def get_answer(message, answers='Yn', default='Y', quit=''):\r\n if quit and quit not in answers:\r\n answers = answers + quit\r\n \r\n message = message + '(' + '/'.join(answers) + ')[' + default + ']:'\r\n ans = raw_input(message).strip().upper()\r\n if default and not ans:\r\n ans = default.upper()\r\n while ans not in answers.upper():\r\n ans = raw_input(message).strip().upper()\r\n if quit and ans == quit.upper():\r\n print \"Command be cancelled!\"\r\n sys.exit(0)\r\n return ans", "def askOption():\n while True:\n print(\"Do you want to (E)ncode or (D)ecode?\") \n choice = input(\">> \")\n \n if choice.lower() in ['d','e']:\n return choice", "def user_input():\n key = input('Move position or press y to do turn: ')\n if key == 'w' or 's' or 'd' or 'a' or 'y':\n return key\n elif key == 'h':\n print('There is no help yet')\n else:\n print('Need help? 
Press \"h\"')\n return user_input()", "def user_input(msg, valid, default=None, timeout=None):\n\n # Add trailing whitespace to `msg` if not already present and append\n # default reply (if provided)\n suffix = \"\" + \" \" * (not msg.endswith(\" \"))\n if default is not None:\n default = default.replace(\"[\", \"\").replace(\"]\",\"\")\n assert default in valid\n suffix = \"[Default: '{}'] \".format(default)\n query = msg + suffix\n\n if timeout is None:\n return _get_user_input(query, valid, default)\n else:\n procQueue = multiprocessing.Queue()\n proc = multiprocessing.Process(target=_queuing_input,\n args=(procQueue,\n sys.stdin.fileno(),\n query,\n valid,\n default)\n )\n proc.start()\n countdown = tqdm(desc=\"Time remaining\", leave=True, bar_format=\"{desc}: {n} \",\n initial=timeout, position=1)\n ticker = 0\n while procQueue.empty() and ticker < timeout:\n time.sleep(1)\n ticker += 1\n countdown.n = timeout - ticker\n countdown.refresh() # force refresh to display elapsed time every second\n countdown.close()\n proc.terminate()\n\n if not procQueue.empty():\n choice = procQueue.get()\n else:\n choice = default\n return choice", "def ask_question(\n question: Text,\n input_type: Callable[[Text], Any],\n default: Optional[Any] = None,\n hide_input: Optional[bool] = False,\n) -> Any:\n if hide_input:\n if default:\n hint = \"**\"\n else:\n hint = \"\"\n\n return getpass.getpass(f\"{question} [{hint}] \")\n\n if default:\n ask = f\"{question} [{default}]\"\n else:\n ask = question\n\n answer = input(f\"{ask}: \")\n return input_type(answer)", "def user_input(self, msg, default=''):\n msg = '%s %s ' % (self.prefix, msg)\n\n if default != '':\n msg += '[%s] ' % default\n\n try:\n vim.command('echohl Debug')\n input_str = vim.eval('input(\"%s> \")' % msg)\n vim.command('echohl none')\n except KeyboardInterrupt:\n input_str = ''\n\n return input_str or default", "def enter():\n input(\"\\nClick Enter to continue \")", "def get_input(prompt):\n return input(prompt)", "def get_input(prompt):\n return input(prompt)", "def get_yes_no_input(logger, text, default=None):\n if default:\n default = default.strip().lower()\n\n y = \"Y\" if default == \"y\" else \"y\"\n n = \"N\" if default == \"n\" else \"n\"\n\n prompt = f\"{text} [{yellow(y)}/{yellow(n)}]\"\n user_input = \"\"\n\n while not user_input:\n logger(prompt, end=\"\")\n user_input = input(\" \").strip().lower()\n if user_input == \"\" and default:\n user_input = default\n\n return user_input", "def ask_user_input(prompt: str) -> str:\n return input(prompt)", "def _readchar(self):\n if len(self.user) == 0:\n return sys.stdin.read(1)\n\n else:\n iRet = self.user[0]\n self.user = self.user[1:]\n return iRet", "def get_guess(already_guessed):\n\n while True:\n print('Guess a letter.')\n guess = (input()).lower()\n if len(guess) != 1:\n print('Please enter a single letter.')\n elif guess == ' ':\n print('Space is not a valid entry. Please enter a single letter.')\n elif guess in already_guessed:\n print('\"Already guessed the letter. Choose again.')\n elif guess not in 'abcdefghijklmnopqrstuvwxyz':\n print('Please enter a LETTER.')\n else:\n return guess", "def player_choice(text):\n try:\n action_choice = input(text)\n return action_choice.lower()\n except NameError:\n print(\"Invalid input. 
Please try again.\")", "def enterMessage ():\r\n a = input(\"Enter the message:\\n\")\r\n return a", "def ask_yes_no(question, default=None, ctrl_c=\"n\", ctrl_d=None):\n\n ctrl_d = default if ctrl_d is None else ctrl_d\n\n option_prompt = OPTION_PROMPTS[default]\n prompt = question + \"? [{}] \".format(option_prompt)\n\n ans = None\n while ans not in ANSWERS:\n try:\n ans = read_input(prompt).lower()\n if not ans: # response was an empty string\n ans = default\n except KeyboardInterrupt:\n print()\n ans = ctrl_c\n except EOFError:\n print()\n ans = ctrl_d\n\n return ANSWERS[ans]", "def wait_for_user_input():\n\n input(\"Pulse ENTER para continuar...\")", "def menu():\r\n cont = False\r\n while cont == False:\r\n choice = input(\"Enter a letter to choose an option:\\n\" +\r\n \"e - Enter preferences\\nr - Get recommendations\\n\" +\r\n \"p - Show most popular artists\\nh - How popular is the most popular\\n\" +\r\n \"m - Which user has the most likes\\nq - Save and quit\\n\")\r\n if isinstance(choice, str):\r\n cont = True\r\n else:\r\n print(\"please enter one of the choices above\")\r\n return choice", "def getstring(message = \"Enter a value: \"):\n\ttry:\n\t\tinput = raw_input\n\texcept: \n\t\tpass\n\treturn raw_input(message)", "def getParam(prompt, default, helpMessage=\"No help implemented yet!\"):\n while True:\n param = raw_input(prompt)\n param = param or default\n if param == \"h\" or param == \"help\":\n print \"\\n\"+helpMessage+\"\\n\"\n else:\n break\n return param", "def test_prompt_msg_shows_default(self):\n with mock.patch('__builtin__.raw_input', return_value=\"Andrew\") as mockinput:\n result = self.prompt._prompt(self.response, {\n \"say\": \"First Name\",\n \"ask\": \"first_name\",\n \"default\": \"foobar\"\n })\n\n args, kwargs = mockinput.call_args\n\n self.assertEquals(\"First Name [foobar]? 
\", args[0])\n self.assertEquals(result['ansible_facts']['first_name'], 'Andrew')", "def query_until(prompt, condition, default=None):\n assert type(prompt) is str and callable(condition)\n response = raw_input(prompt + ' >> ')\n if default is not None and not response:\n return default\n while not condition(response):\n if default is not None and not response:\n return default\n print \"'%s' is not a valid response.\" % response\n response = raw_input(prompt + ' >> ')\n return response", "def test_multiple_char_input(self):\n self.choice.return_value = \"ant\"\n self.input.side_effect = [\"a\", \"nt\", \"n\", \"t\", ] + [\"n\"]\n\n gallows.main()\n\n self.xprint.assert_any_call('Please enter a single letter.')", "def askforinput(msg='Do you want to proceed?', tab='', newline='\\n'):\n while True:\n inp = input(ColorText(f\"{newline}{tab}INPUT NEEDED: {msg} \\n{tab}(yes | no): \").warn().__str__()).lower()\n if inp in ['yes', 'no']:\n if inp == 'no' and msg=='Do you want to proceed?':\n print(ColorText('exiting %s' % sys.argv[0]).fail())\n exit()\n break\n else:\n print(ColorText(\"Please respond with 'yes' or 'no'\").fail())\n return inp", "def validate(prompt, char_type, case):\n if char_type == 'A' and case == \"U\":\n while True:\n user_input = input(prompt).upper()\n try:\n if len(user_input) > 245:\n print(f'\\n.............\\n'\n f'Invalid input you entered {len(user_input)} characters\\n'\n f'Character limit is 245.\\n')\n elif user_input.replace(\" \", \"\").isalpha():\n return user_input\n print(\"\\n.............\\n\"\n \"Invalid input, non letter character.\\n\")\n except (ValueError, TypeError):\n print(\"\\n.............\\n\"\n \"Invalid input, non letter character.\\n\")\n elif char_type == 'I':\n while True:\n user_input = input(prompt)\n try:\n if 26 > int(user_input) > 0:\n return int(user_input)\n print(\"\\n.............\\n\"\n \"Invalid input, outside range of 1-25.\\n\")\n except (ValueError, TypeError):\n print(\"\\n.............\\n\"\n \"Invalid input, not a number.\\n\")", "def prompt(\n\t\ttext: str,\n\t\tdefault: Optional[str] = None,\n\t\thide_input: bool = False,\n\t\tconfirmation_prompt: bool = False,\n\t\ttype: Optional[_ConvertibleType] = None, # noqa: A002 # pylint: disable=redefined-builtin\n\t\tvalue_proc: Optional[Callable[[Optional[str]], Any]] = None,\n\t\tprompt_suffix: str = \": \",\n\t\tshow_default: bool = True,\n\t\terr: bool = False,\n\t\tshow_choices: bool = True,\n\t\t):\n\n\tresult = None # noqa\n\n\tdef prompt_func(text):\n\t\ttry:\n\t\t\treturn _prompt(text, err=err, hide_input=hide_input)\n\t\texcept (KeyboardInterrupt, EOFError):\n\t\t\tif hide_input:\n\t\t\t\tclick.echo(None, err=err)\n\t\t\traise click.Abort()\n\n\tif value_proc is None:\n\t\tvalue_proc = convert_type(type, default)\n\n\tprompt = _build_prompt(text, prompt_suffix, show_default, default, show_choices, type) # type: ignore\n\n\twhile True:\n\t\twhile True:\n\t\t\tvalue = prompt_func(prompt)\n\n\t\t\tif value:\n\t\t\t\tbreak\n\t\t\telif default is not None:\n\t\t\t\tif isinstance(value_proc, Path):\n\t\t\t\t\t# validate Path default value (exists, dir_okay etc.)\n\t\t\t\t\tvalue = default\n\t\t\t\t\tbreak\n\t\t\t\treturn default\n\n\t\ttry:\n\t\t\tresult = value_proc(value)\n\t\texcept click.UsageError as e:\n\t\t\tclick.echo(f\"Error: {e.message}\", err=err) # noqa: B306\n\t\t\tcontinue\n\n\t\tif not confirmation_prompt:\n\t\t\treturn result\n\n\t\twhile True:\n\t\t\tvalue2 = prompt_func(\"Repeat for confirmation: \")\n\t\t\tif value2:\n\t\t\t\tbreak\n\n\t\tif value == 
value2:\n\t\t\treturn result\n\n\t\tclick.echo(\"Error: the two entered values do not match\", err=err)", "def player_input():\n x_o = ['X', 'O']\n player = \"\"\n while True:\n player = input('Choose your player X or O: ')\n if player.upper() in x_o:\n break\n else:\n print('It is neither X nor O! Choose again:')\n player = player.upper()\n print(f\"Your player is {player}\")\n return player", "def selection_input(\n self,\n prompt,\n choices,\n default=None,\n error_message=\"Invalid Selection\",\n transform=None\n ):\n while True:\n result = self.text_input(prompt, default)\n\n if transform is not None and result is not None:\n result = transform(result)\n\n if result in choices:\n return result\n\n print()\n print(error_message)", "def onKey(self,event):\n \n ch = event.char.lower()\n \n if ch in ('\\n','\\r'):\n ch = self.defaultButton[0].lower()\n \n if ch == self.yesMessage[0].lower():\n self.yesButton()\n elif ch == self.noMessage[0].lower():\n self.noButton()\n elif ch == 'c':\n self.cancelButton()\n \n return \"break\"", "def prompt_yes_no(question, default):\n again = 'Unknown response.'\n if default.lower() in ('y', 'yes'):\n options = '(Y/n): '\n elif default.lower() in ('n', 'no'):\n options = '(y/N): '\n else:\n raise ValueError('default must be \"y\", \"yes\", \"n\", or \"no\"')\n\n response = input(' '.join((question, options))).lower()\n while response not in ('y', 'yes', 'n', 'no', ''):\n response = input(' '.join((again, question, options))).lower()\n if response == '':\n return default\n return response", "def checkLetter():\n\tguess = False\n\twhile guess != True:\n\t\tguess = str(raw_input(\"Guess a letter: \"))\n\t\tif guess.isalpha() and len(guess) == 1 :\n\t\t\treturn guess\n\t\telif not guess.isalpha() or len(guess) > 1:\n\t\t\tprint \"The input may be one letter only!\"\n\t\telse:\n\t\t\tprint \"Error in checkLetter\"", "def confirm(s=None, default=False):\n\n if s:\n s = '{} (y/n): '.format(s)\n else:\n s = 'Continue? (y/n): '\n answer = input(s).strip().lower()\n return answer.startswith('y') if answer else default", "def wait_for_enter(field_data=\"\"):\n try:\n input(f\"{field_data}\\n\" f\"Press the 'ENTER' key to continue\")\n except KeyboardInterrupt:\n pass", "def cont():\n\n try:\n input = raw_input()\n except Exception:\n pass", "def get_user_input(self):\r\n try:\r\n user_input = input('Guess a letter: ')\r\n print('\\n')\r\n if user_input.lower() in self.already_guessed:\r\n raise ValueError(YELLOW + 'You already guessed '\r\n f'{user_input.lower()}.\\n' + END)\r\n if len(user_input) == 0:\r\n raise ValueError(YELLOW + 'You didn\\'t enter a letter. '\r\n 'Please enter a letter between A-Z\\n' + END)\r\n if not user_input.isalpha():\r\n raise ValueError(YELLOW + 'You entered a number. 
'\r\n 'Please enter a letter between A-Z.\\n' + END)\r\n if len(user_input) > 1:\r\n raise ValueError(YELLOW + 'Please enter one letter.\\n' + END)\r\n except ValueError as error:\r\n print(error)\r\n self.get_user_input()\r\n else:\r\n if len(self.already_guessed) > 0: # prints previous guesses\r\n self.print_previous_guesses()\r\n if user_input.lower() in [letter.original.lower() for letter in\r\n self.active_phrase if letter != ' ']:\r\n for letter in self.active_phrase:\r\n if letter != ' ':\r\n letter.compare_guess(user_input) # checks guess\r\n self.active_phrase.print_phrase()\r\n else:\r\n self.lives -= 1\r\n print(f'You have {self.lives} out of 5 lives remaining!\\n')\r\n if user_input.lower() not in self.already_guessed:\r\n self.already_guessed.append(user_input.lower())\r\n self.active_phrase.print_phrase()", "def get_input_string():\n return input(\"Enter input string: \")", "def user_input_name():\n # Variable to use outcome globally in other functions\n global name\n # User name input\n name = input('\\n\\033[1;32;10mPlease enter your name:\\n')\n # If no characters in input -> message to user and repeat this function\n if is_char(name) is True:\n print(\"\\033[1;31;10mNo characters please type your name\")\n user_input_name()\n # Else if character is digit -> message to user and repeat this function\n elif is_digit(name) is True:\n print(\"\\033[1;31;10mPlease do not include digits in your name.\")\n user_input_name()\n # Is special symbol -> message to user and repeat this function\n elif is_special_char(name) is True:\n print(\"\\033[1;31;10mPlease do not include special symbols.\")\n user_input_name()\n # Else return name\n return name", "def input_wrapper(msg):\n userinput = input(msg)\n if userinput != 'q':\n return userinput\n else:\n sys.exit()", "def get_input(user_input):\n return input(user_input)", "def getInput(prompt):\n if platform.python_version().startswith('3'):\n userInput = input('%s ' % prompt).strip()\n if platform.python_version().startswith('2'):\n userInput = raw_input('%s ' % prompt).strip()\n return userInput", "def prompt(text, choices):\n text += \" [\" + \"/\".join(choices) + \"] \"\n while True:\n inp = input(text)\n if inp in choices:\n return inp", "def input_(text=''):\n while True:\n try:\n thing = input(text)\n if thing == '':\n raise ValueError\n else:\n return thing\n except (EOFError, KeyboardInterrupt, ValueError):\n print()", "def get_choice():\n choice = input(\"Would you like to login/register: \")\n return choice", "def yes_no(question, default= None):\n valid_option = {\"yes\": True,\n \"y\": True,\n \"no\": False,\n \"n\": False\n }\n if default is None:\n option = \" [y/n] \"\n elif default == \"yes\":\n option == \" [Y/n] \"\n elif default == \"no\":\n option == \" [y/N] \"\n else:\n raise ValueError(\"'%s' is a non valid option. Please select \" % default)\n \n while True:\n print(question + option)\n user_choice = input().lower()\n if default is not None and user_choice == '':\n return valid_option[default]\n elif user_choice in valid_option:\n return valid_option[user_choice]\n else:\n print(\"Please respond with 'yes' or 'no' (or 'y' or 'n'). \\t\")", "def ask_for_confirmation(prompt=\"Are you sure? 
\", default=True):\n yes, no = (\"Y\", \"n\") if default else (\"y\", \"N\")\n prompt += f\"[{yes}/{no}] \"\n\n while True:\n ans = input(prompt).lower().strip()\n if not ans:\n return default\n elif not (\"yes\".startswith(ans) or \"no\".startswith(ans)):\n print(\"Please enter yes or no.\")\n continue\n else:\n return \"yes\".startswith(ans)", "def get_name():\n clear()\n name = input(\"Employee Name: \")\n\n if len(name) == 0:\n input(\"Name must have at least one character.\")\n return get_name()\n else:\n return name", "def get_input_char():\n return ord(sys.stdin.read(1))", "def ask_question(msg, answers=\"[yes/No]\", default=\"no\"):\n if answers[0] != '[' or answers[-1] != ']':\n msg = \"%s wrongly specified, should be in [] separated by /\" % answers\n raise ValueError(msg)\n\n answer_list = answers[1:-1].split('/')\n \n if len(answer_list) < 2:\n raise ValueError(\"Too few possible answers: %s\" % answers)\n \n answer_list = [item.lower() for item in answer_list[:]]\n default = default.lower()\n \n if default not in answer_list:\n raise ValueError(\"Default answer %s not among answers: %s\" % (default,\n answers))\n \n print_out = \"%s %s: \" % (msg, answers)\n print print_out,\n \n inpt = None\n while inpt == None:\n try:\n inpt = raw_input()\n except KeyboardInterrupt:\n print_msg_exit(\" KeyboardInterrupt, exit.\", exit_code=1)\n except Exception, ex:\n print ex\n inpt = None\n print(\" Couldn't recognize the answer, try again.\")\n print print_out,\n else:\n inpt = inpt.lower()\n # finally, check what the user answered \n for i in range(len(answer_list)):\n if inpt == answer_list[i][0] or inpt == answer_list[i]:\n return answer_list[i]\n else:\n if inpt == '':\n return default\n else:\n inpt = None\n print \" Couldn't recognize the answer, try again.\"\n print print_out,", "def getch():\n\n import sys, tty, termios\n from select import select\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n try:\n tty.setraw(sys.stdin.fileno())\n [i, o, e] = select([sys.stdin.fileno()], [], [], 0.35)\n if i:\n ch = sys.stdin.read(1)\n else:\n ch = ''\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return ch", "def get_choice(attempt):\n try:\n user_text=''\n\n if attempt ==1:\n user_text ='Guess a number between 0 and 99:'\n \n choice = int(input(user_text))\n except ValueError:\n return get_choice()\n return choice", "def user_yesno(msg, default=None):\n\n # Parse optional `default` answer\n valid = {\"yes\": True, \"y\": True, \"ye\":True, \"no\":False, \"n\":False}\n if default is None:\n suffix = \" [y/n] \"\n elif default == \"yes\":\n suffix = \" [Y/n] \"\n elif default == \"no\":\n suffix = \" [y/N] \"\n\n # Wait for valid user input, if received return `True`/`False`\n while True:\n choice = input(msg + suffix).lower()\n if default is not None and choice == \"\":\n return valid[default]\n elif choice in valid.keys():\n return valid[choice]\n else:\n print(\"Please respond with 'yes' or 'no' (or 'y' or 'n').\\n\")", "def get_input(prompt):\n # type: (str) -> str\n return raw_input(prompt)", "def get_guess(self):\n new_guess = \"\"\n try:\n new_guess = input(\"Enter a letter: \").lower()\n if len(new_guess) > 1:\n new_guess = \"INVALID\"\n raise ValueError(\"The guess you entered was too long. Make sure that it is only one character\")\n elif len(new_guess) < 1:\n new_guess = \"INVALID\"\n raise ValueError(\"The guess you entered was too short. 
Make sure that it is only one character\")\n elif ord(new_guess) < 97 or ord(new_guess) > 122:\n new_guess = \"INVALID\"\n raise ValueError(\"Your input was deemed invalid! Please make sure input is a character a-z\")\n elif new_guess in self.guesses:\n print(f\"You already guessed the letter {new_guess}, try again\")\n new_guess = \"INVALID\"\n except ValueError as err:\n print(err)\n return new_guess", "def name_choice():\n username=\"\"\n while username ==\"\": #if value of user is empty the loop continue\n username=input(\"please enter your name or pseudo\")\n return username\n print(\"welcome on roulette game \",username,\" !\")", "def new_or_old_character():\r\n sentinel_value = True\r\n while sentinel_value is True:\r\n user_input = input(\"Are you a new player? Type 'y' for yes and to create a new character,\"\r\n \" or 'n' for no to load an old character from file\\n\")\r\n if user_input == 'y':\r\n # If user is a new player, create a new character\r\n new_character = character.create_character()\r\n return new_character\r\n elif user_input == 'n':\r\n # If user is not a new player, load a previously saved character\r\n new_character = get_stored_username()\r\n print(\"Welcome back,\", new_character['Name'])\r\n return new_character\r\n else:\r\n print(\"Please enter y or n\")", "def get_string_input():\n string_input = input('Please enter string: ')\n return string_input", "def get(self, key, default=None):\n try:\n val = self._store.get(key)\n except KeyError:\n val = default\n if val is None and not default:\n return self._auto_prompt(key)\n return val", "def prompt(self):\n # TODO: fix null input\n print('Enter user input: ')\n userinput = input()\n print(f'User chose: {userinput}')\n return userinput", "def menu():\n print(\"Choose an option\")\n print(\"(L)ist Friends\")\n print(\"(A)dd Friend\")\n print(\"(C)lear List\")\n print(\"(Q)uit\")\n while True:\n choice = input(\"Now choose: \").lower().strip()\n if choice in 'lacq':\n return choice\n print(\"Invalid choice.\")", "def get_input(prompt):\n try:\n try:\n return raw_input(prompt)\n except NameError:\n return input(prompt)\n except EOFError:\n return ''", "def AskText(question, default='', title=''):\n\n # build the dialog\n dlg = wx.TextEntryDialog(None, str(question), title, value=default)\n\n if (dlg.ShowModal() != wx.ID_OK): # run it and capture answer\n dlg.Destroy()\n return None\n\n result = str(dlg.GetValue()) # convert answer to string\n return result", "def askForKey(): \n while True:\n # Ask for key until the user will provide a valid one.\n try:\n userKey = input(f\"Please enter a key: \")\n userKey = int(userKey)\n except ValueError:\n continue\n else:\n return userKey", "def input_change(self, c):\n if c == 10: # Enter\n if len(self.input) > 0:\n if self.input.startswith('/'): # It's a command\n self.lanchat.do_command(self.input)\n else: # It's a message\n self.lanchat.send_message(self.input)\n self.input = ''\n elif c == 127 or c == 263: # Backspace\n if len(self.input) > 0:\n self.input = self.input[:-1]\n else:\n if c not in range(0, 127): # Non-printable characters\n return\n if len(self.input) >= self.max_input:\n return\n self.input += chr(c)", "def ask_user_input(self, sentence):\n user_input = raw_input(sentence + \" : \")\n return user_input", "def _readchar() -> str: # pragma: no cover\n if not sys.stdin.isatty():\n return sys.stdin.read(1)\n fd = sys.stdin.fileno()\n old_settings = termios.tcgetattr(fd)\n new_settings = termios.tcgetattr(fd)\n new_settings[3] = new_settings[3] & 
~termios.ECHO & ~termios.ICANON\n termios.tcsetattr(fd, termios.TCSANOW, new_settings)\n try:\n char = sys.stdin.read(1)\n finally:\n termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)\n return char" ]
[ "0.7521456", "0.7162696", "0.7019684", "0.6967578", "0.6841188", "0.67817444", "0.6743779", "0.66840625", "0.66546315", "0.65572464", "0.6538709", "0.64224786", "0.636659", "0.63616836", "0.63427144", "0.63147855", "0.6242766", "0.61260843", "0.60846496", "0.6079177", "0.6052511", "0.6009335", "0.5971891", "0.5946004", "0.5933735", "0.58956397", "0.5891569", "0.58412546", "0.5811614", "0.58091176", "0.58082724", "0.5784806", "0.57690215", "0.57530564", "0.5730544", "0.57271546", "0.5724826", "0.5723263", "0.5685357", "0.56822145", "0.5676046", "0.56687653", "0.5629904", "0.5629904", "0.5618197", "0.56172985", "0.559708", "0.557554", "0.55737394", "0.55695355", "0.5554706", "0.5554588", "0.55447286", "0.55295223", "0.55283487", "0.551812", "0.5513549", "0.5510338", "0.5503214", "0.55016184", "0.54773116", "0.54591846", "0.5449011", "0.5442297", "0.5437781", "0.5432966", "0.5428263", "0.5425164", "0.5403045", "0.5401355", "0.5386274", "0.5380821", "0.5379361", "0.53681666", "0.53625286", "0.5357463", "0.5354184", "0.5348114", "0.5330272", "0.53228027", "0.5318675", "0.5318583", "0.5318173", "0.53116447", "0.53082556", "0.5301312", "0.5299254", "0.52988374", "0.5294553", "0.52892846", "0.5288141", "0.52832985", "0.5279905", "0.5270537", "0.5262413", "0.52606004", "0.52538526", "0.525372", "0.5235527", "0.52248794" ]
0.750709
1
Function to remove test results and confirmations older than 10 blocks
async def cleanTestResults(CURRENT_HEIGHT):
    LAST_GOOD_HEIGHT = int(CURRENT_HEIGHT) - 10
    for testId in list(testResults):
        if int(testId) <= LAST_GOOD_HEIGHT:
            del testResults[testId]
    for testId in list(testConfirmations):
        if int(testId) <= LAST_GOOD_HEIGHT:
            del testConfirmations[testId]
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def remaining_batch_tests(loaded_batch_tests):\n remaining_tests = batch_test_set - set(loaded_batch_tests)\n with open('remaining_tests.txt', mode='w') as outfile:\n for batch_test in remaining_tests:\n outfile.write(\"%s\\n\" % batch_test)", "def clean_leftovers(tests):\n for test in tests:\n test.clean()", "def remove_totally_failed_tests(df):\n all_runs = df.group_uuid.unique()\n removed_guuids = []\n for test_run in all_runs:\n overall_status = df[(df.group_uuid == test_run) & ~get_failed_mask(df)]\n if not len(overall_status):\n df = df[df.group_uuid != test_run]\n removed_guuids.append(test_run)\n return df, removed_guuids", "def stopTestRun(self):", "def trim_data_back_to(monthToKeep):\n global g_failed_tests_info_dict\n current_time = time.time() # unit in seconds\n\n oldest_time_allowed = current_time - monthToKeep*30*24*3600 # in seconds\n\n clean_up_failed_test_dict(oldest_time_allowed)\n clean_up_summary_text(oldest_time_allowed)", "def test_clean_exit(self):\n ch = connection_helper()\n qr = list_test_artifacts(None, ch.tables)\n self.assertFalse(bool(qr), \"\"\"Run 'removefacts --conf <config> --removetestlist' or \nexecute 'tests/scripts/removetestfacts.py' to fix\"\"\")", "def cleanse_priest_list(priests_list):", "def worker_unscheduled(self, node, indices):\n self.sched.remove_pending_tests_from_node(node, indices)", "def test_concurrent_add_and_delete_pending_test_case_result(self):\n result = xml_reporter._TextAndXMLTestResult(None, self.stream, None, 0,\n None)\n def add_and_delete_pending_test_case_result(test_name):\n test = MockTest(test_name)\n result.addSuccess(test)\n result.delete_pending_test_case_result(test)\n\n for i in range(50):\n add_and_delete_pending_test_case_result('add_and_delete_test%s' % i)\n self.assertEqual(result.pending_test_case_results, {})", "def test_remove_all_pass(self):\n self.write_contents(\n 'external/wpt/variant.html.ini', \"\"\"\\\n [variant.html?foo=baz]\n [formerly failing subtest]\n expected: FAIL\n \"\"\")\n self.update({\n 'results': [{\n 'test':\n '/variant.html?foo=baz',\n 'status':\n 'OK',\n 'subtests': [{\n 'name': 'formerly failing subtest',\n 'status': 'PASS',\n 'message': None,\n 'expected': 'FAIL',\n 'known_intermittent': [],\n }],\n }],\n })\n self.assertFalse(self.exists('external/wpt/variant.html.ini'))", "def reset(self):\n # Remove all successful action records\n to_remove = []\n for action_record, (p_valid, result_text) in self.action_records.items():\n if p_valid > .5:\n to_remove.append(action_record)\n for action_record in to_remove:\n del self.action_records[action_record]", "def remove_test(self, file_path):\n for parser_name in self.parser_names:\n results_file_path = self.get_results_filepath(parser_name)\n results_list = []\n removed = False\n for results in self.read_results_file(results_file_path):\n if results[INPUT_FILE_PATH] == file_path:\n logger.info(\"Removed results for {} in {}\".format(file_path, results_file_path))\n removed = True\n else:\n results_list.append(results)\n\n if removed:\n self.write_results_file(results_list, results_file_path)", "def remove_a_result(self, idblock):\n self.resultPanel.remove_item(idblock)", "def CleanUpTestResults(self):\n name_key = lambda v: v.name\n results_by_name = sorted(self.results, key=name_key)\n\n for name, res_iter in groupby(results_by_name, key=name_key):\n results = set(res_iter)\n\n # If DejaGnu was unable to compile a test it will create following result:\n failed = DejaGnuTestResult(name, '(test for excess errors)', 'FAIL',\n False)\n\n # If 
a test compilation failed, remove all results that are dependent.\n if failed in results:\n dependants = set(filter(lambda r: r.result != 'FAIL', results))\n\n self.results -= dependants\n\n for res in dependants:\n logging.info('Removed {%s} dependance.', res)\n\n # Remove all UNRESOLVED results that were also marked as UNSUPPORTED.\n unresolved = [res._replace(result='UNRESOLVED')\n for res in results if res.result == 'UNSUPPORTED']\n\n for res in unresolved:\n if res in self.results:\n self.results.remove(res)\n logging.info('Removed {%s} duplicate.', res)", "def teardown_module(module):\n cf.delete_from_table(idm.SURVEY_SUBSAMPLE_TABLE, 'RUN_ID', '=', RUN_ID)\n\n # List of tables to cleanse where [RUN_ID] = RUN_ID\n tables_to_cleanse = ['[dbo].[PROCESS_VARIABLE_PY]',\n '[dbo].[PROCESS_VARIABLE_TESTING]',\n '[dbo].[TRAFFIC_DATA]',\n '[dbo].[SHIFT_DATA]',\n '[dbo].[NON_RESPONSE_DATA]',\n '[dbo].[UNSAMPLED_OOH_DATA]',\n idm.SURVEY_SUBSAMPLE_TABLE]\n\n # Try to delete from each table in list where condition. If exception occurs,\n # assume table is already empty, and continue deleting from tables in list.\n for table in tables_to_cleanse:\n try:\n cf.delete_from_table(table, 'RUN_ID', '=', RUN_ID)\n except Exception:\n continue\n\n print(\"Duration: {}\".format(time.strftime(\"%H:%M:%S\", time.gmtime(time.time() - START_TIME))))", "def cleanupRequests(n=10):\n\n # formula for filtering data from airtable\n formula = 'AND(DATETIME_DIFF(NOW(), {Last Modified}, \"days\") > 30, Status = \"Request Complete\")'\n\n # airtable query\n headers = {\"Authorization\": \"Bearer {}\".format(os.environ['AIRTABLE_AUTH_TOKEN'])}\n params = params = {\n 'maxRecords': 10,\n 'view': 'All Requests + Data',\n 'sortField':'Last Modified',\n 'sortDirection': 'asc',\n 'filterByFormula': formula\n\n }\n\n\n r = requests.get(os.environ['PROD_URL'], headers=headers, params=params)\n\n # if status code is good ...\n if r.status_code == 200:\n\n # instantiate twilio client\n client = Client(os.environ['ACCOUNT_SID'], os.environ['TWILIO_AUTH_TOKEN'])\n\n # iterate through records\n for record in r.json()['records']:\n\n data = {\n 'fields':\n {'Message': \"\",\n 'First Name': \"\"\n }\n }\n\n # patch the requisite fields\n r = requests.patch(\n os.environ['PROD_URL'] + record['id'] , headers=headers, json=data\n )\n\n # erase the recordings associated with the call SID\n call_sid = record['fields']['Twilio Call Sid']\n call = client.calls(call_sid).fetch()\n\n for recording_sid in call.recordings.list():\n client.recordings(recording_sid).delete()\n\n # confirm deletion\n successfully_deleted = 0\n r = requests.get(os.environ['PROD_URL'] + record['id'], headers=headers)\n call = client.calls(call_sid).fetch()\n\n if all([r.status_code == 200, \n 'Message' not in r.json().keys(), \n 'First Name' not in r.json().keys(),\n len(call.recordings.list()) == 0]):\n print('succesfully deleted')\n successfully_deleted += 1\n \n else:\n print('error')\n\n return str(successfully_deleted)", "def test_balanced_removal(self):\n successes = 0\n failures = 0\n iterations = NUM_CALLS\n\n for _ in range(iterations):\n\n failure_callback = False\n handler = self.new_handler(balance=True)\n new_ids = [randint(0, handler.uid) for _ in range(randint(HEIGHT[0], handler.expected_height))]\n new_ids = list(set(new_ids)) # make sure there are no duplicates\n try:\n new_ids.remove(handler.golden_id) # remove golden id from removal if it was randomly selected\n except ValueError:\n pass\n\n for val in new_ids:\n handler.delNodeByID(val)\n if 
handler.balanced is False:\n failures += 1\n failure_callback = True\n break\n\n if failure_callback:\n break\n state = handler.get_gamestate()\n for val in new_ids:\n if 'node' + str(val) in state['node_points']:\n failures += 1\n break\n\n successes += 1\n\n self.assertEqual(failures, 0,\n msg=f'{BColors.FAIL}\\n\\t[-]\\tModification: Failed to correctly remove nodes (balancing addition) ' +\n f'{failures}/{iterations} failures! {BColors.ENDC}')\n print(f\"{BColors.OKGREEN}\\t[+]\\tModification: Validated removing nodes in balancing mode in {successes} trees.{BColors.ENDC}\")", "def delete_runs(self):\n for run in self.get_runs():\n run.delete()", "def main():\n dir_path = '/home/ubuntu/test_files' # path for the log files that needs to be pruned\n stat_file_name = 'file_status_info' # temp file will be created to store the stat of each files to calculate when to delete\n \n # Get the list of all the files where we want to perfrom the delete operations\n file_list = get_list_of_files_in_dir(dir_path)\n\n # Get the current system date\n current_date = get_current_date()\n\n # Iterate through all the log, error, info files in the specified directory path and check for the criteria of file older than 5 days and delete.\n for fil in file_list:\n get_file_stat(dir_path, stat_file_name, fil)\n filename, file_date = get_file_last_modification_date(stat_file_name)\n\n print(\"*********** %s file stat is written **************\" % fil)\n days = abs(current_date - file_date).days\n \n # Check if the file modification date if older than 5 days.\n if days > 5:\n remove_files(os.path.join(dir_path, fil))\n else:\n print(\"No eligible file(s) found to be deleted\")", "def test_remove_stale_expectation(self):\n self.write_contents(\n 'external/wpt/fail.html.ini', \"\"\"\\\n [fail.html]\n expected: [OK, FAIL]\n \"\"\")\n self.update({\n 'results': [{\n 'test': '/fail.html',\n 'status': 'FAIL',\n 'expected': 'OK',\n 'known_intermittent': ['FAIL'],\n }, {\n 'test': '/fail.html',\n 'status': 'CRASH',\n 'expected': 'OK',\n 'known_intermittent': ['FAIL'],\n }],\n })\n self.assert_contents(\n 'external/wpt/fail.html.ini', \"\"\"\\\n [fail.html]\n expected: [FAIL, CRASH]\n \"\"\")", "def test_remove_yield(self, affiliate_items_url_factory, affiliate_network_factory):\n network = affiliate_network_factory(name='Network')\n\n with mock.patch('chiton.rack.affiliates.bulk.create_affiliate') as create_affiliate:\n affiliate = ValidatingAffiliate()\n affiliate.valid_tlds = ['com', 'org']\n create_affiliate.return_value = affiliate\n\n items = affiliate_items_url_factory(['biz', 'com', 'net', 'org'])\n for index, item in enumerate(items):\n item.name = 'Item %d' % (index + 1)\n item.network = network\n item.save()\n\n assert AffiliateItem.objects.count() == 4\n\n pruned = []\n for item_name, network_name, was_pruned in prune_affiliate_items(items.order_by('name')):\n pruned.append([item_name, network_name, was_pruned])\n\n assert pruned[0] == ['Item 1', 'Network', True]\n assert pruned[1] == ['Item 2', 'Network', False]\n assert pruned[2] == ['Item 3', 'Network', True]\n assert pruned[3] == ['Item 4', 'Network', False]", "def test_imbalanced_removal(self):\n successes = 0\n failures = 0\n iterations = NUM_CALLS\n\n for _ in range(iterations):\n\n failure_callback = False\n handler = self.new_handler()\n new_ids = [randint(0, handler.uid) for _ in range(randint(HEIGHT[0], handler.expected_height))]\n new_ids = list(set(new_ids)) # make sure there are no duplicates\n try:\n new_ids.remove(handler.golden_id) # remove 
golden id from removal if it was randomly selected\n except ValueError:\n pass\n\n for val in new_ids:\n handler.delNodeByID(val)\n true_bal = check_balance(handler.root)\n if handler.balanced is not true_bal:\n failures += 1\n failure_callback = True\n break\n\n if failure_callback:\n break\n state = handler.get_gamestate()\n for val in new_ids:\n if 'node' + str(val) in state['node_points']:\n failures += 1\n break\n\n successes += 1\n\n self.assertEqual(failures, 0,\n msg=f'{BColors.FAIL}\\n\\t[-]\\tModification: Failed to correctly remove nodes (non-balancing addition) ' +\n f'{failures}/{iterations} failures! {BColors.ENDC}')\n print(f\"{BColors.OKGREEN}\\t[+]\\tModification: Validated removing nodes in non-balancing mode in {successes} trees.{BColors.ENDC}\")", "def test_successful_subscriptions_remove(self) -> None:\n self.assertGreaterEqual(len(self.streams), 2)\n streams_to_remove = self.streams[1:]\n not_subbed = [\n stream.name\n for stream in Stream.objects.filter(realm=get_realm(\"zulip\"))\n if stream.name not in self.streams\n ]\n random.shuffle(not_subbed)\n self.assertNotEqual(len(not_subbed), 0) # necessary for full test coverage\n try_to_remove = not_subbed[:3] # attempt to remove up to 3 streams not already subbed to\n streams_to_remove.extend(try_to_remove)\n self.helper_check_subs_before_and_after_remove(\n streams_to_remove,\n {\"removed\": self.streams[1:], \"not_removed\": try_to_remove},\n self.test_email,\n [self.streams[0]],\n self.test_realm,\n )", "def poll_tests(self):\n for i, test in enumerate(self.tests):\n if test.process.poll() is not None:\n self.check_test(test)\n self.tests.pop(i)\n if self.test_numbers:\n self.start_next_test()", "def prune_alerts():\n from scoop.messaging.models.alert import Alert\n # Supptimer les alertes\n alerts = Alert.objects.read_since(minutes=2880)\n alerts.delete()", "def test_keep_unobserved_subtest(self):\n self.write_contents(\n 'external/wpt/variant.html.ini', \"\"\"\\\n [variant.html?foo=baz]\n [subtest that should not be removed]\n expected: CRASH\n \"\"\")\n self.update(\n {\n 'results': [{\n 'test': '/variant.html?foo=baz',\n 'status': 'CRASH',\n 'subtests': [],\n }],\n },\n overwrite_conditions='no')\n self.write_contents(\n 'external/wpt/variant.html.ini', \"\"\"\\\n [variant.html?foo=baz]\n [subtest that should not be removed]\n expected: CRASH\n \"\"\")", "def stopTest(self, test):", "def test_oldtestcases(self):\n\t\treturn oldtests()", "def test_remove_expensive(self):\n test_remove_expensive = self.info_list.remove_expensive()\n self.assertTrue(test_remove_expensive)", "def disaggregate_chunk(self, test_mains):\n raise NotImplementedError()", "def _delete_tcs(self, testcases):\n\n delete_q = []\n\n # Find all the metadata files associated with all the testcases\n for testcase in testcases:\n metadata_files = nh.get_metadata_files(testcase)\n delete_q += metadata_files.values()\n\n # Write the placeholder file to indicate that this file is deleted\n placeholder_f \\\n = nh.get_metadata_files(testcase, deleted=True)['deleted']\n with open(placeholder_f, 'w') as obj:\n obj.write('Deleted at epoch=%d' % int(time.time()))\n\n remove_files(delete_q, self.verbose, warn=True, force=True)", "def selenium_teardown():\n families_to_delete, visits_to_delete, responses_to_delete = [], [], []\n\n families_to_delete.extend(Family.objects.filter(study_id_number=59638))\n families_to_delete.extend(Family.objects.filter(study_id_number=83695))\n for f in families_to_delete:\n visits_to_delete.extend(f.visit_set.all())\n for v 
in visits_to_delete:\n responses_to_delete.extend(v.response_set.all())\n\n for r in responses_to_delete:\n r.delete()\n for v in visits_to_delete:\n v.delete()\n for f in families_to_delete:\n f.delete()", "def SuppressTestResults(self, manifests):\n\n # Get a set of tests results that are going to be suppressed if they fail.\n manifest_results = set()\n\n for manifest in filter(self._IsApplicable, manifests):\n manifest_results |= set(manifest.results)\n\n suppressed_results = self.results & manifest_results\n\n for result in sorted(suppressed_results):\n logging.debug('Result suppressed for {%s}.', result)\n\n new_result = '!' + result.result\n\n # Mark result suppression as applied.\n manifest_results.remove(result)\n\n # Rewrite test result.\n self.results.remove(result)\n self.results.add(result._replace(result=new_result))\n\n for result in sorted(manifest_results):\n logging.warning('Result {%s} listed in manifest but not suppressed.',\n result)", "def delete_successful_tasks():\n try:\n from django_q.models import Success\n\n from common.models import InvenTreeSetting\n\n days = InvenTreeSetting.get_setting('INVENTREE_DELETE_TASKS_DAYS', 30)\n threshold = timezone.now() - timedelta(days=days)\n\n # Delete successful tasks\n results = Success.objects.filter(\n started__lte=threshold\n )\n\n if results.count() > 0:\n logger.info(f\"Deleting {results.count()} successful task records\")\n results.delete()\n\n except AppRegistryNotReady: # pragma: no cover\n logger.info(\"Could not perform 'delete_successful_tasks' - App registry not ready\")", "def NOtearDown(self):\n\n for f in self.testoutput:\n if os.path.exists(f):\n os.remove(f)", "def test_remove_all_items(self):\n lib1 = self._create_library(slug=\"test-lib-rm-all\", title=\"Title 1\", description=\"Description\")\n self._add_block_to_library(lib1['id'], \"problem\", \"problem1\")\n self._add_block_to_library(lib1['id'], \"problem\", \"problem2\")\n assert len(LibraryBlockIndexer.get_items()) == 2\n\n LibraryBlockIndexer.remove_all_items()\n assert len(LibraryBlockIndexer.get_items()) == 0", "def getValidTests(sourceTree):\n\n tests = getSections()\n newTests = tests[:]\n\n # [main] is reserved for test suite parameters\n newTests.remove(\"main\")\n\n removeList = []\n \n for test in newTests:\n\n print \" \"\n print \"checking parameters for test %s\" % (test)\n \n # check for the manditory parameters\n if (not (keyIsValid(\"%s.buildDir\" % (test)) and\n keyIsValid(\"%s.inputFile\" % (test)) and\n (sourceTree == \"fParallel\" or \n keyIsValid(\"%s.probinFile\" % (test)) ) and\n keyIsValid(\"%s.needs_helmeos\" % (test)) and\n keyIsValid(\"%s.dim\" % (test)) ) ):\n warning(\" WARNING: manditory runtime parameters for test %s not set\" % (test))\n warning(\" skipping test\")\n removeList.append(test)\n continue\n\n\n # check for optional parameters\n\n # restartTest\n if (not keyIsValid(\"%s.restartTest\" % (test)) ):\n warning(\" Assuming test is not restart run.\\n\")\n globalParams[\"%s.restartTest\" % (test)] = 0\n else:\n\n if (getParam(\"%s.restartTest\" % (test)) ):\n\n # make sure that the file number to restart from is defined\n if (not keyIsValid(\"%s.restartFileNum\" % (test)) ):\n warning(\"WARNING: test %s is a restart test, but is missing the restartFileNum parameter.\\n\" % (test))\n warning(\" skipping test\\n\")\n removeList.append(test)\n continue\n \n\n # compileTest\n if (not keyIsValid(\"%s.compileTest\" % (test)) ):\n warning(\" Assuming test is not compilation test run.\\n\")\n 
globalParams[\"%s.compileTest\" % (test)] = 0\n\n\n # selfTest\n if (not keyIsValid(\"%s.selfTest\" % (test)) ):\n warning(\" Assuming test is not a self-test.\\n\")\n globalParams[\"%s.selfTest\" % (test)] = 0\n else:\n\n if (getParam(\"%s.selfTest\" % (test)) ):\n \n # make sure that the success string is defined\n if (not keyIsValid(\"%s.stSuccessString\" % (test)) ):\n warning(\"WARNING: test %s is a self-test, but is missing stSuccessString parameter.\\n\" % (test))\n warning(\" skipping test\\n\")\n removeList.append(test)\n continue\n\n\n # useMPI\n if (not keyIsValid(\"%s.useMPI\" % (test)) ):\n warning(\" Assuming normal (not MPI) run.\\n\")\n globalParams[\"%s.useMPI\" % (test)] = 0\n else:\n\n if (getParam(\"%s.useMPI\" % (test)) ):\n\n # make sure that the number of processors is defined\n if (not keyIsValid(\"%s.numprocs\" % (test)) ):\n warning(\"WARNING: test %s is a parallel test, but did not specify the numprocs parameter.\\n\" % (test))\n warning(\" skipping test\\n\")\n removeList.append(test)\n continue\n \n\n # doVis\n if (not keyIsValid(\"%s.doVis\" % (test)) ):\n warning(\" Assuming no visualization.\\n\")\n globalParams[\"%s.doVis\" % (test)] = 0\n else:\n\n if (getParam(\"%s.doVis\" % (test)) ):\n\n # find out what variable to plot\n if (not keyIsValid(\"%s.visVar\" % (test)) ):\n warning(\"WARNING: test %s requested visualization but did not specify the visVar parameter.\\n\" % (test))\n warning(\" skipping test\\n\")\n removeList.append(test)\n continue\n \n\n # remove the invalid tests\n for test in removeList:\n newTests.remove(test)\n \n return newTests", "def dispose():\n results = get_result()\n shutil.rmtree(_dir)\n if str(results).lower() == \"action required\":\n print(\"Baseline action is required, visit {}?id={}\".format(\n _report_base_url, _execution_id))\n elif str(results).lower() == \"failed\":\n print(\"Tests failed, please review at {}?id={}\".format(\n _report_base_url, _execution_id))\n assert (str(results) == \"passed\")\n\n print(\"To view a detailed report of the execution please navigate to {}?id={}\".format(\n _report_base_url, _execution_id))", "def tearDownModule():\n print \"Removing toggl entries created by the test...\"\n for entry in toggl.TimeEntryList():\n if entry.get('description') is not None and entry.get('description').startswith('unittest_'):\n entry.delete()", "def test_unsuccessful_verification(self):\n for i in (-4, -3, 3, 4):\n description = \"TOTP verified for `i={0}`\".format(i)\n calculated = self.algorithm.calculate(self.device.secret, drift=i)\n confirmed = self.relate.verify(calculated, save=False)\n\n self.assertFalse(confirmed, description)\n\n self.relate.confirm = False", "def clear_delayed_analysis_requests(root, db, c):\n execute_with_retry(db, c, \"DELETE FROM delayed_analysis WHERE uuid = %s\", (root.uuid,), commit=True)", "def test_delete_run(self):\n pass", "def get_tests_to_run(self, test_list_old):\n\n # if we only want to run the tests that failed previously,\n # remove the others\n if self.args.redo_failed or not self.args.copy_benchmarks is None:\n last_run = self.get_last_run()\n failed = self.get_test_failures(last_run)\n\n test_list = [t for t in test_list_old if t.name in failed]\n else:\n test_list = test_list_old[:]\n\n # if we only want to run tests of a certain dimensionality, remove\n # the others\n if self.args.d in [1, 2, 3]:\n test_list = [t for t in test_list_old if t.dim == self.args.d]\n\n # if we specified any keywords, only run those\n if self.args.keyword is not None:\n test_list = 
[t for t in test_list_old if self.args.keyword in t.keywords]\n\n # if we are doing a single test, remove all other tests; if we\n # specified a list of tests, check each one; if we did both\n # --single_test and --tests, complain\n if not self.args.single_test == \"\" and not self.args.tests == \"\":\n self.log.fail(\"ERROR: specify tests either by --single_test or --tests, not both\")\n\n if not self.args.single_test == \"\":\n tests_find = [self.args.single_test]\n elif not self.args.tests == \"\":\n tests_find = self.args.tests.split()\n else:\n tests_find = []\n\n if len(tests_find) > 0:\n new_test_list = []\n for test in tests_find:\n _tmp = [o for o in test_list if o.name == test]\n if len(_tmp) == 1:\n new_test_list += _tmp\n else:\n self.log.fail(f\"ERROR: {test} is not a valid test\")\n\n test_list = new_test_list\n\n if len(test_list) == 0:\n self.log.fail(\"No valid tests defined\")\n\n return test_list", "def _periodically_cleanup_candidates(self):\n while True:\n yield 5 * 60.0\n\n now = time()\n for key, candidate in [(key, candidate) for key, candidate in self._candidates.iteritems() if candidate.is_all_obsolete(now)]:\n if __debug__: dprint(\"removing obsolete candidate \", candidate)\n del self._candidates[key]\n self.wan_address_unvote(candidate)", "def delete_obsolete_shared_tests(quali_api, expected_tests, shared_test_folder):\n try:\n r = quali_api.get_tests_from_shared(shared_test_folder)\n data = json.loads(r)\n if 'Children' in data:\n num_entries = len(data['Children'])\n for i in range(num_entries):\n entry_type = data['Children'][i]['Type']\n entry_name = data['Children'][i]['Name']\n entry_name = entry_name.replace(' ', '%20')\n full_path = shared_test_folder + '/' + entry_name\n if entry_type == 'Folder':\n if full_path not in expected_tests:\n print('Delete obsolete Shared test folder that is no longer on repository: ' + full_path)\n quali_api.delete_test_from_shared(full_path)\n else:\n delete_obsolete_shared_tests(quali_api, expected_tests, full_path)\n elif entry_type == 'Test' and full_path not in expected_tests:\n print('Delete obsolete Shared test that is no longer on repository: ' + full_path)\n quali_api.delete_test_from_shared(full_path)\n except Exception as error:\n print(\"Caught error: \" + repr(error))", "def test_clean_run(self):\n Historical_ROAs_Parsed_Table(clear=True)\n with Historical_ROAs_Table(clear=True) as t:\n Historical_ROAs_Parser().run()\n assert t.get_count() > 2000000", "def test_cancelling_not_full_event_emails_waiting_list(self, mock_tz):\n mock_tz.now.return_value = datetime(\n 2015, 2, 13, 17, 15, tzinfo=dt_timezone.utc\n )\n\n # make full event (setup has one paid and one unpaid)\n # cancellation period =1, date = 2015, 2, 13, 18, 0\n self.event.max_participants = 3\n self.event.save()\n\n # make some waiting list users\n for i in range(3):\n baker.make_recipe(\n 'booking.waiting_list_user', event=self.event,\n user__email='test{}@test.com'.format(i)\n )\n\n management.call_command('cancel_unpaid_bookings')\n # emails are sent to user per cancelled booking (1) studio (1) and waitinglist (1, with bcc)\n self.assertEqual(len(mail.outbox), 3)", "def stopTest(self, test):\n self.complete_output()", "def tearDown(self):\r\n self.app.application_close(self.util.client, self.app_name)\r\n\r\n self.common.generate_report(self.util.client, False)\r\n # Releases the client so that other clients can approach the agent in the near future.\r\n self.common.release_client(self.util.client)\r\n 
self.logger.info(\"==============Results=================\")\r\n self.logger.info(\"Number of Strings verified: \" + str(len(Config.results_list)/2))\r\n for i in range(0, len(Config.results_list), 2):\r\n self.logger.info(str(Config.results_list[i]) + \"{:>36}\".format('=====> ')\r\n + str(Config.results_list[i+1]))\r\n self.logger.info(\"Testcase tear-down: COMPLETED\")", "def after_test(self, test_results):\n pass", "def resetTestsCB(self):\n trace.into('ProctorGUI', 'resetTestsCB')\n self.tests = []\n self.result = None\n self.resetTestButtons()\n self.setState('idle')\n self.showTestOutput(None)\n self.updateProgress(0)\n trace.outof()\n return", "def test_answer_non_repeating_dependency_repeating_validate_all_of_block_and_group_removed(self):\n # Given\n schema = load_schema_from_params('test', 'titles_repeating_non_repeating_dependency')\n colour_answer_location = Location('colour-group', 0, 'favourite-colour')\n colour_answer = {'fav-colour-answer': 'blue'}\n\n # When\n with self._application.test_request_context():\n with patch('app.data_model.questionnaire_store.QuestionnaireStore.remove_completed_blocks') as patch_remove:\n update_questionnaire_store_with_form_data(self.question_store, colour_answer_location, colour_answer, schema)\n\n # Then\n patch_remove.assert_called_with(group_id='repeating-group', block_id='repeating-block-3')", "def compute_transactions_that_became_invalid(self, new_best_height: int) -> list[BaseTransaction]:\n from hathor.transaction.validation_state import ValidationState\n to_remove: list[BaseTransaction] = []\n for tx in self.iter_mempool_from_best_index():\n tx_min_height = tx.get_metadata().min_height\n assert tx_min_height is not None\n # We use +1 here because a tx is valid if it can be confirmed by the next block\n if new_best_height + 1 < tx_min_height:\n tx.set_validation(ValidationState.INVALID)\n to_remove.append(tx)\n return to_remove", "def clean_trial(src_loc: Path, test_cmds: List[str]) -> timedelta:\n cache.remove_existing_cache_files(src_loc)\n\n LOGGER.info(\"Running clean trial\")\n\n # clean trial will show output all the time for diagnostic purposes\n start = datetime.now()\n clean_run = subprocess.run(test_cmds, capture_output=False)\n end = datetime.now()\n\n if clean_run.returncode != 0:\n raise BaselineTestException(\n f\"Clean trial does not pass, mutant tests will be meaningless.\\n\"\n f\"Output: {str(clean_run.stdout)}\"\n )\n\n return end - start", "def delete_failed_tasks():\n\n try:\n from django_q.models import Failure\n\n from common.models import InvenTreeSetting\n\n days = InvenTreeSetting.get_setting('INVENTREE_DELETE_TASKS_DAYS', 30)\n threshold = timezone.now() - timedelta(days=days)\n\n # Delete failed tasks\n results = Failure.objects.filter(\n started__lte=threshold\n )\n\n if results.count() > 0:\n logger.info(f\"Deleting {results.count()} failed task records\")\n results.delete()\n\n except AppRegistryNotReady: # pragma: no cover\n logger.info(\"Could not perform 'delete_failed_tasks' - App registry not ready\")", "def run_test_second():\n os.system(\n \"sed -n '/(Failed)$/p' test_op_log.txt | awk '{print $3}' >& rerun_op.txt\"\n )\n rerun_list = get_op_list('rerun_op.txt')\n if len(rerun_list):\n print(\n \"-------there are \"\n + str(len(rerun_list))\n + \" op(s) need to rerun!!!-------\"\n )\n for failed_op in rerun_list:\n os.system(\"ctest -R \\\"(\" + failed_op + \")\\\" \")\n else:\n print(\"-------all op passed successfully!!!-------\")", "def unblock_all(t):\n blocked_count = 0\n\n while True:\n 
blocked_user_ids = t.blocks.ids()[\"ids\"]\n if not blocked_user_ids:\n print(\"No more IDs to unblock\")\n break\n\n for user_id in blocked_user_ids:\n blocked_count = blocked_count + 1\n print(f\"{blocked_count}: {user_id}\")\n try:\n t.blocks.destroy(user_id=user_id, include_entities=False, skip_status=True)\n except:\n print(\"error\")", "def test_keep_existing_expectations(self):\n self.write_contents(\n 'external/wpt/fail.html.ini', \"\"\"\\\n [fail.html]\n expected: [OK, FAIL]\n \"\"\")\n self.update(\n {\n 'results': [{\n 'test': '/fail.html',\n 'status': 'CRASH',\n 'expected': 'OK',\n 'known_intermittent': ['FAIL'],\n }],\n },\n keep_statuses=True)\n # The disable only works for flaky results in a single run.\n self.assert_contents(\n 'external/wpt/fail.html.ini', \"\"\"\\\n [fail.html]\n expected: [CRASH, OK, FAIL]\n \"\"\")", "def teardown(self):\n for mr in self.mrs:\n mr.restore_pretest(pretest=mr.pretest_info)", "def test_remove(numbers, result):\n from sum_of_two_lowest_possible_integers import sum_two_smallest_numbers\n assert sum_two_smallest_numbers(numbers) == result", "def test_dry_run():\n config = get_config(\"delete.conf\")\n path = get_config_path(config)\n test_file = make_test_file(path)\n\n console.pushbroom(config, dry_run=True)\n assert test_file.exists()\n\n console.pushbroom(config)\n assert not test_file.exists()\n\n path.rmdir()", "def test_disable_retainUnsent_logs():\n stmt = sqlalchemy.select([_LOGGING_TABLE.c.total_unsent_rows_removed]).select_from(_LOGGING_TABLE).order_by(\n _LOGGING_TABLE.c.id.desc()).limit(1)\n config_info = read_config()\n config_info['retainUnsent'] = False\n open(config_file, 'w').close()\n with open(config_file, 'r+') as conf:\n conf.write(json.dumps(config_info))\n\n time.sleep(convert_sleep(config_info['wait'])*2)\n result = execute_command_with_return_value(stmt)\n\n assert int(result[0][0]) >= 0", "def remove_erroneous_blocks(blocks):\n # Identify duplicated blocks. Consecutive time stamps are usually less than 2 seconds apart.\n mask = blocks.groupby(['session_uid'])['time'].diff() < 2.0\n try:\n n_errors = mask.value_counts()[True]\n except KeyError:\n n_errors = 0\n blocks = blocks.loc[~mask, :]\n # Now, after removal of erroneous data a session might not have all 3 blocks we expect. 
Exclude whole session.\n invalid_sessions = blocks['session_uid'].value_counts() != 3\n invalid_sessions = invalid_sessions.loc[invalid_sessions].index.to_list()\n blocks = blocks.loc[~blocks['session_uid'].isin(invalid_sessions), :]\n return blocks, n_errors, invalid_sessions", "def prune(i):\n\n return {'return':1, 'error':'pruning is not yet supported in this scenario'}", "def short():\n countneg = 0\n countpos = 0\n testset_id = 4\n\n testfiles = db.session.query(evaluation.Testfile).filter(evaluation.Testfile.testset_id==testset_id)\n print \"Number testfiles: %s\" % testfiles.count()\n for i, tf in enumerate(testfiles):\n if i % 100 == 0:\n print i\n with audioread.audio_open(tf.file.path.encode(\"utf-8\")) as f:\n duration = f.duration\n if duration < 60.0:\n if tf.file.negative:\n countneg+=1\n else:\n countpos+=1\n print \"Removing short duration file: %s (%s)\" % (tf.file.path.encode(\"utf-8\"), duration)\n cur = db.session.query(evaluation.Result).filter(evaluation.Result.testfile_id==tf.id)\n print \"%d results to remove\" % cur.count()\n cur.delete()\n db.session.query(evaluation.Testfile).filter(evaluation.Testfile.id==tf.id).delete()\n db.session.commit()\n testfiles = db.session.query(evaluation.Testfile).filter(evaluation.Testfile.testset_id==testset_id)\n print \"New number testfiles: %s\" % testfiles.count()\n print \"deleted negative: %s\" % countneg\n print \"deleted positive: %s\" % countpos", "def test_ten_results_returned(delete_previous_db_record):\n request = create_client().gateway.getResults(\n search=\"some string\").response()\n\n # Assert sucessful request\n assert_that(request.result.status, equal_to('200'))\n\n \"\"\"\n I'm assuming the json object uses a list to contain\n the results\n \"\"\"\n assert_that(len(request.result.results, equal_to(10)))", "def test_remove_file_group0(self):\n with copy_of_directory(assets.path_to('SBB0000F29300010000/data')) as tempdir:\n mets = OcrdMets(filename=join(tempdir, 'mets.xml'))\n self.assertEqual(len(mets.file_groups), 17)\n self.assertEqual(len(mets.find_all_files()), 35)\n # print()\n # before = sorted([x.ID for x in mets.find_all_files()])\n with self.assertRaisesRegex(Exception, \"not empty\"):\n mets.remove_file_group('OCR-D-GT-ALTO')\n mets.remove_file_group('OCR-D-GT-PAGE', recursive=True)\n # print([x for x in before if x not in sorted([x.ID for x in mets.find_all_files()])])\n self.assertEqual(len(mets.file_groups), 16)\n self.assertEqual(len(mets.find_all_files()), 33)", "def test_create_negative_feedback_removal(self):\n pass", "def process_test_stop(self, config, results, result_id, db):\n pass", "def trial_clean_up(self):\n pass", "def test_vsg_for_multiple_vcpes_in_vsg_vm_with_one_vcpe_removed(self):", "def filter_runs_by_run():\n this_ipts_number, run_tup_list = my_data.get_ipts_runs()\n\n first_run = 80230\n last_run = 80240\n status, filter_run_tup_list = vdapi.filter_runs_by_run(run_tup_list, first_run, last_run)\n assert len(filter_run_tup_list) == 10\n\n my_data.set_ipts_runs(ipts_number, filter_run_tup_list)\n\n return", "def __execute_experiment__(self, *args, **kwargs):\n from klibs.KLGraphics import clear\n\n if self.blocks == None:\n self.blocks = self.trial_factory.export_trials()\n\n P.block_number = 0\n P.trial_id = 0\n for block in self.blocks:\n P.recycle_count = 0\n P.block_number += 1\n P.practicing = block.practice\n self.block()\n P.trial_number = 1\n for trial in block: # ie. 
list of trials\n try:\n P.trial_id += 1 # Increments regardless of recycling\n self.__trial__(trial, block.practice)\n P.trial_number += 1\n except TrialException:\n block.recycle()\n P.recycle_count += 1\n clear() # NOTE: is this actually wanted?\n self.rc.reset()\n self.clean_up()\n\n self.incomplete = False\n if 'session_info' in self.database.tables:\n where = {'session_number': P.session_number}\n self.database.update('session_info', {'complete': True}, where)", "def test_delete7(self):\n pass", "def test_remove_through_timeout(self):\n TestStorage.set_timeout(20)\n TestStorage.set_time(100)\n store = RatedStatisticStorage()\n store._RatedStatisticStorage__add_single_outcome(\n \"n!node3\", \"cpu\", Outcome.HIGH, rospy.Time(100))\n self.assertEqual(\n store.get_outcome(\"n!node3\", \"cpu\"), Outcome.HIGH)\n TestStorage.set_time(120)\n self.assertEqual(\n store.get_outcome(\"n!node3\", \"cpu\"), Outcome.UNKNOWN)", "def test_remove_recurring_schedule(self):\n pass", "def test_remove(self):\n pass", "def test_unminimized(self):\n self.testcases[0].security_flag = True\n self.testcases[0].crash_state = 'abc\\ndef'\n self.testcases[0].crash_type = 'Heap-buffer-overflow\\nREAD {*}'\n self.testcases[0].minimized_keys = None\n self.testcases[1].security_flag = True\n self.testcases[1].crash_state = 'abc\\ndef'\n self.testcases[1].crash_type = 'Heap-buffer-overflow\\nREAD 3'\n\n for t in self.testcases:\n t.put()\n\n grouper.group_testcases()\n\n testcases = []\n for testcase_id in data_handler.get_open_testcase_id_iterator():\n testcases.append(data_handler.get_testcase_by_id(testcase_id))\n\n self.assertEqual(len(testcases), 2)\n self.assertEqual(testcases[0].group_id, 0)\n self.assertFalse(testcases[0].is_leader)\n self.assertEqual(testcases[1].group_id, 0)\n self.assertTrue(testcases[1].is_leader)", "def compactTests(self,mode='TOP'):\n if tuple(self.tests) == (0,0,0,0,0,0): return False\n if mode == 'TOP':\n newTests = self.tests[:]\n while newTests and not newTests[0]:\n del newTests[0]\n else:\n newTests = [test for test in self.tests if test]\n while len(newTests) < 6: newTests.append(0)\n if tuple(self.tests) != tuple(newTests):\n self.tests = newTests\n self.setChanged()\n return True", "def test_basic_deletion(self):\n args = self.get_args()\n config_filename = 'testsuite_cfg.json'\n args.config = config_filename\n config = self.create_config_file()\n\n config_file = open(config_filename, 'w')\n config_file.write(str(json.dumps(config)))\n config_file.close()\n\n # Create the \"stale\" entry on the remote site\n mac = '00:11:22:33:33:33'\n ip = '3.4.3.4'\n site2 = Session(SITE2_URL, SITE2_LOGIN, SITE2_PASSWORD)\n resp = site2.login()\n self.assertTrue(resp.ok)\n tag = IntersiteTag('intersite-testsuite', 'app', 'epg', 'Site1')\n remote_tenant = Tenant('intersite-testsuite')\n remote_l3out = OutsideL3('l3out', remote_tenant)\n remote_epg = OutsideEPG('intersite-testsuite-app-epg', remote_l3out)\n remote_ep = OutsideNetwork(ip, remote_epg)\n remote_ep.ip = ip + '/32'\n remote_tenant.push_to_apic(site2)\n\n time.sleep(2)\n self.assertTrue(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))\n\n execute_tool(args, test_mode=True)\n\n time.sleep(2)\n self.assertFalse(self.verify_remote_site_has_entry(mac, ip, 'intersite-testsuite',\n 'l3out', 'intersite-testsuite-app-epg'))", "def test_clear_old(self):\n user1 = UserFactory.create() # Total: 38 clicks\n self._link_with_clicks(user1, 5, [4, 6, 3]) # 18 clicks\n 
self._link_with_clicks(user1, 1, [8, 9, 2]) # 20 clicks\n\n user2 = UserFactory.create() # Total: 49 clicks\n self._link_with_clicks(user2, 13, [12, 11, 13]) # 49 clicks\n\n # Create existing leaderboard with users in opposite order.\n LeaderboardStandingFactory.create(user=user1, ranking=1, metric='link_clicks')\n LeaderboardStandingFactory.create(user=user2, ranking=2, metric='link_clicks')\n\n self.command.handle()\n ok_(not (LeaderboardStanding.objects\n .filter(user=user1, ranking=1, metric='link_clicks')\n .exists()))\n ok_(not (LeaderboardStanding.objects\n .filter(user=user2, ranking=2, metric='link_clicks')\n .exists()))", "def cleanup(self):\r\n check = []\r\n delete_this = [(API.url_plan, self.plans), (API.url_workout, self.workouts), (API.url_schl, self.schedules), (API.url_link, self.links)]\r\n for delete in delete_this:\r\n while delete[1] != []:\r\n self.delete_field(delete[0], delete[1])\r\n if requests.get(delete[0], headers=self.headers).json()['results'] == []:\r\n check.append(True)\r\n else:\r\n check.append(False) \r\n if False in check:\r\n return False\r\n return True", "def clean(self):\n self.df = _data.prune(self.df, [REGEX_PATTERN_GCI, REGEX_PATTERN_DB_ID])\n self.df, _ = _data.remove_totally_failed_tests(self.df)\n self.is_cleaned = True", "def pytest_sessionfinish(session, exitstatus):\n\n # dat files are created when using attachements\n print(\"\\n-------------------------\\nClean dpytest_*.dat files\")\n fileList = glob.glob('./dpytest_*.dat')\n for filePath in fileList:\n try:\n os.remove(filePath)\n except Exception:\n print(\"Error while deleting file : \", filePath)", "def test_no_change_without_enough_results(self):\n MetadataUpdater.min_results_for_update = 2\n self.write_contents(\n 'external/wpt/fail.html.ini', \"\"\"\\\n [fail.html]\n expected: FAIL\n \"\"\")\n self.update({\n 'results': [{\n 'test': '/fail.html',\n 'status': 'PASS',\n 'expected': 'FAIL',\n }],\n })\n self.assert_contents(\n 'external/wpt/fail.html.ini', \"\"\"\\\n [fail.html]\n expected: FAIL\n \"\"\")", "def test_999_remove_testfiles(self):\n __dir_game_saves = os.path.dirname(__file__)\n __dir_game_saves = os.path.join(__dir_game_saves, 'games')\n __test_filename = consts.TEST_FILENAME\n __dir_game_testfile = os.path.join(__dir_game_saves, __test_filename)\n __test_filename_append1 = __test_filename + \"__1\"\n __dir_game_testfile_append1 = os.path.join(__dir_game_saves, __test_filename_append1)\n __test_filename_append2 = __test_filename + \"__2\"\n __dir_game_testfile_append2 = os.path.join(__dir_game_saves, __test_filename_append2)\n __test_filename_append3 = __test_filename + \"__3\"\n __dir_game_testfile_append3 = os.path.join(__dir_game_saves, __test_filename_append3)\n __dir_game_log = os.path.join(__dir_game_saves, \"log\")\n __test_logname = __test_filename + \"_log.txt\"\n __dir_game_logfile = os.path.join(__dir_game_log, __test_logname)\n os.remove(__dir_game_logfile)\n self.assertFalse(os.path.isfile(__dir_game_logfile))\n __list_files = os.listdir(__dir_game_log)\n if len(__list_files) == 0:\n os.removedirs(__dir_game_log)\n os.remove(__dir_game_testfile)\n self.assertFalse(os.path.isfile(__dir_game_testfile))\n os.remove(__dir_game_testfile_append1)\n self.assertFalse(os.path.isfile(__dir_game_testfile_append1))\n os.remove(__dir_game_testfile_append2)\n self.assertFalse(os.path.isfile(__dir_game_testfile_append2))\n os.remove(__dir_game_testfile_append3)\n self.assertFalse(os.path.isfile(__dir_game_testfile_append3))\n __list_files = 
os.listdir(__dir_game_saves)\n if len(__list_files) == 0:\n os.removedirs(__dir_game_saves)", "def test_command__dont_update_finished_survey(self):\n assignment = WorkflowCollectionAssignment.objects.get(id=self.assignment_3.id)\n self.assertEqual(assignment.status, \"CLOSED_COMPLETE\")\n\n out = StringIO()\n call_command(\"assignment_terminator\", days_old=\"30\", type=\"SURVEY\", stdout=out)\n\n assignment.refresh_from_db()\n self.assertEqual(assignment.status, \"CLOSED_COMPLETE\")", "def clean_passes(threshold=misc.get_now_utc()):\n logger.info('>>> Cleaning passes, threshold = ' + str(threshold))\n\n try:\n\n no_deleted = pass_models.PassSlots.objects.filter(\n end__lte=threshold\n ).delete()\n logger.debug('>>> tasks@clean_passes.filtered = ' + str(no_deleted))\n\n except Exception as ex:\n\n logger.exception('>>> Exception cleaning passes, ex = ' + str(ex), ex)\n return\n\n logger.info('>>> DONE cleaning passes')", "def test_milestone_remove_ok(self):\n test_name = sys._getframe().f_code.co_name\n self._execute('milestone remove milestone3')\n rv, output = self._execute('milestone list')\n self.assertEqual(0, rv)\n self.assertEqual(self.expected_results[test_name], output)", "def test_xblockcompletion_get_all_data_no_responses(self, report):\n generated_report_data = {} \n report.return_value = generated_report_data\n data = {'format': False, 'course': str(self.course.id), 'base_url':'this_is_a_url'}\n task_input = {'data': data }\n with patch('lms.djangoapps.instructor_task.tasks_helper.runner._get_current_task'):\n result = generate(\n None, None, self.course.id,\n task_input, 'EOL_Xblock_Completion'\n )\n report_store = ReportStore.from_config(config_name='GRADES_DOWNLOAD')\n header_row = \";\".join(['\"Username\"', '\"Email\"', '\"Run\"', '\"Seccion\"', '\"SubSeccion\"', '\"Unidad\"', '\"Titulo\"', '\"Pregunta\"', '\"Respuesta Estudiante\"', '\"Resp. 
Correcta\"', '\"Intentos\"', '\"Pts Ganados\"', '\"Pts Posibles\"', '\"Pts Total Componente\"', '\"block id\"', '\"Has saved answers\"'])\n base_student_row = \";\".join([\n self.items[0].display_name,\n self.student.username,\n self.student.email,\n '',\n self.chapter.display_name,\n self.section.display_name,\n self.subsection.display_name\n ])\n report_csv_filename = report_store.links_for(self.course.id)[0][0]\n report_path = report_store.path_to(self.course.id, report_csv_filename)\n with report_store.storage.open(report_path) as csv_file:\n csv_file_data = csv_file.read()\n # Removing unicode signature (BOM) from the beginning\n csv_file_data = csv_file_data.decode(\"utf-8-sig\")\n self.assertIn(header_row, csv_file_data)\n self.assertFalse(base_student_row in csv_file_data)", "def test_remove_all(self): #SAUCE-LAB-8\n login = LoginPage(self.driver)\n login.open()\n inventory_page = login.login(_DEF_USER, _DEF_PASSWORD)\n first_item = inventory_page.products\n first_item: InventoryItem\n for item in first_item:\n item.add_to_cart()\n if inventory_page.header.get_total_cart_items() == 6:\n print('\\n')\n print(f'Total of products {inventory_page.header.get_total_cart_items()}')\n else:\n print('Not all items were added')\n for item in first_item:\n item.remove_from_cart()\n if inventory_page.header.get_total_cart_items() == 0:\n print('\\n')\n print(f'Total of products {inventory_page.header.get_total_cart_items()}')\n else:\n print('Not all items were removed')", "def test_message_remove(url):\n test_clear(url)\n admin_tk = channel_user_create_0(url)[0]\n \n test_channels = {\n 'token': admin_tk,\n 'name': 'channel_1',\n 'is_public': True,\n }\n requests.post(url + \"channels/create\", json=test_channels)\n\n test_message_1 = {\n 'token': admin_tk,\n 'channel_id': 1,\n 'message': 'Hello'\n }\n resp = requests.post(url + \"message/send\", json=test_message_1)\n message_send_resp = resp.json()\n assert message_send_resp['message_id'] == 1\n\n test_message_2 = {\n 'token': admin_tk,\n 'channel_id': 1,\n 'message': 'Hello Again'\n }\n resp = requests.post(url + \"message/send\", json=test_message_2)\n message_send_resp = resp.json()\n assert message_send_resp['message_id'] == 2\n\n test_remove_msg_1 = {\n 'token': admin_tk,\n 'message_id': 1 \n }\n resp = requests.delete(url + \"message/remove\", json=test_remove_msg_1)\n message_remove_resp = resp.json()\n assert message_remove_resp == {}\n\n test_channel_msgs_1 = {\n 'token': admin_tk,\n 'channel_id': 1,\n 'start': 0,\n }\n resp = requests.get(url + \"channel/messages\", params=test_channel_msgs_1)\n channel_msgs_resp = resp.json()\n assert channel_msgs_resp['messages'][0]['message_id'] == 2\n assert channel_msgs_resp['messages'][0]['u_id'] == 1\n assert channel_msgs_resp['messages'][0]['message'] == 'Hello Again'\n\n test_remove_msg_2 = {\n 'token': admin_tk,\n 'message_id': 2\n }\n resp = requests.delete(url + \"message/remove\", json=test_remove_msg_2)\n message_remove_resp = resp.json()\n assert message_remove_resp == {}\n\n resp = requests.get(url + \"channel/messages\", params={\n 'token': admin_tk,\n 'channel_id': 1,\n 'start': 0, \n })\n channel_msgs_resp = resp.json()\n assert channel_msgs_resp['messages'] == []", "def fin():\n result_list = list()\n result_list.append(\n ll_clusters.updateCluster(\n positive=True,\n cluster=conf.CLUSTER_NAME[0],\n scheduling_policy=conf.POLICY_NONE\n )\n )\n result_list.append(\n ll_sch_policies.remove_scheduling_policy(\n policy_name=conf.AFFINITY_POLICY_NAME\n )\n )\n assert 
all(result_list)", "def test_cancelling_for_full_event_emails_waiting_list(self, mock_tz):\n mock_tz.now.return_value = datetime(\n 2015, 2, 13, 17, 15, tzinfo=dt_timezone.utc\n )\n\n # make full event (setup has one paid and one unpaid)\n # cancellation period =1, date = 2015, 2, 13, 18, 0\n self.event.max_participants = 2\n self.event.save()\n\n # make some waiting list users\n for i in range(3):\n baker.make_recipe(\n 'booking.waiting_list_user', event=self.event,\n user__email='test{}@test.com'.format(i)\n )\n\n management.call_command('cancel_unpaid_bookings')\n # emails are sent to user per cancelled booking (1) and\n # one email with bcc to waiting list (1) and studio (1)\n self.assertEqual(len(mail.outbox), 3)\n self.assertEqual(\n sorted(mail.outbox[1].bcc),\n ['[email protected]', '[email protected]', '[email protected]']\n )", "def run(self):\n msg = \"\"\"Remove most traces of pakit. You are warned!\n\n Will delete ...\n - all links from the link directory to pakit's programs.\n - all programs pakit built, including the source trees.\n - all downloaded recipes.\n - all logs and configs EXCEPT the pakit.yml file.\n OK? y/n \"\"\"\n if user_input(msg).strip().lower()[0] != 'y':\n USER.info('Aborted.')\n return\n\n USER.info('Removing all links made by pakit.')\n config = pakit.conf.CONFIG\n unlink_man_pages(config.path_to('link'))\n walk_and_unlink_all(config.path_to('link'), config.path_to('prefix'))\n\n uris_file = os.path.join(config.path_to('recipes'), 'uris.yml')\n ruri_db = pakit.conf.RecipeURIDB(uris_file)\n to_remove = [config.path_to('prefix'),\n config.path_to('source'),\n uris_file]\n to_remove += [ruri_db[uri]['path'] for uri in ruri_db\n if ruri_db[uri]['is_vcs']]\n to_remove += glob.glob(config.get('pakit.log.file') + '*')\n\n for path in to_remove:\n try:\n USER.info('Deleting: %s', path)\n if os.path.isdir(path):\n shutil.rmtree(path)\n else:\n os.remove(path)\n except OSError:\n logging.error('Could not delete path: %s', path)", "def closeOutMonterCarloRequests(url, workflows):\n noSiteWorkflows = []\n for workflow in workflows:\n datasets = reqMgrClient.outputdatasetsWorkflow(url, workflow)\n closeOutWorkflow = True\n #skip montecarlos on a special queue\n if reqMgrClient.getRequestTeam(url, workflow) == 'analysis':\n continue\n for dataset in datasets:\n closePercentage = 0.95\n # validation for SMS montecarlos\n if 'SMS' in dataset:\n closePercentage= 1.00\n percentage = percentageCompletion(url, workflow, dataset)\n phedexSubscription = phedexClient.getCustodialMoveSubscriptionSite(dataset)\n transPerc = 0\n closedBlocks = None\n duplicate = None\n # if dataset has subscription and enough events we check\n # duplicates, transfer percentage and closed blocks\n if phedexSubscription and percentage >= float(closePercentage):\n transPerc = phedexClient.getTransferPercentage(url, dataset, phedexSubscription)\n duplicate = dbs3Client.duplicateLumi(dataset)\n if not duplicate:\n closeOutDataset = True\n else:\n closeOutDataset = False\n else:\n closeOutDataset = False\n #validate when percentage is ok but has not phedex subscription\n if percentage >= float(closePercentage) and not phedexSubscription:\n noSiteWorkflows.append(workflow)\n #if at least one dataset is not ready wf cannot be closed out\n closeOutWorkflow = closeOutWorkflow and closeOutDataset\n print '| %80s | %100s | %4s | %5s| %3s | %5s| %5s|' % (workflow, dataset,str(int(percentage*100)),\n str(phedexSubscription), str(int(transPerc*100)), duplicate, closeOutDataset)\n #workflow can only be closed 
out if all datasets are ready\n if closeOutWorkflow:\n reqMgrClient.closeOutWorkflowCascade(url, workflow)\n #separation line\n print '-'*180\n return noSiteWorkflows", "def test_remove_nonexistent_subtest(self):\n self.write_contents(\n 'external/wpt/variant.html.ini', \"\"\"\\\n [variant.html?foo=baz]\n expected: CRASH\n\n [subtest that was removed]\n expected: CRASH\n custom_key: should not prevent removal\n \"\"\")\n self.update(\n {\n 'results': [{\n 'test': '/variant.html?foo=baz',\n 'status': 'CRASH',\n 'subtests': [],\n }],\n },\n overwrite_conditions='yes')\n self.assert_contents(\n 'external/wpt/variant.html.ini', \"\"\"\\\n [variant.html?foo=baz]\n expected: CRASH\n \"\"\")", "def test_10(self):\n assert 'False' == Api.requestBlock('test-10')", "def tearDown():\n for output_file_path in Path(output_dir).glob(\"test_voting_learner_cross_validate*\"):\n output_file_path.unlink()\n\n for output_file_path in Path(\".\").glob(\"test_voting_learner_cross_validate*\"):\n output_file_path.unlink()\n\n config_file_path = Path(config_dir) / \"test_voting_learner_cross_validate.cfg\"\n config_file_path.unlink()\n\n remove_jsonlines_feature_files(train_dir)", "def test_01_delete_run(self):\n client = self.client\n\n j = check_json(client, 'api/db_default/v4/nts/runs/1')\n sample_ids = [s['id'] for s in j['tests']]\n self.assertNotEqual(len(sample_ids), 0)\n for sid in sample_ids:\n resp = client.get('api/db_default/v4/nts/samples/{}'.format(sid))\n self.assertEqual(resp.status_code, 200)\n\n resp = client.delete('api/db_default/v4/nts/runs/1')\n self.assertEqual(resp.status_code, 401)\n\n resp = client.delete('api/db_default/v4/nts/runs/1',\n headers={'AuthToken': 'wrong token'})\n self.assertEqual(resp.status_code, 401)\n\n resp = client.delete('api/db_default/v4/nts/runs/1',\n headers={'AuthToken': 'test_token'})\n self.assertEqual(resp.status_code, 200)\n\n resp = client.get('api/db_default/v4/nts/runs/1')\n self.assertEqual(resp.status_code, 404)\n\n for sid in sample_ids:\n resp = client.get('api/db_default/v4/nts/samples/{}'.format(sid))\n self.assertEqual(resp.status_code, 404)" ]
[ "0.6036735", "0.58126444", "0.5707902", "0.5680992", "0.5655867", "0.56548756", "0.5543772", "0.5505886", "0.5502805", "0.5481863", "0.54809284", "0.5464488", "0.5420158", "0.5416538", "0.5402889", "0.5371889", "0.5359482", "0.5342731", "0.53407174", "0.5334368", "0.5320321", "0.5319818", "0.5316838", "0.5309624", "0.5308823", "0.53065723", "0.5304401", "0.5288708", "0.5285671", "0.52796096", "0.5258622", "0.5258167", "0.52535665", "0.5247057", "0.5233678", "0.5224422", "0.52068543", "0.52063316", "0.5205479", "0.5204781", "0.51991135", "0.51871526", "0.5178748", "0.51764625", "0.5171399", "0.5167618", "0.51597434", "0.51558995", "0.51515496", "0.5149447", "0.51492333", "0.5129171", "0.5118131", "0.5115508", "0.5108625", "0.51034355", "0.5102129", "0.51010054", "0.510053", "0.50931644", "0.50894904", "0.5075946", "0.5069702", "0.50684386", "0.50601876", "0.5055868", "0.5051439", "0.50498134", "0.50487155", "0.5047785", "0.50438714", "0.50431687", "0.5030203", "0.5023631", "0.50161386", "0.50153846", "0.500833", "0.50076437", "0.50051147", "0.5001215", "0.500072", "0.49974707", "0.4994085", "0.49930957", "0.49852464", "0.4984379", "0.4984333", "0.4978922", "0.49755922", "0.49742457", "0.49718657", "0.49654174", "0.4965212", "0.49628776", "0.49599913", "0.49565536", "0.49523222", "0.49519947", "0.49476856", "0.49469188" ]
0.73820996
0
POST handler that accepts uploads of files. It does not store the content of the file, just creates entry in the dictionary to keep track of upload activity.
async def UploadHandlerMem(request): # You cannot rely on Content-Length if transfer is chunked. try: fileSize = 0 timestampStart = datetime.utcnow() usedHandler = 'UploadHandlerMem' try: testId = request.match_info['testId'] except Exception as e: testId = "noId" try: sourceHostPort = request.match_info['sourceHostPort'] except Exception as e: sourceHostPort = "noPort" try: fileName = request.match_info['fileName'] except Exception as e: fileName = "noFileName" while True: chunk, is_end_of_http_chunk = await request.content.readchunk() if not chunk: break fileSize += len(chunk) peername = request.transport.get_extra_info('peername') if peername is not None: host, port = peername else: host = 'nohost' timestampEnd = datetime.utcnow() taskDuration = str(timestampEnd - timestampStart) if testId != "noId" and sourceHostPort != "noPort" and fileName != "noFileName": testConfirmations.setdefault(testId, {}).setdefault(NODE_ADDRESS[0]+':'+str(HOST_PORT), {}).setdefault(host+':'+sourceHostPort, {})['UploadHandlerMem'] = { 'status': 'success', 'message': 'File uploaded', 'testId': testId, 'usedHandler': 'UploadHandlerMem', 'taskDuration': taskDuration, 'fileName': fileName, 'sourceFileName': '10k.txt', 'fileSize': fileSize, 'timestampStart': str(timestampStart), 'timestampEnd': str(timestampEnd) } response_obj = { 'status': 'success', 'message': 'File uploaded to memory', 'testId': testId, 'usedHandler': usedHandler, 'taskDuration': taskDuration, 'fileName': fileName, 'fileSize': fileSize, 'timestampStart': str(timestampStart), 'timestampEnd': str(timestampEnd) } return web.json_response(response_obj) except Exception as e: response_obj = { 'status' : 'failed', 'reason': str(e) } print(str(e)) return web.json_response(response_obj)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post(self):\n filename = str(time.time())\n filepath = os.path.join(\n os.path.join(current_app.config['UPLOAD_FOLDER'], filename))\n with open(filepath, 'bw') as uploadfile:\n chunk_size = 1024\n while True:\n chunk = request.stream.read(chunk_size)\n if len(chunk) == 0:\n break\n uploadfile.write(chunk)\n current_app.logger.info('file %s upload successfully', filename)\n return {'timestamp': filename}, http.HTTPStatus.CREATED", "def post(self):\n data = self.post_parser.parse_args()\n\n try:\n LOGGER.debug('Trying to upload file to storage')\n self.storage.upload(data.file)\n LOGGER.debug('The file was uploaded with success')\n return {\n 'filename': data.file.filename,\n 'message': 'The file was uploaded with success'\n }\n except BaseException:\n abort(500, message='The file was not uploaded')\n LOGGER.error('A generic exception has occurred.', exc_info=True)", "def upload():\n return handle_upload(app, request)", "def upload(context, request):\n if request.method == 'POST':\n if not hasattr(request.POST['content'], 'file'):\n raise RuntimeError('No file attached')\n\n fieldstorage = request.POST['content']\n filename = fieldstorage.filename\n logger.info(\"%s posted\", filename)\n\n with bm(\"%s released\" %filename):\n dest = path(request.file_root) / request.namer(filename)\n dest.write_bytes(fieldstorage.file.read())\n try:\n request.registry.notify(event.PackageAdded(request.index, path=dest))\n request.response.headers['X-Swalow-Status'] = 'SUCCESS'\n try:\n for ep in pkg_resources.iter_entry_points('cheeseprism.on_upload'):\n func = ep.load()\n func(context, request, dest)\n except Exception as e:\n logger.exception('Entry point %r failed', ep)\n return request.response\n except :\n logger.exception(\"Processing of %s failed\", filename)\n raise\n return {}", "def post(self, request: HttpRequest) -> HttpResponse:\n for __, _file in request.FILES.items():\n new_upload, _created = self.handle_post_file(_file)\n new_upload.user = request.user\n new_upload.save()\n # Count initial view\n ObjectViewFile.count_view(new_upload, request)\n LOGGER.info(\"Uploaded %s\", new_upload.filename)\n return HttpResponse(status=204)", "def post(cls, flow_name: str):\n data = file_schema.load(request.files) # {\"file\": FileStorage}\n try:\n file_path = uploads.save_file(data[\"file\"], folder=flow_name)\n basename = uploads.get_basename(file_path)\n return {\"message\": gettext(\"file_uploaded\").format(basename)}, 200\n \n except UploadNotAllowed:\n extension = uploads.get_extension(data[\"file\"])\n return {\"message\": gettext(\"file_illegal_extension\").format(extension)}, 400", "def post(self):\n source = 'uploaded by user'\n upload_files = self.get_uploads('file')\n blob_key = upload_files[0].key()\n name = self.request.get('name')\n\n user = users.get_current_user()\n\n username = 'admin'\n date = datetime.datetime.now()\n str_blob_key = str(blob_key)\n key = FileMetadata.get_key_name(username, date, str_blob_key)\n\n ctx = ndb.get_context()\n meta = FileMetadata(key_name=key, parent=_PARENT)\n meta.owner = user\n meta.filename = name\n meta.uploaded_on = date\n meta.source = source\n meta.blobkey = str_blob_key\n meta.put()\n ctx.clear_cache()\n self.redirect('/admin')", "def post(self, request, *args, **kwargs):\n self.create_flow_file_db_entry()\n self.handle_chunk(request)\n return self.return_response(self.flow_file.identifier)", "def post(self):\n request_data = request.get_json(force=True)\n current_path = self.get_current_path()\n file_name = request_data.get('file_name')\n\n 
if not file_name:\n abort(400, message=\"File name must not be empty!\")\n\n full_path = os.path.join(current_path, file_name)\n\n if os.path.exists(full_path):\n abort(400, message=\"File already exists!\")\n\n if not self.is_allowed(full_path):\n abort(403, message=\"You are not allowed to this path\")\n\n with open(full_path, 'w+') as fp:\n pass\n\n return {\"message\": \"OK\"}", "def post(self):\n\n upload_files = self.get_uploads('file')\n blob_info = upload_files[0]\n self.redirect('/?upload_info=%s' % urllib.quote(blob_info.filename))", "def upload_doc():\n file = request.files[\"file\"]\n meta_data = {\"name\": request.form[\"name\"].lower()}\n file_id = save_file(meta_data, file)\n print('file-id: ' + file_id)\n index_after_uploading(file_id)\n return jsonify({\"file_id\": file_id})", "def upload():\n\n # TODO: decorator to check token\n token = request.headers.get(\"Authorization\")\n\n has_text = bool(request.get_json())\n has_file = request.files and request.files[\"file\"]\n if not has_text and not has_file:\n error = \"No text input and no file provided\"\n return jsonify({\"success\": False, \"message\": error})\n\n filename, error = save_text(request)\n if error:\n return jsonify({\"success\": False, \"message\": error})\n\n job_id = schedule(filename, token)\n add_user_job(job_id, token)\n\n return jsonify({\"success\": True, \"data\": {\"jobId\": job_id}})", "def post(self, request, *args, **kw):\n logger.debug(\"POST request on UploadHandler\")\n try:\n if request.data is None or 'site' not in request.data or 'data' not in request.data:\n raise BadRequestException(\"Empty data or site\")\n site = get_object_or_404(Site, pk=request.data.get('site', None))\n logger.error('POST request at UploadHandler for site {}'.format(site))\n if request.user is None or not request.user.profile.canUpload or not site.isActive:\n raise BadRequestException(\"Unauthorized\")\n Timer(0, lambda: UploadHandler._save_to_database(request)).start()\n return Response(None, status=status.HTTP_204_NO_CONTENT)\n except BadRequestException as ex:\n logger.debug(ex)\n return Response(status=status.HTTP_400_BAD_REQUEST, data=ex.strerror)\n except Exception as ex:\n logger.error(ex)\n return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR, data=ex.strerror)", "def post(self):\n folder_path = \"{0}/user_uploads/{1}/{2}/\".format(self.__APP_PATH__, current_user.net_id, request.headers[\"folder_name\"])\n\n request_submitted = path.exists(\"{0}request.submitted\".format(folder_path))\n request_processed = path.exists(\"{0}request.processed\".format(folder_path))\n request_voided = path.exists(\"{0}request.voided\".format(folder_path))\n\n if not request_submitted and not request_processed and not request_voided:\n if 'file' not in request.files or \"folder_name\" not in request.headers:\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"Invalid request format.\"})\n\n file = request.files['file']\n\n if file and allowed_file(file.filename, self.__ALLOWED_EXTENSIONS__):\n try:\n Path(folder_path).mkdir(parents=True, exist_ok=True)\n filename = secure_filename(file.filename)\n file.save(path.join(folder_path, filename))\n\n return jsonify({\"success\": True, \"type\": \"success\", \"message\": \"File successfully uploaded.\", \"filename\": filename})\n except Exception as e:\n print(e)\n\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"An error occurred while saving the file.\"})\n\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": 
\"Invalid file or file extension.\"})\n return jsonify({\"success\": False, \"type\": \"error\", \"message\": \"Status of the request has changed.\"})", "def api_upload():\n return make_response(file_manager.save_uploaded_file(), 200)", "def handle_post_file(self, post_file) -> Tuple[Object, bool]:\n _, ext = os.path.splitext(post_file.name)\n # Remove leading dot from extension\n ext = ext[1:] if ext.startswith(\".\") else ext\n # Generate hashes first to check if upload exists already\n hashes = generate_hashes(post_file)\n # Reset reading position so we can read the file again\n post_file.seek(0)\n # Check if hashes already exists\n existing = Object.objects.filter(sha512=hashes.get(\"sha512\"))\n if existing.exists():\n LOGGER.debug(\"De-duped existing upload %s\", existing.first().filename)\n return existing.first(), False\n # Create new upload object\n new_upload = Object(file=save_from_post(post_file.read(), extension=ext))\n new_upload.save()\n LOGGER.info(\"Uploaded %s\", new_upload.filename)\n return new_upload, True", "def post(self, request, *args, **kwargs):\n try:\n files = request.session['__ddoc_files']\n\n except KeyError:\n files = {}\n\n action = request.POST.get('action', 'add_file')\n\n if action == 'remove_file':\n file_name = request.POST.get('file_name', '')\n\n n_files = {}\n for key, file in files.items():\n if key != file_name:\n n_files[key] = file\n\n files = n_files\n\n else:\n for key, file in request.FILES.items():\n file_name = file.name\n\n idx = 1\n while file_name in files.keys():\n file_name = '%d_%s' % (idx, file_name)\n idx += 1\n\n files[file_name] = dict(\n content=force_text(base64.b64encode(file.read())),\n content_type=file.content_type,\n size=file.size,\n )\n\n request.session['__ddoc_files'] = files\n\n return HttpResponseRedirect('.')", "def upload_file_handle(self,\n path: str,\n content_type: str,\n *,\n generate_preview: bool = False,\n storage_location_id: int = SYNAPSE_DEFAULT_STORAGE_LOCATION_ID,\n use_multiple_threads: bool = True) -> dict:\n validate_type(str, path, \"path\")\n validate_type(str, content_type, \"content_type\")", "async def create_upload_file(file: UploadFile = File(...)):\n return dict(filename=file.filename, content_type=file.content_type)", "def upload_file(self, file: Union[str, bytes, StringIO, TextIOWrapper, BytesIO], filename: str, directory: str = 'gcodes') -> Dict:\n raise NotImplementedError", "def post(self, request: HttpRequest) -> HttpResponse:\n if \"id\" in request.POST and \"imagedata\" in request.FILES:\n # Instantiate BrowserObjectView to use handle_post_file\n upload_view = BrowserObjectView()\n upload, created = upload_view.handle_post_file(request.FILES[\"imagedata\"])\n if created:\n # Run auto-claim\n if CONFIG.y(\"auto_claim_enabled\", False) and \"username\" in request.POST:\n matching = get_user_model().objects.filter(\n username=request.POST.get(\"username\")\n )\n if matching.exists():\n upload.user = matching.first()\n LOGGER.debug(\n \"Auto-claimed upload to user '%s'\",\n request.POST.get(\"username\"),\n )\n upload.save()\n # Count initial view\n ObjectViewFile.count_view(upload, request)\n LOGGER.info(\"Uploaded %s\", upload.filename)\n # Generate url for client to open\n default_return_view = CONFIG.y(\"default_return_view\", \"sha256\").replace(\n \"view_\", \"\"\n )\n upload_hash = getattr(upload, default_return_view, \"sha256\")\n url = reverse(\n \"view_\" + default_return_view,\n kwargs={\"file_hash\": upload_hash},\n )\n return HttpResponse(request.build_absolute_uri(url))\n 
return HttpResponse(status=400)", "def upload():\n file = None\n if 'file' in request.files:\n file = request.files['file']\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return json_response(\n message=\"Upload successful\",\n result=\"/v/{}\".format(filename)\n )\n return json_response(\n message=\"Invalid filename or extension (jpg, png, gif)\",\n status_code=500\n )", "def post(self):\n file_ = self.verify_param('file', cgi.FieldStorage)\n data, filemask = self.build_post_data(file_)\n return data, filemask", "def upload():\n form = request.form\n\n # Create a unique \"session ID\" for this particular batch of uploads.\n upload_key = str(uuid4())\n\n # Is the upload using Ajax, or a direct POST by the form?\n is_ajax = False\n if form.get(\"__ajax\", None) == \"true\":\n is_ajax = True\n\n # Target folder for these uploads.\n target = app.config['UPLOAD_FOLDER'] + \"/{}\".format(upload_key)\n try:\n os.mkdir(target)\n except:\n if is_ajax:\n return ajax_response(False, \"Couldn't create upload directory: {}\".format(target))\n else:\n return \"Couldn't create upload directory: {}\".format(target)\n\n for image_upload in request.files.getlist(\"file\"):\n filename = secure_filename(image_upload.filename)\n destination = \"/\".join([target, filename])\n print(\"Accept incoming file:\", filename)\n print(\"Save it to:\", destination)\n image_upload.save(destination)\n upload_image.delay(destination)\n\n if is_ajax:\n return ajax_response(True, upload_key)\n else:\n return redirect(\"/\")", "def handle_request_upload(self, msg):\n\n\t\tdirect_response = not msg.arguments or msg.arguments[0] in ('', '/')\n\t\tresult = []\n\t\tfor file_obj in msg.options:\n\t\t\ttmpfilename, filename, name = file_obj['tmpfile'], file_obj['filename'], file_obj['name']\n\n\t\t\t# limit files to tmpdir\n\t\t\tif not os.path.realpath(tmpfilename).startswith(TEMPUPLOADDIR):\n\t\t\t\traise BadRequest('invalid file: invalid path')\n\n\t\t\t# check if file exists\n\t\t\tif not os.path.isfile(tmpfilename):\n\t\t\t\traise BadRequest('invalid file: file does not exists')\n\n\t\t\t# don't accept files bigger than umc/server/upload/max\n\t\t\tst = os.stat(tmpfilename)\n\t\t\tmax_size = int(ucr.get('umc/server/upload/max', 64)) * 1024\n\t\t\tif st.st_size > max_size:\n\t\t\t\tos.remove(tmpfilename)\n\t\t\t\traise BadRequest('filesize is too large, maximum allowed filesize is %d' % (max_size,))\n\n\t\t\tif direct_response:\n\t\t\t\twith open(tmpfilename) as buf:\n\t\t\t\t\tb64buf = base64.b64encode(buf.read())\n\t\t\t\tresult.append({'filename': filename, 'name': name, 'content': b64buf})\n\n\t\tif direct_response:\n\t\t\tself.finished(msg.id, result)\n\t\telse:\n\t\t\tself.handle_request_command(msg)", "def handle_upload(f, attrs):\n\n # chunked = False\n dest_folder = os.path.join(app.config['UPLOAD_DIRECTORY'], attrs['qquuid'])\n dest = os.path.join(dest_folder, attrs['qqfilename'])\n save_upload(f, dest)", "def post_file(self, file_, api=None):\n api = api or self.api\n url = utils.join_url(self.path)\n files = {'data': file_}\n new_attributes = api.post(url, {}, {}, files)\n # self.error = None\n self.merge(new_attributes)\n return self.success()", "def handle_upload(f, attrs):\n\n # chunked = False\n print 'UPLOAD DIRECTORY:', UPLOAD_DIRECTORY\n dest_folder = os.path.join(UPLOAD_DIRECTORY, attrs['qquuid'])\n dest = os.path.join(dest_folder, attrs['qqfilename'])\n save_upload(f, dest)", "def upload_file(self):\n 
request = copy.deepcopy(self.request_template)\n data = json.dumps(request)\n curr_file = {\n 'request': data,\n 'file': open(self.file_path, 'rb')\n }\n print(\"Sending Upload request of av for file {}\".format(self.file_name))\n try:\n response = requests.post(url=self.url + \"upload\", files=curr_file, verify=False)\n except Exception as E:\n print(\"Upload file failed. file: {} , failure: {}\".format(self.file_name, E))\n raise\n response_j = response.json()\n print(\"av Upload response status for file {} : {}\".format(self.file_name,\n response_j[\"response\"][0][\"status\"][\"label\"]))\n return response_j", "def fileupload():\r\n response.view = 'generic.json'\r\n\r\n if not session.add_files:\r\n session.add_files = []\r\n\r\n def GET(tulip_url, file=None, deletefile=None, uploads=None, commit=None):\r\n try:\r\n tulip_url = request.args[0]\r\n tulip = Tulip(url=tulip_url)\r\n except:\r\n return json.dumps({\"success\": \"false\"})\r\n if not tulip.is_wb():\r\n return json.dumps({\"success\": \"false\"})\r\n\r\n if deletefile:\r\n session.add_files = [f for f in session.add_files \\\r\n if f.filename != deletefile]\r\n return json.dumps(FileUpload.delete(uploads=True))\r\n elif file:\r\n upload = json.loads(FileUpload.get())\r\n\r\n filedir = FileUpload.get_file_dir(leak_id=tulip.leak.id)\r\n\r\n src_file = os.path.join(request.folder, 'uploads',\r\n session.upload_dir, upload[0]['name'])\r\n dst_folder = os.path.join(request.folder, 'material', filedir)\r\n\r\n return json.dumps(upload)\r\n elif commit:\r\n # print \"Session value: %s\" % session.add_files\r\n if not session.add_files:\r\n return json.dumps({\"success\": \"false\"})\r\n filedir = FileUpload.get_file_dir(leak_id=tulip.leak.id)\r\n # finding right progressive number\r\n prog = 1\r\n dst_folder = os.path.join(request.folder, 'material',\r\n filedir, str(prog))\r\n while os.path.exists(dst_folder):\r\n prog += 1\r\n dst_folder = os.path.join(request.folder, 'material',\r\n filedir, str(prog))\r\n os.makedirs(dst_folder)\r\n\r\n for filedata in session.add_files:\r\n if os.path.exists(os.path.join(request.folder,\r\n 'uploads', session.upload_dir,\r\n filedata.filename)):\r\n src_file = os.path.join(request.folder, 'uploads',\r\n session.upload_dir, filedata.filename)\r\n try:\r\n shutil.move(src_file,\r\n os.path.join(dst_folder.decode(\"utf-8\"),\r\n filedata.filename))\r\n except OSError:\r\n pass\r\n else:\r\n session.add_files.remove(filedata)\r\n\r\n tulip.leak.add_material(tulip.leak.id, prog, None,\r\n file=json.dumps(session.add_files))\r\n add_files = [(f.ext, f.filename, f.size)\r\n for f in session.add_files]\r\n session.add_files = None\r\n # Leak needs to be spooled again\r\n db(db.leak.id == tulip.leak.id).update(spooled=False)\r\n\r\n for t_id in gl.get_targets(None):\r\n target = gl.get_target(t_id)\r\n try:\r\n t_url = db((db.tulip.leak_id==tulip.leak.id) & (db.tulip.target_id==t_id.id)).select().first().url\r\n db.notification.insert(target=target.name,\r\n address=target.contact,\r\n tulip=t_url,\r\n leak_id=tulip.leak.id,\r\n type=\"material\")\r\n except:\r\n print \"problem in adding to notification DB\"\r\n\r\n db.commit()\r\n\r\n return json.dumps({\"success\": \"true\", \"data\": add_files})\r\n elif uploads:\r\n return \"not implemented\"\r\n else:\r\n return json.dumps({\"success\": \"false\"})\r\n\r\n def POST(tulip_url, **vars):\r\n try:\r\n tulip = Tulip(url=tulip_url)\r\n except:\r\n return json.dumps({\"success\": \"false\"})\r\n if not tulip.is_wb():\r\n return json.dumps({\"success\": 
\"false\"})\r\n upload = FileUpload.post(tulip.leak.id)\r\n\r\n upload = json.loads(upload)\r\n\r\n filedata = Storage()\r\n\r\n # Store the number of bytes of the uploaded file\r\n filedata.bytes = upload[0]['size']\r\n\r\n # Store the file size in human readable format\r\n filedata.size = mutils.human_size(filedata.bytes)\r\n\r\n filedata.fileid = upload[0]['id']\r\n\r\n # Store filename and extension\r\n filedata.filename = upload[0]['name']\r\n\r\n filedata.ext = mutils.file_type(upload[0]['name'].split(\".\")[-1])\r\n\r\n session.add_files.append(filedata)\r\n\r\n return json.dumps(upload)\r\n\r\n return locals()", "def upload(self, filename, file_path):\n return", "def file_upload(title, action=\"\", fields=None, data=None):\n\n f = {}\n\n if fields:\n for field_id, _field in fields.items():\n f[field_id] = {\"value\": data.get(field_id, None)}\n f[field_id].update(_field)\n\n return {\n \"class\": \"file_upload\",\n \"title\": title,\n \"action\": action,\n \"fields\": f\n }", "def fpupload(request, dataset_id):\n\n dataset = Dataset.objects.get(id=dataset_id)\n logger.debug('called fpupload')\n\n if request.method == 'POST':\n logger.debug('got POST')\n for key, val in request.POST.items():\n splits = val.split(\",\")\n for url in splits:\n try:\n fp = FilepickerFile(url)\n except ValueError:\n pass\n else:\n picked_file = fp.get_file()\n filepath = write_uploaded_file_to_dataset(dataset,\n picked_file)\n datafile = Dataset_File(dataset=dataset,\n filename=picked_file.name,\n size=picked_file.size)\n replica = Replica(datafile=datafile,\n url=filepath,\n protocol='',\n location=Location.get_default_location())\n replica.verify(allowEmptyChecksums=True)\n datafile.save()\n replica.datafile = datafile\n replica.save()\n\n return HttpResponse(json.dumps({\"result\": True}))", "def upload(self, *route, **req_data):\n # Read the FieldStorage.\n file_desc = req_data['file']\n file_mimetype = req_data['mimetype']\n if not isinstance(file_desc, FieldStorage):\n # Python is dangerous when the type is incorrectly assumed.\n return Response(b'invalid request body', status='400 Bad Request')\n\n # Persist the file.\n data_id = get_bucket().put(file_desc.value)\n to_store = StoredFile(\n id=uuid4().hex,\n data_id=data_id,\n mimetype=file_mimetype,\n content_length=len(file_desc.value),\n original_name=file_desc.filename\n )\n StoredFile.collection().put(to_store)\n\n log_activity('%s uploaded file %s'%(\n context.user.link, to_store.access_link\n ))\n\n # Respond.\n return Response(\n bytes(to_store.access_url, 'utf-8'),\n status='201 Created'\n )", "def handle_upload_files(request):\n\n files = []\n for filename, file in request.FILES.items():\n files.append(file)\n overwrite = request.POST.get('overwrite',None)\n\n if overwrite == 'false':\n overwrite = False\n elif overwrite == 'true':\n overwrite = True\n\n json_message = upload_files(files,request.session['username'],overwrite)\n return JsonResponse(json_message)", "def upload(request):\n # We pass the 'file_id' in the query string as a GET parameter. 
If\n # we read it from the POSTed data, WebOb would read all POSTed\n # data, which has various features and traps (like setting the\n # \"Content-Length\" header to 0) that we do not need since we are\n # going to read the data ourselves anyway.\n file_id = request.GET['X-Progress-ID']\n input_file, file_size, filename = get_file_from_request(request)\n session = DBSession()\n u = session.query(Upload).filter_by(id=file_id).one()\n upload_dir = request.registry.settings['poulda.upload_dir']\n user_id = authenticated_userid(request)\n # We use a temporary path to detect unfinished uploads (post\n # mortem, not in the application itself).\n path = os.path.join(upload_dir, '_'.join((user_id, file_id)))\n u.tmp_path = path\n u.started = int(time.time())\n u.size = file_size\n u.state = u'uploading'\n session.flush()\n # We need to commit the transaction so that changes to the Upload\n # object can be seen by the other threads (which will serve the\n # 'progress' JSON view called by the upload page).\n transaction.commit()\n with open(path, 'w') as output:\n # We must read only 'file_size' bytes from the 'input_file',\n # not all of it since it also contains the MIME boundary.\n copy_to_file(input_file, file_size, output)\n final_path = filename[1 + filename.rfind(os.sep):]\n final_path = os.path.join(upload_dir, final_path)\n os.rename(path, final_path)\n session = DBSession()\n u = session.query(Upload).filter_by(id=file_id).one()\n u.state = u'done'\n u.final_path = unicode(final_path, 'utf-8')\n return HTTPFound(location='success')", "def post(self, request):\n serializer = FileToFilesystemSerializer(\n path=settings.PATH_TO_STORE_FILE, data=request.data)\n if serializer.is_valid():\n serializer.save()\n return Response(serializer.instance,\n status=status.HTTP_201_CREATED)\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)", "def upload():\n\n file = request.files['query']\n filepath = upload_filepath(secure_filename(file.filename))\n file.save(filepath)\n classification = classify(filepath)\n classification['filename'] = file.filename\n return render_template('index.html', classification=classification)", "def uploader():\n\tif request.method == 'POST':\n\t\t\n\t\tif \"file\" not in request.files:\n\t\t\treturn \"No data in file.\"\n\n\t\tFile = request.files['file']\n\t\t\n\t\tif File.filename == \"\":\n\t\t\treturn \"No file selected.\"\n\t\t\n\t\tfilename, ext = secure_filename(File.filename).split('.')\n\t\t#Check if file stream exists and file tpye correct.\n\t\tif File and ext == \"hepmc\":\n\t\t\t#The file is a byte stream by default which is not compatible with the current version of hepmcio.\n\t\t\tstring_stream = io.StringIO(File.read().decode('utf-8'))\n\n\t\t\t#Get all events from file and jsonify them.\n\t\t\tevents = hepmcio.HepMCReader(string_stream).all_events()\n\t\t\thepMCEncoder = hepmcio_json.HepMCJSONEncoder()\n\t\t\tjsonified = [hepMCEncoder.encode(event) for event in events]\n\n\t\t\t#Each collection contains all the data in a file.\n\t\t\tif filename not in mongo.db.collection_names():\n\t\t\t\tcollection = mongo.db[filename]\n\t\t\t\tjsonDecoder = json.JSONDecoder()\n\n\t\t\t\t#MongoDB takes in Python objects and not JSON strings, so have to decode before adding documents.\n\t\t\t\tfor jsonObject in jsonified:\n\t\t\t\t\tjsonEvent = jsonDecoder.decode(jsonObject.evt)\n\t\t\t\t\tjsonParticles = [jsonDecoder.decode(p) for p in jsonObject.particles]\n\t\t\t\t\tjsonVertices = [jsonDecoder.decode(v) for v in 
jsonObject.vertices]\n\n\t\t\t\t\tcollection.insert_one(jsonEvent)\n\t\t\t\t\tcollection.insert_many(jsonParticles)\n\t\t\t\t\tcollection.insert_many(jsonVertices)\n\t\t\n\t\t\t\treturn \"Succesfully uploaded file.\"\n\t\t\t\n\t\t\treturn \"File already in database.\"\n\n\t\treturn \"Incorrect file type.\"", "async def _upload(self) -> None:\n\n # filename given?\n filename = str(uuid.uuid4()) if self.filename is None else self.filename\n\n # check\n if self._upload_path is None:\n raise ValueError(\"No upload URL given.\")\n\n # send data and return image ID\n async with aiohttp.ClientSession() as session:\n data = aiohttp.FormData()\n data.add_field(\"file\", self._buffer, filename=self.filename)\n async with session.post(self._upload_path, auth=self._auth, data=data, timeout=self._timeout) as response:\n if response.status == 401:\n log.error(\"Wrong credentials for uploading file.\")\n raise FileNotFoundError\n elif response.status != 200:\n log.error(f\"Could not upload file to filecache: {response.status} {response.reason}\")\n raise FileNotFoundError", "def upload_file():\r\n if not LOGGEDIN:\r\n return render_template(\"login_temp.html\", msg=\"\")\r\n\r\n if request.method == 'POST':\r\n firstname = flask.request.form[\"firstname\"]\r\n lastname = flask.request.form[\"lastname\"]\r\n city = flask.request.form[\"city\"]\r\n state = flask.request.form[\"state\"]\r\n status = flask.request.form[\"status\"]\r\n date = flask.request.form[\"date\"]\r\n photo = flask.request.form[\"photo\"]\r\n\r\n f_d = open(\"users/\" + lastname + firstname + \".txt\", \"a\")\r\n f_d.write(firstname + \"\\n\")\r\n f_d.write(lastname + \"\\n\")\r\n f_d.write(city + \"\\n\")\r\n f_d.write(state + \"\\n\")\r\n f_d.write(status + \"\\n\")\r\n f_d.write(date + \"\\n\")\r\n f_d.write(photo + \"\\n\")\r\n f_d.close()\r\n return render_template(\"home.html\")\r\n else:\r\n return render_template('check_in.html')", "def submitFiles(self):\n formData =__new__(FormData)();\n \"\"\"\n Iteate over any file sent over appending the files\n to the form data.\n \"\"\"\n i=0\n console.log(self.vue.files)\n while i < self.vue.files.length:\n file = self.vue.files[i];\n formData.append('files[' + i + ']', file);\n i+=1\n \"\"\"\n Make the request to the POST /file-drag-drop URL\n \"\"\"\n formData.append(\"type\",\"upload\")\n __pragma__ ('jsiter') \n fetch('/json/plugins/',\n {\n \"method\":\"POST\",\n \"body\":formData,\n })\\\n .then(lambda res:res.json())\\\n .then(self.uploaded)\\\n .catch(lambda e:console.log('FAILURE!!',e));\n __pragma__ ('nojsiter')", "def upload_submission(request, learner, trigger, no_thumbnail=True):\n base_dir_for_file_uploads = settings.MEDIA_ROOT\n thumbnail_file_name_django = ''\n entry_point = trigger.entry_point\n\n files = request.FILES.getlist('file_upload', None)\n if files is None:\n return None\n\n # Is the storage space reachable?\n deepest_dir = base_dir_for_file_uploads + 'uploads/{0}/tmp/'.format(\n entry_point.id)\n\n try:\n os.makedirs(deepest_dir)\n except OSError:\n if not os.path.isdir(deepest_dir):\n logger.error('Cannot create directory for upload: {0}'.format(\n deepest_dir))\n raise\n\n if len(files) == 1:\n filename = files[0].name\n extension = filename.split('.')[-1].lower()\n submitted_file_name_django = 'uploads/{0}/{1}'.format(entry_point.id,\n generate_random_token(token_length=16) + '.' 
+ extension)\n full_path = base_dir_for_file_uploads + submitted_file_name_django\n with open(full_path, 'wb+') as dst:\n for chunk in files[0].chunks():\n dst.write(chunk)\n\n\n f_size = os.path.getsize(full_path)\n if f_size > trigger.max_file_upload_size_MB * 1024 * 1024:\n logger.warning('File too large {0}'.format(\n submitted_file_name_django))\n return None, ('File too large ({0} MB); it must be less than '\n '{1} MB.'.format(round(float(f_size/1024.0/1024.0), 1),\n trigger.max_file_upload_size_MB))\n\n\n else: #if trigger.allow_multiple_files: this is removed for now\n filename = ''\n extension = ''\n submitted_file_name_django = ''\n full_path = ''\n\n\n # Check that the file format is PDF, if that is required.\n strike1 = False\n if 'pdf' in trigger.accepted_file_types_comma_separated.lower() and \\\n extension in ('pdf',):\n try:\n mime = magic.from_file(full_path, mime=True)\n if not(isinstance(mime, str)):\n mime = mime.decode('utf-8')\n except Exception as exp:\n logger.error('Could not determine MIME type: ' + str(exp))\n mime = ''\n strike1 = True\n\n if 'application/pdf' not in mime.lower():\n strike1 = True\n\n if strike1:\n logger.debug('Invalid PDF upload: {0} [{1}]'.format(mime,\n full_path))\n #return None, 'Invalid file uploaded. Uploaded file must be a PDF.'\n\n doc = PdfFileReader(full_path)\n if doc.isEncrypted:\n logger.debug('Encrypted PDF upload: {0}'.format(full_path))\n return None, ('An encrypted PDF cannot be uploaded. Please remove '\n 'the encryption and try again.')\n\n\n strike1 = False\n if (('jpeg' in trigger.accepted_file_types_comma_separated.lower()) or \\\n ('jpg' in trigger.accepted_file_types_comma_separated.lower())) and \\\n extension in ('jpg', 'jpeg'):\n\n try:\n mime = magic.from_file(full_path, mime=True)\n if not(isinstance(mime, str)):\n mime = mime.decode('utf-8')\n except Exception as exp:\n logger.error('Could not determine MIME type: ' + str(exp))\n mime = ''\n strike1 = True\n\n if 'image/jpeg' not in mime.lower():\n strike1 = True\n\n if strike1:\n logger.debug('Invalid JPG upload: {0} [{1}]'.format(mime,\n full_path))\n return None, ('Invalid file. Uploaded image should be a valid '\n 'and readable JPEG file.')\n\n\n strike1 = False\n if ('png' in trigger.accepted_file_types_comma_separated.lower()) and \\\n extension in ('png',):\n\n try:\n mime = magic.from_file(full_path, mime=True)\n if not(isinstance(mime, str)):\n mime = mime.decode('utf-8')\n except Exception as exp:\n logger.error('Could not determine MIME type: ' + str(exp))\n mime = ''\n strike1 = True\n\n if 'image/png' not in mime.lower():\n strike1 = True\n\n if strike1:\n logger.debug('Invalid PNG upload: {0} [{1}]'.format(mime,\n full_path))\n return None, ('Invalid file. Uploaded image should be a valid '\n 'and readable PNG file.')\n\n\n strike2 = False\n if extension.lower() not in \\\n trigger.accepted_file_types_comma_separated.lower():\n logger.debug('Invalid file type upload: received \".{0}\"; [{1}]'.format(\\\n extension, full_path))\n return None, ('Invalid file uploaded. 
Uploaded file must be: {}'.format(\\\n trigger.accepted_file_types_comma_separated))\n\n\n if trigger == entry_point:\n # In some instances we don't use triggers, just entry_points\n prior = Submission.objects.filter(status='S',\n submitted_by=learner,\n entry_point=entry_point,\n is_valid=True\n )\n else:\n prior_indiv = Q(status='S', submitted_by=learner, entry_point=entry_point,\n trigger=trigger, is_valid=True)\n\n # We need this here, but also for the code later in the next\n # if (trigger==entry_point) part\n\n # Default returned by this function is ``None`` if the user is not\n # enrolled in a group, or if this course simply does not use groups.\n group_submitted = is_group_submission(learner, entry_point)\n if is_group_submission(learner, entry_point):\n group_submitted = group_submitted.group\n\n prior_group = Q(status='S', group_submitted=group_submitted,\n entry_point=entry_point, trigger=trigger,\n is_valid=True)\n else:\n prior_group = Q()\n\n prior = Submission.objects.filter(prior_indiv | prior_group)\n\n\n for item in prior:\n logger.debug(('Setting prior submission to False: {0} and name '\n '\"{1}\"'.format(str(item), item.submitted_file_name)))\n item.is_valid = False\n item.save()\n\n\n if trigger == entry_point:\n # In some instances we don't use triggers, just entry_points\n sub = Submission(submitted_by=learner,\n group_submitted=None,\n status='S',\n entry_point=entry_point,\n is_valid=True,\n file_upload=submitted_file_name_django,\n thumbnail=thumbnail_file_name_django,\n submitted_file_name=filename,\n ip_address=get_IP_address(request),\n )\n sub.save()\n else:\n\n sub = Submission(submitted_by=learner,\n group_submitted=group_submitted,\n status='S',\n entry_point=entry_point,\n trigger=trigger,\n is_valid=True,\n file_upload=submitted_file_name_django,\n thumbnail=thumbnail_file_name_django,\n submitted_file_name=filename,\n ip_address=get_IP_address(request),\n )\n sub.save()\n\n if 'pdf' in trigger.accepted_file_types_comma_separated.lower() and \\\n extension in ('pdf',):\n clean_PDF(sub)\n\n return sub", "def handle_upload(request):\n storage = DefaultStorage()\n\n if request.method=='POST' and request.FILES:\n f = request.FILES.values()[0]\n name = settings.MEDIA_URL + handle_uploaded_file(storage,f,'')\n else:\n name = False;\n \n data = \"\"\"\n {\n error: '',\n filename: '%s',\n }\n \"\"\" % (name)\n \n return HttpResponse(data)", "def Uploads1():\n if request.method==\"POST\":\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file=request.files[\"file\"]\n file.save(os.path.join(\"Wind\", file.filename))\n return render_template(\"index.html\", message = \"File Uploaded Successfuly\")\n return render_template(\"index.html\", message = \"Upload Wind Maintenance File\")\n return \"File Uploaded!\"", "def _do_post(self, url, **kwargs):\n #TODO:\n # Add error handling. 
Check for HTTP status here would be much more conveinent than in each calling method\n scaleioapi_post_headers = {'Content-type':'application/json','Version':'1.0'}\n self.logger.debug(\"_do_post()\")\n\n if kwargs:\n for key, value in kwargs.iteritems():\n if key == 'headers':\n scaleio_post_headers = value\n print \"Adding custom POST headers\"\n if key == 'files':\n upl_files = value\n print \"Adding files to upload\"\n try:\n response = self._session.post(url, headers=scaleioapi_post_headers, verify_ssl=self._im_verify_ssl, files=upl_files)\n self.logger.debug(\"_do_post() - Response: \" + \"{}\".format(response.text))\n if response.status_code == requests.codes.ok:\n return response\n else:\n self.logger.error(\"_do_post() - Response Code: \" + \"{}\".format(response.status_code))\n raise RuntimeError(\"_do_post() - HTTP response error\" + response.status_code)\n except:\n raise RuntimeError(\"_do_post() - Communication error with ScaleIO gateway\")\n return response", "def post(self):\n if validate(request.form):\n handle_upload(request.files['qqfile'], request.form)\n filepath = 'static/images/{}/{}'.format(request.form['qquuid'], request.form['qqfilename'])\n session['img_upload_filepath'] = filepath\n return make_response(200, {\"success\": True})\n else:\n return make_response(400, {\"error\": \"Invalid request\"})", "def add_file(self, fieldname, filename, fileHandle, mimetype=None):\n\tbody = fileHandle.read()\n if mimetype is None:\n mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'\n self.files.append((fieldname, filename, mimetype, body))\n return", "def upload_handler(self):\n \n for root, dirs, files in os.walk(self.path):\n\n current_dir = os.path.basename(root)\n \n if root == self.path:\n root_id = self.gapy.create_file(current_dir, path=root, isFolder=True)\n else:\n parents_id = self.filesystem[os.path.dirname(root)][\"id\"]\n root_id = self.gapy.create_file(current_dir, path=root, isFolder=True, parents_id=[parents_id])\n print(f\"\\033[94m The directory {current_dir} was uploaded \\033[0m\")\n\n self.filesystem[root.rstrip(\"/\")] = { \"id\": root_id, \"files\": [] }\n \n if files:\n for f in files:\n if f not in IGNORE_FILES and os.path.getsize(root+\"/\"+f) > 0:\n file_id = self.gapy.create_file(f, path=root, parents_id=[root_id])\n self.filesystem[root][\"files\"].append({ \"name\": f, \"id\": file_id})\n print(f\"\\033[94m The file {f} was uploaded \\033[0m\")\n \n self.update_fs()", "def upload():\n form = request.form\n\n # Create a unique \"session ID\" for this particular batch of uploads.\n uuid = str(request.args.get('collection'))\n # uuid = str(uuid4())\n upload_key = str(current_user.id) + \"/\" + uuid\n\n # Is the upload using Ajax, or a direct POST by the form?\n is_ajax = False\n if form.get(\"__ajax\", None) == \"true\":\n is_ajax = True\n\n # Target folder for these uploads.\n target = \"uploads/{}\".format(current_user.id)\n\n if not os.path.exists(target):\n try:\n os.mkdir(target)\n except:\n if is_ajax:\n return ajax_response(False, \"Couldn't create user directory: {}\".format(target))\n else:\n return \"Couldn't create user directory: {}\".format(target)\n\n # Target folder for these uploads.\n target = \"uploads/{}\".format(upload_key)\n try:\n os.mkdir(target)\n except:\n if is_ajax:\n return ajax_response(False, \"Couldn't create upload directory: {}\".format(target))\n else:\n return \"Couldn't create upload directory: {}\".format(target)\n\n for upload in request.files.getlist(\"file\"):\n filename = 
upload.filename.rsplit(\"/\")[0]\n destination = \"/\".join([target, filename])\n print \"Accept incoming file:\", filename\n print \"Save it to:\", destination\n upload.save(destination)\n\n if is_ajax:\n return ajax_response(True, uuid)\n else:\n return redirect(url_for(\"core.upload_success\", uuid=uuid))", "def post(self, request, work_batch_id):\n\n from sentry.models.workbatch import WorkBatch\n\n try:\n work_batch = WorkBatch.objects.get(pk=int(work_batch_id))\n except WorkBatch.DoesNotExist:\n raise ResourceDoesNotExist\n\n logger = logging.getLogger('clims.files')\n logger.info('workbatchfile.start')\n\n if 'file' not in request.data:\n return Response({'detail': 'Missing uploaded file'}, status=400)\n\n fileobj = request.data['file']\n\n full_name = request.data.get('name', fileobj.name)\n if not full_name or full_name == 'file':\n return Response({'detail': 'File name must be specified'}, status=400)\n\n name = full_name.rsplit('/', 1)[-1]\n\n if _filename_re.search(name):\n return Response(\n {\n 'detail': 'File name must not contain special whitespace characters'\n }, status=400\n )\n\n headers = {\n 'Content-Type': fileobj.content_type,\n }\n for headerval in request.data.getlist('header') or ():\n try:\n k, v = headerval.split(':', 1)\n except ValueError:\n return Response({'detail': 'header value was not formatted correctly'}, status=400)\n else:\n if _filename_re.search(v):\n return Response(\n {\n 'detail': 'header value must not contain special whitespace characters'\n },\n status=400\n )\n headers[k] = v.strip()\n\n file = File.objects.create(\n name=name,\n type='work_batch.file',\n headers=headers,\n )\n file.putfile(fileobj, logger=logger)\n\n try:\n with transaction.atomic():\n # TODO: Remove the organization id from the user task file\n work_batch_file = WorkBatchFile.objects.create(\n organization_id=work_batch.organization_id,\n file=file,\n name=full_name,\n work_batch_id=work_batch.id\n )\n except IOError:\n file.delete()\n return Response({'detail': ERR_FILE_EXISTS}, status=409)\n\n return Response(serialize(work_batch_file, request.user), status=201)", "def upload_file():\n try:\n global current_file\n if request.method == \"POST\":\n # Validates a file has been uploaded\n if 'file' not in request.files:\n flash(\"No file submitted\")\n return redirect(url_for('index'))\n\n f = request.files['file']\n if f.filename == '':\n flash(\"No file submitted\")\n return redirect(url_for('index'))\n\n if app.config['UPLOAD_FOLDER'] == UPLOAD_FOLDER:\n app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER + \\\n session['username']\n\n if check_extension(f.filename):\n # Makes sure filename is safe\n filename = secure_filename(f.filename)\n filepath = app.config['UPLOAD_FOLDER'] + '/files/' + filename\n # Saves the uploaded file\n f.save(filepath)\n # Removes extension from filename\n filename = filename.replace('.txt', '')\n filename = filename.replace('.pdf', '')\n filename = filename.replace('.docx', '')\n\n current_file = main.Analyser(filepath, filename)\n analysed_texts = current_file.analysed_texts\n text_facts = current_file.stats\n with Database() as db:\n categories = db.loadCategories()\n keywords = ''\n for word in text_facts['Key Words']:\n keywords += word[0] + \", \"\n keywords = keywords[:-2]\n return render_template('textdisplay.html',\n title=current_file.title,\n texts=analysed_texts,\n text=analysed_texts['Regular'],\n facts=text_facts,\n ext=current_file.text.ext,\n categories=categories,\n keywords=keywords,\n upload=True)\n\n else:\n flash(\"File type not 
allowed\")\n return redirect(url_for('index'))\n\n else:\n return redirect(url_for('index'))\n except Exception as e:\n flash(\"Something went wrong, please try again\")\n return redirect(url_for('index'))", "def upload(self):\n\n # Try to retrieve the file from the request\n file = None\n try:\n file = request.files[\"file\"]\n except KeyError as e:\n logging.warning(f\"File was not found in request: {e}.\")\n flash(\"No file given.\", \"error\")\n return redirect(request.url)\n except AttributeError as e:\n logging.warning(f\"Error: Request did not contain any files: {e}.\")\n flash(\"No file given.\", \"error\")\n return redirect(request.url)\n\n # Check if file was correctly uploaded\n if not file or len(file.filename) == 0:\n flash(\"No file selected for upload.\", \"message\")\n return redirect(request.url)\n\n \"\"\" Check if file has correct extension. Allowed extensions depend on\n the connector. To make the code more readable, group connectors\n with the same allowed file extensions together like this:\n if connector in ['someconnector', 'someotherconnector']:\n extensions = [...] \"\"\"\n\n if self.connector in [\"overtime\"]:\n allowed_extensions = OVERTIME_SUPPORTED_EXTENSIONS\n else:\n allowed_extensions = []\n\n if not allowed_file(file, allowed_extensions=allowed_extensions):\n flash(\"File extension not allowed.\", \"warning\")\n return redirect(request.url)\n\n \"\"\" File seems uploaded correctly and has correct extension.\n Generate a new record ID to keep track of the uploaded file.\n \"\"\"\n rec_id = generate_temp_record_id()\n\n # Save file to disk\n path = store_temp_file(file, record_id=rec_id)\n\n if not path:\n flash(\"Error saving file!\", \"error\")\n return redirect(request.url)\n\n \"\"\" If everything ended successfully, send the user to the\n confirmation page so he can review his changes \"\"\"\n\n return redirect(url_for(f\"{self.endpoint}.upload_confirm\", rec_id=rec_id))", "def post(self, problem_id):\n while self.request.files['photos']:\n photo_file = self.request.files['photos'].pop()\n\n if not check_file_ext(photo_file.filename):\n return self.send_error(400, message=(\n 'Bad file extension. JPEG and JPG formats allowed.'\n ))\n\n if not check_file_format(photo_file.body):\n return self.send_error(400, message=(\n 'Bad format. 
Only JPEG and JPG allowed.'))\n\n new_filename = create_new_filename()\n\n store_photo_data_to_db(problem_id, new_filename, self)\n filepath = store_file_to_hd(new_filename, photo_file.body)\n store_thumbnail_to_hd(filepath)", "def post(self):\n if validate(request.form):\n handle_upload(request.files['qqfile'], request.form)\n return make_response(200, {\"success\": True})\n else:\n return make_response(400, {\"error\": \"Invalid request\"})", "def post():\n #file=request.body.read()\n #reader=json.loads(file)\n #name = reader[\"name\"]\n #return db.test_post.insert(name=name)\n return db.test_post.insert(**dict (request.vars))", "def action_POST(self):\n\n # Use the content-length header, though being user-defined input it's not really trustworthy.\n try:\n l = int(self.headers.get('content-length', 0))\n if l < 0:\n # Parsed properly, but some joker put in a negative number.\n raise ValueError()\n except ValueError:\n return self.serve_content(\"Illegal Content-Length header value: %s\" % self.headers.get('content-length', 0), 400)\n\n m = args[TITLE_MAX_LENGTH]\n if m and l > m:\n return self.serve_content('Maximum length: %d' % m, code = 413)\n\n form = cgi.FieldStorage(\n fp=self.rfile,\n headers=self.headers,\n environ={\n 'REQUEST_METHOD':'POST',\n 'CONTENT_TYPE':self.headers['Content-Type'],\n }\n )\n\n if 'file' not in form:\n return self.serve_content('No file provided.', 400)\n\n filename = form['file'].filename\n if not filename:\n # No FileName provided\n return self.serve_content('No file name.', 400)\n elif not re.match(r'^[^/\\\\]+$', filename) or filename in ['.', '..']:\n # Validate filename\n return self.serve_content('Invalid file name.', 400)\n\n if not os.path.isdir(self.file_path):\n return self.send_error(404)\n\n path_save = os.path.join(self.file_path, filename)\n\n if os.path.exists(path_save) and not os.path.isfile(path_save):\n return self.serve_content('Destination exists as a non-file', code = 406)\n\n if args[TITLE_UPLOAD_NO_CLOBBER] and os.path.isfile(path_save):\n return self.serve_content('File already exists.', code = 302)\n\n try:\n with open(path_save, 'wb') as output_file:\n # TODO: How to handle a user lying in their Content-Length header?\n self.copyobj(form['file'].file, output_file, False)\n except IOError:\n if os.path.isfile(path_save):\n os.remove(path_save)\n return self.serve_content('Failed to save file.', code = 500)\n\n return self.serve_content(self.render_file_table(self.file_path), code = 200)", "async def upload(self, request):\n\n userid = await authenticated_userid(request)\n project = await request.app.context_project(request, userid)\n\n payload = await request.post()\n\n filename = payload['file'].filename\n upload_stream = payload['file'].file\n\n ext = os.path.splitext(filename)[1]\n\n if not re_filename_ext.match(ext):\n # paranoid check in case a script doesn't protect from code injection\n raise web.HTTPBadRequest(text='file extension not supported: %s' % filename)\n\n camera_id = uuid.uuid1().hex\n\n log = request['slog']\n log.debug('request: camera upload', filename=filename)\n\n config = request.app.config\n\n tmppath = dump_stream(config['media']['tempdir'], upload_stream)\n\n log.debug('file dump', camera_id=camera_id, tmppath=tmppath)\n\n await Camera.insert(request,\n camera_id=camera_id,\n filename=filename,\n project_id=project.project_id)\n\n await request.app.task_broker.publish('camera_upload', {\n 'userid': userid,\n 'project_id': project.project_id,\n 'camera_id': camera_id,\n 'tmppath': tmppath,\n 'filename': 
filename\n }, log=log)\n\n response_js = {\n 'camera_file_id': camera_id\n }\n\n return web.json_response(response_js, status=HTTPStatus.CREATED)", "def on_post(self, req, resp):\n LOGGER = logging.getLogger()\n \n resp.set_header('Content-Type', 'text/json')\n raw_json = req.stream.read().decode('utf-8')\n content = json.loads(raw_json, encoding='utf-8')\n\n try:\n files = content.get(\"files\")\n zip_name = content.get(\"name\")\n zip_file = DownloadFilesResource.compress_files(files, zip_name)\n resp.body = json.dumps({'file': zip_file})\n LOGGER.info(\"Zip created and ready to download\")\n except Exception as e:\n LOGGER.error(\"Error creating zip file\" , exc_info=True)\n raise falcon.HTTPInternalServerError(title=\"Error downloading files: \" + str(type(e)),\n description=(str(e) +\n ','.join(traceback.format_tb(e.__traceback__))))", "def upload():\n storeapps = APP.config[\"storage\"]\n binary = request.data\n\n # Add compatibility with POST requests\n if 'file' in request.files:\n binary = request.files['file'].read()\n\n logging.debug(\"Received file with size: %i\", len(binary))\n\n try:\n app = nativeapps.application.from_binary(binary)\n filepath = app.write(storeapps)\n return \"written: \" + filepath, 201 # 201 CREATED\n except nativeapps.application.InvalidApplicationError as exception:\n return exception, 400", "def uploadFile(self, filename, name=\"Dummy name\", type=\"DummyType\"):\n\n with open(filename, 'rb') as f:\n data = f.read()\n\n if (name == \"Dummy name\"):\n name = filename\n\n data = {'name': name,\n 'type': type,\n 'bits': xmlrpclib.Binary(data),\n 'overwrite': True}\n\n try:\n r = self.server.wp.uploadFile(\n self.blogid, self.username, self.password, data)\n except xmlrpclib.Fault as fault:\n display_XMLRPC_errors(\"upload file \" + filename, fault)\n\n #FIXME: do we really need to split the url ?\n try:\n r['url'] = r['url'].split('?')[1]\n except IndexError:\n from urlparse import urlparse\n r['url'] = urlparse(r['url']).path\n\n print \"uploaded file file =\", r['file']\n print \"uploaded file url =\", r['url']\n print \"uploaded file type =\", r['type']", "def upload(self, upload_request):\n raise NotImplementedError", "def process_POST_request(self, path, data, http_s_obj):\n try:\n self.check_and_print_debug_message(\"POST directory path: \" + path)\n pathlib.Path(os.path.dirname(path)).mkdir(\n parents=True, exist_ok=True)\n self.filewrite(path, data)\n http_s_obj.setStatusCode(200)\n http_s_obj.setData(\"Data saved successfully.\")\n except Exception as e:\n self.check_and_print_debug_message(str(e))\n http_s_obj.setStatusCode(400)\n http_s_obj.setData(MAPPING_DICT.get(400))\n\n return http_s_obj", "def upload_single_file(request):\n message, success, title = \"\", 0, \"error\"\n is_data_ok = False\n\n if request.method == 'POST':\n data_in_post = [\"id_campaign\", \"field_name\"]\n # defined in utils.py\n is_data_in_post = check_all_data_available_in_post(\n data_in_post, request.POST)\n\n if is_data_in_post['success']:\n is_data_ok = True\n else:\n message = is_data_in_post['message']\n\n if is_data_ok:\n for filename, file in request.FILES.items():\n name = request.FILES[filename].name\n print(\"filename : \", name)\n\n # myfile = request.FILES['abm_company_list_file']\n myfile = request.FILES[filename]\n fs = FileSystemStorage()\n filename = fs.save(\"campaign/\" + myfile.name, myfile)\n print(filename)\n\n # get campaign id\n id_campaign = request.POST.get(\"id_campaign\")\n\n # django get campaign object from model\n campaign = 
Campaign.objects.filter(id=id_campaign).first()\n\n if campaign:\n # get specification record\n specification = Specification.objects.filter(campaign=campaign).first()\n if specification:\n # get field name to save\n field_name = request.POST.get(\"field_name\")\n\n # check object has property with field name\n if hasattr(specification, field_name):\n # nested_setattr(object, 'pet.name', 'Sparky')\n model_field_name = str(field_name) + \".name\"\n model_field_name = model_field_name.replace(\" \", \"\")\n print(model_field_name)\n\n # set nested attribute\n # ex. form.name\n nested_setattr(specification, model_field_name, filename)\n\n specification.save()\n print(nested_getattr(specification, model_field_name, 'default')) # will print string similar to filename\n\n success = 1\n title = 'success'\n message = \"specification updated successfully\"\n else:\n message += \"Error... Specification table has no field '\" + field_name + \"'\"\n\n else:\n message += \"Specification not exists with campaign: '\", str(campaign), \"'\"\n else:\n message += \"Campaign not exist with id : '\", id_campaign, \"'\"\n\n # uploaded_file_url = fs.url(filename)\n success = 1\n else:\n message = \"Please post data using post method\"\n\n jsonresponse = {\n \"success\": 1,\n \"title\": request.POST,\n \"message\": message,\n }\n return JsonResponse(jsonresponse, safe=False)", "def Uploads():\n if request.method==\"POST\":\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file=request.files[\"file\"]\n file.save(os.path.join(\"Solar\", file.filename))\n return render_template(\"index.html\", message = \"File Uploaded Successfuly\")\n return render_template(\"index.html\", message = \"Upload Solar Maintenance File\")\n return \"File Uploaded!\"", "def post(self, request, format=None):\n file_obj = request.FILES['file']\n customer = request.POST.get('customer', None)\n\n with open(file_obj.read(), newline='') as csvfile:\n processed_list_from_csv = self._csv_to_json(csvfile, cust_strategy)\n resp = self.postman.add_collection(processed_dictionary_from_csv)\n\n return Response(resp)", "def post(self) :\n\n self.msg = \"\"\n error = True\n importer = Importer(DataAccessor(self.addErrorMessage))\n\n try :\n target = self.request.POST.get('newFile').file.read()\n importer.parse(StringIO(target))\n\n except IOError :\n self.msg = \"Please select a valid file to import\"\n\n except Usage, err : \n self.msg = err.msg\n\n except AttributeError:\n self.msg = \"Please select a valid file to import\"\n\n if not self.msg : \n self.msg = 'Import was successful'\n error = False\n\n if len(self.msg) > 512 : \n self.msg = self.msg[0:512] + \"...\"\n \n setSessionMessageByRequest(self, self.msg, error)\n self.redirect('/admin')", "def post_wave(cnct):\n files = []\n\n if request.mimetype == 'multipart/form-data':\n for _, file in request.files.items():\n files.append((file.filename, file))\n else:\n files.append(('%s.wav' % uuid4(), request.stream))\n\n response = []\n for (name, fp) in files:\n parser = WaveParser(fp)\n try:\n audio_file = db.AudioFile.FromWaveParser(name, parser)\n cnct.add(audio_file)\n except WaveException as err:\n raise HttpError(406, str(err)) from None\n except Exception as err:\n print(err)\n raise HttpError(500) from None\n\n response.append(audio_file.info)\n\n cnct.commit()\n return {'files': response}", "def upload_file():\n global gui\n print(request.data)\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in 
request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n # if user does not select file, browser also\n # submit a empty part without filename\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n print(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n gui.controller.main('openFile %s' % os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return redirect('/data')\n return render_template('upload_file.html')", "def on_post(self, req, resp, account, container):\n _handle_script_upload(req, resp, account, container)", "def collection_post(self):\n document = upload_file(self.request)\n document.author = self.request.authenticated_role\n self.context.documents.append(document)\n if save_tender(self.request):\n self.LOGGER.info(\n \"Created tender qualification document {}\".format(document.id),\n extra=context_unpack(\n self.request, {\"MESSAGE_ID\": \"tender_qualification_document_create\"}, {\"document_id\": document.id}\n ),\n )\n self.request.response.status = 201\n document_route = self.request.matched_route.name.replace(\"collection_\", \"\")\n self.request.response.headers[\"Location\"] = self.request.current_route_url(\n _route_name=document_route, document_id=document.id, _query={}\n )\n return {\"data\": document.serialize(\"view\")}", "def create(self, request):\n if len(request.FILES) == 0:\n return JsonResponse({\n 'success': False,\n 'message': 'Must pass file in as a Multipart/Form post'\n })\n\n the_file = request.data['file']\n file_type = BuildingFile.str_to_file_type(request.data.get('file_type', 'Unknown'))\n\n organization_id = self.get_organization(self.request)\n cycle = request.query_params.get('cycle_id', None)\n\n if not cycle:\n return JsonResponse({\n 'success': False,\n 'message': 'Cycle ID is not defined'\n })\n else:\n cycle = Cycle.objects.get(pk=cycle)\n\n # figure out if file is xml or zip\n the_filename = the_file._get_name()\n tmp_filename, file_extension = os.path.splitext(the_filename)\n # initialize\n p_status = True\n property_state = True\n messages = {'errors': [], 'warnings': []}\n\n if file_extension == '.zip':\n # ZIP FILE, extract and process files one by one\n # print(\"This file is a ZIP\")\n\n with zipfile.ZipFile(the_file, \"r\", zipfile.ZIP_STORED) as openzip:\n filelist = openzip.infolist()\n for f in filelist:\n # print(\"FILE: {}\".format(f.filename))\n # process xml files\n if '.xml' in f.filename and '__MACOSX' not in f.filename:\n # print(\"PROCESSING file: {}\".format(f.filename))\n with NamedTemporaryFile() as data_file:\n data_file.write(openzip.read(f))\n data_file.seek(0)\n size = os.path.getsize(data_file.name)\n content_type = 'text/xml'\n\n a_file = InMemoryUploadedFile(\n data_file, 'data_file', f.filename, content_type,\n size, charset=None)\n\n building_file = BuildingFile.objects.create(\n file=a_file,\n filename=f.filename,\n file_type=file_type,\n )\n\n p_status_tmp, property_state_tmp, property_view, messages_tmp = building_file.process(organization_id, cycle)\n\n # append errors to overall messages\n for i in messages_tmp['errors']:\n messages['errors'].append(f.filename + \": \" + i)\n for i in messages_tmp['warnings']:\n messages['warnings'].append(f.filename + \": \" + i)\n\n if not p_status_tmp:\n # capture error\n p_status = p_status_tmp\n else:\n # capture a real property_state (not None)\n property_state = 
property_state_tmp\n\n else:\n # just an XML\n building_file = BuildingFile.objects.create(\n file=the_file,\n filename=the_file.name,\n file_type=file_type,\n )\n\n p_status, property_state, property_view, messages = building_file.process(organization_id, cycle)\n\n if p_status and property_state:\n if len(messages['warnings']) > 0:\n return JsonResponse({\n 'success': True,\n 'status': 'success',\n 'message': {'warnings': messages['warnings']},\n 'data': {\n 'property_view': PropertyViewAsStateSerializer(property_view).data,\n # 'property_state': PropertyStateWritableSerializer(property_state).data,\n },\n })\n else:\n return JsonResponse({\n 'success': True,\n 'status': 'success',\n 'message': {'warnings': []},\n 'data': {\n 'property_view': PropertyViewAsStateSerializer(property_view).data,\n # 'property_state': PropertyStateWritableSerializer(property_state).data,\n },\n })\n else:\n return JsonResponse({\n 'success': False,\n 'status': 'error',\n 'message': messages\n }, status=status.HTTP_400_BAD_REQUEST)", "def handle_upload(self, req, folder_path):\n\t\tresult = UL_ACCESS_DENIED\n\t\t\n\t\tdata = req.data\n\t\tfileitem = data['NewFile']\n\t\t\n\t\tfilename = fileitem.filename\n\t\tdestination_path = os.path.join(self.get_selected_root(req), folder_path, filename)\n\t\tif(os.access(destination_path, os.F_OK)):\n\t\t\tparts = filename.split('.')\n\t\t\tif(len(parts) > 1):\n\t\t\t\tparts[len(parts) - 2] += '-%d' % int(time.time())\n\t\t\t\tfilename = '.'.join(parts)\n\t\t\t\tresult = UL_RENAME\n\t\t\telse:\n\t\t\t\tresult = UL_INVALID_TYPE\n\t\tif(result != UL_INVALID_TYPE):\n\t\t\ttry:\n\t\t\t\tuploaded_file = open(destination_path, 'w')\n\t\t\t\tbytes = fileitem.file.read(65536)\n\t\t\t\twhile(bytes):\n\t\t\t\t\tuploaded_file.write(bytes)\n\t\t\t\t\tbytes = fileitem.file.read(65536)\n\t\t\t\tuploaded_file.close()\n\t\t\t\tresult = SUCCESS\n\t\t\texcept:\n\t\t\t\timport traceback\n\t\t\t\tprint traceback.print_exc()\n\t\t\t\tresult = UL_ACCESS_DENIED\n\t\t\n\t\treturn result, filename", "def upload_file():\n retVal = None \n if request.method == 'POST' and upload_validated(request):\n retVal = render_successful_upload(request) \n else:\n retVal = render_index()\n return retVal", "def upload_file(self, file_path, file_name, output_path):", "def post_upload(self, url, file_path, metadata):\n full_url = self.api_url + starts_slash(ends_slash(url))\n headers = {\"Authorization\": \"Bearer \" + self.token}\n body = {\"metadata\": json.dumps(metadata)}\n logging.info(\"POST url: \" + str(full_url))\n logging.info(\"POST header: \" + str(headers))\n logging.info(\"POST body: \" + str(body))\n filedata = None\n if isfile(file_path):\n filedata = {\"filedata\": open(file_path, \"rb\")}\n result = requests.post(url=full_url, headers=headers,\n files=filedata, data=body).json()\n logging.info(\"POST result: \"+str(result))\n return result", "def post(self, slug = None):\n filename = self.request.form.get(\"filename\")\n imgdata = base64.b64decode(self.request.form['data'])\n stream = StringIO.StringIO(imgdata)\n content_length = len(imgdata)\n content_type = \"image/png\"\n\n asset = self.app.module_map.uploader.add(\n stream, \n filename = filename,\n content_type = content_type,\n content_length = content_length,\n )\n\n asset_id = unicode(asset._id)\n return {\n 'url' : self.url_for(\"asset\", asset_id = asset.variants['medium_user']._id),\n 'status' : \"success\",\n 'asset_id' : asset_id\n }", "def add_file(self, fieldname, filename, fileHandle, mimetype=None):\n body = fileHandle.read()\n 
if mimetype is None:\n mimetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'\n self.files.append((fieldname, filename, mimetype, body))\n return", "def PostInputsFile(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def upload(id):\n response_object = {'status': 'success'}\n print('upload ' + str(request.method) + '\\n')\n if request.method == 'POST':\n if 'file' not in request.files:\n return redirect(request.url)\n file = request.files['file']\n project = get_project(id)\n if file and allowed_file(file.filename):\n filename = file.filename\n filepath = os.path.normpath(os.path.join(flask.current_app.root_path,\n flask.current_app.config['DATA_DIRECTORY'], project['path'], filename))\n\n if not os.path.exists(os.path.dirname(filepath)):\n try:\n os.makedirs(os.path.dirname(filepath))\n except OSError as exc: # Guard against race condition\n if exc.errno != errno.EEXIST:\n raise\n\n file.save(filepath)\n _object = {\n 'filename': os.path.normpath(filename),\n 'project_id': project['project_id']\n }\n add_object(_object)\n print('save file')\n return jsonify(response_object)", "def POST(self):\n context = aq_inner(self.context)\n\n # If the module is published, do a transparent checkout\n if context.state == 'published':\n context.checkout(context.objectId)\n\n if self.request.get_header('Content-Type').lower().startswith(\n 'application/zip') or self.request.get_header(\n 'Packaging', '').endswith('/SimpleZip'):\n # Unpack the zip and add the various items\n try:\n zipfile = ZipFile(self.request['BODYFILE'], 'r')\n except BadZipfile:\n raise BadRequest(\"Invalid zip file\")\n\n namelist = zipfile.namelist()\n lenlist = len(namelist)\n\n # Empty zip file?\n if lenlist == 0:\n raise BadRequest(\"Zip file is empty\")\n\n if lenlist > 1:\n prefix = os.path.commonprefix(namelist)\n else:\n prefix = os.path.dirname(namelist[0]) + '/'\n namelist = [name[len(prefix):] for name in namelist]\n\n for f in namelist:\n if not f:\n continue # Directory name in the listing\n if f.find('/')!=-1:\n continue # Subdirectories ignored\n # When this moves to python2.6, please use zipfile.open here\n unzipfile = StringIO(zipfile.read(prefix + f))\n self.addFile(context, f, unzipfile)\n\n # Returned Location header points to the EM-IRI for zipfile upload\n self.request.response.setHeader('Location', '%s/editmedia' % context.absolute_url())\n self.request.response.setStatus(201)\n return ''\n else:\n # Rhaptos uses UnifiedFile for everything, so we will not use\n # content_type_registry here. First get the file name. 
The client\n # MUST supply this\n disposition = self.request.get_header('Content-Disposition')\n if disposition is None:\n raise BadRequest(\"The request has no Content-Disposition\")\n try:\n filename = [x.strip() for x in disposition.split(';') \\\n if x.strip().startswith('filename=')][0][9:]\n except IndexError:\n raise BadRequest(\n \"The Content-Disposition header has no filename\")\n\n obj = self.addFile(context, filename, self.request['BODYFILE'])\n\n # Returned Location header must point directly at the file\n self.request.response.setHeader('Location', obj.absolute_url())\n self.request.response.setStatus(201)\n return ''", "def add_post():\n user = get_user()\n if not user:\n return 'Error: not logged in.', 401\n url = app.config['POSTS_ENDPOINT'] + 'add'\n form_data = dict(**request.form.to_dict(), author_id=user['user_id'])\n images = ((img.filename, img.read())\n for img in request.files.getlist(\"images\") if img.filename != '')\n response = requests.post(\n url, data=form_data, files=images)\n if response.status_code == 201:\n # upload successful, redirect to index\n return redirect(url_for(\"index\"))\n return response.content, response.status_code", "def upload(request):\n # return render(request, 'upload.html')\n # print(request.FILES)\n if request.FILES == {}:\n return render(request, 'simple_upload.html')\n else:\n request.method == \"POST\" and request.FILES['myfile']\n myfile = request.FILES['myfile']\n fs = FileSystemStorage()\n filename = fs.save(myfile.name, myfile)\n uploaded_file_url = fs.url(filename)\n messages.success(request, 'Your Book was added successfully!')\n return render(request, 'simple_upload.html', {'uploaded_file_url': uploaded_file_url})", "def collection_post(self):\n if 'file' not in self.request.POST:\n self.request.errors.add('body', 'file', 'Not Found')\n self.request.errors.status = 404\n return\n tender = TenderDocument.load(self.db, self.tender_id)\n if not tender:\n self.request.errors.add('url', 'tender_id', 'Not Found')\n self.request.errors.status = 404\n return\n src = tender.serialize(\"plain\")\n bids = [i for i in tender.bids if i.id == self.bid_id]\n if not bids:\n self.request.errors.add('url', 'bid_id', 'Not Found')\n self.request.errors.status = 404\n return\n bid = bids[0]\n data = self.request.POST['file']\n document = Document()\n document.id = uuid4().hex\n document.title = data.filename\n document.format = data.type\n key = uuid4().hex\n document.url = self.request.route_url('Tender Bid Documents', tender_id=self.tender_id, bid_id=self.bid_id, id=document.id, _query={'download': key})\n bid.documents.append(document)\n filename = \"{}_{}\".format(document.id, key)\n tender['_attachments'][filename] = {\n \"content_type\": data.type,\n \"data\": b64encode(data.file.read())\n }\n patch = make_patch(tender.serialize(\"plain\"), src).patch\n tender.revisions.append(revision({'changes': patch}))\n try:\n tender.store(self.db)\n except Exception, e:\n return self.request.errors.add('body', 'data', str(e))\n self.request.response.status = 201\n self.request.response.headers['Location'] = self.request.route_url('Tender Bid Documents', tender_id=self.tender_id, bid_id=self.bid_id, id=document.id)\n return {'data': document.serialize(\"view\")}", "def preprocess(event, context):\n # create file in DDB\n file_id = utils.generate_id()\n file_request = json.loads(event.get('body'))\n FileModel.create({\n 'id': file_id,\n 'name': file_request.get('name')\n })\n LOGGER.debug('Files item created. 
service=ddb method=put_item id={}'.format(file_id))\n # generate signed URL for posting file\n url = S3_CLIENT.generate_presigned_url(\n ClientMethod='put_object',\n Params={\n 'Bucket': runtime_context.BUCKET_NAME,\n 'Key': file_id\n },\n ExpiresIn=runtime_context.EXPIRATION\n )\n LOGGER.debug('Presigned URL generated. service=s3 method=put_object id={}'.format(file_id))\n # send back the signed url\n return {\n \"statusCode\": 200,\n \"body\": json.dumps({\n 'id': file_id,\n 'url': url\n }),\n # CORS header\n \"headers\": {\n \"Access-Control-Allow-Origin\": \"*\"\n }\n }", "def upload():\n global FILE_NAME\n target = os.path.join(APP_ROOT, \"images\")\n print(target)\n\n if not os.path.isdir(target):\n os.mkdir(target)\n\n for file in request.files.getlist(\"file\"):\n print(file)\n filename = file.filename\n destination = \"/\".join([target, filename])\n FILE_NAME = destination\n file.save(destination)\n return render_template(\"complete.html\")", "def post_config_upload(self, req, **_kwargs):\n if req.POST:\n meters = req.json.get('meters', None)\n groups = req.json.get('groups', None)\n flows = req.json.get('flows', None)\n\n rm = self.api.process_meter_upload(meters) if meters else ''\n gm = self.api.process_group_upload(groups) if groups else ''\n fm = self.api.process_flow_upload(flows) if flows else ''\n res = Response()\n s = \"{}, {}, {}\".format(rm, gm, fm)\n res.text = s if PYTHON3 else unicode(s, \"utf-8\")\n return res\n\n return Response(status=400) # bad request", "def upload(api_token, base_url, upload_file, metadata):\n\n upload_url = f\"{base_url}data_files/api_create?auth_token={api_token}\"\n files = {'file': open(upload_file, 'rb')}\n response = requests.post(upload_url, files=files, data=metadata)\n\n # Print out the outcome of the upload\n if response.status_code == 200:\n print(f'File {upload_file} successfully uploaded to HIEv')\n else:\n print(\n f'ERROR - There was a problem uploading file {upload_file} to HIEv')", "def _push_one(self, f, **kwargs):\n\n d = self.UploaderClass(f, **kwargs)\n\n # Submit the data to the database\n d.submit(self.session)\n self.uploaded += 1", "def post(self, request, *args, **kwargs):\n\n serializer = serializers.CollectionUploadSerializer(\n data=request.data, context={'request': request})\n serializer.is_valid(raise_exception=True)\n data = serializer.validated_data\n filename = data['filename']\n\n # TODO(cutwater): Merge Artifact and UploadCollectionSerializers\n namespace = self._get_namespace(data)\n self._check_namespace_access(namespace, request.user)\n self._check_max_file_size(request.data['file'].size)\n self._check_role_name_conflict(namespace, filename.name)\n self._check_multi_repo_name_conflict(namespace, filename.name)\n self._check_version_conflict(namespace, filename)\n self._check_is_tarfile(request.data['file'].file.name)\n\n artifact_data = {'file': request.data['file']}\n if serializer.data['sha256'] is not None:\n artifact_data['sha256'] = data['sha256']\n\n repository = pulp_models.Repository.objects.get(\n name=settings.GALAXY_PULP_REPOSITORY)\n\n artifact = self._save_artifact(artifact_data)\n\n task = tasking.create_task(\n tasks.import_collection,\n task_cls=models.CollectionImport,\n params={\n 'artifact_id': artifact.pk,\n 'repository_id': repository.pk,\n },\n task_args={\n 'namespace': namespace,\n 'name': filename.name,\n 'version': filename.version,\n })\n\n data = {'task': reverse('api:v2:collection-import-detail',\n args=[task.pk], request=request)}\n return Response(data, 
status=http_codes.HTTP_202_ACCEPTED)", "def post(self):\n args = request.args or request.json\n file_id = args.get('file_id')\n services.file.add_star(file_id)\n return {\n \"status\": True\n }", "def fileUpload(fieldName):\n## we don't deal with OS specific \"\\n\"\n## because R does not have a problem (at least with Windows files)\n## no problem in R either with empty carriage returns at end of file\n \n if fs.has_key(fieldName):\n fileClient = fs[fieldName].file\n if not fileClient:\n shutil.rmtree(tmpDir)\n commonOutput()\n print \"<h1> ADaCGH ERROR </h1>\" \n print \"<p> The \", fieldName, \"file you entered is not a file </p>\"\n print \"<p> Please fill up the required fields and try again</p>\"\n print \"</body></html>\"\n sys.exit()\n else:\n shutil.rmtree(tmpDir)\n commonOutput()\n print \"<h1> ADaCGH ERROR </h1>\" \n print \"<p> \", fieldName, \"file required </p>\"\n print \"<p> Please fill up the required fields and try again</p>\"\n print \"</body></html>\"\n sys.exit()\n \n # transferring files to final destination;\n\n fileInServer = tmpDir + \"/\" + fieldName\n srvfile = open(fileInServer, mode = 'w')\n fileString = fs[fieldName].value\n srvfile.write(fileString)\n srvfile.close()\n\n os.chmod(fileInServer, 0666)\n \n if os.path.getsize(fileInServer) == 0:\n shutil.rmtree(tmpDir)\n commonOutput()\n print \"<h1> ADaCGH ERROR </h1>\"\n print \"<p>\", fieldName, \" file has size 0 </p>\"\n print \"<p> Please enter a file with something in it.</p>\"\n print \"<p> (Did you enter only a single file, but did not check 'One file'?\\\n If you are using only one file, the 'Two files' button should not be checked.)</p>\"\n print \"</body></html>\"\n sys.exit()", "def upload(self, requests, file):\n # Set the source and dest paths\n dest_url = self.base_url + '/upload'\n source_path = os.path.join(self.data_dir, file)\n\n # Get the sha256 hash of the file\n with open(source_path, 'rb') as afile:\n beforeDigest = hashlib.sha256(afile.read()).hexdigest()\n\n print(\"Generated a hash of the temp file: \" + beforeDigest)\n\n # Upload the file and time it\n with open(source_path, 'rb') as f:\n startTime = time.time()\n r = requests.post(dest_url, files={'file': (file, f)}, max_price=5)\n endTime = time.time()\n uploadElapsedTime = endTime - startTime\n\n print(\"Uploaded the file. 
Elapsed time: \" + str(uploadElapsedTime))\n\n # Verify the upload was successful\n if r.json()['success'] is not True:\n return {'success': False}\n\n retVal = {\n 'success': True,\n 'time': uploadElapsedTime,\n 'digest': beforeDigest,\n 'upload_filename': r.json()['filename']\n }\n\n return retVal", "def post(self):\n upload_files = upload_images(request.files, 'users', 'user')\n record = request.json if request.content_type == 'application/json' else request.form\n \"\"\" Values assign to Category Model \"\"\"\n pprint.pprint(record)\n model_record = UserModel(\n record['name'], \n record['password'], \n record['email'],\n record['mobile'] if 'mobile' in record else None, \n record['userType'] if 'userType' in record else None,\n upload_files['fileArr']\n )\n \"\"\" Model converts to document like json object \"\"\"\n record_document = model_record.to_document()\n \"\"\" Below line will insert record and get objectID \"\"\"\n insertedId = col_users.insert_one(record_document).inserted_id\n \"\"\" Below line does files move from one place to another \"\"\"\n if len(upload_files['fileArr']) > 0:\n moved_file(upload_files['fileArr'], upload_files['folder'], str(insertedId)) \n\n return jsonify(str(insertedId))", "def files(self):\r\n files = FormsDict()\r\n for name, item in self.POST.iterallitems():\r\n if hasattr(item, 'filename'):\r\n files[name] = item\r\n return files", "def new_upload_image():\n log_request(request)\n\n if not valid_params(['username', 'session_id'], request.form) or\\\n not valid_params(['file'], request.files):\n logging.debug(\"Missing parameters\")\n return jsonify({'error' : 500})\n \n username = request.form['username']\n sId = request.form['session_id']\n fil = request.files['file']\n\n \n # check session before upload\n if not user.verify(username, sId):\n logging.debug(\"Invalid username or session id\")\n return jsonify({'error' : 101})\n\n if fil and allowed_file(fil.filename):\n # get the file extension\n ext = os.path.splitext(fil.filename)[1]\n # create a temporary file\n f = tempfile.NamedTemporaryFile(delete=False, dir=\"/var/www/resources/tmp/\", suffix=\"{0}\".format(ext))\n os.chmod(f.name, 0644)\n name = os.path.basename(f.name)\n f.write(fil.read())\n f.close()\n # get the dividing points for the page\n i = Image.open(f.name)\n divs = divLines(i)\n del i\n # return the dividing points and the name of the page in json form\n return jsonify(\n name = name,\n divs = divs,\n error = 0)\n else:\n logging.debug(\"Image processing failed, invalid filetype?\")\n return jsonify({'error' : 200})", "def add_animal_process():\n\n email = session['current_admin']\n admin = c.get_admin_by_session(email)\n admin_id = admin.admin_id\n\n if request.method == 'POST':\n # Check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect('/admin/' + str(admin_id))\n # Get the name of the uploaded file\n uploaded_file = request.files['file']\n # If user does not select a file, browser also\n # submits an empty part without filename\n if uploaded_file.filename == '':\n flash('No selected file')\n return redirect('/admin/' + str(admin_id))\n # Check if the file is one of the allowed types/extensions\n if uploaded_file and c.allowed_file(uploaded_file.filename, ALLOWED_EXTENSIONS):\n # passing the request and session object\n animal = c.add_animal(request, session, app.config['UPLOAD_FOLDER'])\n\n return redirect('/rescue/' + str(animal.rescue_id))", "def upload_file(self, f):\n with FilesOpener(f) as files:\n 
response = self.session.post(\n 'https://{}/upload'.format(self.domain),\n files=files\n ).json()\n\n if isinstance(response, list):\n error = response[0].get('error')\n else:\n error = response.get('error')\n\n if error:\n if isinstance(error, str) and error.startswith('FLOOD_WAIT_'):\n retry_after = int(error.rsplit('_',1)[-1])\n raise RetryAfterError(retry_after)\n else:\n raise TelegraphException(error)\n\n return response", "def _openstack_file_upload_handler(self, real_file_path, file_path, file_name, **kwargs):\r\n\r\n container = kwargs.get('container', None)\r\n connection = kwargs.get('connection', None)\r\n\r\n if container is None:\r\n raise TypeError('container argument is required')\r\n\r\n if connection is None:\r\n raise TypeError('connection argument is required')\r\n\r\n object_key = (os.path.relpath(file_path, '/') + '/' + file_name).replace('\\\\', '/')\r\n if not object_key.startswith('/'):\r\n object_key = '/' + object_key\r\n\r\n remove_from_key = kwargs.get('remove_from_key', None)\r\n if remove_from_key is not None:\r\n object_key = object_key.replace(remove_from_key, '')\r\n\r\n prefix = kwargs.get('prefix', '')\r\n if prefix is None:\r\n prefix = ''\r\n if prefix != '' and not prefix.endswith('/'):\r\n prefix += '/'\r\n\r\n object_key = prefix + object_key\r\n\r\n real_file_path = real_file_path.replace('\\\\', '/')\r\n self._upload_file_to_container(container, object_key, real_file_path, connection)", "def upload_file():\n response = Response()\n\n size = 256, 256\n if request.method == 'POST':\n file = request.files['file']\n if file and allowed_file(file.filename):\n # Prevent dir traversal/NUL byte injection\n filename = secure_filename(file.filename)\n\n if not os.path.exists(app.config['UPLOAD_FOLDER']):\n os.makedirs(app.config['UPLOAD_FOLDER'])\n\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n im = Image.open(\n os.path.join(app.config['UPLOAD_FOLDER'], filename))\n im.thumbnail(size, Image.ANTIALIAS)\n im.save(app.config['UPLOAD_FOLDER'] + '/small_' + filename, \"PNG\")\n _upload_to_s3(filename)\n _upload_to_s3('small_{}'.format(filename))\n return response", "def upload(request):\n if request.method != \"POST\":\n return probe(request)\n\n md5chunk = request.args.get('md5chunk', False)\n md5total = request.args.get('md5total', False)\n\n chunk = int(request.args.get('chunk', 0))\n chunks = int(request.args.get('chunks', 0))\n\n if md5chunk and md5total:\n filename = upload_with_checksum(request, md5chunk, md5total, chunk, chunks)\n else:\n filename = upload_simple(request, chunk)\n\n return Response('%s uploaded' % filename)" ]
[ "0.7198825", "0.7094963", "0.69860333", "0.69567823", "0.6920896", "0.6851648", "0.68024033", "0.6728201", "0.6680211", "0.65697706", "0.6507112", "0.6479238", "0.6470818", "0.64561504", "0.64233595", "0.64167655", "0.6380935", "0.63550353", "0.62920946", "0.62501407", "0.62389594", "0.6235487", "0.6233165", "0.6138275", "0.61178815", "0.6117856", "0.6113433", "0.6112578", "0.6110093", "0.6104514", "0.60921735", "0.6077656", "0.6069915", "0.6068483", "0.6065389", "0.60424536", "0.60218304", "0.6014146", "0.60139996", "0.59930545", "0.5991382", "0.5989311", "0.5984731", "0.59798825", "0.59752786", "0.5969653", "0.5969145", "0.59439343", "0.5926052", "0.5922424", "0.59216267", "0.5918194", "0.5915725", "0.5915575", "0.591541", "0.5908709", "0.58923733", "0.5889357", "0.5885751", "0.5879395", "0.58654654", "0.5863596", "0.585836", "0.58370787", "0.58266515", "0.5822983", "0.58223826", "0.5813476", "0.5807336", "0.5803591", "0.58015084", "0.5793195", "0.5788489", "0.57866275", "0.57828593", "0.57812065", "0.5776043", "0.5771082", "0.5765184", "0.57557285", "0.5742627", "0.57369983", "0.5731799", "0.5730394", "0.57266647", "0.5726484", "0.5725769", "0.57132", "0.5709056", "0.5708061", "0.5701955", "0.57001877", "0.5698125", "0.5688094", "0.56867766", "0.56856406", "0.56826335", "0.5680469", "0.5677116", "0.5676971", "0.56705487" ]
0.0
-1
POST handler that uploads file to remote host.
async def UploadToRemoteNodeHandler(request):
    fileSize = 0
    timestampStart = datetime.utcnow()
    usedHandler = 'uploadFileToRemoteNode'
    try:
        data = await request.post()
        try:
            testId = data['testId']
        except Exception as e:
            testId = str(uuid.uuid4())
        try:
            fileName = data['fileName']
        except Exception as e:
            fileName = '1k.txt'
        try:
            destinationFileName = data['destinationFileName']
        except Exception as e:
            destinationFileName = fileName
        try:
            destinationHost = data['destinationHost']
        except Exception as e:
            print('no destinationHost')
        fileUrl = 'http://'+destinationHost+'/uploadFile/'+testId
        #print(fileUrl)
        async with ClientSession() as session:
            try:
                async with ClientSession() as session:
                    async with session.post(fileUrl, data ={
                            'testId': testId,
                            'fileName': destinationFileName,
                            'file': download_files[fileName]
                            }) as response:
                        data = await response.json()
                        fileSize = len(download_files[fileName])
                        timestampEnd = datetime.utcnow()
                        taskDuration = str(timestampEnd - timestampStart)
                        testResults.setdefault(testId, {}).setdefault(usedHandler, {})["fileName"] = fileName
                        testResults.setdefault(testId, {}).setdefault(usedHandler, {})["destinationFileName"] = destinationFileName
                        testResults.setdefault(testId, {}).setdefault(usedHandler, {})["fileSize"] = fileSize
                        testResults.setdefault(testId, {}).setdefault(usedHandler, {})["timestampStart"] = str(timestampStart)
                        testResults.setdefault(testId, {}).setdefault(usedHandler, {})["timestampEnd"] = str(timestampEnd)
                        testResults.setdefault(testId, {}).setdefault(usedHandler, {})["taskDuration"] = taskDuration
                        response_obj = {
                            'status': 'success',
                            'message': 'File uploaded',
                            'testId': testId,
                            'usedHandler': usedHandler,
                            'taskDuration': taskDuration,
                            'fileName': fileName,
                            'destinationFileName': destinationFileName,
                            'fileSize': fileSize,
                            'timestampStart': str(timestampStart),
                            'timestampEnd': str(timestampEnd)
                        }
                        return web.json_response(response_obj)
            except Exception as e:
                response_obj = {
                    'status' : 'failed',
                    'message': str(e)
                }
                return web.json_response(response_obj)
    except Exception as e:
        response_obj = {
            'status' : 'failed',
            'message': str(e)
        }
        return web.json_response(response_obj)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def post(self):\n filename = str(time.time())\n filepath = os.path.join(\n os.path.join(current_app.config['UPLOAD_FOLDER'], filename))\n with open(filepath, 'bw') as uploadfile:\n chunk_size = 1024\n while True:\n chunk = request.stream.read(chunk_size)\n if len(chunk) == 0:\n break\n uploadfile.write(chunk)\n current_app.logger.info('file %s upload successfully', filename)\n return {'timestamp': filename}, http.HTTPStatus.CREATED", "def upload(self, filename, file_path):\n return", "def upload(self, fullfilename, remotefolder=None):\n print(\"[Remote Server] Uploading %s to %s:%s\" %(fullfilename, self.server, self.remotefolder))\n\n if not self.status:\n return 0\n\n if remotefolder == None:\n remotefolder = self.remotefolder\n\n if not self.cd(remotefolder):\n return 0\n\n if not self.sendFile(fullfilename):\n print(\"[Remote Server] Error uploading file %s\" %fullfilename)\n return 0\n\n print(\"[Remote Server] upload finished successfully\")\n\n return 1", "def send_file(cobj, dest, port, fname, hash, handler):\n pass", "def api_upload():\n return make_response(file_manager.save_uploaded_file(), 200)", "def post(self):\n\n upload_files = self.get_uploads('file')\n blob_info = upload_files[0]\n self.redirect('/?upload_info=%s' % urllib.quote(blob_info.filename))", "def upload():\n return handle_upload(app, request)", "def _upload(url, data_file, username, password):\n url_match = '(http(s)?)\\:\\/\\/localhost'\n if re.search(url_match, url):\n print(\"Please configure url settings.\")\n exit(1)\n\n polarion_request = post(url,\n data=data_file,\n auth=auth.HTTPBasicAuth(username,\n password))\n status_code = polarion_request.status_code\n if status_code == codes.ok:\n return status_code\n else:\n print(\"Results upload failed with the follow: {}\".format(\n polarion_request.status_code))\n raise exceptions.RequestException", "def upload(context, request):\n if request.method == 'POST':\n if not hasattr(request.POST['content'], 'file'):\n raise RuntimeError('No file attached')\n\n fieldstorage = request.POST['content']\n filename = fieldstorage.filename\n logger.info(\"%s posted\", filename)\n\n with bm(\"%s released\" %filename):\n dest = path(request.file_root) / request.namer(filename)\n dest.write_bytes(fieldstorage.file.read())\n try:\n request.registry.notify(event.PackageAdded(request.index, path=dest))\n request.response.headers['X-Swalow-Status'] = 'SUCCESS'\n try:\n for ep in pkg_resources.iter_entry_points('cheeseprism.on_upload'):\n func = ep.load()\n func(context, request, dest)\n except Exception as e:\n logger.exception('Entry point %r failed', ep)\n return request.response\n except :\n logger.exception(\"Processing of %s failed\", filename)\n raise\n return {}", "def upload_image():\n\n response = \"\"\n response_code = 400\n if 'file' not in request.files:\n response = \"Error with request: No file field in body of request.\"\n else:\n file = request.files['file']\n if file.filename == '':\n response = \"Error with request: File field in body of response with no file present.\"\n elif file and allowed_file(file.filename, ALLOWED_IMG_EXTENSIONS):\n filename = secure_filename(file.filename)\n save_path = os.path.dirname(os.path.abspath(__file__)) + IMAGE_UPLOAD_FOLDER\n file.save(os.path.join(save_path, filename))\n response = \"Success: Image saved.\"\n response_code = 201\n\n try:\n add_remote_image(save_path, filename)\n except CalledProcessError:\n response = \"Error: Failed to copy file to hosts\"\n response_code = 500\n else:\n response = \"Error with request: 
File extension not allowed.\"\n return make_response(jsonify({'message': response}), response_code)", "def upload_file(self, instance, local_obj, remote_file):\n client = self.connect(instance)\n try:\n sftp = client.open_sftp()\n try:\n self._send_file(sftp, local_obj, remote_file)\n finally:\n sftp.close()\n finally:\n client.close()", "def post(self, request, *args, **kwargs):\n if kwargs['optionset'] == 'sftp':\n optinon_sets = self.get_optionset(**kwargs)\n optinon_sets['roots'][0]['storageKwArgs'] = {'host':'127.0.0.1','params':{'port':22,'username':'test','password':'password','timeout':30},'root_path':'/','interactive':False}\n self.elfinder = ElfinderConnector(optinon_sets, request.session)\n else:\n self.elfinder = ElfinderConnector(self.get_optionset(**kwargs), request.session) \n cmd = self.get_command(request.POST)\n \n if not cmd in ['upload']:\n self.render_to_response({'error' : self.elfinder.error(ElfinderErrorMessages.ERROR_UPLOAD, ElfinderErrorMessages.ERROR_UPLOAD_TOTAL_SIZE)})\n return self.output(cmd, request.POST)", "def upload_file(self, file_path, file_name, output_path):", "def upload_file(self, f):\n return self._telegraph.upload_file(f)", "def __PostFile(self, url, fileName, prefix):\n CRLF = '\\r\\n'\n\n f = open(fileName, \"rb\")\n content = f.read()\n boundary = \"-------------------------------\"+ \\\n \"\".join([ random.choice('0123456789') for x in range(28) ])\n\n output = []\n output.append(\"--\"+boundary)\n output.append('Content-Disposition: form-data; name=\"'+prefix+ \\\n '\"; filename=\"avatar.png\"')\n output.append('Content-Type: '+mimetypes.guess_type(fileName)[0] \\\n or 'application/octet-stream')\n output.append(\"\")\n output.append(content)\n output.append(\"--\"+boundary+\"--\")\n output.append(\"\")\n\n encoded = CRLF.join(output)\n\n conn = self.__GetConnection()\n headers = self.__MakeHeaders(True)\n\n conn.putrequest(\"POST\", url)\n for (k,v) in headers.iteritems():\n conn.putheader(k, v)\n\n conn.putheader(\"Content-Type\", \"multipart/form-data; boundary=\" + \\\n boundary)\n conn.putheader(\"Content-Length\", str(len(encoded)))\n\n conn.endheaders()\n conn.send(encoded)\n response = conn.getresponse()\n self.__CheckResponse(response)", "def do_POST(self):\r\n content_length = int(self.headers['Content-Length'])\r\n body = self.rfile.read(content_length)\r\n\r\n response = BytesIO()\r\n try:\r\n res = webServer.handle_post_msg(body)\r\n print(res)\r\n self.send_response(200)\r\n except Exception as e:\r\n print(e)\r\n res = str(e)\r\n self.send_response(500)\r\n self.end_headers()\r\n response.write(res.encode())\r\n self.wfile.write(response.getvalue())", "def post_multipart(host, selector, fields, files):\n content_type, body = encode_multipart_formdata(fields, files)\n\n h = httplib.HTTP(host)\n h.putrequest('POST', selector)\n h.putheader('content-type', content_type)\n h.putheader('content-length', str(len(body)))\n h.endheaders()\n h.send(body)\n \n errcode, errmsg, headers = h.getreply()\n \n print \"CODE=%d\"%errcode\n print \"ERRMSG=%s\"%errmsg\n print \"*** Headers ***\"\n print headers\n print \"*** End of Headers ***\"\n \n if 'Failed' in headers.get('Location'):\n print 'ERROR: Upload failed'\n sys.exit(-1)\n \n print 'Package successfully deployed'\n \n return h.file.read()", "def post_multipart(host, selector, fields, files):\n content_type, body = encode_multipart_formdata(fields, files)\n h = httplib.HTTPConnection(host)\n h.putrequest('POST', selector)\n h.putheader('content-type', content_type)\n 
h.putheader('content-length', str(len(body)))\n h.endheaders()\n h.send(body)\n response = h.getresponse()\n output = response.read()\n return output\n # return h.file.read()", "def uploadFile(self, filename, name=\"Dummy name\", type=\"DummyType\"):\n\n with open(filename, 'rb') as f:\n data = f.read()\n\n if (name == \"Dummy name\"):\n name = filename\n\n data = {'name': name,\n 'type': type,\n 'bits': xmlrpclib.Binary(data),\n 'overwrite': True}\n\n try:\n r = self.server.wp.uploadFile(\n self.blogid, self.username, self.password, data)\n except xmlrpclib.Fault as fault:\n display_XMLRPC_errors(\"upload file \" + filename, fault)\n\n #FIXME: do we really need to split the url ?\n try:\n r['url'] = r['url'].split('?')[1]\n except IndexError:\n from urlparse import urlparse\n r['url'] = urlparse(r['url']).path\n\n print \"uploaded file file =\", r['file']\n print \"uploaded file url =\", r['url']\n print \"uploaded file type =\", r['type']", "def post_file(self, file_, api=None):\n api = api or self.api\n url = utils.join_url(self.path)\n files = {'data': file_}\n new_attributes = api.post(url, {}, {}, files)\n # self.error = None\n self.merge(new_attributes)\n return self.success()", "def upload(cls, local_file, remote_file='', bucket_name=QINIU_BUCKET_NAME):\n if remote_file == '':\n remote_file = cls.__gen_uuid()\n local_file = cls.__get_abs_path(local_file)\n url = \"/v1/qiniu/upload?key=%s&localFile=%s&token=root-weimiyun-9@usstpwd!\" % (remote_file, local_file)\n try:\n conn = httplib.HTTPConnection(UPLOAD_API_HOST)\n conn.request(method=\"POST\", url=url)\n response = conn.getresponse()\n res = response.read()\n if AUTO_DELETE:\n os.remove(local_file)\n return res, True\n except Exception, e:\n return 'Connection refused', False", "def post(self):\n data = self.post_parser.parse_args()\n\n try:\n LOGGER.debug('Trying to upload file to storage')\n self.storage.upload(data.file)\n LOGGER.debug('The file was uploaded with success')\n return {\n 'filename': data.file.filename,\n 'message': 'The file was uploaded with success'\n }\n except BaseException:\n abort(500, message='The file was not uploaded')\n LOGGER.error('A generic exception has occurred.', exc_info=True)", "def upload():\n file = None\n if 'file' in request.files:\n file = request.files['file']\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n return json_response(\n message=\"Upload successful\",\n result=\"/v/{}\".format(filename)\n )\n return json_response(\n message=\"Invalid filename or extension (jpg, png, gif)\",\n status_code=500\n )", "def peer_server_upload(self, conn, data_received):\n try:\n file_size = os.path.getsize(SHARED_DIR+'/'+data_received['file_name'])\n f = open(SHARED_DIR+'/'+data_received['file_name'], 'rb')\n #print \"Hosting File: %s for download\" % data_received\n for chunk_start, chunk_size in self.get_chunks(file_size):\n file_chunk = f.read(chunk_size)\n conn.sendall(file_chunk)\n '''\n while True:\n data = f.readline()\n if data:\n conn.sendall(data)\n else:\n break\n '''\n f.close()\n conn.sendall('')\n conn.close()\n except Exception as e:\n print \"File Upload Error, %s\" % e", "def send_file():\n data = ARGS.data\n filename = ARGS.file\n outstream = \"POST||\" + filename + \"||\" + data\n CLIENT_SOCKET.send(outstream.encode())", "async def upload_file(self):\n logger.debug(\"uploading %s\", self.tgzfile)\n with aiohttp.MultipartWriter(\"form-data\") as mpwriter:\n with 
open(self.tgzfile, \"rb\") as file_handle:\n part = mpwriter.append(file_handle)\n part.set_content_disposition(\n \"form-data\", name=\"file\", filename=\"inventory.gz\"\n )\n part.headers[aiohttp.hdrs.CONTENT_TYPE] = self.UPLOAD_CONTENT_TYPE\n\n headers = {}\n # TODO : Use mTLS certs not userid/password\n auth = aiohttp.BasicAuth(\n self.config[\"AUTH\"][\"username\"], self.config[\"AUTH\"][\"password\"]\n )\n headers[\"Authorization\"] = auth.encode()\n async with aiohttp.ClientSession(headers=headers) as session:\n async with session.post(\n self.upload_url, ssl=self.ssl_context, data=mpwriter\n ) as response:\n logger.debug(\"Status: %s\", response.status)\n logger.debug(\n \"Content-type: %s\", response.headers[\"Content-Type\"]\n )\n\n return await response.text()", "def upload_file(cls, uri, fobj):\n msg = \"Backend doesn't implement upload_file()\"\n raise NotImplementedError(msg)", "def _upload_file(sftp, local_file, remote_file) -> None:\n # Check if local_file is a file-like object and use the proper\n # paramiko function to upload it to the remote machine.\n if hasattr(local_file, \"read\"):\n sftp.putfo(local_file, remote_file)\n else:\n sftp.put(local_file, remote_file)", "def submitFiles(self):\n formData =__new__(FormData)();\n \"\"\"\n Iteate over any file sent over appending the files\n to the form data.\n \"\"\"\n i=0\n console.log(self.vue.files)\n while i < self.vue.files.length:\n file = self.vue.files[i];\n formData.append('files[' + i + ']', file);\n i+=1\n \"\"\"\n Make the request to the POST /file-drag-drop URL\n \"\"\"\n formData.append(\"type\",\"upload\")\n __pragma__ ('jsiter') \n fetch('/json/plugins/',\n {\n \"method\":\"POST\",\n \"body\":formData,\n })\\\n .then(lambda res:res.json())\\\n .then(self.uploaded)\\\n .catch(lambda e:console.log('FAILURE!!',e));\n __pragma__ ('nojsiter')", "def upload_file(self, source, dest):\n print(f\"Uploading {source} to {dest}\")\n with open(source, \"rb\") as data:\n self.client.upload_blob(name=dest, data=data)", "def sendRequest(event, context):\n file = event\n print(f\"Processing file: {file['name']}.\")\n\n filename = file['name']\n\n url = 'http://34.123.136.112:5000'\n myobj = {'filename': filename}\n\n x = requests.post(url, data = myobj)\n\n print(x.text)", "def post(self):\n source = 'uploaded by user'\n upload_files = self.get_uploads('file')\n blob_key = upload_files[0].key()\n name = self.request.get('name')\n\n user = users.get_current_user()\n\n username = 'admin'\n date = datetime.datetime.now()\n str_blob_key = str(blob_key)\n key = FileMetadata.get_key_name(username, date, str_blob_key)\n\n ctx = ndb.get_context()\n meta = FileMetadata(key_name=key, parent=_PARENT)\n meta.owner = user\n meta.filename = name\n meta.uploaded_on = date\n meta.source = source\n meta.blobkey = str_blob_key\n meta.put()\n ctx.clear_cache()\n self.redirect('/admin')", "def post_multipart(host, selector, fields):\n\treturn post_multipart_formdata(host, selector, fields)[3]", "def upload_file(self, f):\n with FilesOpener(f) as files:\n response = self.session.post(\n 'https://{}/upload'.format(self.domain),\n files=files\n ).json()\n\n if isinstance(response, list):\n error = response[0].get('error')\n else:\n error = response.get('error')\n\n if error:\n if isinstance(error, str) and error.startswith('FLOOD_WAIT_'):\n retry_after = int(error.rsplit('_',1)[-1])\n raise RetryAfterError(retry_after)\n else:\n raise TelegraphException(error)\n\n return response", "def fpupload(request, dataset_id):\n\n dataset = 
Dataset.objects.get(id=dataset_id)\n logger.debug('called fpupload')\n\n if request.method == 'POST':\n logger.debug('got POST')\n for key, val in request.POST.items():\n splits = val.split(\",\")\n for url in splits:\n try:\n fp = FilepickerFile(url)\n except ValueError:\n pass\n else:\n picked_file = fp.get_file()\n filepath = write_uploaded_file_to_dataset(dataset,\n picked_file)\n datafile = Dataset_File(dataset=dataset,\n filename=picked_file.name,\n size=picked_file.size)\n replica = Replica(datafile=datafile,\n url=filepath,\n protocol='',\n location=Location.get_default_location())\n replica.verify(allowEmptyChecksums=True)\n datafile.save()\n replica.datafile = datafile\n replica.save()\n\n return HttpResponse(json.dumps({\"result\": True}))", "def post(url, fields, files=[]):\n pm = PostMultipart()\n return pm.post(url, fields, files)", "def upload_file(self, file_name, content):\n return self.files.upload(file_name, content)", "def upload(self, file_path: str, remote_name: str = None) -> dict:\n url = self.get_method_url('storage', 'upload')\n remote_name = self.get_remote_name(file_path, remote_name)\n with open(file_path, 'rb') as file:\n files = {\n 'payload': file,\n 'file_name': remote_name\n }\n json_data = self.request(\n url=url,\n files=files,\n method='POST'\n )\n self.remote_app = json_data\n return self.remote_app", "def upload(self, remote, local, force = False):\n fl = self.list([ remote ])\n if force == False and remote in fl:\n remote_hash = fl[remote]\n h = hashlib.sha256()\n commonl.hash_file(h, local)\n if remote_hash == h.hexdigest():\n # remote hash is the same, no need to upload\n return\n\n with io.open(local, \"rb\") as inf:\n self.target.ttbd_iface_call(\"store\", \"file\", method = \"POST\",\n file_path = remote,\n files = { 'file': inf })", "def post_upload(self, url, file_path, metadata):\n full_url = self.api_url + starts_slash(ends_slash(url))\n headers = {\"Authorization\": \"Bearer \" + self.token}\n body = {\"metadata\": json.dumps(metadata)}\n logging.info(\"POST url: \" + str(full_url))\n logging.info(\"POST header: \" + str(headers))\n logging.info(\"POST body: \" + str(body))\n filedata = None\n if isfile(file_path):\n filedata = {\"filedata\": open(file_path, \"rb\")}\n result = requests.post(url=full_url, headers=headers,\n files=filedata, data=body).json()\n logging.info(\"POST result: \"+str(result))\n return result", "def post(self, request: HttpRequest) -> HttpResponse:\n if \"id\" in request.POST and \"imagedata\" in request.FILES:\n # Instantiate BrowserObjectView to use handle_post_file\n upload_view = BrowserObjectView()\n upload, created = upload_view.handle_post_file(request.FILES[\"imagedata\"])\n if created:\n # Run auto-claim\n if CONFIG.y(\"auto_claim_enabled\", False) and \"username\" in request.POST:\n matching = get_user_model().objects.filter(\n username=request.POST.get(\"username\")\n )\n if matching.exists():\n upload.user = matching.first()\n LOGGER.debug(\n \"Auto-claimed upload to user '%s'\",\n request.POST.get(\"username\"),\n )\n upload.save()\n # Count initial view\n ObjectViewFile.count_view(upload, request)\n LOGGER.info(\"Uploaded %s\", upload.filename)\n # Generate url for client to open\n default_return_view = CONFIG.y(\"default_return_view\", \"sha256\").replace(\n \"view_\", \"\"\n )\n upload_hash = getattr(upload, default_return_view, \"sha256\")\n url = reverse(\n \"view_\" + default_return_view,\n kwargs={\"file_hash\": upload_hash},\n )\n return HttpResponse(request.build_absolute_uri(url))\n return 
HttpResponse(status=400)", "def post_multipart(host, selector, fields, files):\n content_type, body = encode_multipart_formdata(fields, files)\n headers = {'Content-Type': content_type,\n 'Content-Length': str(len(body))}\n\n #r = urllib2.Request(\"%s%s\" % (host, selector), b2a_base64(body), headers)\n r = urllib2.Request(\"%s%s\" % (host, selector), body, headers)\n #return urllib2.urlopen(r).read()\n return urllib2.urlopen(r)", "def post_multipart(host, fields, files, submit_to_server):\r\n content_type, body = encode_multipart_formdata(fields, files)\r\n h = httplib.HTTP(host)\r\n # needed to change the following url to be handled properly by MG-RAST\r\n h.putrequest('POST', 'http://metagenomics.anl.gov/qiime.cgi')\r\n h.putheader('Content-Type', content_type)\r\n h.putheader('Content-Length', str(len(body)))\r\n h.endheaders()\r\n\r\n # put a check in place for testing purposes on whether the data should be\r\n # posted on the MG-RAST website\r\n if submit_to_server:\r\n h.send(body)\r\n errcode, errmsg, headers = h.getreply()\r\n\r\n # verify the data was received by MG-RAST\r\n if errcode == 200:\r\n response = h.file.read()\r\n else:\r\n raise OSError(\r\n 'MG-RAST could not fulfill the request, which means that the server is unavailable!')\r\n else:\r\n response = body\r\n\r\n return response", "def handle_uploaded_file(f):\n path = settings.ABS_PATH + \"Server_data_visualization/uploads/executable\"\n destination = open(path, \"wb+\")\n for chunk in f.chunks():\n destination.write(chunk)\n destination.close()\n # os.chmod(path, stat.S_IXUSR | stat.S_IWUSR | stat.S_IRUSR)", "def do_POST(self):\n ctype, pdict = cgi.parse_header(self.headers.getheader('content-type'))\n self.body = cgi.FieldStorage(fp=self.rfile,\n headers=self.headers, environ = {'REQUEST_METHOD':'POST'},\n keep_blank_values = 1, strict_parsing = 1)\n # throw away additional data [see bug #427345]\n while select.select([self.rfile._sock], [], [], 0)[0]:\n if not self.rfile._sock.recv(1):\n break\n self.handle_data()", "def serve_upload(conn, ssn_key, file_name, client_name):\n # get signal to begin upload\n request = aes.decrypt(ssn_key, conn.recv(1024))\n if request != SIG_START:\n conn.sendall(aes.encrypt(ssn_key, SIG_BAD))\n return print(\"Bob: something went wrong with file transfer\")\n response = aes.encrypt(ssn_key, SIG_GOOD)\n conn.sendall(response)\n print(\"Bob: beginning transfer for {}...\".format(file_name))\n\n # get file contents from client\n contents = list()\n completed_upload = False\n response = aes.encrypt(ssn_key, SIG_GOOD)\n while not completed_upload:\n request = aes.decrypt(ssn_key, conn.recv(1024))\n if request == SIG_END:\n completed_upload = True\n print(\"Bob: completed transfer for {}\".format(file_name))\n else:\n contents.append(request)\n conn.sendall(response)\n\n # save file to server folder\n file_path = \"{}/{}\".format(client_name, file_name)\n os.makedirs(os.path.dirname(file_path), exist_ok=True)\n with open(file_path, \"w\") as outputStream:\n outputStream.write(''.join(contents))\n print(\"Bob: file saved in {}\".format(file_path))", "def action_POST(self):\n\n # Use the content-length header, though being user-defined input it's not really trustworthy.\n try:\n l = int(self.headers.get('content-length', 0))\n if l < 0:\n # Parsed properly, but some joker put in a negative number.\n raise ValueError()\n except ValueError:\n return self.serve_content(\"Illegal Content-Length header value: %s\" % self.headers.get('content-length', 0), 400)\n\n m = args[TITLE_MAX_LENGTH]\n if 
m and l > m:\n return self.serve_content('Maximum length: %d' % m, code = 413)\n\n form = cgi.FieldStorage(\n fp=self.rfile,\n headers=self.headers,\n environ={\n 'REQUEST_METHOD':'POST',\n 'CONTENT_TYPE':self.headers['Content-Type'],\n }\n )\n\n if 'file' not in form:\n return self.serve_content('No file provided.', 400)\n\n filename = form['file'].filename\n if not filename:\n # No FileName provided\n return self.serve_content('No file name.', 400)\n elif not re.match(r'^[^/\\\\]+$', filename) or filename in ['.', '..']:\n # Validate filename\n return self.serve_content('Invalid file name.', 400)\n\n if not os.path.isdir(self.file_path):\n return self.send_error(404)\n\n path_save = os.path.join(self.file_path, filename)\n\n if os.path.exists(path_save) and not os.path.isfile(path_save):\n return self.serve_content('Destination exists as a non-file', code = 406)\n\n if args[TITLE_UPLOAD_NO_CLOBBER] and os.path.isfile(path_save):\n return self.serve_content('File already exists.', code = 302)\n\n try:\n with open(path_save, 'wb') as output_file:\n # TODO: How to handle a user lying in their Content-Length header?\n self.copyobj(form['file'].file, output_file, False)\n except IOError:\n if os.path.isfile(path_save):\n os.remove(path_save)\n return self.serve_content('Failed to save file.', code = 500)\n\n return self.serve_content(self.render_file_table(self.file_path), code = 200)", "def post(self):\n request_data = request.get_json(force=True)\n current_path = self.get_current_path()\n file_name = request_data.get('file_name')\n\n if not file_name:\n abort(400, message=\"File name must not be empty!\")\n\n full_path = os.path.join(current_path, file_name)\n\n if os.path.exists(full_path):\n abort(400, message=\"File already exists!\")\n\n if not self.is_allowed(full_path):\n abort(403, message=\"You are not allowed to this path\")\n\n with open(full_path, 'w+') as fp:\n pass\n\n return {\"message\": \"OK\"}", "async def upload(self, request):\n\n userid = await authenticated_userid(request)\n project = await request.app.context_project(request, userid)\n\n payload = await request.post()\n\n filename = payload['file'].filename\n upload_stream = payload['file'].file\n\n ext = os.path.splitext(filename)[1]\n\n if not re_filename_ext.match(ext):\n # paranoid check in case a script doesn't protect from code injection\n raise web.HTTPBadRequest(text='file extension not supported: %s' % filename)\n\n camera_id = uuid.uuid1().hex\n\n log = request['slog']\n log.debug('request: camera upload', filename=filename)\n\n config = request.app.config\n\n tmppath = dump_stream(config['media']['tempdir'], upload_stream)\n\n log.debug('file dump', camera_id=camera_id, tmppath=tmppath)\n\n await Camera.insert(request,\n camera_id=camera_id,\n filename=filename,\n project_id=project.project_id)\n\n await request.app.task_broker.publish('camera_upload', {\n 'userid': userid,\n 'project_id': project.project_id,\n 'camera_id': camera_id,\n 'tmppath': tmppath,\n 'filename': filename\n }, log=log)\n\n response_js = {\n 'camera_file_id': camera_id\n }\n\n return web.json_response(response_js, status=HTTPStatus.CREATED)", "def upload(url, file_data=io.BytesIO(b'ShareX text upload test'), file_name='Test.txt', form_name='sharex', secret=None, field_name='secret'):\n files = {form_name: (file_name, file_data, 'text/plain')}\n\n data = {field_name: secret} if secret is not None else {}\n\n return requests.post(url, headers={'User-Agent': 'ShareX/13.2.1'}, files=files, data=data)", "def upload(conn, localpath, 
remotepath, filter = None, ignore_invalid = False, chunk_size = 16000):\n if os.path.isdir(localpath):\n upload_dir(conn, localpath, remotepath, filter, chunk_size)\n elif os.path.isfile(localpath):\n upload_file(conn, localpath, remotepath, chunk_size)\n else:\n if not ignore_invalid:\n raise ValueError(\"cannot upload %r\" % (localpath,))", "async def _upload(self) -> None:\n\n # filename given?\n filename = str(uuid.uuid4()) if self.filename is None else self.filename\n\n # check\n if self._upload_path is None:\n raise ValueError(\"No upload URL given.\")\n\n # send data and return image ID\n async with aiohttp.ClientSession() as session:\n data = aiohttp.FormData()\n data.add_field(\"file\", self._buffer, filename=self.filename)\n async with session.post(self._upload_path, auth=self._auth, data=data, timeout=self._timeout) as response:\n if response.status == 401:\n log.error(\"Wrong credentials for uploading file.\")\n raise FileNotFoundError\n elif response.status != 200:\n log.error(f\"Could not upload file to filecache: {response.status} {response.reason}\")\n raise FileNotFoundError", "def node_file_upload(ctx, file, filename):\n filepath = click.format_filename(file)\n filename = filename if filename else filepath\n\n with open(file, \"r\") as f:\n content = f.read()\n\n try:\n ctx.obj['node'].create_file(filename, content=content)\n except TimeoutError as e:\n logger.error('Error: %s' % e)\n exit(1)", "def post(cls, flow_name: str):\n data = file_schema.load(request.files) # {\"file\": FileStorage}\n try:\n file_path = uploads.save_file(data[\"file\"], folder=flow_name)\n basename = uploads.get_basename(file_path)\n return {\"message\": gettext(\"file_uploaded\").format(basename)}, 200\n \n except UploadNotAllowed:\n extension = uploads.get_extension(data[\"file\"])\n return {\"message\": gettext(\"file_illegal_extension\").format(extension)}, 400", "def _do_post(self, url, **kwargs):\n #TODO:\n # Add error handling. 
Check for HTTP status here would be much more conveinent than in each calling method\n scaleioapi_post_headers = {'Content-type':'application/json','Version':'1.0'}\n self.logger.debug(\"_do_post()\")\n\n if kwargs:\n for key, value in kwargs.iteritems():\n if key == 'headers':\n scaleio_post_headers = value\n print \"Adding custom POST headers\"\n if key == 'files':\n upl_files = value\n print \"Adding files to upload\"\n try:\n response = self._session.post(url, headers=scaleioapi_post_headers, verify_ssl=self._im_verify_ssl, files=upl_files)\n self.logger.debug(\"_do_post() - Response: \" + \"{}\".format(response.text))\n if response.status_code == requests.codes.ok:\n return response\n else:\n self.logger.error(\"_do_post() - Response Code: \" + \"{}\".format(response.status_code))\n raise RuntimeError(\"_do_post() - HTTP response error\" + response.status_code)\n except:\n raise RuntimeError(\"_do_post() - Communication error with ScaleIO gateway\")\n return response", "def handle_upload(f, attrs):\n\n # chunked = False\n print 'UPLOAD DIRECTORY:', UPLOAD_DIRECTORY\n dest_folder = os.path.join(UPLOAD_DIRECTORY, attrs['qquuid'])\n dest = os.path.join(dest_folder, attrs['qqfilename'])\n save_upload(f, dest)", "def post_multipart(host, selector, fields, files):\n content_type, body = encode_multipart_formdata(fields, files)\n h = httplib.HTTP(host)\n h.putrequest('POST', selector)\n h.putheader('content-type', content_type)\n h.putheader('content-length', str(len(body)))\n h.endheaders()\n print content_type\n h.send(body)\n errcode, errmsg, headers = h.getreply()\n return h.file.read()", "def upload_file_helper(CREATED_BY, remote_file, obj):\n try:\n\n upload_file(CREATED_BY, remote_file,\n filename=obj['display_name'],\n file_extension=obj['file_extension'],\n description=obj['description'],\n display_name=obj['display_name'],\n data_id=obj['data_id'],\n format_id=obj['format_id'],\n status=obj['status'],\n topic_id=obj['topic_id'],\n is_public=obj['is_public'],\n is_in_spell=obj['is_in_spell'],\n is_in_browser=obj['is_in_browser'],\n file_date=obj['file_date'],\n readme_file_id=obj['readme_file_id'],\n source_id=obj['source_id']\n )\n except Exception as e:\n logging.error(\"Exception occurred\", exc_info=True)", "def upload_file( processor, user, local_path ):\n operations.publish_work_item(\n operations.create_asset_from_file(\n file_name = local_path,\n owner = user,\n producer = processor,\n child_number = 0,\n asset_class = models.AssetClass.UPLOAD ))", "def _upload_file_to_file_system(upload_details):\n upload_url = \"%s%s\" % (main_url, upload_details['upload_path'])\n fsysparams = {\n 'qqfile': upload_filepath,\n 'import_record': upload_dataset_id,\n 'source_type': upload_datatype\n }\n return requests.post(upload_url,\n params=fsysparams,\n files={'file': open(upload_filepath, 'rb')},\n headers=upload_header)", "def upload_file(self, local_absolute_filepath, upload_filename):\n url = self.base_url + \"/oasis/doTaskUploadFileHelper/\"\n\n with open(local_absolute_filepath) as f:\n in_file = f\n response = self.do_request(\n url,\n in_file_dict={upload_filename:in_file}\n )\n return response", "def handle_upload(f, attrs):\n\n # chunked = False\n dest_folder = os.path.join(app.config['UPLOAD_DIRECTORY'], attrs['qquuid'])\n dest = os.path.join(dest_folder, attrs['qqfilename'])\n save_upload(f, dest)", "def handle_request(self,host,path,data=b''):\n\t\tif data:\n\t\t\tself.response_code(4,\"Uploads are not accepted.\")\n\t\t\treturn\n\t\tif not 
hasattr(self,\"root\"):\n\t\t\tself.response_code(5,\"Server is unable to handle requests at this time due to misconfiguration.\")\n\t\t\treturn\n\t\tself.root = os.path.abspath(self.root)\n\t\tif not (prefix:=os.path.abspath(os.path.join(self.root,host))).startswith(self.root):\n\t\t\tself.response_code(4,\"Cowardly refusing to serve file outside of root.\")\n\t\t\treturn\n\t\tif not (filepath:=os.path.abspath(os.path.join(prefix,unquote(path.lstrip(\"/\"))))).startswith(prefix):\n\t\t\tself.response_code(4,\"Cowardly refusing to serve file outside of root.\")\n\t\t\treturn\n\t\tif not os.path.exists(filepath):\n\t\t\tself.response_code(4,\"Not Found\")\n\t\t\treturn\n\t\tif os.path.isdir(filepath):\n\t\t\tif os.path.exists(os.path.join(filepath,\"index.gmi\")):\n\t\t\t\tfilepath = os.path.join(filepath,\"index.gmi\")\n\t\t\telse:\n\t\t\t\tself.response_code(5,\"Cowardly refusing to generate folder listing.\")\n\t\t\t\treturn\n\t\text = os.path.splitext(filepath)[1]\n\t\tmimetype = mimetypes.guess_type(filepath,False)\n\t\tif ext in self.OVERRIDE_MIMETYPES:\n\t\t\tmimetype = self.OVERRIDE_MIMETYPES[ext]\n\t\tmimetype = mimetype or \"application/octet-stream\"\n\t\twith open(filepath,\"rb\") as f:\n\t\t\tself.response_code(2,mimetype)\n\t\t\tshutil.copyfileobj(f,self.wfile)", "def post(self):\n if validate(request.form):\n handle_upload(request.files['qqfile'], request.form)\n filepath = 'static/images/{}/{}'.format(request.form['qquuid'], request.form['qqfilename'])\n session['img_upload_filepath'] = filepath\n return make_response(200, {\"success\": True})\n else:\n return make_response(400, {\"error\": \"Invalid request\"})", "def upload_file(file_directory, file_path, login_request, user_id):\n\n data, headers, server_host, server_port = process_request_header(file_directory, file_path, login_request, user_id)\n\n request = requests.post(\"http://\" + server_host + \":\" + server_port + \"/fileOperations/uploadFile\", data=data,\n headers=headers)\n return request.text", "def post_multipart(url, fields, files=()):\r\n content_type, data = encode_multipart_formdata(fields, files)\r\n url_parts = urlparse.urlparse(url)\r\n if url_parts.scheme == 'http':\r\n h = httplib.HTTPConnection(url_parts.netloc)\r\n elif url_parts.scheme == 'https':\r\n h = httplib.HTTPSConnection(url_parts.netloc)\r\n else:\r\n raise Exception('Unsupported URL scheme')\r\n path = urlparse.urlunparse(('', '') + url_parts[2:])\r\n h.request('POST', path, data, {'content-type':content_type})\r\n return h.getresponse().read()", "def upload():\n\n # TODO: decorator to check token\n token = request.headers.get(\"Authorization\")\n\n has_text = bool(request.get_json())\n has_file = request.files and request.files[\"file\"]\n if not has_text and not has_file:\n error = \"No text input and no file provided\"\n return jsonify({\"success\": False, \"message\": error})\n\n filename, error = save_text(request)\n if error:\n return jsonify({\"success\": False, \"message\": error})\n\n job_id = schedule(filename, token)\n add_user_job(job_id, token)\n\n return jsonify({\"success\": True, \"data\": {\"jobId\": job_id}})", "def handle_request_upload(self, msg):\n\n\t\tdirect_response = not msg.arguments or msg.arguments[0] in ('', '/')\n\t\tresult = []\n\t\tfor file_obj in msg.options:\n\t\t\ttmpfilename, filename, name = file_obj['tmpfile'], file_obj['filename'], file_obj['name']\n\n\t\t\t# limit files to tmpdir\n\t\t\tif not os.path.realpath(tmpfilename).startswith(TEMPUPLOADDIR):\n\t\t\t\traise BadRequest('invalid file: invalid 
path')\n\n\t\t\t# check if file exists\n\t\t\tif not os.path.isfile(tmpfilename):\n\t\t\t\traise BadRequest('invalid file: file does not exists')\n\n\t\t\t# don't accept files bigger than umc/server/upload/max\n\t\t\tst = os.stat(tmpfilename)\n\t\t\tmax_size = int(ucr.get('umc/server/upload/max', 64)) * 1024\n\t\t\tif st.st_size > max_size:\n\t\t\t\tos.remove(tmpfilename)\n\t\t\t\traise BadRequest('filesize is too large, maximum allowed filesize is %d' % (max_size,))\n\n\t\t\tif direct_response:\n\t\t\t\twith open(tmpfilename) as buf:\n\t\t\t\t\tb64buf = base64.b64encode(buf.read())\n\t\t\t\tresult.append({'filename': filename, 'name': name, 'content': b64buf})\n\n\t\tif direct_response:\n\t\t\tself.finished(msg.id, result)\n\t\telse:\n\t\t\tself.handle_request_command(msg)", "def upload_file(bucket, local_file_path, remote_destination_path):\n bucket = get_bucket(bucket)\n k = Key(bucket)\n k.key = remote_destination_path\n k.set_contents_from_filename(local_file_path)", "def upload(api_token, base_url, upload_file, metadata):\n\n upload_url = f\"{base_url}data_files/api_create?auth_token={api_token}\"\n files = {'file': open(upload_file, 'rb')}\n response = requests.post(upload_url, files=files, data=metadata)\n\n # Print out the outcome of the upload\n if response.status_code == 200:\n print(f'File {upload_file} successfully uploaded to HIEv')\n else:\n print(\n f'ERROR - There was a problem uploading file {upload_file} to HIEv')", "def upload(upload_url: str, file_path: str) -> None:\n with open(file_path, 'r') as data:\n try:\n r = requests.put(\n upload_url,\n data=data,\n headers={\"Content-Type\": \"application/octet-stream\"},\n )\n r.raise_for_status()\n except requests.exceptions.HTTPError as err:\n print('Error uploading file')\n raise err", "def upload_post(path: Path, email: str, password: str, host_url: str):\n click.echo(f\"upload.py on path={path}\")\n try:\n post = client_util.load_post(path)\n except ValueError as e:\n click.echo(e, err=True)\n sys.exit(1)\n\n client = client_util.make_client(host_url, email, password)\n\n # TODO: should file upload reject duplicates explicitly?\n # TODO: how to catch failure?\n featured_path = (path / post.metadata.image).resolve()\n print(f\"Uploading featured image {featured_path}...\")\n with open(featured_path, mode=\"rb\") as contents:\n featured_id = client_util.upload_file(\n client, UploadFile(contents, featured_path.name)\n ).id\n\n banner_path = (path / post.metadata.banner).resolve()\n print(f\"Uploading banner image {banner_path}...\")\n with open(banner_path, mode=\"rb\") as contents:\n banner_id = client_util.upload_file(\n client, UploadFile(contents, banner_path.name)\n ).id\n\n thumbnail_path = (path / post.metadata.thumbnail).resolve()\n print(f\"Uploading thumbnail image {thumbnail_path}...\")\n with open(thumbnail_path, mode=\"rb\") as contents:\n thumbnail_id = client_util.upload_file(\n client, UploadFile(contents, thumbnail_path.name)\n ).id\n\n click.echo(\"Creating post...\")\n res_create = api_create_post.sync_detailed(\n client=client,\n json_body=PostPostsJsonBody(\n slug=post.metadata.slug if post.metadata.slug else UNSET,\n title=post.metadata.title if post.metadata.title else UNSET,\n byline=post.metadata.byline if post.metadata.byline else UNSET,\n featured_image=featured_id if featured_id else UNSET,\n banner_image=banner_id if banner_id else UNSET,\n thumbnail_image=thumbnail_id if thumbnail_id else UNSET,\n ),\n )\n if res_create.status_code != HTTPStatus.CREATED:\n click.echo(f\"Failed with 
content={res_create.content}\", err=True)\n sys.exit(1)\n post_id = res_create.parsed.id\n click.echo(f\"Post created with id={post_id}\")\n\n for tag_slug in post.metadata.tags:\n click.echo(f'Adding tag \"{tag_slug}\"')\n res_add_tag = api_add_tag_to_post.sync_detailed(\n post_id,\n client=client,\n json_body=PostPostsPostIdTagsJsonBody(tag_slug),\n )\n if res_add_tag.status_code != HTTPStatus.NO_CONTENT:\n click.echo(f\"Warning: failed with content={res_add_tag.content}\", err=True)\n\n with open(post.md_path, encoding=\"utf-8\", errors=\"strict\") as markdown_file:\n post_md = markdown_file.read()\n # Get the list of image filenames referenced in the Markdown\n for filename in renderer.find_images(post_md):\n # Resolve absolute path\n full_path = (path / filename).resolve()\n # Upload image and get its online filename\n click.echo(f\"Uploading image {full_path}...\")\n with open(full_path, mode=\"rb\") as contents:\n new_filename = client_util.upload_file(\n client, UploadFile(contents, full_path.name)\n ).filename\n # Update Markdown to use the new filename TODO: the naive find-and-replace is risky!\n post_md = post_md.replace(filename, new_filename)\n\n click.echo(\"Uploading Markdown...\")\n res_content = api_set_post_content.sync_detailed(\n post_id,\n client=client,\n multipart_data=PostPostsPostIdContentMultipartData(\n UploadFile(post_md, \"post.md\")\n ),\n )\n if res_content.status_code != HTTPStatus.NO_CONTENT:\n click.echo(f\"Failed with content={res_content.content}\", err=True)\n sys.exit(1)", "def post(self, request, *args, **kw):\n logger.debug(\"POST request on UploadHandler\")\n try:\n if request.data is None or 'site' not in request.data or 'data' not in request.data:\n raise BadRequestException(\"Empty data or site\")\n site = get_object_or_404(Site, pk=request.data.get('site', None))\n logger.error('POST request at UploadHandler for site {}'.format(site))\n if request.user is None or not request.user.profile.canUpload or not site.isActive:\n raise BadRequestException(\"Unauthorized\")\n Timer(0, lambda: UploadHandler._save_to_database(request)).start()\n return Response(None, status=status.HTTP_204_NO_CONTENT)\n except BadRequestException as ex:\n logger.debug(ex)\n return Response(status=status.HTTP_400_BAD_REQUEST, data=ex.strerror)\n except Exception as ex:\n logger.error(ex)\n return Response(status=status.HTTP_500_INTERNAL_SERVER_ERROR, data=ex.strerror)", "def _upload_file(cluster_connection, girder_client, file, path):\n\n r = requests.get(\n '%s/file/%s/download' % (girder_client.urlBase, file['_id']),\n headers={'Girder-Token': girder_client.token}, stream=True)\n check_status(r)\n cluster_connection.put(r.raw, os.path.join(path, file['name']))", "def upload():\n form = request.form\n\n # Create a unique \"session ID\" for this particular batch of uploads.\n upload_key = str(uuid4())\n\n # Is the upload using Ajax, or a direct POST by the form?\n is_ajax = False\n if form.get(\"__ajax\", None) == \"true\":\n is_ajax = True\n\n # Target folder for these uploads.\n target = app.config['UPLOAD_FOLDER'] + \"/{}\".format(upload_key)\n try:\n os.mkdir(target)\n except:\n if is_ajax:\n return ajax_response(False, \"Couldn't create upload directory: {}\".format(target))\n else:\n return \"Couldn't create upload directory: {}\".format(target)\n\n for image_upload in request.files.getlist(\"file\"):\n filename = secure_filename(image_upload.filename)\n destination = \"/\".join([target, filename])\n print(\"Accept incoming file:\", filename)\n print(\"Save it to:\", 
destination)\n image_upload.save(destination)\n upload_image.delay(destination)\n\n if is_ajax:\n return ajax_response(True, upload_key)\n else:\n return redirect(\"/\")", "def send_data(self, fp, dest: PathLike, force: bool = False):", "def upload_file():\r\n # Define an image object\r\n image_path = r'F:\\Testing_Development\\Projects\\Interface_requests\\Interface_requests\\upload_files\\Napoleon Bonaparte.jpg'\r\n file = {'file': open('Napoleon Bonaparte.jpg', 'rb')}\r\n # response = requests.post(base_url + '/post', files=file, timeout=3)\r\n response = requests.post(base_url + '/post', files=file)\r\n print(response.status_code)\r\n print(response.text)", "def multipart_post(host, selector, fields, files):\n content_type, body = encode_multipart_data(fields, files)\n h = httplib.HTTP(host)\n h.putrequest('POST', selector)\n h.putheader('content-type', content_type)\n h.putheader('content-length', str(len(body)))\n h.endheaders()\n h.send(body)\n errcode, errmsg, headers = h.getreply()\n return h.file.read()", "def post(self):\n postUrl = 'http://' + self.ws + ':80/cgi-bin/post.py'\n\n # Create the form with simple fields\n logform = MultiPartForm()\n logfilename = string.rsplit(self.fullLogFile, '/', 1)[1]\n logform.add_file('file', logfilename, open(self.fullLogFile))\n body = str(logform)\n\n # Build the request\n request = urllib2.Request(postUrl)\n request.add_header('Content-type', logform.get_content_type())\n request.add_header('Content-length', len(body))\n request.add_data(body)\n\n # print request.get_data()\n urllib2.urlopen(request).read()\n\n htmlFile = self.format_html()\n htmlform = MultiPartForm()\n htmlfilename = string.rsplit(htmlFile, '/', 1)[1]\n htmlform.add_file('file', htmlfilename, open(htmlFile))\n\n request = urllib2.Request(postUrl)\n body = str(htmlform)\n request.add_header('Content-type', htmlform.get_content_type())\n request.add_header('Content-length', len(body))\n request.add_data(body)\n # request.get_data()\n response = urllib2.urlopen(request)\n data = response.read()\n\n s = re.search(\"^file location: (.+)\", data, re.MULTILINE)\n location = s.group(1)\n\n print \"http://%s%s\\n\" % (self.ws, location)", "def upload_file_handle(self,\n path: str,\n content_type: str,\n *,\n generate_preview: bool = False,\n storage_location_id: int = SYNAPSE_DEFAULT_STORAGE_LOCATION_ID,\n use_multiple_threads: bool = True) -> dict:\n validate_type(str, path, \"path\")\n validate_type(str, content_type, \"content_type\")", "def upload_file(local_file, remote_file, key_filename=None, hostname=None, username=None) -> None:\n with get_sftp_session(hostname=hostname, username=username, key_filename=key_filename) as sftp:\n _upload_file(sftp, local_file, remote_file)", "def post(self):\n if validate(request.form):\n handle_upload(request.files['qqfile'], request.form)\n return make_response(200, {\"success\": True})\n else:\n return make_response(400, {\"error\": \"Invalid request\"})", "def upload_file(filename, server, account, projname, language=None,\n username=None, password=None,\n append=False, stage=False, date_format=None):\n stream = transcode_to_stream(filename, date_format)\n upload_stream(stream_json_lines(stream),\n server, account, projname, language=language,\n username=username, password=password,\n append=append, stage=stage)", "def on_post(self, req, resp, account, container):\n _handle_script_upload(req, resp, account, container)", "def upload(self, localfile: str, remotefile: str, overwrite: bool = True, permission: str = '', **kwargs):\n valid = 
self._sb.file_info(remotefile, quiet = True)\n\n if valid is None:\n remf = remotefile\n else:\n if valid == {}:\n remf = remotefile + self._sb.hostsep + localfile.rpartition(os.sep)[2]\n else:\n remf = remotefile\n if overwrite == False:\n return {'Success' : False, \n 'LOG' : \"File \"+str(remotefile)+\" exists and overwrite was set to False. Upload was stopped.\"}\n\n try:\n fd = open(localfile, 'rb')\n except OSError as e:\n return {'Success' : False, \n 'LOG' : \"File \"+str(localfile)+\" could not be opened. Error was: \"+str(e)}\n\n fsize = os.path.getsize(localfile)\n\n if fsize > 0:\n code = \"filename _sp_updn '\"+remf+\"' recfm=N permission='\"+permission+\"';\"\n ll = self.submit(code, 'text')\n log1 = ll['LOG']\n\n self.stdin[0].send(str(fsize).encode()+b'tom says EOL=UPLOAD \\n')\n\n while True:\n buf = fd.read1(32768)\n sent = 0\n send = len(buf)\n blen = send\n if blen == 0:\n break\n while send:\n try:\n sent = 0\n sent = self.stdout[0].send(buf[blen-send:blen])\n except (BlockingIOError):\n pass\n send -= sent\n \n code = \"filename _sp_updn;\"\n else:\n log1 = ''\n code = \"\"\"\n filename _sp_updn '\"\"\"+remf+\"\"\"' recfm=F encoding=binary lrecl=1 permission='\"\"\"+permission+\"\"\"';\n data _null_;\n fid = fopen('_sp_updn', 'O');\n if fid then\n rc = fclose(fid);\n run;\n filename _sp_updn;\n \"\"\"\n\n ll2 = self.submit(code, 'text')\n fd.close()\n\n return {'Success' : True, \n 'LOG' : log1+ll2['LOG']}", "def test_post_file(self):\n self._register_uri(httpretty.POST)\n with open(self.test_file, 'rb') as in_file:\n response = self.client.post(self.test_endpoint,\n files={\"file\": in_file})\n self.assertEqual(response, self.test_data)\n body = str(self._last_request().body)\n self.assertIn(\"Content-Disposition: form-data; \"+\n \"name=\\\"file\\\"; filename=\\\"test_file.txt\\\"\", body)\n self.assertIn(\"Test File\", str(body))", "def upload_file(\n self, bucket_id: uplink.Path, filename: uplink.Path, file: uplink.Body\n ):\n pass", "def upload_file():\n response = Response()\n response.headers.add('Access-Control-Allow-Origin', request.headers.get(\"Origin\", None))\n response.headers.add('Access-Control-Allow-Methods', 'GET,POST')\n response.headers.add('Access-Control-Max-Age', '21600')\n response.headers.add('Access-Control-Allow-Credentials', 'true')\n response.headers.add('Access-Control-Allow-Headers', \"Origin, X-Requested-With, Content-Type, Accept\")\n\n size = 256, 256\n if request.method == 'POST':\n file = request.files['file']\n if file and allowed_file(file.filename):\n # Prevent dir traversal/NUL byte injection\n filename = secure_filename(file.filename)\n\n if not os.path.exists(app.config['UPLOAD_FOLDER']):\n os.makedirs(app.config['UPLOAD_FOLDER'])\n\n file.save(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n im = Image.open(os.path.join(app.config['UPLOAD_FOLDER'], filename))\n im.thumbnail(size, Image.ANTIALIAS)\n im.save(app.config['UPLOAD_FOLDER'] + '/small_' + filename, \"PNG\")\n _upload_to_s3(filename)\n _upload_to_s3('small_{}'.format(filename))\n return response", "def upload(self, endpoint, path, name=None, mime_type=None):\n url = f\"{self.url}/fileuploads/{endpoint}\"\n self.log.debug(f\"uploading: {url!r}: {path!r}\")\n \n # determine filename (if unspecified)\n if not name:\n name = os.path.basename(path) \n \n # NOTE: JSS requires filename extension (or upload will fail)\n if not os.path.splitext(name)[1]:\n raise APIError(response, f\"missing file extension: {path!r}\")\n \n # determine mime-type of file (if 
unspecified)\n if not mime_type:\n mime_type = mime_type(path)\n \n with open(path, 'rb') as f:\n # Example of posted data:\n # {'name': ('example.png', \n # <_io.BufferedReader name=\"./example.png\">,\n # 'image/png')}\n files = {'name': (name, f, mime_type)}\n self.log.debug(f\"files: {files}\")\n response = self.session.post(url, files=files)\n \n if response.status_code != 201:\n err = f\"PUT failed: {url}: {response}\"\n self.log.error(err)\n self.log.debug(f\"TEXT: {response.text}\")\n raise APIError(response, err)", "def upload_file(self, session, output, serverdir):\n name = output.metadata['filename']\n self.log.debug(\"uploading %r to %r as %r\",\n output.file.name, serverdir, name)\n\n kwargs = {}\n if self.blocksize is not None:\n kwargs['blocksize'] = self.blocksize\n self.log.debug(\"using blocksize %d\", self.blocksize)\n\n upload_logger = KojiUploadLogger(self.log)\n session.uploadWrapper(output.file.name, serverdir, name=name,\n callback=upload_logger.callback, **kwargs)\n path = os.path.join(serverdir, name)\n self.log.debug(\"uploaded %r\", path)\n return path", "def upload(self,filename,remotefolder=None):\n\n if remotefolder == None:\n remotefolder = self.remotefolder\n\n self.status = 0\n\n try:\n self.ftp.cwd(remotefolder)\n\n self.file = open(filename, 'rb')\n\n (head, tail) = os.path.split(filename)\n\n command = \"STOR \" + tail\n\n print('Uploading: ' + tail)\n self.ftp.storbinary(command, self.file)\n print('Upload Completed')\n\n except ftplib.all_errors:\n print('Error Uploading ' + tail)\n self.status = 1\n return self.status\n\n self.file.close()\n\n #back to initial directory in __init__()\n self.ftp.cwd(self.remotefolder)\n\n return self.status", "def upload_file():\r\n if not LOGGEDIN:\r\n return render_template(\"login_temp.html\", msg=\"\")\r\n\r\n if request.method == 'POST':\r\n firstname = flask.request.form[\"firstname\"]\r\n lastname = flask.request.form[\"lastname\"]\r\n city = flask.request.form[\"city\"]\r\n state = flask.request.form[\"state\"]\r\n status = flask.request.form[\"status\"]\r\n date = flask.request.form[\"date\"]\r\n photo = flask.request.form[\"photo\"]\r\n\r\n f_d = open(\"users/\" + lastname + firstname + \".txt\", \"a\")\r\n f_d.write(firstname + \"\\n\")\r\n f_d.write(lastname + \"\\n\")\r\n f_d.write(city + \"\\n\")\r\n f_d.write(state + \"\\n\")\r\n f_d.write(status + \"\\n\")\r\n f_d.write(date + \"\\n\")\r\n f_d.write(photo + \"\\n\")\r\n f_d.close()\r\n return render_template(\"home.html\")\r\n else:\r\n return render_template('check_in.html')", "def post(file: str, data: bytes, addr: tuple):\n assert (\n type(file) == str or type(addr) == tuple or type(data) == bytes\n ), \"Invalid Parameter Types\"\n request = pickle.dumps([\"post\", file])\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect(addr)\n s.sendall(request)\n s.recv(1)\n data = bdtp.new_send_data_port(data, (\"\", 0))\n data.send(s)\n s.close()", "def upload_file(self):\n request = copy.deepcopy(self.request_template)\n data = json.dumps(request)\n curr_file = {\n 'request': data,\n 'file': open(self.file_path, 'rb')\n }\n print(\"Sending Upload request of av for file {}\".format(self.file_name))\n try:\n response = requests.post(url=self.url + \"upload\", files=curr_file, verify=False)\n except Exception as E:\n print(\"Upload file failed. 
file: {} , failure: {}\".format(self.file_name, E))\n raise\n response_j = response.json()\n print(\"av Upload response status for file {} : {}\".format(self.file_name,\n response_j[\"response\"][0][\"status\"][\"label\"]))\n return response_j", "def upload(request):\n if request.method != \"POST\":\n return probe(request)\n\n md5chunk = request.args.get('md5chunk', False)\n md5total = request.args.get('md5total', False)\n\n chunk = int(request.args.get('chunk', 0))\n chunks = int(request.args.get('chunks', 0))\n\n if md5chunk and md5total:\n filename = upload_with_checksum(request, md5chunk, md5total, chunk, chunks)\n else:\n filename = upload_simple(request, chunk)\n\n return Response('%s uploaded' % filename)", "def do_upload(ftp):\n # Active (PORT), Passive (PASV), ExtActive (EPRT), or ExtPassive (EPSV)?\n output, sock, transfer_type = get_transfer_output_and_socket(ftp)\n print_debug(output + \"\\n\")\n\n # What file to upload?\n local_file = raw_input(\"What local file do you want to upload?\\n> \")\n is_file = os.path.isfile(local_file)\n while not local_file or not is_file:\n if not is_file:\n print(\"File not found.\")\n local_file = raw_input(\"What local file do you want to upload?\\n> \")\n is_file = os.path.isfile(local_file)\n # What to save file as?\n remote_path = raw_input(\"What do you want to name the remote file?\\n> \")\n while not remote_path:\n remote_path = raw_input(\"What do you want to name the remote file?\\n> \")\n try:\n msg_rec, data_rec = ftp.stor_cmd(sock, local_file, remote_path, transfer_type)\n print_debug(str(data_rec))\n except Exception as e:\n print(\"An error has occurred: \" + str(e) + \"\\nPlease try again.\")\n return main_menu(ftp)\n main_menu(ftp)", "def put(host, username, localpath, remotepath=None, port=22):\n log = logging.getLogger('device.remotecall')\n log.info('sending file from local:%s -> %s', localpath, remotepath)\n if not remotepath:\n remotepath = os.path.split(localpath)[1]\n cmd = 'scp -P %s %s %s@%s:%s' % (port, localpath, username, host, remotepath)\n try:\n null = open('/dev/null', 'w')\n subprocess.call(shlex.split(cmd), stdin=subprocess.PIPE, stdout=null, stderr=null)\n null.close()\n except Exception as e:\n log.debug('Could not send %s file to %s: Error %s', localpath, host, e)", "def UploadFile(filename, file_id, content, is_binary, status, is_base):\r\n file_too_large = False\r\n if is_base:\r\n type = \"base\"\r\n else:\r\n type = \"current\"\r\n if len(content) > MAX_UPLOAD_SIZE:\r\n print (\"Not uploading the %s file for %s because it's too large.\" %\r\n (type, filename))\r\n file_too_large = True\r\n content = \"\"\r\n checksum = md5(content).hexdigest()\r\n if options.verbose > 0 and not file_too_large:\r\n print \"Uploading %s file for %s\" % (type, filename)\r\n url = \"/%d/upload_content/%d/%d\" % (int(issue), int(patchset), file_id)\r\n form_fields = [(\"filename\", filename),\r\n (\"status\", status),\r\n (\"checksum\", checksum),\r\n (\"is_binary\", str(is_binary)),\r\n (\"is_current\", str(not is_base)),\r\n ]\r\n if file_too_large:\r\n form_fields.append((\"file_too_large\", \"1\"))\r\n if options.email:\r\n form_fields.append((\"user\", options.email))\r\n ctype, body = EncodeMultipartFormData(form_fields,\r\n [(\"data\", filename, content)])\r\n response_body = rpc_server.Send(url, body,\r\n content_type=ctype)\r\n if not response_body.startswith(\"OK\"):\r\n StatusUpdate(\" --> %s\" % response_body)\r\n sys.exit(1)", "def upload(self, host, node, local, remote=\"\"):\n cmd = [\"scp\", local, host + 
\":\" + remote]\n if os.path.isdir(local):\n cmd.insert(1, \"-r\")\n pipe = self._issuecommand(cmd)\n return self._waitforall([pipe]) == 0", "def upload_local_file_to_oh(file_path, file_name, file_metadata, access_token, member_id):\r\n try:\r\n with open(file_path, 'rb') as fs:\r\n upload_stream(fs, file_name, file_metadata, access_token)\r\n return True\r\n except:\r\n print(f'Failed to upload {file_path} to OH for OH member {member_id}')\r\n return False" ]
[ "0.68752784", "0.672209", "0.6701313", "0.66228", "0.6602018", "0.6584897", "0.6579177", "0.65558815", "0.6509413", "0.65081227", "0.64988804", "0.6480952", "0.6429412", "0.64271265", "0.6385441", "0.6369564", "0.63390976", "0.63218266", "0.6314229", "0.63084084", "0.6304595", "0.6253425", "0.62463784", "0.6236206", "0.62192416", "0.62155473", "0.6210443", "0.6209265", "0.6194807", "0.6185606", "0.61735946", "0.6172074", "0.6168351", "0.616671", "0.6156829", "0.6151325", "0.61441356", "0.6142351", "0.6140808", "0.61259633", "0.61100036", "0.6109251", "0.6098339", "0.6095151", "0.60935247", "0.60896504", "0.6082085", "0.6079379", "0.6073774", "0.60733616", "0.6071767", "0.60689914", "0.6049927", "0.6031234", "0.60153174", "0.6008854", "0.6001867", "0.5996366", "0.5993383", "0.5988827", "0.5983681", "0.597251", "0.59645176", "0.5963535", "0.5945195", "0.5940251", "0.59353113", "0.59342927", "0.5930132", "0.59257764", "0.5923906", "0.59182084", "0.5900737", "0.5892371", "0.58839446", "0.5879591", "0.5866682", "0.5863279", "0.58622503", "0.5861934", "0.58619195", "0.5854975", "0.58539295", "0.5850481", "0.582908", "0.582782", "0.5824364", "0.5823813", "0.5816019", "0.58148193", "0.5794954", "0.5793965", "0.5791378", "0.5788218", "0.57858175", "0.5778385", "0.5770659", "0.5759371", "0.57581335", "0.57543427" ]
0.577518
96
POST handler that downloads file from remote host.
async def DownloadFromRemoteNodeHandler(request): fileSize = 0 timestampStart = datetime.utcnow() usedHandler = 'downloadFileFromRemoteNode' try: data = await request.post() try: testId = data['testId'] except Exception as e: print('no testId, generating own') testId = str(uuid.uuid4()) try: fileName = data['destinationFileName'] except Exception as e: print('no fileName, generating own') fileName = "".join(choice(allchar) for x in range(randint(5, 5))) try: sourceHost = data['sourceHost'] sourceFileName = data['sourceFileName'] except Exception as e: print('no sourceHost and sourceFileName') fileUrl = 'http://'+sourceHost+'/downloadFile/'+sourceFileName+'/'+testId async with ClientSession() as session: try: async with session.get(fileUrl) as response: async for data in response.content.iter_chunked(1024): fileSize += len(data) timestampEnd = datetime.utcnow() taskDuration = str(timestampEnd - timestampStart) testResults.setdefault(testId, {}).setdefault(usedHandler, {})["fileName"] = fileName testResults.setdefault(testId, {}).setdefault(usedHandler, {})["sourceFileName"] = sourceFileName testResults.setdefault(testId, {}).setdefault(usedHandler, {})["fileSize"] = fileSize testResults.setdefault(testId, {}).setdefault(usedHandler, {})["timestampStart"] = str(timestampStart) testResults.setdefault(testId, {}).setdefault(usedHandler, {})["timestampEnd"] = str(timestampEnd) testResults.setdefault(testId, {}).setdefault(usedHandler, {})["taskDuration"] = taskDuration response_obj = { 'status': 'success', 'message': 'File downloaded', 'testId': testId, 'usedHandler': usedHandler, 'taskDuration': taskDuration, 'fileName': fileName, 'sourceFileName': sourceFileName, 'fileSize': fileSize, 'timestampStart': str(timestampStart), 'timestampEnd': str(timestampEnd) } return web.json_response(response_obj) except Exception as e: response_obj = { 'status' : 'failed', 'message': str(e) } return web.json_response(response_obj) except Exception as e: response_obj = { 'status' : 'failed', 'message': str(e) } return web.json_response(response_obj)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def download_file(self, remote_file):\n remote_file.download()", "def download_file():\n\n if 'POST' == request.method:\n file_id = request.form['file_id']\n else:\n file_id = request.args.get('file_id')\n\n # 1 ==> example_1.tgz\n file_path = file_manager.get_file_path_from_id(file_id)\n print \"serving file: \" + file_path\n return send_file(file_path, as_attachment=True)", "def post_download(self, remote_files):\n pass", "def downloadFile(remote_path, fobj):\n logger.msg(\n \"downloading file\", remote_path=remote_path, function='downloadFile'\n )\n\n def file_writer(data):\n fobj.write(data)\n\n remote_path = remote_path.encode('utf-8')\n r = yield treq.get(remote_path, timeout=5)\n try:\n yield treq.collect(r, file_writer)\n except Exception as e:\n print e\n raise", "def download_file(filename):\n return send_from_directory('uploads', filename, as_attachment=True)", "def https_download_file(**data):\n import os\n import requests\n\n ##minimal data inputs payload\n server_url = data.get('server_url', '')\n file_name = data.get('file_name', '')\n file_path = data.get('file_path', '')\n headers = data.get('headers', '')\n ##extra data inputs payload\n ##\n ##\n\n if server_url==None:\n raise(NameError('No `server URL` specified'))\n \n if file_name==None:\n raise(NameError('No `file_name` specified'))\n\n file_url = os.path.join(server_url,file_name)\n\n if not os.path.exists(file_path):\n os.mkdir(file_path)\n\n full_name = os.path.join(file_path,file_name)\n \n if not os.path.isfile(full_name):\n r = requests.get(file_url, headers=headers)\n if not r.status_code==200: \n raise r.raise_for_status()\n open(full_name , 'wb').write(r.content)\n\n return full_name", "def main(url, localfile):\n ph.download_file(url, localfile)", "def download(connection, priv_key, server_pub_key):\r\n\r\n # Get the filename from the user\r\n file_name = input('What file would you like to download from the server?: ')\r\n\r\n # Tell the server to prepare to download a file\r\n connection.sendall(rsa.encrypt(b'DOWNLOAD', server_pub_key))\r\n\r\n # Send the file name to the server\r\n connection.sendall(rsa.encrypt(file_name.encode(), server_pub_key))\r\n\r\n # Attempt to download the file\r\n try:\r\n shared.download_file(connection, priv_key, file_name)\r\n\r\n # If the server can't find the file that is asked for\r\n except ValueError:\r\n print(''.join(['\\nThe file does not exist']), file=sys.stderr)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", 
"def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def download():\n return response.download(request, db)", "def filedownload(source, destination):\n\n # Initiate the download\n urllib.request.urlretrieve(source, destination)", "def download(self, url: str, dest: PathLike, force: bool = False):", "def on_post(self, req, resp):\n LOGGER = logging.getLogger()\n \n resp.set_header('Content-Type', 'text/json')\n raw_json = req.stream.read().decode('utf-8')\n content = json.loads(raw_json, encoding='utf-8')\n\n try:\n files = content.get(\"files\")\n zip_name = content.get(\"name\")\n zip_file = DownloadFilesResource.compress_files(files, zip_name)\n resp.body = json.dumps({'file': zip_file})\n LOGGER.info(\"Zip created and ready to download\")\n except Exception as e:\n LOGGER.error(\"Error creating zip file\" , exc_info=True)\n raise falcon.HTTPInternalServerError(title=\"Error downloading files: \" + str(type(e)),\n description=(str(e) +\n ','.join(traceback.format_tb(e.__traceback__))))", "def download_file():\n data = c.recv(BUFFER)\n \n if data == b\"terminate\":\n print(\"DOWNLOADING FAILED !!!\")\n return\n\n file = open(FILE_NAME,\"wb\")\n while True:\n if data == b\"DONE\":\n break\n \n print(\"Receiving. . . 
\")\n file.write(data)\n data = c.recv(BUFFER)\n \n file.close()\n print(\"Successfully received!!!\")\n \n print(\"Webpage saved as {} at {}\".format(FILE_NAME, getcwd())) \n return None", "def download():\n\treturn response.download(request, db)", "def download_file(self, parsed_event, input_dir_path):", "def _download(self):\n self._system.download_file(\"http://curl.haxx.se/download/\" + self._tar_name)", "def send_file(cobj, dest, port, fname, hash, handler):\n pass", "def download_uploaded_file(url, user, dst):\n token, _ = Token.objects.get_or_create(user=user)\n parsed = urllib_parse.urlsplit(url)\n upload_uid = TatorCache().get_upload_uid_cache(parsed.path)\n cmd = ['wget',\n f'--header=Authorization: Token {token}',\n f'--header=Upload-Uid: {upload_uid}',\n '-O', f'{dst}',\n f\"{urllib_parse.urljoin('http://nginx-internal-svc', parsed.path)}\"]\n subprocess.run(cmd, check=True)", "def do_POST(self):\r\n content_length = int(self.headers['Content-Length'])\r\n body = self.rfile.read(content_length)\r\n\r\n response = BytesIO()\r\n try:\r\n res = webServer.handle_post_msg(body)\r\n print(res)\r\n self.send_response(200)\r\n except Exception as e:\r\n print(e)\r\n res = str(e)\r\n self.send_response(500)\r\n self.end_headers()\r\n response.write(res.encode())\r\n self.wfile.write(response.getvalue())", "def download(self, method, url, downloader, *args, **kwargs):\n if method.startswith('URL'):\n return downloader.enqueue_file(url, filename=partial(self.mk_filename, *args))\n\n raise NoData", "def download(self, url, destination):\n fileDownloader = utils.HttpFileDownloader(url, destination)\n fileDownloader.download()", "def do_POST(self):\n self.send_response(httplib.OK)\n self.send_header('Content-Type', 'application/octet-stream')\n self.end_headers()\n\n response = remote_api_pb.Response()\n try:\n request = remote_api_pb.Request()\n\n\n\n request.ParseFromString(\n self.rfile.read(int(self.headers['content-length'])))\n api_response = _ExecuteRequest(request).Encode()\n response.set_response(api_response)\n except Exception, e:\n logging.debug('Exception while handling %s\\n%s',\n request,\n traceback.format_exc())\n response.set_exception(pickle.dumps(e))\n if isinstance(e, apiproxy_errors.ApplicationError):\n application_error = response.mutable_application_error()\n application_error.set_code(e.application_error)\n application_error.set_detail(e.error_detail)\n self.wfile.write(response.Encode())", "def download(self, download_path):\n return", "def download_with_callback(self, url, path=None, filename=None, headers=None, force=False, func=None):", "def download_file(cls, uri, fobj):\n msg = \"Backend doesn't implement download_file()\"\n raise NotImplementedError(msg)", "def downloadFile(self, base_url, file_name):\n url = os.path.join(base_url, file_name)\n req = urllib2.Request(url)\n try:\n f = urllib2.urlopen(req, timeout=self.timeout)\n local_file = open(os.path.join(self.config.get('PATHS', 'pdfdir'), file_name), \"w\")\n local_file.write(f.read())\n local_file.close()\n except Exception, err:\n print \"[ Failed ]\"\n print \"\\n***ERROR in downloadFile: %s\" % err\n sys.exit(0)", "def download (httpfile, path_unzip = None, outfile = None) :\n if path_unzip is None : path_unzip = GetPath ()\n file = _check_source (httpfile, path_unzip = path_unzip, outfile = outfile)\n return file", "def do_GET(self):\n self.send_response(200)\n self.send_header(\"Content-type\", self.mimetype)\n self.end_headers()\n\n with open(filename, \"rb\") as file_:\n 
self.wfile.write(file_.read())\n file_.close()", "def download(path):\n return send_from_directory(UPLOAD_DIRECTORY, path, as_attachment=True)", "def download(path):\n return send_from_directory(UPLOAD_DIRECTORY, path, as_attachment=True)", "def __getFile_httplib(self, _src, _dst):\n\n #-------------------- \n # Pre-download callbacks\n #-------------------- \n self.runEventCallbacks('downloadStarted', _src, -1)\n self.runEventCallbacks('downloading', _src, 0)\n\n\n\n #-------------------- \n # Download\n #-------------------- \n response = self.__httpsRequest('GET', _src)\n data = response.read() \n with open(_dst, 'wb') as f:\n f.write(data) \n\n\n\n #-------------------- \n # Post-download callbacks\n #-------------------- \n self.removeFromDownloadQueue(_src)\n self.runEventCallbacks('downloadFinished', _src)", "def _get(self, remote_filename, local_path):\n\n with local_path.open('wb') as local_file:\n file_id = self.get_file_id(remote_filename)\n if file_id is None:\n raise BackendException(\n 'File \"%s\" cannot be downloaded: it does not exist' %\n remote_filename)\n\n response = self.http_client.get(\n self.content_url + '/nodes/' + file_id + '/content', stream=True)\n response.raise_for_status()\n for chunk in response.iter_content(chunk_size=DEFAULT_BUFFER_SIZE):\n if chunk:\n local_file.write(chunk)\n local_file.flush()", "def _download_file(self, report_date):\n fdate = report_date.strftime('%Y-%m-%d')\n ddate = '/'.join(fdate.split('-')[:-1])\n link = FILE_URL % (ddate, fdate)\n name = os.path.basename(urlparse(link).path)\n \n try:\n print ' Accessing %s.' % name\n r = requests.get(link, stream=True)\n r.raise_for_status()\n except RequestException as e:\n status = r.status_code\n \n if status == 404:\n pass\n if status >= 500:\n print ' - Unable to download %s: %s\\n' % (name, e)\n self.failed += 1\n else:\n print ' - Downloading %s.' % name\n fpath = os.path.join(self.path, name)\n \n with open(fpath, 'wb') as f:\n for chunk in r.iter_content(1024):\n f.write(chunk)\n \n self.counts += 1\n print ' - Saved %s.' 
% name", "def download(filename):\n print \"Downloading\", filename\n file_content = urlopen(\n urljoin(URL_PATH, filename)\n )\n write_data_to_file(\n file_content.read(),\n os.path.join(\n '/tmp',\n filename\n )\n )", "def download(handle):\n storage = get_storage()\n # FIXME: 404 if not found or invalid?\n return storage.route(handle)", "def _download(self, url, rel_path):\n \n tmp_dir = \"TMP_DIR=`mktemp -d`;\"\n wget_cmd = [ tmp_dir, \"wget\", \"-nv\", \"-O\", \"$TMP_DIR/archive.tgz\", url, \";\" ]\n wget_cmd = ' '.join(wget_cmd)\n \n mkdir_cmd = \"mkdir -p %s ;\" % (\"./remote_resources/\" + rel_path)\n \n cleandir_cmd = \"rm -Rf %s/* ;\" % (\"./remote_resources/\" + rel_path)\n \n untar_cmd = [ \"tar\", \"xf\", \"$TMP_DIR/archive.tgz\", \"-C\", \"./remote_resources/%s\" % rel_path, \";\" ]\n untar_cmd = ' '.join(untar_cmd)\n \n remove_cmd = \"rm -Rf $TMP_DIR;\"\n \n return self._ssh(' '.join([ wget_cmd, mkdir_cmd, cleandir_cmd, untar_cmd, remove_cmd ]))", "def http_download(url, target_path):\n try:\n resp = urllib2.urlopen(url)\n except urllib2.URLError, e:\n if not hasattr(e, 'code'):\n raise\n resp = e\n if resp.code != 200:\n raise IOError(\"Request url(%s) expect 200 but got %d\" %(url, resp.code))\n\n with open(target_path, 'wb') as f:\n shutil.copyfileobj(resp, f)\n return target_path", "def remote(self, requests, file, remoteHost):\n # Set the source and dest paths\n remote_url = self.base_url + '/remote?file=' + file + \"&host=\" + remoteHost\n\n print(\"Making remote request: \" + remote_url)\n\n r = requests.get(remote_url, max_price=10)\n\n print(\"Remote request completed.\")\n\n return r.json()", "def download_file(self, url, path):\n print('\\tDownloading: ', path)\n with open(path, 'w') as outfile:\n try:\n response = self._http_client.get(url)\n outfile.write(response.text)\n finally:\n response.close()\n outfile.close()\n gc.collect()", "def serve_download(conn, ssn_key, file_name, client_name):\n # read file contents\n file_path = \"{}/{}\".format(client_name, file_name)\n contents = None\n with open(file_path, \"r\") as fileStream:\n buffer = fileStream.read()\n contents = [buffer[0+i:16+i] for i in range(0, len(buffer), 16)]\n # get signal to begin download\n request = aes.decrypt(ssn_key, conn.recv(1024))\n if request != SIG_START:\n conn.sendall(aes.encrypt(ssn_key, SIG_BAD))\n return print(\"Bob: something went wrong with file transfer\")\n print(\"Bob: beginning transfer for {}...\".format(file_name))\n # upload file contents to client\n for i, content in enumerate(contents):\n response = aes.encrypt(ssn_key, content)\n conn.sendall(response)\n if aes.decrypt(ssn_key, conn.recv(1024)) != SIG_GOOD:\n return print(\"Bob: something went wrong with file transfer, exiting...\")\n print(\"Bob: transferring file... 
({}/{})\".format(i+1, len(contents)))\n # send signal that transfer is complete\n request = aes.encrypt(ssn_key, SIG_END)\n conn.sendall(request)\n if aes.decrypt(ssn_key, conn.recv(1024)) != SIG_GOOD:\n return print(\"Bob: something went wrong with file transfer, exiting...\")\n print(\"Bob: successful upload for {}\".format(file_name))", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def download():\n return response.download(request,db)", "def download_file(self, net_id, request_id, file_name):\n current_user_roles = get_user_roles()\n if current_user_roles[\"STFADM\"] or net_id == current_user.net_id:\n try:\n return send_from_directory(\"{0}/user_uploads/{1}/{2}/\".format(self.__APP_PATH__, net_id, request_id),\n \"{0}\".format(secure_filename(file_name)), mimetype=\"blob\")\n except Exception as e:\n print(e)\n return abort(404)\n return abort(403)", "def downloadFile(self, path, out):\n\t\ttry:\n\t\t\tlogger.info(\"downloadFile('%s', ...)\" % (path))\n\n\t\t\t# Downloads from dropbox\n\t\t\t# Manually :( update the metadata cache\n\t\t\tf, metadata = self.client.get_file_and_metadata(path)\n\t\t\tf = f.read()\n\t\t\tlogger.info('* file downloaded')\n\t\t\tself.cache_metadata.setNewValue(path, metadata)\n\t\t\tlogger.info('* metadata updated')\n\t\t\t# Write to tmp file and close\n\t\t\tos.write(out, f)\n\t\t\tlogger.info(\"* file written\")\n\t\t\tos.close(out)\n\t\t\tlogger.info('* file closed')\n\t\t\n\t\t\treturn True\n\t\texcept Exception, e:\n\t\t\tlogger.error(\"Exception %s at downloadFile(%s)\" % (sys.exc_info()[0], path))\n\t\t\tlogger.debug(pformat(sys.exc_info()))\n\t\t\treturn False", "def download(self, path, rev=None, **kwargs):\n return self.get('fileops/download_file', api='CONTENT', params={\n 'root': self.root,\n 'path': path,\n 'rev': rev,\n }, stream=True, timeout=kwargs.get('timeout', 1.5), **kwargs)", "def send(self, request, **kwargs):\n\n # Check that the method makes sense. 
Only support GET\n if request.method not in (\"GET\", \"HEAD\"):\n raise ValueError(f\"Invalid request method {request.method}\")\n\n # Parse the URL\n url_parts = urlparse(request.url)\n\n # Make the Windows URLs slightly nicer\n if is_win32 and url_parts.netloc.endswith(\":\"):\n url_parts = url_parts._replace(path=f\"/{url_parts.netloc}{url_parts.path}\", netloc=\"\")\n\n # Reject URLs with a hostname component\n if url_parts.netloc and url_parts.netloc not in (\"localhost\", \".\", \"..\", \"-\"):\n raise ValueError(\"file: URLs with hostname components are not permitted\")\n\n # If the path is relative update it to be absolute\n if url_parts.netloc in (\".\", \"..\"):\n pwd = os.path.abspath(url_parts.netloc).replace(os.sep, \"/\") + \"/\"\n if is_win32:\n # prefix the path with a / in Windows\n pwd = f\"/{pwd}\"\n url_parts = url_parts._replace(path=urljoin(pwd, url_parts.path.lstrip(\"/\")))\n\n resp = Response()\n resp.url = request.url\n\n # Open the file, translate certain errors into HTTP responses\n # Use urllib's unquote to translate percent escapes into whatever\n # they actually need to be\n try:\n # If the netloc is - then read from stdin\n if url_parts.netloc == \"-\":\n resp.raw = sys.stdin.buffer\n # make a fake response URL, the current directory\n resp.url = \"file://\" + os.path.abspath(\".\").replace(os.sep, \"/\") + \"/\"\n else:\n # Split the path on / (the URL directory separator) and decode any\n # % escapes in the parts\n path_parts = [unquote(p) for p in url_parts.path.split('/')]\n\n # Strip out the leading empty parts created from the leading /'s\n while path_parts and not path_parts[0]:\n path_parts.pop(0)\n\n # If os.sep is in any of the parts, someone fed us some shenanigans.\n # Treat is like a missing file.\n if any(os.sep in p for p in path_parts):\n raise IOError(errno.ENOENT, os.strerror(errno.ENOENT))\n\n # Look for a drive component. If one is present, store it separately\n # so that a directory separator can correctly be added to the real\n # path, and remove any empty path parts between the drive and the path.\n # Assume that a part ending with : or | (legacy) is a drive.\n if path_parts and (path_parts[0].endswith('|') or path_parts[0].endswith(':')):\n path_drive = path_parts.pop(0)\n if path_drive.endswith('|'):\n path_drive = f\"{path_drive[:-1]}:\"\n\n while path_parts and not path_parts[0]:\n path_parts.pop(0)\n else:\n path_drive = ''\n\n # Try to put the path back together\n # Join the drive back in, and stick os.sep in front of the path to\n # make it absolute.\n path = path_drive + os.sep + os.path.join(*path_parts)\n\n # Check if the drive assumptions above were correct. If path_drive\n # is set, and os.path.splitdrive does not return a drive, it wasn't\n # reall a drive. 
Put the path together again treating path_drive\n # as a normal path component.\n if path_drive and not os.path.splitdrive(path):\n path = os.sep + os.path.join(path_drive, *path_parts)\n\n # Use io.open since we need to add a release_conn method, and\n # methods can't be added to file objects in python 2.\n resp.raw = io.open(path, \"rb\")\n resp.raw.release_conn = resp.raw.close\n except IOError as e:\n if e.errno == errno.EACCES:\n resp.status_code = codes.forbidden\n elif e.errno == errno.ENOENT:\n resp.status_code = codes.not_found\n else:\n resp.status_code = codes.bad_request\n\n # Wrap the error message in a file-like object\n # The error message will be localized, try to convert the string\n # representation of the exception into a byte stream\n resp_str = str(e).encode(locale.getpreferredencoding(False))\n resp.raw = BytesIO(resp_str)\n resp.headers['Content-Length'] = len(resp_str)\n\n # Add release_conn to the BytesIO object\n resp.raw.release_conn = resp.raw.close\n else:\n resp.status_code = codes.ok\n\n # If it's a regular file, set the Content-Length\n resp_stat = os.fstat(resp.raw.fileno())\n if stat.S_ISREG(resp_stat.st_mode):\n resp.headers['Content-Length'] = resp_stat.st_size\n\n return resp", "def _download(item):\n\n filename = item.filename()\n filename = os.path.join(item.vdir(), filename)\n logger.info(\"Downloading '%s' to %s\" % (item.show, filename))\n\n f = open(filename, \"wb\")\n\n buf = net.tivoget(item.show.url)\n for chunk in buf:\n f.write(chunk)\n\n f.close()\n\n item.downloaded = True\n item.save()", "def httpretrieve_save_file(url, filename, querydata=None, postdata=None, \\\r\n httpheaders=None, proxy=None, timeout=None):\r\n\r\n # Open the output file object and http file-like object.\r\n outfileobj = open(filename, 'w')\r\n httpobj = httpretrieve_open(url, querydata=querydata, postdata=postdata, \\\r\n httpheaders=httpheaders, proxy=proxy, timeout=timeout)\r\n\r\n # Repeatedly read from the file-like HTTP object into our file, until the\r\n # response is finished.\r\n responsechunkstr = None\r\n while responsechunkstr != '':\r\n responsechunkstr = httpobj.read(4096)\r\n outfileobj.write(responsechunkstr)\r\n\r\n outfileobj.close()\r\n httpobj.close()", "def download_tmp_file(path, filename):\n try:\n _file = file(_path)\n wrapper = FileWrapper(_file)\n\n # use the same type for all files\n response = HttpResponse(wrapper, content_type='text/plain') \n response['Content-Disposition'] = \\\n 'attachment; filename=%s' % unicode(filename)\n response['Content-Length'] = os.path.getsize(_path)\n return response\n except Exception,e:\n logger.exception('could not find attached file object for id: %r', id)\n raise e", "def send_data(self, fp, dest: PathLike, force: bool = False):", "async def _download_remote_file(\n self,\n server_name: str,\n media_id: str,\n ) -> dict:\n\n file_id = random_string(24)\n\n file_info = FileInfo(server_name=server_name, file_id=file_id)\n\n with self.media_storage.store_into_file(file_info) as (f, fname, finish):\n request_path = \"/\".join(\n (\"/_matrix/media/r0/download\", server_name, media_id)\n )\n try:\n length, headers = await self.client.get_file(\n server_name,\n request_path,\n output_stream=f,\n max_size=self.max_upload_size,\n args={\n # tell the remote server to 404 if it doesn't\n # recognise the server_name, to make sure we don't\n # end up with a routing loop.\n \"allow_remote\": \"false\"\n },\n )\n except RequestSendFailed as e:\n logger.warning(\n \"Request failed fetching remote media %s/%s: %r\",\n 
server_name,\n media_id,\n e,\n )\n raise SynapseError(502, \"Failed to fetch remote media\")\n\n except HttpResponseException as e:\n logger.warning(\n \"HTTP error fetching remote media %s/%s: %s\",\n server_name,\n media_id,\n e.response,\n )\n if e.code == twisted.web.http.NOT_FOUND:\n raise e.to_synapse_error()\n raise SynapseError(502, \"Failed to fetch remote media\")\n\n except SynapseError:\n logger.warning(\n \"Failed to fetch remote media %s/%s\", server_name, media_id\n )\n raise\n except NotRetryingDestination:\n logger.warning(\"Not retrying destination %r\", server_name)\n raise SynapseError(502, \"Failed to fetch remote media\")\n except Exception:\n logger.exception(\n \"Failed to fetch remote media %s/%s\", server_name, media_id\n )\n raise SynapseError(502, \"Failed to fetch remote media\")\n\n await finish()\n\n if b\"Content-Type\" in headers:\n media_type = headers[b\"Content-Type\"][0].decode(\"ascii\")\n else:\n media_type = \"application/octet-stream\"\n upload_name = get_filename_from_headers(headers)\n time_now_ms = self.clock.time_msec()\n\n # Multiple remote media download requests can race (when using\n # multiple media repos), so this may throw a violation constraint\n # exception. If it does we'll delete the newly downloaded file from\n # disk (as we're in the ctx manager).\n #\n # However: we've already called `finish()` so we may have also\n # written to the storage providers. This is preferable to the\n # alternative where we call `finish()` *after* this, where we could\n # end up having an entry in the DB but fail to write the files to\n # the storage providers.\n await self.store.store_cached_remote_media(\n origin=server_name,\n media_id=media_id,\n media_type=media_type,\n time_now_ms=self.clock.time_msec(),\n upload_name=upload_name,\n media_length=length,\n filesystem_id=file_id,\n )\n\n logger.info(\"Stored remote media in file %r\", fname)\n\n media_info = {\n \"media_type\": media_type,\n \"media_length\": length,\n \"upload_name\": upload_name,\n \"created_ts\": time_now_ms,\n \"filesystem_id\": file_id,\n }\n\n return media_info", "def download(self, localfile: str, remotefile: str, overwrite: bool = True, **kwargs):\n logf = ''\n logn = self._logcnt()\n logcodei = \"%put E3969440A681A24088859985\" + logn + \";\"\n logcodeo = \"\\nE3969440A681A24088859985\" + logn\n logcodeb = logcodeo.encode()\n\n valid = self._sb.file_info(remotefile, quiet = True)\n\n if valid is None:\n return {'Success' : False, \n 'LOG' : \"File \"+str(remotefile)+\" does not exist.\"}\n\n if valid == {}:\n return {'Success' : False, \n 'LOG' : \"File \"+str(remotefile)+\" is a directory.\"}\n\n if os.path.isdir(localfile):\n locf = localfile + os.sep + remotefile.rpartition(self._sb.hostsep)[2]\n else:\n locf = localfile\n\n try:\n fd = open(locf, 'wb')\n fd.write(b'write can fail even if open worked, as it turns out')\n fd.close()\n fd = open(locf, 'wb')\n except OSError as e:\n return {'Success' : False, \n 'LOG' : \"File \"+str(locf)+\" could not be opened or written to. 
Error was: \"+str(e)}\n\n code = \"filename _sp_updn '\"+remotefile+\"' recfm=F encoding=binary lrecl=4096;\"\n\n ll = self.submit(code, \"text\")\n logf = ll['LOG']\n\n self.stdin[0].send(b'tom says EOL=DNLOAD \\n')\n self.stdin[0].send(b'\\n'+logcodei.encode()+b'\\n'+b'tom says EOL='+logcodeb+b'\\n')\n\n done = False\n datar = b''\n bail = False\n\n while not done:\n while True:\n if os.name == 'nt':\n try:\n rc = self.pid.wait(0)\n self.pid = None\n self._sb.SASpid = None\n return {'Success' : False, \n 'LOG' : \"SAS process has terminated unexpectedly. RC from wait was: \"+str(rc)}\n except:\n pass\n else:\n rc = os.waitpid(self.pid, os.WNOHANG)\n if rc[1]:\n self.pid = None\n self._sb.SASpid = None\n return {'Success' : False, \n 'LOG' : \"SAS process has terminated unexpectedly. RC from wait was: \"+str(rc)}\n\n if bail:\n if datar.count(logcodeb) >= 1:\n break\n try:\n data = self.stdout[0].recv(4096)\n except (BlockingIOError):\n data = b''\n\n if len(data) > 0:\n datar += data\n if len(datar) > 8300:\n fd.write(datar[:8192])\n datar = datar[8192:]\n else:\n sleep(0.1)\n try:\n log = self.stderr[0].recv(4096).decode(self.sascfg.encoding, errors='replace')\n except (BlockingIOError):\n log = b''\n\n if len(log) > 0:\n logf += log\n if logf.count(logcodeo) >= 1:\n bail = True\n done = True\n\n fd.write(datar.rpartition(logcodeb)[0])\n fd.flush()\n fd.close()\n\n self._log += logf\n final = logf.partition(logcodei)\n z = final[0].rpartition(chr(10))\n prev = '%08d' % (self._log_cnt - 1)\n zz = z[0].rpartition(\"\\nE3969440A681A24088859985\" + prev +'\\n')\n logd = zz[2].replace(\";*\\';*\\\";*/;\", '')\n\n ll = self.submit(\"filename _sp_updn;\", 'text')\n logd += ll['LOG']\n \n return {'Success' : True, \n 'LOG' : logd}", "async def download_file(\n location_id: LocationID,\n file_id: StorageFileID,\n user_id: UserID,\n link_type: LinkType = LinkType.PRESIGNED,\n):", "def download_finish(self, cloud_file):", "def download(path):\n\treturn send_from_directory(\"results\", path, as_attachment=True)", "def download():\n raise NotImplementedError", "def send_file():\n data = ARGS.data\n filename = ARGS.file\n outstream = \"POST||\" + filename + \"||\" + data\n CLIENT_SOCKET.send(outstream.encode())", "def sendRequest(event, context):\n file = event\n print(f\"Processing file: {file['name']}.\")\n\n filename = file['name']\n\n url = 'http://34.123.136.112:5000'\n myobj = {'filename': filename}\n\n x = requests.post(url, data = myobj)\n\n print(x.text)", "def download_files(self):", "def post(self):\n\n upload_files = self.get_uploads('file')\n blob_info = upload_files[0]\n self.redirect('/?upload_info=%s' % urllib.quote(blob_info.filename))", "def download_file(self, url, filename):\n r = requests.get(url, stream=True)\n r.raise_for_status()\n\n with open(filename, 'wb') as f:\n for chunk in r.iter_content():\n if chunk:\n f.write(chunk)\n f.flush()", "def download(self, url, filename):\n print(\"url\", url)\n print(\"filename\", filename)\n # open in binary mode\n with open(filename, \"wb\") as file:\n # get request\n try:\n r = requests.get(url)\n if r.status_code == 404:\n raise NotFoundException(\n \"URL: \", url, \" is not working. 
Status code 404\")\n # write to file\n file.write(r.content)\n print(\"file downloaded\")\n except ConnectionError as ex:\n print(ex)\n except NotFoundException as ex:\n print(ex)\n except Exception as ex:\n print(ex)", "def _download_http(source_uri, dest_path, version):\n\n try:\n logger.info(\"Downloading the dataset.\")\n download_file(source_uri=source_uri, dest_path=dest_path)\n except DownloadError as e:\n logger.info(\n f\"The request download from {source_uri} -> {dest_path} can't \"\n f\"be completed.\"\n )\n raise e\n expected_checksum = GroceriesReal.GROCERIES_REAL_DATASET_TABLES[\n version\n ].checksum\n try:\n validate_checksum(dest_path, expected_checksum)\n except ChecksumError as e:\n logger.info(\"Checksum mismatch. Delete the downloaded files.\")\n os.remove(dest_path)\n raise e", "def download_file(url, fname):\n urllib.request.urlretrieve(url, fname)" ]
[ "0.70488554", "0.7008286", "0.6648603", "0.65757704", "0.6246295", "0.61764085", "0.6117598", "0.611518", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.608899", "0.6073451", "0.60563403", "0.6028612", "0.60241234", "0.6023703", "0.6021517", "0.6014625", "0.6009214", "0.60033023", "0.5943582", "0.5933319", "0.59139717", "0.5909919", "0.59021175", "0.58739036", "0.5872492", "0.58696324", "0.5862277", "0.5849184", "0.5837383", "0.5837383", "0.583645", "0.5816978", "0.581659", "0.5807695", "0.5801353", "0.58012974", "0.58009017", "0.58000386", "0.5790061", "0.5772705", "0.57600737", "0.57600737", "0.57600737", "0.57600737", "0.57600737", "0.57600737", "0.5759733", "0.5749518", "0.5741315", "0.57286406", "0.57244796", "0.5709818", "0.5694729", "0.5690385", "0.5687368", "0.5687021", "0.56799716", "0.56584233", "0.56549895", "0.56542796", "0.5650565", "0.5644878", "0.5641255", "0.56336176", "0.5629423", "0.56282836", "0.56020933", "0.5590981" ]
0.0
-1
calculates the information reduction between layers. This function computes the multiinformation (total correlation) reduction after a linear transformation.
def information_reduction( X: np.ndarray, Y: np.ndarray, uni_entropy: Callable, tol_dims: int, p: float = 0.25, ) -> float: # calculate the marginal entropy hx = jax.vmap(uni_entropy)(X.T) hy = jax.vmap(uni_entropy)(Y.T) # Information content delta_info = np.sum(hy) - np.sum(hx) tol_info = np.sqrt(np.sum((hy - hx) ** 2)) # get tolerance n_dimensions = X.shape[1] # conditional cond = np.logical_or( tol_info < np.sqrt(n_dimensions * p * tol_dims ** 2), delta_info < 0 ) return np.array(np.where(cond, 0.0, delta_info))
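A minimal usage sketch for the information_reduction function above, assuming it runs on JAX with jax.numpy imported as np (consistent with the jax.vmap call). The gaussian_entropy helper and the tol_dims=1 value are illustrative assumptions, not taken from the source library; any vmap-compatible univariate entropy estimator can be passed as uni_entropy.

import jax
import jax.numpy as np

def gaussian_entropy(x):
    # marginal (differential) entropy under a Gaussian assumption: 0.5 * log(2*pi*e*var)
    return 0.5 * np.log(2 * np.pi * np.e * np.var(x))

key_x, key_r = jax.random.split(jax.random.PRNGKey(0))
X = jax.random.normal(key_x, (1000, 5))                 # samples before the linear transform
Q, _ = np.linalg.qr(jax.random.normal(key_r, (5, 5)))   # a random rotation
Y = X @ Q                                               # samples after the linear transform

# tol_dims=1 is a placeholder; the surrounding library presumably calibrates it
# against the sample size. p=0.25 matches the function's default.
delta = information_reduction(X, Y, gaussian_entropy, tol_dims=1, p=0.25)
print(delta)  # ~0.0: a rotation of isotropic Gaussian data barely changes the marginals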
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def MotcorCatenate(self, info, base, anat_tgt):\n# First compute the transformation matrices due to epi-to-epi motion.\n fmt = '3dvolreg -prefix NULL -1Dmatrix_save %s -twopass ' + \\\n '-verbose -base %s+orig[%s] -dfile %s %s+orig'\n cmd = fmt % (info['matfile_m'], info['basefile'], base, \\\n info['mot_file'], info['imgfile_t'])\n self.CheckExec(cmd, [info['matfile_m']])\n\n# Catenate with transformation from epi base image to the anatomical.\n cmd = 'cat_matvec -ONELINE %s -P %s -P > %s' % \\\n (self.info[anat_tgt]['matfile'], info['matfile_m'], \\\n info['matfile_mcat'])\n self.CheckExec(cmd, [info['matfile_mcat']])\n\n# Interpolate the data to the new grid.\n fmt = '3dAllineate -prefix %s -interp cubic -1Dmatrix_apply %s ' + \\\n '-warp shift_rotate -base %s+orig[%s] %s+orig'\n cmd = fmt % (info['imgfile_m'], info['matfile_mcat'], info['basefile'], \\\n base, info['imgfile_t'])\n self.CheckExec(cmd, ['%s+orig.BRIK'%info['imgfile_m'], \\\n '%s+orig.HEAD'%info['imgfile_m']])", "def main():\n feature_extraction_model = \"HOG\"\n # feature_extraction_models = [\"CM\", \"HOG\"]\n feature_extraction_model_1 = \"CM\"\n dimension_reduction_model = \"PCA\"\n k_value = 10\n dim_k_value = 40\n # K_value = 20\n # lab_folder = \"Dataset3/Labelled/Set1\"\n # unlab_folder = \"Dataset3/Unlabelled/Set 2\"\n lab_folder = get_input_folder(\"Labelled Folder\")\n unlab_folder = get_input_folder(\"Classify\")\n start = time.time()\n # ================================================================================================================\n # labelled Images\n dim_red = DimensionReduction(feature_extraction_model, dimension_reduction_model, dim_k_value,\n folder_metadata=lab_folder,\n metadata_collection=\"labelled\")\n obj_feat_lab = dim_red.get_object_feature_matrix()\n features_list_lab = np.array(obj_feat_lab['featureVector'].tolist())\n images_list_lab = np.array(obj_feat_lab['imageId'])\n # filtering the labelled set\n dorsal_list, palmar_list = filter_images_by_label(images_list_lab)\n\n # unlabelled images\n dim_red = DimensionReduction(feature_extraction_model, dimension_reduction_model, dim_k_value,\n folder_metadata=unlab_folder,\n metadata_collection=\"unlabelled\")\n obj_feat_unlab = dim_red.get_object_feature_matrix()\n features_list_unlab = np.array(obj_feat_unlab['featureVector'].tolist())\n images_list_unlab = np.array(obj_feat_unlab['imageId'])\n\n # ================================================================================================================\n # labelled Images\n dim_red = DimensionReduction(feature_extraction_model_1, dimension_reduction_model, dim_k_value,\n folder_metadata=lab_folder,\n metadata_collection=\"labelled\")\n obj_feat_lab_1 = dim_red.get_object_feature_matrix()\n features_list_lab_1 = np.array(obj_feat_lab_1['featureVector'].tolist())\n # images_list_lab = np.array(obj_feat_lab_1['imageId'])\n # filtering the labelled set\n\n\n # unlabelled images\n dim_red = DimensionReduction(feature_extraction_model_1, dimension_reduction_model, dim_k_value,\n folder_metadata=unlab_folder,\n metadata_collection=\"unlabelled\")\n obj_feat_unlab_1 = dim_red.get_object_feature_matrix()\n features_list_unlab_1 = np.array(obj_feat_unlab_1['featureVector'].tolist())\n # images_list_unlab = np.array(obj_feat_unlab['imageId'])\n features_list_lab = np.concatenate((features_list_lab, features_list_lab_1), axis=1)\n features_list_unlab = np.concatenate((features_list_unlab, features_list_unlab_1), axis=1)\n\n # 
================================================================================================================\n\n dorsal_list, palmar_list = filter_images_by_label(images_list_lab)\n features_list = np.concatenate((features_list_lab, features_list_unlab))\n images_list = np.concatenate((images_list_lab, images_list_unlab))\n images_list = list(images_list)\n # Finding Similarity Matrix\n cos_sim = cosine_similarity(features_list)\n sim_graph = np.empty((0, len(cos_sim)))\n for row in cos_sim:\n k_largest = np.argsort(-np.array(row))[1:k_value + 1]\n sim_graph_row = [d if i in k_largest else 0 for i, d in enumerate(row)]\n sim_graph = np.append(sim_graph, np.array([sim_graph_row]), axis=0)\n\n row_sums = sim_graph.sum(axis=1)\n sim_graph = sim_graph / row_sums[:, np.newaxis]\n idx = 0\n results_dorsal = ppr(sim_graph, images_list, dorsal_list)\n results_palmar = ppr(sim_graph, images_list, palmar_list)\n final_results = {}\n\n for img in images_list_unlab:\n if results_dorsal[img] < results_palmar[img]:\n final_results[img] = \"dorsal\"\n else:\n final_results[img] = \"palmar\"\n\n actual_labels = fetch_actual_labels(images_list_unlab)\n print(\"Classification\")\n no_correct = 0\n correctly_classified = []\n incorrectly_classified = []\n print(\"| ImageId | Prediction | Actual |\")\n for r in final_results:\n print(\"| {} | {} | {} |\".format(r, final_results[r], actual_labels[r]))\n if final_results[r] == actual_labels[r]:\n correctly_classified.append(r)\n no_correct += 1\n else:\n incorrectly_classified.append(r)\n\n print(\"Correctly classified: {}\\n\".format(correctly_classified))\n print(\"InCorrectly classified: {}\\n\".format(incorrectly_classified))\n\n print(\"Classification Accuracy: {}%\".format(no_correct / len(images_list_unlab) * 100))\n print(\"Execution time: {} seconds\".format(time.time() - start))", "def implement_processing(self, output_intermediate, processing_matrix, forward = True):\n \n # Attention implementation\n output_intermediate = torch.matmul(output_intermediate, processing_matrix)\n output_intermediate = torch.transpose(output_intermediate,2,1)\n \n # Linear layer output reduction \n # from [batch, hidden_size, input_size - 1] to [batch, hidden_size,1]\n # different linear layers required to ensure good performance\n if forward:\n output_reduced = self.linear_fw(output_intermediate).squeeze(2)\n else:\n output_reduced = self.linear_bw(output_intermediate).squeeze(2)\n \n output_reduced = self._activation_fn(output_reduced)\n \n return output_reduced", "def estimateInfo(Analysis, ImageData, diffract, print_opt=False): \n total_images = ImageData['totalImages'] \n\n for key in Analysis:\n Analysis[key]['cones'] = [1,2,3,4,5,6]\n Analysis[key]['info'] = np.zeros(len(\n Analysis[key]['cones']))\n\n for amp in ImageData['rawAmp']:\n\n for key in Analysis:\n ind = len(diffract['cpd'])\n fooInfo = info.SingleConeEntropyFunc((amp[ind] ** 2 *\n Analysis[key]['retina']), \n Analysis[key]['cones']) \n Analysis[key]['info'] += fooInfo / total_images\n\n if print_opt == True:\n print ' '\n print 'Information'\n print '------------'\n for key in Analysis:\n print key, ': ', Analysis[key]['info']\n\n return Analysis", "def _process_matrices(self, **kwargs):\n\n if self.datasource == 'graph':\n # Must store the adj_matrix\n self.data.adjacencymat = self.data.adj_matrix()\n return self._sum_on_axis(self.data.adjacencymat, **kwargs)\n elif self.datasource == 'custom':\n return self._sum_on_axis(self.data, **kwargs)\n elif self.datasource == 'MRD':\n return {k: 
self._sum_on_axis(self.data.data[k].todense(), undirected=False) for k in self.data.data.keys() if k != 'pk'}", "def sublayer_reconstruct(self, layer, sublayer, timesurface, method,\n noise_ratio, sparsity_coeff, sensitivity):\n if method == \"CG\":\n error = self.sublayer_response_CG(layer, sublayer,\n timesurface,\n sparsity_coeff,\n noise_ratio)\n if method == \"Exp distance\": \n error = self.sublayer_response_exp_distance(layer, sublayer,\n timesurface,\n sparsity_coeff, \n noise_ratio, \n sensitivity)\n if method==\"Dot product\":\n error = self.sublayer_response_dot_product(layer, sublayer,\n timesurface,\n sparsity_coeff, \n noise_ratio, \n sensitivity)\n print(\"Layer activity : \")\n print(self.activations[layer][sublayer]) \n S_tilde = sum([a*b for a,b in zip(self.basis[layer][sublayer],\n self.activations[layer][sublayer])]) \n plt.figure(\"Reconstructed Figure\")\n sns.heatmap(S_tilde)\n print(\"Reconstruction error : \")\n print(error)\n return error", "def cal_topology_feature(self):\n self.NPL()\n self.topo_efficiency_cal()\n self.efficiency_cal()\n self.cluster_cal()\n self.topo_diameter()\n self.spatial_diameter()", "def data_model_residual(surface, dem, unw, incidence):\n los,fem_los,residual = pu.los2pylith(surface,dem,unw,incidence)\n\n # Using image_grid\n fig = plt.figure()\n grid = ImageGrid(fig, 111, # similar to subplot(111)\n nrows_ncols = (1, 3),\n direction=\"row\",\n axes_pad = 0.05,\n add_all=True,\n label_mode = \"1\",\n share_all = True,\n cbar_location=\"top\",\n cbar_mode=\"each\", #\"single\"\n cbar_size=\"5%\",\n cbar_pad=0.05,\n )\n #grid[0].set_xlabel(\"X\")\n #grid[0].set_ylabel(\"Y\")\n #grid2[0].set_xticks([-2, 0])\n #grid2[0].set_yticks([-2, 0, 2])\n\n #NOTE: could find global min/max from three arrays here\n norm = Normalize(vmin=np.nanmin(los), vmax=np.nanmax(los))\n #for ax,data in zip(grid,[los,fem_los,residual]):\n im = grid[0].imshow(los,origin='upper',norm=norm,cmap=plt.cm.jet)\n grid[0].axhline(100,color='m') #show profile\n cax = grid.cbar_axes[0]\n cax.colorbar(im)\n grid[1].axhline(100,color='k') #show profile\n im1 = grid[1].imshow(fem_los,origin='upper',norm=norm,cmap=plt.cm.jet)\n\n cax = grid.cbar_axes[1]\n cax.colorbar(im1)\n\n im2 = grid[2].imshow(residual,origin='upper',cmap=plt.cm.jet)\n cax = grid.cbar_axes[2]\n cax.colorbar(im2)\n\n # Add letter labels\n for ax, label in zip(grid,['A', 'B', 'C']):\n ax.text(0.05, 0.95, label, transform=ax.transAxes,\n fontsize=16, fontweight='bold', va='top')\n\n # Annotate\n # NOTE: way too high!\n #plt.suptitle('FEM Results')\n\n # Add profile\n # NOTE: for now EW, but would be easy to do arbitrary line, and convert to km\n fig = plt.figure()\n #x = arange(los.shape[0])\n plt.axhline(color='k',ls='--')\n plt.plot(los[100],'m.',label='data')\n plt.plot(fem_los[100],'k-',lw=2,label='model')\n plt.xlabel('Distance [km]')\n plt.ylabel('Distance [km]')\n plt.legend(loc='upper left')\n\n plt.show()", "def data_augmentation_and_vectorization(self,imlist, lb,im_labels, average_image = None):\n\t\tX,Y,X_original = [] ,[], []\n\n\t\ti = 0\n\t\tfor im in imlist:\n\t\t\tim=Image.fromarray(im,mode=self.mode)\n\t\t\t#try:\n\t\t\t#im_ini = im\n\t\t\tim_original = np.asarray(im, dtype=theano.config.floatX) / 256.\n\t\t\t#im = self.substract_average_image(im, average_image)\n\t\t\t#print 'i:{} is a: {}' .format(i,im_labels[i])\n\t\t\t#im.show()\n\t\t\tX_original.append(im_original)\n\n\t\t\t#Rotations \n\t\t\t#im_r = im.rotate(15)\n\t\t\t# im_r_2 = im.rotate(-15)\n\t\t\t# im_r_3 = 
im.rotate(180)\n\t\t\t#im_r.show()\n\t\t\t#im_r_2.show()\n\n\t\t\t#Filters\n\t\t\t#im_f = im_ini.filter(ImageFilter.DETAIL)\n\t\t\t#im_f = im.filter(ImageFilter.FIND_EDGES)\n\t\t\t\n\t\t\tif self.mode == 'RGB':\n\t\t\t\tim = np.asarray(im, dtype=theano.config.floatX) / 256.\n\t\t\t\t#Uncomment this if you want to use cross-correlate for 2D arrays http://docs.scipy.org/doc/scipy-0.16.0/reference/generated/scipy.signal.correlate2d.html\n\t\t\t\t# im = np.asarray(im, dtype=theano.config.floatX)\n\t\t\t\t# im = sp.inner(im, [299, 587, 114]) / 1000.0\n\t\t\t\t# im = np.asarray(im, dtype=theano.config.floatX)\n\t\t\t\t# # normalize per http://en.wikipedia.org/wiki/Cross-correlation\n\t\t\t\t# im = (im - im.mean()) / im.std()\n\n\t\t\tif self.mode == 'L':\n\t\t\t\t# im = np.asarray(im, dtype='float64')\n\t\t\t\t# im = filters.sobel(im)\n\t\t\t\t#im = filters.roberts(im)\n\t\t\t\tim = np.asarray(im, dtype=theano.config.floatX) / 256.\n\t\t\t\t#im = np.asarray(im, dtype=theano.config.floatX)\n\n\t\t\t#im = np.asarray(im, dtype=theano.config.floatX)\n\t\t\t\n\t\t\t#im = np.asarray(im, dtype=np.uint8)\n\t\t\t#print im.shape\n\t\t\t#print im.shape\n\t\t\t#im = np.asarray(im, dtype=theano.config.floatX)\n\t\t\t#im = self.flaten_aux(im)\n\t\t\t#print im.shape\n\t\t\t#im = data.coins() # or any NumPy arr\n\t\t\t#print im.shape\n\t\t\t#image = data.coins() # or any NumPy array!\n\t\t\t#print im\n\t\t\t#im = filter.sobel(im)\n\t\t\t#im = filter.roberts(im)\n\n\t\t\t# im_original = sp.inner(im, [299, 587, 114]) / 1000.0\n\t\t\t# im_original = np.asarray(im_original, dtype=theano.config.floatX)\n\t\t\t# # normalize per http://en.wikipedia.org/wiki/Cross-correlation\n\t\t\t# im = (im_original - im_original.mean()) / im_original.std()\n\t\t\t#print im.shape\n\t\t\t#print edges\n\t\t\t# edges = np.asarray(edges, dtype=np.uint8)\n\t\t\t#Image.fromarray(edges,mode=self.mode).show()\n\n\t\t\t#print edges\n\n\t\t\t#im = np.asarray(im, dtype=theano.config.floatX) / 256.\n\n\t\t\t#print edges.shape\n\t\t\t# io.imshow(im)\n\t\t\t# io.show()\n\t\t\t#im = np.asarray(im, dtype=theano.config.floatX)\n\t\t\t\n\t\t\t# plt.suptitle(im_labels[i], size=16)\n\t\t\t# plt.imshow(im, cmap=plt.cm.gray, interpolation='nearest')\n\t\t\t# plt.show()\n\t\t\t#im = np.asarray(im, dtype=theano.config.floatX)\n\t\t\t#print im.shape\n\t\t\t#self.reconstructImage(im).show()\n\n\t\t\t#im_r = np.asarray(im_r, dtype=theano.config.floatX) / 256.\n\t\t\t# im_r_2 = np.asarray(im_r_2, dtype=theano.config.floatX) / 256.\n\t\t\t# im_r_3 = np.asarray(im_r_3, dtype=theano.config.floatX) / 256.\n\t\t\t#im_f = np.asarray(im_f, dtype=theano.config.floatX) / 256.\n\t\t\t\n\t\t\t#im = im.transpose(2, 0, 1)\n\t\t\t#X.append(np.array(im, dtype=theano.config.floatX))\n\t\t\t#X.append(np.array(im_raw, dtype=theano.config.floatX))\n\t\t\t#X.append(im)\n\t\t\tX.append(im)\n\t\t\t# if i % 100 == 0:\n\t\t\t# \tX.append(im)\n\t\t\t#X.append(im_r)\n\t\t\t# X.append(im_r_2)\n\t\t\t# X.append(im_r_3)\n\t\t\t#X.append(im_f)\n\t\t\t#X_original.append(im)\n\n\t\t\t# X.append(np.array(im_r, dtype=theano.config.floatX))\n\t\t\t# X.append(np.array(im_r_2, dtype=theano.config.floatX))\n\n\t\t\t#Uncomment this if you want to work with monochrome\n\t\t\t# im = im.convert('L')\n\t\t\t# pixels_monochrome = np.array(list(im.getdata()), dtype=np.float)\n\t\t\t\t\t\t\n\t\t\t# # scale between 0-1 to speed up computations\n\t\t\t# min_max_scaler = preprocessing.MinMaxScaler(feature_range=(0,1), copy=True)\n\t\t\t# pixels_monochrome = 
min_max_scaler.fit_transform(pixels_monochrome)\n\n\t\t\t# X.append(pixels_monochrome)\n\n\t\t\t#Y.append(lb.transform([im_labels[i]])[0][0])\n\t\t\t#print lb.transform([im_labels[i]])\n\t\t\t\n\t\t\tlabel = lb.transform([im_labels[i]])[0][0]\n\t\t\t#print lb.transform([im_labels[i]])\n\t\t\t# label_vector = lb.transform([im_labels[i]])[0]\n\t\t\t# label = np.where( label_vector == 1 )[0][0]\n\t\t\t# print \"Label: {}\".format(label)\n\t\t\t#print label\n\t\t\t#Y.append(label)\n\t\t\tY.append(label)\n\t\t\t#Y.append(im_labels[i])\t\n\n\t\t\t\n\t\t\t#Y.append(label)\t\n\t\t\t# Y.append(label)\t\n\t\t\t# except Exception, e:\n\t\t\t# \tprint e\n\t\t\t# \t#raise e\n\n\t\t\t# if i == 30:\n\t\t\t# \tbreak\n\n\t\t\ti += 1\n\t\t\tif self.verbose:\n\t\t\t\tsys.stdout.write(\"\\r Process: {0}/{1}\".format(i, len(imlist)))\n\t\t\t\tsys.stdout.flush()\n\t\t\n\t\t# output = open(self.data_path + 'X_original.pkl', 'wb')\n\t\t# cPickle.dump(X_original, output,protocol=-1)\n\t\t# output.close()\n\n\t\treturn X,Y", "def combined_costs(matrix_MSLL_IO):\r\n return", "def mutual_information_3d(self,max_lag,percent_calc=.5,digitize=True):\n\n if digitize:\n M = utilities.mi_digitize(self.X)\n else:\n M = self.X\n\n rs, cs, zs = np.shape(M)\n\n rs_iters = int(rs*percent_calc)\n cs_iters = int(cs*percent_calc)\n\n r_picks = np.random.choice(np.arange(rs),size=rs_iters,replace=False)\n c_picks = np.random.choice(np.arange(cs),size=cs_iters,replace=False)\n\n\n # The r_picks are used to calculate the MI in the columns\n # and the c_picks are used to calculate the MI in the rows\n\n c_mi = np.zeros((rs_iters,max_lag))\n r_mi = np.zeros((cs_iters,max_lag))\n\n for i in range(rs_iters):\n for j in range(max_lag):\n\n rand_z = np.random.randint(0,zs)\n ind = j+1\n unshift = M[r_picks[i],ind:,rand_z]\n shift = M[r_picks[i],:-ind,rand_z]\n c_mi[i,j] = skmetrics.mutual_info_score(unshift,shift)\n\n for i in range(cs_iters):\n for j in range(max_lag):\n\n rand_z = np.random.randint(0,zs)\n ind=j+1\n unshift = M[ind:, c_picks[i],rand_z]\n shift = M[:-ind, c_picks[i],rand_z]\n r_mi[i,j] = skmetrics.mutual_info_score(unshift,shift)\n\n #for the z dimension\n rs,cs = np.where(np.random.rand(rs,cs)<percent_calc)\n z_mi = np.zeros( (len(rs),max_lag) )\n\n for i, (rs,cs) in enumerate(zip(r_picks,c_picks)):\n for j in range(max_lag):\n\n ind=j+1\n\n unshift = M[rs, cs, ind:]\n shift = M[rs, cs, :-ind]\n z_mi[i,j] = skmetrics.mutual_info_score(unshift,shift)\n\n r_mut = np.mean(r_mi,axis=0)\n c_mut = np.mean(c_mi,axis=0)\n z_mut = np.mean(z_mi,axis=0)\n\n return r_mut, c_mut, z_mut", "def apply_per_reducts_cmip6():\n\n #Load the CMIP6 historical\n cubes = iris.load(data_dir+'SO2DMS-em-anthro_input4MIPs_emissions_CMIP_CEDS-v2016-07-26-gr_200001-201412_n48.nc')\n #Get low and high level emissions just in the last year (2014)\n cubes = iris.cube.CubeList([cubes[2],cubes[1]])\n final_cubes = iris.cube.CubeList()\n for cube in cubes:\n final_cube = cube[-12:]\n final_cubes.append(final_cube)\n \n #Get the year-on-year proportional reductions in RCP2.6\n yoy_rates = calc_perc_reducts()\n yoy_rates = np.array(yoy_rates)\n\n #Create coordinates for new nc file between 2014 and 2100\n lat_coord = cubes[0].coord('latitude')\n lon_coord = cubes[0].coord('longitude')\n time_coord = DimCoord(np.arange(95055.,95055.+(2100-2014+1)*360.,30.),standard_name=u'time', units=cf_units.Unit('days since 1750-1-1 00:00:00', calendar='360_day'), long_name=u'time', var_name='time')\n\n #Create the cube date\n cube_data_surf = 
np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n cube_data_high = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n #Set first year equal to 2014 in CMIP6 historical\n cube_data_surf[:12,...] = final_cubes[0].data\n cube_data_high[:12,...] = final_cubes[1].data\n #Apply year on year proportional reductions (globally uniform) from RCP2.6 in 2015 onwards\n for i in range(12,cube_data_surf.shape[0]):\n cube_data_surf[i,...] = cube_data_surf[(i-12),...] * yoy_rates[0,i]\n cube_data_high[i,...] = cube_data_high[(i-12),...] * yoy_rates[1,i]\n #Make the output cubes\n fut_cube_surf = iris.cube.Cube(cube_data_surf,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[0].standard_name, long_name=final_cubes[0].long_name, var_name=final_cubes[0].var_name, units=final_cubes[0].units, attributes=final_cubes[0].attributes)\n fut_cube_high = iris.cube.Cube(cube_data_high,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[1].standard_name, long_name=final_cubes[1].long_name, var_name=final_cubes[1].var_name, units=final_cubes[1].units, attributes=final_cubes[1].attributes)\n\n fut_cube_high.var_name = 'field569_1'\n fut_cube_high.units='kg/m2/s'\n fut_cube_high.long_name ='HIGH LEVEL SO2 EMISSIONS KG/M2/S'\n fut_cube_surf.var_name = 'field569'\n fut_cube_surf.units='kg/m2/s'\n fut_cube_surf.long_name ='SULPHUR DIOXIDE EMISSIONS'\n\n #Load the DMS cube from standard RCP2.6\n dms_cube = iris.load(data_dir+'DMSSO2NH3_18502100_RCP26_monthly.nc')[0]\n iris.coord_categorisation.add_year(dms_cube,'time',name='year')\n dms_cube = dms_cube.extract(iris.Constraint(year = lambda y: y>=2014))\n\n dms_cube.var_name = 'field570'\n dms_cube.attributes.pop('name')\n dms_cube.coord('time').var_name = 'time'\n dms_cube.coord('time').long_name = 'time'\n\n fut_cube_high = fut_cube_high[:-2]\n fut_cube_surf = fut_cube_surf[:-2]\n\n fut_dms = iris.cube.Cube(dms_cube.data[:,0,::-1,:],dim_coords_and_dims=[(fut_cube_surf.coord('time'),0),(fut_cube_surf.coord('latitude'),1),(fut_cube_surf.coord('longitude'), 2)],standard_name=dms_cube.standard_name, long_name=dms_cube.long_name, var_name=dms_cube.var_name, units=dms_cube.units, attributes=dms_cube.attributes)\n\n #Save the final cubes as netcdf (cutting them to be the same length)\n iris.save(iris.cube.CubeList([fut_dms,fut_cube_high,fut_cube_surf]),data_dir+ \"SO2DMS_rcp262015.nc\")\n os.system('ncatted -O -a calendar,time,m,c,\"360_day\" '+data_dir+ \"SO2DMS_rcp262015.nc\")\n\n return", "def L14_Net112(mode=\"train\"):\n data = mx.symbol.Variable(name=\"data\")\n landmark_target = mx.symbol.Variable(name=\"landmark_target\")\n landmark_vis = mx.symbol.Variable(name=\"landmark_vis\")\n \n # data = 112X112\n # conv1 = 56X56\n conv1 = Conv(data, num_filter=res_base_dim, kernel=(3, 3), pad=(1, 1), stride=(2, 2), name=\"conv1\")\n conv2 = Residual(conv1, num_block=1, num_out= res_base_dim, kernel=(3, 3), stride=(1, 1), pad=(1, 1), num_group=res_base_dim, name=\"res2\")\n \n\t#conv23 = 28X28\n conv23 = DResidual(conv2, num_out=res_base_dim*2, kernel=(3, 3), stride=(2, 2), pad=(1, 1), num_group=res_base_dim*2, name=\"dconv23\")\n conv3 = Residual(conv23, num_block=2, num_out=res_base_dim*2, kernel=(3, 3), stride=(1, 1), pad=(1, 1), num_group=res_base_dim*2, name=\"res3\")\n \n\t#conv34 = 14X14\n conv34 = DResidual(conv3, num_out=res_base_dim*4, kernel=(3, 3), stride=(2, 2), pad=(1, 1), num_group=res_base_dim*4, name=\"dconv34\")\n conv4 = Residual(conv34, 
num_block=3, num_out=res_base_dim*4, kernel=(3, 3), stride=(1, 1), pad=(1, 1), num_group=res_base_dim*4, name=\"res4\")\n \n\t#conv45 = 7X7\n conv45 = DResidual(conv4, num_out=res_base_dim*8, kernel=(3, 3), stride=(2, 2), pad=(1, 1), num_group=res_base_dim*8, name=\"dconv45\")\n conv5 = Residual(conv45, num_block=2, num_out=res_base_dim*8, kernel=(3, 3), stride=(1, 1), pad=(1, 1), num_group=res_base_dim*8, name=\"res5\")\n \n\t# conv6 = 1x1\n conv6 = Conv(conv5, num_filter=res_base_dim*8, kernel=(7, 7), pad=(0, 0), stride=(1, 1), name=\"conv6\")\n fc1 = Conv(conv6, num_filter=res_base_dim*16, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name=\"fc1\")\n fc2 = Conv(fc1, num_filter=res_base_dim*32, kernel=(1, 1), pad=(0, 0), stride=(1, 1), name=\"fc2\")\t\n conv6_3 = mx.symbol.FullyConnected(data=fc2, num_hidden=42, name=\"conv6_3\")\t\n bn6_3 = mx.sym.BatchNorm(data=conv6_3, name='bn6_3', fix_gamma=False,momentum=0.9)\n\t\n if mode == \"test\":\n landmark_pred = bn6_3\n group = mx.symbol.Group([landmark_pred])\n else:\n \n out = mx.symbol.Custom(landmark_vis = landmark_vis, landmark_pred=bn6_3, landmark_target=landmark_target, \n op_type='negativemining_hand21', name=\"negative_mining\")\n group = mx.symbol.Group([out])\n \n return group", "def calculate(self, inputs):\r\n output = inputs\r\n for layer in self.layers:\r\n output = layer.calculate(output)\r\n return output", "def calculate_correlations(input_data, index_col, cat_features, exclu_elements): \r\n try:\r\n # encode the categorical features\r\n encoded_data = pd.get_dummies(input_data,columns=cat_features,drop_first=True)\r\n\r\n pd_transposed_data = encoded_data.set_index('Style_display_code').T\r\n\r\n # get the number of items\r\n items_list = [str(a) for a in pd_transposed_data.columns]\r\n\r\n print(\"Number of items to correlate :{}_Timestamp:{}\".format(str(len(items_list)), \r\n format(str(datetime.now()))))\r\n \r\n\r\n #compute correlations and save the pickle file\r\n# matrix = pd_transposed_data.corr().values\r\n# pickle.dump(matrix, open(staging_dir+ '/corr_matrix_output_py3.p', 'wb'))\r\n \r\n # read from the saved pickle file - ONLY FOR CONSECUTIVE RUNS, TO SAVE TIME\r\n matrix = pickle.load(open(staging_dir+ '/corr_matrix_output_py3.p', \"rb\" ) )\r\n\r\n print(\"Corr Matrix size:{}_Timestamp:{}\".format(str(matrix.size),\r\n format(str(datetime.now()))))\r\n\r\n except Exception as e:\r\n print(\" Error !!\", e)\r\n \r\n # return the top correlated items\r\n return top_correlateditems(items_list,matrix, index_col, exclu_elements)", "def CVNeuralNetLayers(\n theta_1, phi_1, varphi_1, r, phi_r, theta_2, phi_2, varphi_2, a, phi_a, k, wires\n):\n\n wires = Wires(wires)\n repeat = _preprocess(\n theta_1, phi_1, varphi_1, r, phi_r, theta_2, phi_2, varphi_2, a, phi_a, k, wires\n )\n\n for l in range(repeat):\n\n Interferometer(theta=theta_1[l], phi=phi_1[l], varphi=varphi_1[l], wires=wires)\n\n r_and_phi_r = qml.math.stack([r[l], phi_r[l]], axis=1)\n broadcast(unitary=Squeezing, pattern=\"single\", wires=wires, parameters=r_and_phi_r)\n\n Interferometer(theta=theta_2[l], phi=phi_2[l], varphi=varphi_2[l], wires=wires)\n\n a_and_phi_a = qml.math.stack([a[l], phi_a[l]], axis=1)\n broadcast(unitary=Displacement, pattern=\"single\", wires=wires, parameters=a_and_phi_a)\n\n broadcast(unitary=Kerr, pattern=\"single\", wires=wires, parameters=k[l])", "def create(self):\n \n \"\"\" A solo prepressing reduction network in the head \"\"\"\n print(\"pre_reduction\")\n with tf.name_scope('pre_reduction'):\n conv1 = NW.conv(self.X, 7, 7, 
64, 2, 2, name='conv1')\n pool1 = NW.max_pool(conv1, 3, 3, 2, 2, name='pool1')\n norm1 = NW.lrn(pool1, 2, 2e-05, 0.75, name='norm1')\n reduction2 = NW.conv(norm1, 1, 1, 64, 1, 1, name='reduction2')\n conv2 = NW.conv(reduction2, 3, 3, 192, 1, 1,name='conv2')\n norm2 = NW.lrn(conv2, 2, 2e-05, 0.75, name='norm2')\n pool2 = NW.max_pool(norm2, 3, 3, 2, 2, name='pool2')\n \n \"\"\" 1st inception layer group \"\"\"\n print(\"icp1\")\n with tf.name_scope('icp1'):\n # branch 0\n icp1_out0 = NW.conv(pool2, 1, 1, 64, 1, 1, name='icp1_out0')\n # branch 1\n icp1_reduction1 = NW.conv(pool2, 1, 1, 96, 1, 1, name='icp1_reduction1')\n icp1_out1 = NW.conv(icp1_reduction1, 3, 3, 128, 1, 1, name='icp1_out1')\n # branch 2\n icp1_reduction2 = NW.conv(pool2, 1, 1, 16, 1, 1, name='icp1_reduction2')\n icp1_out2 = NW.conv(icp1_reduction2, 5, 5, 32, 1, 1, name='icp1_out2')\n # branch 3\n icp1_pool = NW.max_pool(pool2, 3, 3, 1, 1, name='icp1_pool')\n icp1_out3 = NW.conv(icp1_pool, 1, 1, 32, 1, 1, name='icp1_out3')\n # concat\n icp2_in = NW.concat([icp1_out0,\n icp1_out1,\n icp1_out2,\n icp1_out3], 3, 'icp2_in')\n\n \"\"\" 2nd inception layer group \"\"\"\n print(\"icp2\")\n with tf.name_scope('icp2'):\n # branch 0\n icp2_out0 = NW.conv(icp2_in, 1, 1, 128, 1, 1, name='icp2_out0')\n # branch 1\n icp2_reduction1 = NW.conv(icp2_in, 1, 1, 128, 1, 1, name='icp2_reduction1')\n icp2_out1 = NW.conv(icp2_reduction1, 3, 3, 192, 1, 1, name='icp2_out1')\n # branch 2\n icp2_reduction2 = NW.conv(icp2_in, 1, 1, 32, 1, 1, name='icp2_reduction2')\n icp2_out2 = NW.conv(icp2_reduction2, 5, 5, 96, 1, 1, name='icp2_out2')\n # branch 3\n icp2_pool = NW.max_pool(icp2_in, 3, 3, 1, 1, name='icp2_pool')\n icp2_out3 = NW.conv(icp2_pool, 1, 1, 64, 1, 1, name='icp2_out3')\n # concat\n icp2_out = NW.concat([icp2_out0,\n icp2_out1,\n icp2_out2,\n icp2_out3], 3, 'icp2_out')\n \n \"\"\" 3rd inception layer group \"\"\"\n print(\"icp3\")\n with tf.name_scope('icp3'):\n icp3_in = NW.max_pool(icp2_out, 3, 3, 2, 2, name='icp3_in')\n # branch 0\n icp3_out0 = NW.conv(icp3_in, 1, 1, 192, 1, 1, name='icp3_out0')\n # branch 1\n icp3_reduction1 = NW.conv(icp3_in, 1, 1, 96, 1, 1, name='icp3_reduction1')\n icp3_out1 = NW.conv(icp3_reduction1, 3, 3, 208, 1, 1, name='icp3_out1')\n # branch 2\n icp3_reduction2 = NW.conv(icp3_in, 1, 1, 16, 1, 1, name='icp3_reduction2')\n icp3_out2 = NW.conv(icp3_reduction2, 5, 5, 48, 1, 1, name='icp3_out2')\n # branch 3\n icp3_pool = NW.max_pool(icp3_in, 3, 3, 1, 1, name='icp3_pool')\n icp3_out3 = NW.conv(icp3_pool, 1, 1, 64, 1, 1, name='icp3_out3')\n # concat\n icp3_out = NW.concat([icp3_out0,\n icp3_out1,\n icp3_out2,\n icp3_out3], 3, 'icp3_out')\n \n \"\"\" 1st classify branch \"\"\"\n with tf.name_scope('cls1'):\n cls1_pool = NW.avg_pool(icp3_out, 5, 5, 3, 3, padding='VALID', name='cls1_pool')\n cls1_reduction_pose = NW.conv(cls1_pool, 1, 1, 128, 1, 1, name='cls1_reduction_pose')\n cls1_fc1_pose = NW.fc(cls1_reduction_pose, 1024, name='cls1_fc1_pose')\n cls1_fc_pose_xy = NW.fc(cls1_fc1_pose, 2, relu=False, name='cls1_fc_pose_xy')\n cls1_fc_pose_ab = NW.fc(cls1_fc1_pose, 2, relu=False, name='cls1_fc_pose_ab')\n self.layers[\"cls1_fc_pose_xy\"] = cls1_fc_pose_xy\n self.layers[\"cls1_fc_pose_ab\"] = cls1_fc_pose_ab\n \n \"\"\" 4st inception layer group \"\"\"\n print(\"icp4\")\n with tf.name_scope('icp4'):\n # branch 0\n icp4_out0 = NW.conv(icp3_out, 1, 1, 160, 1, 1, name='icp4_out0')\n # branch 1\n icp4_reduction1 = NW.conv(icp3_out, 1, 1, 112, 1, 1, name='icp4_reduction1')\n icp4_out1 = NW.conv(icp4_reduction1, 3, 3, 224, 1, 1, 
name='icp4_out1')\n # branch 2\n icp4_reduction2 = NW.conv(icp3_out, 1, 1, 24, 1, 1, name='icp4_reduction2')\n icp4_out2 = NW.conv(icp4_reduction2, 5, 5, 64, 1, 1, name='icp4_out2')\n # branch 3\n icp4_pool = NW.max_pool(icp3_out, 3, 3, 1, 1, name='icp4_pool')\n icp4_out3 = NW.conv(icp4_pool, 1, 1, 64, 1, 1, name='icp4_out3')\n # concat\n icp4_out = NW.concat([icp4_out0,\n icp4_out1,\n icp4_out2,\n icp4_out3],3, name='icp4_out')\n\n \"\"\" 5st inception layer group \"\"\"\n print(\"icp5\")\n with tf.name_scope('icp5'):\n # branch 0\n icp5_out0 = NW.conv(icp4_out, 1, 1, 128, 1, 1, name='icp5_out0')\n # branch 1\n icp5_reduction1 = NW.conv(icp4_out, 1, 1, 128, 1, 1, name='icp5_reduction1')\n icp5_out1 = NW.conv(icp5_reduction1, 3, 3, 256, 1, 1, name='icp5_out1')\n # branch 2\n icp5_reduction2 = NW.conv(icp4_out,1, 1, 24, 1, 1, name='icp5_reduction2')\n icp5_out2 = NW.conv(icp5_reduction2, 5, 5, 64, 1, 1, name='icp5_out2')\n # branch 3\n icp5_pool = NW.max_pool(icp4_out,3, 3, 1, 1, name='icp5_pool')\n icp5_out3 = NW.conv(icp5_pool, 1, 1, 64, 1, 1, name='icp5_out3')\n # concat\n icp5_out = NW.concat([icp5_out0, \n icp5_out1, \n icp5_out2, \n icp5_out3], 3, name='icp5_out')\n \n \"\"\" 6st inception layer group \"\"\"\n print(\"icp6\")\n with tf.name_scope('icp6'):\n # branch 0\n icp6_out0 = NW.conv(icp5_out, 1, 1, 112, 1, 1, name='icp6_out0')\n # branch 1\n icp6_reduction1 = NW.conv(icp5_out, 1, 1, 144, 1, 1, name='icp6_reduction1')\n icp6_out1 = NW.conv(icp6_reduction1, 3, 3, 288, 1, 1, name='icp6_out1')\n # branch 2\n icp6_reduction2 = NW.conv(icp5_out, 1, 1, 32, 1, 1, name='icp6_reduction2')\n icp6_out2 = NW.conv(icp6_reduction2, 5, 5, 64, 1, 1, name='icp6_out2')\n # branch 3\n icp6_pool = NW.max_pool(icp5_out,3, 3, 1, 1, name='icp6_pool')\n icp6_out3 = NW.conv(icp6_pool, 1, 1, 64, 1, 1, name='icp6_out3')\n # concat\n icp6_out = NW.concat([icp6_out0,\n icp6_out1,\n icp6_out2,\n icp6_out3], 3, name='icp6_out')\n\n \"\"\" 2nd classify branch \"\"\"\n with tf.name_scope('cls2'):\n cls2_pool = NW.avg_pool(icp6_out, 5, 5, 3, 3, padding='VALID', name='cls2_pool')\n cls2_reduction_pose = NW.conv(cls2_pool, 1, 1, 128, 1, 1, name='cls2_reduction_pose')\n cls2_fc1 = NW.fc(cls2_reduction_pose, 1024, name='cls2_fc1')\n cls2_fc_pose_xy = NW.fc(cls2_fc1, 2, relu=False, name='cls2_fc_pose_xy')\n cls2_fc_pose_ab = NW.fc(cls2_fc1, 2, relu=False, name='cls2_fc_pose_ab')\n self.layers[\"cls2_fc_pose_xy\"] = cls2_fc_pose_xy\n self.layers[\"cls2_fc_pose_ab\"] = cls2_fc_pose_ab\n\n \"\"\" 7st inception layer group \"\"\"\n print(\"icp7\")\n with tf.name_scope('icp7'):\n # branch 0\n icp7_out0 = NW.conv(icp6_out, 1, 1, 256, 1, 1, name='icp7_out0')\n # branch 1\n icp7_reduction1 = NW.conv(icp6_out, 1, 1, 160, 1, 1, name='icp7_reduction1')\n icp7_out1 = NW.conv(icp7_reduction1, 3, 3, 320, 1, 1, name='icp7_out1')\n # branch 2\n icp7_reduction2 = NW.conv(icp6_out, 1, 1, 32, 1, 1, name='icp7_reduction2')\n icp7_out2 = NW.conv(icp7_reduction2, 5, 5, 128, 1, 1, name='icp7_out2')\n # branch 3\n icp7_pool = NW.max_pool(icp6_out, 3, 3, 1, 1, name='icp7_pool')\n icp7_out3 = NW.conv(icp7_pool, 1, 1, 128, 1, 1, name='icp7_out3')\n # concat\n icp7_out = NW.concat([icp7_out0,\n icp7_out1,\n icp7_out2,\n icp7_out3], 3, name='icp7_out')\n\n \"\"\" 8st inception layer group \"\"\"\n print(\"icp8\")\n with tf.name_scope('icp8'):\n icp8_in = NW.max_pool(icp7_out, 3, 3, 2, 2, name='icp8_in')\n # branch 0\n icp8_out0 = NW.conv(icp8_in, 1, 1, 256, 1, 1, name='icp8_out0')\n # branch 1\n icp8_reduction1 = NW.conv(icp8_in, 1, 1, 160, 1, 
1, name='icp8_reduction1')\n icp8_out1 = NW.conv(icp8_reduction1, 3, 3, 320, 1, 1, name='icp8_out1')\n # branch 2\n icp8_reduction2 = NW.conv(icp8_in, 1, 1, 32, 1, 1, name='icp8_reduction2')\n icp8_out2 = NW.conv(icp8_reduction2, 5, 5, 128, 1, 1, name='icp8_out2')\n # branch 3\n icp8_pool = NW.max_pool(icp8_in, 3, 3, 1, 1, name='icp8_pool')\n icp8_out3 = NW.conv(icp8_pool, 1, 1, 128, 1, 1, name='icp8_out3')\n # concat\n icp8_out = NW.concat([icp8_out0,\n icp8_out1,\n icp8_out2,\n icp8_out3], 3, name='icp8_out')\n \n \"\"\" 9st inception layer group \"\"\"\n print(\"icp9\")\n with tf.name_scope('icp9'):\n # branch 0\n icp9_out0 = NW.conv(icp8_out, 1, 1, 384, 1, 1, name='icp9_out0')\n # branch 1\n icp9_reduction1 = NW.conv(icp8_out, 1, 1, 192, 1, 1, name='icp9_reduction1')\n icp9_out1 = NW.conv(icp9_reduction1, 3, 3, 384, 1, 1, name='icp9_out1')\n # branch 2\n icp9_reduction2 = NW.conv(icp8_out, 1, 1, 48, 1, 1, name='icp9_reduction2')\n icp9_out2 = NW.conv(icp9_reduction2, 5, 5, 128, 1, 1, name='icp9_out2')\n # branch 3\n icp9_pool = NW.max_pool(icp8_out, 3, 3, 1, 1, name='icp9_pool')\n icp9_out3 = NW.conv(icp9_pool, 1, 1, 128, 1, 1, name='icp9_out3')\n # concat\n icp9_out = NW.concat([icp9_out0,\n icp9_out1,\n icp9_out2,\n icp9_out3], 3, name='icp9_out')\n\n \"\"\" 3rd classify branch \"\"\"\n with tf.name_scope('cls3'):\n cls3_pool = NW.avg_pool(icp9_out, 7, 7, 1, 1, padding='VALID', name='cls3_pool')\n cls3_fc1_pose = NW.fc(cls3_pool, 2048, name='cls3_fc1_pose')\n cls3_fc_pose_xy = NW.fc(cls3_fc1_pose, 2, relu=False, name='cls3_fc_pose_xy')\n cls3_fc_pose_ab = NW.fc(cls3_fc1_pose, 2, relu=False, name='cls3_fc_pose_ab')\n self.layers[\"cls3_fc_pose_xy\"] = cls3_fc_pose_xy\n self.layers[\"cls3_fc_pose_ab\"] = cls3_fc_pose_ab", "def forward(self, img):\n device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')\n H, W = img.size()[2], img.size()[3]\n #print('x',x)\n #print('x.shape',x.shape) ## 32 x 3 x 96 x 128\n z32 = self.start(img)\n z64 = self.layer1(z32) + self.layer1_ds(z32)\n #print('z1',z64.shape)\n z128 = self.layer2(z64) + self.layer2_ds(z64)\n #print('z2',z128.shape)\n z256 = self.layer3(z128) + self.layer3_ds(z128)\n #print('z3',z256.shape)\n z256d = self.drop_out_layer(z256)\n #print('z_drop',z256d.shape)\n z256u = self.layer4(z256d)\n #print('z4',z256u.shape)\n z128u = self.layer5(torch.cat((z256u, F.interpolate(z256d,size=z256u.size()[2:] )), 1))\n #print('z5',z128u.shape)\n z64u = self.layer6(torch.cat((z128u, F.interpolate(z128,size=z128u.size()[2:] )), 1))\n #print('z6',z64u.shape)\n\n z32u = self.final(torch.cat((z64u, F.interpolate(z64,size=z64u.size()[2:] )), 1))\n #print('z6_plus',z32u.shape)\n\n #print('z7_result',self.classifer(z32u)[:, :, :H, :W].shape)\n result_class = self.classifer(z32u)[:, :, :H, :W]\n\n #print('model result shape',result_class.shape)\n ## 16 x 1 x 300 x 400\n\n # using soft argmax\n spa_argmax = spatial_argmax(torch.squeeze(result_class,1))\n\n #one hot with spatial argmax\n #xy_val = torch.zeros(spa_argmax.shape).float()\n #for idx, pt in enumerate(spa_argmax):\n # x_val = (pt[0]+1.0)*63.5\n # y_val = (pt[1]+1.0)*47.5\n # # for each batch. 
[0...127][0...95]\n # xy_val[idx][0] = x_val\n # xy_val[idx][1] = y_val\n\n xy_val = (spa_argmax+1.0).to(device)\n #print('spa_argmax',spa_argmax)\n scaling_factor = torch.FloatTensor([[(W-1)/2,0.],[0.,(H-1)/2]]).to(device)\n #scaling_factor = torch.FloatTensor([[63.5,0.],[0.,44.5]]).to(device)\n xy_val = xy_val.mm(scaling_factor)\n\n return xy_val", "def method3(self):\n cres=0.\n Ux_aloc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy_aloc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.ALDM[ix ,iy, : , : ]\n mat2=self.ALDM[(ix%self.kS.Nx)+1, iy, : , : ]\n mat3=self.ALDM[ix ,(iy%self.kS.Ny)+1, : , : ]\n \n Ux_aloc[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat2)[self.NL-1:,self.NL-1:])\n Uy_aloc[ix,iy]=np.linalg.det(np.dot(np.conj(mat1.T),mat3)[self.NL-1:,self.NL-1:])\n\n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux_aloc[ix,iy]*Uy_aloc[ix+1,iy]/Ux_aloc[ix,iy+1]/Uy_aloc[ix,iy])\n cres+=ftemp/2./pi/1j\n \n return cres.real\n #End of method3", "def learn(self):\r\n \r\n # unpack\r\n X = self.Train.X\r\n Y = self.Train.Y\r\n DY = self.Train.DY\r\n \r\n NX ,ND = X.shape\r\n NDY,_ = DY.shape\r\n \r\n print 'Build Information Matricies ...'\r\n \r\n # functions\r\n ay0 = np.array([[1.]]*NX)\r\n ay1 = X\r\n ay2 = np.reshape( np.einsum('ij,ik->ijk',X,X) , [-1,ND*ND] )\r\n\r\n # reduce redundant basis variables\r\n i_doub = np.tri(ND,k=-1).T == 1\r\n ay2[:,i_doub.ravel()] = ay2[:,i_doub.ravel()] * 2. \r\n i_keep = np.tri(ND,k=0).T == 1\r\n ay2 = ay2[:,i_keep.ravel()]\r\n\r\n # basis matrix, functions\r\n Ay = np.hstack([ay0,ay1,ay2])\r\n \r\n # arrays for the least squares regression\r\n At = Ay\r\n Yt = Y\r\n \r\n # gradients\r\n if NDY:\r\n ad0 = np.array([[0.]]*NX*ND)\r\n \r\n ad1 = np.tile( np.eye(ND) , [NX,1] )\r\n \r\n ad2a = np.repeat( np.eye(ND)[:,None,:] , ND , 1 )\r\n ad2a = np.reshape( ad2a , [-1,ND*ND] ) \r\n ad2a = np.repeat( ad2a, NX, axis=0 ) * np.repeat( np.tile( X, [ND,1] ) , ND, axis=1 )\r\n \r\n ad2b = np.repeat( np.eye(ND)[:,:,None] , ND , 2 )\r\n ad2b = np.reshape( ad2b , [-1,ND*ND] ) \r\n ad2b = np.repeat( ad2b, NX, axis=0 ) * np.tile( np.tile( X, [ND,1] ) , [1,ND] )\r\n \r\n ad2 = ad2a + ad2b\r\n \r\n # reduce redundant bases\r\n ad2[:,i_doub.ravel()] = ad2[:,i_doub.ravel()] * 2.\r\n ad2 = ad2[:,i_keep.ravel()] \r\n \r\n Ad = np.hstack([ad0,ad1,ad2])\r\n \r\n # add to arrays for least squares regression\r\n At = np.vstack([At,Ad])\r\n Yt = np.vstack([Yt, np.ravel(DY.T)[:,None]])\r\n \r\n print 'Least Squares Solve ...'\r\n B = sp.linalg.lstsq(At,Yt)[0] \r\n \r\n # unpack data\r\n c = B[0,0]\r\n b = B[1:ND+1]\r\n \r\n A = np.zeros([ND,ND])\r\n A[i_keep] = B[ND+1:,0]\r\n A[i_keep.T] = A.T[i_keep.T]\r\n \r\n # problem forumulation\r\n A = A*2.\r\n \r\n # store results\r\n self.c = c\r\n self.b = b\r\n self.A = A\r\n \r\n print ''", "def neuron_stats_to_layer(neuron_info, input_layer_count, input_neurons_per_layer, network_info, target_column=\"synapse_count\"):\n # assuming the first neuron in neuron\n input_layer = list()\n\n input_neuron_ids = neuron_info[ neuron_info.ids < 0].ids.values - np.min(neuron_info.ids)\n input_neuron_values = neuron_info[ neuron_info.ids < 0][target_column]\n\n input_collector = np.zeros((input_layer_count, input_neurons_per_layer))\n\n for inp_l in range(input_layer_count):\n mask_above_first = inp_l * input_neurons_per_layer <= input_neuron_ids\n below_last = input_neuron_ids < (inp_l + 1 ) * input_neurons_per_layer\n\n 
mask = mask_above_first & below_last\n\n ids_for_this_layer = input_neuron_ids[mask] - inp_l * input_neurons_per_layer\n\n input_collector[inp_l, ids_for_this_layer] = input_neuron_values[mask]\n\n # now the normal layers\n normal_neuron_info = neuron_info[ neuron_info.ids >= 0]\n\n\n exc_collector = np.zeros((network_info[\"num_layers\"], network_info[\"num_exc_neurons_per_layer\"]))\n inh_collector = np.zeros((network_info[\"num_layers\"], network_info[\"num_inh_neurons_per_layer\"]))\n\n layerwise = helper.split_into_layers(normal_neuron_info, network_info)\n for l_id, layer in enumerate(layerwise):\n exc, inh = helper.split_exc_inh(layer, network_info)\n exc_collector[l_id, exc.ids] = exc[target_column].values\n inh_collector[l_id, inh.ids] = inh[target_column].values\n\n return input_collector, exc_collector, inh_collector", "def method2(self):\n cres=np.zeros(self.NL,dtype=float) # List of invariants\n # The U matrices from Fukui's method; storage...\n Ux_loc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n Uy_loc=np.zeros((self.kS.Nx+1,self.kS.Ny+1),dtype=complex)\n \n for il in range(self.NL):\n # ... and calculation of U matrices for each layer\n for ix in range(self.kS.Nx+1):\n for iy in range(self.kS.Ny+1):\n mat1=self.LDM[il,ix ,iy ,:,:]\n mat2=self.LDM[il,(ix%self.kS.Nx)+1 ,iy ,:,:]\n mat3=self.LDM[il,ix ,(iy%self.kS.Ny)+1 ,:,:]\n \n Ux_loc[ix,iy]=np.dot(np.conj(mat1.T),mat2)[1,1]\n Uy_loc[ix,iy]=np.dot(np.conj(mat1.T),mat3)[1,1]\n \n for ix in range(self.kS.Nx):\n for iy in range(self.kS.Ny):\n ftemp=np.log(Ux_loc[ix,iy]*Uy_loc[ix+1,iy]/Ux_loc[ix,iy+1]/Uy_loc[ix,iy])\n cres[il]+=(ftemp/2./pi/1j).real # Layer specific topological invariant\n \n return cres", "def mutual_information_spatial(self,max_lag,percent_calc=.5,digitize=True):\n if digitize:\n M = utilities.mi_digitize(self.X)\n else:\n M = self.X\n\n rs, cs = np.shape(M)\n\n rs_iters = int(rs*percent_calc)\n cs_iters = int(cs*percent_calc)\n\n r_picks = np.random.choice(np.arange(rs),size=rs_iters,replace=False)\n c_picks = np.random.choice(np.arange(cs),size=cs_iters,replace=False)\n\n\n # The r_picks are used to calculate the MI in the columns\n # and the c_picks are used to calculate the MI in the rows\n\n c_mi = np.zeros((rs_iters,max_lag))\n r_mi = np.zeros((cs_iters,max_lag))\n\n for i in range(rs_iters):\n for j in range(max_lag):\n\n ind = j+1\n unshift = M[r_picks[i],ind:]\n shift = M[r_picks[i],:-ind]\n c_mi[i,j] = skmetrics.mutual_info_score(unshift,shift)\n\n for i in range(cs_iters):\n for j in range(max_lag):\n\n ind=j+1\n unshift = M[ind:, c_picks[i]]\n shift = M[:-ind, c_picks[i]]\n r_mi[i,j] = skmetrics.mutual_info_score(unshift,shift)\n\n r_mut = np.mean(r_mi,axis=0)\n c_mut = np.mean(c_mi,axis=0)\n\n return r_mut, c_mut, r_mi, c_mi", "def cminfo_compute():\n from hera_mc import cm_sysutils \n h = cm_sysutils.Handling()\n cminfo = h.get_cminfo_correlator()\n snap_to_ant = {}\n ant_to_snap = {}\n for antn, ant in enumerate(cminfo['antenna_numbers']):\n name = cminfo['antenna_names'][antn]\n for pol in cminfo['correlator_inputs'][antn]:\n if pol.startswith('e'):\n e_pol = pol\n if pol.startswith('n'):\n n_pol = pol\n ant_to_snap[ant] = {}\n if e_pol != 'None':\n snapi_e, channel_e = snap_part_to_host_input(cminfo['correlator_inputs'][antn][0])\n ant_to_snap[ant]['e'] = {'host': snapi_e, 'channel': channel_e}\n if snapi_e not in snap_to_ant.keys():\n snap_to_ant[snapi_e] = [None] * 6\n snap_to_ant[snapi_e][channel_e] = name + 'E'\n if n_pol != 'None':\n snapi_n, channel_n = 
snap_part_to_host_input(cminfo['correlator_inputs'][antn][1])\n ant_to_snap[ant]['n'] = {'host': snapi_n, 'channel': channel_n}\n if snapi_n not in snap_to_ant.keys():\n snap_to_ant[snapi_n] = [None] * 6\n snap_to_ant[snapi_n][channel_n] = name + 'N'\n return snap_to_ant, ant_to_snap", "def forward(self, images):\n features = self.resnet(images)\n features = Variable(features.data)\n features = self.pooling(features)\n # print(features)\n features = features.view(features.size(0),-1)\n # print(features)\n # print(resnet.fc.in_features)\n features = self.bn(self.linear(features))\n return features\n # with torch.no_grad():\n # features = self.resnet(images)\n # features = features.reshape(features.size(0), -1)\n # features = self.bn(self.linear(features))\n # return features", "def forward(self, x):\n for name, module in self.base._modules.items():\n if name == 'avgpool':\n break\n\n if name == 'layer3':\n l2 = Variable(x)\n\n x = Variable(module(x))\n l4 = Variable(x)\n\n \"\"\"for name, param in self.base.named_parameters():\n print(name, param.size())\n\n res50_model = self.base\n res50_conv2 = ResNet50Bottom(res50_model)\n for i,child in enumerate(self.base.children()):\n print(i)\n if i==8:\n l4=x\n break\n if i==6:\n l2=x\n x=res50_conv2(x.detach())\"\"\"\n\n s2 = l2.sum(1) #/ 100\n #\n s4 = l4.sum(1) #/ 1000\n\n\n sw2 = s2 / (s2.view(x.size(0), -1).sum(1)).unsqueeze(1).unsqueeze(2)\n\n sw4 = s4 / (s4.view(x.size(0), -1).sum(1)).unsqueeze(1).unsqueeze(2)\n\n\n l2 = l2 * sw2.unsqueeze(1)\n l4 = l4 * sw4.unsqueeze(1)\n\n \n c2 = self.inconv2(l2)\n c4 = self.inconv4(l4)\n c2 = self.bn2(c2)\n c4 = self.bn4(c4)\n \n n2 = F.softmax(torch.mean(torch.mean(c2, dim=2), dim=2), dim=1)\n n4 = F.softmax(torch.mean(torch.mean(c4, dim=2), dim=2), dim=1)\n nn2 = n2.data.cpu().numpy()\n nn4 = n4.data.cpu().numpy()\n cam2 = np.zeros((x.size(0), 28, 28), dtype=float)\n cam4 = np.zeros((x.size(0), 7, 7), dtype=float)\n\n\n for i in range(0, x.size(0)):\n for j in range(0, 2):\n temp1 = c2[i, j, :, :].data.cpu().numpy()\n temp1 = np.maximum(temp1, 0)\n temp1 = temp1 - np.min(temp1)\n temp1 = temp1 / (np.max(temp1)+1e-8)\n cam2[i] = cam2[i] + nn2[i, j] * temp1\n cam2 = torch.FloatTensor(cam2)\n l2 = l2 * (cam2.unsqueeze(1).cuda())\n l2 = self.stack1(l2)\n l2 = self.stack1_1(l2)\n\n for i in range(0, x.size(0)):\n for j in range(0, 8):\n temp2 = c4[i, j, :, :].data.cpu().numpy()\n temp2 = np.maximum(temp2, 0)\n temp2 = temp2 - np.min(temp2)\n temp2 = temp2 / (np.max(temp2)+1e-8)\n cam4[i] =cam4[i] + nn4[i, j] * temp2\n cam4 = torch.FloatTensor(cam4)\n l4 = l4 * cam4.unsqueeze(1).cuda()\n l4 = self.stack3(l4)\n X = l2.view(x.size(0), 512, 7 ** 2)\n Y = l4.view(x.size(0), 512, 7 ** 2)\n Z = self.cross_bilinear(X, Y)\n return n2, n4, Z", "def processAlgorithm(self, parameters, context, feedback):\n NO2_present_raster = self.parameterAsRasterLayer(parameters, self.INPUTNP, context)\n NO2_present_data_source = gdal.Open(NO2_present_raster.dataProvider().dataSourceUri())\n arr_NO2_present = NO2_present_data_source.GetRasterBand(1).ReadAsArray()\n\n PM10_present_raster = self.parameterAsRasterLayer(parameters, self.INPUTPP, context)\n PM10_present_data_source = gdal.Open(PM10_present_raster.dataProvider().dataSourceUri())\n arr_PM10_present = PM10_present_data_source.GetRasterBand(1).ReadAsArray()\n\n ozono_present_raster = self.parameterAsRasterLayer(parameters, self.INPUTOP, context)\n ozono_present_data_source = gdal.Open(ozono_present_raster.dataProvider().dataSourceUri())\n arr_ozono_present = 
ozono_present_data_source.GetRasterBand(1).ReadAsArray()\n\n arr_present = arr_ozono_present + arr_PM10_present + arr_NO2_present\n\n NO2_future_raster = self.parameterAsRasterLayer(parameters, self.INPUTNF, context)\n NO2_future_data_source = gdal.Open(NO2_future_raster.dataProvider().dataSourceUri())\n arr_NO2_future = NO2_future_data_source.GetRasterBand(1).ReadAsArray()\n\n PM10_future_raster = self.parameterAsRasterLayer(parameters, self.INPUTPF, context)\n PM10_future_data_source = gdal.Open(PM10_future_raster.dataProvider().dataSourceUri())\n arr_PM10_future = PM10_future_data_source.GetRasterBand(1).ReadAsArray()\n\n ozono_future_raster = self.parameterAsRasterLayer(parameters, self.INPUTOF, context)\n ozono_future_data_source = gdal.Open(ozono_future_raster.dataProvider().dataSourceUri())\n arr_ozono_future = ozono_future_data_source.GetRasterBand(1).ReadAsArray()\n\n arr_future = arr_ozono_future + arr_PM10_future + arr_NO2_future\n\n area_pixel = self.parameterAsInt(parameters, self.PIXEL_RES, context) * self.parameterAsInt(\n parameters, self.PIXEL_RES, context)\n\n NO2_euro_coeff = 77641.89\n ozono_euro_coeff = 14658.11\n PM10_euro_coeff = 17132.56\n\n arr_euro_present_NO2 = arr_NO2_present * NO2_euro_coeff\n arr_euro_present_ozono = arr_ozono_present * ozono_euro_coeff\n arr_euro_present_PM10 = arr_PM10_present * PM10_euro_coeff\n arr_value_present = arr_euro_present_PM10 + arr_euro_present_ozono + arr_euro_present_NO2\n\n arr_euro_future_NO2 = arr_NO2_future * NO2_euro_coeff\n arr_euro_future_ozono = arr_ozono_future * ozono_euro_coeff\n arr_euro_future_PM10 = arr_PM10_future * PM10_euro_coeff\n arr_value_future = arr_euro_future_PM10 + arr_euro_future_ozono + arr_euro_future_NO2\n\n arr_diff_NO2 = arr_euro_future_NO2 - arr_euro_present_NO2\n arr_diff_PM10 = arr_euro_future_PM10 - arr_euro_present_PM10\n arr_diff_ozono = arr_euro_future_ozono - arr_euro_present_ozono\n\n arr_diff_tot = arr_diff_NO2 + arr_diff_PM10 + arr_diff_ozono\n\n # Initialize and write on output raster\n path_output = self.parameterAsString(parameters, self.OUTPUT, context)\n file_output = path_output + '/SE_02_rimozione_inquinanti_delta_euro.tiff'\n driver = gdal.GetDriverByName(\"GTiff\")\n [cols, rows] = arr_NO2_present.shape\n diff_tot = np.sum(arr_diff_tot) / (cols * rows )\n outdata = driver.Create(file_output, rows, cols, 1, gdal.GDT_Float64)\n outdata.SetGeoTransform(NO2_present_data_source.GetGeoTransform()) ##sets same geotransform as input\n outdata.SetProjection(NO2_present_data_source.GetProjection()) ##sets same projection as input\n outdata.GetRasterBand(1).WriteArray(arr_diff_tot)\n print(np.max(outdata.GetRasterBand(1).ReadAsArray()))\n outdata.FlushCache()\n\n # Years\n present = self.parameterAsInt(parameters, self.INPUTPRE, context)\n future = self.parameterAsInt(parameters, self.INPUTFUT, context)\n report_output = path_output + '/SE_rimozione_inquinanti.txt'\n f = open(report_output, \"w+\")\n today = datetime.today().strftime('%Y-%m-%d-%H:%M:%S')\n f.write(\"Sommario dell'analisi della rimozione inquinanti\\n\")\n f.write(\"Data: \" + today +\"\\n\\n\\n\")\n f.write(\"Analisi stato di fatto\\n\\n\")\n f.write(\"Anno corrente: %i \\n\" % (present))\n f.write(\"Rimozione NO2 Stato attuale (ton): %f \\n\" % (np.sum(arr_NO2_present)))\n f.write(\"Rimozione PM10 Stato attuale (ton): %f \\n\" % (np.sum(arr_PM10_present)))\n f.write(\"Rimozione ozono Stato attuale (ton): %f \\n\" % (np.sum(arr_ozono_present)))\n f.write(\"Valore totale della rimozione inquinanti (€): %f \\n\\n\\n\" % 
(np.sum(arr_value_present)))\n f.write(\"Analisi stato di progetto\\n\\n\")\n f.write(\"Anno progetto: %i \\n\" % (future))\n f.write(\"Rimozione NO2 Stato di progetto (ton): %f \\n\" % (np.sum(arr_NO2_future)))\n f.write(\"Rimozione PM10 Stato di progetto (ton): %f \\n\" % (np.sum(arr_PM10_future)))\n f.write(\"Rimozione ozono Stato di progetto (ton): %f \\n\" % (np.sum(arr_ozono_future)))\n f.write(\"Valore totale della rimozione inquinanti (€): %f \\n\\n\\n\" % (np.sum(arr_value_future)))\n f.write(\"Differenze tra stato di progetto e stato attuale\\n\\n\")\n f.write(\"Anno progetto: %i - %i\\n\" % (present, future))\n f.write(\"Differenza della rimozione inquinanti (ton):: %f \\n\" % (np.sum(arr_future - arr_present)))\n f.write(\"Differenza sequestro inquinanti per unità di superficie (ton/ha): %f \\n\" % (\n np.sum(arr_future - arr_present) / (cols * rows * area_pixel) * 10000))\n f.write(\"Differenza in termini economici del SE Rimozione inquinanti (stato di progetto – stato attuale) (€):%d \\n\" % (\n np.sum(arr_diff_tot))) \n return {self.OUTPUT: 'Completed'}\n\n \n # ----------------------------------------------------------------------------------- \n # Copyright (c) 2021 Città di Torino.\n # \n # This material is free software: you can redistribute it and/or modify\n # it under the terms of the GNU General Public License as published by\n # the Free Software Foundation, either version 2 of the License, or\n # (at your option) any later version.\n # \n # This program is distributed in the hope that it will be useful,\n # but WITHOUT ANY WARRANTY; without even the implied warranty of\n # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n # GNU General Public License for more details.\n # \n # You should have received a copy of the GNU General Public License\n # along with this program. 
If not, see http://www.gnu.org/licenses.\n # ----------------------------------------------------------------------------------- ", "def run(layers):\n\n # Value above which people are regarded affected\n # For this dataset, 0 is no data, 1 is cloud, 2 is normal water level\n # and 3 is overflow.\n threshold = 0\n\n # Identify hazard and exposure layers\n inundation = get_hazard_layer(layers)\n\n [population] = get_exposure_layers(layers)\n\n # Extract data as numeric arrays\n D = inundation.get_data(nan=0.0) # Depth\n\n # Scale the population layer\n P = population.get_data(nan=0.0, scaling=True)\n I = numpy.where(D > threshold, P, 0)\n\n # Assume an evenly distributed population for Gender\n G = 0.5\n pregnant_ratio = 0.024 # 2.4% of women are estimated to be pregnant\n\n # Calculate breakdown\n P_female = P * G\n P_male = P - P_female\n P_pregnant = P_female * pregnant_ratio\n\n I_female = I * G\n I_male = I - I_female\n I_pregnant = I_female * pregnant_ratio\n\n # Generate text with result for this study\n total = str(int(sum(P.flat) / 1000))\n count = str(int(sum(I.flat) / 1000))\n\n total_female = str(int(sum(P_female.flat) / 1000))\n total_male = str(int(sum(P_male.flat) / 1000))\n total_pregnant = str(int(sum(P_pregnant.flat) / 1000))\n\n affected_female = str(int(sum(I_female.flat) / 1000))\n affected_male = str(int(sum(I_male.flat) / 1000))\n affected_pregnant = str(int(sum(I_pregnant.flat) / 1000))\n\n # Create raster object and return\n R = Raster(I,\n projection=inundation.get_projection(),\n geotransform=inundation.get_geotransform(),\n name='People affected',\n keywords={'total': total, 'count': count,\n 'total_female': total_female, 'affected_female': affected_female,\n 'total_male': total_male, 'affected_male': affected_male,\n 'total_pregnant': total_pregnant, 'affected_pregnant': affected_pregnant,\n })\n return R", "def three_layer_neuralnetwork(X, model, y=None, reg=0.0,verbose=0):\n \n # Unpack weights\n W1, b1, W2, b2, W3, b3 = model['W1'], model['b1'], model['W2'], model['b2'],model['W3'],model['b3']\n N,D= X.shape\n\n assert W1.shape[0] == D, ' W1 2nd dimenions must match number of features'\n \n dW1,dW2,dW3,db1,db2,db3=np.zeros_like(W1),np.zeros_like(W2),np.zeros_like(W3),np.zeros_like(b1),np.zeros_like(b2),np.zeros_like(b3)\n # Compute the forward pass\n \n '''\n AffineLayer = X.dot(W1)+b1 \n ReluLayer,_ = relu_forward(AffineLayer)\n AffineLayer2 = ReluLayer.dot(W2) + b2\n ReluLayer2,_ = relu_forward(AffineLayer2)\n AffineLayer3 = ReluLayer2.dot(W3) + b3\n scores = AffineLayer3\n \n print X.shape\n print W1.shape\n print b1.shape\n print W2.shape\n print b2.shape\n print W3.shape\n print b3.shape\n '''\n affine_out1,cache1 = affine_forward(X, W1, b1)\n relu_out1,cache_relu1 = relu_forward(affine_out1)\n \n affine_out2,cache2 = affine_forward(relu_out1, W2, b2)\n relu_out2,cache_relu2 = relu_forward(affine_out2)\n \n affine_out3,cache3 = affine_forward(relu_out2, W3, b3)\n scores = affine_out3\n\n #if verbose:\n #print ['Layer {} Variance = {}'.format(i+1, np.var(l[:])) for i,l in enumerate([a1, a2, cache3[0]])][:]\n if y is None:\n return scores\n data_loss,d_softmax = softmax_loss(scores,y)\n data_loss += reg * (np.sum(W1*W1) + np.sum(W2*W2) + np.sum(W3*W3))\n '''\n max_scores = np.max(scores)\n scores -= max_scores\n correct_class_scores = scores[y,np.arange(N)]\n exp_score = np.exp(scores)\n sumexp = np.sum(exp_score,axis=0)\n loss_i = -correct_class_scores + np.log(sumexp)\n loss = np.sum(loss_i) / N \n ''' \t\n # Compute the backward pass\n \n d_affine_out3, 
dW3, db3 = affine_backward(d_softmax, cache3) \n d_relu2 = relu_backward(d_affine_out3, cache_relu2)\n \n d_affine_out2, dW2, db2 = affine_backward(d_relu2, cache2) \n d_relu1 = relu_backward(d_affine_out2, cache_relu1)\n \n d_affine_out1, dW1, db1 = affine_backward(d_relu1, cache1) \n \n #\n reg_loss = 0\n\n loss = data_loss + reg_loss\n grads = {'W1': dW1, 'b1': db1, 'W2': dW2, 'b2': db2,'W3':dW3,'b3':db3}\n \n return loss, grads", "def optimize_correlation(self,dataloader,num_epochs=10, optimizer=None):\n print(\" Start Correlation optimizing...\")\n if optimizer is None:\n optimizer = optim.SGD(self.parameters(),lr=0.001,momentum=0.9)\n loss_sub_log = []\n for epoch in range(num_epochs):\n current_loss = float(0)\n # batch_num = 0\n for batch_idx, batch_data in enumerate(dataloader,start=0):\n data, labels = batch_data\n optimizer.zero_grad()\n forward_correlation_result = F.softmax(self.forward(data),dim=1)\n # labels_extended = labels.expand(forward_correlation_result.shape)\n labels_extended = torch.zeros(forward_correlation_result.shape)\n for idx, label in enumerate(labels):\n labels_extended[idx][label] = 1\n\n error = forward_correlation_result-labels_extended\n # print(forward_correlation_result[0])\n # print(labels_extended[0])\n # print(labels[0])\n loss = -correlation_loss(self.latest_hidden_out,error)\n loss.backward()\n optimizer.step()\n current_loss = loss.item()\n # batch_num+=1\n loss_sub_log.append(current_loss)\n print(f\" sub epoch {epoch} correlation loss: {-current_loss}\")\n\n return optimizer", "def get_final_reconstruction(self):", "def transformation(self):\n for key in self.combination_dict.keys():\n if self.combination_dict[key]['column_count'] == 2:\n if self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'tem' or self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'tem':\n self.temporal_transformation(self.combination_dict[key])\n elif self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'cat' or self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'cat':\n self.categorical_transformation(self.combination_dict[key])\n elif self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'num' and self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'num':\n self.numerical_transformation(self.combination_dict[key])\n\n elif self.combination_dict[key]['column_count'] == 3:\n num_count = 0\n num_column = []\n if self.data_dict[self.combination_dict[key]['column1']]['data_type'] == 'num':\n num_count += 1\n num_column.append(0)\n elif self.data_dict[self.combination_dict[key]['column2']]['data_type'] == 'num':\n num_count += 1\n num_column.append(1)\n elif self.data_dict[self.combination_dict[key]['column3']]['data_type'] == 'num':\n num_count += 1\n num_column.append(2)\n\n if num_count == 1:\n self.three_column_groupby_logic(self.combination_dict[key], num_column)\n\n m_score_pie = []\n m_score_bar = []\n m_score_line = []\n m_score_scatter = []\n # for key in self.scenario_dict:\n # if self.scenario_dict\n for key in self.scenario_dict:\n if math.isnan(self.scenario_dict[key][\"Scatter_chart_score\"]):\n m_score_scatter.append(0)\n else:\n m_score_scatter.append(self.scenario_dict[key][\"Scatter_chart_score\"])\n m_score_pie.append(self.scenario_dict[key][\"Pie_chart_score\"])\n m_score_bar.append(self.scenario_dict[key][\"Bar_chart_score\"])\n m_score_line.append(self.scenario_dict[key][\"Line_chart_score\"])\n\n m_score_pie /= np.max(m_score_pie)\n m_score_bar /= 
np.max(m_score_bar)\n m_score_line /= np.max(m_score_line)\n m_score_scatter /= np.max(m_score_scatter)\n m_score = [m_score_pie, m_score_bar, m_score_line, m_score_scatter]\n match_index = np.argmax(m_score, axis = 0)\n i = 0\n for key in self.scenario_dict:\n if match_index[i] == 0:\n self.scenario_dict[key][\"Chart_Type\"] = \"pie\"\n if match_index[i] == 1:\n self.scenario_dict[key][\"Chart_Type\"] = \"bar\"\n if match_index[i] == 2:\n self.scenario_dict[key][\"Chart_Type\"] = \"line\"\n if match_index[i] == 3:\n self.scenario_dict[key][\"Chart_Type\"] = \"scatter\"\n self.scenario_dict[key][\"m_score\"] = m_score[match_index[i]][i]\n i += 1\n\n return self.scenario_dict", "def MR1():\n global CO1_OUTPUT, MR1_OUTPUT\n\n for b in range(MR1_OB):\n for f in range(MR1_OF):\n # Maxpool begins here\n for r in range(MR1_OR):\n for c in range(MR1_OC):\n frame = np.zeros(4)\n frame[0] = CO1_OUTPUT[b][f][r*2 ][c*2 ]\n frame[1] = CO1_OUTPUT[b][f][r*2+1][c*2 ]\n frame[2] = CO1_OUTPUT[b][f][r*2 ][c*2+1]\n frame[3] = CO1_OUTPUT[b][f][r*2+1][c*2+1]\n\n # ReLU begins here\n maximum = 0\n for i in range(4):\n if frame[i] > maximum:\n maximum = frame[i]\n\n MR1_OUTPUT[b][f][r][c] = maximum", "def calculate_output(self):", "def pca_reduction(X, ncomp=20):\n print('Performing dimensionality reduction ...')\n\n # PCA fitting\n pca = PCA(n_components=ncomp)\n weights = pca.fit_transform(X)\n basis = pca.components_\n\n # # Plot cumsum(explained_variance) versus component\n # plt.semilogy(pca.explained_variance_ratio_*100, 's')\n # plt.ylabel('Explained Variance Ratio (%)', size=20)\n # plt.xticks(size=20)\n # plt.xlabel('Component', size=20)\n # plt.yticks(size=20)\n # plt.show()\n\n print('Explained variance ratio : '+str(round(np.cumsum(pca.explained_variance_ratio_)[-1]*100, 2))+' %.')\n\n # pickle.dump(pca, '/../Data/GPmodel/pca_'+str(ncomp))\n\n # Some plots on PCA\n # plot_pca(basis, weights)\n\n return pca, weights", "def residmapComparison():\n srcmap001 = fits.open('dataFiles/6gev_srcmap_001.fits')\n srcmap03 = fits.open('dataFiles/6gev_srcmap_03.fits')\n\n image_data = fits.getdata('dataFiles/6gev_image.fits')\n filename = get_pkg_data_filename('dataFiles/6gev_image.fits')\n hdu = fits.open(filename)[0]\n wcs = WCS(hdu.header)\n\n #Given the results of the fit, calculate the model\n modelData001 = np.zeros(srcmap001[0].shape)\n modelData03 = np.zeros(srcmap03[0].shape)\n\n file = open('plotsData/fitResults001.pk1','rb')\n fit001 = pickle.load(file)\n file.close()\n\n file = open('plotsData/fitResults03.pk1','rb')\n fit03 = pickle.load(file)\n file.close()\n\n\n for source in fit001:\n the_index = srcmap001.index_of(source)\n\n modelData001 += fit001[source][:, None, None]*srcmap001[the_index].data[:-1, :, :]/np.sum(np.sum(srcmap001[the_index].data, axis=2), axis=1)[:-1, None, None]\n for source in fit03:\n the_index = srcmap03.index_of(source)\n modelData03 += fit03[source][:, None, None]*srcmap03[the_index].data[:-1, :, :]/np.sum(np.sum(srcmap03[the_index].data, axis=2), axis=1)[:-1, None, None]\n\n fig = plt.figure(figsize=[12, 4.5])\n\n vmin = -25.0\n vmax = 25.0\n cbStep = 5.0\n ax = fig.add_subplot(121, projection=wcs)\n ax=plt.gca()\n ax.tick_params(direction='in')\n c = Wedge((gc_l, gc_b), 1.0, theta1=0.0, theta2=360.0, width=14.0, edgecolor='black', facecolor='#474747', transform=ax.get_transform('galactic'))\n ax.add_patch(c)\n mappable=plt.imshow((image_data-np.sum(modelData001,axis=0)),cmap='seismic',origin='lower',vmin=vmin, vmax=vmax, interpolation='gaussian')#\n plt.xlabel('Galactic 
Longitude')\n plt.ylabel('Galactic Latitude')\n plt.title('GC Point Source ($>6$ GeV)')\n cb = plt.colorbar(mappable, label='Residual counts per pixel', pad=0.01,ticks=np.arange(vmin, vmax+cbStep, cbStep))\n cb.ax.tick_params(width=0)\n\n\n ax2=fig.add_subplot(122, projection=wcs)\n ax2 = plt.gca()\n c2 = Wedge((gc_l, gc_b), 1.0, theta1=0.0, theta2=360.0, width=14.0, edgecolor='black', facecolor='#474747', transform=ax2.get_transform('galactic'))\n ax2.add_patch(c2)\n mappable2 = plt.imshow((image_data-np.sum(modelData03,axis=0)), cmap='seismic',origin='lower',vmin=vmin, vmax=vmax, interpolation='gaussian')\n plt.xlabel('Galactic Longitude')\n plt.ylabel('Galactic Latitude')\n plt.title('GC Extended Source ($>6$ GeV)')\n cb2 = plt.colorbar(mappable2, label='Residual counts per pixel', pad=0.01, ticks=np.arange(vmin, vmax+cbStep, cbStep))\n cb2.ax.tick_params(width=0)\n fig.tight_layout()\n plt.subplots_adjust(wspace = 0.13, left=0.04, bottom=0.13, top=0.92)\n #plt.savefig('plots/residComparison.pdf',bbox_inches='tight')\n plt.show()", "def __update(self):\n\n # Make sure loads have been assigned to group\n if type(self.appliedLoad) == Load:\n self.appliedLoad = LoadSet(self.appliedLoad)\n elif type(self.appliedLoad) != LoadSet:\n raise TypeError(\"Applied load must be a Load or LoadSet\")\n\n # Begin Calculations\n _cg = self.cg # calculate the cg once to save computation time\n _appLoad = self.appliedLoad.totalForce\n _appMoment = self.appliedLoad.totalMoment\n\n coef_mat = np.zeros((len(self) * 3, len(self) * 3)) # coeff matrix\n soln_mat = np.zeros(len(self) * 3) # solution matrix\n\n cSet = [[i, i+1, i+2] for i in range(0, 3 * len(self), 3)]\n rSet = [[i+6, i+7, i+8] for i in range(0, 3 * (len(self) - 2), 3)]\n\n for i, j in enumerate(cSet):\n # i = column fastener ID\n # j = column fastener set\n # Mx = yFz - zFy\n # My = zFx - xFz\n # Mz = xFy - yFx\n\n Fx = j[0]\n Fy = j[1]\n Fz = j[2]\n\n # fill in first three rows\n coef_mat[0][Fx] = 1 # sum of Fx\n coef_mat[1][Fy] = 1 # sum of Fy\n coef_mat[2][Fz] = 1 # sum of Fz\n\n # fill in fourth row (sum of Mx at CG)\n coef_mat[3][Fy] = -(F[i].xyz[2] - _cg[2]) # -zFy\n coef_mat[3][Fz] = +(F[i].xyz[1] - _cg[1]) # +yFz\n\n # fill in fifth row (sum of My at CG)\n coef_mat[4][Fx] = +(F[i].xyz[2] - _cg[2]) # +zFx\n coef_mat[4][Fz] = -(F[i].xyz[0] - _cg[0]) # -xFz\n\n # fill in sixth row (sum of Mz at CG)\n coef_mat[5][Fx] = -(F[i].xyz[1] - _cg[1]) # -yFx\n coef_mat[5][Fy] = +(F[i].xyz[0] - _cg[0]) # +xFy\n\n for u, w in enumerate(rSet):\n # u = row fastener ID\n # w = row fastener set\n\n rX = w[0]\n rY = w[1]\n rZ = w[2]\n\n coef_mat[rX][Fy] = -(F[i].xyz[2] - F[u].xyz[2]) # -zFy\n coef_mat[rX][Fz] = +(F[i].xyz[1] - F[u].xyz[1]) # +yFz\n\n coef_mat[rY][Fx] = +(F[i].xyz[2] - F[u].xyz[2]) # +zFx\n coef_mat[rY][Fz] = -(F[i].xyz[0] - F[u].xyz[0]) # -xFz\n\n coef_mat[rZ][Fx] = -(F[i].xyz[1] - F[u].xyz[1]) # -yFx\n coef_mat[rZ][Fy] = +(F[i].xyz[0] - F[u].xyz[0]) # +xFy\n\n # fill in the solution matrix (soln_mat)\n for i in range(3):\n soln_mat[i] = -_netLoad.force[i]\n soln_mat[i+3] = -_netLoad.moment[i]\n\n # fill in the remaining rows\n for i, j in enumerate(rSet):\n # i = fastener\n # j = row\n\n rX = j[0]\n rY = j[1]\n rZ = j[2]\n\n # Mx = (y_cg - y_i)F_znet - (z_cg - z_i)F_ynet + M_xnet\n soln_mat[rX] = - ((_cg[1] - F[i].xyz[1]) * _netLoad.force[2]\n - (_cg[2] - F[i].xyz[2]) * _netLoad.force[1]\n + _netLoad.moment[0])\n\n # My = (z_cg - z_i)F_xnet - (x_cg - x_i)F_znet + M_ynet\n soln_mat[rY] = -((_cg[2] - F[i].xyz[2]) * _netLoad.force[0]\n - 
(_cg[0] - F[i].xyz[0]) * _netLoad.force[2]\n + _netLoad.moment[1])\n\n # Mz = (x_cg - x_i)F_ynet - (y_cg - y_i)F_xnet + M_znet\n soln_mat[rZ] = -((_cg[0] - F[i].xyz[0]) * _netLoad.force[1]\n - (_cg[1] - F[i].xyz[1]) * _netLoad.force[0]\n + _netLoad.moment[2])\n\n # Solve system of equations\n matSol = np.linalg.lstsq(coef_mat, soln_mat)[0]\n\n # Add resulting fastener loads to fastener objects\n for i, j in enumerate(cSet):\n rX = j[0]\n rY = j[1]\n rZ = j[2]\n\n F[i].force[0] = matSol[rX]\n F[i].force[1] = matSol[rY]\n F[i].force[2] = matSol[rZ]", "def compute(self) -> Union[Tensor, Tuple[Tensor, Tensor]]:\n preds_dataloader = _get_dataloader(\n input_ids=dim_zero_cat(self.preds_input_ids),\n attention_mask=dim_zero_cat(self.preds_attention_mask),\n idf=self.idf,\n batch_size=self.batch_size,\n num_workers=self.num_threads,\n )\n target_dataloader = _get_dataloader(\n input_ids=dim_zero_cat(self.target_input_ids),\n attention_mask=dim_zero_cat(self.target_attention_mask),\n idf=self.idf,\n batch_size=self.batch_size,\n num_workers=self.num_threads,\n )\n\n info_lm_score = _infolm_compute(\n self.model,\n preds_dataloader,\n target_dataloader,\n self.temperature,\n self.idf,\n self.information_measure_cls,\n self.special_tokens_map,\n self.verbose,\n )\n\n if self.return_sentence_level_score:\n return info_lm_score.mean(), info_lm_score\n\n return info_lm_score.mean()", "def _compute_carry_and_output(self, x, h_tm1, c_tm1):\n x_i, x_f, x_c, x_o = x\n h_tm1_i, h_tm1_f, h_tm1_c, h_tm1_o = h_tm1\n i = self.recurrent_activation(\n x_i + K.dot(h_tm1_i, self.recurrent_kernel[:, :self.units]))\n f = self.recurrent_activation(x_f + K.dot(\n h_tm1_f, self.recurrent_kernel[:, self.units:self.units * 2]))\n c = f * c_tm1 + i * self.activation(x_c + K.dot(\n h_tm1_c, self.recurrent_kernel[:, self.units * 2:self.units * 3]))\n o = self.recurrent_activation(\n x_o + K.dot(h_tm1_o, self.recurrent_kernel[:, self.units * 3:]))\n return c, o", "def __init__(self):\n super(LinearAggregationLayer, self).__init__()", "def compute_IC(self, n_params=1):\n self.IC = information_criterion(self, n_params=n_params)", "def rebuild_the_laplacians():\n local_matrix = InteractomeInterface()\n local_matrix.full_rebuild()\n\n annot_matrix = AnnotomeInterface()\n annot_matrix.full_rebuild()", "def global_analysis(tomo, b_th, c=18):\n\n ## Thesholding and Volume analysis\n if c == 6:\n con_mat = [ [[0, 0, 0], [0, 1, 0], [0, 0, 0]],\n [[0, 1, 0], [1, 1, 1], [0, 1, 0]],\n [[0, 0, 0], [0, 1, 0], [0, 0, 0]] ]\n elif c == 18:\n con_mat = [[[0, 1, 0], [1, 1, 1], [0, 1, 0]],\n [[1, 1, 1], [1, 1, 1], [1, 1, 1]],\n [[0, 1, 0], [1, 1, 1], [0, 1, 0]]]\n elif c == 26:\n con_mat = [[[1, 1, 1], [1, 1, 1], [1, 1, 1]],\n [[1, 1, 1], [1, 1, 1], [1, 1, 1]],\n [[1, 1, 1], [1, 1, 1], [1, 1, 1]]]\n else:\n raise ValueError\n tomo_lbl, num_lbls = sp.ndimage.label(tomo >= b_th, structure=np.ones(shape=[3, 3, 3]))\n tomo_out = np.zeros(shape=tomo.shape, dtype=int)\n lut = np.zeros(shape=num_lbls+1, dtype=int)\n\n ## COUNTING REGIONS METHODS\n # import time\n # hold_t = time.time()\n # for lbl in range(1, num_lbls + 1):\n # ids = tomo == lbl\n # feat_sz = len(ids)\n # tomo_out[ids] = feat_sz\n # # print('[1]:', lbl, 'of', num_lbls)\n # print time.time() - hold_t\n\n ## COUNTING PIXELS METHOD\n ## Count loop\n # cont, total = 0, np.prod(tomo.shape)\n # import time\n # hold_t = time.time()\n for x in range(tomo.shape[0]):\n for y in range(tomo.shape[1]):\n for z in range(tomo.shape[2]):\n id = tomo_lbl[x, y, z]\n lut[id] += 1\n # cont += 1\n # 
print('[1]:', cont, 'of', total)\n #\n ## Write loop\n # cont, total = 0, np.prod(tomo.shape)\n\n for x in range(tomo.shape[0]):\n for y in range(tomo.shape[1]):\n for z in range(tomo.shape[2]):\n id = tomo_lbl[x, y, z]\n if id > 0:\n tomo_out[x, y, z] = lut[id]\n # cont += 1\n # print('[1]:', cont, 'of', total)\n # print time.time() - hold_t\n\n return tomo_out", "def multiply(self, layer):\n pass", "def scalarInfo(img, cnt):\n\tm = cntInfo(img, cnt)\n\td = {\"perimeter\":m[\"perimeter\"], \"oreientation\":m[\"orientation\"], \"solidity\":m[\"solidity\"],\"height\":m[\"height\"], \"extent\":m[\"extent\"], \"aspect ratio\":m[\"aspect ratio\"], \"area\":m[\"area\"], \"sum intensity\":m[\"sum intensity\"], \"width\":m[\"width\"], \"equivalent diameter\": m[\"equivalent diameter\"], \"mean intensity\": m[\"mean intensity\"]}\n\treturn d", "def dimensionality_reduction(dataset, algorithm, sampled_frac=0.05):\n num_samples = int(len(dataset.index)*sampled_frac)\n sample = dataset[:num_samples]\n category_labels = sample[\"Rings\"]\n sample = sample.drop(\"Rings\", axis =1)\n if algorithm ==\"PCA\":\n dim_reducer = PCA(n_components=3)\n if algorithm == \"MDS\":\n dim_reducer = MDS(n_components = 3)\n if algorithm ==\"TSNE\":\n dim_reducer = TSNE(n_components=3)\n if algorithm == \"isomap\":\n dim_reducer = Isomap(n_components=3)\n \n reduced_dataset = dim_reducer.fit_transform(sample)\n\n fig = plt.figure()\n plt.clf()\n ax = fig.add_subplot(111, projection='3d')\n\n plt.cla()\n ax.scatter(reduced_dataset[:,0], reduced_dataset[:,1], reduced_dataset[:,2],c=category_labels.astype(str),cmap=plt.cm.spectral)\n\n ax.view_init(azim=30)\n plt.show()\n \n # plt.scatter(reduced_dataset[:,0], reduced_dataset[:,1], c=category_labels.astype(str))\n # plt.xlabel(\"z1\")\n # plt.ylabel(\"z2\")\n # plt.show()", "def _calculate_information_gain(self, cur_state, next_state):\n\n n = len(cur_state)\n information_gain_per_action = np.zeros((n, self.action_dim))\n\n prob_cur = self.classifier.get_class1_prob(obs=cur_state)\n prob_next = self.classifier.get_class1_prob(obs=next_state)\n information_gain_true = (prob_next - prob_cur).reshape(-1, 1)\n\n next_state_null = np.copy(next_state)\n next_state_null[:, -self.action_dim:] = self.classifier.missing_value\n prob_next_null = self.classifier.get_class1_prob(next_state_null)\n\n for i in range(self.action_dim):\n next_state_i = np.copy(next_state)\n next_state_i[:, -self.action_dim:] = self.classifier.missing_value\n next_state_i[:, -i - 1] = next_state[:, -i - 1]\n\n prob_next_i = self.classifier.get_class1_prob(obs=next_state_i)\n information_gain_per_action[:, -i - 1] = prob_next_i - prob_next_null\n\n information_gain_sum = np.sum(information_gain_per_action, axis=1, keepdims=True)\n ratio = information_gain_true / information_gain_sum\n ratio[information_gain_sum == 0] = 0\n information_gain_per_action = information_gain_per_action * ratio\n return information_gain_per_action", "def specificity():\n\tatlas = 'power'\n\tproject='hcp'\n\tdf_columns=['Task','Hub Measure','Q+/Q-','Average Edge i-j Weight',\"Strength of r's, i's PC & j's Q\"]\n\ttasks = ['REST','WM','GAMBLING','RELATIONAL','MOTOR','LANGUAGE','SOCIAL',]\n\tknown_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)\n\tdf = pd.DataFrame(columns = df_columns)\n\tfor task in tasks:\n\t\tprint task\n\t\t# subjects = np.array(hcp_subjects).copy()\n\t\t# subjects = list(subjects)\n\t\t# subjects = remove_missing_subjects(subjects,task,atlas)\n\t\tsubjects = 
np.load('/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_subs_fz.npy' %('hcp',task,atlas))\n\t\tstatic_results = graph_metrics(subjects,task,atlas,'fz')\n\t\tsubject_pcs = static_results['subject_pcs']\n\t\tsubject_wmds = static_results['subject_wmds']\n\t\tsubject_mods = static_results['subject_mods']\n\t\tsubject_wmds = static_results['subject_wmds']\n\t\tmatrices = static_results['matrices']\n\t\t#sum of weight changes for each node, by each node.\n\t\thub_nodes = ['WCD']\n\t\t# hub_nodes = ['PC']\n\t\tdriver_nodes_list = ['Q+','Q-']\n\t\t# driver_nodes_list = ['Q+']\n\t\tmean_pc = np.nanmean(subject_pcs,axis=0)\n\t\tmean_wmd = np.nanmean(subject_wmds,axis=0)\n\t\tmod_pc_corr = np.zeros(subject_pcs.shape[1])\n\t\tfor i in range(subject_pcs.shape[1]):\n\t\t\tmod_pc_corr[i] = nan_pearsonr(subject_mods,subject_pcs[:,i])[0]\n\t\tmod_wmd_corr = np.zeros(subject_wmds.shape[1])\n\t\tfor i in range(subject_wmds.shape[1]):\n\t\t\tmod_wmd_corr[i] = nan_pearsonr(subject_mods,subject_wmds[:,i])[0]\n\t\tfor hub_node in hub_nodes:\n\t\t\tif hub_node == 'PC':\n\t\t\t\tpc_edge_corr = np.arctanh(pc_edge_correlation(subject_pcs,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_pc_edge_corr_z.npy' %(project,task,atlas)))\n\t\t\t\tconnector_nodes = np.where(mod_pc_corr>0.0)[0]\n\t\t\t\tlocal_nodes = np.where(mod_pc_corr<0.0)[0]\n\t\t\telse:\n\t\t\t\tpc_edge_corr = np.arctanh(pc_edge_correlation(subject_wmds,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_wmd_edge_corr_z.npy' %(project,task,atlas)))\n\t\t\t\tconnector_nodes = np.where(mod_wmd_corr>0.0)[0]\n\t\t\t\tlocal_nodes = np.where(mod_wmd_corr<0.0)[0]\n\t\t\tedge_thresh_val = 50.0\n\t\t\tedge_thresh = np.percentile(np.nanmean(matrices,axis=0),edge_thresh_val)\n\t\t\tpc_edge_corr[:,np.nanmean(matrices,axis=0)<edge_thresh] = np.nan\n\t\t\tfor driver_nodes in driver_nodes_list:\n\t\t\t\tweight_change_matrix_between = np.zeros((num_nodes,num_nodes))\n\t\t\t\tweight_change_matrix_within = np.zeros((num_nodes,num_nodes))\n\t\t\t\tif driver_nodes == 'Q-':\n\t\t\t\t\tdriver_nodes_array = local_nodes\n\t\t\t\telse:\n\t\t\t\t\tdriver_nodes_array = connector_nodes\n\t\t\t\tfor n1,n2 in permutations(range(num_nodes),2):\n\t\t\t\t\tif n1 not in driver_nodes_array:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tif known_membership[n2] == 0:\n\t\t\t\t\t\tcontinue\n\t\t\t\t\tarray = pc_edge_corr[n1][n2]\n\t\t\t\t\tweight_change_matrix_between[n1,n2] = np.nansum(pc_edge_corr[n1][n2][np.where((known_membership!=known_membership[n2])&(np.arange(264)!=n1))])\n\t\t\t\t\tweight_change_matrix_within[n1,n2] = np.nansum(pc_edge_corr[n1][n2][np.where((known_membership==known_membership[n2])&(np.arange(264)!=n1))])\n\t\t\t\t\t# for n3 in range(264):\n\t\t\t\t\t# \tif n1 == n3:\n\t\t\t\t\t# \t\tcontinue\n\t\t\t\t\t# \tif known_membership[n3]!= known_membership[n2]:\n\t\t\t\t\t# \t\tweight_change_matrix_between[n1,n2] = np.nansum([weight_change_matrix_between[n1,n2],array[n3]])\n\t\t\t\t\t# \t\tbetween_len = between_len + 1\n\t\t\t\t\t# \telse:\n\t\t\t\t\t# \t\tweight_change_matrix_within[n1,n2] = np.nansum([weight_change_matrix_within[n1,n2],array[n3]])\n\t\t\t\t\t# \t\tcommunity_len = community_len + 1\n\t\t\t\t\t# weight_change_matrix_within[n1,n2] = weight_change_matrix_within[n1,n2] / community_len\n\t\t\t\t\t# weight_change_matrix_between[n1,n2] = weight_change_matrix_between[n1,n2] / between_len\n\t\t\t\ttemp_matrix = np.nanmean(matrices,axis=0)\n\t\t\t\tweight_matrix = 
weight_change_matrix_within-weight_change_matrix_between\n\t\t\t\tweight_matrix[np.isnan(weight_matrix)] = 0.0\n\t\t\t\tif hub_node == 'PC':\n\t\t\t\t\tdf_columns=['Task','Hub Measure','Q+/Q-','Average Edge i-j Weight',\"Strength of r's, i's PC & j's Q\"]\n\t\t\t\telse:\n\t\t\t\t\tdf_columns=['Task','Hub Measure','Q+/Q-','Average Edge i-j Weight',\"Strength of r's, i's WCD & j's Q\"]\n\t\t\t\tdf_array = []\n\t\t\t\tfor i,j in zip(temp_matrix[weight_matrix!=0.0].reshape(-1),weight_matrix[weight_matrix!=0.0].reshape(-1)):\n\t\t\t\t\tdf_array.append([task,hub_node,driver_nodes,i,j])\n\t\t\t\tdf = pd.concat([df,pd.DataFrame(df_array,columns=df_columns)],axis=0)\n\t\t\t\tprint hub_node, driver_nodes\n\t\t\t\tprint pearsonr(weight_matrix[weight_matrix!=0.0].reshape(-1),temp_matrix[weight_matrix!=0.0].reshape(-1))\n\t\t\t\t1/0\n\n\t# plot_connectivity_results(df[(df['Q+/Q-']=='Q+') &(df['Hub Measure']=='PC')],\"Strength of r's, i's PC & j's Q\",'Average Edge i-j Weight','/home/despoB/mb3152/dynamic_mod/figures/edge_spec_pcqplus_%s.pdf'%(edge_thresh_val))\n\t# plot_connectivity_results(df[(df['Q+/Q-']=='Q-') &(df['Hub Measure']=='PC')],\"Strength of r's, i's PC & j's Q\",'Average Edge i-j Weight','/home/despoB/mb3152/dynamic_mod/figures/edge_spec_pcqminus_%s.pdf'%(edge_thresh_val))\n\t# plot_connectivity_results(df[(df['Q+/Q-']=='Q+') &(df['Hub Measure']=='WCD')],\"Strength of r's, i's WCD & j's Q\",'Average Edge i-j Weight','/home/despoB/mb3152/dynamic_mod/figures/edge_spec_wmdqplus_%s.pdf'%(edge_thresh_val))\n\t# plot_connectivity_results(df[(df['Q+/Q-']=='Q-') &(df['Hub Measure']=='WCD')],\"Strength of r's, i's WCD & j's Q\",'Average Edge i-j Weight','/home/despoB/mb3152/dynamic_mod/figures/edge_spec_wmdqminus_%s.pdf'%(edge_thresh_val))\n\t# \"\"\"\n\t# Are connector nodes modulating the edges that are most variable across subjects?\n\t# \"\"\"\n\t# atlas='power'\n\t# known_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)\n\t# for task in tasks:\n\t# \tpc_thresh = 75\n\t# \tlocal_thresh = 25\n\t# \tsubjects = np.array(hcp_subjects).copy()\n\t# \tsubjects = list(subjects)\n\t# \tsubjects = remove_missing_subjects(subjects,task,atlas)\n\t# \tstatic_results = graph_metrics(subjects,task,atlas)\n\t# \tsubject_pcs = static_results['subject_pcs']\n\t# \tsubject_wmds = static_results['subject_wmds']\n\t# \tmatrices = static_results['matrices']\n\t# \tmatrices[:,np.nanmean(matrices,axis=0)<0.0] = np.nan\n\t# \tpc_edge_corr = np.arctanh(pc_edge_correlation(subject_wmds,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_wmd_edge_corr_z.npy' %(project,task,atlas)))\n\t# \t# pc_edge_corr = pc_edge_correlation(subject_pcs,matrices,path='/home/despoB/mb3152/dynamic_mod/results/%s_%s_%s_pc_edge_corr_z.npy' %(project,task,atlas))\n\t# \tstd_mod = []\n\t# \ttstd = np.std(matrices,axis=0).reshape(-1)\n\t# \tfor i in range(num_nodes):\n\t# \t\tstd_mod.append(nan_pearsonr(pc_edge_corr[i].reshape(-1),tstd)[0])\n\t# \t# print task, pearsonr(np.nanmean(subject_pcs,axis=0),std_mod)\n\t# \tprint task, pearsonr(np.nanmean(subject_wmds,axis=0),std_mod)\n\t# \tplot_corr_matrix(np.std(matrices,axis=0),network_names.copy(),out_file=None,plot_corr=True,return_array=False)", "def calc(self):\n\t\tfor neuron in self.neurons.items():\n\t\t\tneuron.calculate()", "def calculate_conservation(self):\n for pchain in self.system.ProteinList:\n ConservationTools.fetch_msq_conservation(pchain)\n ConservationTools.calculate_conservation(self.system)\n self.rebuild_color_menu()", "def 
_compute_outputs(self, *args, **kwargs):\n pass\n # self.outputs = self.model(input_ids=self.input_ids, masked_lm_labels=self.input_ids)\n # self.logits = self.outputs[0][0]\n # self.probs = torch.softmax(self.logits, 1)", "def back_propagate(self,y):\n if y.ndim > 1: #case of multioutput prediction\n m = np.shape(y)[0] #number of samples\n n = np.shape(y)[1] #number of output elements\n \n #calculating gradients for each layer\n for layer, activation in zip(reversed(range(len(self.architecture))), self.activations[::-1]):\n \n if layer == len(self.architecture)-1: #identifies output layer and does output layer specific calculations\n dCda_L = self.derivatives(function='mse', y_pred = self.all_data[f'A{layer}'], y= y)\n da_LdZ_L = self.derivatives(np.sum(self.all_data[f'Z{layer}'], axis=0)*(1/m),activation)\n delta_L = np.multiply(dCda_L,da_LdZ_L)\n self.all_data[f'dCda_{layer}'] = dCda_L\n self.all_data[f'da_{layer}dZ_{layer}'] = da_LdZ_L\n self.all_data[f'delta_{layer}'] = delta_L\n else: #for other layers\n da_LdZ_l = self.derivatives(np.sum(self.all_data[f'Z{layer}'], axis=0)*(1/m),activation)\n self.all_data[f'da_{layer}dZ_{layer}'] = da_LdZ_l\n delta_l = np.multiply(np.dot(self.all_data[f'delta_{layer+1}'], (self.weights_and_biases[f'W{layer+1}']).T), da_LdZ_l) \n self.all_data[f'delta_{layer}'] = delta_l\n \n dCdW_l = np.outer(np.sum(self.all_data[f'A{layer-1}'],axis=0)*(1/m),self.all_data[f'delta_{layer}'])\n dCdb_l = self.all_data[f'delta_{layer}']\n\n ##saving calculated data\n self.all_data[f'dCdW{layer}'] = dCdW_l\n self.all_data[f'dCdb{layer}'] = dCdb_l\n self.parameter_gradients[f'dCdW{layer}'] = dCdW_l\n self.parameter_gradients[f'dCdb{layer}'] = dCdb_l\n \n else: #calculations for single dataset(eg for SGD)\n m = 1\n n = len(y)\n\n ##calculating gradients for each layer\n for layer, activation in zip(reversed(range(len(self.architecture))), self.activations[::-1]):\n \n if layer == len(self.architecture)-1:\n #dCda_L = (self.all_data[f'A{layer}'] - y)*(1/(m*n))*2\n dCda_L = self.derivatives(function='mse', y_pred = self.all_data[f'A{layer}'], y=y)\n da_LdZ_L = self.derivatives(self.all_data[f'Z{layer}'],activation)\n delta_L = np.multiply(dCda_L,da_LdZ_L)\n self.all_data[f'dCda_{layer}'] = dCda_L\n self.all_data[f'da_{layer}dZ_{layer}'] = da_LdZ_L\n self.all_data[f'delta_{layer}'] = delta_L\n else:\n da_LdZ_l = self.derivatives(self.all_data[f'Z{layer}'],activation)\n self.all_data[f'da_{layer}dZ_{layer}'] = da_LdZ_l\n delta_l = np.multiply(np.dot(self.all_data[f'delta_{layer+1}'], (self.weights_and_biases[f'W{layer+1}']).T), da_LdZ_l) \n self.all_data[f'delta_{layer}'] = delta_l\n\n dCdW_l = np.outer(self.all_data[f'A{layer-1}'],self.all_data[f'delta_{layer}'])\n dCdb_l = self.all_data[f'delta_{layer}']\n\n #saving data\n self.all_data[f'dCdW{layer}'] = dCdW_l\n self.all_data[f'dCdb{layer}'] = dCdb_l\n self.parameter_gradients[f'dCdW{layer}'] = dCdW_l\n self.parameter_gradients[f'dCdb{layer}'] = dCdb_l", "def _reduce(self, x, y):\n coef = self._update_parallel_coef_constraints(x)\n self.coef_ = coef.T", "def mutual_information(pred, true):\n \n #for now , only for univariate forecasting. 
So reshapes entire batch of K timesteps into vector as if single feature\n MI = mutual_info_regression(true.detach().numpy().flatten().reshape(-1,1), pred.detach().numpy().flatten())[0]\n return torch.tensor(MI)", "def computeConeActivity(Analysis, ImageData, rec_field, cpd, _meta,\n _brownian=True, glasses=False):\n Rec_Field = (rec_field[540]['fft'] / rec_field['max']) \n \n ImageData['fitLaw'] = ImageData['powerlaw'](cpd[1:])\n powerlaw = ImageData['fitLaw']\n\n if _brownian: \n temp = np.arange(1, 80)\n movement_filter = brownian_motion(cpd[1:], temp)\n powerlaw *= movement_filter\n\n # compute the diffraction limited case seperately\n diffract = {}\n diff, _x = o.diffraction(_meta['samples'], \n _meta['pupil_size'],\n 16.6, \n ref_index=1.4, \n wavelength=550.0)\n # now interpolate mtf into cpd's of analysis:\n mtf = np.interp(cpd, _x, diff)\n\n # remove zeros to avoid errors in future computations:\n ind = np.where(mtf != 0)[0]\n diffract['cpd'] = cpd[ind]\n diffract['mtf'] = mtf[ind]\n\n diffract['preCone'] = (powerlaw[ind] * \n diffract['mtf'][ind])\n diffract['retina'] = (diffract['preCone'] *\n Rec_Field[ind])\n\n if glasses:\n # if glasses on, compute effect after diffraction case\n powerlaw *= gauss(cpd[1:], 10)\n\n for key in Analysis:\n # find cone fft:\n wv = Analysis[key]['wavelength']\n Rec_Field = (rec_field[wv]['fft'] / rec_field['max'])\n\n # generate MTFs for each condition:\n intensity = traceEye(\n Analysis[key]['dist'], \n Analysis[key]['off_axis'], \n Analysis[key]['pupil_size'], \n Analysis[key]['focus'],\n Analysis[key]['wavelength'])\n psf = o.genPSF(intensity, _meta['samples'])[1]\n Analysis[key]['mtf'] = o.genMTF(psf)[ind]\n\n Analysis[key]['preCone'] = (powerlaw[ind] * \n Analysis[key]['mtf'])\n\n Analysis[key]['retina'] = (Analysis[key]['preCone'] *\n Rec_Field[ind])\n\n return Analysis, diffract", "def forward(self, image1, image2, intrinsic, posepred, insmap, initialdepth=None, iters=12):\n image2_org = torch.clone(image2)\n\n image1 = 2 * image1 - 1.0\n image2 = 2 * image2 - 1.0\n\n image1 = image1.contiguous()\n image2 = image2.contiguous()\n\n hdim = self.hidden_dim\n cdim = self.context_dim\n\n # run the feature network\n fmap1, fmap2 = self.fnet([image1, image2])\n\n bz, _, featureh, featurew = fmap1.shape\n device = fmap1.device\n self.init_sampling_pts(bz, featureh, featurew, device)\n \n fmap1 = fmap1.float()\n fmap2 = fmap2.float()\n corr_fn = CorrBlock(fmap1, fmap2, intrinsic, posepred, insmap, nedges=self.nedges, maxinsnum=self.args.maxinsnum)\n\n # run the context network\n cnet = self.cnet(image1)\n net, inp = torch.split(cnet, [hdim, cdim], dim=1)\n net = torch.tanh(net)\n inp = torch.relu(inp)\n\n bz, _, h, w = image1.shape\n if initialdepth is None:\n logdepth = self.initialize_logdepth(image1)\n else:\n logdepth = F.interpolate(torch.log(torch.clamp_min(initialdepth, min=1)), [int(h / 8), int(w / 8)], mode='nearest')\n\n depth_predictions = []\n logdepth_predictions = []\n local_sample_pts2ds = []\n delta_depths = []\n outputs = dict()\n for itr in range(iters):\n corr, local_sample_pts2d = corr_fn(logdepth, sampled_rld=self.sampled_rld, pts2ddict=self.pts2ddict) # index correlation volume\n\n net, up_mask, delta_depth = self.update_block(net, inp, corr, logdepth)\n\n # F(t+1) = F(t) + \\Delta(t)\n logdepth = logdepth + delta_depth\n depth = torch.exp(logdepth)\n\n depth_up = self.upsample_flow(depth, up_mask)\n\n if itr == iters - 1:\n flowpred, img1_recon, projMimg = self.depth2rgb(depth_up, image2_org, intrinsic, posepred, insmap)\n 
outputs['flowpred'] = flowpred\n outputs['img1_recon'] = img1_recon\n # with torch.no_grad():\n # outputs['orgflow'] = self.depth2flow(initialdepth, projMimg)\n \n depth_predictions.append(depth_up)\n logdepth_predictions.append(F.interpolate(logdepth, [h, w], mode='bilinear', align_corners=False))\n local_sample_pts2ds.append(local_sample_pts2d)\n delta_depths.append(delta_depth)\n\n outputs['depth_predictions'] = depth_predictions\n outputs['logdepth_predictions'] = logdepth_predictions\n outputs['local_sample_pts2ds'] = local_sample_pts2ds\n outputs['delta_depths'] = delta_depths\n return outputs", "def calc_perc_reducts():\n #Load RCP2.6 datq\n cubes = iris.load(data_dir+'DMSSO2NH3_18502100_RCP26_monthly.nc')\n #Get the surface and high level SO2 emissions\n surf_cube = cubes[3][:,0]\n high_cube = cubes[1][:,0]\n cubes = iris.cube.CubeList([surf_cube,high_cube])\n\n for i in range(0,len(cubes)):\n #Add the year and month to the cube and extract for 2009 onwards\n iris.coord_categorisation.add_year(cubes[i],'time',name='year')\n iris.coord_categorisation.add_month(cubes[i],'time',name='month')\n cubes[i] = cubes[i].extract(iris.Constraint(year = lambda y: y >=2009))\n\n #Make the year-on-year reduction rates\n yoy_rates = []\n for cube in cubes:\n #Calculate the global mean timeseries\n cube.coord('latitude').guess_bounds()\n cube.coord('longitude').guess_bounds()\n area_weights = iris.analysis.cartography.area_weights(cube)\n cube_mean = cube.collapsed(['latitude','longitude'],iris.analysis.MEAN,weights=area_weights)\n\n cube_rates = np.ones((cube_mean.shape))\n #Loop over the months and calculate the changes from the previous year\n #Calculate the year on year proportional changes in the global mean\n for i in range(12,cube_mean.shape[0]):\n cube_rates[i] = cube_mean[i].data / cube_mean[(i-12)].data\n\n yoy_rates.append(cube_rates)\n\n return yoy_rates", "def empirical_cor(l):\n\n global fig_dphi\n\n # first clean los\n maps_temp = np.matrix.copy(maps[:,:,l]) - np.matrix.copy(models[:,:,l])\n\n # no estimation on the ref image set to zero \n if np.nansum(maps[:,:,l]) != 0:\n\n maxlos,minlos=np.nanpercentile(maps_temp[ibeg_emp:iend_emp,jbeg_emp:jend_emp],float(arguments[\"--perc_los\"])),np.nanpercentile(maps_temp[ibeg_emp:iend_emp,jbeg_emp:jend_emp],100-float(arguments[\"--perc_los\"]))\n logger.debug('Set Max-Min LOS for empirical estimation: {0}-{1}'.format(maxlos,minlos))\n kk = np.nonzero(np.logical_or(maps_temp==0.,np.logical_or((maps_temp>maxlos),(maps_temp<minlos))))\n maps_temp[kk] = float('NaN')\n\n itemp = ibeg_emp\n for lign in range(ibeg_emp,iend_emp,10):\n if np.isnan(np.nanmean(maps[lign:lign+10,:,l])):\n itemp = lign\n else:\n break\n logger.debug('Begining of the image: {}'.format(itemp))\n\n if arguments[\"--topofile\"] is not None:\n ax_dphi = fig_dphi.add_subplot(4,int(N/4)+1,l+1)\n else:\n ax_dphi = None\n\n logger.debug('Threshold RMS: {}'.format(float(arguments[\"--threshold_rms\"])))\n\n # selection pixels\n index = np.nonzero(np.logical_and(elev<maxtopo,\n np.logical_and(elev>mintopo,\n np.logical_and(mask_flat>float(arguments[\"--threshold_mask\"]),\n np.logical_and(~np.isnan(maps_temp),\n np.logical_and(~np.isnan(rmsmap),\n np.logical_and(~np.isnan(elev),\n np.logical_and(rmsmap<float(arguments[\"--threshold_rms\"]),\n np.logical_and(rmsmap>1.e-6,\n np.logical_and(~np.isnan(maps_temp),\n np.logical_and(pix_az>ibeg_emp,\n np.logical_and(pix_az<iend_emp,\n np.logical_and(pix_rg>jbeg_emp,\n np.logical_and(pix_rg<jend_emp, \n slope>0.,\n ))))))))\n ))))))\n\n # 
extract coordinates for estimation\n temp = np.array(index).T\n x = temp[:,0]; y = temp[:,1]\n # clean maps\n los_clean = maps_temp[index].flatten()\n topo_clean = elev[index].flatten()\n rms_clean = rmsmap[index].flatten()\n \n logger.debug('Number of points for empirical estimation: {}'.format(len(los_clean)))\n if len(los_clean) < 1:\n logger.critical('No points left for empirical estimation. Exit!')\n logger.critical('threshold RMS: {0}, threshold Mask: {1}, Min-Max LOS: {2}-{3}, Min-Max topo: {4}-{5}, lines: {6}-{7}, \\\n cols: {8}- {9}'.format(float(arguments[\"--threshold_rms\"]),float(arguments[\"--threshold_mask\"]),minlos,maxlos,mintopo,maxtopo,ibeg_emp,iend_emp,jbeg_emp,jend_emp))\n sys.exit()\n\n # print itemp, iend_emp\n if flat>5 and iend_emp-itemp < .6*(iend_emp-ibeg_emp):\n logger.warning('Image too short in comparison to master, set flat to 5')\n temp_flat=5\n else:\n temp_flat=flat\n\n if ivar>0 and iend_emp-itemp < .6*(iend_emp-ibeg_emp):\n logger.warning('Image too short in comparison to master, set ivar to 0')\n ivar_temp=0\n nfit_temp=0\n else:\n ivar_temp=ivar\n nfit_temp=nfit\n\n # call ramp estim\n los = as_strided(maps[:,:,l]).flatten()\n samp = int(arguments[\"--emp_sampling\"])\n\n map_ramp, map_flata, map_topo, rmsi = estim_ramp(los,los_clean[::samp],topo_clean[::samp],x[::samp],\\\n y[::samp],temp_flat,rms_clean[::samp],nfit_temp, ivar_temp, l, ax_dphi)\n\n if (lin_start is not None) and (lin_end is not None):\n # try:\n indexref = np.nonzero(np.logical_and(elev<maxtopo,\n np.logical_and(elev>mintopo,\n np.logical_and(mask_flat>float(arguments[\"--threshold_mask\"]),\n np.logical_and(~np.isnan(maps_temp),\n np.logical_and(~np.isnan(rmsmap),\n np.logical_and(~np.isnan(elev),\n np.logical_and(rmsmap<float(arguments[\"--threshold_rms\"]),\n np.logical_and(rmsmap>1.e-6,\n np.logical_and(~np.isnan(maps_temp),\n np.logical_and(pix_az>lin_start,\n np.logical_and(pix_az<lin_end,\n np.logical_and(pix_rg>col_start,\n np.logical_and(pix_rg<col_end, \n slope>0.,\n ))))))))))))\n ))\n \n if len(indexref[0]) == 0:\n logger.warning('Ref zone is empty! Re-define --ref_zone argument. 
Exit!')\n sys.exit()\n\n ## Set data minus temporal model to zero in the ref area\n zone = as_strided(map_flata[:,:] - models[:,:,l])\n los_ref2 = zone[indexref].flatten()\n rms_ref = rmsmap[indexref].flatten()\n amp_ref = 1./rms_ref\n amp_ref = amp_ref/np.nanmax(amp_ref)\n # weigth avera of the phase\n cst = np.nansum(los_ref2*amp_ref) / np.nansum(amp_ref)\n logger.info('Re-estimation of a constant within lines {0}-{1} and cols {2}-{3}'.format(lin_start,lin_end,col_start,col_end))\n logger.info('Average phase within ref area: {0}:'.format(cst))\n if np.isnan(cst):\n cst = 0.\n map_ramp, map_flata = map_ramp + cst, map_flata - cst\n del zone\n \n else:\n map_flata = np.copy(maps[:,:,l])\n map_ramp, map_topo = np.zeros(np.shape(map_flata)), np.zeros(np.shape(map_flata))\n rmsi = 1\n\n # set ramp to NaN to have ramp of the size of the images\n kk = np.nonzero(np.isnan(map_flata))\n ramp = as_strided(map_ramp)\n ramp[kk] = float('NaN')\n topo = as_strided(map_topo)\n topo[kk] = float('NaN')\n del ramp, topo\n \n return map_ramp, map_flata, map_topo, rmsi", "def calculate_information_criterion(R, u_t, R_test, u_t_test, inverse_transform, inverse_transform_test, algo, x_nodes):\n model_list = generate_models(R, u_t, inverse_transform, algo) # iterates hyperparameters to generate models\n evidence_list = calculate_bic(R_test, u_t_test, inverse_transform_test, model_list, x_nodes)\n return evidence_list, model_list", "def calculate_all_metrcis(self):\n self.calculate_gc_metrcis()\n self.calculate_sam_metrics()\n self.calculate_classification_metrics()\n self.calculate_losses()", "def dataModel():\n srcmap001 = fits.open('dataFiles/6gev_srcmap_001.fits')\n srcmap03 = fits.open('dataFiles/6gev_srcmap_03.fits')\n\n image_data = fits.getdata('6gev_image.fits')\n filename = get_pkg_data_filename('6gev_image.fits')\n hdu = fits.open(filename)[0]\n wcs = WCS(hdu.header)\n\n #Given the results of the fit, calculate the model\n modelData001 = np.zeros(srcmap001[0].shape)\n modelData03 = np.zeros(srcmap03[0].shape)\n\n file = open('plotsData/fitResults001.pk1','rb')\n fit001 = pickle.load(file)\n file.close()\n\n file = open('plotsData/fitResults03.pk1','rb')\n fit03 = pickle.load(file)\n file.close()\n\n\n for source in fit001:\n the_index = srcmap001.index_of(source)\n\n modelData001 += fit001[source][:, None, None]*srcmap001[the_index].data[:-1, :, :]/np.sum(np.sum(srcmap001[the_index].data, axis=2), axis=1)[:-1, None, None]\n for source in fit03:\n the_index = srcmap03.index_of(source)\n modelData03 += fit03[source][:, None, None]*srcmap03[the_index].data[:-1, :, :]/np.sum(np.sum(srcmap03[the_index].data, axis=2), axis=1)[:-1, None, None]\n\n fig = plt.figure(figsize=[12, 4.5])\n\n vmin = 0\n vmax = 70.0\n cbStep = 10.0\n ax = fig.add_subplot(121, projection=wcs)\n ax=plt.gca()\n ax.tick_params(direction='in')\n c = Wedge((gc_l, gc_b), 1.0, theta1=0.0, theta2=360.0, width=14.0, edgecolor='black', facecolor='#474747', transform=ax.get_transform('galactic'))\n ax.add_patch(c)\n mappable=plt.imshow((image_data),cmap='inferno',origin='lower',norm=colors.PowerNorm(gamma=0.6),vmin=vmin, vmax=vmax, interpolation='gaussian')#\n plt.xlabel('Galactic Longitude')\n plt.ylabel('Galactic Latitude')\n plt.title('Data ($>6$ GeV)')\n cb = plt.colorbar(mappable, label='Counts per pixel', pad=0.01,ticks=np.arange(vmin, vmax+cbStep, cbStep))\n cb.ax.tick_params(width=0)\n\n\n ax2=fig.add_subplot(122, projection=wcs)\n ax2 = plt.gca()\n\n sources = []\n sources.append({\n 'Name':'3FGL J1745.3-2903c',\n 'RA':266.3434922,\n 
'DEC':-29.06274323,\n 'color':'xkcd:bright light blue'})\n\n sources.append({\n 'Name':'1FIG J1748.2-2816',\n 'RA':267.1000722,\n 'DEC':-28.27707114,\n 'color':'xkcd:fire engine red'\n })\n\n sources.append({\n 'Name':'1FIG J1746.4-2843',\n 'RA':266.5942898,\n 'DEC':-28.86244442,\n 'color':'xkcd:fluorescent green'\n })\n\n sources.append({\n 'Name':'Galactic Center',\n 'RA':266.417,\n 'DEC':-29.0079,\n 'color':'black'\n })\n\n #Add source names:\n for source in sources:\n l, b = ra_dec_to_l_b(source['RA'], source['DEC'])\n ax2.scatter(l, b, color=source['color'],marker='x',s=45.0, transform=ax2.get_transform('galactic'), label=source['Name'])\n\n c2 = Wedge((gc_l, gc_b), 1.0, theta1=0.0, theta2=360.0, width=14.0, edgecolor='black', facecolor='#474747', transform=ax2.get_transform('galactic'))\n ax2.add_patch(c2)\n mappable2 = plt.imshow((np.sum(modelData03,axis=0)), cmap='inferno',norm=colors.PowerNorm(gamma=0.6),origin='lower',vmin=vmin, vmax=vmax, interpolation='gaussian')\n plt.xlabel('Galactic Longitude')\n plt.ylabel('Galactic Latitude')\n plt.title('Model ($>6$ GeV)')\n cb2 = plt.colorbar(mappable2, label='Counts per pixel', pad=0.01, ticks=np.arange(vmin, vmax+cbStep, cbStep))\n cb2.ax.tick_params(width=0)\n leg = plt.legend(loc=1,frameon=True)\n leg.get_frame().set_alpha(0.5)\n leg.get_frame().set_edgecolor('white')\n text1 = leg.get_texts()\n for text in text1:\n text.set_color('black')\n\n fig.tight_layout()\n plt.subplots_adjust(wspace = 0.13, left=0.04, bottom=0.13, top=0.92)\n plt.show()\n #plt.savefig('plots/dataModelComparison.pdf',bbox_inches='tight')", "def summary(self):\n for i,layer in enumerate(self.chain):\n x = Input([2])\n y = layer.forward(x)\n Model(x,y,name=f'layer_{i}_summary').summary()", "def calculate_correlation(data):\n pass", "def constant_2015():\n\n #Load the CMIP6 historical\n cubes = iris.load(data_dir+'SO2DMS-em-anthro_input4MIPs_emissions_CMIP_CEDS-v2016-07-26-gr_200001-201412_n48.nc')\n #Get low and high level emissions just in the last year (2014)\n cubes = iris.cube.CubeList([cubes[2],cubes[1]])\n final_cubes = iris.cube.CubeList()\n for cube in cubes:\n final_cube = cube[-12:]\n final_cubes.append(final_cube)\n \n #Set the year-on-year proportional reductions to be nothing\n yoy_rates = calc_perc_reducts()\n yoy_rates = np.array(yoy_rates)\n yoy_rates = np.ones_like(yoy_rates)\n\n #Create coordinates for new nc file between 2014 and 2100\n lat_coord = cubes[0].coord('latitude')\n lon_coord = cubes[0].coord('longitude')\n time_coord = DimCoord(np.arange(95055.,95055.+(2100-2014+1)*360.,30.),standard_name=u'time', units=cf_units.Unit('days since 1750-1-1 00:00:00', calendar='360_day'), long_name=u'time', var_name='time')\n\n #Create the cube date\n cube_data_surf = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n cube_data_high = np.zeros((len(time_coord.points),cubes[0].shape[1],cubes[0].shape[2]))\n #Set first year equal to 2014 in CMIP6 historical\n cube_data_surf[:12,...] = final_cubes[0].data\n cube_data_high[:12,...] = final_cubes[1].data\n #Apply equal emissions in all other years too\n for i in range(12,cube_data_surf.shape[0]):\n cube_data_surf[i,...] = cube_data_surf[(i-12),...] * yoy_rates[0,i]\n cube_data_high[i,...] = cube_data_high[(i-12),...] 
* yoy_rates[1,i]\n #Make the output cubes\n fut_cube_surf = iris.cube.Cube(cube_data_surf,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[0].standard_name, long_name=final_cubes[0].long_name, var_name=final_cubes[0].var_name, units=final_cubes[0].units, attributes=final_cubes[0].attributes)\n fut_cube_high = iris.cube.Cube(cube_data_high,dim_coords_and_dims=[(time_coord,0),(lat_coord, 1),(lon_coord, 2)],standard_name=final_cubes[1].standard_name, long_name=final_cubes[1].long_name, var_name=final_cubes[1].var_name, units=final_cubes[1].units, attributes=final_cubes[1].attributes)\n\n fut_cube_high.var_name = 'field569_1'\n fut_cube_high.units='kg/m2/s'\n fut_cube_high.long_name ='HIGH LEVEL SO2 EMISSIONS KG/M2/S'\n fut_cube_surf.var_name = 'field569'\n fut_cube_surf.units='kg/m2/s'\n fut_cube_surf.long_name ='SULPHUR DIOXIDE EMISSIONS'\n\n #Load the DMS cube from standard RCP2.6\n dms_cube = iris.load(data_dir+'DMSSO2NH3_18502100_RCP26_monthly.nc')[0]\n iris.coord_categorisation.add_year(dms_cube,'time',name='year')\n dms_cube = dms_cube.extract(iris.Constraint(year = lambda y: y>=2014))\n\n dms_cube.var_name = 'field570'\n dms_cube.attributes.pop('name')\n dms_cube.coord('time').var_name = 'time'\n dms_cube.coord('time').long_name = 'time'\n\n fut_cube_high = fut_cube_high[:-2]\n fut_cube_surf = fut_cube_surf[:-2]\n\n fut_dms = iris.cube.Cube(dms_cube.data[:,0,::-1,:],dim_coords_and_dims=[(fut_cube_surf.coord('time'),0),(fut_cube_surf.coord('latitude'),1),(fut_cube_surf.coord('longitude'), 2)],standard_name=dms_cube.standard_name, long_name=dms_cube.long_name, var_name=dms_cube.var_name, units=dms_cube.units, attributes=dms_cube.attributes)\n\n #Save the final cubes as netcdf (cutting them to be the same length)\n iris.save(iris.cube.CubeList([fut_dms,fut_cube_high,fut_cube_surf]),data_dir+ \"SO2DMS_const2014.nc\")\n os.system('ncatted -O -a calendar,time,m,c,\"360_day\" '+data_dir+ \"SO2DMS_const2014.nc\")\n\n return", "def calculate_correlations(model_metric = \"MSE\", correlation_type = \"Pearsons\", save = True):\n \n if model_metric == \"MSE\":\n augmented_results = 'augmented_mse.csv'\n else:\n augmented_results = 'augmented_f1_score.csv'\n \n main_path = Path(os.path.dirname(__file__)).parent\n metadata_path = r\"visualize_results\"\n metadata = pd.read_csv(os.path.join(main_path, metadata_path, augmented_results))\n \n metadata['DateTime'] = pd.to_datetime(metadata['DateTime'], dayfirst = True)\n \n cleanup_nums = {\"Day_Night\": {\"day\": 0, \"night\": 1}}\n metadata = metadata.replace(cleanup_nums)\n \n tested_columns = [model_metric,\"Temperature\", \"Humidity\", \"Wind Speed\", \"Wind Direction\", \"Precipitation\",\"Activity\", \"Day_Night\", \"Hour\"] \n \n all_correlations = []\n all_p_values = []\n for model in metadata[\"model\"].unique():\n \n metadata_smaller = metadata[(metadata[\"model\"]== model)].copy()\n metadata_smaller = metadata_smaller[tested_columns]\n metadata_smaller = metadata_smaller.reset_index(drop=True)\n \n print(f\"For Model: {model}\")\n corr_curr_model = []\n p_value_curr_model =[]\n for column in metadata_smaller.columns:\n \n if correlation_type == \"Pearsons\":\n cor,p = stats.pearsonr(metadata_smaller[column], metadata_smaller[model_metric])\n else:\n cor,p = distcorr(metadata_smaller[column], metadata_smaller[model_metric],nruns = 5000)\n \n corr_curr_model.append(cor)\n p_value_curr_model.append(p)\n \n print(f\"between {model_metric} and {column} = R: {np.abs(cor)}, p-val: {p}\")\n \n 
corr_curr_model.insert(0,model)\n corr_curr_model.insert(0,model_metric)\n corr_curr_model.insert(0,correlation_type)\n \n p_value_curr_model.insert(0,model)\n p_value_curr_model.insert(0,model_metric)\n p_value_curr_model.insert(0,correlation_type)\n \n print(\"---------------\")\n all_correlations.append(corr_curr_model)\n all_p_values.append(p_value_curr_model)\n \n all_correlations_df = pd.DataFrame(all_correlations,\n columns = [\"Correlation\", \"Metric\", \"Model\"] + tested_columns)\n \n all_p_values_df = pd.DataFrame(all_p_values,\n columns = [\"Correlation\", \"Metric\", \"Model\"] + tested_columns)\n \n if save:\n all_correlations_df.to_csv(os.path.join(\"analysis_results\", f\"correlation_R_{correlation_type}_{model_metric}.csv\"), index=False)\n all_p_values_df.to_csv(os.path.join(\"analysis_results\", f\"correlation_Pvalue_{correlation_type}_{model_metric}.csv\"), index=False)\n \n return all_correlations_df", "def filter_and_correct_expression_and_image_features(tissue, model, aggregation, patch_size, M, k, pc_correction=False, tf_correction=False):\n\n\n\n\n # Filter expression\n Y, X, dIDs, tIDs, tfs, ths, t_idx = extract_final_layer_data(tissue, model, aggregation, patch_size)\n filt_X, filt_tIDs, final_exp_idx = filter_expression(X, tIDs, M, k)\n\n\n\n if pc_correction:\n print ('Correcting with {} expression PCs'.format(pc_correction))\n pca = PCA(n_components=pc_correction)\n\n\n pca_predictors = pca.fit_transform(filt_X)\n\n # Correct Y\n lr = LinearRegression()\n lr.fit(pca_predictors, Y)\n predicted_Y = lr.predict(pca_predictors)\n corrected_Y = Y - predicted_Y\n\n # Correct X\n projected_filt_X = np.dot(pca_predictors,pca.components_)\n corrected_filt_X = filt_X - projected_filt_X\n\n # Set as return variables\n final_X = corrected_filt_X\n final_Y = corrected_Y\n\n elif tf_correction:\n print('Correcting with all technical factors')\n tf_Y = Y[t_idx,:]\n tf_filt_X = filt_X[t_idx,:]\n\n tfs[list(ths).index('SMTSISCH')] = np.log2(tfs[list(ths).index('SMTSISCH')] + 1)\n tf_predictors = tfs\n\n #Correct Y\n lr_Y = LinearRegression()\n lr_Y.fit(tf_predictors, tf_Y)\n tf_Y_predicted = lr_Y.predict(tf_predictors)\n corrected_tf_Y = tf_Y - tf_Y_predicted\n\n #Correct X\n lr_X = LinearRegression()\n lr_X.fit(tf_predictors, tf_filt_X)\n tf_filt_X_predicted = lr_X.predict(tf_predictors)\n corrected_tf_filt_X = tf_filt_X - tf_filt_X_predicted\n\n # Set as return variables\n final_X = corrected_tf_filt_X\n final_Y = corrected_tf_Y\n else:\n # Set unmodified values as return variables\n final_X = filt_X\n final_Y = Y\n\n return final_Y, final_X, dIDs, filt_tIDs, tfs, ths, t_idx", "def pan_corr(file):\n\n # # infile = 'd:\\\\Projekti\\\\Satelit\\\\CO\\\\Razpis\\\\Flat field images_new2020\\\\flatfield\\\\NHDBflat_1D'\n # # infile = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Jure_naloga_banje_raw_pyt\\\\NHDRGoreMorje_3D'\n #\n # # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\Flat field images_new2020\\\\20201028 Vignetting\\\\flatfield\\\\'\n # # in_pan_ref_file = 'NHDPflat_3D_py.tif'\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Peking_PAN\\\\'\n # in_pan_ref_file = 'NHDPfoc_swp6_1D_py.tif'\n # in_ref = in_path + in_pan_ref_file\n #\n # inreffil = gdal.Open(in_ref)\n # image_ref = inreffil.ReadAsArray()\n # # size_ref = image_ref.shape\n # # pix_count = size_ref[0]*size_ref[1]\n #\n # image_ref = image_ref[800:930, 1420:1640]\n # size_ref = image_ref.shape\n # pix_count = size_ref[0] * size_ref[1]\n #\n # g1 = 0.\n # g2 = 0.\n # r1 = 0.\n # b1 = 0.\n #\n # 
for i in range(size_ref[0]):\n # for j in range(size_ref[1]):\n # if (i % 2) == 0 and (j % 2) == 0: g1 = g1 + image_ref[i, j]\n # if (i % 2) == 1 and (j % 2) == 1: g2 = g2 + image_ref[i, j]\n # if (i % 2) == 0 and (j % 2) == 1: r1 = r1 + image_ref[i, j]\n # if (i % 2) == 1 and (j % 2) == 0: b1 = b1 + image_ref[i, j]\n #\n # g1_avg = g1 / pix_count * 4\n # g2_avg = g2 / pix_count * 4\n # r1_avg = r1 / pix_count * 4\n # b1_avg = b1 / pix_count * 4\n #\n # raz_g1 = 1\n # raz_g2 = g1_avg/g2_avg\n # raz_r1 = g1_avg/r1_avg\n # raz_b1 = g1_avg/b1_avg\n #\n # avg = (g1+g2+r1+b1)/pix_count\n #\n # print(g1_avg, g2_avg, r1_avg, b1_avg, avg)\n\n raz_g1 = 1\n raz_g2 = 1.0245196396115988\n raz_r1 = 1.0131841989689434\n raz_b1 = 1.0517113199247086\n\n print('razmerje:', raz_g1, raz_g2, raz_r1, raz_b1)\n\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Peking_PAN\\\\'\n # in_pan_ref_file = 'NHDPfoc_swp6_4D_py.tif'\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\Flat field images_new2020\\\\20201028 Vignetting\\\\flatfield\\\\'\n # in_pan_ref_file = 'NHDPflat_3D_py.tif'\n\n # in_path = 'd:\\Projekti\\Satelit\\CO\\Razpis\\_POSNETKI\\Slo_PAN\\_26_30\\\\'\n # in_pan_ref_file = [filename for filename in os.listdir(in_path) if filename.lower().startswith(\"nhd\") and filename.lower().endswith(\"tif\")]\n\n \n\n \n\n # print('image', i)\n in_ref=file\n inreffil = gdal.Open(in_ref)\n image_ref = inreffil.ReadAsArray()\n size_ref = image_ref.shape\n # pix_count = size_ref[0] * size_ref[1]\n # pix_count = np.count_nonzero(image_ref)\n # pix_count = 3664*650\n\n # g1 = 0.\n # g2 = 0.\n # r1 = 0.\n # b1 = 0.\n #\n # for i in range(size_ref[0]):\n # for j in range(size_ref[1]):\n # if (i % 2) == 0 and (j % 2) == 0: g1 = g1 + image_ref[i, j]\n # if (i % 2) == 1 and (j % 2) == 1: g2 = g2 + image_ref[i, j]\n # if (i % 2) == 0 and (j % 2) == 1: r1 = r1 + image_ref[i, j]\n # if (i % 2) == 1 and (j % 2) == 0: b1 = b1 + image_ref[i, j]\n #\n # g1_avg = g1 / pix_count * 4\n # g2_avg = g2 / pix_count * 4\n # r1_avg = r1 / pix_count * 4\n # b1_avg = b1 / pix_count * 4\n #\n # avg = (g1 + g2 + r1 + b1) / pix_count\n #\n # print(g1_avg, g2_avg, r1_avg, b1_avg, avg)\n\n # popravek\n im_p_pop = np.zeros((size_ref[0], size_ref[1]), np.uint16)\n\n\n for i in range(size_ref[0]):\n for j in range(size_ref[1]):\n if (i % 2) == 0 and (j % 2) == 0 and image_ref[i, j] != 0: im_p_pop[i, j] = image_ref[i, j] * raz_g1\n if (i % 2) == 1 and (j % 2) == 1 and image_ref[i, j] != 0: im_p_pop[i, j] = image_ref[i, j] * raz_g2\n if (i % 2) == 0 and (j % 2) == 1 and image_ref[i, j] != 0: im_p_pop[i, j] = image_ref[i, j] * raz_r1\n if (i % 2) == 1 and (j % 2) == 0 and image_ref[i, j] != 0: im_p_pop[i, j] = image_ref[i, j] * raz_b1\n \n _,_,_,_,P=return_flatfield_set_path(2)\n P_flat=gdal_array.LoadFile(P)\n \n # im_p_pop=simple_flatfield_corr(P_flat, im_p_pop, 2, 1) \n \n # outout\n \n im_p_pop=BLUE_simple_flatfield_corr(P_flat, im_p_pop)\n \n out=os.path.abspath(file)+\"/corr/\"+os.path.basename(file)[:-4] + \"_pop_flat_corr.tif\"\n\n \n # out = in_ref[:-4] + \"_pop_flat_corr.tif\"\n\n driver = gdal.GetDriverByName('GTiff')\n\n # outRaster = driver.Create(out, size[1], size[0], 3, gdal.GDT_UInt16)\n outRaster = driver.Create(out, size_ref[1], size_ref[0], 1, gdal.GDT_UInt16)\n\n outband = outRaster.GetRasterBand(1)\n outband.WriteArray(im_p_pop)\n outband.FlushCache()", "def compute(self,input):\n\n for layer in self.layers:\n input = layer.compute(input)\n return input", "def update_variables(self):\n self.dl21 = self.l21-self.l11; 
self.dl22 = self.l22-self.l12; self.dl23 = self.l23-self.l13;\n self.kappa1, self.phi1, self.seg_len1 = self.configuration_space(self.l11, self.l12, self.l13, self.d, self.n)\n self.kappa2, self.phi2, self.seg_len2 = self.configuration_space(self.dl21, self.dl22, self.dl23, self.d, self.n)\n # aquire transformation matrices and tips for segment 1 and 2\n self.T01_bishop = self.transformation_matrix_bishop(self.kappa1, self.phi1, self.seg_len1)\n self.T12_bishop = self.transformation_matrix_bishop(self.kappa2, self.phi2, self.seg_len2)\n self.T02_bishop = np.matmul(self.T01_bishop, self.T12_bishop)\n self.T01_frenet = self.transformation_matrix_frenet(self.kappa1, self.phi1, self.seg_len1)\n self.T12_frenet = self.transformation_matrix_frenet(self.kappa2, self.phi2, self.seg_len2)\n self.T02_frenet = np.matmul(self.T01_frenet, self.T12_frenet)\n self.tip_vec1 = np.matmul(self.T01_bishop, self.base)[0:3]\n self.tip_vec2 = np.matmul(self.T02_bishop, self.base)[0:3]\n # Frenet frames\n self.normal_vec_frenet1 = self.T01_frenet[0:3, 0]\n self.binormal_vec_frenet1 = self.T01_frenet[0:3, 1]\n self.tangent_vec_frenet1 = self.T01_frenet[0:3, 2]\n self.normal_vec_frenet2 = self.T02_frenet[0:3, 0]\n self.binormal_vec_frenet2 = self.T02_frenet[0:3, 1]\n self.tangent_vec_frenet2 = self.T02_frenet[0:3, 2]\n # Bishop frames\n self.normal_vec_bishop1 = self.T01_bishop[0:3, 0]\n self.binormal_vec_bishop1 = self.T01_bishop[0:3, 1]\n self.tangent_vec_bishop1 = self.T01_bishop[0:3, 2]\n self.normal_vec_bishop2 = self.T02_bishop[0:3, 0]\n self.binormal_vec_bishop2 = self.T02_bishop[0:3, 1]\n self.tangent_vec_bishop2 = self.T02_bishop[0:3, 2]", "def image_forward(self, general_x, general_y, general_z):\n disti_p = F.pairwise_distance(general_y, general_z, 2)\n disti_n1 = F.pairwise_distance(general_y, general_x, 2)\n disti_n2 = F.pairwise_distance(general_z, general_x, 2)\n target = torch.FloatTensor(disti_p.size()).fill_(1)\n loss_sim_i1 = self.loss_sim_i(disti_p, disti_n1, target)\n loss_sim_i2 = self.loss_sim_i(disti_p, disti_n2, target)\n loss_sim_i = (loss_sim_i1 + loss_sim_i2) / 2.0\n return loss_sim_i", "def _calculate_information_gain(self, cur_state, next_state, next_label):\n n = len(cur_state)\n information_gain_per_action = np.zeros((n, self.action_dim))\n prob_prev = self.classifier.get_class1_prob(obs=cur_state)\n\n for i in range(self.action_dim):\n obs_i = np.copy(next_state)\n obs_i[:, -self.action_dim:] = cur_state[:, -self.action_dim:]\n obs_i[:, - i - 1] = next_state[:, -i - 1]\n\n prob_i = self.classifier.get_class1_prob(obs=obs_i)\n class_1_gain = (prob_i - prob_prev) * next_label[:, 0]\n class_0_gain = (prob_i - prob_prev) * (1 - next_label)[:, 0]\n\n if self.positive_only:\n class_1_gain[class_1_gain < 0] = 0\n class_0_gain[class_0_gain < 0] = 0\n else:\n class_0_gain = - class_0_gain\n\n information_gain_per_action[:, - i - 1] = (class_1_gain + class_0_gain)\n\n return information_gain_per_action", "def extract(self,image_path):#image_path\r\n\r\n img = caffe.io.load_image(image_path)\r\n \r\n #image1=cv2.imread(caffe_root + 'examples/images/cat.jpg') \r\n #img=cv2.cvtColor(image1,cv2.COLOR_BGR2RGB) \r\n #img=img/255. \r\n \r\n\r\n transformed_image = self.transformer.preprocess('data', img)\r\n self.net.blobs['data'].data[...] 
= transformed_image\r\n ft = self.net.forward()\r\n ft = np.squeeze(ft['pool5/7x7_s1'])\r\n ft = ft / LA.norm(ft)\r\n return ft", "def run(self):\n #self.source_fixed = center_of_mass(self.source_mesh.vs)\n #self.destination_fixed = center_of_mass(self.destination_mesh.vs)\n\n source_high_curvature = get_mesh_of_high_curvature(self.source_mesh)\n destination_high_curvature = get_mesh_of_high_curvature(self.destination_mesh)\n\n self.matrix, error = icp(source_high_curvature.vs, \n destination_high_curvature.vs, \n self.source_fixed, \n self.destination_fixed,\n 300,\n verbose = self.verbose)\n\n self.inverse = np.linalg.inv(self.matrix)\n self.global_confidence = 1 - error\n print self.global_confidence", "def computeIntercepts():\n pass", "def get_cost_updates(self, contraction_level, learning_rate):\r\n\r\n y = self.get_hidden_values(self.x)\r\n z = self.get_reconstructed_input(y)\r\n J = self.get_jacobian(y, self.W)\r\n # note : we sum over the size of a datapoint; if we are using\r\n # minibatches, L will be a vector, with one entry per\r\n # example in minibatch\r\n self.L_rec = - T.sum(self.x * T.log(z) +\r\n (1 - self.x) * T.log(1 - z),\r\n axis=1)\r\n\r\n # Compute the jacobian and average over the number of samples/minibatch\r\n self.L_jacob = T.sum(J ** 2) / self.n_batchsize\r\n\r\n # note : L is now a vector, where each element is the\r\n # cross-entropy cost of the reconstruction of the\r\n # corresponding example of the minibatch. We need to\r\n # compute the average of all these to get the cost of\r\n # the minibatch\r\n cost = T.mean(self.L_rec) + contraction_level * T.mean(self.L_jacob)\r\n\r\n # compute the gradients of the cost of the `cA` with respect\r\n # to its parameters\r\n gparams = T.grad(cost, self.params)\r\n # generate the list of updates\r\n updates = []\r\n for param, gparam in zip(self.params, gparams):\r\n updates.append((param, param - learning_rate * gparam))\r\n\r\n return (cost, updates)", "def model(input_shape, output_dim, num_hidden_units,num_hidden_units_2, num_code_units, filter_size, batch_size=BATCH_SIZE):\n shape = tuple([None]+list(input_shape[1:]))\n print(shape)\n l_in = lasagne.layers.InputLayer(shape=shape)\n\n print(\"Input shape: \",lasagne.layers.get_output_shape(l_in))\n\n # print(shaped_units)\n # shaped_units = shaped_units[0]\n shaped_units = 2800\n\n # print(shape)\n\n l_conv2D_1 = lasagne.layers.Conv2DLayer(\n l_in, \n num_filters=8,\n filter_size=filter_size, \n stride=(1, 1), \n border_mode=\"valid\", \n untie_biases=False, \n nonlinearity=None,\n )\n\n print(\"Conv 2D shape: \",lasagne.layers.get_output_shape(l_conv2D_1))\n\n l_reshape_1 = lasagne.layers.ReshapeLayer(\n l_conv2D_1,\n shape=(([0], -1))\n )\n\n print(\"Reshape 1 shape: \", lasagne.layers.get_output_shape(l_reshape_1))\n\n l_hidden_1 = lasagne.layers.DenseLayer(\n l_reshape_1,\n num_units= num_hidden_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n print(\"Hidden 1 shape: \", lasagne.layers.get_output_shape(l_hidden_1))\n\n l_code_layer = lasagne.layers.DenseLayer(\n l_hidden_1,\n num_units=num_code_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n print(\"Code layer shape: \",lasagne.layers.get_output_shape(l_code_layer))\n\n l_hidden_2 = lasagne.layers.DenseLayer(\n l_code_layer,\n num_units=num_hidden_units,\n nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n print(\"Hidden 2 shape: \",lasagne.layers.get_output_shape(l_hidden_2))\n\n l_hidden_3 = lasagne.layers.DenseLayer(\n l_hidden_2,\n num_units=shaped_units,\n 
nonlinearity=lasagne.nonlinearities.rectify,\n )\n\n print(\"Hidden 3 shape: \",lasagne.layers.get_output_shape(l_hidden_3))\n\n l_reshape_2 = lasagne.layers.ReshapeLayer(\n l_hidden_3,\n shape=(([0],8,7,50))\n )\n\n print(\"Reshape 2 shape: \",lasagne.layers.get_output_shape(l_reshape_2))\n\n l_out = lasagne.layers.Conv2DLayer(\n l_reshape_2, \n num_filters=1,\n filter_size=filter_size, \n stride=(1, 1), \n border_mode=\"valid\", \n untie_biases=False, \n nonlinearity=None,\n )\n\n # print(\"Deconv shape: \",lasagne.layers.get_output_shape(l_deconv2D_1))\n\n print(\"Output shape: \",lasagne.layers.get_output_shape(l_out))\n\n return l_out", "def loss(self, z1, z2_con_z1, z3, z1_rec, z2_con_z1_rec, z3_rec):\n pass", "def get_cost_updates(self, contraction_level, learning_rate):\n self.n_batchsize = 20\n y = self.get_hidden_values(self.x)\n z = self.get_reconstructed_input(y)\n J = self.get_jacobian(y, self.W)\n # note : we sum over the size of a datapoint; if we are using\n # minibatches, L will be a vector, with one entry per\n # example in minibatch\n self.L_rec = - T.sum(self.x * T.log(z) +\n (1 - self.x) * T.log(1 - z),\n axis=1)\n\n # Compute the jacobian and average over the number of samples/minibatch\n self.L_jacob = T.sum(J ** 2) / self.n_batchsize\n\n # note : L is now a vector, where each element is the\n # cross-entropy cost of the reconstruction of the\n # corresponding example of the minibatch. We need to\n # compute the average of all these to get the cost of\n # the minibatch\n cost = T.mean(self.L_rec) + contraction_level * T.mean(self.L_jacob)\n\n # compute the gradients of the cost of the `cA` with respect\n # to its parameters\n gparams = T.grad(cost, self.params)\n # generate the list of updates\n updates = []\n for param, gparam in zip(self.params, gparams):\n updates.append((param, param - learning_rate * gparam))\n\n return (cost, updates)", "def compute_metrics(mat,language='English',method ='dimensional',output='data_frame'):\n language = language.lower()\n method = method.lower()\n if language == 'english':\n if method == 'dimensional':\n if output == 'data_frame':\n mat['NegCount'] = mat['DetectCount'] - mat['PosCount']\n mat['MeanNegVal'] = mat['NegVal'] / mat['NegCount']\n mat['MeanPosVal'] = mat['PosVal'] / mat['PosCount']\n mat['MeanArousal'] = mat['Arousal'] / mat['DetectCount']\n mat['MeanDominance'] = mat['Dominance'] / mat['DetectCount']\n mat['PosNegValDifference'] = mat['MeanPosVal'] - mat['MeanNegVal']\n mat['MeanValence'] = (mat['NegVal'] + mat['PosVal'])/ mat['DetectCount'] \n mat['AbsMeanNegVal'] = abs(mat['MeanNegVal'])\n mat['DetectPercent'] = mat['DetectCount'] / mat['TokenCount']\n mat['DensityValence'] =(mat['NegVal'] + mat['PosVal'])/ mat['TokenCount'] \n mat['DensityNegVal'] = mat['NegVal'] / mat['TokenCount']\n mat['DensityPosVal'] = mat['PosVal'] / mat['TokenCount']\n mat['DensityArousal'] = mat['Arousal'] / mat['TokenCount']\n mat['DensityDominance'] = mat['Dominance'] / mat['TokenCount']\n mat['MeanSquaredValence'] = mat['ValSq'] / mat['DetectCount']\n mat['ValenceDeviation'] = np.sqrt(mat['MeanSquaredValence'])\n return(mat)\n elif output == 'array':\n out_dict = {}\n out_dict['PosVal'] = mat[:,:,0]\n out_dict['NegVal'] = mat[:,:,1]\n out_dict['Arousal'] = mat[:,:,2]\n out_dict['Dominance'] = mat[:,:,3]\n out_dict['PosCount'] = mat[:,:,4]\n out_dict['DetectCount'] = mat[:,:,5]\n out_dict['TokenCount'] = mat[:,:,6]\n out_dict['ValSq'] = mat[:,:,7]\n\n out_dict['DetectPercent'] = np.divide(out_dict['DetectCount'],out_dict['TokenCount'])\n 
out_dict['NegCount'] = np.subtract(out_dict['DetectCount'],out_dict['PosCount'])\n # Mean Values:\n out_dict['MeanValence'] = np.divide(np.add(out_dict['PosVal'],out_dict['NegVal']),out_dict['DetectCount'])\n out_dict['MeanNegVal'] = np.divide(out_dict['NegVal'],out_dict['NegCount'])\n out_dict['MeanPosVal'] = np.divide(out_dict['PosVal'],out_dict['PosCount'])\n out_dict['MeanArousal'] = np.divide(out_dict['Arousal'],out_dict['DetectCount'])\n out_dict['MeanDominance'] = np.divide(out_dict['Dominance'],out_dict['DetectCount'])\n out_dict['PosNegValDifference'] = np.subtract(out_dict['MeanPosVal'] ,out_dict['MeanNegVal'])\n # Percentages:\n out_dict['DetectPosPercent'] = np.divide(out_dict['PosCount'],out_dict['DetectCount'])\n out_dict['OverallPosPercent'] = np.divide(out_dict['PosCount'],out_dict['TokenCount'])\n out_dict['DetectNegPercent'] = np.divide(out_dict['NegCount'],out_dict['DetectCount'])\n out_dict['OverallNegPercent'] = np.divide(out_dict['NegCount'],out_dict['TokenCount'])\n out_dict['MeanSquaredValence'] = np.divide(out_dict['ValSq'],out_dict['DetectCount'])\n out_dict['ValenceDeviation'] = np.sqrt(out_dict['MeanSquaredValence'])\n return(out_dict)\n else:\n print(\"Error: Output Format not found!\")\n elif method == 'discrete':\n if output == 'data_frame':\n mat['function_Percent'] = mat.function / mat.TokenCount\n mat['pronoun_Percent'] = mat.pronoun / mat.TokenCount\n mat['ppron_Percent'] = mat.ppron / mat.TokenCount\n mat['i_Percent'] = mat.i / mat.TokenCount\n mat['we_Percent'] = mat.we / mat.TokenCount\n mat['you_Percent'] = mat.you / mat.TokenCount\n mat['shehe_Percent'] = mat.shehe / mat.TokenCount\n mat['they_Percent'] = mat.they / mat.TokenCount\n mat['ipron_Percent'] = mat.ipron / mat.TokenCount\n mat['article_Percent'] = mat.article / mat.TokenCount\n mat['prep_Percent'] = mat.prep / mat.TokenCount\n mat['auxverb_Percent'] = mat.auxverb / mat.TokenCount\n mat['adverb_Percent'] = mat.adverb / mat.TokenCount\n mat['conj_Percent'] = mat.conj / mat.TokenCount\n mat['negate_Percent'] = mat.negate / mat.TokenCount\n mat['verb_Percent'] = mat.verb / mat.TokenCount\n mat['adj_Percent'] = mat.adj / mat.TokenCount\n mat['compare_Percent'] = mat.compare / mat.TokenCount\n mat['interrog_Percent'] = mat.interrog / mat.TokenCount\n mat['number_Percent'] = mat.number / mat.TokenCount\n mat['quant_Percent'] = mat.quant / mat.TokenCount\n mat['affect_Percent'] = mat.affect / mat.TokenCount\n mat['posemo_Percent'] = mat.posemo / mat.TokenCount\n mat['negemo_Percent'] = mat.negemo / mat.TokenCount\n mat['anx_Percent'] = mat.anx / mat.TokenCount\n mat['anger_Percent'] = mat.anger / mat.TokenCount\n mat['sad_Percent'] = mat.sad / mat.TokenCount\n mat['social_Percent'] = mat.social / mat.TokenCount\n mat['family_Percent'] = mat.family / mat.TokenCount\n mat['friend_Percent'] = mat.friend / mat.TokenCount\n mat['female_Percent'] = mat.female / mat.TokenCount\n mat['male_Percent'] = mat.male / mat.TokenCount\n mat['cogproc_Percent'] = mat.cogproc / mat.TokenCount\n mat['insight_Percent'] = mat.insight / mat.TokenCount\n mat['cause_Percent'] = mat.cause / mat.TokenCount\n mat['discrep_Percent'] = mat.discrep / mat.TokenCount\n mat['tentat_Percent'] = mat.tentat / mat.TokenCount\n mat['certain_Percent'] = mat.certain / mat.TokenCount\n mat['differ_Percent'] = mat.differ / mat.TokenCount\n mat['percept_Percent'] = mat.percept / mat.TokenCount\n mat['see_Percent'] = mat.see / mat.TokenCount\n mat['hear_Percent'] = mat.hear / mat.TokenCount\n mat['feel_Percent'] = mat.feel / mat.TokenCount\n 
mat['bio_Percent'] = mat.bio / mat.TokenCount\n mat['body_Percent'] = mat.body / mat.TokenCount\n mat['health_Percent'] = mat.health / mat.TokenCount\n mat['sexual_Percent'] = mat.sexual / mat.TokenCount\n mat['ingest_Percent'] = mat.ingest / mat.TokenCount\n mat['drives_Percent'] = mat.drives / mat.TokenCount\n mat['affiliation_Percent'] = mat.affiliation / mat.TokenCount\n mat['achieve_Percent'] = mat.achieve / mat.TokenCount\n mat['power_Percent'] = mat.power / mat.TokenCount\n mat['reward_Percent'] = mat.reward / mat.TokenCount\n mat['risk_Percent'] = mat.risk / mat.TokenCount\n mat['focuspast_Percent'] = mat.focuspast / mat.TokenCount\n mat['focuspresent_Percent'] = mat.focuspresent / mat.TokenCount\n mat['focusfuture_Percent'] = mat.focusfuture / mat.TokenCount\n mat['relativ_Percent'] = mat.relativ / mat.TokenCount\n mat['motion_Percent'] = mat.motion / mat.TokenCount\n mat['space_Percent'] = mat.space / mat.TokenCount\n mat['time_Percent'] = mat.time / mat.TokenCount\n mat['work_Percent'] = mat.work / mat.TokenCount\n mat['leisure_Percent'] = mat.leisure / mat.TokenCount\n mat['home_Percent'] = mat.home / mat.TokenCount\n mat['money_Percent'] = mat.money / mat.TokenCount\n mat['relig_Percent'] = mat.relig / mat.TokenCount\n mat['death_Percent'] = mat.death / mat.TokenCount\n mat['informal_Percent'] = mat.informal / mat.TokenCount\n mat['swear_Percent'] = mat.swear / mat.TokenCount\n mat['netspeak_Percent'] = mat.netspeak / mat.TokenCount\n mat['assent_Percent'] = mat.assent / mat.TokenCount\n mat['nonflu_Percent'] = mat.nonflu / mat.TokenCount\n mat['filler_Percent'] = mat.filler / mat.TokenCount\n mat['Detect_Percent'] = mat.DetectCount / mat.TokenCount\n return(mat)\n elif output == 'array':\n out_dict = {}\n out_dict['Affect'] = mat[:,:,21]\n out_dict['Posemo'] = mat[:,:,22]\n out_dict['Negemo'] = mat[:,:,23]\n out_dict['Anx'] = mat[:,:,24]\n out_dict['Anger'] = mat[:,:,25]\n out_dict['Sad'] = mat[:,:,26]\n out_dict['Function'] = mat[:,:,0]\n out_dict['CogProc'] = mat[:,:,32]\n out_dict['DetectCount'] = mat[:,:,-2]\n out_dict['TokenCount'] = mat[:,:,-1]\n\n out_dict['DetectPosPercent'] = np.divide(out_dict['Posemo'], out_dict['DetectCount'])\n out_dict['OverallPosPercent'] = np.divide(out_dict['Posemo'], out_dict['TokenCount'])\n out_dict['DetectNegPercent'] = np.divide(out_dict['Negemo'], out_dict['DetectCount'])\n out_dict['OverallNegPercent'] = np.divide(out_dict['Negemo'], out_dict['TokenCount'])\n out_dict['EmoPosPercent'] = np.divide(out_dict['Posemo'],np.add(out_dict['Posemo'],out_dict['Negemo']))\n out_dict['DetectAnxPercent'] = np.divide(out_dict['Anx'], out_dict['DetectCount'])\n out_dict['OverallAnxPercent'] = np.divide(out_dict['Anx'], out_dict['TokenCount'])\n out_dict['DetectAngerPercent'] = np.divide(out_dict['Anger'], out_dict['DetectCount'])\n out_dict['OverallAngerPercent'] = np.divide(out_dict['Anger'], out_dict['TokenCount'])\n out_dict['DetectSadPercent'] = np.divide(out_dict['Sad'], out_dict['DetectCount'])\n out_dict['OverallSadPercent'] = np.divide(out_dict['Sad'], out_dict['TokenCount'])\n out_dict['DetectAffectPercent'] = np.divide(out_dict['Affect'], out_dict['DetectCount'])\n out_dict['OverallAffectPercent'] = np.divide(out_dict['Affect'], out_dict['TokenCount'])\n\n\n out_dict['DetectFunctionPercent'] = np.divide(out_dict['Function'], out_dict['DetectCount'])\n out_dict['OverallFunctionPercent'] = np.divide(out_dict['Function'], out_dict['TokenCount'])\n out_dict['DetectCogprocPercent'] = np.divide(out_dict['CogProc'], out_dict['DetectCount'])\n 
out_dict['OverallCogprocPercent'] = np.divide(out_dict['CogProc'], out_dict['TokenCount'])\n return(out_dict)\n else:\n print(\"Error: Output Format not found!\") \n else:\n print(\"Error: Method not found!\")\n elif language == 'german':\n if method == 'dimensional':\n if output == 'data_frame':\n mat['NegCount'] = mat['DetectCount'] - mat['PosCount']\n mat['MeanNegVal'] = mat['NegVal'] / mat['NegCount']\n mat['MeanPosVal'] = mat['PosVal'] / mat['PosCount']\n mat['MeanArousal'] = mat['Arousal'] / mat['DetectCount']\n mat['MeanDominance'] = mat['Dominance'] / mat['DetectCount']\n mat['MeanPotency'] = mat['Potency'] / mat['DetectCount']\n mat['PosNegValDifference'] = mat['MeanPosVal'] - mat['MeanNegVal']\n mat['MeanValence'] = (mat['NegVal'] + mat['PosVal'])/ mat['DetectCount'] \n mat['AbsMeanNegVal'] = abs(mat['MeanNegVal'])\n mat['DetectPercent'] = mat['DetectCount'] / mat['TokenCount']\n mat['DensityValence'] =(mat['NegVal'] + mat['PosVal'])/ mat['TokenCount'] \n mat['DensityNegVal'] = mat['NegVal'] / mat['TokenCount']\n mat['DensityPosVal'] = mat['PosVal'] / mat['TokenCount']\n mat['DensityArousal'] = mat['Arousal'] / mat['TokenCount']\n mat['DensityDominance'] = mat['Dominance'] / mat['TokenCount']\n mat['MeanSquaredValence'] = mat['ValSq'] / mat['DetectCount']\n mat['ValenceDeviation'] = np.sqrt(mat['MeanSquaredValence'])\n return(mat)\n elif output == 'array':\n out_dict = {}\n out_dict['PosVal'] = mat[:,:,0]\n out_dict['NegVal'] = mat[:,:,1]\n out_dict['Arousal'] = mat[:,:,2]\n out_dict['Dominance'] = mat[:,:,3]\n out_dict['PosCount'] = mat[:,:,4]\n out_dict['DetectCount'] = mat[:,:,5]\n out_dict['Imagine'] = mat[:,:,6]\n out_dict['Potency'] = mat[:,:,7]\n out_dict['DomPot_Count'] = mat[:,:,8]\n out_dict['TokenCount'] = mat[:,:,9]\n out_dict['ValSq'] = mat[:,:,10]\n\n out_dict['DetectPercent'] = np.divide(out_dict['DetectCount'],out_dict['TokenCount'])\n out_dict['NegCount'] = np.subtract(out_dict['DetectCount'],out_dict['PosCount'])\n # Mean Values:\n out_dict['MeanValence'] = np.divide(np.add(out_dict['PosVal'],out_dict['NegVal']),out_dict['DetectCount'])\n out_dict['MeanNegVal'] = np.divide(out_dict['NegVal'],out_dict['NegCount'])\n out_dict['MeanPosVal'] = np.divide(out_dict['PosVal'],out_dict['PosCount'])\n out_dict['MeanArousal'] = np.divide(out_dict['Arousal'],out_dict['DetectCount'])\n out_dict['MeanDominance'] = np.divide(out_dict['Dominance'],out_dict['DomPot_Count'])\n out_dict['MeanPotency'] = np.divide(out_dict['Potency'],out_dict['DomPot_Count'])\n out_dict['PosNegValDifference'] = np.subtract(out_dict['MeanPosVal'] ,out_dict['MeanNegVal'])\n # Percentages:\n out_dict['DetectPosPercent'] = np.divide(out_dict['PosCount'],out_dict['DetectCount'])\n out_dict['OverallPosPercent'] = np.divide(out_dict['PosCount'],out_dict['TokenCount'])\n out_dict['DetectNegPercent'] = np.divide(out_dict['NegCount'],out_dict['DetectCount'])\n out_dict['OverallNegPercent'] = np.divide(out_dict['NegCount'],out_dict['TokenCount'])\n out_dict['MeanSquaredValence'] = np.divide(out_dict['ValSq'],out_dict['DetectCount'])\n out_dict['ValenceDeviation'] = np.sqrt(out_dict['MeanSquaredValence'])\n return(out_dict)\n else:\n print(\"Error: Output Format not found!\")\n elif method == 'discrete':\n if output == 'data_frame':\n mat['Pronoun_Percent'] = mat.Pronoun / mat.TokenCount\n mat['I_Percent'] = mat.I / mat.TokenCount\n mat['We_Percent'] = mat.We / mat.TokenCount\n mat['Self_Percent'] = mat.Self / mat.TokenCount\n mat['You_Percent'] = mat.You / mat.TokenCount\n mat['Other_Percent'] = mat.Other / 
mat.TokenCount\n mat['Negate_Percent'] = mat.Negate / mat.TokenCount\n mat['Assent_Percent'] = mat.Assent / mat.TokenCount\n mat['Article_Percent'] = mat.Article / mat.TokenCount\n mat['Preps_Percent'] = mat.Preps / mat.TokenCount\n mat['Number_Percent'] = mat.Number / mat.TokenCount\n mat['Affect_Percent'] = mat.Affect / mat.TokenCount\n mat['Posemo_Percent'] = mat.Posemo / mat.TokenCount\n mat['Posfeel_Percent'] = mat.Posfeel / mat.TokenCount\n mat['Optim_Percent'] = mat.Optim / mat.TokenCount\n mat['Negemo_Percent'] = mat.Negemo / mat.TokenCount\n mat['Anx_Percent'] = mat.Anx / mat.TokenCount\n mat['Anger_Percent'] = mat.Anger / mat.TokenCount\n mat['Sad_Percent'] = mat.Sad / mat.TokenCount\n mat['Cogmech_Percent'] = mat.Cogmech / mat.TokenCount\n mat['Cause_Percent'] = mat.Cause / mat.TokenCount\n mat['Insight_Percent'] = mat.Insight / mat.TokenCount\n mat['Discrep_Percent'] = mat.Discrep / mat.TokenCount\n mat['Inhib_Percent'] = mat.Inhib / mat.TokenCount\n mat['Tentat_Percent'] = mat.Tentat / mat.TokenCount\n mat['Certain_Percent'] = mat.Certain / mat.TokenCount\n mat['Senses_Percent'] = mat.Senses / mat.TokenCount\n mat['See_Percent'] = mat.See / mat.TokenCount\n mat['Hear_Percent'] = mat.Hear / mat.TokenCount\n mat['Feel_Percent'] = mat.Feel / mat.TokenCount\n mat['Social_Percent'] = mat.Social / mat.TokenCount\n mat['Comm_Percent'] = mat.Comm / mat.TokenCount\n mat['Othref_Percent'] = mat.Othref / mat.TokenCount\n mat['Friends_Percent'] = mat.Friends / mat.TokenCount\n mat['Family_Percent'] = mat.Family / mat.TokenCount\n mat['Humans_Percent'] = mat.Humans / mat.TokenCount\n mat['Time_Percent'] = mat.Time / mat.TokenCount\n mat['Past_Percent'] = mat.Past / mat.TokenCount\n mat['Present_Percent'] = mat.Present / mat.TokenCount\n mat['Future_Percent'] = mat.Future / mat.TokenCount\n mat['Space_Percent'] = mat.Space / mat.TokenCount\n mat['Up_Percent'] = mat.Up / mat.TokenCount\n mat['Down_Percent'] = mat.Down / mat.TokenCount\n mat['Incl_Percent'] = mat.Incl / mat.TokenCount\n mat['Excl_Percent'] = mat.Excl / mat.TokenCount\n mat['Motion_Percent'] = mat.Motion / mat.TokenCount\n mat['Occup_Percent'] = mat.Occup / mat.TokenCount\n mat['School_Percent'] = mat.School / mat.TokenCount\n mat['Job_Percent'] = mat.Job / mat.TokenCount\n mat['Achieve_Percent'] = mat.Achieve / mat.TokenCount\n mat['Leisure_Percent'] = mat.Leisure / mat.TokenCount\n mat['Home_Percent'] = mat.Home / mat.TokenCount\n mat['Sports_Percent'] = mat.Sports / mat.TokenCount\n mat['TV_Percent'] = mat.TV / mat.TokenCount\n mat['Music_Percent'] = mat.Music / mat.TokenCount\n mat['Money_Percent'] = mat.Money / mat.TokenCount\n mat['Metaph_Percent'] = mat.Metaph / mat.TokenCount\n mat['Relig_Percent'] = mat.Relig / mat.TokenCount\n mat['Death_Percent'] = mat.Death / mat.TokenCount\n mat['Physcal_Percent'] = mat.Physcal / mat.TokenCount\n mat['Body_Percent'] = mat.Body / mat.TokenCount\n mat['Sexual_Percent'] = mat.Sexual / mat.TokenCount\n mat['Eating_Percent'] = mat.Eating / mat.TokenCount\n mat['Sleep_Percent'] = mat.Sleep / mat.TokenCount\n mat['Groom_Percent'] = mat.Groom / mat.TokenCount\n mat['Swear_Percent'] = mat.Swear / mat.TokenCount\n mat['Nonfl_Percent'] = mat.Nonfl / mat.TokenCount\n mat['Fillers_Percent'] = mat.Fillers / mat.TokenCount\n mat['Swiss_Percent'] = mat.Swiss / mat.TokenCount\n mat['Ideo_Percent'] = mat.Ideo / mat.TokenCount\n mat['Personalpronomina_Percent'] = mat.Personalpronomina / mat.TokenCount\n mat['Indefinitpronomina_Percent'] = mat.Indefinitpronomina / mat.TokenCount\n 
mat['AuxiliaryVerbs_Percent'] = mat.AuxiliaryVerbs / mat.TokenCount\n mat['Konjunktionen_Percent'] = mat.Konjunktionen / mat.TokenCount\n mat['Adverbien_Percent'] = mat.Adverbien / mat.TokenCount\n mat['Detect_Percent'] = mat.LIWC_Counter / mat.TokenCount\n mat['Bedrohung_Percent'] = mat.Bedrohung / mat.TokenCount\n return(mat)\n\n elif output == 'array':\n out_dict = {}\n out_dict['Affect'] = mat[:,:,11]\n out_dict['Posemo'] = mat[:,:,12]\n out_dict['Posfeel'] = mat[:,:,13]\n out_dict['Optim'] = mat[:,:,14]\n out_dict['Negemo'] = mat[:,:,15]\n out_dict['Anx'] = mat[:,:,16]\n out_dict['Anger'] = mat[:,:,17]\n out_dict['Sad'] = mat[:,:,18]\n out_dict['Function'] = mat[:,:,0]\n out_dict['CogProc'] = mat[:,:,32]\n out_dict['DetectCount'] = mat[:,:,-2]\n out_dict['TokenCount'] = mat[:,:,-1]\n\n out_dict['DetectPosPercent'] = np.divide(out_dict['Posemo'], out_dict['DetectCount'])\n out_dict['OverallPosPercent'] = np.divide(out_dict['Posemo'], out_dict['TokenCount'])\n out_dict['DetectPosfeelPercent'] = np.divide(out_dict['Posfeel'], out_dict['DetectCount'])\n out_dict['OverallPosfeelPercent'] = np.divide(out_dict['Posfeel'], out_dict['TokenCount'])\n out_dict['DetectOptimPercent'] = np.divide(out_dict['Optim'], out_dict['DetectCount'])\n out_dict['OverallOptimPercent'] = np.divide(out_dict['Optim'], out_dict['TokenCount'])\n out_dict['DetectNegPercent'] = np.divide(out_dict['Negemo'], out_dict['DetectCount'])\n out_dict['OverallNegPercent'] = np.divide(out_dict['Negemo'], out_dict['TokenCount'])\n out_dict['EmoPosPercent'] = np.divide(out_dict['Posemo'],np.add(out_dict['Posemo'],out_dict['Negemo']))\n out_dict['DetectAnxPercent'] = np.divide(out_dict['Anx'], out_dict['DetectCount'])\n out_dict['OverallAnxPercent'] = np.divide(out_dict['Anx'], out_dict['TokenCount'])\n out_dict['DetectAngerPercent'] = np.divide(out_dict['Anger'], out_dict['DetectCount'])\n out_dict['OverallAngerPercent'] = np.divide(out_dict['Anger'], out_dict['TokenCount'])\n out_dict['DetectSadPercent'] = np.divide(out_dict['Sad'], out_dict['DetectCount'])\n out_dict['OverallSadPercent'] = np.divide(out_dict['Sad'], out_dict['TokenCount'])\n\n out_dict['DetectAffectPercent'] = np.divide(out_dict['Affect'], out_dict['DetectCount'])\n out_dict['OverallAffectPercent'] = np.divide(out_dict['Affect'], out_dict['TokenCount'])\n out_dict['DetectFunctionPercent'] = np.divide(out_dict['Function'], out_dict['DetectCount'])\n out_dict['OverallFunctionPercent'] = np.divide(out_dict['Function'], out_dict['TokenCount'])\n out_dict['DetectCogprocPercent'] = np.divide(out_dict['CogProc'], out_dict['DetectCount'])\n out_dict['OverallCogprocPercent'] = np.divide(out_dict['CogProc'], out_dict['TokenCount'])\n return(out_dict)\n else:\n print(\"Error: Output Format not found!\") \n else:\n print(\"Error: Method not found!\") \n elif language == 'chinese':\n if method == 'dimensional':\n if output == 'data_frame':\n print(\"Error: This combination doesn't exist yet!\")\n elif output == 'array':\n print(\"Error: This combination doesn't exist yet!\")\n else:\n print(\"Error: Output Format not found!\")\n elif method == 'discrete':\n if output == 'data_frame':\n print(\"Error: This combination doesn't exist yet!\")\n elif output == 'array':\n out_dict = {}\n out_dict['Affect'] = mat[:,:,30]\n out_dict['Posemo'] = mat[:,:,31]\n out_dict['Negemo'] = mat[:,:,32]\n out_dict['Anx'] = mat[:,:,33]\n out_dict['Anger'] = mat[:,:,34]\n out_dict['Sad'] = mat[:,:,35]\n out_dict['Function'] = mat[:,:,0]\n out_dict['CogProc'] = mat[:,:,41]\n 
out_dict['DetectCount'] = mat[:,:,-2]\n out_dict['TokenCount'] = mat[:,:,-1]\n\n out_dict['DetectPosPercent'] = np.divide(out_dict['Posemo'], out_dict['DetectCount'])\n out_dict['OverallPosPercent'] = np.divide(out_dict['Posemo'], out_dict['TokenCount'])\n out_dict['DetectNegPercent'] = np.divide(out_dict['Negemo'], out_dict['DetectCount'])\n out_dict['OverallNegPercent'] = np.divide(out_dict['Negemo'], out_dict['TokenCount'])\n out_dict['EmoPosPercent'] = np.divide(out_dict['Posemo'],np.add(out_dict['Posemo'],out_dict['Negemo']))\n out_dict['DetectAnxPercent'] = np.divide(out_dict['Anx'], out_dict['DetectCount'])\n out_dict['OverallAnxPercent'] = np.divide(out_dict['Anx'], out_dict['TokenCount'])\n out_dict['DetectAngerPercent'] = np.divide(out_dict['Anger'], out_dict['DetectCount'])\n out_dict['OverallAngerPercent'] = np.divide(out_dict['Anger'], out_dict['TokenCount'])\n out_dict['DetectSadPercent'] = np.divide(out_dict['Sad'], out_dict['DetectCount'])\n out_dict['OverallSadPercent'] = np.divide(out_dict['Sad'], out_dict['TokenCount'])\n out_dict['DetectAffectPercent'] = np.divide(out_dict['Affect'], out_dict['DetectCount'])\n out_dict['OverallAffectPercent'] = np.divide(out_dict['Affect'], out_dict['TokenCount'])\n out_dict['DetectPercent'] = np.divide(out_dict['DetectCount'], out_dict['TokenCount'])\n\n out_dict['DetectFunctionPercent'] = np.divide(out_dict['Function'], out_dict['DetectCount'])\n out_dict['OverallFunctionPercent'] = np.divide(out_dict['Function'], out_dict['TokenCount'])\n out_dict['DetectCogprocPercent'] = np.divide(out_dict['CogProc'], out_dict['DetectCount'])\n out_dict['OverallCogprocPercent'] = np.divide(out_dict['CogProc'], out_dict['TokenCount'])\n return(out_dict)\n else:\n print(\"Error: Output Format not found!\") \n else:\n print(\"Error: Method not found!\") \n else:\n print(\"Error: Language not found!\")", "def run_all_layers(self, img): # noqa\n s1_outputs = [s1(img) for s1 in self.s1_units]\n\n # Each C1 layer pools across two S1 layers\n c1_outputs = []\n for c1, i in zip(self.c1_units, range(0, len(self.s1_units), 2)):\n c1_outputs.append(c1(s1_outputs[i:i+2]))\n\n s2_outputs = [s2(c1_outputs) for s2 in self.s2_units]\n c2_outputs = [c2(s2) for c2, s2 in zip(self.c2_units, s2_outputs)]\n\n return s1_outputs, c1_outputs, s2_outputs, c2_outputs", "def _derive_transformation_matrices(self):\n\n if hasattr(self, '_primaries') and hasattr(self, '_whitepoint'):\n if self._primaries is not None and self._whitepoint is not None:\n npm = normalised_primary_matrix(self._primaries,\n self._whitepoint)\n\n self._derived_RGB_to_XYZ_matrix = npm\n self._derived_XYZ_to_RGB_matrix = np.linalg.inv(npm)", "def calc_CC_operation(el_output_from_GT_W, GT_size_W, fuel_type, T_sup_K):\n\n (eta0, m0_exhaust_GT_kgpers) = calc_GT_operation_fullload(GT_size_W, fuel_type)\n (eta, m_exhaust_GT_kgpers, T_exhaust_GT_K, m_fuel_kgpers) = calc_GT_operation_partload(el_output_from_GT_W,\n GT_size_W, eta0,\n m0_exhaust_GT_kgpers,\n fuel_type)\n\n (q_output_ST_W, el_output_ST_W) = calc_ST_operation(m_exhaust_GT_kgpers, T_exhaust_GT_K, T_sup_K, fuel_type)\n\n LHV = LHV_NG if fuel_type == 'NG' else LHV_BG # read LHV of NG or BG\n\n eta_el = (el_output_from_GT_W + el_output_ST_W) / (m_fuel_kgpers * LHV)\n eta_thermal = q_output_ST_W / (m_fuel_kgpers * LHV)\n eta_total = eta_el + eta_thermal\n el_output_W = el_output_ST_W + el_output_from_GT_W\n\n return {'el_output_W': el_output_W, 'q_output_ST_W': q_output_ST_W, 'eta_el': eta_el, 'eta_thermal': eta_thermal,\n 'eta_total': eta_total}", 
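A minimal, self-contained sketch of the dimensional English branch of the compute_metrics entry above, assuming only that the accumulated count columns exist in a pandas DataFrame; the numeric values are invented for illustration, and each derived column follows the same formulas as the function above:

import numpy as np
import pandas as pd

# Hypothetical accumulated counts for a single text (illustrative values only).
mat = pd.DataFrame({
    'PosVal': [12.4], 'NegVal': [-7.1], 'PosCount': [18],
    'DetectCount': [25], 'TokenCount': [120], 'ValSq': [95.0],
})

# Same derivations as the data_frame branch of compute_metrics.
mat['NegCount'] = mat['DetectCount'] - mat['PosCount']
mat['MeanNegVal'] = mat['NegVal'] / mat['NegCount']
mat['MeanPosVal'] = mat['PosVal'] / mat['PosCount']
mat['MeanValence'] = (mat['NegVal'] + mat['PosVal']) / mat['DetectCount']
mat['DetectPercent'] = mat['DetectCount'] / mat['TokenCount']
mat['MeanSquaredValence'] = mat['ValSq'] / mat['DetectCount']
mat['ValenceDeviation'] = np.sqrt(mat['MeanSquaredValence'])

print(mat[['MeanValence', 'DetectPercent', 'ValenceDeviation']].round(3))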
"def coeff_display_M202(Nstar=1,seeing=[0.9,0.,0.],npix=npix,zenith=0,filter='r', theta=0., phi=0,corrector='corrector',x=0.,y=0.,z=0.,zernike_max_order=20,regular=False):\n hdu = genImgVallCCD(Nstar=Nstar,seeing=seeing,npix=npix,zenith=zenith,filter=filter, theta=theta,phi=phi, corrector=corrector,x=x,y=y,z=z,regular=regular)\n nn = len(hdu)\n data = []\n colnames = ['x','y','M20','M22','M31','M33']\n for hdui in hdu[1:]:\n Nobj = hdui.data.shape[0]\n M20=np.zeros(Nobj)\n M22=np.zeros(Nobj).astype(complex)\n M31=np.zeros(Nobj).astype(complex)\n M33=np.zeros(Nobj).astype(complex)\n for i in range(Nobj):\n img = hdui.data[i][4:].reshape(npix,npix)\n img = rebin(img,(40,40))\n M20,M22,M31,M33=complexMoments(data=img,sigma=4.)\n x=hdui.header['ccdXcen']\n y=hdui.header['ccdYcen']\n data.append([x,y,np.median(M20), np.median(M22), np.median(M31), np.median(M33)])\n data=np.array(data) \n betaAll=[]\n betaErrAll=[]\n R2adjAll=[]\n beta,betaErr,R2_adj,fitted = zernikeFit(data[:,0].real,data[:,1].real,data[:,2].real,max_order=zernike_max_order)\n betaAll.append(beta)\n betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n for i in range(3,6):\n beta,betaErr,R2_adj,fitted = zernikeFit(data[:,0].real,data[:,1].real,data[:,i].real,max_order=zernike_max_order)\n betaAll.append(beta)\n betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n beta,betaErr,R2_adj,fitted = zernikeFit(data[:,0].real,data[:,1].real,data[:,i].imag,max_order=zernike_max_order)\n betaAll.append(beta)\n betaErrAll.append(betaErr)\n R2adjAll.append(R2_adj)\n betaAll = np.array(betaAll)\n betaErrAll = np.array(betaErrAll)\n R2adjAll = np.array(R2adjAll)\n ind = np.arange(len(betaAll[0]))\n momname = ('M20','M22.Real','M22.imag','M31.real','M31.imag','M33.real','M33.imag')\n fmtarr = ['bo-','ro-','go-','co-','mo-','yo-','ko-']\n pl.figure(figsize=(17,7))\n for i in range(3):\n pl.subplot(4,1,i+1)\n pl.errorbar(ind[1:],betaAll[i][1:],yerr = betaErrAll[i][1:],fmt=fmtarr[i])\n if i == 0:\n pl.title('x: '+str(hdu[0].header['x'])+' y: '+str(hdu[0].header['y'])+' z: '+str(hdu[0].header['z'])+' tilt: '+str(hdu[0].header['theta'])+' fwhm: '+str(hdu[0].header['s_fwhm'])+' e1: '+str(hdu[0].header['e1'])+' e2: '+str(hdu[0].header['e2']))\n pl.grid()\n pl.xlim(-1,len(betaAll[i])+1)\n pl.ylim(min(betaAll[i][1:])-0.5,max(betaAll[i][1:])+0.5)\n #pl.ylim(-0.1,0.1)\n pl.xticks(ind,('','','','','','','','','','','','','','','','','','','',''))\n pl.ylabel(momname[i])\n pl.xticks(ind,('Piston','Tip','Tilt','Astignism','Defocus','Astignism','Trefoil','Coma','Coma','Trefoil','Ashtray','Astigm.5th','Spherical','Astigm.5th','Ashtray','16','17','18','19','20'),rotation=90)\n pl.xlabel('Zernike Coefficients')\n return betaAll,betaErrAll", "def residual_net_old(total_depth, data_layer_params, num_classes = 1000, acclayer = True):\n # figure out network structure\n net_defs = {\n 18:([2, 2, 2, 2], \"standard\"),\n 34:([3, 4, 6, 3], \"standard\"),\n 50:([3, 4, 6, 3], \"bottleneck\"),\n 101:([3, 4, 23, 3], \"bottleneck\"),\n 152:([3, 8, 36, 3], \"bottleneck\"),\n }\n assert total_depth in net_defs.keys(), \"net of depth:{} not defined\".format(total_depth)\n\n nunits_list, unit_type = net_defs[total_depth] # nunits_list a list of integers indicating the number of layers in each depth.\n nouts = [64, 128, 256, 512] # same for all nets\n\n # setup the first couple of layers\n n = caffe.NetSpec()\n n.data, n.label = L.Python(module = 'beijbom_caffe_data_layers', layer = 'ImageNetDataLayer',\n ntop = 2, param_str=str(data_layer_params))\n n.conv1, n.bn1, n.lrn1 = 
conv_bn(n.data, ks = 7, stride = 2, nout = 64, pad = 3)\n n.relu1 = L.ReLU(n.lrn1, in_place=True)\n n.pool1 = L.Pooling(n.relu1, stride = 2, kernel_size = 3)\n \n # make the convolutional body\n for nout, nunits in zip(nouts, nunits_list): # for each depth and nunits\n for unit in range(1, nunits + 1): # for each unit. Enumerate from 1.\n s = str(nout) + '_' + str(unit) + '_' # layer name prefix\n if unit_type == \"standard\":\n residual_standard_unit_old(n, nout, s, newdepth = unit is 1 and nout > 64)\n else:\n residual_bottleneck_unit_old(n, nout, s, newdepth = unit is 1)\n \n # add the end layers \n n.global_pool = L.Pooling(n.__dict__['tops'][n.__dict__['tops'].keys()[-1]], pooling_param = dict(pool = 1, global_pooling = True))\n n.score = L.InnerProduct(n.global_pool, num_output = num_classes,\n param=[dict(lr_mult=1, decay_mult=1), dict(lr_mult=2, decay_mult=0)])\n n.loss = L.SoftmaxWithLoss(n.score, n.label)\n if acclayer:\n n.accuracy = L.Accuracy(n.score, n.label)\n\n return n", "def main():\n conf_matrix1 = one_vs_all()\n conf_matrix2 = all_vs_all()\n results = my_info() + '\\t\\t'\n results += np.array_str(np.diagonal(conf_matrix1)) + '\\t\\t'\n results += np.array_str(np.diagonal(conf_matrix2))\n print results + '\\t\\t'\n\n # sum = 0\n #\n # for i in range(len(conf_matrix1)):\n # sum += conf_matrix1[i][i]\n #\n # print \"One-vs-All corecct classifications: \", sum\n #\n # sum = 0\n #\n # for i in range(len(conf_matrix2)):\n # sum += conf_matrix2[i][i]\n #\n # print \"All-vs-All correct classificatinos: \", sum\n\n #print(\"onevsall\")\n #print_latex_table(conf_matrix1)\n #print(\"allvsall\")\n #print_latex_table(conf_matrix2)", "def _final_aggregation(means_x: Tensor, means_y: Tensor, vars_x: Tensor, vars_y: Tensor, corrs_xy: Tensor, nbs: Tensor) ->Tuple[Tensor, Tensor, Tensor, Tensor, Tensor, Tensor]:\n mx1, my1, vx1, vy1, cxy1, n1 = means_x[0], means_y[0], vars_x[0], vars_y[0], corrs_xy[0], nbs[0]\n for i in range(1, len(means_x)):\n mx2, my2, vx2, vy2, cxy2, n2 = means_x[i], means_y[i], vars_x[i], vars_y[i], corrs_xy[i], nbs[i]\n nb = n1 + n2\n mean_x = (n1 * mx1 + n2 * mx2) / nb\n mean_y = (n1 * my1 + n2 * my2) / nb\n element_x1 = (n1 + 1) * mean_x - n1 * mx1\n vx1 += (element_x1 - mx1) * (element_x1 - mean_x) - (element_x1 - mean_x) ** 2\n element_x2 = (n2 + 1) * mean_x - n2 * mx2\n vx2 += (element_x2 - mx2) * (element_x2 - mean_x) - (element_x2 - mean_x) ** 2\n var_x = vx1 + vx2\n element_y1 = (n1 + 1) * mean_y - n1 * my1\n vy1 += (element_y1 - my1) * (element_y1 - mean_y) - (element_y1 - mean_y) ** 2\n element_y2 = (n2 + 1) * mean_y - n2 * my2\n vy2 += (element_y2 - my2) * (element_y2 - mean_y) - (element_y2 - mean_y) ** 2\n var_y = vy1 + vy2\n cxy1 += (element_x1 - mx1) * (element_y1 - mean_y) - (element_x1 - mean_x) * (element_y1 - mean_y)\n cxy2 += (element_x2 - mx2) * (element_y2 - mean_y) - (element_x2 - mean_x) * (element_y2 - mean_y)\n corr_xy = cxy1 + cxy2\n mx1, my1, vx1, vy1, cxy1, n1 = mean_x, mean_y, var_x, var_y, corr_xy, nb\n return mean_x, mean_y, var_x, var_y, corr_xy, nb", "def __call__(self, affine, representations_list, aatype):\n act = [\n common_modules.Linear( # pylint: disable=g-complex-comprehension\n self.config.num_channel,\n name='input_projection')(jax.nn.relu(x))\n for x in representations_list\n ]\n # Sum the activation list (equivalent to concat then Linear).\n act = sum(act)\n\n final_init = 'zeros' if self.global_config.zero_init else 'linear'\n\n # Mapping with some residual blocks.\n for _ in range(self.config.num_residual_block):\n 
old_act = act\n act = common_modules.Linear(\n self.config.num_channel,\n initializer='relu',\n name='resblock1')(\n jax.nn.relu(act))\n act = common_modules.Linear(\n self.config.num_channel,\n initializer=final_init,\n name='resblock2')(\n jax.nn.relu(act))\n act += old_act\n\n # Map activations to torsion angles. Shape: (num_res, 14).\n num_res = act.shape[0]\n unnormalized_angles = common_modules.Linear(\n 14, name='unnormalized_angles')(\n jax.nn.relu(act))\n unnormalized_angles = jnp.reshape(\n unnormalized_angles, [num_res, 7, 2])\n angles = l2_normalize(unnormalized_angles, axis=-1)\n\n outputs = {\n 'angles_sin_cos': angles, # jnp.ndarray (N, 7, 2)\n 'unnormalized_angles_sin_cos':\n unnormalized_angles, # jnp.ndarray (N, 7, 2)\n }\n\n # Map torsion angles to frames.\n backb_to_global = r3.rigids_from_quataffine(affine)\n\n # Jumper et al. (2021) Suppl. Alg. 24 \"computeAllAtomCoordinates\"\n\n # r3.Rigids with shape (N, 8).\n all_frames_to_global = all_atom.torsion_angles_to_frames(\n aatype,\n backb_to_global,\n angles)\n\n # Use frames and literature positions to create the final atom coordinates.\n # r3.Vecs with shape (N, 14).\n pred_positions = all_atom.frames_and_literature_positions_to_atom14_pos(\n aatype, all_frames_to_global)\n\n outputs.update({\n 'atom_pos': pred_positions, # r3.Vecs (N, 14)\n 'frames': all_frames_to_global, # r3.Rigids (N, 8)\n })\n return outputs", "def forward_propagate(self):\n for i in range(0, len(self.output_layer)):\n output = 0\n\n # Loop through each Neuron in the hidden layer\n for neuron in self.hidden_layer:\n output += neuron.weights[i] * neuron.output\n\n # Update summation for output classifier\n self.output_layer[i] = output", "def _inexact_alm_l1(imgflt_stack,options):\n # Get basic image information and reshape input\n img_width = imgflt_stack.shape[0]\n img_height = imgflt_stack.shape[1]\n img_size = img_width* img_height\n img_3d = imgflt_stack.shape[2]\n imgflt_stack = np.reshape(imgflt_stack,(img_size, img_3d))\n options['weight'] = np.reshape(options['weight'],imgflt_stack.shape)\n\n # Matrix normalization factor\n temp = np.linalg.svd(imgflt_stack,full_matrices=False,compute_uv=False)\n norm_two = np.float64(temp[0])\n del temp\n\n # A is a low rank matrix that is being solved for\n A = np.zeros(imgflt_stack.shape,dtype=np.float64)\n A_coeff = np.ones((1, img_3d),dtype=np.float64) # per image scaling coefficient, accounts for things like photobleaching\n A_offset = np.zeros((img_size,1),dtype=np.float64) # offset per pixel across all images\n\n # E1 is the additive error. 
Since the goal is determining the background signal, this is the real signal at each pixel\n E1 = np.zeros(imgflt_stack.shape,dtype=np.float64)\n\n # Normalization factors\n ent1 = np.float64(1) # flatfield normalization\n ent2 = np.float64(10) # darkfield normalization\n\n # Weights\n weight_upd = _dct2(np.mean(np.reshape(A,(img_width, img_height, img_3d)),2))\n\n # Initialize gradient and weight normalization factors\n Y1 = np.float64(0)\n mu = np.float64(12.5)/norm_two\n mu_bar = mu * 10**7\n rho = np.float64(1.5)\n\n # Frobenius norm\n d_norm = np.linalg.norm(imgflt_stack,'fro')\n\n # Darkfield upper limit and offset\n B1_uplimit = np.min(imgflt_stack)\n B1_offset = np.float64(0)\n\n # Perform optimization\n iternum = 0\n converged = False\n while not converged:\n iternum += 1\n\n # Calculate the flatfield using existing weights, coefficients, and offsets\n W_idct_hat = _idct2(weight_upd)\n A = np.matmul(np.reshape(W_idct_hat,(img_size,1)),A_coeff) + A_offset\n temp_W = np.divide(imgflt_stack - A - E1 + np.multiply(1/mu,Y1),ent1)\n\n # Update the weights\n temp_W = np.reshape(temp_W,(img_width, img_height, img_3d))\n temp_W = np.mean(temp_W,2)\n weight_upd = weight_upd + _dct2(temp_W)\n weight_upd = np.max(np.reshape(weight_upd - options['lambda']/(ent1*mu),(img_width, img_height,1)),-1,initial=0) + np.min(np.reshape(weight_upd + options['lambda']/(ent1*mu),(img_width, img_height,1)),-1,initial=0)\n W_idct_hat = _idct2(weight_upd)\n\n # Calculate the flatfield using updated weights\n A = np.matmul(np.reshape(W_idct_hat,(img_size,1)),A_coeff) + A_offset\n\n # Determine the error\n E1 = E1 + np.divide(imgflt_stack - A - E1 + np.multiply(1/mu,Y1),ent1)\n E1 = np.max(np.reshape(E1 - options['weight']/(ent1*mu),(img_size, img_3d,1)),-1,initial=0) + np.min(np.reshape(E1 + options['weight']/(ent1*mu),(img_size, img_3d,1)),-1,initial=0)\n\n # Calculate the flatfield coefficients by subtracting the errors from the original data\n R1 = imgflt_stack-E1\n A_coeff = np.reshape(np.mean(R1,0)/np.mean(R1),(1, img_3d))\n A_coeff[A_coeff<0] = 0 # pixel values should never be negative\n\n # Calculate the darkfield component if specified by the user\n if options['darkfield']:\n # Get images with predominantly background pixels\n validA1coeff_idx = np.argwhere(A_coeff<1)[:,1]\n R1_upper = R1[np.argwhere(np.reshape(W_idct_hat,(-1,1)).astype(np.float64)>(np.float64(np.mean(W_idct_hat))-np.float64(10**-5)))[:,0],:]\n R1_upper = np.mean(R1_upper[:,validA1coeff_idx],0)\n R1_lower = R1[np.argwhere(np.reshape(W_idct_hat,(-1,1))<np.mean(W_idct_hat)+np.float64(10**-5))[:,0],:]\n R1_lower = np.mean(R1_lower[:,validA1coeff_idx],0)\n B1_coeff = (R1_upper-R1_lower)/np.mean(R1)\n k = validA1coeff_idx.size\n\n # Calculate the darkfield offset\n temp1 = np.sum(np.square(A_coeff[0,validA1coeff_idx]))\n temp2 = np.sum(A_coeff[0,validA1coeff_idx])\n temp3 = np.sum(B1_coeff)\n temp4 = np.sum(A_coeff[0,validA1coeff_idx]*B1_coeff)\n temp5 = temp2 * temp3 - k*temp4\n if temp5 == 0:\n B1_offset = np.float64(0)\n else:\n B1_offset = (temp1*temp3-temp2*temp4)/temp5\n B1_offset = np.max(B1_offset,initial=0)\n B1_offset = np.min(B1_offset,initial=B1_uplimit/(np.mean(W_idct_hat)+10**-7))\n B_offset = B1_offset * np.mean(W_idct_hat) - B1_offset*np.reshape(W_idct_hat,(-1,1))\n\n # Calculate darkfield\n A1_offset = np.reshape(np.mean(R1[:,validA1coeff_idx],1),(-1,1)) - np.mean(A_coeff[0,validA1coeff_idx]) * np.reshape(W_idct_hat,(-1,1))\n A1_offset = A1_offset - np.mean(A1_offset)\n A_offset = A1_offset - np.mean(A1_offset) - 
B_offset\n\n # Update darkfield weights\n W_offset = _dct2(np.reshape(A_offset,(img_width, img_height)))\n W_offset = np.max(np.reshape(W_offset - options['lambda_darkfield']/(ent2*mu),(img_width, img_height,1)),-1,initial=0) \\\n + np.min(np.reshape(W_offset + options['lambda_darkfield']/(ent2*mu),(img_width, img_height,1)),-1,initial=0)\n\n # Calculate darkfield based on updated weights\n A_offset = _idct2(W_offset)\n A_offset = np.reshape(A_offset,(-1,1))\n A_offset = np.max(np.reshape(A_offset - options['lambda_darkfield']/(ent2*mu),(A_offset.shape[0],A_offset.shape[1],1)),-1,initial=0) \\\n + np.min(np.reshape(A_offset + options['lambda_darkfield']/(ent2*mu),(A_offset.shape[0],A_offset.shape[1],1)),-1,initial=0)\n A_offset = A_offset + B_offset\n\n # Loss\n Z1 = imgflt_stack - A - E1\n\n # Update weight regularization term\n Y1 = Y1 + mu*Z1\n\n # Update learning rate\n mu = np.min(mu*rho,initial=mu_bar)\n\n # Stop if loss is below threshold\n stopCriterion = np.linalg.norm(Z1,ord='fro')/d_norm\n if stopCriterion < options['optimization_tol'] or iternum > options['max_iterations']:\n converged = True\n\n # Calculate final darkfield image\n A_offset = A_offset + B1_offset * np.reshape(W_idct_hat,(-1,1))\n\n return A,E1,A_offset", "def reconstruct(X_input, X_mean, X_std, Y, P, e_scaled, x_col = 0, y_col = 1, dimensions = [0, 1, 2, 3]):\n\t# Reconstruction degrees information retention (~25%, ~50%, ~75%, and ~100%).\n\tfor d in dimensions:\n\t\t# Reconstruct \n\t\tY_proj = Y.iloc[:,0:(d + 1)]\n\t\tX_rec = (Y_proj @ P.iloc[:,0:(d + 1)].T) * X_std + X_mean\n\t\tX_rec.columns = X_input.columns\n\n\t\t# Cumulate percentage information retained\n\t\tdata_retained = e_scaled[range(d + 1)].sum() * 100\n\n\t\tml.plt.figure()\n\t\tml.plt.title(f'Raw vs. Reconstructed D = {d + 1}')\n\t\tml.sns.scatterplot(data = X_input, x = X_input.iloc[:, x_col], y = X_input.iloc[:, y_col], alpha = 0.5, color = 'k', label = 'Raw Data (100%)')\n\t\tml.sns.scatterplot(data = X_rec, x = X_rec.iloc[:, x_col], y = X_rec.iloc[:, y_col], alpha = 0.5, color = 'r', label = f'Reconstructed Data ({data_retained: .2f}%)')", "def connector_mediation(task):\n\tatlas = 'power'\n\tproject='hcp'\n\tknown_membership,network_names,num_nodes,name_int_dict = network_labels(atlas)\n\tsubjects = np.load('%s/dynamic_mod/results/%s_%s_%s_subs_fz.npy' %(homedir,'hcp',task,atlas))\n\tstatic_results = graph_metrics(subjects,task,atlas,run_version='fz')\n\tmatrices = static_results['matrices']\n\tsubject_pcs = static_results['subject_pcs']\n\tsubject_mods = static_results['subject_mods']\n\tmod_pc_corr = np.zeros(subject_pcs.shape[1])\n\tfor i in range(subject_pcs.shape[1]):\n\t\tmod_pc_corr[i] = nan_pearsonr(subject_mods,subject_pcs[:,i])[0]\n\tmean_conn = np.nanmean(matrices,axis=0)\n\te_tresh = np.percentile(mean_conn,85)\n\tsubject_pcs[np.isnan(subject_pcs)] = 0.0\n\tm = np.zeros((264,264,264))\n\tpool = Pool(40)\n\tfor n in range(264):\n\t\tprint n\n\t\tsys.stdout.flush()\n\t\tvariables = []\n\t\tfor i,j in combinations(range(264),2):\n\t\t\tvariables.append(pd.DataFrame(data={'pc':subject_pcs[:,n],'weight':matrices[:,i,j],'q':subject_mods},index=range(len(subject_pcs))))\n\t\tresults = pool.map(multi_med,variables)\n\t\tfor r,i in zip(results,combinations(range(264),2)):\n\t\t\tm[n,i[0],i[1]] = r\n\t\t\tm[n,i[1],i[0]] = r\n\t\tnp.save('/home/despoB/mb3152/dynamic_mod/results/full_med_matrix_new_%s.npy'%(task),m)", "def TNFR1_to_SecondaryComplex():\n \n Parameter('TNFa_0' , 5000) # 3000 corresponds to 50ng/ml TNFa\n Parameter('TNFR1_0' 
, 200) # 200 receptors per cell\n Parameter('TRADD_0' , 1000) # molecules per cell (arbitrarily assigned)1000\n Parameter('CompI_0' , 0) # complexes per cell\n Parameter('RIP1_0' , 20000) # molecules per cell 20000\n Parameter('NFkB_0' , 0) # molecules per cell\n \n Initial(TNFa(blig=None), TNFa_0) # TNFa Ligand\n Initial(TNFR1(blig=None, bDD=None, state='norm'), TNFR1_0) # TNFR1\n Initial(TRADD(bDD1=None, bDD2=None, state='inactive'), TRADD_0) # TRADD\n Initial(CompI(bDD=None, state='unmod'), CompI_0) # Complex I\n Initial(RIP1(bDD=None, bRHIM = None, state = 'ub'), RIP1_0) # RIP1\n Initial(NFkB(bf=None), NFkB_0)\n\n # =========================================\n # TNFR1 ligation, formation of Complex I and release of RIP1 and TRADD rules\n # -----------------------------------------\n # TNFa+ TNFR1 <-> TNFa:TNFR1\n # TNFa:TNFR1 + TRADD <-> TNFa:TNFR1:TRADD >> CompI\n # CompI + RIP1 <-> CompI:RIP1 >> [active]CompI:RIP1-Ub\n # CompI + RIP1-Ub <-> CompI:RIP1-Ub \n \n # [active]CompI:RIP1-Ub >> NFkB # This reaction will consume the receptor.\n # [active]CompI:RIP1-Ub >> [active]CompI:RIP1\n # [active]CompI:RIP1 >> [active]CompI # A20 mediated degradation of RIP1\n # [active]CompI:RIP1 >> [active]CompI + RIP1\n # [active]CompI >> [active]TRADD + [norm]TNFR1 #receptor recycle typically distroys the ligand.\n\n # RIP1 >> RIP1-Ub #These reactions were added because Doug Green reported that FADD and RIP1 bind\n # RIP1-Ub >> RIP1 #independently of receptor, and FADD:RIP1 formation leads to Caspase 8 activation.\n #These reaction decrease the amount of RIP1 by spontaneously ubiquitinating it. \n\n # ------------------------------------------\n \n # -------------Complex I assembly----------------\n bind(TNFa(blig=None), 'blig', TNFR1(blig=None, bDD=None, state='norm'), 'blig', [KF, KR])\n bind(TNFR1(blig = ANY, bDD = None, state = 'norm'), 'bDD', TRADD(bDD1=None, bDD2=None, state='inactive'), 'bDD1', [KF, KR])\n preCompI = TNFa(blig=ANY)%TNFR1(blig=ANY, bDD=ANY, state = 'norm')%TRADD(bDD1 = ANY, bDD2=None, state = 'inactive')\n Rule('CompI_formation', preCompI >> CompI(bDD=None, state = 'unmod'), KC)\n \n # --------------Complex I - RIP1 Modification-----------------\n bind(CompI(bDD=None, state = 'unmod'), 'bDD', RIP1(bDD=None, bRHIM = None, state='unmod'), 'bDD',[KF, KR])\n bind(CompI(bDD=None, state = 'unmod'), 'bDD', RIP1(bDD=None, bRHIM = None, state='ub'), 'bDD',[KF, KR]) \n \n Rule('CompI_Ub', CompI(bDD=ANY, state = 'unmod')%RIP1(bDD=ANY,bRHIM=None, state = 'unmod')>> CompI(bDD=ANY, state = 'mod')%RIP1(bDD=ANY,bRHIM=None, state = 'ub'), KC)\n Rule('CompI_Ub2', CompI(bDD=ANY, state = 'unmod')%RIP1(bDD=ANY,bRHIM=None, state = 'ub')>> CompI(bDD=ANY, state = 'mod')%RIP1(bDD=ANY,bRHIM=None, state = 'ub'), KC)\n Rule('CompI_deUb', CompI(bDD=ANY, state='mod')%RIP1(bDD=ANY, bRHIM=None, state='ub')>>CompI(bDD=ANY, state='mod')%RIP1(bDD=ANY, bRHIM=None,state='unmod'),KC)\n bind(CompI(bDD=None, state='mod'), 'bDD', RIP1(bDD=None, bRHIM = None, state = 'unmod'), 'bDD', [Parameter('k2', 0), KR])\n #Rule('RIP1_rel', CompI(bDD=ANY, state='mod')%RIP1(bDD=ANY, bRHIM=None, state='unmod') >> CompI(bDD=None, state='mod') + RIP1(bDD=None, bRHIM = None, state = 'unmod'), KC3)\n #bind(CompI(bDD=None, state = 'mod'), 'bDD', RIP1(bDD=None, bRHIM = None, state='ub'), 'bDD',[KF, KR])\n #Rule('RIP1_deg', CompI(bDD=ANY, state='mod')%RIP1(bDD=ANY, bRHIM=None, state='unmod') >> CompI(bDD=None, state='mod'),KC3)\n Rule('TNFR1_recycle', CompI(bDD=None, state='mod') >> TRADD(bDD1=None, bDD2 = None, state='active') + TNFR1(blig 
= None, bDD = None, state = 'norm'), KC)\n Rule('NFkB_expression', CompI(bDD=ANY, state = 'mod')%RIP1(bDD=ANY,bRHIM=None, state = 'ub')>> CompI(bDD=ANY, state = 'mod')%RIP1(bDD=ANY,bRHIM=None, state = 'ub') + NFkB(bf=None), KE)\n # --------------RIP1 Ubiquitination---------------------------\n Rule('RIP1_Ub', RIP1(bDD=None, bRHIM = None, state='unmod')>> RIP1(bDD=None, bRHIM = None, state='ub'), KC2)\n # Rule('RIP1_deUb', RIP1(bDD=None, bRHIM = None, state='ub')>> RIP1(bDD=None, bRHIM = None, state='unmod'), KR)", "def forward_pass(X,architecture):\n \n architecture['layer1'][0] = X\n kernel_shape1 = architecture['layer1'][7]\n stride1 = architecture['layer1'][8]\n if kernel_shape1 is not None and not isinstance(kernel_shape1,int):\n X_input_1_im2col,imX = im2col(X,kernel_shape1,stride1,im_needed = False, shape_specified = True)\n architecture['layer1'][4] = X_input_1_im2col\n else:\n architecture['layer1'][4] = None\n\n for layer in range(len(architecture)): # Feedforward from the first till the second last layer\n X_input,X_output,weightsi,biasi,X_input_1_im2col,imi,output_shapei,kernel_shapei,stridei,operationi,imx = architecture['layer{}'.format(layer+1)]\n\n if operationi == 'conv_bn_relu':\n conv_output = relu(BatchNorm(torch.t(X_input_1_im2col).mm(weightsi) + biasi))\n conv_output = torch.reshape(conv_output,output_shapei)\n architecture['layer{}'.format(layer+1)][1] = conv_output # resetting output as convolved shape\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = conv_output # resetting intput of next layer as convolved shape\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n if operationi__1 == 'maxpool':\n architecture['layer{}'.format(layer+2)][4] = maxpool_im2col(conv_output,kernel_shapei__1,stridei__1)\n else:\n architecture['layer{}'.format(layer+2)][4],imX = im2col(conv_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n # resetting input im2col of next layer as the im2col of the output of this layer\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'conv_relu':\n conv_output = relu(torch.t(X_input_1_im2col).mm(weightsi) + biasi)\n conv_output = torch.reshape(conv_output,output_shapei)\n architecture['layer{}'.format(layer+1)][1] = conv_output # resetting output as convolved shape\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = conv_output # resetting intput of next layer as convolved shape\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n if operationi__1 == 'maxpool':\n architecture['layer{}'.format(layer+2)][4] = maxpool_im2col(conv_output,kernel_shapei__1,stridei__1)\n else:\n architecture['layer{}'.format(layer+2)][4],imX = im2col(conv_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n # resetting input im2col of next layer as the im2col of the output of this layer\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'conv_bn_sigmoid':\n conv_output = sigmoid(BatchNorm(torch.t(X_input_1_im2col).mm(weightsi) + biasi))\n conv_output = 
torch.reshape(conv_output,output_shapei)\n architecture['layer{}'.format(layer+1)][1] = conv_output # resetting output as convolved shape\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = conv_output # resetting intput of next layer as convolved shape\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n if operationi__1 == 'maxpool':\n architecture['layer{}'.format(layer+2)][4] = maxpool_im2col(conv_output,kernel_shapei__1,stridei__1)\n else:\n architecture['layer{}'.format(layer+2)][4],imX = im2col(conv_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n # resetting input im2col of next layer as the im2col of the output of this layer\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'conv_sigmoid':\n conv_output = sigmoid(torch.t(X_input_1_im2col).mm(weightsi) + biasi)\n conv_output = torch.reshape(conv_output,output_shapei)\n architecture['layer{}'.format(layer+1)][1] = conv_output # resetting output as convolved shape\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = conv_output # resetting intput of next layer as convolved shape\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n operationi__1 = architecture['layer{}'.format(layer+2)][9]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n if operationi__1 == 'maxpool':\n architecture['layer{}'.format(layer+2)][4] = maxpool_im2col(conv_output,kernel_shapei__1,stridei__1)\n else:\n architecture['layer{}'.format(layer+2)][4],imX = im2col(conv_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n # resetting input im2col of next layer as the im2col of the output of this layer\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'maxpool':\n maxpool_output = maxpool(X_input,kernel_shapei,stridei)\n\n maxpool_output = torch.reshape(maxpool_output,output_shapei)\n\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = maxpool_output\n kernel_shapei__1 = architecture['layer{}'.format(layer+2)][7]\n stridei__1 = architecture['layer{}'.format(layer+2)][8]\n if kernel_shapei__1 is not None and not isinstance(kernel_shapei__1,int):\n architecture['layer{}'.format(layer+2)][4],imX = im2col(maxpool_output,kernel_shapei__1,stridei__1,im_needed = False, shape_specified = True)\n else:\n architecture['layer{}'.format(layer+2)][4] = None\n elif operationi == 'flatten_dense_relu':\n # kernel_shapei in this case refers to the output channels: stride for dense layer will be None\n output = flatten_and_dense(X_input,kernel_shapei,weightsi,biasi,activation = 'relu',initialise_weights = False)\n architecture['layer{}'.format(layer+1)][1] = output\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = output\n elif operationi == 'flatten_dense_none':\n # kernel_shapei in this case refers to the output channels: stride for dense layer will be None\n output = flatten_and_dense(X_input,kernel_shapei,weightsi,biasi,activation = 'none',initialise_weights = False)\n architecture['layer{}'.format(layer+1)][1] = output\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = output\n elif operationi == 
'flatten_dense_sigmoid':\n # kernel_shapei in this case refers to the output channels: stride for dense layer will be None\n output = flatten_and_dense(X_input,kernel_shapei,weightsi,biasi,activation = 'sigmoid',initialise_weights = False)\n architecture['layer{}'.format(layer+1)][1] = output\n if layer != len(architecture) - 1:\n architecture['layer{}'.format(layer+2)][0] = output\n elif operationi == 'softmax':\n Xin = architecture['layer{}'.format(layer+1)][0]\n output = softmax(Xin).squeeze()\n architecture['layer{}'.format(layer+1)][1] = output\n if layer == len(architecture) - 1:\n y_pred = architecture['layer{}'.format(len(architecture))][1]\n \n return y_pred", "def _apply_individual_nbody1_accumulate_python(\n coeff: 'Nparray', ocoeff: 'Nparray', icoeff: 'Nparray',\n amap: 'Nparray', btarget: 'Nparray', bsource: 'Nparray',\n bparity: 'Nparray') -> None:\n for sourcea, targeta, paritya in amap:\n ocoeff[targeta, btarget] += coeff * paritya * numpy.multiply(\n icoeff[sourcea, bsource], bparity)", "def explain(self):\n # build the 2 versions of the model\n model = self.build_model()\n last_conv_model = self.build_cut_model()\n\n for i, label_name in enumerate(self.label_names):\n # This is the algorithm for the last convolution layer's tensor image\n # Get the index of the image that was classified correctly with the most confidence for the class\n predicted_col_proba = np.array(self.predicted_labels)[0][:, i]\n predicted_col_argsort = predicted_col_proba.argsort()[::-1]\n predicted_col = (predicted_col_proba > 0.2).astype(int)\n true_col = self.true_labels[:, 0]\n\n representative_image_index = None\n for most_probable_arg_index in predicted_col_argsort:\n if predicted_col[most_probable_arg_index] == true_col[most_probable_arg_index]:\n representative_image_index = most_probable_arg_index\n break\n\n # Resize the image to fit the neural network and keep the original resized image\n original_img = io.imread('{}/{}/{}'.format(path_to_img_directory, self.ex_format, np.array(self.image_names)[representative_image_index]))\n original_img = cv2.normalize(original_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n original_img = cv2.resize(original_img, dsize=(self.ex_input_size, self.ex_input_size), interpolation=cv2.INTER_CUBIC)\n img = np.expand_dims(original_img, axis=0)\n original_img = original_img[:, :, :3]\n\n # Get the output of the neural network for this image as a tensor\n model.predict(np.array(img))\n class_output = model.output[:, i]\n last_conv_layer = model.get_layer(self.ex_last_conv_layer_name1).output\n # if self.model_name == 'vit':\n # last_conv_layer = tf.nn.relu(tf.reshape(last_conv_layer[:, :256, :], (-1, 16, 16, 1024)))\n\n # Get the output for the cut model\n cut_img = last_conv_model.predict(np.array(img))[0]\n if self.model_name == 'vit':\n cut_img = np.reshape(cut_img[:256, :], (16, 16, 1024))\n cut_img = np.mean(cut_img, axis=-1)\n cut_img = cv2.normalize(cut_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n if self.model_name == 'vit':\n cut_img[0, 0] = np.mean(cut_img)\n cut_img = cv2.normalize(cut_img, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n cut_img = cv2.resize(cut_img, (self.ex_input_size, self.ex_input_size))\n\n # This is the algorithm of the Grad-CAM model\n # Refine the output of the last convolutional layer according to the class output\n grads = K.gradients(class_output, last_conv_layer)[0]\n if self.model_name == 'vit':\n last_conv_layer = tf.reshape(last_conv_layer[:, 
:256, :], (-1, 16, 16, 1024))\n last_conv_layer = last_conv_layer / tf.norm(last_conv_layer)\n\n grads = tf.reshape(grads[:, :256, :], (-1, 16, 16, 1024))\n grads = grads / tf.norm(grads)\n\n pooled_grads = K.mean(grads, axis=(0, 1, 2))\n iterate = K.function([model.input], [pooled_grads, last_conv_layer[0]])\n pooled_grads_value, conv_layer_output_value = iterate([img])\n for j in range(self.ex_last_conv_layer_filter_number):\n conv_layer_output_value[:, :, j] *= pooled_grads_value[j]\n\n # Create a 16x16 heatmap and scale it to the same size as the original image\n heatmap = np.mean(conv_layer_output_value, axis=-1)\n heatmap = np.maximum(heatmap, 0)\n heatmap /= np.max(heatmap)\n heatmap = cv2.resize(heatmap, (self.ex_input_size, self.ex_input_size))\n heatmap = np.uint8(255 * heatmap)\n heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)\n heatmap = cv2.normalize(heatmap, None, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)\n superimposed_img = cv2.addWeighted(original_img, 0.7, heatmap, 0.4, 0)\n\n # save the original image\n plt.matshow(original_img)\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'original', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)\n\n # save the cut image\n plt.matshow(cut_img, cmap=plt.get_cmap('Spectral'))\n plt.colorbar(shrink=0.75, ticks=np.linspace(0, 1, 11).tolist())\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'cut', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)\n\n # save the superimposed gradcam image\n plt.matshow(superimposed_img, cmap=plt.get_cmap('Spectral'))\n plt.colorbar(shrink=0.75, ticks=np.linspace(0, 1, 11).tolist())\n plt.axis('off')\n plt.title(label_name, fontdict={'fontsize': 18})\n plt.savefig('{}/{}/{}_{}.png'.format(path_to_explainable, 'gradcam', self.model_name, label_name), bbox_inches='tight', pad_inches=0.1)", "def process(self, mat):", "def UpdateLayers(self):\n pass", "def __init__(self, input_dim=(3, 32, 32), num_filters=32, filter_size=7,\n hidden_dim=100, num_classes=10, weight_scale=1e-3, reg=0.0,\n dtype=np.float32):\n self.params = {}\n self.reg = reg\n self.dtype = dtype\n \n ############################################################################\n # TODO: Initialize weights and biases for the three-layer convolutional #\n # network. Weights should be initialized from a Gaussian with standard #\n # deviation equal to weight_scale; biases should be initialized to zero. #\n # All weights and biases should be stored in the dictionary self.params. #\n # Store weights and biases for the convolutional layer using the keys 'W1' #\n # and 'b1'; use keys 'W2' and 'b2' for the weights and biases of the #\n # hidden affine layer, and keys 'W3' and 'b3' for the weights and biases #\n # of the output affine layer. 
#\n ############################################################################ \n C, H, W = input_dim;\n\n # Dimensions of data output by convolutional layer\n S = 1; pad = (filter_size - 1) / 2; # Stride and image padding\n hconv = (H - filter_size + 2*pad)/S + 1;\n wconv = (W - filter_size + 2*pad)/S + 1;\n\n # Get dimensions of 2x2 max-pool output\n hmp = hconv / 2;\n wmp = wconv / 2;\n\n # Get dimensions of vector fed into affine layer\n # Convert maxpool output by using np.reshape(v1,(N,-1))\n # Recover by using np.reshape(dv1,v1.shape)\n laff = hmp*wmp*num_filters;\n\n # Determine starting weight and bias matrices\n self.params['W1'] = weight_scale * np.random.randn(num_filters, C, filter_size, filter_size);\n self.params['b1'] = np.zeros(num_filters);\n self.params['W2'] = weight_scale * np.random.randn(laff, hidden_dim);\n self.params['b2'] = np.zeros(hidden_dim);\n self.params['W3'] = weight_scale * np.random.rand(hidden_dim,num_classes);\n self.params['b3'] = np.zeros(num_classes);\n ############################################################################\n # END OF YOUR CODE #\n ############################################################################\n\n for k, v in self.params.iteritems():\n self.params[k] = v.astype(dtype)", "def reduce(self,predictor):\n span,stack_top=self\n s0,s1,s2=stack_top\n if s0==None or s1==None:return []\n p_span,pstack=predictor\n\n print('reduce>>>>>>>>',self)\n\n rtn= [\n (ord('L'),pickle.dumps(( # ind\n (p_span[0],span[1]), #span\n ((s1[0],s1[1],s0[0]),pstack[1],pstack[2]))), ##\n ),\n (ord('R'),pickle.dumps(( #\n (p_span[0],span[1]),\n ((s0[0],s1[0],s0[2]),pstack[1],pstack[2]))),\n ),\n ]\n return rtn", "def compute_model(self, A_on_basis_vecs, B_on_standard_basis_array,\n C_on_basis_vecs):\n self.reduce_A(A_on_basis_vecs)\n self.reduce_B(B_on_standard_basis_array)\n self.reduce_C(C_on_basis_vecs)\n return self.A_reduced, self.B_reduced, self.C_reduced" ]
[ "0.5332517", "0.532662", "0.53114045", "0.5299733", "0.52236503", "0.5153382", "0.51122165", "0.5084768", "0.50838053", "0.50646925", "0.505108", "0.5036968", "0.5031033", "0.50278485", "0.50072664", "0.50027597", "0.4986492", "0.49739137", "0.49683747", "0.4963426", "0.49604002", "0.49494305", "0.49310955", "0.4921163", "0.49145284", "0.48912558", "0.48815235", "0.4856885", "0.48517102", "0.48389143", "0.4831509", "0.4829985", "0.4827887", "0.48197114", "0.48061967", "0.48023662", "0.47967127", "0.47875616", "0.47867706", "0.4785091", "0.47753817", "0.4767045", "0.47656783", "0.47571987", "0.4749793", "0.47479817", "0.47443974", "0.47426072", "0.47353458", "0.47317523", "0.47168535", "0.4709121", "0.4708894", "0.47077373", "0.47036442", "0.46909848", "0.46878287", "0.46877098", "0.4678217", "0.46777534", "0.46741447", "0.4671688", "0.46692717", "0.4659111", "0.46551213", "0.46521404", "0.46508342", "0.4645591", "0.46442693", "0.4643926", "0.463433", "0.4633213", "0.46314275", "0.46307734", "0.46285093", "0.4628346", "0.4627117", "0.46252033", "0.46219423", "0.46199664", "0.461906", "0.46120423", "0.46058103", "0.4604493", "0.46026528", "0.46025863", "0.45922992", "0.45913047", "0.45906913", "0.45880038", "0.45873788", "0.45850646", "0.45788586", "0.45783308", "0.4575647", "0.4575577", "0.45714313", "0.45685312", "0.45674506", "0.45667243" ]
0.45976505
86
Reads choice and directs on a path depending on input
def main_page(self):
    choice = ""
    while choice != "x":
        header, main_menu, choices, underline = self.__get_format.main_menu_format()
        choice = self.__main_menu.main_page(header,main_menu,choices,underline)
        if choice == "1":
            self.__rent_controller.Rent_page()
        elif choice == "2":
            try_again = ""
            while try_again != "n":
                try_again, valid = self.__salesman_controller.sign_in_page()
                if valid == True:
                    self.__salesman_controller.salesman_menu()
        elif choice == "3":
            self.__order_controller.find_order_process(page=2)
        elif choice == "i":
            self.__information_controller.information_page()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def warriorPath1():\n with open('stories/warrior/warrior_path1.txt') as f:\n path1 = f.read()\n print(path1.format(NAME))\n while True:\n print(\"What will you do?\")\n print(\"1. Don't light the torch and keep walking\")\n print(\"2. Light the torch with magic\")\n secondPathChoice = input(\"1, 2\\n\")\n # Input validation\n while secondPathChoice != \"1\" and secondPathChoice != \"2\":\n print(\"Invalid input\")\n secondPathChoice = input(\"1, 2\\n\")\n\n if secondPathChoice == \"1\":\n warriorPath1_1()\n break\n elif secondPathChoice == \"2\":\n warriorPath1_2()", "def choose_file():\n chdir(getcwd()+'/data/US')\n f = []\n for (dirpath, dirnames, filenames) in walk(getcwd()):\n f.extend(filenames)\n print('Which file do you want to work on?')\n for i in f:\n print(str(f.index(i)) + ' - ' + i)\n while True:\n try:\n return f[int(input('Type its number: '))]\n except ValueError or IndexError:\n print('Invalid input.')", "def warriorPath1_1_1_2():\n with open('stories/warrior/warrior_path1_1_1_2.txt') as f:\n path1_1_1_2 = f.read()\n print(path1_1_1_2.format(NAME))\n print(\"1. Take the Caldun\")\n print(\"2. Leave\")\n fifthPathChoice = input(\"1, 2\\n\")\n # Input validation\n while fifthPathChoice != \"1\" and fifthPathChoice != \"2\":\n print(\"Invalid input\")\n fifthPathChoice = input(\"1, 2\\n\")\n\n if fifthPathChoice == \"1\":\n # Function called from gmail_sheet.py\n update_sheet_warrior_ending_1()\n # Display story\n with open('stories/warrior/warrior_end_1.txt') as f:\n end1 = f.read()\n print(end1.format(NAME))\n elif fifthPathChoice == \"2\":\n # Function called from gmail_sheet.py\n update_sheet_warrior_ending_2()\n # Display story\n with open('stories/warrior/warrior_end_2.txt') as f:\n end2 = f.read()\n print(end2.format(NAME))", "def choose_file(self):\n pass", "def warriorPath1_1_1():\n with open('stories/warrior/warrior_path1_1_1.txt') as f:\n path1_1_1 = f.read()\n print(path1_1_1.format(NAME))\n while True:\n print(\"What will you do?\")\n print(\"1. Collect courage\")\n print(\"2. Jump down\")\n fourthPathChoice = input(\"1, 2\\n\")\n # Input validation\n while fourthPathChoice != \"1\" and fourthPathChoice != \"2\":\n print(\"Invalid input\")\n fourthPathChoice = input(\"1, 2\\n\")\n\n if fourthPathChoice == \"1\":\n print(\"You collected courage\")\n elif fourthPathChoice == \"2\":\n warriorPath1_1_1_2()\n break", "def input_menu_choice():\r\n choice = str(input(\"Which option would you like to perform? [1 to 4] - \")).strip()\r\n return choice", "def warriorPath1_1():\n with open('stories/warrior/warrior_path1_1.txt') as f:\n path1_1 = f.read()\n print(path1_1.format(NAME))\n while True:\n print(\"1. Attack the ogre\")\n print(\"2. Sneak around the ogre\")\n thirdPathChoice = input(\"1, 2\\n\")\n # Input validation\n while thirdPathChoice != \"1\" and thirdPathChoice != \"2\":\n print(\"Invalid input\")\n thirdPathChoice = input(\"1, 2\\n\")\n\n if thirdPathChoice == \"1\":\n warriorBattle()\n break\n elif thirdPathChoice == \"2\":\n warriorPath1_1_2()", "def ask_path(prompt, default=None):\n response = simple_response(prompt, default)\n if os.path.exists(response):\n return response\n else:\n print('That path does not exist. 
Try again.')\n return ask_path(prompt, default)", "def warriorPath0():\n pathChoice = input(\"1\\n\")\n # Input validation\n while pathChoice != \"1\":\n print(\"Invalid input\")\n pathChoice = input(\"1\\n\")\n\n if pathChoice == \"1\":\n warriorPath1()", "def dmenu_input(scheme):\n choices = []\n for basedir, dirs , files in os.walk(scheme.prefix, followlinks=True):\n dirs.sort()\n files.sort()\n\n dirsubpath = basedir[len(scheme.prefix):].lstrip('/')\n for f in files:\n if f.endswith(scheme.suffix):\n full_path = os.path.join(dirsubpath, f.replace(scheme.suffix, '', -1))\n choices += [full_path]\n\n args = [\"-fn\", scheme.font, \\\n \"-nb\", scheme.nb, \\\n \"-nf\", scheme.nf, \\\n \"-sb\", scheme.sb, \\\n \"-sf\", scheme.sf, \\\n \"-i\" ]\n dmenu = subprocess.Popen(['dmenu'] + args,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE,\n stdout=subprocess.PIPE)\n\n choice_lines = '\\n'.join(map(str, choices))\n choice, errors = dmenu.communicate(choice_lines.encode('utf-8'))\n\n if dmenu.returncode not in [0, 1] \\\n or (dmenu.returncode == 1 and len(errors) != 0):\n print(\"'{} {}' returned {} and error:\\n{}\"\n .format(['dmenu'], ' '.join(args), dmenu.returncode,\n errors.decode('utf-8')))\n sys.exit(1)\n\n choice = choice.decode('utf-8').rstrip()\n\n return (scheme.prefix + \"/\" + choice + scheme.suffix) if choice in choices else sys.exit(1)", "def InputMenuChoice():\r\n choice = str(input(\"Which option would you like to perform? [1 to 4] - \")).strip()\r\n print() # Add an extra line for looks\r\n return choice", "def choose_file(fname=None, env=None, choices=[]):\n if fname:\n fname = os.path.expanduser(fname)\n if os.path.exists(fname):\n return fname\n else:\n raise LookupError(f'No such file {fname}')\n elif env and env in os.environ:\n fname = os.path.expanduser(os.environ[env])\n if os.path.exists(fname):\n return fname\n else:\n raise LookupError(f'No such file {env} = {fname}')\n else:\n ch = []\n for c in choices:\n fname = os.path.expanduser(str(c))\n if os.path.exists(fname):\n return fname\n else:\n ch.append(fname)\n raise LookupError('File not found (tried {}{})'.format(\n env + (', ' if choices else '') if env else '', ', '.join(ch)))", "def input_file_chooser(self):\n filename = tk.filedialog.askopenfilename()\n self._input_path_var.set(filename)", "def GetInputPath(self):\n self.inputDir = raw_input(\"Where should files be read from? This can be a file or a folder of files\\n\\r>>> \")\n if os.path.isabs(self.inputDir):\n if os.path.isdir(self.inputDir):\n self.isFolder = True\n self.inputDirs = os.listdir(self.inputDir)\n elif os.path.isfile(self.inputDir):\n self.isFolder = False\n self.inputDirs = [self.inputDir]\n else:\n print \"That path does not exist. Try again\"\n self.GetInputPath()\n else:\n print \"that was not an excepted path name. Try again.\"\n self.GetInputPath()", "def process_menu_page(self):\r\n self.print_options(self.menu,1)\r\n\r\n \"\"\"\r\n Asks for user input. Then redirects to the appropriate function.\r\n \"\"\"\r\n n = (input(\"What would you like to do? 
Please input the correpsonding integer:\"))\r\n\r\n if n == str(1):\r\n self.file_import()\r\n elif n == str(2):\r\n self.view_data()\r\n elif n == str(3):\r\n self.analysis()\r\n elif n == str(4):\r\n self.save()\r\n elif n == str('q'):\r\n quit()\r\n else:\r\n raise InputError(\"Please input a valid digit or 'q'\")", "def mainMenuChoice():\r\n print(\"What would you like to do?\")\r\n print(\" n) Add a class\")\r\n print(\" d) Delete a class\")\r\n print(\" e) Edit a class\")\r\n print(\" s) Show ongoing list of classes\")\r\n print(\" p) Print Schedule to Terminal\")\r\n print(\" g) Generate schedule in csv and print to Terminal\")\r\n print(\" q) Save and Exit program\\n\")\r\n \r\n choice = input(\"Input choice here: \")\r\n \r\n if choice.lower() in [\"n\",\"d\",\"e\",\"s\",\"g\",\"q\", \"p\"]:\r\n return choice\r\n else:\r\n print(\"\\nPlease enter a valid menu choice\")\r\n return None", "def userSpecify():\n valid = False\n while valid != True:\n userPath = raw_input(\"\\nPlease specify directory path or press Enter key for the current directory: \").strip()\n if userPath == \"\":\n path = \".\"\n else:\n path = userPath\n\n if os.path.exists(path):\n print(\"Path has been validated\")\n valid = True\n else:\n print(\"Invalid File Path, File Doesn't Exist! Please try again.\")\n continue\n return path", "def ask_path():\n\n file_opt = options = {}\n options['initialdir'] = 'User\\\\'\n options['parent'] = root\n options['title'] = 'Choose directory'\n\n # get pathname\n pathname = tk.filedialog.askdirectory(**file_opt)\n\n if pathname:\n Data.out_dir = pathname\n path_var.set(pathname)", "def _input_path() -> str:\n path_string = input('Path:').strip()\n return path_string", "def filepicker(dir=os.curdir):\n choices = {}\n files = os.listdir(dir)\n files = sorted(\n [f for f in files if os.path.isfile(f) and '.csv' in f[-4:]])\n\n # Print the filenames with corresponding integers\n for index, filename in enumerate(files, 1):\n choices[index] = filename\n print(\"[{}] {}\".format(index, filename))\n\n choice = int(input(\"\\n>\").strip()) # Prompt user for choice\n print(\"-\" * 20, \"\\n\")\n if dir == os.curdir:\n return choices[choice]\n else:\n return dir + choices[choice]", "def choose_click(self):\n choice = filedialog.askopenfilename(title='Alege baza de date', filetypes=[('Bază de date', '*.db')])\n if not choice:\n return\n\n self.path.set(choice)", "def prompt(text, choices):\n text += \" [\" + \"/\".join(choices) + \"] \"\n while True:\n inp = input(text)\n if inp in choices:\n return inp", "def askFilename():\n# print(\"\\nDo you have the file already?\"+\n# \"\\nYes - proceed\\t\\t No - go back to main menu\")\n# choice = input(\"(Y/N) \")\n# if choice.upper() == \"N\":\n# filename = None\n# elif choice.upper() == \"Y\": \n print(\"\\nInsert file name (without the filetype)\")\n print(\"(PRESS CTRL+C IF THERE IS NO FILE YET!!)\")\n fileOpt = input(\"or press enter if saved on default name: \") \n if fileOpt != \"\":\n filename = fileOpt+\".txt\"\n else:\n print(\"\\n\\nFinding file...\")\n print(\"\\n\\nWhich party is it for?\")\n print(\"A. Labor\\t\\t B. Liberal\")\n partyOpt = input(\"Selected party is (A/B): \")\n list1 = [\"A\", \"B\"]\n while partyOpt.upper() not in list1:\n partyOpt = input(\"Selected party is (A/B): \")\n marginOpt = input(\"\\nWhat was the margin used? 
(enter as int) \")\n if partyOpt.upper() == \"A\":\n filename = \"LaborParty_MarginalSeatList\"+str(marginOpt)+\"%.txt\"\n elif partyOpt.upper() == \"B\":\n filename = \"LiberalParty_MarginalSeatList\"+str(marginOpt)+\"%.txt\"\n return filename", "def user_choice():\n\n OPTIONS = \"\"\"\n a) See ratings\n b) Add rating\n c) Quit\n \"\"\"\n\n while True:\n print OPTIONS\n user_choice = raw_input(\"What would you like to do?: \")\n\n if user_choice == \"a\":\n restaurant_rating(filename)\n elif user_choice == \"b\":\n user_input()\n elif user_choice == \"c\":\n print \"Good-bye!\"\n break\n else:\n print \"Invalid input\"", "def choose(fdin, fdout, prompt, map_choices):\n cols = get_terminal_width(fdin)\n line = prompt\n print_line_ellipsized(fdout, cols, line)\n while True:\n char = read_one_character(fdin)\n if char in map_choices:\n return map_choices[char]", "def ask_input(task):\n\n folder_found = False\n folder = \"\"\n ped_name = \"\"\n # Accepted undo operations\n _undo_operation = [\n 'Q', 'QUIT',\n 'B', 'BACK',\n 'U', 'UNDO'\n ]\n\n # Request for the data folder and check of its existence (if undo operation, return to the menu)\n while not folder_found:\n folder = input(\"Insert the folder path containing the PED you want to analyze: \")\n if folder.upper() in _undo_operation:\n return None\n if not os.path.isdir(folder):\n print('WARNING : You must provide an existing data location\\n')\n else:\n folder_found = True\n\n exit_menu = False\n while not exit_menu:\n\n # Extract PED ID of files contained in the folder: if no one is present, return to the menu\n ped_names = extract_names(folder, task=task)\n if len(ped_names) == 0:\n print('WARNING : Folder not containing PED files!')\n return None\n\n # Request for the PED to be analyzed showing the ones present in the folder\n print('')\n print('Which PED do you want to analyze: ')\n for i, names in enumerate(ped_names):\n print('\\t{} - {}'.format(i, names))\n\n ped_name = input('\\nYour choice: ')\n\n # Check of the choice\n if ped_name.upper() in _undo_operation:\n return None\n else:\n exit_menu, ped_name = check_input(ped_name, ped_names)\n\n return [folder, ped_name]", "def select_path(self):\r\n pass", "def choose(self, choice):\n if self.available(choice):\n self.select(choice)", "def handle_select(self):\n #self.selected = input('>> ')\n self.selected = '0'\n if self.selected in ['Q', 'q']:\n sys.exit(1)\n elif self.selected in ['B', 'b']:\n self.back_to_menu = True\n return True\n elif is_num(self.selected):\n if 0 <= int(self.selected) <= len(self.hrefs) - 1:\n self.back_to_menu = False\n return True\n else:\n print(Colors.FAIL +\n 'Wrong index. ' +\n 'Please select an appropiate one or other option.' +\n Colors.ENDC)\n return False\n else:\n print(Colors.FAIL +\n 'Invalid input. ' +\n 'Please select an appropiate one or other option.' 
+\n Colors.ENDC)\n return False", "def get_user_choice():\n user_input = input('Your choice: ')\n return user_input", "def ask_path():\n\n file_opt = options = {}\n options['initialdir'] = 'User\\\\'\n options['parent'] = root\n options['title'] = 'Choose directory'\n\n # get path name\n pathname = tk.filedialog.asksaveasfilename(**file_opt)\n\n if pathname:\n Data.out_dir = pathname\n path_var.set(pathname)", "def ask_path():\n\n file_opt = options = {}\n options['initialdir'] = 'User\\\\'\n options['parent'] = root\n options['title'] = 'Choose directory'\n\n # get path name\n pathname = tk.filedialog.asksaveasfilename(**file_opt)\n\n if pathname:\n Data.out_dir = pathname\n path_var.set(pathname)", "def switch(self, an_input: str):\n\n # Empty command\n if not an_input:\n print(\"\")\n return None\n\n (known_args, other_args) = self.res_parser.parse_known_args(an_input.split())\n\n # Help menu again\n if known_args.cmd == \"?\":\n self.print_help()\n return None\n\n # Clear screen\n if known_args.cmd == \"cls\":\n os.system(\"cls||clear\")\n return None\n\n if other_args:\n print(f\"The following args were unexpected: {other_args}\")\n\n return getattr(\n self, \"call_\" + known_args.cmd, lambda: \"Command not recognized!\"\n )(None)", "def main():\n switch_dict = {\"1\": thanks, \"2\": run_report, \"3\": create_thank_you_all, \"4\": exit_program}\n while True:\n response = input(menu)\n if response in switch_dict:\n switch_dict[response]()\n else:\n print(\"Not a valid option!\")", "def _selectInput(self):\n\n (my_file, my_path) = misc.get_file(FilterSpec='*.wav', \n DialogTitle='Select sound-input:', \n DefaultName='')\n if my_path == 0:\n print('No file selected')\n return 0\n else:\n full_in_file = os.path.join(my_path, my_file)\n print('Selection: ' + full_in_file)\n return full_in_file", "def browse_input(self):\n path = getAFolder()\n if len(path) > 0:\n self.in_directory.setText(path)\n self.out_directory.setText(join(path, 'merged_results'))\n self.preprocessfolder()", "def menu():\n print(\"Choose an option\")\n print(\"(L)ist Friends\")\n print(\"(A)dd Friend\")\n print(\"(C)lear List\")\n print(\"(Q)uit\")\n while True:\n choice = input(\"Now choose: \").lower().strip()\n if choice in 'lacq':\n return choice\n print(\"Invalid choice.\")", "def recipe_printer():\n\n path = os.curdir\n recipe = raw_input(\"What is the name of the recipe you want to view (don't use capital letters)? \")\n recipe += '.txt'\n if recipe in path:\n output = open(recipe)\n print output.read()\n output.close()\n else:\n print \"recipe was not found, please check your spelling.\"\n pass", "def choose_file(self):\n self.choice = self.client.choose_file()\n\n if self.choice.drive == 'document':\n from docsparser import DocsParser\n self.parser = DocsParser(self.client, self.choice)\n else:\n raise NotImplementedError('{} service not implemented'.format(self.choice.drive))\n\n return self.choice", "def get_choice(self):\n\n self.choice = int(input(\n \"\\nEntrez le chiffre correspondant à votre choix puis\"\n \"pressez sur ENTER : \"))\n return self.choice\n self.dispatch()", "def browse(self):\n\n self.filepath.set(fd.askopenfilename(initialdir=self._initaldir,\n filetypes=self._filetypes))", "def man_dir():\n\n print \"\\n\" + \"-\" * 8 + \"Select Direction\" + \"-\" * 8\n print \"1. Up\"\n print \"2. Down\"\n print \"3. Left\"\n print \"4. 
Right\"\n choice = valid(\"\\nSelect direction: \", 1, 4)\n\n if choice == 1:\n direct = \"U\"\n elif choice == 2:\n direct = \"D\"\n elif choice == 3:\n direct = \"L\"\n elif choice == 4:\n direct = \"R\"\n return direct", "def choose_directory():\n \n directory = askdirectory()\n chdir(directory)", "def input_user_choice_import(self):\r\n try:\r\n user_choice = input(\"Continuer l'importation? Y/N: \")\r\n if user_choice.lower() == 'y' or user_choice.lower() == 'n':\r\n return user_choice\r\n\r\n except ValueError:\r\n print(\"Veuillez choisir Y/N\")\r\n return self.input_user_choice_import()", "def load(file_choice = file):\n\t\tif file_choice!=file:\n\t\t\tfile = file_choice\n\n\t\tpass", "def choose(bot, trigger):\n if not trigger.group(2):\n return bot.reply('I\\'d choose an option, but you didn\\'t give me any.')\n choices = re.split('[\\|\\\\\\\\\\/]', trigger.group(2))\n pick = random.choice(choices)\n return bot.reply('Your options: %s. My choice: %s' % (', '.join(choices), pick))", "def choose_story():\n madLibQ = input(\n \"Now there are two mad lib stories you can journey through, \"\n \"which story would you like to choose? 1 or 2?: \")\n # Mad Lib Story #1\n if madLibQ == \"1\":\n madlib1()\n # Mad Lib Story #2\n elif madLibQ == \"2\":\n madlib2()\n else:\n print(\"\")\n print(\"Invalid entry. Please try again.\")\n print(\"\")\n choose_story()", "def file_path():\n file_name = input(\"Enter the file name:\")\n return file_name", "def player_choice(text):\n try:\n action_choice = input(text)\n return action_choice.lower()\n except NameError:\n print(\"Invalid input. Please try again.\")", "def pathfinder(Input):\n while True:\n if Input[-4::] == '.csv':\n return Input\n else:\n Input = input('Please enter a valid csv file: ')", "def choose_query(screening_log_path: str):\n while True:\n # Ask for what the user would like to do\n print(\"1. Basic Screening Log Stats\")\n print(\"2. Get basic stats between two dates\")\n print(\"3. Get basic stats by time of day\")\n choice = input(\"What actions would you like to take, q to quit \")\n\n choices = {'1': get_screening_log_basic_stats,\n '2': get_screening_log_stats_by_date,\n '3': get_screening_log_stats_by_time,\n }\n\n if choice is not None and choice.strip() and choices.get(choice) is not None:\n choices[choice](screening_log_path)\n\n elif choice is not None and choice.strip() and choice.lower() == 'q':\n break\n # Bad Entry\n else:\n print(\"Please enter a valid choice\")", "def handle_selection_eng(self):\n choice = self.get_input()\n if choice == '1':\n self.login_menu()\n elif choice == '2':\n self.authenticate_qr()\n elif choice == '3':\n self.authenticate_bluetooth()\n elif choice == '4':\n self.is_user = True\n self.display_main()", "def menu():\r\n cont = False\r\n while cont == False:\r\n choice = input(\"Enter a letter to choose an option:\\n\" +\r\n \"e - Enter preferences\\nr - Get recommendations\\n\" +\r\n \"p - Show most popular artists\\nh - How popular is the most popular\\n\" +\r\n \"m - Which user has the most likes\\nq - Save and quit\\n\")\r\n if isinstance(choice, str):\r\n cont = True\r\n else:\r\n print(\"please enter one of the choices above\")\r\n return choice", "def get_file():\n # Main Loop\n while True:\n filename = input(\"Please enter the name of the file you want to work on: \")\n # Check if file exists...\n if path.exists(filename):\n print(\"File sucessfully retrieved. 
Returning to previous menu...\")\n print()\n return filename\n \n print(\"That file does not exist in your current directroy. Try again.\")\n print()", "def main_menu(): \r\n \r\n print(\"Please enter in the major for the class you need to study for: \")\r\n print(\"1. for Electrical Engineering\")\r\n print(\"2. for Bioengineering\")\r\n print(\"3. for Chemcial Engineering\")\r\n print(\"4. for Mechanical Engineering\")\r\n print(\"5. for Civil Engineering\")\r\n print(\"6. for Biology\")\r\n print(\"7. for Data Analytics\")\r\n print(\"8. for Chemistry\")\r\n choice = input()\r\n return choice", "def get_choice():\n choice = input(\"Would you like to login/register: \")\n return choice", "def switch(self, an_input: str):\n\n # Empty command\n if not an_input:\n print(\"\")\n return None\n\n (known_args, other_args) = self.fx_parser.parse_known_args(an_input.split())\n\n # Help menu again\n if known_args.cmd == \"?\":\n self.print_help()\n return None\n\n # Clear screen\n if known_args.cmd == \"cls\":\n system_clear()\n return None\n\n return getattr(\n self, \"call_\" + known_args.cmd, lambda: \"command not recognized!\"\n )(other_args)", "def _take_option(self, options, print_out):\n user_choice = input(\"Please, choose one of the follewing options: \\n \" + print_out \n + \"\\n Your choice: \" )\n try:\n user_option = options[int(user_choice)]\n except KeyError:\n print(\"Please enter a vaild number\")\n self._take_option(options, print_out)\n \n except ValueError:\n print(\"Please a enter vaild number, not a string or some signs\")\n self._take_option(options, print_out)\n else:\n return user_option()", "def get_valid_path(file_path: Path, prompt_title: str=\"PATH TO FILE\") -> Path:\n\n print(f\"{Color.EMPHASIS}{prompt_title}{Color.END}\")\n while True:\n if file_path.exists() and file_path.is_file():\n return file_path\n else:\n file_path = Path(input(f\"{Color.INFORMATION}Enter the file's path: {Color.END}\"))", "def pick_place(choices_arg, question='Where to next?',inv=True):\r\n \r\n choices_alt = []\r\n \r\n if isinstance(choices_arg,list):\r\n choices = list(choices_arg)\r\n if inv:\r\n choices += ['inventory','map']\r\n \r\n elif isinstance(choices_arg,tuple):\r\n choices = choices_arg[0]\r\n choices_alt = choices_arg[1]\r\n if inv:\r\n choices += ['inventory','map']\r\n choices_alt += ['inventory','map']\r\n\r\n staying = True\r\n \r\n while staying:\r\n\r\n print question + '\\n'\r\n\r\n if choices_alt:\r\n for index in range(len(choices_alt)): #print alternate choices in menu form\r\n if str(choices[index]) == 'inventory':\r\n print\r\n print(str(index+1) + ': ' + str(choices_alt[index]))\r\n\r\n else:\r\n for index in range(len(choices)): #print choices in menu form\r\n if str(choices[index]) == 'inventory':\r\n print\r\n print(str(index+1) + ': ' + str(choices[index]))\r\n\r\n print('') #get some blank line in here yo\r\n chosen = raw_input('').lower()\r\n \r\n try:\r\n final = ''\r\n for index in range(len(choices)): #check if they typed a number\r\n item = choices[index]\r\n if index == int(chosen)-1:\r\n final = item\r\n staying = False\r\n if final == '':\r\n print 'Nice Try.\\n' #if they type a number not in range\r\n question = 'Try again, foo.'\r\n except:\r\n final = ''\r\n if choices_alt:\r\n for index in range(len(choices_alt)): #check if they typed letters\r\n item = choices_alt[index]\r\n if chosen == str(item).lower():\r\n final = choices[index]\r\n staying = False\r\n\r\n else:\r\n for index in range(len(choices)): #check if they typed letters\r\n item = 
choices[index]\r\n if chosen == str(item).lower():\r\n final = item\r\n staying = False\r\n if final == '':\r\n print 'Nice Try.\\n' #if they misspelled\r\n question = 'Try again, foo.'\r\n\r\n if final == 'map':\r\n inspect_map()\r\n question = 'Where to?'\r\n staying = True\r\n if final == 'inventory':\r\n inspect_inventory()\r\n question = 'Where to?'\r\n staying = True\r\n\r\n return final", "def choice():\n choice = input(\"press e to encode press d to decode or press q to quit:\")\n if choice == \"e\":\n return \"e\"\n\n elif choice == \"d\":\n return \"d\"\n else:\n print(\"Okay bye\")", "def handle_selection_main(self):\n choice = self.get_input()\n if choice == '1':\n self.display_cust()\n elif choice == '2':\n self.is_user = False\n self.display_eng()", "def menu() -> str:\r\n user_choice = \"\"\r\n while user_choice not in [\"1\", \"2\", \"3\"]:\r\n user_choice = input(\"What do you want to do? \\n\"\r\n \"Enter 1 to search \\n\"\r\n \"Enter 2 to move a book \\n\"\r\n \"Enter 3 to save and quit\")\r\n\r\n if user_choice == \"1\":\r\n return \"search\"\r\n elif user_choice == \"2\":\r\n return \"move\"\r\n elif user_choice == \"3\":\r\n return \"quit\"\r\n else:\r\n print(\"Invalid input, try again\")", "def menu_choice():\r\n choice = ' '\r\n while choice not in ['l', 'a', 'i', 'd', 's', 'x']:\r\n choice = input('Which operation would you like to perform? [l, a, i, d, s or x]: ').lower().strip()\r\n print() # Add extra space for layout\r\n return choice", "def select_dir(self):\n prev_val = self.var_path.get()\n if self.conf_dir == \"dir_app\" or self.conf_dir == \"urls\":\n dir_ = fd.askopenfilename(parent=self.parentframe,\n initialdir=Path.home()) or prev_val\n else:\n dir_ = fd.askdirectory(parent=self.parentframe,\n initialdir=Path.home()) or prev_val\n\n self.var_path.set(value=dir_)\n if dir_ != prev_val:\n conf[self.conf_dir] = dir_\n self.handle_modified()", "def select_path(self, path):\n self.exit_manager()\n\n if path.endswith('.obj'):\n EVENTS['IS_OBJ'] = True\n EVENTS['EDITOR_SAVED'] = True\n\n EVENTS['CAN_WRITE'] = True\n EVENTS['FILE_PATH'] = path\n EVENTS['LOADED_FILE'] = True\n toast(f'{path} loaded successfully')\n EVENTS['IS_RAM_EMPTY'] = False\n update_indicators(self.main_window, EVENTS['LOADED_FILE'])", "def askOption():\n while True:\n print(\"Do you want to (E)ncode or (D)ecode?\") \n choice = input(\">> \")\n \n if choice.lower() in ['d','e']:\n return choice", "async def choose(self, ctx, *args):\n query = \" \".join(args)\n choices = query.split(\" or \")\n if len(choices) < 2:\n await ctx.send(\"Give me at least 2 options to choose from! 
(separate options with `or`)\")\n self.logger.warning(misolog.format_log(ctx, f\"1 option\"))\n return\n choice = rd.choice(choices).strip()\n await ctx.send(f\"I choose **{choice}**\")\n self.logger.info(misolog.format_log(ctx, f\"{choice}\"))", "def menu(target=None, saveas=None, returnStr=False):\n stuff = dict(enumerate([file for file in os.listdir(os.path.join(currPath, \"h2comp\")) if file.endswith(\".py\")], 1))\n if target is None:\n print(\"what do u want?\")\n for k, v in stuff.items():\n print(f\"{k}:{v}\")\n target = \"\"\n while target not in stuff.keys():\n target = int(input(\"enter a valid key: \"))\n \n with open(currPath + f\"/h2comp/{stuff[target]}\") as f:\n if saveas is None:\n if returnStr:\n return f.read()\n else:\n print(f.read())\n else:\n with open(saveas, \"w\") as f2:\n f2.write(f.read())", "def browse(self):\n\t\tos.system(\"clear\")\n\t\tself._show_menu()\n\t\twhile True:\n\t\t\tchoice = input(\"Enter the according number above:\")\n\n\t\t\tif choice == \"1\":\n\t\t\t\tself._show_all_goods()\n\t\t\telif choice == \"2\":\n\t\t\t\tself._show_cate()\n\t\t\telif choice == \"3\":\n\t\t\t\tself._show_brand()\n\t\t\telif choice == \"4\":\n\t\t\t\tself._order()\n\t\t\t\tos.system('clear')\n\t\t\telse:\n\t\t\t\tcontinue\n\t\t\tself._show_menu()", "def execute_factory_menu(cls) -> LibraryItemFactory:\n print(\"Item Loader\")\n print(\"-----------\")\n print(\"What kind of items would you like to load?\")\n print(\"1. Manga\")\n print(\"2. Games\")\n print(\"3. Movies\")\n user_choice = int(input(\"Enter your choice (1-3):\"))\n factory = cls.factory_map[user_choice]\n path = input(\"Enter a path: \")\n return factory(path)", "def selection_input(\n self,\n prompt,\n choices,\n default=None,\n error_message=\"Invalid Selection\",\n transform=None\n ):\n while True:\n result = self.text_input(prompt, default)\n\n if transform is not None and result is not None:\n result = transform(result)\n\n if result in choices:\n return result\n\n print()\n print(error_message)", "def prompt_user():\n print('Please insert image direcory:')\n inpt = input('> ')\n return inpt", "def select_program(self):\r\n\r\n path_data = self.json_data # Deserialize json\r\n print(' '+path_data['title'] + '\\n\\n ' + path_data['desc'] + '\\n\\n' +\r\n path_data['path']['desc']) # Print title\r\n for option in path_data['path']['options']: # Print list of functions\r\n print(str(option['num'])+'. '+option['desc'])\r\n scenario = input()\r\n if scenario == '0':\r\n self.install_vk_api_for_python()\r\n elif scenario == '1':\r\n self.vk_sign_in()\r\n self.download_pics_from_dialogs()\r\n elif scenario == '2':\r\n self.vk_sign_in()\r\n self.tag = input('По какому тегу будем искать людей? 
(0 для отмены)\\n')\r\n if self.tag != '0':\r\n self.get_users_pool()\r\n self.get_friends_numbers()\r\n else:\r\n self.tag = None\r\n elif scenario == '3':\r\n counter = self.count_ffn()\r\n self.check_for_build_plot(counter)\r\n elif scenario == '4':\r\n self.vk_token_sing_in()\r\n self.find_most_popular()\r\n else:\r\n print('Ошибка ввода, попробуйте еще раз')\r\n time.sleep(0.5)\r\n self.select_program()\r\n print('Возвращаемся в главное меню...')\r\n time.sleep(1)\r\n self.select_program()", "def get_value(self):\r\n return input(\"Enter your choice :\")", "def main_menu_selection():\n action = input('''\n Pleaes select one:\n\n a - Send a thank you\n b - Create a report\n c - Quit\n >''')\n\n return action.strip()", "def menu():\r\n \r\n choice = int(input((\"1.BFS\\n2.DFS\\n3.BestFS\\n4.Beam Search\\n5.BBS\\n6.A*\\n7.Exit\\nChoose an option: \")))\r\n while choice < 1 or choice > 7:\r\n print(\"Choice is invalid! Try again\")\r\n choice = int(input((\"1.BFS\\n3.Beam Search\\n4.BBS\\n5.A*\\n6.Exit\\nChoose an option: \")))\r\n print(\"\")\r\n \r\n \r\n return choice", "def home(logger):\n while True:\n choice = input(\n \"Please choose: \"\n \"(1) view items to deliver, \"\n \"(2) view_products, \"\n \"(3) add new products, \"\n \"(4) update profile, \"\n \"(5) log out: \"\n )\n if choice not in ('1', '2', '3', '4', '5'):\n logger.log(\"Please pick a valid choice\")\n else:\n break\n logger.log(\n \"Please choose: \"\n \"(1) view items to deliver, \"\n \"(2) view_products, \"\n \"(3) add new products, \"\n \"(4) update profile, \"\n \"(5) log out: \"\n f\"{choice}\"\n )\n return choice", "def menu_page():\n while True:\n try:\n print(\"Please choose one of the following options(1,2,3):\"\n \"\\n1. Send a Thank you. \\n2. Create a report\"\n \"\\n3. Send Letters to Everyone \\n4. Quit\")\n option = int(input('--->'))\n except ValueError:\n print(\"You have made an invalid choice, try again.\")\n page_break()\n return option", "def select_file(category, performer=None):\n\n files = File.get_files_by_category(category, performer=performer)\n files_ = []\n for file in files:\n if isinstance(file, str):\n files_.append(PyInquirer.Separator())\n continue\n file.category = category\n file_ = {\n \"name\": file.get_title(),\n \"value\": file,\n }\n files_.append(file_)\n if len(files_) == 0:\n Settings.print(\"Missing Files\")\n return\n files_.append({\n \"name\": 'Back',\n \"value\": None,\n })\n question = {\n 'type': 'list',\n 'name': 'file',\n 'message': 'File Path:',\n 'choices': files_,\n # 'filter': lambda file: file.lower()\n }\n answer = PyInquirer.prompt(question)\n if not answer: return File.select_files()\n file = answer[\"file\"]\n if not Settings.confirm(file.get_path()): return None\n return file", "def logic(ghost):\n print_pause(\"Where do you want to go?\\n\", 2)\n choice = input(\n \"Choose on of the following numbers:\\n1.\" +\n \"Railway station\\n\" +\n \"2. church street\\n3. 
fort\\n\").lower()\n if choice not in \"1\" and choice not in \"2\" and choice not in \"3\":\n print_pause(\"\\nSorry I don't understand!\", 0)\n print_pause(\"Please repeat the input!\\n\", 0)\n logic(ghost)\n choice1(choice, ghost)\n choice2(choice, ghost)\n choice3(choice, ghost)", "def get_folder():\n return input(\"Folder: \")", "def choose(inp):\n if not inp.text:\n return lex.input.missing\n options = [i.strip() for i in inp.text.split(',') if i.strip()]\n if not options:\n return lex.input.incorrect\n return random.choice(options)", "def get_input(self):\n option = input(\"Enter the number of your choice: \")\n return option", "def Infor_file():\n \n import os\n import sys\n pdbfile_list()\n\n file_list = pdbfile_list()\n\n items = os.listdir('/home/njesh/python-mini-project-JaneNjeri/PDB_files/')\n \n file_list = [name for name in items if name.endswith('.pdb')]\n\n for count, fileName in enumerate(file_list, 0):\n sys.stdout.write(\"[%d] %s\\n\\r\" % (count, fileName))\n\n choice = int(input(\"Select pdb file[0-%s]: \" % count))\n print(file_list[choice])\n \n return choice\n choice = infile\n Infor_menu()\n \n Mainmenu()", "def menu_choice():\r\n choice = ' '\r\n while choice not in ['l', 'a', 'i', 'd', 's', 'x']:\r\n choice = input('Which operation wouild you like to perform? [l, a, i, d, s or x]: ').lower().strip()\r\n print() # Add extra space for layout\r\n return choice", "def question():\n print('Enter 1 to search database by habitat with detailed information\\nEnter 2 to search database by coordinates \\nEnter 3 to search by habitat in csv file for a quick overview without detail')\n print('habitat search options so far:\\n Alpenvorland, Niederrheinisches Tiefland, Oberrheinisches Tiefland')\n src = int(input('Enter here:'))\n\n if src == 1:\n habitat = input('Enter name of habitat\\n')\n query = \"habitat = '\" + habitat + \"'\"\n search_db_via_query(query)\n elif src == 2:\n search_by_coordinates()\n elif src == 3:\n search_by_habitat()\n else:\n print('no data')", "def prompt():\r\n inpt = -1\r\n valid_choices = ['1','2','3','4','5']\r\n while inpt not in valid_choices:\r\n inpt = input(\"\\nPlease select the number of the operation you wish \"\r\n \"to complete:\\n\" +\r\n \"1. Run file mover\\n2. Add directories\"\r\n \"\\n3. Remove directory\\n4. View saved directories\\n5. Quit\\n\").strip()\r\n if inpt not in valid_choices:\r\n print(\"\\n*** Invalid choice ***\")\r\n return inpt", "def execute_factory_menu(cls):\n print(\"Item Loader\")\n print(\"-----------\")\n print(\"What kind of items would you like to load?\")\n print(\"1. Manga\")\n print(\"2. Games\")\n print(\"3. 
Movies\")\n user_choice = int(input(\"Enter your choice (1-3):\"))\n factory_type = cls.factory_map[user_choice]\n\n file_name = input(\"\\nEnter file name of the excel file to load from:\")\n return factory_type(file_name)", "def inputChoice(self, question, options, hotkeys, default=None):\n options = options[:] # we don't want to edit the passed parameter\n for i in range(len(options)):\n option = options[i]\n hotkey = hotkeys[i]\n # try to mark a part of the option name as the hotkey\n m = re.search('[%s%s]' % (hotkey.lower(), hotkey.upper()), option)\n if hotkey == default:\n caseHotkey = hotkey.upper()\n else:\n caseHotkey = hotkey\n if m:\n pos = m.start()\n options[i] = '%s[%s]%s' % (option[:pos], caseHotkey,\n option[pos+1:])\n else:\n options[i] = '%s [%s]' % (option, caseHotkey)\n # loop until the user entered a valid choice\n while True:\n prompt = '%s (%s)' % (question, ', '.join(options))\n answer = self.input(prompt)\n if answer.lower() in hotkeys or answer.upper() in hotkeys:\n return answer\n elif default and answer=='': # empty string entered\n return default", "def transfer_menu():\n print(\"What type of transfer do you want to use?\")\n for key in sorted(TRANSFER_MENU_SELECTIONS):\n print(\"[%s] %s\" % (key, TRANSFER_MENU_SELECTIONS[key]))\n choice = raw_input(\"> \")\n while choice not in list(TRANSFER_MENU_SELECTIONS.keys()):\n choice = raw_input(\"> \")\n return choice", "def select_input(cls):\n #Change current working directory to root sdk directory\n Utility.pushd(Settings.rootSdkPath)\n print('Would you like to insert release notes?')\n print('0) Cancel')\n print('1) Insert directly from command line')\n print('2) Insert from a file')\n\n cls.init()\n\n inputValue = 0\n inputNote = False\n try:\n try: input = raw_input\n except NameError: pass\n inputValue = input('Select option: ')\n inputValue = int(inputValue)\n except SyntaxError:\n inputValue = 0\n if inputValue is 0:\n cls.logger.warning('Failed to create a release note')\n return False\n elif inputValue is 1:\n inputNote = cls.cmd_note()\n elif inputValue is 2:\n inputNote = cls.select_file()\n if inputNote is not False:\n mode = 'r+' if os.path.exists(Settings.releaseNotePath) else 'w+'\n with open(Settings.releaseNotePath, mode) as release_notes:\n oldContent = release_notes.read()\n release_notes.seek(0, 0)\n release_notes.write(inputNote.rstrip('\\r\\n') + '\\n' + oldContent)\n cls.logger.info('Successfuly created release note')\n # return to the base directory\n Utility.popd()\n return inputNote", "def switch(self, an_input: str):\n\n # Empty command\n if not an_input:\n print(\"\")\n return None\n\n (known_args, other_args) = self.etf_parser.parse_known_args(an_input.split())\n\n # Help menu again\n if known_args.cmd == \"?\":\n self.print_help()\n return None\n\n # Clear screen\n if known_args.cmd == \"cls\":\n os.system(\"cls||clear\")\n return None\n\n return getattr(\n self, \"call_\" + known_args.cmd, lambda: \"Command not recognized!\"\n )(other_args)", "def ask_for_sync_path():\n\n # Enable auto-completion of path and cursor movement using the readline and glob modules\n def path_completer(text, state):\n if u\"~\" in text:\n text = text.replace(u\"~\", os.path.expanduser(u\"~\"))\n\n paths = glob.glob(u\"%s*\" % text)\n paths.append(False)\n\n return os.path.abspath(paths[state]) + u'/'\n\n if unix:\n readline.set_completer_delims(u' \\t\\n;')\n readline.parse_and_bind(u\"tab: complete\")\n readline.set_completer(path_completer)\n\n found = False\n # Keep asking until a valid path has been 
entered by the user\n while not found:\n sync_path = input(u\"\\nEnter a relative or absolute path to sync to (~/Desktop/Canvas etc.):\\n$ \")\n\n # Expand tilde if present in the sync_path\n if u\"~\" in sync_path:\n sync_path = sync_path.replace(u\"~\", os.path.expanduser(u\"~\"))\n sync_path = os.path.abspath(sync_path)\n\n if not os.path.exists(os.path.split(sync_path)[0]):\n print(u\"\\n[ERROR] Base path '%s' does not exist.\" % os.path.split(sync_path)[0])\n else:\n found = True\n\n if unix:\n # Disable path auto-completer\n readline.parse_and_bind(u'set disable-completion on')\n\n return sync_path", "def get_filename_as_agrv_if_no_ask(prompt):\n Found = False\n ln = len(sys.argv)\n while not Found:\n if ln < 2:\n file = input( prompt)\n else:\n file = sys.argv[1]\n try:\n RFH = open(file)\n Found = True\n except FileNotFoundError:\n print(\"%%Error! File not found!\")\n ln = 1\n# break\n return RFH", "def get_user_choice():\n while True:\n direction = input(\"Please enter a direction as displayed above(type quit to exit): \").strip().upper()\n possible_directions = [\"N\", \"S\", \"W\", \"E\", \"QUIT\"]\n if direction not in possible_directions:\n print(\"Please enter only directions N, S, W, or E\")\n else:\n if direction == \"QUIT\":\n return \"quit\"\n return direction", "def display():\r\n name = input(\"Enter the filename:\\n\")\r\n if name==\"42.txt\":\r\n print(f42)\r\n elif name == \"1015.txt\":\r\n print(f1015)\r\n else:\r\n print(\"File not found\")", "def do_select(self, line):\n # Available data sources\n options = \"-csv\", \"-db\"\n args = list (arg.lower () for arg in str (line).split ())\n\n try:\n # Check if the input data source is available in this program or not\n if args[0] not in options:\n raise ValueError (\"The data resource is not available.\")\n else:\n # Code for initialise CSV data source\n if args[0] == \"-csv\":\n try:\n if len (args) == 1:\n self._shw.select_source (args[0][1:], \"employeeinfo.csv\")\n View.warning (\n \"No CSV file path specified. 
A default file \\\"employeeinfo.csv\\\" will be used.\")\n elif len (args) == 2:\n self._shw.select_source (args[0][1:], args[1])\n elif len (args) == 3:\n if args[1] == \"-a\":\n self._shw.select_source (args[0][1:], args[2], True)\n except (CSVError, OSError) as e:\n View.error (e)\n except Exception as e:\n View.error (e)\n else:\n View.success (\"Data source CSV is selected.\")\n\n # Code for initialise database source\n elif args[0] == \"-db\":\n try:\n self._shw.select_source (args[0][1:])\n except (ConnectionError, TypeError) as e:\n View.error (e)\n except Exception as e:\n View.error (e)\n else:\n View.success (\"Data source Database is selected.\")\n\n # Code for initialise XXXX data source\n else:\n pass\n # Catch and display error message\n except ValueError as e:\n View.error (str (e) + \"\\n\")\n View.help_select ()\n except Exception as e:\n View.error (e)", "def read_path():\n global path\n if len(sys.argv) >= 2:\n path = sys.argv[1]\n else:\n path = \"train\"", "def select_file_upload_method():\n\n if not Settings.prompt(\"upload files\"): \n return \"unset\"\n Settings.print(\"Select an upload source\")\n sources = Settings.get_source_options()\n question = {\n 'type': 'list',\n 'name': 'upload',\n 'message': 'Upload:',\n 'choices': [src.title() for src in sources]\n }\n upload = PyInquirer.prompt(question)[\"upload\"]\n\n\n # everything after this part should be in another function\n # this should just return the string of the upload source\n\n\n if str(upload) == \"Local\":\n return File.select_files()\n elif str(upload) == \"Google\":\n return Google_File.select_files()\n # elif str(upload) == \"Dropbox\":\n # return Dropbox.select_files()\n elif str(upload) == \"Remote\":\n return Remote.select_files()\n return File.select_files()", "def get_input(prompt, default=None, choices=None, option_value=None):\r\n if option_value is not None:\r\n return option_value\r\n \r\n choices = choices or []\r\n while 1:\r\n r = raw_input(prompt+' ').strip()\r\n if not r and default is not None:\r\n return default\r\n if choices:\r\n if r not in choices:\r\n r = None\r\n else:\r\n break\r\n else:\r\n break\r\n return r" ]
[ "0.6550537", "0.62204075", "0.61774844", "0.61713785", "0.6165053", "0.61213124", "0.6097766", "0.6087929", "0.60319537", "0.60028327", "0.59654415", "0.5920424", "0.59185994", "0.59036005", "0.5848893", "0.5798395", "0.5788869", "0.5716511", "0.5700164", "0.56988907", "0.5683611", "0.5629046", "0.5623778", "0.561621", "0.5597088", "0.5579222", "0.5576965", "0.5563234", "0.55550146", "0.5546257", "0.5540855", "0.5540855", "0.5521901", "0.55179477", "0.5511205", "0.5508837", "0.5503383", "0.55026823", "0.5491941", "0.54876757", "0.54766804", "0.5467942", "0.54505295", "0.54461294", "0.54319924", "0.54274917", "0.54249835", "0.5408128", "0.53945553", "0.5387609", "0.5387016", "0.53833956", "0.5357898", "0.535224", "0.5348514", "0.5338532", "0.5337121", "0.5331009", "0.5311084", "0.5310432", "0.53049046", "0.5299616", "0.5294007", "0.5290523", "0.5287872", "0.5287735", "0.5286779", "0.5266507", "0.5266069", "0.526603", "0.52619374", "0.5260811", "0.5258593", "0.525722", "0.5254329", "0.5253917", "0.5249332", "0.5247394", "0.52395064", "0.5236354", "0.5230498", "0.52281386", "0.52181333", "0.52060854", "0.5197987", "0.5195198", "0.5194893", "0.5193321", "0.51930386", "0.518885", "0.5185377", "0.5182879", "0.5180011", "0.5176222", "0.5175351", "0.5161326", "0.51594085", "0.51575845", "0.51531774", "0.5152769", "0.5149774" ]
document_score: 0.0
document_rank: -1
Run all dispatch tests
def dispatch(): suite = ServiceTestSuite() suite.addTest(unittest.makeSuite(AmazonTestCase, 'test_dispatch')) return suite
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def runTests(self):\n \n pass", "def doAllTests(self):\n # Initial offset\n self.getAlertsFile()\n self.offset = self.getOffset(self.config.get('PATHS', 'tempfile'))\n\n # Do all tests\n # As the socket is not persistent, client side attacks have to be done before all tests\n for module in self.modules:\n # Test is performed only if selected in config.cfg\n if self.config.get('TESTS', module[1]) == '1':\n print \"\\n%s\\n------------\" % module[0].upper()\n if module[1]=='clientSideAttacks':\n self.doClientSideAttacksTest( clientSideAttacks.ClientSideAttacks(self._target).getPayloads() )\n# elif module[1]=='multipleFailedLogins':\n# self.doMultipleFailedLoginsTest( multipleFailedLogins.MultipleFailedLogins(self._target).getPayloads() )\n else:\n self.doTest( module[1], eval( ('%s.%s'+'(self._target,self._cnf).getPayloads()') % (module[1], module[1][:1].upper()+module[1][1:]) ) )\n\n # Done!\n print \"\\n\\n-----------------------\"\n print \"DONE. Check the report.\"\n print \"-----------------------\\n\"", "def test_all():\n test_get_to()\n test_error_type()\n test_exchange()\n print(\"All tests passed.\")", "def dispatch():\n suite = ServiceTestSuite()\n suite.addTest(unittest.makeSuite(Test, 'test_dispatch'))\n return suite", "def main():\n run_test_all()", "def run_tests(self):\n raise NotImplementedError", "def runAll():\n\n loader = unittest.TestLoader()\n test_dir = pkg_resources.resource_filename('frvcpy.test','.')\n suite = loader.discover(test_dir)\n\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)", "def run_tests(self):\n\n # log\n self.logger.debug('\\n\\nExecute test methods:\\n-----------------------------')\n\n # test methods start here\n # ------------------------------------------------------------------\n # ------------------------------------------------------------------\n\n # dummy_method\n self.dummy_method()\n\n # ------------------------------------------------------------------\n # ------------------------------------------------------------------\n # test methods end here\n\n # log\n self.logger.debug('\\n\\n-----------------------------\\nFinished test methods.')", "def run_all_unit_tests(cls):\n suites_list = []\n for test_class in cls.TESTS:\n suite = unittest.TestLoader().loadTestsFromTestCase(test_class)\n suites_list.append(suite)\n result = unittest.TextTestRunner().run(unittest.TestSuite(suites_list))\n if not result.wasSuccessful() or result.errors:\n raise Exception(result)", "def run_all_tests(self):\n for index in range(len(self.__test_set_list)):\n self.run_test(index)", "def run_tests():\n \n test_constructor_positive()\n test_constructor_negative()\n test_game_move_positive()\n test_game_move_negative()\n test_game_move_edge()\n print(\"Congratulations ! 
You passed all the game test cases.\")", "def run_all(self):\n failures, errors = [], []\n\n # Run each test case registered with us and agglomerate the results.\n for case_ in self.cases:\n case_.run()\n update_results(failures, errors, case_)\n\n # Display our results.\n print_errors(errors)\n print_failures(failures)\n print_overview(errors, failures)\n\n # Exit with 0 if all tests passed, >0 otherwise.\n sys.exit(len(failures) + len(errors))", "def test_all():\n test_prepare_text()\n test_end_chat()\n test_choose_author()\n test_choose_book()", "def run_all_tests():\n remove_dbs()\n run_training_tests()\n run_custom_training_tests()\n run_training_save_tests()\n run_validation_tests()\n run_feature_extraction_tests()", "def execute(self):\n for test in self.tests:\n test.execute()\n self.logger.dump()\n print(\"Finished!\")", "def run(self):\n if self.all:\n cmd = self.apply_options(self.test_all_cmd)\n self.call_and_exit(cmd)\n else:\n cmds = (self.apply_options(self.unit_test_cmd, (\"coverage\",)),)\n if self.coverage:\n cmds += (self.apply_options(self.coverage_cmd),)\n self.call_in_sequence(cmds)", "def tests():", "def RunAll():\n testfunctions = []\n for name, obj in inspect.getmembers(sys.modules[__name__]):\n if inspect.isfunction(obj) and name != 'RunAll':\n testfunctions.append(obj)\n\n # run all the functions\n for f in testfunctions:\n print('Running %s' % str(f))\n f()", "def run_code(self, test):\n for action in test:\n self.assertEquals(1, len(action))\n action_type, action = list(action.items())[0]\n\n if hasattr(self, \"run_\" + action_type):\n getattr(self, \"run_\" + action_type)(action)\n else:\n raise InvalidActionType(action_type)", "def runalltests():\n doctest.testmod()", "def _run_tests(self):\n for pyunit_testcase in self.cfg.testcases:\n yield self._run_testsuite(pyunit_testcase)", "def runtests():\n #- Load all TestCase classes from desistar/test/test_*.py\n tests = desistar_test_suite()\n #- Run them\n unittest.TextTestRunner(verbosity=2).run(tests)", "def run_all_tests(self) -> None:\n self.run_trt_precision_tests()\n logging.info(\"Check analysis result at: %s\", self._output_dir)", "async def run(self):\n print(\"\".join((\"-\" * 8, type(self).__name__, \"-\" * 8)))\n for method_name in dir(self):\n if not method_name.startswith(\"test\"):\n continue\n print(method_name, end=\"... \")\n try:\n await getattr(self, method_name)()\n except AssertionError:\n print(\"FAIL\")\n traceback.print_exception(*sys.exc_info())\n except Exception: # pylint: disable=broad-except\n print(\"ERROR\")\n traceback.print_exception(*sys.exc_info())\n else:\n print(\"PASS\")\n print()", "def runtest(self):", "def collectTests(self, global_ctx):\n pass", "def __main() :\n launchTests()", "def run(self) -> None:\n self.test_sanity()\n if self.has_errors():\n return\n\n tests: List[Callable[[], None]] = [\n self.test_headlines_predefined,\n self.test_headlines_required,\n self.test_headlines_dependencies,\n self.test_headlines_order,\n self.test_headlines_named_entities,\n self.test_named_entities,\n self.test_reading_attributes,\n self.test_forbidden_words,\n self.test_unwanted_words,\n self.test_police_abbreviations,\n self.test_spelling,\n self.test_grammar_rules_regex,\n ]\n\n for test in tests:\n if self.stop_on_error and self.has_errors():\n break\n test()", "def run():\n\tsubsuite_list = []\n\tfor _, modname, _ in pkgutil.iter_modules(test.__path__):\n\t\tif modname.startswith(\"test_\"):\n\t\t\tmodule = importlib.import_module('test.' 
+ modname)\n\t\t\tsubsuite = unittest.TestLoader().loadTestsFromModule(module)\n\t\t\tsubsuite_list.append(subsuite)\n\tsuite = unittest.TestSuite(subsuite_list)\n\n\tprint(\"Testing:\\n\")\n\tunittest.TextTestRunner(verbosity=2).run(suite)", "def test():\n\n tests = unittest.TestLoader().discover('api/tests/', pattern='*/test_*.py')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def tests(context):\n black(context)\n isort(context)\n flake8(context)\n pylint(context)\n yamllint(context)\n pydocstyle(context)\n bandit(context)\n pytest(context)\n\n print(\"All tests have passed!\")", "def actionRunUnitTests():\n UnitTestRunner.init()\n \n for target in Settings.targets:\n for platform in Settings.targetPlatforms:\n for cpu in Settings.targetCPUs:\n for configuration in Settings.targetConfigurations:\n if not Summary.checkIfActionFailed(ACTION_BUILD, target, platform, cpu, configuration):\n Logger.printStartActionMessage('Running unit tests for ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration, ColoredFormatter.YELLOW)\n result = UnitTestRunner.run(target, platform, cpu, configuration)\n Summary.addSummary(ACTION_RUN_UNITTESTS, target, platform, cpu, configuration, result, UnitTestRunner.executionTime)\n if result != NO_ERROR:\n Logger.printEndActionMessage('Failed to execute unit tests!')\n else:\n Logger.printEndActionMessage('Executed all unit tests')", "def run_tests(self):\n with self.report.timer.record(\"run\"):\n self.result.report.extend(self._run_tests())", "def runAllTests():\n\tttr = unittest.TextTestRunner(verbosity=3).run(suite())\n\tnTests = ttr.testsRun + len(ttr.skipped)\n\tprint(\"Report:\")\n\tprint(\"\\t\" + str(len(ttr.failures)) + \"/\" + str(nTests) + \" failed\")\n\tprint(\"\\t\" + str(len(ttr.errors)) + \"/\" + str(nTests) + \" errors\")\n\tprint(\"\\t\" + str(len(ttr.skipped)) + \"/\" + str(nTests) + \" skipped\")", "def test_methods(self):\n\n #log\n self.logger.debug('\\n\\nExecute test methods:\\n-----------------------------')\n\n\n \n #test methods here\n #------------------------------------------------------------------\n\n #dummy_method\n self.dummy_method()\n\n #------------------------------------------------------------------\n\n\n\n #log\n self.logger.debug('\\n\\n-----------------------------\\nFinished test methods.')", "def runTest(self):\n unittest.main()\n ChoreTest.clean_up()", "def test_methods(self):\n\n #log\n self.logger.debug('\\n\\nExecute test methods:\\n-----------------------------')\n\n\n \n #test methods here\n #------------------------------------------------------------------\n\n #dummy_method\n self.dummy_method()\n\n #stylesheet_test\n #self.stylesheet_test(self.wdgt_explanation)\n\n #------------------------------------------------------------------\n\n\n\n #log\n self.logger.debug('\\n\\n-----------------------------\\nFinished test methods.')", "def RunTestAll(ss):\n ss.StopNow = False\n ss.TestAll()\n ss.Stopped()", "def run_tests(tests):\n return [test(t) for t in tests]", "def dispatch():\n app.run()", "def runTest(self):\n\t\tself.setUp()\n\t\tself.test_postopProgramming1()", "def main():\n # add all new test suites per test module here\n suite_date = test_date.suite()\n suite_ng = test_ng.suite()\n suite_page = test_page.suite()\n suite_container = test_container.suite()\n\n # add the suite to be tested here\n alltests = unittest.TestSuite((suite_date,\n suite_ng,\n suite_page,\n suite_container))\n\n # run the suite\n runner = unittest.TextTestRunner()\n runner.run(alltests)", "def 
__run_test_methods(self):\n for test_method in self.runnable_test_methods():\n\n result = TestResult(test_method)\n test_method.im_self.test_result = result\n\n try:\n self._method_level = True # Flag that we're currently running method-level stuff (rather than class-level)\n\n # run \"on-run\" callbacks. eg/ print out the test method name\n for callback in self.__callbacks[self.EVENT_ON_RUN_TEST_METHOD]:\n callback(result.to_dict())\n result.start()\n\n if self.__class_level_failure:\n result.end_in_failure(self.__class_level_failure)\n elif self.__class_level_error:\n result.end_in_error(self.__class_level_error)\n else:\n # first, run setup fixtures\n self._stage = self.STAGE_SETUP\n def _setup_block():\n for fixture_method in self.setup_fixtures + [ self.setUp ]:\n fixture_method()\n self.__execute_block_recording_exceptions(_setup_block, result)\n\n def _run_test_block():\n # then run the test method itself, assuming setup was successful\n self._stage = self.STAGE_TEST_METHOD\n if not result.complete:\n self.__execute_block_recording_exceptions(test_method, result)\n\n def _setup_teardown_block():\n self.__enter_context_managers(self.setup_teardown_fixtures, _run_test_block)\n\n # then run any setup_teardown fixtures, assuming setup was successful.\n if not result.complete:\n self.__execute_block_recording_exceptions(_setup_teardown_block, result)\n\n # finally, run the teardown phase\n self._stage = self.STAGE_TEARDOWN\n def _teardown_block():\n for fixture_method in [ self.tearDown ] + self.teardown_fixtures:\n fixture_method()\n self.__execute_block_recording_exceptions(_teardown_block, result)\n\n # if nothing's gone wrong, it's not about to start\n if not result.complete:\n result.end_in_success()\n except (KeyboardInterrupt, SystemExit):\n result.end_in_interruption(sys.exc_info())\n raise\n finally:\n for callback in self.__callbacks[self.EVENT_ON_COMPLETE_TEST_METHOD]:\n callback(result.to_dict())\n\n self._method_level = False\n\n if not result.success:\n self.failure_count += 1\n if self.failure_limit and self.failure_count >= self.failure_limit:\n return", "def test_standard_tap_tests():\n tests = get_standard_tap_tests(TapPartoo, config=SAMPLE_CONFIG)\n for test in tests:\n test()", "def run_tests():\n testfiles = ['tests.test_overall']\n exclude = ['__init__.py', 'test_overall.py']\n for t in glob(pjoin('tests', '*.py')):\n if True not in [t.endswith(ex) for ex in exclude]:\n if basename(t).startswith('test_'):\n testfiles.append('tests.%s' % splitext(basename(t))[0])\n\n suites = []\n for file in testfiles:\n __import__(file)\n suites.append(sys.modules[file].suite)\n\n tests = unittest.TestSuite(suites)\n runner = unittest.TextTestRunner(verbosity=2)\n\n # Disable logging output\n logging.basicConfig(level=100)\n logging.disable(100)\n\n result = runner.run(tests)\n return result", "def run_all_unit_tests():\n original = verify.parse_content\n try:\n verify.parse_content = parse_string_in_scope\n\n test_list_of()\n\n test_activity_multiple_choice()\n test_activity_free_text()\n test_activity_multiple_choice_group()\n test_activity_ast()\n\n test_assessment()\n test_assessment_ast()\n\n # test existing verifier using parsing instead of exec/compile\n verify.test_sample_assets()\n finally:\n verify.parse_content = original", "def RunTest(self):\n self.TestLs()\n self.TestTerminate()\n self.TestMultipleProcesses()", "def runall():\n sclogic.runall()", "def unittest():\n from a6test import test_all\n test_all()", "def test_catchall():\n\n dispatcher = 
ntelebot.dispatch.Dispatcher()\n dispatcher.add(lambda ctx: 'DISPATCHED')\n ctx = MockContext()\n assert dispatcher(ctx) == 'DISPATCHED'", "def spec_tests():\n pass", "def run_tests(self):\n manifest = manifestparser.TestManifest(\n manifests=[os.path.join(self.repository_path, self.manifest_path)],\n strict=False)\n\n tests = manifest.active_tests(**mozinfo.info)\n self._mozmill.run(tests, self.options.restart)\n\n # Whenever a test fails it has to be marked, so we quit with the correct exit code\n self.last_failed_tests = self.last_failed_tests or self._mozmill.results.fails\n\n self.testrun_index += 1", "def do_test(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tif not self.build['dotest']:\n\t\t\tself.log('Tests configured off, not running',level=logging.DEBUG)\n\t\t\treturn\n\t\t# Test in reverse order\n\t\tself.log('PHASE: test', level=logging.DEBUG)\n\t\tself.stop_all()\n\t\tself.start_all()\n\t\tfor module_id in self.module_ids(rev=True):\n\t\t\t# Only test if it's installed.\n\t\t\tif self.is_installed(self.shutit_map[module_id]):\n\t\t\t\tself.log('RUNNING TEST ON: ' + module_id, level=logging.DEBUG)\n\t\t\t\tself.login(prompt_prefix=module_id,command=shutit_global.shutit_global_object.bash_startup_command,echo=False)\n\t\t\t\tif not self.shutit_map[module_id].test(self):\n\t\t\t\t\tself.fail(module_id + ' failed on test', shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').pexpect_child) # pragma: no cover\n\t\t\t\tself.logout(echo=False)", "def _do_test(self):\n\n process_all_events()\n\n if self.list:\n (callback, args, kwargs) = self.list.pop(0)\n callback(*args, **kwargs)\n else:\n safe_exit(force=1)", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover(\"tests\")\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test_run_all_some_rule_triggered(self, *args):\n rule1 = {\n 'conditions': 'condition1',\n 'actions': 'action name 1'\n }\n rule2 = {\n 'conditions': 'condition2',\n 'actions': 'action name 2'\n }\n variables = BaseVariables()\n actions = BaseActions()\n\n def return_action1(rule, *args, **kwargs):\n return rule['actions'] == 'action name 1'\n\n engine.run.side_effect = return_action1\n\n result = engine.run_all([rule1, rule2], variables, actions)\n self.assertTrue(result)\n self.assertEqual(engine.run.call_count, 2)\n\n # switch order and try again\n engine.run.reset_mock()\n\n result = engine.run_all([rule2, rule1], variables, actions)\n self.assertTrue(result)\n self.assertEqual(engine.run.call_count, 2)", "def test():\n import unittest\n tests = unittest.TestLoader().discover(tests)\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test_dispatch(self):\n disp = Dispatcher()\n args = (1, 2)\n res = disp.dispatch(\"working\", *args)\n self.assertEqual(res, args)", "def run_feature_extraction_tests():\n test_feature_extraction()\n test_distributed_feature_extraction()\n test_multimodel_feature_extraction()\n test_distributed_multimodel_feature_extraction()", "def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()", "def run_suite(*test_classes):\n\n loader = unittest.TestLoader()\n suite = unittest.TestSuite()\n for test_class in test_classes:\n tests = loader.loadTestsFromTestCase(test_class)\n suite.addTests(tests)\n if suite is not None:\n unittest.TextTestRunner(verbosity=2).run(suite)\n return", "def run_test_suites(self, suites):\n for suite_class in suites:\n test_suite = suite_class(self)\n results = test_suite.run()\n self.test_results += results", 
"def run_combined(self):\n self.runtest_autokey()\n self.runtest_mediaresource()\n self.runtest_composite_slug()\n self.runtest_all_types()\n self.runtest_complex_types()\n self.runtest_only_key()\n self.runtest_compound_key()\n self.runtest_simple_select()\n self.runtest_paging()\n self.runtest_nav_o2o()\n self.runtest_nav_o2o_1()\n self.runtest_nav_zo2o()\n self.runtest_nav_zo2o_f()\n self.runtest_nav_zo2o_b()\n self.runtest_nav_many2o()\n self.runtest_nav_many2o_f()\n self.runtest_nav_many2o_b()\n self.runtest_nav_many2zo()\n self.runtest_nav_many2zo_f()\n self.runtest_nav_many2zo_b()\n self.runtest_nav_many2zo_r()\n self.runtest_nav_many2zo_rf()\n self.runtest_nav_many2zo_rb()\n self.runtest_nav_many2many()\n self.runtest_nav_many2many_1()\n self.runtest_nav_many2many_r()\n self.runtest_nav_many2many_r1()", "def startTestRun(self):", "def run(self):\n list_test_scenarios = self.__get_list_scenarios_in_folder()\n\n if not list_test_scenarios:\n utils.print_error(\n \"\\n{}\\n\".format(constant.ERR_CANNOT_FIND_ANY_TEST_SCENARIOS))\n exit(1)\n\n (tests_pass, tests_fail) = self.__execute_tests(list_test_scenarios)\n\n complete_message = constant.INFO_TEST_PASS_FAIL.format(\n tests_pass, tests_fail)\n\n print(complete_message)\n\n self.__execute_reporter()", "def test():\n import unittest\n testmodules = [\n 'bettermathlib_tests',\n 'randomwebapp_tests',\n ]\n suite = unittest.TestSuite()\n for t in testmodules:\n suite.addTest(unittest.defaultTestLoader.loadTestsFromName(t))\n unittest.TextTestRunner(verbosity=2).run(suite)", "def test_actions(self, actions):\n try:\n for action in actions:\n self.get_action(action['type'])(**action)\n except Exception as e:\n print('Exception: {}'.format(str(e)))", "def run(self):\n\n self.__run_class_setup_fixtures()\n self.__enter_context_managers(self.class_setup_teardown_fixtures, self.__run_test_methods)\n self.__run_class_teardown_fixtures()", "def _run_local_tests(self, *args, **kwargs):\n pass", "def test():\n test_app()\n test_pagebrowser()", "def test(self):\n for arch, python in self.python:\n self.run(f\"{python} -m pytest\")", "def run_tests(self):\n # Charm does not defer hooks so that test is not included.\n # Trigger a package change which requires a restart\n self.run_package_change_test(\n 'ovn-central',\n 'ovn-central')", "def run_tests(self):\n\n self.endurance_results = []\n self._mozmill.add_listener(self.endurance_event, eventType='mozmill.enduranceResults')\n self._mozmill.persisted['endurance'] = {'delay': self.delay,\n 'iterations': self.options.iterations,\n 'entities': self.options.entities,\n 'restart': self.options.restart}\n\n self.manifest_path = os.path.join('tests', 'endurance')\n if not self.options.reserved:\n self.manifest_path = os.path.join(self.manifest_path,\n \"manifest.ini\")\n else:\n self.manifest_path = os.path.join(self.manifest_path,\n 'reserved',\n self.options.reserved + \".ini\")\n TestRun.run_tests(self)", "def run_tests(self, cov, functionsToRun): # pragma: nested\n print(\"runed cases\")\n for context in functionsToRun:\n #print(context)\n info = context.split(\".\")\n suite_name =info[0]\n #print(suite_name)\n className = info[1]\n caseName = info[2]\n cov.start()\n suite = import_local_file(suite_name)\n #print(dir(suite))\n try:\n # Call all functions in this module\n for name in dir(suite):\n variable = getattr(suite, name)\n #print(\"variable.__name__\")\n #print(variable.__name__)\n if inspect.isclass(variable) and variable.__name__== className:\n obj = variable()\n \n memberNames = 
inspect.getmembers(variable,inspect.isfunction)\n \n for member in memberNames:\n if member[0].startswith('test_') and member[0] == caseName:\n \n print(context)\n getattr(obj, member[0])()\n #if inspect.isfunction(variable):\n # variable()\n finally:\n cov.stop()", "def make_all():\n\n if not MASTER.exists():\n os.makedirs(MASTER)\n members = inspect.getmembers(sys.modules[__name__])\n members = [f for f in members if 'test_' in f[0]]\n for member in members:\n print('Running %s...' % member[0], end='')\n member[1](master=True)\n print('done!')", "def make_all():\n\n if not MASTER.exists():\n os.makedirs(MASTER)\n members = inspect.getmembers(sys.modules[__name__])\n members = [f for f in members if 'test_' in f[0]]\n for member in members:\n print('Running %s...' % member[0], end='')\n member[1](master=True)\n print('done!')", "def test_basic_execution(self):", "def run_test(self):\n raise NotImplementedError", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test_all_circuit_types(self):\n for circuit_type in self.circuits:\n\n # Create a subTest for each type of circuit\n with self.subTest(circuit_type=circuit_type):\n self.check_circuit_type(circuit_type)", "def execute_tests():\n\n if len(sys.argv) > 1:\n # Filter test list based on command line requests\n tests_to_run = []\n for requested in sys.argv[1:]:\n for func, param in registered_tests:\n if param == requested:\n tests_to_run += [(func, param)]\n break\n else:\n print('Unknown test ' + requested)\n sys.exit(1)\n else:\n tests_to_run = registered_tests\n\n failing_tests = []\n for func, param in tests_to_run:\n print(param + (' ' * (OUTPUT_ALIGN - len(param))), end='')\n sys.stdout.flush()\n try:\n func(param)\n print(COLOR_GREEN + 'PASS' + COLOR_NONE)\n except KeyboardInterrupt:\n sys.exit(1)\n except TestException as exc:\n print(COLOR_RED + 'FAIL' + COLOR_NONE)\n failing_tests += [(param, exc.args[0])]\n except Exception as exc: # pylint: disable=W0703\n print(COLOR_RED + 'FAIL' + COLOR_NONE)\n failing_tests += [(param, 'Test threw exception:\\n' +\n traceback.format_exc())]\n\n if failing_tests:\n print('Failing tests:')\n for name, output in failing_tests:\n print(name)\n print(output)\n\n print(str(len(failing_tests)) + '/' +\n str(len(tests_to_run)) + ' tests failed')\n if failing_tests != []:\n sys.exit(1)", "def run_tests():\n fail = []\n okay = []\n for i in os.listdir(\".\"):\n if i.find(\"_test_\") > -1 and i.endswith(\".py\"):\n if 0 != subprocess.call(\"python \" + i, shell=True):\n fail.append(i)\n else:\n okay.append(i)\n if fail:\n print(\"[ERROR] The following %u tests failed: %r\" % (len(fail), fail))\n return False\n print(\"[DONE] All %u tests completely successfully!\" % (len(okay)))\n return True", "def main():\n test_merge_quick_sort()\n test_compare()", "def test_dispatch_all0(self):\n req1 = FakeRequest(1, False)\n req2 = FakeRequest(2, False)\n req3 = FakeRequest(3, False)\n req4 = FakeRequest(4, False)\n req5 = FakeRequest(5, False)\n\n self.request_buffer.append(req1)\n self.request_buffer.append(req2)\n self.request_buffer.append(req3)\n self.request_buffer.append(req4)\n 
self.request_buffer.append(req5)\n\n self.request_buffer.dispatch_all()\n\n self.assertEqual(\n [True]*5,\n [req.dispatched for req in self.request_buffer.requests]\n )", "def run_all_tests():\n successes = 0\n testsrun = 0\n testsdir = tests_dirpath()\n for test in os.listdir(testsdir):\n path = os.path.join(testsdir, test)\n if os.path.isdir(path):\n testsrun += 1\n if run_test(path):\n successes += 1\n print(\"--- %d/%d TESTS PASSED ---\" % (successes, testsrun))\n return successes == testsrun", "def run_tests(self):\n\n self.test_report = []\n\n #dict of unsorted lists\n dict_of_un_lists = self.dict_un_lists_intersection_test(self.data_dict)\n self.test_report.append(dict_of_un_lists)\n\n #dict of sets\n dict_of_sets = self.build_dict_of_sets(self.data_dict)\n self.test_report.append(self.dict_sets_intersection_test(dict_of_sets))\n\n #pandas - experimental and probably not the way to use pandas\n # dict_of_pandas = self.build_dict_of_panda_series(self.data_dict)\n # self.test_report.append(self.dicts_any_intersection_node_test(dict_of_pandas))\n\n # print results\n\n if self.verbose:\n self.print_tests_results()", "def do_test():\n for x in execute_helper(test_info,crossmap_tests):\n yield x", "def runTest(self):\n self.setUp()\n self.test_BiplaneRegistration1()", "def run_tests(self):\n # Trigger a config change which triggers a deferred hook.\n self.run_charm_change_hook_test('configure_ovs')\n\n # Trigger a package change which requires a restart\n self.run_package_change_test(\n 'openvswitch-switch',\n 'openvswitch-switch')", "def run_tests(self):\n # Trigger a config change which triggers a deferred hook.\n self.run_charm_change_hook_test('configure_ovs')\n\n # Trigger a package change which requires a restart\n self.run_package_change_test(\n 'openvswitch-switch',\n 'openvswitch-switch')", "def run(self, args):\n start_time = time.time()\n self._printer.write_update('Collecting tests ...')\n running_all_tests = False\n\n try:\n paths, all_test_names, running_all_tests = self._collect_tests(\n args)\n except IOError:\n # This is raised if --test-list doesn't exist\n return test_run_results.RunDetails(\n exit_code=exit_codes.NO_TESTS_EXIT_STATUS)\n\n test_names = self._finder.split_into_chunks(all_test_names)\n if self._options.order == 'natural':\n test_names.sort(key=self._port.test_key)\n elif self._options.order == 'random':\n test_names.sort()\n random.Random(self._options.seed).shuffle(test_names)\n elif self._options.order == 'none':\n # Restore the test order to user specified order.\n # base.tests() may change the order as it returns tests in the\n # real, external/wpt, virtual order.\n if paths:\n test_names = self._restore_order(paths, test_names)\n\n if not self._options.no_expectations:\n self._printer.write_update('Parsing expectations ...')\n self._expectations = test_expectations.TestExpectations(self._port)\n\n tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)\n\n self._printer.print_found(\n len(all_test_names), len(test_names), len(tests_to_run),\n self._options.repeat_each, self._options.iterations)\n\n # Check to make sure we're not skipping every test.\n if not tests_to_run:\n msg = 'No tests to run.'\n if self._options.zero_tests_executed_ok:\n _log.info(msg)\n # Keep executing to produce valid (but empty) results.\n else:\n _log.critical(msg)\n code = exit_codes.NO_TESTS_EXIT_STATUS\n return test_run_results.RunDetails(exit_code=code)\n\n exit_code = self._set_up_run(tests_to_run)\n if exit_code:\n return 
test_run_results.RunDetails(exit_code=exit_code)\n\n if self._options.num_retries is None:\n # If --test-list is passed, or if no test narrowing is specified,\n # default to 3 retries. Otherwise [e.g. if tests are being passed by\n # name], default to 0 retries.\n if self._options.test_list or len(paths) < len(test_names):\n self._options.num_retries = 3\n else:\n self._options.num_retries = 0\n\n should_retry_failures = self._options.num_retries > 0\n\n try:\n self._register_termination_handler()\n self._start_servers(tests_to_run)\n if self._options.watch:\n run_results = self._run_test_loop(tests_to_run, tests_to_skip)\n else:\n run_results = self._run_test_once(tests_to_run, tests_to_skip,\n should_retry_failures)\n initial_results, all_retry_results = run_results\n finally:\n _log.info(\"Finally stop servers and clean up\")\n self._stop_servers()\n self._clean_up_run()\n\n if self._options.no_expectations:\n return test_run_results.RunDetails(0, [], [], initial_results,\n all_retry_results)\n\n # Some crash logs can take a long time to be written out so look\n # for new logs after the test run finishes.\n self._printer.write_update('Looking for new crash logs ...')\n self._look_for_new_crash_logs(initial_results, start_time)\n for retry_attempt_results in all_retry_results:\n self._look_for_new_crash_logs(retry_attempt_results, start_time)\n\n self._printer.write_update('Summarizing results ...')\n summarized_full_results = test_run_results.summarize_results(\n self._port, self._options, self._expectations, initial_results,\n all_retry_results)\n summarized_failing_results = test_run_results.summarize_results(\n self._port,\n self._options,\n self._expectations,\n initial_results,\n all_retry_results,\n only_include_failing=True)\n run_histories = test_run_results.test_run_histories(\n self._options, self._expectations, initial_results,\n all_retry_results)\n\n exit_code = summarized_failing_results['num_regressions']\n if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:\n _log.warning('num regressions (%d) exceeds max exit status (%d)',\n exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)\n exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS\n\n if not self._options.dry_run:\n self._write_json_files(summarized_full_results,\n summarized_failing_results, initial_results,\n running_all_tests, run_histories)\n\n self._copy_results_html_file(self._artifacts_directory,\n 'results.html')\n if (initial_results.interrupt_reason is\n test_run_results.InterruptReason.EXTERNAL_SIGNAL):\n exit_code = exit_codes.INTERRUPTED_EXIT_STATUS\n else:\n if initial_results.interrupted:\n exit_code = exit_codes.EARLY_EXIT_STATUS\n if (self._options.show_results\n and (exit_code or initial_results.total_failures)):\n self._port.show_results_html_file(\n self._filesystem.join(self._artifacts_directory,\n 'results.html'))\n self._printer.print_results(time.time() - start_time,\n initial_results)\n\n return test_run_results.RunDetails(exit_code, summarized_full_results,\n summarized_failing_results,\n initial_results, all_retry_results)", "def test_dispatch(self):\r\n self.hit = False\r\n\r\n def handler(event):\r\n self.hit = True\r\n\r\n self.events.register(handler, TestEvent)\r\n \r\n self.events.dispatch(TestEvent())\r\n\r\n self.assertTrue(self.hit)", "def init(self):\n \n self._nc_session = TestBedTests.TBNetconfSession(self.log, self.loop)\n self._nc_proxy = TestBedTests.TBNetconfProxy(self._nc_session, UtCompositeYang, self.log)\n self._netconf_test_objects = []\n self._pbreq_test_objects = []\n\n for cls in 
NETCONF_TESTS:\n obj = cls(self._dts, self.log, self._nc_proxy, self._loop)\n yield from obj.dts_self_register()\n self._netconf_test_objects.append(obj)\n\n for cls in PBREQ_TESTS:\n obj = cls(self._dts, self.log, self._nc_proxy, self._loop)\n yield from obj.dts_self_register()\n self._pbreq_test_objects.append(obj)\n\n @asyncio.coroutine\n def run_all_tests(xact_info, action, ks_path, msg):\n ro1 = yield from self.run_tests(self._netconf_test_objects, msg.continue_on_failure)\n if ro1.failed_count is 0 or msg.continue_on_failure is True:\n ro2 = yield from self.run_tests(self._pbreq_test_objects, msg.continue_on_failure)\n\n ro = RwAgentTestbedYang.AgentTestsOp()\n ro.total_tests = ro1.total_tests + ro2.total_tests\n ro.passed_count = ro1.passed_count + ro2.passed_count\n ro.failed_count = ro1.failed_count + ro2.failed_count\n #ro.failed_tests = ro1.failed_tests + ro2.failed_tests\n\n xpath = \"O,/agt-tb:agent-tests\"\n xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath, ro)\n\n @asyncio.coroutine\n def run_all_netconf_tests(xact_info, action, ks_path, msg):\n ro = yield from self.run_tests(self._netconf_test_objects)\n xpath = \"O,/agt-tb:agent-tests\"\n xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath, ro)\n\n @asyncio.coroutine\n def run_all_pbreqs_tests(xact_info, action, ks_path, msg):\n ro = yield from self.run_tests(self._pbreq_test_objects)\n xpath = \"O,/agt-tb:agent-tests\"\n xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath, ro)\n \n # Register for all test-cases\n yield from self._dts.register(\n xpath=\"I,/agt-tb:agent-tests/agt-tb:all\",\n flags=rwdts.Flag.PUBLISHER,\n handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=run_all_tests))\n\n # Register for per category all test-cases\n yield from self._dts.register(\n xpath=\"I,/agt-tb:agent-tests/agt-tb:netconf-tests/agt-tb:all\",\n flags=rwdts.Flag.PUBLISHER,\n handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=run_all_netconf_tests))\n\n yield from self._dts.register(\n xpath=\"I,/agt-tb:agent-tests/agt-tb:pb-request-tests/agt-tb:all\",\n flags=rwdts.Flag.PUBLISHER,\n handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=run_all_pbreqs_tests))", "def CASE10( self, main ):\n import time\n from tests.CHOTestMonkey.dependencies.events.Event import EventType\n from tests.CHOTestMonkey.dependencies.EventScheduler import EventScheduleMethod\n\n main.log.report( \"Run all enabled checks\" )\n main.log.report( \"__________________________________________________\" )\n main.case( \"Run all enabled checks\" )\n main.step( \"Run all enabled checks\" )\n main.caseResult = main.TRUE\n main.eventGenerator.triggerEvent( EventType().CHECK_ALL, EventScheduleMethod().RUN_BLOCK )\n # Wait for the scheduler to become idle before going to the next testcase\n with main.eventScheduler.idleCondition:\n while not main.eventScheduler.isIdle():\n main.eventScheduler.idleCondition.wait()\n utilities.assert_equals( expect=main.TRUE,\n actual=main.caseResult,\n onpass=\"All enabled checks passed\",\n onfail=\"Not all enabled checks passed\" )\n time.sleep( main.caseSleep )", "def run_test_cases(self):\n count = 1\n for test_case in self.test_cases:\n print(\"Running test case #%d\" % count)\n if test_case.name == 'RouteDistance':\n distance = self.get_distance_for_route(test_case.args)\n print('%s distance: %s' % (test_case.args, distance))\n elif test_case.name == 'RouteShortest':\n args = test_case.args.split('|')\n shortest_distance = self.find_shortest_path_between_cities(args[0], args[1])\n print(\"Shortest distance between 
%s and %s: %d\" % (args[0], args[1], shortest_distance))\n elif test_case.name == 'RouteLessThanHops':\n args = test_case.args.split('|')\n paths = self.trips_hop_constraint_bfs(args[0], args[1], int(args[2]))\n print('Paths between %s and %s with hops less than or equal to %d: %d (%s)' % (\n args[0], args[1], int(args[2]), len(paths), paths\n ))\n elif test_case.name == 'RouteEqualHops':\n args = test_case.args.split('|')\n paths = self.trips_hop_constraint_bfs(args[0], args[1], int(args[2]), equal=True)\n print('Paths between %s and %s with hops equal to %d: %d (%s)' % (\n args[0], args[1], int(args[2]), len(paths), paths\n ))\n elif test_case.name == 'RouteLessThanDistance':\n args = test_case.args.split('|')\n paths = self.trips_distance_constraint_bfs(args[0], args[1], int(args[2]))\n print('Paths between %s and %s with distance less than %d: %d (%s)' % (\n args[0], args[1], int(args[2]), len(paths), paths\n ))\n else:\n raise Exception('Unknown test case: %s' % test_case.name)\n count += 1\n print()", "def test_after_jam_step_two(self):\n for test_suite_class in self.jam_step_2_test_suite_list:\n test_suite = test_suite_class(self)\n results = test_suite.run()\n self.test_results += results", "def run_single_test(self, config):\n path_name = config['path_name']\n for request in config['request']:\n with self.subTest(request=request, test_name=config['test_name']):\n if 'args' in request:\n url = reverse(path_name, kwargs=request['args'])\n else:\n url = reverse(path_name)\n\n query_params = None\n if 'query_params' in request:\n query_params = urlencode(request['query_params'])\n url = '{}?{}'.format(url, query_params)\n\n data = None\n data_format = 'json'\n if 'data' in request:\n data = request['data']\n\n if 'data_format' in request:\n data_format = request['data_format']\n\n response_check = None\n if 'response_check' in request:\n response_check = request['response_check']\n\n self.call_api(\n url,\n data,\n self.tokens[request['user']],\n request['status'],\n config['type'],\n data_format=data_format,\n response_check=response_check)" ]
[ "0.7445618", "0.72676694", "0.72357136", "0.7137783", "0.69886786", "0.69496655", "0.69481313", "0.6913679", "0.6835688", "0.6833541", "0.68203133", "0.68139446", "0.6779515", "0.67011243", "0.6626831", "0.66243935", "0.6614983", "0.65842265", "0.6561729", "0.6560582", "0.6522628", "0.6441573", "0.6411267", "0.6393447", "0.6377413", "0.6375661", "0.635919", "0.6357565", "0.6333101", "0.6329656", "0.6311414", "0.62818956", "0.6278038", "0.6248356", "0.62319", "0.6221192", "0.6200442", "0.6184826", "0.6169045", "0.6164405", "0.61500186", "0.614703", "0.61333734", "0.613047", "0.6121119", "0.61183864", "0.6113423", "0.6111994", "0.6102002", "0.6094369", "0.60917616", "0.60893476", "0.608665", "0.60725355", "0.60693014", "0.60576034", "0.60570395", "0.6048917", "0.60480064", "0.6047662", "0.6036021", "0.6026652", "0.60247463", "0.6018374", "0.6010244", "0.6003191", "0.59893966", "0.5970744", "0.596313", "0.5961926", "0.59518117", "0.59466994", "0.59456545", "0.5942728", "0.5928756", "0.5928756", "0.5921319", "0.5914724", "0.59079313", "0.5892017", "0.5892017", "0.5892017", "0.5891779", "0.5876384", "0.5874596", "0.5873433", "0.5865875", "0.58601546", "0.5856308", "0.5856233", "0.58452326", "0.5844174", "0.5844174", "0.5843423", "0.5841481", "0.5830591", "0.58252394", "0.5821749", "0.5819941", "0.5819165" ]
document_score: 0.6487395
document_rank: 21
Run all local tests
def local(): suite = ServiceTestSuite() suite.addTest(unittest.makeSuite(AmazonTestCase, 'test_local')) return suite
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _run_local_tests(self, *args, **kwargs):\n pass", "def runAll():\n\n loader = unittest.TestLoader()\n test_dir = pkg_resources.resource_filename('frvcpy.test','.')\n suite = loader.discover(test_dir)\n\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)", "def main():\n run_test_all()", "def tests():\n api.local('nosetests')", "def runalltests():\n doctest.testmod()", "def run_test_suite():\n local('. fabric_factory/ve/bin/activate; fabric_factory/src/project/manage.py test')", "def runTests(self):\n \n pass", "def run_all_tests():\n remove_dbs()\n run_training_tests()\n run_custom_training_tests()\n run_training_save_tests()\n run_validation_tests()\n run_feature_extraction_tests()", "def test(self):\n for arch, python in self.python:\n self.run(f\"{python} -m pytest\")", "def run_tests(self):\n\n self.manifest_path = os.path.join('tests',\n 'remote',\n 'manifest.ini')\n TestRun.run_tests(self)", "def run_all_tests(self):\n for index in range(len(self.__test_set_list)):\n self.run_test(index)", "def run_all_unit_tests(cls):\n suites_list = []\n for test_class in cls.TESTS:\n suite = unittest.TestLoader().loadTestsFromTestCase(test_class)\n suites_list.append(suite)\n result = unittest.TextTestRunner().run(unittest.TestSuite(suites_list))\n if not result.wasSuccessful() or result.errors:\n raise Exception(result)", "def local_test():\n pass", "def run_tests(self):\n\n self.manifest_path = os.path.join('tests',\n 'functional',\n 'manifest.ini')\n TestRun.run_tests(self)", "def test_all():\n test_get_to()\n test_error_type()\n test_exchange()\n print(\"All tests passed.\")", "def RunTestAll(ss):\n ss.StopNow = False\n ss.TestAll()\n ss.Stopped()", "def run():\n\tsubsuite_list = []\n\tfor _, modname, _ in pkgutil.iter_modules(test.__path__):\n\t\tif modname.startswith(\"test_\"):\n\t\t\tmodule = importlib.import_module('test.' 
+ modname)\n\t\t\tsubsuite = unittest.TestLoader().loadTestsFromModule(module)\n\t\t\tsubsuite_list.append(subsuite)\n\tsuite = unittest.TestSuite(subsuite_list)\n\n\tprint(\"Testing:\\n\")\n\tunittest.TextTestRunner(verbosity=2).run(suite)", "def runtests():\n #- Load all TestCase classes from desistar/test/test_*.py\n tests = desistar_test_suite()\n #- Run them\n unittest.TextTestRunner(verbosity=2).run(tests)", "def run_tests():\n testfiles = ['tests.test_overall']\n exclude = ['__init__.py', 'test_overall.py']\n for t in glob(pjoin('tests', '*.py')):\n if True not in [t.endswith(ex) for ex in exclude]:\n if basename(t).startswith('test_'):\n testfiles.append('tests.%s' % splitext(basename(t))[0])\n\n suites = []\n for file in testfiles:\n __import__(file)\n suites.append(sys.modules[file].suite)\n\n tests = unittest.TestSuite(suites)\n runner = unittest.TextTestRunner(verbosity=2)\n\n # Disable logging output\n logging.basicConfig(level=100)\n logging.disable(100)\n\n result = runner.run(tests)\n return result", "def run_tests():\n fail = []\n okay = []\n for i in os.listdir(\".\"):\n if i.find(\"_test_\") > -1 and i.endswith(\".py\"):\n if 0 != subprocess.call(\"python \" + i, shell=True):\n fail.append(i)\n else:\n okay.append(i)\n if fail:\n print(\"[ERROR] The following %u tests failed: %r\" % (len(fail), fail))\n return False\n print(\"[DONE] All %u tests completely successfully!\" % (len(okay)))\n return True", "def run_all_tests(self) -> None:\n self.run_trt_precision_tests()\n logging.info(\"Check analysis result at: %s\", self._output_dir)", "def tests():", "def test():\n for cmd in [\n \"pytest --verbose --cov pike/ --cov-report term --cov-report html tests/\",\n ]:\n _run_in_venv(shlex.split(cmd))\n for linter in [[\"black\", \"--check\"], [\"flake8\"], [\"isort\", \"--check\"]]:\n _run_in_venv(linter + TEST_FILES)\n\n _run_in_venv(\n [\"mypy\", \"pike/\", \"tests/\", \"setup.py\", \"pikefile.py\", \"--show-error-codes\"]\n )\n _run_in_venv([\"mypy\", \"examples/\"])\n _run_in_venv([\"bandit\", \"-r\", \"pike/\"])", "def _run_ci_test():\n _run_install(False)\n _run_coverage_html(False)\n _run_typecheck_xml(False)\n _run_lint(True)", "def runtests(ctx):\n run('pytest -s tests', pty=pty_available)\n run('flake8 --ignore E265,E266,E501 --exclude src, lib', pty=pty_available)", "def doAllTests(self):\n # Initial offset\n self.getAlertsFile()\n self.offset = self.getOffset(self.config.get('PATHS', 'tempfile'))\n\n # Do all tests\n # As the socket is not persistent, client side attacks have to be done before all tests\n for module in self.modules:\n # Test is performed only if selected in config.cfg\n if self.config.get('TESTS', module[1]) == '1':\n print \"\\n%s\\n------------\" % module[0].upper()\n if module[1]=='clientSideAttacks':\n self.doClientSideAttacksTest( clientSideAttacks.ClientSideAttacks(self._target).getPayloads() )\n# elif module[1]=='multipleFailedLogins':\n# self.doMultipleFailedLoginsTest( multipleFailedLogins.MultipleFailedLogins(self._target).getPayloads() )\n else:\n self.doTest( module[1], eval( ('%s.%s'+'(self._target,self._cnf).getPayloads()') % (module[1], module[1][:1].upper()+module[1][1:]) ) )\n\n # Done!\n print \"\\n\\n-----------------------\"\n print \"DONE. 
Check the report.\"\n print \"-----------------------\\n\"", "def main():\n # add all new test suites per test module here\n suite_date = test_date.suite()\n suite_ng = test_ng.suite()\n suite_page = test_page.suite()\n suite_container = test_container.suite()\n\n # add the suite to be tested here\n alltests = unittest.TestSuite((suite_date,\n suite_ng,\n suite_page,\n suite_container))\n\n # run the suite\n runner = unittest.TextTestRunner()\n runner.run(alltests)", "def __main() :\n launchTests()", "def run_tests(tests):\n return [test(t) for t in tests]", "def runAllTests():\n\tttr = unittest.TextTestRunner(verbosity=3).run(suite())\n\tnTests = ttr.testsRun + len(ttr.skipped)\n\tprint(\"Report:\")\n\tprint(\"\\t\" + str(len(ttr.failures)) + \"/\" + str(nTests) + \" failed\")\n\tprint(\"\\t\" + str(len(ttr.errors)) + \"/\" + str(nTests) + \" errors\")\n\tprint(\"\\t\" + str(len(ttr.skipped)) + \"/\" + str(nTests) + \" skipped\")", "def run_tests(self):\n\n self.manifest_path = os.path.join('tests',\n 'l10n',\n 'manifest.ini')\n TestRun.run_tests(self)", "def run_tests(self):\n raise NotImplementedError", "def _run_tests(self):\n for pyunit_testcase in self.cfg.testcases:\n yield self._run_testsuite(pyunit_testcase)", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover(\"tests\")\n unittest.TextTestRunner(verbosity=2).run(tests)", "def collectTests(self, global_ctx):\n pass", "def run_tests():\n test_command = \"pytest -s \" + os.path.join(root_path, \"cases\", \"test_cases.py::TestCases::test_cases\") + \" --html=\" + os.path.join(root_path, \"reports\", \"qa_testing_report.html\")\n\n subprocess.run(test_command, shell=True)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def tests(context):\n black(context)\n isort(context)\n flake8(context)\n pylint(context)\n yamllint(context)\n pydocstyle(context)\n bandit(context)\n pytest(context)\n\n print(\"All tests have passed!\")", "def test_all():\n test_prepare_text()\n test_end_chat()\n test_choose_author()\n test_choose_book()", "def RunAll():\n testfunctions = []\n for name, obj in inspect.getmembers(sys.modules[__name__]):\n if inspect.isfunction(obj) and name != 'RunAll':\n testfunctions.append(obj)\n\n # run all the functions\n for f in testfunctions:\n print('Running %s' % str(f))\n f()", "def runtest(self):", "def run_all(self):\n failures, errors = [], []\n\n # Run each test case registered with us and agglomerate the results.\n for case_ in self.cases:\n case_.run()\n update_results(failures, errors, case_)\n\n # Display our results.\n print_errors(errors)\n print_failures(failures)\n print_overview(errors, failures)\n\n # Exit with 0 if all tests passed, >0 otherwise.\n sys.exit(len(failures) + len(errors))", "def run_all_tests():\n successes = 0\n testsrun = 0\n testsdir = tests_dirpath()\n for test in os.listdir(testsdir):\n path = os.path.join(testsdir, test)\n if os.path.isdir(path):\n testsrun += 1\n if run_test(path):\n successes += 1\n print(\"--- %d/%d TESTS PASSED ---\" % (successes, testsrun))\n return successes == testsrun", "def unittest():\n from a6test import test_all\n test_all()", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n 
unittest.TextTestRunner(verbosity=2).run(tests)", "def runtests():\r\n\r\n app_abspath = os.path.dirname(os.path.dirname(__file__))\r\n models_abspath = os.path.join(app_abspath, 'models.py')\r\n models_exists = os.path.isfile(models_abspath)\r\n urls_abspath = os.path.join(app_abspath, 'urls.py')\r\n urls_exists = os.path.isfile(urls_abspath)\r\n views_abspath = os.path.join(app_abspath, 'views')\r\n views_exists = os.path.isdir(views_abspath)\r\n tpls_abspath = os.path.join(app_abspath, 'templates')\r\n tpls_exists = os.path.isdir(tpls_abspath)\r\n\r\n for f in [models_abspath, urls_abspath]:\r\n if os.path.isfile(f):\r\n subprocess.call('cp {} {}.orig'.format(f, f), shell=True)\r\n\r\n if views_exists:\r\n subprocess.call('cp -r {} {}.orig'.format(views_abspath, views_abspath), shell=True)\r\n\r\n if tpls_exists:\r\n subprocess.call('cp -r {} {}.orig'.format(tpls_abspath, tpls_abspath), shell=True)\r\n\r\n overwrite_project_language('ja')\r\n subprocess.call('python manage.py generatescaffold test_app I18nModel title:string', shell=True)\r\n time.sleep(1)\r\n overwrite_project_language('en-us')\r\n time.sleep(1)\r\n\r\n subprocess.call('python manage.py generatescaffold test_app GeneratedNoTimestampModel title:string description:text --no-timestamps', shell=True)\r\n time.sleep(2) # Give time for Django's AppCache to clear\r\n\r\n subprocess.call('python manage.py generatescaffold test_app GeneratedModel title:string description:text', shell=True)\r\n\r\n test_status = subprocess.call('python manage.py test --with-selenium --with-selenium-fixtures --with-cherrypyliveserver --noinput', shell=True)\r\n\r\n if models_exists:\r\n subprocess.call('mv {}.orig {}'.format(models_abspath, models_abspath), shell=True)\r\n else:\r\n subprocess.call('rm {}'.format(models_abspath), shell=True)\r\n\r\n if urls_exists:\r\n subprocess.call('mv {}.orig {}'.format(urls_abspath, urls_abspath), shell=True)\r\n else:\r\n subprocess.call('rm {}'.format(urls_abspath), shell=True)\r\n\r\n if views_exists:\r\n subprocess.call('rm -rf {}'.format(views_abspath), shell=True)\r\n subprocess.call('mv {}.orig {}'.format(views_abspath, views_abspath), shell=True)\r\n else:\r\n subprocess.call('rm -rf {}'.format(views_abspath), shell=True)\r\n\r\n if tpls_exists:\r\n subprocess.call('rm -rf {}'.format(tpls_abspath), shell=True)\r\n subprocess.call('mv {}.orig {}'.format(tpls_abspath, tpls_abspath), shell=True)\r\n else:\r\n subprocess.call('rm -rf {}'.format(tpls_abspath), shell=True)\r\n\r\n subprocess.call('rm {}/*.pyc'.format(app_abspath), shell=True)\r\n\r\n sys.exit(test_status)", "def django_run_tests():\r\n \r\n settings_py, manage_py = _CDjangoPluginActivator._instance._FindKeyFiles()\r\n loc = location.CreateFromName(settings_py)\r\n cmd = 'run_test_files(locs=\"%s\")' % settings_py\r\n wingapi.gApplication.ExecuteCommand(cmd)", "def test():\n import unittest\n tests = unittest.TestLoader().discover(tests)\n unittest.TextTestRunner(verbosity=2).run(tests)", "def run_tests():\n argument_parser = ArgumentParser(description=\"Run all tests for {{project name}}\")\n #TODO add some configuration here\n\n settings.configure(**{\n \"DATABASE_ENGINE\" : \"django.db.backends.sqlite3\",\n \"DATABASE_NAME\" : \"sqlite://:memory:\",\n \"ROOT_URLCONF\" : \"tests.urls\",\n \"TEMPLATE_LOADERS\" : (\n \"django.template.loaders.filesystem.load_template_source\",\n \"django.template.loaders.app_directory.load_template_source\",\n ),\n \"TEMPLATE_DIRS\" : (\n path(__file__).dirname() / 'templates',\n ),\n \"INSTALLED_APPS\" : 
(\n 'django.contrib.admin',\n 'django.contrib.auth',\n 'django.contrib.contenttypes',\n 'django.contrib.sessions',\n 'django.contrib.sites',\n '{{ project_name }},\n ),\n })\n call_command(\"test\")", "def test():\n import unittest\n testmodules = [\n 'bettermathlib_tests',\n 'randomwebapp_tests',\n ]\n suite = unittest.TestSuite()\n for t in testmodules:\n suite.addTest(unittest.defaultTestLoader.loadTestsFromName(t))\n unittest.TextTestRunner(verbosity=2).run(suite)", "def main():\n fix_sys_path()\n result = unittest.TextTestRunner(verbosity=2).run(createTestSuite())\n\n if result.testsRun != EXPECTED_TEST_COUNT:\n raise Exception(\n 'Expected %s tests to be run, not %s.' % (EXPECTED_TEST_COUNT, result.testsRun))\n\n if len(result.errors) != 0 or len(result.failures) != 0:\n raise Exception(\n \"Functional test suite failed: %s errors, %s failures of %s tests run.\" % (\n len(result.errors), len(result.failures), result.testsRun))", "def test():\n\n tests = unittest.TestLoader().discover('api/tests/', pattern='*/test_*.py')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def run_tests(self):\n manifest = manifestparser.TestManifest(\n manifests=[os.path.join(self.repository_path, self.manifest_path)],\n strict=False)\n\n tests = manifest.active_tests(**mozinfo.info)\n self._mozmill.run(tests, self.options.restart)\n\n # Whenever a test fails it has to be marked, so we quit with the correct exit code\n self.last_failed_tests = self.last_failed_tests or self._mozmill.results.fails\n\n self.testrun_index += 1", "def do_test(self):\n\t\tshutit_global.shutit_global_object.yield_to_draw()\n\t\tif not self.build['dotest']:\n\t\t\tself.log('Tests configured off, not running',level=logging.DEBUG)\n\t\t\treturn\n\t\t# Test in reverse order\n\t\tself.log('PHASE: test', level=logging.DEBUG)\n\t\tself.stop_all()\n\t\tself.start_all()\n\t\tfor module_id in self.module_ids(rev=True):\n\t\t\t# Only test if it's installed.\n\t\t\tif self.is_installed(self.shutit_map[module_id]):\n\t\t\t\tself.log('RUNNING TEST ON: ' + module_id, level=logging.DEBUG)\n\t\t\t\tself.login(prompt_prefix=module_id,command=shutit_global.shutit_global_object.bash_startup_command,echo=False)\n\t\t\t\tif not self.shutit_map[module_id].test(self):\n\t\t\t\t\tself.fail(module_id + ' failed on test', shutit_pexpect_child=self.get_shutit_pexpect_session_from_id('target_child').pexpect_child) # pragma: no cover\n\t\t\t\tself.logout(echo=False)", "def run_tests(virtual_env):\n signal.signal(signal.SIGINT, signal.SIG_IGN)\n args = [\n 'python',\n 'setup.py',\n 'nosetests',\n '--with-coverage',\n '--with-xunit',\n ]\n subprocess.call(args, cwd=abspath(join(HOLLAND_ROOT, 'holland-core')), env=virtual_env)\n subprocess.call(['coverage', 'xml'], cwd=abspath(join(HOLLAND_ROOT, 'holland-core')), env=virtual_env)\n for plugin_dir in open(join(HOLLAND_ROOT, 'plugins', 'ACTIVE')):\n plugin_dir = plugin_dir.rstrip()\n plugin_path = join(HOLLAND_ROOT, 'plugins', plugin_dir)\n subprocess.call(args, cwd=plugin_path, env=virtual_env)\n subprocess.call(['coverage', 'xml'], cwd=plugin_path, env=virtual_env)\n for addon_dir in open(join(HOLLAND_ROOT, 'addons', 'ACTIVE')):\n addon_dir = addon_dir.rstrip()\n addon_path = join(HOLLAND_ROOT, 'addons', addon_dir)\n subprocess.call(args, cwd=addon_path, env=virtual_env)\n subprocess.call(['coverage', 'xml'], cwd=plugin_path, env=virtual_env)\n #return subprocess.call(args, env=virtual_env)", "def run_tests(self):\n # Charm does not defer hooks so that test is not included.\n # Trigger a package change 
which requires a restart\n self.run_package_change_test(\n 'ovn-central',\n 'ovn-central')", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover(\"tests\")\n unittest.TextTestRunner(verbosity=2).run(tests)", "def execute(self):\n for test in self.tests:\n test.execute()\n self.logger.dump()\n print(\"Finished!\")", "def test():\r\n import unittest\r\n tests = unittest.TestLoader().discover('tests')\r\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\r\n import unittest\r\n tests = unittest.TestLoader().discover('tests')\r\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\r\n import unittest\r\n tests=unittest.TestLoader().discover('tests')\r\n unittest.TextTestRunner(verbosity=2).run(tests)", "def run_all_unit_tests():\n original = verify.parse_content\n try:\n verify.parse_content = parse_string_in_scope\n\n test_list_of()\n\n test_activity_multiple_choice()\n test_activity_free_text()\n test_activity_multiple_choice_group()\n test_activity_ast()\n\n test_assessment()\n test_assessment_ast()\n\n # test existing verifier using parsing instead of exec/compile\n verify.test_sample_assets()\n finally:\n verify.parse_content = original", "def test():\n loader = unittest.TestLoader()\n suite = loader.discover(os.path.dirname(__file__))\n runner = unittest.TextTestRunner()\n runner.run(suite)", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n import unittest\n import tests\n tests = unittest.TestLoader().discover('tests', pattern='*tests.py')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def RunTest(self):\n 
self.TestLs()\n self.TestTerminate()\n self.TestMultipleProcesses()", "def runTest(self):\n unittest.main()\n ChoreTest.clean_up()", "def run_test_suite(*args):\n test_args = list(args) or []\n execute_from_command_line([\"manage.py\", \"test\"] + test_args)", "def make_all():\n\n if not MASTER.exists():\n os.makedirs(MASTER)\n members = inspect.getmembers(sys.modules[__name__])\n members = [f for f in members if 'test_' in f[0]]\n for member in members:\n print('Running %s...' % member[0], end='')\n member[1](master=True)\n print('done!')", "def make_all():\n\n if not MASTER.exists():\n os.makedirs(MASTER)\n members = inspect.getmembers(sys.modules[__name__])\n members = [f for f in members if 'test_' in f[0]]\n for member in members:\n print('Running %s...' % member[0], end='')\n member[1](master=True)\n print('done!')", "def test():\n import unittest\n tests = unittest \n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def run_main(): # pragma: no cover\n RunTestsCLI.run()", "def test_generate_all_testing(self):\n pass", "def test():\n with lcd(BASEDIR):\n local('virtenv/bin/coverage run runtests.py -v2')\n local('virtenv/bin/coverage report -m')", "def run_tests(self, test_labels):\n import pytest\n\n argv = []\n if self.verbosity == 0:\n argv.append('--quiet')\n if self.verbosity == 2:\n argv.append('--verbose')\n if self.verbosity == 3:\n argv.append('-vv')\n if self.failfast:\n argv.append('--exitfirst')\n if self.keepdb:\n argv.append('--reuse-db')\n\n argv.extend(test_labels)\n return pytest.main(argv)", "def test_all_envs(func):\n register_tests(func, [func.__name__ + '_emulator',\n func.__name__ + '_verilator'])", "def run(self):\n if self.all:\n cmd = self.apply_options(self.test_all_cmd)\n self.call_and_exit(cmd)\n else:\n cmds = (self.apply_options(self.unit_test_cmd, (\"coverage\",)),)\n if self.coverage:\n cmds += (self.apply_options(self.coverage_cmd),)\n self.call_in_sequence(cmds)", "def startTestRun(self):", "def test_lint(self):\n l = self.l\n l.loadTestsFromTestCase\n l.loadTestsFromModule\n l.loadTestsFromName\n l.loadTestsFromNames", "def test_script(self) -> None:\n main()", "def run_tests(session):\n set_environment_variables(PYBAMM_ENV, session=session)\n session.run_always(\"pip\", \"install\", \"-e\", \".[all]\")\n if sys.platform == \"linux\" or sys.platform == \"darwin\":\n session.run_always(\"pip\", \"install\", \"-e\", \".[odes]\")\n session.run_always(\"pip\", \"install\", \"-e\", \".[jax]\")\n session.run(\"python\", \"run-tests.py\", \"--all\")", "def test():\n tests = unittest.TestLoader().discover('project/tests', pattern='test*.py')\n result = unittest.TextTestRunner(verbosity=2).run(tests)\n if result.wasSuccessful():\n return 0\n sys.exit(result)", "def test(test_names):\n import unittest\n if test_names:\n tests = unittest.TestLoader().loadTestsFromNames(test_names)\n else:\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def execute_tests():\n import django\n\n sys.exc_clear()\n\n os.environ[\"DJANGO_SETTINGS_MODULE\"] = \"django.conf.global_settings\"\n from django.conf import global_settings\n\n global_settings.INSTALLED_APPS = ()\n global_settings.MIDDLEWARE_CLASSES = ()\n global_settings.SECRET_KEY = \"not-very-secret\"\n\n global_settings.DATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.sqlite3',\n }\n }\n\n # http://django.readthedocs.org/en/latest/releases/1.7.html#standalone-scripts\n if django.VERSION >= (1,7):\n 
django.setup()\n\n from django.test.utils import get_runner\n test_runner = get_runner(global_settings)\n\n test_runner = test_runner()\n failures = test_runner.run_tests(['s3cache'])\n sys.exit(failures)", "def runTest(self):\n self.setUp()\n self.test_modul1()" ]
[ "0.8297117", "0.7959936", "0.7950929", "0.7797158", "0.768714", "0.766804", "0.76199144", "0.75607073", "0.7456279", "0.744708", "0.7380929", "0.7380103", "0.72950643", "0.7281221", "0.727362", "0.7181298", "0.71580607", "0.7139827", "0.7125284", "0.70671266", "0.7064775", "0.7064144", "0.7061675", "0.70427084", "0.7038519", "0.70306754", "0.700941", "0.7003991", "0.69682187", "0.6947895", "0.6941255", "0.69383717", "0.69304764", "0.6918372", "0.6917669", "0.6915974", "0.68886817", "0.68886817", "0.68886817", "0.68653417", "0.68638426", "0.6857378", "0.6847217", "0.68454874", "0.6836972", "0.683234", "0.68306434", "0.6830312", "0.68217397", "0.67976826", "0.6779681", "0.6779171", "0.6775913", "0.6768906", "0.6766041", "0.6759414", "0.67546356", "0.6753306", "0.6749121", "0.6749121", "0.6749121", "0.6749121", "0.6749121", "0.6749121", "0.6749121", "0.6749121", "0.6749121", "0.6749121", "0.6749121", "0.6749121", "0.6749121", "0.6749121", "0.6745705", "0.6733487", "0.67334163", "0.67334163", "0.67249024", "0.6722685", "0.67225426", "0.6721593", "0.67075443", "0.67040694", "0.6702716", "0.6687353", "0.6657964", "0.6657964", "0.66578114", "0.6655818", "0.66553026", "0.66426146", "0.6637389", "0.6626522", "0.6612849", "0.6580203", "0.6577367", "0.657586", "0.65680844", "0.655509", "0.65472496", "0.65414315", "0.6520224" ]
0.0
-1
Run all network tests
def net():
    suite = ServiceTestSuite()
    suite.addTest(unittest.makeSuite(AmazonTestCase, 'test_net'))
    return suite
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def test_all():\n test_get_to()\n test_error_type()\n test_exchange()\n print(\"All tests passed.\")", "def run_all_tests():\n remove_dbs()\n run_training_tests()\n run_custom_training_tests()\n run_training_save_tests()\n run_validation_tests()\n run_feature_extraction_tests()", "def runTests(self):\n \n pass", "def runAll():\n\n loader = unittest.TestLoader()\n test_dir = pkg_resources.resource_filename('frvcpy.test','.')\n suite = loader.discover(test_dir)\n\n runner = unittest.TextTestRunner(verbosity=2)\n runner.run(suite)", "def main():\n run_test_all()", "def doAllTests(self):\n # Initial offset\n self.getAlertsFile()\n self.offset = self.getOffset(self.config.get('PATHS', 'tempfile'))\n\n # Do all tests\n # As the socket is not persistent, client side attacks have to be done before all tests\n for module in self.modules:\n # Test is performed only if selected in config.cfg\n if self.config.get('TESTS', module[1]) == '1':\n print \"\\n%s\\n------------\" % module[0].upper()\n if module[1]=='clientSideAttacks':\n self.doClientSideAttacksTest( clientSideAttacks.ClientSideAttacks(self._target).getPayloads() )\n# elif module[1]=='multipleFailedLogins':\n# self.doMultipleFailedLoginsTest( multipleFailedLogins.MultipleFailedLogins(self._target).getPayloads() )\n else:\n self.doTest( module[1], eval( ('%s.%s'+'(self._target,self._cnf).getPayloads()') % (module[1], module[1][:1].upper()+module[1][1:]) ) )\n\n # Done!\n print \"\\n\\n-----------------------\"\n print \"DONE. Check the report.\"\n print \"-----------------------\\n\"", "def tests():\n api.local('nosetests')", "def runalltests():\n doctest.testmod()", "def run_tests():\n \n test_constructor_positive()\n test_constructor_negative()\n test_game_move_positive()\n test_game_move_negative()\n test_game_move_edge()\n print(\"Congratulations ! 
You passed all the game test cases.\")", "def run_custom_training_tests():\n test_custom_training()\n test_custom_distributed_training()\n test_custom_multimodel_training()\n test_custom_distributed_multimodel_training()", "def test_run():\n # Only few steps for test\n timesteps = 128\n\n # Compute all sub testing conf\n envs = ['CartPole-v0']\n ml_platforms = ['torch', 'tf']\n agents = ['dqn', 'a2c']\n\n test_combinations = list(it.product(\n envs,\n ml_platforms,\n agents\n )\n )\n\n # Finally test them all\n for conf in test_combinations:\n env_str, ml_platform_str, agent_str = conf\n run(\n agent_str,\n ml_platform_str,\n env_str,\n 'dense',\n timesteps,\n './target/')", "def run_all_tests(self):\n for index in range(len(self.__test_set_list)):\n self.run_test(index)", "def test_get_networks(self):\n pass", "def test_core(c):\n def test_python(c, filename):\n \"\"\"Run a single python test file in tests/python.\"\"\"\n test_dir = 'tests/python'\n filepath = '{}/{}'.format(test_dir, filename)\n\n print('Restarting network before executing test {}'.format(filepath))\n rm_network(c)\n start_core(c)\n start_horizon(c)\n network(c)\n\n with c.cd(test_dir):\n print('Executing test {}'.format(filepath))\n c.run('pipenv run python {filename} \"{passphrase}\" {whitelist_seed}'.format(\n filename=filename,\n passphrase=PASSPHRASE,\n whitelist_seed=WHITELIST_SEED))\n\n test_python(c, 'test_base_reserve.py')\n test_python(c, 'test_tx_order_by_fee.py')\n test_python(c, 'test_tx_order_by_whitelist.py')\n test_python(c, 'test_tx_priority_for_whitelist_holder.py')\n test_python(c, 'test_whitelist_affected_on_next_ledger.py')\n\n # XXX obsolete\n # see source file for more information\n # test_python(c, 'test_multiple_cores.py')", "def main():\r\n test = TesterNeighbour()\r\n test.setUp()\r\n test.test_result_n()\r\n print(\"result_of_algorithm_test - passed\")", "def run_tests(self):\n raise NotImplementedError", "def tests():", "def main():\n test_network_connection()\n parser()", "def RunTest(self):\n self.TestLs()\n self.TestTerminate()\n self.TestMultipleProcesses()", "def main():\n test_runner = TestRunner(\n FLAGS.workspace, FLAGS.bench_home, imagenet_dir=FLAGS.train_data_dir)\n test_runner.run_tests(FLAGS.test_list.split(','))", "def run_all(self):\n failures, errors = [], []\n\n # Run each test case registered with us and agglomerate the results.\n for case_ in self.cases:\n case_.run()\n update_results(failures, errors, case_)\n\n # Display our results.\n print_errors(errors)\n print_failures(failures)\n print_overview(errors, failures)\n\n # Exit with 0 if all tests passed, >0 otherwise.\n sys.exit(len(failures) + len(errors))", "def run_all_unit_tests(cls):\n suites_list = []\n for test_class in cls.TESTS:\n suite = unittest.TestLoader().loadTestsFromTestCase(test_class)\n suites_list.append(suite)\n result = unittest.TextTestRunner().run(unittest.TestSuite(suites_list))\n if not result.wasSuccessful() or result.errors:\n raise Exception(result)", "def RunTestAll(ss):\n ss.StopNow = False\n ss.TestAll()\n ss.Stopped()", "def run_test_cases(self):\n count = 1\n for test_case in self.test_cases:\n print(\"Running test case #%d\" % count)\n if test_case.name == 'RouteDistance':\n distance = self.get_distance_for_route(test_case.args)\n print('%s distance: %s' % (test_case.args, distance))\n elif test_case.name == 'RouteShortest':\n args = test_case.args.split('|')\n shortest_distance = self.find_shortest_path_between_cities(args[0], args[1])\n print(\"Shortest distance between %s and %s: %d\" 
% (args[0], args[1], shortest_distance))\n elif test_case.name == 'RouteLessThanHops':\n args = test_case.args.split('|')\n paths = self.trips_hop_constraint_bfs(args[0], args[1], int(args[2]))\n print('Paths between %s and %s with hops less than or equal to %d: %d (%s)' % (\n args[0], args[1], int(args[2]), len(paths), paths\n ))\n elif test_case.name == 'RouteEqualHops':\n args = test_case.args.split('|')\n paths = self.trips_hop_constraint_bfs(args[0], args[1], int(args[2]), equal=True)\n print('Paths between %s and %s with hops equal to %d: %d (%s)' % (\n args[0], args[1], int(args[2]), len(paths), paths\n ))\n elif test_case.name == 'RouteLessThanDistance':\n args = test_case.args.split('|')\n paths = self.trips_distance_constraint_bfs(args[0], args[1], int(args[2]))\n print('Paths between %s and %s with distance less than %d: %d (%s)' % (\n args[0], args[1], int(args[2]), len(paths), paths\n ))\n else:\n raise Exception('Unknown test case: %s' % test_case.name)\n count += 1\n print()", "def run_all_tests(self) -> None:\n self.run_trt_precision_tests()\n logging.info(\"Check analysis result at: %s\", self._output_dir)", "def test_add_network(self):\n pass", "def runtest(self):", "def run(self):\n if self.all:\n cmd = self.apply_options(self.test_all_cmd)\n self.call_and_exit(cmd)\n else:\n cmds = (self.apply_options(self.unit_test_cmd, (\"coverage\",)),)\n if self.coverage:\n cmds += (self.apply_options(self.coverage_cmd),)\n self.call_in_sequence(cmds)", "def main(tests, root_uri, verbose):\n if verbose:\n rv_config.set_verbosity(verbosity=Verbosity.DEBUG)\n\n if len(tests) == 0:\n # no tests specified, so run all\n tests = list(ALL_TESTS.keys())\n else:\n # run all tests that start with the given string e.g \"chip\" will match\n # both \"chip_classification.basic\" and \"chip_classification.nochip\"\n _tests = []\n for t in tests:\n t = t.strip().lower()\n matching_tests = [k for k in ALL_TESTS.keys() if k.startswith(t)]\n _tests.extend(matching_tests)\n if len(matching_tests) == 0:\n console_error(\n f'{t} does not match any valid tests. Valid tests are: ')\n console_error(pformat(list(ALL_TESTS.keys())))\n continue\n tests = _tests\n\n console_info('The following tests will be run:')\n console_info(pformat(tests, compact=False))\n\n with get_tmp_dir() as tmp_dir:\n if root_uri:\n tmp_dir = root_uri\n\n num_failed = 0\n errors = {}\n for test_id in tests:\n test_cfg = ALL_TESTS[test_id]\n errors[test_id] = run_test(test_id, test_cfg, tmp_dir)\n if len(errors[test_id]) > 0:\n num_failed += 1\n\n for error in errors[test_id]:\n console_error(str(error))\n\n for test_id in tests:\n if test_id not in errors:\n continue\n if len(errors[test_id]) == 0:\n console_success(f'{test_id}: test passed!', bold=True)\n else:\n console_error(f'{test_id}: test failed!', bold=True)\n\n if num_failed > 0:\n console_error(\n f'Tests passed: {len(tests) - num_failed} of {len(tests)}')\n console_error('Error counts:')\n console_error(pformat({k: len(es) for k, es in errors.items()}))\n exit(1)", "def run():\n\tsubsuite_list = []\n\tfor _, modname, _ in pkgutil.iter_modules(test.__path__):\n\t\tif modname.startswith(\"test_\"):\n\t\t\tmodule = importlib.import_module('test.' 
+ modname)\n\t\t\tsubsuite = unittest.TestLoader().loadTestsFromModule(module)\n\t\t\tsubsuite_list.append(subsuite)\n\tsuite = unittest.TestSuite(subsuite_list)\n\n\tprint(\"Testing:\\n\")\n\tunittest.TextTestRunner(verbosity=2).run(suite)", "def test_generate_all_testing(self):\n pass", "def train(self, iterations=1):\n for _ in range(iterations):\n self.trainer.train()\n self.test_network()", "def runTest(self):\n self.setUp()\n self.test_NeuroPath1()", "def run_tests(self):\n setup_layers = {}\n layers_to_run = list(self.ordered_layers())\n should_resume = False\n\n while layers_to_run:\n layer_name, layer, tests = layers_to_run[0]\n for feature in self.features:\n feature.layer_setup(layer)\n try:\n self.ran += run_layer(self.options, layer_name, layer, tests,\n setup_layers, self.failures, self.errors,\n self.skipped, self.import_errors)\n except zope.testrunner.interfaces.EndRun:\n self.failed = True\n break\n except CanNotTearDown:\n if not self.options.resume_layer:\n should_resume = True\n break\n\n layers_to_run.pop(0)\n if self.options.processes > 1:\n should_resume = True\n break\n\n if self.options.stop_on_error and (self.failures or self.errors):\n break\n\n if should_resume:\n if layers_to_run:\n self.ran += resume_tests(\n self.script_parts, self.options, self.features,\n layers_to_run, self.failures, self.errors,\n self.skipped, self.cwd)\n\n if setup_layers:\n if self.options.resume_layer is None:\n self.options.output.info(\"Tearing down left over layers:\")\n tear_down_unneeded(\n self.options, (), setup_layers, self.errors, optional=True)\n\n self.failed = bool(self.import_errors or self.failures or self.errors)", "def main():\n run_test_summary1a()\n run_test_summary1c()\n run_test_summary1c()", "def runAllTests():\n\tttr = unittest.TextTestRunner(verbosity=3).run(suite())\n\tnTests = ttr.testsRun + len(ttr.skipped)\n\tprint(\"Report:\")\n\tprint(\"\\t\" + str(len(ttr.failures)) + \"/\" + str(nTests) + \" failed\")\n\tprint(\"\\t\" + str(len(ttr.errors)) + \"/\" + str(nTests) + \" errors\")\n\tprint(\"\\t\" + str(len(ttr.skipped)) + \"/\" + str(nTests) + \" skipped\")", "def __main() :\n launchTests()", "def test_all():\n test_prepare_text()\n test_end_chat()\n test_choose_author()\n test_choose_book()", "def runtests():\n #- Load all TestCase classes from desistar/test/test_*.py\n tests = desistar_test_suite()\n #- Run them\n unittest.TextTestRunner(verbosity=2).run(tests)", "def run_tests():\n good_car = UnreliableCar(\"Good Car\", 100, 90)\n bad_car = UnreliableCar(\"Bad Car\", 100, 10)\n\n for i in range(1, 15):\n print(\"Attempting to drive {}km:\".format(i))\n print(\"{:12} drove {:2}km\".format(good_car.name, good_car.drive(i)))\n print(\"{:12} drove {:2}km\".format(bad_car.name, bad_car.drive(i)))\n\n \"\"\"final states of the cars\"\"\"\n print(good_car)\n print(bad_car)", "def run_tests(self):\n\n self.manifest_path = os.path.join('tests',\n 'remote',\n 'manifest.ini')\n TestRun.run_tests(self)", "def test_routers(self):\n if self.output_file == '':\n self.run_suite()\n else:\n with open(self.output_file, \"w\") as fp:\n self.run_suite(fp)", "def init(self):\n \n self._nc_session = TestBedTests.TBNetconfSession(self.log, self.loop)\n self._nc_proxy = TestBedTests.TBNetconfProxy(self._nc_session, UtCompositeYang, self.log)\n self._netconf_test_objects = []\n self._pbreq_test_objects = []\n\n for cls in NETCONF_TESTS:\n obj = cls(self._dts, self.log, self._nc_proxy, self._loop)\n yield from obj.dts_self_register()\n self._netconf_test_objects.append(obj)\n\n 
for cls in PBREQ_TESTS:\n obj = cls(self._dts, self.log, self._nc_proxy, self._loop)\n yield from obj.dts_self_register()\n self._pbreq_test_objects.append(obj)\n\n @asyncio.coroutine\n def run_all_tests(xact_info, action, ks_path, msg):\n ro1 = yield from self.run_tests(self._netconf_test_objects, msg.continue_on_failure)\n if ro1.failed_count is 0 or msg.continue_on_failure is True:\n ro2 = yield from self.run_tests(self._pbreq_test_objects, msg.continue_on_failure)\n\n ro = RwAgentTestbedYang.AgentTestsOp()\n ro.total_tests = ro1.total_tests + ro2.total_tests\n ro.passed_count = ro1.passed_count + ro2.passed_count\n ro.failed_count = ro1.failed_count + ro2.failed_count\n #ro.failed_tests = ro1.failed_tests + ro2.failed_tests\n\n xpath = \"O,/agt-tb:agent-tests\"\n xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath, ro)\n\n @asyncio.coroutine\n def run_all_netconf_tests(xact_info, action, ks_path, msg):\n ro = yield from self.run_tests(self._netconf_test_objects)\n xpath = \"O,/agt-tb:agent-tests\"\n xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath, ro)\n\n @asyncio.coroutine\n def run_all_pbreqs_tests(xact_info, action, ks_path, msg):\n ro = yield from self.run_tests(self._pbreq_test_objects)\n xpath = \"O,/agt-tb:agent-tests\"\n xact_info.respond_xpath(rwdts.XactRspCode.ACK, xpath, ro)\n \n # Register for all test-cases\n yield from self._dts.register(\n xpath=\"I,/agt-tb:agent-tests/agt-tb:all\",\n flags=rwdts.Flag.PUBLISHER,\n handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=run_all_tests))\n\n # Register for per category all test-cases\n yield from self._dts.register(\n xpath=\"I,/agt-tb:agent-tests/agt-tb:netconf-tests/agt-tb:all\",\n flags=rwdts.Flag.PUBLISHER,\n handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=run_all_netconf_tests))\n\n yield from self._dts.register(\n xpath=\"I,/agt-tb:agent-tests/agt-tb:pb-request-tests/agt-tb:all\",\n flags=rwdts.Flag.PUBLISHER,\n handler=rift.tasklets.DTS.RegistrationHandler(on_prepare=run_all_pbreqs_tests))", "def test_network(self, weights, env, episodes, seed, network=None, timeout=None):\n return self.run(\n backend_test_network, weights, #model creation params\n self.network_generator, env, episodes, seed, #test network params\n timeout=timeout\n )", "def run(self) -> None:\n self.test_sanity()\n if self.has_errors():\n return\n\n tests: List[Callable[[], None]] = [\n self.test_headlines_predefined,\n self.test_headlines_required,\n self.test_headlines_dependencies,\n self.test_headlines_order,\n self.test_headlines_named_entities,\n self.test_named_entities,\n self.test_reading_attributes,\n self.test_forbidden_words,\n self.test_unwanted_words,\n self.test_police_abbreviations,\n self.test_spelling,\n self.test_grammar_rules_regex,\n ]\n\n for test in tests:\n if self.stop_on_error and self.has_errors():\n break\n test()", "def test_01(self):\n if _debug: TestIAmRouterToNetwork._debug(\"test_01\")\n\n # create a network\n tnet = TNetwork()\n\n # test device sends request\n tnet.iut.start_state.doc(\"1-1-0\") \\\n .call(tnet.iut.nse.i_am_router_to_network).doc(\"1-1-1\") \\\n .success()\n\n # network 1 sees router to networks 2 and 3\n tnet.sniffer1.start_state.doc(\"1-2-0\") \\\n .receive(IAmRouterToNetwork,\n iartnNetworkList=[2, 3],\n ).doc(\"1-2-1\") \\\n .success()\n\n # network 2 sees router to networks 1 and 3\n tnet.sniffer2.start_state.doc(\"1-3-0\") \\\n .receive(IAmRouterToNetwork,\n iartnNetworkList=[1, 3],\n ).doc(\"1-3-1\") \\\n .success()\n\n # network 3 sees router to networks 1 and 2\n 
tnet.sniffer3.start_state.doc(\"1-4-0\") \\\n .receive(IAmRouterToNetwork,\n iartnNetworkList=[1, 2],\n ).doc(\"1-4-1\") \\\n .success()\n\n # run the group\n tnet.run()", "def _run_ci_test():\n _run_install(False)\n _run_coverage_html(False)\n _run_typecheck_xml(False)\n _run_lint(True)", "def runTest(self):\n\t\tself.setUp()\n\t\tself.test_postopProgramming1()", "def runTest(self):\n self.setUp()\n self.test_BiplaneRegistration1()", "def test_get_network(self):\n pass", "def main():\n # add all new test suites per test module here\n suite_date = test_date.suite()\n suite_ng = test_ng.suite()\n suite_page = test_page.suite()\n suite_container = test_container.suite()\n\n # add the suite to be tested here\n alltests = unittest.TestSuite((suite_date,\n suite_ng,\n suite_page,\n suite_container))\n\n # run the suite\n runner = unittest.TextTestRunner()\n runner.run(alltests)", "def _run_local_tests(self, *args, **kwargs):\n pass", "def test(self):\n for arch, python in self.python:\n self.run(f\"{python} -m pytest\")", "def run_tests(tests):\n return [test(t) for t in tests]", "def run_all_tests():\n successes = 0\n testsrun = 0\n testsdir = tests_dirpath()\n for test in os.listdir(testsdir):\n path = os.path.join(testsdir, test)\n if os.path.isdir(path):\n testsrun += 1\n if run_test(path):\n successes += 1\n print(\"--- %d/%d TESTS PASSED ---\" % (successes, testsrun))\n return successes == testsrun", "def test_on_all(self) -> None:\n x_test, y_test = self.mnist.test.images, self.mnist.test.labels\n N = self.mnist.test.num_examples\n\n # I have replaced all -1 with self.mb_size to be sure about exact shapes of all layers.\n assert N % self.mb_size == 0,\\\n \"Sorry, mb_size must divide the number of images in test set\"\n\n results = np.array([0., 0.])\n for batch_no in range(N // self.mb_size):\n beg = batch_no * self.mb_size\n end = min(N, (batch_no + 1) * self.mb_size)\n len_batch = end - beg\n batch_results = np.array(self.test_on_batch(x_test[beg:end], y_test[beg:end]))\n results += batch_results * len_batch\n results /= N\n self.logger.info(\"(Test(final): Loss: {0[0]}, accuracy: {0[1]}\".format(results))", "def net():\n suite = ServiceTestSuite()\n suite.addTest(unittest.makeSuite(Test, 'test_net'))\n return suite", "def test():\n import unittest\n testmodules = [\n 'bettermathlib_tests',\n 'randomwebapp_tests',\n ]\n suite = unittest.TestSuite()\n for t in testmodules:\n suite.addTest(unittest.defaultTestLoader.loadTestsFromName(t))\n unittest.TextTestRunner(verbosity=2).run(suite)", "def run_training_save_tests():\n test_training_save()\n test_distributed_training_save()\n test_multimodel_training_save()\n test_distributed_multimodel_training_save()", "def exe_tests(self):\n self.rank = mpicom.rank()\n self.size = mpicom.size()\n if mpicom.parallel():\n self.test(\"libname\",os.path.split(mpicom.__file__)[1],\"mpicom.so\")\n else:\n self.test(\"libname\",os.path.split(mpicom.__file__)[1],\"mpistub.pyc\")\n self.test_broadcast()\n self.test_reduce()\n self.test_p2p()\n self.test_gather()\n self.test_scatter()\n #self.test_alltoall()", "def test(all=False):\n\n # Do the import internally, so that this function doesn't increase total\n # import time\n from iptest import run_iptestall\n run_iptestall(inc_slow=all)", "def run(self, args):\n start_time = time.time()\n self._printer.write_update('Collecting tests ...')\n running_all_tests = False\n\n try:\n paths, all_test_names, running_all_tests = self._collect_tests(\n args)\n except IOError:\n # This is raised if --test-list doesn't 
exist\n return test_run_results.RunDetails(\n exit_code=exit_codes.NO_TESTS_EXIT_STATUS)\n\n test_names = self._finder.split_into_chunks(all_test_names)\n if self._options.order == 'natural':\n test_names.sort(key=self._port.test_key)\n elif self._options.order == 'random':\n test_names.sort()\n random.Random(self._options.seed).shuffle(test_names)\n elif self._options.order == 'none':\n # Restore the test order to user specified order.\n # base.tests() may change the order as it returns tests in the\n # real, external/wpt, virtual order.\n if paths:\n test_names = self._restore_order(paths, test_names)\n\n if not self._options.no_expectations:\n self._printer.write_update('Parsing expectations ...')\n self._expectations = test_expectations.TestExpectations(self._port)\n\n tests_to_run, tests_to_skip = self._prepare_lists(paths, test_names)\n\n self._printer.print_found(\n len(all_test_names), len(test_names), len(tests_to_run),\n self._options.repeat_each, self._options.iterations)\n\n # Check to make sure we're not skipping every test.\n if not tests_to_run:\n msg = 'No tests to run.'\n if self._options.zero_tests_executed_ok:\n _log.info(msg)\n # Keep executing to produce valid (but empty) results.\n else:\n _log.critical(msg)\n code = exit_codes.NO_TESTS_EXIT_STATUS\n return test_run_results.RunDetails(exit_code=code)\n\n exit_code = self._set_up_run(tests_to_run)\n if exit_code:\n return test_run_results.RunDetails(exit_code=exit_code)\n\n if self._options.num_retries is None:\n # If --test-list is passed, or if no test narrowing is specified,\n # default to 3 retries. Otherwise [e.g. if tests are being passed by\n # name], default to 0 retries.\n if self._options.test_list or len(paths) < len(test_names):\n self._options.num_retries = 3\n else:\n self._options.num_retries = 0\n\n should_retry_failures = self._options.num_retries > 0\n\n try:\n self._register_termination_handler()\n self._start_servers(tests_to_run)\n if self._options.watch:\n run_results = self._run_test_loop(tests_to_run, tests_to_skip)\n else:\n run_results = self._run_test_once(tests_to_run, tests_to_skip,\n should_retry_failures)\n initial_results, all_retry_results = run_results\n finally:\n _log.info(\"Finally stop servers and clean up\")\n self._stop_servers()\n self._clean_up_run()\n\n if self._options.no_expectations:\n return test_run_results.RunDetails(0, [], [], initial_results,\n all_retry_results)\n\n # Some crash logs can take a long time to be written out so look\n # for new logs after the test run finishes.\n self._printer.write_update('Looking for new crash logs ...')\n self._look_for_new_crash_logs(initial_results, start_time)\n for retry_attempt_results in all_retry_results:\n self._look_for_new_crash_logs(retry_attempt_results, start_time)\n\n self._printer.write_update('Summarizing results ...')\n summarized_full_results = test_run_results.summarize_results(\n self._port, self._options, self._expectations, initial_results,\n all_retry_results)\n summarized_failing_results = test_run_results.summarize_results(\n self._port,\n self._options,\n self._expectations,\n initial_results,\n all_retry_results,\n only_include_failing=True)\n run_histories = test_run_results.test_run_histories(\n self._options, self._expectations, initial_results,\n all_retry_results)\n\n exit_code = summarized_failing_results['num_regressions']\n if exit_code > exit_codes.MAX_FAILURES_EXIT_STATUS:\n _log.warning('num regressions (%d) exceeds max exit status (%d)',\n exit_code, exit_codes.MAX_FAILURES_EXIT_STATUS)\n 
exit_code = exit_codes.MAX_FAILURES_EXIT_STATUS\n\n if not self._options.dry_run:\n self._write_json_files(summarized_full_results,\n summarized_failing_results, initial_results,\n running_all_tests, run_histories)\n\n self._copy_results_html_file(self._artifacts_directory,\n 'results.html')\n if (initial_results.interrupt_reason is\n test_run_results.InterruptReason.EXTERNAL_SIGNAL):\n exit_code = exit_codes.INTERRUPTED_EXIT_STATUS\n else:\n if initial_results.interrupted:\n exit_code = exit_codes.EARLY_EXIT_STATUS\n if (self._options.show_results\n and (exit_code or initial_results.total_failures)):\n self._port.show_results_html_file(\n self._filesystem.join(self._artifacts_directory,\n 'results.html'))\n self._printer.print_results(time.time() - start_time,\n initial_results)\n\n return test_run_results.RunDetails(exit_code, summarized_full_results,\n summarized_failing_results,\n initial_results, all_retry_results)", "def make_all():\n\n if not MASTER.exists():\n os.makedirs(MASTER)\n members = inspect.getmembers(sys.modules[__name__])\n members = [f for f in members if 'test_' in f[0]]\n for member in members:\n print('Running %s...' % member[0], end='')\n member[1](master=True)\n print('done!')", "def make_all():\n\n if not MASTER.exists():\n os.makedirs(MASTER)\n members = inspect.getmembers(sys.modules[__name__])\n members = [f for f in members if 'test_' in f[0]]\n for member in members:\n print('Running %s...' % member[0], end='')\n member[1](master=True)\n print('done!')", "def runTest(self):\n unittest.main()\n ChoreTest.clean_up()", "def main():\n dims = params['dims']\n\n for d in dims:\n print('**** Running test for d={0:d} ****'.format(d))\n run_test(d)", "def _run_tests(self):\n for pyunit_testcase in self.cfg.testcases:\n yield self._run_testsuite(pyunit_testcase)", "def test():\n import unittest\n\n tests = unittest.TestLoader().discover(\"tests\")\n unittest.TextTestRunner(verbosity=2).run(tests)", "def main():\n return run_network_interface_check()", "def unittest():\n from a6test import test_all\n test_all()", "def test_register_network(self):\n pass", "def test():\n import unittest\n tests = unittest.TestLoader().discover(tests)\n unittest.TextTestRunner(verbosity=2).run(tests)", "def RunAll():\n testfunctions = []\n for name, obj in inspect.getmembers(sys.modules[__name__]):\n if inspect.isfunction(obj) and name != 'RunAll':\n testfunctions.append(obj)\n\n # run all the functions\n for f in testfunctions:\n print('Running %s' % str(f))\n f()", "def run_feature_extraction_tests():\n test_feature_extraction()\n test_distributed_feature_extraction()\n test_multimodel_feature_extraction()\n test_distributed_multimodel_feature_extraction()", "def run_tests(remit, sourcelist):\n for source in sourcelist:\n # - move into source's directory\n os.chdir(source)\n # - build worklist of commands\n commands = list()\n commands += test_matrix(remit, source)\n commands += extra_tests(remit, source)\n commands = remove_blacklist(remit, source, commands)\n # - run the commands\n for i, command in enumerate(commands):\n print('[test %s: %s of %d] %s'\n % (source,\n str(i+1).rjust(len(str(len(commands)))),\n len(commands),\n ' '.join(command)))\n subprocess.call(command)\n # - move out of source's directory\n os.chdir('..')", "def test():\n import unittest\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test_network(self):\n train_accuracy = 100 - percentError(map(self.neural_result,\n self.train_inputs),\n 
self.train_outputs)\n print 'Train accuracy:', train_accuracy\n\n test_accuracy = 100 - percentError(map(self.neural_result,\n self.test_inputs),\n self.test_outputs)\n print 'Test accuracy:', test_accuracy\n\n print '#' * int(train_accuracy), 'TR'\n print '#' * int(test_accuracy), 'TE'", "def execute(self):\n for test in self.tests:\n test.execute()\n self.logger.dump()\n print(\"Finished!\")", "def run_tests(self):\n # Trigger a config change which triggers a deferred hook.\n self.run_charm_change_hook_test('configure_ovs')\n\n # Trigger a package change which requires a restart\n self.run_package_change_test(\n 'openvswitch-switch',\n 'openvswitch-switch')", "def run_tests(self):\n # Trigger a config change which triggers a deferred hook.\n self.run_charm_change_hook_test('configure_ovs')\n\n # Trigger a package change which requires a restart\n self.run_package_change_test(\n 'openvswitch-switch',\n 'openvswitch-switch')", "def test_network(bpn, test_data):\n DisplayNetwork.display_green(\"[INFO] Started to test the network\")\n output = bpn.Run(np.array(test_data))\n return output", "def run_tests(self):\n # Charm does not defer hooks so that test is not included.\n # Trigger a package change which requires a restart\n self.run_package_change_test(\n 'ovn-central',\n 'ovn-central')", "def runWithDirector(global_options, create_input_store=True):\n from ooni.director import Director\n start_tor = False\n director = Director()\n if global_options['list']:\n net_tests = [net_test for net_test in director.getNetTests().items()]\n log.msg(\"\")\n log.msg(\"Installed nettests\")\n log.msg(\"==================\")\n for net_test_id, net_test in net_tests:\n optList = []\n for name, details in net_test['arguments'].items():\n optList.append({'long': name, 'doc': details['description']})\n\n desc = ('\\n' +\n net_test['name'] +\n '\\n' +\n '-'*len(net_test['name']) +\n '\\n' +\n '\\n'.join(textwrap.wrap(net_test['description'], 80)) +\n '\\n\\n' +\n '$ ooniprobe {}/{}'.format(net_test['category'],\n net_test['id']) +\n '\\n\\n' +\n ''.join(usage.docMakeChunks(optList))\n )\n map(log.msg, desc.split(\"\\n\"))\n log.msg(\"Note: Third party tests require an external \"\n \"application to run properly.\")\n\n raise SystemExit(0)\n\n if global_options.get('annotations') is not None:\n global_options['annotations'] = setupAnnotations(global_options)\n\n if global_options.get('preferred-backend') is not None:\n config.advanced.preferred_backend = global_options['preferred-backend']\n\n if global_options['no-collector']:\n log.msg(\"Not reporting using a collector\")\n global_options['collector'] = None\n start_tor = False\n elif config.advanced.get(\"preferred_backend\", \"onion\") == \"onion\":\n start_tor = True\n\n if (global_options['collector'] and\n config.advanced.get(\"preferred_backend\", \"onion\") == \"onion\"):\n start_tor |= True\n\n return runTestWithDirector(\n director=director,\n start_tor=start_tor,\n global_options=global_options,\n create_input_store=create_input_store\n )", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test():\n tests = unittest.TestLoader().discover('tests')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def test_03(self):\n if _debug: TestIAmRouterToNetwork._debug(\"test_03\")\n\n # create a network\n tnet = TNetwork()\n\n # test device sends request\n 
tnet.iut.start_state.doc(\"3-1-0\") \\\n .call(tnet.iut.nse.i_am_router_to_network,\n destination=Address(\"1:*\"),\n ).doc(\"3-1-1\") \\\n .success()\n\n # network 1 sees router to networks 2 and 3\n tnet.sniffer1.start_state.doc(\"3-2-0\") \\\n .receive(IAmRouterToNetwork,\n iartnNetworkList=[2, 3],\n ).doc(\"3-2-1\") \\\n .success()\n\n # network 2 sees nothing\n tnet.sniffer2.start_state.doc(\"3-3-0\") \\\n .timeout(10).doc(\"3-3-1\") \\\n .success()\n\n # network 3 sees nothing\n tnet.sniffer3.start_state.doc(\"3-4-0\") \\\n .timeout(10).doc(\"3-4-1\") \\\n .success()\n\n # run the group\n tnet.run()", "def actionRunUnitTests():\n UnitTestRunner.init()\n \n for target in Settings.targets:\n for platform in Settings.targetPlatforms:\n for cpu in Settings.targetCPUs:\n for configuration in Settings.targetConfigurations:\n if not Summary.checkIfActionFailed(ACTION_BUILD, target, platform, cpu, configuration):\n Logger.printStartActionMessage('Running unit tests for ' + target + ' ' + platform + ' ' + cpu + ' ' + configuration, ColoredFormatter.YELLOW)\n result = UnitTestRunner.run(target, platform, cpu, configuration)\n Summary.addSummary(ACTION_RUN_UNITTESTS, target, platform, cpu, configuration, result, UnitTestRunner.executionTime)\n if result != NO_ERROR:\n Logger.printEndActionMessage('Failed to execute unit tests!')\n else:\n Logger.printEndActionMessage('Executed all unit tests')", "def CASE3( self, main ):\n\n from tests.USECASE.SegmentRouting.SRRouting.dependencies.SRRoutingTest import SRRoutingTest\n\n SRRoutingTest.runTest( main,\n test_idx=3,\n onosNodes=3,\n dhcp=1,\n routers=1,\n ipv4=1,\n ipv6=1,\n countFlowsGroups=False,\n linkFailure=False,\n description=\"Ping between all ipv4 and ipv6 hosts in the topology\" )", "def run_tests(self):\n manifest = manifestparser.TestManifest(\n manifests=[os.path.join(self.repository_path, self.manifest_path)],\n strict=False)\n\n tests = manifest.active_tests(**mozinfo.info)\n self._mozmill.run(tests, self.options.restart)\n\n # Whenever a test fails it has to be marked, so we quit with the correct exit code\n self.last_failed_tests = self.last_failed_tests or self._mozmill.results.fails\n\n self.testrun_index += 1", "def test_and_multi_epoch(self):\n\n for epochs in range(10, 100, 10):\n perceptron, network, perceptron_estimated_values, \\\n network_estimated_values, perceptron_unit_error, network_unit_error \\\n = TestPerceptronNetwork.and_setup(epochs)\n\n self.assert_same_results(perceptron, network, perceptron_estimated_values,\n network_estimated_values, perceptron_unit_error,\n network_unit_error)", "def test():\n\n tests = unittest.TestLoader().discover('api/tests/', pattern='*/test_*.py')\n unittest.TextTestRunner(verbosity=2).run(tests)", "def runTest(self):\n self.setUp()\n self.test_FiberDistance1()", "def runTests():\r\n\r\n print(\"running a few tests\")\r\n\r\n average = compute .gpsAverage (4, 5)\r\n print(\"average = \", average)\r\n \r\n print (\"hello!\")", "def run_tests(output_dir, fstype):\n global options\n if options.debug:\n print \"Run NUMA test\"\n for num_disks in [2]:\n for num_dirs in range(1, 5):\n postmark = PostMarkTest(output_dir, fstype, num_disks, num_dirs)\n run_one_test(postmark)", "def runTest(self):\n self.setUp()\n self.test_STLModelBuilder1()", "def run_all_tests():\n model_configs = (model_handler.ModelConfig(\n saved_model_dir=platform_test.test_src_dir_path(\n \"python/compiler/tensorrt/model_tests/sample_model\"),\n default_batch_size=128),)\n if FLAGS.use_tf2:\n model_handler_cls = 
model_handler.ModelHandlerV2\n trt_model_handeler_cls = model_handler.TrtModelHandlerV2\n default_trt_convert_params = DEFAUL_TRT_CONVERT_PARAMS._replace(\n is_dynamic_op=True)\n else:\n model_handler_cls = model_handler.ModelHandlerV1\n trt_model_handeler_cls = model_handler.TrtModelHandlerV1\n default_trt_convert_params = DEFAUL_TRT_CONVERT_PARAMS._replace(\n is_dynamic_op=False)\n for model_config in model_configs:\n trt_convert_params = default_trt_convert_params._replace(\n max_batch_size=model_config.default_batch_size)\n base_model = model_handler_cls(model_config)\n random_inputs = base_model.generate_random_inputs()\n base_model_result = base_model.run(random_inputs)\n trt_fp32_model_result = trt_model_handeler_cls(\n model_config=model_config,\n trt_convert_params=trt_convert_params._replace(\n precision_mode=trt.TrtPrecisionMode.FP32)).run(random_inputs)\n trt_fp16_model_result = trt_model_handeler_cls(\n model_config=model_config,\n trt_convert_params=trt_convert_params._replace(\n precision_mode=trt.TrtPrecisionMode.FP16)).run(random_inputs)\n\n logging.info(\"Base model latency: %f ms\",\n _get_mean_latency(base_model_result))\n logging.info(\"TensorRT FP32 model latency: %f ms\",\n _get_mean_latency(trt_fp32_model_result))\n logging.info(\"TensorRT FP16 model latency: %f ms\",\n _get_mean_latency(trt_fp16_model_result))", "def main_test():\n full = unittest.TestSuite()\n full.addTest(unittest.makeSuite(TestToolOptions))\n full.addTest(unittest.makeSuite(TestBadConfiguration))\n full.addTest(unittest.makeSuite(TestBasicEndpoints))\n full.addTest(unittest.makeSuite(TestMultipleEPG))\n full.addTest(unittest.makeSuite(TestBasicExistingEndpoints))\n full.addTest(unittest.makeSuite(TestBasicExistingEndpointsAddPolicyLater))\n full.addTest(unittest.makeSuite(TestExportPolicyRemoval))\n full.addTest(unittest.makeSuite(TestBasicEndpointsWithContract))\n full.addTest(unittest.makeSuite(TestBasicEndpointMove))\n full.addTest(unittest.makeSuite(TestPolicyChangeProvidedContract))\n full.addTest(unittest.makeSuite(TestChangeL3Out))\n full.addTest(unittest.makeSuite(TestDuplicates))\n full.addTest(unittest.makeSuite(TestDuplicatesTwoL3Outs))\n full.addTest(unittest.makeSuite(TestDeletions))\n\n unittest.main()", "def run_tests():\n fail = []\n okay = []\n for i in os.listdir(\".\"):\n if i.find(\"_test_\") > -1 and i.endswith(\".py\"):\n if 0 != subprocess.call(\"python \" + i, shell=True):\n fail.append(i)\n else:\n okay.append(i)\n if fail:\n print(\"[ERROR] The following %u tests failed: %r\" % (len(fail), fail))\n return False\n print(\"[DONE] All %u tests completely successfully!\" % (len(okay)))\n return True", "def testAutomodeNetwork(self):\n ### create test resources\n instance_name = \"end-to-end-test-instance-1\"\n instance_selfLink = \\\n self.test_resource_creator.create_instance_using_template(\n instance_name,\n self.test_resource_creator.legacy_instance_template_selfLink)[\n 'targetLink']\n original_config = self.google_api_interface.get_instance_configs(\n instance_name)\n auto_subnetwork_name = 'end-to-end-test-auto-subnetwork'\n try:\n network_selfLink = self.google_api_interface.get_network(auto_subnetwork_name)['selfLink']\n except:\n network_selfLink = self.google_api_interface.create_auto_subnetwork(auto_subnetwork_name)['targetLink']\n\n ### start migration\n selfLink_executor = SelfLinkExecutor(self.compute, instance_selfLink,\n auto_subnetwork_name,\n None,\n True)\n\n migration_handler = selfLink_executor.build_migration_handler()\n 
migration_handler.network_migration()\n\n ### check result\n new_config = self.google_api_interface.get_instance_configs(\n instance_name)\n self.assertTrue(\n resource_config_is_unchanged_except_for_network(new_config,\n original_config))\n self.assertTrue(\n compare_instance_external_ip(new_config, original_config))\n # network changed\n self.assertTrue(check_instance_network(new_config,\n network_selfLink,\n ))\n print('Pass the current test')", "def setUp(self):\n sumo_bin = sumolib.checkBinary('sumo')\n traci.start([sumo_bin, \"-n\", self.network_path, \"-r\", self.routes_path])\n traci.simulationStep()" ]
[ "0.7459378", "0.74455136", "0.7278033", "0.71453613", "0.713573", "0.7003208", "0.69377714", "0.6935737", "0.6916369", "0.68943274", "0.6879508", "0.6857769", "0.67931044", "0.6778059", "0.6765904", "0.675124", "0.67291695", "0.66894144", "0.66677123", "0.6644143", "0.66383016", "0.66262877", "0.6606613", "0.65838885", "0.65819764", "0.6575092", "0.6560523", "0.65121496", "0.6498272", "0.6486818", "0.6473093", "0.64716804", "0.6462493", "0.6453718", "0.6447231", "0.64346874", "0.6416155", "0.6393734", "0.63839215", "0.63750714", "0.63733876", "0.63679796", "0.6366267", "0.6364766", "0.6361186", "0.6359695", "0.63449484", "0.63288456", "0.6327869", "0.631808", "0.6306854", "0.62915", "0.629062", "0.62853813", "0.62805665", "0.6279111", "0.627527", "0.6272608", "0.6264477", "0.6261897", "0.6246347", "0.6242292", "0.6236423", "0.6236423", "0.62357223", "0.62352246", "0.62344855", "0.6231711", "0.62198985", "0.62170976", "0.62089044", "0.61940396", "0.618697", "0.6174979", "0.6164178", "0.6163468", "0.61597663", "0.6157005", "0.6155535", "0.6155535", "0.615398", "0.61510664", "0.61473453", "0.6140028", "0.6140028", "0.6140028", "0.6135964", "0.6131416", "0.61288404", "0.6128426", "0.6109904", "0.61080134", "0.6102849", "0.609976", "0.60980934", "0.6088969", "0.60838574", "0.6079074", "0.60788894", "0.607493", "0.60718" ]
0.0
-1
enforceZeroSlope ensures that the last slope of all elbow fits is always 0
def find_elbows_per_boots(dfr, nElbows, enforceZeroSlope=False):
    rows = []
    for dotmode, dfp in dfr.groupby('dotmode'):
        if dotmode == '3d':
            # always take out 3d's first two dis
            # since pcor's correlation with duration is too weak to fit pmf
            dfp = remove_dis(dfp, [1, 2], dotmode)
        for bi, dfpts in dfp.groupby('bi'):
            row = find_elbows_one_boot(dfpts, nElbows, enforceZeroSlope)
            row.update({'dotmode': dotmode, 'bi': bi})
            rows.append(row)
    return pd.DataFrame(rows)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def fit_slope_with_zero_intercept_residue(X,Y):\n X = np.array(X)\n Y = np.array(Y)\n slope = np.sum(Y*X)/np.sum(np.power(X,2))\n return slope*X - Y", "def zeroCrossing(self,evap_threshold):\r\n\t\tself.splitBaseline =(np.mean(self.splitData[0:10]))\t\r\n\t\tsplit_max_index = np.argmax(self.splitData)\r\n\t\tsplit_min_index = np.argmin(self.splitData)\r\n\r\n\t\tif split_max_index >= split_min_index:\r\n\t\t\treturn self.zeroCrossingPosSlope(evap_threshold)\r\n\t\t\r\n\t\tif split_max_index < split_min_index:\r\n\t\t\treturn self.zeroCrossingNegSlope(evap_threshold)", "def leakyrelu(x, negative_slope: float = 0.01):\n return np.max(x, 0) + negative_slope * np.min(-x, 0)", "def zeroCrossingNegSlope(self, evap_threshold):\r\n\t\tself.splitBaseline = np.mean(self.splitData[0:10])\r\n\t\ttry:\r\n\t\t\tsplit_min_index = np.argmin(self.splitData)\r\n\t\t\tsplit_max_index = np.argmax(self.splitData[0:split_min_index])\r\n\t\t\tsplit_max_value = self.splitData[split_max_index]\r\n\t\t\tsplit_min_value = self.splitData[split_min_index]\r\n\t\texcept:\r\n\t\t\tzero_crossing = -3\r\n\t\t\treturn zero_crossing\r\n\t\t#print 'split',\tsplit_min_index, (self.splitBaseline-split_min_value), split_max_index,(split_max_value-self.splitBaseline)\r\n\t\tif (self.splitBaseline-split_min_value) >= evap_threshold and (split_max_value-self.splitBaseline) >= evap_threshold: #avoid particles evaporating before the notch position can be properly determined (details in Taylor et al. 10.5194/amtd-7-5491-2014)\r\n\t\t\ttry:\r\n\t\t\t\tfor index in range(split_max_index, split_min_index+1): #go to max +1 because 'range' function is not inclusive\r\n\t\t\t\t\tif self.splitData[index] > self.splitBaseline:\r\n\t\t\t\t\t\tvalue_zero_cross_pos = float(self.splitData[index])\r\n\t\t\t\t\t\tindex_zero_cross_pos = index\r\n\t\t\t\t\tif self.splitData[index] <= self.splitBaseline:\r\n\t\t\t\t\t\tvalue_zero_cross_neg = float(self.splitData[index])\r\n\t\t\t\t\t\tindex_zero_cross_neg = index\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\tzero_crossing = index+((value_zero_cross_pos-self.splitBaseline)*(index_zero_cross_pos-index_zero_cross_neg))/(value_zero_cross_pos-value_zero_cross_neg) \r\n\t\t\texcept:\r\n\t\t\t\tzero_crossing = -1\r\n\t\t\r\n\t\telse: \r\n\t\t\tzero_crossing = -2\r\n\r\n\t\tself.zeroCrossingPos = zero_crossing\r\n\t\treturn zero_crossing", "def zeroCrossingPosSlope(self, evap_threshold):\r\n\t\tself.splitBaseline = np.mean(self.splitData[0:10])\r\n\t\tsplit_max_index = np.argmax(self.splitData)\r\n\t\tsplit_min_index = np.argmin(self.splitData[0:split_max_index])\r\n\t\tsplit_max_value = self.splitData[split_max_index]\r\n\t\tsplit_min_value = self.splitData[split_min_index]\r\n\r\n\t\tif (self.splitBaseline-split_min_value) >= evap_threshold and (split_max_value-self.splitBaseline) >=evap_threshold: #avoid particles evaporating before the notch position can be properly determined (details in Taylor et al. 
10.5194/amtd-7-5491-2014)\r\n\t\t\ttry:\r\n\t\t\t\tfor index in range(split_min_index, split_max_index+1): #go to max +1 because 'range' function is not inclusive\r\n\t\t\t\t\tif self.splitData[index] < self.splitBaseline:\r\n\t\t\t\t\t\tvalue_zero_cross_neg = float(self.splitData[index])\r\n\t\t\t\t\t\tindex_zero_cross_neg = index\r\n\t\t\t\t\tif self.splitData[index] >= self.splitBaseline:\r\n\t\t\t\t\t\tvalue_zero_cross_pos = float(self.splitData[index])\r\n\t\t\t\t\t\tindex_zero_cross_pos = index\r\n\t\t\t\t\t\tbreak\r\n\t\t\t\tzero_crossing = index+((value_zero_cross_pos-self.splitBaseline)*(index_zero_cross_pos-index_zero_cross_neg))/(value_zero_cross_pos-value_zero_cross_neg) \r\n\t\t\texcept:\r\n\t\t\t\tzero_crossing = -1 \r\n\t\t\t\t\r\n\t\telse:\r\n\t\t\tzero_crossing = -2 \r\n\t\t\r\n\t\tself.zeroCrossingPos = zero_crossing\r\n\t\treturn zero_crossing", "def testSlopeSetNegative(self):\n def setSlope():\n self.cc.slope = [-1.3782, 278.32, 0.738378233782]\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n setSlope\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n setSlope()\n\n self.assertEqual(\n (Decimal('0.0'), Decimal('278.32'), Decimal('0.738378233782')),\n self.cc.slope\n )", "def testSlopeSetNegative(self):\n def setSlope():\n self.node.slope = [-1.3782, 278.32, 0.738378233782]\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n setSlope\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n setSlope()\n\n self.assertEqual(\n (Decimal('0.0'), Decimal('278.32'), Decimal('0.738378233782')),\n self.node.slope\n )", "def test_positive_slope(self):\n slopes = []\n for i in range(100):\n neighborhood, pc = create_point_cloud_in_plane_and_neighborhood()\n slopes += list(EigenValueVectorizeFeatureExtractor().extract(pc, neighborhood, None, None, None)[6])\n np.testing.assert_array_less(np.zeros_like(slopes), slopes)", "def find_zero_cross(x, y):\r\n\r\n try:\r\n zp = np.where(y[:-1] * y[1:] <= 0)[0][0] #make a list of A[x] * A[x -1] without usinf \"for\" loop in original python.\r\n m = np.polyfit(x[(zp - 1):(zp + 1)], y[(zp -1):(zp + 1)], 1)\r\n zc = -m[1]/m[0] #For y = ax + b and y = 0, then x = -b/a.\r\n\r\n except:\r\n print 'error at zero_cross'\r\n zc = 0\r\n\r\n return zc", "def calculate_edges_zero(self, verbose = False):\n\n ## calculates the first and last wavelength that has non-zero\n # w = np.where(self.throughput > 0)[0]\n # if verbose: print(w)\n # self._upper_edge = self.wavelength[w[-1]]\n # self._lower_edge = self.wavelength[w[0]]\n\n w = np.where(self.throughput > 0)[0]\n if verbose: print(w)\n if w[0] - 1 < 0:\n w_low = 0\n else:\n w_low = w[0] - 1\n\n if w[-1] + 1 == len(self.throughput):\n w_high = w[-1]\n else:\n w_high = w[-1] + 1\n\n self._upper_edge = self.wavelength[w_high]\n self._lower_edge = self.wavelength[w_low]", "def test_check_null_weight_with_zeros() -> None:\n sample_weight = np.ones_like(y_toy)\n sample_weight[:1] = 0.0\n sw_out, X_out, y_out = check_null_weight(sample_weight, X_toy, y_toy)\n np.testing.assert_almost_equal(sw_out, np.array([1, 1, 1, 1, 1]))\n np.testing.assert_almost_equal(X_out, np.array([[1], [2], [3], [4], [5]]))\n np.testing.assert_almost_equal(y_out, np.array([7, 9, 11, 13, 15]))", "def slope(self):\n if self.b == 0:\n return None\n else:\n return (-1) * self.a/self.b", "def testSetSlopeWithNegativeInt(self):\n def setSlope():\n self.node.slope = -20\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n setSlope\n )\n\n 
cdl_convert.config.HALT_ON_ERROR = False\n\n setSlope()\n\n self.assertEqual(\n (Decimal('0.0'), Decimal('0.0'), Decimal('0.0')),\n self.node.slope\n )", "def slope(a, b):\r\n if a[0] == b[0]: #If the x values are both 0\r\n return 0 #Technically, undefined, but doesn't matter for finding collinearity\r\n return (a[1] - b[1]) / (a[0] - b[0])", "def test__validate_with_synthetic_data(elbow_with_synthetic_data):\n x, y, break_pt = elbow_with_synthetic_data\n expected_elbow = np.argmin(np.abs(x - break_pt))\n assert expected_elbow == find_elbow_point(x, y)", "def testSetSlopeWithNegativeFloat(self):\n def setSlope():\n self.node.slope = -20.1\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n setSlope\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n setSlope()\n\n self.assertEqual(\n (Decimal('0.0'), Decimal('0.0'), Decimal('0.0')),\n self.node.slope\n )", "def test_non_linear_fit_zero_degree_polynomial_with_weights(fitter):\n\n model = models.Polynomial1D(0, c0=0)\n fitter = fitter()\n\n x = np.arange(10, dtype=float)\n y = np.ones((10,))\n weights = np.ones((10,))\n\n fit = fitter(model, x, y)\n assert_almost_equal(fit.c0, 1.0)\n\n fit = fitter(model, x, y, weights=weights)\n assert_almost_equal(fit.c0, 1.0)", "def loss_piecewise(y, a):\n if not y: # Runs if y == 0\n return -1 * np.log10(1 - a)\n else: # Runs if y != 0\n return -1 * np.log10(a)", "def asymtotic_approx(self, zero_crossings):\n x = np.sqrt(2 * np.log(zero_crossings))\n return x + 0.5772 / x", "def test_null_bootstrapping(self):\n\n apply = lambda p, x, i: x[:, :1]\n output = apply(self._params, self._batch.x, self._index)\n # y is zero, hence the loss is just the mean square of the output.\n expected_loss = np.mean(np.square(output))\n\n loss_fn = single_index.L2Loss()\n loss, unused_metrics = loss_fn(\n apply=apply, params=self._params, batch=self._batch, index=self._index)\n self.assertEqual(\n loss, expected_loss,\n (f'expected loss with null bootstrapping is {expected_loss}, '\n f'but it is {loss}'))", "def findzero(x,y):\n\t\targs = np.where(y > 0)[0]\n\t\tif len(args) == 0:\n\t\t\tprint \"Unable to find zero! 
Using maximum T.\"\n\t\t\ti = len(y) - 1\n\t\telse:\n\t\t\ti = min(args)\n\n\t\t#Do a fit using the surrounding points, and perform interpolation based on that\n\t\tp = np.polyfit(x[i-1:i+1],y[i-1:i+1],1)\n\t\tyfit = p[0]*x[i-1:i+1] + p[1]\n\t\tf = interp.interp1d(yfit, x[i-1:i+1])\n\t\treturn float(f(0))", "def test_non_zero_loss(self):\n # Reset models.\n self.model.load_state_dict(self.initial_model_dict)\n self.actor_model.load_state_dict(self.initial_actor_model_dict)\n\n polybeast.learn(*self.learn_args)\n\n self.assertNotEqual(self.stats[\"total_loss\"], 0.0)\n self.assertNotEqual(self.stats[\"pg_loss\"], 0.0)\n self.assertNotEqual(self.stats[\"baseline_loss\"], 0.0)\n self.assertNotEqual(self.stats[\"entropy_loss\"], 0.0)", "def test_intra_power_law_fit_no_model(self):\n\n\t\tdetails= self.watcher.analyze(model=self.model, layers=self.fc_layers, intra=True, randomize=False, vectors=False)\n\t\tactual_alpha = details.alpha[0]\n\n\t\texpected_alpha = 2.654 # not very accurate because of the sparisify transform\n\t\tself.assertAlmostEqual(actual_alpha,expected_alpha, places=1)", "def test_elbo_loss(self):\n\n batch_size = 4\n batch = base.Batch(\n x=np.expand_dims(np.arange(batch_size), 1),\n y=np.arange(batch_size),\n )\n params = dict()\n apply = lambda p, x, i: x[:, 0]\n index = np.array([])\n output = apply(params, batch.x, index)\n\n log_likelihood_fn = lambda out, batch: out\n model_prior_kl_fn = lambda out, params, index: np.zeros_like(out)\n\n elbo_loss = ElboLoss(\n log_likelihood_fn=log_likelihood_fn,\n model_prior_kl_fn=model_prior_kl_fn)\n\n loss, unused_loss_metrics = elbo_loss(\n apply=apply, params=params, batch=batch, index=index)\n self.assertTrue((loss == -output).all(),\n f'expected elbo loss to be {-output} but it is {loss}')", "def are_residuals_near_zero(pts):\n y = regress(pts)[1]\n return all(map(near_zero, y))", "def grad_zero(self):\r\n pass", "def grad_zero(self):\r\n pass", "def test_Sobol_G_raises_error_if_values_lt_zero():\n evaluate(np.array([0, -1, -.02, 1, 1, -0.1, -0, -12]))", "def test_invalid_target(self):\n y_valid = np.random.randint(2, size=100)\n y_invalid = np.random.uniform(size=100)\n\n oz = ClassBalance()\n\n with pytest.raises(YellowbrickValueError):\n oz.fit(y_invalid)\n\n with pytest.raises(YellowbrickValueError):\n oz.fit(y_valid, y_invalid)", "def testSlopeBadLength(self):\n def setSlope():\n self.node.slope = ['banana']\n\n self.assertRaises(\n ValueError,\n setSlope\n )", "def test_check_null_weight_with_nonzeros() -> None:\n sample_weight = np.ones_like(y_toy)\n sw_out, X_out, y_out = check_null_weight(sample_weight, X_toy, y_toy)\n np.testing.assert_almost_equal(sw_out, sample_weight)\n np.testing.assert_almost_equal(X_out, X_toy)\n np.testing.assert_almost_equal(y_out, y_toy)", "def model_zero(df, offset, harmonize_year=\"2015\"):\n # current decision is to return a simple offset, this will be a straight\n # line for all time periods. 
previous behavior was to set df[numcols] = 0,\n # i.e., report 0 if model reports 0.\n return constant_offset(df, offset)", "def zero_one_loss(self, normalize=True):\n zero_one_loss = len((self.y_true - self.y_pred).nonzero()[0])\n if normalize:\n zero_one_loss /= len(self.y_pred)\n return zero_one_loss", "def set_zero(self, loc=None):\n self.Y[loc] -= self.Y[loc]", "def zero_negative_weights(self):\n for k in range(len(self)):\n self[k] *= 0 if self[k] < 0 else 1\n self.finalized = True\n return self", "def testSetSlopeWithNegativeString(self):\n def setSlope():\n self.node.slope = '-20'\n\n cdl_convert.config.HALT_ON_ERROR = True\n\n self.assertRaises(\n ValueError,\n setSlope\n )\n\n cdl_convert.config.HALT_ON_ERROR = False\n\n setSlope()\n\n self.assertEqual(\n (Decimal('0.0'), Decimal('0.0'), Decimal('0.0')),\n self.node.slope\n )", "def elbow_with_synthetic_data():\n delta = 0.1\n slope_2 = 2\n slope_3 = 3\n break_pt = 5\n intercept_2 = 0.0\n line_2 = np.arange(0, break_pt, delta) * slope_2 + intercept_2\n line_3 = (\n np.arange(break_pt, break_pt * 2, delta) * slope_3\n + (slope_2 - slope_3) * break_pt\n )\n x = np.arange(0, break_pt * 2, delta)\n y = np.concatenate((line_2, line_3))\n break_pt = break_pt\n\n return x, y, break_pt", "def __init__(self, negative_slope=0.01, inplace=False):\n super(LeakyReLU, self).__init__()\n self.negative_slope = negative_slope\n self.inplace = inplace", "def grad_zero(self):\r\n for layer in self.layers:\r\n layer.grad_zero()", "def test_greaterThanZero(self):\n\t\tX = []\n\t\tY1 = []\n\t\tY2 = []\n\t\tcandidate = [0,2,3,4]\n\t\tfor i in range(DATA_POINTS_NUM):\n\t\t\tX.append((i - DATA_POINTS_NUM/2)*0.1)\n\t\t\tY1.append(polynomi_3N(REFERENCE, X[-1]))\n\t\t\tY2.append(polynomi_3N(candidate, X[-1]))\n\t\tret = calculate_fitness(Y1, Y2)\n\t\tself.assertFalse( 0 > ret, \n\t\t \"Something wrong with fitness calculation\")\n\t\tprint ret", "def test__validate_nan_output(straight_line):\n x, y0, y1, y2 = straight_line\n # Horizontal line\n elbow = find_elbow_point(x, y0)\n assert np.isnan(elbow)\n\n # Vertical line\n elbow = find_elbow_point(x, y2)\n assert np.isnan(elbow)\n\n # A line, but no elbow.\n elbow = find_elbow_point(x, y1)\n assert np.isnan(elbow)", "def test_extreme_values(self):\n with pytest.warns(RuntimeWarning) as warninfo:\n assert np.exp(laplace_approx(999999, 999999, self.data)) == 0\n with pytest.warns(RuntimeWarning) as warninfo:\n assert np.exp(laplace_approx(999999, 999999, self.data)) == 0", "def min_zero_crossings(self):\n return self._MIN_ZERO_CROSSINGS", "def testSlopeBadLength(self):\n def setSlope():\n self.cc.slope = ['banana']\n\n self.assertRaises(\n ValueError,\n setSlope\n )", "def initializeWeightsToZero(self):\n\t\t## YOUR CODE BELOW\n\t\t\n\t\tutil.raiseNotDefined()\n\t\treturn", "def is_zero(self):\n return -0.0001 <= self.l2_norm() <= 0.0001", "def test_Sobol_G_raises_error_if_values_lt_zero():\n with raises(ValueError):\n evaluate(np.array([0, -1, -.02, 1, 1, -0.1, -0, -12]))", "def slope(l):\n if l[1] == l[0]:\n return float(\"inf\")\n else:\n return float(l[3]-l[2])/(l[1]-l[0])", "def model_fixed_slope(train_x, train_y, test_x, slope=1):\n intercept = np.mean(train_y - train_x*slope)\n model_info = {'model': 'fixed_slope', 'const': intercept}\n predictions = test_x*slope + intercept\n return predictions, model_info", "def test__validate_with_synthetic_data_plus_noise(elbow_with_synthetic_data):\n # Test with an odd number of points\n x, y, break_pt = elbow_with_synthetic_data\n np.random.seed(12)\n range = 
(y.max() - y.min()) * 0.02\n y = y + np.random.randn(y.shape[0]) * range\n expected_elbow = np.argmin(np.abs(x - break_pt))\n found_elbow = find_elbow_point(x, y)\n assert abs(found_elbow - expected_elbow) < 10\n\n # Test with an even number of points\n x, y, break_pt = elbow_with_synthetic_data\n range = (y.max() - y.min()) * 0.02\n y = y + np.random.randn(y.shape[0]) * range\n expected_elbow = np.argmin(np.abs(x - break_pt))\n found_elbow = find_elbow_point(x, y, max_iter=40)\n assert abs(found_elbow - expected_elbow) < 10", "def test_linear_binomial_fails_leading_zero(self):\n self.assertRaises(ValueError, LinearBinomial, 0, 1)", "def baseline_deviation_1_rule(_m, y):\r\n\r\n if y == m.Y.first():\r\n return Constraint.Skip\r\n else:\r\n return m.z_b1[y] >= m.baseline[y] - m.baseline[y - 1]", "def _no_reg(self, y, x, y_minus_g, w_0):\n # Initialize weight vector to return\n w_1 = np.zeros(len(w_0))\n \n ascent = self._epsilon * y_minus_g\n \n for j in range(len(x)):\n w_1[j] = w_0[j] + x[j]*ascent\n \n return w_1", "def testSlopeDefault(self):\n self.assertEqual(\n (Decimal('1.0'), Decimal('1.0'), Decimal('1.0')),\n self.node.slope\n )", "def handle_slopes(self, slopeG):\n\n\n\n colSprite = pygame.sprite.spritecollideany(self, slopeG)\n if colSprite: #and self.rect.y < colSprite.rect.y:\n self.fall = False\n\n tl = colSprite.rect.topleft # used for slope calculation only\n br = colSprite.rect.bottomright\n\n m1 = float((br[1]-tl[1])/(br[0]-tl[0])) # y2-y1/(x2-x1)\n angle_rad = math.atan(m1) # from atan(m1 - m1 /(1+m1m2))\n # The angle is normally 45 degrees\n\n if self.x_vel:\n #le = self.x_vel / abs(self.x_vel) * 4\n le = self.x_vel\n else:\n le = 0\n\n x_move_len = le\n y_move_len = self.calc_vertical(x_move_len, angle_rad)\n\n # just for debugging\n self.d1 = x_move_len\n self.d2 = y_move_len\n\n # Now, it is needed to move the player down till\n # he reaches the 'essence' of the slope. 
This is because I\n # am too lazy to implement pixel-perfect collision.\n # Since this is to be done only once, a variable will be used\n # to keep track of whether this has beend donef for one slope or not\n\n # tolerance for height changing\n tol = False\n if abs(colSprite.rect.topleft[1] - self.rect.bottomleft[1]) <= 10:\n tol = True\n #print \"ABS \", abs(colSprite.rect.topleft[1] - self.rect.bottomleft[1])\n\n if not self.prev_slope and tol:\n self.prev_slope = True\n\n x_off_mov = colSprite.rect.topleft[0] - self.rect.bottomleft[0]\n y_off_mov = self.calc_vertical(x_off_mov, angle_rad)\n\n # handling for rightwards velocity\n if self.direction == RIGHT:\n y_off_mov = -y_off_mov\n\n\n self.rect.move_ip((0, y_off_mov))\n\n # check collision with any slope\n\n #self.rect.move_ip((x_move_len, y_move_len))\n # it seems that the above code is redundant; will check\n self.rect.move_ip((-self.x_vel, 0)) # undo the shifting\n self.rect.move_ip((x_move_len, y_move_len))\n\n else:\n self.prev_slope = False", "def calculate_y(y0,ey,ymax,PBC=True):\r\n\ty = y0 + dt*np.cumsum(ey)\r\n\twhile (y>ymax).any() or (y<0.0).any():\r\n\t\tidg = (y>ymax)\r\n\t\ty[idg] = 2*ymax - y[idg] if PBC else ymax\r\n\t\tidl = (y<0.0)\r\n\t\ty[idl] *= -1.0\r\n\treturn y", "def test_is_valid_fujita_rating_ef_leading_zero(self):\n\n self.assertTrue(\n tornado_io._is_valid_fujita_rating(EF_SCALE_RATING_LEADING_ZERO)\n )", "def test_nans_in_x_data(self):\n alpha = 0.9\n num_x = 150\n num_y = 100\n x_vals = np.linspace(0, 2 * np.pi, num_x)\n x_vals[45] = np.nan\n y_vals = (np.sin(x_vals[:num_y]) + 0.3 * np.random.randn(num_y) + 0.5)\n\n with pytest.raises(RuntimeError):\n extrapolated_lowess(x_vals, y_vals, alpha=alpha)", "def smooth_negative_labels(y):\n return y + (np.random.random(y.shape) * 0.1)", "def test_zero(self):\n controller = LinearController(self.G, 2, mode='zero')\n sim = simulation.Simulation(self.G, controller, dt=self.dt)\n sim.run(self.dt)\n\n self.assertAlmostEqual(np.linalg.norm(controller.W.ravel()), 0.0)\n self.assertAlmostEqual(np.linalg.norm(controller.out), 0.0)", "def zero_crossings(x):\n return np.array(np.where(np.diff(np.sign(x)))[0])", "def __init__(self):\n self.slope = -1.0\n self.last_obs = -1.0\n self.last_obs_ind = -1\n self._fitted = False", "def _correct_coefs(coefs):\n coefs = coefs.copy().flatten()\n coefs[coefs < 0] = 0\n coefs /= coefs.sum() # Normalize to sum to 1\n return coefs", "def absolute_trick(bias, slope, predictor, current_value, learning_rate):\n predicted_value = bias + slope*predictor\n if current_value > predicted_value:\n slope += learning_rate*predictor\n bias += learning_rate\n else:\n slope -= learning_rate*predictor\n bias -= learning_rate\n return slope, bias", "def test_find_append_zero_crossings():\n x = np.arange(11) * units.hPa\n y = np.array([3, 2, 1, -1, 2, 2, 0, 1, 0, -1, 2]) * units.degC\n x2, y2 = _find_append_zero_crossings(x, y)\n\n x_truth = np.array([0., 1., 2., 2.4494897, 3., 3.3019272, 4., 5.,\n 6., 7., 8., 9., 9.3216975, 10.]) * units.hPa\n y_truth = np.array([3, 2, 1, 0, -1, 0, 2, 2, 0, 1, 0, -1, 0, 2]) * units.degC\n assert_array_almost_equal(x2, x_truth, 6)\n assert_almost_equal(y2, y_truth, 6)", "def _test5():\n# import matplotlib.pyplot as plt\n from math import pi, cos, sin\n n = 800\n PI2 = 2.0*pi\n angle = PI2 / n\n pts = []\n r = 10.0\n for i in range(n):\n beta = i * angle\n x = r*cos(beta)\n y = r*sin(beta)\n pts.append((x, y))\n print (regress(pts))\n are_zero = are_residuals_near_zero(pts)", "def fixed_baseline(random_seeds, 
train_data, test_data):\n\n err = 0.\n\n for seed in random_seeds:\n np.random.seed(seed)\n\n data = prep_data(train_data, test_data)\n\n wrong = 0.\n for i in range(data['test_rows']):\n if not data['test_ys'][i] == 1.0:\n wrong += 1\n\n err += wrong / data['test_rows']\n\n return err / len(random_seeds)", "def test_init_vals_length_error_in_fit_mle(self):\n # Note there is only one beta, so we can't go lower than zero betas.\n original_intercept_ref_position = self.fake_intercept_ref_pos\n for intercept_ref_position in [None, original_intercept_ref_position]:\n self.base_clog.intercept_ref_position = intercept_ref_position\n for i in [1, -1]:\n # This will ensure we have too many or too few intercepts\n num_coefs = self.fake_betas.shape[0] + i\n\n # Test to ensure that the ValueError when using an\n # init_intercepts kwarg with an incorrect number of parameters\n self.assertRaisesRegexp(ValueError,\n \"dimension\",\n self.base_clog.fit_mle,\n np.arange(num_coefs),\n print_res=False)\n\n return None", "def model_fun(params, slope, x):\n w = params['w']\n t0 = params['t0']\n offset = params['offset']\n return offset + slope * jax.nn.sigmoid(jnp.dot(x, w) - t0)", "def test_zero_degree_polynomial(cls):\n\n MESSAGE = \"Degree of polynomial must be positive or null\"\n\n if cls.n_inputs == 1: # Test 1D polynomials\n p1 = cls(degree=0, c0=1)\n assert p1(0) == 1\n assert np.all(p1(np.zeros(5)) == np.ones(5))\n\n x = np.linspace(0, 1, 100)\n # Add a little noise along a straight line\n y = 1 + np.random.uniform(0, 0.1, len(x))\n\n p1_init = cls(degree=0)\n fitter = fitting.LinearLSQFitter()\n p1_fit = fitter(p1_init, x, y)\n\n # The fit won't be exact of course, but it should get close to within\n # 1%\n assert_allclose(p1_fit.c0, 1, atol=0.10)\n\n # Error from negative degree\n with pytest.raises(ValueError, match=MESSAGE):\n cls(degree=-1)\n elif cls.n_inputs == 2: # Test 2D polynomials\n if issubclass(cls, OrthoPolynomialBase):\n p2 = cls(x_degree=0, y_degree=0, c0_0=1)\n\n # different shaped x and y inputs\n a = np.array([1, 2, 3])\n b = np.array([1, 2])\n with mk.patch.object(\n PolynomialBase,\n \"prepare_inputs\",\n autospec=True,\n return_value=((a, b), mk.MagicMock()),\n ):\n with pytest.raises(\n ValueError, match=r\"Expected input arrays to have the same shape\"\n ):\n p2.prepare_inputs(mk.MagicMock(), mk.MagicMock())\n\n # Error from negative degree\n with pytest.raises(ValueError, match=MESSAGE):\n cls(x_degree=-1, y_degree=0)\n with pytest.raises(ValueError, match=MESSAGE):\n cls(x_degree=0, y_degree=-1)\n else:\n p2 = cls(degree=0, c0_0=1)\n\n # Error from negative degree\n with pytest.raises(ValueError, match=MESSAGE):\n cls(degree=-1)\n\n assert p2(0, 0) == 1\n assert np.all(p2(np.zeros(5), np.zeros(5)) == np.ones(5))\n\n y, x = np.mgrid[0:1:100j, 0:1:100j]\n z = (1 + np.random.uniform(0, 0.1, x.size)).reshape(100, 100)\n\n if issubclass(cls, OrthoPolynomialBase):\n p2_init = cls(x_degree=0, y_degree=0)\n else:\n p2_init = cls(degree=0)\n fitter = fitting.LinearLSQFitter()\n p2_fit = fitter(p2_init, x, y, z)\n\n assert_allclose(p2_fit.c0_0, 1, atol=0.10)", "def y_axis_left_zero(self) -> Optional[pulumi.Input[bool]]:\n return pulumi.get(self, \"y_axis_left_zero\")", "def init_standard_poly(basetensor, ind, x):\n ind.zeroAllBut(0, 0)\n basetensor[ind.all+ind.getCurrent()] = 1.\n for i in range(x.shape[1]):\n ind.zeroAllBut(i, 1)\n basetensor[ind.all + ind.getCurrent()] = x[:, i]", "def aerosols(self):\n return 1.", "def calc_error(self):\n if self._fit_data.y is not None and 
self._fit_data.y_fit is not None:\n self._fit_data.error_vector = self._fit_data.y - self._fit_data.y_fit", "def zero_calibrationn(self):\n self.link.write(self._calibrateZeroSequence)", "def zero_crossings(y_axis, window = 11):\n # smooth the curve\n length = len(y_axis)\n x_axis = np.asarray(range(length), int)\n \n # discard tail of smoothed signal\n y_axis = _smooth(y_axis, window)[:length]\n zero_crossings = np.where(np.diff(np.sign(y_axis)))[0]\n indices = [x_axis[index] for index in zero_crossings]\n \n # check if zero-crossings are valid\n diff = np.diff(indices)\n if diff.std() / diff.mean() > 0.2:\n print diff.std() / diff.mean()\n print np.diff(indices)\n raise(ValueError, \n \"False zero-crossings found, indicates problem {0} or {1}\".format(\n \"with smoothing window\", \"problem with offset\"))\n # check if any zero crossings were found\n if len(zero_crossings) < 1:\n raise(ValueError, \"No zero crossings found\")\n \n return indices \n # used this to test the fft function's sensitivity to spectral leakage\n #return indices + np.asarray(30 * np.random.randn(len(indices)), int)\n # Calculate X profile for page split", "def accuracy_on_zero(y_true, y_pred):\n \n y_true_flipped = 1 - y_true\n y_pred_flipped = 1- y_pred\n \n sum_true_negatives = K.sum(K.round(K.clip( y_true_flipped * y_pred_flipped, 0, 1)))\n \n sum_all_negatives = K.sum(K.round(K.clip(y_true_flipped, 0, 1)))\n \n acc_on_zero = sum_true_negatives / (sum_all_negatives + K.epsilon())\n return acc_on_zero", "def leoGiddingsFit(self,zeroX_to_LEO_limit,calib_zeroX_to_peak,calib_gauss_width):\r\n\r\n\t\t#run the scatteringPeakInfo method to retrieve various peak attributes \r\n\t\tself.scatteringPeakInfo()\r\n\t\t\r\n\t\t#get the baseline\r\n\t\tbaseline = self.scatteringBaseline\r\n\t\t\r\n\t\t#get the zero-crossing for the particle\r\n\t\tzero_crossing_pt_LEO = self.zeroCrossing(evap_threshold)\r\n\t\t\r\n\t\tif zero_crossing_pt_LEO < 0: #ie we can't find the zero crossing\r\n\t\t\tself.LF_scattering_amp = -2\r\n\t\t\tself.LF_baseline = -2\r\n\t\t\tself.LF_results = []\r\n\t\t\tself.LF_max_index = -2\r\n\t\t\tself.beam_center_pos = -2\r\n\t\t\t\r\n\t\telse:\r\n\t\t\t#LEO max index sets the x-limit for fitting based on the desired magnification factor\r\n\t\t\tLEO_max_index = int(round(zero_crossing_pt_LEO-zeroX_to_LEO_limit))\r\n\t\t\r\n\t\t\tself.LF_max_index = LEO_max_index\r\n\t\t\tLEO_min_index = 0\r\n\t\t\t\r\n\t\t\tx_vals_all = np.array(self.getAcqPoints()) + 1 #avoids divide by zero in Giddings fit\r\n\t\t\tself.LF_x_vals_to_use = x_vals_all[LEO_min_index:LEO_max_index]\r\n\r\n\t\t\ty_vals_all = np.array(self.getScatteringSignal())\r\n\t\t\tself.LF_y_vals_to_use = y_vals_all[LEO_min_index:LEO_max_index]\r\n\r\n\t\t\tself.beam_center_pos = zero_crossing_pt_LEO-calib_zeroX_to_peak\r\n\t\t\t\t\t\t\t\r\n\t\t\tdef LEOGiddings(x, a):\r\n\t\t\t\t# Note: scipy.special.iv is the modified Bessel function of the first kind\r\n\t\t\t\treturn baseline + (a/calib_gauss_width)*np.sqrt(self.beam_center_pos/x)*(scipy.special.iv(0,(2*np.sqrt(self.beam_center_pos*x)/calib_gauss_width))) * np.exp((-x-self.beam_center_pos)/calib_gauss_width)\r\n\r\n\t\t\t#run the fitting\r\n\t\t\ttry:\r\n\t\t\t\tpopt, pcov = curve_fit(LEOGiddings, self.LF_x_vals_to_use, self.LF_y_vals_to_use)\r\n\t\t\texcept:\r\n\t\t\t\tpopt, pcov = [-1], [np.nan] \r\n\t\r\n\r\n\t\t\tfit_result = []\r\n\t\t\tfor x in x_vals_all:\r\n\t\t\t\tfit_result.append(LEOGiddings(x,popt[0]))\r\n\t\t\tself.LF_results = fit_result\r\n\t\t\tself.LF_scattering_amp = 
np.max(fit_result)-baseline#popt[0] \r\n\t\t\tself.LF_baseline = np.nan", "def hinge(datax,datay,w):\n #hinge: si bien classifie, -y<w.x><0 sinon >0. On ne compte celui sont mal-classifies\n fx = np.dot(datax, w.T)\n return np.mean(np.where(-datay*fx<0,0,-datay*fx))", "def fun(params, slope, data):\n x, y_true = data\n return y_true - model_fun(params, slope, x)", "def zero_crosser(indicator: pd.Series) -> pd.Series:\n indicator = indicator.fillna(0)\n return (((indicator.shift() * indicator) <= 0) * np.sign(indicator)).astype(int)", "def testAlphaZeroNllsMatchACauchyDistribution(self):\n x = jnp.linspace(-10, 10, 1000)\n scale = 1.7\n nll = self.variant(self._distribution.nllfun)(x, 0, scale)\n nll_true = -scipy.stats.cauchy(0, scale * jnp.sqrt(2)).logpdf(x)\n chex.assert_tree_all_close(nll, nll_true, atol=1e-5, rtol=1e-5)", "def zzX_zero_of(f, d=0):\n return zzX_zero(poly_level(f)-d)", "def update_weights_negative(self):\n eta = self.config.eta\n self.w_xh -= eta * (self.x.T @ self.h)\n self.w_th -= eta * (self.t.T @ self.h)\n self.w_ho -= eta * (self.h.T @ self.o) \n self.w_hz -= eta * (self.h.T @ self.z)", "def test_y0_provided(self):\r\n dev = qml.device(\"default.qubit\", wires=2)\r\n\r\n with qml.tape.JacobianTape() as tape:\r\n qml.RX(0.543, wires=[0])\r\n qml.RY(-0.654, wires=[0])\r\n qml.expval(qml.PauliZ(0))\r\n\r\n f0 = dev.execute(tape)\r\n tapes, fn = finite_diff(tape, approx_order=1, f0=f0)\r\n\r\n # one tape per parameter, plus one global call\r\n assert len(tapes) == tape.num_params", "def linear_slope_fit(wf, mean_y, sigma_y, slope, intercept):\n\n sum_x = sum_x2 = sum_xy = sum_y = mean_y[0] = sigma_y[0] = 0\n isum = len(wf)\n\n for i,value in enumerate(wf):\n sum_x += i \n sum_x2 += i**2\n sum_xy += (value * i)\n sum_y += value\n mean_y += (value-mean_y) / (i+1)\n sigma_y += (value-mean_y)**2\n\n\n sigma_y /= (isum + 1)\n np.sqrt(sigma_y, sigma_y)\n\n\n slope[0] = (isum * sum_xy - sum_x * sum_y) / (isum * sum_x2 - sum_x * sum_x)\n intercept[0] = (sum_y - sum_x * slope[0])/isum", "def e0_xy_correction(map : ASectorMap ,\n norm_strat : norm_strategy = norm_strategy.max):\n normalization = get_normalization_factor(map , norm_strat)\n get_xy_corr_fun = maps_coefficient_getter (map.mapinfo, map.e0)\n def geo_correction_factor(x : np.array,\n y : np.array) -> np.array:\n return correct_geometry_(get_xy_corr_fun(x,y))* normalization\n return geo_correction_factor", "def y0(self):\n return self._y0", "def y0(self):\n return self.params['y0']", "def test_wrong_gradients_raises_assertion(self):\n model = PoincareModel(self.data, negative=3)\n model._loss_grad = Mock(return_value=np.zeros((2 + model.negative, model.size)))\n with self.assertRaises(AssertionError):\n model.train(epochs=1, batch_size=1, check_gradients_every=1)", "def __init__(self, neg_slope):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n\n\n\n self.neg_slope = neg_slope\n\n\n\n ########################\n # END OF YOUR CODE #\n #######################", "def test_init_intercepts_length_error_in_fit_mle(self):\n # Create a variable for the arguments to the fit_mle function.\n # Note `None` is the argument passed when using the init_intercepts\n # and init_coefs keyword arguments.\n fit_args = [None]\n\n # Create base set of kwargs for fit_mle function\n kwargs = {\"init_coefs\": self.fake_betas,\n \"print_res\": False}\n\n for i in [1, -1]:\n # This will ensure we have too many or too few intercepts\n num_intercepts = self.fake_intercepts.shape[0] + i\n kwargs[\"init_intercepts\"] 
= np.arange(num_intercepts)\n\n # Test to ensure that the ValueError is raised when using an\n # init_intercepts kwarg with an incorrect number of parameters\n self.assertRaises(ValueError, self.base_clog.fit_mle,\n *fit_args, **kwargs)\n\n return None", "def test_zero_bootstrapping(self, num_classes: int):\n\n loss_fn = single_index.XentLoss(num_classes)\n batch_size = 4\n batch = base.Batch(\n x=np.expand_dims(np.arange(batch_size), 1),\n y=np.random.random_integers(0, num_classes - 1, size=(batch_size, 1)),\n data_index=np.expand_dims(np.arange(batch_size), 1),\n weights=np.zeros([batch_size, 1]),\n )\n\n # Test when apply always return a uniform distribution over labels\n apply = lambda p, x, i: np.ones(shape=(x.shape[0], num_classes))\n loss, unused_metrics = loss_fn(\n apply=apply, params=self._params, batch=batch, index=self._index)\n self.assertEqual(\n loss, 0.0, ('expected loss with zero bootstrapping weights to be zero, '\n f'but it is {loss}'))", "def fit_slope_1d(X,Y):\n Sx = np.sum(X)\n Sy = np.sum(Y)\n Sxx = np.sum(np.power(X,2))\n Sxy = np.sum(X*Y)\n Syy = np.sum(np.power(Y,2)) \n n = len(X)*1.\n slope = (n*Sxy - Sx*Sy)/(n*Sxx-Sx**2)\n alpha = Sy/n - slope*Sx/n\n return slope, alpha", "def calculate_zero_cross_rate(series):\n series_mean = np.mean(series)\n series = [v - series_mean for v in series]\n zero_cross_count = (np.diff(np.sign(series)) != 0).sum()\n return zero_cross_count / len(series)", "def test__repeated_median_catch_division_by_zero(repeated_median):\n *_, divzero_x, divzero_y = repeated_median\n assert repeated_median_slope(divzero_x, divzero_y) == 1.0", "def __init__(self, neg_slope):\n\n ########################\n # PUT YOUR CODE HERE #\n #######################\n self.neg_slope = neg_slope\n self.input_cache = None\n ########################\n # END OF YOUR CODE #\n #######################", "def NoisySlope(x,y, dx, plot=None):\n # compute slopes\n slopes = []\n synth = np.zeros(len(x))\n x0 = x[0]\n xmax = x.max()\n\n # default slope\n def_slope = (y[-1]-y[0])/(x[-1]-x[0])\n while x0+dx<xmax:\n w = np.where((x>=x0)*(x<=x0+dx))\n if len(w[0])>10:\n coef = np.polyfit(x[w]-x[w].mean(), y[w], 1)\n slopes.append(coef[0])\n synth[w] = np.polyval(coef, x[w]-x[w].mean())\n else:\n if len(w[0])>0:\n synth[w] = y[w][0]+def_slope*(x[w]-x[w][0])\n x0 += dx\n\n # sigma clipping\n slopes = np.array(slopes)\n t = np.linspace(slopes.min(), slopes.max(), 500)\n s = slopes.argsort()\n slopes = slopes[s]\n r = slopes[int(len(slopes)*.7)]-slopes[int(len(slopes)*.3)]\n w = np.where((slopes>=(np.median(slopes)-2*r))*\\\n (slopes<=(np.median(slopes)+2*r)))\n guess = [0.0, 0.0, np.median(slopes), r]\n best = myfit.fit(myfit.erf, slopes[w], guess, \\\n np.linspace(0,1,len(slopes))[w])\n best = best.leastsqfit()\n\n plt.figure(20)\n plt.clf()\n plt.plot(slopes*1e6, np.linspace(0,1,len(slopes)), 'ob')\n plt.plot(slopes[w]*1e6, np.linspace(0,1,len(slopes))[w], 'or')\n plt.plot(t*1e6, myfit.erf(t,best), 'r', linewidth=2)\n plt.xlim((best[2]-5*best[3])*1e6,\\\n (best[2]+5*best[3])*1e6)\n plt.xlabel('slope (um/s)')\n\n return (best[2],best[3], synth)", "def zero_norm(arr):\n arr = 2 * (arr - min(arr)) / (max(arr) - min(arr)) - 1\n return arr - np.sum(arr) / len(arr)", "def test_PredictionEnsemble_smooth_None(\r\n perfectModelEnsemble_initialized_control_1d_ym_cftime,\r\n):\r\n pm = perfectModelEnsemble_initialized_control_1d_ym_cftime\r\n pm_smoothed = pm.smooth(None)\r\n assert_PredictionEnsemble(pm, pm_smoothed)", "def formula_0(x: np.ndarray) -> np.ndarray:\n logx = np.log(x)\n 
denom = x - 1\n k0 = (x - logx * x - 1) / denom\n return k0" ]
[ "0.61061126", "0.5953062", "0.5737827", "0.5657327", "0.56395733", "0.56212384", "0.5598636", "0.558573", "0.55028105", "0.5377003", "0.53153276", "0.5299184", "0.52155274", "0.52150947", "0.5214609", "0.5210322", "0.52011937", "0.5195882", "0.51850694", "0.5184016", "0.5161594", "0.5149885", "0.51342463", "0.5130136", "0.51177347", "0.5021125", "0.5021125", "0.50197446", "0.50143087", "0.50131196", "0.4991711", "0.49904516", "0.4983365", "0.49773583", "0.49698606", "0.49690804", "0.49657696", "0.4946001", "0.49392942", "0.49315557", "0.49232957", "0.49210867", "0.49179593", "0.49164388", "0.48947844", "0.4886675", "0.48836887", "0.48832315", "0.48785064", "0.48680747", "0.4863717", "0.48566017", "0.48536232", "0.4848468", "0.4835618", "0.4827457", "0.48210138", "0.48161885", "0.4801223", "0.48008335", "0.47959286", "0.47924757", "0.47910172", "0.47875008", "0.47750688", "0.477314", "0.4772114", "0.47711724", "0.47660536", "0.47597596", "0.47590104", "0.47520363", "0.4734945", "0.4734138", "0.47290692", "0.47259548", "0.47234258", "0.47171393", "0.47118706", "0.47078735", "0.47068343", "0.47065902", "0.4704764", "0.46891892", "0.4687773", "0.46853852", "0.46829256", "0.46804616", "0.4675477", "0.46743616", "0.46661332", "0.4665546", "0.4658609", "0.46541867", "0.46533725", "0.465131", "0.4649526", "0.46484128", "0.46470562", "0.46465614", "0.46453163" ]
0.0
-1
Instance data: use_wsdl -- if True, try to construct the XML Instance from information in the WSDL.
def __init__(self, wsdl, service=None, port=None, tracefile=None, typesmodule=None, nsdict=None, soapAction=None, ns=None, op_ns=None, use_wsdl=False): if not hasattr(wsdl, 'targetNamespace'): wsdl = wstools.WSDLTools.WSDLReader().loadFromURL(wsdl) # for item in wsdl.types.items(): # self._serializer.loadSchema(item) self._service = wsdl.services[service or 0] self.__doc__ = self._service.documentation self._port = self._service.ports[port or 0] self._name = self._service.name self._wsdl = wsdl self._tracefile = tracefile self._typesmodule = typesmodule self._nsdict = nsdict or {} self._soapAction = soapAction self._ns = ns self._op_ns = op_ns self._use_wsdl = use_wsdl binding = self._port.getBinding() portType = binding.getPortType() for item in portType.operations: callinfo = wstools.WSDLTools.callInfoFromWSDL(self._port, item.name) method = MethodProxy(self, callinfo) setattr(self, item.name, method)
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _prepare_wsdl_objects(self):\r\n # This holds some optional options for the request..\r\n self.AddressValidationOptions = self.client.factory.create('AddressValidationOptions')\r\n \r\n # This is good to review if you'd like to see what the data structure\r\n # looks like.\r\n self.logger.debug(self.AddressValidationOptions)", "def create_wsdl_object_of_type(self, type_name):\r\n return self.client.factory.create(type_name)", "def __prepare_wsdl_objects(self):\r\n pass", "def get_instance(self, instance_type, name, subdir=\"\"):\r\n\r\n def read_data(data_syntax, file_name, sub_dir=\"\"):\r\n alns_data = pickle.load(open(join(self.base_path, data_syntax, \"data\", sub_dir, file_name), 'rb'))\r\n return alns_data\r\n\r\n if instance_type == \"simple\":\r\n nr_vehicles = 2\r\n nr_nodes = 4\r\n nr_customers = 3\r\n load_bucket_size = 10\r\n demand = [110, 100, 150]\r\n service_times = [10, 10, 10]\r\n start_window = [0, 0, 0]\r\n end_window = [100, 100, 100]\r\n\r\n distance_matrix = [[0, 3, 2, 3], [3, 0, 1.75, 4], [2, 1.75, 0, 2.5], [3, 4, 2.5, 0]]\r\n elevation_matrix = [[0.1, 0.2, 0, 0], [0, 0, 0.3, 0], [0, 0, 0, 0], [0, 0, 0, 0]]\r\n\r\n return ALNSData(nr_veh=nr_vehicles,\r\n nr_nodes=nr_nodes,\r\n nr_customers=nr_customers,\r\n demand=demand,\r\n service_times=service_times,\r\n start_window=start_window,\r\n end_window=end_window,\r\n elevation_m=elevation_matrix,\r\n distance_m=distance_matrix,\r\n load_bucket_size=load_bucket_size)\r\n\r\n try:\r\n return read_data(self.code_lookup[instance_type], file_name=name, sub_dir=subdir)\r\n except KeyError:\r\n raise ValueError(\"instance_type is not known: Valid inputs are: pirmin, solomon, cordeau, homberger\")", "def __init__(__self__, *,\n wsdl_endpoint_name: Optional[pulumi.Input[str]] = None,\n wsdl_service_name: Optional[pulumi.Input[str]] = None):\n if wsdl_endpoint_name is not None:\n pulumi.set(__self__, \"wsdl_endpoint_name\", wsdl_endpoint_name)\n if wsdl_service_name is not None:\n pulumi.set(__self__, \"wsdl_service_name\", wsdl_service_name)", "def get_instance_from_words(data):\n inst = Dataset.get_instance_template()\n inst[\"words\"] = data\n return inst", "def __init__(self, py_dict=None):\n super(RuntimeNicInfoSchema, self).__init__()\n self.set_data_type('xml')\n self.index = None\n self.label = None\n self.network = NetworkSchema()\n\n if py_dict is not None:\n self.get_object_from_py_dict(py_dict)", "def convertInstanceData(self, builder, typeName, data):\n\t\tif typeName not in self.instanceDataTypeMap:\n\t\t\traise Exception('Instance data type \"' + typeName + '\" hasn\\'t been registered.')\n\n\t\tconvertedData = self.instanceDataTypeMap[typeName](self, data)\n\n\t\ttypeNameOffset = builder.CreateString(typeName)\n\t\tdataOffset = builder.CreateByteVector(convertedData)\n\n\t\tObjectData.Start(builder)\n\t\tObjectData.AddType(builder, typeNameOffset)\n\t\tObjectData.AddData(builder, dataOffset)\n\t\treturn ObjectData.End(builder)", "def _prepare_wsdl_objects(self):\r\n self.DeletionControlType = self.client.factory.create('DeletionControlType')\r\n self.TrackingId = self.client.factory.create('TrackingId')\r\n self.TrackingId.TrackingIdType = self.client.factory.create('TrackingIdType')", "def __init__(self, py_dict=None):\n super(ServiceManagerSchema, self).__init__()\n self.set_data_type('xml')\n self.name = None\n self.description = None\n self.revision = None\n self.objectTypeName = None\n self.vendorName = None\n self.vendorId = None\n self.thumbprint = None\n self.login = None\n self.password = None\n 
self.verifyPassword = None\n self.url = None\n self.restUrl = None\n self.status = None\n\n if py_dict is not None:\n self.get_object_from_py_dict(py_dict)", "def test_create_instance(self):\n engine = Engine(self.config_file, self.api_token)\n\n assert isinstance(engine, Engine) is True\n assert isinstance(engine.backend, Backend) is True\n assert isinstance(engine.backend, BossBackend) is True\n assert isinstance(engine.validator, Validator) is True\n assert isinstance(engine.validator, BossValidatorV02) is True\n assert isinstance(engine.config, Configuration) is True\n\n # Schema loaded\n assert isinstance(engine.config.schema, dict) is True\n assert engine.config.schema[\"type\"] == \"object\"", "def __init__(self, config_obj, wsdl_name, *args, **kwargs):\r\n self.logger = logging.getLogger('fedex')\r\n \"\"\"@ivar: Python logger instance with name 'fedex'.\"\"\"\r\n self.config_obj = config_obj\r\n \"\"\"@ivar: The FedexConfig object to pull auth info from.\"\"\"\r\n\r\n # If the config object is set to use the test server, point\r\n # suds at the test server WSDL directory.\r\n if config_obj.use_test_server:\r\n self.logger.info(\"Using test server.\")\r\n self.wsdl_path = os.path.join(config_obj.wsdl_path,\r\n 'test_server_wsdl', wsdl_name)\r\n else:\r\n self.logger.info(\"Using production server.\")\r\n self.wsdl_path = os.path.join(config_obj.wsdl_path, wsdl_name)\r\n\r\n self.client = Client('file:///%s' % self.wsdl_path.lstrip('/'))\r\n\r\n #print self.client\r\n\r\n self.VersionId = None\r\n \"\"\"@ivar: Holds details on the version numbers of the WSDL.\"\"\"\r\n self.WebAuthenticationDetail = None\r\n \"\"\"@ivar: WSDL object that holds authentication info.\"\"\"\r\n self.ClientDetail = None\r\n \"\"\"@ivar: WSDL object that holds client account details.\"\"\"\r\n self.response = None\r\n \"\"\"@ivar: The response from Fedex. You will want to pick what you\r\n want out here here. This object does have a __str__() method,\r\n you'll want to print or log it to see what possible values\r\n you can pull.\"\"\"\r\n self.TransactionDetail = None\r\n \"\"\"@ivar: Holds customer-specified transaction IDs.\"\"\"\r\n\r\n self.__set_web_authentication_detail()\r\n self.__set_client_detail()\r\n self.__set_version_id()\r\n self.__set_transaction_detail(*args, **kwargs)\r\n self._prepare_wsdl_objects()", "def __init__(self, use=True):\n self.use = use", "def _create_soap_object(self, name):\n return self.client.factory.create(name)", "def __init__(self, enable_gateway=False, topology_name=None, topologies=None, exposed_services=None, token_cert=None, gateway_type=None, sso_type=None):\n\n self._enable_gateway = None\n self._topology_name = None\n self._topologies = None\n self._exposed_services = None\n self._token_cert = None\n self._gateway_type = None\n self._sso_type = None\n\n if enable_gateway is not None:\n self.enable_gateway = enable_gateway\n if topology_name is not None:\n self.topology_name = topology_name\n if topologies is not None:\n self.topologies = topologies\n if exposed_services is not None:\n self.exposed_services = exposed_services\n if token_cert is not None:\n self.token_cert = token_cert\n if gateway_type is not None:\n self.gateway_type = gateway_type\n if sso_type is not None:\n self.sso_type = sso_type", "def given_an_instance() -> machine_learning.StationMachineLearning:\n # super weird bug? with pytest_bdd that hooked into isoformat on the coordinate and points fields.\n # tried forever to figure out why - and gave up in the end. 
removed coordinate and point from\n # feature file and just loading it in here.\n coordinate = _load_json_file(__file__, 'coordinate.json')\n points = _load_json_file(__file__, 'points.json')\n return machine_learning.StationMachineLearning(\n session=None,\n model=PredictionModel(id=1),\n grid=PredictionModelGridSubset(id=1),\n points=points,\n target_coordinate=coordinate,\n station_code=None,\n max_learn_date=datetime.now())", "def validate_is_instance(var: Any,\n var_name: str,\n instance_type: Any,\n class_name: Optional[str] = None,\n log_metadata_validation_failures: bool = True) -> None:\n if var is None:\n return\n splits = str(instance_type).split(\"<class \")[-1].split(\"'\")\n if len(splits) > 1:\n print_type = splits[1]\n else:\n print_type = splits[0]\n if log_metadata_validation_failures:\n if class_name is None:\n logging.debug(\n \"XAI Validation :: Metadata: Variable `%s` should be of type `%s`\",\n var_name, print_type)\n else:\n logging.debug(\n \"XAI Validation :: Metadata: [%s] Variable `%s` should be of type \"\n \"`%s`\", class_name, var_name, print_type)\n if not isinstance(var, instance_type):\n raise TypeError(\"{} must be of type {}. Got {}\".format(\n var_name, str(instance_type), str(type(var))))", "def __init__(self, data):\n self.jssp_instance_data = data", "def load_instance(self, instance_path, input_shapes):\n metadata = load_json(os.path.join(instance_path, 'instance.meta'))\n self.log('load metadata')\n\n instance_class_name = metadata[MODEL_METADATA_KEY_INSTANCE_CLASS_NAME]\n instance_source_path = metadata[MODEL_METADATA_KEY_INSTANCE_SOURCE_PATH]\n model = import_class_from_module_path(instance_source_path, instance_class_name)\n self.log('instance source code load')\n\n self.instance = model(metadata[MODEL_METADATA_KEY_INSTANCE_PATH])\n self.instance.load_model(metadata, input_shapes)\n self.log('load instance')\n\n instance_id = metadata[MODEL_METADATA_KEY_INSTANCE_ID]\n self.log('load instance id : %s' % instance_id)", "def _prepare_wsdl_objects(self):\r\n self.TrackPackageIdentifier = self.client.factory.create('TrackPackageIdentifier')\r\n # Default to tracking number.\r\n self.TrackPackageIdentifier.Type = 'TRACKING_NUMBER_OR_DOORTAG'", "def __init__(self, api_use=False):\n self.api_use = api_use", "def from_url(cls, wsdl_path):\n return cls(safe_parse_url(wsdl_path))", "def create_instance(c_instance):\n\treturn 0", "def __init__(self, services, tns):\r\n\r\n return super(DjangoSoapApp, self).__init__(Application(services, tns))", "def __init__(self, py_dict=None):\n super(EdgeNATRulesSchema, self).__init__()\n self.set_data_type('xml')\n self.natRule = EdgeNATRuleSchema()\n\n if py_dict is not None:\n self.get_object_from_py_dict(py_dict)", "def _prepare_wsdl_objects(self):\r\n\r\n\t# Default behavior is to not request transit information\r\n\tself.ReturnTransitAndCommit = False\r\n\r\n # This is the primary data structure for processShipment requests.\r\n self.RequestedShipment = self.client.factory.create('RequestedShipment')\r\n self.RequestedShipment.ShipTimestamp = datetime.now()\r\n \r\n TotalWeight = self.client.factory.create('Weight')\r\n # Start at nothing.\r\n TotalWeight.Value = 0.0\r\n # Default to pounds.\r\n TotalWeight.Units = 'LB'\r\n # This is the total weight of the entire shipment. 
Shipments may\r\n # contain more than one package.\r\n self.RequestedShipment.TotalWeight = TotalWeight\r\n \r\n # This is the top level data structure for Shipper information.\r\n ShipperParty = self.client.factory.create('Party')\r\n ShipperParty.Address = self.client.factory.create('Address')\r\n ShipperParty.Contact = self.client.factory.create('Contact')\r\n \r\n # Link the ShipperParty to our master data structure.\r\n self.RequestedShipment.Shipper = ShipperParty\r\n\r\n # This is the top level data structure for Recipient information.\r\n RecipientParty = self.client.factory.create('Party')\r\n RecipientParty.Contact = self.client.factory.create('Contact')\r\n RecipientParty.Address = self.client.factory.create('Address')\r\n \r\n # Link the RecipientParty object to our master data structure.\r\n self.RequestedShipment.Recipient = RecipientParty\r\n \r\n Payor = self.client.factory.create('Payor')\r\n # Grab the account number from the FedexConfig object by default.\r\n Payor.AccountNumber = self._config_obj.account_number\r\n # Assume US.\r\n Payor.CountryCode = 'US'\r\n \r\n ShippingChargesPayment = self.client.factory.create('Payment')\r\n ShippingChargesPayment.Payor = Payor\r\n\r\n self.RequestedShipment.ShippingChargesPayment = ShippingChargesPayment\r\n \r\n # ACCOUNT or LIST\r\n self.RequestedShipment.RateRequestTypes = ['ACCOUNT'] \r\n \r\n # Start with no packages, user must add them.\r\n self.RequestedShipment.PackageCount = 0\r\n self.RequestedShipment.RequestedPackageLineItems = []\r\n \r\n # This is good to review if you'd like to see what the data structure\r\n # looks like.\r\n self.logger.debug(self.RequestedShipment)", "def test_hidden_instantiate(self):\n context = self.framework.get_bundle_context()\n\n # Prepare random values\n hidden_value = random.randint(0, 100)\n public_value = random.randint(0, 100)\n\n # Instantiate the component\n with use_ipopo(context) as ipopo:\n svc = ipopo.instantiate(self.module.FACTORY_HIDDEN_PROPS, NAME_A,\n {\"hidden.prop\": hidden_value,\n \"public.prop\": public_value})\n\n # Check default values (and accesses)\n self.assertEqual(svc.hidden, hidden_value)\n self.assertEqual(svc.public, public_value)\n\n # Check instance details\n with use_ipopo(context) as ipopo:\n details = ipopo.get_instance_details(NAME_A)\n\n self.assertNotIn(\"hidden.prop\", details[\"properties\"])", "def make_instance(self, data, **kwargs):\n instance = self.instance or self.get_instance(data)\n if instance is not None:\n for key, value in iteritems(data):\n setattr(instance, key, value)\n return instance\n kwargs, association_attrs = self._split_model_kwargs_association(data)\n instance = self.opts.model(**kwargs)\n for attr, value in iteritems(association_attrs):\n setattr(instance, attr, value)\n return instance", "def use_instance_table(self, name, typename):\n if typename in ['VkInstance', 'VkPhysicalDevice']:\n return True\n # vkSetDebugUtilsObjectNameEXT and vkSetDebugUtilsObjectTagEXT\n # need to be probed from GetInstanceProcAddress due to a loader issue.\n # https://github.com/KhronosGroup/Vulkan-Loader/issues/1109\n # TODO : When loader with fix for issue is widely available, remove this\n # special case.\n if name in ['vkSetDebugUtilsObjectNameEXT', 'vkSetDebugUtilsObjectTagEXT']:\n return True\n return False", "def create_instance(c_instance):\n return OpenLabs(c_instance)", "def build_from_mongo(cls, data, use_cls=True):\n # If a _cls is specified, we have to use this document class\n if use_cls and '_cls' in data:\n cls = 
cls.opts.instance.retrieve_embedded_document(data['_cls'])\n doc = cls()\n doc.from_mongo(data)\n return doc", "def from_file(cls, wsdl_path):\n return cls(safe_parse_path(wsdl_path))", "def ontology_instance(self,target=None,path=None,value=None,**kwargs):\n\n if target == None or path == None or value == None:\n print(\"Usage: ontology_instance target path value\",file=sys.stderr)\n sys.exit(2)\n\n force = kwargs.get('force')\n\n payload={\"path\":path,\"value\":value}\n if force:\n r=self.put(self.ONTOLOGY_INSTANCE_RT,target,payload,**kwargs)\n else:\n r=self.post(self.ONTOLOGY_INSTANCE_RT,None,target,payload,**kwargs)\n\n return r", "def add_instance(\n self,\n name,\n base_config_dir=None,\n main_configs=None,\n user_configs=None,\n dictionaries=None,\n macros=None,\n with_zookeeper=False,\n with_zookeeper_secure=False,\n with_mysql_client=False,\n with_mysql=False,\n with_mysql8=False,\n with_mysql_cluster=False,\n with_kafka=False,\n with_kerberized_kafka=False,\n with_kerberos_kdc=False,\n with_secrets=False,\n with_rabbitmq=False,\n with_nats=False,\n clickhouse_path_dir=None,\n with_odbc_drivers=False,\n with_postgres=False,\n with_postgres_cluster=False,\n with_postgresql_java_client=False,\n clickhouse_log_file=CLICKHOUSE_LOG_FILE,\n clickhouse_error_log_file=CLICKHOUSE_ERROR_LOG_FILE,\n with_hdfs=False,\n with_kerberized_hdfs=False,\n with_mongo=False,\n with_mongo_secure=False,\n with_meili=False,\n with_nginx=False,\n with_redis=False,\n with_minio=False,\n with_azurite=False,\n with_cassandra=False,\n with_jdbc_bridge=False,\n with_hive=False,\n with_coredns=False,\n allow_analyzer=True,\n hostname=None,\n env_variables=None,\n image=\"clickhouse/integration-test\",\n tag=None,\n stay_alive=False,\n ipv4_address=None,\n ipv6_address=None,\n with_installed_binary=False,\n external_dirs=None,\n tmpfs=None,\n zookeeper_docker_compose_path=None,\n minio_certs_dir=None,\n minio_data_dir=None,\n use_keeper=True,\n main_config_name=\"config.xml\",\n users_config_name=\"users.xml\",\n copy_common_configs=True,\n config_root_name=\"clickhouse\",\n extra_configs=[],\n ) -> \"ClickHouseInstance\":\n\n if self.is_up:\n raise Exception(\"Can't add instance %s: cluster is already up!\" % name)\n\n if name in self.instances:\n raise Exception(\n \"Can't add instance `%s': there is already an instance with the same name!\"\n % name\n )\n\n if tag is None:\n tag = self.docker_base_tag\n if not env_variables:\n env_variables = {}\n\n self.use_keeper = use_keeper\n\n # Code coverage files will be placed in database directory\n # (affect only WITH_COVERAGE=1 build)\n env_variables[\n \"LLVM_PROFILE_FILE\"\n ] = \"/var/lib/clickhouse/server_%h_%p_%m.profraw\"\n\n clickhouse_start_command = CLICKHOUSE_START_COMMAND\n if clickhouse_log_file:\n clickhouse_start_command += \" --log-file=\" + clickhouse_log_file\n if clickhouse_error_log_file:\n clickhouse_start_command += \" --errorlog-file=\" + clickhouse_error_log_file\n logging.debug(f\"clickhouse_start_command: {clickhouse_start_command}\")\n\n instance = ClickHouseInstance(\n cluster=self,\n base_path=self.base_dir,\n name=name,\n base_config_dir=base_config_dir\n if base_config_dir\n else self.base_config_dir,\n custom_main_configs=main_configs or [],\n custom_user_configs=user_configs or [],\n custom_dictionaries=dictionaries or [],\n macros=macros or {},\n with_zookeeper=with_zookeeper,\n zookeeper_config_path=self.zookeeper_config_path,\n with_mysql_client=with_mysql_client,\n with_mysql=with_mysql,\n with_mysql8=with_mysql8,\n 
with_mysql_cluster=with_mysql_cluster,\n with_kafka=with_kafka,\n with_kerberized_kafka=with_kerberized_kafka,\n with_kerberos_kdc=with_kerberos_kdc,\n with_rabbitmq=with_rabbitmq,\n with_nats=with_nats,\n with_nginx=with_nginx,\n with_kerberized_hdfs=with_kerberized_hdfs,\n with_secrets=with_secrets\n or with_kerberized_hdfs\n or with_kerberos_kdc\n or with_kerberized_kafka,\n with_mongo=with_mongo or with_mongo_secure,\n with_meili=with_meili,\n with_redis=with_redis,\n with_minio=with_minio,\n with_azurite=with_azurite,\n with_jdbc_bridge=with_jdbc_bridge,\n with_hive=with_hive,\n with_coredns=with_coredns,\n with_cassandra=with_cassandra,\n allow_analyzer=allow_analyzer,\n server_bin_path=self.server_bin_path,\n odbc_bridge_bin_path=self.odbc_bridge_bin_path,\n library_bridge_bin_path=self.library_bridge_bin_path,\n clickhouse_path_dir=clickhouse_path_dir,\n with_odbc_drivers=with_odbc_drivers,\n with_postgres=with_postgres,\n with_postgres_cluster=with_postgres_cluster,\n with_postgresql_java_client=with_postgresql_java_client,\n clickhouse_start_command=clickhouse_start_command,\n main_config_name=main_config_name,\n users_config_name=users_config_name,\n copy_common_configs=copy_common_configs,\n hostname=hostname,\n env_variables=env_variables,\n image=image,\n tag=tag,\n stay_alive=stay_alive,\n ipv4_address=ipv4_address,\n ipv6_address=ipv6_address,\n with_installed_binary=with_installed_binary,\n external_dirs=external_dirs,\n tmpfs=tmpfs or [],\n config_root_name=config_root_name,\n extra_configs=extra_configs,\n )\n\n docker_compose_yml_dir = get_docker_compose_path()\n\n self.instances[name] = instance\n if ipv4_address is not None or ipv6_address is not None:\n self.with_net_trics = True\n self.base_cmd.extend(\n [\"--file\", p.join(docker_compose_yml_dir, \"docker_compose_net.yml\")]\n )\n\n self.base_cmd.extend([\"--file\", instance.docker_compose_path])\n\n cmds = []\n if with_zookeeper_secure and not self.with_zookeeper_secure:\n cmds.append(\n self.setup_zookeeper_secure_cmd(\n instance, env_variables, docker_compose_yml_dir\n )\n )\n\n if with_zookeeper and not self.with_zookeeper:\n if self.use_keeper:\n cmds.append(\n self.setup_keeper_cmd(\n instance, env_variables, docker_compose_yml_dir\n )\n )\n else:\n cmds.append(\n self.setup_zookeeper_cmd(\n instance, env_variables, docker_compose_yml_dir\n )\n )\n\n if with_mysql_client and not self.with_mysql_client:\n cmds.append(\n self.setup_mysql_client_cmd(\n instance, env_variables, docker_compose_yml_dir\n )\n )\n\n if with_mysql and not self.with_mysql:\n cmds.append(\n self.setup_mysql_cmd(instance, env_variables, docker_compose_yml_dir)\n )\n\n if with_mysql8 and not self.with_mysql8:\n cmds.append(\n self.setup_mysql8_cmd(instance, env_variables, docker_compose_yml_dir)\n )\n\n if with_mysql_cluster and not self.with_mysql_cluster:\n cmds.append(\n self.setup_mysql_cluster_cmd(\n instance, env_variables, docker_compose_yml_dir\n )\n )\n\n if with_postgres and not self.with_postgres:\n cmds.append(\n self.setup_postgres_cmd(instance, env_variables, docker_compose_yml_dir)\n )\n\n if with_postgres_cluster and not self.with_postgres_cluster:\n cmds.append(\n self.setup_postgres_cluster_cmd(\n instance, env_variables, docker_compose_yml_dir\n )\n )\n\n if with_postgresql_java_client and not self.with_postgresql_java_client:\n cmds.append(\n self.setup_postgresql_java_client_cmd(\n instance, env_variables, docker_compose_yml_dir\n )\n )\n\n if with_odbc_drivers and not self.with_odbc_drivers:\n self.with_odbc_drivers 
= True\n if not self.with_mysql:\n cmds.append(\n self.setup_mysql_cmd(\n instance, env_variables, docker_compose_yml_dir\n )\n )\n\n if not self.with_postgres:\n cmds.append(\n self.setup_postgres_cmd(\n instance, env_variables, docker_compose_yml_dir\n )\n )\n\n if with_kafka and not self.with_kafka:\n cmds.append(\n self.setup_kafka_cmd(instance, env_variables, docker_compose_yml_dir)\n )\n\n if with_kerberized_kafka and not self.with_kerberized_kafka:\n cmds.append(\n self.setup_kerberized_kafka_cmd(\n instance, env_variables, docker_compose_yml_dir\n )\n )\n\n if with_kerberos_kdc and not self.with_kerberos_kdc:\n cmds.append(\n self.setup_kerberos_cmd(instance, env_variables, docker_compose_yml_dir)\n )\n\n if with_rabbitmq and not self.with_rabbitmq:\n cmds.append(\n self.setup_rabbitmq_cmd(instance, env_variables, docker_compose_yml_dir)\n )\n\n if with_nats and not self.with_nats:\n cmds.append(\n self.setup_nats_cmd(instance, env_variables, docker_compose_yml_dir)\n )\n\n if with_nginx and not self.with_nginx:\n cmds.append(\n self.setup_nginx_cmd(instance, env_variables, docker_compose_yml_dir)\n )\n\n if with_hdfs and not self.with_hdfs:\n cmds.append(\n self.setup_hdfs_cmd(instance, env_variables, docker_compose_yml_dir)\n )\n\n if with_kerberized_hdfs and not self.with_kerberized_hdfs:\n cmds.append(\n self.setup_kerberized_hdfs_cmd(\n instance, env_variables, docker_compose_yml_dir\n )\n )\n\n if (with_mongo or with_mongo_secure) and not (\n self.with_mongo or self.with_mongo_secure\n ):\n if with_mongo_secure:\n cmds.append(\n self.setup_mongo_secure_cmd(\n instance, env_variables, docker_compose_yml_dir\n )\n )\n else:\n cmds.append(\n self.setup_mongo_cmd(\n instance, env_variables, docker_compose_yml_dir\n )\n )\n\n if with_coredns and not self.with_coredns:\n cmds.append(\n self.setup_coredns_cmd(instance, env_variables, docker_compose_yml_dir)\n )\n\n if with_meili and not self.with_meili:\n cmds.append(\n self.setup_meili_cmd(instance, env_variables, docker_compose_yml_dir)\n )\n\n if self.with_net_trics:\n for cmd in cmds:\n cmd.extend(\n [\"--file\", p.join(docker_compose_yml_dir, \"docker_compose_net.yml\")]\n )\n\n if with_redis and not self.with_redis:\n cmds.append(\n self.setup_redis_cmd(instance, env_variables, docker_compose_yml_dir)\n )\n\n if with_minio and not self.with_minio:\n cmds.append(\n self.setup_minio_cmd(instance, env_variables, docker_compose_yml_dir)\n )\n\n if with_azurite and not self.with_azurite:\n cmds.append(\n self.setup_azurite_cmd(instance, env_variables, docker_compose_yml_dir)\n )\n\n if minio_certs_dir is not None:\n if self.minio_certs_dir is None:\n self.minio_certs_dir = minio_certs_dir\n else:\n raise Exception(\"Overwriting minio certs dir\")\n\n if minio_data_dir is not None:\n if self.minio_data_dir is None:\n self.minio_data_dir = minio_data_dir\n else:\n raise Exception(\"Overwriting minio data dir\")\n\n if with_cassandra and not self.with_cassandra:\n cmds.append(\n self.setup_cassandra_cmd(\n instance, env_variables, docker_compose_yml_dir\n )\n )\n\n if with_jdbc_bridge and not self.with_jdbc_bridge:\n cmds.append(\n self.setup_jdbc_bridge_cmd(\n instance, env_variables, docker_compose_yml_dir\n )\n )\n\n if with_hive:\n cmds.append(\n self.setup_hive(instance, env_variables, docker_compose_yml_dir)\n )\n\n logging.debug(\n \"Cluster name:{} project_name:{}. 
Added instance name:{} tag:{} base_cmd:{} docker_compose_yml_dir:{}\".format(\n self.name,\n self.project_name,\n name,\n tag,\n self.base_cmd,\n docker_compose_yml_dir,\n )\n )\n return instance", "def __init__(self):\n\t\tself.parsed = False\n\t\tdir_path = os.path.dirname(os.path.realpath(__file__))\n\t\tself.xsdfilename = os.path.join(dir_path, 'xml', 'schema.xsd')\n\t\tself.schema = 'schema.xsd'\n\t\tself.predictors = []\n\t\tself.predictors_types = []\n\t\tself.preprocessing_methods = []", "def _prepare_wsdl_objects(self):\r\n # This is the primary data structure for processShipment requests.\r\n self.RequestedShipment = self.client.factory.create('RequestedShipment')\r\n self.RequestedShipment.ShipTimestamp = datetime.now()\r\n \r\n TotalWeight = self.client.factory.create('Weight')\r\n # Start at nothing.\r\n TotalWeight.Value = 0.0\r\n # Default to pounds.\r\n TotalWeight.Units = 'LB'\r\n # This is the total weight of the entire shipment. Shipments may\r\n # contain more than one package.\r\n self.RequestedShipment.TotalWeight = TotalWeight\r\n \r\n # This is the top level data structure for Shipper information.\r\n ShipperParty = self.client.factory.create('Party')\r\n ShipperParty.Address = self.client.factory.create('Address')\r\n ShipperParty.Contact = self.client.factory.create('Contact')\r\n \r\n # Link the ShipperParty to our master data structure.\r\n self.RequestedShipment.Shipper = ShipperParty\r\n\r\n # This is the top level data structure for Recipient information.\r\n RecipientParty = self.client.factory.create('Party')\r\n RecipientParty.Contact = self.client.factory.create('Contact')\r\n RecipientParty.Address = self.client.factory.create('Address')\r\n \r\n # Link the RecipientParty object to our master data structure.\r\n self.RequestedShipment.Recipient = RecipientParty\r\n \r\n Payor = self.client.factory.create('Payor')\r\n # Grab the account number from the FedexConfig object by default.\r\n Payor.AccountNumber = self._config_obj.account_number\r\n # Assume US.\r\n Payor.CountryCode = 'US'\r\n \r\n ShippingChargesPayment = self.client.factory.create('Payment')\r\n ShippingChargesPayment.Payor = Payor\r\n\r\n self.RequestedShipment.ShippingChargesPayment = ShippingChargesPayment\r\n self.RequestedShipment.LabelSpecification = self.client.factory.create('LabelSpecification')\r\n # ACCOUNT or LIST\r\n self.RequestedShipment.RateRequestTypes = ['ACCOUNT'] \r\n \r\n # Start with no packages, user must add them.\r\n self.RequestedShipment.PackageCount = 0\r\n self.RequestedShipment.RequestedPackageLineItems = []\r\n \r\n # This is good to review if you'd like to see what the data structure\r\n # looks like.\r\n self.logger.debug(self.RequestedShipment)", "def __init__(self, name, pair_instance, dof_cls):\n self._name = name\n self._dof_cls = dof_cls\n self._pair_instance = pair_instance\n self._indexer = hoomd.data.parameterdicts._SmartTypeIndexer(2)\n self._data = {}", "def load_class_variables(cls, app_configs):\n cls.is_connected = False # Set is_connected to false initially\n \n validate_fields(['sdlp_host', 'sdlp_wsdl', 'sdlp_username', 'sdlp_password', 'sdlp_savedreportid', 'sdlp_incident_endpoint'], kwargs=app_configs)\n\n LOG.debug(\"Validated Mandatory app.configs for DLPSoapClient\")\n\n cls.host = app_configs.get('sdlp_host')\n cls.wsdl = app_configs.get('sdlp_wsdl')\n # Gather the DLP User Name\n cls.dlp_username = app_configs.get('sdlp_username')\n # Gather the DLP User Password\n cls.dlp_password = app_configs.get('sdlp_password')\n\n # Gather the DLP 
Cert\n cls.dlp_cert = app_configs.get('sdlp_cafile', False)\n\n # Gather the DLP Saved Report ID\n cls.dlp_saved_report_id = app_configs.get('sdlp_savedreportid')\n\n # Gather the DLP Incident Endpoint\n cls.sdlp_incident_endpoint = app_configs.get('sdlp_incident_endpoint')\n\n cls.session = Session()\n # Use DLP Cert if provided or if None, set verify to false\n cls.session.verify = cls.dlp_cert\n cls.session.auth = SymantecAuth(cls.dlp_username, cls.dlp_password, cls.host)\n\n\n mimefile_abs_path = os.path.join(\n os.path.dirname(os.path.realpath(__file__)),\n os.path.pardir, \"data\", \"xmlmime.xml\")\n # If the Xmlmime file was provided\n if os.path.isfile(mimefile_abs_path):\n LOG.info(\"A Local XML file was found in the data directory, %s \\n Loading this into the cache\", mimefile_abs_path)\n # Open it and add it to a Cache\n with open(mimefile_abs_path, mode=\"rb\") as f:\n filecontent = f.read()\n dlp_cache = InMemoryCache()\n dlp_cache.add(\"http://www.w3.org/2005/05/xmlmime\", filecontent)\n dlp_cache.add(\"https://www.w3.org/2005/05/xmlmime\", filecontent)\n # Setup Transport with credentials and the cached mimefile\n cls.transport = zeep.Transport(session=cls.session, cache=dlp_cache)\n else:\n # Setup Transport with our credentials\n cls.transport = zeep.Transport(session=cls.session)\n\n try: # Try to create a soap_client from the wsdl and transport\n cls.soap_client = zeep.Client(wsdl=cls.wsdl, transport=cls.transport)\n except Exception as caught_exc: # We got an error when setting up a client, catch and release the error in logs so circuits doesn't stop\n # Put the traceback into DEBUG\n LOG.debug(traceback.format_exc())\n # Log the Connection error to the user\n LOG.error(u\"Problem: %s\", repr(caught_exc))\n LOG.error(u\"[Symantec DLP] Encountered an exception when setting up the SOAP Client\")\n \n else: # No connection error, client is setup with the URL. 
Allow the poller to be setup\n cls.is_connected = True\n cls.class_vars_loaded = True", "def create_client(wsdl: str, raw_response: bool = True) -> CachingClient:\n # We want the raw response as there is an error when Zeep parses the XML\n settings: Settings = Settings(raw_response=raw_response)\n\n # Client that caches the WSDL\n client: CachingClient = CachingClient(\n wsdl=wsdl,\n # TODO: Store PW encrypted\n wsse=UsernameToken(\"n00394gz\", \"g427Ix19LMB\"),\n settings=settings,\n )\n logger.debug(f\"Client created\")\n\n return client", "def test_instantiation(self):\n classifier = WidgetClassifier()\n classifier.load_model()\n assert(classifier.encoder is not None)\n assert(classifier.cluster is not None)\n assert(classifier.telemetry_keys is not None)", "def from_dict(data, decomposer_=None, rx=None, ax=None):\n if decomposer_ is not None:\n decomposer = decomposer_\n else:\n decomposer = SOAPDecomposer(**data[\"decomposer\"])\n \n result = SOAPVector(data[\"P\"], decomposer)\n result.dcP = data[\"dcP\"]\n result.dnP = data[\"dnP\"]\n if rx is not None and data[\"rx\"] is None:# pragma: no cover\n result.rx = rx\n else:\n result.rx = data[\"rx\"]\n if ax is not None and data[\"ax\"] is None:# pragma: no cover\n result.ax = ax\n else:\n result.ax = data[\"ax\"]\n \n if data[\"cRDF\"] is not None:\n result.cRDF = DF(data[\"dcP\"], True, result.rx, decomposer,\n calculate=False)\n result.cRDF.df = data[\"cRDF\"]\n if data[\"nRDF\"] is not None:\n result.nRDF = DF(data[\"dnP\"], False, result.rx, decomposer,\n calculate=False)\n result.nRDF.df = data[\"nRDF\"]\n if data[\"cADF\"] is not None:\n result.cADF = DF(data[\"dcP\"], True, result.ax, decomposer,\n calculate=False)\n result.cADF.df = data[\"cADF\"]\n if data[\"nADF\"] is not None:\n result.nADF = DF(data[\"dnP\"], False, result.ax, decomposer,\n calculate=False)\n result.nADF.df = data[\"nADF\"]\n\n return result", "def test_instanceBuilder(self):\n instance = igwt.IInstanceFactory(ChangeType()).buildInstance()\n self.assertTrue(instance is not None)\n self.assertTrue(isinstance(instance, Change))", "def newInstance(self, isTraining):\r\n #-------------------------------------------------------\r\n # Training Data\r\n #-------------------------------------------------------\r\n if isTraining: \r\n if self.dataRef < (self.formatData.numTrainInstances-1):\r\n self.dataRef += 1\r\n self.currentTrainState = self.formatData.trainFormatted[self.dataRef][0]\r\n self.currentTrainPhenotype = self.formatData.trainFormatted[self.dataRef][1]\r\n\r\n else: #Once learning has completed an epoch (i.e. 
learning iterations though the entire dataset) it starts back at the first instance in the data)\r\n self.resetDataRef(isTraining)\r\n #-------------------------------------------------------\r\n # Testing Data\r\n #-------------------------------------------------------\r\n else: \r\n if self.dataRef < (self.formatData.numTestInstances-1):\r\n self.dataRef += 1\r\n self.currentTestState = self.formatData.testFormatted[self.dataRef][0]\r\n self.currentTestPhenotype = self.formatData.testFormatted[self.dataRef][1]", "def load(self, data, session=None, instance=None, transient=False, *args, **kwargs):\n self._session = session or self._session\n self._transient = transient or self._transient\n if not (self.transient or self.session):\n raise ValueError(\"Deserialization requires a session\")\n self.instance = instance or self.instance\n try:\n return super(ModelSchema, self).load(data, *args, **kwargs)\n finally:\n self.instance = None", "def get_single_ns_info(self, ns_inst, is_sol=False):\n if is_sol:\n nsInstance = {}\n nsInstance['id'] = ns_inst.id\n nsInstance['nsInstanceName'] = ns_inst.name\n nsInstance['nsInstanceDescription'] = ns_inst.description\n nsInstance['nsdId'] = ns_inst.nsd_id\n nsInstance['nsdInfoId'] = ns_inst.nspackage_id\n nsInstance['nsState'] = ns_inst.status\n if ns_inst.nsd_invariant_id:\n nsInstance['nsdInvariantId'] = ns_inst.nsd_invariant_id\n if ns_inst.flavour_id:\n nsInstance['flavourId'] = ns_inst.flavour_id\n # todo 'nsScaleStatus':{}\n # todo 'additionalAffinityOrAntiAffinityRule':{}\n logger.debug(\" test \")\n vnf_instance_list = self.get_vnf_infos(ns_inst.id, is_sol)\n if vnf_instance_list:\n nsInstance['vnfInstance'] = vnf_instance_list\n # todo 'pnfInfo': self.get_pnf_infos(ns_inst.id,is_sol),\n vl_list = self.get_vl_infos(ns_inst.id, is_sol)\n if vl_list:\n nsInstance['virtualLinkInfo'] = vl_list\n # todo 'vnffgInfo': self.get_vnffg_infos(ns_inst.id, ns_inst.nsd_model),\n # todo 'sapInfo':{},\n # todo nestedNsInstanceId\n logger.debug(\" test \")\n nsInstance['_links'] = {\n 'self': {'href': NS_INSTANCE_BASE_URI % ns_inst.id},\n 'instantiate': {'href': NS_INSTANCE_BASE_URI % ns_inst.id + '/instantiate'},\n 'terminate': {'href': NS_INSTANCE_BASE_URI % ns_inst.id + '/terminate'},\n 'update': {'href': NS_INSTANCE_BASE_URI % ns_inst.id + '/update'},\n 'scale': {'href': NS_INSTANCE_BASE_URI % ns_inst.id + '/scale'},\n 'heal': {'href': NS_INSTANCE_BASE_URI % ns_inst.id + '/heal'}\n }\n logger.debug(\" test \")\n return nsInstance\n return {\n 'nsInstanceId': ns_inst.id,\n 'nsName': ns_inst.name,\n 'description': ns_inst.description,\n 'nsdId': ns_inst.nsd_id,\n 'nsdInvariantId': ns_inst.nsd_invariant_id,\n 'vnfInfo': self.get_vnf_infos(ns_inst.id, is_sol),\n 'pnfInfo': self.get_pnf_infos(ns_inst.id),\n 'vlInfo': self.get_vl_infos(ns_inst.id, is_sol),\n 'vnffgInfo': self.get_vnffg_infos(ns_inst.id, ns_inst.nsd_model, is_sol),\n 'nsState': ns_inst.status}", "def _get_instance(self):", "def _get_instance(self):", "def create_instance(self, date):\n raise NotImplementedError", "def create_instance(self, **attrs):\n return self._create(_instance.Instance, **attrs)", "def create(self):\n raise WufooException(\"InstanceResource creation not supported\")", "def __init__(self):\n self.id = None\n \"\"\"\"true if individual services can be enabled/disabled\"\"\"\n self.canenableindividualservice = None\n \"\"\"\"the destination physical network\"\"\"\n self.destinationphysicalnetworkid = None\n \"\"\"\"the provider name\"\"\"\n self.name = None\n \"\"\"\"the physical 
network this belongs to\"\"\"\n self.physicalnetworkid = None\n \"\"\"\"services for this provider\"\"\"\n self.servicelist = None\n \"\"\"\"state of the network provider\"\"\"\n self.state = None", "def _is_dataclass_instance(obj):\n return hasattr(type(obj), '__dataclass_fields__')", "def do_instance_start(self, component_handle, instance_handle):\n logger.debug(\"RwdtstaskletPython: do_instance_start function called\")\n\n # Create an instance of DTS API - This object is needed by all DTS\n # member and query APIs directly or indirectly.\n # DTS invokes the callback to notify the tasklet that the DTS API instance is ready \n # for use.\n\n foo = Callback()\n #sub = SubscribeInsideXactExample(self)\n self.dts_api = RwDts.Api.new(self.taskletinfo, # tasklet object\n RwDtsToyTaskletYang.get_schema(), # Schema object\n foo.rwdts_tasklet_state_change_cb, # The callback for DTS state change\n #sub.rwdts_tasklet_state_change_cb,\n self) # user data in the callback - in this case self", "def __init__(self, dry_run=False):\n\t\t# TODO(jchaloup):\n\t\t# - inject the product together with buildsystem client\n\t\t# TODO(jchaloup):\n\t\t# - inject the act and replace it with datasource instead\n\t\t# so the artefact/data can be picked from more sources", "def prepare_instance(self, idx):\n instance = self.instances[idx]\n component = instance['components'][0]\n \n results = self.prepare_component(instance, component,instance['dec_per'])\n return results", "def GetInstance():\n pass", "def __init__(self):\n self.swagger_types = {\n 'id_conta': 'int',\n 'id_produto': 'int',\n 'id_pessoa': 'int',\n 'id_parentesco': 'int',\n 'tipo_portador': 'str',\n 'nome_impresso': 'str',\n 'id_tipo_cartao': 'int',\n 'flag_ativo': 'int',\n 'data_cadastro_portador': 'str',\n 'data_cancelamento_portador': 'str'\n }\n\n self.attribute_map = {\n 'id_conta': 'idConta',\n 'id_produto': 'idProduto',\n 'id_pessoa': 'idPessoa',\n 'id_parentesco': 'idParentesco',\n 'tipo_portador': 'tipoPortador',\n 'nome_impresso': 'nomeImpresso',\n 'id_tipo_cartao': 'idTipoCartao',\n 'flag_ativo': 'flagAtivo',\n 'data_cadastro_portador': 'dataCadastroPortador',\n 'data_cancelamento_portador': 'dataCancelamentoPortador'\n }\n\n self._id_conta = None\n self._id_produto = None\n self._id_pessoa = None\n self._id_parentesco = None\n self._tipo_portador = None\n self._nome_impresso = None\n self._id_tipo_cartao = None\n self._flag_ativo = None\n self._data_cadastro_portador = None\n self._data_cancelamento_portador = None", "def __init__(self, instance=None):\n self.instance = instance\n self.schema = None\n if self.instance:\n self.schema = surveys.SurveySchema(self.instance.survey)", "def _new_instance(cls, data, wcs, errors=None, **kwargs):\n return cls(data, wcs, errors=errors, **kwargs)", "def __init__(self, py_dict=None):\n super(TypeSchema, self).__init__()\n self.set_data_type('xml')\n\n self.typeName = None", "def __call__(self,setup_options=True, instantiate_options=True, verbose=False):\n model = self.setup(setup_options)\n model(instantiate_options, verbose)\n return model", "def __init__(self, py_dict=None):\n super(PagedSystemEventListSchema, self).__init__()\n self.set_data_type(\"xml\")\n self.dataPage = DataPageSchema()\n\n\n if py_dict is not None:\n self.get_object_from_py_dict(py_dict)", "def create_instance(self, data_defs):\n if not data_defs:\n msg = 'No data definitions are provided.'\n raise AppResponseException(msg)\n\n if (any(dd.source.name == 'packets' for dd in data_defs)\n and any(dd.source.name != 'packets' for dd 
in data_defs)):\n # Two report instance needs to be created, one uses 'npm.reports'\n # service, the other one uses 'npm.probe.reports' service\n # it would create unnecessary complexity to support this case\n # thus report error and let the user to create two separate\n # report instances\n\n msg = ('Both packets data source and non-packets data source are '\n 'being queried in this report, which is not supported. The '\n 'data source names include {}'\n .format(', '.join(set([dd.source.name\n for dd in data_defs]))))\n raise AppResponseException(msg)\n\n def _create_and_run(service_name, data_defs):\n\n config = dict(data_defs=[dd.to_dict() for dd in data_defs])\n logger.debug(\"Creating instance with data definitions %s\" % config)\n\n svcdef = self.appresponse.find_service(service_name)\n datarep = svcdef.bind('instances')\n resp = datarep.execute('create', _data=config)\n\n instance = ReportInstance(data=resp.data, datarep=resp)\n while not instance.is_complete():\n time.sleep(1)\n\n if instance.errors:\n err_msgs = ';\\n'.join(instance.errors)\n raise AppResponseException(err_msgs)\n\n return instance\n\n if data_defs[0].source.name == 'packets':\n # Needs to create a clip for for capture job packets source\n # Keep the clip till the instance is completed\n with self.appresponse.clips.create_clips(data_defs):\n # capture job data_defs are modified in place\n instance = _create_and_run(PACKETS_REPORT_SERVICE_NAME,\n data_defs)\n else:\n instance = _create_and_run(GENERAL_REPORT_SERVICE_NAME,\n data_defs)\n return instance", "def create(self, validated_data):\n security_groups = validated_data.pop('security_groups', [])\n internal_ips = validated_data.pop('internal_ips_set', [])\n floating_ips_with_subnets = validated_data.pop('floating_ips', [])\n spl = validated_data['service_project_link']\n ssh_key = validated_data.get('ssh_public_key')\n if ssh_key:\n # We want names to be human readable in backend.\n # OpenStack only allows latin letters, digits, dashes, underscores and spaces\n # as key names, thus we mangle the original name.\n safe_name = re.sub(r'[^-a-zA-Z0-9 _]+', '_', ssh_key.name)[:17]\n validated_data['key_name'] = '{0}-{1}'.format(ssh_key.uuid.hex, safe_name)\n validated_data['key_fingerprint'] = ssh_key.fingerprint\n\n flavor = validated_data['flavor']\n validated_data['flavor_name'] = flavor.name\n validated_data['cores'] = flavor.cores\n validated_data['ram'] = flavor.ram\n validated_data['flavor_disk'] = flavor.disk\n\n image = validated_data['image']\n validated_data['image_name'] = image.name\n validated_data['min_disk'] = image.min_disk\n validated_data['min_ram'] = image.min_ram\n\n system_volume_size = validated_data['system_volume_size']\n data_volume_size = validated_data.get('data_volume_size', 0)\n validated_data['disk'] = data_volume_size + system_volume_size\n\n instance = super(InstanceSerializer, self).create(validated_data)\n\n # security groups\n instance.security_groups.add(*security_groups)\n # internal IPs\n for internal_ip in internal_ips:\n internal_ip.instance = instance\n internal_ip.save()\n # floating IPs\n for floating_ip, subnet in floating_ips_with_subnets:\n _connect_floating_ip_to_instance(floating_ip, subnet, instance)\n # volumes\n volumes = []\n system_volume = models.Volume.objects.create(\n name='{0}-system'.format(instance.name[:143]), # volume name cannot be longer than 150 symbols\n service_project_link=spl,\n size=system_volume_size,\n image=image,\n image_name=image.name,\n bootable=True,\n )\n volumes.append(system_volume)\n\n 
if data_volume_size:\n data_volume = models.Volume.objects.create(\n name='{0}-data'.format(instance.name[:145]), # volume name cannot be longer than 150 symbols\n service_project_link=spl,\n size=data_volume_size,\n )\n volumes.append(data_volume)\n\n for volume in volumes:\n volume.increase_backend_quotas_usage()\n\n instance.volumes.add(*volumes)\n return instance", "def __init__(self, doc: Dict, validate: Optional[bool] = True):\n self.doc = doc\n if validate:\n jsonschema.validate(instance=doc, schema=DESCRIPTOR_SCHEMA)", "def __init__(self, use_spacy=True):\n self._use_spacy = use_spacy", "def configure_stp_instance(self, instance, **kwargs):\n pass", "def __init__(self, options):\n # option[0] = true means use all instances to calculate the w, w0.\n # option[0] = false means use SDG to calculate the w, w0.\n self.gradients_type = options[0]\n self.X_train = options[1]\n self.y_train = options[2]\n self.X_test = options[3]\n self.y_test = options[4]\n self.X = options[5]\n self.y = options[6]\n # initial w select zeros\n self.lamda = 0.1\n self.w_result = 0\n self.tim = []\n self.los_plt = []", "def test_load_str(self):\n o = \"soap\"\n cls = pytan3.adapters.load(o)\n assert issubclass(cls, pytan3.adapters.Soap)", "def __init__(self, mxd, servicename, url, datasources):\n self.name = servicename\n self.url = url\n self.mxd = mxd\n self._dbnames = sorted(datasources.keys())\n self._featureclasses = [y for x in datasources.values() for y in x]\n self._datastructure = datasources", "def __init__(self):\n self.swagger_types = {\n 'discovery': 'Discovery',\n 'groups': 'list[str]',\n 'labels': 'object'\n }\n\n self.attribute_map = {\n 'discovery': 'discovery',\n 'groups': 'groups',\n 'labels': 'labels'\n }\n\n self._discovery = None\n self._groups = None\n self._labels = None", "def make_instance(scene, features, params, instances, direction,\n focus_measures, classification, weight):\n evaluator = action_feature_evaluator(focus_measures, scene.step_count)\n\n # Randomly select only a subset of instances based on their weight.\n if params.outlierHandling == OutlierHandling.SAMPLING:\n if random.random() <= weight * params.uniformSamplingRate:\n instance = ( [ evaluator(feature) for _, feature in features ], \n classification )\n instances.append(instance)\n elif params.outlierHandling == OutlierHandling.WEIGHTING:\n if random.random() <= params.uniformSamplingRate:\n instance = ( [ evaluator(feature) for _, feature in features ], \n classification, weight )\n instances.append(instance)\n else:\n assert False", "def __init__(self, instance_or_wire):\n super(TimeSeries, self).__init__()\n\n self._collection = None\n self._data = None\n\n if isinstance(instance_or_wire, TimeSeries):\n # copy ctor\n # pylint: disable=protected-access\n self._collection = instance_or_wire._collection\n self._data = instance_or_wire._data\n\n elif isinstance(instance_or_wire, dict):\n if 'events' in instance_or_wire:\n # list of events dict(name='events', events=[list, of, events])\n\n self._collection = Collection(instance_or_wire.get('events', []))\n\n meta = copy.copy(instance_or_wire)\n meta.pop('events')\n\n self._data = self.build_metadata(meta)\n\n elif 'collection' in instance_or_wire:\n # collection dict(name='collection', collection=collection_obj)\n\n self._collection = instance_or_wire.get('collection', None)\n\n meta = copy.copy(instance_or_wire)\n meta.pop('collection')\n\n self._data = self.build_metadata(meta)\n\n elif 'columns' in instance_or_wire and 'points' in instance_or_wire:\n # coming 
from the wire format\n\n event_type = instance_or_wire.get('columns')[0]\n event_fields = instance_or_wire.get('columns')[1:]\n\n events = list()\n\n for i in instance_or_wire.get('points'):\n time = i[0]\n event_values = i[1:]\n data = dict(list(zip(event_fields, event_values)))\n try:\n events.append(self.event_type_map[event_type](time, data))\n except KeyError:\n msg = 'invalid event type {et}'.format(et=event_type)\n raise TimeSeriesException(msg)\n\n self._collection = Collection(events)\n\n meta = copy.copy(instance_or_wire)\n meta.pop('columns')\n meta.pop('points')\n\n self._data = self.build_metadata(meta)\n\n else:\n msg = 'unable to determine dict format'\n raise TimeSeriesException(msg)\n else:\n # unable to determine\n msg = 'arg must be a TimeSeries instance or dict'\n raise TimeSeriesException(msg)\n\n if self._collection.is_chronological() is not True:\n msg = 'Events supplied to TimeSeries constructor must be chronological'\n raise TimeSeriesException(msg)", "def __init__(self, py_dict=None):\n super(EventThresholdsSchema, self).__init__()\n self.set_data_type(\"xml\")\n self.cpu = CpuSchema()\n self.memory = MemorySchema()\n self.connectionsPerSecond = CpsSchema()\n\n\n if py_dict is not None:\n self.get_object_from_py_dict(py_dict)", "def restore_object(self, attrs, instance=None):\n if instance:\n # update existing instance\n instance.name = attrs.get('name', instance.name)\n instance.price = attrs.get('price', instance.price)\n\n # create new instance\n return Service(**attrs)", "def __init__(self, py_dict=None):\n super(BGPNeighbourSchema, self).__init__()\n self.set_data_type(\"xml\")\n self.bgpFilters = None\n self.holdDownTimer = None\n self.weight = None\n self.remoteAS = None\n self.protocolAddress = None\n self.forwardingAddress = None\n self.password = None\n self.ipAddress = None\n self.keepAliveTimer = None\n\n if py_dict is not None:\n self.get_object_from_py_dict(py_dict)", "def _create_dummy_wsdl(self, xsd):\n\n if (path.exists(xsd)):\n try:\n with open(xsd, 'r') as f:\n tns = search(r'targetNamespace=\"(.*)\"', f.read()).group(1)\n if ('\"' in tns):\n tns = tns[: tns.index('\"')]\n\n filename = xsd.split('/')[-1]\n wsdl = path.abspath(xsd)[:-3] + 'wsdl'\n\n with open(wsdl, 'w') as f:\n f.write(wsdl_tmpl.format(tns, tns, tns, filename))\n\n return wsdl\n except AttributeError as ex:\n raise ValueError('File {0} is not valid XSD'.format(xsd))\n\n else:\n raise ValueError('File {0} not found'.format(xsd))", "def startInstance(self, name=None,\n location=None,\n familyName=None,\n styleName=None,\n fileName=None,\n postScriptFontName=None,\n styleMapFamilyName=None,\n styleMapStyleName=None,\n\n ):\n if self.currentInstance is not None:\n # We still have the previous one open\n self.endInstance()\n instanceElement = ET.Element('instance')\n if name is not None:\n instanceElement.attrib['name'] = name\n if location is not None:\n locationElement = self._makeLocationElement(location)\n instanceElement.append(locationElement)\n if familyName is not None:\n instanceElement.attrib['familyname'] = familyName\n if styleName is not None:\n instanceElement.attrib['stylename'] = styleName\n if fileName is not None:\n instanceElement.attrib['filename'] = self._posixPathRelativeToDocument(fileName)\n if postScriptFontName is not None:\n instanceElement.attrib['postscriptfontname'] = postScriptFontName\n if styleMapFamilyName is not None:\n instanceElement.attrib['stylemapfamilyname'] = styleMapFamilyName\n if styleMapStyleName is not None:\n 
instanceElement.attrib['stylemapstylename'] = styleMapStyleName\n\n self.currentInstance = instanceElement", "def test_create(self):\n from supvisors.statistics import StatisticsInstance\n instance = StatisticsInstance(17, 10)\n # check attributes\n self.assertEqual(3, instance.period)\n self.assertEqual(10, instance.depth)\n self.assertEqual(-1, instance.counter)\n self.assertIsNone(instance.ref_stats)\n self.assertIs(list, type(instance.cpu))\n self.assertFalse(instance.cpu)\n self.assertIs(list, type(instance.mem))\n self.assertFalse(instance.mem)\n self.assertIs(dict, type(instance.io))\n self.assertFalse(instance.io)\n self.assertIs(dict, type(instance.proc))\n self.assertFalse(instance.proc)", "def _fill_instance_child(self, xmldoc, element_name, return_type):\n xmlelements = self._get_child_nodes(xmldoc, self._get_serialization_name(element_name))\n\n if not xmlelements:\n return None\n\n return_obj = return_type()\n self._fill_data_to_return_object(xmlelements[0], return_obj)\n\n return return_obj", "def instantiate(self, features=()):\n\n if self._cls and not self._tool:\n tool = self._cls.instance(features)\n return getattr(tool, self.name)\n else:\n return self", "def createService(data):\n return Service(data).create()", "def __init__(self, name=None, description=None, address_1=None, address_2=None, city=None, county=None, postcode=None, country_id=None, currency_id=None, business_email=None, business_phone=None, vat_number=None, registration_number=None, latitude=None, longitude=None, timezone=None, contact=None, website=None):\n self.swagger_types = {\n 'name': 'str',\n 'description': 'str',\n 'address_1': 'str',\n 'address_2': 'str',\n 'city': 'str',\n 'county': 'str',\n 'postcode': 'str',\n 'country_id': 'int',\n 'currency_id': 'int',\n 'business_email': 'str',\n 'business_phone': 'str',\n 'vat_number': 'str',\n 'registration_number': 'str',\n 'latitude': 'float',\n 'longitude': 'float',\n 'timezone': 'str',\n 'contact': 'str',\n 'website': 'str'\n }\n\n self.attribute_map = {\n 'name': 'name',\n 'description': 'description',\n 'address_1': 'address_1',\n 'address_2': 'address_2',\n 'city': 'city',\n 'county': 'county',\n 'postcode': 'postcode',\n 'country_id': 'country_id',\n 'currency_id': 'currency_id',\n 'business_email': 'business_email',\n 'business_phone': 'business_phone',\n 'vat_number': 'vat_number',\n 'registration_number': 'registration_number',\n 'latitude': 'latitude',\n 'longitude': 'longitude',\n 'timezone': 'timezone',\n 'contact': 'contact',\n 'website': 'website'\n }\n\n self._name = name\n self._description = description\n self._address_1 = address_1\n self._address_2 = address_2\n self._city = city\n self._county = county\n self._postcode = postcode\n self._country_id = country_id\n self._currency_id = currency_id\n self._business_email = business_email\n self._business_phone = business_phone\n self._vat_number = vat_number\n self._registration_number = registration_number\n self._latitude = latitude\n self._longitude = longitude\n self._timezone = timezone\n self._contact = contact\n self._website = website", "def create_custom():\n # Extract initialisation parameters\n alpha = request.args.get('alpha')\n alpha = float(alpha)\n generations = request.args.get('generations')\n generations = int(generations)\n beta = request.args.get('beta')\n beta = float(beta)\n pec = request.args.get('pec')\n pec = float(pec)\n q = request.args.get('q')\n q = float(q)\n\n # Extract the custom coordinates and create a list of nodes\n coords = 
request.args.get('custom_coords')\n coords = str(coords)\n nodes = custom_nodes(coords)\n\n # Initialise instance\n i = Instance(nodes, alpha, beta, pec, q)\n\n return jsonify(nodes=i.nodes, alpha=i.alpha, beta=i.beta, decay=i.decay,\n min_pheromone=i.min_pheromone, q=i.q,\n local_deposit=i.local_deposit, distances=i.distances,\n pheromones=i.pheromones, ants=i.ants, shortest_path=i.shortest_path,\n min_distance=i.min_distance, message=\"Instance Initialised\")", "def __init__(self, **kwargs):\n source = kwargs\n if 'xml_element' in kwargs:\n source = kwargs.get('xml_element')\n validate_xml_element(source, self.ELEMENT_TAG)\n\n self.tag = source.get('tag', '')\n\n # Some versions of Stop have additional attributes. Only add them to the class if they exist.\n title = source.get('title')\n if title:\n self.title = title\n\n short_title = source.get('shortTitle')\n if short_title:\n self.short_title = short_title\n\n lat = source.get('lat')\n if lat:\n self.lat = lat\n\n lon = source.get('lon')\n if lon:\n self.lon = lon", "def __init__(self, file_name,\n word_vocab,\n max_sent_len,\n num_class):\n self.examples = examples = ParseExample.load_data(file_name)\n\n instance_id_list = []\n warrant0_list = []\n warrant1_list = []\n correct_label_w0_or_w1_list = []\n reason_list = []\n claim_list = []\n debate_meta_data_list = []\n\n warrant0_len = []\n warrant1_len = []\n reason_len = []\n claim_len = []\n debate_meta_data_len = []\n\n for example in examples:\n warrant0, warrant1, reason, claim, debate_meta_data, negclaim = example.get_six(type=WORD_TYPE)\n instance_id = example.get_id()\n correct_label_w0_or_w1 = example.get_label()\n\n # convert to the ids\n warrant0 = data_utils.sent_to_index(warrant0, word_vocab)\n warrant1 = data_utils.sent_to_index(warrant1, word_vocab)\n reason = data_utils.sent_to_index(reason, word_vocab)\n claim = data_utils.sent_to_index(claim, word_vocab)\n debate_meta_data = data_utils.sent_to_index(debate_meta_data, word_vocab)\n correct_label_w0_or_w1 = data_utils.onehot_vectorize(correct_label_w0_or_w1, num_class)\n\n warrant0_len.append(min(len(warrant0), max_sent_len))\n warrant1_len.append(min(len(warrant1), max_sent_len))\n reason_len.append(min(len(reason), max_sent_len))\n claim_len.append(min(len(claim), max_sent_len))\n debate_meta_data_len.append(min(len(debate_meta_data), max_sent_len))\n\n # add to the result\n instance_id_list.append(instance_id)\n warrant0_list.append(warrant0)\n warrant1_list.append(warrant1)\n correct_label_w0_or_w1_list.append(correct_label_w0_or_w1)\n reason_list.append(reason)\n claim_list.append(claim)\n debate_meta_data_list.append(debate_meta_data)\n\n warrant0_list = data_utils.pad_2d_matrix(warrant0_list, max_sent_len)\n warrant1_list = data_utils.pad_2d_matrix(warrant1_list, max_sent_len)\n reason_list = data_utils.pad_2d_matrix(reason_list, max_sent_len)\n claim_list = data_utils.pad_2d_matrix(claim_list, max_sent_len)\n debate_meta_data_list = data_utils.pad_2d_matrix(debate_meta_data_list, max_sent_len)\n\n # text\n self.instance_id_list = np.array(instance_id_list)\n # float\n self.correct_label_w0_or_w1_list = np.array(correct_label_w0_or_w1_list, dtype=np.float32)\n # int\n self.warrant0_list = np.array(warrant0_list, dtype=np.int32)\n self.warrant1_list = np.array(warrant1_list, dtype=np.int32)\n self.reason_list = np.array(reason_list, dtype=np.int32)\n self.claim_list = np.array(claim_list, dtype=np.int32)\n self.debate_meta_data_list = np.array(debate_meta_data_list, dtype=np.int32)\n\n self.warrant0_len = 
np.array(warrant0_len, dtype=np.int32)\n self.warrant1_len = np.array(warrant1_len, dtype=np.int32)\n self.reason_len = np.array(reason_len, dtype=np.int32)\n self.claim_len = np.array(claim_len, dtype=np.int32)\n self.debate_meta_data_len = np.array(debate_meta_data_len, dtype=np.int32)\n\n self.do_id = word_vocab['do']\n\n # obtain the diff part\n diff_warrant0_list = []\n diff_warrant1_list = []\n diff_claim_list = []\n diff_warrant0_len_list = []\n diff_warrant1_len_list = []\n diff_claim_len_list = []\n id = 0\n for example in examples:\n warrant0, warrant1, reason, claim, debate_meta_data, negclaim = example.get_six(type=WORD_TYPE)\n la, ra, lb, rb = diffsents(warrant0, warrant1)\n diff_warrant0 = warrant0[la: ra + 1]\n diff_warrant1 = warrant1[lb: rb + 1]\n la, ra, _, _ = diffsents(claim, negclaim)\n diff_claim = claim[la: ra+1]\n # print(warrant0, warrant1, diff_warrant0, diff_warrant1)\n # print(claim, negclaim, diff_claim)\n # id += 1\n # if id == 10:\n # exit(1)\n\n # convert to the ids\n diff_warrant0 = data_utils.sent_to_index(diff_warrant0, word_vocab)\n diff_warrant1 = data_utils.sent_to_index(diff_warrant1, word_vocab)\n diff_claim = data_utils.sent_to_index(diff_claim, word_vocab)\n\n diff_warrant0_list.append(diff_warrant0)\n diff_warrant1_list.append(diff_warrant1)\n diff_claim_list.append(diff_claim)\n\n diff_warrant0_len_list.append(min(len(diff_warrant0), config.max_diff_len))\n diff_warrant1_len_list.append(min(len(diff_warrant1), config.max_diff_len))\n diff_claim_len_list.append(min(len(diff_claim), config.max_diff_len))\n\n diff_warrant0_list = data_utils.pad_2d_matrix(diff_warrant0_list, config.max_diff_len)\n diff_warrant1_list = data_utils.pad_2d_matrix(diff_warrant1_list, config.max_diff_len)\n diff_claim_list = data_utils.pad_2d_matrix(diff_claim_list, config.max_diff_len)\n\n # int\n self.diff_warrant0_list = np.array(diff_warrant0_list, dtype=np.int32)\n self.diff_warrant1_list = np.array(diff_warrant1_list, dtype=np.int32)\n self.diff_claim_list = np.array(diff_claim_list, dtype=np.int32)\n\n self.diff_warrant0_len = np.array(diff_warrant0_len_list, dtype=np.int32)\n self.diff_warrant1_len = np.array(diff_warrant1_len_list, dtype=np.int32)\n self.diff_claim_len = np.array(diff_claim_len_list, dtype=np.int32)", "def learn(instance: machine_learning.StationMachineLearning):\n instance.learn()", "def __init__(self, use_existing_vocab: bool = True):\n\n self._db_connection = postgresql.DatabaseConnection()\n self._use_existing_vocab = use_existing_vocab", "def create_stp_instance(self, instance, priority):\n pass", "def from_xml(cls, xml_data, system, id_generator):\r\n\r\n xml_object = etree.fromstring(xml_data)\r\n system.error_tracker('WARNING: the <{tag}> tag is deprecated. '\r\n 'Instead, use <customtag impl=\"{tag}\" attr1=\"...\" attr2=\"...\"/>. '\r\n .format(tag=xml_object.tag))\r\n\r\n tag = xml_object.tag\r\n xml_object.tag = 'customtag'\r\n xml_object.attrib['impl'] = tag\r\n\r\n return system.process_xml(etree.tostring(xml_object))", "def build_from_instance(self, instanceDict={}):\n if not instanceDict.keys():\n print \"Instance's dictionary not provided. 
Please provide a dictionary that contains instance's information.\"\n return\n\n from node import Node\n from edge import Edge\n\n self.__instance_dictionary = instanceDict\n self.__name = instanceDict[\"header\"][\"NAME\"] # name of the graph\n self.__nodes = [] # Array of nodes\n self.__num_nodes = 0 # Number of nodes\n self.__edges = [] # Array of edges\n self.__num_edges = 0 # Number of edges\n self.__adjacency_matrix = [] # initialize an empty adjacency matrix\n self.__adjacency_matrix_dictionary = {} # initialize an empty double dictionary adjacency object\n\n # add nodes to the graph\n for curNodeVal in xrange(0, instanceDict[\"header\"][\"DIMENSION\"]):\n new_node = Node(curNodeVal, instanceDict[\"nodes\"][curNodeVal]) # create a new node instance\n self.add_node(new_node) # add node to the graph\n\n # add edges to graph\n for tupleEdge in instanceDict[\"edges\"]:\n node_a_id = tupleEdge[0]\n node_b_id = tupleEdge[1]\n edge_weight = tupleEdge[2]\n\n #if edge_weight > 0:\n if node_a_id != node_b_id:\n new_edge = Edge(self.__nodes[node_a_id], self.__nodes[node_b_id], edge_weight) # create edge\n self.add_edge(new_edge)", "def _readSingleInstanceElement(\n self,\n instanceElement,\n makeGlyphs=True,\n makeKerning=True,\n makeInfo=True,\n bendLocations=False,\n ):\n # get the data from the instanceElement itself\n filename = instanceElement.attrib.get('filename')\n\n instancePath = os.path.join(os.path.dirname(self.path), filename)\n self.reportProgress(\"generate\", 'start', instancePath)\n if self.verbose and self.logger:\n self.logger.info(\"\\tGenerating instance %s\", os.path.basename(instancePath))\n filenameTokenForResults = os.path.basename(filename)\n\n instanceObject = self._instanceWriterClass(\n instancePath,\n ufoVersion=self.ufoVersion,\n roundGeometry=self.roundGeometry,\n axes = self.axes,\n verbose=self.verbose,\n logger=self.logger,\n bendLocations=bendLocations,\n )\n self.results[filenameTokenForResults] = instancePath\n\n # set the masters\n instanceObject.setSources(self.sources)\n self.unicodeMap = instanceObject.makeUnicodeMapFromSources()\n instanceObject.setMuted(self.muted)\n familyname = instanceElement.attrib.get('familyname')\n if familyname is not None:\n instanceObject.setFamilyName(familyname)\n stylename = instanceElement.attrib.get('stylename')\n if stylename is not None:\n instanceObject.setStyleName(stylename)\n postScriptFontName = instanceElement.attrib.get('postscriptfontname')\n if postScriptFontName is not None:\n instanceObject.setPostScriptFontName(postScriptFontName)\n styleMapFamilyName = instanceElement.attrib.get('stylemapfamilyname')\n if styleMapFamilyName is not None:\n instanceObject.setStyleMapFamilyName(styleMapFamilyName)\n styleMapStyleName = instanceElement.attrib.get('stylemapstylename')\n if styleMapStyleName is not None:\n instanceObject.setStyleMapStyleName(styleMapStyleName)\n\n # location\n instanceLocation = self.locationFromElement(instanceElement)\n\n if instanceLocation is not None:\n instanceObject.setLocation(instanceLocation)\n\n if makeGlyphs:\n\n # step 1: generate all glyphs we have mutators for.\n names = instanceObject.getAvailableGlyphnames()\n for n in names:\n unicodes = self.unicodeMap.get(n, None)\n try:\n instanceObject.addGlyph(n, unicodes)\n except AssertionError:\n if self.verbose and self.logger:\n self.logger.info(\"Problem making glyph %s, skipping.\", n)\n # step 2: generate all the glyphs that have special definitions.\n for glyphElement in instanceElement.findall('.glyphs/glyph'):\n 
self.readGlyphElement(glyphElement, instanceObject)\n\n # read the kerning\n if makeKerning:\n for kerningElement in instanceElement.findall('.kerning'):\n self.readKerningElement(kerningElement, instanceObject)\n break\n\n # read the fontinfo\n if makeInfo:\n for infoElement in instanceElement.findall('.info'):\n self.readInfoElement(infoElement, instanceObject)\n\n # copy the features\n if self.featuresSource is not None:\n instanceObject.copyFeatures(self.featuresSource)\n\n # copy the groups\n if self.groupsSource is not None:\n if self.groupsSource in self.sources:\n groupSourceObject, loc = self.sources[self.groupsSource]\n # copy the groups from the designated source to the new instance\n # note: setGroups will filter the group members\n # only glyphs present in the font will be added to the group.\n # Depending on the ufoversion we might or might not expect the kerningGroupConversionRenameMaps attribute.\n if hasattr(groupSourceObject, \"kerningGroupConversionRenameMaps\"):\n renameMap = groupSourceObject.kerningGroupConversionRenameMaps\n else:\n renameMap = {}\n instanceObject.setGroups(groupSourceObject.groups, kerningGroupConversionRenameMaps=renameMap)\n\n # lib items\n if self.libSource is not None:\n if self.libSource in self.sources:\n libSourceObject, loc = self.sources[self.libSource]\n instanceObject.setLib(libSourceObject.lib)\n\n # save the instance. Done.\n success, report = instanceObject.save()\n if not success and self.logger:\n # report problems other than validation errors and failed glyphs\n self.logger.info(\"%s:\\nErrors generating: %s\", filename, report)\n\n # report failed glyphs\n failed = instanceObject.getFailed()\n if failed:\n failed.sort()\n msg = \"%s:\\nErrors calculating %s glyphs: \\n%s\"%(filename, len(failed),\"\\t\"+\"\\n\\t\".join(failed))\n self.reportProgress('error', 'glyphs', msg)\n if self.verbose and self.logger:\n self.logger.info(msg)\n\n # report missing unicodes\n missing = instanceObject.getMissingUnicodes()\n if missing:\n missing.sort()\n msg = \"%s:\\nPossibly missing unicodes for %s glyphs: \\n%s\"%(filename, len(missing),\"\\t\"+\"\\n\\t\".join(missing))\n self.reportProgress('error', 'unicodes', msg)\n\n # store\n self.instances[postScriptFontName] = instanceObject\n self.reportProgress(\"generate\", 'stop', filenameTokenForResults)", "def get_serialization_instance(cls, value):\n\n # if the instance is a list, convert it to a cls instance.\n # this is only useful when deserializing method arguments for a client\n # request which is the only time when the member order is not arbitrary\n # (as the members are declared and passed around as sequences of\n # arguments, unlike dictionaries in a regular class definition).\n if isinstance(value, list) or isinstance(value, tuple):\n assert len(value) <= len(cls._type_info)\n\n cls_orig = cls\n if cls.__orig__ is not None:\n cls_orig = cls.__orig__\n inst = cls_orig()\n\n keys = cls._type_info.keys()\n for i in range(len(value)):\n setattr(inst, keys[i], value[i])\n\n elif isinstance(value, dict):\n inst = cls()\n\n for k in cls._type_info:\n setattr(inst, k, value.get(k, None))\n\n else:\n inst = value\n\n return inst", "def __init__(self, name=None, debug_mode=False, features=None, ui=None, is_default=False, created=None, modified=None, id=None, team_id=None, team=None, portals=None, product_groups=None, product_types=None, product_sizes=None, product_size_materials=None, product_size_materials_rel=None):\n self.swagger_types = {\n 'name': 'str',\n 'debug_mode': 'bool',\n 'features': 
'object',\n 'ui': 'object',\n 'is_default': 'bool',\n 'created': 'datetime',\n 'modified': 'datetime',\n 'id': 'str',\n 'team_id': 'str',\n 'team': 'Team',\n 'portals': 'list[Portal]',\n 'product_groups': 'list[ProductGroup]',\n 'product_types': 'list[ProductType]',\n 'product_sizes': 'list[ProductSize]',\n 'product_size_materials': 'list[ProductSizeMaterial]',\n 'product_size_materials_rel': 'list[TeamBuilderConfigProductSizeMaterial]'\n }\n\n self.attribute_map = {\n 'name': 'name',\n 'debug_mode': 'debugMode',\n 'features': 'features',\n 'ui': 'ui',\n 'is_default': 'isDefault',\n 'created': 'created',\n 'modified': 'modified',\n 'id': 'id',\n 'team_id': 'teamId',\n 'team': 'team',\n 'portals': 'portals',\n 'product_groups': 'productGroups',\n 'product_types': 'productTypes',\n 'product_sizes': 'productSizes',\n 'product_size_materials': 'productSizeMaterials',\n 'product_size_materials_rel': 'productSizeMaterialsRel'\n }\n\n self._name = name\n self._debug_mode = debug_mode\n self._features = features\n self._ui = ui\n self._is_default = is_default\n self._created = created\n self._modified = modified\n self._id = id\n self._team_id = team_id\n self._team = team\n self._portals = portals\n self._product_groups = product_groups\n self._product_types = product_types\n self._product_sizes = product_sizes\n self._product_size_materials = product_size_materials\n self._product_size_materials_rel = product_size_materials_rel", "def create(cls, xml):\n raise Exception('Not Implemented Yet')", "def __init__(self, *args, **kwargs):\r\n super(OWS, self).__init__(*args, **kwargs)\r\n\r\n self.query = {\"VERSION\": \"1.0.0\", \"REQUEST\": \"GetCapabilities\"}\r\n # Amended with \"SERVICE\" parameter by subclasses\r\n\r\n # If service or version parameters are left in query string, it can lead to a protocol error and false negative\r\n self.service_url = re.sub(r\"(?i)(version|service|request)=.*?(&|$)\", \"\", self.service_url)\r\n\r\n self.layer = self.layer.lower() if self.layer else None\r\n self.layer_elements = None", "def create_foundation_sdk_instance():\n instance = LockedInstance(\n lock=threading.Lock(),\n instance=SDK(**(request.get_json() or {})),\n module=None,\n entity=SDK_ENTITY_NAME,\n uuid=str(uuid.uuid4().hex),\n created_at=datetime.datetime.utcnow(),\n )\n STORE[instance.uuid] = instance\n response = app.response_class(\n response=json.dumps(serialise_instance(instance)),\n status=201,\n mimetype='application/json'\n )\n return response", "def from_xml(cls, xml_data, system, id_generator):\r\n raise NotImplementedError('Modules must implement from_xml to be parsable from xml')", "def create_service_instance(self, serviceManufacturer, serviceType) -> Union[UpnpServiceProxy, None]:\n serviceInst = None\n if serviceType is not None:\n extkey = generate_extension_key(serviceManufacturer, serviceType)\n if extkey in self._service_registry:\n serviceClass = self._service_registry[extkey]\n serviceInst = serviceClass()\n return serviceInst", "def __init__(self, **kwargs):\n with KWArgs(kwargs) as k:\n metadata = k.optional(\"metadata\")\n\n self._ax = ca.boolean()\n self.metadata = metadata" ]
[ "0.5577236", "0.5367035", "0.5321477", "0.5150564", "0.50830454", "0.5067312", "0.4986971", "0.49749762", "0.4935389", "0.48817945", "0.4843432", "0.48354465", "0.48144224", "0.47744632", "0.47571477", "0.47335753", "0.47333562", "0.47290888", "0.47188824", "0.46990353", "0.46851358", "0.4661856", "0.46545747", "0.4653167", "0.46447936", "0.46366242", "0.46229136", "0.4611534", "0.45930302", "0.45751363", "0.45721653", "0.4568326", "0.45665586", "0.45643023", "0.45603958", "0.45551097", "0.45549124", "0.45527393", "0.45511377", "0.454809", "0.45466587", "0.4541401", "0.4528824", "0.45241785", "0.45232117", "0.4512391", "0.4512391", "0.44884825", "0.44836727", "0.44669187", "0.44640434", "0.4463944", "0.44630235", "0.44497624", "0.4445383", "0.4444116", "0.4437977", "0.4432881", "0.44288945", "0.4421241", "0.44199684", "0.4417163", "0.44122428", "0.4408873", "0.44018352", "0.4401643", "0.43859872", "0.4380369", "0.4373761", "0.43577474", "0.43551284", "0.43491828", "0.43488842", "0.4344803", "0.4335016", "0.43249065", "0.43232444", "0.43219897", "0.4321757", "0.43200162", "0.43187308", "0.43151832", "0.4314413", "0.4313697", "0.43102914", "0.43095985", "0.43034405", "0.42970973", "0.4297028", "0.4296706", "0.42934772", "0.42891452", "0.42884478", "0.42837813", "0.4279225", "0.42708585", "0.4262313", "0.42596236", "0.42594934", "0.42594898" ]
0.54205626
1
Call the named remote web service method.
def _call(self, name, *args, **kwargs):
    if len(args) and len(kwargs):
        raise TypeError(
            'Use positional or keyword argument only.'
            )

    callinfo = getattr(self, name).callinfo
    soapAction = callinfo.soapAction
    url = callinfo.location
    (protocol, host, uri, query, fragment, identifier) = urlparse(url)
    port = '80'
    if host.find(':') >= 0:
        host, port = host.split(':')

    binding = Binding(host=host, tracefile=self._tracefile,
                      ssl=(protocol == 'https'),
                      port=port, url=None, typesmodule=self._typesmodule,
                      nsdict=self._nsdict, soapaction=self._soapAction,
                      ns=self._ns, op_ns=self._op_ns)

    if self._use_wsdl:
        request, response = self._getTypeCodes(callinfo)
        if len(kwargs):
            args = kwargs
        if request is None:
            request = Any(oname=name)
        binding.Send(url=uri, opname=None, obj=args,
                     nsdict=self._nsdict, soapaction=soapAction,
                     requesttypecode=request)
        return binding.Receive(replytype=response)

    apply(getattr(binding, callinfo.methodName), args)
    return binding.Receive()
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def _call(self, rpc_method_name, *args, **kwargs):\n method = getattr(self, rpc_method_name)\n return method(*args, **kwargs)", "def _method_call(self, msg):\n #print(\"Performing service: %s, method_name: %s\" % (msg.service_name, msg.method_name))\n service = self._services.get(msg.service_name)\n if service is None:\n raise MessageHandleError(MessageHandleError.RESULT_UNKNOWN_SERVICE, msg)\n\n try:\n return execute_remote_method_call(service, msg.method_name, *msg.pargs, **msg.kwargs)\n #return service.call(msg.method_name, *msg.pargs, **msg.kwargs)\n except MessageHandleError as error:\n error.original_message = msg\n raise error", "def callRemote(self, methname, *args, **kwargs):\n schema = self._referenceable.getInterface()[methname]\n if self.check_args:\n schema.checkAllArgs(args, kwargs, inbound=False)\n # TODO: Figure out how to call checkResults on the result.\n return execute(\n self._referenceable.doRemoteCall,\n methname,\n args,\n kwargs,\n )", "def callRemote(self, methname, *args, **kwargs):\n try:\n schema = self._referenceable.getInterface()[methname]\n if self.check_args:\n schema.checkAllArgs(args, kwargs, inbound=True)\n _check_copyables(list(args) + kwargs.values())\n result = self._referenceable.doRemoteCall(\n methname,\n args,\n kwargs,\n )\n schema.checkResults(result, inbound=False)\n _check_copyables([result])\n return succeed(result)\n except:\n return fail()", "def call(self, method, name, params=None, payload=None, **kwds):", "def callRemote(self, name, *args, **kw):\n if hasattr(self, 'sync_'+name):\n return getattr(self, 'sync_'+name)(*args, **kw)\n try:\n method = getattr(self, \"async_\" + name)\n return defer.succeed(method(*args, **kw))\n except:\n f = Failure()\n if self.reportAllTracebacks:\n f.printTraceback()\n return defer.fail(f)", "def export_callProxyMethod( self, name, args, kargs ):\n res = pythonCall( 120, self.__proxyWrapper, name, args, kargs )\n if res['OK']:\n return res['Value']\n else:\n return res", "def execute(self, methodname, *args):\n if not methodname in self.methods:\n raise BlogError(BlogError.METHOD_NOT_SUPPORTED)\n\n try:\n r = getattr(self.server, methodname)(args)\n except xmlrpclib.Fault, fault:\n raise BlogError(fault.faultString)\n\n return r", "def call(self, name, *args, **kwargs):\r\n return self.client.call(self.name, name, *args, **kwargs)", "def __executeOperation( self, url, method ):\n fcn = None\n if hasattr( self, method ) and callable( getattr( self, method ) ):\n fcn = getattr( self, method )\n if not fcn:\n return S_ERROR( \"Unable to invoke %s, it isn't a member function of DIPStorage\" % method )\n\n res = fcn( url )\n if not res['OK']:\n return res\n elif url not in res['Value']['Successful']:\n return S_ERROR( res['Value']['Failed'][url] )\n\n return S_OK( res['Value']['Successful'][url] )", "def remote(self, method, params=()):\n\n response = self.transport.request(self.host, \n '/RPC2',\n dumps(params, method))\n return response", "def call(self, method, *args, **kwargs):\n if method in self.handlers:\n handler = self.handlers[method]\n if self.single_server:\n self.send_request(self.single_server, method, handler, *args, **kwargs)\n else:\n if method in [\"completion\", \"completion_item_resolve\", \"diagnostics\", \"code_action\", \"execute_command\"]:\n method_server_names = self.multi_servers_info[method]\n else:\n method_server_names = [self.multi_servers_info[method]]\n\n for method_server_name in method_server_names:\n method_server = self.multi_servers[method_server_name]\n 
self.send_request(method_server, method, handler, *args, **kwargs)\n elif hasattr(self, method):\n getattr(self, method)(*args, **kwargs)", "def call(self, obj_name, method_name, param_dict=dict()):\n result = self.ctrl_client.call(obj_name, method_name, param_dict)\n if result['type'] == 'error':\n self.logger.error(\"API call error: {}\".format(result['msg']))\n return None\n else:\n return result['call_return']", "def _call_method(self, module, method, *args, **kwargs):\n return self.invoke_api(module, method, *args, **kwargs)", "def __call__(self):\n params, method = parse_xmlrpc_request(self.request)\n return xmlrpc_response(getattr(self,method)(*params))", "def callRemote(self, _name, *args, **kw):\r\n if self.__failure is not None:\r\n d = fail(self.__failure)\r\n elif self.__pending is not None:\r\n d = Deferred()\r\n self.__pending.append(d)\r\n else:\r\n d = succeed(self.__obj)\r\n\r\n d.addCallback(lambda ref: ref.callRemote(_name, *args, **kw))\r\n d.addErrback(self.__filter, _name)\r\n return d", "def _remote_call(self,\n method_name,\n target='Widget',\n args=None,\n kwargs=None):\n args = [] if args is None else args\n kwargs = {} if kwargs is None else kwargs\n\n msg = {}\n\n if 'component_index' in kwargs:\n msg['component_index'] = kwargs.pop('component_index')\n if 'repr_index' in kwargs:\n msg['repr_index'] = kwargs.pop('repr_index')\n\n msg['target'] = target\n msg['type'] = 'call_method'\n msg['methodName'] = method_name\n msg['args'] = args\n msg['kwargs'] = kwargs\n\n def callback(widget, msg=msg):\n widget.send(msg)\n\n callback._method_name = method_name\n callback._ngl_msg = msg\n\n if self.loaded:\n self._remote_call_thread.q.append(callback)\n else:\n # send later\n # all callbacks will be called right after widget is loaded\n self._ngl_displayed_callbacks_before_loaded.append(callback)\n\n if callback._method_name not in _EXCLUDED_CALLBACK_AFTER_FIRING:\n self._ngl_displayed_callbacks_after_loaded.append(callback)", "def _execApiCall(headers, params, method_name,\r\n domain='ma.gnolia.com',\r\n urlhead='/api/rest/1/'):\r\n \r\n if 'api_key' not in params and method_name not in ['echo', 'get_key']:\r\n raise MagnoliaException('Required API Key parameter missing')\r\n conn = httplib.HTTPConnection(domain)\r\n conn.request('POST', urlhead + method_name, params, headers)\r\n return conn.getresponse()", "def __getattr__(self, method):\n def run_callback(func, plus, result):\n \"\"\"Execute the given callback safely.\n Get data and/or error from result and call func passing it\n data, plus (if needed) and error. 
Catch, log and suppress\n all exceptions.\n func (function): the callback to invoke.\n plus (object): optional additional data.\n result (AsyncResult): the result of a (finished) RPC call.\n \"\"\"\n data = result.value\n error = None if result.successful() else \"%s\" % result.exception\n try:\n if plus is None:\n func(data, error=error)\n else:\n func(data, plus, error=error)\n except Exception as error:\n logger.error(\"RPC callback for %s.%s raised exception.\",\n self.remote_service_coord.name, method,\n exc_info=True)\n\n def remote_method(**data):\n \"\"\"Forward arguments to execute_rpc.\n \"\"\"\n callback = data.pop(\"callback\", None)\n plus = data.pop(\"plus\", None)\n result = self.execute_rpc(method=method, data=data)\n if callback is not None:\n callback = functools.partial(run_callback, callback, plus)\n result.rawlink(functools.partial(gevent.spawn, callback))\n return result\n\n return remote_method", "def call(self, uri, method, arg, extras):\n pass", "def do_rpc(self, method, **params):\n data = {\n 'jsonrpc': '2.0',\n 'method': method,\n 'params': params,\n 'authorization': self._auth_token,\n 'id': '1'\n }\n\n r = requests.post(self._url, json=data)\n validate_response(r)\n\n return r.json()['result']", "def call(self, port, method, *args, **kwargs):\n method = self.provides[port][method]\n return method(*args, **kwargs)", "def xen_rpc_call(ip, method, *args):\n try:\n if not ip:\n return xen_api_error(\"Invalid ip for rpc call\")\n # create\n proxy = ServerProxy(\"http://\" + ip + \":9363/\")\n \n # login \n response = proxy.session.login('root')\n if cmp(response['Status'], 'Failure') == 0:\n log.exception(response['ErrorDescription'])\n return xen_api_error(response['ErrorDescription']) \n session_ref = response['Value']\n \n # excute\n method_parts = method.split('_')\n method_class = method_parts[0]\n method_name = '_'.join(method_parts[1:])\n \n if method.find(\"host_metrics\") == 0:\n method_class = \"host_metrics\"\n method_name = '_'.join(method_parts[2:])\n #log.debug(method_class)\n #log.debug(method_name)\n if method_class.find(\"Async\") == 0:\n method_class = method_class.split(\".\")[1]\n response = proxy.__getattr__(\"Async\").__getattr__(method_class).__getattr__(method_name)(session_ref, *args)\n else:\n response = proxy.__getattr__(method_class).__getattr__(method_name)(session_ref, *args)\n if cmp(response['Status'], 'Failure') == 0:\n log.exception(response['ErrorDescription'])\n return xen_api_error(response['ErrorDescription'])\n # result\n return response\n except socket.error:\n return xen_api_error('socket error')", "def call(self, method=None, args=[]):\n\n client = xmlclient.ServerProxy(uri=self.api_endpoint, encoding='utf-8',\n allow_none=True)\n response = getattr(client, method)(self.username, self.password, *args)\n if response == 'OK':\n return True\n elif 'AUTH_ERROR' in response:\n raise AuthError()\n elif response == 'DOMAIN_OCCUPIED':\n raise DomainOccupiedError()\n elif response == 'RATE_LIMITED':\n raise RateLimitedError()\n elif response == 'BAD_INDATA':\n raise BadInDataError()\n elif response == 'UNKNOWN_ERROR':\n raise UnknownError()\n else:\n return response", "def _dispatch(self, method, params):\n logging.debug('Calling %s%s', method, params)\n self._rpc_received_event.set()\n return SimpleJSONRPCServer.SimpleJSONRPCServer._dispatch(\n self, method, params)", "def call_rpc(rpc_user, rpc_pwd, method):\n base_url = 'http://127.0.0.1:37128/'\n try:\n response = post(base_url,\n data=method,\n auth=(rpc_user, rpc_pwd)\n )\n 
except Exception as e:\n response = e\n return evaluate_response(response)", "def __call__(self, **action_kwargs):\n\n return SOAP.send(self.service, self, **action_kwargs)", "def call_selenium_api(self, method_name, *args):\n try:\n method = getattr(self._selenium, method_name)\n except AttributeError:\n method = lambda *args: self._selenium.do_command(method_name, args)\n return method(*args)", "def api_call(self, method, host, params):\n session_id = self.rpc_login(host)\n params.insert(0, session_id)\n json_rpc_request = {\n 'jsonrpc': '2.0',\n 'method': method,\n 'params': params,\n 'id': self.ID\n }\n\n self.ID += 1\n response = requests.post(host, data=json.dumps(json_rpc_request), headers=self.headers)\n\n return response", "def send_method_call(self, service_name, method_name, *pargs, **kwargs):\n msg = MethodCallMessage(service_name=service_name, method_name=method_name, pargs=pargs, kwargs=kwargs)\n return self.send_message_blocking(msg)", "def remote_method(**data):\n callback = data.pop(\"callback\", None)\n plus = data.pop(\"plus\", None)\n result = self.execute_rpc(method=method, data=data)\n if callback is not None:\n callback = functools.partial(run_callback, callback, plus)\n result.rawlink(functools.partial(gevent.spawn, callback))\n return result", "def call_method(self, request, context):\n response = CallMethodResponse()\n args = []\n for arg in request.args:\n args.append(decode(arg))\n if args != []:\n result = \\\n self._delegator.call_method(\n request.component, request.method, *args)\n else:\n result = \\\n self._delegator.call_method(\n request.component, request.method, None)\n response.result = encode(result)\n return response", "def make_call(self, method, _):\n raise CallNotFound('{}.{} does not exist'.format(self.service_name,\n method))", "def xmlrpc_methods():", "def make_xml_rpc_api_call(uri, method, args=None, headers=None,\r\n http_headers=None, timeout=None, proxy=None):\r\n if args is None:\r\n args = tuple()\r\n try:\r\n largs = list(args)\r\n largs.insert(0, {'headers': headers})\r\n\r\n payload = xmlrpc_client.dumps(tuple(largs),\r\n methodname=method,\r\n allow_none=True)\r\n session = requests.Session()\r\n req = requests.Request('POST', uri, data=payload,\r\n headers=http_headers).prepare()\r\n LOGGER.debug(\"=== REQUEST ===\")\r\n LOGGER.info('POST %s', uri)\r\n LOGGER.debug(req.headers)\r\n LOGGER.debug(payload)\r\n\r\n response = session.send(req,\r\n timeout=timeout,\r\n proxies=_proxies_dict(proxy))\r\n LOGGER.debug(\"=== RESPONSE ===\")\r\n LOGGER.debug(response.headers)\r\n LOGGER.debug(response.content)\r\n response.raise_for_status()\r\n result = xmlrpc_client.loads(response.content,)[0][0]\r\n return result\r\n except xmlrpc_client.Fault as ex:\r\n # These exceptions are formed from the XML-RPC spec\r\n # http://xmlrpc-epi.sourceforge.net/specs/rfc.fault_codes.php\r\n error_mapping = {\r\n '-32700': NotWellFormed,\r\n '-32701': UnsupportedEncoding,\r\n '-32702': InvalidCharacter,\r\n '-32600': SpecViolation,\r\n '-32601': MethodNotFound,\r\n '-32602': InvalidMethodParameters,\r\n '-32603': InternalError,\r\n '-32500': ApplicationError,\r\n '-32400': RemoteSystemError,\r\n '-32300': TransportError,\r\n }\r\n raise error_mapping.get(ex.faultCode, SoftLayerAPIError)(\r\n ex.faultCode, ex.faultString)\r\n except requests.HTTPError as ex:\r\n raise TransportError(ex.response.status_code, str(ex))\r\n except requests.RequestException as ex:\r\n raise TransportError(0, str(ex))", "def get_method(self, name):\n try:\n return 
self.methods[name]\n except KeyError:\n raise ServiceException('Service method \"%s\" not registered' % name)", "def call_method(self, name, method, params):\n self.logger.debug(\"API call: {}.{}({})\".format(name, method, params))\n if name in self.systems:\n obj = self.systems[name]\n if is_api_method(obj, method):\n try:\n # Calls given obj.method, unpacking and passing params dict\n call_return = getattr(obj, method)(**params)\n msg = \"Called {}.{}\".format(name, method)\n self.logger.debug(msg + \",returned:{}\".format(call_return))\n return msgs.call_reply(msg, call_return)\n except TypeError:\n # Raised when we have a mismatch of the method's kwargs\n # TODO: Return argspec here?\n err_msg = \"Invalid params for {}.{}\".format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n except Exception as e:\n # Catch exception raised by called method, notify client\n err_msg = \"Exception: '{}'\".format(str(e))\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid method: '{}.{}'\".format(name, method)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)\n else:\n err_msg = \"Invalid object: '{}'\".format(name)\n self.logger.warning(err_msg)\n return msgs.error(err_msg)", "def call(self, name, request=None, **params):\r\n if not name in self.resources:\r\n raise exceptions.HttpError('Unknown method \\'%s\\'' % name,\r\n status=status.HTTP_501_NOT_IMPLEMENTED)\r\n request = request or HttpRequest()\r\n resource = self.resources[name]\r\n view = resource.as_view(api=self)\r\n return view(request, **params)", "def call_method(self, action):\n\n\t\tif action[0] in self.methods:\n\t\t\tself.methods[action[0]](action[0:])\n\t\telse:\n\t\t\tself.no_such_method()", "def _execute_impl(self, *args, **kwargs):\n # Execute with bound args.\n method_body = getattr(self._deployment_node, self._deployment_method_name)\n return method_body.remote(\n *self._bound_args,\n **self._bound_kwargs,\n )", "def __call__(self, method, *args, **kwargs):\n if hasattr(self, 'channel'):\n kwargs.setdefault('channel', self.channel)\n resp = self._client.api_call(method, *args, **kwargs)\n if not resp['ok']:\n print unicode(resp)\n raise RuntimeError(resp.get('error', unicode(resp)))\n return resp", "def rpc_call(self, request, method=None, params=None, **kwargs):\r\n args = []\r\n kwargs = dict()\r\n if isinstance(params, dict):\r\n kwargs.update(params)\r\n else:\r\n args = list(as_tuple(params))\r\n\r\n method_key = \"{0}.{1}\".format(self.scheme_name, method)\r\n if method_key not in self.methods:\r\n raise AssertionError(\"Unknown method: {0}\".format(method))\r\n method = self.methods[method_key]\r\n\r\n if hasattr(method, 'request'):\r\n args.insert(0, request)\r\n\r\n return method(*args, **kwargs)", "def call(self, **kwargs):\n return getattr(self.resource, self.function)(**kwargs)", "def _ExecuteRequest(request):\n service = request.service_name()\n method = request.method()\n service_methods = remote_api_services.SERVICE_PB_MAP.get(service, {})\n request_class, response_class = service_methods.get(method, (None, None))\n if not request_class:\n raise apiproxy_errors.CallNotFoundError('%s.%s does not exist' % (service,\n method))\n\n request_data = request_class()\n request_data.ParseFromString(request.request())\n response_data = response_class()\n\n def MakeRequest():\n apiproxy_stub_map.MakeSyncCall(service, method, request_data,\n response_data)\n\n\n\n if service in THREAD_SAFE_SERVICES:\n MakeRequest()\n else:\n with GLOBAL_API_LOCK:\n 
MakeRequest()\n return response_data", "def\trunMethod(self, name = \"__default\", prms = {}):\n\n\t\tprms = self.CSLBuildPrmList(prms)\n\t\tlocalTbl = { 'vars':{}, 'status':0, 'props':{}, 'alias':{}, 'persistent':{}, 'instance':{}}\n\n\t\tres = self.callMethod(name, prms, localTbl)\n\n\t\tif localTbl[\"status\"] == 2:\n\t\t\treturn self.COMARValue.COMARRetVal(1, None)\n\t\telse:\t\t\t\n\t\t\tret = self.CSLtoCOMARValue(res)\n\t\t\tself.debug(DEBUG_FATAL, \"RETS:\", res, '(%s)->' % (res.type), ret)\n\t\t\treturn self.COMARValue.COMARRetVal(0, ret)", "def ServiceMethod(fn):\n\n fn.IsServiceMethod = True\n return fn", "def client_member_function(self, method: ProtoServiceMethod) -> None:", "def rpc_request(method, params, url=LOCAL):\n client = HTTPClient(url)\n return client.request(method, params)", "def call(self, method, *args):\n flatcall = flatten(\n m(n=method, t=self.groupName)[[\n squish(x) for x in args if x is not None]])\n self.socket.write(flatcall + '\\0')", "def call_method(self, method_name: str, timeout=None, **params) -> dict:\n timeout = timeout or self.timeout\n request_id = uuid.uuid4().hex\n data = {'@type': method_name,\n '@extra': {'request_id': request_id}}\n data.update(params)\n self._tdjson_client.send(data)\n\n result = self._wait_result(request_id, timeout)\n return result", "def execute_rpc(self, method, data):\n result = gevent.event.AsyncResult()\n result.set_exception(\n RPCError(\"Called a method of a non-configured service.\"))\n return result", "def rpc_call(self, ctx, rpc_request):\n rpc_method = rpc_request['method']\n start_time = time.time()\n try:\n return self._rpc_call([ctx.proxyfsd_addrinfo], rpc_request)\n finally:\n duration = time.time() - start_time\n self.logger.debug(\"RPC %s took %.6fs\", rpc_method, duration)", "def __call__(self, method, url, *args, **kwargs):\n log.debug('{} {}'.format(method.upper(), url))\n if 'params' in kwargs:\n kwargs['query'] = kwargs.pop('params')\n return getattr(self.client, method)(url, *args, **kwargs).json", "def call(self, method, **kwargs):\n response = self._do_request(method, **kwargs)\n return self._deserialize_response(response)", "def call(self, command, *args, **kwargs):\n\n\tif command in self.command_table:\n\t cmdopts = self.command_table[command]\n\t function = cmdopts.get('function', None)\n\t method = cmdopts.get('method', None)\n\t template = cmdopts.get('url_template', None)\n\t retval = function(method, template, *args, **kwargs)\n\t if not retval:\n\t\traise RuntimeError, 'request returned no data'\n\t return retval\n\telse:\n\t raise RuntimeError, 'unknown command: %s' % command", "def _platformix_call(self, context, fake_reply, method, *args, **kwargs):\r\n if hasattr(self.host, method):\r\n if not callable(getattr(self.host, method)):\r\n self._reply(context, proto_failure(\"Attribute {} of {} is a property\".format(\r\n property, self.host.name)), fake_reply)\r\n return\r\n try:\r\n result = getattr(self.host, method)(*args, **kwargs)\r\n except Exception as e:\r\n eprint(\"Platformix protocol: failed to call method {} of {} with args {}, kwargs {} \"\r\n \"due to exception {}\".format(method, self.host.name, args, kwargs, e))\r\n exprint()\r\n self._reply(context, proto_failure(\r\n \"Failed to call method {} of {} with args {}, kwargs {} \"\r\n \"due to exception {}\".format(method, self.host.name, args, kwargs, e)), fake_reply)\r\n return\r\n self._reply(context, proto_success(result), fake_reply)\r\n else:\r\n self._reply(context, proto_failure(\"Method {} not found on 
{}\".format(property, self.host.name)),\r\n fake_reply)", "def call_async(self, name, *args, **kwargs):", "def __call__(self, *args, **kwargs):\n return self.method(*args, **kwargs)", "def _invokeMember(obj, memberName, *args, **kwargs):\n return getattr(obj, memberName)(*args, **kwargs)", "def exec_method(self, module_name, version=None, client_class=None,\n method_name=None, *args, **kwargs):\n client_class = client_class or 'Client'\n client_version = version or 2\n _client = self.create_client(module_name, client_version,\n client_class)\n try:\n # NOTE(kiennt): method_name could be a combination\n # for example 'servers.list'. Here is the\n # workaround.\n method = getattr(_client, method_name.split('.')[0])\n for attr in method_name.split('.')[1:]:\n method = getattr(method, attr)\n return method(*args, **kwargs)\n except Exception as err:\n raise err", "def __getattr__(self, method_name):\n return partial(self.exec, method_name.replace(\"_\", \" \"))", "def rpcmethod(func):\n func.rpcmethod = True\n return func", "def _api_call(self, method, resource, **kwargs):\n uri = \"/%s?%s&username=%s\" % (resource, urlencode(kwargs), self.username)\n print (uri)\n c = self.get_connection()\n c.request(method, uri)\n response = c.getresponse()\n if not 200 == response.status:\n raise GeoNameException(\"Expected a 200 reponse but got %s.\" % (response.status))\n return self.response_handler.get_processed_data(response.read())", "def call_service(func, api_kwargs, context, request):\n pattern = request.matched_route.pattern\n service = request.registry['soap_services'].get(pattern)\n request.META = request.headers.environ # to be used by soapbox, like django\n request.service = service\n\n SOAP = service.version\n\n if request.method == 'GET' and 'wsdl' in request.params:\n tree = py2wsdl.generate_wsdl(request.service)\n body = etree.tostring(tree, pretty_print=True)\n return Response(body=body, content_type=SOAP.CONTENT_TYPE)\n\n try:\n xml = request.body\n envelope = SOAP.Envelope.parsexml(xml)\n message = envelope.Body.content()\n soap_action = SOAP.determin_soap_action(request)\n tagname, return_object = call_the_method(service,\n request, message, soap_action)\n soap_message = SOAP.Envelope.response(tagname, return_object)\n return Response(body=soap_message, content_type=SOAP.CONTENT_TYPE)\n except (ValueError, etree.XMLSyntaxError) as e:\n response = SOAP.get_error_response(SOAP.Code.CLIENT, str(e))\n except Exception, e:\n response = SOAP.get_error_response(SOAP.Code.SERVER, str(e))\n return Response(body=response, content_type=SOAP.CONTENT_TYPE)", "def invoke_method(self, method, uri, query_param={}, request_param={}, headers=None, **kwargs):\n query_param.setdefault(\"apiKey\", self.api_key)\n\n method = method.lower()\n\n if method == \"get\":\n resp = self.get(method, uri, query_param, request_param, headers, **kwargs)\n\n elif method == \"post\":\n resp = self.post(method, uri, query_param, request_param, headers, **kwargs)\n\n elif method == \"delete\":\n resp = self.delete(method, uri, query_param, request_param, headers, **kwargs)\n\n elif method == \"patch\":\n resp = self.patch(method, uri, query_param, request_param, headers, **kwargs)\n\n else:\n raise BacklogError(\"Not supported http method: {}\".format(method))\n\n return resp", "def _call(self, method, url, params):\n if not url.startswith('http'):\n url = self.root + url\n headers = self._auth_headers()\n headers['Content-Type'] = 'application/json'\n\n r = self._session.request(method, url,\n headers=headers,\n 
proxies=self.proxies,\n params=params,\n timeout=self.requests_timeout)\n r.raise_for_status() # Check for error\n return r.json()", "def on_action(self, message):\n with self.handler.wrapee as wrapee:\n log.debug(\"Calling {method} on {name}\", method=message['action'], name=self.name)\n try:\n func = getattr(wrapee, message['action'])\n except AttributeError as ex:\n log.warn(\"Trying to call a method {method} that does not exsist!\",\n method=ex.args[0])\n return\n res, msg = func(*message['args'])\n if not res:\n log.warn(\"Error while calling {method}: {msg}\", msg=msg,\n method=message['action'])\n else:\n log.debug(\"Called method succesfully\")\n for protocol in self.service.protocols:\n protocol.send_packet()\n if msg != '':\n protocol.send_news(msg)", "def callmethod(\n self, method: str, *args: Sequence[Any], **kwargs: Sequence[Any]\n ) -> List[Any]:\n return getattr(self, method)(*args, **kwargs)", "def proxy_method(self, rest_path, sign, kwargs):", "def _exe(self, method):\n request_path = self.request.path\n path = request_path.split('/')\n services_and_params = list(filter(lambda x: x!='',path))\n\n # Get all function names configured in the class RestHandler\n functions = list(filter(lambda op: hasattr(getattr(self,op),'_service_name') == True and inspect.ismethod(getattr(self,op)) == True, dir(self)))\n # Get all http methods configured in the class RestHandler\n http_methods = list(map(lambda op: getattr(getattr(self,op),'_method'), functions))\n\n if method not in http_methods:\n raise tornado.web.HTTPError(405,'The service not have %s verb' % method)\n for operation in list(map(lambda op: getattr(self,op), functions)):\n service_name = getattr(operation,\"_service_name\")\n service_params = getattr(operation,\"_service_params\")\n # If the _types is not specified, assumes str types for the params\n services_from_request = list(filter(lambda x: x in path,service_name))\n\n if operation._method == self.request.method and service_name == services_from_request and len(service_params) + len(service_name) == len(services_and_params):\n try:\n params_values = self._find_params_value_of_url(service_name,request_path) + self._find_params_value_of_arguments(operation)\n p_values = self._convert_params_values(params_values)\n body = str(self.request.body,'utf-8')\n self.request_data = None\n if body:\n self.request_data = json.loads(body)\n response = operation(*p_values)\n self.request_data = None\n\n if response == None:\n return\n\n self.set_header(\"Content-Type\",'application/json')\n self.write(json.dumps(response))\n self.finish()\n except Exception as detail:\n self.request_data = None\n self.gen_http_error(500,\"Internal Server Error : %s\"%detail)\n raise", "def __call__(self, *args, **params):\n\t\treturn self.send(params)", "def _dispatch(self, method, params):\n func = None\n try:\n # check to see if a matching function has been registered\n func = self.server.funcs[method]\n except KeyError:\n if self.server.instance is not None:\n # check for a _dispatch method\n if hasattr(self.server.instance, '_dispatch'):\n return self.server.instance._dispatch(method, params)\n else:\n # call instance method directly\n try:\n func = resolve_dotted_attribute(\n self.server.instance,\n method,\n self.server.allow_dotted_names\n )\n except AttributeError:\n pass\n\n request = Request(\n client_address=self.client_address,\n headers=self.headers,\n )\n if func is not None:\n return func(request, *params)\n else:\n raise Exception('method \"%s\" is not supported' % method)", "def 
_call_it(params): # pragma: no cover\n instance, name, args = params\n kwargs = {}\n return getattr(instance, name)(*args, **kwargs)", "def _call_method(self, method, req, resp_class):\n payload = req.SerializeToString()\n headers = {\n 'Content-Type': 'application/x-protobuf',\n 'Content-Length': str(len(payload))\n }\n response, content = self._http.request(\n self._url + method, method='POST', body=payload, headers=headers)\n if response.status != 200:\n raise RPCError(method, response, content)\n resp = resp_class()\n resp.ParseFromString(content)\n return resp", "def _ServerProxy__request(self, methodname, params):\n\n paddedHandler = self._ServerProxy__handler\n\n # add on the methodName\n sep = '&'\n if '?' not in paddedHandler:\n sep = '?'\n paddedHandler = paddedHandler + \"%smethod=%s\" % (sep, methodname)\n sep = '&'\n\n # add on the auth token\n if self._authToken:\n paddedHandler = paddedHandler + \"%sauth_token=%s\" % (sep, urllib.quote_plus(self._authToken))\n\n # add on the partnerId\n if self._partnerId:\n paddedHandler = paddedHandler + \"%spartner_id=%s\" % (sep, self._partnerId)\n\n # add on the userId\n if self._userId:\n paddedHandler = paddedHandler + \"%suser_id=%s\" % (sep, self._userId)\n\n EXCLUDED_PAYLOAD_CALLS = ([\n \"auth.partnerLogin\",\n \"test.\",\n \"debug.\",\n \"testability.\"\n ])\n encryptRequest = True\n if self._requestCipher:\n for excludedMethodPattern in EXCLUDED_PAYLOAD_CALLS:\n if methodname.startswith(excludedMethodPattern):\n encryptRequest = False\n break\n else:\n encryptRequest = False\n\n # add the syncTime request\n if encryptRequest and self._sync:\n server_value, sync_time = self._sync\n params[0]['syncTime'] = server_value + int(time.time()) - sync_time\n\n request = xmlrpclib.dumps(params, methodname,\n encoding=self._ServerProxy__encoding,\n allow_none=self._ServerProxy__allow_none)\n\n #print \"------- XML REQUEST --------\"\n #print request\n\n if encryptRequest:\n request = self.encodeRequest(request)\n\n if self.x509:\n response = self._ServerProxy__transport.request(\n (self._ServerProxy__host, self.x509),\n paddedHandler,\n request,\n verbose=self._ServerProxy__verbose\n )\n else:\n response = self._ServerProxy__transport.request(\n self._ServerProxy__host,\n paddedHandler,\n request,\n verbose=self._ServerProxy__verbose\n )\n\n if len(response) == 1:\n response = response[0]\n\n #print \"------ RESPONSE ------\"\n #print response\n\n return response", "def rpc_call(self, method: str, params: Optional[list] = None) -> Any:\r\n if params is None:\r\n params = []\r\n data = json.dumps({ # json string used in HTTP requests\r\n 'jsonrpc': '2.0',\r\n 'method': method,\r\n 'params': params,\r\n 'id': self.id\r\n })\r\n url = \"http://{}:{}\".format(self.ip.address, self.rpc_port)\r\n with SEMAPHORE:\r\n with requests.Session() as r:\r\n # sleep(0.01) ###\r\n response = r.post(url=url, data=data, headers=self._headers)\r\n while response.headers['Content-Type'] != 'application/json':\r\n print(self.ip.address, self.rpc_port)\r\n print(response.status_code, response.headers)\r\n print(response.content)\r\n sleep(0.05)\r\n response = r.post(url=url, data=data, headers=self._headers)\r\n content = response.json()\r\n # sleep(0.02)\r\n print(content)\r\n result = content.get('result')\r\n err = content.get('error')\r\n if err:\r\n raise RuntimeError(self.ip.address, self.rpc_port, err.get('message'))\r\n\r\n print('%s @%s : %s %s' % (method, self.ip.address, self.rpc_port, result))\r\n return result", "async def call_rpc(self, 
rpc_message: RpcMessage, options: dict, bus_client: \"BusClient\"):\n raise NotImplementedError()", "def _call_method(self, module, method, *args, **kwargs):\n if not self._is_vim_object(module):\n return self.invoke_api(module, method, self.vim, *args, **kwargs)\n else:\n return self.invoke_api(module, method, *args, **kwargs)", "def testNoSuchRemoteMethod(self):\n self.ExpectRpcError(self.rpc_mapper1,\n remote.RpcState.METHOD_NOT_FOUND_ERROR,\n 'Unrecognized RPC method: not_remote')\n\n self.mox.ReplayAll()\n\n self.handler.handle('POST', '/my_service', 'not_remote')\n\n self.VerifyResponse('400', 'Unrecognized RPC method: not_remote', '')\n\n self.mox.VerifyAll()", "def _do_call(cls, method, url, params={}):\n headers = {\n 'User-Agent': 'py-retain/' + __version__,\n 'content-type': 'application/json'\n }\n try:\n r = cls.request_map[method.lower()]\n except KeyError:\n raise ValueError(\"Unknow HTTP Method\")\n response = r(\n url,\n auth=(cls.app_id, cls.api_key),\n headers=headers,\n data=json.dumps(params),\n timeout=cls.timeout)\n return response.json()", "def call(self, service, method, *args, **kwargs):\r\n if kwargs.pop('iter', False):\r\n return self.iter_call(service, method, *args, **kwargs)\r\n\r\n invalid_kwargs = set(kwargs.keys()) - VALID_CALL_ARGS\r\n if invalid_kwargs:\r\n raise TypeError(\r\n 'Invalid keyword arguments: %s' % ','.join(invalid_kwargs))\r\n\r\n if not service.startswith(self._prefix):\r\n service = self._prefix + service\r\n\r\n headers = kwargs.get('headers', {})\r\n\r\n if self.auth:\r\n headers.update(self.auth.get_headers())\r\n\r\n if kwargs.get('id') is not None:\r\n headers[service + 'InitParameters'] = {'id': kwargs.get('id')}\r\n\r\n if kwargs.get('mask') is not None:\r\n headers.update(self.__format_object_mask(kwargs.get('mask'),\r\n service))\r\n\r\n if kwargs.get('filter') is not None:\r\n headers['%sObjectFilter' % service] = kwargs.get('filter')\r\n\r\n if kwargs.get('limit'):\r\n headers['resultLimit'] = {\r\n 'limit': kwargs.get('limit'),\r\n 'offset': kwargs.get('offset', 0),\r\n }\r\n\r\n http_headers = {\r\n 'User-Agent': USER_AGENT,\r\n 'Content-Type': 'application/xml',\r\n }\r\n\r\n if kwargs.get('compress', True):\r\n http_headers['Accept'] = '*/*'\r\n http_headers['Accept-Encoding'] = 'gzip, deflate, compress'\r\n\r\n if kwargs.get('raw_headers'):\r\n http_headers.update(kwargs.get('raw_headers'))\r\n\r\n uri = '/'.join([self.endpoint_url, service])\r\n return make_xml_rpc_api_call(uri, method, args,\r\n headers=headers,\r\n http_headers=http_headers,\r\n timeout=self.timeout,\r\n proxy=self.proxy)", "def call_thread_method(self, name, *args, **kwargs):\n return self._thread_methods[name](*args,**kwargs)", "def call_firstresult(self, methname, *args, **kwargs): \n return MultiCall(self.listattr(methname), *args, **kwargs).execute(firstresult=True)", "def _call(self, args):\n a = args.split(' ', 1)\n if a:\n getattr(self, a[0])(*a[1:])", "def rpc(self) -> global___Rpc:", "def method(self,methodname):\n\t\tif methodname in self.methods:\n\t\t\treturn self.methods[methodname]\n\t\treturn None", "def xmlrpc_method(returns='string', args=None, name=None):\r\n # Args should be a list\r\n if args is None:\r\n args = []\r\n\r\n def _xmlrpc_func(func):\r\n \"\"\"Inner function for XML-RPC method decoration. 
Adds a signature to\r\n the method passed to it.\r\n\r\n func\r\n The function to add the signature to\r\n \"\"\"\r\n # Add a signature to the function\r\n func._xmlrpc_signature = {\r\n 'returns': returns,\r\n 'args': args\r\n }\r\n return func\r\n\r\n return _xmlrpc_func", "def exec(self, method=\"get\", content_type=\"application/json\"):\n str_method = method.lower()\n m = self.i[\"method\"].split()\n\n if str_method in m:\n try:\n par_dict = {}\n par_man = match(self.op, self.op_url).groups()\n for idx, par in enumerate(findall(\"{([^{}]+)}\", self.i[\"url\"])):\n try:\n par_type = self.i[par].split(\"(\")[0]\n if par_type == \"str\":\n par_value = par_man[idx]\n else:\n par_value = self.dt.get_func(par_type)(par_man[idx])\n except KeyError:\n par_value = par_man[idx]\n par_dict[par] = par_value\n\n if self.addon is not None:\n self.preprocess(par_dict, self.i, self.addon)\n\n query = self.i[\"sparql\"]\n for param in par_dict:\n query = query.replace(\"[[%s]]\" % param, str(par_dict[param]))\n\n if self.sparql_http_method == \"get\":\n r = get(\n self.tp + \"?query=\" + quote(query),\n headers={\"Accept\": \"text/csv\"},\n )\n else:\n r = post(\n self.tp,\n data=query,\n headers={\n \"Accept\": \"text/csv\",\n \"Content-Type\": \"application/sparql-query\",\n },\n )\n r.encoding = \"utf-8\"\n sc = r.status_code\n if sc == 200:\n # This line has been added to avoid a strage behaviour of the 'splitlines' method in\n # presence of strange characters (non-UTF8).\n list_of_lines = [\n line.decode(\"utf-8\")\n for line in r.text.encode(\"utf-8\").splitlines()\n ]\n res = self.type_fields(list(reader(list_of_lines)), self.i)\n if self.addon is not None:\n res = self.postprocess(res, self.i, self.addon)\n q_string = parse_qs(quote(self.url_parsed.query, safe=\"&=\"))\n res = self.handling_params(q_string, res)\n res = self.remove_types(res)\n s_res = StringIO()\n writer(s_res).writerows(res)\n return (sc,) + Operation.conv(\n s_res.getvalue(), q_string, content_type\n )\n else:\n return sc, \"HTTP status code %s: %s\" % (sc, r.reason), \"text/plain\"\n except TimeoutError:\n exc_type, exc_obj, exc_tb = exc_info()\n sc = 408\n return (\n sc,\n \"HTTP status code %s: request timeout - %s: %s (line %s)\"\n % (sc, exc_type.__name__, exc_obj, exc_tb.tb_lineno),\n \"text/plain\",\n )\n except TypeError:\n exc_type, exc_obj, exc_tb = exc_info()\n sc = 400\n return (\n sc,\n \"HTTP status code %s: \"\n \"parameter in the request not compliant with the type specified - %s: %s (line %s)\"\n % (sc, exc_type.__name__, exc_obj, exc_tb.tb_lineno),\n \"text/plain\",\n )\n except:\n exc_type, exc_obj, exc_tb = exc_info()\n sc = 500\n return (\n sc,\n \"HTTP status code %s: something unexpected happened - %s: %s (line %s)\"\n % (sc, exc_type.__name__, exc_obj, exc_tb.tb_lineno),\n \"text/plain\",\n )\n else:\n sc = 405\n return (\n sc,\n \"HTTP status code %s: '%s' method not allowed\" % (sc, str_method),\n \"text/plain\",\n )", "def SoapAction(self) -> str:", "def _handler(self, message):\n\n data = pickle.loads(message['data'])\n\n if not data[2]:\n # empty method call; bail out\n return\n\n # call the function and respond to the proxy object with return value\n uuid = data[0]\n proxy = data[1]\n func = getattr(self, data[2])\n result = (uuid, func(*data[3], **data[4]))\n self._redis.publish('proxy:%s' % proxy, pickle.dumps(result))", "def call(self) -> global___Snippet.ClientCall:", "def call(self) -> global___Snippet.ClientCall:", "def get_method(self, method): \n for provider in self.method_handlers:\n 
for candidate in provider.xmlrpc_methods():\n #self.env.log.debug(candidate)\n p = Method(provider, *candidate)\n if p.name == method:\n return p\n raise MethodNotFound('RPC method \"%s\" not found' % method)", "def rpc_method(func):\n func.rpc_callable = True\n return func", "def call(self, method, params, callback=None):\n cur_id = self._next_id()\n if callback:\n self._callbacks[cur_id] = callback\n self.send({'msg': 'method', 'id': cur_id, 'method': method, 'params': params})", "def call(self, method, params, callback=None):\n cur_id = self._next_id()\n if callback:\n self._callbacks[cur_id] = callback\n self.send({'msg': 'method', 'id': cur_id, 'method': method, 'params': params})", "def _call(self, method, endpoint, content=None, params=None):\n\t\tparams = params or {}\n\t\tcontent = content or {}\n\n\t\tjson_data = json.dumps(content)\n\t\tendpoint = endpoint.strip(\"/\")\n\t\theaders = {\"X-Signed-Request-Hash\": self.__sign(method, endpoint, json_data)}\n\n\t\tresponse = self.session.request(\n\t\t\tmethod=method,\n\t\t\turl=(CloudClient.BASE_API + endpoint),\n\t\t\theaders = headers,\n\t\t\tparams=params,\n\t\t\tdata = json_data\n\t\t)\n\n\t\treturn WeeblyCloudResponse(self.session, response)", "def __call__(self, api_function):\n return hug_core.interface.Local(self.route, api_function)", "def post(self):\n return getServiceListMethod(self)", "def client_static_function(self, method: ProtoServiceMethod) -> None:" ]
[ "0.72344095", "0.7016205", "0.6945265", "0.6935145", "0.6773144", "0.6740759", "0.6691549", "0.667169", "0.6600439", "0.6472304", "0.63347936", "0.62916875", "0.6260245", "0.6260234", "0.62297845", "0.6161755", "0.61566997", "0.6142578", "0.60809994", "0.6068365", "0.6018835", "0.5999659", "0.5979393", "0.59466517", "0.5936772", "0.5917524", "0.59063303", "0.5898535", "0.5888847", "0.58327883", "0.5819823", "0.5812759", "0.57909536", "0.5762939", "0.57540464", "0.5753469", "0.5740224", "0.57056314", "0.57008713", "0.56889045", "0.56634057", "0.5662527", "0.5646987", "0.5629939", "0.56171477", "0.5604055", "0.55708927", "0.55599356", "0.55094177", "0.5493917", "0.5473947", "0.54718786", "0.54648304", "0.54638994", "0.54581743", "0.54478", "0.54476047", "0.54474163", "0.5446686", "0.5443296", "0.54363257", "0.5427234", "0.5427015", "0.5414552", "0.54081243", "0.54033923", "0.54031295", "0.53969204", "0.5370876", "0.5369888", "0.5356865", "0.5355548", "0.5355164", "0.53417975", "0.53210396", "0.532103", "0.53172845", "0.53129125", "0.53081024", "0.53058565", "0.53023595", "0.53003985", "0.5299197", "0.5293735", "0.52790064", "0.527514", "0.5271433", "0.5266537", "0.5265649", "0.5264083", "0.52579147", "0.52579147", "0.52565897", "0.5256426", "0.52559865", "0.52559865", "0.52482104", "0.5246863", "0.52457726", "0.52444935" ]
0.6306776
11
Returns typecodes representing the input and output messages; if the request and/or response fails to be generated, None is returned for either or both. callinfo -- WSDLTools.SOAPCallInfo instance describing an operation.
def _getTypeCodes(self, callinfo): prefix = None self._resetPrefixDict() if callinfo.use == 'encoded': prefix = self._getPrefix(callinfo.namespace) try: requestTC = self._getTypeCode(parameters=callinfo.getInParameters(), literal=(callinfo.use=='literal')) except EvaluateException, ex: print "DEBUG: Request Failed to generate --", ex requestTC = None self._resetPrefixDict() try: replyTC = self._getTypeCode(parameters=callinfo.getOutParameters(), literal=(callinfo.use=='literal')) except EvaluateException, ex: print "DEBUG: Response Failed to generate --", ex replyTC = None request = response = None if callinfo.style == 'rpc': if requestTC: request = TC.Struct(pyclass=None, ofwhat=requestTC, pname=callinfo.methodName) if replyTC: response = TC.Struct(pyclass=None, ofwhat=replyTC, pname='%sResponse' %callinfo.methodName) else: if requestTC: request = requestTC[0] if replyTC: response = replyTC[0] #THIS IS FOR RPC/ENCODED, DOC/ENCODED Wrapper if request and prefix and callinfo.use == 'encoded': request.oname = '%(prefix)s:%(name)s xmlns:%(prefix)s="%(namespaceURI)s"' \ %{'prefix':prefix, 'name':request.oname, 'namespaceURI':callinfo.namespace} return request, response
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def GetModelOutputInfo(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def verify_call(obj):\n\tassert obj.tag == 'OMOBJ'\n\tattr = obj[0]\n\t\n\tassert attr.tag == 'OMATTR'\n\tpairs, application = attr\n\t\n\tassert application.tag == 'OMA'\n\tsymbol, args = application\n\t\n\tassert symbol.tag == 'OMS'\n\tassert symbol.get('cd') == \"scscp1\"\n\tassert symbol.get('name') == \"procedure_call\"\n\t\n\tassert args.tag == 'OMA'\n\tassert len(args) > 0\n\tname_symbol = args[0]\n\t\n\tassert name_symbol.tag == 'OMS'\n\tcd = name_symbol.get('cd')\n\tproc_name = name_symbol.get('name')\n\t\n\t#2. Now handle the extra information\n\tassert pairs.tag == 'OMATP'\n\tassert len(pairs) % 2 == 0\n\t\n\textras = {}\n\tcall_id = None\n\treturn_type = None\n\t\n\tfor i in range(0, len(pairs), 2):\n\t\tsymbol = pairs[i]\n\t\tassert symbol.tag == 'OMS'\n\t\tassert symbol.get('cd') == \"scscp1\"\n\t\tname = symbol.get('name')\n\t\textras[name] = pairs[i+1]\n\t\t\n\t\tif name == 'call_id':\n\t\t\tassert call_id is None\n\t\t\tcall_id = pairs[i+1].text\n\t\t\tprint(call_id)\n\t\telif name.startswith('option_return_'):\n\t\t\tassert return_type is None\n\t\t\treturn_type = ReturnTypes[name[14:]]\n\t\n\t#Some information is mandatory\n\tassert call_id is not None\n\tassert return_type is not None\n\t\n\treturn cd, proc_name, call_id, return_type, args[1:], extras", "def get_method_type(request_streaming, response_streaming):\n if request_streaming and response_streaming:\n return BIDI_STREAMING\n elif request_streaming and not response_streaming:\n return CLIENT_STREAMING\n elif not request_streaming and response_streaming:\n return SERVER_STREAMING\n return UNARY", "def get_spi_response_type(cmd_num):\n length = 8 # Default length of a response\n resp_type = 1\n if cmd_num in [8]:\n # CMD8 gets R7\n resp_type = 7\n length = 40\n if cmd_num in [5]:\n # CMD5 gets a R4 back in SPI mode\n resp_type = 4\n length = 40\n if cmd_num in [52,53]:\n resp_type = 5\n length = 16\n \n log.debug(\"Cmd %d expects response type R%s\" %(cmd_num,resp_type))\n return (resp_type, length)", "def GetNativeInputInfo(is_optional):\r\n raise Exception(\"Abstract method\")", "def operation_type(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"operation_type\")", "def _on_response(self, response_type, p_api1, p_api2, double1, double2, ptr1, size1, ptr2, size2, ptr3, size3):\n if self.debug:\n print \"Response: \", ord(response_type)\n if response_type == OnConnectionStatus.value:\n self._on_connect_status(p_api2, chr(int(double1)), ptr1, size1)\n elif self._callbacks:\n for callback in self._callbacks:\n if response_type == OnRtnDepthMarketData.value:\n if self._is_market:\n callback.on_market_rtn_depth_market_data_n(p_api2, ptr1)\n elif response_type == OnRspQryInstrument.value:\n obj = cast(ptr1, POINTER(InstrumentField)).contents\n callback.on_trading_rsp_qry_instrument(p_api2, obj, bool(double1))\n elif response_type == OnRspQryTradingAccount.value:\n obj = cast(ptr1, POINTER(AccountField)).contents\n callback.on_trading_rsp_qry_trading_account(p_api2, obj, bool(double1))\n elif response_type == OnRspQryInvestorPosition.value:\n obj = cast(ptr1, POINTER(PositionField)).contents\n callback.on_trading_rsp_qry_investor_position(p_api2, obj, bool(double1))\n elif response_type == OnRspQrySettlementInfo.value:\n obj = cast(ptr1, POINTER(SettlementInfoField)).contents\n 
callback.on_trading_rsp_qry_settlement_info(p_api2, obj, bool(double1))\n elif response_type == OnRtnOrder.value:\n obj = cast(ptr1, POINTER(OrderField)).contents\n callback.on_trading_rtn_order(p_api2, obj)\n elif response_type == OnRtnTrade.value:\n obj = cast(ptr1, POINTER(TradeField)).contents\n callback.on_trading_rtn_trade(p_api2, obj)\n elif response_type == OnRtnQuote.value:\n obj = cast(ptr1, POINTER(QuoteField)).contents\n callback.on_trading_rtn_quote(p_api2, obj)\n elif response_type == OnRtnQuoteRequest.value:\n obj = cast(ptr1, POINTER(QuoteRequestField)).contents\n callback.on_trading_rtn_quote_request(p_api2, obj)\n elif response_type == OnRspQryHistoricalTicks.value:\n obj = cast(ptr1, POINTER(TickField)).contents\n obj2 = cast(ptr2, POINTER(HistoricalDataRequestField)).contents\n callback.on_trading_rsp_qry_historical_ticks(p_api2, obj, obj2, bool(double1))\n elif response_type == OnRspQryHistoricalBars.value:\n obj = cast(ptr1, POINTER(BarField)).contents\n obj2 = cast(ptr2, POINTER(HistoricalDataRequestField)).contents\n callback.on_trading_rsp_qry_historical_bars(p_api2, obj, obj2, bool(double1))\n elif response_type == OnRspQryInvestor.value:\n obj = cast(ptr1, POINTER(InvestorField)).contents\n callback.on_trading_rsp_qry_investor(p_api2, obj)\n elif response_type == OnFilterSubscribe.value:\n instrument = c_char_p(ptr1).value\n callback.on_trading_filter_subscribe(p_api2, ExchangeType(double1), size1, size2, size3, instrument)\n elif response_type == OnRtnError.value:\n obj = cast(ptr1, POINTER(ErrorField)).contents\n if self._is_market:\n callback.on_market_rsp_error(p_api2, obj, bool(double1))\n else:\n callback.on_trading_rsp_error(p_api2, obj, bool(double1))", "def RequestInformation(self, request, inInfo, outInfo):\n if self.need_to_read():\n self._read_up_front()\n self._update_time_steps()\n return 1 # NOTE: ALWAYS return 1 on pipeline methods", "def _fc_out_parameters(self) -> Tuple[str, List[str]]:\n out_pars = self.ret_type.fc_ret_type()\n if len(out_pars) == 1:\n return (out_pars[0][0], [])\n\n out_par_strl = list() # type: List[str]\n for type_name, postfix in out_pars:\n out_par_strl.append('{} {}'.format(\n type_name, self.ret_type.name + postfix))\n return ('void', out_par_strl)", "def __validate_input(self, request_data):\n call_id = request_data.get(strings.CALL_ID_KEY)\n request_timestamp = request_data.get(strings.TIMESTAMP_KEY)\n request_start = request_data.get(strings.START_KEY)\n validation = None\n if call_id and request_timestamp and request_start is not None:\n call_detail_query = CallDetail.objects.filter(call_id=call_id)\n if call_detail_query:\n if len(call_detail_query) < CALL_DETAILS_LIMIT:\n stored_call_detail = call_detail_query[0]\n if isinstance(request_start, str):\n if request_start in strings.TRUE_VALUES:\n request_start = True\n else:\n request_start = False\n if stored_call_detail.start == request_start:\n validation = {strings.INPUT_ERROR_KEY:\n strings.START_END_ERROR}\n stored_timestamp = standardize_date(\n stored_call_detail.timestamp,\n strings.COMPLETE_DATE_PATTERN)\n request_timestamp = standardize_date(request_timestamp,\n strings.\n COMPLETE_DATE_PATTERN)\n if stored_timestamp == request_timestamp:\n validation = {strings.INPUT_ERROR_KEY:\n strings.EQUAL_TIMESTAMPS_ERROR}\n if stored_call_detail.start and not request_start:\n if stored_timestamp > request_timestamp:\n validation = {strings.INPUT_ERROR_KEY:\n strings.SOONER_END_ERROR}\n elif not stored_call_detail.start and request_start:\n if stored_timestamp < 
request_timestamp:\n validation = {strings.INPUT_ERROR_KEY:\n strings.SOONER_END_ERROR}\n else:\n validation = {strings.INPUT_ERROR_KEY:\n strings.CALL_LIMIT_ERROR}\n\n return validation", "def function_response_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input['EventSourceMappingFunctionResponseTypesItem']]]]:\n return pulumi.get(self, \"function_response_types\")", "def construct_sp(self, info):\n if \"query\" in info.keys():\n if info[\"query\"].upper().startswith(\"CALL\"):\n self.q_str = info[\"query\"]\n self.sql_type_ind = (info[\"q_type_ind\"] if \"q_type_ind\" in info.keys() else\n sql_type.STORED_PROCEDURE_NO_RES)\n\n self.return_result = bool((self.sql_type_ind is sql_type.SELECT or\\\n self.sql_type_ind is sql_type.STORED_PROCEDURE_RES))\n\n elif \"procedure\" in info.keys():\n self.q_str = info[\"procedure\"]\n\n self.sql_type_ind = (info[\"q_type_ind\"] if \"q_type_ind\" in info.keys() else\n sql_type.STORED_PROCEDURE_NO_RES)\n\n self.return_result = bool((self.sql_type_ind is sql_type.SELECT or\\\n self.sql_type_ind is sql_type.STORED_PROCEDURE_RES))", "def build_method_call(code, line, method_object):\n full_signature = method_object[\"methodSignature\"]\n normalised_signature = normalise_signature(full_signature)\n param_values = get_method_parameter_values(code, line, full_signature)\n string_values, cmplx_types = get_string_values(param_values, full_signature)\n\n rpc_payload_length = str(\n 4 + len(normalised_signature) + len(string_values)\n )\n # Default to stub value if method-to-service correlation failed\n strong_name = (\n method_object[\"service\"][\"strongName\"]\n if method_object[\"service\"] is not None\n else \"X\"*32\n )\n rpc_blocks = []\n rpc_blocks.extend([\n RPC_VERSION,\n RPC_FLAGS,\n rpc_payload_length,\n BASE_URL,\n strong_name,\n method_object[\"rmtSvcIntName\"],\n method_object[\"methodName\"],\n ])\n rpc_blocks.extend(normalised_signature)\n rpc_blocks.extend(string_values)\n rpc_blocks.extend([\n \"1\", \"2\", \"3\", \"4\",\n method_object[\"paramCount\"]\n ])\n rpc_blocks.extend(\n generate_parameter_map(\n rpc_blocks,\n full_signature,\n param_values\n )\n )\n return rpc_blocks, cmplx_types", "def checkRequestType(packet):\n info = [packet[i : i + 2] for i in range(0, len(packet), 2)]\n RequestType = int.from_bytes(info[2], \"big\")\n if RequestType == 0x0001:\n return \"date\"\n elif RequestType == 0x0002:\n return \"time\"\n else:\n return -1", "def get_operation(operation):\n if operation == 'query':\n return banking_pb2.QUERY\n if operation == 'deposit':\n return banking_pb2.DEPOSIT\n if operation == 'withdraw':\n return banking_pb2.WITHDRAW", "def _handle_one_message(self):\n\n type, data = self.cxn.recv_message()\n\n if type.startswith(\"call\"):\n if len(data) != 3:\n message = (type, data)\n raise MessageError.invalid(message, \"incorrect number of args\")\n flags = {\n \"want_response\": type == \"call\",\n }\n call = Call(data[0], data[1], data[2], flags, self.client)\n self._handle_call(call)\n return False\n\n raise MessageError.bad_type(type)", "def analyze_input():\n\n # Generate action_id classes for OF 1.3\n for wire_version, ordered_classes in of_g.ordered_classes.items():\n if not wire_version in [of_g.VERSION_1_3]:\n continue\n classes = versions[of_g.of_version_wire2name[wire_version]]['classes']\n for cls in ordered_classes:\n if not loxi_utils.class_is_action(cls):\n continue\n action = cls[10:]\n if action == '' or action == 'header':\n continue\n name = \"of_action_id_\" + action\n members = 
classes[\"of_action\"][:]\n of_g.ordered_classes[wire_version].append(name)\n if type_maps.action_id_is_extension(name, wire_version):\n # Copy the base action classes thru subtype\n members = classes[\"of_action_\" + action][:4]\n classes[name] = members\n\n # @fixme If we support extended actions in OF 1.3, need to add IDs\n # for them here\n\n for wire_version in of_g.wire_ver_map.keys():\n version_name = of_g.of_version_wire2name[wire_version]\n calculate_offsets_and_lengths(\n of_g.ordered_classes[wire_version],\n versions[version_name]['classes'],\n wire_version)", "def get_response_type(cmd_num):\n length = 48 # Default length of a response\n if cmd_num in [0,4,15]:\n # No response expected\n rv = None\n if cmd_num in [11,13,16,17,18,19,23,55,56]:\n # Response type 1\n rv = 1\n if cmd_num in [7,12,20]:\n # Response type 1b, means it could also be a busy\n rv = 1.5\n if cmd_num in [2,9,10]:\n # Reponse type 2, CID/CSD register, not on SDIO but here for completeness\n length = 136\n rv = 2\n if cmd_num in [4,5]:\n rv = 4\n if cmd_num in [52,53]:\n rv = 5\n if cmd_num in [3]:\n rv = 6\n if cmd_num in [8]:\n rv = 7\n log.debug(\"Cmd %d expects response type R%s\" %(cmd_num,rv))\n return (rv, length)", "def _fi_out_parameters(self) -> Tuple[str, List[Tuple[str, str]]]:\n out_pars = self.ret_type.fi_ret_type()\n if len(out_pars) == 1:\n return (out_pars[0][0], [])\n\n out_par_list = list() # type: List[Tuple[str, str]]\n for par_type, par_name in out_pars:\n out_par_list.append((par_type, 'ret_val' + par_name))\n\n return ('', out_par_list)", "def processRequest(cls, ps, **kw):\n resource = kw['resource']\n method = resource.getOperation(ps, None) # This getOperation method is valid for ServiceSOAPBinding subclass\n rsp = method(ps, **kw)[1] # return (request, response) but we only need response\n return rsp", "def validate_args(self, in_args, cmd_call):\n valid_1, valid_2 = None, None\n\n if len(in_args) > 0 and type(in_args) is not list:\n args = in_args.split()\n valid_1 = args[0]\n elif type(in_args) is list and len(in_args) > 0:\n args = in_args\n valid_1 = args[0]\n else:\n args = []\n\n if cmd_call in ['default']:\n # Default : Returns a valid cui type for an input cui\n # checks to see if there is more than 2 arguments\n # if so, arg[0] may be a valid code\n # arg[1] may be a valid code type\n # if not ask the user what type of code type arg[0] is\n # valid_1 = valid cui type\n # valid_2 = None\n while True:\n if len(args) >= 2 and len(args) <= 3:\n input_type = args[1].upper()\n else:\n input_type = input(\"What type of id is '{0}'? 
[LOCAL/RXCUI/NDC/SNOMED]\".format(args[0])).upper()\n\n # Confirm it's a valid code type\n valid_type = self.validate_id_type(input_type)\n # Valid type is a boolean of True\n if isinstance(valid_type, str) or valid_type is None:\n return None\n elif valid_type:\n break\n elif not valid_type:\n print('Invalid Option, Please Try Again')\n continue\n valid_1 = input_type\n\n elif cmd_call in self.cmd_config_default:\n # valid_1 : Valid Cui , valid_2 : Valid Cui Type\n valid_2, _ = self.validate_args(args, 'default')\n valid_1 = args[0]\n\n elif cmd_call == 'code_lookup':\n # args[0] : Initial CUI, args[1] : Initial CUI Type, args[2] : Target CUI Type\n # valid_1 : valid cui, valid_2 : list valid source and target\n _dict_opts = util.OPTIONS_CUI_TYPES.copy()\n _avail = list(set(smores.get_dict_sources()) & set(_dict_opts))\n if len(_avail) == 0 and len(args) < 2:\n print('There are no available starting cui types that can be crosswalked.\\n'\n 'Please load a file containing valid cui types: {0}'.format(_dict_opts))\n return False, None\n\n if len(args) >= 2:\n if len(args) == 3:\n # provided cui, cui source, and target\n valid_2, _ = self.validate_args(args, 'default')\n source, target = args[1].upper(), args[2].upper()\n else:\n source, target = args[0].upper(), args[1].upper()\n valid_1 = simple_input(\"Is {0} the correct starting source? \".format(source), ['YES', 'NO', 'exit'])\n if valid_1 == 'exit':\n return False, None\n # TODO need path for valid_2\n else:\n valid_1 = simple_input(\"Which code set do you want to start with?\", _avail)\n if valid_1 != 'exit':\n _dict_opts.remove(valid_1) # Don't lookup what we've already got\n valid_2 = simple_input(\"Which code set do you want to get results for?\", _dict_opts)\n if valid_2 == 'exit':\n return False, None\n else:\n return False, None\n\n elif cmd_call == 'errors':\n _current_err = list(self.errors.keys())\n if len(args) > 1:\n smores_error('#Cx001.7', console_p=True)\n return\n elif len(args) == 1 and args[0].lower() in _current_err:\n valid_1 = args[0]\n elif len(args) == 1:\n print('There are currently no errors logged for that command.')\n return\n else:\n valid_1 = simple_input(\"Please choose a command from the list to see errors: \", _current_err)\n\n elif cmd_call in ['csv', 'remap', 'fhir', 'json']:\n # Format: [File] [Output]\n if not self.inputs['loaded']:\n print(\"No Files Loaded!\\nYou Must load a file containing local medications first\")\n else:\n _file_opts = list(self.inputs['files'].keys()) + ['All']\n _dict_opts = list(smores.get_dict_sources(True)) #+ ['All']\n _file_or_dict = None\n\n if cmd_call in ['csv', 'json']:\n if len(args) == 0:\n _file_or_dict = simple_input(\"Do you want results for a File or a constructed Dictionary?\",\n ['File', 'Dictionary', 'exit'], True)\n elif args[0] not in _file_opts and args[0] not in _dict_opts:\n print('That option was not recognized as a valid source.')\n _file_or_dict = simple_input(\"Do you want results for a File or a constructed Dictionary?\",\n ['File', 'Dictionary', 'exit'], True)\n else:\n valid_1 = args[0]\n\n if _file_or_dict.upper() == 'FILE':\n valid_1 = 'FILE|' + simple_input(\"Please choose a loaded file\", _file_opts, True)\n\n elif _file_or_dict.upper() == 'DICTIONARY':\n valid_1 = 'DICT|' + simple_input(\"Please choose a code dictionary to output\", _dict_opts, True)\n elif _file_or_dict.upper() == 'EXIT':\n return None, None\n\n else:\n valid_1 = simple_input(\"Please choose a loaded file\", _file_opts, True)\n\n if cmd_call in ['csv', 'json', 'fhir']:\n if 
len(args) == 2 and len(args[1]) > 0:\n valid_2 = args[1]\n else:\n valid_2= input(\"Please provide an output file name:\").strip()\n\n if len(valid_2) > 0:\n if \".\" in valid_2:\n valid_2, ext = valid_2.split(\".\")\n else:\n valid_2 = ''\n print('Empty file name provided, using default.')\n else:\n valid_2 = args[0]\n\n elif cmd_call == 'file':\n re_use = False\n if self.inputs['loaded'] and len(in_args) == 0:\n print(\"The following file(s) have already been loaded: \\n\" + str(self.inputs['files']))\n _load_more = simple_input(\"Would you like to load an additional file?\", ['Y', 'N', 'exit'])\n if _load_more == 'Y':\n pass\n elif _load_more == 'N':\n _re_use = simple_input(\"Would you like to re-use a loaded file?\", ['Y', 'N', 'exit'])\n if _re_use == 'Y':\n re_use = True\n else:\n return False, None\n else:\n return False, None\n\n if in_args is not None and len(in_args) > 0:\n valid_1 = in_args\n else:\n valid_1 = input(\"Please enter the name of the file to load: \") if not re_use else simple_input(\n 'Select the file to be used: ', list(self.inputs['files'].keys()), index=True)\n\n while True:\n if valid_1 in self.inputs['files']:\n if not re_use:\n print(\"It looks like you've already loaded that file. Please try a different file.\")\n valid_1, valid_2 = input(\"Please enter the name of the file to load: \")\n else:\n break\n elif len(valid_1) == 0:\n smores_error('#Cx001.7', logger=smoresLog)\n valid_1, valid_2 = input(\"Please enter the name of the file to load: \")\n else:\n break\n\n if not resolve_target_path(valid_1):\n valid_1, valid_2 = self.validate_args('', 'file')\n\n elif '.smr' in valid_1:\n if len(self.inputs['files']) > 0:\n print(\n 'It looks like you are trying to load a session, this will replace the current session and '\n 'all previous work.')\n _save = simple_input('Do you want to save the current session first?', ['Y', 'N', 'EXIT'])\n if _save == 'Y':\n smores.save_session(self.__version__)\n elif _save == 'EXIT':\n return False, None\n valid_2 = 'session'\n else:\n valid_2 = 'file'\n\n smoresLog.debug('Args: {0}, Validated as: {1}'.format(valid_1, valid_2))\n return valid_1, valid_2", "def GetNativeOutputInfo(\r\n is_struct=False,\r\n featurizer_name=\"\",\r\n ):\r\n raise Exception(\"Abstract method\")", "def _get_request_parser(self, operation):\n\n wpsrequest = self\n\n def parse_get_getcapabilities(http_request):\n \"\"\"Parse GET GetCapabilities request\n \"\"\"\n\n acceptedversions = _get_get_param(http_request, 'acceptversions')\n wpsrequest.check_accepted_versions(acceptedversions)\n\n def parse_get_describeprocess(http_request):\n \"\"\"Parse GET DescribeProcess request\n \"\"\"\n version = _get_get_param(http_request, 'version')\n wpsrequest.check_and_set_version(version)\n\n language = _get_get_param(http_request, 'language')\n wpsrequest.check_and_set_language(language)\n\n wpsrequest.identifiers = _get_get_param(\n http_request, 'identifier', aslist=True)\n\n def parse_get_execute(http_request):\n \"\"\"Parse GET Execute request\n \"\"\"\n version = _get_get_param(http_request, 'version')\n wpsrequest.check_and_set_version(version)\n\n language = _get_get_param(http_request, 'language')\n wpsrequest.check_and_set_language(language)\n\n wpsrequest.identifier = _get_get_param(http_request, 'identifier')\n wpsrequest.store_execute = _get_get_param(\n http_request, 'storeExecuteResponse', 'false')\n wpsrequest.status = _get_get_param(http_request, 'status', 'false')\n wpsrequest.lineage = _get_get_param(\n http_request, 'lineage', 'false')\n 
wpsrequest.inputs = get_data_from_kvp(\n _get_get_param(http_request, 'DataInputs'), 'DataInputs')\n wpsrequest.outputs = {}\n\n # take responseDocument preferably\n resp_outputs = get_data_from_kvp(\n _get_get_param(http_request, 'ResponseDocument'))\n raw_outputs = get_data_from_kvp(\n _get_get_param(http_request, 'RawDataOutput'))\n wpsrequest.raw = False\n if resp_outputs:\n wpsrequest.outputs = resp_outputs\n elif raw_outputs:\n wpsrequest.outputs = raw_outputs\n wpsrequest.raw = True\n # executeResponse XML will not be stored and no updating of\n # status\n wpsrequest.store_execute = 'false'\n wpsrequest.status = 'false'\n\n if not operation:\n raise MissingParameterValue('Missing request value', 'request')\n else:\n self.operation = operation.lower()\n\n if self.operation == 'getcapabilities':\n return parse_get_getcapabilities\n elif self.operation == 'describeprocess':\n return parse_get_describeprocess\n elif self.operation == 'execute':\n return parse_get_execute\n else:\n raise OperationNotSupported(\n 'Unknown request %r' % self.operation, operation)", "def _process_operation(operation_pb):\n match = _OPERATION_NAME_RE.match(operation_pb.name)\n if match is None:\n raise ValueError('Operation name was not in the expected '\n 'format after instance creation.',\n operation_pb.name)\n location_id = match.group('location_id')\n operation_id = int(match.group('operation_id'))\n\n request_metadata = _parse_pb_any_to_native(operation_pb.metadata)\n operation_begin = _pb_timestamp_to_datetime(\n request_metadata.request_time)\n\n return operation_id, location_id, operation_begin", "def test_decoding_method(self):\n data = service_call.encode_call(\"foo\", [42])\n name, params = service_call.decode_call(data)\n\n self.assertEqual(name, \"foo\")\n self.assertEqual(params, [42])", "def GetInput(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def _extract_commands(self, data):\n version = data[6]\n if version > 1:\n raise CarError('only version 1 is supported')\n if data[8] == 1:\n if self.inputastext is None:\n self.inputastext = True\n if data[9] == 1:\n if self.outputastext is None:\n self.outputastext = True\n data = data[10:]\n data = struct.unpack('<' + 'I' * (len(data) // 4), data)\n commands = tuple((data[i], data[i + 1]) for i in range(3, len(data), 2))\n for x, a in filter(lambda x: x[0] in (GOTO, IF), commands):\n if a >= len(commands):\n raise CarError('code position out of scope')\n return commands, data[:3]", "def traffic_statuscodes_requestresponsetype(self, **kwargs):\n url_path = 'traffic/statuscodes/requestresponsetype'\n self.logger.debug(f\"Get list of request-response types\")\n body = self._make_body(kwargs)\n return self._common_get(request_path=url_path, parameters=body)", "def function_response_types(self) -> pulumi.Output[Optional[Sequence['EventSourceMappingFunctionResponseTypesItem']]]:\n return pulumi.get(self, \"function_response_types\")", "def pre_get_operation(\n self,\n request: operations_pb2.GetOperationRequest,\n metadata: Sequence[Tuple[str, str]],\n ) -> Tuple[operations_pb2.GetOperationRequest, Sequence[Tuple[str, str]]]:\n return request, metadata", "def pre_get_operation(\n self,\n request: operations_pb2.GetOperationRequest,\n metadata: Sequence[Tuple[str, str]],\n ) -> Tuple[operations_pb2.GetOperationRequest, Sequence[Tuple[str, str]]]:\n return request, metadata", "def action_GetProtocolInfo(self, 
extract_returns=True):\n arguments = { }\n\n out_params = self._proxy_call_action(\"GetProtocolInfo\", arguments=arguments)\n\n rtn_args = out_params\n if extract_returns:\n rtn_args = [out_params[k] for k in (\"Source\", \"Sink\",)]\n if len(rtn_args) == 1:\n rtn_args = rtn_args[0]\n\n return rtn_args", "def _find_collection_response(op: Operation) -> Tuple[int, Any]:\n for code, resp_model in op.response_models.items():\n if resp_model is None or resp_model is NOT_SET:\n continue\n\n model = resp_model.__annotations__[\"response\"]\n if is_collection_type(model):\n item_schema = get_collection_args(model)[0]\n return code, item_schema\n\n raise ConfigError(\n f'\"{op.view_func}\" has no collection response (e.g. response=List[SomeSchema])'\n )", "def gen_proto_recv(signame, argname, typename, size, is_enum, is_struct, is_varlen):\n add_code = None\n wordoff = word_offset(signame, argname)\n if is_varlen:\n # Array. Logic is identical to send direction; copying\n # is done elsewhere, we just return an offset.\n # The offset's the same for send, so we don't need\n # to generate any code.\n proto_code = None\n copy_code = None\n signature = None\n else:\n signature = mangle_type(typename)\n if is_struct:\n proto_code = \"%s *%s\" % (typename, argname)\n copy_code = \" CCP_%s_%s_GET(pdu, %s);\" % (\n signame.upper(), argname.upper(), argname)\n else:\n proto_code = \"%s *%s\" % (typename, argname)\n cast = \"(%s)\" % (typename)\n copy_code = \" *%s = %sCCP_%s_%s_GET(pdu);\" % (\n argname, cast, signame.upper(), argname.upper())\n return (proto_code, copy_code, add_code, signature)", "def gen_CRM(call_text, response_text):\n pass", "def _format_call(value: ast3.Call, context: types.Context) -> typing.Text:\n\ttry:\n\t\treturn _format_call_horizontal(value, context)\n\texcept errors.NotPossible:\n\t\treturn _format_call_vertical(value, context)", "def GetOutputType(self, response_type):\n if response_type == \"KML\":\n return \"xml\"\n return \"json\"", "def _process_request(self, request):\n try:\n self._validate_rpc_request(request)\n except ValueError as err:\n return self._build_rpc_error(None, RpcErrors.INVALID_REQUEST, err, keep_null_id=True)\n\n id = request.get('id', None)\n\n try:\n method = getattr(rpc, request['method'])\n except AttributeError as err:\n return self._build_rpc_error(id, RpcErrors.METHOD_NOT_FOUND, err)\n\n try:\n params = request.get('params', None)\n if params is None:\n result = method()\n elif isinstance(params, list):\n result = method(*params)\n elif isinstance(params, dict):\n result = method(**params)\n\n return self._build_rpc_result(id, result)\n\n except TypeError as err:\n return self._build_rpc_error(id, RpcErrors.INVALID_PARAMS, err)\n except Exception as err:\n return self._build_rpc_error(id, RpcErrors.INTERNAL_ERROR, err)", "def check_message_params(message : dict) -> (bool, dict):\n logging.info('CHECKING message: '+str(message))\n\n data_tag = 'data'\n chat_data_tag = 'chat-data'\n param_tag = 'sessionJson'\n\n if data_tag in message:\n #Send the 'data' from the input to be checked\n extracted_params = parse_chat_input(message[data_tag]) #returns {choice, chat-data or None}\n logging.info('INPUT PARAMS:'+str(extracted_params))\n\n if extracted_params is None:\n logging.info('Missing (input param data) from protocol message')\n else:\n if 'choice' not in extracted_params or chat_data_tag not in extracted_params:\n logging.info('Missing (choice AND/OR data) in protocol message')\n else:\n choice = extracted_params.pop('choice')\n if choice == 1:\n 
return (choice, check_login_message(extracted_params))\n if choice == 2:\n #Add the session data back onto the result\n extracted_params.update(message[param_tag])\n return (choice, check_chat_message(extracted_params))\n if choice == 3:\n return (choice, (True, None))\n return (-1, (None, None))", "def process_all_call_arg(self, code, option):\n return code", "def from_output(cls, output: bytes, method: Method):\n s = None\n if b\"=====ERROR=====\" in output:\n s = cls.ERROR\n elif b\"=====UNKNOWN=====\" in output:\n s = cls.UNKNOWN\n elif b\"=====UNSATISFIABLE=====\" in output:\n s = cls.UNSATISFIABLE\n elif (\n b\"=====UNSATorUNBOUNDED=====\" in output or b\"=====UNBOUNDED=====\" in output\n ):\n s = cls.UNBOUNDED\n elif method is Method.SATISFY:\n if b\"==========\" in output:\n s = cls.ALL_SOLUTIONS\n elif b\"----------\" in output:\n s = cls.SATISFIED\n else:\n if b\"==========\" in output:\n s = cls.OPTIMAL_SOLUTION\n elif b\"----------\" in output:\n s = cls.SATISFIED\n return s", "def process_check_protocol_callback(data):\n signature = data.get('signature')\n if not signature:\n return {'error': 'missing signature'}\n try:\n task = AnalysisTask.objects.get(id=signature)\n except AnalysisTask.DoesNotExist:\n return {'error': 'invalid signature'}\n\n entity = task.entity\n version = task.version\n task.delete() # So it can be re-run later if desired, and keep the table small\n\n if 'returntype' not in data:\n return {'error': 'missing returntype'}\n success = data['returntype'] == 'success'\n\n if success:\n # Store protocol interface in the repocache\n if 'required' not in data or 'optional' not in data or 'ioputs' not in data:\n return {'error': 'missing terms'}\n cached_version = entity.repocache.get_version(version)\n cached_version.parsed_ok = True\n cached_version.interface.all().delete() # Remove results of any previous analysis\n cached_version.save()\n terms = [\n ProtocolInterface(protocol_version=cached_version, term=term, optional=False)\n for term in data['required']\n ] + [\n ProtocolInterface(protocol_version=cached_version, term=term, optional=True)\n for term in data['optional']\n ]\n try:\n ProtocolInterface.objects.bulk_create(terms)\n except IntegrityError as e:\n return {'error': 'duplicate term provided: ' + str(e)}\n kinds = {name: value for value, name in ProtocolIoputs.KIND_CHOICES}\n ioputs = [\n ProtocolIoputs(\n protocol_version=cached_version,\n name=ioput['name'],\n units=ioput['units'],\n kind=kinds[ioput['kind']])\n for ioput in data['ioputs']\n ]\n # Store a flag so we know the interface has been analysed\n ioputs.append(ProtocolIoputs(protocol_version=cached_version, name=' ', kind=ProtocolIoputs.FLAG))\n try:\n ProtocolIoputs.objects.bulk_create(ioputs)\n except IntegrityError as e:\n return {'error': 'duplicate input or output provided: ' + str(e)}\n else:\n # Store error message as an ephemeral file\n error_message = data.get('returnmsg', '').replace('<br/>', '\\n')\n if not error_message:\n return {'error': 'no error message supplied'}\n commit = entity.repo.get_commit(version)\n\n if ERROR_FILE_NAME in commit.filenames:\n logger.warning('Error file already exists in commit. 
New error message is: %s', error_message)\n else:\n content = b'Error analysing protocol:\\n\\n' + error_message.encode('UTF-8')\n commit.add_ephemeral_file(ERROR_FILE_NAME, content)\n # Don't try to analyse this protocol again\n cached_version = entity.repocache.get_version(version)\n cached_version.parsed_ok = False\n cached_version.save()\n ProtocolIoputs(protocol_version=cached_version, name=' ', kind=ProtocolIoputs.FLAG).save()\n\n return {}", "def test_get_request_output(self):\n pass", "def _process_convert_output_(self, output_data, **kwargs):\n accept_input, current_state, output = output_data\n if kwargs['full_output']:\n if current_state.label() is None:\n return (accept_input, current_state, None)\n else:\n return (accept_input, current_state, output)\n else:\n if not accept_input:\n return None\n return output", "def _process_convert_output_(self, output_data, **kwargs):\n accept_input, current_state, output = output_data\n return (accept_input, current_state, output)", "def post_call(self, date='2014-02-12', time='22:20:33', rnumber='0674767730', rname='Romain', snumber='0617382221', sname='Lolo'):\n xmldata1 = \"<?xml version=\\\"1.0\\\" encoding=\\\"UTF-8\\\"?>\" + \\\n \"<s:Envelope xmlns:s=\\\"http://schemas.xmlsoap.org/soap/envelope/\\\" s:encodingStyle=\\\"http://schemas.xmlsoap.org/soap/encoding/\\\">\" + \\\n \"<s:Body>\" + \\\n \"<u:AddMessage xmlns:u=\\\"urn:samsung.com:service:MessageBoxService:1\\\">\" + \\\n \"<MessageType>text/xml</MessageType>\" + \\\n \"<MessageID>call</MessageID>\" + \\\n \"<Message>\" + \\\n \"&lt;Category&gt;Incoming Call&lt;/Category&gt;\" + \\\n \"&lt;DisplayType&gt;Maximum&lt;/DisplayType&gt;\" + \\\n \"&lt;CallTime&gt;\" + \\\n \"&lt;Date&gt;\"\n xmldata2 = \"&lt;/Date&gt;\" + \\\n \"&lt;Time&gt;\"\n xmldata3 = \"&lt;/Time&gt;\" + \\\n \"&lt;/CallTime&gt;\" + \\\n \"&lt;Callee&gt;\" + \\\n \"&lt;Number&gt;\"\n xmldata4 = \"&lt;/Number&gt;\" + \\\n \"&lt;Name&gt;\"\n xmldata5 = \"&lt;/Name&gt;\" + \\\n \"&lt;/Callee&gt;\" + \\\n \"&lt;Caller&gt;\" + \\\n \"&lt;Number&gt;\"\n xmldata6 = \"&lt;/Number&gt;\" + \\\n \"&lt;Name&gt;\"\n xmldata7 = \"&lt;/Name&gt;\" + \\\n \"&lt;/Caller&gt;\" + \\\n \"</Message>\" + \\\n \"</u:AddMessage>\" + \\\n \"</s:Body>\" + \\\n \"</s:Envelope>\"\n\n #Create Header for Message\n header = \"POST /PMR/control/MessageBoxService HTTP/1.0\\r\\n\" + \\\n \"Content-Type: text/xml; charset=\\\"utf-8\\\"\\r\\n\" + \\\n \"Host: \" + self.host + \"\\r\\n\" + \\\n \"Content-Length: \" + str(len(xmldata1) + len(date) + \\\n len(xmldata2) + len(time) + \\\n len(xmldata3) + len(rnumber) + \\\n len(xmldata4) + len(rname) + \\\n len(xmldata5) + len(snumber) + \\\n len(xmldata6) + len(sname) + \\\n len(xmldata7)) + \"\\r\\n\" + \\\n \"SOAPACTION: urn:samsung.com:service:MessageBoxService:1#AddMessage\\r\\n\" + \\\n \"Connection: close\\r\\n\\r\\n\"\n #Create socket\n full_soap_request = header + \\\n xmldata1 + date + \\\n xmldata2 + time + \\\n xmldata3 + rnumber + \\\n xmldata4 + rname + \\\n xmldata5 + snumber +\\\n xmldata6 + sname +\\\n xmldata7\n msg_port = 52235;\n\n try:\n # Open Socket\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.connect((self.host, msg_port))\n sock.send(full_soap_request.encode('utf-8'))\n read = sock.recv(1024)\n # print(\"\\n\\n Reader \\n\\n\" + read)\n sock.close()\n except socket.error, e:\n raise TVError(e[1], 'post_call')\n finally:\n sock.close()\n sock = None", "def get_net_context(self, net):\n inputs = []\n outputs = []\n in_cons = []\n out_cons = []\n\n for con 
in self.connections:\n if net == self.connections[con].input:\n outputs += [self.connections[con].output]\n in_cons += [con]\n if net == self.connections[con].output:\n inputs += [self.connections[con].input]\n out_cons += [con]\n\n return inputs, in_cons, out_cons, outputs", "def get_info_on_inputs(named_inputs, n_unnamed_inputs):\r\n n_named_inputs = len(named_inputs)\r\n\r\n def get_plural(n):\r\n if n > 1:\r\n return 's'\r\n else:\r\n return ''\r\n\r\n if n_named_inputs == 0:\r\n if n_unnamed_inputs == 0:\r\n msg = 'The function is supposed to have no input.'\r\n else:\r\n if n_unnamed_inputs == 1:\r\n msg = (\"The function has a single input variable which has no \"\r\n \"name, and thus cannot be assigned through a keyword\"\r\n \" argument (use 'name=...' in a Variable's \"\r\n \"constructor to give it a name).\")\r\n else:\r\n # Use plural.\r\n msg = (\"The function has %s inputs, but none of them is named,\"\r\n \" and thus they cannot be assigned through keyword \"\r\n \"arguments (use 'name=...' in a Variable's \"\r\n \"constructor to give it a name).\" % n_unnamed_inputs)\r\n else:\r\n if n_unnamed_inputs == 0:\r\n msg = (\"The function has %s named input%s (%s).\" % (\r\n n_named_inputs, get_plural(n_named_inputs),\r\n ', '.join(named_inputs)))\r\n else:\r\n msg = (\"The function has %s named input%s (%s), and %s unnamed \"\r\n \"input%s which thus cannot be accessed through keyword \"\r\n \"argument%s (use 'name=...' in a variable's constructor \"\r\n \"to give it a name).\" % (\r\n n_named_inputs, get_plural(n_named_inputs),\r\n ', '.join(named_inputs), n_unnamed_inputs,\r\n get_plural(n_unnamed_inputs),\r\n get_plural(n_unnamed_inputs)))\r\n return msg", "def response_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"response_types\")", "def response_types(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"response_types\")", "def _parse_output_request(self, req):\n\n output_request = self._output_request_type_check(req)\n\n for request in output_request:\n if request not in self.OUT_ATTRS:\n msg = ('User output request \"{}\" not recognized. '\n 'Will attempt to extract from PySAM.'.format(request))\n logger.debug(msg)\n\n modules = []\n for request in output_request:\n if request in self.OPTIONS:\n modules.append(self.OPTIONS[request])\n\n if not any(modules):\n msg = ('None of the user output requests were recognized. '\n 'Cannot run reV econ. '\n 'At least one of the following must be requested: {}'\n .format(list(self.OPTIONS.keys())))\n logger.exception(msg)\n raise ExecutionError(msg)\n\n b1 = [m == modules[0] for m in modules]\n b2 = np.array([m == WindBos for m in modules])\n b3 = np.array([m == SingleOwner for m in modules])\n\n if all(b1):\n self._sam_module = modules[0]\n self._fun = modules[0].reV_run\n elif all(b2 | b3):\n self._sam_module = SingleOwner\n self._fun = SingleOwner.reV_run\n else:\n msg = ('Econ outputs requested from different SAM modules not '\n 'currently supported. 
Output request variables require '\n 'SAM methods: {}'.format(modules))\n raise ValueError(msg)\n\n return list(set(output_request))", "def _call(self, name, *args, **kwargs):\r\n if len(args) and len(kwargs):\r\n raise TypeError(\r\n 'Use positional or keyword argument only.'\r\n )\r\n\r\n callinfo = getattr(self, name).callinfo\r\n soapAction = callinfo.soapAction\r\n url = callinfo.location\r\n (protocol, host, uri, query, fragment, identifier) = urlparse(url)\r\n port = '80'\r\n if host.find(':') >= 0:\r\n host, port = host.split(':')\r\n\r\n binding = Binding(host=host, tracefile=self._tracefile,\r\n ssl=(protocol == 'https'),\r\n port=port, url=None, typesmodule=self._typesmodule,\r\n nsdict=self._nsdict, soapaction=self._soapAction,\r\n ns=self._ns, op_ns=self._op_ns)\r\n\r\n if self._use_wsdl:\r\n request, response = self._getTypeCodes(callinfo)\r\n if len(kwargs): args = kwargs\r\n if request is None:\r\n request = Any(oname=name)\r\n binding.Send(url=uri, opname=None, obj=args,\r\n nsdict=self._nsdict, soapaction=soapAction, requesttypecode=request)\r\n return binding.Receive(replytype=response)\r\n\r\n apply(getattr(binding, callinfo.methodName), args)\r\n\r\n return binding.Receive()", "def rpc_info():", "def validate_input(request):\n\n # Validate errors inputs\n request_data = JSONParser().parse(request)\n\n if 'action' not in request_data:\n return_data = {'error': 'No action'}\n logger.error('validate_input method: error:{} request:{}'.format(return_data, request))\n return Response(return_data, status=status.HTTP_400_BAD_REQUEST)\n\n if 'east' not in request_data:\n return_data = {'error': 'No east'}\n logger.error('validate_input method: error:{} request:{}'.format(return_data, request))\n return Response(return_data, status=status.HTTP_400_BAD_REQUEST)\n\n if 'west' not in request_data:\n return_data = {'error': 'No west'}\n logger.error('validate_input method: error:{} request:{}'.format(return_data, request))\n return Response(return_data, status=status.HTTP_400_BAD_REQUEST)\n\n # Debug mode checking condition\n if 'number' in request_data and 'stops' in request_data:\n\n # If current status is break then making debug mode\n if ValidateError.check_current_status() == 'break':\n return CreateDebugMode.validate_input_to_create_debug_mode(request_data)\n\n # If current status is started or pending then return error_robotworking message\n elif ValidateError.check_current_status() in ['started', 'pending']:\n return_data = {'status': 'error', 'error': 'robotworking'}\n logger.error('validation_errors.check_current_status debug mode method: error:robotworking request:{}'\n .format(request_data))\n return Response(return_data, status=status.HTTP_400_BAD_REQUEST)\n\n # Else return error_status message\n else:\n return_data = {'status': 'error', 'error': 'status'}\n logger.error('validation_errors.check_current_status debug mode method: error:status unknown request:{}'\n .format(request_data))\n return Response(return_data, status=status.HTTP_400_BAD_REQUEST)\n\n # Disconnection checking condition\n elif request_data['action'] == 'disconnect':\n\n # If current status is success or revoked or no_uuid then making disconnection\n if ValidateError.check_current_status() in ['success', 'revoked', 'no_uuid', 'failure']:\n return CreateDisconnect.validate_input_to_create_disconnection(request_data)\n\n # If current status is started or pending or break then return error_robotworking message\n elif ValidateError.check_current_status() in ['started', 'pending', 'break']:\n return_data = 
{'status': 'error', 'error': 'robotworking'}\n logger.error('validation_errors.check_current_status disconnection method: '\n 'error:robotworking request:{}'.format(request_data))\n return Response(return_data, status=status.HTTP_400_BAD_REQUEST)\n\n # Else return error_status message\n else:\n return_data = {'status': 'error', 'error': 'status'}\n logger.error('validation_errors.check_current_status disconnection method:'\n ' error:status unknown request:{}'.format(request_data))\n return Response(return_data, status=status.HTTP_400_BAD_REQUEST)\n\n # Connection checking condition\n elif request_data['action'] == 'connect':\n\n # If current status is success or revoked or no_uuid then making connection\n if ValidateError.check_current_status() in ['success', 'revoked', 'no_uuid', 'failure']:\n return CreateConnection.validate_input_to_create_connection(request_data)\n\n # If current status is started or pending or break then return error_robotworking message\n elif ValidateError.check_current_status() in ['started', 'pending', 'break']:\n return_data = {'status': 'error', 'error': 'robotworking'}\n logger.error('validation_errors.check_current_status connection method: error:robotworking request:{}'\n .format(request_data))\n return Response(return_data, status=status.HTTP_400_BAD_REQUEST)\n\n # Else return error_status message\n else:\n return_data = {'status': 'error', 'error': 'status unknown'}\n logger.error('validation_errors.check_current_status connection method: error:status request:{}'\n .format(request_data))\n return Response(return_data, status=status.HTTP_400_BAD_REQUEST)\n\n # Create connection in connection table\n elif request.data['action'] == 'create_connection':\n return ConnectionUtilities.create_dummy_connection(request_data)\n\n # Else return error_operation message\n else:\n return_data = {'status': 'error', 'error': 'operation'}\n logger.error('validation_errors.check_current_status connection method: error:action unknown request:{}'\n .format(request_data))\n return Response(return_data, status=status.HTTP_400_BAD_REQUEST)", "def _handle_info_response(self, resp, info, prev_info):\r\n if info.line_num != prev_info.line_num:\r\n return\r\n\r\n if resp['calltip']:\r\n info.editor.show_calltip('Arguments', resp['calltip'],\r\n signature=True,\r\n at_position=prev_info.position)\r\n\r\n if resp['name']:\r\n self.send_to_inspector.emit(\r\n resp['name'], resp['argspec'],\r\n resp['note'], resp['docstring'],\r\n not prev_info.auto)", "def valid_properties(self) -> Tuple[str]:\n tt = self.transaction_type\n if tt == \"GENESIS\":\n return (\n \"token_type\",\n \"transaction_type\",\n \"lokad_id\",\n \"nft_flag\",\n \"ticker\",\n \"token_name\",\n \"token_doc_url\",\n \"token_doc_hash\",\n \"decimals\",\n \"mint_baton_vout\",\n \"initial_token_mint_quantity\",\n )\n elif tt == \"MINT\":\n return (\n \"token_type\",\n \"transaction_type\",\n \"lokad_id\",\n \"nft_flag\",\n \"token_id\",\n \"token_id_hex\",\n \"mint_baton_vout\",\n \"additional_token_quantity\",\n )\n elif tt == \"SEND\":\n return (\n \"token_type\",\n \"transaction_type\",\n \"lokad_id\",\n \"nft_flag\",\n \"token_id\",\n \"token_id_hex\",\n \"token_output\",\n )\n elif tt == \"COMMIT\":\n return (\n \"token_type\",\n \"transaction_type\",\n \"lokad_id\",\n \"nft_flag\",\n \"info\",\n )\n raise InvalidOutputMessage(\"Unknown transaction_type\", tt)", "def InfoCall(connection, functionno, rc):\n\n ssl_logging = logging.getLogger('SSL_InfoCall')\n\n ssl_logging.debug('In InfoCall')\n ssl_logging.debug('State 
: %s' % connection.state_string())\n ssl_logging.debug('Fuction Number: %s' % functionno)\n ssl_logging.debug('Return Code : %s' % rc)\n return 0", "def response_types(self) -> pulumi.Output[Optional[Sequence[str]]]:\n return pulumi.get(self, \"response_types\")", "def get_input_output_args(io_info):\n args = []\n if io_info is None:\n return args\n for item in io_info:\n if isinstance(item, dict):\n arg = get_single_io_arg(item)\n args.append(arg)\n elif isinstance(item, list):\n dyn_arg = []\n for info in item:\n arg = get_single_io_arg(info)\n dyn_arg.append(arg)\n args.append(tuple(dyn_arg))\n return args", "def get_input(incoming):\n try:\n return incoming.get_output, incoming.get_output_shape()\n except AttributeError:\n return lambda **kwargs: incoming, [d if isinstance(d, int) else -1 for d in incoming.get_shape().as_list()]", "def GetInputCount(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def handle_request(self, query, request):\r\n request_pdu = None\r\n response_pdu = \"\"\r\n slave_id = None\r\n function_code = None\r\n func_code = None\r\n slave = None\r\n response = None\r\n\r\n try:\r\n # extract the pdu and the slave id\r\n slave_id, request_pdu = query.parse_request(request)\r\n if len(request_pdu) > 0:\r\n (func_code, ) = struct.unpack(\">B\", request_pdu[0])\r\n # 43 is Device Information\r\n if func_code == 43:\r\n # except will throw MissingKeyError\r\n slave = self.get_slave(slave_id)\r\n response_pdu = slave.handle_request(request_pdu)\r\n # make the full response\r\n response = query.build_response(response_pdu)\r\n # get the slave and let him execute the action\r\n elif slave_id == 0:\r\n # broadcast\r\n for key in self._slaves:\r\n response_pdu = self._slaves[key].handle_request(request_pdu, broadcast=True)\r\n response = query.build_response(response_pdu)\r\n elif slave_id == 255:\r\n r = struct.pack(\">BB\", func_code + 0x80, 0x0B)\r\n response = query.build_response(r)\r\n else:\r\n slave = self.get_slave(slave_id)\r\n response_pdu = slave.handle_request(request_pdu)\r\n # make the full response\r\n response = query.build_response(response_pdu)\r\n except (IOError, MissingKeyError) as e:\r\n # If the request was not handled correctly, return a server error response\r\n r = struct.pack(\">BB\", func_code + 0x80, defines.SLAVE_DEVICE_FAILURE)\r\n response = query.build_response(r)\r\n\r\n if slave:\r\n function_code = slave.function_code\r\n\r\n return (response, {'request': request_pdu.encode('hex'),\r\n 'slave_id': slave_id,\r\n 'function_code': function_code,\r\n 'response': response_pdu.encode('hex')})", "def get_req_data(self, data):\n self.rdata=data\n if self.rdata == 1 or \\\n self.rdata == 0 or \\\n self.rdata == \"\":\n return 1\n rt=\"\"\n for ot in self.rdata.split('\\n'):\n ot = ot.rstrip('\\r\\n')\n logging.debug(\">>>>>>> Check value of output string: %s\" % ot)\n if not (ot.startswith(\"vRA \") or \\\n ot.startswith(\"JRE Version:\") or \\\n ot.startswith(\"CloudClient is \") or \\\n ot.startswith(\"true\") or \\\n len(ot) == 0):\n if ot.startswith(\"Error\"):\n print (\"Error: %s\" % ot.split('Error')[-1])\n sys.stdout.flush()\n sys.exit(1)\n else:\n logging.debug(\"SR# %s\" % ot)\n rt=ot\n break\n if not rt:\n print \"No service request is found: %s\" % rt\n return 1\n else:\n return rt", "def command_type(self):\n if self._is_push_command():\n command_type = Parser.C_PUSH\n elif self._is_pop_command():\n 
command_type = Parser.C_POP\n elif self._is_arithmetic_command():\n command_type = Parser.C_ARITHMETIC\n elif self._is_comment_line() or self._is_blank_line():\n command_type = Parser.IGNORE\n else:\n command_type = Parser.INVALID\n return command_type", "def oic_pre_construct(self, cli_info, request_args=None, **kwargs):\n for prop in self.msg_type.c_param.keys():\n if prop in request_args:\n continue\n try:\n request_args[prop] = cli_info.behaviour[prop]\n except KeyError:\n pass\n\n if \"post_logout_redirect_uris\" not in request_args:\n try:\n request_args[\n \"post_logout_redirect_uris\"] = \\\n cli_info.post_logout_redirect_uris\n except AttributeError:\n pass\n\n if \"redirect_uris\" not in request_args:\n try:\n request_args[\"redirect_uris\"] = cli_info.redirect_uris\n except AttributeError:\n raise MissingRequiredAttribute(\"redirect_uris\", request_args)\n\n try:\n if cli_info.provider_info[\n 'require_request_uri_registration'] is True:\n request_args['request_uris'] = cli_info.generate_request_uris(\n cli_info.requests_dir)\n except KeyError:\n pass\n\n return request_args, {}", "def RequestInformation(self, request, inInfoVec, outInfoVec):\n from vtkmodules.vtkCommonExecutionModel import vtkStreamingDemandDrivenPipeline as sddp\n\n iin = inInfoVec[1].GetInformationObject(0)\n outInfoVec.GetInformationObject(0).Set(sddp.WHOLE_EXTENT(), iin.Get(sddp.WHOLE_EXTENT()), 6);\n return 1", "def get_decoding_op(self):\n return self._dual.get_op('output')", "def _check_validconnectioninput(self):\n # Check if name is valid\n if self._check_name(self.symbol):\n second_device = self.symbol\n self.symbol = self.scanner.get_symbol()\n # Check if '.' is used:\n if self._is_period(self.symbol):\n self.symbol = self.scanner.get_symbol()\n # Check if device input begins with 'I'\n if self.names.get_name_string(self.symbol.id)[0] == \"I\":\n # Check if input number is a positive number\n try:\n inputno = int(\n self.names.get_name_string(\n self.symbol.id)[\n 1:])\n second_port = self.symbol\n self.symbol = self.scanner.get_symbol()\n return second_device, second_port\n except BaseException:\n # Input number is not valid\n self._display_syntax_error(\"number\")\n self._semicolon_skipper()\n return None, None\n # OR if DType input\n elif self._check_validdtypeinput(self.symbol):\n second_port = self.symbol\n self.symbol = self.scanner.get_symbol()\n return second_device, second_port\n else:\n # Input is not valid\n self._display_syntax_error(\"input\")\n self._semicolon_skipper()\n return None, None\n else:\n # No '.'\n self._display_syntax_error(\"period\")\n self._semicolon_skipper()\n return None, None\n else:\n # Device does not exist\n self._display_syntax_error(\"devicename\")\n self._semicolon_skipper()\n return None, None", "def write_operation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"write_operation_type\")", "def write_operation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"write_operation_type\")", "def write_operation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"write_operation_type\")", "def write_operation_type(self) -> Optional[pulumi.Input[str]]:\n return pulumi.get(self, \"write_operation_type\")", "def print_results(request, response, procedure_name) -> None:\n procedure_names_dict = {\n 'SquareRoot': calculator_pb2_grpc.CalculatorServicer.SquareRoot.__name__,\n 'Square': calculator_pb2_grpc.CalculatorServicer.Square.__name__,\n }\n print_string = f\"Request: {procedure_names_dict[procedure_name]} 
for {request.value}.\\nResponse: {response.value}.\\n\"\n print(print_string)", "def status_determine():\n b_status = False\n b_statusInput = True\n b_statusAnalyze = True\n b_statusOutput = True\n nonlocal dret_inputSet\n nonlocal dret_analyze\n nonlocal dret_outputSet\n nonlocal fn_inputReadCallback\n nonlocal fn_analysisCallback\n nonlocal fn_outputWriteCallback\n\n if fn_inputReadCallback:\n if 'status' in dret_inputSet.keys():\n b_statusInput = dret_inputSet['status']\n if fn_analysisCallback:\n if 'status' in dret_analyze.keys():\n b_statusAnalyze = dret_analyze['status']\n if fn_outputWriteCallback:\n if 'status' in dret_outputSet.keys():\n b_statusOutput = dret_outputSet['status']\n\n b_status = b_statusInput and b_statusAnalyze and b_statusOutput\n return {\n 'status': b_status\n }", "def _dispatch(self, request):\n endpoint, values, context, param = self.read_request(request)\n if endpoint == 'init':\n parse_config(param.get('config', {}))\n return self.init_info()\n else:\n if endpoint == 'provider':\n return self.call_provider(context, values['name'],\n values['action'], param)\n elif endpoint == 'handler':\n return self.call_handler(context, values['name'], param)\n else:\n return self.call_func(context, endpoint, values['name'], param)", "def call(self, inputs, training=False):\n prev_merge_mode = self.merge_mode\n self.set_merge_mode('merge')\n enc_outputs = self.call_encoder(inputs, training)\n dec_outputs = self.call_decoder(enc_outputs, training)\n self.set_merge_mode(prev_merge_mode)\n\n return dec_outputs", "def map_operation(**kwargs):\n operation = kwargs.get(\"data\", \"\")\n operation_mapper = {\"POST\": \"CREATE\"}\n return operation_mapper.get(operation, operation)", "def input_or_output(self):\n return \"I\"", "def call(self, inputs):\n raise NotImplementedError", "def input_type():\n pass", "def _call(*,\n call,\n method,\n headers,\n params=None,\n data=None,\n no_headers=False,\n json_output=True,\n raw=False,\n **kwargs):\n if not headers and not data and not params and not no_headers:\n raise AttributeError(\n 'Asked to do a call to the cloud without valid headers, data or params. This would never work.'\n )\n\n try:\n if method is requests.put:\n if data:\n response = method(call, headers=headers, data=data, timeout=5)\n else:\n raise AttributeError('PUT call with no data, this would fail!')\n elif method is requests.post:\n if data and params:\n response = method(call, headers=headers, data=data, params=params, timeout=5)\n elif data:\n response = method(call, headers=headers, data=data, timeout=5)\n elif params:\n response = method(call, headers=headers, params=params, timeout=5)\n else:\n raise AttributeError(\n 'POST call with no data or params, this probably makes no sense!')\n\n elif params:\n response = method(call, headers=headers, params=params, timeout=5)\n else:\n response = method(call, headers=headers, timeout=5)\n\n except requests.exceptions.RequestException as e: # pragma: no cover\n raise ConnectionError(str(e)) from None\n\n if response.status_code == 200:\n if raw:\n return response\n if json_output:\n return response.json()\n else:\n return response.text\n\n elif response.status_code == 410: # pragma: no cover\n raise APIError(\n response.status_code,\n 'API version outdated. Update python-cozify. 
{reason} - {url} - {message}'.format(\n reason=response.reason, url=response.url, message=response.text))\n else: # pragma: no cover\n raise APIError(\n response.status_code, '{reason} - {url} - {message}'.format(reason=response.reason,\n url=response.url,\n message=response.text))", "def external_call( d, output, stack, context ):\n\targuments = d[\"args\"].split(\" \")\n\tstack_collect = stack[-len(arguments):]\n\tfor arg, desired in zip(stack_collect, arguments):\n\t\tresult = type_system.type_reduce( arg, desired, output, stack, context )\n\t\tif result != \"success\":\n\t\t\treturn \"pass\"\n\toutput.append(\" call %s\\n\" % d[\"name\"])\n\tfor i in arguments:\n\t\tstack.pop()\n\tretvals = d[\"ret\"].split(\" \")\n\tfor val in retvals:\n\t\tstack.append( type_system.Datum( [[val]], [None] ) )\n\treturn \"success\"", "def form_translate(input_data, operation):\n \n # In case you're trying to produce this behavior on the command line for\n # rapid testing, the command that eventually gets called is: \n # java -jar form_translate.jar <operation> < form.xml > output\n #\n # You can pass in a filename or a full string/stream of xml data\n p = subprocess.Popen([\"java\",\"-jar\",\n config.FORM_TRANSLATE_JAR_LOCATION,\n operation], \n shell=False, \n stdout=subprocess.PIPE,\n stdin=subprocess.PIPE,\n stderr=subprocess.PIPE)\n p.stdin.write(input_data)\n p.stdin.flush()\n p.stdin.close()\n \n output = p.stdout.read() \n error = p.stderr.read()\n \n # todo: this is horrible.\n has_error = \"exception\" in error.lower() \n \n return {\"success\": not has_error,\n \"errstring\": error,\n \"outstring\": output}", "def _process_info_message(self, message):\n # Extract the output resolution from the appropriate message, if\n # it's present.\n contents = message.get('contents', None)\n if message['messageCode'] == 'JOB_RUNNING_RESOLUTION':\n self._resolution = contents['resolutionMs']\n elif message['messageCode'] == 'FETCH_NUM_TIMESERIES':\n self._num_input_timeseries += int(message['numInputTimeSeries'])\n elif message['messageCode'] == 'FIND_MATCHED_NO_TIMESERIES':\n self._find_matched_no_timeseries = True\n elif message['messageCode'] == 'FIND_LIMITED_RESULT_SET':\n self._find_limited_resultset = True\n self._find_matched_size = contents['matchedSize']\n self._find_limit_size = contents['limitSize']\n elif message['messageCode'] == 'GROUPBY_MISSING_PROPERTY':\n self._group_by_missing_property = True\n self._group_by_missing_properties = contents['propertyNames']", "def SendOutputs(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def get_operation_obect(self, method):\n pass", "def generate_interface_types(interface, name=None):\n\n if not isinstance(interface, type):\n interface = type(interface)\n\n module = interface.__module__\n\n method_signatures = {\n name: MethodSignature.from_method(getattr(interface, name))\n for name in classes.get_public_method_names(interface)\n }\n\n args_type, types, makers = make_args_type(module, method_signatures)\n request_type = dataclasses.make_dataclass(\n (name or interface.__name__) + 'Request',\n [('args', args_type)],\n namespace={\n 'Args': args_type,\n 'types': types,\n 'm': makers,\n },\n frozen=True,\n )\n request_type.__module__ = module\n\n result_type = make_result_type(module, method_signatures)\n error_type = make_error_type(module, interface, method_signatures)\n response_type = dataclasses.make_dataclass(\n (name 
or interface.__name__) + 'Response',\n [\n (\n 'result',\n typing.Optional[result_type],\n dataclasses.field(default=None),\n ),\n (\n 'error',\n typing.Optional[error_type],\n dataclasses.field(default=None),\n ),\n ],\n namespace={\n 'Result': result_type,\n 'Error': error_type,\n },\n frozen=True,\n )\n response_type.__module__ = module\n\n return request_type, response_type", "def parse_operations(self, operation_type: str) -> Tuple[Operation]:\n if operation_type is None:\n return tuple()\n query_type: SchemaType = self.types.get(operation_type)\n if query_type is None:\n return tuple()\n return tuple([Operation(f, self.settings) for f in query_type.fields])", "def full_dispatch_request(self):\n fn = getattr(self, g.action, None)\n if fn is None:\n abort(404, 'Unimplemented action %r' % g.action)\n inputs = self.__class__.schema_inputs.get(g.action)\n outputs = self.__class__.schema_outputs.get(g.action)\n output_types = self.__class__.output_types\n if inputs is not None:\n (errors, values) = validate(g.request_data, inputs)\n if errors:\n return dict(errors), 400\n else:\n if isinstance(values, dict):\n rv = fn(**values)\n else:\n rv = fn(values)\n else:\n rv = fn()\n rv, code, headers = unpack(rv)\n if outputs is not None:\n (errors, values) = validate(rv, outputs, proxy_types=output_types)\n if errors:\n return dict(errors), 500\n else:\n rv = values\n return rv, code, headers", "def PostInputsNiFi(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def __show_protocol__(self, data):\n t_16 = t_16_Bit_Options()\n t_8 = t_8_Bit_Options()\n t_var = t_8_Bit_Options()\n print('Received ' + str(len(data)) + ' Bytest')\n\n #----------------------------------------------------------------------\n print('='*80)\n print('Handling Protocol response: ' + hexlify(data))\n #----------------------------------------------------------------------\n print('='*80)\n print('Overhead Bytes: ' + hexlify(data[:BP_TOOL.OVERHEAD]))\n print('Number of UINT16 bitstream data = ' + str(data[BP_TOOL.UINT16S]))\n print('Number of UINT8 bitstream data = ' + str(data[BP_TOOL.UINT8S]))\n print('Number of var bitstream data = ' + str(data[BP_TOOL.VARS]))\n print('Follow = ' + str(self.get_follow(data)))\n print('Length = ' + str(self.get_length(data)))\n start = self.get_follow_and_length(data)\n end = start + BP_TOOL.SIZE_FOLLOW + BP_TOOL.SIZE_LEN\n print('Following bytes and length = ' + hexlify(data[start:end]))\n #----------------------------------------------------------------------\n print('='*80)\n bits = self.get_16bit_options_bits(data)\n values = self.get_16bit_options(data)\n options = self.get_options_requested(bits)\n\n # Display the options if exist\n if len(options):\n print('UINT16 bits...... : ' + hexlify(bits))\n print('UINT16 data...... : ' + hexlify(values))\n print('UINT16 Num of opts ... : ' + str(len(values) // 2))\n print('UINT16 options... 
: ' + str(options))\n print('-'*80)\n for x in range(len(options)):\n value = (values[x*2] << 8) | (values[x*2 + 1])\n opt = options[x]\n t_16.set_value(opt, value)\n print('Option: ' + t_16.options[opt]['name'] + ' ' + str(value))\n pprint.pprint(t_16.options)\n else:\n print('No 16 bit options')\n\n #----------------------------------------------------------------------\n print('-'*80)\n bits = self.get_8bit_options_bits(data)\n values = self.get_8bit_options(data)\n options = self.get_options_requested(bits)\n # Display the options if exist\n if len(options):\n print('UINT8 bits...... : ' + hexlify(bits))\n print('UINT8 data...... : ' + hexlify(values))\n print('UINT8 options... : ' + str(options))\n print('-'*80)\n for x in range(len(options)):\n value = values[x]\n opt = options[x]\n t_8.set_value(opt, value)\n print('Option: ' + t_8.options[x]['name'] + ' ' + str(value))\n pprint.pprint(t_8.options)\n else:\n print('No 8 bit options')\n\n #----------------------------------------------------------------------\n print('-'*80)\n bits = self.get_var_options_bits(data)\n values = self.get_var_options(data)\n print('VARS !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n # Display the options if exist\n if len(values):\n pprint.pprint(values)\n else:\n print('No var bit options')\n\n print('VAR options... : ' + str(self.get_options_requested(bits)))\n print('VARS !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')\n print('-'*80)", "def _process_ok_response(self, bis, request):\n #\n # this call gives the Request an opinion on which serial version\n # to use\n #\n version = request.get_serial_version(self._client.serial_version)\n if version <= 3:\n # V3 and earlier protocols start with an error byte (0 is no error)\n code = bis.read_byte()\n else:\n # V4 starts with an NSON MAP. If the type is correct then treat it\n # as a V4 response. If it's not correct then it's a V3\n # error code from a V3 proxy. If it's a V4 map, code will be 0\n code = SerdeUtil.check_for_map(bis)\n if code == 0:\n res = request.create_serializer(version).deserialize(\n request, bis, version)\n if request.is_query_request():\n if not request.is_simple_query():\n request.get_driver().set_client(self._client)\n return res\n\n \"\"\"\n Operation failed. Handle the failure and throw an appropriate\n exception.\n \"\"\"\n err = SerdeUtil.read_string(bis)\n\n # special case for TNF errors on WriteMultiple with many tables\n if code == SerdeUtil.USER_ERROR.TABLE_NOT_FOUND and \\\n isinstance(request, operations.WriteMultipleRequest):\n raise self._handle_write_multiple_table_not_found(code, err, request)\n\n raise SerdeUtil.map_exception(code, err)", "def parse_request(self, request):\n response=''\n http_code = 200\n\n request_line = request.splitlines()[0]\n request_method, path, request_version = request_line.split()\n\n #Try to split path into it's components: the operation requested and the keyvalue\n try:\n request_op, request_keyvalue = path.split('?')\n request_op = request_op[1:]\n\n #If request is a get we split in a different order than if it's a set\n if request_op == 'get':\n request_value, request_key = request_keyvalue.split('=')\n response, http_code = self.get_value(request_key)\n elif request_op == 'set':\n request_key, request_value = request_keyvalue.split('=')\n response, http_code = self.set_value(request_key, request_value)\n else:\n response = 'Unknown operation in URL. 
Must be either GET or SET.'\n http_code = 400\n\n except ValueError: #Catch any paths that don't match the form we're interested in\n response = dedent(\"\"\"Incorrect path (%s)\n Requested URL must take the form http://%s:%s/[operation]?[value]\"\"\" % (path, self.server_address, self.server_port))\n http_code = 400\n return response, http_code\n\n return response, http_code", "def visit_Call(self, node: ast.Call) -> None:\n self._check_open_call_context(node)\n self._check_type_compare(node)\n self._check_range_len(node)\n self.generic_visit(node)", "def check_request(self, params, permission):\n exception = {}\n\n if permission.get('qgs_project') is None:\n # service unknown or not permitted\n exception = {\n 'code': \"Service configuration error\",\n 'message': \"Service unknown or unsupported\"\n }\n elif not params.get('REQUEST'):\n # REQUEST missing or blank\n exception = {\n 'code': \"OperationNotSupported\",\n 'message': \"Please check the value of the REQUEST parameter\"\n }\n else:\n service = params.get('SERVICE', '')\n request = params.get('REQUEST', '').upper()\n\n if service == 'WMS' and request == 'GETFEATUREINFO':\n # check info format\n info_format = params.get('INFO_FORMAT', 'text/plain')\n if re.match('^application/vnd.ogc.gml.+$', info_format):\n # do not support broken GML3 info format\n # i.e. 'application/vnd.ogc.gml/3.1.1'\n exception = {\n 'code': \"InvalidFormat\",\n 'message': (\n \"Feature info format '%s' is not supported. \"\n \"Possibilities are 'text/plain', 'text/html' or \"\n \"'text/xml'.\"\n % info_format\n )\n }\n elif service == 'WMS' and request == 'GETPRINT':\n # check print templates\n template = params.get('TEMPLATE')\n if template and template not in permission['print_templates']:\n # allow only permitted print templates\n exception = {\n 'code': \"Error\",\n 'message': (\n 'Composer template not found or not permitted'\n )\n }\n\n if not exception:\n # check layers params\n\n # lookup for layers params by request\n # {\n # <SERVICE>: {\n # <REQUEST>: [\n # <optional layers param>, <mandatory layers param>\n # ]\n # }\n # }\n ogc_layers_params = {\n 'WMS': {\n 'GETMAP': ['LAYERS', None],\n 'GETFEATUREINFO': ['LAYERS', 'QUERY_LAYERS'],\n 'GETLEGENDGRAPHIC': [None, 'LAYER'],\n 'GETLEGENDGRAPHICS': [None, 'LAYER'], # QGIS legacy request\n 'DESCRIBELAYER': [None, 'LAYERS'],\n 'GETSTYLES': [None, 'LAYERS']\n },\n 'WFS': {\n 'DESCRIBEFEATURETYPE': ['TYPENAME', None],\n 'GETFEATURE': [None, 'TYPENAME']\n }\n }\n\n layer_params = ogc_layers_params.get(service, {}).get(request, {})\n\n if service == 'WMS' and request == 'GETPRINT':\n # find map layers param for GetPrint (usually 'map0:LAYERS')\n for key, value in params.items():\n if key.endswith(\":LAYERS\"):\n layer_params = [key, None]\n break\n\n if layer_params:\n permitted_layers = permission['public_layers']\n filename = params.get('FILENAME', '')\n if (service == 'WMS' and (\n (request == 'GETMAP' and filename) or request == 'GETPRINT'\n )):\n # When doing a raster export (GetMap with FILENAME)\n # or printing (GetPrint), also allow background layers\n permitted_layers += permission['background_layers']\n if layer_params[0] is not None:\n # check optional layers param\n exception = self.check_layers(\n layer_params[0], params, permitted_layers, False\n )\n if not exception and layer_params[1] is not None:\n # check mandatory layers param\n exception = self.check_layers(\n layer_params[1], params, permitted_layers, True\n )\n\n return exception", "def _check_validconnectionoutput(self):\n # Check 
if name is valid and has been initialised\n if self._check_name(self.symbol):\n first_device = self.symbol\n self.symbol = self.scanner.get_symbol()\n # Check if '->' is used\n if self._is_arrow(self.symbol):\n return first_device, None\n elif self._is_period(self.symbol):\n self.symbol = self.scanner.get_symbol()\n if self._check_validdtypeoutput(self.symbol):\n first_port = self.symbol\n self.symbol = self.scanner.get_symbol()\n return first_device, first_port\n else:\n # Invalid DType output\n self._display_syntax_error(\"doutput\")\n self._semicolon_skipper()\n return None, None\n else:\n # Neither an arrow nor a DType output\n self._display_syntax_error(\"arrowperiod\")\n self._semicolon_skipper()\n return None, None\n else:\n # Device does not exist\n self._display_syntax_error(\"devicename\")\n self._semicolon_skipper()\n return None, None", "def StreamInputs(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')", "def guess_calling_convention(self):\n return calldef_types.CALLING_CONVENTION_TYPES.UNKNOWN", "def getsolutioninfo(self,whichsol_): # 3\n if not isinstance(whichsol_,soltype): raise TypeError(\"Argument whichsol has wrong type\")\n res,resargs = self.__obj.getsolutioninfo(whichsol_)\n if res != 0:\n result,msg = self.__getlasterror(res)\n raise Error(rescode(res),msg)\n _pobj_return_value,_pviolcon_return_value,_pviolvar_return_value,_pviolbarvar_return_value,_pviolcone_return_value,_pviolitg_return_value,_dobj_return_value,_dviolcon_return_value,_dviolvar_return_value,_dviolbarvar_return_value,_dviolcone_return_value = resargs\n return _pobj_return_value,_pviolcon_return_value,_pviolvar_return_value,_pviolbarvar_return_value,_pviolcone_return_value,_pviolitg_return_value,_dobj_return_value,_dviolcon_return_value,_dviolvar_return_value,_dviolbarvar_return_value,_dviolcone_return_value", "def getOperationMetadata(self, request, context):\n context.set_code(grpc.StatusCode.UNIMPLEMENTED)\n context.set_details('Method not implemented!')\n raise NotImplementedError('Method not implemented!')" ]
[ "0.5174658", "0.5018817", "0.4834404", "0.47635424", "0.4631196", "0.46073565", "0.45646647", "0.45536888", "0.4535534", "0.45211482", "0.44949257", "0.4465704", "0.44512537", "0.4426451", "0.44040138", "0.44033703", "0.43914264", "0.43877032", "0.4385859", "0.43828747", "0.43793657", "0.43721932", "0.43482167", "0.43448585", "0.43419588", "0.4333053", "0.43312353", "0.43153423", "0.430005", "0.4298266", "0.4298266", "0.4285821", "0.4282567", "0.42745984", "0.42703947", "0.4267555", "0.4266104", "0.42545578", "0.42513797", "0.4249028", "0.42391375", "0.42360276", "0.4233659", "0.42255718", "0.4224528", "0.42235708", "0.4218548", "0.42155606", "0.42153934", "0.42153934", "0.42153436", "0.42152348", "0.42145038", "0.42096385", "0.42071396", "0.42040783", "0.41933978", "0.41911128", "0.41905853", "0.41853023", "0.4180585", "0.4174984", "0.41718042", "0.41696757", "0.41683215", "0.4164832", "0.41639182", "0.41632143", "0.4160123", "0.4160123", "0.4160123", "0.4160123", "0.4155205", "0.41499013", "0.4143705", "0.41330612", "0.41322654", "0.4130583", "0.4127633", "0.4124138", "0.4114674", "0.4114665", "0.41135204", "0.41077372", "0.41022998", "0.4100404", "0.40995762", "0.40990332", "0.4098139", "0.40882084", "0.40859428", "0.40825382", "0.40824807", "0.4080589", "0.4079259", "0.40771422", "0.4072683", "0.40718368", "0.40690076", "0.4066481" ]
0.73895764
0
Returns typecodes representing a parameter set parameters list of WSDLTools.ParameterInfo instances representing the parts of a WSDL Message.
def _getTypeCode(self, parameters, literal=False): ofwhat = [] for part in parameters: namespaceURI,localName = part.type if part.element_type: #global element element = self._wsdl.types[namespaceURI].elements[localName] tc = self._getElement(element, literal=literal, local=False, namespaceURI=namespaceURI) else: #local element name = part.name typeClass = self._getTypeClass(namespaceURI, localName) if not typeClass: tp = self._wsdl.types[namespaceURI].types[localName] tc = self._getType(tp, name, literal, local=True, namespaceURI=namespaceURI) else: tc = typeClass(name) ofwhat.append(tc) return ofwhat
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def parameterTypes(self, p_int): # real signature unknown; restored from __doc__\n return []", "def getParamsType(self):\n\t\treturn [\"int\", \"int\"]", "def parameterNames(self, p_int): # real signature unknown; restored from __doc__\n return []", "def _fi_in_parameters(self) -> List[Tuple[str, str]]:\n result = list() # type: List[Tuple[str, str]]\n for param in self.params:\n type_list = param.fi_type()\n for type_name, postfix in type_list:\n result.append((type_name, param.name + postfix))\n return result", "def _f_in_parameters(self) -> List[Tuple[str, str]]:\n result = list() # type: List[Tuple[str, str]]\n for param in self.params:\n type_list = param.f_type()\n for type_name, postfix in type_list:\n result.append((type_name, param.name + postfix))\n return result", "def parameter_names(self) -> List[str]:", "def _fc_in_parameters(self) -> List[str]:\n result = list() # type: List[str]\n\n for param in self.params:\n type_list = param.fc_type()\n for type_name, postfix in type_list:\n result.append('{} {}'.format(type_name, param.name + postfix))\n\n return result", "def build_parameters(self) -> List[str]:\n param_bits = []\n for name in self.parameters:\n param_bits.extend(self.build_parameter_by_name(name) or [])\n return param_bits", "def generate_parameter_map(rpc_blocks, full_signature, param_values):\n parameter_map = []\n list_object_pattern = re.compile(\n r\"(java\\.util\\.(?:[A-Za-z]+)?List(?:[0-9/]+)?)<([a-zA-Z0-9./]+)[<>]?(?:(.*[^>]))?>\"\n )\n\n # Append type index for each parameter value\n for i, param in enumerate(param_values):\n param_type = full_signature[i]\n\n # If parameter is of type list, append index of list type\n if re.match(list_object_pattern, param_type):\n list_type = re.findall(list_object_pattern, param_type)[0][0]\n parameter_map.append(str(rpc_blocks.index(list_type) - 2))\n\n # If not of type list, appeand index of simple type\n else:\n parameter_map.append(str(rpc_blocks.index(full_signature[i]) - 2))\n\n # For each indexed parameter append type and value indicies\n for i, param in enumerate(param_values):\n param_type = full_signature[i]\n\n # If parameter is a string object, append value index\n if param_type.startswith(COMPLEX_TYPES[\"STRING\"]):\n parameter_map.append(str(rpc_blocks.index(param) - 2))\n\n # Else if the parameter is simple type, append value\n elif param_type in SIMPLE_TYPES.keys():\n parameter_map.append(str(param))\n\n # Else if the parameter is a java list object, append type index and length\n elif re.match(list_object_pattern, param_type):\n list_length = 1\n list_type = list(filter(None, re.findall(list_object_pattern, param_type)[0]))\n\n parameter_map.append(\n str(rpc_blocks.index(list_type[0] if len(list_type) < 3 else list_type[1]) - 2)\n )\n parameter_map.append(str(list_length))\n\n # For each element of the list, append type and value indicies\n for _ in range(list_length):\n parameter_map.append(\n str(rpc_blocks.index(list_type[1] if len(list_type) < 3 else list_type[2]) - 2)\n )\n parameter_map.append(str(rpc_blocks.index(param) - 2))\n\n # If parameter is a so far unhandled complex type, append runtime index and value\n elif any(\n param_type.startswith(val)\n for val in COMPLEX_TYPES.values()\n ):\n parameter_map.append(str(rpc_blocks.index(param_type) - 2))\n parameter_map.append(str(param))\n\n # If parameter is unknown, append runtime index and value index\n else:\n parameter_map.append(str(rpc_blocks.index(param_type) - 2))\n parameter_map.append(str(rpc_blocks.index(param) - 2))\n\n return 
parameter_map", "def getParameterList(self):\n inputList = []\n for name, n in zip(self._names, self._inputs):\n inputList += ['%s.x%d' % (name, i) for i in range(n)]\n return inputList", "def params(self):\n params = []\n\n for item in self._definition.get('params', []):\n params.append(Parameter(**item))\n\n return params", "def _get_parameters(self):\n return (self.SYMBOL, self.parameterArray())", "def get_method_parameter_values(code, line, full_sig):\n param_list = []\n offset = int((get_offset(code, line, \"catch(\") - 3))\n line += (1 + int(offset / 2))\n\n for i in range(int(offset / 2)):\n param_pattern = re.compile(rf\", ?(?:'' ?\\+ ?)?(?:{R_VAR}\\(?.*, ?)?(.*[^\\)])\\)\\)?;\")\n nested_boolean_pattern = re.compile(rf\".*\\?(('?{R_VAR}'?):('?{R_VAR}'?))\")\n param = re.findall(param_pattern, code[line])[0].replace(\"'\", \"\")\n\n # if parameter is boolean, append 0\n if (\n nested_boolean_pattern.search(code[line])\n or full_sig[i].startswith(COMPLEX_TYPES[\"BOOLEAN\"])\n ):\n param_list.append(\"§0§\")\n\n # if parameter is numeric, append directly\n elif is_number(param) or full_sig[i].startswith(\"I\"):\n param_list.append(f\"§{param if is_number(param) else randint(0,99)}§\")\n\n # else, treat as tring and append with previx\n else:\n param = param.replace(\" \", \"_\").replace(\"|\", \"\\\\!\").replace(\"\\\\\", \"\\\\\\\\\")\n param_list.append(f\"§param_{param}§\")\n line += 1\n\n return param_list", "def parameters_list(self):\n return [getattr(self.parameters, p) for p in self.parameters_names()]", "def Params(self):\n params = list()\n if self.currtok[1].name in {\"INT\", \"FLOAT\", \"BOOLEAN\"}:\n type = self.Type()\n self.ids[self.currtok[0]] = type\n id = self.primary()\n par = Param(type, id)\n params.append(par)\n while self.currtok[1].name in {\"COMMA\"}:\n self.currtok = next(self.tg)\n type = self.Type()\n self.ids[self.currtok[0]] = type\n id = self.primary()\n par = Param(type, id)\n params.append(par)\n return Params(params)", "def params(self) -> Tuple[Parameter, ...]:\n raise NotImplementedError()", "def classifyParameters(self):\n\n arguments = []\n options = []\n outputs = []\n for parameter in self.parameters():\n if parameter.channel == 'output' and not (\n parameter.isExternalType() or parameter.typ == 'file'):\n outputs.append(parameter)\n elif parameter.index is not None:\n arguments.append(parameter)\n if parameter.flag is not None or parameter.longflag is not None:\n logger.warning(\"Parameter %s has both index=%d and flag set.\" % (\n parameter.identifier(), parameter.index))\n else:\n options.append(parameter)\n arguments.sort(key = lambda parameter: parameter.index)\n return (arguments, options, outputs)", "def get_parameters(self, doc, type_table):\n scraper = TypeScraper(type_table=type_table)\n r = docscrape.NumpyDocString(doc)\n paras = {}\n for p in r['Parameters']:\n\n para_str = str(p.type)\n para_type = scraper.scrap(para_str)\n if self.is_valid_para(para_type, type_table):\n paras[p.name] = scraper.scrap(para_str)\n else:\n continue\n return paras", "def parameter(self):\n res = dict()\n for x in self._desc.fields:\n if x.type == FieldDescriptor.TYPE_MESSAGE:\n parameter = ParameterGroup(x)\n elif x.type == FieldDescriptor.TYPE_ENUM:\n parameter = ParameterEnum(x)\n else:\n parameter = ParameterPrimitive(x)\n\n res[x.name] = parameter\n\n return res", "def __parameters__(self) -> tuple[TypeVar, ...]:\n return super().__getattribute__(\"_parameters\")", "def compile_parameter_list(self):\r\n if self.__tokenizer.token_type() != 
TYPES_DIC[\"SYMBOL\"]:\r\n type = self.__get_type()\r\n self.__advance()\r\n name = self.__tokenizer.identifier()\r\n self.__subroutine_symbols.define(name, type, \"argument\")\r\n self.__advance()\r\n while self.__tokenizer.symbol() != ')':\r\n self.__advance()\r\n type = self.__get_type()\r\n self.__advance()\r\n name = self.__tokenizer.identifier()\r\n self.__subroutine_symbols.define(name, type, \"argument\")\r\n self.__advance()", "def parameters(self):\n return []", "def _get_parameters(self) -> list:\n return self.parameters", "def parameter_names(self) -> list:\n parameters = []\n parameters.extend(self.properties.parameter_names)\n return parameters", "def get_parameters(self):\n result = self.generate_parameters()\n id, vector = result\n if not vector:\n return id, vector\n\n params = {}\n for idx in range(len(self.param_names)):\n name = self.param_names[idx]\n settings = self.param_settings[idx]\n value = vector[idx]\n if settings['type'] == \"int\":\n value = int(round(value))\n elif settings['type'] == \"float\":\n value = float(value)\n params[name] = value\n return id, params", "def parameters(self) -> google.protobuf.internal.containers.RepeatedCompositeFieldContainer[global___Statement.Declaration]:", "def getTypeCode(self):\n return _libsbml.Parameter_getTypeCode(self)", "def get_ext_param_descriptions(self):\n num_param = core.xc_func_info_get_n_ext_params(self.xc_func_info)\n\n ret = []\n for p in range(num_param):\n tmp = core.xc_func_info_get_ext_params_description(self.xc_func_info, p)\n ret.append(tmp.decode(\"UTF-8\"))\n\n return ret", "def param(self):\r\n\r\n return []", "def parameters(self) -> List[Parameter]:\n return self._parameters", "def _declare_parameters(self):\n def to_modelica(arg):\n \"\"\" Convert to Modelica array.\n \"\"\"\n # Check for strings and booleans\n if isinstance(arg, str):\n return '\\\\\"' + arg + '\\\\\"'\n elif isinstance(arg, bool):\n if arg is True:\n return 'true'\n else:\n return 'false'\n try:\n return '{' + \", \".join(to_modelica(x) for x in arg) + '}'\n except TypeError:\n return repr(arg)\n dec = list()\n\n for k, v in list(self._parameters_.items()):\n # Dymola requires vectors of parameters to be set in the format\n # p = {1, 2, 3} rather than in the format of python arrays, which\n # is p = [1, 2, 3].\n # Hence, we convert the value of the parameter if required.\n s = to_modelica(v)\n dec.append('{param}={value}'.format(param=k, value=s))\n\n return dec", "def get_parameter_descriptions(parameters):\n\n lines = []\n opt_lines = []\n for param in parameters:\n param_name = check_param(flatten_param(param['name']))\n if param['required']:\n required = 'required'\n lines.append(':param {0}: ({1}) {2}'.format(param_name, required,\n param['description']))\n lines.append(':type {0}: {1}'.format(param_name, param['type']))\n else:\n required = 'optional'\n opt_lines.append(':param {0}: ({1}) {2}'.format(param_name,\n required, param['description']))\n opt_lines.append(':type {0}: {1} or None'.format(param_name,\n param['type']))\n\n return lines + opt_lines", "def params(self):\n return tuple(self._params)", "def param(self):\n return []", "def param(self):\n return []", "def parameter_values(self) -> List[Tuple[str, Any]]:\n pvs = [(param, getattr(self, variable))\n for variable, param in self.variable_name_to_query_param.items()]\n return [(p, v) for p, v in pvs if v is not None]", "def param_strs(self):\n name_len = max(len(p.name) for p in self)\n value_len = max(len(p.value_str) for p in self.params.values())\n units_len 
= max(len(p.units) for p in self.params.values())\n return [(p.name.ljust(name_len), p.value_str.ljust(value_len),\n p.units.ljust(units_len), p.__doc__)\n for p in self.params.values() if p]", "def parameters(self):\n params = []\n for layer in (self.conv1, self.conv2, self.conv3, self.conv4, self.dense1, self.dense2):\n params += list(layer.parameters)\n return params", "def parameters(self):\n return [p for _, a in vars(self).items() for p in self._params(a)]", "def decode(self):\n # make a copy since in case of XML it could be necessary to modify\n # the raw_params - filter out elements different than params\n raw_params = deepcopy(self.raw_params)\n params_def = self.params_def\n # ignore other tags than \"param\" and \"paramRepeat\"\n # e.g. sequencer may create tags like \"hookPlace\"\n if isinstance(raw_params, etree._Element):\n for raw_param in raw_params:\n if not raw_param.tag in (\"param\", \"paramrepeat\"):\n raw_params.remove(raw_param)\n\n params = []\n # check if too many parameters were passed\n len_params_def = len(params_def)\n if len(raw_params) > len_params_def:\n msg = (\"%r are supernumerary with respect to definition\" %\n raw_params[len_params_def:])\n raise SupernumeraryParam, msg\n # iterate over definition since missing values may just mean using\n # the default values\n for i, param_def in enumerate(params_def):\n try:\n raw_param = raw_params[i]\n except IndexError:\n raw_param = None\n obj = self.decodeNormal(raw_param, param_def)\n params.append(obj)\n self.params = params\n return self.params", "def typeParameters():\n\t\td = AdaptiveBatAlgorithm.typeParameters()\n\t\td.pop('A', None), d.pop('r', None)\n\t\td.update({\n\t\t\t'A_l': lambda x: isinstance(x, (float, int)) and x >= 0,\n\t\t\t'A_u': lambda x: isinstance(x, (float, int)) and x >= 0,\n\t\t\t'r_l': lambda x: isinstance(x, (float, int)) and x >= 0,\n\t\t\t'r_u': lambda x: isinstance(x, (float, int)) and x >= 0,\n\t\t\t'tao_1': lambda x: isinstance(x, (float, int)) and 0 <= x <= 1,\n\t\t\t'tao_2': lambda x: isinstance(x, (float, int)) and 0 <= x <= 1\n\t\t})\n\t\treturn d", "def getListOfParameters(self):\n return self.model.getListOfParameters()", "def param(self):\r\n return []", "def parameters_names(cls):\n return cls._Parameters._fields", "def getParameters(self): #$NON-NLS-1$\r", "def identifiers(self):\n identifiers = []\n\n for item in self._definition.get('identifiers', []):\n identifiers.append(Parameter(**item))\n\n return identifiers", "def get_params(node):\n if node.type == 'parameter':\n return [(self.descend(node.args[0]), types.translation[self.descend(node.args[1])])]\n else:\n l = []\n for p in node.args:\n l.extend(get_params(p))\n return l", "def get_parameters_list(self):\n return self.description[\"config\"][\"values\"].keys()", "def get_parameter_names(self):\n parNames = []\n # for par in self.variables: # TODO: LIKELY A BUG! 
DOES THE SAME AS get_variable_names()\n for par in self.parameters: # TRYING TO SOLVE THE ISSUE\n # EstimationVariable\n parNames.append(par.name)\n return parNames", "def get_design_parameters(self):\n return self.__get_one_type_params(DesignParameter)", "def generate_parameters(self):\n id = len(self.param_values)\n\n vector = []\n \n for idx in range(len(self.param_names)):\n name = self.param_names[idx]\n value = None\n settings = self.param_settings[idx]\n new_value = self.get_next_parameter(name, value, settings, id=id)\n vector.append(self.clamp(new_value, settings['min'], settings['max']))\n\n self.param_values.append(vector)\n self.result.append(None)\n return id, vector", "def parameters(self):\n return {\"P\": self.P,\n \"T\": self.T}", "def get_param_names(self):\n return list(self.params.keys())", "def params(self) -> List[ParamSpec]:\n return self._params", "def compile_parameter_list(self):\r\n var_type = None\r\n while True:\r\n self.tokenizer.advance()\r\n tok_type = self.tokenizer.token_type()\r\n if tok_type == JackTokenizer.KEYWORD_T:\r\n var_type = self.tokenizer.key_word()\r\n elif tok_type == JackTokenizer.IDENTIFIER_T:\r\n if not var_type:\r\n var_type = self.tokenizer.identifier()\r\n else:\r\n var_name = self.tokenizer.identifier()\r\n self.symbol_table.define(var_name, var_type, SymbolTable.ARG)\r\n var_type = None\r\n else:\r\n sym = self.tokenizer.symbol()\r\n if sym == \")\":\r\n break", "def getTypeCode(self):\n return _libsbml.LocalParameter_getTypeCode(self)", "def get_parameter_declarations(self, root):\n param_declare_group = etree.SubElement(root, \"ParameterDeclarations\")\n if QgsProject.instance().mapLayersByName(\"Parameter Declarations\"):\n param_layer = QgsProject.instance().mapLayersByName(\"Parameter Declarations\")[0]\n for feature in param_layer.getFeatures():\n param_name = feature[\"Parameter Name\"]\n param_type = feature[\"Type\"]\n param_value = feature[\"Value\"]\n\n param = etree.SubElement(param_declare_group, \"ParameterDeclaration\")\n param.set(\"name\", param_name)\n param.set(\"type\", param_type)\n param.set(\"value\", param_value)", "def compile_params(parameters):\n return tuple(p.values()[0] for p in parameters[1:])", "def get_paginator_parameters(self, paginator):\n fields = []\n if hasattr(paginator, 'get_schema_fields'):\n fields = paginator.get_schema_fields(self.view)\n\n return [self.coreapi_field_to_parameter(field) for field in fields]", "def get_field_parameters(self, in_parms):\n if len(in_parms) == 0: # Check if there are params\n return None # If that's the case, return None\n\n values = [] # Empty values\n is_msg = False # Check if the param is a message\n for parm in in_parms: # Loop over params\n if parm.type == \"Field\": # If it is a message\n is_msg = True # Set is_message to true\n continue # Go to top of loop\n _type = eval(parm.type) # create a type object\n value = _type(parm.value) # Create the value, and cast it to the type\n values.append(value) # Add that into the parameters\n if is_msg is True: # check if is a message\n return in_parms # Return input params\n elif len(values) == 1: # If there is only one element\n return values[-1] # Return just that element\n else: # Otherwise\n return values # Return the params", "def split_params(self, params):\n\t\tindex = 0\n\t\tacc = ''\n\t\tret = [] #return value (is ret a bad name?)\n\t\twhile index < len(params):\n\t\t\tif params[index] == ',': #End of a parameter\n\t\t\t\tret.append(acc)\n\t\t\t\tacc = ''\n\t\t\telif params[index] == '(': #start of a type 
that is a function\n\t\t\t\tend = params.match_paren(index)\n\t\t\t\twhile index <= end: #so the commas in the function type\n\t\t\t\t\t# are disregarded\n\t\t\t\t\tacc += params[index]\n\t\t\t\t\tindex += 1\n\t\t\t\tcontinue #so index doesn't get incremented again\n\t\t\telse:\n\t\t\t\tacc += params[index]\n\t\t\tindex += 1\n\n\t\tif acc: #if they ended the list with a comma then acc would be ''\n\t\t\tret.append(acc) #parameters not ended with a comma,\n\t\t\t# acc last the last param\n\n\t\treturn ret", "def _fi_out_parameters(self) -> Tuple[str, List[Tuple[str, str]]]:\n out_pars = self.ret_type.fi_ret_type()\n if len(out_pars) == 1:\n return (out_pars[0][0], [])\n\n out_par_list = list() # type: List[Tuple[str, str]]\n for par_type, par_name in out_pars:\n out_par_list.append((par_type, 'ret_val' + par_name))\n\n return ('', out_par_list)", "def extract_method_signature(code, line):\n line += 5\n method_signature = []\n offset = get_offset(code, line, \"catch(\")\n param_pattern = re.compile(rf\"{R_VAR}\\(.*, ?.*, ?(.*)\\)\\);\")\n\n for _ in range(int((offset - 2) / 2)):\n parameter = parse_parameter(code, re.findall(param_pattern, code[line])[0])\n\n # If List type found, assume ArrayList implementation of Strings\n if parameter.startswith(COMPLEX_TYPES[\"LIST\"]):\n parameter += f\"<{COMPLEX_TYPES['ARRAY']}/4159755760\"\n parameter += f\"<{COMPLEX_TYPES['STRING']}/2004016611>>\"\n\n # If specific List implementation found, assume it is of Strings\n elif re.match(r\"java\\.util\\.[A-Za-z]+List/.*\", parameter):\n parameter += f\"<{COMPLEX_TYPES['STRING']}/2004016611>\"\n\n method_signature.append(parameter)\n\n line += 1\n\n return method_signature", "def getListOfParameters(self, *args):\n return _libsbml.Model_getListOfParameters(self, *args)", "def params(self):\n if isinstance(self.request, list):\n return unmunchify(self.request)\n (params, _) = xmlrpc.loads(self.request)\n return params", "def get_parameters(parameters):\n\n arg_list = []\n opt_list = []\n for param in parameters:\n param_name = param['name']\n param_required = param['required']\n if param_required:\n arg_list.append(format_parameter(param_name, param_required))\n else:\n opt_list.append(format_parameter(param_name, param_required))\n\n return arg_list + opt_list", "def get_paramnames_list(self):\n # TODO include syselem?\n\n query = \"SELECT NAME FROM %s\" % self.__schema\n with self.__connection.cursor() as cursor:\n cursor.execute(query)\n result = cursor.fetchall()\n return [val['NAME'] for val in result]", "def parameters(self):\n return self.pars", "def _GetParameters(\n self,\n required_path_params: Iterable[FieldDescriptor],\n optional_path_params: Iterable[FieldDescriptor],\n query_params: Iterable[FieldDescriptor],\n ) -> List[Dict[str, Union[str, bool, SchemaReference, ArraySchema,\n DescribedSchema]]]:\n parameters = []\n\n req_path_params_set = set(required_path_params)\n opt_path_params_set = set(optional_path_params)\n query_params_set = set(query_params)\n for field_d in req_path_params_set | opt_path_params_set | query_params_set:\n parameter_obj = {\"name\": casing.SnakeToCamel(field_d.name)}\n if field_d in req_path_params_set:\n parameter_obj[\"in\"] = \"path\"\n parameter_obj[\"required\"] = True\n elif field_d in opt_path_params_set:\n parameter_obj[\"in\"] = \"path\"\n else:\n parameter_obj[\"in\"] = \"query\"\n\n parameter_obj[\"schema\"] = self._GetDescribedSchema(field_d)\n\n parameters.append(parameter_obj)\n\n return parameters", "def parameters(self):\n return {\"W\": self.W,\n 
\"T\": self.T,\n \"P\": self.P,\n \"Wo\": self.Wo,\n \"To\": self.To,\n \"Po\": self.Po}", "def params(self):\n return self._pars", "def get_params(self, pnames=None):\n l = []\n if pnames is None:\n pnames = self._params.keys()\n for pname in pnames:\n p = self._params[pname]\n if isinstance(p, Parameter):\n l.append(p)\n return l", "def _fc_out_parameters(self) -> Tuple[str, List[str]]:\n out_pars = self.ret_type.fc_ret_type()\n if len(out_pars) == 1:\n return (out_pars[0][0], [])\n\n out_par_strl = list() # type: List[str]\n for type_name, postfix in out_pars:\n out_par_strl.append('{} {}'.format(\n type_name, self.ret_type.name + postfix))\n return ('void', out_par_strl)", "def parameters(self):\n return [term.parameter for term in self.terms]", "def parameter_tuple_maker(parameter_code, code_list, i):\n\n return (parameter_code, code_list[i])", "def get_ext_param_names(self):\n num_param = core.xc_func_info_get_n_ext_params(self.xc_func_info)\n\n ret = []\n for p in range(num_param):\n tmp = core.xc_func_info_get_ext_params_name(self.xc_func_info, p)\n ret.append(tmp.decode(\"UTF-8\"))\n\n return ret", "def _getTypeCodes(self, callinfo):\r\n prefix = None\r\n self._resetPrefixDict()\r\n if callinfo.use == 'encoded':\r\n prefix = self._getPrefix(callinfo.namespace)\r\n try:\r\n requestTC = self._getTypeCode(parameters=callinfo.getInParameters(), literal=(callinfo.use=='literal'))\r\n except EvaluateException, ex:\r\n print \"DEBUG: Request Failed to generate --\", ex\r\n requestTC = None\r\n\r\n self._resetPrefixDict()\r\n try:\r\n replyTC = self._getTypeCode(parameters=callinfo.getOutParameters(), literal=(callinfo.use=='literal'))\r\n except EvaluateException, ex:\r\n print \"DEBUG: Response Failed to generate --\", ex\r\n replyTC = None\r\n \r\n request = response = None\r\n if callinfo.style == 'rpc':\r\n if requestTC: request = TC.Struct(pyclass=None, ofwhat=requestTC, pname=callinfo.methodName)\r\n if replyTC: response = TC.Struct(pyclass=None, ofwhat=replyTC, pname='%sResponse' %callinfo.methodName)\r\n else:\r\n if requestTC: request = requestTC[0]\r\n if replyTC: response = replyTC[0]\r\n\r\n #THIS IS FOR RPC/ENCODED, DOC/ENCODED Wrapper\r\n if request and prefix and callinfo.use == 'encoded':\r\n request.oname = '%(prefix)s:%(name)s xmlns:%(prefix)s=\"%(namespaceURI)s\"' \\\r\n %{'prefix':prefix, 'name':request.oname, 'namespaceURI':callinfo.namespace}\r\n\r\n return request, response", "def _parse_parameters(self, parameter_protos):\n logger.debug(\"Start to parse parameters from proto.\")\n for parameter in parameter_protos:\n if not parameter.name:\n logger.warning(\"Finding a parameter with an empty name will not save it.\")\n continue\n check_invalid_character(parameter.name)\n node = Node(name=parameter.name, node_id=parameter.name)\n node.type = NodeTypeEnum.PARAMETER.value\n node.output_shape = self._get_shape_by_parse_type_proto(parameter.type)\n node.output_nums = len(node.output_shape)\n node.output_data_type = self._get_data_type_by_parse_type_proto(parameter.type, node)\n attr = dict(\n type=self._get_data_type_by_parse_type_proto(parameter.type, node),\n shape=str(self._get_shape_by_parse_type_proto(parameter.type))\n )\n node.add_attr(attr)\n\n self._cache_node(node)\n logger.debug(\"Foreach graph proto parameters, node id: %s, node name: %s, \"\n \"node def name: %s\", node.node_id, node.name, parameter.name)", "def compile_ids(parameters):\n return tuple(p.keys()[0] for p in parameters[1:])", "def get_parameters(self, separate_):\n if separate_:\n return 
self.F, self.J, self.prox\n else :\n return self.Fone, self.Jone, self.prox", "def parameter_list(self):\n return [\n [encut, kpoint_mesh]\n for encut, kpoint_mesh in zip(\n self._job.iteration_frame.ENCUT, self._job.iteration_frame.KPOINT_MESH\n )\n ]", "def _formal_params(self, doclet):\n name, paren, params = self.arguments[0].partition('(')\n return ('(%s' % params) if params else '(%s)' % ', '.join(doclet['meta']['code']['paramnames'])", "def get_str_param_names(self):\n # Exclude self.api and self.names from the command string\n return self.get_attribute_names(FormattedParameter)", "def get_params(self):\n return []", "def get_params_as_list(self):\n\n\t\tparams = [self.shape_slope, self.z_thick, self.thick, self.length]\n\t\treturn params", "def parse_params(txt):\n res = list()\n # First, slipt with stuff looking like \\TYPE:\n splitted = re.split(r'\\s*\\\\(\\w+)\\s*:', txt)\n # We now have a list looking like:\n # ['', 'flag', '....', 'param', '...']\n i = 1\n while i < len(splitted) - 1:\n type = splitted[i]\n rest = splitted[i+1]\n if type == \"argn\":\n name = \"remaining args\"\n desc = rest\n else:\n # first word is the name, the rest is the description:\n match = re.match(r'\\s*(\\w+)\\s*(.*)', rest, re.DOTALL)\n if not match:\n print(\"warning, failed to parse parameters\")\n print(\"near\", rest)\n break\n (name, desc) = match.groups()\n desc = clean_indent(desc)\n res.append((type, name, desc))\n i += 2\n return res", "def methodSignature(self, req, method):\n p = self.get_method(method)\n return [','.join([RPC_TYPES[x] for x in sig]) for sig in p.xmlrpc_signatures()]", "def packParameters(params):\r\n buf = bytes(\"\")\r\n\r\n for param in params:\r\n if type(param) == memoryview:\r\n buf += bytes(param.tobytes())\r\n\r\n elif issubclass(type(param), CIDType):\r\n buf += bytes(param.pack().tobytes())\r\n\r\n else:\r\n buf += bytes(param)\r\n\r\n return buf", "def param_json(self) -> Optional[List[Dict]]:\n # Handle empty dict and None value\n if not self.param:\n return []\n return [\n {\n \"prop\": k,\n \"direct\": \"IN\",\n \"type\": \"VARCHAR\",\n \"value\": v,\n }\n for k, v in self.param.items()\n ]", "def create_parameter_list(path_params):\n param_list = []\n for param in path_params:\n parameter = {}\n parameter['in'] = 'path'\n parameter['name'] = str(param)\n parameter['description'] = 'ID of ' + str(param)[:-2]\n parameter['required'] = True\n parameter['type'] = 'string'\n param_list.append(parameter)\n return param_list", "def parameters(self):\n return self.pkt.payload[2:]", "def get_params_list():\n return common.QOL_PARAMS", "def values(self):\n return {n: getattr(self, n) for n in self._hparam_types.keys()}", "def getmethparlist(ob):\n defText = callText = \"\"\n # bit of a hack for methods - turn it into a function\n # but we drop the \"self\" param.\n # Try and build one for Python defined functions\n args, varargs, varkw = inspect.getargs(ob.__code__)\n items2 = args[1:]\n realArgs = args[1:]\n defaults = ob.__defaults__ or []\n defaults = [\"=%r\" % (value,) for value in defaults]\n defaults = [\"\"] * (len(realArgs)-len(defaults)) + defaults\n items1 = [arg + dflt for arg, dflt in zip(realArgs, defaults)]\n if varargs is not None:\n items1.append(\"*\" + varargs)\n items2.append(\"*\" + varargs)\n if varkw is not None:\n items1.append(\"**\" + varkw)\n items2.append(\"**\" + varkw)\n defText = \", \".join(items1)\n defText = \"(%s)\" % defText\n callText = \", \".join(items2)\n callText = \"(%s)\" % callText\n return defText, callText", "def 
getmethparlist(ob):\n defText = callText = \"\"\n # bit of a hack for methods - turn it into a function\n # but we drop the \"self\" param.\n # Try and build one for Python defined functions\n args, varargs, varkw = inspect.getargs(ob.__code__)\n items2 = args[1:]\n realArgs = args[1:]\n defaults = ob.__defaults__ or []\n defaults = [\"=%r\" % (value,) for value in defaults]\n defaults = [\"\"] * (len(realArgs)-len(defaults)) + defaults\n items1 = [arg + dflt for arg, dflt in zip(realArgs, defaults)]\n if varargs is not None:\n items1.append(\"*\" + varargs)\n items2.append(\"*\" + varargs)\n if varkw is not None:\n items1.append(\"**\" + varkw)\n items2.append(\"**\" + varkw)\n defText = \", \".join(items1)\n defText = \"(%s)\" % defText\n callText = \", \".join(items2)\n callText = \"(%s)\" % callText\n return defText, callText", "def parameters(self):\n return [i.parameter for i in self.joints.values()]", "def methodSignature(self, name):\r\n methods = self._listMethods()\r\n for method in methods:\r\n if method == name:\r\n rtype = None\r\n ptypes = []\r\n parsed = gettags(methods[method])\r\n for thing in parsed:\r\n if thing[1] == 'return': # tag name\r\n rtype = thing[2] # datatype\r\n elif thing[1] == 'param': # tag name\r\n ptypes.append(thing[2]) # datatype\r\n if rtype is None:\r\n raise RPCError(Faults.SIGNATURE_UNSUPPORTED)\r\n return [rtype] + ptypes\r\n raise RPCError(Faults.SIGNATURE_UNSUPPORTED)", "def parameters(self):\n return self._params", "def return_parameter_names():\n return list(titles), list(labels)", "def parameters(self):\n raise NotImplementedError('Abstract method \"parameters\" must be '\n 'specialised!')" ]
[ "0.6973047", "0.63543373", "0.6333325", "0.628187", "0.62337446", "0.6057316", "0.6028395", "0.6020107", "0.60162276", "0.5864634", "0.5859402", "0.58460873", "0.5785523", "0.5735038", "0.5657189", "0.5608447", "0.56071746", "0.5583623", "0.55738485", "0.55229664", "0.5505756", "0.5493563", "0.54874027", "0.548255", "0.5477482", "0.5460601", "0.54441035", "0.5426989", "0.5421317", "0.54047585", "0.54003024", "0.53997904", "0.5391674", "0.53752714", "0.53752714", "0.5366381", "0.53526354", "0.5347416", "0.53416455", "0.53396225", "0.53339463", "0.53214407", "0.5314998", "0.5309011", "0.5307162", "0.5293975", "0.52792567", "0.5274513", "0.5274282", "0.5252961", "0.52257025", "0.5221024", "0.52189946", "0.5203584", "0.5198364", "0.51920855", "0.5190797", "0.5189176", "0.5181118", "0.5177193", "0.51754963", "0.5175333", "0.51664984", "0.51477903", "0.5140619", "0.51288813", "0.51177627", "0.5109485", "0.5103466", "0.5099938", "0.5097136", "0.50905865", "0.50828034", "0.50819546", "0.5079473", "0.50751066", "0.5063971", "0.5063072", "0.5056461", "0.50558925", "0.50516826", "0.5046655", "0.5038978", "0.5013095", "0.50020343", "0.50006866", "0.49947912", "0.49941272", "0.4993597", "0.49818012", "0.49772528", "0.4969707", "0.4967129", "0.49530426", "0.49530426", "0.4948864", "0.4945405", "0.49446562", "0.49410713", "0.49400368" ]
0.61186475
5
namespaces typecodes representing global elements with literal encoding. typeCode typecode representing an element. namespaceURI namespace literal True/False
def _globalElement(self, typeCode, namespaceURI, literal):
    if literal:
        typeCode.oname = '%(prefix)s:%(name)s xmlns:%(prefix)s="%(namespaceURI)s"' \
            %{'prefix':self._getPrefix(namespaceURI), 'name':typeCode.oname, 'namespaceURI':namespaceURI}
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def XmlTypeNamespace(self) -> str:", "def is_namespace_type(self):\n raise exceptions.NotImplementedError()", "def GetNamespaces(self):\n return list(self.type_namespaces_map.values())", "def element_type(self) -> global___Type:", "def patch_well_known_namespaces(etree_module):\n etree_module._namespace_map.update({\n \"http://www.w3.org/1999/02/22-rdf-syntax-ns#\": \"rdf\", \n \"http://purl.org/rss/1.0/\": \"rss\", \n \"http://purl.org/rss/1.0/modules/taxonomy/\": \"taxo\", \n \"http://purl.org/dc/elements/1.1/\": \"dc\", \n \"http://purl.org/rss/1.0/modules/syndication/\": \"syn\", \n \"http://www.w3.org/2003/01/geo/wgs84_pos#\": \"geo\"})", "def _AppIdNamespaceKindForKey(self, key):\n last_path = key.path().element_list()[-1]\n return (datastore_types.EncodeAppIdNamespace(key.app(), key.name_space()),\n last_path.type())", "def hasNamespaceURI(self, *args):\n return _libsbml.XMLToken_hasNamespaceURI(self, *args)", "def GetNamespace(self, namespace_name):\n return self.type_namespaces_map.get(namespace_name, None)", "def visit_Typedef(self, node):\n return str_node(node)", "def test_namespaceFound(self):\n xp = XPathQuery(\"/foo[@xmlns='testns']/bar\")\n self.assertEqual(xp.matches(self.e), 1)", "def namespaces(self) -> NamespacesType:\n return self.schema.namespaces", "def getEnumerationTypeXmlStub (typeName): \n\tsimpleType = createSchemaElement(\"simpleType\")\n\tsimpleType.setAttribute (\"name\",typeName)\n\trestriction = createSchemaElement(\"restriction\")\n\trestriction.setAttribute (\"base\", qp(\"token\"))\n\tsimpleType.appendChild (restriction)\n\treturn simpleType", "def _getTypeClass(self, namespaceURI, localName):\r\n bti = BaseTypeInterpreter()\r\n simpleTypeClass = bti.get_typeclass(localName, namespaceURI)\r\n return simpleTypeClass", "def XmlTypeName(self) -> str:", "def XmlNamespace(self) -> str:", "def getNamespaceURI(self, *args):\n return _libsbml.XMLToken_getNamespaceURI(self, *args)", "def translate_custom_types(self):\n\n\t\t# Preparing variables\n\t\ta_residue_names = self.a_atoms[\"residue_name\"]\t\t# Loads the names of residues\n\t\ta_atom_name = self.a_atoms[\"atom_name\"]\t\t# Loads the names of the atoms\n\t\ta_atom_symbol = self.a_atoms[\"element_symbol\"]\t\t# Loads the elements symbols\n\t\tl_s_custom_types = []\t\t# Contains the list of converted types\n\t\td_translate_custom = {\t\t# Conversion dictionary for custom types\n\t\t\t\"O\": \"OC\",\n\t\t\t\"H\": \"H\",\n\t\t\t\"N\": \"NAM\",\n\t\t\t\"C\": \"XOT\",\n\t\t\t\"CA\": \"XOT\",\n\t\t\t\"CB\": \"XOT\",\n\t\t\t\"OXT\": \"XOT\"\n\t\t}\n\n\t\t# STEP 1 : Converting the atom types ---------------- #\n\t\t# For each element to convert\n\t\tfor i_element in range(len(a_residue_names)):\n\n\t\t\t# If the residue is one of the main amino acids\n\t\t\tif a_residue_names[i_element] in elem_config.RES:\n\n\t\t\t\t# Hydrogen\n\t\t\t\tif a_atom_symbol[i_element] == \"H\":\n\t\t\t\t\ts_custom_type = \"H\"\n\n\t\t\t\t# If the atom is one of the main carbon chain\n\t\t\t\telif a_atom_name[i_element] in d_translate_custom.keys():\n\t\t\t\t\ts_custom_type = d_translate_custom[a_atom_name[i_element]]\n\n\t\t\t\t# Nitrogen in Arginine\n\t\t\t\telif a_residue_names[i_element] == \"ARG\" and a_atom_name[i_element] in elem_config.NARG[a_residue_names[i_element]]:\n\t\t\t\t\ts_custom_type = \"NBAS\"\n\n\t\t\t\t# Carbon SP2 in aromatic ring\n\t\t\t\telif a_residue_names[i_element] in elem_config.CAR.keys() and a_atom_name[i_element] in elem_config.CAR[a_residue_names[i_element]]:\n\t\t\t\t\ts_custom_type = 
\"CAR\"\n\n\t\t\t\t# Oxygen in hydroxyl or phenol\n\t\t\t\telif a_residue_names[i_element] in elem_config.OHY.keys() and a_atom_name[i_element] == elem_config.OHY[a_residue_names[i_element]]:\n\t\t\t\t\ts_custom_type = \"OH\"\n\n\t\t\t\t# Nitrogen in amide\n\t\t\t\telif a_residue_names[i_element] in elem_config.NAM.keys() and a_atom_name[i_element] == elem_config.NAM[a_residue_names[i_element]]:\n\t\t\t\t\ts_custom_type = \"NAM\"\n\n\t\t\t\t# Nitrogen in Histidine\n\t\t\t\telif a_residue_names[i_element] in elem_config.NHIS.keys() and a_atom_name[i_element] in elem_config.NHIS[a_residue_names[i_element]]:\n\t\t\t\t\ts_custom_type = \"NBAS\"\n\n\t\t\t\t# Central carbon from ARG, GLN, GLU, ASP, ASN\n\t\t\t\telif a_residue_names[i_element] in elem_config.CE.keys() and elem_config.CE[a_residue_names[i_element]] == a_atom_name[i_element]:\n\t\t\t\t\ts_custom_type = \"CAR\"\n\n\t\t\t\t# Oxygen in carbonyl\n\t\t\t\telif a_residue_names[i_element] in elem_config.OC.keys() and a_atom_name[i_element] == elem_config.OC[a_residue_names[i_element]]:\n\t\t\t\t\ts_custom_type = \"OC\"\n\n\t\t\t\t# Oxygen in carboxylate and oxygen in C-terminal\n\t\t\t\telif a_residue_names[i_element] in elem_config.OOX.keys() and \\\n\t\t\t\t\t\t(a_atom_name[i_element] == elem_config.OOX[a_residue_names[i_element]][0] or\n\t\t\t\t\t\t a_atom_name[i_element] == elem_config.OOX[a_residue_names[i_element]][1]):\n\t\t\t\t\ts_custom_type = \"OOX\"\n\n\t\t\t\t# Nitrogen in Lysine\n\t\t\t\telif a_residue_names[i_element] in elem_config.NLYS.keys() and a_atom_name[i_element] == elem_config.NLYS[a_residue_names[i_element]]:\n\t\t\t\t\ts_custom_type = \"NBAS\"\n\n\t\t\t\t# Unknown element within a amino acid\n\t\t\t\telse:\n\t\t\t\t\ts_custom_type = \"XOT\"\n\t\t\t# End if\n\n\t\t\t# If the element is a metallic atom\n\t\t\telif a_atom_symbol[i_element] in elem_config.METAL:\n\t\t\t\ts_custom_type = \"META\"\n\n\t\t\t# If the element is a halogen\n\t\t\telif a_atom_symbol[i_element] in elem_config.HALO:\n\t\t\t\ts_custom_type = \"HALO\"\n\n\t\t\t# If the element is a water molecule\n\t\t\telif a_residue_names[i_element] == \"HOH\" and a_atom_name[i_element] == \"O\":\n\t\t\t\ts_custom_type = \"OOW\"\n\n\t\t\t# If the element is not known\n\t\t\telse:\n\n\t\t\t\t# If the element can be converted\n\t\t\t\tif a_atom_symbol[i_element] in d_translate_custom.keys():\n\t\t\t\t\ts_custom_type = d_translate_custom[a_atom_symbol[i_element]]\n\n\t\t\t\t# If it cannot\n\t\t\t\telse:\n\t\t\t\t\ts_custom_type = \"HETATM\"\n\t\t\t# End if\n\n\t\t\tl_s_custom_types.append(s_custom_type)\t\t# Saves the new element type\n\t\t# End for\n\t\t# END STEP 1 ---------------------------------------- #\n\n\t\t# STEP 2 : Saving the list of custom types ---------- #\n\t\tself.a_atoms[\"custom_type\"] = l_s_custom_types\t\t# Saves the list of custom types\n\t\t# END STEP 2 ---------------------------------------- #", "def header_hook(header, data):\n\n for e in header.enums:\n e[\"x_namespace\"] = e[\"namespace\"]", "def namespace_for(uri: Union[URIRef, Namespace, str]) -> str:\n uri = str(uri)\n if uri not in namespaces.values():\n namespaces[AnonNS().ns] = uri\n return [k for k, v in namespaces.items() if uri == v][0]", "def hasNamespaceNS(self, *args):\n return _libsbml.XMLToken_hasNamespaceNS(self, *args)", "def _getElement(self, element, literal=False, local=False, namespaceURI=None):\r\n if not element.isElement():\r\n raise TypeError, 'Expecting an ElementDeclaration'\r\n\r\n tc = None\r\n elementName = element.getAttribute('name')\r\n tp = 
element.getTypeDefinition('type')\r\n\r\n typeObj = None\r\n if not (tp or element.content):\r\n nsuriType,localName = element.getAttribute('type')\r\n typeClass = self._getTypeClass(nsuriType,localName)\r\n \r\n typeObj = typeClass(elementName)\r\n elif not tp:\r\n tp = element.content\r\n\r\n if not typeObj:\r\n typeObj = self._getType(tp, elementName, literal, local, namespaceURI)\r\n\r\n minOccurs = int(element.getAttribute('minOccurs'))\r\n typeObj.optional = not minOccurs\r\n\r\n maxOccurs = element.getAttribute('maxOccurs')\r\n typeObj.repeatable = (maxOccurs == 'unbounded') or (int(maxOccurs) > 1)\r\n\r\n return typeObj", "def xmlrpc_namespace():", "def test_xml_to_dict_net_namespace(self):\n xml = \"\"\"\n <a\n xmlns=\"urn:ietf:params:xml:ns:netconf:base:1.0\"\n >\n <b xmlns=\"something\">b</b>\n <!-- Comment, ignore it -->\n </a>\n \"\"\"\n xmlns = {\n \"_\": utils.NETCONF_NAMESPACE\n }\n result = utils.generate_dict_node(etree.XML(xml), xmlns)\n # check dict\n self.assertEqual(\n {'a': {'_something@b': 'b'}},\n result\n )\n # check xmlns\n self.assertEqual(\n {\n '_': utils.NETCONF_NAMESPACE,\n '_something': 'something'\n }, xmlns\n )", "def __init__ (self, ns_or_tagraw, pred=None, value=None) :\n self.__namespace__ = None\n self.__predicate__ = None\n self.__value__ = None\n self.__ismachinetag__ = False\n self.__isnumeric__ = False\n\n if pred :\n\n re_nspred = re.compile(r\"^([a-z](?:[a-z0-9_]+))$\", re.IGNORECASE)\n\n if re_nspred.match(ns_or_tagraw) and re_nspred.match(pred) and value :\n self.__namespace__ = ns_or_tagraw\n self.__predicate__ = pred\n self.__value__ = value\n else :\n\n re_tag = re.compile(r\"^([a-z](?:[a-z0-9_]+))\\:([a-z](?:[a-z0-9_]+))\\=(.+)$\", re.IGNORECASE)\n m = re_tag.findall(ns_or_tagraw)\n\n if m :\n self.__namespace__ = m[0][0]\n self.__predicate__ = m[0][1]\n self.__value__ = m[0][2]\n\n if self.__namespace__ and self.__predicate__ and self.__value__ :\n self.__ismachinetag__ = True\n\n valtype = type(self.__value__)\n\n if valtype == types.IntType or valtype == types.FloatType :\n self.__isnumeric__ = True\n else :\n re_num = re.compile(r\"^-?\\d+(\\.\\d+)?$\", re.IGNORECASE)\n m = re_num.findall(self.__value__)\n\n if m :\n\n self.__isnumeric__ = True\n self.__value__ = unicode(self.__value__)\n\n if m[0] :\n self.__value_numeric__ = float(self.__value__)\n else :\n self.__value_numeric__ = int(self.__value__)", "def test_getLocalType(self):\n cases = [\n (self.test_eac + \"NE00800.xml\", \"Archival Series\"),\n (self.test_eac + \"NE00916.xml\", \"Archival Collection\"),\n (self.test_eac + \"NE01201.xml\", \"Person\"),\n (self.test_eac + \"NE01000.xml\", \"Glossary Term\"),\n ]\n for case in cases:\n source, expected = case\n doc = EacCpf.EacCpf(source,'http://www.example.com')\n self.assertNotEqual(doc, None)\n result = doc.getLocalType()\n self.assertEqual(result, expected)", "def hasURI(self, *args):\n return _libsbml.XMLNamespaces_hasURI(self, *args)", "def getNamespace(self):\n pass;", "def idl_type(field, namespace):\n\n out = ''\n if field.is_map:\n out = 'map <{0},'.format(idl_type(field.map_key, namespace))\n\n if field.is_array:\n out += 'repeated '\n\n if field.data_type in (schema.Field.DataType.STRUCT,\n schema.Field.DataType.ENUM):\n out += field.metadata.full_name.replace(namespace, '').strip('.')\n else:\n out += field.data_type.value\n\n if field.is_map:\n out += '>'\n\n return out", "def namespaces(self):\n return ()", "def getNamespace(self, parent: ghidra.program.model.symbol.Namespace, namespaceName: unicode) -> 
ghidra.program.model.symbol.Namespace:\n ...", "def test_get_namespaces_names(self):\n pass", "def _getTypeCode(self, parameters, literal=False):\r\n ofwhat = []\r\n for part in parameters:\r\n namespaceURI,localName = part.type\r\n\r\n if part.element_type:\r\n #global element\r\n element = self._wsdl.types[namespaceURI].elements[localName]\r\n tc = self._getElement(element, literal=literal, local=False, namespaceURI=namespaceURI)\r\n else:\r\n #local element\r\n name = part.name\r\n typeClass = self._getTypeClass(namespaceURI, localName)\r\n if not typeClass:\r\n tp = self._wsdl.types[namespaceURI].types[localName]\r\n tc = self._getType(tp, name, literal, local=True, namespaceURI=namespaceURI)\r\n else:\r\n tc = typeClass(name)\r\n ofwhat.append(tc)\r\n return ofwhat", "def _match_entry_type_string(code_entry, string_list):\n entry_type = re.match(r\"<(AST.*):.*\", code_entry.get('type')).group(1)\n return bool(entry_type in string_list)", "def type(self) -> global___Type:", "def _getPrefix(self, namespaceURI):\r\n prefixDict = self._getPrefixDict()\r\n if prefixDict.has_key(namespaceURI):\r\n prefix = prefixDict[namespaceURI]\r\n else:\r\n prefix = 'ns1'\r\n while prefix in prefixDict.values():\r\n prefix = 'ns%d' %int(prefix[-1]) + 1\r\n prefixDict[namespaceURI] = prefix\r\n return prefix", "def test_createElementNS():\n\n assert not _do_test_raw(\"\"\"\n var x = \"foo\";\n x.createElementNS();\n x.createElementNS(\"foo\");\n x.createElementNS(\"foo\", \"bar\");\n \"\"\").failed()\n\n assert _do_test_raw(\"\"\"\n var x = \"foo\";\n x.createElementNS(\"foo\", \"script\");\n \"\"\").failed()\n\n assert _do_test_raw(\"\"\"\n var x = \"foo\";\n x.createElementNS(\"foo\", bar);\n \"\"\").failed()\n\n # Test for https://github.com/mozilla/amo-validator/issues/368\n assert not _do_test_raw(\"\"\"\n var x = \"foo\",\n nsXUL = \"http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul\";\n\n x.createElementNS(nsXUL, 'panelview')\n \"\"\").failed()\n\n # Creating a <script> element raises a warning of course.\n assert _do_test_raw(\"\"\"\n var x = \"foo\",\n nsXUL = \"http://www.mozilla.org/keymaster/gatekeeper/there.is.only.xul\";\n\n x.createElementNS(nsXUL, 'script')\n \"\"\").failed()", "def _fixNS(self, namespace):\n if isinstance(namespace, bytes):\n namespace = str(namespace, encoding=\"utf-8\")\n\n if namespace == OPENID_NS:\n if self._openid_ns_uri is None:\n raise UndefinedOpenIDNamespace('OpenID namespace not set')\n else:\n namespace = self._openid_ns_uri\n\n if namespace != BARE_NS and not isinstance(namespace, str):\n raise TypeError(\n \"Namespace must be BARE_NS, OPENID_NS or a string. got %r\" %\n (namespace, ))\n\n if namespace != BARE_NS and ':' not in namespace:\n fmt = 'OpenID 2.0 namespace identifiers SHOULD be URIs. 
Got %r'\n warnings.warn(fmt % (namespace, ), DeprecationWarning)\n\n if namespace == 'sreg':\n fmt = 'Using %r instead of \"sreg\" as namespace'\n warnings.warn(\n fmt % (SREG_URI, ),\n DeprecationWarning, )\n return SREG_URI\n\n return namespace", "def containsUri(self, *args):\n return _libsbml.XMLNamespaces_containsUri(self, *args)", "def _get_element_ns(self, element):\n\n if (self._client == None):\n raise ValueError('Specification is not imported yet')\n\n ns = None\n for key in self._client.wsdl.schema.types.keys():\n if (key[0] == element):\n ns = key[1]\n break\n\n return ns", "def SBMLNamespaces_isSBMLNamespace(*args):\n return _libsbml.SBMLNamespaces_isSBMLNamespace(*args)", "def test_typedef00801m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00801m/typeDef00801m1.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00801m/typeDef00801m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def hasNS(self, *args):\n return _libsbml.XMLNamespaces_hasNS(self, *args)", "def test_type_attribute(self):\n\n self._create_string()\n self.assertEquals(\"%s:%s\" % (\"xs\",\"string\"), self.string.schema_node.get(\"type\"))", "def get_pyxb_namespaces():\n return pyxb.namespace.utility.AvailableNamespaces()", "def _BuildTypeMaps(self, type_namespaces):\n for type_namespace in type_namespaces:\n self.type_namespaces_map[type_namespace.namespace] = type_namespace\n for entity_type in type_namespace.valid_types_map.values():\n if entity_type.guid:\n if entity_type.guid in self.type_guids_map:\n dup_guid_entry = self.type_guids_map[entity_type.guid]\n dup_guid_type = self.GetEntityType(dup_guid_entry.namespace,\n dup_guid_entry.typename)\n if dup_guid_type is None:\n raise RuntimeError('Duplicate type with guid ' + entity_type.guid\n + ' should always be mapped')\n entity_type.AddFinding(\n findings_lib.DuplicateGuidsError(type_namespace.namespace,\n entity_type, dup_guid_type))\n dup_guid_type.AddFinding(\n findings_lib.DuplicateGuidsError(dup_guid_entry.namespace,\n dup_guid_type, entity_type))\n self.type_guids_map[entity_type.guid] = EntityIdByEntry(\n namespace=type_namespace.namespace, typename=entity_type.typename)", "def test_read_net_namespace(self):\n pass", "def test_typedef00205m_type_def00205m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00205m/typeDef00205m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00205m/typeDef00205m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_typedef00802m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00802m/typeDef00802m1.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00802m/typeDef00802m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def get_type(self) -> str:\n # Note: this name conflicts with existing python builtins\n return self[\"Sns\"][\"Type\"]", "def pyxb_get_type_name(obj_pyxb):\n return pyxb_get_namespace_name(obj_pyxb).split('}')[-1]", "def getTypeCode(self):\n return _libsbml.ReplacedBy_getTypeCode(self)", "def test_typedef00202m_type_def00202m1_p(mode, save_output, output_format):\n assert_bindings(\n 
schema=\"sunData/ElemDecl/typeDef/typeDef00202m/typeDef00202m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00202m/typeDef00202m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_typedef01201m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef01201m/typeDef01201m1.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef01201m/typeDef01201m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def astType(cls, source):\n if source == '':\n return cls.BLANK\n if source == \"OPENQASM 2.0;\":\n return cls.DECLARATION_QASM_2_0\n x = QTRegEx.COMMENT.search(source)\n if x:\n return cls.COMMENT\n x = QTRegEx.INCLUDE.search(source)\n if x:\n return cls.INCLUDE\n x = QTRegEx.CTL_2.search(source)\n if x:\n if x.group(1) == 'if':\n return cls.CTL_2\n x = QTRegEx.QREG.search(source)\n if x:\n return cls.QREG\n x = QTRegEx.CREG.search(source)\n if x:\n return cls.CREG\n x = QTRegEx.MEASURE.search(source)\n if x:\n return cls.MEASURE\n x = QTRegEx.BARRIER.search(source)\n if x:\n return cls.BARRIER\n x = QTRegEx.GATE.search(source)\n if x:\n return cls.GATE\n x = QTRegEx.OP.search(source)\n if x:\n return cls.OP\n return cls.UNKNOWN", "def validateOneNamespace(self, doc, elem, prefix, ns, value):\n if doc is None: doc__o = None\n else: doc__o = doc._o\n if elem is None: elem__o = None\n else: elem__o = elem._o\n if ns is None: ns__o = None\n else: ns__o = ns._o\n ret = libxml2mod.xmlValidateOneNamespace(self._o, doc__o, elem__o, prefix, ns__o, value)\n return ret", "def test_get_node_type_name(self):\n pass", "def test_typedef00502m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00502m/typeDef00502m1.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00502m/typeDef00502m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_typedef00204m_type_def00204m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00204m/typeDef00204m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00204m/typeDef00204m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def SBMLNamespaces_getSupportedNamespaces():\n return _libsbml.SBMLNamespaces_getSupportedNamespaces()", "def __generate_object_term__(self, datatype, value):\n if datatype == NS_MGR.xsd.anyURI.rdflib:\n term = rdflib.URIRef(value)\n elif datatype:\n term = rdflib.Literal(value, datatype=datatype)\n else:\n term = rdflib.Literal(value)\n return term", "def XrefTypeName(typecode):\n assert typecode in _ref_types, \"unknown reference type %d\" % typecode\n return _ref_types[typecode]", "def getTypeCode(self):\n return _libsbml.SBMLDocument_getTypeCode(self)", "def test_typedef00203m_type_def00203m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00203m/typeDef00203m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00203m/typeDef00203m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", 
"def _BuildNamespaceFolderMap(self, type_folders):\n for folder in type_folders:\n self.namespace_folder_map[folder.local_namespace.namespace] = folder", "def setOpenIDNamespace(self, openid_ns_uri, implicit):\n if isinstance(openid_ns_uri, bytes):\n openid_ns_uri = str(openid_ns_uri, encoding=\"utf-8\")\n if openid_ns_uri not in self.allowed_openid_namespaces:\n raise InvalidOpenIDNamespace(openid_ns_uri)\n\n self.namespaces.addAlias(openid_ns_uri, NULL_NAMESPACE, implicit)\n self._openid_ns_uri = openid_ns_uri", "def explore_type(name, datatype, is_child):\n actual_type = datatype.strip_typedefs()\n if is_child:\n print (\"The type of %s is a typedef of type '%s'.\" %\n (name, str(actual_type)))\n else:\n print (\"The type '%s' is a typedef of type '%s'.\" %\n (name, str(actual_type)))\n\n Explorer.explore_type(name, actual_type, is_child)\n return False", "def getTypeCode(self):\n return _libsbml.ReplacedElement_getTypeCode(self)", "def get_type_label(type_url):\n return type_dict[type_url]", "def _cim_scope_code_type():\n return {\n 'name' : 'cim_scope_code_type',\n 'is_open' : False,\n 'doc' : 'This would cover quality issues with the CIM itself',\n 'members' : [\n ('dataset', None),\n ('software', None),\n ('service', None),\n ('model', None),\n ('modelComponent', None),\n ('simulation', None),\n ('experiment', None),\n ('numericalRequirement', None),\n ('ensemble', None),\n ('file', None),\n ],\n }", "def test_typedef00402m_type_def00402m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00402m/typeDef00402m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00402m/typeDef00402m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def _some_namespaces(self):\n n = Namespace(doc='top')\n n.add_option('aaa', '2011-05-04T15:10:00', 'the a',\n short_form='a',\n from_string_converter=dtu.datetime_from_ISO_string\n )\n n.c = Namespace(doc='c space')\n n.c.add_option('fred', 'stupid', 'husband from Flintstones')\n n.c.add_option('wilma', 'waspish', 'wife from Flintstones')\n n.c.e = Namespace(doc='e space')\n n.c.e.add_option('dwight',\n default=97,\n doc='my uncle')\n n.c.add_option('dwight',\n default=98,\n doc='your uncle')\n n.d = Namespace(doc='d space')\n n.d.add_option('fred', 'crabby', 'male neighbor from I Love Lucy')\n n.d.add_option('ethel', 'silly',\n 'female neighbor from I Love Lucy')\n n.x = Namespace(doc='x space')\n n.x.add_option('size', 100, 'how big in tons', short_form='s')\n n.x.add_option('password', 'secret', 'the password')\n return n", "def test_typedef00501m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00501m/typeDef00501m1.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00501m/typeDef00501m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def make_key(element_name, element_type, namespace):\n # only distinguish 'element' vs other types\n if element_type in ('complexType', 'simpleType'):\n eltype = 'complexType'\n else:\n eltype = element_type\n if eltype not in ('element', 'complexType', 'simpleType'):\n raise RuntimeError(\"Unknown element type %s = %s\" % (element_name, eltype))\n return (element_name, eltype, namespace)", "def test_typedef00901m1_positive(mode, save_output, output_format):\n assert_bindings(\n 
schema=\"sunData/ElemDecl/typeDef/typeDef00901m/typeDef00901m1.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00901m/typeDef00901m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def _add_type(self, production, index, m_type):\n fully_qualified_name = None\n current_namespace = self._get_current_namespace()\n if current_namespace is not None:\n fully_qualified_name = current_namespace.fully_qualified_name()\n namespace_types = self._get_type_or_namespace_from_fully_qualified_name(fully_qualified_name)\n if m_type.name in namespace_types:\n raise ParseError(self.production_to_coord(production, index),\n \"Name '{0}' already exists\".format(m_type.fully_qualified_name()))\n namespace_types[m_type.name] = m_type", "def test_typedef01202m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef01202m/typeDef01202m1.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef01202m/typeDef01202m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def isCountyName(elem):\r\n return (elem.attrib['k'] == \"tiger:county\")", "def typeName (self, typecode):\n if typecode == qmf2.SCHEMA_DATA_VOID: return \"void\"\n elif typecode == qmf2.SCHEMA_DATA_BOOL: return \"bool\"\n elif typecode == qmf2.SCHEMA_DATA_INT: return \"int\"\n elif typecode == qmf2.SCHEMA_DATA_FLOAT: return \"float\"\n elif typecode == qmf2.SCHEMA_DATA_STRING: return \"string\"\n elif typecode == qmf2.SCHEMA_DATA_MAP: return \"map\"\n elif typecode == qmf2.SCHEMA_DATA_LIST: return \"list\"\n elif typecode == qmf2.SCHEMA_DATA_UUID: return \"uuid\"\n else:\n raise ValueError (\"Invalid type code: %s\" % str(typecode))", "def element_type(self):\r\n result = conf.lib.clang_getElementType(self)\r\n if result.kind == TypeKind.INVALID:\r\n raise Exception('Element type not available on this type.')\r\n\r\n return result", "def getURI(self):\n return _libsbml.ISBMLExtensionNamespaces_getURI(self)", "def type(self) -> URIType:\n if self.study_instance_uid is None:\n return URIType.SERVICE\n elif self.series_instance_uid is None:\n return URIType.STUDY\n elif self.sop_instance_uid is None:\n return URIType.SERIES\n elif self.frames is None:\n return URIType.INSTANCE\n return URIType.FRAME", "def exportTypes( c ) :\n assert str(type(c)) == \"<type '_mysql.connection'>\"\n xml = \"\"\n cT = sqlQuery ( c, \"select * from CrisisKind;\" )\n oT = sqlQuery ( c, \"select * from OrganizationKind;\" )\n pT = sqlQuery ( c, \"select * from PersonKind;\" ) \n for i in cT:\n xml += openTagAtt (\"CrisisKind\", \"crisisKindIdent\", i[0])\n xml += openCloseTag (\"Name\", i[1])\n xml += openCloseTag (\"Description\", i[2])\n xml += closeTag (\"CrisisKind\") \n for i in oT:\n xml += openTagAtt (\"OrganizationKind\", \"organizationKindIdent\", i[0])\n xml += openCloseTag (\"Name\", i[1])\n xml += openCloseTag (\"Description\", i[2])\n xml += closeTag (\"OrganizationKind\")\n for i in pT:\n xml += openTagAtt (\"PersonKind\", \"personKindIdent\", i[0])\n xml += openCloseTag (\"Name\", i[1])\n xml += openCloseTag (\"Description\", i[2])\n xml += closeTag (\"PersonKind\")\n assert str ( type ( xml ) ) == \"<type 'str'>\"\n return xml", "def page_namespace(tree):\n root_name = ET.QName(tree.getroot().tag)\n if root_name.localname == \"PcGts\":\n return root_name.namespace\n else:\n raise 
ValueError(\"Not a PAGE tree\")", "def SBMLNamespaces_getSBMLNamespaceURI(*args):\n return _libsbml.SBMLNamespaces_getSBMLNamespaceURI(*args)", "def _get_type_mapping():\n return {\n Box.SPACE_NAME: Box,\n Dict.SPACE_NAME: Dict,\n Discrete.SPACE_NAME: Discrete\n }", "def test_typedef00201m_type_def00201m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00201m/typeDef00201m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00201m/typeDef00201m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def dnaxmlroot(dnaxmlformattype):\n if dnaxmlformattype == 'stn':\n formattext = 'Station File'\n elif dnaxmlformattype == 'msr':\n formattext = 'Measurement File'\n else:\n raise ValueError(\"ValueError: dnaxmlformattype must be either 'stn' or 'msr'\")\n NS = 'http://www.w3.org/2001/XMLSchema-instance'\n location_attribute = '{%s}noNamespaceSchemaLocation' % NS\n dnaxmlroot = ET.Element('DnaXmlFormat', attrib={location_attribute: 'DynaML.xsd'})\n dnaxmlroot.set('type', formattext)\n return dnaxmlroot", "def test_typedef00301m_type_def00301m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00301m/typeDef00301m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00301m/typeDef00301m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def _from_c_repr(c_repr):\n # We create a dummy module with a global variable of the requested type,\n # parse that module, and return the type of the global variable.\n # Include stdint.h to recognize the intX_t typedefs.\n module = parse(\"\"\"\n #include <stdint.h>\n\n {} a;\n \"\"\".format(c_repr))\n return module.global_vars['a'].type", "def type(name):", "def test_value():\n uri = 'http://dbpedia.org/resource/California'\n values = [\n Value.from_uri(uri),\n Value.from_namespace_fragment('dbr', 'California'),\n ]\n for val in values:\n assert val.is_uri\n assert not val.is_literal\n assert val.uri == uri\n assert val.namespace == 'dbr'\n assert val.prefix == 'http://dbpedia.org/resource/'\n assert val.fragment == 'California'\n assert str(val) == val.rdf_format == f'<{uri}>'\n try:\n print(val.literal_value)\n assert False, 'should raise ValueError'\n except ValueError:\n pass\n try:\n print(val.lang)\n assert False, 'should raise ValueError'\n except ValueError:\n pass\n try:\n print(val.datatype)\n assert False, 'should raise ValueError'\n except ValueError:\n pass\n literal = '\"xyz\"@en^^<http://example.org/ns/userDatatype>'\n val = Value.from_literal(literal)\n assert val.literal_value == 'xyz'\n assert val.lang == 'en'\n assert val.datatype.rdf_format == '<http://example.org/ns/userDatatype>'\n assert val.rdf_format == literal\n literals = [\n ('false', 'boolean'),\n ('1', 'integer'),\n ('3.14', 'double'),\n ('\"hello\"', None),\n ]\n for literal, literal_type in literals:\n if literal in ('true', 'false'):\n python_literal = literal_eval(literal.title())\n else:\n python_literal = literal_eval(literal)\n print(literal, literal.title(), python_literal)\n for val in [Value.from_literal(literal), Value.from_python_literal(python_literal)]:\n assert val.literal_value == python_literal\n assert val.lang is None\n if literal_type is None:\n assert val.datatype is None\n else:\n assert val.datatype.rdf_format == 
f'<http://www.w3.org/2001/XMLSchema#{literal_type}>'\n assert val.rdf_format == literal", "def test_typedef01301m1_positive(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef01301m/typeDef01301m1.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef01301m/typeDef01301m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def explore_expr(expr, value, is_child):\n actual_type = value.type.strip_typedefs()\n print (\"The value of '%s' is of type '%s' \"\n \"which is a typedef of type '%s'\" %\n (expr, str(value.type), str(actual_type)))\n\n Explorer.explore_expr(expr, value.cast(actual_type), is_child)\n return False", "def rdf_type(self):\n return self._rdf_type", "def test_typedef00403m_type_def00403m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00403m/typeDef00403m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00403m/typeDef00403m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def test_translate_resourcetypes_type_uid(self):\n self.assertEqual(\"nodes/007ff4e5-fe72-4c4b-b858-4c5f37dff946\",\n util.translate_resourcetypes(\"/nodes/007ff4e5-fe72-4c4b-b858-4c5f37dff946/\"))\n self.assertEqual(\"nodes/007ff4e5-fe72-4c4b-b858-4c5f37dff946\",\n util.translate_resourcetypes(\"/nodes/007FF4E5-FE72-4C4B-B858-4C5F37DFF946\"))", "def page_namespace(tree):\n root_name = ET.QName(tree.getroot().tag)\n if root_name.localname == 'PcGts':\n return root_name.namespace\n else:\n raise ValueError('Not a PAGE tree')", "def getURI(self):\n return _libsbml.SBMLNamespaces_getURI(self)", "def test_typedef00401m_type_def00401m1_p(mode, save_output, output_format):\n assert_bindings(\n schema=\"sunData/ElemDecl/typeDef/typeDef00401m/typeDef00401m.xsd\",\n instance=\"sunData/ElemDecl/typeDef/typeDef00401m/typeDef00401m1_p.xml\",\n class_name=\"Root\",\n version=\"1.1\",\n mode=mode,\n save_output=save_output,\n output_format=output_format,\n structure_style=\"filenames\",\n )", "def xsd_type(dtype):\n return XSD_TYPE_MAP.get(dtype,'string')" ]
[ "0.66442066", "0.5593534", "0.5443724", "0.5412149", "0.5365882", "0.5329653", "0.5311929", "0.5237586", "0.5178215", "0.5165827", "0.5055696", "0.5044016", "0.5007665", "0.4926739", "0.48959085", "0.48641986", "0.48631665", "0.4855509", "0.48434836", "0.48183277", "0.4792356", "0.47862798", "0.47404408", "0.47294393", "0.4723793", "0.47111255", "0.4702394", "0.46919808", "0.46903983", "0.46584937", "0.46271244", "0.46186867", "0.4617531", "0.4604043", "0.45993757", "0.45985585", "0.4591038", "0.4586497", "0.45848095", "0.45825526", "0.45807338", "0.4573924", "0.45574927", "0.45526972", "0.45472842", "0.45332465", "0.4530956", "0.45251143", "0.45243555", "0.45225555", "0.45212564", "0.45162475", "0.45096263", "0.44960347", "0.4493138", "0.44918758", "0.44866842", "0.44828385", "0.44737914", "0.4473183", "0.4472023", "0.44692904", "0.44670665", "0.44466957", "0.44392848", "0.44391468", "0.44376433", "0.4437629", "0.44349527", "0.44334894", "0.4433334", "0.44330135", "0.44300586", "0.44294038", "0.44290915", "0.44268504", "0.442076", "0.4419924", "0.4417466", "0.44173992", "0.43981576", "0.4397718", "0.43933913", "0.43869466", "0.4383478", "0.43826568", "0.43791485", "0.4378129", "0.43760198", "0.43731675", "0.43726635", "0.43709844", "0.43701172", "0.43693835", "0.4368849", "0.43682677", "0.43614888", "0.43580577", "0.43551713", "0.43525833" ]
0.6797826
0
Retrieves a prefix/namespace mapping. namespaceURI namespace
def _getPrefix(self, namespaceURI):
    prefixDict = self._getPrefixDict()
    if prefixDict.has_key(namespaceURI):
        prefix = prefixDict[namespaceURI]
    else:
        prefix = 'ns1'
        while prefix in prefixDict.values():
            prefix = 'ns%d' %int(prefix[-1]) + 1
        prefixDict[namespaceURI] = prefix
    return prefix
{ "objective": { "self": [], "paired": [], "triplet": [ [ "query", "document", "negatives" ] ] } }
[ "def prefix_to_ns(self, prefix):\n defin = self.module.i_ctx.get_module(\n self.module.i_prefixes[prefix][0])\n return defin.search_one(\"namespace\").arg", "def get_namespace(self, prefix):\n try:\n return self.parser.namespaces[prefix]\n except KeyError as err:\n raise self.error('FONS0004', 'No namespace found for prefix %s' % str(err))", "def namespace_for(uri: Union[URIRef, Namespace, str]) -> str:\n uri = str(uri)\n if uri not in namespaces.values():\n namespaces[AnonNS().ns] = uri\n return [k for k, v in namespaces.items() if uri == v][0]", "def getNamespacePrefix(self, namespace):\n return self.namespaceTable.get(namespace, None)", "def ns_prefix_dict(g):\n return {ns: prefix.toPython() for (ns, prefix) in g.namespaces()}", "def prefixForNamespace (self, namespace):\n pfxs = self.__inScopePrefixes.get(namespace)\n if pfxs:\n return next(iter(pfxs))\n return None", "def getNamespacePrefixDict(xmlString):\n \n nss = {} \n defCnt = 0\n matches = re.findall(r'\\s+xmlns:?(\\w*?)\\s*=\\s*[\\'\"](.*?)[\\'\"]', xmlString)\n for match in matches:\n prefix = match[0]; ns = match[1]\n if prefix == '':\n defCnt += 1\n prefix = '_' * defCnt\n nss[prefix] = ns\n return nss", "def getPrefix(self, *args):\n return _libsbml.XMLNamespaces_getPrefix(self, *args)", "def getNamespacePrefix(self, *args):\n return _libsbml.XMLToken_getNamespacePrefix(self, *args)", "def namespace_map(self, target):\n self._check_target(target)\n return target.namespace_map or self._default_namespace_map", "def namespaces(self):\n return [self._namespace_prefix]", "def from_ns(match):\n return ns.get(match.group(1), match.group())", "def GetNamespace(self, namespace_name):\n return self.type_namespaces_map.get(namespace_name, None)", "def _get_prefixes(self):\n return self._dispatch_json(\"get\", self._db_base(\"prefixes\")).get(\"@context\")", "def namespace(self):\n return VarLookupDict(self._namespaces)", "def get_prefixes(context: str = \"go\"):\n context = load_context(context)\n extended_prefix_map = context.as_extended_prefix_map()\n converter = Converter.from_extended_prefix_map(extended_prefix_map)\n cmaps = converter.prefix_map\n # hacky solution to: https://github.com/geneontology/go-site/issues/2000\n cmap_remapped = remap_prefixes(cmaps)\n\n return cmap_remapped", "def get_namespace(self, namespace, lowercase=True, trim_namespace=True):\n\t\treturn self.get_namespace_view(namespace, lowercase, trim_namespace).copy()", "def getNamespaceIndexByPrefix(self, *args):\n return _libsbml.XMLToken_getNamespaceIndexByPrefix(self, *args)", "def xpathNsLookup(self, prefix):\n ret = libxml2mod.xmlXPathNsLookup(self._o, prefix)\n return ret", "def namespace(self, namespace):\n return self.client.call('GET',\n self.name, params={'namespace': namespace})", "def _getnamespaces(cls):\n return \" \".join(Kmlable._namespaces)", "def SBMLNamespaces_getSBMLNamespaceURI(*args):\n return _libsbml.SBMLNamespaces_getSBMLNamespaceURI(*args)", "def getNamespaces(self, *args):\n return _libsbml.SBMLNamespaces_getNamespaces(self, *args)", "def getIndexByPrefix(self, *args):\n return _libsbml.XMLNamespaces_getIndexByPrefix(self, *args)", "def LookupNamespace(self, prefix):\n ret = libxml2mod.xmlTextReaderLookupNamespace(self._o, prefix)\n return ret", "def namespace(self):\n return self.__key.namespace()", "def qname_to_prefixed(qname, namespaces):\n if not qname:\n return qname\n\n namespace = get_namespace(qname)\n for prefix, uri in sorted(filter(lambda x: x[1] == namespace, namespaces.items()), reverse=True):\n if not uri:\n return 
'%s:%s' % (prefix, qname) if prefix else qname\n elif prefix:\n return qname.replace('{%s}' % uri, '%s:' % prefix)\n else:\n return qname.replace('{%s}' % uri, '')\n else:\n return qname", "def namespaces(self, psuedo=True):\n if self._namespaces == None:\n result = self.call({'action': 'query',\n 'meta': 'siteinfo',\n 'siprop': 'namespaces'})\n self._namespaces = {}\n self._psuedo_namespaces = {}\n for nsid in result['query']['namespaces']:\n if int(nsid) >= 0:\n self._namespaces[int(nsid)] = \\\n result['query']['namespaces'][nsid]['*']\n else:\n self._psuedo_namespaces[int(nsid)] = \\\n result['query']['namespaces'][nsid]['*']\n if psuedo:\n retval = {}\n retval.update(self._namespaces)\n retval.update(self._psuedo_namespaces)\n return retval\n else:\n return self._namespaces", "def get_ns_dict(xml):\n \n nss = {} \n def_cnt = 0\n matches = re.findall(r'\\s+xmlns:?(\\w*?)\\s*=\\s*[\\'\"](.*?)[\\'\"]', xml)\n for match in matches:\n prefix = match[0]; ns = match[1]\n if prefix == '':\n def_cnt += 1\n prefix = '_' * def_cnt\n nss[prefix] = ns\n return nss", "def prefixes(self):\n # a new OntCuries-like object that wraps NamespaceManager\n # and can leverage its trie\n self.namespace_manager\n raise NotImplementedError('yet')", "def getSBMLNamespaceURI(*args):\n return _libsbml.SBMLNamespaces_getSBMLNamespaceURI(*args)", "def test_get_namespaces_names(self):\n pass", "def _index_and_mapping(self, namespace):\n index, doc_type = namespace.split('.', 1)\n return index.lower(), doc_type", "def address_space_prefixes(self) -> pulumi.Output[Sequence[str]]:\n return pulumi.get(self, \"address_space_prefixes\")", "def getNamespaceURI(self, *args):\n return _libsbml.XMLToken_getNamespaceURI(self, *args)", "def get_namespaces(self, label_selector=None):\n return self.core_client.list_namespace(label_selector=label_selector)", "def patch_well_known_namespaces(etree_module):\n etree_module._namespace_map.update({\n \"http://www.w3.org/1999/02/22-rdf-syntax-ns#\": \"rdf\", \n \"http://purl.org/rss/1.0/\": \"rss\", \n \"http://purl.org/rss/1.0/modules/taxonomy/\": \"taxo\", \n \"http://purl.org/dc/elements/1.1/\": \"dc\", \n \"http://purl.org/rss/1.0/modules/syndication/\": \"syn\", \n \"http://www.w3.org/2003/01/geo/wgs84_pos#\": \"geo\"})", "def get_namespace(self) -> str:\n return self._namespace", "def getNamespaces(self):\n return _libsbml.SBase_getNamespaces(self)", "def declareNamespace (self, namespace, prefix=None, add_to_map=False):\n if not isinstance(namespace, pyxb.namespace.Namespace):\n raise pyxb.UsageError('declareNamespace: must be given a namespace instance')\n if namespace.isAbsentNamespace():\n raise pyxb.UsageError('declareNamespace: namespace must not be an absent namespace')\n if prefix is None:\n prefix = namespace.prefix()\n if prefix is None:\n pfxs = self.__inScopePrefixes.get(namespace)\n if pfxs:\n prefix = next(iter(pfxs))\n while prefix is None:\n self.__namespacePrefixCounter += 1\n candidate_prefix = 'ns%d' % (self.__namespacePrefixCounter,)\n if not (candidate_prefix in self.__inScopeNamespaces):\n prefix = candidate_prefix\n ns = self.__inScopePrefixes.get(prefix)\n if ns:\n if ns != namespace:\n raise pyxb.LogicError('Prefix %s is already in use for %s' % (prefix, ns))\n return prefix\n if not self.__mutableInScopeNamespaces:\n self.__clonePrefixMap()\n self.__mutableInScopeNamespaces = True\n self.__addPrefixMap(prefix, namespace)\n return prefix", "def get_namespace(key):\n\tif key not in REGISTRY:\n\t\traise KeyError(\"key:{0} does not 
exist\".format(key))\n\n\treturn REGISTRY[key]", "def _get_xpath_namespaces(self, namespaces: Optional[NamespacesType] = None) \\\n -> Dict[str, str]:\n if namespaces is None:\n namespaces = {k: v for k, v in self.namespaces.items() if k}\n namespaces[''] = self.xpath_default_namespace\n elif '' not in namespaces:\n namespaces[''] = self.xpath_default_namespace\n\n xpath_namespaces: Dict[str, str] = XPath2Parser.DEFAULT_NAMESPACES.copy()\n xpath_namespaces.update(namespaces)\n return xpath_namespaces", "def _getPrefixDict(self):\r\n if not hasattr(self, '_prefixDict'):\r\n self.__prefixDict = {}\r\n return self.__prefixDict", "def GetNamespaces(self):\n return list(self.type_namespaces_map.values())", "def namespace(self) -> str:\n return pulumi.get(self, \"namespace\")", "def getIndex(self, *args):\n return _libsbml.XMLNamespaces_getIndex(self, *args)", "def namespace(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> pulumi.Output[Optional[str]]:\n return pulumi.get(self, \"namespace\")", "def test_get_namespace(self):\n pass", "def namespaces(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:\n return pulumi.get(self, \"namespaces\")", "def namespace(self):\n return self._namespace", "def namespace(self):\n assert self._namespace\n return self._namespace", "def namespace_to_dict(n):\n assert isinstance(n, SimpleNamespace)\n return _namespace_to_dict_util(n)", "def _DefaultNamespace():\n return namespace_manager.get_namespace()", "def _get_xml_namespace(root_tag):\n m = re.match(r\"\\{.*\\}\", root_tag)\n return m.group(0) if m else \"\"", "def get_identifiers_org_namespace(prefix):\n try:\n return get_identifiers_org_namespaces()[prefix]\n except KeyError:\n raise InvalidIdentifiersOrgUri('`{}` is not a valid prefix of a Identifiers.org namespace.'.format(prefix))", "def prefixed_to_qname(name, namespaces):\n if not name or name[0] == '{':\n return name\n\n try:\n prefix, name = name.split(':')\n except ValueError:\n if ':' in name:\n raise XMLSchemaValueError(\"wrong format for reference name %r\" % name)\n try:\n uri = namespaces['']\n except KeyError:\n return name\n else:\n return '{%s}%s' % (uri, name) if uri else name\n else:\n if not prefix or not name:\n raise XMLSchemaValueError(\"wrong format for reference name %r\" % name)\n try:\n uri = namespaces[prefix]\n except KeyError:\n raise XMLSchemaValueError(\"prefix %r not found in namespace map\" % prefix)\n else:\n return '{%s}%s' % (uri, name) if uri else name", "def _load_namespaces(self):\n nsdocs = self._docset.get_namespaces()\n for nsdoc in nsdocs:\n nsobj = Namespace(nsdoc)\n self._docmap[nsdoc] = nsobj\n self._namespaces.add(nsobj)", "def get_uri_prefix() -> str:\n Config.__get()\n assert Config.__config is not None\n return Config.__config.get(\"wsgi\", \"uri_prefix\").strip()", "def getURI(self, *args):\n return _libsbml.XMLNamespaces_getURI(self, *args)", "def getNamespace(self):\n pass;", "def get_constants(prefix):\n return dict( (getattr(socket, n), n)\n for n in dir(socket)\n if n.startswith(prefix)\n )", "def namespace(self) -> Optional[str]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> Optional[str]:\n return pulumi.get(self, 
\"namespace\")", "def namespace(self) -> Optional[str]:\n return pulumi.get(self, \"namespace\")", "def _load_namespaces(self, result):\n self._namespaces = {}\n\n for namespace in result[\"query\"][\"namespaces\"].values():\n ns_id = namespace[\"id\"]\n name = namespace[\"*\"]\n try:\n canonical = namespace[\"canonical\"]\n except KeyError:\n self._namespaces[ns_id] = [name]\n else:\n if name != canonical:\n self._namespaces[ns_id] = [name, canonical]\n else:\n self._namespaces[ns_id] = [name]\n\n for namespace in result[\"query\"][\"namespacealiases\"]:\n ns_id = namespace[\"id\"]\n alias = namespace[\"*\"]\n self._namespaces[ns_id].append(alias)", "def namespaces(self):\n if not self._namespaces:\n self.update_namespaces_info()\n\n return self._namespaces", "def get(self, *args):\n return _libsbml.ListWrapperSBMLNamespaces_get(self, *args)", "def namespace(self) -> str:\n return self._namespace", "def get_namespaces():\r\n\r\n print 'Getting namespaces'\r\n tree = etree.parse('http://lesswrong.wikia.com/wiki/Special:AllPages', parser)\r\n options = tree.xpath('//select[@id=\"namespace\"]/option')\r\n namespaces = [option.get('value') for option in options]\r\n pprint(namespaces)\r\n return namespaces", "def getPrefix(self):\n return _libsbml.XMLTriple_getPrefix(self)", "def get_prefixes(buildout):\n\n prefixes = parse_list(buildout.get('prefixes', ''))\n return [os.path.abspath(k) for k in prefixes if os.path.exists(k)]", "def remap_prefixes(cmap):\n cmap[\"MGI\"] = \"http://identifiers.org/mgi/MGI:\"\n cmap[\"WB\"] = \"http://identifiers.org/wormbase/\"\n return cmap", "def getNamespaces(self):\n return _libsbml.SBMLDocument_getNamespaces(self)", "def namespaces(self) -> NamespacesType:\n return self.schema.namespaces", "def getNamespaces(self):\n return _libsbml.XMLToken_getNamespaces(self)", "def namespace (self) :\n\n return self.__namespace__", "def namespace(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"namespace\")", "def namespace(self) -> pulumi.Input[str]:\n return pulumi.get(self, \"namespace\")", "def getURI(self):\n return _libsbml.SBMLNamespaces_getURI(self)", "def prefix(self):\n return self[\"prefix\"]", "def prefix(self):\n return self[\"prefix\"]", "def xmlrpc_namespace():", "def _get_namespace(self, data):\n ns_name = data['filename'].namespace\n try:\n return models.Namespace.objects.get(name=ns_name)\n except models.Namespace.DoesNotExist:\n raise exceptions.ValidationError(\n f'Namespace \"{ns_name}\" does not exist.'\n )", "def getPrefix(self):\n return _libsbml.SBase_getPrefix(self)", "def getPrefix(self):\n raise NotImplementedError", "def getNamespaceIndex(self, *args):\n return _libsbml.XMLToken_getNamespaceIndex(self, *args)", "def namespace(self, namespace, lowercase=True, trim_namespace=True):\n\t\treturn 
Config(\n\t\t\t\tself.root_path,\n\t\t\t\tself.get_namespace_view(namespace, lowercase, trim_namespace)\n\t\t\t)", "def get_constants(prefix):\n return {\n getattr(socket, n): n\n for n in dir(socket)\n if n.startswith(prefix)\n }" ]
[ "0.74894905", "0.723001", "0.7178781", "0.7092518", "0.7036941", "0.6799343", "0.67157125", "0.67054284", "0.66477394", "0.6579663", "0.64730036", "0.646861", "0.6420464", "0.64164484", "0.64045894", "0.63432497", "0.63370234", "0.63120365", "0.63070714", "0.62537974", "0.62380385", "0.6221364", "0.621491", "0.6214344", "0.62062514", "0.6199084", "0.61269677", "0.6110802", "0.60916305", "0.60792", "0.60053015", "0.59371406", "0.58909696", "0.5887794", "0.5874347", "0.58507776", "0.5849891", "0.5849509", "0.5849272", "0.58485943", "0.5837763", "0.5827174", "0.58189166", "0.5805659", "0.5804454", "0.58023566", "0.5791487", "0.5791487", "0.5791487", "0.5791487", "0.5791487", "0.5771173", "0.5769542", "0.576573", "0.5753271", "0.5739721", "0.573836", "0.5736382", "0.57145244", "0.5693619", "0.568704", "0.5667896", "0.5665741", "0.5665281", "0.5652284", "0.564829", "0.564829", "0.564829", "0.56460255", "0.5613991", "0.5591623", "0.5590134", "0.5584967", "0.55844194", "0.5581965", "0.5579256", "0.5569686", "0.5567639", "0.55638665", "0.5553283", "0.55432224", "0.55432224", "0.55432224", "0.55432224", "0.55432224", "0.55432224", "0.55432224", "0.55432224", "0.55432224", "0.55432224", "0.5527904", "0.55248153", "0.55248153", "0.5518788", "0.55155444", "0.5504374", "0.5495718", "0.5474006", "0.54726416", "0.5470884" ]
0.78372264
0